ceph-main/src/include/win32/win32_errno.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
// For error codes that the Windows SDK defines but the MinGW headers do not,
// we preserve the SDK values. For the others, we use numbers greater than 256
// to avoid unintended overlaps.
#ifndef WIN32_ERRNO_H
#define WIN32_ERRNO_H 1
#include <errno.h>
#include "include/int_types.h"
#ifndef EBADMSG
#define EBADMSG 104
#endif
#ifndef ENODATA
#define ENODATA 120
#endif
#ifndef ENOLINK
#define ENOLINK 121
#endif
#ifndef ENOMSG
#define ENOMSG 122
#endif
#ifndef ENOTRECOVERABLE
#define ENOTRECOVERABLE 127
#endif
#ifndef ETIME
#define ETIME 137
#endif
#ifndef ETXTBSY
#define ETXTBSY 139
#endif
#define ESTALE 256
#define EREMOTEIO 257
#ifndef EBADE
#define EBADE 258
#endif
#define EUCLEAN 259
#define EREMCHG 260
#define EKEYREJECTED 261
#define EREMOTE 262
// Not used at the moment. Full coverage ensures that remote errors will be
// converted and handled properly.
#define EADV 263
#define EBADFD 264
#define EBADR 265
#define EBADRQC 266
#define EBADSLT 267
#define EBFONT 268
#define ECHRNG 269
#define ECOMM 270
#define EDOTDOT 271
#define EHOSTDOWN 272
#define EHWPOISON 273
// Defined by Boost.
#ifndef EIDRM
#define EIDRM 274
#endif
#define EISNAM 275
#define EKEYEXPIRED 276
#define EKEYREVOKED 277
#define EL2HLT 278
#define EL2NSYNC 279
#define EL3HLT 280
#define EL3RST 281
#define ELIBACC 282
#define ELIBBAD 283
#define ELIBEXEC 284
#define ELIBMAX 285
#define ELIBSCN 286
#define ELNRNG 287
#define EMEDIUMTYPE 288
#define EMULTIHOP 289
#define ENAVAIL 290
#define ENOANO 291
#define ENOCSI 292
#define ENOKEY 293
#define ENOMEDIUM 294
#define ENONET 295
#define ENOPKG 296
#ifndef ENOSR
#define ENOSR 297
#endif
#ifndef ENOSTR
#define ENOSTR 298
#endif
#define ENOTNAM 299
#define ENOTUNIQ 300
#define EPFNOSUPPORT 301
#define ERFKILL 302
#define ESOCKTNOSUPPORT 303
#define ESRMNT 304
#define ESTRPIPE 305
#define ETOOMANYREFS 306
#define EUNATCH 307
#define EUSERS 308
#define EXFULL 309
#define ENOTBLK 310
#ifndef EDQUOT
#define EDQUOT 311
#endif
#define ESHUTDOWN 312
#ifdef __cplusplus
extern "C" {
#endif
__s32 wsae_to_errno(__s32 r);
__u32 errno_to_ntstatus(__s32 r);
__u32 cephfs_errno_to_ntstatus_map(int cephfs_errno);
#ifdef __cplusplus
}
#endif
#endif // WIN32_ERRNO_H
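A minimal usage sketch for the helpers declared above: translate a Winsock error code into an errno value, then into an NTSTATUS code for the Windows layers. The include paths and the chosen error constant are illustrative; the helpers themselves are implemented elsewhere in the Windows compatibility sources.

/* Sketch only: error handling reduced to the bare minimum. */
#include <winsock2.h>
#include "win32_errno.h"

static __u32 example_last_error_to_ntstatus(void)
{
  /* e.g. after a failed connect(); expected to yield ECONNREFUSED */
  __s32 err = wsae_to_errno(WSAECONNREFUSED);
  /* Ceph-specific values (> 256, such as ESTALE) take the same route. */
  return errno_to_ntstatus(err);
}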
ceph-main/src/include/win32/winsock_compat.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2019 SUSE LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef WINSOCK_COMPAT_H
#define WINSOCK_COMPAT_H 1
#include "winsock_wrapper.h"
#ifndef poll
#define poll WSAPoll
#endif
// afunix.h is available starting with Windows SDK 17063. However, it hasn't
// been picked up by MinGW yet, which is why we define sockaddr_un here.
#ifndef _AFUNIX_
#define UNIX_PATH_MAX 108
typedef struct sockaddr_un
{
ADDRESS_FAMILY sun_family; /* AF_UNIX */
char sun_path[UNIX_PATH_MAX]; /* pathname */
} SOCKADDR_UN, *PSOCKADDR_UN;
#define SIO_AF_UNIX_GETPEERPID _WSAIOR(IOC_VENDOR, 256)
#endif /* _AFUNIX_ */
#endif /* WINSOCK_COMPAT_H */
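A sketch of how the fallback definition above can be used to reach a local AF_UNIX socket on Windows. WSAStartup() is assumed to have been called already, and the socket path is purely illustrative.

#include "winsock_compat.h"
#include <string.h>

static SOCKET connect_unix(const char *path)
{
  struct sockaddr_un addr;
  memset(&addr, 0, sizeof(addr));
  addr.sun_family = AF_UNIX;
  strncpy(addr.sun_path, path, UNIX_PATH_MAX - 1);

  SOCKET s = socket(AF_UNIX, SOCK_STREAM, 0);
  if (s == INVALID_SOCKET)
    return INVALID_SOCKET;
  if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) == SOCKET_ERROR) {
    closesocket(s);
    return INVALID_SOCKET;
  }
  return s;
}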
ceph-main/src/include/win32/winsock_wrapper.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2020 SUSE LLC
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef WINSOCK_WRAPPER_H
#define WINSOCK_WRAPPER_H 1
#ifdef __cplusplus
// Boost complains if winsock2.h (or windows.h) is included before asio.hpp.
#include <boost/asio.hpp>
#endif
#include <winsock2.h>
#include <ws2ipdef.h>
#include <ws2tcpip.h>
#endif /* WINSOCK_WRAPPER_H */
ceph-main/src/include/win32/arpa/inet.h
#include "winsock_compat.h"
ceph-main/src/include/win32/netinet/in.h
#include "winsock_compat.h"
ceph-main/src/include/win32/netinet/ip.h
(empty file)
ceph-main/src/include/win32/netinet/tcp.h
(empty file)
ceph-main/src/include/win32/sys/errno.h
#include <errno.h>
ceph-main/src/include/win32/sys/select.h
(empty file)
ceph-main/src/include/win32/sys/socket.h
#include "winsock_compat.h"
ceph-main/src/include/win32/sys/statvfs.h
#ifndef _SYS_STATVFS_H
#define _SYS_STATVFS_H 1
typedef unsigned __int64 fsfilcnt64_t;
typedef unsigned __int64 fsblkcnt64_t;
typedef unsigned __int64 fsblkcnt_t;
struct statvfs
{
unsigned long int f_bsize;
unsigned long int f_frsize;
fsblkcnt64_t f_blocks;
fsblkcnt64_t f_bfree;
fsblkcnt64_t f_bavail;
fsfilcnt64_t f_files;
fsfilcnt64_t f_ffree;
fsfilcnt64_t f_favail;
unsigned long int f_fsid;
unsigned long int f_flag;
unsigned long int f_namemax;
int __f_spare[6];
};
struct flock {
short l_type;
short l_whence;
off_t l_start;
off_t l_len;
pid_t l_pid;
};
#define F_RDLCK 0
#define F_WRLCK 1
#define F_UNLCK 2
#define F_SETLK 6
#endif /* _SYS_STATVFS_H */
ceph-main/src/include/win32/sys/uio.h
#include "include/compat.h"
ceph-main/src/include/win32/sys/un.h
#include "include/win32/winsock_compat.h"
ceph-main/src/jaegertracing/config.yml
disabled: false
reporter:
  logSpans: true
sampler:
  type: const
  param: 1
ceph-main/src/java/java/com/ceph/crush/Bucket.java
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.crush;
public class Bucket {
private String type;
private String name;
public Bucket(String type, String name) {
this.type = type;
this.name = name;
}
public String getType() {
return type;
}
public String getName() {
return name;
}
public String toString() {
return "bucket[" + type + "," + name + "]";
}
}
ceph-main/src/java/java/com/ceph/fs/CephAlreadyMountedException.java
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import java.io.IOException;
/**
* Ceph is already mounted.
*/
public class CephAlreadyMountedException extends IOException {
private static final long serialVersionUID = 1L;
/**
* Construct CephAlreadyMountedException.
*/
public CephAlreadyMountedException() {
super();
}
/**
* Construct CephAlreadyMountedException with message.
*/
public CephAlreadyMountedException(String s) {
super(s);
}
}
ceph-main/src/java/java/com/ceph/fs/CephFileAlreadyExistsException.java
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import java.io.IOException;
/**
* Ceph file/directory already exists.
*/
public class CephFileAlreadyExistsException extends IOException {
private static final long serialVersionUID = 1L;
/**
* Construct CephFileAlreadyExistsException.
*/
public CephFileAlreadyExistsException() {
super();
}
/**
* Construct CephFileAlreadyExistsException with message.
*/
public CephFileAlreadyExistsException(String s) {
super(s);
}
}
ceph-main/src/java/java/com/ceph/fs/CephFileExtent.java
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import java.util.Arrays;
/**
* Holds information about a file extent in CephFS.
*/
public class CephFileExtent {
private long offset;
private long length;
private int[] osds;
CephFileExtent(long offset, long length, int[] osds) {
this.offset = offset;
this.length = length;
this.osds = osds;
}
/**
* Get starting offset of extent.
*/
public long getOffset() {
return offset;
}
/**
* Get length of extent.
*/
public long getLength() {
return length;
}
/**
* Get list of OSDs with this extent.
*/
public int[] getOSDs() {
return osds;
}
/**
* Pretty print.
*/
public String toString() {
return "extent[" + offset + "," + length + ","
+ Arrays.toString(osds) + "]";
}
}
ceph-main/src/java/java/com/ceph/fs/CephMount.java
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.net.InetAddress;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.lang.String;
import com.ceph.crush.Bucket;
public class CephMount {
/*
* Set via JNI callback in native_ceph_create
*
* Do not touch!
*/
private long instance_ptr;
/*
* Flags for open().
*
* Must be synchronized with JNI if changed.
*/
public static final int O_RDONLY = 1;
public static final int O_RDWR = 2;
public static final int O_APPEND = 4;
public static final int O_CREAT = 8;
public static final int O_TRUNC = 16;
public static final int O_EXCL = 32;
public static final int O_WRONLY = 64;
public static final int O_DIRECTORY = 128;
/*
* Whence flags for seek().
*
* Must be synchronized with JNI if changed.
*/
public static final int SEEK_SET = 1;
public static final int SEEK_CUR = 2;
public static final int SEEK_END = 3;
/*
* Attribute flags for setattr().
*
* Must be synchronized with JNI if changed.
*/
public static final int SETATTR_MODE = 1;
public static final int SETATTR_UID = 2;
public static final int SETATTR_GID = 4;
public static final int SETATTR_MTIME = 8;
public static final int SETATTR_ATIME = 16;
/*
* Flags for setxattr();
*
* Must be synchronized with JNI if changed.
*/
public static final int XATTR_CREATE = 1;
public static final int XATTR_REPLACE = 2;
public static final int XATTR_NONE = 3;
/*
* Flags for flock();
*
* Must be synchronized with JNI if changed.
*/
public static final int LOCK_SH = 1;
public static final int LOCK_EX = 2;
public static final int LOCK_NB = 4;
public static final int LOCK_UN = 8;
/*
* This is run by the class loader and will report early any problems
* finding or linking in the shared JNI library.
*/
static {
loadLibrary();
}
static synchronized void loadLibrary() {
CephNativeLoader.getInstance().loadLibrary();
}
/*
* Package-private: called from CephNativeLoader
*/
static native void native_initialize();
/*
* RW lock used for fine grained synchronization to native
*/
private final ReentrantReadWriteLock rwlock = new ReentrantReadWriteLock();
private final Lock rlock = rwlock.readLock();
private final Lock wlock = rwlock.writeLock();
/*
* Controls clean-up synchronization between the constructor and finalize().
* If native_ceph_create fails, then we want a call to finalize() to not
* attempt to clean-up native context, because there is none.
*/
private boolean initialized = false;
/*
* Try to clean-up. First, unmount() will catch users who forget to do the
* unmount manually. Second, release() will destroy the entire context. It
* is safe to call release after a failure in unmount.
*/
protected void finalize() throws Throwable {
if (initialized) {
try {
unmount();
} catch (Exception e) {}
try {
native_ceph_release(instance_ptr);
} catch (Exception e) {}
}
super.finalize();
}
/**
* Create a new CephMount with specific client id.
*
* @param id client id.
*/
public CephMount(String id) {
native_ceph_create(this, id);
initialized = true;
}
private static native int native_ceph_create(CephMount mount, String id);
/**
* Create a new CephMount with default client id.
*/
public CephMount() {
this(null);
}
/**
* Activate the mount with a given root path.
*
* @param root The path to use as the root (pass null for "/").
*/
public void mount(String root) {
wlock.lock();
try {
native_ceph_mount(instance_ptr, root);
} finally {
wlock.unlock();
}
}
private static native int native_ceph_mount(long mountp, String root);
/**
* Deactivate the mount.
*
* The mount can be reactivated using mount(). Configuration parameters
* previously set are not reset.
*/
public void unmount() {
wlock.lock();
try {
native_ceph_unmount(instance_ptr);
} finally {
wlock.unlock();
}
}
private static native int native_ceph_unmount(long mountp);
/*
* Private access to low-level ceph_release.
*/
private static native int native_ceph_release(long mountp);
/**
* Load configuration from a file.
*
* @param path The path to the configuration file.
*/
public void conf_read_file(String path) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_conf_read_file(instance_ptr, path);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_conf_read_file(long mountp, String path);
/**
* Set the value of a configuration option.
*
* @param option The configuration option to modify.
* @param value The new value of the option.
*/
public void conf_set(String option, String value) {
rlock.lock();
try {
native_ceph_conf_set(instance_ptr, option, value);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_conf_set(long mountp, String option, String value);
/**
* Get the value of a configuration option.
*
* @param option The name of the configuration option.
* @return The value of the option or null if option not found
*/
public String conf_get(String option) {
rlock.lock();
try {
return native_ceph_conf_get(instance_ptr, option);
} finally {
rlock.unlock();
}
}
private static native String native_ceph_conf_get(long mountp, String option);
/**
* Get file system status.
*
* @param path Path to file in file system.
* @param statvfs CephStatVFS structure to hold status.
*/
public void statfs(String path, CephStatVFS statvfs) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_statfs(instance_ptr, path, statvfs);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_statfs(long mountp, String path, CephStatVFS statvfs);
/**
* Get the current working directory.
*
* @return The current working directory in Ceph.
*/
public String getcwd() {
rlock.lock();
try {
return native_ceph_getcwd(instance_ptr);
} finally {
rlock.unlock();
}
}
private static native String native_ceph_getcwd(long mountp);
/**
* Set the current working directory.
*
* @param path The directory set as the cwd.
*/
public void chdir(String path) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_chdir(instance_ptr, path);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_chdir(long mountp, String cwd);
/**
* List the contents of a directory.
*
* @param dir The directory.
* @return List of files and directories excluding "." and "..".
*/
public String[] listdir(String dir) throws FileNotFoundException {
rlock.lock();
try {
return native_ceph_listdir(instance_ptr, dir);
} finally {
rlock.unlock();
}
}
private static native String[] native_ceph_listdir(long mountp, String path);
/**
* Create a hard link to an existing file.
*
* @param oldpath The target path of the link.
* @param newpath The name of the link.
*/
public void link(String oldpath, String newpath) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_link(instance_ptr, oldpath, newpath);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_link(long mountp, String existing, String newname);
/**
* Unlink/delete a name from the file system.
*
* @param path The name to unlink/delete.
*/
public void unlink(String path) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_unlink(instance_ptr, path);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_unlink(long mountp, String path);
/**
* Rename a file or directory.
*
* @param from The current path.
* @param to The new path.
*/
public void rename(String from, String to) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_rename(instance_ptr, from, to);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_rename(long mountp, String from, String to);
/**
* Create a directory.
*
* @param path The directory to create.
* @param mode The mode of the new directory.
*/
public void mkdir(String path, int mode) {
rlock.lock();
try {
native_ceph_mkdir(instance_ptr, path, mode);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_mkdir(long mountp, String path, int mode);
/**
* Create a directory and all parents.
*
* @param path The directory to create.
* @param mode The mode of the new directory.
*/
public void mkdirs(String path, int mode) throws IOException {
rlock.lock();
try {
native_ceph_mkdirs(instance_ptr, path, mode);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_mkdirs(long mountp, String path, int mode);
/**
* Delete a directory.
*
* @param path The directory to delete.
*/
public void rmdir(String path) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_rmdir(instance_ptr, path);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_rmdir(long mountp, String path);
/**
* Read the value of a symbolic link.
*/
public String readlink(String path) throws FileNotFoundException {
rlock.lock();
try {
return native_ceph_readlink(instance_ptr, path);
} finally {
rlock.unlock();
}
}
private static native String native_ceph_readlink(long mountp, String path);
/**
* Create a symbolic link.
*
* @param oldpath Target of the symbolic link.
* @param newpath Name of the link.
*/
public void symlink(String oldpath, String newpath) {
rlock.lock();
try {
native_ceph_symlink(instance_ptr, oldpath, newpath);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_symlink(long mountp, String existing, String newname);
/**
* Get file status.
*
* @param path Path of file to stat.
* @param stat CephStat structure to hold file status.
*/
public void stat(String path, CephStat stat) throws FileNotFoundException, CephNotDirectoryException {
rlock.lock();
try {
native_ceph_stat(instance_ptr, path, stat);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_stat(long mountp, String path, CephStat stat);
/**
* Get file status, without following symlinks.
*
* @param path Path of file to stat.
* @param stat CephStat structure to hold file status.
*/
public void lstat(String path, CephStat stat) throws FileNotFoundException, CephNotDirectoryException {
rlock.lock();
try {
native_ceph_lstat(instance_ptr, path, stat);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_lstat(long mountp, String path, CephStat stat);
/**
* Set file attributes.
*
* @param path Path to file.
* @param stat CephStat structure holding attributes.
* @param mask Mask specifying which attributes to set.
*/
public void setattr(String path, CephStat stat, int mask) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_setattr(instance_ptr, path, stat, mask);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_setattr(long mountp, String relpath, CephStat stat, int mask);
/**
* Change file mode.
*
* @param path Path to file.
* @param mode New mode bits.
*/
public void chmod(String path, int mode) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_chmod(instance_ptr, path, mode);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_chmod(long mountp, String path, int mode);
/**
* Change file mode of an open file.
*
* @param fd The open file descriptor to change the mode bits on.
* @param mode New mode bits.
*/
public void fchmod(int fd, int mode) {
rlock.lock();
try {
native_ceph_fchmod(instance_ptr, fd, mode);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_fchmod(long mountp, int fd, int mode);
/**
* Truncate a file to a specified length.
*
* @param path Path of the file.
* @param size New file length.
*/
public void truncate(String path, long size) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_truncate(instance_ptr, path, size);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_truncate(long mountp, String path, long size);
/**
* Open a file.
*
* @param path Path of file to open or create.
* @param flags Open flags.
* @param mode Permission mode.
* @return File descriptor.
*/
public int open(String path, int flags, int mode) throws FileNotFoundException {
rlock.lock();
try {
return native_ceph_open(instance_ptr, path, flags, mode);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_open(long mountp, String path, int flags, int mode);
/**
* Open a file with a specific file layout.
*
* @param path Path of file to open or create.
* @param flags Open flags.
* @param mode Permission mode.
* @param stripe_unit File layout stripe unit size.
* @param stripe_count File layout stripe count.
* @param object_size Size of each object.
* @param data_pool The target data pool.
* @return File descriptor.
*/
public int open(String path, int flags, int mode, int stripe_unit, int stripe_count,
int object_size, String data_pool) throws FileNotFoundException {
rlock.lock();
try {
return native_ceph_open_layout(instance_ptr, path, flags, mode, stripe_unit,
stripe_count, object_size, data_pool);
} finally {
rlock.unlock();
}
}
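/*
 * Illustrative usage sketch (values and pool name are made up): create a file
 * whose layout uses 4 MiB stripe units and 4 MiB objects in a single stripe,
 * stored in a pool named "cephfs_data":
 *
 *   int fd = mount.open("/bigfile", CephMount.O_WRONLY | CephMount.O_CREAT,
 *                       0644, 1 << 22, 1, 1 << 22, "cephfs_data");
 */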
private static native int native_ceph_open_layout(long mountp, String path,
int flags, int mode, int stripe_unit, int stripe_count, int object_size, String data_pool);
/**
* Close an open file.
*
* @param fd The file descriptor.
*/
public void close(int fd) {
rlock.lock();
try {
native_ceph_close(instance_ptr, fd);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_close(long mountp, int fd);
/**
* Seek to a position in a file.
*
* @param fd File descriptor.
* @param offset New offset.
* @param whence Whence value.
* @return The new offset.
*/
public long lseek(int fd, long offset, int whence) {
rlock.lock();
try {
return native_ceph_lseek(instance_ptr, fd, offset, whence);
} finally {
rlock.unlock();
}
}
private static native long native_ceph_lseek(long mountp, int fd, long offset, int whence);
/**
* Read from a file.
*
* @param fd The file descriptor.
* @param buf Buffer for data read.
* @param size Amount of data to read into the buffer.
* @param offset Offset to read from (-1 for current position).
* @return The number of bytes read.
*/
public long read(int fd, byte[] buf, long size, long offset) {
rlock.lock();
try {
return native_ceph_read(instance_ptr, fd, buf, size, offset);
} finally {
rlock.unlock();
}
}
private static native long native_ceph_read(long mountp, int fd, byte[] buf, long size, long offset);
/**
* Write to a file at a specific offset.
*
* @param fd The file descriptor.
* @param buf Buffer to write.
* @param size Amount of data to write.
* @param offset Offset to write from (-1 for current position).
* @return The number of bytes written.
*/
public long write(int fd, byte[] buf, long size, long offset) {
rlock.lock();
try {
return native_ceph_write(instance_ptr, fd, buf, size, offset);
} finally {
rlock.unlock();
}
}
private static native long native_ceph_write(long mountp, int fd, byte[] buf, long size, long offset);
/**
* Truncate a file.
*
* @param fd File descriptor of the file to truncate.
* @param size New file size.
*/
public void ftruncate(int fd, long size) {
rlock.lock();
try {
native_ceph_ftruncate(instance_ptr, fd, size);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_ftruncate(long mountp, int fd, long size);
/**
* Synchronize a file with the file system.
*
* @param fd File descriptor to synchronize.
* @param dataonly Synchronize only data.
*/
public void fsync(int fd, boolean dataonly) {
rlock.lock();
try {
native_ceph_fsync(instance_ptr, fd, dataonly);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_fsync(long mountp, int fd, boolean dataonly);
/**
* Apply or remove an advisory lock.
*
* @param fd File descriptor to lock or unlock.
* @param operation the advisory lock operation to be performed on the file
* descriptor among LOCK_SH (shared lock), LOCK_EX (exclusive lock),
* or LOCK_UN (remove lock). The LOCK_NB value can be ORed to perform a
* non-blocking operation.
* @param owner the user-supplied owner identifier (an arbitrary integer)
*/
public void flock(int fd, int operation, long owner) throws IOException {
rlock.lock();
try {
native_ceph_flock(instance_ptr, fd, operation, owner);
} finally {
rlock.unlock();
}
}
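/*
 * Illustrative sketch (not part of the API): take a non-blocking exclusive
 * lock on an open descriptor, then release it. The owner value (42 here) is
 * an arbitrary caller-chosen identifier, as described above.
 *
 *   mount.flock(fd, CephMount.LOCK_EX | CephMount.LOCK_NB, 42);
 *   try {
 *     // ... perform I/O on fd ...
 *   } finally {
 *     mount.flock(fd, CephMount.LOCK_UN, 42);
 *   }
 */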
private static native int native_ceph_flock(long mountp, int fd, int operation, long owner);
/**
* Get file status.
*
* @param fd The file descriptor.
* @param stat The object in which to store the status.
*/
public void fstat(int fd, CephStat stat) {
rlock.lock();
try {
native_ceph_fstat(instance_ptr, fd, stat);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_fstat(long mountp, int fd, CephStat stat);
/**
* Synchronize the client with the file system.
*/
public void sync_fs() {
rlock.lock();
try {
native_ceph_sync_fs(instance_ptr);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_sync_fs(long mountp);
/**
* Get an extended attribute value.
*
* If the buffer is large enough to hold the entire attribute value, or
* buf is null, the size of the value is returned.
*
* @param path File path.
* @param name Name of the attribute.
* @param buf Buffer to store attribute value.
* @return The length of the attribute value. See description for more
* details.
*/
public long getxattr(String path, String name, byte[] buf) throws FileNotFoundException {
rlock.lock();
try {
return native_ceph_getxattr(instance_ptr, path, name, buf);
} finally {
rlock.unlock();
}
}
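/*
 * Illustrative sketch: because a null buffer returns the value size, an
 * attribute can be read in two passes (the attribute name is made up):
 *
 *   long size = mount.getxattr(path, "user.mytag", null);
 *   byte[] value = new byte[(int) size];
 *   mount.getxattr(path, "user.mytag", value);
 */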
private static native long native_ceph_getxattr(long mountp, String path, String name, byte[] buf);
/**
* Get an extended attribute value of a symbolic link.
*
* If the buffer is large enough to hold the entire attribute value, or
* buf is null, the size of the value is returned.
*
* @param path File path.
* @param name Name of attribute.
* @param buf Buffer to store attribute value.
* @return The length of the attribute value. See description for more
* details.
*/
public long lgetxattr(String path, String name, byte[] buf) throws FileNotFoundException {
rlock.lock();
try {
return native_ceph_lgetxattr(instance_ptr, path, name, buf);
} finally {
rlock.unlock();
}
}
private static native long native_ceph_lgetxattr(long mountp, String path, String name, byte[] buf);
/**
* List extended attributes.
*
* @param path File path.
* @return List of attribute names.
*/
public String[] listxattr(String path) throws FileNotFoundException {
rlock.lock();
try {
return native_ceph_listxattr(instance_ptr, path);
} finally {
rlock.unlock();
}
}
private static native String[] native_ceph_listxattr(long mountp, String path);
/**
* List extended attributes of a symbolic link.
*
* @param path File path.
* @return List of attribute names.
*/
public String[] llistxattr(String path) throws FileNotFoundException {
rlock.lock();
try {
return native_ceph_llistxattr(instance_ptr, path);
} finally {
rlock.unlock();
}
}
private static native String[] native_ceph_llistxattr(long mountp, String path);
/**
* Remove an extended attribute.
*
* @param path File path.
* @param name Name of attribute.
*/
public void removexattr(String path, String name) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_removexattr(instance_ptr, path, name);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_removexattr(long mountp, String path, String name);
/**
* Remove an extended attribute from a symbolic link.
*
* @param path File path.
* @param name Name of attribute.
*/
public void lremovexattr(String path, String name) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_lremovexattr(instance_ptr, path, name);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_lremovexattr(long mountp, String path, String name);
/**
* Set the value of an extended attribute.
*
* @param path The file path.
* @param name The attribute name.
* @param buf The attribute value.
* @param size The size of the attribute value.
* @param flags Flag controlling behavior (XATTR_CREATE/REPLACE/NONE).
*/
public void setxattr(String path, String name, byte[] buf, long size, int flags) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_setxattr(instance_ptr, path, name, buf, size, flags);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_setxattr(long mountp, String path, String name, byte[] buf, long size, int flags);
/**
* Set the value of an extended attribute on a symbolic link.
*
* @param path The file path.
* @param name The attribute name.
* @param buf The attribute value.
* @param size The size of the attribute value.
* @param flags Flag controlling behavior (XATTR_CREATE/REPLACE/NONE).
*/
public void lsetxattr(String path, String name, byte[] buf, long size, int flags) throws FileNotFoundException {
rlock.lock();
try {
native_ceph_lsetxattr(instance_ptr, path, name, buf, size, flags);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_lsetxattr(long mountp, String path, String name, byte[] buf, long size, int flags);
/**
* Get the stripe unit of a file.
*
* @param fd The file descriptor.
* @return The stripe unit.
*/
public int get_file_stripe_unit(int fd) {
rlock.lock();
try {
return native_ceph_get_file_stripe_unit(instance_ptr, fd);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_get_file_stripe_unit(long mountp, int fd);
/**
* Get the name of the pool a file is stored in.
*
* @param fd An open file descriptor.
* @return The pool name.
*/
public String get_file_pool_name(int fd) {
rlock.lock();
try {
return native_ceph_get_file_pool_name(instance_ptr, fd);
} finally {
rlock.unlock();
}
}
private static native String native_ceph_get_file_pool_name(long mountp, int fd);
/**
* Get the default data pool of cephfs.
*
* @return The pool name.
*/
public String get_default_data_pool_name() {
rlock.lock();
try {
return native_ceph_get_default_data_pool_name(instance_ptr);
} finally {
rlock.unlock();
}
}
private static native String native_ceph_get_default_data_pool_name(long mountp);
/**
* Get the replication of a file.
*
* @param fd The file descriptor.
* @return The file replication.
*/
public int get_file_replication(int fd) {
rlock.lock();
try {
return native_ceph_get_file_replication(instance_ptr, fd);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_get_file_replication(long mountp, int fd);
/**
* Favor reading from local replicas when possible.
*
* @param state Enable or disable localized reads.
*/
public void localize_reads(boolean state) {
rlock.lock();
try {
native_ceph_localize_reads(instance_ptr, state);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_localize_reads(long mountp, boolean on);
/**
* Get file layout stripe unit granularity.
*
* @return Stripe unit granularity.
*/
public int get_stripe_unit_granularity() {
rlock.lock();
try {
return native_ceph_get_stripe_unit_granularity(instance_ptr);
} finally {
rlock.unlock();
}
}
private static native int native_ceph_get_stripe_unit_granularity(long mountp);
/**
* Get the pool id for the named pool.
*
* @param name The pool name.
* @return The pool id.
*/
public int get_pool_id(String name) throws CephPoolException {
rlock.lock();
try {
return native_ceph_get_pool_id(instance_ptr, name);
} catch (FileNotFoundException e) {
throw new CephPoolException("pool name " + name + " not found");
} finally {
rlock.unlock();
}
}
private static native int native_ceph_get_pool_id(long mountp, String name) throws FileNotFoundException;
/**
* Get the pool replication factor.
*
* @param pool_id The pool id.
* @return Number of replicas stored in the pool.
*/
public int get_pool_replication(int pool_id) throws CephPoolException {
rlock.lock();
try {
return native_ceph_get_pool_replication(instance_ptr, pool_id);
} catch (FileNotFoundException e) {
throw new CephPoolException("pool id " + pool_id + " not found");
} finally {
rlock.unlock();
}
}
private static native int native_ceph_get_pool_replication(long mountp, int pool_id) throws FileNotFoundException;
/**
* Get file extent containing a given offset.
*
* @param fd The file descriptor.
* @param offset Offset in file.
* @return A CephFileExtent object.
*/
public CephFileExtent get_file_extent(int fd, long offset) {
rlock.lock();
try {
return native_ceph_get_file_extent_osds(instance_ptr, fd, offset);
} finally {
rlock.unlock();
}
}
private static native CephFileExtent native_ceph_get_file_extent_osds(long mountp, int fd, long offset);
/**
* Get the fully qualified CRUSH location of an OSD.
*
* Returns (type, name) string pairs for each device in the CRUSH bucket
* hierarchy starting from the given OSD to the root.
*
* @param osd The OSD device id.
* @return List of pairs.
*/
public Bucket[] get_osd_crush_location(int osd) {
rlock.lock();
try {
String[] parts = native_ceph_get_osd_crush_location(instance_ptr, osd);
Bucket[] path = new Bucket[parts.length / 2];
for (int i = 0; i < path.length; i++)
path[i] = new Bucket(parts[i*2], parts[i*2+1]);
return path;
} finally {
rlock.unlock();
}
}
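/*
 * Illustrative sketch: print the CRUSH path of OSD 0 from device to root
 * (the bucket names in the expected output are made up):
 *
 *   for (Bucket b : mount.get_osd_crush_location(0))
 *     System.out.println(b.getType() + "=" + b.getName());
 *   // e.g. osd=osd.0, host=node-a, root=default
 */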
private static native String[] native_ceph_get_osd_crush_location(long mountp, int osd);
/**
* Get the network address of an OSD.
*
* @param osd The OSD device id.
* @return The network address.
*/
public InetAddress get_osd_address(int osd) {
rlock.lock();
try {
return native_ceph_get_osd_addr(instance_ptr, osd);
} finally {
rlock.unlock();
}
}
private static native InetAddress native_ceph_get_osd_addr(long mountp, int osd);
}
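A minimal end-to-end sketch of the CephMount lifecycle exposed above. The client id, configuration path, directory and file names are placeholders, and error handling is reduced to propagating exceptions.

import java.nio.charset.StandardCharsets;
import com.ceph.fs.CephMount;
import com.ceph.fs.CephStat;

public class CephMountExample {
  public static void main(String[] args) throws Exception {
    CephMount mount = new CephMount("admin");     // client id (placeholder)
    mount.conf_read_file("/etc/ceph/ceph.conf");  // cluster configuration
    mount.mount("/");                             // activate at the root

    mount.mkdirs("/demo", 0755);
    int fd = mount.open("/demo/hello.txt",
        CephMount.O_WRONLY | CephMount.O_CREAT | CephMount.O_TRUNC, 0644);
    byte[] data = "hello ceph\n".getBytes(StandardCharsets.UTF_8);
    mount.write(fd, data, data.length, -1);       // -1: write at current offset
    mount.fsync(fd, false);
    mount.close(fd);

    CephStat st = new CephStat();
    mount.stat("/demo/hello.txt", st);
    System.out.println("size=" + st.size);

    mount.unmount();
  }
}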
ceph-main/src/java/java/com/ceph/fs/CephNativeLoader.java
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
class CephNativeLoader {
private static final CephNativeLoader instance = new CephNativeLoader();
private static boolean initialized = false;
private static final String JNI_PATH_ENV_VAR = "CEPH_JNI_PATH";
private static final String LIBRARY_NAME = "cephfs_jni";
private static final String LIBRARY_FILE = "libcephfs_jni.so";
private CephNativeLoader() {}
public static CephNativeLoader getInstance() {
return instance;
}
public synchronized void loadLibrary() {
if (initialized)
return;
boolean success = false;
/*
* Allow a Ceph specific environment variable to force
* the loading path.
*/
String path = System.getenv(JNI_PATH_ENV_VAR);
try {
if (path != null) {
System.out.println("Loading libcephfs-jni: " + path);
System.load(path);
success = true;
} else {
try {
/*
* Try default Java loading path(s)
*/
System.out.println("Loading libcephfs-jni from default path: " +
System.getProperty("java.library.path"));
System.loadLibrary(LIBRARY_NAME);
success = true;
} catch (final UnsatisfiedLinkError ule1) {
try {
/*
* Try RHEL/CentOS default path
*/
path = "/usr/lib64/" + LIBRARY_FILE;
System.out.println("Loading libcephfs-jni: " + path);
System.load(path);
success = true;
} catch (final UnsatisfiedLinkError ule2) {
/*
* Try Ubuntu default path
*/
path = "/usr/lib/jni/" + LIBRARY_FILE;
System.out.println("Loading libcephfs-jni: " + path);
System.load(path);
success = true;
}
}
}
} finally {
System.out.println("Loading libcephfs-jni: " +
(success ? "Success!" : "Failure!"));
}
/*
* Finish initialization
*/
CephMount.native_initialize();
initialized = true;
}
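/*
 * Note (illustrative): the search order above can be short-circuited by
 * pointing CEPH_JNI_PATH at the JNI library, or by extending
 * java.library.path, e.g.:
 *
 *   CEPH_JNI_PATH=/opt/ceph/lib/libcephfs_jni.so java -cp app.jar MyApp
 *   java -Djava.library.path=/usr/lib64 -cp app.jar MyApp
 */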
}
ceph-main/src/java/java/com/ceph/fs/CephNotDirectoryException.java
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import java.io.IOException;
/**
* Component of path is not a directory.
*/
public class CephNotDirectoryException extends IOException {
private static final long serialVersionUID = 1L;
/**
* Construct CephNotDirectoryException.
*/
public CephNotDirectoryException() {
super();
}
/**
* Construct CephNotDirectoryException with message.
*/
public CephNotDirectoryException(String s) {
super(s);
}
}
ceph-main/src/java/java/com/ceph/fs/CephNotMountedException.java
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import java.io.IOException;
/**
* Ceph is not mounted.
*/
public class CephNotMountedException extends IOException {
private static final long serialVersionUID = 1L;
/**
* Construct CephNotMountedException.
*/
public CephNotMountedException() {
super();
}
/**
* Construct CephNotMountedException with message.
*/
public CephNotMountedException(String s) {
super(s);
}
}
ceph-main/src/java/java/com/ceph/fs/CephPoolException.java
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import java.io.IOException;
/**
* Exception related to Ceph pool.
*/
public class CephPoolException extends IOException {
private static final long serialVersionUID = 1L;
/**
* Construct CephPoolException.
*/
public CephPoolException() {
super();
}
/**
* Construct CephPoolException with message.
*/
public CephPoolException(String s) {
super(s);
}
}
ceph-main/src/java/java/com/ceph/fs/CephStat.java
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
/**
* Holds struct stat fields.
*/
public class CephStat {
/* Set from native */
private boolean is_file; /* S_ISREG */
private boolean is_directory; /* S_ISDIR */
private boolean is_symlink; /* S_ISLNK */
public int mode;
public int uid;
public int gid;
public long size;
public long blksize;
public long blocks;
public long a_time;
public long m_time;
public boolean isFile() {
return is_file;
}
public boolean isDir() {
return is_directory;
}
public boolean isSymlink() {
return is_symlink;
}
}
ceph-main/src/java/java/com/ceph/fs/CephStatVFS.java
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
/**
* Holds struct statvfs fields.
*/
public class CephStatVFS {
public long bsize;
public long frsize;
public long blocks;
public long bavail;
public long files;
public long fsid;
public long namemax;
}
ceph-main/src/java/native/JniConstants.cpp
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "JniConstants.h"
#include "ScopedLocalRef.h"
#include <stdlib.h>
jclass JniConstants::inet6AddressClass;
jclass JniConstants::inetAddressClass;
jclass JniConstants::inetSocketAddressClass;
jclass JniConstants::stringClass;
static jclass findClass(JNIEnv* env, const char* name) {
ScopedLocalRef<jclass> localClass(env, env->FindClass(name));
jclass result = reinterpret_cast<jclass>(env->NewGlobalRef(localClass.get()));
if (result == NULL) {
fprintf(stderr, "failed to find class '%s'", name);
abort();
}
return result;
}
void JniConstants::init(JNIEnv* env) {
inet6AddressClass = findClass(env, "java/net/Inet6Address");
inetAddressClass = findClass(env, "java/net/InetAddress");
inetSocketAddressClass = findClass(env, "java/net/InetSocketAddress");
stringClass = findClass(env, "java/lang/String");
}
ceph-main/src/java/native/JniConstants.h
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef JNI_CONSTANTS_H_included
#define JNI_CONSTANTS_H_included
#include <jni.h>
/**
* A cache to avoid calling FindClass at runtime.
*
* Class lookup is relatively expensive (2.5us on passion-eng at the time of writing), so we do
* all such lookups eagerly at VM startup. This means that code that never uses, say,
* java.util.zip.Deflater still has to pay for the lookup, but it means that on a device the cost
* is definitely paid during boot and amortized. A central cache also removes the temptation to
* dynamically call FindClass rather than add a small cache to each file that needs one. Another
* cost is that each class cached here requires a global reference, though in practice we save
* enough by not having a global reference for each file that uses a class such as java.lang.String
* which is used in several files.
*
* FindClass is still called in a couple of situations: when throwing exceptions, and in some of
* the serialization code. The former is clearly not a performance case, and we're currently
* assuming that neither is the latter.
*
* TODO: similar arguments hold for field and method IDs; we should cache them centrally too.
*/
struct JniConstants {
static void init(JNIEnv* env);
static jclass inet6AddressClass;
static jclass inetAddressClass;
static jclass inetSocketAddressClass;
static jclass stringClass;
};
#define NATIVE_METHOD(className, functionName, signature) \
{ #functionName, signature, reinterpret_cast<void*>(className ## _ ## functionName) }
#endif // JNI_CONSTANTS_H_included
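A sketch of how a JNI translation unit might consume this header, using the cached java/lang/String class and the NATIVE_METHOD helper. The Example class, its method, and the Java-side signature are invented for illustration.

#include <jni.h>
#include "JniConstants.h"

// Native backing for a hypothetical "static native boolean isString(Object o)".
static jboolean Example_isString(JNIEnv* env, jclass, jobject obj) {
  // Reuse the class cached at VM startup instead of calling FindClass() here.
  return env->IsInstanceOf(obj, JniConstants::stringClass);
}

static JNINativeMethod gMethods[] = {
  NATIVE_METHOD(Example, isString, "(Ljava/lang/Object;)Z"),
};

jint JNI_OnLoad(JavaVM* vm, void*) {
  JNIEnv* env;
  if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK)
    return JNI_ERR;
  JniConstants::init(env);  // populate the cache once, at load time
  jclass cls = env->FindClass("com/example/Example");  // hypothetical class
  if (cls == NULL || env->RegisterNatives(cls, gMethods,
      sizeof(gMethods) / sizeof(gMethods[0])) != JNI_OK)
    return JNI_ERR;
  return JNI_VERSION_1_6;
}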
ceph-main/src/java/native/ScopedLocalRef.h
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SCOPED_LOCAL_REF_H_included
#define SCOPED_LOCAL_REF_H_included
#include "jni.h"
#include <stddef.h>
// A smart pointer that deletes a JNI local reference when it goes out of scope.
template<typename T>
class ScopedLocalRef {
public:
ScopedLocalRef(JNIEnv* env, T localRef) : mEnv(env), mLocalRef(localRef) {
}
~ScopedLocalRef() {
reset();
}
void reset(T ptr = NULL) {
if (ptr != mLocalRef) {
if (mLocalRef != NULL) {
mEnv->DeleteLocalRef(mLocalRef);
}
mLocalRef = ptr;
}
}
T release() __attribute__((warn_unused_result)) {
T localRef = mLocalRef;
mLocalRef = NULL;
return localRef;
}
T get() const {
return mLocalRef;
}
private:
JNIEnv* mEnv;
T mLocalRef;
// Disallow copy and assignment.
ScopedLocalRef(const ScopedLocalRef&);
void operator=(const ScopedLocalRef&);
};
#endif // SCOPED_LOCAL_REF_H_included
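A short usage sketch mirroring how JniConstants.cpp uses the class: the JNI local reference is released automatically when the wrapper goes out of scope.

#include <jni.h>
#include "ScopedLocalRef.h"

static void example(JNIEnv* env) {
  // FindClass() returns a local reference; ~ScopedLocalRef() calls
  // DeleteLocalRef() on it when 'localClass' leaves scope, even on early return.
  ScopedLocalRef<jclass> localClass(env, env->FindClass("java/lang/String"));
  if (localClass.get() == NULL)
    return;  // lookup failed; an exception is pending in 'env'
  // ... use localClass.get() here ...
}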
ceph-main/src/java/native/libcephfs_jni.cc
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/un.h>
#include <jni.h>
#include "ScopedLocalRef.h"
#include "JniConstants.h"
#include "include/cephfs/libcephfs.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_javaclient
#include "com_ceph_fs_CephMount.h"
#define CEPH_STAT_CP "com/ceph/fs/CephStat"
#define CEPH_STAT_VFS_CP "com/ceph/fs/CephStatVFS"
#define CEPH_FILE_EXTENT_CP "com/ceph/fs/CephFileExtent"
#define CEPH_MOUNT_CP "com/ceph/fs/CephMount"
#define CEPH_NOTMOUNTED_CP "com/ceph/fs/CephNotMountedException"
#define CEPH_FILEEXISTS_CP "com/ceph/fs/CephFileAlreadyExistsException"
#define CEPH_ALREADYMOUNTED_CP "com/ceph/fs/CephAlreadyMountedException"
#define CEPH_NOTDIR_CP "com/ceph/fs/CephNotDirectoryException"
/*
* Flags to open(). must be synchronized with CephMount.java
*
* There are two versions of flags: the version in Java and the version in the
* target library (e.g. libc or libcephfs). We control the Java values and map
* to the target value with fixup_* functions below. This is much faster than
* keeping the values in Java and making a cross-JNI up-call to retrieve them,
* and makes it easy to keep any platform specific value changes in this file.
*/
#define JAVA_O_RDONLY 1
#define JAVA_O_RDWR 2
#define JAVA_O_APPEND 4
#define JAVA_O_CREAT 8
#define JAVA_O_TRUNC 16
#define JAVA_O_EXCL 32
#define JAVA_O_WRONLY 64
#define JAVA_O_DIRECTORY 128
/*
* Whence flags for seek(). sync with CephMount.java if changed.
*
* Mapping of SEEK_* done in seek function.
*/
#define JAVA_SEEK_SET 1
#define JAVA_SEEK_CUR 2
#define JAVA_SEEK_END 3
/*
* File attribute flags. sync with CephMount.java if changed.
*/
#define JAVA_SETATTR_MODE 1
#define JAVA_SETATTR_UID 2
#define JAVA_SETATTR_GID 4
#define JAVA_SETATTR_MTIME 8
#define JAVA_SETATTR_ATIME 16
/*
* Setxattr flags. sync with CephMount.java if changed.
*/
#define JAVA_XATTR_CREATE 1
#define JAVA_XATTR_REPLACE 2
#define JAVA_XATTR_NONE 3
/*
* flock flags. sync with CephMount.java if changed.
*/
#define JAVA_LOCK_SH 1
#define JAVA_LOCK_EX 2
#define JAVA_LOCK_NB 4
#define JAVA_LOCK_UN 8
using namespace std;
/* Map JAVA_O_* open flags to values in libc */
static inline int fixup_open_flags(jint jflags)
{
int ret = 0;
#define FIXUP_OPEN_FLAG(name) \
if (jflags & JAVA_##name) \
ret |= name;
FIXUP_OPEN_FLAG(O_RDONLY)
FIXUP_OPEN_FLAG(O_RDWR)
FIXUP_OPEN_FLAG(O_APPEND)
FIXUP_OPEN_FLAG(O_CREAT)
FIXUP_OPEN_FLAG(O_TRUNC)
FIXUP_OPEN_FLAG(O_EXCL)
FIXUP_OPEN_FLAG(O_WRONLY)
FIXUP_OPEN_FLAG(O_DIRECTORY)
#undef FIXUP_OPEN_FLAG
return ret;
}
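/*
 * Illustrative example: with the mapping above, a call such as
 *
 *   int flags = fixup_open_flags(JAVA_O_CREAT | JAVA_O_WRONLY | JAVA_O_TRUNC);
 *
 * yields the local libc's O_CREAT | O_WRONLY | O_TRUNC, whatever their
 * numeric values are on this platform.
 */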
/* Map JAVA_SETATTR_* to values in ceph lib */
static inline int fixup_attr_mask(jint jmask)
{
int mask = 0;
#define FIXUP_ATTR_MASK(name) \
if (jmask & JAVA_##name) \
mask |= CEPH_##name;
FIXUP_ATTR_MASK(SETATTR_MODE)
FIXUP_ATTR_MASK(SETATTR_UID)
FIXUP_ATTR_MASK(SETATTR_GID)
FIXUP_ATTR_MASK(SETATTR_MTIME)
FIXUP_ATTR_MASK(SETATTR_ATIME)
#undef FIXUP_ATTR_MASK
return mask;
}
/* Cached field IDs for com.ceph.fs.CephStat */
static jfieldID cephstat_mode_fid;
static jfieldID cephstat_uid_fid;
static jfieldID cephstat_gid_fid;
static jfieldID cephstat_size_fid;
static jfieldID cephstat_blksize_fid;
static jfieldID cephstat_blocks_fid;
static jfieldID cephstat_a_time_fid;
static jfieldID cephstat_m_time_fid;
static jfieldID cephstat_is_file_fid;
static jfieldID cephstat_is_directory_fid;
static jfieldID cephstat_is_symlink_fid;
/* Cached field IDs for com.ceph.fs.CephStatVFS */
static jfieldID cephstatvfs_bsize_fid;
static jfieldID cephstatvfs_frsize_fid;
static jfieldID cephstatvfs_blocks_fid;
static jfieldID cephstatvfs_bavail_fid;
static jfieldID cephstatvfs_files_fid;
static jfieldID cephstatvfs_fsid_fid;
static jfieldID cephstatvfs_namemax_fid;
/* Cached field IDs for com.ceph.fs.CephMount */
static jfieldID cephmount_instance_ptr_fid;
/* Cached field IDs for com.ceph.fs.CephFileExtent */
static jclass cephfileextent_cls;
static jmethodID cephfileextent_ctor_fid;
/*
* Exception throwing helper. Adapted from Apache Hadoop header
* org_apache_hadoop.h by adding the do {} while (0) construct.
*/
#define THROW(env, exception_name, message) \
do { \
jclass ecls = env->FindClass(exception_name); \
if (ecls) { \
int ret = env->ThrowNew(ecls, message); \
if (ret < 0) { \
printf("(CephFS) Fatal Error\n"); \
} \
env->DeleteLocalRef(ecls); \
} \
} while (0)
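/*
 * Illustrative usage (the same pattern handle_error() uses below); the
 * do/while(0) wrapper lets the macro sit safely in an unbraced if/else:
 *
 *   if (ret)
 *     THROW(env, "java/io/IOException", strerror(-ret));
 */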
static void cephThrowNullArg(JNIEnv *env, const char *msg)
{
THROW(env, "java/lang/NullPointerException", msg);
}
static void cephThrowOutOfMemory(JNIEnv *env, const char *msg)
{
THROW(env, "java/lang/OutOfMemoryError", msg);
}
static void cephThrowInternal(JNIEnv *env, const char *msg)
{
THROW(env, "java/lang/InternalError", msg);
}
static void cephThrowIndexBounds(JNIEnv *env, const char *msg)
{
THROW(env, "java/lang/IndexOutOfBoundsException", msg);
}
static void cephThrowIllegalArg(JNIEnv *env, const char *msg)
{
THROW(env, "java/lang/IllegalArgumentException", msg);
}
static void cephThrowFNF(JNIEnv *env, const char *msg)
{
THROW(env, "java/io/FileNotFoundException", msg);
}
static void cephThrowFileExists(JNIEnv *env, const char *msg)
{
THROW(env, CEPH_FILEEXISTS_CP, msg);
}
static void cephThrowNotDir(JNIEnv *env, const char *msg)
{
THROW(env, CEPH_NOTDIR_CP, msg);
}
static void handle_error(JNIEnv *env, int rc)
{
switch (rc) {
case -ENOENT:
cephThrowFNF(env, "");
return;
case -EEXIST:
cephThrowFileExists(env, "");
return;
case -ENOTDIR:
cephThrowNotDir(env, "");
return;
default:
break;
}
THROW(env, "java/io/IOException", strerror(-rc));
}
#define CHECK_ARG_NULL(v, m, r) do { \
if (!(v)) { \
cephThrowNullArg(env, (m)); \
return (r); \
} } while (0)
#define CHECK_ARG_BOUNDS(c, m, r) do { \
if ((c)) { \
cephThrowIndexBounds(env, (m)); \
return (r); \
} } while (0)
#define CHECK_MOUNTED(_c, _r) do { \
if (!ceph_is_mounted((_c))) { \
THROW(env, CEPH_NOTMOUNTED_CP, "not mounted"); \
return (_r); \
} } while (0)
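/*
 * Typical guard sequence at the top of the JNI entry points in this file:
 *
 *   CHECK_ARG_NULL(j_path, "@path is null", -1);
 *   CHECK_MOUNTED(cmount, -1);
 */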
/*
* Cast a jlong to ceph_mount_info. Each JNI function is expected to pass in
* the class instance variable instance_ptr. Passing a parameter is faster
* than reaching back into Java via an upcall to retrieve this pointer.
*/
static inline struct ceph_mount_info *get_ceph_mount(jlong j_mntp)
{
return (struct ceph_mount_info *)j_mntp;
}
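/*
 * Note: the jlong passed in is the value stored into CephMount.instance_ptr
 * by native_ceph_create() (via SetLongField), so the cast above simply
 * recovers the pointer returned by ceph_create().
 */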
/*
* Setup cached field IDs
*/
static void setup_field_ids(JNIEnv *env, jclass clz)
{
jclass cephstat_cls;
jclass cephstatvfs_cls;
jclass tmp_cephfileextent_cls;
/*
* Get a fieldID from a class with a specific type
*
* clz: jclass
* field: field in clz
* type: integer, long, etc..
*
* This macro assumes some naming convention that is used
* only in this file:
*
* GETFID(cephstat, mode, I) gets translated into
* cephstat_mode_fid = env->GetFieldID(cephstat_cls, "mode", "I");
*/
#define GETFID(clz, field, type) do { \
clz ## _ ## field ## _fid = env->GetFieldID(clz ## _cls, #field, #type); \
if ( ! clz ## _ ## field ## _fid ) \
return; \
} while (0)
/* Cache CephStat fields */
cephstat_cls = env->FindClass(CEPH_STAT_CP);
if (!cephstat_cls)
return;
GETFID(cephstat, mode, I);
GETFID(cephstat, uid, I);
GETFID(cephstat, gid, I);
GETFID(cephstat, size, J);
GETFID(cephstat, blksize, J);
GETFID(cephstat, blocks, J);
GETFID(cephstat, a_time, J);
GETFID(cephstat, m_time, J);
GETFID(cephstat, is_file, Z);
GETFID(cephstat, is_directory, Z);
GETFID(cephstat, is_symlink, Z);
/* Cache CephStatVFS fields */
cephstatvfs_cls = env->FindClass(CEPH_STAT_VFS_CP);
if (!cephstatvfs_cls)
return;
GETFID(cephstatvfs, bsize, J);
GETFID(cephstatvfs, frsize, J);
GETFID(cephstatvfs, blocks, J);
GETFID(cephstatvfs, bavail, J);
GETFID(cephstatvfs, files, J);
GETFID(cephstatvfs, fsid, J);
GETFID(cephstatvfs, namemax, J);
/* Cache CephFileExtent fields */
tmp_cephfileextent_cls = env->FindClass(CEPH_FILE_EXTENT_CP);
if (!tmp_cephfileextent_cls)
return;
cephfileextent_cls = (jclass)env->NewGlobalRef(tmp_cephfileextent_cls);
env->DeleteLocalRef(tmp_cephfileextent_cls);
cephfileextent_ctor_fid = env->GetMethodID(cephfileextent_cls, "<init>", "(JJ[I)V");
if (!cephfileextent_ctor_fid)
return;
JniConstants::init(env);
#undef GETFID
cephmount_instance_ptr_fid = env->GetFieldID(clz, "instance_ptr", "J");
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_initialize
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_com_ceph_fs_CephMount_native_1initialize
(JNIEnv *env, jclass clz)
{
setup_field_ids(env, clz);
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_create
* Signature: (Lcom/ceph/fs/CephMount;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1create
(JNIEnv *env, jclass clz, jobject j_cephmount, jstring j_id)
{
struct ceph_mount_info *cmount;
const char *c_id = NULL;
int ret;
CHECK_ARG_NULL(j_cephmount, "@mount is null", -1);
if (j_id) {
c_id = env->GetStringUTFChars(j_id, NULL);
if (!c_id) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
}
ret = ceph_create(&cmount, c_id);
if (c_id)
env->ReleaseStringUTFChars(j_id, c_id);
if (ret) {
THROW(env, "java/lang/RuntimeException", "failed to create Ceph mount object");
return ret;
}
env->SetLongField(j_cephmount, cephmount_instance_ptr_fid, (long)cmount);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_mount
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1mount
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_root)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_root = NULL;
int ret;
/*
* Toss a message up if we are already mounted.
*/
if (ceph_is_mounted(cmount)) {
THROW(env, CEPH_ALREADYMOUNTED_CP, "");
return -1;
}
if (j_root) {
c_root = env->GetStringUTFChars(j_root, NULL);
if (!c_root) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
}
ldout(cct, 10) << "jni: ceph_mount: " << (c_root ? c_root : "<NULL>") << dendl;
ret = ceph_mount(cmount, c_root);
ldout(cct, 10) << "jni: ceph_mount: exit ret " << ret << dendl;
if (c_root)
env->ReleaseStringUTFChars(j_root, c_root);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_unmount
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1unmount
(JNIEnv *env, jclass clz, jlong j_mntp)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
ldout(cct, 10) << "jni: ceph_unmount enter" << dendl;
CHECK_MOUNTED(cmount, -1);
ret = ceph_unmount(cmount);
ldout(cct, 10) << "jni: ceph_unmount exit ret " << ret << dendl;
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_release
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1release
(JNIEnv *env, jclass clz, jlong j_mntp)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
ldout(cct, 10) << "jni: ceph_release called" << dendl;
ret = ceph_release(cmount);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_conf_set
* Signature: (JLjava/lang/String;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1conf_1set
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_opt, jstring j_val)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_opt, *c_val;
int ret;
CHECK_ARG_NULL(j_opt, "@option is null", -1);
CHECK_ARG_NULL(j_val, "@value is null", -1);
c_opt = env->GetStringUTFChars(j_opt, NULL);
if (!c_opt) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
c_val = env->GetStringUTFChars(j_val, NULL);
if (!c_val) {
env->ReleaseStringUTFChars(j_opt, c_opt);
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: conf_set: opt " << c_opt << " val " << c_val << dendl;
ret = ceph_conf_set(cmount, c_opt, c_val);
ldout(cct, 10) << "jni: conf_set: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_opt, c_opt);
env->ReleaseStringUTFChars(j_val, c_val);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_conf_get
* Signature: (JLjava/lang/String;)Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1conf_1get
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_opt)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_opt;
jstring value = NULL;
int ret, buflen;
char *buf;
CHECK_ARG_NULL(j_opt, "@option is null", NULL);
c_opt = env->GetStringUTFChars(j_opt, NULL);
if (!c_opt) {
cephThrowInternal(env, "failed to pin memory");
return NULL;
}
buflen = 128;
buf = new (std::nothrow) char[buflen];
if (!buf) {
cephThrowOutOfMemory(env, "head allocation failed");
goto out;
}
while (1) {
memset(buf, 0, sizeof(char)*buflen);
ldout(cct, 10) << "jni: conf_get: opt " << c_opt << " len " << buflen << dendl;
ret = ceph_conf_get(cmount, c_opt, buf, buflen);
if (ret == -ENAMETOOLONG) {
buflen *= 2;
delete [] buf;
buf = new (std::nothrow) char[buflen];
if (!buf) {
cephThrowOutOfMemory(env, "head allocation failed");
goto out;
}
} else
break;
}
ldout(cct, 10) << "jni: conf_get: ret " << ret << dendl;
if (ret == 0)
value = env->NewStringUTF(buf);
else if (ret != -ENOENT)
handle_error(env, ret);
delete [] buf;
out:
env->ReleaseStringUTFChars(j_opt, c_opt);
return value;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_conf_read_file
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1conf_1read_1file
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: conf_read_file: path " << c_path << dendl;
ret = ceph_conf_read_file(cmount, c_path);
ldout(cct, 10) << "jni: conf_read_file: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_statfs
* Signature: (JLjava/lang/String;Lcom/ceph/fs/CephStatVFS;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1statfs
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jobject j_cephstatvfs)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
struct statvfs st;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_ARG_NULL(j_cephstatvfs, "@stat is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: statfs: path " << c_path << dendl;
ret = ceph_statfs(cmount, c_path, &st);
ldout(cct, 10) << "jni: statfs: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret) {
handle_error(env, ret);
return ret;
}
env->SetLongField(j_cephstatvfs, cephstatvfs_bsize_fid, st.f_bsize);
env->SetLongField(j_cephstatvfs, cephstatvfs_frsize_fid, st.f_frsize);
env->SetLongField(j_cephstatvfs, cephstatvfs_blocks_fid, st.f_blocks);
env->SetLongField(j_cephstatvfs, cephstatvfs_bavail_fid, st.f_bavail);
env->SetLongField(j_cephstatvfs, cephstatvfs_files_fid, st.f_files);
env->SetLongField(j_cephstatvfs, cephstatvfs_fsid_fid, st.f_fsid);
env->SetLongField(j_cephstatvfs, cephstatvfs_namemax_fid, st.f_namemax);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_getcwd
* Signature: (J)Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1getcwd
(JNIEnv *env, jclass clz, jlong j_mntp)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_cwd;
CHECK_MOUNTED(cmount, NULL);
ldout(cct, 10) << "jni: getcwd: enter" << dendl;
c_cwd = ceph_getcwd(cmount);
if (!c_cwd) {
cephThrowOutOfMemory(env, "ceph_getcwd");
return NULL;
}
ldout(cct, 10) << "jni: getcwd: exit ret " << c_cwd << dendl;
return env->NewStringUTF(c_cwd);
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_chdir
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1chdir
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: chdir: path " << c_path << dendl;
ret = ceph_chdir(cmount, c_path);
ldout(cct, 10) << "jni: chdir: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_listdir
* Signature: (JLjava/lang/String;)[Ljava/lang/String;
*/
JNIEXPORT jobjectArray JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1listdir
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
struct ceph_dir_result *dirp;
list<string>::iterator it;
list<string> contents;
const char *c_path;
jobjectArray dirlist;
string *ent;
int ret, buflen, bufpos, i;
jstring name;
char *buf;
CHECK_ARG_NULL(j_path, "@path is null", NULL);
CHECK_MOUNTED(cmount, NULL);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "failed to pin memory");
return NULL;
}
ldout(cct, 10) << "jni: listdir: opendir: path " << c_path << dendl;
/* ret < 0 also includes -ENOTDIR which should return NULL */
ret = ceph_opendir(cmount, c_path, &dirp);
if (ret) {
env->ReleaseStringUTFChars(j_path, c_path);
handle_error(env, ret);
return NULL;
}
ldout(cct, 10) << "jni: listdir: opendir: exit ret " << ret << dendl;
/* buffer for ceph_getdnames() results */
buflen = 256;
buf = new (std::nothrow) char[buflen];
if (!buf) {
cephThrowOutOfMemory(env, "heap allocation failed");
goto out;
}
while (1) {
ldout(cct, 10) << "jni: listdir: getdnames: enter" << dendl;
ret = ceph_getdnames(cmount, dirp, buf, buflen);
if (ret == -ERANGE) {
delete [] buf;
buflen *= 2;
buf = new (std::nothrow) char[buflen];
if (!buf) {
cephThrowOutOfMemory(env, "heap allocation failed");
goto out;
}
continue;
}
ldout(cct, 10) << "jni: listdir: getdnames: exit ret " << ret << dendl;
if (ret <= 0)
break;
/* got at least one name */
bufpos = 0;
while (bufpos < ret) {
ent = new (std::nothrow) string(buf + bufpos);
if (!ent) {
delete [] buf;
cephThrowOutOfMemory(env, "heap allocation failed");
goto out;
}
/* filter out dot files: xref: java.io.File::list() */
if (ent->compare(".") && ent->compare("..")) {
contents.push_back(*ent);
ldout(cct, 20) << "jni: listdir: take path " << *ent << dendl;
}
bufpos += ent->size() + 1;
delete ent;
}
}
delete [] buf;
if (ret < 0) {
handle_error(env, ret);
goto out;
}
/* directory list */
dirlist = env->NewObjectArray(contents.size(), env->FindClass("java/lang/String"), NULL);
if (!dirlist)
goto out;
/*
* Fill directory listing array.
*
* FIXME: how should a partially filled array be cleaned-up properly?
*/
for (i = 0, it = contents.begin(); it != contents.end(); ++it) {
name = env->NewStringUTF(it->c_str());
if (!name)
goto out;
env->SetObjectArrayElement(dirlist, i++, name);
if (env->ExceptionOccurred())
goto out;
env->DeleteLocalRef(name);
}
env->ReleaseStringUTFChars(j_path, c_path);
ceph_closedir(cmount, dirp);
return dirlist;
out:
env->ReleaseStringUTFChars(j_path, c_path);
ceph_closedir(cmount, dirp);
return NULL;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_link
* Signature: (JLjava/lang/String;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1link
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_oldpath, jstring j_newpath)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_oldpath, *c_newpath;
int ret;
CHECK_ARG_NULL(j_oldpath, "@oldpath is null", -1);
CHECK_ARG_NULL(j_newpath, "@newpath is null", -1);
CHECK_MOUNTED(cmount, -1);
c_oldpath = env->GetStringUTFChars(j_oldpath, NULL);
if (!c_oldpath) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
c_newpath = env->GetStringUTFChars(j_newpath, NULL);
if (!c_newpath) {
env->ReleaseStringUTFChars(j_oldpath, c_oldpath);
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: link: oldpath " << c_oldpath <<
" newpath " << c_newpath << dendl;
ret = ceph_link(cmount, c_oldpath, c_newpath);
ldout(cct, 10) << "jni: link: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_oldpath, c_oldpath);
env->ReleaseStringUTFChars(j_newpath, c_newpath);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_unlink
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1unlink
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: unlink: path " << c_path << dendl;
ret = ceph_unlink(cmount, c_path);
ldout(cct, 10) << "jni: unlink: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_rename
* Signature: (JLjava/lang/String;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1rename
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_from, jstring j_to)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_from, *c_to;
int ret;
CHECK_ARG_NULL(j_from, "@from is null", -1);
CHECK_ARG_NULL(j_to, "@to is null", -1);
CHECK_MOUNTED(cmount, -1);
c_from = env->GetStringUTFChars(j_from, NULL);
if (!c_from) {
cephThrowInternal(env, "Failed to pin memory!");
return -1;
}
c_to = env->GetStringUTFChars(j_to, NULL);
if (!c_to) {
env->ReleaseStringUTFChars(j_from, c_from);
cephThrowInternal(env, "Failed to pin memory.");
return -1;
}
ldout(cct, 10) << "jni: rename: from " << c_from << " to " << c_to << dendl;
ret = ceph_rename(cmount, c_from, c_to);
ldout(cct, 10) << "jni: rename: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_from, c_from);
env->ReleaseStringUTFChars(j_to, c_to);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_mkdir
* Signature: (JLjava/lang/String;I)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1mkdir
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jint j_mode)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: mkdir: path " << c_path << " mode " << (int)j_mode << dendl;
ret = ceph_mkdir(cmount, c_path, (int)j_mode);
ldout(cct, 10) << "jni: mkdir: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_mkdirs
* Signature: (JLjava/lang/String;I)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1mkdirs
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jint j_mode)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: mkdirs: path " << c_path << " mode " << (int)j_mode << dendl;
ret = ceph_mkdirs(cmount, c_path, (int)j_mode);
ldout(cct, 10) << "jni: mkdirs: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_rmdir
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1rmdir
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: rmdir: path " << c_path << dendl;
ret = ceph_rmdir(cmount, c_path);
ldout(cct, 10) << "jni: rmdir: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_readlink
* Signature: (JLjava/lang/String;)Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1readlink
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
char *linkname;
struct ceph_statx stx;
jstring j_linkname;
CHECK_ARG_NULL(j_path, "@path is null", NULL);
CHECK_MOUNTED(cmount, NULL);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "failed to pin memory");
return NULL;
}
for (;;) {
ldout(cct, 10) << "jni: readlink: lstatx " << c_path << dendl;
int ret = ceph_statx(cmount, c_path, &stx, CEPH_STATX_SIZE,
AT_SYMLINK_NOFOLLOW);
ldout(cct, 10) << "jni: readlink: lstat exit ret " << ret << dendl;
if (ret) {
env->ReleaseStringUTFChars(j_path, c_path);
handle_error(env, ret);
return NULL;
}
linkname = new (std::nothrow) char[stx.stx_size + 1];
if (!linkname) {
env->ReleaseStringUTFChars(j_path, c_path);
cephThrowOutOfMemory(env, "head allocation failed");
return NULL;
}
ldout(cct, 10) << "jni: readlink: size " << stx.stx_size << " path " << c_path << dendl;
ret = ceph_readlink(cmount, c_path, linkname, stx.stx_size + 1);
ldout(cct, 10) << "jni: readlink: exit ret " << ret << dendl;
if (ret < 0) {
delete [] linkname;
env->ReleaseStringUTFChars(j_path, c_path);
handle_error(env, ret);
return NULL;
}
/* re-stat and try again */
if (ret > (int)stx.stx_size) {
delete [] linkname;
continue;
}
linkname[ret] = '\0';
break;
}
env->ReleaseStringUTFChars(j_path, c_path);
j_linkname = env->NewStringUTF(linkname);
delete [] linkname;
return j_linkname;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_symlink
* Signature: (JLjava/lang/String;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1symlink
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_oldpath, jstring j_newpath)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_oldpath, *c_newpath;
int ret;
CHECK_ARG_NULL(j_oldpath, "@oldpath is null", -1);
CHECK_ARG_NULL(j_newpath, "@newpath is null", -1);
CHECK_MOUNTED(cmount, -1);
c_oldpath = env->GetStringUTFChars(j_oldpath, NULL);
if (!c_oldpath) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
c_newpath = env->GetStringUTFChars(j_newpath, NULL);
if (!c_newpath) {
env->ReleaseStringUTFChars(j_oldpath, c_oldpath);
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: symlink: oldpath " << c_oldpath <<
" newpath " << c_newpath << dendl;
ret = ceph_symlink(cmount, c_oldpath, c_newpath);
ldout(cct, 10) << "jni: symlink: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_oldpath, c_oldpath);
env->ReleaseStringUTFChars(j_newpath, c_newpath);
if (ret)
handle_error(env, ret);
return ret;
}
#define CEPH_J_CEPHSTAT_MASK (CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_SIZE|CEPH_STATX_BLOCKS|CEPH_STATX_MTIME|CEPH_STATX_ATIME)
static void fill_cephstat(JNIEnv *env, jobject j_cephstat, struct ceph_statx *stx)
{
env->SetIntField(j_cephstat, cephstat_mode_fid, stx->stx_mode);
env->SetIntField(j_cephstat, cephstat_uid_fid, stx->stx_uid);
env->SetIntField(j_cephstat, cephstat_gid_fid, stx->stx_gid);
env->SetLongField(j_cephstat, cephstat_size_fid, stx->stx_size);
env->SetLongField(j_cephstat, cephstat_blksize_fid, stx->stx_blksize);
env->SetLongField(j_cephstat, cephstat_blocks_fid, stx->stx_blocks);
long long time = stx->stx_mtime.tv_sec;
time *= 1000;
time += stx->stx_mtime.tv_nsec / 1000000;
env->SetLongField(j_cephstat, cephstat_m_time_fid, time);
time = stx->stx_atime.tv_sec;
time *= 1000;
time += stx->stx_atime.tv_nsec / 1000000;
env->SetLongField(j_cephstat, cephstat_a_time_fid, time);
env->SetBooleanField(j_cephstat, cephstat_is_file_fid,
S_ISREG(stx->stx_mode) ? JNI_TRUE : JNI_FALSE);
env->SetBooleanField(j_cephstat, cephstat_is_directory_fid,
S_ISDIR(stx->stx_mode) ? JNI_TRUE : JNI_FALSE);
env->SetBooleanField(j_cephstat, cephstat_is_symlink_fid,
S_ISLNK(stx->stx_mode) ? JNI_TRUE : JNI_FALSE);
}
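/*
 * Worked example of the conversion above: stx_mtime.tv_sec = 1500000000 and
 * stx_mtime.tv_nsec = 250000000 become
 * 1500000000 * 1000 + 250000000 / 1000000 = 1500000000250 milliseconds in
 * the CephStat m_time field.
 */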
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_lstat
* Signature: (JLjava/lang/String;Lcom/ceph/fs/CephStat;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1lstat
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jobject j_cephstat)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
struct ceph_statx stx;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_ARG_NULL(j_cephstat, "@stat is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: lstat: path " << c_path << dendl;
ret = ceph_statx(cmount, c_path, &stx, CEPH_J_CEPHSTAT_MASK, AT_SYMLINK_NOFOLLOW);
ldout(cct, 10) << "jni: lstat exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret) {
handle_error(env, ret);
return ret;
}
fill_cephstat(env, j_cephstat, &stx);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_stat
* Signature: (JLjava/lang/String;Lcom/ceph/fs/CephStat;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1stat
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jobject j_cephstat)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
struct ceph_statx stx;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_ARG_NULL(j_cephstat, "@stat is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: stat: path " << c_path << dendl;
ret = ceph_statx(cmount, c_path, &stx, CEPH_J_CEPHSTAT_MASK, 0);
ldout(cct, 10) << "jni: stat exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret) {
handle_error(env, ret);
return ret;
}
fill_cephstat(env, j_cephstat, &stx);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_setattr
* Signature: (JLjava/lang/String;Lcom/ceph/fs/CephStat;I)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1setattr
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jobject j_cephstat, jint j_mask)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
struct ceph_statx stx;
int ret, mask = fixup_attr_mask(j_mask);
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_ARG_NULL(j_cephstat, "@stat is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
memset(&stx, 0, sizeof(stx));
stx.stx_mode = env->GetIntField(j_cephstat, cephstat_mode_fid);
stx.stx_uid = env->GetIntField(j_cephstat, cephstat_uid_fid);
stx.stx_gid = env->GetIntField(j_cephstat, cephstat_gid_fid);
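  /* CephStat carries times in milliseconds (see fill_cephstat above); split
   * them back into seconds and nanoseconds for ceph_setattrx. */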
long mtime_msec = env->GetLongField(j_cephstat, cephstat_m_time_fid);
long atime_msec = env->GetLongField(j_cephstat, cephstat_a_time_fid);
stx.stx_mtime.tv_sec = mtime_msec / 1000;
stx.stx_mtime.tv_nsec = (mtime_msec % 1000) * 1000000;
stx.stx_atime.tv_sec = atime_msec / 1000;
stx.stx_atime.tv_nsec = (atime_msec % 1000) * 1000000;
ldout(cct, 10) << "jni: setattr: path " << c_path << " mask " << mask << dendl;
ret = ceph_setattrx(cmount, c_path, &stx, mask, 0);
ldout(cct, 10) << "jni: setattr: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_chmod
* Signature: (JLjava/lang/String;I)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1chmod
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jint j_mode)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: chmod: path " << c_path << " mode " << (int)j_mode << dendl;
ret = ceph_chmod(cmount, c_path, (int)j_mode);
ldout(cct, 10) << "jni: chmod: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_fchmod
* Signature: (JII)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1fchmod
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jint j_mode)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
CHECK_MOUNTED(cmount, -1);
ldout(cct, 10) << "jni: fchmod: fd " << (int)j_fd << " mode " << (int)j_mode << dendl;
ret = ceph_fchmod(cmount, (int)j_fd, (int)j_mode);
ldout(cct, 10) << "jni: fchmod: exit ret " << ret << dendl;
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_truncate
* Signature: (JLjava/lang/String;J)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1truncate
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jlong j_size)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: truncate: path " << c_path << " size " << (loff_t)j_size << dendl;
ret = ceph_truncate(cmount, c_path, (loff_t)j_size);
ldout(cct, 10) << "jni: truncate: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_open
* Signature: (JLjava/lang/String;II)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1open
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jint j_flags, jint j_mode)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
int ret, flags = fixup_open_flags(j_flags);
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: open: path " << c_path << " flags " << flags
<< " mode " << (int)j_mode << dendl;
ret = ceph_open(cmount, c_path, flags, (int)j_mode);
ldout(cct, 10) << "jni: open: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret < 0)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_open_layout
* Signature: (JLjava/lang/String;IIIIILjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1open_1layout
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jint j_flags, jint j_mode,
jint stripe_unit, jint stripe_count, jint object_size, jstring j_data_pool)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path, *c_data_pool = NULL;
int ret, flags = fixup_open_flags(j_flags);
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
if (j_data_pool) {
c_data_pool = env->GetStringUTFChars(j_data_pool, NULL);
if (!c_data_pool) {
env->ReleaseStringUTFChars(j_path, c_path);
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
}
ldout(cct, 10) << "jni: open_layout: path " << c_path << " flags " << flags
<< " mode " << (int)j_mode << " stripe_unit " << stripe_unit
<< " stripe_count " << stripe_count << " object_size " << object_size
<< " data_pool " << (c_data_pool ? c_data_pool : "<NULL>") << dendl;
ret = ceph_open_layout(cmount, c_path, flags, (int)j_mode,
(int)stripe_unit, (int)stripe_count, (int)object_size, c_data_pool);
ldout(cct, 10) << "jni: open_layout: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (j_data_pool)
env->ReleaseStringUTFChars(j_data_pool, c_data_pool);
if (ret < 0)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_close
* Signature: (JI)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1close
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
CHECK_MOUNTED(cmount, -1);
ldout(cct, 10) << "jni: close: fd " << (int)j_fd << dendl;
ret = ceph_close(cmount, (int)j_fd);
ldout(cct, 10) << "jni: close: ret " << ret << dendl;
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_lseek
* Signature: (JIJI)J
*/
JNIEXPORT jlong JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1lseek
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jlong j_offset, jint j_whence)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int whence;
jlong ret;
CHECK_MOUNTED(cmount, -1);
switch (j_whence) {
case JAVA_SEEK_SET:
whence = SEEK_SET;
break;
case JAVA_SEEK_CUR:
whence = SEEK_CUR;
break;
case JAVA_SEEK_END:
whence = SEEK_END;
break;
default:
cephThrowIllegalArg(env, "Unknown whence value");
return -1;
}
ldout(cct, 10) << "jni: lseek: fd " << (int)j_fd << " offset "
<< (long)j_offset << " whence " << whence << dendl;
ret = ceph_lseek(cmount, (int)j_fd, (long)j_offset, whence);
ldout(cct, 10) << "jni: lseek: exit ret " << ret << dendl;
if (ret < 0)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_read
* Signature: (JI[BJJ)J
*/
JNIEXPORT jlong JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1read
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jbyteArray j_buf, jlong j_size, jlong j_offset)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
jsize buf_size;
jbyte *c_buf;
long ret;
CHECK_ARG_NULL(j_buf, "@buf is null", -1);
CHECK_ARG_BOUNDS(j_size < 0, "@size is negative", -1);
CHECK_MOUNTED(cmount, -1);
buf_size = env->GetArrayLength(j_buf);
CHECK_ARG_BOUNDS(j_size > buf_size, "@size > @buf.length", -1);
c_buf = env->GetByteArrayElements(j_buf, NULL);
if (!c_buf) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: read: fd " << (int)j_fd << " len " << (long)j_size <<
" offset " << (long)j_offset << dendl;
ret = ceph_read(cmount, (int)j_fd, (char*)c_buf, (long)j_size, (long)j_offset);
ldout(cct, 10) << "jni: read: exit ret " << ret << dendl;
if (ret < 0)
handle_error(env, (int)ret);
else
env->ReleaseByteArrayElements(j_buf, c_buf, 0);
return (jlong)ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_write
* Signature: (JI[BJJ)J
*/
JNIEXPORT jlong JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1write
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jbyteArray j_buf, jlong j_size, jlong j_offset)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
jsize buf_size;
jbyte *c_buf;
long ret;
CHECK_ARG_NULL(j_buf, "@buf is null", -1);
CHECK_ARG_BOUNDS(j_size < 0, "@size is negative", -1);
CHECK_MOUNTED(cmount, -1);
buf_size = env->GetArrayLength(j_buf);
CHECK_ARG_BOUNDS(j_size > buf_size, "@size > @buf.length", -1);
c_buf = env->GetByteArrayElements(j_buf, NULL);
if (!c_buf) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: write: fd " << (int)j_fd << " len " << (long)j_size <<
" offset " << (long)j_offset << dendl;
ret = ceph_write(cmount, (int)j_fd, (char*)c_buf, (long)j_size, (long)j_offset);
ldout(cct, 10) << "jni: write: exit ret " << ret << dendl;
if (ret < 0)
handle_error(env, (int)ret);
else
env->ReleaseByteArrayElements(j_buf, c_buf, JNI_ABORT);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_ftruncate
* Signature: (JIJ)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1ftruncate
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jlong j_size)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
CHECK_MOUNTED(cmount, -1);
ldout(cct, 10) << "jni: ftruncate: fd " << (int)j_fd <<
" size " << (loff_t)j_size << dendl;
ret = ceph_ftruncate(cmount, (int)j_fd, (loff_t)j_size);
ldout(cct, 10) << "jni: ftruncate: exit ret " << ret << dendl;
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_fsync
* Signature: (JIZ)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1fsync
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jboolean j_dataonly)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
ldout(cct, 10) << "jni: fsync: fd " << (int)j_fd <<
" dataonly " << (j_dataonly ? 1 : 0) << dendl;
ret = ceph_fsync(cmount, (int)j_fd, j_dataonly ? 1 : 0);
ldout(cct, 10) << "jni: fsync: exit ret " << ret << dendl;
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_flock
* Signature: (JIZ)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1flock
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jint j_operation, jlong j_owner)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
ldout(cct, 10) << "jni: flock: fd " << (int)j_fd <<
" operation " << j_operation << " owner " << j_owner << dendl;
int operation = 0;
#define MAP_FLOCK_FLAG(JNI_MASK, NATIVE_MASK) do { \
if ((j_operation & JNI_MASK) != 0) { \
operation |= NATIVE_MASK; \
j_operation &= ~JNI_MASK; \
} \
} while(0)
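  /* Each recognized bit is cleared from j_operation, so anything left over
   * after the mappings below is rejected as an invalid flag. */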
MAP_FLOCK_FLAG(JAVA_LOCK_SH, LOCK_SH);
MAP_FLOCK_FLAG(JAVA_LOCK_EX, LOCK_EX);
MAP_FLOCK_FLAG(JAVA_LOCK_NB, LOCK_NB);
MAP_FLOCK_FLAG(JAVA_LOCK_UN, LOCK_UN);
if (j_operation != 0) {
cephThrowIllegalArg(env, "flock flags");
return -EINVAL;
}
#undef MAP_FLOCK_FLAG
ret = ceph_flock(cmount, (int)j_fd, operation, (uint64_t) j_owner);
ldout(cct, 10) << "jni: flock: exit ret " << ret << dendl;
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_fstat
* Signature: (JILcom/ceph/fs/CephStat;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1fstat
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jobject j_cephstat)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
struct ceph_statx stx;
int ret;
CHECK_ARG_NULL(j_cephstat, "@stat is null", -1);
CHECK_MOUNTED(cmount, -1);
ldout(cct, 10) << "jni: fstat: fd " << (int)j_fd << dendl;
ret = ceph_fstatx(cmount, (int)j_fd, &stx, CEPH_J_CEPHSTAT_MASK, 0);
ldout(cct, 10) << "jni: fstat exit ret " << ret << dendl;
if (ret) {
handle_error(env, ret);
return ret;
}
fill_cephstat(env, j_cephstat, &stx);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_sync_fs
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1sync_1fs
(JNIEnv *env, jclass clz, jlong j_mntp)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
ldout(cct, 10) << "jni: sync_fs: enter" << dendl;
ret = ceph_sync_fs(cmount);
ldout(cct, 10) << "jni: sync_fs: exit ret " << ret << dendl;
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_getxattr
* Signature: (JLjava/lang/String;Ljava/lang/String;[B)J
*/
JNIEXPORT jlong JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1getxattr
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name, jbyteArray j_buf)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
const char *c_name;
jsize buf_size;
jbyte *c_buf = NULL; /* please gcc with goto */
long ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_ARG_NULL(j_name, "@name is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
c_name = env->GetStringUTFChars(j_name, NULL);
if (!c_name) {
env->ReleaseStringUTFChars(j_path, c_path);
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
/* just lookup the size if buf is null */
if (!j_buf) {
buf_size = 0;
goto do_getxattr;
}
c_buf = env->GetByteArrayElements(j_buf, NULL);
if (!c_buf) {
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
cephThrowInternal(env, "failed to pin memory");
return -1;
}
buf_size = env->GetArrayLength(j_buf);
do_getxattr:
ldout(cct, 10) << "jni: getxattr: path " << c_path << " name " << c_name <<
" len " << buf_size << dendl;
ret = ceph_getxattr(cmount, c_path, c_name, c_buf, buf_size);
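  /* The caller's buffer was too small: retry with a zero-length buffer so
   * the required value size is returned to Java instead of an error. */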
if (ret == -ERANGE)
ret = ceph_getxattr(cmount, c_path, c_name, c_buf, 0);
ldout(cct, 10) << "jni: getxattr: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
if (j_buf)
env->ReleaseByteArrayElements(j_buf, c_buf, 0);
if (ret < 0)
handle_error(env, (int)ret);
return (jlong)ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_lgetxattr
* Signature: (JLjava/lang/String;Ljava/lang/String;[B)I
*/
JNIEXPORT jlong JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1lgetxattr
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name, jbyteArray j_buf)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
const char *c_name;
jsize buf_size;
jbyte *c_buf = NULL; /* please gcc with goto */
long ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_ARG_NULL(j_name, "@name is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
c_name = env->GetStringUTFChars(j_name, NULL);
if (!c_name) {
env->ReleaseStringUTFChars(j_path, c_path);
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
/* just lookup the size if buf is null */
if (!j_buf) {
buf_size = 0;
goto do_lgetxattr;
}
c_buf = env->GetByteArrayElements(j_buf, NULL);
if (!c_buf) {
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
cephThrowInternal(env, "failed to pin memory");
return -1;
}
buf_size = env->GetArrayLength(j_buf);
do_lgetxattr:
ldout(cct, 10) << "jni: lgetxattr: path " << c_path << " name " << c_name <<
" len " << buf_size << dendl;
ret = ceph_lgetxattr(cmount, c_path, c_name, c_buf, buf_size);
if (ret == -ERANGE)
ret = ceph_lgetxattr(cmount, c_path, c_name, c_buf, 0);
ldout(cct, 10) << "jni: lgetxattr: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
if (j_buf)
env->ReleaseByteArrayElements(j_buf, c_buf, 0);
if (ret < 0)
handle_error(env, (int)ret);
return (jlong)ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_listxattr
* Signature: (JLjava/lang/String;)[Ljava/lang/String;
*/
JNIEXPORT jobjectArray JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1listxattr
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
jobjectArray xattrlist;
const char *c_path;
string *ent;
jstring name;
list<string>::iterator it;
list<string> contents;
int ret, buflen, bufpos, i;
char *buf;
CHECK_ARG_NULL(j_path, "@path is null", NULL);
CHECK_MOUNTED(cmount, NULL);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return NULL;
}
buflen = 1024;
buf = new (std::nothrow) char[buflen];
if (!buf) {
cephThrowOutOfMemory(env, "head allocation failed");
goto out;
}
while (1) {
ldout(cct, 10) << "jni: listxattr: path " << c_path << " len " << buflen << dendl;
ret = ceph_listxattr(cmount, c_path, buf, buflen);
if (ret == -ERANGE) {
delete [] buf;
buflen *= 2;
buf = new (std::nothrow) char[buflen];
if (!buf) {
cephThrowOutOfMemory(env, "heap allocation failed");
goto out;
}
continue;
}
break;
}
ldout(cct, 10) << "jni: listxattr: ret " << ret << dendl;
if (ret < 0) {
delete [] buf;
handle_error(env, ret);
goto out;
}
bufpos = 0;
while (bufpos < ret) {
ent = new (std::nothrow) string(buf + bufpos);
if (!ent) {
delete [] buf;
cephThrowOutOfMemory(env, "heap allocation failed");
goto out;
}
contents.push_back(*ent);
bufpos += ent->size() + 1;
delete ent;
}
delete [] buf;
xattrlist = env->NewObjectArray(contents.size(), env->FindClass("java/lang/String"), NULL);
if (!xattrlist)
goto out;
for (i = 0, it = contents.begin(); it != contents.end(); ++it) {
name = env->NewStringUTF(it->c_str());
if (!name)
goto out;
env->SetObjectArrayElement(xattrlist, i++, name);
if (env->ExceptionOccurred())
goto out;
env->DeleteLocalRef(name);
}
env->ReleaseStringUTFChars(j_path, c_path);
return xattrlist;
out:
env->ReleaseStringUTFChars(j_path, c_path);
return NULL;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_llistxattr
* Signature: (JLjava/lang/String;)[Ljava/lang/String;
*/
JNIEXPORT jobjectArray JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1llistxattr
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
jobjectArray xattrlist;
const char *c_path;
string *ent;
jstring name;
list<string>::iterator it;
list<string> contents;
int ret, buflen, bufpos, i;
char *buf;
CHECK_ARG_NULL(j_path, "@path is null", NULL);
CHECK_MOUNTED(cmount, NULL);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return NULL;
}
buflen = 1024;
buf = new (std::nothrow) char[buflen];
if (!buf) {
cephThrowOutOfMemory(env, "head allocation failed");
goto out;
}
while (1) {
ldout(cct, 10) << "jni: llistxattr: path " << c_path << " len " << buflen << dendl;
ret = ceph_llistxattr(cmount, c_path, buf, buflen);
if (ret == -ERANGE) {
delete [] buf;
buflen *= 2;
buf = new (std::nothrow) char[buflen];
if (!buf) {
cephThrowOutOfMemory(env, "heap allocation failed");
goto out;
}
continue;
}
break;
}
ldout(cct, 10) << "jni: llistxattr: ret " << ret << dendl;
if (ret < 0) {
delete [] buf;
handle_error(env, ret);
goto out;
}
bufpos = 0;
while (bufpos < ret) {
ent = new (std::nothrow) string(buf + bufpos);
if (!ent) {
delete [] buf;
cephThrowOutOfMemory(env, "heap allocation failed");
goto out;
}
contents.push_back(*ent);
bufpos += ent->size() + 1;
delete ent;
}
delete [] buf;
xattrlist = env->NewObjectArray(contents.size(), env->FindClass("java/lang/String"), NULL);
if (!xattrlist)
goto out;
for (i = 0, it = contents.begin(); it != contents.end(); ++it) {
name = env->NewStringUTF(it->c_str());
if (!name)
goto out;
env->SetObjectArrayElement(xattrlist, i++, name);
if (env->ExceptionOccurred())
goto out;
env->DeleteLocalRef(name);
}
env->ReleaseStringUTFChars(j_path, c_path);
return xattrlist;
out:
env->ReleaseStringUTFChars(j_path, c_path);
return NULL;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_removexattr
* Signature: (JLjava/lang/String;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1removexattr
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
const char *c_name;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_ARG_NULL(j_name, "@name is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
c_name = env->GetStringUTFChars(j_name, NULL);
if (!c_name) {
env->ReleaseStringUTFChars(j_path, c_path);
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: removexattr: path " << c_path << " name " << c_name << dendl;
ret = ceph_removexattr(cmount, c_path, c_name);
ldout(cct, 10) << "jni: removexattr: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_lremovexattr
* Signature: (JLjava/lang/String;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1lremovexattr
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
const char *c_name;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_ARG_NULL(j_name, "@name is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
c_name = env->GetStringUTFChars(j_name, NULL);
if (!c_name) {
env->ReleaseStringUTFChars(j_path, c_path);
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: lremovexattr: path " << c_path << " name " << c_name << dendl;
ret = ceph_lremovexattr(cmount, c_path, c_name);
ldout(cct, 10) << "jni: lremovexattr: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_setxattr
* Signature: (JLjava/lang/String;Ljava/lang/String;[BJI)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1setxattr
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name,
jbyteArray j_buf, jlong j_size, jint j_flags)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
const char *c_name;
jsize buf_size;
jbyte *c_buf;
int ret, flags;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_ARG_NULL(j_name, "@name is null", -1);
CHECK_ARG_NULL(j_buf, "@buf is null", -1);
CHECK_ARG_BOUNDS(j_size < 0, "@size is negative", -1);
CHECK_MOUNTED(cmount, -1);
buf_size = env->GetArrayLength(j_buf);
CHECK_ARG_BOUNDS(j_size > buf_size, "@size > @buf.length", -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
c_name = env->GetStringUTFChars(j_name, NULL);
if (!c_name) {
env->ReleaseStringUTFChars(j_path, c_path);
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
c_buf = env->GetByteArrayElements(j_buf, NULL);
if (!c_buf) {
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
cephThrowInternal(env, "failed to pin memory");
return -1;
}
switch (j_flags) {
case JAVA_XATTR_CREATE:
flags = CEPH_XATTR_CREATE;
break;
case JAVA_XATTR_REPLACE:
flags = CEPH_XATTR_REPLACE;
break;
case JAVA_XATTR_NONE:
flags = 0;
break;
default:
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
env->ReleaseByteArrayElements(j_buf, c_buf, JNI_ABORT);
cephThrowIllegalArg(env, "setxattr flag");
return -1;
}
ldout(cct, 10) << "jni: setxattr: path " << c_path << " name " << c_name
<< " len " << j_size << " flags " << flags << dendl;
ret = ceph_setxattr(cmount, c_path, c_name, c_buf, j_size, flags);
ldout(cct, 10) << "jni: setxattr: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
env->ReleaseByteArrayElements(j_buf, c_buf, JNI_ABORT);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_lsetxattr
* Signature: (JLjava/lang/String;Ljava/lang/String;[BJI)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1lsetxattr
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name,
jbyteArray j_buf, jlong j_size, jint j_flags)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
const char *c_name;
jsize buf_size;
jbyte *c_buf;
int ret, flags;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_ARG_NULL(j_name, "@name is null", -1);
CHECK_ARG_NULL(j_buf, "@buf is null", -1);
CHECK_ARG_BOUNDS(j_size < 0, "@size is negative", -1);
CHECK_MOUNTED(cmount, -1);
buf_size = env->GetArrayLength(j_buf);
CHECK_ARG_BOUNDS(j_size > buf_size, "@size > @buf.length", -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
c_name = env->GetStringUTFChars(j_name, NULL);
if (!c_name) {
env->ReleaseStringUTFChars(j_path, c_path);
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
c_buf = env->GetByteArrayElements(j_buf, NULL);
if (!c_buf) {
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
cephThrowInternal(env, "failed to pin memory");
return -1;
}
switch (j_flags) {
case JAVA_XATTR_CREATE:
flags = CEPH_XATTR_CREATE;
break;
case JAVA_XATTR_REPLACE:
flags = CEPH_XATTR_REPLACE;
break;
case JAVA_XATTR_NONE:
flags = 0;
break;
default:
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
env->ReleaseByteArrayElements(j_buf, c_buf, JNI_ABORT);
cephThrowIllegalArg(env, "lsetxattr flag");
return -1;
}
ldout(cct, 10) << "jni: lsetxattr: path " << c_path << " name " << c_name
<< " len " << j_size << " flags " << flags << dendl;
ret = ceph_lsetxattr(cmount, c_path, c_name, c_buf, j_size, flags);
ldout(cct, 10) << "jni: lsetxattr: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
env->ReleaseStringUTFChars(j_name, c_name);
env->ReleaseByteArrayElements(j_buf, c_buf, JNI_ABORT);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_file_stripe_unit
* Signature: (JI)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1file_1stripe_1unit
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
CHECK_MOUNTED(cmount, -1);
ldout(cct, 10) << "jni: get_file_stripe_unit: fd " << (int)j_fd << dendl;
ret = ceph_get_file_stripe_unit(cmount, (int)j_fd);
ldout(cct, 10) << "jni: get_file_stripe_unit: exit ret " << ret << dendl;
if (ret < 0)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_file_replication
* Signature: (JI)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1file_1replication
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
CHECK_MOUNTED(cmount, -1);
ldout(cct, 10) << "jni: get_file_replication: fd " << (int)j_fd << dendl;
ret = ceph_get_file_replication(cmount, (int)j_fd);
ldout(cct, 10) << "jni: get_file_replication: exit ret " << ret << dendl;
if (ret < 0)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_file_pool_name
* Signature: (JI)Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1file_1pool_1name
(JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
jstring pool = NULL;
int ret, buflen = 0;
char *buf = NULL;
CHECK_MOUNTED(cmount, NULL);
ldout(cct, 10) << "jni: get_file_pool_name: fd " << (int)j_fd << dendl;
for (;;) {
/* get pool name length (len==0) */
ret = ceph_get_file_pool_name(cmount, (int)j_fd, NULL, 0);
if (ret < 0)
break;
/* allocate buffer */
if (buf)
delete [] buf;
buflen = ret;
buf = new (std::nothrow) char[buflen+1]; /* +1 for '\0' */
if (!buf) {
cephThrowOutOfMemory(env, "head allocation failed");
goto out;
}
memset(buf, 0, (buflen+1)*sizeof(*buf));
/* handle zero-length pool name!? */
if (buflen == 0)
break;
/* fill buffer */
ret = ceph_get_file_pool_name(cmount, (int)j_fd, buf, buflen);
if (ret == -ERANGE) /* size changed! */
continue;
else
break;
}
ldout(cct, 10) << "jni: get_file_pool_name: ret " << ret << dendl;
if (ret < 0)
handle_error(env, ret);
else
pool = env->NewStringUTF(buf);
out:
if (buf)
delete [] buf;
return pool;
}
/**
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_default_data_pool_name
* Signature: (J)Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1default_1data_1pool_1name
(JNIEnv *env, jclass clz, jlong j_mntp)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
jstring pool = NULL;
int ret, buflen = 0;
char *buf = NULL;
CHECK_MOUNTED(cmount, NULL);
ldout(cct, 10) << "jni: get_default_data_pool_name" << dendl;
ret = ceph_get_default_data_pool_name(cmount, NULL, 0);
if (ret < 0)
goto out;
buflen = ret;
buf = new (std::nothrow) char[buflen+1]; /* +1 for '\0' */
if (!buf) {
cephThrowOutOfMemory(env, "head allocation failed");
goto out;
}
memset(buf, 0, (buflen+1)*sizeof(*buf));
ret = ceph_get_default_data_pool_name(cmount, buf, buflen);
ldout(cct, 10) << "jni: get_default_data_pool_name: ret " << ret << dendl;
if (ret < 0)
handle_error(env, ret);
else
pool = env->NewStringUTF(buf);
out:
if (buf)
delete [] buf;
return pool;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_localize_reads
* Signature: (JZ)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1localize_1reads
(JNIEnv *env, jclass clz, jlong j_mntp, jboolean j_on)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret, val = j_on ? 1 : 0;
CHECK_MOUNTED(cmount, -1);
ldout(cct, 10) << "jni: localize_reads: val " << val << dendl;
ret = ceph_localize_reads(cmount, val);
ldout(cct, 10) << "jni: localize_reads: exit ret " << ret << dendl;
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_stripe_unit_granularity
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1stripe_1unit_1granularity
(JNIEnv *env, jclass clz, jlong j_mntp)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
CHECK_MOUNTED(cmount, -1);
ldout(cct, 10) << "jni: get_stripe_unit_granularity" << dendl;
ret = ceph_get_stripe_unit_granularity(cmount);
ldout(cct, 10) << "jni: get_stripe_unit_granularity: exit ret " << ret << dendl;
if (ret < 0)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_pool_id
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1pool_1id
(JNIEnv *env, jclass clz, jlong j_mntp, jstring jname)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_name;
int ret;
CHECK_MOUNTED(cmount, -1);
CHECK_ARG_NULL(jname, "@name is null", -1);
c_name = env->GetStringUTFChars(jname, NULL);
if (!c_name) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: get_pool_id: name " << c_name << dendl;
ret = ceph_get_pool_id(cmount, c_name);
if (ret < 0)
handle_error(env, ret);
ldout(cct, 10) << "jni: get_pool_id: ret " << ret << dendl;
env->ReleaseStringUTFChars(jname, c_name);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_pool_replication
* Signature: (JI)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1pool_1replication
(JNIEnv *env, jclass clz, jlong j_mntp, jint jpoolid)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
int ret;
CHECK_MOUNTED(cmount, -1);
ldout(cct, 10) << "jni: get_pool_replication: poolid " << jpoolid << dendl;
ret = ceph_get_pool_replication(cmount, jpoolid);
if (ret < 0)
handle_error(env, ret);
ldout(cct, 10) << "jni: get_pool_replication: ret " << ret << dendl;
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_file_extent_osds
* Signature: (JIJ)Lcom/ceph/fs/CephFileExtent;
*/
JNIEXPORT jobject JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1file_1extent_1osds
(JNIEnv *env, jclass clz, jlong mntp, jint fd, jlong off)
{
struct ceph_mount_info *cmount = get_ceph_mount(mntp);
CephContext *cct = ceph_get_mount_context(cmount);
jobject extent = NULL;
int ret, nosds, *osds = NULL;
jintArray osd_array;
loff_t len;
CHECK_MOUNTED(cmount, NULL);
ldout(cct, 10) << "jni: get_file_extent_osds: fd " << fd << " off " << off << dendl;
for (;;) {
/* get pg size */
ret = ceph_get_file_extent_osds(cmount, fd, off, NULL, NULL, 0);
if (ret < 0)
break;
/* alloc osd id array */
if (osds)
delete [] osds;
nosds = ret;
osds = new int[nosds];
/* get osd ids */
ret = ceph_get_file_extent_osds(cmount, fd, off, &len, osds, nosds);
if (ret == -ERANGE)
continue;
else
break;
}
ldout(cct, 10) << "jni: get_file_extent_osds: ret " << ret << dendl;
if (ret < 0) {
handle_error(env, ret);
goto out;
}
nosds = ret;
osd_array = env->NewIntArray(nosds);
if (!osd_array)
goto out;
env->SetIntArrayRegion(osd_array, 0, nosds, osds);
if (env->ExceptionOccurred())
goto out;
extent = env->NewObject(cephfileextent_cls, cephfileextent_ctor_fid, off, len, osd_array);
if (!extent)
goto out;
out:
if (osds)
delete [] osds;
return extent;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_osd_crush_location
* Signature: (JI)[Ljava/lang/String;
*/
JNIEXPORT jobjectArray JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1osd_1crush_1location
(JNIEnv *env, jclass clz, jlong j_mntp, jint osdid)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
jobjectArray path = NULL;
vector<string> str_path;
int ret, bufpos, buflen = 0;
char *buf = NULL;
CHECK_MOUNTED(cmount, NULL);
ldout(cct, 10) << "jni: osd loc: osd " << osdid << dendl;
for (;;) {
/* get length of the location path */
ret = ceph_get_osd_crush_location(cmount, osdid, NULL, 0);
if (ret < 0)
break;
/* alloc path buffer */
if (buf)
delete [] buf;
buflen = ret;
buf = new char[buflen+1]; /* +1 for '\0' */
memset(buf, 0, (buflen+1)*sizeof(*buf));
/* empty path */
if (buflen == 0)
break;
/* get the path */
ret = ceph_get_osd_crush_location(cmount, osdid, buf, buflen);
if (ret == -ERANGE)
continue;
else
break;
}
ldout(cct, 10) << "jni: osd loc: osd " << osdid << " ret " << ret << dendl;
if (ret < 0) {
handle_error(env, ret);
goto out;
}
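/* the location buffer is a sequence of NUL-terminated (type, name) string
 * pairs; unpack them into a flat vector for the Java layer */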
bufpos = 0;
while (bufpos < ret) {
string type(buf + bufpos);
bufpos += type.size() + 1;
string name(buf + bufpos);
bufpos += name.size() + 1;
str_path.push_back(type);
str_path.push_back(name);
}
path = env->NewObjectArray(str_path.size(), env->FindClass("java/lang/String"), NULL);
if (!path)
goto out;
for (unsigned i = 0; i < str_path.size(); i++) {
jstring ent = env->NewStringUTF(str_path[i].c_str());
if (!ent)
goto out;
env->SetObjectArrayElement(path, i, ent);
if (env->ExceptionOccurred())
goto out;
env->DeleteLocalRef(ent);
}
out:
if (buf)
delete [] buf;
return path;
}
/*
* sockaddrToInetAddress uses with the following license, and is adapted for
* use in this project by using Ceph JNI exception utilities.
*
* ----
*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
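/*
 * Convert a native sockaddr_storage (IPv4, IPv6, or IPv4-mapped IPv6) into a
 * java.net.InetAddress, optionally returning the port via *port. AF_UNIX
 * addresses are rejected, since OSD addresses are never UNIX sockets.
 */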
jobject sockaddrToInetAddress(JNIEnv* env, const sockaddr_storage& ss, jint* port) {
// Convert IPv4-mapped IPv6 addresses to IPv4 addresses.
// The RI states "Java will never return an IPv4-mapped address".
const sockaddr_in6& sin6 = reinterpret_cast<const sockaddr_in6&>(ss);
if (ss.ss_family == AF_INET6 && IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
// Copy the IPv6 address into the temporary sockaddr_storage.
sockaddr_storage tmp;
memset(&tmp, 0, sizeof(tmp));
memcpy(&tmp, &ss, sizeof(sockaddr_in6));
// Unmap it into an IPv4 address.
sockaddr_in& sin = reinterpret_cast<sockaddr_in&>(tmp);
sin.sin_family = AF_INET;
sin.sin_port = sin6.sin6_port;
memcpy(&sin.sin_addr.s_addr, &sin6.sin6_addr.s6_addr[12], 4);
// Do the regular conversion using the unmapped address.
return sockaddrToInetAddress(env, tmp, port);
}
const void* rawAddress;
size_t addressLength;
int sin_port = 0;
int scope_id = 0;
if (ss.ss_family == AF_INET) {
const sockaddr_in& sin = reinterpret_cast<const sockaddr_in&>(ss);
rawAddress = &sin.sin_addr.s_addr;
addressLength = 4;
sin_port = ntohs(sin.sin_port);
} else if (ss.ss_family == AF_INET6) {
const sockaddr_in6& sin6 = reinterpret_cast<const sockaddr_in6&>(ss);
rawAddress = &sin6.sin6_addr.s6_addr;
addressLength = 16;
sin_port = ntohs(sin6.sin6_port);
scope_id = sin6.sin6_scope_id;
} else if (ss.ss_family == AF_UNIX) {
const sockaddr_un& sun = reinterpret_cast<const sockaddr_un&>(ss);
rawAddress = &sun.sun_path;
addressLength = strlen(sun.sun_path);
} else {
// We can't throw SocketException. We aren't meant to see bad addresses, so seeing one
// really does imply an internal error.
//jniThrowExceptionFmt(env, "java/lang/IllegalArgumentException",
// "sockaddrToInetAddress unsupported ss_family: %i", ss.ss_family);
cephThrowIllegalArg(env, "sockaddrToInetAddress unsupposed ss_family");
return NULL;
}
if (port != NULL) {
*port = sin_port;
}
ScopedLocalRef<jbyteArray> byteArray(env, env->NewByteArray(addressLength));
if (byteArray.get() == NULL) {
return NULL;
}
env->SetByteArrayRegion(byteArray.get(), 0, addressLength,
reinterpret_cast<jbyte*>(const_cast<void*>(rawAddress)));
if (ss.ss_family == AF_UNIX) {
// Note that we get here for AF_UNIX sockets on accept(2). The unix(7) man page claims
// that the peer's sun_path will contain the path, but in practice it doesn't, and the
// peer length is returned as 2 (meaning only the sun_family field was set).
//
// Ceph Note: this isn't supported; inetUnixAddress appears to be
// Dalvik/Android-specific.
cephThrowInternal(env, "OSD address should never be a UNIX socket");
return NULL;
//static jmethodID ctor = env->GetMethodID(JniConstants::inetUnixAddressClass, "<init>", "([B)V");
//return env->NewObject(JniConstants::inetUnixAddressClass, ctor, byteArray.get());
}
if (addressLength == 4) {
static jmethodID getByAddressMethod = env->GetStaticMethodID(JniConstants::inetAddressClass,
"getByAddress", "(Ljava/lang/String;[B)Ljava/net/InetAddress;");
if (getByAddressMethod == NULL) {
return NULL;
}
return env->CallStaticObjectMethod(JniConstants::inetAddressClass, getByAddressMethod,
NULL, byteArray.get());
} else if (addressLength == 16) {
static jmethodID getByAddressMethod = env->GetStaticMethodID(JniConstants::inet6AddressClass,
"getByAddress", "(Ljava/lang/String;[BI)Ljava/net/Inet6Address;");
if (getByAddressMethod == NULL) {
return NULL;
}
return env->CallStaticObjectMethod(JniConstants::inet6AddressClass, getByAddressMethod,
NULL, byteArray.get(), scope_id);
} else {
abort();
return NULL;
}
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_osd_addr
* Signature: (JI)Ljava/net/InetAddress;
*/
JNIEXPORT jobject JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1osd_1addr
(JNIEnv *env, jclass clz, jlong j_mntp, jint osd)
{
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
struct sockaddr_storage addr;
int ret;
CHECK_MOUNTED(cmount, NULL);
ldout(cct, 10) << "jni: get_osd_addr: osd " << osd << dendl;
ret = ceph_get_osd_addr(cmount, osd, &addr);
ldout(cct, 10) << "jni: get_osd_addr: ret " << ret << dendl;
if (ret < 0) {
handle_error(env, ret);
return NULL;
}
return sockaddrToInetAddress(env, addr, NULL);
}
| 79,866 | 25.323995 | 128 |
cc
|
null |
ceph-main/src/java/test/com/ceph/fs/CephAllTests.java
|
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.UUID;
import org.junit.*;
import org.junit.runners.Suite;
import org.junit.runner.RunWith;
import static org.junit.Assert.*;
@RunWith( Suite.class )
@Suite.SuiteClasses( {
CephDoubleMountTest.class,
CephMountCreateTest.class,
CephMountTest.class,
CephUnmountedTest.class,
})
/**
* Every Java test class must be added to this list in order to be executed with 'ant test'
*/
public class CephAllTests{
}
| 1,601 | 34.6 | 91 |
java
|
null |
ceph-main/src/java/test/com/ceph/fs/CephDoubleMountTest.java
|
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.UUID;
import org.junit.*;
import static org.junit.Assert.*;
public class CephDoubleMountTest {
@Test(expected=CephAlreadyMountedException.class)
public void test_double_mount() throws Exception {
CephMount mount = new CephMount("admin");
String conf_file = System.getProperty("CEPH_CONF_FILE");
if (conf_file != null)
mount.conf_read_file(conf_file);
mount.mount(null);
try {
mount.mount(null);
} finally {
mount.unmount();
}
}
}
| 1,670 | 35.326087 | 78 |
java
|
null |
ceph-main/src/java/test/com/ceph/fs/CephMountCreateTest.java
|
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import java.io.FileNotFoundException;
import org.junit.*;
import java.util.UUID;
import static org.junit.Assert.*;
/*
* This tests the mount root dir functionality. It creates an empty
* directory in the real root, then it re-mounts the file system
* with the empty directory specified as the root. Assertions are
* that the "/" in the normal mount is non-empty, and that "/" is
* empty in the mount with the empty directory as the root.
*/
public class CephMountCreateTest {
private static String conf_file;
@BeforeClass
public static void class_setup() throws Exception {
conf_file = System.getProperty("CEPH_CONF_FILE");
}
private CephMount setupMount(String root) throws Exception {
CephMount mount = new CephMount("admin");
if (conf_file != null)
mount.conf_read_file(conf_file);
mount.conf_set("client_permissions", "0");
mount.mount(root);
return mount;
}
@Test
public void test_CephMountCreate() throws Exception {
CephMount mount;
boolean found;
String dir = "libcephfs_junit_" + UUID.randomUUID();
/* root dir has more than one dir */
mount = setupMount("/");
try {
mount.rmdir("/" + dir);
} catch (FileNotFoundException e) {}
mount.mkdirs("/" + dir, 777);
String[] subdirs = mount.listdir("/");
found = false;
for (String d : subdirs) {
if (d.compareTo(dir) == 0)
found = true;
}
assertTrue(found);
mount.unmount();
/* changing root to empty dir */
mount = setupMount("/" + dir);
subdirs = mount.listdir("/");
found = false;
for (String d : subdirs) {
found = true;
}
assertFalse(found);
mount.unmount();
/* cleanup */
mount = setupMount("/");
mount.rmdir("/" + dir);
mount.unmount();
}
}
| 2,896 | 30.48913 | 78 |
java
|
null |
ceph-main/src/java/test/com/ceph/fs/CephMountTest.java
|
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetAddress;
import java.util.UUID;
import org.junit.*;
import static org.junit.Assert.*;
import com.ceph.crush.Bucket;
/*
* Coverage
* - Everything is covered in at least success cases.
* - l[set,get,remove]xattr are not working
*/
public class CephMountTest {
private static CephMount mount;
private static String basedir = null;
@BeforeClass
public static void setup() throws Exception {
mount = new CephMount("admin");
String conf_file = System.getProperty("CEPH_CONF_FILE");
if (conf_file != null)
mount.conf_read_file(conf_file);
mount.conf_set("client_permissions", "0");
mount.mount(null);
basedir = "/libcephfs_junit_" + UUID.randomUUID();
mount.mkdir(basedir, 0777);
}
@AfterClass
public static void destroy() throws Exception {
String[] list = mount.listdir(basedir);
for (String l : list)
System.out.println(l);
mount.rmdir(basedir);
mount.unmount();
}
/*
* Helper function to construct a unique path.
*/
public String makePath() {
String path = basedir + "/" + UUID.randomUUID();
return path;
}
/*
* Helper to learn the data pool name, by reading it
* from the '/' dir inode.
*/
public String getRootPoolName() throws Exception
{
int fd = mount.open("/", CephMount.O_DIRECTORY, 0600);
String pool = mount.get_file_pool_name(fd);
mount.close(fd);
return pool;
}
/*
* Helper function to create a file with the given path and size. The file
* is filled with size bytes and the file descriptor is returned.
*/
public int createFile(String path, int size) throws Exception {
int fd = mount.open(path, CephMount.O_RDWR|CephMount.O_CREAT, 0600);
byte[] buf = new byte[4096];
int left = size;
while (left > 0) {
size = Math.min(buf.length, left);
long ret = mount.write(fd, buf, size, -1);
left -= ret;
}
return fd;
}
/*
* Helper function to create a unique file and fill it with size bytes. The
* file descriptor is returned.
*/
public int createFile(int size) throws Exception {
return createFile(makePath(), size);
}
@Test(expected=FileNotFoundException.class)
public void test_mount_dne() throws Exception {
CephMount mount2 = new CephMount("admin");
String conf_file = System.getProperty("CEPH_CONF_FILE");
if (conf_file != null)
mount2.conf_read_file(conf_file);
mount2.mount("/wlfkjwlekfjwlejfwe");
mount2.unmount();
}
/*
* Test loading of conf file that doesn't exist.
*
* FIXME:
* Ceph returns -ENOSYS rather than -ENOENT. Correct?
*/
//@Test(expected=FileNotFoundException.class)
@Test
public void test_conf_read_file_dne() throws Exception {
//mount.conf_read_file("/this_file_does_not_exist");
}
/*
* Test loading of conf file that isn't valid
*
* FIXME: implement
*/
@Test
public void test_conf_read_file_invalid() throws Exception {
}
@Test(expected=NullPointerException.class)
public void test_conf_read_file_null() throws Exception {
mount.conf_read_file(null);
}
/*
* conf_set/conf_get
*/
@Test(expected=NullPointerException.class)
public void test_conf_set_null_opt() throws Exception {
mount.conf_set(null, "value");
}
@Test(expected=NullPointerException.class)
public void test_conf_set_null_val() throws Exception {
mount.conf_set("option", null);
}
@Test(expected=NullPointerException.class)
public void test_conf_get_null_opt() throws Exception {
mount.conf_get(null);
}
@Test
public void test_conf() throws Exception {
String opt = "log to stderr";
String val1, val2, val3;
/* get the current value */
val1 = mount.conf_get(opt);
/*
* flip the value. this may make some debug information be dumped to the
* console when the value becomes true. TODO: find a better config option
* to toggle.
*/
if (val1.compareTo("true") == 0)
val2 = "false";
else
val2 = "true";
mount.conf_set(opt, val2);
/* verify the change */
val3 = mount.conf_get(opt);
assertTrue(val3.compareTo(val2) == 0);
/* reset to original value */
mount.conf_set(opt, val1);
val3 = mount.conf_get(opt);
assertTrue(val3.compareTo(val1) == 0);
}
/*
* statfs
*/
@Test
public void test_statfs() throws Exception {
CephStatVFS st1 = new CephStatVFS();
mount.statfs("/", st1);
/*
* FIXME: a better test here is to see if changes to the file system are
* reflected through statfs (e.g. increasing number of files). However, it
* appears that the updates aren't immediately visible.
*/
assertTrue(st1.bsize > 0);
assertTrue(st1.frsize > 0);
assertTrue(st1.blocks > 0);
assertTrue(st1.bavail > 0);
assertTrue(st1.namemax > 0);
}
/*
* getcwd/chdir
*/
@Test
public void test_getcwd() throws Exception {
mount.chdir(basedir);
String cwd = mount.getcwd();
assertTrue(cwd.compareTo(basedir) == 0);
/* Make sure to reset cwd to root */
mount.chdir("/");
cwd = mount.getcwd();
assertTrue(cwd.compareTo("/") == 0);
}
@Test(expected=NullPointerException.class)
public void test_chdir_null() throws Exception {
mount.chdir(null);
}
@Test(expected=FileNotFoundException.class)
public void test_chdir_dne() throws Exception {
mount.chdir("/this/path/does/not/exist/");
}
/*
* FIXME: this test should throw an error (but does not)?
*/
//@Test(expected=IOException.class)
@Test
public void test_chdir_not_dir() throws Exception {
String path = makePath();
int fd = createFile(path, 1);
mount.close(fd);
//mount.chdir(path); shouldn't be able to do this?
mount.unlink(path);
/*
* Switch back. Other tests seem to be sensitive to the current directory
* being something other than "/". This shouldn't happen once this tests
* passes and the call to chdir fails anyway.
*/
mount.chdir("/");
}
/*
* listdir
*/
@Test(expected=NullPointerException.class)
public void test_listdir_null() throws Exception {
mount.listdir(null);
}
@Test(expected=FileNotFoundException.class)
public void test_listdir_dne() throws Exception {
mount.listdir("/this/path/does/not/exist/");
}
@Test(expected=IOException.class)
public void test_listdir_not_dir() throws Exception {
String path = makePath();
int fd = createFile(path, 1);
mount.close(fd);
try {
mount.listdir(path);
} finally {
mount.unlink(path);
}
}
@Test
public void test_listdir() throws Exception {
String dir = makePath();
mount.mkdir(dir, 0777);
/* test that new directory is empty */
String[] list = mount.listdir(dir);
assertTrue(list.length == 0);
/* test that new directories are seen */
for (int i = 0; i < 3; i++)
mount.mkdir(dir + "/" + i, 777);
list = mount.listdir(dir);
assertTrue(list.length == 3);
/* test that more new directories are seen */
for (int i = 0; i < 30; i++)
mount.mkdir(dir + "/x" + i, 777);
list = mount.listdir(dir);
assertTrue(list.length == 33);
/* remove */
for (int i = 0; i < 30; i++)
mount.rmdir(dir + "/x" + i);
for (int i = 0; i < 3; i++)
mount.rmdir(dir + "/" + i);
mount.rmdir(dir);
}
/*
* Missing
*
* ceph_link
* ceph_unlink
*/
/*
* rename
*/
@Test(expected=NullPointerException.class)
public void test_rename_null_from() throws Exception {
mount.rename(null, "to");
}
@Test(expected=NullPointerException.class)
public void test_rename_null_to() throws Exception {
mount.rename("from", null);
}
@Test(expected=FileNotFoundException.class)
public void test_rename_dne() throws Exception {
mount.rename("/this/doesnt/exist", "/this/neither");
}
@Test
public void test_rename() throws Exception {
/* create a file */
String path = makePath();
int fd = createFile(path, 1);
mount.close(fd);
/* move it to a new name */
String newpath = makePath();
mount.rename(path, newpath);
/* verify the sizes are the same */
CephStat st = new CephStat();
mount.lstat(newpath, st);
assertTrue(st.size == 1);
/* remove the file */
mount.unlink(newpath);
}
/*
* mkdir/mkdirs/rmdir
*/
@Test(expected=IOException.class)
public void test_mkdir_exists() throws Exception {
String path = makePath();
mount.mkdir(path, 0777);
try {
mount.mkdir(path, 0777);
} finally {
mount.rmdir(path);
}
}
@Test(expected=IOException.class)
public void test_mkdirs_exists() throws Exception {
String path = makePath();
mount.mkdirs(path, 0777);
try {
mount.mkdirs(path, 0777);
} finally {
mount.rmdir(path);
}
}
@Test
public void test_mkdir() throws Exception {
String path = makePath();
mount.mkdir(path, 0777);
CephStat st = new CephStat();
mount.lstat(path, st);
assertTrue(st.isDir());
mount.rmdir(path);
}
@Test
public void test_mkdirs() throws Exception {
String path = makePath();
mount.mkdirs(path + "/x/y", 0777);
CephStat st = new CephStat();
mount.lstat(path, st);
assertTrue(st.isDir());
mount.lstat(path + "/x", st);
assertTrue(st.isDir());
mount.lstat(path + "/x/y", st);
assertTrue(st.isDir());
mount.rmdir(path + "/x/y");
mount.rmdir(path + "/x");
mount.rmdir(path);
}
@Test(expected=FileNotFoundException.class)
public void test_rmdir() throws Exception {
/* make a new directory */
String path = makePath();
mount.mkdir(path, 0777);
CephStat st = new CephStat();
mount.lstat(path, st);
assertTrue(st.isDir());
/* remove it */
mount.rmdir(path);
/* should not exist now */
mount.lstat(path, st);
}
/*
* readlink
* symlink
*/
@Test
public void test_symlink() throws Exception {
String oldpath = makePath();
String newpath = makePath();
mount.symlink(oldpath, newpath);
CephStat stat = new CephStat();
mount.lstat(newpath, stat);
assertTrue(stat.isSymlink());
String symlink = mount.readlink(newpath);
assertTrue(symlink.compareTo(oldpath) == 0);
mount.unlink(newpath);
}
/*
* lstat
*/
@Test(expected=NullPointerException.class)
public void test_lstat_null_path() throws Exception {
mount.lstat(null, new CephStat());
}
@Test(expected=NullPointerException.class)
public void test_lstat_null_stat() throws Exception {
mount.lstat("/path", null);
}
@Test(expected=FileNotFoundException.class)
public void test_lstat_null_dne() throws Exception {
mount.lstat("/path/does/not/exist", new CephStat());
}
/*
* test_stat covers lstat and fstat and stat.
*
* TODO: create test that for lstat vs stat with symlink follow/nofollow.
*/
@Test
public void test_stat() throws Exception {
/* create a new file */
String path = makePath();
int size = 12345;
int fd = createFile(path, size);
mount.close(fd);
/* test some basic info about the new file */
CephStat orig_st = new CephStat();
mount.lstat(path, orig_st);
assertTrue(orig_st.size == size);
assertTrue(orig_st.blksize > 0);
assertTrue(orig_st.blocks > 0);
/* now try stat */
CephStat stat_st = new CephStat();
mount.stat(path, stat_st);
/* now try fstat */
CephStat other_st = new CephStat();
fd = mount.open(path, CephMount.O_RDWR, 0);
mount.fstat(fd, other_st);
mount.close(fd);
mount.unlink(path);
/* compare to fstat results */
assertTrue(orig_st.mode == other_st.mode);
assertTrue(orig_st.uid == other_st.uid);
assertTrue(orig_st.gid == other_st.gid);
assertTrue(orig_st.size == other_st.size);
assertTrue(orig_st.blksize == other_st.blksize);
assertTrue(orig_st.blocks == other_st.blocks);
/* compare to stat results */
assertTrue(orig_st.mode == stat_st.mode);
assertTrue(orig_st.uid == stat_st.uid);
assertTrue(orig_st.gid == stat_st.gid);
assertTrue(orig_st.size == stat_st.size);
assertTrue(orig_st.blksize == stat_st.blksize);
assertTrue(orig_st.blocks == stat_st.blocks);
}
/*
* stat
*/
@Test(expected=NullPointerException.class)
public void test_stat_null_path() throws Exception {
mount.stat(null, new CephStat());
}
@Test(expected=NullPointerException.class)
public void test_stat_null_stat() throws Exception {
mount.stat("/path", null);
}
@Test(expected=FileNotFoundException.class)
public void test_stat_null_dne() throws Exception {
mount.stat("/path/does/not/exist", new CephStat());
}
@Test(expected=CephNotDirectoryException.class)
public void test_enotdir() throws Exception {
String path = makePath();
int fd = createFile(path, 1);
mount.close(fd);
try {
CephStat stat = new CephStat();
mount.lstat(path + "/blah", stat);
} finally {
mount.unlink(path);
}
}
/*
* setattr
*/
@Test(expected=NullPointerException.class)
public void test_setattr_null_path() throws Exception {
mount.setattr(null, new CephStat(), 0);
}
@Test(expected=NullPointerException.class)
public void test_setattr_null_stat() throws Exception {
mount.setattr("/path", null, 0);
}
@Test(expected=FileNotFoundException.class)
public void test_setattr_dne() throws Exception {
mount.setattr("/path/does/not/exist", new CephStat(), 0);
}
@Test
public void test_setattr() throws Exception {
/* create a file */
String path = makePath();
int fd = createFile(path, 1);
mount.close(fd);
CephStat st1 = new CephStat();
mount.lstat(path, st1);
st1.uid += 1;
st1.gid += 1;
mount.setattr(path, st1, mount.SETATTR_UID|mount.SETATTR_GID);
CephStat st2 = new CephStat();
mount.lstat(path, st2);
assertTrue(st2.uid == st1.uid);
assertTrue(st2.gid == st1.gid);
/* remove the file */
mount.unlink(path);
}
/*
* chmod
*/
@Test(expected=NullPointerException.class)
public void test_chmod_null_path() throws Exception {
mount.chmod(null, 0);
}
@Test(expected=FileNotFoundException.class)
public void test_chmod_dne() throws Exception {
mount.chmod("/path/does/not/exist", 0);
}
@Test
public void test_chmod() throws Exception {
/* create a file */
String path = makePath();
int fd = createFile(path, 1);
mount.close(fd);
CephStat st = new CephStat();
mount.lstat(path, st);
/* flip a bit */
int mode = st.mode;
if ((mode & 1) != 0)
mode -= 1;
else
mode += 1;
mount.chmod(path, mode);
CephStat st2 = new CephStat();
mount.lstat(path, st2);
assertTrue(st2.mode == mode);
mount.unlink(path);
}
/*
* fchmod
*/
@Test
public void test_fchmod() throws Exception {
/* create a file */
String path = makePath();
int fd = createFile(path, 1);
CephStat st = new CephStat();
mount.lstat(path, st);
/* flip a bit */
int mode = st.mode;
if ((mode & 1) != 0)
mode -= 1;
else
mode += 1;
mount.fchmod(fd, mode);
mount.close(fd);
CephStat st2 = new CephStat();
mount.lstat(path, st2);
assertTrue(st2.mode == mode);
mount.unlink(path);
}
/*
* truncate
*/
@Test(expected=FileNotFoundException.class)
public void test_truncate_dne() throws Exception {
mount.truncate("/path/does/not/exist", 0);
}
@Test(expected=NullPointerException.class)
public void test_truncate_null_path() throws Exception {
mount.truncate(null, 0);
}
@Test
public void test_truncate() throws Exception {
// make file
String path = makePath();
int orig_size = 1398331;
int fd = createFile(path, orig_size);
mount.close(fd);
// check file size
CephStat st = new CephStat();
mount.lstat(path, st);
assertTrue(st.size == orig_size);
// truncate and check
int crop_size = 333333;
mount.truncate(path, crop_size);
mount.lstat(path, st);
assertTrue(st.size == crop_size);
// check after re-open
fd = mount.open(path, CephMount.O_RDWR, 0);
mount.fstat(fd, st);
assertTrue(st.size == crop_size);
mount.close(fd);
mount.unlink(path);
}
@Test
public void test_open_layout() throws Exception {
String path = makePath();
int fd = mount.open(path, CephMount.O_WRONLY|CephMount.O_CREAT, 0,
(1<<20), 1, (1<<20), null);
mount.close(fd);
mount.unlink(path);
}
/*
* open/close
*/
@Test(expected=FileNotFoundException.class)
public void test_open_dne() throws Exception {
mount.open("/path/doesnt/exist", 0, 0);
}
/*
* lseek
*/
@Test
public void test_lseek() throws Exception {
/* create a new file */
String path = makePath();
int size = 12345;
int fd = createFile(path, size);
mount.close(fd);
/* open and check size */
fd = mount.open(path, CephMount.O_RDWR, 0);
long end = mount.lseek(fd, 0, CephMount.SEEK_END);
mount.close(fd);
mount.unlink(path);
assertTrue(size == (int)end);
}
/*
* read/write
*/
@Test
public void test_read() throws Exception {
String path = makePath();
int fd = createFile(path, 1500);
byte[] buf = new byte[1500];
long ret = mount.read(fd, buf, 1500, 0);
assertTrue(ret == 1500);
mount.unlink(path);
}
/*
* ftruncate
*/
@Test
public void test_ftruncate() throws Exception {
// make file
String path = makePath();
int orig_size = 1398331;
int fd = createFile(path, orig_size);
// check file size
CephStat st = new CephStat();
mount.fstat(fd, st);
assertTrue(st.size == orig_size);
// truncate and check
int crop_size = 333333;
mount.ftruncate(fd, crop_size);
mount.fstat(fd, st);
if (st.size != crop_size) {
System.err.println("ftruncate error: st.size=" + st.size + " crop_size=" + crop_size);
assertTrue(false);
}
assertTrue(st.size == crop_size);
mount.close(fd);
// check after re-open
fd = mount.open(path, CephMount.O_RDWR, 0);
mount.fstat(fd, st);
assertTrue(st.size == crop_size);
mount.close(fd);
mount.unlink(path);
}
/*
* fsync
*/
@Test
public void test_fsync() throws Exception {
String path = makePath();
int fd = createFile(path, 123);
mount.fsync(fd, false);
mount.fsync(fd, true);
mount.close(fd);
mount.unlink(path);
}
/*
* flock
*/
@Test
public void test_flock() throws Exception {
String path = makePath();
int fd = createFile(path, 123);
mount.flock(fd, CephMount.LOCK_SH | CephMount.LOCK_NB, 42);
mount.flock(fd, CephMount.LOCK_SH | CephMount.LOCK_NB, 43);
mount.flock(fd, CephMount.LOCK_UN, 42);
mount.flock(fd, CephMount.LOCK_UN, 43);
mount.flock(fd, CephMount.LOCK_EX | CephMount.LOCK_NB, 42);
try {
mount.flock(fd, CephMount.LOCK_SH | CephMount.LOCK_NB, 43);
assertTrue(false);
} catch(IOException io) {}
try {
mount.flock(fd, CephMount.LOCK_EX | CephMount.LOCK_NB, 43);
assertTrue(false);
} catch(IOException io) {}
mount.flock(fd, CephMount.LOCK_SH, 42); // downgrade
mount.flock(fd, CephMount.LOCK_SH, 43);
mount.flock(fd, CephMount.LOCK_UN, 42);
mount.flock(fd, CephMount.LOCK_UN, 43);
mount.close(fd);
mount.unlink(path);
}
/*
* fstat
*
* success case is handled in test_stat along with lstat.
*/
/*
* sync_fs
*/
@Test
public void test_sync_fs() throws Exception {
mount.sync_fs();
}
/*
* get/set/list/remove xattr
*/
@Test
public void test_xattr() throws Exception {
/* make file */
String path = makePath();
int fd = createFile(path, 123);
mount.close(fd);
/* make xattrs */
String val1 = "This is a new xattr";
String val2 = "This is a different xattr";
byte[] buf1 = val1.getBytes();
byte[] buf2 = val2.getBytes();
mount.setxattr(path, "user.attr1", buf1, buf1.length, mount.XATTR_CREATE);
mount.setxattr(path, "user.attr2", buf2, buf2.length, mount.XATTR_CREATE);
/* list xattrs */
String[] xattrs = mount.listxattr(path);
assertTrue(xattrs.length == 2);
int found = 0;
for (String xattr : xattrs) {
if (xattr.compareTo("user.attr1") == 0) {
found++;
continue;
}
if (xattr.compareTo("user.attr2") == 0) {
found++;
continue;
}
System.out.println("found unwanted xattr: " + xattr);
}
assertTrue(found == 2);
/* get first xattr by looking up length */
long attr1_len = mount.getxattr(path, "user.attr1", null);
byte[] out = new byte[(int)attr1_len];
mount.getxattr(path, "user.attr1", out);
String outStr = new String(out);
assertTrue(outStr.compareTo(val1) == 0);
/* get second xattr assuming original length */
out = new byte[buf2.length];
mount.getxattr(path, "user.attr2", out);
outStr = new String(out);
assertTrue(outStr.compareTo(val2) == 0);
/* remove the attributes */
/* FIXME: the MDS returns ENODATA for removexattr */
/*
mount.removexattr(path, "attr1");
xattrs = mount.listxattr(path);
assertTrue(xattrs.length == 1);
mount.removexattr(path, "attr2");
xattrs = mount.listxattr(path);
assertTrue(xattrs.length == 0);
*/
mount.unlink(path);
}
/*
* get/set/list/remove symlink xattr
*
* Currently not working. Code is the same as for regular xattrs, so there
* might be a deeper issue.
*/
@Test
public void test_get_stripe_unit() throws Exception {
String path = makePath();
int fd = createFile(path, 1);
assertTrue(mount.get_file_stripe_unit(fd) > 0);
mount.close(fd);
mount.unlink(path);
}
@Test
public void test_get_repl() throws Exception {
String path = makePath();
int fd = createFile(path, 1);
assertTrue(mount.get_file_replication(fd) > 0);
mount.close(fd);
mount.unlink(path);
}
/*
* stripe unit granularity
*/
@Test
public void test_get_stripe_unit_gran() throws Exception {
assertTrue(mount.get_stripe_unit_granularity() > 0);
}
@Test
public void test_get_pool_id() throws Exception {
String data_pool_name = getRootPoolName();
/* returns valid pool id */
assertTrue(mount.get_pool_id(data_pool_name) >= 0);
/* test non-existent pool name */
try {
mount.get_pool_id("asdlfkjlsejflkjef");
assertTrue(false);
} catch (CephPoolException e) {}
}
@Test
public void test_get_pool_replication() throws Exception {
/* test invalid pool id */
try {
mount.get_pool_replication(-1);
assertTrue(false);
} catch (CephPoolException e) {}
/* test valid pool id */
String data_pool_name = getRootPoolName();
int poolid = mount.get_pool_id(data_pool_name);
assertTrue(poolid >= 0);
assertTrue(mount.get_pool_replication(poolid) > 0);
}
@Test
public void test_get_file_pool_name() throws Exception {
String data_pool_name = getRootPoolName();
String path = makePath();
int fd = createFile(path, 1);
String pool = mount.get_file_pool_name(fd);
mount.close(fd);
assertTrue(pool != null);
/* assumes using default data pool */
assertTrue(pool.compareTo(data_pool_name) == 0);
mount.unlink(path);
}
@Test(expected=IOException.class)
public void test_get_file_pool_name_ebadf() throws Exception {
String pool = mount.get_file_pool_name(-40);
}
@Test
public void test_get_file_extent() throws Exception {
int stripe_unit = 1<<18;
String path = makePath();
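// Layout: stripe unit 2^18, stripe count 2, object size 2*stripe_unit; each
// extent therefore ends at the next stripe-unit boundary, which is what the
// offset/length assertions below check.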
int fd = mount.open(path, CephMount.O_WRONLY|CephMount.O_CREAT, 0,
stripe_unit, 2, stripe_unit*2, null);
CephFileExtent e = mount.get_file_extent(fd, 0);
assertTrue(e.getOSDs().length > 0);
assertTrue(e.getOffset() == 0);
assertTrue(e.getLength() == stripe_unit);
e = mount.get_file_extent(fd, stripe_unit/2);
assertTrue(e.getOffset() == stripe_unit/2);
assertTrue(e.getLength() == stripe_unit/2);
e = mount.get_file_extent(fd, 3*stripe_unit/2-1);
assertTrue(e.getOffset() == 3*stripe_unit/2-1);
assertTrue(e.getLength() == stripe_unit/2+1);
e = mount.get_file_extent(fd, 3*stripe_unit/2+1);
assertTrue(e.getLength() == stripe_unit/2-1);
mount.close(fd);
mount.unlink(path);
}
@Test
public void test_get_osd_crush_location() throws Exception {
Bucket[] path = mount.get_osd_crush_location(0);
assertTrue(path.length > 0);
for (Bucket b : path) {
assertTrue(b.getType().length() > 0);
assertTrue(b.getName().length() > 0);
}
}
@Test
public void test_get_osd_address() throws Exception {
InetAddress addr = mount.get_osd_address(0);
assertTrue(addr.getHostAddress().length() > 0);
}
}
| 26,218 | 24.186359 | 92 |
java
|
null |
ceph-main/src/java/test/com/ceph/fs/CephUnmountedTest.java
|
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.ceph.fs;
import org.junit.*;
import static org.junit.Assert.*;
public class CephUnmountedTest {
private CephMount mount;
@Before
public void setup() throws Exception {
mount = new CephMount("admin");
}
@Test(expected=CephNotMountedException.class)
public void test_unmount() throws Exception {
mount.unmount();
}
@Test(expected=CephNotMountedException.class)
public void test_statfs() throws Exception {
CephStatVFS stat = new CephStatVFS();
mount.statfs("/a/path", stat);
}
@Test(expected=CephNotMountedException.class)
public void test_getcwd() throws Exception {
mount.getcwd();
}
@Test(expected=CephNotMountedException.class)
public void test_chdir() throws Exception {
mount.chdir("/a/path");
}
@Test(expected=CephNotMountedException.class)
public void test_listdir() throws Exception {
mount.listdir("/a/path");
}
@Test(expected=CephNotMountedException.class)
public void test_unlink() throws Exception {
mount.unlink("/a/path");
}
@Test(expected=CephNotMountedException.class)
public void test_rename() throws Exception {
mount.rename("/a/path", "/another/path");
}
@Test(expected=CephNotMountedException.class)
public void test_mkdirs() throws Exception {
mount.mkdirs("/a/path", 0);
}
@Test(expected=CephNotMountedException.class)
public void test_rmdir() throws Exception {
mount.rmdir("/a/path");
}
@Test(expected=CephNotMountedException.class)
public void test_stat() throws Exception {
CephStat stat = new CephStat();
mount.stat("/a/path", stat);
}
@Test(expected=CephNotMountedException.class)
public void test_lstat() throws Exception {
CephStat stat = new CephStat();
mount.lstat("/a/path", stat);
}
@Test(expected=CephNotMountedException.class)
public void test_setattr() throws Exception {
CephStat stat = new CephStat();
mount.setattr("/a/path", stat, 0);
}
@Test(expected=CephNotMountedException.class)
public void test_open() throws Exception {
mount.open("/a/path", 0, 0);
}
@Test(expected=CephNotMountedException.class)
public void test_open_layout() throws Exception {
mount.open("/a/path", 0, 0, 0, 0, 0, null);
}
@Test(expected=CephNotMountedException.class)
public void test_close() throws Exception {
mount.close(0);
}
@Test(expected=CephNotMountedException.class)
public void test_lseek() throws Exception {
mount.lseek(0, 0, CephMount.SEEK_CUR);
}
@Test(expected=CephNotMountedException.class)
public void test_read() throws Exception {
byte[] buf = new byte[1];
mount.read(0, buf, 1, 0);
}
@Test(expected=CephNotMountedException.class)
public void test_write() throws Exception {
byte[] buf = new byte[1];
mount.write(0, buf, 1, 0);
}
@Test(expected=CephNotMountedException.class)
public void test_get_stripe_unit() throws Exception {
mount.get_file_stripe_unit(0);
}
@Test(expected=CephNotMountedException.class)
public void test_get_repl() throws Exception {
mount.get_file_replication(0);
}
@Test(expected=CephNotMountedException.class)
public void test_get_stripe_unit_gran() throws Exception {
mount.get_stripe_unit_granularity();
}
@Test(expected=CephNotMountedException.class)
public void test_get_pool_id() throws Exception {
mount.get_pool_id("data");
}
@Test(expected=CephNotMountedException.class)
public void test_get_pool_replication() throws Exception {
mount.get_pool_replication(1);
}
@Test(expected=CephNotMountedException.class)
public void test_fchmod() throws Exception {
mount.fchmod(1, 0);
}
@Test(expected=CephNotMountedException.class)
public void test_chmod() throws Exception {
mount.chmod("/foo", 0);
}
}
| 4,869 | 28.515152 | 78 |
java
|
null |
ceph-main/src/journal/Entry.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/Entry.h"
#include "include/encoding.h"
#include "include/stringify.h"
#include "common/Formatter.h"
#include <sstream>
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "Entry: " << this << " "
namespace journal {
namespace {
const uint32_t HEADER_FIXED_SIZE = 25; /// preamble, version, entry tid, tag id
const uint32_t REMAINDER_FIXED_SIZE = 8; /// data size, crc
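// HEADER_FIXED_SIZE: preamble (8) + version (1) + entry tid (8) + tag tid (8)
// REMAINDER_FIXED_SIZE: data length prefix (4) + crc (4)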
} // anonymous namespace
uint32_t Entry::get_fixed_size() {
return HEADER_FIXED_SIZE + REMAINDER_FIXED_SIZE;
}
void Entry::encode(bufferlist &bl) const {
using ceph::encode;
bufferlist data_bl;
encode(preamble, data_bl);
encode(static_cast<uint8_t>(1), data_bl);
encode(m_entry_tid, data_bl);
encode(m_tag_tid, data_bl);
encode(m_data, data_bl);
uint32_t crc = data_bl.crc32c(0);
uint32_t bl_offset = bl.length();
bl.claim_append(data_bl);
encode(crc, bl);
ceph_assert(get_fixed_size() + m_data.length() + bl_offset == bl.length());
}
void Entry::decode(bufferlist::const_iterator &iter) {
using ceph::decode;
uint32_t start_offset = iter.get_off();
uint64_t bl_preamble;
decode(bl_preamble, iter);
if (bl_preamble != preamble) {
throw buffer::malformed_input("incorrect preamble: " +
stringify(bl_preamble));
}
uint8_t version;
decode(version, iter);
if (version != 1) {
throw buffer::malformed_input("unknown version: " + stringify(version));
}
decode(m_entry_tid, iter);
decode(m_tag_tid, iter);
decode(m_data, iter);
uint32_t end_offset = iter.get_off();
uint32_t crc;
decode(crc, iter);
bufferlist data_bl;
data_bl.substr_of(iter.get_bl(), start_offset, end_offset - start_offset);
uint32_t actual_crc = data_bl.crc32c(0);
if (crc != actual_crc) {
throw buffer::malformed_input("crc mismatch: " + stringify(crc) +
" != " + stringify(actual_crc));
}
}
void Entry::dump(Formatter *f) const {
f->dump_unsigned("tag_tid", m_tag_tid);
f->dump_unsigned("entry_tid", m_entry_tid);
std::stringstream data;
m_data.hexdump(data);
f->dump_string("data", data.str());
}
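// Checks, without consuming the caller's iterator (it is taken by value),
// whether a complete CRC-valid entry starts at this position; on failure
// *bytes_needed reports how many more bytes are required (0 for pad bytes,
// a bad preamble or a CRC mismatch).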
bool Entry::is_readable(bufferlist::const_iterator iter, uint32_t *bytes_needed) {
using ceph::decode;
uint32_t start_off = iter.get_off();
if (iter.get_remaining() < HEADER_FIXED_SIZE) {
bufferlist sub_bl;
sub_bl.substr_of(iter.get_bl(), iter.get_off(), iter.get_remaining());
if (sub_bl.length() > 0 && sub_bl.is_zero()) {
// pad bytes
*bytes_needed = 0;
} else {
*bytes_needed = HEADER_FIXED_SIZE - iter.get_remaining();
}
return false;
}
uint64_t bl_preamble;
decode(bl_preamble, iter);
if (bl_preamble != preamble) {
*bytes_needed = 0;
return false;
}
iter += HEADER_FIXED_SIZE - sizeof(bl_preamble);
if (iter.get_remaining() < sizeof(uint32_t)) {
*bytes_needed = sizeof(uint32_t) - iter.get_remaining();
return false;
}
uint32_t data_size;
decode(data_size, iter);
if (iter.get_remaining() < data_size) {
*bytes_needed = data_size - iter.get_remaining();
return false;
}
iter += data_size;
uint32_t end_off = iter.get_off();
if (iter.get_remaining() < sizeof(uint32_t)) {
*bytes_needed = sizeof(uint32_t) - iter.get_remaining();
return false;
}
bufferlist crc_bl;
crc_bl.substr_of(iter.get_bl(), start_off, end_off - start_off);
*bytes_needed = 0;
uint32_t crc;
decode(crc, iter);
if (crc != crc_bl.crc32c(0)) {
return false;
}
return true;
}
void Entry::generate_test_instances(std::list<Entry *> &o) {
o.push_back(new Entry(1, 123, bufferlist()));
bufferlist bl;
bl.append("data");
o.push_back(new Entry(2, 123, bl));
}
bool Entry::operator==(const Entry& rhs) const {
return (m_tag_tid == rhs.m_tag_tid && m_entry_tid == rhs.m_entry_tid &&
const_cast<bufferlist&>(m_data).contents_equal(
const_cast<bufferlist&>(rhs.m_data)));
}
std::ostream &operator<<(std::ostream &os, const Entry &entry) {
os << "Entry[tag_tid=" << entry.get_tag_tid() << ", "
<< "entry_tid=" << entry.get_entry_tid() << ", "
<< "data size=" << entry.get_data().length() << "]";
return os;
}
} // namespace journal
| 4,346 | 26.16875 | 82 |
cc
|
null |
ceph-main/src/journal/Entry.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_ENTRY_H
#define CEPH_JOURNAL_ENTRY_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/encoding.h"
#include <iosfwd>
#include <string>
namespace ceph {
class Formatter;
}
namespace journal {
class Entry {
public:
Entry() : m_tag_tid(0), m_entry_tid() {}
Entry(uint64_t tag_tid, uint64_t entry_tid, const bufferlist &data)
: m_tag_tid(tag_tid), m_entry_tid(entry_tid), m_data(data)
{
}
static uint32_t get_fixed_size();
inline uint64_t get_tag_tid() const {
return m_tag_tid;
}
inline uint64_t get_entry_tid() const {
return m_entry_tid;
}
inline const bufferlist &get_data() const {
return m_data;
}
void encode(bufferlist &bl) const;
void decode(bufferlist::const_iterator &iter);
void dump(ceph::Formatter *f) const;
bool operator==(const Entry& rhs) const;
static bool is_readable(bufferlist::const_iterator iter, uint32_t *bytes_needed);
static void generate_test_instances(std::list<Entry *> &o);
private:
static const uint64_t preamble = 0x3141592653589793;
uint64_t m_tag_tid;
uint64_t m_entry_tid;
bufferlist m_data;
};
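// A minimal usage sketch (tag_tid, entry_tid and payload_bl below are
// placeholder values): an Entry is serialized into a bufferlist and can be
// reconstructed from it later, e.g.
//
//   bufferlist payload_bl;
//   payload_bl.append("data");
//   journal::Entry entry(tag_tid, entry_tid, payload_bl);
//   bufferlist bl;
//   entry.encode(bl);
//   auto it = bl.cbegin();
//   journal::Entry decoded;
//   decoded.decode(it); // throws buffer::malformed_input on bad preamble/crc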
std::ostream &operator<<(std::ostream &os, const Entry &entry);
WRITE_CLASS_ENCODER(journal::Entry)
} // namespace journal
#endif // CEPH_JOURNAL_ENTRY_H
| 1,402 | 21.269841 | 83 |
h
|
null |
ceph-main/src/journal/Future.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/Future.h"
#include "journal/FutureImpl.h"
#include "include/ceph_assert.h"
namespace journal {
Future::Future() = default;
Future::Future(const Future& o) = default;
Future& Future::operator=(const Future& o) = default;
Future::Future(Future&& o) = default;
Future& Future::operator=(Future&& o) = default;
Future::Future(ceph::ref_t<FutureImpl> future_impl) : m_future_impl(std::move(future_impl)) {}
Future::~Future() = default;
void Future::flush(Context *on_safe) {
m_future_impl->flush(on_safe);
}
void Future::wait(Context *on_safe) {
ceph_assert(on_safe != NULL);
m_future_impl->wait(on_safe);
}
bool Future::is_complete() const {
return m_future_impl->is_complete();
}
int Future::get_return_value() const {
return m_future_impl->get_return_value();
}
std::ostream &operator<<(std::ostream &os, const Future &future) {
return os << *future.m_future_impl;
}
} // namespace journal
| 1,026 | 24.04878 | 94 |
cc
|
null |
ceph-main/src/journal/Future.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_FUTURE_H
#define CEPH_JOURNAL_FUTURE_H
#include <iosfwd>
#include <string>
#include "include/ceph_assert.h"
#include "include/int_types.h"
#include "common/ref.h"
class Context;
namespace journal {
class FutureImpl;
class Future {
public:
Future();
Future(const Future&);
Future& operator=(const Future&);
Future(Future&&);
Future& operator=(Future&&);
Future(ceph::ref_t<FutureImpl> future_impl);
~Future();
bool is_valid() const {
return bool(m_future_impl);
}
void flush(Context *on_safe);
void wait(Context *on_safe);
bool is_complete() const;
int get_return_value() const;
private:
friend class Journaler;
friend std::ostream& operator<<(std::ostream&, const Future&);
const auto& get_future_impl() const {
return m_future_impl;
}
ceph::ref_t<FutureImpl> m_future_impl;
};
std::ostream &operator<<(std::ostream &os, const Future &future);
} // namespace journal
using journal::operator<<;
#endif // CEPH_JOURNAL_FUTURE_H
| 1,106 | 18.086207 | 70 |
h
|
null |
ceph-main/src/journal/FutureImpl.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/FutureImpl.h"
#include "journal/Utils.h"
namespace journal {
FutureImpl::FutureImpl(uint64_t tag_tid, uint64_t entry_tid,
uint64_t commit_tid)
: m_tag_tid(tag_tid),
m_entry_tid(entry_tid),
m_commit_tid(commit_tid),
m_consistent_ack(this)
{
}
void FutureImpl::init(const ceph::ref_t<FutureImpl> &prev_future) {
  // chain ourselves to the prior future (if any) so that we know when the
  // journal is consistent
if (prev_future) {
m_prev_future = prev_future;
m_prev_future->wait(&m_consistent_ack);
} else {
m_consistent_ack.complete(0);
}
}
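// flush this future and any unflushed predecessors; on_safe is completed once
// the entry is safe and the journal is consistent up to it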
void FutureImpl::flush(Context *on_safe) {
bool complete;
FlushHandlers flush_handlers;
ceph::ref_t<FutureImpl> prev_future;
{
std::lock_guard locker{m_lock};
complete = (m_safe && m_consistent);
if (!complete) {
if (on_safe != nullptr) {
m_contexts.push_back(on_safe);
}
prev_future = prepare_flush(&flush_handlers, m_lock);
}
}
// instruct prior futures to flush as well
while (prev_future) {
prev_future = prev_future->prepare_flush(&flush_handlers);
}
if (complete && on_safe != NULL) {
on_safe->complete(m_return_value);
} else if (!flush_handlers.empty()) {
// attached to journal object -- instruct it to flush all entries through
// this one. possible to become detached while lock is released, so flush
// will be re-requested by the object if it doesn't own the future
for (auto &pair : flush_handlers) {
pair.first->flush(pair.second);
}
}
}
ceph::ref_t<FutureImpl> FutureImpl::prepare_flush(FlushHandlers *flush_handlers) {
std::lock_guard locker{m_lock};
return prepare_flush(flush_handlers, m_lock);
}
ceph::ref_t<FutureImpl> FutureImpl::prepare_flush(FlushHandlers *flush_handlers,
ceph::mutex &lock) {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_flush_state == FLUSH_STATE_NONE) {
m_flush_state = FLUSH_STATE_REQUESTED;
auto h = m_flush_handler;
if (h) {
flush_handlers->try_emplace(std::move(h), this);
}
}
return m_prev_future;
}
void FutureImpl::wait(Context *on_safe) {
ceph_assert(on_safe != NULL);
{
std::lock_guard locker{m_lock};
if (!m_safe || !m_consistent) {
m_contexts.push_back(on_safe);
return;
}
}
on_safe->complete(m_return_value);
}
bool FutureImpl::is_complete() const {
std::lock_guard locker{m_lock};
return m_safe && m_consistent;
}
int FutureImpl::get_return_value() const {
std::lock_guard locker{m_lock};
ceph_assert(m_safe && m_consistent);
return m_return_value;
}
bool FutureImpl::attach(FlushHandler::ref flush_handler) {
std::lock_guard locker{m_lock};
ceph_assert(!m_flush_handler);
m_flush_handler = std::move(flush_handler);
return m_flush_state != FLUSH_STATE_NONE;
}
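// marks the entry as safely persisted; queued waiters are completed only once
// the future is also consistent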
void FutureImpl::safe(int r) {
m_lock.lock();
ceph_assert(!m_safe);
m_safe = true;
if (m_return_value == 0) {
m_return_value = r;
}
m_flush_handler.reset();
if (m_consistent) {
finish_unlock();
} else {
m_lock.unlock();
}
}
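// invoked once all prior futures have completed, i.e. the journal is
// consistent up to this entry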
void FutureImpl::consistent(int r) {
m_lock.lock();
ceph_assert(!m_consistent);
m_consistent = true;
m_prev_future.reset();
if (m_return_value == 0) {
m_return_value = r;
}
if (m_safe) {
finish_unlock();
} else {
m_lock.unlock();
}
}
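// complete all queued waiters; expects m_lock held and releases it before
// invoking the callbacks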
void FutureImpl::finish_unlock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_safe && m_consistent);
Contexts contexts;
contexts.swap(m_contexts);
m_lock.unlock();
for (Contexts::iterator it = contexts.begin();
it != contexts.end(); ++it) {
(*it)->complete(m_return_value);
}
}
std::ostream &operator<<(std::ostream &os, const FutureImpl &future) {
os << "Future[tag_tid=" << future.m_tag_tid << ", "
<< "entry_tid=" << future.m_entry_tid << ", "
<< "commit_tid=" << future.m_commit_tid << "]";
return os;
}
} // namespace journal
| 4,087 | 23.333333 | 82 |
cc
|
null |
ceph-main/src/journal/FutureImpl.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_FUTURE_IMPL_H
#define CEPH_JOURNAL_FUTURE_IMPL_H
#include "include/int_types.h"
#include "common/RefCountedObj.h"
#include "include/Context.h"
#include "journal/Future.h"
#include <list>
#include <map>
#include <boost/noncopyable.hpp>
#include "include/ceph_assert.h"
class Context;
namespace journal {
class FutureImpl : public RefCountedObject, boost::noncopyable {
public:
struct FlushHandler {
using ref = std::shared_ptr<FlushHandler>;
virtual void flush(const ceph::ref_t<FutureImpl> &future) = 0;
virtual ~FlushHandler() = default;
};
void init(const ceph::ref_t<FutureImpl> &prev_future);
inline uint64_t get_tag_tid() const {
return m_tag_tid;
}
inline uint64_t get_entry_tid() const {
return m_entry_tid;
}
inline uint64_t get_commit_tid() const {
return m_commit_tid;
}
void flush(Context *on_safe = NULL);
void wait(Context *on_safe);
bool is_complete() const;
int get_return_value() const;
inline bool is_flush_in_progress() const {
std::lock_guard locker{m_lock};
return (m_flush_state == FLUSH_STATE_IN_PROGRESS);
}
inline void set_flush_in_progress() {
auto h = std::move(m_flush_handler);
ceph_assert(h);
std::lock_guard locker{m_lock};
m_flush_state = FLUSH_STATE_IN_PROGRESS;
}
bool attach(FlushHandler::ref flush_handler);
inline void detach() {
m_flush_handler.reset();
}
inline FlushHandler::ref get_flush_handler() const {
return m_flush_handler;
}
void safe(int r);
private:
friend std::ostream &operator<<(std::ostream &, const FutureImpl &);
typedef std::map<FlushHandler::ref, ceph::ref_t<FutureImpl>> FlushHandlers;
typedef std::list<Context *> Contexts;
enum FlushState {
FLUSH_STATE_NONE,
FLUSH_STATE_REQUESTED,
FLUSH_STATE_IN_PROGRESS
};
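  // chained to the previous future; marks this future consistent once all
  // earlier entries have completed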
struct C_ConsistentAck : public Context {
ceph::ref_t<FutureImpl> future;
C_ConsistentAck(ceph::ref_t<FutureImpl> _future) : future(std::move(_future)) {}
void complete(int r) override {
future->consistent(r);
future.reset();
}
void finish(int r) override {}
};
FRIEND_MAKE_REF(FutureImpl);
FutureImpl(uint64_t tag_tid, uint64_t entry_tid, uint64_t commit_tid);
~FutureImpl() override = default;
uint64_t m_tag_tid;
uint64_t m_entry_tid;
uint64_t m_commit_tid;
mutable ceph::mutex m_lock = ceph::make_mutex("FutureImpl::m_lock", false);
ceph::ref_t<FutureImpl> m_prev_future;
bool m_safe = false;
bool m_consistent = false;
int m_return_value = 0;
FlushHandler::ref m_flush_handler;
FlushState m_flush_state = FLUSH_STATE_NONE;
C_ConsistentAck m_consistent_ack;
Contexts m_contexts;
ceph::ref_t<FutureImpl> prepare_flush(FlushHandlers *flush_handlers);
ceph::ref_t<FutureImpl> prepare_flush(FlushHandlers *flush_handlers, ceph::mutex &lock);
void consistent(int r);
void finish_unlock();
};
std::ostream &operator<<(std::ostream &os, const FutureImpl &future);
} // namespace journal
using journal::operator<<;
#endif // CEPH_JOURNAL_FUTURE_IMPL_H
| 3,161 | 24.707317 | 90 |
h
|
null |
ceph-main/src/journal/JournalMetadata.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalMetadata.h"
#include "journal/Utils.h"
#include "common/errno.h"
#include "common/Timer.h"
#include "cls/journal/cls_journal_client.h"
#include <functional>
#include <set>
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "JournalMetadata: " << this << " "
namespace journal {
using namespace cls::journal;
namespace {
struct C_GetClient : public Context {
CephContext *cct;
librados::IoCtx &ioctx;
const std::string &oid;
AsyncOpTracker &async_op_tracker;
std::string client_id;
cls::journal::Client *client;
Context *on_finish;
bufferlist out_bl;
C_GetClient(CephContext *cct, librados::IoCtx &ioctx, const std::string &oid,
AsyncOpTracker &async_op_tracker, const std::string &client_id,
cls::journal::Client *client, Context *on_finish)
: cct(cct), ioctx(ioctx), oid(oid), async_op_tracker(async_op_tracker),
client_id(client_id), client(client), on_finish(on_finish) {
async_op_tracker.start_op();
}
~C_GetClient() override {
async_op_tracker.finish_op();
}
virtual void send() {
send_get_client();
}
void send_get_client() {
ldout(cct, 20) << "C_GetClient: " << __func__ << dendl;
librados::ObjectReadOperation op;
client::get_client_start(&op, client_id);
librados::AioCompletion *comp = librados::Rados::aio_create_completion(
this, &utils::rados_state_callback<
C_GetClient, &C_GetClient::handle_get_client>);
int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
ceph_assert(r == 0);
comp->release();
}
void handle_get_client(int r) {
ldout(cct, 20) << "C_GetClient: " << __func__ << ": r=" << r << dendl;
if (r == 0) {
auto it = out_bl.cbegin();
r = client::get_client_finish(&it, client);
}
complete(r);
}
void finish(int r) override {
on_finish->complete(r);
}
};
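// allocate the next tag tid, create the tag (retrying on -ESTALE races with
// other clients), then read the resulting tag back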
struct C_AllocateTag : public Context {
CephContext *cct;
librados::IoCtx &ioctx;
const std::string &oid;
AsyncOpTracker &async_op_tracker;
uint64_t tag_class;
Tag *tag;
Context *on_finish;
bufferlist out_bl;
C_AllocateTag(CephContext *cct, librados::IoCtx &ioctx,
const std::string &oid, AsyncOpTracker &async_op_tracker,
uint64_t tag_class, const bufferlist &data, Tag *tag,
Context *on_finish)
: cct(cct), ioctx(ioctx), oid(oid), async_op_tracker(async_op_tracker),
tag_class(tag_class), tag(tag), on_finish(on_finish) {
async_op_tracker.start_op();
tag->data = data;
}
~C_AllocateTag() override {
async_op_tracker.finish_op();
}
void send() {
send_get_next_tag_tid();
}
void send_get_next_tag_tid() {
ldout(cct, 20) << "C_AllocateTag: " << __func__ << dendl;
librados::ObjectReadOperation op;
client::get_next_tag_tid_start(&op);
librados::AioCompletion *comp = librados::Rados::aio_create_completion(
this, &utils::rados_state_callback<
C_AllocateTag, &C_AllocateTag::handle_get_next_tag_tid>);
out_bl.clear();
int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
ceph_assert(r == 0);
comp->release();
}
void handle_get_next_tag_tid(int r) {
ldout(cct, 20) << "C_AllocateTag: " << __func__ << ": r=" << r << dendl;
if (r == 0) {
auto iter = out_bl.cbegin();
r = client::get_next_tag_tid_finish(&iter, &tag->tid);
}
if (r < 0) {
complete(r);
return;
}
send_tag_create();
}
void send_tag_create() {
ldout(cct, 20) << "C_AllocateTag: " << __func__ << dendl;
librados::ObjectWriteOperation op;
client::tag_create(&op, tag->tid, tag_class, tag->data);
librados::AioCompletion *comp = librados::Rados::aio_create_completion(
this, &utils::rados_state_callback<
C_AllocateTag, &C_AllocateTag::handle_tag_create>);
int r = ioctx.aio_operate(oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
void handle_tag_create(int r) {
ldout(cct, 20) << "C_AllocateTag: " << __func__ << ": r=" << r << dendl;
if (r == -ESTALE) {
send_get_next_tag_tid();
return;
} else if (r < 0) {
complete(r);
return;
}
send_get_tag();
}
void send_get_tag() {
ldout(cct, 20) << "C_AllocateTag: " << __func__ << dendl;
librados::ObjectReadOperation op;
client::get_tag_start(&op, tag->tid);
librados::AioCompletion *comp = librados::Rados::aio_create_completion(
this, &utils::rados_state_callback<
C_AllocateTag, &C_AllocateTag::handle_get_tag>);
out_bl.clear();
int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
ceph_assert(r == 0);
comp->release();
}
void handle_get_tag(int r) {
ldout(cct, 20) << "C_AllocateTag: " << __func__ << ": r=" << r << dendl;
if (r == 0) {
auto iter = out_bl.cbegin();
cls::journal::Tag journal_tag;
r = client::get_tag_finish(&iter, &journal_tag);
if (r == 0) {
*tag = journal_tag;
}
}
complete(r);
}
void finish(int r) override {
on_finish->complete(r);
}
};
struct C_GetTag : public Context {
CephContext *cct;
librados::IoCtx &ioctx;
const std::string &oid;
AsyncOpTracker &async_op_tracker;
uint64_t tag_tid;
JournalMetadata::Tag *tag;
Context *on_finish;
bufferlist out_bl;
C_GetTag(CephContext *cct, librados::IoCtx &ioctx, const std::string &oid,
AsyncOpTracker &async_op_tracker, uint64_t tag_tid,
JournalMetadata::Tag *tag, Context *on_finish)
: cct(cct), ioctx(ioctx), oid(oid), async_op_tracker(async_op_tracker),
tag_tid(tag_tid), tag(tag), on_finish(on_finish) {
async_op_tracker.start_op();
}
~C_GetTag() override {
async_op_tracker.finish_op();
}
void send() {
send_get_tag();
}
void send_get_tag() {
librados::ObjectReadOperation op;
client::get_tag_start(&op, tag_tid);
librados::AioCompletion *comp = librados::Rados::aio_create_completion(
this, &utils::rados_state_callback<
C_GetTag, &C_GetTag::handle_get_tag>);
int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
ceph_assert(r == 0);
comp->release();
}
void handle_get_tag(int r) {
if (r == 0) {
auto iter = out_bl.cbegin();
r = client::get_tag_finish(&iter, tag);
}
complete(r);
}
void finish(int r) override {
on_finish->complete(r);
}
};
struct C_GetTags : public Context {
CephContext *cct;
librados::IoCtx &ioctx;
const std::string &oid;
const std::string &client_id;
AsyncOpTracker &async_op_tracker;
uint64_t start_after_tag_tid;
boost::optional<uint64_t> tag_class;
JournalMetadata::Tags *tags;
Context *on_finish;
const uint64_t MAX_RETURN = 64;
bufferlist out_bl;
C_GetTags(CephContext *cct, librados::IoCtx &ioctx, const std::string &oid,
const std::string &client_id, AsyncOpTracker &async_op_tracker,
uint64_t start_after_tag_tid,
const boost::optional<uint64_t> &tag_class,
JournalMetadata::Tags *tags, Context *on_finish)
: cct(cct), ioctx(ioctx), oid(oid), client_id(client_id),
async_op_tracker(async_op_tracker),
start_after_tag_tid(start_after_tag_tid), tag_class(tag_class),
tags(tags), on_finish(on_finish) {
async_op_tracker.start_op();
}
~C_GetTags() override {
async_op_tracker.finish_op();
}
void send() {
send_tag_list();
}
void send_tag_list() {
librados::ObjectReadOperation op;
client::tag_list_start(&op, start_after_tag_tid, MAX_RETURN, client_id,
tag_class);
librados::AioCompletion *comp = librados::Rados::aio_create_completion(
this, &utils::rados_state_callback<
C_GetTags, &C_GetTags::handle_tag_list>);
out_bl.clear();
int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
ceph_assert(r == 0);
comp->release();
}
void handle_tag_list(int r) {
if (r == 0) {
std::set<cls::journal::Tag> journal_tags;
auto iter = out_bl.cbegin();
r = client::tag_list_finish(&iter, &journal_tags);
if (r == 0) {
for (auto &journal_tag : journal_tags) {
tags->push_back(journal_tag);
start_after_tag_tid = journal_tag.tid;
}
if (journal_tags.size() == MAX_RETURN) {
send_tag_list();
return;
}
}
}
complete(r);
}
void finish(int r) override {
on_finish->complete(r);
}
};
struct C_FlushCommitPosition : public Context {
Context *commit_position_ctx;
Context *on_finish;
C_FlushCommitPosition(Context *commit_position_ctx, Context *on_finish)
: commit_position_ctx(commit_position_ctx), on_finish(on_finish) {
}
void finish(int r) override {
if (commit_position_ctx != nullptr) {
commit_position_ctx->complete(r);
}
on_finish->complete(r);
}
};
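// check whether a newer tag than tag_tid exists for this client; completes
// with -ESTALE if so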
struct C_AssertActiveTag : public Context {
CephContext *cct;
librados::IoCtx &ioctx;
const std::string &oid;
AsyncOpTracker &async_op_tracker;
std::string client_id;
uint64_t tag_tid;
Context *on_finish;
bufferlist out_bl;
C_AssertActiveTag(CephContext *cct, librados::IoCtx &ioctx,
const std::string &oid, AsyncOpTracker &async_op_tracker,
const std::string &client_id, uint64_t tag_tid,
Context *on_finish)
: cct(cct), ioctx(ioctx), oid(oid), async_op_tracker(async_op_tracker),
client_id(client_id), tag_tid(tag_tid), on_finish(on_finish) {
async_op_tracker.start_op();
}
~C_AssertActiveTag() override {
async_op_tracker.finish_op();
}
void send() {
ldout(cct, 20) << "C_AssertActiveTag: " << __func__ << dendl;
librados::ObjectReadOperation op;
client::tag_list_start(&op, tag_tid, 2, client_id, boost::none);
librados::AioCompletion *comp = librados::Rados::aio_create_completion(
this, &utils::rados_state_callback<
C_AssertActiveTag, &C_AssertActiveTag::handle_send>);
int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
ceph_assert(r == 0);
comp->release();
}
void handle_send(int r) {
ldout(cct, 20) << "C_AssertActiveTag: " << __func__ << ": r=" << r << dendl;
std::set<cls::journal::Tag> tags;
if (r == 0) {
auto it = out_bl.cbegin();
r = client::tag_list_finish(&it, &tags);
}
    // NOTE: since 0 is treated as an uninitialized list filter, we need to
    // load two entries and look at the last tid
if (r == 0 && !tags.empty() && tags.rbegin()->tid > tag_tid) {
r = -ESTALE;
}
complete(r);
}
void finish(int r) override {
on_finish->complete(r);
}
};
} // anonymous namespace
JournalMetadata::JournalMetadata(ContextWQ *work_queue, SafeTimer *timer,
ceph::mutex *timer_lock, librados::IoCtx &ioctx,
const std::string &oid,
const std::string &client_id,
const Settings &settings)
: m_oid(oid),
m_client_id(client_id), m_settings(settings),
m_work_queue(work_queue), m_timer(timer), m_timer_lock(timer_lock),
m_watch_ctx(this)
{
m_ioctx.dup(ioctx);
m_cct = reinterpret_cast<CephContext*>(m_ioctx.cct());
}
JournalMetadata::~JournalMetadata() {
std::lock_guard locker{m_lock};
ceph_assert(!m_initialized);
}
void JournalMetadata::init(Context *on_finish) {
{
std::lock_guard locker{m_lock};
ceph_assert(!m_initialized);
m_initialized = true;
}
// chain the init sequence (reverse order)
on_finish = utils::create_async_context_callback(
this, on_finish);
on_finish = new C_ImmutableMetadata(this, on_finish);
on_finish = new LambdaContext([this, on_finish](int r) {
if (r < 0) {
      lderr(m_cct) << __func__ << ": failed to watch journal: "
<< cpp_strerror(r) << dendl;
std::lock_guard locker{m_lock};
m_watch_handle = 0;
on_finish->complete(r);
return;
}
get_immutable_metadata(&m_order, &m_splay_width, &m_pool_id, on_finish);
});
librados::AioCompletion *comp = librados::Rados::aio_create_completion(
on_finish, utils::rados_ctx_callback);
int r = m_ioctx.aio_watch(m_oid, comp, &m_watch_handle, &m_watch_ctx);
ceph_assert(r == 0);
comp->release();
}
void JournalMetadata::shut_down(Context *on_finish) {
ldout(m_cct, 20) << __func__ << dendl;
uint64_t watch_handle = 0;
{
std::lock_guard locker{m_lock};
m_initialized = false;
std::swap(watch_handle, m_watch_handle);
}
// chain the shut down sequence (reverse order)
on_finish = utils::create_async_context_callback(
this, on_finish);
on_finish = new LambdaContext([this, on_finish](int r) {
ldout(m_cct, 20) << "shut_down: waiting for ops" << dendl;
m_async_op_tracker.wait_for_ops(on_finish);
});
on_finish = new LambdaContext([this, on_finish](int r) {
ldout(m_cct, 20) << "shut_down: flushing watch" << dendl;
librados::Rados rados(m_ioctx);
librados::AioCompletion *comp = librados::Rados::aio_create_completion(
on_finish, utils::rados_ctx_callback);
r = rados.aio_watch_flush(comp);
ceph_assert(r == 0);
comp->release();
});
on_finish = new LambdaContext([this, on_finish](int r) {
flush_commit_position(on_finish);
});
if (watch_handle != 0) {
librados::AioCompletion *comp = librados::Rados::aio_create_completion(
on_finish, utils::rados_ctx_callback);
int r = m_ioctx.aio_unwatch(watch_handle, comp);
ceph_assert(r == 0);
comp->release();
} else {
on_finish->complete(0);
}
}
void JournalMetadata::get_immutable_metadata(uint8_t *order,
uint8_t *splay_width,
int64_t *pool_id,
Context *on_finish) {
client::get_immutable_metadata(m_ioctx, m_oid, order, splay_width, pool_id,
on_finish);
}
void JournalMetadata::get_mutable_metadata(uint64_t *minimum_set,
uint64_t *active_set,
RegisteredClients *clients,
Context *on_finish) {
client::get_mutable_metadata(m_ioctx, m_oid, minimum_set, active_set, clients,
on_finish);
}
void JournalMetadata::register_client(const bufferlist &data,
Context *on_finish) {
ldout(m_cct, 10) << __func__ << ": " << m_client_id << dendl;
librados::ObjectWriteOperation op;
client::client_register(&op, m_client_id, data);
C_NotifyUpdate *ctx = new C_NotifyUpdate(this, on_finish);
librados::AioCompletion *comp =
librados::Rados::aio_create_completion(ctx,
utils::rados_ctx_callback);
int r = m_ioctx.aio_operate(m_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
void JournalMetadata::update_client(const bufferlist &data,
Context *on_finish) {
ldout(m_cct, 10) << __func__ << ": " << m_client_id << dendl;
librados::ObjectWriteOperation op;
client::client_update_data(&op, m_client_id, data);
C_NotifyUpdate *ctx = new C_NotifyUpdate(this, on_finish);
librados::AioCompletion *comp =
librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
int r = m_ioctx.aio_operate(m_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
void JournalMetadata::unregister_client(Context *on_finish) {
ceph_assert(!m_client_id.empty());
ldout(m_cct, 10) << __func__ << ": " << m_client_id << dendl;
librados::ObjectWriteOperation op;
client::client_unregister(&op, m_client_id);
C_NotifyUpdate *ctx = new C_NotifyUpdate(this, on_finish);
librados::AioCompletion *comp =
librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
int r = m_ioctx.aio_operate(m_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
void JournalMetadata::allocate_tag(uint64_t tag_class, const bufferlist &data,
Tag *tag, Context *on_finish) {
on_finish = new C_NotifyUpdate(this, on_finish);
C_AllocateTag *ctx = new C_AllocateTag(m_cct, m_ioctx, m_oid,
m_async_op_tracker, tag_class,
data, tag, on_finish);
ctx->send();
}
void JournalMetadata::get_client(const std::string &client_id,
cls::journal::Client *client,
Context *on_finish) {
C_GetClient *ctx = new C_GetClient(m_cct, m_ioctx, m_oid, m_async_op_tracker,
client_id, client, on_finish);
ctx->send();
}
void JournalMetadata::get_tag(uint64_t tag_tid, Tag *tag, Context *on_finish) {
C_GetTag *ctx = new C_GetTag(m_cct, m_ioctx, m_oid, m_async_op_tracker,
tag_tid, tag, on_finish);
ctx->send();
}
void JournalMetadata::get_tags(uint64_t start_after_tag_tid,
const boost::optional<uint64_t> &tag_class,
Tags *tags, Context *on_finish) {
C_GetTags *ctx = new C_GetTags(m_cct, m_ioctx, m_oid, m_client_id,
m_async_op_tracker, start_after_tag_tid,
tag_class, tags, on_finish);
ctx->send();
}
void JournalMetadata::add_listener(JournalMetadataListener *listener) {
std::unique_lock locker{m_lock};
m_update_cond.wait(locker, [this] {
return m_update_notifications <= 0;
});
m_listeners.push_back(listener);
}
void JournalMetadata::remove_listener(JournalMetadataListener *listener) {
std::unique_lock locker{m_lock};
m_update_cond.wait(locker, [this] {
return m_update_notifications <= 0;
});
m_listeners.remove(listener);
}
void JournalMetadata::set_minimum_set(uint64_t object_set) {
std::lock_guard locker{m_lock};
ldout(m_cct, 20) << __func__ << ": current=" << m_minimum_set
<< ", new=" << object_set << dendl;
if (m_minimum_set >= object_set) {
return;
}
librados::ObjectWriteOperation op;
client::set_minimum_set(&op, object_set);
C_NotifyUpdate *ctx = new C_NotifyUpdate(this);
librados::AioCompletion *comp =
librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
int r = m_ioctx.aio_operate(m_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
m_minimum_set = object_set;
}
int JournalMetadata::set_active_set(uint64_t object_set) {
C_SaferCond ctx;
set_active_set(object_set, &ctx);
return ctx.wait();
}
void JournalMetadata::set_active_set(uint64_t object_set, Context *on_finish) {
std::lock_guard locker{m_lock};
ldout(m_cct, 20) << __func__ << ": current=" << m_active_set
<< ", new=" << object_set << dendl;
if (m_active_set >= object_set) {
m_work_queue->queue(on_finish, 0);
return;
}
librados::ObjectWriteOperation op;
client::set_active_set(&op, object_set);
C_NotifyUpdate *ctx = new C_NotifyUpdate(this, on_finish);
librados::AioCompletion *comp =
librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
int r = m_ioctx.aio_operate(m_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
m_active_set = object_set;
}
void JournalMetadata::assert_active_tag(uint64_t tag_tid, Context *on_finish) {
std::lock_guard locker{m_lock};
C_AssertActiveTag *ctx = new C_AssertActiveTag(m_cct, m_ioctx, m_oid,
m_async_op_tracker,
m_client_id, tag_tid,
on_finish);
ctx->send();
}
void JournalMetadata::flush_commit_position() {
ldout(m_cct, 20) << __func__ << dendl;
C_SaferCond ctx;
flush_commit_position(&ctx);
ctx.wait();
}
void JournalMetadata::flush_commit_position(Context *on_safe) {
ldout(m_cct, 20) << __func__ << dendl;
std::scoped_lock locker{*m_timer_lock, m_lock};
if (m_commit_position_ctx == nullptr && m_flush_commits_in_progress == 0) {
// nothing to flush
if (on_safe != nullptr) {
m_work_queue->queue(on_safe, 0);
}
return;
}
if (on_safe != nullptr) {
m_flush_commit_position_ctxs.push_back(on_safe);
}
if (m_commit_position_ctx == nullptr) {
return;
}
cancel_commit_task();
handle_commit_position_task();
}
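// record that entry_tid has been consumed for tag_tid so that subsequent
// allocations continue after it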
void JournalMetadata::reserve_entry_tid(uint64_t tag_tid, uint64_t entry_tid) {
std::lock_guard locker{m_lock};
uint64_t &allocated_entry_tid = m_allocated_entry_tids[tag_tid];
if (allocated_entry_tid <= entry_tid) {
allocated_entry_tid = entry_tid + 1;
}
}
bool JournalMetadata::get_last_allocated_entry_tid(uint64_t tag_tid,
uint64_t *entry_tid) const {
std::lock_guard locker{m_lock};
AllocatedEntryTids::const_iterator it = m_allocated_entry_tids.find(tag_tid);
if (it == m_allocated_entry_tids.end()) {
return false;
}
ceph_assert(it->second > 0);
*entry_tid = it->second - 1;
return true;
}
void JournalMetadata::handle_immutable_metadata(int r, Context *on_init) {
if (r < 0) {
lderr(m_cct) << "failed to initialize immutable metadata: "
<< cpp_strerror(r) << dendl;
on_init->complete(r);
return;
}
ldout(m_cct, 10) << "initialized immutable metadata" << dendl;
refresh(on_init);
}
void JournalMetadata::refresh(Context *on_complete) {
ldout(m_cct, 10) << "refreshing mutable metadata" << dendl;
{
std::lock_guard locker{m_lock};
if (on_complete != nullptr) {
m_refresh_ctxs.push_back(on_complete);
}
++m_refreshes_in_progress;
}
auto refresh = new C_Refresh(this);
get_mutable_metadata(&refresh->minimum_set, &refresh->active_set,
&refresh->registered_clients, refresh);
}
void JournalMetadata::handle_refresh_complete(C_Refresh *refresh, int r) {
ldout(m_cct, 10) << "refreshed mutable metadata: r=" << r << dendl;
m_lock.lock();
if (r == 0) {
Client client(m_client_id, bufferlist());
RegisteredClients::iterator it = refresh->registered_clients.find(client);
if (it != refresh->registered_clients.end()) {
if (it->state == cls::journal::CLIENT_STATE_DISCONNECTED) {
ldout(m_cct, 0) << "client flagged disconnected: " << m_client_id
<< dendl;
}
m_minimum_set = std::max(m_minimum_set, refresh->minimum_set);
m_active_set = std::max(m_active_set, refresh->active_set);
m_registered_clients = refresh->registered_clients;
m_client = *it;
++m_update_notifications;
m_lock.unlock();
for (Listeners::iterator it = m_listeners.begin();
it != m_listeners.end(); ++it) {
(*it)->handle_update(this);
}
m_lock.lock();
if (--m_update_notifications == 0) {
m_update_cond.notify_all();
}
} else {
lderr(m_cct) << "failed to locate client: " << m_client_id << dendl;
r = -ENOENT;
}
}
Contexts refresh_ctxs;
ceph_assert(m_refreshes_in_progress > 0);
--m_refreshes_in_progress;
if (m_refreshes_in_progress == 0) {
std::swap(refresh_ctxs, m_refresh_ctxs);
}
m_lock.unlock();
for (auto ctx : refresh_ctxs) {
ctx->complete(r);
}
}
void JournalMetadata::cancel_commit_task() {
ldout(m_cct, 20) << __func__ << dendl;
ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_commit_position_ctx != nullptr);
ceph_assert(m_commit_position_task_ctx != nullptr);
m_timer->cancel_event(m_commit_position_task_ctx);
m_commit_position_task_ctx = NULL;
}
void JournalMetadata::schedule_commit_task() {
ldout(m_cct, 20) << __func__ << dendl;
ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_commit_position_ctx != nullptr);
if (m_commit_position_task_ctx == nullptr) {
m_commit_position_task_ctx =
m_timer->add_event_after(m_settings.commit_interval,
new C_CommitPositionTask(this));
}
}
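// timer callback: write the in-memory commit position to the journal header,
// then disconnect laggy clients, refresh the metadata and notify watchers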
void JournalMetadata::handle_commit_position_task() {
ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
ceph_assert(ceph_mutex_is_locked(m_lock));
ldout(m_cct, 20) << __func__ << ": "
<< "client_id=" << m_client_id << ", "
<< "commit_position=" << m_commit_position << dendl;
m_commit_position_task_ctx = nullptr;
Context* commit_position_ctx = nullptr;
std::swap(commit_position_ctx, m_commit_position_ctx);
m_async_op_tracker.start_op();
++m_flush_commits_in_progress;
Context* ctx = new LambdaContext([this, commit_position_ctx](int r) {
Contexts flush_commit_position_ctxs;
m_lock.lock();
ceph_assert(m_flush_commits_in_progress > 0);
--m_flush_commits_in_progress;
if (m_flush_commits_in_progress == 0) {
std::swap(flush_commit_position_ctxs, m_flush_commit_position_ctxs);
}
m_lock.unlock();
commit_position_ctx->complete(0);
for (auto ctx : flush_commit_position_ctxs) {
ctx->complete(0);
}
m_async_op_tracker.finish_op();
});
ctx = new C_NotifyUpdate(this, ctx);
ctx = new LambdaContext([this, ctx](int r) {
    // manually kick off a refresh in case the notification is missed
// and ignore the next notification that we are about to send
m_lock.lock();
++m_ignore_watch_notifies;
m_lock.unlock();
refresh(ctx);
});
ctx = new LambdaContext([this, ctx](int r) {
schedule_laggy_clients_disconnect(ctx);
});
librados::ObjectWriteOperation op;
client::client_commit(&op, m_client_id, m_commit_position);
auto comp = librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
int r = m_ioctx.aio_operate(m_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
void JournalMetadata::schedule_watch_reset() {
ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
m_timer->add_event_after(1, new C_WatchReset(this));
}
void JournalMetadata::handle_watch_reset() {
ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
if (!m_initialized) {
return;
}
int r = m_ioctx.watch2(m_oid, &m_watch_handle, &m_watch_ctx);
if (r < 0) {
if (r == -ENOENT) {
ldout(m_cct, 5) << __func__ << ": journal header not found" << dendl;
} else if (r == -EBLOCKLISTED) {
ldout(m_cct, 5) << __func__ << ": client blocklisted" << dendl;
} else {
lderr(m_cct) << __func__ << ": failed to watch journal: "
<< cpp_strerror(r) << dendl;
}
schedule_watch_reset();
} else {
ldout(m_cct, 10) << __func__ << ": reset journal watch" << dendl;
refresh(NULL);
}
}
void JournalMetadata::handle_watch_notify(uint64_t notify_id, uint64_t cookie) {
ldout(m_cct, 10) << "journal header updated" << dendl;
bufferlist bl;
m_ioctx.notify_ack(m_oid, notify_id, cookie, bl);
{
std::lock_guard locker{m_lock};
if (m_ignore_watch_notifies > 0) {
--m_ignore_watch_notifies;
return;
}
}
refresh(NULL);
}
void JournalMetadata::handle_watch_error(int err) {
if (err == -ENOTCONN) {
ldout(m_cct, 5) << "journal watch error: header removed" << dendl;
} else if (err == -EBLOCKLISTED) {
lderr(m_cct) << "journal watch error: client blocklisted" << dendl;
} else {
lderr(m_cct) << "journal watch error: " << cpp_strerror(err) << dendl;
}
std::scoped_lock locker{*m_timer_lock, m_lock};
// release old watch on error
if (m_watch_handle != 0) {
m_ioctx.unwatch2(m_watch_handle);
m_watch_handle = 0;
}
if (m_initialized && err != -ENOENT) {
schedule_watch_reset();
}
}
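// allocate a monotonically increasing commit tid and track the entry's
// position until it is committed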
uint64_t JournalMetadata::allocate_commit_tid(uint64_t object_num,
uint64_t tag_tid,
uint64_t entry_tid) {
std::lock_guard locker{m_lock};
uint64_t commit_tid = ++m_commit_tid;
m_pending_commit_tids[commit_tid] = CommitEntry(object_num, tag_tid,
entry_tid);
ldout(m_cct, 20) << "allocated commit tid: commit_tid=" << commit_tid << " ["
<< "object_num=" << object_num << ", "
<< "tag_tid=" << tag_tid << ", "
<< "entry_tid=" << entry_tid << "]"
<< dendl;
return commit_tid;
}
void JournalMetadata::overflow_commit_tid(uint64_t commit_tid,
uint64_t object_num) {
std::lock_guard locker{m_lock};
auto it = m_pending_commit_tids.find(commit_tid);
ceph_assert(it != m_pending_commit_tids.end());
ceph_assert(it->second.object_num < object_num);
ldout(m_cct, 20) << __func__ << ": "
<< "commit_tid=" << commit_tid << ", "
<< "old_object_num=" << it->second.object_num << ", "
<< "new_object_num=" << object_num << dendl;
it->second.object_num = object_num;
}
void JournalMetadata::get_commit_entry(uint64_t commit_tid,
uint64_t *object_num,
uint64_t *tag_tid, uint64_t *entry_tid) {
std::lock_guard locker{m_lock};
auto it = m_pending_commit_tids.find(commit_tid);
ceph_assert(it != m_pending_commit_tids.end());
*object_num = it->second.object_num;
*tag_tid = it->second.tag_tid;
*entry_tid = it->second.entry_tid;
}
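// mark a commit tid as committed and, once a contiguous prefix of pending
// tids is complete, schedule an update of the client's commit position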
void JournalMetadata::committed(uint64_t commit_tid,
const CreateContext &create_context) {
ldout(m_cct, 20) << "committed tid=" << commit_tid << dendl;
ObjectSetPosition commit_position;
Context *stale_ctx = nullptr;
{
std::scoped_lock locker{*m_timer_lock, m_lock};
ceph_assert(commit_tid > m_commit_position_tid);
if (!m_commit_position.object_positions.empty()) {
// in-flight commit position update
commit_position = m_commit_position;
} else {
// safe commit position
commit_position = m_client.commit_position;
}
CommitTids::iterator it = m_pending_commit_tids.find(commit_tid);
ceph_assert(it != m_pending_commit_tids.end());
CommitEntry &commit_entry = it->second;
commit_entry.committed = true;
bool update_commit_position = false;
while (!m_pending_commit_tids.empty()) {
CommitTids::iterator it = m_pending_commit_tids.begin();
CommitEntry &commit_entry = it->second;
if (!commit_entry.committed) {
break;
}
commit_position.object_positions.emplace_front(
commit_entry.object_num, commit_entry.tag_tid,
commit_entry.entry_tid);
m_pending_commit_tids.erase(it);
update_commit_position = true;
}
if (!update_commit_position) {
return;
}
// prune the position to have one position per splay offset
std::set<uint8_t> in_use_splay_offsets;
ObjectPositions::iterator ob_it = commit_position.object_positions.begin();
while (ob_it != commit_position.object_positions.end()) {
uint8_t splay_offset = ob_it->object_number % m_splay_width;
if (!in_use_splay_offsets.insert(splay_offset).second) {
ob_it = commit_position.object_positions.erase(ob_it);
} else {
++ob_it;
}
}
stale_ctx = m_commit_position_ctx;
m_commit_position_ctx = create_context();
m_commit_position = commit_position;
m_commit_position_tid = commit_tid;
ldout(m_cct, 20) << "updated commit position: " << commit_position << ", "
<< "on_safe=" << m_commit_position_ctx << dendl;
schedule_commit_task();
}
if (stale_ctx != nullptr) {
ldout(m_cct, 20) << "canceling stale commit: on_safe=" << stale_ctx
<< dendl;
stale_ctx->complete(-ESTALE);
}
}
void JournalMetadata::notify_update() {
ldout(m_cct, 10) << "notifying journal header update" << dendl;
bufferlist bl;
m_ioctx.notify2(m_oid, bl, 5000, NULL);
}
void JournalMetadata::async_notify_update(Context *on_safe) {
ldout(m_cct, 10) << "async notifying journal header update" << dendl;
C_AioNotify *ctx = new C_AioNotify(this, on_safe);
librados::AioCompletion *comp =
librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
bufferlist bl;
int r = m_ioctx.aio_notify(m_oid, comp, bl, 5000, NULL);
ceph_assert(r == 0);
comp->release();
}
void JournalMetadata::wait_for_ops() {
C_SaferCond ctx;
m_async_op_tracker.wait_for_ops(&ctx);
ctx.wait();
}
void JournalMetadata::handle_notified(int r) {
ldout(m_cct, 10) << "notified journal header update: r=" << r << dendl;
}
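// flag registered clients as disconnected when their commit position lags the
// active set by more than max_concurrent_object_sets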
void JournalMetadata::schedule_laggy_clients_disconnect(Context *on_finish) {
ldout(m_cct, 20) << __func__ << dendl;
if (m_settings.max_concurrent_object_sets <= 0) {
on_finish->complete(0);
return;
}
Context *ctx = on_finish;
{
std::lock_guard locker{m_lock};
for (auto &c : m_registered_clients) {
if (c.state == cls::journal::CLIENT_STATE_DISCONNECTED ||
c.id == m_client_id ||
m_settings.ignored_laggy_clients.count(c.id) > 0) {
continue;
}
const std::string &client_id = c.id;
uint64_t object_set = 0;
if (!c.commit_position.object_positions.empty()) {
auto &position = *(c.commit_position.object_positions.begin());
object_set = position.object_number / m_splay_width;
}
if (m_active_set > object_set + m_settings.max_concurrent_object_sets) {
ldout(m_cct, 1) << __func__ << ": " << client_id
<< ": scheduling disconnect" << dendl;
ctx = new LambdaContext([this, client_id, ctx](int r1) {
ldout(m_cct, 10) << __func__ << ": " << client_id
<< ": flagging disconnected" << dendl;
librados::ObjectWriteOperation op;
client::client_update_state(
&op, client_id, cls::journal::CLIENT_STATE_DISCONNECTED);
auto comp = librados::Rados::aio_create_completion(
ctx, utils::rados_ctx_callback);
int r = m_ioctx.aio_operate(m_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
});
}
}
}
if (ctx == on_finish) {
ldout(m_cct, 20) << __func__ << ": no laggy clients to disconnect" << dendl;
}
ctx->complete(0);
}
std::ostream &operator<<(std::ostream &os,
const JournalMetadata::RegisteredClients &clients) {
os << "[";
for (JournalMetadata::RegisteredClients::const_iterator c = clients.begin();
c != clients.end(); ++c) {
os << (c == clients.begin() ? "" : ", " ) << *c;
}
os << "]";
return os;
}
std::ostream &operator<<(std::ostream &os,
const JournalMetadata &jm) {
std::lock_guard locker{jm.m_lock};
os << "[oid=" << jm.m_oid << ", "
<< "initialized=" << jm.m_initialized << ", "
<< "order=" << (int)jm.m_order << ", "
<< "splay_width=" << (int)jm.m_splay_width << ", "
<< "pool_id=" << jm.m_pool_id << ", "
<< "minimum_set=" << jm.m_minimum_set << ", "
<< "active_set=" << jm.m_active_set << ", "
<< "client_id=" << jm.m_client_id << ", "
<< "commit_tid=" << jm.m_commit_tid << ", "
<< "commit_interval=" << jm.m_settings.commit_interval << ", "
<< "commit_position=" << jm.m_commit_position << ", "
<< "registered_clients=" << jm.m_registered_clients << "]";
return os;
}
} // namespace journal
| 35,478 | 29.427959 | 137 |
cc
|
null |
ceph-main/src/journal/JournalMetadata.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_JOURNAL_METADATA_H
#define CEPH_JOURNAL_JOURNAL_METADATA_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "common/Cond.h"
#include "common/Timer.h"
#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include "common/WorkQueue.h"
#include "cls/journal/cls_journal_types.h"
#include "journal/JournalMetadataListener.h"
#include "journal/Settings.h"
#include <boost/noncopyable.hpp>
#include <boost/optional.hpp>
#include <functional>
#include <list>
#include <map>
#include <set>
#include <string>
#include "include/ceph_assert.h"
namespace journal {
class JournalMetadata : public RefCountedObject, boost::noncopyable {
public:
typedef std::function<Context*()> CreateContext;
typedef cls::journal::ObjectPosition ObjectPosition;
typedef cls::journal::ObjectPositions ObjectPositions;
typedef cls::journal::ObjectSetPosition ObjectSetPosition;
typedef cls::journal::Client Client;
typedef cls::journal::Tag Tag;
typedef std::set<Client> RegisteredClients;
typedef std::list<Tag> Tags;
void init(Context *on_init);
void shut_down(Context *on_finish);
bool is_initialized() const { return m_initialized; }
void get_immutable_metadata(uint8_t *order, uint8_t *splay_width,
int64_t *pool_id, Context *on_finish);
void get_mutable_metadata(uint64_t *minimum_set, uint64_t *active_set,
RegisteredClients *clients, Context *on_finish);
void add_listener(JournalMetadataListener *listener);
void remove_listener(JournalMetadataListener *listener);
void register_client(const bufferlist &data, Context *on_finish);
void update_client(const bufferlist &data, Context *on_finish);
void unregister_client(Context *on_finish);
void get_client(const std::string &client_id, cls::journal::Client *client,
Context *on_finish);
void allocate_tag(uint64_t tag_class, const bufferlist &data,
Tag *tag, Context *on_finish);
void get_tag(uint64_t tag_tid, Tag *tag, Context *on_finish);
void get_tags(uint64_t start_after_tag_tid,
const boost::optional<uint64_t> &tag_class, Tags *tags,
Context *on_finish);
inline const Settings &get_settings() const {
return m_settings;
}
inline const std::string &get_client_id() const {
return m_client_id;
}
inline uint8_t get_order() const {
return m_order;
}
inline uint64_t get_object_size() const {
return 1 << m_order;
}
inline uint8_t get_splay_width() const {
return m_splay_width;
}
inline int64_t get_pool_id() const {
return m_pool_id;
}
inline void queue(Context *on_finish, int r) {
m_work_queue->queue(on_finish, r);
}
inline ContextWQ *get_work_queue() {
return m_work_queue;
}
inline SafeTimer &get_timer() {
return *m_timer;
}
inline ceph::mutex &get_timer_lock() {
return *m_timer_lock;
}
void set_minimum_set(uint64_t object_set);
inline uint64_t get_minimum_set() const {
std::lock_guard locker{m_lock};
return m_minimum_set;
}
int set_active_set(uint64_t object_set);
void set_active_set(uint64_t object_set, Context *on_finish);
inline uint64_t get_active_set() const {
std::lock_guard locker{m_lock};
return m_active_set;
}
void assert_active_tag(uint64_t tag_tid, Context *on_finish);
void flush_commit_position();
void flush_commit_position(Context *on_safe);
void get_commit_position(ObjectSetPosition *commit_position) const {
std::lock_guard locker{m_lock};
*commit_position = m_client.commit_position;
}
void get_registered_clients(RegisteredClients *registered_clients) {
std::lock_guard locker{m_lock};
*registered_clients = m_registered_clients;
}
inline uint64_t allocate_entry_tid(uint64_t tag_tid) {
std::lock_guard locker{m_lock};
return m_allocated_entry_tids[tag_tid]++;
}
void reserve_entry_tid(uint64_t tag_tid, uint64_t entry_tid);
bool get_last_allocated_entry_tid(uint64_t tag_tid, uint64_t *entry_tid) const;
uint64_t allocate_commit_tid(uint64_t object_num, uint64_t tag_tid,
uint64_t entry_tid);
void overflow_commit_tid(uint64_t commit_tid, uint64_t object_num);
void get_commit_entry(uint64_t commit_tid, uint64_t *object_num,
uint64_t *tag_tid, uint64_t *entry_tid);
void committed(uint64_t commit_tid, const CreateContext &create_context);
void notify_update();
void async_notify_update(Context *on_safe);
void wait_for_ops();
private:
FRIEND_MAKE_REF(JournalMetadata);
JournalMetadata(ContextWQ *work_queue, SafeTimer *timer, ceph::mutex *timer_lock,
librados::IoCtx &ioctx, const std::string &oid,
const std::string &client_id, const Settings &settings);
~JournalMetadata() override;
typedef std::map<uint64_t, uint64_t> AllocatedEntryTids;
typedef std::list<JournalMetadataListener*> Listeners;
typedef std::list<Context*> Contexts;
struct CommitEntry {
uint64_t object_num;
uint64_t tag_tid;
uint64_t entry_tid;
bool committed;
CommitEntry() : object_num(0), tag_tid(0), entry_tid(0), committed(false) {
}
CommitEntry(uint64_t _object_num, uint64_t _tag_tid, uint64_t _entry_tid)
: object_num(_object_num), tag_tid(_tag_tid), entry_tid(_entry_tid),
committed(false) {
}
};
typedef std::map<uint64_t, CommitEntry> CommitTids;
struct C_WatchCtx : public librados::WatchCtx2 {
JournalMetadata *journal_metadata;
C_WatchCtx(JournalMetadata *_journal_metadata)
: journal_metadata(_journal_metadata) {}
void handle_notify(uint64_t notify_id, uint64_t cookie,
uint64_t notifier_id, bufferlist& bl) override {
journal_metadata->handle_watch_notify(notify_id, cookie);
}
void handle_error(uint64_t cookie, int err) override {
journal_metadata->handle_watch_error(err);
}
};
struct C_WatchReset : public Context {
JournalMetadata *journal_metadata;
C_WatchReset(JournalMetadata *_journal_metadata)
: journal_metadata(_journal_metadata) {
journal_metadata->m_async_op_tracker.start_op();
}
~C_WatchReset() override {
journal_metadata->m_async_op_tracker.finish_op();
}
void finish(int r) override {
journal_metadata->handle_watch_reset();
}
};
struct C_CommitPositionTask : public Context {
JournalMetadata *journal_metadata;
C_CommitPositionTask(JournalMetadata *_journal_metadata)
: journal_metadata(_journal_metadata) {
journal_metadata->m_async_op_tracker.start_op();
}
~C_CommitPositionTask() override {
journal_metadata->m_async_op_tracker.finish_op();
}
void finish(int r) override {
std::lock_guard locker{journal_metadata->m_lock};
journal_metadata->handle_commit_position_task();
    }
};
struct C_AioNotify : public Context {
JournalMetadata* journal_metadata;
Context *on_safe;
C_AioNotify(JournalMetadata *_journal_metadata, Context *_on_safe)
: journal_metadata(_journal_metadata), on_safe(_on_safe) {
journal_metadata->m_async_op_tracker.start_op();
}
~C_AioNotify() override {
journal_metadata->m_async_op_tracker.finish_op();
}
void finish(int r) override {
journal_metadata->handle_notified(r);
if (on_safe != nullptr) {
on_safe->complete(0);
}
}
};
struct C_NotifyUpdate : public Context {
JournalMetadata* journal_metadata;
Context *on_safe;
C_NotifyUpdate(JournalMetadata *_journal_metadata, Context *_on_safe = NULL)
: journal_metadata(_journal_metadata), on_safe(_on_safe) {
journal_metadata->m_async_op_tracker.start_op();
}
~C_NotifyUpdate() override {
journal_metadata->m_async_op_tracker.finish_op();
}
void finish(int r) override {
if (r == 0) {
journal_metadata->async_notify_update(on_safe);
return;
}
if (on_safe != NULL) {
on_safe->complete(r);
}
}
};
struct C_ImmutableMetadata : public Context {
JournalMetadata* journal_metadata;
Context *on_finish;
C_ImmutableMetadata(JournalMetadata *_journal_metadata, Context *_on_finish)
: journal_metadata(_journal_metadata), on_finish(_on_finish) {
std::lock_guard locker{journal_metadata->m_lock};
journal_metadata->m_async_op_tracker.start_op();
}
~C_ImmutableMetadata() override {
journal_metadata->m_async_op_tracker.finish_op();
}
void finish(int r) override {
journal_metadata->handle_immutable_metadata(r, on_finish);
}
};
struct C_Refresh : public Context {
JournalMetadata* journal_metadata;
uint64_t minimum_set;
uint64_t active_set;
RegisteredClients registered_clients;
C_Refresh(JournalMetadata *_journal_metadata)
: journal_metadata(_journal_metadata), minimum_set(0), active_set(0) {
std::lock_guard locker{journal_metadata->m_lock};
journal_metadata->m_async_op_tracker.start_op();
}
~C_Refresh() override {
journal_metadata->m_async_op_tracker.finish_op();
}
void finish(int r) override {
journal_metadata->handle_refresh_complete(this, r);
}
};
librados::IoCtx m_ioctx;
CephContext *m_cct = nullptr;
std::string m_oid;
std::string m_client_id;
Settings m_settings;
uint8_t m_order = 0;
uint8_t m_splay_width = 0;
int64_t m_pool_id = -1;
bool m_initialized = false;
ContextWQ *m_work_queue;
SafeTimer *m_timer;
ceph::mutex *m_timer_lock;
mutable ceph::mutex m_lock = ceph::make_mutex("JournalMetadata::m_lock");
uint64_t m_commit_tid = 0;
CommitTids m_pending_commit_tids;
Listeners m_listeners;
C_WatchCtx m_watch_ctx;
uint64_t m_watch_handle = 0;
uint64_t m_minimum_set = 0;
uint64_t m_active_set = 0;
RegisteredClients m_registered_clients;
Client m_client;
AllocatedEntryTids m_allocated_entry_tids;
size_t m_update_notifications = 0;
ceph::condition_variable m_update_cond;
size_t m_ignore_watch_notifies = 0;
size_t m_refreshes_in_progress = 0;
Contexts m_refresh_ctxs;
uint64_t m_commit_position_tid = 0;
ObjectSetPosition m_commit_position;
Context *m_commit_position_ctx = nullptr;
Context *m_commit_position_task_ctx = nullptr;
size_t m_flush_commits_in_progress = 0;
Contexts m_flush_commit_position_ctxs;
AsyncOpTracker m_async_op_tracker;
void handle_immutable_metadata(int r, Context *on_init);
void refresh(Context *on_finish);
void handle_refresh_complete(C_Refresh *refresh, int r);
void cancel_commit_task();
void schedule_commit_task();
void handle_commit_position_task();
void schedule_watch_reset();
void handle_watch_reset();
void handle_watch_notify(uint64_t notify_id, uint64_t cookie);
void handle_watch_error(int err);
void handle_notified(int r);
void schedule_laggy_clients_disconnect(Context *on_finish);
friend std::ostream &operator<<(std::ostream &os,
const JournalMetadata &journal_metadata);
};
std::ostream &operator<<(std::ostream &os,
const JournalMetadata::RegisteredClients &clients);
std::ostream &operator<<(std::ostream &os,
const JournalMetadata &journal_metadata);
} // namespace journal
#endif // CEPH_JOURNAL_JOURNAL_METADATA_H
| 11,486 | 29.550532 | 83 |
h
|
null |
ceph-main/src/journal/JournalMetadataListener.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_JOURNAL_JOURNAL_METADATA_LISTENER_H
#define CEPH_JOURNAL_JOURNAL_METADATA_LISTENER_H
namespace journal {
class JournalMetadata;
struct JournalMetadataListener {
  virtual ~JournalMetadataListener() {}
virtual void handle_update(JournalMetadata *) = 0;
};
} // namespace journal
#endif // CEPH_JOURNAL_JOURNAL_METADATA_LISTENER_H
| 758 | 23.483871 | 70 |
h
|
null |
ceph-main/src/journal/JournalPlayer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/PriorityCache.h"
#include "include/stringify.h"
#include "journal/JournalPlayer.h"
#include "journal/Entry.h"
#include "journal/ReplayHandler.h"
#include "journal/Types.h"
#include "journal/Utils.h"
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "JournalPlayer: " << this << " "
namespace journal {
namespace {
static const uint64_t MIN_FETCH_BYTES = 32768;
struct C_HandleComplete : public Context {
ReplayHandler* replay_handler;
  explicit C_HandleComplete(ReplayHandler* r) : replay_handler(r) {}
~C_HandleComplete() override {}
void finish(int r) override {
replay_handler->handle_complete(r);
}
};
struct C_HandleEntriesAvailable : public Context {
ReplayHandler* replay_handler;
  explicit C_HandleEntriesAvailable(ReplayHandler* r) : replay_handler(r) {}
~C_HandleEntriesAvailable() override {}
void finish(int r) override {
replay_handler->handle_entries_available();
}
};
} // anonymous namespace
JournalPlayer::JournalPlayer(librados::IoCtx &ioctx,
std::string_view object_oid_prefix,
ceph::ref_t<JournalMetadata> journal_metadata,
ReplayHandler* replay_handler,
CacheManagerHandler *cache_manager_handler)
: m_object_oid_prefix(object_oid_prefix),
m_journal_metadata(std::move(journal_metadata)),
    m_replay_handler(replay_handler),
m_cache_manager_handler(cache_manager_handler),
m_cache_rebalance_handler(this)
{
m_ioctx.dup(ioctx);
m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
ObjectSetPosition commit_position;
m_journal_metadata->get_commit_position(&commit_position);
if (!commit_position.object_positions.empty()) {
ldout(m_cct, 5) << "commit position: " << commit_position << dendl;
// start replay after the last committed entry's object
uint8_t splay_width = m_journal_metadata->get_splay_width();
auto &active_position = commit_position.object_positions.front();
m_active_tag_tid = active_position.tag_tid;
m_commit_position_valid = true;
m_commit_position = active_position;
m_splay_offset = active_position.object_number % splay_width;
for (auto &position : commit_position.object_positions) {
uint8_t splay_offset = position.object_number % splay_width;
m_commit_positions[splay_offset] = position;
}
}
if (m_cache_manager_handler != nullptr) {
m_cache_name = "JournalPlayer/" + stringify(m_ioctx.get_id()) + "/" +
m_object_oid_prefix;
auto order = m_journal_metadata->get_order();
auto splay_width = m_journal_metadata->get_splay_width();
uint64_t min_size = MIN_FETCH_BYTES * splay_width;
uint64_t max_size = (2 << order) * splay_width;
m_cache_manager_handler->register_cache(m_cache_name, min_size, max_size,
&m_cache_rebalance_handler);
m_max_fetch_bytes = 0;
} else {
m_max_fetch_bytes = 2 << m_journal_metadata->get_order();
}
}
JournalPlayer::~JournalPlayer() {
ceph_assert(m_async_op_tracker.empty());
{
std::lock_guard locker{m_lock};
ceph_assert(m_shut_down);
ceph_assert(m_fetch_object_numbers.empty());
ceph_assert(!m_watch_scheduled);
}
if (m_cache_manager_handler != nullptr) {
m_cache_manager_handler->unregister_cache(m_cache_name);
}
}
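// kick off playback by fetching the active object for each splay offset,
// seeking past entries already recorded in the commit position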
void JournalPlayer::prefetch() {
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_INIT);
if (m_shut_down) {
return;
}
if (m_cache_manager_handler != nullptr && m_max_fetch_bytes == 0) {
m_state = STATE_WAITCACHE;
return;
}
m_state = STATE_PREFETCH;
m_active_set = m_journal_metadata->get_active_set();
uint8_t splay_width = m_journal_metadata->get_splay_width();
for (uint8_t splay_offset = 0; splay_offset < splay_width; ++splay_offset) {
m_prefetch_splay_offsets.insert(splay_offset);
}
// compute active object for each splay offset (might be before
// active set)
std::map<uint8_t, uint64_t> splay_offset_to_objects;
for (auto &position : m_commit_positions) {
ceph_assert(splay_offset_to_objects.count(position.first) == 0);
splay_offset_to_objects[position.first] = position.second.object_number;
}
// prefetch the active object for each splay offset
std::set<uint64_t> prefetch_object_numbers;
for (uint8_t splay_offset = 0; splay_offset < splay_width; ++splay_offset) {
uint64_t object_number = splay_offset;
if (splay_offset_to_objects.count(splay_offset) != 0) {
object_number = splay_offset_to_objects[splay_offset];
}
prefetch_object_numbers.insert(object_number);
}
ldout(m_cct, 10) << __func__ << ": prefetching "
<< prefetch_object_numbers.size() << " " << "objects"
<< dendl;
for (auto object_number : prefetch_object_numbers) {
fetch(object_number);
}
}
void JournalPlayer::prefetch_and_watch(double interval) {
{
std::lock_guard locker{m_lock};
m_watch_enabled = true;
m_watch_interval = interval;
m_watch_step = WATCH_STEP_FETCH_CURRENT;
}
prefetch();
}
void JournalPlayer::shut_down(Context *on_finish) {
ldout(m_cct, 20) << __func__ << dendl;
std::lock_guard locker{m_lock};
ceph_assert(!m_shut_down);
m_shut_down = true;
m_watch_enabled = false;
on_finish = utils::create_async_context_callback(
m_journal_metadata, on_finish);
if (m_watch_scheduled) {
auto object_player = get_object_player();
switch (m_watch_step) {
case WATCH_STEP_FETCH_FIRST:
object_player = m_object_players.begin()->second;
// fallthrough
case WATCH_STEP_FETCH_CURRENT:
object_player->unwatch();
break;
case WATCH_STEP_ASSERT_ACTIVE:
break;
}
}
m_async_op_tracker.wait_for_ops(on_finish);
}
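// retrieve the next entry in playback order; returns false if no entry is
// currently ready to be consumed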
bool JournalPlayer::try_pop_front(Entry *entry, uint64_t *commit_tid) {
ldout(m_cct, 20) << __func__ << dendl;
std::lock_guard locker{m_lock};
if (m_state != STATE_PLAYBACK) {
m_handler_notified = false;
return false;
}
if (!verify_playback_ready()) {
if (!is_object_set_ready()) {
m_handler_notified = false;
} else {
refetch(true);
}
return false;
}
auto object_player = get_object_player();
ceph_assert(object_player && !object_player->empty());
object_player->front(entry);
object_player->pop_front();
uint64_t last_entry_tid;
if (m_journal_metadata->get_last_allocated_entry_tid(
entry->get_tag_tid(), &last_entry_tid) &&
entry->get_entry_tid() != last_entry_tid + 1) {
lderr(m_cct) << "missing prior journal entry: " << *entry << dendl;
m_state = STATE_ERROR;
notify_complete(-ENOMSG);
return false;
}
advance_splay_object();
remove_empty_object_player(object_player);
m_journal_metadata->reserve_entry_tid(entry->get_tag_tid(),
entry->get_entry_tid());
*commit_tid = m_journal_metadata->allocate_commit_tid(
object_player->get_object_number(), entry->get_tag_tid(),
entry->get_entry_tid());
return true;
}
void JournalPlayer::process_state(uint64_t object_number, int r) {
ldout(m_cct, 10) << __func__ << ": object_num=" << object_number << ", "
<< "r=" << r << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
if (r >= 0) {
switch (m_state) {
case STATE_PREFETCH:
ldout(m_cct, 10) << "PREFETCH" << dendl;
r = process_prefetch(object_number);
break;
case STATE_PLAYBACK:
ldout(m_cct, 10) << "PLAYBACK" << dendl;
r = process_playback(object_number);
break;
case STATE_ERROR:
ldout(m_cct, 10) << "ERROR" << dendl;
break;
default:
lderr(m_cct) << "UNEXPECTED STATE (" << m_state << ")" << dendl;
ceph_abort();
break;
}
}
if (r < 0) {
m_state = STATE_ERROR;
notify_complete(r);
}
}
int JournalPlayer::process_prefetch(uint64_t object_number) {
ldout(m_cct, 10) << __func__ << ": object_num=" << object_number << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
uint8_t splay_width = m_journal_metadata->get_splay_width();
uint8_t splay_offset = object_number % splay_width;
PrefetchSplayOffsets::iterator it = m_prefetch_splay_offsets.find(
splay_offset);
if (it == m_prefetch_splay_offsets.end()) {
return 0;
}
bool prefetch_complete = false;
ceph_assert(m_object_players.count(splay_offset) == 1);
auto object_player = m_object_players[splay_offset];
// prefetch in-order since a newer splay object could prefetch first
if (m_fetch_object_numbers.count(object_player->get_object_number()) == 0) {
// skip past known committed records
if (m_commit_positions.count(splay_offset) != 0 &&
!object_player->empty()) {
ObjectPosition &position = m_commit_positions[splay_offset];
ldout(m_cct, 15) << "seeking known commit position " << position << " in "
<< object_player->get_oid() << dendl;
bool found_commit = false;
Entry entry;
while (!object_player->empty()) {
object_player->front(&entry);
if (entry.get_tag_tid() == position.tag_tid &&
entry.get_entry_tid() == position.entry_tid) {
found_commit = true;
} else if (found_commit) {
ldout(m_cct, 10) << "located next uncommitted entry: " << entry
<< dendl;
break;
}
ldout(m_cct, 20) << "skipping committed entry: " << entry << dendl;
m_journal_metadata->reserve_entry_tid(entry.get_tag_tid(),
entry.get_entry_tid());
object_player->pop_front();
}
// do not search for commit position for this object
// if we've already seen it
if (found_commit) {
m_commit_positions.erase(splay_offset);
}
}
// if the object is empty, pre-fetch the next splay object
if (object_player->empty() && object_player->refetch_required()) {
ldout(m_cct, 10) << "refetching potentially partially decoded object"
<< dendl;
object_player->set_refetch_state(ObjectPlayer::REFETCH_STATE_NONE);
fetch(object_player);
} else if (!remove_empty_object_player(object_player)) {
ldout(m_cct, 10) << "prefetch of object complete" << dendl;
prefetch_complete = true;
}
}
if (!prefetch_complete) {
return 0;
}
m_prefetch_splay_offsets.erase(it);
if (!m_prefetch_splay_offsets.empty()) {
return 0;
}
ldout(m_cct, 10) << "switching to playback mode" << dendl;
m_state = STATE_PLAYBACK;
// if we have a valid commit position, our read should start with
// the next consistent journal entry in the sequence
if (m_commit_position_valid) {
splay_offset = m_commit_position.object_number % splay_width;
object_player = m_object_players[splay_offset];
if (object_player->empty()) {
if (!object_player->refetch_required()) {
advance_splay_object();
}
} else {
Entry entry;
object_player->front(&entry);
if (entry.get_tag_tid() == m_commit_position.tag_tid) {
advance_splay_object();
}
}
}
if (verify_playback_ready()) {
notify_entries_available();
} else if (is_object_set_ready()) {
refetch(false);
}
return 0;
}
int JournalPlayer::process_playback(uint64_t object_number) {
ldout(m_cct, 10) << __func__ << ": object_num=" << object_number << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
if (verify_playback_ready()) {
notify_entries_available();
} else if (is_object_set_ready()) {
refetch(false);
}
return 0;
}
bool JournalPlayer::is_object_set_ready() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_watch_scheduled || !m_fetch_object_numbers.empty()) {
ldout(m_cct, 20) << __func__ << ": waiting for in-flight fetch" << dendl;
return false;
}
return true;
}
bool JournalPlayer::verify_playback_ready() {
ceph_assert(ceph_mutex_is_locked(m_lock));
while (true) {
if (!is_object_set_ready()) {
ldout(m_cct, 10) << __func__ << ": waiting for full object set" << dendl;
return false;
}
auto object_player = get_object_player();
ceph_assert(object_player);
uint64_t object_num = object_player->get_object_number();
    // Verify if the active object player has another entry available
    // in the sequence
// NOTE: replay currently does not check tag class to playback multiple tags
// from different classes (issue #14909). When a new tag is discovered, it
// is assumed that the previous tag was closed at the last replayable entry.
Entry entry;
if (!object_player->empty()) {
m_watch_prune_active_tag = false;
object_player->front(&entry);
if (!m_active_tag_tid) {
ldout(m_cct, 10) << __func__ << ": "
<< "object_num=" << object_num << ", "
<< "initial tag=" << entry.get_tag_tid()
<< dendl;
m_active_tag_tid = entry.get_tag_tid();
return true;
} else if (entry.get_tag_tid() < *m_active_tag_tid ||
(m_prune_tag_tid && entry.get_tag_tid() <= *m_prune_tag_tid)) {
// entry occurred before the current active tag
ldout(m_cct, 10) << __func__ << ": detected stale entry: "
<< "object_num=" << object_num << ", "
<< "entry=" << entry << dendl;
prune_tag(entry.get_tag_tid());
continue;
} else if (entry.get_tag_tid() > *m_active_tag_tid) {
// new tag at current playback position -- implies that previous
// tag ended abruptly without flushing out all records
// search for the start record for the next tag
ldout(m_cct, 10) << __func__ << ": new tag detected: "
<< "object_num=" << object_num << ", "
<< "active_tag=" << *m_active_tag_tid << ", "
<< "new_tag=" << entry.get_tag_tid() << dendl;
if (entry.get_entry_tid() == 0) {
// first entry in new tag -- can promote to active
prune_active_tag(entry.get_tag_tid());
return true;
} else {
// prune current active and wait for initial entry for new tag
prune_active_tag(boost::none);
continue;
}
} else {
ldout(m_cct, 20) << __func__ << ": "
<< "object_num=" << object_num << ", "
<< "entry: " << entry << dendl;
ceph_assert(entry.get_tag_tid() == *m_active_tag_tid);
return true;
}
} else {
if (!m_active_tag_tid) {
// waiting for our first entry
ldout(m_cct, 10) << __func__ << ": waiting for first entry: "
<< "object_num=" << object_num << dendl;
return false;
} else if (m_prune_tag_tid && *m_prune_tag_tid == *m_active_tag_tid) {
ldout(m_cct, 10) << __func__ << ": no more entries" << dendl;
return false;
} else if (m_watch_enabled && m_watch_prune_active_tag) {
        // detected the current tag is no longer active and we have re-read
        // the current object but it's still empty, so this tag is done
ldout(m_cct, 10) << __func__ << ": assuming no more in-sequence entries: "
<< "object_num=" << object_num << ", "
<< "active_tag " << *m_active_tag_tid << dendl;
prune_active_tag(boost::none);
continue;
} else if (object_player->refetch_required()) {
// if the active object requires a refetch, don't proceed looking for a
// new tag before this process completes
ldout(m_cct, 10) << __func__ << ": refetch required: "
<< "object_num=" << object_num << dendl;
return false;
} else if (!m_watch_enabled) {
// current playback position is empty so this tag is done
ldout(m_cct, 10) << __func__ << ": no more in-sequence entries: "
<< "object_num=" << object_num << ", "
<< "active_tag=" << *m_active_tag_tid << dendl;
prune_active_tag(boost::none);
continue;
} else if (!m_watch_scheduled) {
// no more entries and we don't have an active watch in-progress
ldout(m_cct, 10) << __func__ << ": no more entries -- watch required"
<< dendl;
return false;
}
}
}
return false;
}
void JournalPlayer::prune_tag(uint64_t tag_tid) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ldout(m_cct, 10) << __func__ << ": pruning remaining entries for tag "
<< tag_tid << dendl;
// prune records that are at or below the largest prune tag tid
if (!m_prune_tag_tid || *m_prune_tag_tid < tag_tid) {
m_prune_tag_tid = tag_tid;
}
bool pruned = false;
for (const auto &player_pair : m_object_players) {
auto& object_player = player_pair.second;
ldout(m_cct, 15) << __func__ << ": checking " << object_player->get_oid()
<< dendl;
while (!object_player->empty()) {
Entry entry;
object_player->front(&entry);
if (entry.get_tag_tid() == tag_tid) {
ldout(m_cct, 20) << __func__ << ": pruned " << entry << dendl;
object_player->pop_front();
pruned = true;
} else {
break;
}
}
}
// avoid watch delay when pruning stale tags from journal objects
if (pruned) {
ldout(m_cct, 15) << __func__ << ": resetting refetch state to immediate"
<< dendl;
for (const auto &player_pair : m_object_players) {
auto& object_player = player_pair.second;
object_player->set_refetch_state(ObjectPlayer::REFETCH_STATE_IMMEDIATE);
}
}
// trim empty player to prefetch the next available object
for (const auto &player_pair : m_object_players) {
remove_empty_object_player(player_pair.second);
}
}
void JournalPlayer::prune_active_tag(const boost::optional<uint64_t>& tag_tid) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_active_tag_tid);
uint64_t active_tag_tid = *m_active_tag_tid;
if (tag_tid) {
m_active_tag_tid = tag_tid;
}
m_splay_offset = 0;
m_watch_step = WATCH_STEP_FETCH_CURRENT;
prune_tag(active_tag_tid);
}
ceph::ref_t<ObjectPlayer> JournalPlayer::get_object_player() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
SplayedObjectPlayers::const_iterator it = m_object_players.find(
m_splay_offset);
ceph_assert(it != m_object_players.end());
return it->second;
}
ceph::ref_t<ObjectPlayer> JournalPlayer::get_object_player(uint64_t object_number) const {
ceph_assert(ceph_mutex_is_locked(m_lock));
uint8_t splay_width = m_journal_metadata->get_splay_width();
uint8_t splay_offset = object_number % splay_width;
auto splay_it = m_object_players.find(splay_offset);
ceph_assert(splay_it != m_object_players.end());
auto object_player = splay_it->second;
ceph_assert(object_player->get_object_number() == object_number);
return object_player;
}
void JournalPlayer::advance_splay_object() {
ceph_assert(ceph_mutex_is_locked(m_lock));
++m_splay_offset;
m_splay_offset %= m_journal_metadata->get_splay_width();
m_watch_step = WATCH_STEP_FETCH_CURRENT;
ldout(m_cct, 20) << __func__ << ": new offset "
<< static_cast<uint32_t>(m_splay_offset) << dendl;
}
bool JournalPlayer::remove_empty_object_player(const ceph::ref_t<ObjectPlayer> &player) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_watch_scheduled);
uint8_t splay_width = m_journal_metadata->get_splay_width();
uint64_t object_set = player->get_object_number() / splay_width;
uint64_t active_set = m_journal_metadata->get_active_set();
if (!player->empty() || object_set == active_set) {
return false;
} else if (player->refetch_required()) {
ldout(m_cct, 20) << __func__ << ": " << player->get_oid() << " requires "
<< "a refetch" << dendl;
return false;
} else if (m_active_set != active_set) {
ldout(m_cct, 20) << __func__ << ": new active set detected, all players "
<< "require refetch" << dendl;
m_active_set = active_set;
for (const auto& pair : m_object_players) {
pair.second->set_refetch_state(ObjectPlayer::REFETCH_STATE_IMMEDIATE);
}
return false;
}
ldout(m_cct, 15) << __func__ << ": " << player->get_oid() << " empty"
<< dendl;
m_watch_prune_active_tag = false;
m_watch_step = WATCH_STEP_FETCH_CURRENT;
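  // advance this splay slot to its object in the next object set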
uint64_t next_object_num = player->get_object_number() + splay_width;
fetch(next_object_num);
return true;
}
void JournalPlayer::fetch(uint64_t object_num) {
ceph_assert(ceph_mutex_is_locked(m_lock));
auto object_player = ceph::make_ref<ObjectPlayer>(
m_ioctx, m_object_oid_prefix, object_num, m_journal_metadata->get_timer(),
m_journal_metadata->get_timer_lock(), m_journal_metadata->get_order(),
m_max_fetch_bytes);
auto splay_width = m_journal_metadata->get_splay_width();
m_object_players[object_num % splay_width] = object_player;
fetch(object_player);
}
void JournalPlayer::fetch(const ceph::ref_t<ObjectPlayer> &object_player) {
ceph_assert(ceph_mutex_is_locked(m_lock));
uint64_t object_num = object_player->get_object_number();
std::string oid = utils::get_object_name(m_object_oid_prefix, object_num);
ceph_assert(m_fetch_object_numbers.count(object_num) == 0);
m_fetch_object_numbers.insert(object_num);
ldout(m_cct, 10) << __func__ << ": " << oid << dendl;
C_Fetch *fetch_ctx = new C_Fetch(this, object_num);
object_player->fetch(fetch_ctx);
}
void JournalPlayer::handle_fetched(uint64_t object_num, int r) {
ldout(m_cct, 10) << __func__ << ": "
<< utils::get_object_name(m_object_oid_prefix, object_num)
<< ": r=" << r << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_fetch_object_numbers.count(object_num) == 1);
m_fetch_object_numbers.erase(object_num);
if (m_shut_down) {
return;
}
if (r == 0) {
auto object_player = get_object_player(object_num);
remove_empty_object_player(object_player);
}
process_state(object_num, r);
}
void JournalPlayer::refetch(bool immediate) {
ldout(m_cct, 10) << __func__ << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
m_handler_notified = false;
// if watching the object, handle the periodic re-fetch
if (m_watch_enabled) {
schedule_watch(immediate);
return;
}
auto object_player = get_object_player();
if (object_player->refetch_required()) {
object_player->set_refetch_state(ObjectPlayer::REFETCH_STATE_NONE);
fetch(object_player);
return;
}
notify_complete(0);
}
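// Live-replay watch sequence: the watch step cycles between re-fetching the
// current splay object (WATCH_STEP_FETCH_CURRENT), re-fetching the first
// object of the set when the current object is not at splay offset 0
// (WATCH_STEP_FETCH_FIRST), and asserting that the active tag is still
// current (WATCH_STEP_ASSERT_ACTIVE); see handle_watch() and
// handle_watch_assert_active() for the transitions.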
void JournalPlayer::schedule_watch(bool immediate) {
ldout(m_cct, 10) << __func__ << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_watch_scheduled) {
return;
}
m_watch_scheduled = true;
if (m_watch_step == WATCH_STEP_ASSERT_ACTIVE) {
// detect if a new tag has been created in case we are blocked
// by an incomplete tag sequence
ldout(m_cct, 20) << __func__ << ": asserting active tag="
<< *m_active_tag_tid << dendl;
m_async_op_tracker.start_op();
auto ctx = new LambdaContext([this](int r) {
handle_watch_assert_active(r);
});
m_journal_metadata->assert_active_tag(*m_active_tag_tid, ctx);
return;
}
ceph::ref_t<ObjectPlayer> object_player;
double watch_interval = m_watch_interval;
switch (m_watch_step) {
case WATCH_STEP_FETCH_CURRENT:
{
object_player = get_object_player();
uint8_t splay_width = m_journal_metadata->get_splay_width();
uint64_t active_set = m_journal_metadata->get_active_set();
uint64_t object_set = object_player->get_object_number() / splay_width;
if (immediate ||
(object_player->get_refetch_state() ==
ObjectPlayer::REFETCH_STATE_IMMEDIATE) ||
(object_set < active_set && object_player->refetch_required())) {
ldout(m_cct, 20) << __func__ << ": immediately refetching "
<< object_player->get_oid()
<< dendl;
object_player->set_refetch_state(ObjectPlayer::REFETCH_STATE_NONE);
watch_interval = 0;
}
}
break;
case WATCH_STEP_FETCH_FIRST:
object_player = m_object_players.begin()->second;
watch_interval = 0;
break;
default:
ceph_abort();
}
ldout(m_cct, 20) << __func__ << ": scheduling watch on "
<< object_player->get_oid() << dendl;
Context *ctx = utils::create_async_context_callback(
m_journal_metadata, new C_Watch(this, object_player->get_object_number()));
object_player->watch(ctx, watch_interval);
}
void JournalPlayer::handle_watch(uint64_t object_num, int r) {
ldout(m_cct, 10) << __func__ << ": r=" << r << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_watch_scheduled);
m_watch_scheduled = false;
if (m_shut_down || r == -ECANCELED) {
// unwatch of object player(s)
return;
}
auto object_player = get_object_player(object_num);
if (r == 0 && object_player->empty()) {
// possibly need to prune this empty object player if we've
// already fetched it after the active set was advanced with no
// new records
remove_empty_object_player(object_player);
}
// determine what object to query on next watch schedule tick
uint8_t splay_width = m_journal_metadata->get_splay_width();
if (m_watch_step == WATCH_STEP_FETCH_CURRENT &&
object_player->get_object_number() % splay_width != 0) {
m_watch_step = WATCH_STEP_FETCH_FIRST;
} else if (m_active_tag_tid) {
m_watch_step = WATCH_STEP_ASSERT_ACTIVE;
} else {
m_watch_step = WATCH_STEP_FETCH_CURRENT;
}
process_state(object_num, r);
}
void JournalPlayer::handle_watch_assert_active(int r) {
ldout(m_cct, 10) << __func__ << ": r=" << r << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_watch_scheduled);
m_watch_scheduled = false;
if (r == -ESTALE) {
// newer tag exists -- since we are at this step in the watch sequence,
// we know we can prune the active tag if watch fails again
ldout(m_cct, 10) << __func__ << ": tag " << *m_active_tag_tid << " "
<< "no longer active" << dendl;
m_watch_prune_active_tag = true;
}
m_watch_step = WATCH_STEP_FETCH_CURRENT;
if (!m_shut_down && m_watch_enabled) {
schedule_watch(false);
}
m_async_op_tracker.finish_op();
}
void JournalPlayer::notify_entries_available() {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_handler_notified) {
return;
}
m_handler_notified = true;
ldout(m_cct, 10) << __func__ << ": entries available" << dendl;
m_journal_metadata->queue(new C_HandleEntriesAvailable(m_replay_handler), 0);
}
void JournalPlayer::notify_complete(int r) {
ceph_assert(ceph_mutex_is_locked(m_lock));
m_handler_notified = true;
ldout(m_cct, 10) << __func__ << ": replay complete: r=" << r << dendl;
m_journal_metadata->queue(new C_HandleComplete(m_replay_handler), r);
}
void JournalPlayer::handle_cache_rebalanced(uint64_t new_cache_bytes) {
std::lock_guard locker{m_lock};
if (m_state == STATE_ERROR || m_shut_down) {
return;
}
auto splay_width = m_journal_metadata->get_splay_width();
m_max_fetch_bytes = p2align<uint64_t>(new_cache_bytes / splay_width, 4096);
ldout(m_cct, 10) << __func__ << ": new_cache_bytes=" << new_cache_bytes
<< ", max_fetch_bytes=" << m_max_fetch_bytes << dendl;
uint64_t min_bytes = MIN_FETCH_BYTES;
if (m_state == STATE_WAITCACHE) {
m_state = STATE_INIT;
if (m_max_fetch_bytes >= min_bytes) {
m_async_op_tracker.start_op();
auto ctx = new LambdaContext(
[this](int r) {
prefetch();
m_async_op_tracker.finish_op();
});
m_journal_metadata->queue(ctx, 0);
return;
}
} else {
min_bytes = p2align<uint64_t>(min_bytes - (rand() % min_bytes) / 2, 4096);
}
if (m_max_fetch_bytes < min_bytes) {
lderr(m_cct) << __func__ << ": can't allocate enough memory from cache"
<< dendl;
m_state = STATE_ERROR;
notify_complete(-ENOMEM);
return;
}
for (auto &pair : m_object_players) {
pair.second->set_max_fetch_bytes(m_max_fetch_bytes);
}
}
} // namespace journal
| 28,705 | 31.919725 | 90 |
cc
|
null |
ceph-main/src/journal/JournalPlayer.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_JOURNAL_PLAYER_H
#define CEPH_JOURNAL_JOURNAL_PLAYER_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "common/Timer.h"
#include "journal/JournalMetadata.h"
#include "journal/ObjectPlayer.h"
#include "journal/Types.h"
#include "cls/journal/cls_journal_types.h"
#include <boost/none.hpp>
#include <boost/optional.hpp>
#include <map>
namespace journal {
class CacheManagerHandler;
class Entry;
class ReplayHandler;
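// Replays entries from a journal's splayed data objects: prefetches the
// object at each splay offset, skips already-committed entries, and hands
// back entries in tag/entry tid order, optionally watching for new entries
// during live replay.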
class JournalPlayer {
public:
typedef cls::journal::ObjectPosition ObjectPosition;
typedef cls::journal::ObjectPositions ObjectPositions;
typedef cls::journal::ObjectSetPosition ObjectSetPosition;
JournalPlayer(librados::IoCtx &ioctx, std::string_view object_oid_prefix,
ceph::ref_t<JournalMetadata> journal_metadata,
ReplayHandler* replay_handler,
CacheManagerHandler *cache_manager_handler);
~JournalPlayer();
void prefetch();
void prefetch_and_watch(double interval);
void shut_down(Context *on_finish);
bool try_pop_front(Entry *entry, uint64_t *commit_tid);
private:
typedef std::set<uint8_t> PrefetchSplayOffsets;
typedef std::map<uint8_t, ceph::ref_t<ObjectPlayer>> SplayedObjectPlayers;
typedef std::map<uint8_t, ObjectPosition> SplayedObjectPositions;
typedef std::set<uint64_t> ObjectNumbers;
enum State {
STATE_INIT,
STATE_WAITCACHE,
STATE_PREFETCH,
STATE_PLAYBACK,
STATE_ERROR
};
enum WatchStep {
WATCH_STEP_FETCH_CURRENT,
WATCH_STEP_FETCH_FIRST,
WATCH_STEP_ASSERT_ACTIVE
};
struct C_Fetch : public Context {
JournalPlayer *player;
uint64_t object_num;
C_Fetch(JournalPlayer *p, uint64_t o) : player(p), object_num(o) {
player->m_async_op_tracker.start_op();
}
~C_Fetch() override {
player->m_async_op_tracker.finish_op();
}
void finish(int r) override {
player->handle_fetched(object_num, r);
}
};
struct C_Watch : public Context {
JournalPlayer *player;
uint64_t object_num;
C_Watch(JournalPlayer *player, uint64_t object_num)
: player(player), object_num(object_num) {
player->m_async_op_tracker.start_op();
}
~C_Watch() override {
player->m_async_op_tracker.finish_op();
}
void finish(int r) override {
player->handle_watch(object_num, r);
}
};
struct CacheRebalanceHandler : public journal::CacheRebalanceHandler {
JournalPlayer *player;
CacheRebalanceHandler(JournalPlayer *player) : player(player) {
}
void handle_cache_rebalanced(uint64_t new_cache_bytes) override {
player->handle_cache_rebalanced(new_cache_bytes);
}
};
librados::IoCtx m_ioctx;
CephContext *m_cct = nullptr;
std::string m_object_oid_prefix;
ceph::ref_t<JournalMetadata> m_journal_metadata;
ReplayHandler* m_replay_handler;
CacheManagerHandler *m_cache_manager_handler;
std::string m_cache_name;
CacheRebalanceHandler m_cache_rebalance_handler;
uint64_t m_max_fetch_bytes;
AsyncOpTracker m_async_op_tracker;
mutable ceph::mutex m_lock = ceph::make_mutex("JournalPlayer::m_lock");
State m_state = STATE_INIT;
uint8_t m_splay_offset = 0;
bool m_watch_enabled = false;
bool m_watch_scheduled = false;
double m_watch_interval = 0;
WatchStep m_watch_step = WATCH_STEP_FETCH_CURRENT;
bool m_watch_prune_active_tag = false;
bool m_shut_down = false;
bool m_handler_notified = false;
ObjectNumbers m_fetch_object_numbers;
PrefetchSplayOffsets m_prefetch_splay_offsets;
SplayedObjectPlayers m_object_players;
bool m_commit_position_valid = false;
ObjectPosition m_commit_position;
SplayedObjectPositions m_commit_positions;
uint64_t m_active_set = 0;
boost::optional<uint64_t> m_active_tag_tid = boost::none;
boost::optional<uint64_t> m_prune_tag_tid = boost::none;
void advance_splay_object();
bool is_object_set_ready() const;
bool verify_playback_ready();
void prune_tag(uint64_t tag_tid);
void prune_active_tag(const boost::optional<uint64_t>& tag_tid);
ceph::ref_t<ObjectPlayer> get_object_player() const;
ceph::ref_t<ObjectPlayer> get_object_player(uint64_t object_number) const;
bool remove_empty_object_player(const ceph::ref_t<ObjectPlayer> &object_player);
void process_state(uint64_t object_number, int r);
int process_prefetch(uint64_t object_number);
int process_playback(uint64_t object_number);
void fetch(uint64_t object_num);
void fetch(const ceph::ref_t<ObjectPlayer> &object_player);
void handle_fetched(uint64_t object_num, int r);
void refetch(bool immediate);
void schedule_watch(bool immediate);
void handle_watch(uint64_t object_num, int r);
void handle_watch_assert_active(int r);
void notify_entries_available();
void notify_complete(int r);
void handle_cache_rebalanced(uint64_t new_cache_bytes);
};
} // namespace journal
#endif // CEPH_JOURNAL_JOURNAL_PLAYER_H
| 5,090 | 27.762712 | 82 |
h
|
null |
ceph-main/src/journal/JournalRecorder.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalRecorder.h"
#include "common/errno.h"
#include "journal/Entry.h"
#include "journal/Utils.h"
#include <atomic>
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "JournalRecorder: " << this << " " << __func__ \
<< ": "
using std::shared_ptr;
namespace journal {
namespace {
struct C_Flush : public Context {
ceph::ref_t<JournalMetadata> journal_metadata;
Context *on_finish;
std::atomic<int64_t> pending_flushes{0};
int ret_val = 0;
C_Flush(ceph::ref_t<JournalMetadata> _journal_metadata, Context *_on_finish,
size_t _pending_flushes)
: journal_metadata(std::move(_journal_metadata)),
on_finish(_on_finish),
pending_flushes(_pending_flushes) {
}
void complete(int r) override {
if (r < 0 && ret_val == 0) {
ret_val = r;
}
if (--pending_flushes == 0) {
      // ensure all prior callbacks have been flushed as well
journal_metadata->queue(on_finish, ret_val);
delete this;
}
}
void finish(int r) override {
}
};
} // anonymous namespace
JournalRecorder::JournalRecorder(librados::IoCtx &ioctx,
std::string_view object_oid_prefix,
ceph::ref_t<JournalMetadata> journal_metadata,
uint64_t max_in_flight_appends)
: m_object_oid_prefix(object_oid_prefix),
m_journal_metadata(std::move(journal_metadata)),
m_max_in_flight_appends(max_in_flight_appends),
m_listener(this),
m_object_handler(this),
m_current_set(m_journal_metadata->get_active_set()),
m_object_locks{ceph::make_lock_container<ceph::mutex>(
m_journal_metadata->get_splay_width(), [](const size_t splay_offset) {
return ceph::make_mutex("ObjectRecorder::m_lock::" +
std::to_string(splay_offset));
})}
{
std::lock_guard locker{m_lock};
m_ioctx.dup(ioctx);
m_cct = reinterpret_cast<CephContext*>(m_ioctx.cct());
uint8_t splay_width = m_journal_metadata->get_splay_width();
for (uint8_t splay_offset = 0; splay_offset < splay_width; ++splay_offset) {
uint64_t object_number = splay_offset + (m_current_set * splay_width);
std::lock_guard locker{m_object_locks[splay_offset]};
m_object_ptrs[splay_offset] = create_object_recorder(
object_number, &m_object_locks[splay_offset]);
}
m_journal_metadata->add_listener(&m_listener);
}
JournalRecorder::~JournalRecorder() {
m_journal_metadata->remove_listener(&m_listener);
std::lock_guard locker{m_lock};
ceph_assert(m_in_flight_advance_sets == 0);
ceph_assert(m_in_flight_object_closes == 0);
}
void JournalRecorder::shut_down(Context *on_safe) {
on_safe = new LambdaContext(
[this, on_safe](int r) {
Context *ctx = nullptr;
{
std::lock_guard locker{m_lock};
if (m_in_flight_advance_sets != 0) {
ceph_assert(m_on_object_set_advanced == nullptr);
m_on_object_set_advanced = new LambdaContext(
[on_safe, r](int) {
on_safe->complete(r);
});
} else {
ctx = on_safe;
}
}
if (ctx != nullptr) {
ctx->complete(r);
}
});
flush(on_safe);
}
void JournalRecorder::set_append_batch_options(int flush_interval,
uint64_t flush_bytes,
double flush_age) {
ldout(m_cct, 5) << "flush_interval=" << flush_interval << ", "
<< "flush_bytes=" << flush_bytes << ", "
<< "flush_age=" << flush_age << dendl;
std::lock_guard locker{m_lock};
m_flush_interval = flush_interval;
m_flush_bytes = flush_bytes;
m_flush_age = flush_age;
uint8_t splay_width = m_journal_metadata->get_splay_width();
for (uint8_t splay_offset = 0; splay_offset < splay_width; ++splay_offset) {
std::lock_guard object_locker{m_object_locks[splay_offset]};
auto object_recorder = get_object(splay_offset);
object_recorder->set_append_batch_options(flush_interval, flush_bytes,
flush_age);
}
}
Future JournalRecorder::append(uint64_t tag_tid,
const bufferlist &payload_bl) {
ldout(m_cct, 20) << "tag_tid=" << tag_tid << dendl;
m_lock.lock();
uint64_t entry_tid = m_journal_metadata->allocate_entry_tid(tag_tid);
uint8_t splay_width = m_journal_metadata->get_splay_width();
uint8_t splay_offset = entry_tid % splay_width;
auto object_ptr = get_object(splay_offset);
uint64_t commit_tid = m_journal_metadata->allocate_commit_tid(
object_ptr->get_object_number(), tag_tid, entry_tid);
auto future = ceph::make_ref<FutureImpl>(tag_tid, entry_tid, commit_tid);
future->init(m_prev_future);
m_prev_future = future;
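  // note: the per-object lock is acquired before m_lock is released so that
  // appends to this splay offset reach the object recorder in allocation
  // (entry tid) order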
m_object_locks[splay_offset].lock();
m_lock.unlock();
bufferlist entry_bl;
encode(Entry(future->get_tag_tid(), future->get_entry_tid(), payload_bl),
entry_bl);
ceph_assert(entry_bl.length() <= m_journal_metadata->get_object_size());
bool object_full = object_ptr->append({{future, entry_bl}});
m_object_locks[splay_offset].unlock();
if (object_full) {
ldout(m_cct, 10) << "object " << object_ptr->get_oid() << " now full"
<< dendl;
std::lock_guard l{m_lock};
close_and_advance_object_set(object_ptr->get_object_number() / splay_width);
}
return Future(future);
}
void JournalRecorder::flush(Context *on_safe) {
ldout(m_cct, 20) << dendl;
C_Flush *ctx;
{
std::lock_guard locker{m_lock};
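    // the extra pending flush is completed by this thread below (after
    // dropping the lock), so on_safe only fires once every per-object
    // flush has also completed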
ctx = new C_Flush(m_journal_metadata, on_safe, m_object_ptrs.size() + 1);
for (const auto& p : m_object_ptrs) {
p.second->flush(ctx);
}
}
// avoid holding the lock in case there is nothing to flush
ctx->complete(0);
}
ceph::ref_t<ObjectRecorder> JournalRecorder::get_object(uint8_t splay_offset) {
ceph_assert(ceph_mutex_is_locked(m_lock));
const auto& object_recorder = m_object_ptrs.at(splay_offset);
ceph_assert(object_recorder);
return object_recorder;
}
void JournalRecorder::close_and_advance_object_set(uint64_t object_set) {
ceph_assert(ceph_mutex_is_locked(m_lock));
// entry overflow from open object
if (m_current_set != object_set) {
ldout(m_cct, 20) << "close already in-progress" << dendl;
return;
}
// we shouldn't overflow upon append if already closed and we
// shouldn't receive an overflowed callback if already closed
ceph_assert(m_in_flight_advance_sets == 0);
ceph_assert(m_in_flight_object_closes == 0);
uint64_t active_set = m_journal_metadata->get_active_set();
ceph_assert(m_current_set == active_set);
++m_current_set;
++m_in_flight_advance_sets;
ldout(m_cct, 10) << "closing active object set " << object_set << dendl;
if (close_object_set(m_current_set)) {
advance_object_set();
}
}
void JournalRecorder::advance_object_set() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_in_flight_object_closes == 0);
ldout(m_cct, 10) << "advance to object set " << m_current_set << dendl;
m_journal_metadata->set_active_set(m_current_set, new C_AdvanceObjectSet(
this));
}
void JournalRecorder::handle_advance_object_set(int r) {
Context *on_object_set_advanced = nullptr;
{
std::lock_guard locker{m_lock};
ldout(m_cct, 20) << __func__ << ": r=" << r << dendl;
ceph_assert(m_in_flight_advance_sets > 0);
--m_in_flight_advance_sets;
if (r < 0 && r != -ESTALE) {
lderr(m_cct) << "failed to advance object set: " << cpp_strerror(r)
<< dendl;
}
if (m_in_flight_advance_sets == 0 && m_in_flight_object_closes == 0) {
open_object_set();
std::swap(on_object_set_advanced, m_on_object_set_advanced);
}
}
if (on_object_set_advanced != nullptr) {
on_object_set_advanced->complete(0);
}
}
void JournalRecorder::open_object_set() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ldout(m_cct, 10) << "opening object set " << m_current_set << dendl;
uint8_t splay_width = m_journal_metadata->get_splay_width();
bool overflowed = false;
auto lockers{lock_object_recorders()};
for (const auto& p : m_object_ptrs) {
const auto& object_recorder = p.second;
uint64_t object_number = object_recorder->get_object_number();
if (object_number / splay_width != m_current_set) {
ceph_assert(object_recorder->is_closed());
// ready to close object and open object in active set
if (create_next_object_recorder(object_recorder)) {
overflowed = true;
}
}
}
lockers.clear();
if (overflowed) {
ldout(m_cct, 10) << "object set " << m_current_set << " now full" << dendl;
ldout(m_cct, 10) << "" << dendl;
close_and_advance_object_set(m_current_set);
}
}
bool JournalRecorder::close_object_set(uint64_t active_set) {
ldout(m_cct, 10) << "active_set=" << active_set << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
// object recorders will invoke overflow handler as they complete
// closing the object to ensure correct order of future appends
uint8_t splay_width = m_journal_metadata->get_splay_width();
auto lockers{lock_object_recorders()};
for (const auto& p : m_object_ptrs) {
const auto& object_recorder = p.second;
if (object_recorder->get_object_number() / splay_width != active_set) {
ldout(m_cct, 10) << "closing object " << object_recorder->get_oid()
<< dendl;
// flush out all queued appends and hold future appends
if (!object_recorder->close()) {
++m_in_flight_object_closes;
ldout(m_cct, 10) << "object " << object_recorder->get_oid() << " "
<< "close in-progress" << dendl;
} else {
ldout(m_cct, 10) << "object " << object_recorder->get_oid() << " closed"
<< dendl;
}
}
}
return (m_in_flight_object_closes == 0);
}
ceph::ref_t<ObjectRecorder> JournalRecorder::create_object_recorder(
uint64_t object_number, ceph::mutex* lock) {
ldout(m_cct, 10) << "object_number=" << object_number << dendl;
auto object_recorder = ceph::make_ref<ObjectRecorder>(
m_ioctx, utils::get_object_name(m_object_oid_prefix, object_number),
object_number, lock, m_journal_metadata->get_work_queue(),
&m_object_handler, m_journal_metadata->get_order(),
m_max_in_flight_appends);
object_recorder->set_append_batch_options(m_flush_interval, m_flush_bytes,
m_flush_age);
return object_recorder;
}
bool JournalRecorder::create_next_object_recorder(
ceph::ref_t<ObjectRecorder> object_recorder) {
ceph_assert(ceph_mutex_is_locked(m_lock));
uint64_t object_number = object_recorder->get_object_number();
uint8_t splay_width = m_journal_metadata->get_splay_width();
uint8_t splay_offset = object_number % splay_width;
ldout(m_cct, 10) << "object_number=" << object_number << dendl;
ceph_assert(ceph_mutex_is_locked(m_object_locks[splay_offset]));
auto new_object_recorder = create_object_recorder(
(m_current_set * splay_width) + splay_offset, &m_object_locks[splay_offset]);
ldout(m_cct, 10) << "old oid=" << object_recorder->get_oid() << ", "
<< "new oid=" << new_object_recorder->get_oid() << dendl;
AppendBuffers append_buffers;
object_recorder->claim_append_buffers(&append_buffers);
// update the commit record to point to the correct object number
for (auto &append_buffer : append_buffers) {
m_journal_metadata->overflow_commit_tid(
append_buffer.first->get_commit_tid(),
new_object_recorder->get_object_number());
}
bool object_full = new_object_recorder->append(std::move(append_buffers));
if (object_full) {
ldout(m_cct, 10) << "object " << new_object_recorder->get_oid() << " "
<< "now full" << dendl;
}
m_object_ptrs[splay_offset] = std::move(new_object_recorder);
return object_full;
}
void JournalRecorder::handle_update() {
std::lock_guard locker{m_lock};
uint64_t active_set = m_journal_metadata->get_active_set();
if (m_current_set < active_set) {
// peer journal client advanced the active set
ldout(m_cct, 10) << "current_set=" << m_current_set << ", "
<< "active_set=" << active_set << dendl;
uint64_t current_set = m_current_set;
m_current_set = active_set;
if (m_in_flight_advance_sets == 0 && m_in_flight_object_closes == 0) {
ldout(m_cct, 10) << "closing current object set " << current_set << dendl;
if (close_object_set(active_set)) {
open_object_set();
}
}
}
}
void JournalRecorder::handle_closed(ObjectRecorder *object_recorder) {
ldout(m_cct, 10) << object_recorder->get_oid() << dendl;
std::lock_guard locker{m_lock};
uint64_t object_number = object_recorder->get_object_number();
uint8_t splay_width = m_journal_metadata->get_splay_width();
uint8_t splay_offset = object_number % splay_width;
auto& active_object_recorder = m_object_ptrs.at(splay_offset);
ceph_assert(active_object_recorder->get_object_number() == object_number);
ceph_assert(m_in_flight_object_closes > 0);
--m_in_flight_object_closes;
// object closed after advance active set committed
ldout(m_cct, 10) << "object " << active_object_recorder->get_oid()
<< " closed" << dendl;
if (m_in_flight_object_closes == 0) {
if (m_in_flight_advance_sets == 0) {
// peer forced closing of object set
open_object_set();
} else {
// local overflow advanced object set
advance_object_set();
}
}
}
void JournalRecorder::handle_overflow(ObjectRecorder *object_recorder) {
ldout(m_cct, 10) << object_recorder->get_oid() << dendl;
std::lock_guard locker{m_lock};
uint64_t object_number = object_recorder->get_object_number();
uint8_t splay_width = m_journal_metadata->get_splay_width();
uint8_t splay_offset = object_number % splay_width;
auto& active_object_recorder = m_object_ptrs.at(splay_offset);
ceph_assert(active_object_recorder->get_object_number() == object_number);
ldout(m_cct, 10) << "object " << active_object_recorder->get_oid()
<< " overflowed" << dendl;
close_and_advance_object_set(object_number / splay_width);
}
JournalRecorder::Lockers JournalRecorder::lock_object_recorders() {
Lockers lockers;
lockers.reserve(m_object_ptrs.size());
for (auto& lock : m_object_locks) {
lockers.emplace_back(lock);
}
return lockers;
}
} // namespace journal
| 14,655 | 32.691954 | 82 |
cc
|
null |
ceph-main/src/journal/JournalRecorder.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_JOURNAL_RECORDER_H
#define CEPH_JOURNAL_JOURNAL_RECORDER_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include "common/containers.h"
#include "common/Timer.h"
#include "journal/Future.h"
#include "journal/FutureImpl.h"
#include "journal/JournalMetadata.h"
#include "journal/ObjectRecorder.h"
#include <map>
#include <string>
namespace journal {
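// Appends journal entries across the splayed set of journal data objects,
// closing and advancing the active object set when an object fills up or
// overflows.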
class JournalRecorder {
public:
JournalRecorder(librados::IoCtx &ioctx, std::string_view object_oid_prefix,
ceph::ref_t<JournalMetadata> journal_metadata,
uint64_t max_in_flight_appends);
~JournalRecorder();
void shut_down(Context *on_safe);
void set_append_batch_options(int flush_interval, uint64_t flush_bytes,
double flush_age);
Future append(uint64_t tag_tid, const bufferlist &bl);
void flush(Context *on_safe);
ceph::ref_t<ObjectRecorder> get_object(uint8_t splay_offset);
private:
typedef std::map<uint8_t, ceph::ref_t<ObjectRecorder>> ObjectRecorderPtrs;
typedef std::vector<std::unique_lock<ceph::mutex>> Lockers;
struct Listener : public JournalMetadataListener {
JournalRecorder *journal_recorder;
Listener(JournalRecorder *_journal_recorder)
: journal_recorder(_journal_recorder) {}
void handle_update(JournalMetadata *) override {
journal_recorder->handle_update();
}
};
struct ObjectHandler : public ObjectRecorder::Handler {
JournalRecorder *journal_recorder;
ObjectHandler(JournalRecorder *_journal_recorder)
: journal_recorder(_journal_recorder) {
}
void closed(ObjectRecorder *object_recorder) override {
journal_recorder->handle_closed(object_recorder);
}
void overflow(ObjectRecorder *object_recorder) override {
journal_recorder->handle_overflow(object_recorder);
}
};
struct C_AdvanceObjectSet : public Context {
JournalRecorder *journal_recorder;
C_AdvanceObjectSet(JournalRecorder *_journal_recorder)
: journal_recorder(_journal_recorder) {
}
void finish(int r) override {
journal_recorder->handle_advance_object_set(r);
}
};
librados::IoCtx m_ioctx;
CephContext *m_cct = nullptr;
std::string m_object_oid_prefix;
ceph::ref_t<JournalMetadata> m_journal_metadata;
uint32_t m_flush_interval = 0;
uint64_t m_flush_bytes = 0;
double m_flush_age = 0;
uint64_t m_max_in_flight_appends;
Listener m_listener;
ObjectHandler m_object_handler;
ceph::mutex m_lock = ceph::make_mutex("JournalerRecorder::m_lock");
uint32_t m_in_flight_advance_sets = 0;
uint32_t m_in_flight_object_closes = 0;
uint64_t m_current_set;
ObjectRecorderPtrs m_object_ptrs;
ceph::containers::tiny_vector<ceph::mutex> m_object_locks;
ceph::ref_t<FutureImpl> m_prev_future;
Context *m_on_object_set_advanced = nullptr;
void open_object_set();
bool close_object_set(uint64_t active_set);
void advance_object_set();
void handle_advance_object_set(int r);
void close_and_advance_object_set(uint64_t object_set);
ceph::ref_t<ObjectRecorder> create_object_recorder(uint64_t object_number,
ceph::mutex* lock);
bool create_next_object_recorder(ceph::ref_t<ObjectRecorder> object_recorder);
void handle_update();
void handle_closed(ObjectRecorder *object_recorder);
void handle_overflow(ObjectRecorder *object_recorder);
Lockers lock_object_recorders();
};
} // namespace journal
#endif // CEPH_JOURNAL_JOURNAL_RECORDER_H
| 3,690 | 27.612403 | 80 |
h
|
null |
ceph-main/src/journal/JournalTrimmer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalTrimmer.h"
#include "journal/Utils.h"
#include "common/Cond.h"
#include "common/errno.h"
#include <limits>
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "JournalTrimmer: " << this << " "
namespace journal {
struct JournalTrimmer::C_RemoveSet : public Context {
JournalTrimmer *journal_trimmer;
uint64_t object_set;
ceph::mutex lock = ceph::make_mutex("JournalTrimmer::m_lock");
uint32_t refs;
int return_value;
C_RemoveSet(JournalTrimmer *_journal_trimmer, uint64_t _object_set,
uint8_t _splay_width);
void complete(int r) override;
void finish(int r) override {
journal_trimmer->handle_set_removed(r, object_set);
journal_trimmer->m_async_op_tracker.finish_op();
}
};
JournalTrimmer::JournalTrimmer(librados::IoCtx &ioctx,
const std::string &object_oid_prefix,
const ceph::ref_t<JournalMetadata>& journal_metadata)
: m_cct(NULL), m_object_oid_prefix(object_oid_prefix),
m_journal_metadata(journal_metadata), m_metadata_listener(this),
m_remove_set_pending(false),
m_remove_set(0), m_remove_set_ctx(NULL) {
m_ioctx.dup(ioctx);
m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
m_journal_metadata->add_listener(&m_metadata_listener);
}
JournalTrimmer::~JournalTrimmer() {
ceph_assert(m_shutdown);
}
void JournalTrimmer::shut_down(Context *on_finish) {
ldout(m_cct, 20) << __func__ << dendl;
{
std::lock_guard locker{m_lock};
ceph_assert(!m_shutdown);
m_shutdown = true;
}
m_journal_metadata->remove_listener(&m_metadata_listener);
// chain the shut down sequence (reverse order)
on_finish = new LambdaContext([this, on_finish](int r) {
m_async_op_tracker.wait_for_ops(on_finish);
});
m_journal_metadata->flush_commit_position(on_finish);
}
void JournalTrimmer::remove_objects(bool force, Context *on_finish) {
ldout(m_cct, 20) << __func__ << dendl;
on_finish = new LambdaContext([this, force, on_finish](int r) {
std::lock_guard locker{m_lock};
    if (m_remove_set_pending) {
      on_finish->complete(-EBUSY);
      return;
    }
if (!force) {
JournalMetadata::RegisteredClients registered_clients;
m_journal_metadata->get_registered_clients(®istered_clients);
if (registered_clients.size() == 0) {
on_finish->complete(-EINVAL);
return;
} else if (registered_clients.size() > 1) {
on_finish->complete(-EBUSY);
return;
}
}
m_remove_set = std::numeric_limits<uint64_t>::max();
m_remove_set_pending = true;
m_remove_set_ctx = on_finish;
remove_set(m_journal_metadata->get_minimum_set());
});
m_async_op_tracker.wait_for_ops(on_finish);
}
void JournalTrimmer::committed(uint64_t commit_tid) {
ldout(m_cct, 20) << __func__ << ": commit_tid=" << commit_tid << dendl;
m_journal_metadata->committed(commit_tid,
m_create_commit_position_safe_context);
}
void JournalTrimmer::trim_objects(uint64_t minimum_set) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ldout(m_cct, 20) << __func__ << ": min_set=" << minimum_set << dendl;
if (minimum_set <= m_journal_metadata->get_minimum_set()) {
return;
}
if (m_remove_set_pending) {
m_remove_set = std::max(m_remove_set, minimum_set);
return;
}
m_remove_set = minimum_set;
m_remove_set_pending = true;
remove_set(m_journal_metadata->get_minimum_set());
}
void JournalTrimmer::remove_set(uint64_t object_set) {
ceph_assert(ceph_mutex_is_locked(m_lock));
m_async_op_tracker.start_op();
uint8_t splay_width = m_journal_metadata->get_splay_width();
C_RemoveSet *ctx = new C_RemoveSet(this, object_set, splay_width);
ldout(m_cct, 20) << __func__ << ": removing object set " << object_set
<< dendl;
for (uint64_t object_number = object_set * splay_width;
object_number < (object_set + 1) * splay_width;
++object_number) {
std::string oid = utils::get_object_name(m_object_oid_prefix,
object_number);
ldout(m_cct, 20) << "removing journal object " << oid << dendl;
auto comp =
librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
int r = m_ioctx.aio_remove(oid, comp,
CEPH_OSD_FLAG_FULL_FORCE | CEPH_OSD_FLAG_FULL_TRY);
ceph_assert(r == 0);
comp->release();
}
}
void JournalTrimmer::handle_metadata_updated() {
ldout(m_cct, 20) << __func__ << dendl;
std::lock_guard locker{m_lock};
JournalMetadata::RegisteredClients registered_clients;
m_journal_metadata->get_registered_clients(®istered_clients);
uint8_t splay_width = m_journal_metadata->get_splay_width();
uint64_t minimum_set = m_journal_metadata->get_minimum_set();
uint64_t active_set = m_journal_metadata->get_active_set();
uint64_t minimum_commit_set = active_set;
std::string minimum_client_id;
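  // find the oldest object set still required by a connected client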
for (auto &client : registered_clients) {
if (client.state == cls::journal::CLIENT_STATE_DISCONNECTED) {
continue;
}
if (client.commit_position.object_positions.empty()) {
// client hasn't recorded any commits
minimum_commit_set = minimum_set;
minimum_client_id = client.id;
break;
}
for (auto &position : client.commit_position.object_positions) {
uint64_t object_set = position.object_number / splay_width;
if (object_set < minimum_commit_set) {
minimum_client_id = client.id;
minimum_commit_set = object_set;
}
}
}
if (minimum_commit_set > minimum_set) {
trim_objects(minimum_commit_set);
} else {
ldout(m_cct, 20) << "object set " << minimum_commit_set << " still "
<< "in-use by client " << minimum_client_id << dendl;
}
}
void JournalTrimmer::handle_set_removed(int r, uint64_t object_set) {
ldout(m_cct, 20) << __func__ << ": r=" << r << ", set=" << object_set << ", "
<< "trim=" << m_remove_set << dendl;
std::lock_guard locker{m_lock};
m_remove_set_pending = false;
if (r == -ENOENT) {
// no objects within the set existed
r = 0;
}
if (r == 0) {
// advance the minimum set to the next set
m_journal_metadata->set_minimum_set(object_set + 1);
uint64_t active_set = m_journal_metadata->get_active_set();
uint64_t minimum_set = m_journal_metadata->get_minimum_set();
if (m_remove_set > minimum_set && minimum_set <= active_set) {
m_remove_set_pending = true;
remove_set(minimum_set);
}
}
if (m_remove_set_ctx != nullptr && !m_remove_set_pending) {
ldout(m_cct, 20) << "completing remove set context" << dendl;
m_remove_set_ctx->complete(r);
m_remove_set_ctx = nullptr;
}
}
JournalTrimmer::C_RemoveSet::C_RemoveSet(JournalTrimmer *_journal_trimmer,
uint64_t _object_set,
uint8_t _splay_width)
: journal_trimmer(_journal_trimmer), object_set(_object_set),
lock(ceph::make_mutex(utils::unique_lock_name("C_RemoveSet::lock", this))),
refs(_splay_width), return_value(-ENOENT) {
}
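// Aggregate the per-object removal results: the first real error wins, and
// -ENOENT is reported only if every object in the set was already absent
// (which the caller treats as success).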
void JournalTrimmer::C_RemoveSet::complete(int r) {
lock.lock();
if (r < 0 && r != -ENOENT &&
(return_value == -ENOENT || return_value == 0)) {
return_value = r;
} else if (r == 0 && return_value == -ENOENT) {
return_value = 0;
}
if (--refs == 0) {
finish(return_value);
lock.unlock();
delete this;
} else {
lock.unlock();
}
}
} // namespace journal
| 7,743 | 30.225806 | 84 |
cc
|
null |
ceph-main/src/journal/JournalTrimmer.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_JOURNAL_TRIMMER_H
#define CEPH_JOURNAL_JOURNAL_TRIMMER_H
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "include/Context.h"
#include "common/AsyncOpTracker.h"
#include "journal/JournalMetadata.h"
#include "cls/journal/cls_journal_types.h"
#include <functional>
struct Context;
namespace journal {
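// Removes whole journal object sets once every registered (connected)
// client has committed past them, advancing the journal's minimum set.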
class JournalTrimmer {
public:
typedef cls::journal::ObjectSetPosition ObjectSetPosition;
JournalTrimmer(librados::IoCtx &ioctx, const std::string &object_oid_prefix,
const ceph::ref_t<JournalMetadata> &journal_metadata);
~JournalTrimmer();
void shut_down(Context *on_finish);
void remove_objects(bool force, Context *on_finish);
void committed(uint64_t commit_tid);
private:
typedef std::function<Context*()> CreateContext;
struct MetadataListener : public JournalMetadataListener {
JournalTrimmer *journal_trimmer;
MetadataListener(JournalTrimmer *journal_trimmer)
: journal_trimmer(journal_trimmer) {
}
void handle_update(JournalMetadata *) override {
journal_trimmer->handle_metadata_updated();
}
};
struct C_CommitPositionSafe : public Context {
JournalTrimmer *journal_trimmer;
C_CommitPositionSafe(JournalTrimmer *_journal_trimmer)
: journal_trimmer(_journal_trimmer) {
journal_trimmer->m_async_op_tracker.start_op();
}
~C_CommitPositionSafe() override {
journal_trimmer->m_async_op_tracker.finish_op();
}
void finish(int r) override {
}
};
struct C_RemoveSet;
librados::IoCtx m_ioctx;
CephContext *m_cct;
std::string m_object_oid_prefix;
ceph::ref_t<JournalMetadata> m_journal_metadata;
MetadataListener m_metadata_listener;
AsyncOpTracker m_async_op_tracker;
ceph::mutex m_lock = ceph::make_mutex("JournalTrimmer::m_lock");
bool m_remove_set_pending;
uint64_t m_remove_set;
Context *m_remove_set_ctx;
bool m_shutdown = false;
CreateContext m_create_commit_position_safe_context = [this]() {
return new C_CommitPositionSafe(this);
};
void trim_objects(uint64_t minimum_set);
void remove_set(uint64_t object_set);
void handle_metadata_updated();
void handle_set_removed(int r, uint64_t object_set);
};
} // namespace journal
#endif // CEPH_JOURNAL_JOURNAL_TRIMMER_H
| 2,407 | 24.617021 | 78 |
h
|
null |
ceph-main/src/journal/Journaler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/Journaler.h"
#include "include/stringify.h"
#include "common/errno.h"
#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "journal/Entry.h"
#include "journal/FutureImpl.h"
#include "journal/JournalMetadata.h"
#include "journal/JournalPlayer.h"
#include "journal/JournalRecorder.h"
#include "journal/JournalTrimmer.h"
#include "journal/ReplayEntry.h"
#include "journal/ReplayHandler.h"
#include "cls/journal/cls_journal_client.h"
#include "cls/journal/cls_journal_types.h"
#include "Utils.h"
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "Journaler: " << this << " "
namespace journal {
namespace {
static const std::string JOURNAL_HEADER_PREFIX = "journal.";
static const std::string JOURNAL_OBJECT_PREFIX = "journal_data.";
} // anonymous namespace
using namespace cls::journal;
using utils::rados_ctx_callback;
std::string Journaler::header_oid(const std::string &journal_id) {
return JOURNAL_HEADER_PREFIX + journal_id;
}
std::string Journaler::object_oid_prefix(int pool_id,
const std::string &journal_id) {
return JOURNAL_OBJECT_PREFIX + stringify(pool_id) + "." + journal_id + ".";
}
Journaler::Threads::Threads(CephContext *cct) {
thread_pool = new ThreadPool(cct, "Journaler::thread_pool", "tp_journal", 1);
thread_pool->start();
work_queue = new ContextWQ("Journaler::work_queue",
ceph::make_timespan(60),
thread_pool);
timer = new SafeTimer(cct, timer_lock, true);
timer->init();
}
Journaler::Threads::~Threads() {
{
std::lock_guard timer_locker{timer_lock};
timer->shutdown();
}
delete timer;
timer = nullptr;
work_queue->drain();
delete work_queue;
work_queue = nullptr;
thread_pool->stop();
delete thread_pool;
thread_pool = nullptr;
}
Journaler::Journaler(librados::IoCtx &header_ioctx,
const std::string &journal_id,
const std::string &client_id, const Settings &settings,
CacheManagerHandler *cache_manager_handler)
: m_threads(new Threads(reinterpret_cast<CephContext*>(header_ioctx.cct()))),
m_client_id(client_id), m_cache_manager_handler(cache_manager_handler) {
set_up(m_threads->work_queue, m_threads->timer, &m_threads->timer_lock,
header_ioctx, journal_id, settings);
}
Journaler::Journaler(ContextWQ *work_queue, SafeTimer *timer,
ceph::mutex *timer_lock, librados::IoCtx &header_ioctx,
const std::string &journal_id,
const std::string &client_id, const Settings &settings,
CacheManagerHandler *cache_manager_handler)
: m_client_id(client_id), m_cache_manager_handler(cache_manager_handler) {
set_up(work_queue, timer, timer_lock, header_ioctx, journal_id,
settings);
}
void Journaler::set_up(ContextWQ *work_queue, SafeTimer *timer,
ceph::mutex *timer_lock, librados::IoCtx &header_ioctx,
const std::string &journal_id,
const Settings &settings) {
m_header_ioctx.dup(header_ioctx);
m_cct = reinterpret_cast<CephContext *>(m_header_ioctx.cct());
m_header_oid = header_oid(journal_id);
m_object_oid_prefix = object_oid_prefix(m_header_ioctx.get_id(), journal_id);
m_metadata = ceph::make_ref<JournalMetadata>(work_queue, timer, timer_lock,
m_header_ioctx, m_header_oid, m_client_id,
settings);
}
Journaler::~Journaler() {
if (m_metadata != nullptr) {
ceph_assert(!m_metadata->is_initialized());
if (!m_initialized) {
// never initialized -- ensure any in-flight ops are complete
// since we wouldn't expect shut_down to be invoked
m_metadata->wait_for_ops();
}
m_metadata.reset();
}
ceph_assert(m_trimmer == nullptr);
ceph_assert(m_player == nullptr);
ceph_assert(m_recorder == nullptr);
delete m_threads;
m_threads = nullptr;
}
void Journaler::exists(Context *on_finish) const {
librados::ObjectReadOperation op;
op.stat(nullptr, nullptr, nullptr);
librados::AioCompletion *comp =
librados::Rados::aio_create_completion(on_finish, rados_ctx_callback);
int r = m_header_ioctx.aio_operate(m_header_oid, comp, &op, nullptr);
ceph_assert(r == 0);
comp->release();
}
void Journaler::init(Context *on_init) {
m_initialized = true;
m_metadata->init(new C_InitJournaler(this, on_init));
}
int Journaler::init_complete() {
int64_t pool_id = m_metadata->get_pool_id();
if (pool_id < 0 || pool_id == m_header_ioctx.get_id()) {
ldout(m_cct, 20) << "using image pool for journal data" << dendl;
m_data_ioctx.dup(m_header_ioctx);
} else {
ldout(m_cct, 20) << "using pool id=" << pool_id << " for journal data"
<< dendl;
librados::Rados rados(m_header_ioctx);
int r = rados.ioctx_create2(pool_id, m_data_ioctx);
if (r < 0) {
if (r == -ENOENT) {
ldout(m_cct, 1) << "pool id=" << pool_id << " no longer exists"
<< dendl;
}
return r;
}
}
m_trimmer = new JournalTrimmer(m_data_ioctx, m_object_oid_prefix,
m_metadata);
return 0;
}
void Journaler::shut_down() {
C_SaferCond ctx;
shut_down(&ctx);
ctx.wait();
}
void Journaler::shut_down(Context *on_finish) {
ceph_assert(m_player == nullptr);
ceph_assert(m_recorder == nullptr);
auto metadata = std::move(m_metadata);
ceph_assert(metadata);
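  // capturing the metadata reference keeps it alive until shut down completes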
on_finish = new LambdaContext([metadata, on_finish](int r) {
on_finish->complete(0);
});
JournalTrimmer *trimmer = nullptr;
std::swap(trimmer, m_trimmer);
if (!trimmer) {
metadata->shut_down(on_finish);
return;
}
on_finish = new LambdaContext([trimmer, metadata, on_finish](int r) {
delete trimmer;
metadata->shut_down(on_finish);
});
trimmer->shut_down(on_finish);
}
bool Journaler::is_initialized() const {
return m_metadata->is_initialized();
}
void Journaler::get_immutable_metadata(uint8_t *order, uint8_t *splay_width,
int64_t *pool_id, Context *on_finish) {
m_metadata->get_immutable_metadata(order, splay_width, pool_id, on_finish);
}
void Journaler::get_mutable_metadata(uint64_t *minimum_set,
uint64_t *active_set,
RegisteredClients *clients,
Context *on_finish) {
m_metadata->get_mutable_metadata(minimum_set, active_set, clients, on_finish);
}
void Journaler::create(uint8_t order, uint8_t splay_width,
int64_t pool_id, Context *on_finish) {
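  // the order is the log2 of the journal data object size, so this restricts
  // data objects to roughly 4 KiB (2^12) through 64 MiB (2^26)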
if (order > 26 || order < 12) {
lderr(m_cct) << "order must be in the range [12, 26]" << dendl;
on_finish->complete(-EDOM);
return;
}
if (splay_width == 0) {
on_finish->complete(-EINVAL);
return;
}
ldout(m_cct, 5) << "creating new journal: " << m_header_oid << dendl;
librados::ObjectWriteOperation op;
client::create(&op, order, splay_width, pool_id);
librados::AioCompletion *comp =
librados::Rados::aio_create_completion(on_finish, rados_ctx_callback);
int r = m_header_ioctx.aio_operate(m_header_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
void Journaler::remove(bool force, Context *on_finish) {
// chain journal removal (reverse order)
on_finish = new LambdaContext([this, on_finish](int r) {
librados::AioCompletion *comp = librados::Rados::aio_create_completion(
on_finish, utils::rados_ctx_callback);
r = m_header_ioctx.aio_remove(m_header_oid, comp);
ceph_assert(r == 0);
comp->release();
});
on_finish = new LambdaContext([this, force, on_finish](int r) {
m_trimmer->remove_objects(force, on_finish);
});
m_metadata->shut_down(on_finish);
}
void Journaler::flush_commit_position(Context *on_safe) {
m_metadata->flush_commit_position(on_safe);
}
void Journaler::add_listener(JournalMetadataListener *listener) {
m_metadata->add_listener(listener);
}
void Journaler::remove_listener(JournalMetadataListener *listener) {
m_metadata->remove_listener(listener);
}
int Journaler::register_client(const bufferlist &data) {
C_SaferCond cond;
register_client(data, &cond);
return cond.wait();
}
int Journaler::unregister_client() {
C_SaferCond cond;
unregister_client(&cond);
return cond.wait();
}
void Journaler::register_client(const bufferlist &data, Context *on_finish) {
return m_metadata->register_client(data, on_finish);
}
void Journaler::update_client(const bufferlist &data, Context *on_finish) {
return m_metadata->update_client(data, on_finish);
}
void Journaler::unregister_client(Context *on_finish) {
return m_metadata->unregister_client(on_finish);
}
void Journaler::get_client(const std::string &client_id,
cls::journal::Client *client,
Context *on_finish) {
m_metadata->get_client(client_id, client, on_finish);
}
int Journaler::get_cached_client(const std::string &client_id,
cls::journal::Client *client) {
RegisteredClients clients;
m_metadata->get_registered_clients(&clients);
auto it = clients.find({client_id, {}});
if (it == clients.end()) {
return -ENOENT;
}
*client = *it;
return 0;
}
void Journaler::allocate_tag(const bufferlist &data, cls::journal::Tag *tag,
Context *on_finish) {
m_metadata->allocate_tag(cls::journal::Tag::TAG_CLASS_NEW, data, tag,
on_finish);
}
void Journaler::allocate_tag(uint64_t tag_class, const bufferlist &data,
cls::journal::Tag *tag, Context *on_finish) {
m_metadata->allocate_tag(tag_class, data, tag, on_finish);
}
void Journaler::get_tag(uint64_t tag_tid, Tag *tag, Context *on_finish) {
m_metadata->get_tag(tag_tid, tag, on_finish);
}
void Journaler::get_tags(uint64_t tag_class, Tags *tags, Context *on_finish) {
m_metadata->get_tags(0, tag_class, tags, on_finish);
}
void Journaler::get_tags(uint64_t start_after_tag_tid, uint64_t tag_class,
Tags *tags, Context *on_finish) {
m_metadata->get_tags(start_after_tag_tid, tag_class, tags, on_finish);
}
void Journaler::start_replay(ReplayHandler* replay_handler) {
create_player(replay_handler);
m_player->prefetch();
}
void Journaler::start_live_replay(ReplayHandler* replay_handler,
double interval) {
create_player(replay_handler);
m_player->prefetch_and_watch(interval);
}
bool Journaler::try_pop_front(ReplayEntry *replay_entry,
uint64_t *tag_tid) {
ceph_assert(m_player != nullptr);
Entry entry;
uint64_t commit_tid;
if (!m_player->try_pop_front(&entry, &commit_tid)) {
return false;
}
*replay_entry = ReplayEntry(entry.get_data(), commit_tid);
if (tag_tid != nullptr) {
*tag_tid = entry.get_tag_tid();
}
return true;
}
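// Illustrative replay loop (a sketch; "replay_handler" is an assumed
// journal::ReplayHandler implementation and "process_entry" a caller-supplied
// function, neither defined in this file):
//
//   journaler.start_replay(&replay_handler);
//   ReplayEntry entry;
//   uint64_t tag_tid;
//   while (journaler.try_pop_front(&entry, &tag_tid)) {
//     process_entry(tag_tid, entry.get_data());
//     journaler.committed(entry);   // lets the trimmer advance past it
//   }
//   journaler.stop_replay();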
void Journaler::stop_replay() {
C_SaferCond ctx;
stop_replay(&ctx);
ctx.wait();
}
void Journaler::stop_replay(Context *on_finish) {
auto player = std::move(m_player);
auto* playerp = player.get();
auto f = [player=std::move(player), on_finish](int r) {
on_finish->complete(r);
};
on_finish = new LambdaContext(std::move(f));
playerp->shut_down(on_finish);
}
void Journaler::committed(const ReplayEntry &replay_entry) {
m_trimmer->committed(replay_entry.get_commit_tid());
}
void Journaler::committed(const Future &future) {
auto& future_impl = future.get_future_impl();
m_trimmer->committed(future_impl->get_commit_tid());
}
void Journaler::start_append(uint64_t max_in_flight_appends) {
ceph_assert(m_recorder == nullptr);
// TODO verify active object set >= current replay object set
m_recorder = std::make_unique<JournalRecorder>(m_data_ioctx, m_object_oid_prefix,
m_metadata, max_in_flight_appends);
}
void Journaler::set_append_batch_options(int flush_interval,
uint64_t flush_bytes,
double flush_age) {
ceph_assert(m_recorder != nullptr);
m_recorder->set_append_batch_options(flush_interval, flush_bytes, flush_age);
}
void Journaler::stop_append(Context *on_safe) {
auto recorder = std::move(m_recorder);
ceph_assert(recorder);
auto* recorderp = recorder.get();
on_safe = new LambdaContext([recorder=std::move(recorder), on_safe](int r) {
on_safe->complete(r);
});
recorderp->shut_down(on_safe);
}
uint64_t Journaler::get_max_append_size() const {
uint64_t max_payload_size = m_metadata->get_object_size() -
Entry::get_fixed_size();
if (m_metadata->get_settings().max_payload_bytes > 0) {
max_payload_size = std::min(max_payload_size,
m_metadata->get_settings().max_payload_bytes);
}
return max_payload_size;
}
Future Journaler::append(uint64_t tag_tid, const bufferlist &payload_bl) {
return m_recorder->append(tag_tid, payload_bl);
}
void Journaler::flush_append(Context *on_safe) {
m_recorder->flush(on_safe);
}
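// Illustrative append path (a sketch; error handling and the payload
// bufferlist are omitted or assumed):
//
//   journaler.start_append(max_in_flight_appends);
//   journaler.set_append_batch_options(flush_interval, flush_bytes, flush_age);
//   Future future = journaler.append(tag_tid, payload_bl);
//   journaler.flush_append(on_safe);  // the returned Future can also be
//                                     // flushed/committed individually
//   journaler.stop_append(on_stop);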
void Journaler::create_player(ReplayHandler* replay_handler) {
ceph_assert(m_player == nullptr);
m_player = std::make_unique<JournalPlayer>(m_data_ioctx, m_object_oid_prefix, m_metadata,
replay_handler, m_cache_manager_handler);
}
void Journaler::get_metadata(uint8_t *order, uint8_t *splay_width,
int64_t *pool_id) {
ceph_assert(m_metadata != nullptr);
*order = m_metadata->get_order();
*splay_width = m_metadata->get_splay_width();
*pool_id = m_metadata->get_pool_id();
}
std::ostream &operator<<(std::ostream &os,
const Journaler &journaler) {
os << "[metadata=";
if (journaler.m_metadata) {
os << *journaler.m_metadata;
} else {
os << "NULL";
}
os << "]";
return os;
}
} // namespace journal
| 13,888 | 28.99784 | 91 |
cc
|
null |
ceph-main/src/journal/Journaler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_JOURNALER_H
#define CEPH_JOURNAL_JOURNALER_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "journal/Future.h"
#include "journal/JournalMetadataListener.h"
#include "cls/journal/cls_journal_types.h"
#include "common/Timer.h"
#include <list>
#include <map>
#include <string>
#include "include/ceph_assert.h"
class ContextWQ;
class ThreadPool;
namespace journal {
struct CacheManagerHandler;
class JournalTrimmer;
class ReplayEntry;
class ReplayHandler;
class Settings;
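// Facade over a cls_journal-backed journal: ties together metadata handling
// (JournalMetadata), replay (JournalPlayer), appending (JournalRecorder) and
// commit-position trimming (JournalTrimmer). Operations are asynchronous and
// complete through Context callbacks; the synchronous overloads below simply
// wait on a C_SaferCond.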
class Journaler {
public:
struct Threads {
Threads(CephContext *cct);
~Threads();
ThreadPool *thread_pool = nullptr;
ContextWQ *work_queue = nullptr;
SafeTimer *timer;
ceph::mutex timer_lock = ceph::make_mutex("Journaler::timer_lock");
};
typedef cls::journal::Tag Tag;
typedef std::list<cls::journal::Tag> Tags;
typedef std::set<cls::journal::Client> RegisteredClients;
static std::string header_oid(const std::string &journal_id);
static std::string object_oid_prefix(int pool_id,
const std::string &journal_id);
Journaler(librados::IoCtx &header_ioctx, const std::string &journal_id,
const std::string &client_id, const Settings &settings,
CacheManagerHandler *cache_manager_handler);
Journaler(ContextWQ *work_queue, SafeTimer *timer, ceph::mutex *timer_lock,
librados::IoCtx &header_ioctx, const std::string &journal_id,
const std::string &client_id, const Settings &settings,
CacheManagerHandler *cache_manager_handler);
~Journaler();
void exists(Context *on_finish) const;
void create(uint8_t order, uint8_t splay_width, int64_t pool_id, Context *ctx);
void remove(bool force, Context *on_finish);
void init(Context *on_init);
void shut_down();
void shut_down(Context *on_finish);
bool is_initialized() const;
void get_immutable_metadata(uint8_t *order, uint8_t *splay_width,
int64_t *pool_id, Context *on_finish);
void get_mutable_metadata(uint64_t *minimum_set, uint64_t *active_set,
RegisteredClients *clients, Context *on_finish);
void add_listener(JournalMetadataListener *listener);
void remove_listener(JournalMetadataListener *listener);
int register_client(const bufferlist &data);
void register_client(const bufferlist &data, Context *on_finish);
int unregister_client();
void unregister_client(Context *on_finish);
void update_client(const bufferlist &data, Context *on_finish);
void get_client(const std::string &client_id, cls::journal::Client *client,
Context *on_finish);
int get_cached_client(const std::string &client_id,
cls::journal::Client *client);
void flush_commit_position(Context *on_safe);
void allocate_tag(const bufferlist &data, cls::journal::Tag *tag,
Context *on_finish);
void allocate_tag(uint64_t tag_class, const bufferlist &data,
cls::journal::Tag *tag, Context *on_finish);
void get_tag(uint64_t tag_tid, Tag *tag, Context *on_finish);
void get_tags(uint64_t tag_class, Tags *tags, Context *on_finish);
void get_tags(uint64_t start_after_tag_tid, uint64_t tag_class, Tags *tags,
Context *on_finish);
void start_replay(ReplayHandler* replay_handler);
void start_live_replay(ReplayHandler* replay_handler, double interval);
bool try_pop_front(ReplayEntry *replay_entry, uint64_t *tag_tid = nullptr);
void stop_replay();
void stop_replay(Context *on_finish);
uint64_t get_max_append_size() const;
void start_append(uint64_t max_in_flight_appends);
void set_append_batch_options(int flush_interval, uint64_t flush_bytes,
double flush_age);
Future append(uint64_t tag_tid, const bufferlist &bl);
void flush_append(Context *on_safe);
void stop_append(Context *on_safe);
void committed(const ReplayEntry &replay_entry);
void committed(const Future &future);
void get_metadata(uint8_t *order, uint8_t *splay_width, int64_t *pool_id);
private:
struct C_InitJournaler : public Context {
Journaler *journaler;
Context *on_safe;
C_InitJournaler(Journaler *_journaler, Context *_on_safe)
: journaler(_journaler), on_safe(_on_safe) {
}
void finish(int r) override {
if (r == 0) {
r = journaler->init_complete();
}
on_safe->complete(r);
}
};
Threads *m_threads = nullptr;
mutable librados::IoCtx m_header_ioctx;
librados::IoCtx m_data_ioctx;
CephContext *m_cct;
std::string m_client_id;
CacheManagerHandler *m_cache_manager_handler;
std::string m_header_oid;
std::string m_object_oid_prefix;
bool m_initialized = false;
ceph::ref_t<class JournalMetadata> m_metadata;
std::unique_ptr<class JournalPlayer> m_player;
std::unique_ptr<class JournalRecorder> m_recorder;
JournalTrimmer *m_trimmer = nullptr;
void set_up(ContextWQ *work_queue, SafeTimer *timer, ceph::mutex *timer_lock,
librados::IoCtx &header_ioctx, const std::string &journal_id,
const Settings &settings);
int init_complete();
void create_player(ReplayHandler* replay_handler);
friend std::ostream &operator<<(std::ostream &os,
const Journaler &journaler);
};
std::ostream &operator<<(std::ostream &os,
const Journaler &journaler);
} // namespace journal
#endif // CEPH_JOURNAL_JOURNALER_H
| 5,545 | 31.432749 | 81 |
h
|
null |
ceph-main/src/journal/ObjectPlayer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/ObjectPlayer.h"
#include "journal/Utils.h"
#include "common/Timer.h"
#include <limits>
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "ObjectPlayer: " << this << " "
namespace journal {
namespace {
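// Journal entries are zero-padded out to an 8-byte boundary. Starting from the
// absolute offset 'off', verify that every byte up to the next boundary is
// NUL: on success advance *iter to the last pad byte and add the skipped bytes
// to *pad_len; if the buffer ends mid-pad, set *partial_entry so the caller
// can re-fetch the remainder.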
bool advance_to_last_pad_byte(uint32_t off, bufferlist::const_iterator *iter,
uint32_t *pad_len, bool *partial_entry) {
const uint32_t MAX_PAD = 8;
auto pad_bytes = MAX_PAD - off % MAX_PAD;
auto next = *iter;
ceph_assert(!next.end());
if (*next != '\0') {
return false;
}
for (auto i = pad_bytes - 1; i > 0; i--) {
if ((++next).end()) {
*partial_entry = true;
return false;
}
if (*next != '\0') {
return false;
}
}
*iter = next;
*pad_len += pad_bytes;
return true;
}
} // anonymous namespace
ObjectPlayer::ObjectPlayer(librados::IoCtx &ioctx,
const std::string& object_oid_prefix,
uint64_t object_num, SafeTimer &timer,
ceph::mutex &timer_lock, uint8_t order,
uint64_t max_fetch_bytes)
: m_object_num(object_num),
m_oid(utils::get_object_name(object_oid_prefix, m_object_num)),
m_timer(timer), m_timer_lock(timer_lock), m_order(order),
m_max_fetch_bytes(max_fetch_bytes > 0 ? max_fetch_bytes : 2 << order),
m_lock(ceph::make_mutex(utils::unique_lock_name("ObjectPlayer::m_lock", this)))
{
m_ioctx.dup(ioctx);
m_cct = reinterpret_cast<CephContext*>(m_ioctx.cct());
}
ObjectPlayer::~ObjectPlayer() {
{
std::lock_guard timer_locker{m_timer_lock};
std::lock_guard locker{m_lock};
ceph_assert(!m_fetch_in_progress);
ceph_assert(m_watch_ctx == nullptr);
}
}
void ObjectPlayer::fetch(Context *on_finish) {
ldout(m_cct, 10) << __func__ << ": " << m_oid << dendl;
std::lock_guard locker{m_lock};
ceph_assert(!m_fetch_in_progress);
m_fetch_in_progress = true;
C_Fetch *context = new C_Fetch(this, on_finish);
librados::ObjectReadOperation op;
op.read(m_read_off, m_max_fetch_bytes, &context->read_bl, NULL);
op.set_op_flags2(CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
auto rados_completion =
librados::Rados::aio_create_completion(context, utils::rados_ctx_callback);
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op, 0, NULL);
ceph_assert(r == 0);
rados_completion->release();
}
void ObjectPlayer::watch(Context *on_fetch, double interval) {
ldout(m_cct, 20) << __func__ << ": " << m_oid << " watch" << dendl;
std::lock_guard timer_locker{m_timer_lock};
m_watch_interval = interval;
ceph_assert(m_watch_ctx == nullptr);
m_watch_ctx = on_fetch;
schedule_watch();
}
void ObjectPlayer::unwatch() {
ldout(m_cct, 20) << __func__ << ": " << m_oid << " unwatch" << dendl;
Context *watch_ctx = nullptr;
{
std::lock_guard timer_locker{m_timer_lock};
ceph_assert(!m_unwatched);
m_unwatched = true;
if (!cancel_watch()) {
return;
}
std::swap(watch_ctx, m_watch_ctx);
}
if (watch_ctx != nullptr) {
watch_ctx->complete(-ECANCELED);
}
}
void ObjectPlayer::front(Entry *entry) const {
std::lock_guard locker{m_lock};
ceph_assert(!m_entries.empty());
*entry = m_entries.front();
}
void ObjectPlayer::pop_front() {
std::lock_guard locker{m_lock};
ceph_assert(!m_entries.empty());
auto &entry = m_entries.front();
m_entry_keys.erase({entry.get_tag_tid(), entry.get_entry_tid()});
m_entries.pop_front();
}
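// Decode journal entries from a freshly fetched extent. Returns 0 on success
// (including a missing object or an empty read), the read error otherwise,
// -EBADMSG if a non-recoverable corrupt region was found, and -EAGAIN with
// *refetch set if a partial entry means the object must be fetched again.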
int ObjectPlayer::handle_fetch_complete(int r, const bufferlist &bl,
bool *refetch) {
ldout(m_cct, 10) << __func__ << ": " << m_oid << ", r=" << r << ", len="
<< bl.length() << dendl;
*refetch = false;
if (r == -ENOENT) {
return 0;
} else if (r < 0) {
return r;
} else if (bl.length() == 0) {
return 0;
}
std::lock_guard locker{m_lock};
ceph_assert(m_fetch_in_progress);
m_read_off += bl.length();
m_read_bl.append(bl);
m_refetch_state = REFETCH_STATE_REQUIRED;
bool full_fetch = (m_max_fetch_bytes == 2U << m_order);
bool partial_entry = false;
bool invalid = false;
uint32_t invalid_start_off = 0;
clear_invalid_range(m_read_bl_off, m_read_bl.length());
bufferlist::const_iterator iter{&m_read_bl, 0};
uint32_t pad_len = 0;
while (!iter.end()) {
uint32_t bytes_needed;
uint32_t bl_off = iter.get_off();
if (!Entry::is_readable(iter, &bytes_needed)) {
if (bytes_needed != 0) {
invalid_start_off = m_read_bl_off + bl_off;
invalid = true;
partial_entry = true;
if (full_fetch) {
lderr(m_cct) << ": partial record at offset " << invalid_start_off
<< dendl;
} else {
ldout(m_cct, 20) << ": partial record detected, will re-fetch"
<< dendl;
}
break;
}
if (!advance_to_last_pad_byte(m_read_bl_off + iter.get_off(), &iter,
&pad_len, &partial_entry)) {
invalid_start_off = m_read_bl_off + bl_off;
invalid = true;
if (partial_entry) {
if (full_fetch) {
lderr(m_cct) << ": partial pad at offset " << invalid_start_off
<< dendl;
} else {
ldout(m_cct, 20) << ": partial pad detected, will re-fetch"
<< dendl;
}
} else {
lderr(m_cct) << ": detected corrupt journal entry at offset "
<< invalid_start_off << dendl;
}
break;
}
++iter;
continue;
}
Entry entry;
decode(entry, iter);
ldout(m_cct, 20) << ": " << entry << " decoded" << dendl;
uint32_t entry_len = iter.get_off() - bl_off;
if (invalid) {
// new corrupt region detected
uint32_t invalid_end_off = m_read_bl_off + bl_off;
lderr(m_cct) << ": corruption range [" << invalid_start_off
<< ", " << invalid_end_off << ")" << dendl;
m_invalid_ranges.insert(invalid_start_off,
invalid_end_off - invalid_start_off);
invalid = false;
m_read_bl_off = invalid_end_off;
}
EntryKey entry_key(std::make_pair(entry.get_tag_tid(),
entry.get_entry_tid()));
if (m_entry_keys.find(entry_key) == m_entry_keys.end()) {
m_entry_keys[entry_key] = m_entries.insert(m_entries.end(), entry);
} else {
ldout(m_cct, 10) << ": " << entry << " is duplicate, replacing" << dendl;
*m_entry_keys[entry_key] = entry;
}
// prune decoded / corrupted journal entries from front of bl
bufferlist sub_bl;
sub_bl.substr_of(m_read_bl, iter.get_off(),
m_read_bl.length() - iter.get_off());
sub_bl.swap(m_read_bl);
iter = bufferlist::iterator(&m_read_bl, 0);
// advance the decoded entry offset
m_read_bl_off += entry_len + pad_len;
pad_len = 0;
}
if (invalid) {
uint32_t invalid_end_off = m_read_bl_off + m_read_bl.length();
if (!partial_entry) {
lderr(m_cct) << ": corruption range [" << invalid_start_off
<< ", " << invalid_end_off << ")" << dendl;
}
m_invalid_ranges.insert(invalid_start_off,
invalid_end_off - invalid_start_off);
}
if (!m_invalid_ranges.empty() && !partial_entry) {
return -EBADMSG;
} else if (partial_entry && (full_fetch || m_entries.empty())) {
*refetch = true;
return -EAGAIN;
}
return 0;
}
void ObjectPlayer::clear_invalid_range(uint32_t off, uint32_t len) {
// possibly remove previously partial record region
InvalidRanges decode_range;
decode_range.insert(off, len);
InvalidRanges intersect_range;
intersect_range.intersection_of(m_invalid_ranges, decode_range);
if (!intersect_range.empty()) {
ldout(m_cct, 20) << ": clearing invalid range: " << intersect_range
<< dendl;
m_invalid_ranges.subtract(intersect_range);
}
}
void ObjectPlayer::schedule_watch() {
ceph_assert(ceph_mutex_is_locked(m_timer_lock));
if (m_watch_ctx == NULL) {
return;
}
ldout(m_cct, 20) << __func__ << ": " << m_oid << " scheduling watch" << dendl;
ceph_assert(m_watch_task == nullptr);
m_watch_task = m_timer.add_event_after(
m_watch_interval,
new LambdaContext([this](int) {
handle_watch_task();
}));
}
bool ObjectPlayer::cancel_watch() {
ceph_assert(ceph_mutex_is_locked(m_timer_lock));
ldout(m_cct, 20) << __func__ << ": " << m_oid << " cancelling watch" << dendl;
if (m_watch_task != nullptr) {
bool canceled = m_timer.cancel_event(m_watch_task);
ceph_assert(canceled);
m_watch_task = nullptr;
return true;
}
return false;
}
void ObjectPlayer::handle_watch_task() {
ceph_assert(ceph_mutex_is_locked(m_timer_lock));
ldout(m_cct, 10) << __func__ << ": " << m_oid << " polling" << dendl;
ceph_assert(m_watch_ctx != nullptr);
ceph_assert(m_watch_task != nullptr);
m_watch_task = nullptr;
fetch(new C_WatchFetch(this));
}
void ObjectPlayer::handle_watch_fetched(int r) {
ldout(m_cct, 10) << __func__ << ": " << m_oid << " poll complete, r=" << r
<< dendl;
Context *watch_ctx = nullptr;
{
std::lock_guard timer_locker{m_timer_lock};
std::swap(watch_ctx, m_watch_ctx);
if (m_unwatched) {
m_unwatched = false;
r = -ECANCELED;
}
}
if (watch_ctx != nullptr) {
watch_ctx->complete(r);
}
}
void ObjectPlayer::C_Fetch::finish(int r) {
bool refetch = false;
r = object_player->handle_fetch_complete(r, read_bl, &refetch);
{
std::lock_guard locker{object_player->m_lock};
object_player->m_fetch_in_progress = false;
}
if (refetch) {
object_player->fetch(on_finish);
return;
}
object_player.reset();
on_finish->complete(r);
}
void ObjectPlayer::C_WatchFetch::finish(int r) {
object_player->handle_watch_fetched(r);
}
} // namespace journal
| 10,124 | 27.441011 | 83 |
cc
|
null |
ceph-main/src/journal/ObjectPlayer.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_OBJECT_PLAYER_H
#define CEPH_JOURNAL_OBJECT_PLAYER_H
#include "include/Context.h"
#include "include/interval_set.h"
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "common/RefCountedObj.h"
#include "journal/Entry.h"
#include <list>
#include <string>
#include <boost/noncopyable.hpp>
#include <boost/unordered_map.hpp>
#include "include/ceph_assert.h"
namespace journal {
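// Fetches a single journal data object and decodes its entries, tracking
// partial and corrupt regions so callers can re-fetch or fail cleanly.
// Entries are consumed in order via front()/pop_front(); watch()/unwatch()
// provide the polling mode used for live replay.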
class ObjectPlayer : public RefCountedObject {
public:
typedef std::list<Entry> Entries;
typedef interval_set<uint64_t> InvalidRanges;
enum RefetchState {
REFETCH_STATE_NONE,
REFETCH_STATE_REQUIRED,
REFETCH_STATE_IMMEDIATE
};
inline const std::string &get_oid() const {
return m_oid;
}
inline uint64_t get_object_number() const {
return m_object_num;
}
void fetch(Context *on_finish);
void watch(Context *on_fetch, double interval);
void unwatch();
void front(Entry *entry) const;
void pop_front();
inline bool empty() const {
std::lock_guard locker{m_lock};
return m_entries.empty();
}
inline void get_entries(Entries *entries) {
std::lock_guard locker{m_lock};
*entries = m_entries;
}
inline void get_invalid_ranges(InvalidRanges *invalid_ranges) {
std::lock_guard locker{m_lock};
*invalid_ranges = m_invalid_ranges;
}
inline bool refetch_required() const {
return (get_refetch_state() != REFETCH_STATE_NONE);
}
inline RefetchState get_refetch_state() const {
return m_refetch_state;
}
inline void set_refetch_state(RefetchState refetch_state) {
m_refetch_state = refetch_state;
}
inline void set_max_fetch_bytes(uint64_t max_fetch_bytes) {
std::lock_guard locker{m_lock};
m_max_fetch_bytes = max_fetch_bytes;
}
private:
FRIEND_MAKE_REF(ObjectPlayer);
ObjectPlayer(librados::IoCtx &ioctx, const std::string& object_oid_prefix,
uint64_t object_num, SafeTimer &timer, ceph::mutex &timer_lock,
uint8_t order, uint64_t max_fetch_bytes);
~ObjectPlayer() override;
typedef std::pair<uint64_t, uint64_t> EntryKey;
typedef boost::unordered_map<EntryKey, Entries::iterator> EntryKeys;
struct C_Fetch : public Context {
ceph::ref_t<ObjectPlayer> object_player;
Context *on_finish;
bufferlist read_bl;
C_Fetch(ObjectPlayer *o, Context *ctx) : object_player(o), on_finish(ctx) {
}
void finish(int r) override;
};
struct C_WatchFetch : public Context {
ceph::ref_t<ObjectPlayer> object_player;
C_WatchFetch(ObjectPlayer *o) : object_player(o) {
}
void finish(int r) override;
};
librados::IoCtx m_ioctx;
uint64_t m_object_num;
std::string m_oid;
CephContext *m_cct = nullptr;
SafeTimer &m_timer;
ceph::mutex &m_timer_lock;
uint8_t m_order;
uint64_t m_max_fetch_bytes;
double m_watch_interval = 0;
Context *m_watch_task = nullptr;
mutable ceph::mutex m_lock;
bool m_fetch_in_progress = false;
bufferlist m_read_bl;
uint32_t m_read_off = 0;
uint32_t m_read_bl_off = 0;
Entries m_entries;
EntryKeys m_entry_keys;
InvalidRanges m_invalid_ranges;
Context *m_watch_ctx = nullptr;
bool m_unwatched = false;
RefetchState m_refetch_state = REFETCH_STATE_IMMEDIATE;
int handle_fetch_complete(int r, const bufferlist &bl, bool *refetch);
void clear_invalid_range(uint32_t off, uint32_t len);
void schedule_watch();
bool cancel_watch();
void handle_watch_task();
void handle_watch_fetched(int r);
};
} // namespace journal
#endif // CEPH_JOURNAL_OBJECT_PLAYER_H
| 3,672 | 24.866197 | 79 |
h
|
null |
ceph-main/src/journal/ObjectRecorder.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/ObjectRecorder.h"
#include "journal/Future.h"
#include "journal/Utils.h"
#include "include/ceph_assert.h"
#include "common/Timer.h"
#include "common/errno.h"
#include "cls/journal/cls_journal_client.h"
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "ObjectRecorder: " << this << " " \
<< __func__ << " (" << m_oid << "): "
using namespace cls::journal;
using std::shared_ptr;
namespace journal {
ObjectRecorder::ObjectRecorder(librados::IoCtx &ioctx, std::string_view oid,
uint64_t object_number, ceph::mutex* lock,
ContextWQ *work_queue, Handler *handler,
uint8_t order, int32_t max_in_flight_appends)
: m_oid(oid), m_object_number(object_number),
m_op_work_queue(work_queue), m_handler(handler),
m_order(order), m_soft_max_size(1 << m_order),
m_max_in_flight_appends(max_in_flight_appends),
m_lock(lock)
{
m_ioctx.dup(ioctx);
m_cct = reinterpret_cast<CephContext*>(m_ioctx.cct());
ceph_assert(m_handler != NULL);
librados::Rados rados(m_ioctx);
int8_t require_osd_release = 0;
int r = rados.get_min_compatible_osd(&require_osd_release);
if (r < 0) {
ldout(m_cct, 0) << "failed to retrieve min OSD release: "
<< cpp_strerror(r) << dendl;
}
m_compat_mode = require_osd_release < CEPH_RELEASE_OCTOPUS;
ldout(m_cct, 20) << dendl;
}
ObjectRecorder::~ObjectRecorder() {
ldout(m_cct, 20) << dendl;
ceph_assert(m_pending_buffers.empty());
ceph_assert(m_in_flight_tids.empty());
ceph_assert(m_in_flight_appends.empty());
}
void ObjectRecorder::set_append_batch_options(int flush_interval,
uint64_t flush_bytes,
double flush_age) {
ldout(m_cct, 5) << "flush_interval=" << flush_interval << ", "
<< "flush_bytes=" << flush_bytes << ", "
<< "flush_age=" << flush_age << dendl;
ceph_assert(ceph_mutex_is_locked(*m_lock));
m_flush_interval = flush_interval;
m_flush_bytes = flush_bytes;
m_flush_age = flush_age;
}
bool ObjectRecorder::append(AppendBuffers &&append_buffers) {
ldout(m_cct, 20) << "count=" << append_buffers.size() << dendl;
ceph_assert(ceph_mutex_is_locked(*m_lock));
ceph::ref_t<FutureImpl> last_flushed_future;
auto flush_handler = get_flush_handler();
for (auto& append_buffer : append_buffers) {
ldout(m_cct, 20) << *append_buffer.first << ", "
<< "size=" << append_buffer.second.length() << dendl;
bool flush_requested = append_buffer.first->attach(flush_handler);
if (flush_requested) {
last_flushed_future = append_buffer.first;
}
m_pending_buffers.push_back(append_buffer);
m_pending_bytes += append_buffer.second.length();
}
return send_appends(!!last_flushed_future, last_flushed_future);
}
void ObjectRecorder::flush(Context *on_safe) {
ldout(m_cct, 20) << dendl;
Future future;
{
std::unique_lock locker{*m_lock};
// if currently handling flush notifications, wait so that
// we notify in the correct order (since lock is dropped on
// callback)
while (m_in_flight_callbacks > 0) {
m_in_flight_callbacks_cond.wait(locker);
}
// attach the flush to the most recent append
if (!m_pending_buffers.empty()) {
future = Future(m_pending_buffers.rbegin()->first);
} else if (!m_in_flight_appends.empty()) {
AppendBuffers &append_buffers = m_in_flight_appends.rbegin()->second;
ceph_assert(!append_buffers.empty());
future = Future(append_buffers.rbegin()->first);
}
}
if (future.is_valid()) {
// cannot be invoked within the same lock context
m_op_work_queue->queue(new LambdaContext(
[future, on_safe] (int r) mutable {
future.flush(on_safe);
}));
} else {
on_safe->complete(0);
}
}
void ObjectRecorder::flush(const ceph::ref_t<FutureImpl>& future) {
ldout(m_cct, 20) << "flushing " << *future << dendl;
std::unique_lock locker{*m_lock};
auto flush_handler = future->get_flush_handler();
auto my_handler = get_flush_handler();
if (flush_handler != my_handler) {
// if we don't own this future, re-issue the flush so that it hits the
// correct journal object owner
future->flush();
return;
} else if (future->is_flush_in_progress()) {
return;
}
if (!m_object_closed && !m_overflowed && send_appends(true, future)) {
++m_in_flight_callbacks;
notify_handler_unlock(locker, true);
}
}
void ObjectRecorder::claim_append_buffers(AppendBuffers *append_buffers) {
ldout(m_cct, 20) << dendl;
ceph_assert(ceph_mutex_is_locked(*m_lock));
ceph_assert(m_in_flight_tids.empty());
ceph_assert(m_in_flight_appends.empty());
ceph_assert(m_object_closed || m_overflowed);
for (auto& append_buffer : m_pending_buffers) {
ldout(m_cct, 20) << "detached " << *append_buffer.first << dendl;
append_buffer.first->detach();
}
append_buffers->splice(append_buffers->end(), m_pending_buffers,
m_pending_buffers.begin(), m_pending_buffers.end());
}
bool ObjectRecorder::close() {
ceph_assert(ceph_mutex_is_locked(*m_lock));
ldout(m_cct, 20) << dendl;
send_appends(true, {});
ceph_assert(!m_object_closed);
m_object_closed = true;
if (!m_in_flight_tids.empty() || m_in_flight_callbacks > 0) {
m_object_closed_notify = true;
return false;
}
return true;
}
void ObjectRecorder::handle_append_flushed(uint64_t tid, int r) {
ldout(m_cct, 20) << "tid=" << tid << ", r=" << r << dendl;
std::unique_lock locker{*m_lock};
++m_in_flight_callbacks;
auto tid_iter = m_in_flight_tids.find(tid);
ceph_assert(tid_iter != m_in_flight_tids.end());
m_in_flight_tids.erase(tid_iter);
InFlightAppends::iterator iter = m_in_flight_appends.find(tid);
ceph_assert(iter != m_in_flight_appends.end());
bool notify_overflowed = false;
AppendBuffers append_buffers;
if (r == -EOVERFLOW) {
ldout(m_cct, 10) << "append overflowed: "
<< "idle=" << m_in_flight_tids.empty() << ", "
<< "previous_overflow=" << m_overflowed << dendl;
if (m_in_flight_tids.empty()) {
append_overflowed();
}
if (!m_object_closed && !m_overflowed) {
notify_overflowed = true;
}
m_overflowed = true;
} else {
append_buffers.swap(iter->second);
ceph_assert(!append_buffers.empty());
for (auto& append_buffer : append_buffers) {
auto length = append_buffer.second.length();
m_object_bytes += length;
ceph_assert(m_in_flight_bytes >= length);
m_in_flight_bytes -= length;
}
ldout(m_cct, 20) << "object_bytes=" << m_object_bytes << dendl;
m_in_flight_appends.erase(iter);
}
locker.unlock();
// Flag the associated futures as complete.
for (auto& append_buffer : append_buffers) {
ldout(m_cct, 20) << *append_buffer.first << " marked safe" << dendl;
append_buffer.first->safe(r);
}
// attempt to kick off more appends to the object
locker.lock();
if (!m_object_closed && !m_overflowed && send_appends(false, {})) {
notify_overflowed = true;
}
ldout(m_cct, 20) << "pending tids=" << m_in_flight_tids << dendl;
// notify of overflow if one just occurred or indicate that all in-flight
// appends have completed on a closed object (or wake up stalled flush
// requests that were waiting for this strand to complete).
notify_handler_unlock(locker, notify_overflowed);
}
void ObjectRecorder::append_overflowed() {
ldout(m_cct, 10) << dendl;
ceph_assert(ceph_mutex_is_locked(*m_lock));
ceph_assert(!m_in_flight_appends.empty());
InFlightAppends in_flight_appends;
in_flight_appends.swap(m_in_flight_appends);
AppendBuffers restart_append_buffers;
for (InFlightAppends::iterator it = in_flight_appends.begin();
it != in_flight_appends.end(); ++it) {
restart_append_buffers.insert(restart_append_buffers.end(),
it->second.begin(), it->second.end());
}
restart_append_buffers.splice(restart_append_buffers.end(),
m_pending_buffers,
m_pending_buffers.begin(),
m_pending_buffers.end());
restart_append_buffers.swap(m_pending_buffers);
}
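// Issue an OSD append for the currently pending buffers. Unless 'force' is
// set or a batching threshold has been reached (flush_interval entries,
// flush_bytes bytes, or flush_age seconds since the last flush), buffers stay
// queued so that several journal entries share one write. Returns true when
// the object has overflowed its soft maximum size and the handler needs to be
// notified.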
bool ObjectRecorder::send_appends(bool force, ceph::ref_t<FutureImpl> flush_future) {
ldout(m_cct, 20) << dendl;
ceph_assert(ceph_mutex_is_locked(*m_lock));
if (m_object_closed || m_overflowed) {
ldout(m_cct, 20) << "already closed or overflowed" << dendl;
return false;
}
if (m_pending_buffers.empty()) {
ldout(m_cct, 20) << "append buffers empty" << dendl;
return false;
}
if (!force &&
((m_flush_interval > 0 && m_pending_buffers.size() >= m_flush_interval) ||
(m_flush_bytes > 0 && m_pending_bytes >= m_flush_bytes) ||
(m_flush_age > 0 && !m_last_flush_time.is_zero() &&
m_last_flush_time + m_flush_age <= ceph_clock_now()))) {
ldout(m_cct, 20) << "forcing batch flush" << dendl;
force = true;
}
// start tracking flush time after the first append event
if (m_last_flush_time.is_zero()) {
m_last_flush_time = ceph_clock_now();
}
auto max_in_flight_appends = m_max_in_flight_appends;
if (m_flush_interval > 0 || m_flush_bytes > 0 || m_flush_age > 0) {
if (!force && max_in_flight_appends == 0) {
ldout(m_cct, 20) << "attempting to batch AIO appends" << dendl;
max_in_flight_appends = 1;
}
} else if (max_in_flight_appends < 0) {
max_in_flight_appends = 0;
}
if (!force && max_in_flight_appends != 0 &&
static_cast<int32_t>(m_in_flight_tids.size()) >= max_in_flight_appends) {
ldout(m_cct, 10) << "max in flight appends reached" << dendl;
return false;
}
librados::ObjectWriteOperation op;
if (m_compat_mode) {
client::guard_append(&op, m_soft_max_size);
}
size_t append_bytes = 0;
AppendBuffers append_buffers;
bufferlist append_bl;
for (auto it = m_pending_buffers.begin(); it != m_pending_buffers.end(); ) {
auto& future = it->first;
auto& bl = it->second;
auto size = m_object_bytes + m_in_flight_bytes + append_bytes + bl.length();
if (size == m_soft_max_size) {
ldout(m_cct, 10) << "object at capacity (" << size << ") " << *future << dendl;
m_overflowed = true;
} else if (size > m_soft_max_size) {
ldout(m_cct, 10) << "object beyond capacity (" << size << ") " << *future << dendl;
m_overflowed = true;
break;
}
bool flush_break = (force && flush_future && flush_future == future);
ldout(m_cct, 20) << "flushing " << *future << dendl;
future->set_flush_in_progress();
if (m_compat_mode) {
op.append(bl);
op.set_op_flags2(CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
} else {
append_bl.append(bl);
}
append_bytes += bl.length();
append_buffers.push_back(*it);
it = m_pending_buffers.erase(it);
if (flush_break) {
ldout(m_cct, 20) << "stopping at requested flush future" << dendl;
break;
}
}
if (append_bytes > 0) {
m_last_flush_time = ceph_clock_now();
uint64_t append_tid = m_append_tid++;
m_in_flight_tids.insert(append_tid);
m_in_flight_appends[append_tid].swap(append_buffers);
m_in_flight_bytes += append_bytes;
ceph_assert(m_pending_bytes >= append_bytes);
m_pending_bytes -= append_bytes;
if (!m_compat_mode) {
client::append(&op, m_soft_max_size, append_bl);
}
auto rados_completion = librados::Rados::aio_create_completion(
new C_AppendFlush(this, append_tid), utils::rados_ctx_callback);
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
ldout(m_cct, 20) << "flushing journal tid=" << append_tid << ", "
<< "append_bytes=" << append_bytes << ", "
<< "in_flight_bytes=" << m_in_flight_bytes << ", "
<< "pending_bytes=" << m_pending_bytes << dendl;
}
return m_overflowed;
}
void ObjectRecorder::wake_up_flushes() {
ceph_assert(ceph_mutex_is_locked(*m_lock));
--m_in_flight_callbacks;
if (m_in_flight_callbacks == 0) {
m_in_flight_callbacks_cond.notify_all();
}
}
void ObjectRecorder::notify_handler_unlock(
std::unique_lock<ceph::mutex>& locker, bool notify_overflowed) {
ceph_assert(ceph_mutex_is_locked(*m_lock));
ceph_assert(m_in_flight_callbacks > 0);
if (!m_object_closed && notify_overflowed) {
// TODO need to delay completion until after aio_notify completes
ldout(m_cct, 10) << "overflow" << dendl;
ceph_assert(m_overflowed);
locker.unlock();
m_handler->overflow(this);
locker.lock();
}
// wake up blocked flush requests
wake_up_flushes();
// An overflow notification might have blocked a close. A close
// notification could lead to the immediate destruction of this object
// so the object shouldn't be referenced anymore
bool object_closed_notify = false;
if (m_in_flight_tids.empty()) {
std::swap(object_closed_notify, m_object_closed_notify);
}
ceph_assert(m_object_closed || !object_closed_notify);
locker.unlock();
if (object_closed_notify) {
ldout(m_cct, 10) << "closed" << dendl;
m_handler->closed(this);
}
}
} // namespace journal
| 13,578 | 30.950588 | 89 |
cc
|
null |
ceph-main/src/journal/ObjectRecorder.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_OBJECT_RECORDER_H
#define CEPH_JOURNAL_OBJECT_RECORDER_H
#include "include/utime.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include "common/WorkQueue.h"
#include "common/Timer.h"
#include "journal/FutureImpl.h"
#include <list>
#include <map>
#include <set>
#include <boost/noncopyable.hpp>
#include "include/ceph_assert.h"
namespace journal {
class ObjectRecorder;
typedef std::pair<ceph::ref_t<FutureImpl>, bufferlist> AppendBuffer;
typedef std::list<AppendBuffer> AppendBuffers;
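// Buffers journal entry appends destined for a single data object and writes
// them out in batches, honoring the configured flush thresholds and the
// in-flight append limit. The Handler is notified when the object overflows
// its soft size limit or when a close completes.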
class ObjectRecorder : public RefCountedObject, boost::noncopyable {
public:
struct Handler {
virtual ~Handler() {
}
virtual void closed(ObjectRecorder *object_recorder) = 0;
virtual void overflow(ObjectRecorder *object_recorder) = 0;
};
void set_append_batch_options(int flush_interval, uint64_t flush_bytes,
double flush_age);
inline uint64_t get_object_number() const {
return m_object_number;
}
inline const std::string &get_oid() const {
return m_oid;
}
bool append(AppendBuffers &&append_buffers);
void flush(Context *on_safe);
void flush(const ceph::ref_t<FutureImpl> &future);
void claim_append_buffers(AppendBuffers *append_buffers);
bool is_closed() const {
ceph_assert(ceph_mutex_is_locked(*m_lock));
return (m_object_closed && m_in_flight_appends.empty());
}
bool close();
inline CephContext *cct() const {
return m_cct;
}
inline size_t get_pending_appends() const {
std::lock_guard locker{*m_lock};
return m_pending_buffers.size();
}
private:
FRIEND_MAKE_REF(ObjectRecorder);
ObjectRecorder(librados::IoCtx &ioctx, std::string_view oid,
uint64_t object_number, ceph::mutex* lock,
ContextWQ *work_queue, Handler *handler, uint8_t order,
int32_t max_in_flight_appends);
~ObjectRecorder() override;
typedef std::set<uint64_t> InFlightTids;
typedef std::map<uint64_t, AppendBuffers> InFlightAppends;
struct FlushHandler : public FutureImpl::FlushHandler {
ceph::ref_t<ObjectRecorder> object_recorder;
virtual void flush(const ceph::ref_t<FutureImpl> &future) override {
object_recorder->flush(future);
}
FlushHandler(ceph::ref_t<ObjectRecorder> o) : object_recorder(std::move(o)) {}
};
struct C_AppendFlush : public Context {
ceph::ref_t<ObjectRecorder> object_recorder;
uint64_t tid;
C_AppendFlush(ceph::ref_t<ObjectRecorder> o, uint64_t _tid)
: object_recorder(std::move(o)), tid(_tid) {
}
void finish(int r) override {
object_recorder->handle_append_flushed(tid, r);
}
};
librados::IoCtx m_ioctx;
std::string m_oid;
uint64_t m_object_number;
CephContext *m_cct = nullptr;
ContextWQ *m_op_work_queue;
Handler *m_handler;
uint8_t m_order;
uint64_t m_soft_max_size;
uint32_t m_flush_interval = 0;
uint64_t m_flush_bytes = 0;
double m_flush_age = 0;
int32_t m_max_in_flight_appends;
bool m_compat_mode;
/* So that ObjectRecorder::FlushHandler doesn't create a circular reference: */
std::weak_ptr<FlushHandler> m_flush_handler;
auto get_flush_handler() {
auto h = m_flush_handler.lock();
if (!h) {
h = std::make_shared<FlushHandler>(this);
m_flush_handler = h;
}
return h;
}
mutable ceph::mutex* m_lock;
AppendBuffers m_pending_buffers;
uint64_t m_pending_bytes = 0;
utime_t m_last_flush_time;
uint64_t m_append_tid = 0;
InFlightTids m_in_flight_tids;
InFlightAppends m_in_flight_appends;
uint64_t m_object_bytes = 0;
bool m_overflowed = false;
bool m_object_closed = false;
bool m_object_closed_notify = false;
bufferlist m_prefetch_bl;
uint32_t m_in_flight_callbacks = 0;
ceph::condition_variable m_in_flight_callbacks_cond;
uint64_t m_in_flight_bytes = 0;
bool send_appends(bool force, ceph::ref_t<FutureImpl> flush_sentinel);
void handle_append_flushed(uint64_t tid, int r);
void append_overflowed();
void wake_up_flushes();
void notify_handler_unlock(std::unique_lock<ceph::mutex>& locker,
bool notify_overflowed);
};
} // namespace journal
#endif // CEPH_JOURNAL_OBJECT_RECORDER_H
| 4,394 | 26.298137 | 82 |
h
|
null |
ceph-main/src/journal/ReplayEntry.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_REPLAY_ENTRY_H
#define CEPH_JOURNAL_REPLAY_ENTRY_H
#include "include/int_types.h"
#include "include/buffer.h"
namespace journal {
class ReplayEntry {
public:
ReplayEntry() : m_commit_tid(0) {
}
ReplayEntry(const bufferlist &data, uint64_t commit_tid)
: m_data(data), m_commit_tid(commit_tid) {
}
inline const bufferlist &get_data() const {
return m_data;
}
inline uint64_t get_commit_tid() const {
return m_commit_tid;
}
private:
bufferlist m_data;
uint64_t m_commit_tid;
};
} // namespace journal
#endif // CEPH_JOURNAL_REPLAY_ENTRY_H
| 695 | 18.885714 | 70 |
h
|
null |
ceph-main/src/journal/ReplayHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_REPLAY_HANDLER_H
#define CEPH_JOURNAL_REPLAY_HANDLER_H
namespace journal {
struct ReplayHandler {
virtual void handle_entries_available() = 0;
virtual void handle_complete(int r) = 0;
virtual ~ReplayHandler() {}
};
} // namespace journal
#endif // CEPH_JOURNAL_REPLAY_HANDLER_H
| 408 | 21.722222 | 70 |
h
|
null |
ceph-main/src/journal/Settings.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_SETTINGS_H
#define CEPH_JOURNAL_SETTINGS_H
#include "include/int_types.h"
namespace journal {
struct Settings {
double commit_interval = 5; ///< commit position throttle (in secs)
uint64_t max_payload_bytes = 0; ///< 0 implies object size limit
int max_concurrent_object_sets = 0; ///< 0 implies no limit
std::set<std::string> ignored_laggy_clients;
///< clients that mustn't be disconnected
};
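// Example of tuning these knobs before constructing a Journaler (illustrative
// only; the values and the surrounding variables are assumptions):
//
//   journal::Settings settings;
//   settings.commit_interval = 1;          // throttle commit-position updates
//   settings.max_payload_bytes = 1 << 20;  // cap individual entries at 1 MiB
//   journal::Journaler journaler(ioctx, journal_id, client_id, settings,
//                                nullptr);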
} // namespace journal
#endif // # CEPH_JOURNAL_SETTINGS_H
| 637 | 28 | 79 |
h
|
null |
ceph-main/src/journal/Types.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_TYPES_H
#define CEPH_JOURNAL_TYPES_H
namespace journal {
struct CacheRebalanceHandler {
virtual ~CacheRebalanceHandler() {
}
virtual void handle_cache_rebalanced(uint64_t new_cache_bytes) = 0;
};
struct CacheManagerHandler {
virtual ~CacheManagerHandler() {
}
virtual void register_cache(const std::string &cache_name,
uint64_t min_size, uint64_t max_size,
CacheRebalanceHandler* handler) = 0;
virtual void unregister_cache(const std::string &cache_name) = 0;
};
} // namespace journal
#endif // # CEPH_JOURNAL_TYPES_H
| 720 | 23.862069 | 70 |
h
|
null |
ceph-main/src/journal/Utils.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/Utils.h"
#include "include/Context.h"
#include "include/stringify.h"
namespace journal {
namespace utils {
std::string get_object_name(const std::string &prefix, uint64_t number) {
return prefix + stringify(number);
}
std::string unique_lock_name(const std::string &name, void *address) {
return name + " (" + stringify(address) + ")";
}
void rados_ctx_callback(rados_completion_t c, void *arg) {
Context *comp = reinterpret_cast<Context *>(arg);
comp->complete(rados_aio_get_return_value(c));
}
} // namespace utils
} // namespace journal
| 670 | 24.807692 | 73 |
cc
|
null |
ceph-main/src/journal/Utils.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_UTILS_H
#define CEPH_JOURNAL_UTILS_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include <string>
namespace journal {
namespace utils {
namespace detail {
template <typename M>
struct C_AsyncCallback : public Context {
M journal_metadata;
Context *on_finish;
C_AsyncCallback(M journal_metadata, Context *on_finish)
: journal_metadata(journal_metadata), on_finish(on_finish) {
}
void finish(int r) override {
journal_metadata->queue(on_finish, r);
}
};
} // namespace detail
template <typename T, void(T::*MF)(int)>
void rados_state_callback(rados_completion_t c, void *arg) {
T *obj = reinterpret_cast<T*>(arg);
int r = rados_aio_get_return_value(c);
(obj->*MF)(r);
}
std::string get_object_name(const std::string &prefix, uint64_t number);
std::string unique_lock_name(const std::string &name, void *address);
void rados_ctx_callback(rados_completion_t c, void *arg);
template <typename M>
Context *create_async_context_callback(M journal_metadata, Context *on_finish) {
// use async callback to acquire a clean lock context
return new detail::C_AsyncCallback<M>(journal_metadata, on_finish);
}
} // namespace utils
} // namespace journal
#endif // CEPH_JOURNAL_UTILS_H
| 1,389 | 24.272727 | 80 |
h
|
null |
ceph-main/src/json_spirit/json_spirit.h
|
#ifndef JSON_SPIRIT
#define JSON_SPIRIT
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include "json_spirit_reader.h"
#include "json_spirit_writer.h"
#include "json_spirit_utils.h"
#endif
| 395 | 19.842105 | 71 |
h
|
null |
ceph-main/src/json_spirit/json_spirit_error_position.h
|
#ifndef JSON_SPIRIT_ERROR_POSITION
#define JSON_SPIRIT_ERROR_POSITION
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include <string>
namespace json_spirit
{
// An Error_position exception is thrown by the "read_or_throw" functions below on finding an error.
// Note the "read_or_throw" functions are around 3 times slower than the standard functions "read"
// functions that return a bool.
//
struct Error_position
{
Error_position();
Error_position( unsigned int line, unsigned int column, const std::string& reason );
bool operator==( const Error_position& lhs ) const;
unsigned int line_;
unsigned int column_;
std::string reason_;
};
inline Error_position::Error_position()
: line_( 0 )
, column_( 0 )
{
}
inline Error_position::Error_position( unsigned int line, unsigned int column, const std::string& reason )
: line_( line )
, column_( column )
, reason_( reason )
{
}
inline bool Error_position::operator==( const Error_position& lhs ) const
{
if( this == &lhs ) return true;
return ( reason_ == lhs.reason_ ) &&
( line_ == lhs.line_ ) &&
( column_ == lhs.column_ );
}
}
#endif
| 1,461 | 25.581818 | 110 |
h
|
null |
ceph-main/src/json_spirit/json_spirit_reader.cpp
|
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#include "json_spirit_reader.h"
#include "json_spirit_reader_template.h"
using namespace json_spirit;
#ifdef JSON_SPIRIT_VALUE_ENABLED
bool json_spirit::read( const std::string& s, Value& value )
{
return read_string( s, value );
}
void json_spirit::read_or_throw( const std::string& s, Value& value )
{
read_string_or_throw( s, value );
}
bool json_spirit::read( std::istream& is, Value& value )
{
return read_stream( is, value );
}
void json_spirit::read_or_throw( std::istream& is, Value& value )
{
read_stream_or_throw( is, value );
}
bool json_spirit::read( std::string::const_iterator& begin, std::string::const_iterator end, Value& value )
{
return read_range( begin, end, value );
}
void json_spirit::read_or_throw( std::string::const_iterator& begin, std::string::const_iterator end, Value& value )
{
begin = read_range_or_throw( begin, end, value );
}
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
bool json_spirit::read( const std::wstring& s, wValue& value )
{
return read_string( s, value );
}
void json_spirit::read_or_throw( const std::wstring& s, wValue& value )
{
read_string_or_throw( s, value );
}
bool json_spirit::read( std::wistream& is, wValue& value )
{
return read_stream( is, value );
}
void json_spirit::read_or_throw( std::wistream& is, wValue& value )
{
read_stream_or_throw( is, value );
}
bool json_spirit::read( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wValue& value )
{
return read_range( begin, end, value );
}
void json_spirit::read_or_throw( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wValue& value )
{
begin = read_range_or_throw( begin, end, value );
}
#endif
#ifdef JSON_SPIRIT_MVALUE_ENABLED
bool json_spirit::read( const std::string& s, mValue& value )
{
return read_string( s, value );
}
void json_spirit::read_or_throw( const std::string& s, mValue& value )
{
read_string_or_throw( s, value );
}
bool json_spirit::read( std::istream& is, mValue& value )
{
return read_stream( is, value );
}
void json_spirit::read_or_throw( std::istream& is, mValue& value )
{
read_stream_or_throw( is, value );
}
bool json_spirit::read( std::string::const_iterator& begin, std::string::const_iterator end, mValue& value )
{
return read_range( begin, end, value );
}
void json_spirit::read_or_throw( std::string::const_iterator& begin, std::string::const_iterator end, mValue& value )
{
begin = read_range_or_throw( begin, end, value );
}
#endif
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
bool json_spirit::read( const std::wstring& s, wmValue& value )
{
return read_string( s, value );
}
void json_spirit::read_or_throw( const std::wstring& s, wmValue& value )
{
read_string_or_throw( s, value );
}
bool json_spirit::read( std::wistream& is, wmValue& value )
{
return read_stream( is, value );
}
void json_spirit::read_or_throw( std::wistream& is, wmValue& value )
{
read_stream_or_throw( is, value );
}
bool json_spirit::read( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wmValue& value )
{
return read_range( begin, end, value );
}
void json_spirit::read_or_throw( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wmValue& value )
{
begin = read_range_or_throw( begin, end, value );
}
#endif
| 3,969 | 27.768116 | 124 |
cpp
|
null |
ceph-main/src/json_spirit/json_spirit_reader.h
|
#ifndef JSON_SPIRIT_READER
#define JSON_SPIRIT_READER
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include "json_spirit_error_position.h"
#include <iostream>
namespace json_spirit
{
// functions to read JSON values
#ifdef JSON_SPIRIT_VALUE_ENABLED
bool read( const std::string& s, Value& value );
bool read( std::istream& is, Value& value );
bool read( std::string::const_iterator& begin, std::string::const_iterator end, Value& value );
void read_or_throw( const std::string& s, Value& value );
void read_or_throw( std::istream& is, Value& value );
void read_or_throw( std::string::const_iterator& begin, std::string::const_iterator end, Value& value );
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
bool read( const std::wstring& s, wValue& value );
bool read( std::wistream& is, wValue& value );
bool read( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wValue& value );
void read_or_throw( const std::wstring& s, wValue& value );
void read_or_throw( std::wistream& is, wValue& value );
void read_or_throw( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wValue& value );
#endif
#ifdef JSON_SPIRIT_MVALUE_ENABLED
bool read( const std::string& s, mValue& value );
bool read( std::istream& is, mValue& value );
bool read( std::string::const_iterator& begin, std::string::const_iterator end, mValue& value );
void read_or_throw( const std::string& s, mValue& value );
void read_or_throw( std::istream& is, mValue& value );
void read_or_throw( std::string::const_iterator& begin, std::string::const_iterator end, mValue& value );
#endif
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
bool read( const std::wstring& s, wmValue& value );
bool read( std::wistream& is, wmValue& value );
bool read( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wmValue& value );
void read_or_throw( const std::wstring& s, wmValue& value );
void read_or_throw( std::wistream& is, wmValue& value );
void read_or_throw( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wmValue& value );
#endif
}
#endif
| 2,509 | 38.84127 | 112 |
h
|
null |
ceph-main/src/json_spirit/json_spirit_reader_template.h
|
#ifndef JSON_SPIRIT_READER_TEMPLATE
#define JSON_SPIRIT_READER_TEMPLATE
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include "json_spirit_error_position.h"
#include "common/utf8.h"
#define BOOST_SPIRIT_THREADSAFE // enables multithreaded use; requires linking to boost.thread
#include <boost/bind/bind.hpp>
#include <boost/function.hpp>
#include <boost/version.hpp>
#include <boost/spirit/include/classic_core.hpp>
#include <boost/spirit/include/classic_confix.hpp>
#include <boost/spirit/include/classic_escape_char.hpp>
#include <boost/spirit/include/classic_multi_pass.hpp>
#include <boost/spirit/include/classic_position_iterator.hpp>
#include "include/ceph_assert.h"
namespace json_spirit
{
namespace spirit_namespace = boost::spirit::classic;
const spirit_namespace::int_parser < boost::int64_t > int64_p = spirit_namespace::int_parser < boost::int64_t >();
const spirit_namespace::uint_parser< boost::uint64_t > uint64_p = spirit_namespace::uint_parser< boost::uint64_t >();
template< class Iter_type >
bool is_eq( Iter_type first, Iter_type last, const char* c_str )
{
for( Iter_type i = first; i != last; ++i, ++c_str )
{
if( *c_str == 0 ) return false;
if( *i != *c_str ) return false;
}
return true;
}
template< class Char_type >
Char_type hex_to_num( const Char_type c )
{
if( ( c >= '0' ) && ( c <= '9' ) ) return c - '0';
if( ( c >= 'a' ) && ( c <= 'f' ) ) return c - 'a' + 10;
if( ( c >= 'A' ) && ( c <= 'F' ) ) return c - 'A' + 10;
return 0;
}
template< class Char_type, class Iter_type >
Char_type hex_str_to_char( Iter_type& begin )
{
const Char_type c1( *( ++begin ) );
const Char_type c2( *( ++begin ) );
return ( hex_to_num( c1 ) << 4 ) + hex_to_num( c2 );
}
template< class String_type, class Iter_type >
String_type unicode_str_to_utf8( Iter_type& begin );
template<>
std::string unicode_str_to_utf8( std::string::const_iterator & begin )
{
typedef std::string::value_type Char_type;
const Char_type c1( *( ++begin ) );
const Char_type c2( *( ++begin ) );
const Char_type c3( *( ++begin ) );
const Char_type c4( *( ++begin ) );
unsigned long uc = ( hex_to_num( c1 ) << 12 ) +
( hex_to_num( c2 ) << 8 ) +
( hex_to_num( c3 ) << 4 ) +
hex_to_num( c4 );
unsigned char buf[7]; // MAX_UTF8_SZ is 6 (see src/common/utf8.c)
int r = encode_utf8(uc, buf);
if (r >= 0) {
return std::string(reinterpret_cast<char *>(buf), r);
}
return std::string("_");
}
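// Note: the four hex digits are decoded as a single 16-bit code unit and
// encoded directly to UTF-8; surrogate pairs spanning two \u escapes are not
// recombined here, and an encoding failure falls back to "_".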
template< class String_type >
void append_esc_char_and_incr_iter( String_type& s,
typename String_type::const_iterator& begin,
typename String_type::const_iterator end )
{
typedef typename String_type::value_type Char_type;
const Char_type c2( *begin );
switch( c2 )
{
case 't': s += '\t'; break;
case 'b': s += '\b'; break;
case 'f': s += '\f'; break;
case 'n': s += '\n'; break;
case 'r': s += '\r'; break;
case '\\': s += '\\'; break;
case '/': s += '/'; break;
case '"': s += '"'; break;
case 'x':
{
if( end - begin >= 3 ) // expecting "xHH..."
{
s += hex_str_to_char< Char_type >( begin );
}
break;
}
case 'u':
{
if( end - begin >= 5 ) // expecting "uHHHH..."
{
s += unicode_str_to_utf8< String_type >( begin );
}
break;
}
}
}
template< class String_type >
String_type substitute_esc_chars( typename String_type::const_iterator begin,
typename String_type::const_iterator end )
{
typedef typename String_type::const_iterator Iter_type;
if( end - begin < 2 ) return String_type( begin, end );
String_type result;
result.reserve( end - begin );
const Iter_type end_minus_1( end - 1 );
Iter_type substr_start = begin;
Iter_type i = begin;
for( ; i < end_minus_1; ++i )
{
if( *i == '\\' )
{
result.append( substr_start, i );
++i; // skip the '\'
append_esc_char_and_incr_iter( result, i, end );
substr_start = i + 1;
}
}
result.append( substr_start, end );
return result;
}
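// Worked example (illustrative only): given the raw characters
//   a  \  t  b  \  u  0  0  4  1
// i.e. the escaped text "a\tb\u0041", the function returns the four
// characters 'a', <TAB>, 'b', 'A'.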
template< class String_type >
String_type get_str_( typename String_type::const_iterator begin,
typename String_type::const_iterator end )
{
ceph_assert( end - begin >= 2 );
typedef typename String_type::const_iterator Iter_type;
Iter_type str_without_quotes( ++begin );
Iter_type end_without_quotes( --end );
return substitute_esc_chars< String_type >( str_without_quotes, end_without_quotes );
}
inline std::string get_str( std::string::const_iterator begin, std::string::const_iterator end )
{
return get_str_< std::string >( begin, end );
}
// Need this guard else it tries to instantiate unicode_str_to_utf8 with a
// std::wstring, which isn't presently implemented
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
inline std::wstring get_str( std::wstring::const_iterator begin, std::wstring::const_iterator end )
{
return get_str_< std::wstring >( begin, end );
}
#endif
template< class String_type, class Iter_type >
String_type get_str( Iter_type begin, Iter_type end )
{
const String_type tmp( begin, end ); // convert multipass iterators to string iterators
return get_str( tmp.begin(), tmp.end() );
}
using namespace boost::placeholders;
// this class's methods get called by the spirit parser, resulting
// in the creation of a JSON object or array
//
// NB Iter_type could be a std::string iterator, wstring iterator, a position iterator or a multipass iterator
//
template< class Value_type, class Iter_type >
class Semantic_actions
{
public:
typedef typename Value_type::Config_type Config_type;
typedef typename Config_type::String_type String_type;
typedef typename Config_type::Object_type Object_type;
typedef typename Config_type::Array_type Array_type;
typedef typename String_type::value_type Char_type;
Semantic_actions( Value_type& value )
: value_( value )
, current_p_( 0 )
{
}
void begin_obj( Char_type c )
{
ceph_assert( c == '{' );
begin_compound< Object_type >();
}
void end_obj( Char_type c )
{
ceph_assert( c == '}' );
end_compound();
}
void begin_array( Char_type c )
{
ceph_assert( c == '[' );
begin_compound< Array_type >();
}
void end_array( Char_type c )
{
ceph_assert( c == ']' );
end_compound();
}
void new_name( Iter_type begin, Iter_type end )
{
ceph_assert( current_p_->type() == obj_type );
name_ = get_str< String_type >( begin, end );
}
void new_str( Iter_type begin, Iter_type end )
{
add_to_current( get_str< String_type >( begin, end ) );
}
void new_true( Iter_type begin, Iter_type end )
{
ceph_assert( is_eq( begin, end, "true" ) );
add_to_current( true );
}
void new_false( Iter_type begin, Iter_type end )
{
ceph_assert( is_eq( begin, end, "false" ) );
add_to_current( false );
}
void new_null( Iter_type begin, Iter_type end )
{
ceph_assert( is_eq( begin, end, "null" ) );
add_to_current( Value_type() );
}
void new_int( boost::int64_t i )
{
add_to_current( i );
}
void new_uint64( boost::uint64_t ui )
{
add_to_current( ui );
}
void new_real( double d )
{
add_to_current( d );
}
private:
Semantic_actions& operator=( const Semantic_actions& );
// to prevent "assignment operator could not be generated" warning
Value_type* add_first( const Value_type& value )
{
ceph_assert( current_p_ == 0 );
value_ = value;
current_p_ = &value_;
return current_p_;
}
template< class Array_or_obj >
void begin_compound()
{
if( current_p_ == 0 )
{
add_first( Array_or_obj() );
}
else
{
stack_.push_back( current_p_ );
Array_or_obj new_array_or_obj; // avoid copy by building new array or object in place
current_p_ = add_to_current( new_array_or_obj );
}
}
void end_compound()
{
if( current_p_ != &value_ )
{
current_p_ = stack_.back();
stack_.pop_back();
}
}
Value_type* add_to_current( const Value_type& value )
{
if( current_p_ == 0 )
{
return add_first( value );
}
else if( current_p_->type() == array_type )
{
current_p_->get_array().push_back( value );
            return &current_p_->get_array().back();
}
ceph_assert( current_p_->type() == obj_type );
return &Config_type::add( current_p_->get_obj(), name_, value );
}
Value_type& value_; // this is the object or array that is being created
Value_type* current_p_; // the child object or array that is currently being constructed
std::vector< Value_type* > stack_; // previous child objects and arrays
String_type name_; // of current name/value pair
};
template< typename Iter_type >
void throw_error( spirit_namespace::position_iterator< Iter_type > i, const std::string& reason )
{
throw Error_position( i.get_position().line, i.get_position().column, reason );
}
template< typename Iter_type >
void throw_error( Iter_type i, const std::string& reason )
{
throw reason;
}
// the spirit grammar
//
template< class Value_type, class Iter_type >
class Json_grammer : public spirit_namespace::grammar< Json_grammer< Value_type, Iter_type > >
{
public:
typedef Semantic_actions< Value_type, Iter_type > Semantic_actions_t;
Json_grammer( Semantic_actions_t& semantic_actions )
: actions_( semantic_actions )
{
}
static void throw_not_value( Iter_type begin, Iter_type end )
{
throw_error( begin, "not a value" );
}
static void throw_not_array( Iter_type begin, Iter_type end )
{
throw_error( begin, "not an array" );
}
static void throw_not_object( Iter_type begin, Iter_type end )
{
throw_error( begin, "not an object" );
}
static void throw_not_pair( Iter_type begin, Iter_type end )
{
throw_error( begin, "not a pair" );
}
static void throw_not_colon( Iter_type begin, Iter_type end )
{
throw_error( begin, "no colon in pair" );
}
static void throw_not_string( Iter_type begin, Iter_type end )
{
throw_error( begin, "not a string" );
}
template< typename ScannerT >
class definition
{
public:
definition( const Json_grammer& self )
{
using namespace spirit_namespace;
typedef typename Value_type::String_type::value_type Char_type;
// first we convert the semantic action class methods to functors with the
// parameter signature expected by spirit
typedef boost::function< void( Char_type ) > Char_action;
typedef boost::function< void( Iter_type, Iter_type ) > Str_action;
typedef boost::function< void( double ) > Real_action;
typedef boost::function< void( boost::int64_t ) > Int_action;
typedef boost::function< void( boost::uint64_t ) > Uint64_action;
Char_action begin_obj ( boost::bind( &Semantic_actions_t::begin_obj, &self.actions_, _1 ) );
Char_action end_obj ( boost::bind( &Semantic_actions_t::end_obj, &self.actions_, _1 ) );
Char_action begin_array( boost::bind( &Semantic_actions_t::begin_array, &self.actions_, _1 ) );
Char_action end_array ( boost::bind( &Semantic_actions_t::end_array, &self.actions_, _1 ) );
Str_action new_name ( boost::bind( &Semantic_actions_t::new_name, &self.actions_, _1, _2 ) );
Str_action new_str ( boost::bind( &Semantic_actions_t::new_str, &self.actions_, _1, _2 ) );
Str_action new_true ( boost::bind( &Semantic_actions_t::new_true, &self.actions_, _1, _2 ) );
Str_action new_false ( boost::bind( &Semantic_actions_t::new_false, &self.actions_, _1, _2 ) );
Str_action new_null ( boost::bind( &Semantic_actions_t::new_null, &self.actions_, _1, _2 ) );
Real_action new_real ( boost::bind( &Semantic_actions_t::new_real, &self.actions_, _1 ) );
Int_action new_int ( boost::bind( &Semantic_actions_t::new_int, &self.actions_, _1 ) );
Uint64_action new_uint64 ( boost::bind( &Semantic_actions_t::new_uint64, &self.actions_, _1 ) );
// actual grammar
json_
= value_ | eps_p[ &throw_not_value ]
;
value_
= string_[ new_str ]
| number_
| object_
| array_
| str_p( "true" ) [ new_true ]
| str_p( "false" )[ new_false ]
| str_p( "null" ) [ new_null ]
;
object_
= ch_p('{')[ begin_obj ]
>> !members_
>> ( ch_p('}')[ end_obj ] | eps_p[ &throw_not_object ] )
;
members_
= pair_ >> *( ',' >> pair_ | ch_p(',') )
;
pair_
= string_[ new_name ]
>> ( ':' | eps_p[ &throw_not_colon ] )
>> ( value_ | eps_p[ &throw_not_value ] )
;
array_
= ch_p('[')[ begin_array ]
>> !elements_
>> ( ch_p(']')[ end_array ] | eps_p[ &throw_not_array ] )
;
elements_
= value_ >> *( ',' >> value_ | ch_p(',') )
;
string_
= lexeme_d // this causes white space inside a string to be retained
[
confix_p
(
'"',
*lex_escape_ch_p,
'"'
)
]
;
number_
= strict_real_p[ new_real ]
| int64_p [ new_int ]
| uint64_p [ new_uint64 ]
;
}
spirit_namespace::rule< ScannerT > json_, object_, members_, pair_, array_, elements_, value_, string_, number_;
const spirit_namespace::rule< ScannerT >& start() const { return json_; }
};
private:
Json_grammer& operator=( const Json_grammer& ); // to prevent "assignment operator could not be generated" warning
Semantic_actions_t& actions_;
};
template< class Iter_type, class Value_type >
void add_posn_iter_and_read_range_or_throw( Iter_type begin, Iter_type end, Value_type& value )
{
typedef spirit_namespace::position_iterator< Iter_type > Posn_iter_t;
const Posn_iter_t posn_begin( begin, end );
const Posn_iter_t posn_end( end, end );
read_range_or_throw( posn_begin, posn_end, value );
}
template< class Istream_type >
struct Multi_pass_iters
{
typedef typename Istream_type::char_type Char_type;
typedef std::istream_iterator< Char_type, Char_type > istream_iter;
typedef spirit_namespace::multi_pass< istream_iter > Mp_iter;
Multi_pass_iters( Istream_type& is )
{
is.unsetf( std::ios::skipws );
begin_ = spirit_namespace::make_multi_pass( istream_iter( is ) );
end_ = spirit_namespace::make_multi_pass( istream_iter() );
}
Mp_iter begin_;
Mp_iter end_;
};
// reads a JSON Value from a pair of input iterators throwing an exception on invalid input, e.g.
//
// string::const_iterator start = str.begin();
// const string::const_iterator next = read_range_or_throw( str.begin(), str.end(), value );
//
// The iterator 'next' will point to the character past the
// last one read.
//
template< class Iter_type, class Value_type >
Iter_type read_range_or_throw( Iter_type begin, Iter_type end, Value_type& value )
{
Semantic_actions< Value_type, Iter_type > semantic_actions( value );
const spirit_namespace::parse_info< Iter_type > info =
spirit_namespace::parse( begin, end,
Json_grammer< Value_type, Iter_type >( semantic_actions ),
spirit_namespace::space_p );
if( !info.hit )
{
ceph_assert( false ); // in theory exception should already have been thrown
throw_error( info.stop, "error" );
}
return info.stop;
}
// reads a JSON Value from a pair of input iterators, e.g.
//
// string::const_iterator start = str.begin();
// const bool success = read_string( start, str.end(), value );
//
// The iterator 'start' will point to the character past the
// last one read.
//
template< class Iter_type, class Value_type >
bool read_range( Iter_type& begin, Iter_type end, Value_type& value )
{
try
{
begin = read_range_or_throw( begin, end, value );
return true;
}
catch( ... )
{
return false;
}
}
// reads a JSON Value from a string, e.g.
//
// const bool success = read_string( str, value );
//
template< class String_type, class Value_type >
bool read_string( const String_type& s, Value_type& value )
{
typename String_type::const_iterator begin = s.begin();
return read_range( begin, s.end(), value );
}
// reads a JSON Value from a string throwing an exception on invalid input, e.g.
//
// read_string_or_throw( is, value );
//
template< class String_type, class Value_type >
void read_string_or_throw( const String_type& s, Value_type& value )
{
add_posn_iter_and_read_range_or_throw( s.begin(), s.end(), value );
}
// reads a JSON Value from a stream, e.g.
//
// const bool success = read_stream( is, value );
//
template< class Istream_type, class Value_type >
bool read_stream( Istream_type& is, Value_type& value )
{
Multi_pass_iters< Istream_type > mp_iters( is );
return read_range( mp_iters.begin_, mp_iters.end_, value );
}
// reads a JSON Value from a stream throwing an exception on invalid input, e.g.
//
// read_stream_or_throw( is, value );
//
template< class Istream_type, class Value_type >
void read_stream_or_throw( Istream_type& is, Value_type& value )
{
const Multi_pass_iters< Istream_type > mp_iters( is );
add_posn_iter_and_read_range_or_throw( mp_iters.begin_, mp_iters.end_, value );
}
}
#endif
| 21,383 | 31.302115 | 124 |
h
|
null |
ceph-main/src/json_spirit/json_spirit_stream_reader.h
|
#ifndef JSON_SPIRIT_READ_STREAM
#define JSON_SPIRIT_READ_STREAM
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_reader_template.h"
namespace json_spirit
{
    // these classes allow you to read multiple top level contiguous values from a stream;
    // the normal stream read functions have a bug that prevents multiple top level values
    // from being read unless they are separated by spaces
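    //
    // an illustrative usage sketch (assuming JSON_SPIRIT_VALUE_ENABLED so that
    // the Value typedef is available), e.g.
    //
    //    std::istringstream is( "{\"a\":1}{\"b\":2}" );
    //    Stream_reader< std::istringstream, Value > reader( is );
    //    Value value;
    //    while( reader.read_next( value ) )
    //    {
    //        // use value
    //    }
    //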
template< class Istream_type, class Value_type >
class Stream_reader
{
public:
Stream_reader( Istream_type& is )
: iters_( is )
{
}
bool read_next( Value_type& value )
{
return read_range( iters_.begin_, iters_.end_, value );
}
private:
typedef Multi_pass_iters< Istream_type > Mp_iters;
Mp_iters iters_;
};
template< class Istream_type, class Value_type >
class Stream_reader_thrower
{
public:
Stream_reader_thrower( Istream_type& is )
: iters_( is )
, posn_begin_( iters_.begin_, iters_.end_ )
, posn_end_( iters_.end_, iters_.end_ )
{
}
void read_next( Value_type& value )
{
posn_begin_ = read_range_or_throw( posn_begin_, posn_end_, value );
}
private:
typedef Multi_pass_iters< Istream_type > Mp_iters;
typedef spirit_namespace::position_iterator< typename Mp_iters::Mp_iter > Posn_iter_t;
Mp_iters iters_;
Posn_iter_t posn_begin_, posn_end_;
};
}
#endif
| 1,724 | 23.295775 | 94 |
h
|
null |
ceph-main/src/json_spirit/json_spirit_utils.h
|
#ifndef JSON_SPIRIT_UTILS
#define JSON_SPIRIT_UTILS
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include <map>
namespace json_spirit
{
template< class Obj_t, class Map_t >
void obj_to_map( const Obj_t& obj, Map_t& mp_obj )
{
mp_obj.clear();
for( typename Obj_t::const_iterator i = obj.begin(); i != obj.end(); ++i )
{
mp_obj[ i->name_ ] = i->value_;
}
}
template< class Obj_t, class Map_t >
void map_to_obj( const Map_t& mp_obj, Obj_t& obj )
{
obj.clear();
for( typename Map_t::const_iterator i = mp_obj.begin(); i != mp_obj.end(); ++i )
{
obj.push_back( typename Obj_t::value_type( i->first, i->second ) );
}
}
#ifdef JSON_SPIRIT_VALUE_ENABLED
typedef std::map< std::string, Value > Mapped_obj;
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
typedef std::map< std::wstring, wValue > wMapped_obj;
#endif
template< class Object_type, class String_type >
const typename Object_type::value_type::Value_type& find_value( const Object_type& obj, const String_type& name )
{
for( typename Object_type::const_iterator i = obj.begin(); i != obj.end(); ++i )
{
if( i->name_ == name )
{
return i->value_;
}
}
return Object_type::value_type::Value_type::null;
}
}
#endif
| 1,644 | 24.703125 | 117 |
h
|
null |
ceph-main/src/json_spirit/json_spirit_value.cpp
|
/* Copyright (c) 2007 John W Wilkinson
This source code can be used for any purpose as long as
this comment is retained. */
// json spirit version 2.00
#include "json_spirit_value.h"
| 192 | 20.444444 | 58 |
cpp
|
null |
ceph-main/src/json_spirit/json_spirit_value.h
|
#ifndef JSON_SPIRIT_VALUE
#define JSON_SPIRIT_VALUE
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include <vector>
#include <map>
#include <string>
#include <sstream>
#include <stdexcept>
#include <boost/config.hpp>
#include <boost/cstdint.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/variant.hpp>
// comment out the value types you don't need to reduce build times and intermediate file sizes
#define JSON_SPIRIT_VALUE_ENABLED
//#define JSON_SPIRIT_WVALUE_ENABLED
#define JSON_SPIRIT_MVALUE_ENABLED
//#define JSON_SPIRIT_WMVALUE_ENABLED
namespace json_spirit
{
enum Value_type{ obj_type, array_type, str_type, bool_type, int_type, real_type, null_type };
struct Null{};
template< class Config > // Config determines whether the value uses std::string or std::wstring and
// whether JSON Objects are represented as vectors or maps
class Value_impl
{
public:
typedef Config Config_type;
typedef typename Config::String_type String_type;
typedef typename Config::Object_type Object;
typedef typename Config::Array_type Array;
typedef typename String_type::const_pointer Const_str_ptr; // eg const char*
Value_impl(); // creates null value
Value_impl( Const_str_ptr value );
Value_impl( const String_type& value );
Value_impl( const Object& value );
Value_impl( const Array& value );
Value_impl( bool value );
Value_impl( int value );
Value_impl( boost::int64_t value );
Value_impl( boost::uint64_t value );
Value_impl( double value );
template< class Iter >
Value_impl( Iter first, Iter last ); // constructor from containers, e.g. std::vector or std::list
template< BOOST_VARIANT_ENUM_PARAMS( typename T ) >
Value_impl( const boost::variant< BOOST_VARIANT_ENUM_PARAMS(T) >& variant ); // constructor for compatible variant types
Value_impl( const Value_impl& other );
bool operator==( const Value_impl& lhs ) const;
Value_impl& operator=( const Value_impl& lhs );
Value_type type() const;
bool is_uint64() const;
bool is_null() const;
const String_type& get_str() const;
const Object& get_obj() const;
const Array& get_array() const;
bool get_bool() const;
int get_int() const;
boost::int64_t get_int64() const;
boost::uint64_t get_uint64() const;
double get_real() const;
Object& get_obj();
Array& get_array();
template< typename T > T get_value() const; // example usage: int i = value.get_value< int >();
// or double d = value.get_value< double >();
static const Value_impl null;
private:
void check_type( const Value_type vtype ) const;
typedef boost::variant< boost::recursive_wrapper< Object >, boost::recursive_wrapper< Array >,
String_type, bool, boost::int64_t, double, Null, boost::uint64_t > Variant;
Variant v_;
class Variant_converter_visitor : public boost::static_visitor< Variant >
{
public:
template< typename T, typename A, template< typename, typename > class Cont >
Variant operator()( const Cont< T, A >& cont ) const
{
return Array( cont.begin(), cont.end() );
}
Variant operator()( int i ) const
{
return static_cast< boost::int64_t >( i );
}
template<class T>
Variant operator()( const T& t ) const
{
return t;
}
};
};
// vector objects
template< class Config >
struct Pair_impl
{
typedef typename Config::String_type String_type;
typedef typename Config::Value_type Value_type;
Pair_impl()
{
}
Pair_impl( const String_type& name, const Value_type& value );
bool operator==( const Pair_impl& lhs ) const;
String_type name_;
Value_type value_;
};
#if defined( JSON_SPIRIT_VALUE_ENABLED ) || defined( JSON_SPIRIT_WVALUE_ENABLED )
template< class String >
struct Config_vector
{
typedef String String_type;
typedef Value_impl< Config_vector > Value_type;
typedef Pair_impl < Config_vector > Pair_type;
typedef std::vector< Value_type > Array_type;
typedef std::vector< Pair_type > Object_type;
static Value_type& add( Object_type& obj, const String_type& name, const Value_type& value )
{
obj.push_back( Pair_type( name , value ) );
return obj.back().value_;
}
static String_type get_name( const Pair_type& pair )
{
return pair.name_;
}
static Value_type get_value( const Pair_type& pair )
{
return pair.value_;
}
};
#endif
// typedefs for ASCII
#ifdef JSON_SPIRIT_VALUE_ENABLED
typedef Config_vector< std::string > Config;
typedef Config::Value_type Value;
typedef Config::Pair_type Pair;
typedef Config::Object_type Object;
typedef Config::Array_type Array;
#endif
// typedefs for Unicode
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
typedef Config_vector< std::wstring > wConfig;
typedef wConfig::Value_type wValue;
typedef wConfig::Pair_type wPair;
typedef wConfig::Object_type wObject;
typedef wConfig::Array_type wArray;
#endif
// map objects
#if defined( JSON_SPIRIT_MVALUE_ENABLED ) || defined( JSON_SPIRIT_WMVALUE_ENABLED )
template< class String >
struct Config_map
{
typedef String String_type;
typedef Value_impl< Config_map > Value_type;
typedef std::vector< Value_type > Array_type;
typedef std::map< String_type, Value_type > Object_type;
typedef std::pair< String_type, Value_type > Pair_type;
static Value_type& add( Object_type& obj, const String_type& name, const Value_type& value )
{
return obj[ name ] = value;
}
static String_type get_name( const Pair_type& pair )
{
return pair.first;
}
static Value_type get_value( const Pair_type& pair )
{
return pair.second;
}
};
#endif
// typedefs for ASCII
#ifdef JSON_SPIRIT_MVALUE_ENABLED
typedef Config_map< std::string > mConfig;
typedef mConfig::Value_type mValue;
typedef mConfig::Object_type mObject;
typedef mConfig::Array_type mArray;
#endif
// typedefs for Unicode
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
typedef Config_map< std::wstring > wmConfig;
typedef wmConfig::Value_type wmValue;
typedef wmConfig::Object_type wmObject;
typedef wmConfig::Array_type wmArray;
#endif
///////////////////////////////////////////////////////////////////////////////////////////////
//
// implementation
inline bool operator==( const Null&, const Null& )
{
return true;
}
template< class Config >
const Value_impl< Config > Value_impl< Config >::null;
template< class Config >
Value_impl< Config >::Value_impl()
: v_( Null() )
{
}
template< class Config >
Value_impl< Config >::Value_impl( const Const_str_ptr value )
: v_( String_type( value ) )
{
}
template< class Config >
Value_impl< Config >::Value_impl( const String_type& value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( const Object& value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( const Array& value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( bool value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( int value )
: v_( static_cast< boost::int64_t >( value ) )
{
}
template< class Config >
Value_impl< Config >::Value_impl( boost::int64_t value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( boost::uint64_t value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( double value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( const Value_impl< Config >& other )
: v_( other.v_ )
{
}
template< class Config >
template< class Iter >
Value_impl< Config >::Value_impl( Iter first, Iter last )
: v_( Array( first, last ) )
{
}
template< class Config >
template< BOOST_VARIANT_ENUM_PARAMS( typename T ) >
Value_impl< Config >::Value_impl( const boost::variant< BOOST_VARIANT_ENUM_PARAMS(T) >& variant )
: v_( boost::apply_visitor( Variant_converter_visitor(), variant) )
{
}
template< class Config >
Value_impl< Config >& Value_impl< Config >::operator=( const Value_impl& lhs )
{
Value_impl tmp( lhs );
std::swap( v_, tmp.v_ );
return *this;
}
template< class Config >
bool Value_impl< Config >::operator==( const Value_impl& lhs ) const
{
if( this == &lhs ) return true;
if( type() != lhs.type() ) return false;
return v_ == lhs.v_;
}
template< class Config >
Value_type Value_impl< Config >::type() const
{
if( is_uint64() )
{
return int_type;
}
return static_cast< Value_type >( v_.which() );
}
template< class Config >
bool Value_impl< Config >::is_uint64() const
{
return v_.which() == null_type + 1;
}
template< class Config >
bool Value_impl< Config >::is_null() const
{
return type() == null_type;
}
template< class Config >
void Value_impl< Config >::check_type( const Value_type vtype ) const
{
if( type() != vtype )
{
std::ostringstream os;
os << "value type is " << type() << " not " << vtype;
throw std::runtime_error( os.str() );
}
}
template< class Config >
const typename Config::String_type& Value_impl< Config >::get_str() const
{
check_type( str_type );
return *boost::get< String_type >( &v_ );
}
template< class Config >
const typename Value_impl< Config >::Object& Value_impl< Config >::get_obj() const
{
check_type( obj_type );
return *boost::get< Object >( &v_ );
}
template< class Config >
const typename Value_impl< Config >::Array& Value_impl< Config >::get_array() const
{
check_type( array_type );
return *boost::get< Array >( &v_ );
}
template< class Config >
bool Value_impl< Config >::get_bool() const
{
check_type( bool_type );
return boost::get< bool >( v_ );
}
template< class Config >
int Value_impl< Config >::get_int() const
{
check_type( int_type );
return static_cast< int >( get_int64() );
}
template< class Config >
boost::int64_t Value_impl< Config >::get_int64() const
{
check_type( int_type );
if( is_uint64() )
{
return static_cast< boost::int64_t >( get_uint64() );
}
return boost::get< boost::int64_t >( v_ );
}
template< class Config >
boost::uint64_t Value_impl< Config >::get_uint64() const
{
check_type( int_type );
if( !is_uint64() )
{
return static_cast< boost::uint64_t >( get_int64() );
}
return boost::get< boost::uint64_t >( v_ );
}
template< class Config >
double Value_impl< Config >::get_real() const
{
if( type() == int_type )
{
return is_uint64() ? static_cast< double >( get_uint64() )
: static_cast< double >( get_int64() );
}
check_type( real_type );
return boost::get< double >( v_ );
}
template< class Config >
typename Value_impl< Config >::Object& Value_impl< Config >::get_obj()
{
check_type( obj_type );
return *boost::get< Object >( &v_ );
}
template< class Config >
typename Value_impl< Config >::Array& Value_impl< Config >::get_array()
{
check_type( array_type );
return *boost::get< Array >( &v_ );
}
template< class Config >
Pair_impl< Config >::Pair_impl( const String_type& name, const Value_type& value )
: name_( name )
, value_( value )
{
}
template< class Config >
bool Pair_impl< Config >::operator==( const Pair_impl< Config >& lhs ) const
{
if( this == &lhs ) return true;
return ( name_ == lhs.name_ ) && ( value_ == lhs.value_ );
}
    // converts a C string, i.e. an 8 bit char array, to a string object
//
template < class String_type >
String_type to_str( const char* c_str )
{
String_type result;
for( const char* p = c_str; *p != 0; ++p )
{
result += *p;
}
return result;
}
//
namespace internal_
{
template< typename T >
struct Type_to_type
{
};
template< class Value >
int get_value( const Value& value, Type_to_type< int > )
{
return value.get_int();
}
template< class Value >
boost::int64_t get_value( const Value& value, Type_to_type< boost::int64_t > )
{
return value.get_int64();
}
template< class Value >
boost::uint64_t get_value( const Value& value, Type_to_type< boost::uint64_t > )
{
return value.get_uint64();
}
template< class Value >
double get_value( const Value& value, Type_to_type< double > )
{
return value.get_real();
}
template< class Value >
typename Value::String_type get_value( const Value& value, Type_to_type< typename Value::String_type > )
{
return value.get_str();
}
template< class Value >
typename Value::Array get_value( const Value& value, Type_to_type< typename Value::Array > )
{
return value.get_array();
}
template< class Value >
typename Value::Object get_value( const Value& value, Type_to_type< typename Value::Object > )
{
return value.get_obj();
}
template< class Value >
bool get_value( const Value& value, Type_to_type< bool > )
{
return value.get_bool();
}
}
template< class Config >
template< typename T >
T Value_impl< Config >::get_value() const
{
return internal_::get_value( *this, internal_::Type_to_type< T >() );
}
}
#endif
| 15,731 | 25.892308 | 128 |
h
|
null |
ceph-main/src/json_spirit/json_spirit_writer.cpp
|
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#include "json_spirit_writer.h"
#include "json_spirit_writer_template.h"
using namespace json_spirit;
#ifdef JSON_SPIRIT_VALUE_ENABLED
void json_spirit::write( const Value& value, std::ostream& os, unsigned int options )
{
write_stream( value, os, options );
}
std::string json_spirit::write( const Value& value, unsigned int options )
{
return write_string( value, options );
}
void json_spirit::write_formatted( const Value& value, std::ostream& os )
{
write_stream( value, os, pretty_print );
}
std::string json_spirit::write_formatted( const Value& value )
{
return write_string( value, pretty_print );
}
#endif
#ifdef JSON_SPIRIT_MVALUE_ENABLED
void json_spirit::write( const mValue& value, std::ostream& os, unsigned int options )
{
write_stream( value, os, options );
}
std::string json_spirit::write( const mValue& value, unsigned int options )
{
return write_string( value, options );
}
void json_spirit::write_formatted( const mValue& value, std::ostream& os )
{
write_stream( value, os, pretty_print );
}
std::string json_spirit::write_formatted( const mValue& value )
{
return write_string( value, pretty_print );
}
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void json_spirit::write( const wValue& value, std::wostream& os, unsigned int options )
{
write_stream( value, os, options );
}
std::wstring json_spirit::write( const wValue& value, unsigned int options )
{
return write_string( value, options );
}
void json_spirit::write_formatted( const wValue& value, std::wostream& os )
{
write_stream( value, os, pretty_print );
}
std::wstring json_spirit::write_formatted( const wValue& value )
{
return write_string( value, pretty_print );
}
#endif
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void json_spirit::write_formatted( const wmValue& value, std::wostream& os )
{
write_stream( value, os, pretty_print );
}
std::wstring json_spirit::write_formatted( const wmValue& value )
{
return write_string( value, pretty_print );
}
void json_spirit::write( const wmValue& value, std::wostream& os, unsigned int options )
{
write_stream( value, os, options );
}
std::wstring json_spirit::write( const wmValue& value, unsigned int options )
{
return write_string( value, options );
}
#endif
| 2,764 | 27.505155 | 92 |
cpp
|
null |
ceph-main/src/json_spirit/json_spirit_writer.h
|
#ifndef JSON_SPIRIT_WRITER
#define JSON_SPIRIT_WRITER
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include "json_spirit_writer_options.h"
#include <iostream>
namespace json_spirit
{
    // these functions convert JSON Values to text
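    //
    // a minimal illustrative sketch (assuming JSON_SPIRIT_VALUE_ENABLED), e.g.
    //
    //    Object obj;
    //    obj.push_back( Pair( "name", "Ceph" ) );
    //    const std::string text = write( Value( obj ), pretty_print );
    //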
#ifdef JSON_SPIRIT_VALUE_ENABLED
void write( const Value& value, std::ostream& os, unsigned int options = 0 );
std::string write( const Value& value, unsigned int options = 0 );
#endif
#ifdef JSON_SPIRIT_MVALUE_ENABLED
void write( const mValue& value, std::ostream& os, unsigned int options = 0 );
std::string write( const mValue& value, unsigned int options = 0 );
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void write( const wValue& value, std::wostream& os, unsigned int options = 0 );
std::wstring write( const wValue& value, unsigned int options = 0 );
#endif
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void write( const wmValue& value, std::wostream& os, unsigned int options = 0 );
std::wstring write( const wmValue& value, unsigned int options = 0 );
#endif
// these "formatted" versions of the "write" functions are the equivalent of the above functions
// with option "pretty_print"
#ifdef JSON_SPIRIT_VALUE_ENABLED
void write_formatted( const Value& value, std::ostream& os );
std::string write_formatted( const Value& value );
#endif
#ifdef JSON_SPIRIT_MVALUE_ENABLED
void write_formatted( const mValue& value, std::ostream& os );
std::string write_formatted( const mValue& value );
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void write_formatted( const wValue& value, std::wostream& os );
std::wstring write_formatted( const wValue& value );
#endif
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void write_formatted( const wmValue& value, std::wostream& os );
std::wstring write_formatted( const wmValue& value );
#endif
}
#endif
| 2,301 | 34.96875 | 100 |
h
|
null |
ceph-main/src/json_spirit/json_spirit_writer_options.h
|
#ifndef JSON_SPIRIT_WRITER_OPTIONS
#define JSON_SPIRIT_WRITER_OPTIONS
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
namespace json_spirit
{
enum Output_options{ pretty_print = 0x01, // Add whitespace to format the output nicely.
                         raw_utf8 = 0x02,              // This prevents non-printable characters from being escaped using "\uNNNN" notation.
// Note, this is an extension to the JSON standard. It disables the escaping of
// non-printable characters allowing UTF-8 sequences held in 8 bit char strings
// to pass through unaltered.
remove_trailing_zeros = 0x04,
// outputs e.g. "1.200000000000000" as "1.2"
single_line_arrays = 0x08,
                         // pretty printing except that arrays are printed on single lines unless they contain
// composite elements, i.e. objects or arrays
};
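    // the options above are bit flags and may be combined with bitwise OR when
    // calling the write functions, e.g. (illustrative)
    //
    //    write( value, pretty_print | remove_trailing_zeros );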
}
#endif
| 1,341 | 42.290323 | 134 |
h
|
null |
ceph-main/src/json_spirit/json_spirit_writer_template.h
|
#ifndef JSON_SPIRIT_WRITER_TEMPLATE
#define JSON_SPIRIT_WRITER_TEMPLATE
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include "json_spirit_writer_options.h"
#include <sstream>
#include <iomanip>
#include <boost/io/ios_state.hpp>
#include "include/ceph_assert.h"
namespace json_spirit
{
inline char to_hex_char( unsigned int c )
{
ceph_assert( c <= 0xF );
const char ch = static_cast< char >( c );
if( ch < 10 ) return '0' + ch;
return 'A' - 10 + ch;
}
template< class String_type >
String_type non_printable_to_string( unsigned int c )
{
String_type result( 6, '\\' );
result[1] = 'u';
result[ 5 ] = to_hex_char( c & 0x000F ); c >>= 4;
result[ 4 ] = to_hex_char( c & 0x000F ); c >>= 4;
result[ 3 ] = to_hex_char( c & 0x000F ); c >>= 4;
result[ 2 ] = to_hex_char( c & 0x000F );
return result;
}
template< typename Char_type, class String_type >
bool add_esc_char( Char_type c, String_type& s )
{
switch( c )
{
case '"': s += to_str< String_type >( "\\\"" ); return true;
case '\\': s += to_str< String_type >( "\\\\" ); return true;
case '\b': s += to_str< String_type >( "\\b" ); return true;
case '\f': s += to_str< String_type >( "\\f" ); return true;
case '\n': s += to_str< String_type >( "\\n" ); return true;
case '\r': s += to_str< String_type >( "\\r" ); return true;
case '\t': s += to_str< String_type >( "\\t" ); return true;
}
return false;
}
template< class String_type >
String_type add_esc_chars( const String_type& s, bool raw_utf8 )
{
typedef typename String_type::const_iterator Iter_type;
typedef typename String_type::value_type Char_type;
String_type result;
const Iter_type end( s.end() );
for( Iter_type i = s.begin(); i != end; ++i )
{
const Char_type c( *i );
if( add_esc_char( c, result ) ) continue;
if( raw_utf8 )
{
result += c;
}
else
{
const wint_t unsigned_c( ( c >= 0 ) ? c : 256 + c );
if( iswprint( unsigned_c ) )
{
result += c;
}
else
{
result += non_printable_to_string< String_type >( unsigned_c );
}
}
}
return result;
}
template< class Ostream >
void append_double( Ostream& os, const double d, const int precision )
{
os << std::showpoint << std::setprecision( precision ) << d;
}
template< class String_type >
void erase_and_extract_exponent( String_type& str, String_type& exp )
{
const typename String_type::size_type exp_start= str.find( 'e' );
if( exp_start != String_type::npos )
{
exp = str.substr( exp_start );
str.erase( exp_start );
}
}
template< class String_type >
typename String_type::size_type find_first_non_zero( const String_type& str )
{
typename String_type::size_type result = str.size() - 1;
for( ; result != 0; --result )
{
if( str[ result ] != '0' )
{
break;
}
}
return result;
}
template< class String_type >
void remove_trailing( String_type& str )
{
String_type exp;
erase_and_extract_exponent( str, exp );
const typename String_type::size_type first_non_zero = find_first_non_zero( str );
if( first_non_zero != 0 )
{
            const int offset = str[first_non_zero] == '.' ? 2 : 1;       // note: zero digits following a decimal point are non-standard
str.erase( first_non_zero + offset );
}
str += exp;
}
// this class generates the JSON text,
// it keeps track of the indentation level etc.
//
template< class Value_type, class Ostream_type >
class Generator
{
typedef typename Value_type::Config_type Config_type;
typedef typename Config_type::String_type String_type;
typedef typename Config_type::Object_type Object_type;
typedef typename Config_type::Array_type Array_type;
typedef typename String_type::value_type Char_type;
typedef typename Object_type::value_type Obj_member_type;
public:
Generator( const Value_type& value, Ostream_type& os, unsigned int options )
: os_( os )
, indentation_level_( 0 )
, pretty_( ( options & pretty_print ) != 0 || ( options & single_line_arrays ) != 0 )
, raw_utf8_( ( options & raw_utf8 ) != 0 )
, remove_trailing_zeros_( ( options & remove_trailing_zeros ) != 0 )
, single_line_arrays_( ( options & single_line_arrays ) != 0 )
, ios_saver_( os )
{
output( value );
}
private:
void output( const Value_type& value )
{
switch( value.type() )
{
case obj_type: output( value.get_obj() ); break;
case array_type: output( value.get_array() ); break;
case str_type: output( value.get_str() ); break;
case bool_type: output( value.get_bool() ); break;
case real_type: output( value.get_real() ); break;
case int_type: output_int( value ); break;
case null_type: os_ << "null"; break;
default: ceph_assert( false );
}
}
void output( const Object_type& obj )
{
output_array_or_obj( obj, '{', '}' );
}
void output( const Obj_member_type& member )
{
output( Config_type::get_name( member ) ); space();
os_ << ':'; space();
output( Config_type::get_value( member ) );
}
void output_int( const Value_type& value )
{
if( value.is_uint64() )
{
os_ << value.get_uint64();
}
else
{
os_ << value.get_int64();
}
}
void output( const String_type& s )
{
os_ << '"' << add_esc_chars( s, raw_utf8_ ) << '"';
}
void output( bool b )
{
os_ << to_str< String_type >( b ? "true" : "false" );
}
void output( double d )
{
if( remove_trailing_zeros_ )
{
std::basic_ostringstream< Char_type > os;
append_double( os, d, 16 ); // note precision is 16 so that we get some trailing space that we can remove,
// otherwise, 0.1234 gets converted to "0.12399999..."
String_type str = os.str();
remove_trailing( str );
os_ << str;
}
else
{
append_double( os_, d, 17 );
}
}
static bool contains_composite_elements( const Array_type& arr )
{
for( typename Array_type::const_iterator i = arr.begin(); i != arr.end(); ++i )
{
const Value_type& val = *i;
if( val.type() == obj_type ||
val.type() == array_type )
{
return true;
}
}
return false;
}
template< class Iter >
void output_composite_item( Iter i, Iter last )
{
output( *i );
if( ++i != last )
{
os_ << ',';
}
}
void output( const Array_type& arr )
{
if( single_line_arrays_ && !contains_composite_elements( arr ) )
{
os_ << '['; space();
for( typename Array_type::const_iterator i = arr.begin(); i != arr.end(); ++i )
{
output_composite_item( i, arr.end() );
space();
}
os_ << ']';
}
else
{
output_array_or_obj( arr, '[', ']' );
}
}
template< class T >
void output_array_or_obj( const T& t, Char_type start_char, Char_type end_char )
{
os_ << start_char; new_line();
++indentation_level_;
for( typename T::const_iterator i = t.begin(); i != t.end(); ++i )
{
indent();
output_composite_item( i, t.end() );
new_line();
}
--indentation_level_;
indent(); os_ << end_char;
}
void indent()
{
if( !pretty_ ) return;
for( int i = 0; i < indentation_level_; ++i )
{
os_ << " ";
}
}
void space()
{
if( pretty_ ) os_ << ' ';
}
void new_line()
{
if( pretty_ ) os_ << '\n';
}
Generator& operator=( const Generator& ); // to prevent "assignment operator could not be generated" warning
Ostream_type& os_;
int indentation_level_;
bool pretty_;
bool raw_utf8_;
bool remove_trailing_zeros_;
bool single_line_arrays_;
boost::io::basic_ios_all_saver< Char_type > ios_saver_; // so that ostream state is reset after control is returned to the caller
};
// writes JSON Value to a stream, e.g.
//
// write_stream( value, os, pretty_print );
//
template< class Value_type, class Ostream_type >
void write_stream( const Value_type& value, Ostream_type& os, unsigned int options = 0 )
{
os << std::dec;
Generator< Value_type, Ostream_type >( value, os, options );
}
    // writes JSON Value to a string, e.g.
//
// const string json_str = write( value, pretty_print );
//
template< class Value_type >
typename Value_type::String_type write_string( const Value_type& value, unsigned int options = 0 )
{
typedef typename Value_type::String_type::value_type Char_type;
std::basic_ostringstream< Char_type > os;
write_stream( value, os, options );
return os.str();
}
}
#endif
| 10,833 | 27.14026 | 138 |
h
|
null |
ceph-main/src/key_value_store/cls_kvs.cc
|
/*
* OSD classes for the key value store
*
* Created on: Aug 10, 2012
* Author: Eleanor Cawthon
*/
#include "include/compat.h"
#include "objclass/objclass.h"
#include <errno.h>
#include "key_value_store/kvs_arg_types.h"
#include "include/types.h"
#include <iostream>
#include <climits>
using std::string;
using std::map;
using std::set;
/**
* finds the index_data where a key belongs.
*
* @param key: the key to search for
* @param idata: the index_data for the first index value such that idata.key
* is greater than key.
* @param next_idata: the index_data for the next index entry after idata
* @pre: key is not encoded
 * @post: idata contains the complete information stored in the index
*/
static int get_idata_from_key(cls_method_context_t hctx, const string &key,
index_data &idata, index_data &next_idata) {
bufferlist raw_val;
int r = 0;
std::map<std::string, bufferlist> kvmap;
bool more;
r = cls_cxx_map_get_vals(hctx, key_data(key).encoded(), "", 2, &kvmap, &more);
if (r < 0) {
CLS_LOG(20, "error reading index for range %s: %d", key.c_str(), r);
return r;
}
r = cls_cxx_map_get_val(hctx, key_data(key).encoded(), &raw_val);
if (r == 0){
CLS_LOG(20, "%s is already in the index: %d", key.c_str(), r);
auto b = raw_val.cbegin();
idata.decode(b);
if (!kvmap.empty()) {
auto b = kvmap.begin()->second.cbegin();
next_idata.decode(b);
}
return r;
} else if (r == -ENOENT || r == -ENODATA) {
auto b = kvmap.begin()->second.cbegin();
idata.decode(b);
if (idata.kdata.prefix != "1") {
auto nb = (++kvmap.begin())->second.cbegin();
next_idata.decode(nb);
}
r = 0;
} else if (r < 0) {
CLS_LOG(20, "error reading index for duplicates %s: %d", key.c_str(), r);
return r;
}
CLS_LOG(20, "idata is %s", idata.str().c_str());
return r;
}
static int get_idata_from_key_op(cls_method_context_t hctx,
bufferlist *in, bufferlist *out) {
CLS_LOG(20, "get_idata_from_key_op");
idata_from_key_args op;
auto it = in->cbegin();
try {
decode(op, it);
} catch (buffer::error& err) {
CLS_LOG(20, "error decoding idata_from_key_args.");
return -EINVAL;
}
int r = get_idata_from_key(hctx, op.key, op.idata, op.next_idata);
if (r < 0) {
return r;
} else {
encode(op, *out);
return 0;
}
}
/**
* finds the object in the index with the lowest key value that is greater
* than idata.key. If idata.key is the max key, returns -EOVERFLOW. If
* idata has a prefix and has timed out, cleans up.
*
* @param idata: idata for the object to search for.
* @param out_data: the idata for the next object.
*
* @pre: idata must contain a key.
* @post: out_data contains complete information
*/
static int get_next_idata(cls_method_context_t hctx, const index_data &idata,
index_data &out_data) {
int r = 0;
std::map<std::string, bufferlist> kvs;
bool more;
r = cls_cxx_map_get_vals(hctx, idata.kdata.encoded(), "", 1, &kvs, &more);
if (r < 0){
CLS_LOG(20, "getting kvs failed with error %d", r);
return r;
}
if (!kvs.empty()) {
out_data.kdata.parse(kvs.begin()->first);
auto b = kvs.begin()->second.cbegin();
out_data.decode(b);
} else {
r = -EOVERFLOW;
}
return r;
}
static int get_next_idata_op(cls_method_context_t hctx,
bufferlist *in, bufferlist *out) {
CLS_LOG(20, "get_next_idata_op");
idata_from_idata_args op;
auto it = in->cbegin();
try {
decode(op, it);
} catch (buffer::error& err) {
return -EINVAL;
}
int r = get_next_idata(hctx, op.idata, op.next_idata);
if (r < 0) {
return r;
} else {
op.encode(*out);
return 0;
}
}
/**
* finds the object in the index with the highest key value that is less
* than idata.key. If idata.key is the lowest key, returns -ERANGE If
* idata has a prefix and has timed out, cleans up.
*
* @param idata: idata for the object to search for.
* @param out_data: the idata for the next object.
*
* @pre: idata must contain a key.
 * @post: out_data contains complete information
*/
static int get_prev_idata(cls_method_context_t hctx, const index_data &idata,
index_data &out_data) {
int r = 0;
std::map<std::string, bufferlist> kvs;
bool more;
r = cls_cxx_map_get_vals(hctx, "", "", LONG_MAX, &kvs, &more);
if (r < 0){
CLS_LOG(20, "getting kvs failed with error %d", r);
return r;
}
std::map<std::string, bufferlist>::iterator it =
kvs.lower_bound(idata.kdata.encoded());
if (it->first != idata.kdata.encoded()) {
CLS_LOG(20, "object %s not found in the index (expected %s, found %s)",
idata.str().c_str(), idata.kdata.encoded().c_str(),
it->first.c_str());
return -ENODATA;
}
if (it == kvs.begin()) {
//it is the first object, there is no previous.
return -ERANGE;
} else {
--it;
}
out_data.kdata.parse(it->first);
auto b = it->second.cbegin();
out_data.decode(b);
return 0;
}
static int get_prev_idata_op(cls_method_context_t hctx,
bufferlist *in, bufferlist *out) {
  CLS_LOG(20, "get_prev_idata_op");
idata_from_idata_args op;
auto it = in->cbegin();
try {
decode(op, it);
} catch (buffer::error& err) {
return -EINVAL;
}
int r = get_prev_idata(hctx, op.idata, op.next_idata);
if (r < 0) {
return r;
} else {
op.encode(*out);
return 0;
}
}
/**
 * Read all of the index entries where any of the given keys go
*/
static int read_many(cls_method_context_t hctx, const set<string> &keys,
map<string, bufferlist> * out) {
int r = 0;
bool more;
CLS_ERR("reading from a map of size %d, first key encoded is %s",
(int)keys.size(), key_data(*keys.begin()).encoded().c_str());
r = cls_cxx_map_get_vals(hctx, key_data(*keys.begin()).encoded().c_str(),
"", LONG_MAX, out, &more);
if (r < 0) {
CLS_ERR("getting omap vals failed with error %d", r);
}
CLS_ERR("got map of size %d ", (int)out->size());
if (out->size() > 1) {
out->erase(out->upper_bound(key_data(*keys.rbegin()).encoded().c_str()),
out->end());
}
CLS_ERR("returning map of size %d", (int)out->size());
return r;
}
static int read_many_op(cls_method_context_t hctx, bufferlist *in,
bufferlist *out) {
CLS_LOG(20, "read_many_op");
set<string> op;
map<string, bufferlist> outmap;
auto it = in->cbegin();
try {
decode(op, it);
} catch (buffer::error & err) {
return -EINVAL;
}
int r = read_many(hctx, op, &outmap);
if (r < 0) {
return r;
} else {
encode(outmap, *out);
return 0;
}
}
/**
* Checks the unwritable xattr. If it is "1" (i.e., it is unwritable), returns
* -EACCES. otherwise, returns 0.
*/
static int check_writable(cls_method_context_t hctx) {
bufferlist bl;
int r = cls_cxx_getxattr(hctx, "unwritable", &bl);
if (r < 0) {
CLS_LOG(20, "error reading xattr %s: %d", "unwritable", r);
return r;
}
if (string(bl.c_str(), bl.length()) == "1") {
return -EACCES;
} else{
return 0;
}
}
static int check_writable_op(cls_method_context_t hctx,
bufferlist *in, bufferlist *out) {
CLS_LOG(20, "check_writable_op");
return check_writable(hctx);
}
/**
* returns -EKEYREJECTED if size is outside of bound, according to comparator.
*
* @bound: the limit to test
* @comparator: should be CEPH_OSD_CMPXATTR_OP_[EQ|GT|LT]
*/
static int assert_size_in_bound(cls_method_context_t hctx, int bound,
int comparator) {
//determine size
bufferlist size_bl;
int r = cls_cxx_getxattr(hctx, "size", &size_bl);
if (r < 0) {
CLS_LOG(20, "error reading xattr %s: %d", "size", r);
return r;
}
int size = atoi(string(size_bl.c_str(), size_bl.length()).c_str());
CLS_LOG(20, "size is %d, bound is %d", size, bound);
//compare size to comparator
switch (comparator) {
case CEPH_OSD_CMPXATTR_OP_EQ:
if (size != bound) {
return -EKEYREJECTED;
}
break;
case CEPH_OSD_CMPXATTR_OP_LT:
if (size >= bound) {
return -EKEYREJECTED;
}
break;
case CEPH_OSD_CMPXATTR_OP_GT:
if (size <= bound) {
return -EKEYREJECTED;
}
break;
default:
CLS_LOG(20, "invalid argument passed to assert_size_in_bound: %d",
comparator);
return -EINVAL;
}
return 0;
}
static int assert_size_in_bound_op(cls_method_context_t hctx,
bufferlist *in, bufferlist *out) {
CLS_LOG(20, "assert_size_in_bound_op");
assert_size_args op;
auto it = in->cbegin();
try {
decode(op, it);
} catch (buffer::error& err) {
return -EINVAL;
}
return assert_size_in_bound(hctx, op.bound, op.comparator);
}
/**
* Attempts to insert omap into this object's omap.
*
* @return:
* if unwritable, returns -EACCES.
* if size > bound and key doesn't already exist in the omap, returns -EBALANCE.
* if exclusive is true, returns -EEXIST if any keys already exist.
*
* @post: object has omap entries inserted, and size xattr is updated
*/
static int omap_insert(cls_method_context_t hctx,
const map<string, bufferlist> &omap, int bound, bool exclusive) {
uint64_t size;
time_t time;
int r = cls_cxx_stat(hctx, &size, &time);
if (r < 0) {
return r;
}
CLS_LOG(20, "inserting %s", omap.begin()->first.c_str());
r = check_writable(hctx);
if (r < 0) {
CLS_LOG(20, "omap_insert: this object is unwritable: %d", r);
return r;
}
int assert_bound = bound;
//if this is an exclusive insert, make sure the key doesn't already exist.
for (map<string, bufferlist>::const_iterator it = omap.begin();
it != omap.end(); ++it) {
bufferlist bl;
r = cls_cxx_map_get_val(hctx, it->first, &bl);
if (r == 0 && string(bl.c_str(), bl.length()) != ""){
if (exclusive) {
CLS_LOG(20, "error: this is an exclusive insert and %s exists.",
it->first.c_str());
return -EEXIST;
}
assert_bound++;
CLS_LOG(20, "increased assert_bound to %d", assert_bound);
} else if (r != -ENODATA && r != -ENOENT) {
CLS_LOG(20, "error reading omap val for %s: %d", it->first.c_str(), r);
return r;
}
}
bufferlist old_size;
r = cls_cxx_getxattr(hctx, "size", &old_size);
if (r < 0) {
CLS_LOG(20, "error reading xattr %s: %d", "size", r);
return r;
}
int old_size_int = atoi(string(old_size.c_str(), old_size.length()).c_str());
CLS_LOG(20, "asserting size is less than %d (bound is %d)", assert_bound, bound);
if (old_size_int >= assert_bound) {
return -EKEYREJECTED;
}
int new_size_int = old_size_int + omap.size() - (assert_bound - bound);
CLS_LOG(20, "old size is %d, new size is %d", old_size_int, new_size_int);
bufferlist new_size;
std::stringstream s;
s << new_size_int;
new_size.append(s.str());
r = cls_cxx_map_set_vals(hctx, &omap);
if (r < 0) {
CLS_LOG(20, "error setting omap: %d", r);
return r;
}
r = cls_cxx_setxattr(hctx, "size", &new_size);
if (r < 0) {
CLS_LOG(20, "error setting xattr %s: %d", "size", r);
return r;
}
CLS_LOG(20, "successfully inserted %s", omap.begin()->first.c_str());
return 0;
}
static int omap_insert_op(cls_method_context_t hctx,
bufferlist *in, bufferlist *out) {
CLS_LOG(20, "omap_insert");
omap_set_args op;
auto it = in->cbegin();
try {
decode(op, it);
} catch (buffer::error& err) {
return -EINVAL;
}
return omap_insert(hctx, op.omap, op.bound, op.exclusive);
}
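/**
 * An illustrative client-side sketch of invoking the "omap_insert" method of
 * the "kvs" class registered below (the object name, bound and payload are
 * made-up examples, and it assumes omap_set_args from kvs_arg_types.h is
 * available to the client):
 *
 *   omap_set_args args;
 *   args.omap["foo"] = val_bl;   // val_bl: some bufferlist payload
 *   args.bound = 20;             // made-up size bound
 *   args.exclusive = false;
 *   bufferlist in, out;
 *   encode(args, in);
 *   int r = io_ctx.exec("obj.1", "kvs", "omap_insert", in, out);
 */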
static int create_with_omap(cls_method_context_t hctx,
const map<string, bufferlist> &omap) {
CLS_LOG(20, "creating with omap: %s", omap.begin()->first.c_str());
//first make sure the object is writable
int r = cls_cxx_create(hctx, true);
if (r < 0) {
CLS_LOG(20, "omap create: creating failed: %d", r);
return r;
}
int new_size_int = omap.size();
CLS_LOG(20, "omap insert: new size is %d", new_size_int);
bufferlist new_size;
std::stringstream s;
s << new_size_int;
new_size.append(s.str());
r = cls_cxx_map_set_vals(hctx, &omap);
if (r < 0) {
CLS_LOG(20, "omap create: error setting omap: %d", r);
return r;
}
r = cls_cxx_setxattr(hctx, "size", &new_size);
if (r < 0) {
CLS_LOG(20, "omap create: error setting xattr %s: %d", "size", r);
return r;
}
bufferlist u;
u.append("0");
r = cls_cxx_setxattr(hctx, "unwritable", &u);
if (r < 0) {
CLS_LOG(20, "omap create: error setting xattr %s: %d", "unwritable", r);
return r;
}
CLS_LOG(20, "successfully created %s", omap.begin()->first.c_str());
return 0;
}
static int create_with_omap_op(cls_method_context_t hctx,
bufferlist *in, bufferlist *out) {
  CLS_LOG(20, "create_with_omap");
map<string, bufferlist> omap;
auto it = in->cbegin();
try {
decode(omap, it);
} catch (buffer::error& err) {
return -EINVAL;
}
return create_with_omap(hctx, omap);
}
/**
* Attempts to remove omap from this object's omap.
*
* @return:
* if unwritable, returns -EACCES.
* if size < bound and key doesn't already exist in the omap, returns -EBALANCE.
* if any of the keys are not in this object, returns -ENODATA.
*
* @post: object has omap entries removed, and size xattr is updated
*/
static int omap_remove(cls_method_context_t hctx,
const std::set<string> &omap, int bound) {
int r;
uint64_t size;
time_t time;
r = cls_cxx_stat(hctx, &size, &time);
if (r < 0) {
return r;
}
//first make sure the object is writable
r = check_writable(hctx);
if (r < 0) {
return r;
}
  //check for existence of the key first
for (set<string>::const_iterator it = omap.begin();
it != omap.end(); ++it) {
bufferlist bl;
r = cls_cxx_map_get_val(hctx, *it, &bl);
if (r == -ENOENT || r == -ENODATA
|| string(bl.c_str(), bl.length()) == ""){
return -ENODATA;
} else if (r < 0) {
CLS_LOG(20, "error reading omap val for %s: %d", it->c_str(), r);
return r;
}
}
//fail if removing from an object with only bound entries.
bufferlist old_size;
r = cls_cxx_getxattr(hctx, "size", &old_size);
if (r < 0) {
CLS_LOG(20, "error reading xattr %s: %d", "size", r);
return r;
}
int old_size_int = atoi(string(old_size.c_str(), old_size.length()).c_str());
CLS_LOG(20, "asserting size is greater than %d", bound);
if (old_size_int <= bound) {
return -EKEYREJECTED;
}
int new_size_int = old_size_int - omap.size();
CLS_LOG(20, "old size is %d, new size is %d", old_size_int, new_size_int);
bufferlist new_size;
std::stringstream s;
s << new_size_int;
new_size.append(s.str());
r = cls_cxx_setxattr(hctx, "size", &new_size);
if (r < 0) {
    CLS_LOG(20, "error setting xattr %s: %d", "size", r);
return r;
}
for (std::set<string>::const_iterator it = omap.begin();
it != omap.end(); ++it) {
r = cls_cxx_map_remove_key(hctx, *it);
if (r < 0) {
CLS_LOG(20, "error removing omap: %d", r);
return r;
}
}
return 0;
}
static int omap_remove_op(cls_method_context_t hctx,
bufferlist *in, bufferlist *out) {
CLS_LOG(20, "omap_remove");
omap_rm_args op;
auto it = in->cbegin();
try {
decode(op, it);
} catch (buffer::error& err) {
return -EINVAL;
}
return omap_remove(hctx, op.omap, op.bound);
}
/**
* checks to see if this object needs to be split or rebalanced. if so, reads
* information about it.
*
* @post: if assert_size_in_bound(hctx, bound, comparator) succeeds,
* odata contains the size, omap, and unwritable attributes for this object.
* Otherwise, odata contains the size and unwritable attribute.
*/
static int maybe_read_for_balance(cls_method_context_t hctx,
object_data &odata, int bound, int comparator) {
CLS_LOG(20, "rebalance reading");
//if unwritable, return
int r = check_writable(hctx);
if (r < 0) {
odata.unwritable = true;
CLS_LOG(20, "rebalance read: error getting xattr %s: %d", "unwritable", r);
return r;
} else {
odata.unwritable = false;
}
//get the size attribute
bufferlist size;
r = cls_cxx_getxattr(hctx, "size", &size);
if (r < 0) {
CLS_LOG(20, "rebalance read: error getting xattr %s: %d", "size", r);
return r;
}
odata.size = atoi(string(size.c_str(), size.length()).c_str());
//check if it needs to be balanced
r = assert_size_in_bound(hctx, bound, comparator);
if (r < 0) {
CLS_LOG(20, "rebalance read: error on asserting size: %d", r);
return -EBALANCE;
}
//if the assert succeeded, it needs to be balanced
bool more;
r = cls_cxx_map_get_vals(hctx, "", "", LONG_MAX, &odata.omap, &more);
if (r < 0){
CLS_LOG(20, "rebalance read: getting kvs failed with error %d", r);
return r;
}
CLS_LOG(20, "rebalance read: size xattr is %llu, omap size is %llu",
(unsigned long long)odata.size,
(unsigned long long)odata.omap.size());
return 0;
}
static int maybe_read_for_balance_op(cls_method_context_t hctx,
bufferlist *in, bufferlist *out) {
CLS_LOG(20, "maybe_read_for_balance");
rebalance_args op;
auto it = in->cbegin();
try {
decode(op, it);
} catch (buffer::error& err) {
return -EINVAL;
}
int r = maybe_read_for_balance(hctx, op.odata, op.bound, op.comparator);
if (r < 0) {
return r;
} else {
op.encode(*out);
return 0;
}
}
CLS_INIT(kvs)
{
CLS_LOG(20, "Loaded assert condition class!");
cls_handle_t h_class;
cls_method_handle_t h_get_idata_from_key;
cls_method_handle_t h_get_next_idata;
cls_method_handle_t h_get_prev_idata;
cls_method_handle_t h_read_many;
cls_method_handle_t h_check_writable;
cls_method_handle_t h_assert_size_in_bound;
cls_method_handle_t h_omap_insert;
cls_method_handle_t h_create_with_omap;
cls_method_handle_t h_omap_remove;
cls_method_handle_t h_maybe_read_for_balance;
cls_register("kvs", &h_class);
cls_register_cxx_method(h_class, "get_idata_from_key",
CLS_METHOD_RD,
get_idata_from_key_op, &h_get_idata_from_key);
cls_register_cxx_method(h_class, "get_next_idata",
CLS_METHOD_RD,
get_next_idata_op, &h_get_next_idata);
cls_register_cxx_method(h_class, "get_prev_idata",
CLS_METHOD_RD,
get_prev_idata_op, &h_get_prev_idata);
cls_register_cxx_method(h_class, "read_many",
CLS_METHOD_RD,
read_many_op, &h_read_many);
cls_register_cxx_method(h_class, "check_writable",
CLS_METHOD_RD | CLS_METHOD_WR,
check_writable_op, &h_check_writable);
cls_register_cxx_method(h_class, "assert_size_in_bound",
CLS_METHOD_WR,
assert_size_in_bound_op, &h_assert_size_in_bound);
cls_register_cxx_method(h_class, "omap_insert",
CLS_METHOD_WR,
omap_insert_op, &h_omap_insert);
cls_register_cxx_method(h_class, "create_with_omap",
CLS_METHOD_WR,
create_with_omap_op, &h_create_with_omap);
cls_register_cxx_method(h_class, "omap_remove",
CLS_METHOD_WR,
omap_remove_op, &h_omap_remove);
cls_register_cxx_method(h_class, "maybe_read_for_balance",
CLS_METHOD_RD,
maybe_read_for_balance_op, &h_maybe_read_for_balance);
return;
}
| 19,454 | 27.033141 | 83 |
cc
|
null |
ceph-main/src/key_value_store/key_value_structure.h
|
/*
* Interface for key-value store using librados
*
* September 2, 2012
* Eleanor Cawthon
* [email protected]
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef KEY_VALUE_STRUCTURE_HPP_
#define KEY_VALUE_STRUCTURE_HPP_
#include "include/rados/librados.hpp"
#include "include/utime.h"
#include <vector>
using ceph::bufferlist;
class KeyValueStructure;
/**An injection_t is a function that is called before every
* ObjectWriteOperation to test concurrency issues. For example,
* one injection_t might cause the client to have a greater chance of dying
* mid-split/merge.
*/
typedef int (KeyValueStructure::*injection_t)();
/**
* Passed to aio methods to be called when the operation completes
*/
typedef void (*callback)(int * err, void *arg);
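/**
 * Illustrative sketch (not part of the original interface): a function
 * matching the callback typedef above could look like
 *
 *   void on_done(int *err, void *arg) {
 *     std::cout << "op finished with " << *err << std::endl;
 *   }
 *
 * and is passed to the aio_* methods below together with its cb_args pointer.
 */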
class KeyValueStructure{
public:
std::map<char, int> opmap;
//these are injection methods. By default, nothing is called at each
//interruption point.
/**
* returns 0
*/
virtual int nothing() = 0;
/**
   * 10% chance of waiting wait_ms milliseconds
*/
virtual int wait() = 0;
/**
* 10% chance of killing the client.
*/
virtual int suicide() = 0;
////////////////DESTRUCTOR/////////////////
virtual ~KeyValueStructure() {}
////////////////UPDATERS///////////////////
/**
* set up the KeyValueStructure (i.e., initialize rados/io_ctx, etc.)
*/
virtual int setup(int argc, const char** argv) = 0;
/**
* set the method that gets called before each ObjectWriteOperation.
   * If wait_time is set and the method passed involves waiting, it will wait
* for that many milliseconds.
*/
virtual void set_inject(injection_t inject, int wait_time) = 0;
/**
* if update_on_existing is false, returns an error if
* key already exists in the structure
*/
virtual int set(const std::string &key, const bufferlist &val,
bool update_on_existing) = 0;
/**
* efficiently insert the contents of in_map into the structure
*/
virtual int set_many(const std::map<std::string, bufferlist> &in_map) = 0;
/**
* removes the key-value for key. returns an error if key does not exist
*/
virtual int remove(const std::string &key) = 0;
/**
* removes all keys and values
*/
virtual int remove_all() = 0;
/**
   * launches a thread to get the value of key. When complete, calls cb(err, cb_args)
*/
virtual void aio_get(const std::string &key, bufferlist *val, callback cb,
void *cb_args, int * err) = 0;
/**
   * launches a thread to set key to val. When complete, calls cb(err, cb_args)
*/
virtual void aio_set(const std::string &key, const bufferlist &val, bool exclusive,
callback cb, void * cb_args, int * err) = 0;
/**
   * launches a thread to remove key. When complete, calls cb(err, cb_args)
*/
virtual void aio_remove(const std::string &key, callback cb, void *cb_args,
int * err) = 0;
////////////////READERS////////////////////
/**
* gets the val associated with key.
*
* @param key the key to get
* @param val the value is stored in this
* @return error code
*/
virtual int get(const std::string &key, bufferlist *val) = 0;
/**
* stores all keys in keys. set should put them in order by key.
*/
virtual int get_all_keys(std::set<std::string> *keys) = 0;
/**
* stores all keys and values in kv_map. map should put them in order by key.
*/
virtual int get_all_keys_and_values(std::map<std::string,bufferlist> *kv_map) = 0;
/**
* True if the structure meets its own requirements for consistency.
*/
virtual bool is_consistent() = 0;
/**
* prints a string representation of the structure
*/
virtual std::string str() = 0;
};
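/**
 * Illustrative usage sketch (an assumption, not taken from this header): a
 * concrete implementation such as KvFlatBtreeAsync would typically be driven
 * like this:
 *
 *   KvFlatBtreeAsync kvs(...);   // constructor arguments are implementation-defined
 *   kvs.setup(argc, argv);       // connect to the cluster and create the index
 *   bufferlist val;
 *   val.append("value");
 *   kvs.set("key", val, true);   // insert or update
 *   bufferlist out;
 *   kvs.get("key", &out);        // read it back
 *   kvs.remove("key");           // delete it
 */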
#endif /* KEY_VALUE_STRUCTURE_HPP_ */
| 3,898 | 25.52381 | 85 |
h
|
null |
ceph-main/src/key_value_store/kv_flat_btree_async.cc
|
/*
* Key-value store using librados
*
* September 2, 2012
* Eleanor Cawthon
* [email protected]
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "include/compat.h"
#include "key_value_store/key_value_structure.h"
#include "key_value_store/kv_flat_btree_async.h"
#include "key_value_store/kvs_arg_types.h"
#include "include/rados/librados.hpp"
#include "common/ceph_context.h"
#include "common/Clock.h"
#include "include/types.h"
#include <errno.h>
#include <string>
#include <iostream>
#include <cassert>
#include <climits>
#include <cmath>
#include <sstream>
#include <stdlib.h>
#include <iterator>
using ceph::bufferlist;
using namespace std;
bool index_data::is_timed_out(utime_t now, utime_t timeout) const {
return prefix != "" && now - ts > timeout;
}
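// IndexCache (declared in kv_flat_btree_async.h) mirrors part of the index
// object in memory. k2itmap maps a key_data to (index_data, time of insertion)
// and t2kmap maps that time back to the key_data, so push() can refresh an
// entry's timestamp and pop() can evict the entry with the oldest timestamp
// once the cache grows past cache_size - a simple LRU-style policy.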
void IndexCache::clear() {
k2itmap.clear();
t2kmap.clear();
}
void IndexCache::push(const string &key, const index_data &idata) {
if (cache_size == 0) {
return;
}
index_data old_idata;
std::map<key_data, std::pair<index_data, utime_t> >::iterator old_it =
k2itmap.lower_bound(key_data(key));
if (old_it != k2itmap.end()) {
t2kmap.erase(old_it->second.second);
k2itmap.erase(old_it);
}
std::map<key_data, std::pair<index_data, utime_t> >::iterator new_it =
k2itmap.find(idata.kdata);
if (new_it != k2itmap.end()) {
utime_t old_time = new_it->second.second;
t2kmap.erase(old_time);
}
utime_t time = ceph_clock_now();
k2itmap[idata.kdata] = std::make_pair(idata, time);
t2kmap[time] = idata.kdata;
if ((int)k2itmap.size() > cache_size) {
pop();
}
}
void IndexCache::push(const index_data &idata) {
if (cache_size == 0) {
return;
}
if (k2itmap.count(idata.kdata) > 0) {
utime_t old_time = k2itmap[idata.kdata].second;
t2kmap.erase(old_time);
k2itmap.erase(idata.kdata);
}
utime_t time = ceph_clock_now();
k2itmap[idata.kdata] = std::make_pair(idata, time);
t2kmap[time] = idata.kdata;
if ((int)k2itmap.size() > cache_size) {
pop();
}
}
void IndexCache::pop() {
if (cache_size == 0) {
return;
}
std::map<utime_t, key_data>::iterator it = t2kmap.begin();
utime_t time = it->first;
key_data kdata = it->second;
k2itmap.erase(kdata);
t2kmap.erase(time);
}
void IndexCache::erase(key_data kdata) {
if (cache_size == 0) {
return;
}
if (k2itmap.count(kdata) > 0) {
utime_t c = k2itmap[kdata].second;
k2itmap.erase(kdata);
t2kmap.erase(c);
}
}
int IndexCache::get(const string &key, index_data *idata) const {
if (cache_size == 0) {
return -ENODATA;
}
if ((int)k2itmap.size() == 0) {
return -ENODATA;
}
std::map<key_data, std::pair<index_data, utime_t> >::const_iterator it =
k2itmap.lower_bound(key_data(key));
if (it == k2itmap.end() || !(it->second.first.min_kdata < key_data(key))) {
return -ENODATA;
} else {
*idata = it->second.first;
}
return 0;
}
int IndexCache::get(const string &key, index_data *idata,
index_data *next_idata) const {
if (cache_size == 0) {
return -ENODATA;
}
std::map<key_data, std::pair<index_data, utime_t> >::const_iterator it =
k2itmap.lower_bound(key_data(key));
if (it == k2itmap.end() || ++it == k2itmap.end()) {
return -ENODATA;
} else {
--it;
if (!(it->second.first.min_kdata < key_data(key))){
//stale, should be reread.
return -ENODATA;
} else {
*idata = it->second.first;
++it;
if (it != k2itmap.end()) {
*next_idata = it->second.first;
}
}
}
return 0;
}
int KvFlatBtreeAsync::nothing() {
return 0;
}
int KvFlatBtreeAsync::wait() {
if (rand() % 10 == 0) {
usleep(wait_ms);
}
return 0;
}
int KvFlatBtreeAsync::suicide() {
if (rand() % 10 == 0) {
if (verbose) cout << client_name << " is suiciding" << std::endl;
return 1;
}
return 0;
}
int KvFlatBtreeAsync::next(const index_data &idata, index_data * out_data)
{
if (verbose) cout << "\t\t" << client_name << "-next: finding next of "
<< idata.str()
<< std::endl;
int err = 0;
librados::ObjectReadOperation oro;
std::map<std::string, bufferlist> kvs;
oro.omap_get_vals2(idata.kdata.encoded(),1,&kvs, nullptr, &err);
err = io_ctx.operate(index_name, &oro, NULL);
if (err < 0){
if (verbose) cout << "\t\t\t" << client_name
<< "-next: getting index failed with error "
<< err << std::endl;
return err;
}
if (!kvs.empty()) {
out_data->kdata.parse(kvs.begin()->first);
auto b = kvs.begin()->second.cbegin();
out_data->decode(b);
if (idata.is_timed_out(ceph_clock_now(), timeout)) {
if (verbose) cout << client_name << " THINKS THE OTHER CLIENT DIED."
<< std::endl;
//the client died after deleting the object. clean up.
cleanup(idata, err);
}
} else {
err = -EOVERFLOW;
}
return err;
}
int KvFlatBtreeAsync::prev(const index_data &idata, index_data * out_data)
{
if (verbose) cout << "\t\t" << client_name << "-prev: finding prev of "
<< idata.str() << std::endl;
int err = 0;
bufferlist inbl;
idata_from_idata_args in_args;
in_args.idata = idata;
in_args.encode(inbl);
bufferlist outbl;
err = io_ctx.exec(index_name,"kvs", "get_prev_idata", inbl, outbl);
if (err < 0){
if (verbose) cout << "\t\t\t" << client_name
<< "-prev: getting index failed with error "
<< err << std::endl;
if (idata.is_timed_out(ceph_clock_now(), timeout)) {
if (verbose) cout << client_name << " THINKS THE OTHER CLIENT DIED."
<< std::endl;
//the client died after deleting the object. clean up.
err = cleanup(idata, err);
if (err == -ESUICIDE) {
return err;
} else {
err = 0;
}
}
return err;
}
auto it = outbl.cbegin();
in_args.decode(it);
*out_data = in_args.next_idata;
if (verbose) cout << "\t\t" << client_name << "-prev: prev is "
<< out_data->str()
<< std::endl;
return err;
}
int KvFlatBtreeAsync::read_index(const string &key, index_data * idata,
index_data * next_idata, bool force_update) {
int err = 0;
if (!force_update) {
if (verbose) cout << "\t" << client_name
<< "-read_index: getting index_data for " << key
<< " from cache" << std::endl;
icache_lock.lock();
if (next_idata != NULL) {
err = icache.get(key, idata, next_idata);
} else {
err = icache.get(key, idata);
}
icache_lock.unlock();
if (err == 0) {
//if (verbose) cout << "CACHE SUCCESS" << std::endl;
return err;
} else {
if (verbose) cout << "NOT IN CACHE" << std::endl;
}
}
if (verbose) cout << "\t" << client_name
<< "-read_index: getting index_data for " << key
<< " from object" << std::endl;
librados::ObjectReadOperation oro;
bufferlist raw_val;
std::set<std::string> key_set;
key_set.insert(key_data(key).encoded());
std::map<std::string, bufferlist> kvmap;
std::map<std::string, bufferlist> dupmap;
oro.omap_get_vals_by_keys(key_set, &dupmap, &err);
oro.omap_get_vals2(key_data(key).encoded(),
(cache_size / cache_refresh >= 2? cache_size / cache_refresh: 2),
&kvmap, nullptr, &err);
err = io_ctx.operate(index_name, &oro, NULL);
utime_t mytime = ceph_clock_now();
if (err < 0){
cerr << "\t" << client_name
<< "-read_index: getting keys failed with "
<< err << std::endl;
ceph_abort_msg(client_name + "-read_index: reading index failed");
return err;
}
kvmap.insert(dupmap.begin(), dupmap.end());
for (map<string, bufferlist>::iterator it = ++kvmap.begin();
it != kvmap.end();
++it) {
bufferlist bl = it->second;
auto blit = bl.cbegin();
index_data this_idata;
this_idata.decode(blit);
if (this_idata.is_timed_out(mytime, timeout)) {
if (verbose) cout << client_name
<< " THINKS THE OTHER CLIENT DIED. (mytime is "
<< mytime.sec() << "." << mytime.usec() << ", idata.ts is "
<< this_idata.ts.sec() << "." << this_idata.ts.usec()
<< ", it has been " << (mytime - this_idata.ts).sec()
<< '.' << (mytime - this_idata.ts).usec()
<< ", timeout is " << timeout << ")" << std::endl;
//the client died after deleting the object. clean up.
if (cleanup(this_idata, -EPREFIX) == -ESUICIDE) {
return -ESUICIDE;
}
return read_index(key, idata, next_idata, force_update);
}
std::scoped_lock l{icache_lock};
icache.push(this_idata);
}
auto b = kvmap.begin()->second.cbegin();
idata->decode(b);
idata->kdata.parse(kvmap.begin()->first);
if (verbose) cout << "\t" << client_name << "-read_index: kvmap_size is "
<< kvmap.size()
<< ", idata is " << idata->str() << std::endl;
ceph_assert(idata->obj != "");
icache_lock.lock();
icache.push(key, *idata);
icache_lock.unlock();
if (next_idata != NULL && idata->kdata.prefix != "1") {
next_idata->kdata.parse((++kvmap.begin())->first);
auto nb = (++kvmap.begin())->second.cbegin();
next_idata->decode(nb);
std::scoped_lock l{icache_lock};
icache.push(*next_idata);
}
return err;
}
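// split() turns one full object into two. The object is read through
// maybe_read_for_balance with bound 2k-1 and CEPH_OSD_CMPXATTR_OP_GT, so the
// split only proceeds when it holds at least 2k entries; the lower half gets
// the first k pairs and the upper half gets the rest. The index entry is then
// prefixed, the old object is marked unwritable, the two replacements are
// created, the old object is removed, and the index is rewritten to drop the
// prefix.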
int KvFlatBtreeAsync::split(const index_data &idata) {
int err = 0;
opmap['l']++;
if (idata.prefix != "") {
return -EPREFIX;
}
rebalance_args args;
args.bound = 2 * k - 1;
args.comparator = CEPH_OSD_CMPXATTR_OP_GT;
err = read_object(idata.obj, &args);
args.odata.max_kdata = idata.kdata;
if (err < 0) {
if (verbose) cout << "\t\t" << client_name << "-split: read object "
<< args.odata.name
<< " got " << err << std::endl;
return err;
}
if (verbose) cout << "\t\t" << client_name << "-split: splitting "
<< idata.obj
<< ", which has size " << args.odata.size
<< " and actual size " << args.odata.omap.size() << std::endl;
///////preparations that happen outside the critical section
//for prefix index
vector<object_data> to_create;
vector<object_data> to_delete;
to_delete.push_back(object_data(idata.min_kdata,
args.odata.max_kdata, args.odata.name, args.odata.version));
//for lower half object
std::map<std::string, bufferlist>::const_iterator it = args.odata.omap.begin();
client_index_lock.lock();
to_create.push_back(object_data(to_string(client_name, client_index++)));
client_index_lock.unlock();
for (int i = 0; i < k; i++) {
to_create[0].omap.insert(*it);
++it;
}
to_create[0].min_kdata = idata.min_kdata;
to_create[0].max_kdata = key_data(to_create[0].omap.rbegin()->first);
//for upper half object
client_index_lock.lock();
to_create.push_back(object_data(to_create[0].max_kdata,
args.odata.max_kdata,
to_string(client_name, client_index++)));
client_index_lock.unlock();
to_create[1].omap.insert(
++args.odata.omap.find(to_create[0].omap.rbegin()->first),
args.odata.omap.end());
//setting up operations
librados::ObjectWriteOperation owos[6];
vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > ops;
index_data out_data;
set_up_prefix_index(to_create, to_delete, &owos[0], &out_data, &err);
ops.push_back(std::make_pair(
std::pair<int, string>(ADD_PREFIX, index_name),
&owos[0]));
for (int i = 1; i < 6; i++) {
ops.push_back(std::make_pair(std::make_pair(0,""), &owos[i]));
}
set_up_ops(to_create, to_delete, &ops, out_data, &err);
/////BEGIN CRITICAL SECTION/////
//put prefix on index entry for idata.val
err = perform_ops("\t\t" + client_name + "-split:", out_data, &ops);
if (err < 0) {
return err;
}
if (verbose) cout << "\t\t" << client_name << "-split: done splitting."
<< std::endl;
/////END CRITICAL SECTION/////
icache_lock.lock();
for (vector<delete_data>::iterator it = out_data.to_delete.begin();
it != out_data.to_delete.end(); ++it) {
icache.erase(it->max);
}
for (vector<create_data>::iterator it = out_data.to_create.begin();
it != out_data.to_create.end(); ++it) {
icache.push(index_data(*it));
}
icache_lock.unlock();
return err;
}
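// rebalance() either merges two adjacent objects (combined size <= 2k) into a
// single new object, or redistributes their entries across two new objects of
// roughly equal size. It follows the same protocol as split(): prefix the
// index, mark the old objects, write the replacements, rewrite the index.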
int KvFlatBtreeAsync::rebalance(const index_data &idata1,
const index_data &next_idata){
opmap['m']++;
int err = 0;
if (idata1.prefix != "") {
return -EPREFIX;
}
rebalance_args args1;
args1.bound = k + 1;
args1.comparator = CEPH_OSD_CMPXATTR_OP_LT;
index_data idata2 = next_idata;
rebalance_args args2;
args2.bound = k + 1;
args2.comparator = CEPH_OSD_CMPXATTR_OP_LT;
if (idata1.kdata.prefix == "1") {
//this is the highest key in the index, so it doesn't have a next.
//read the index for the previous entry
err = prev(idata1, &idata2);
if (err == -ERANGE) {
if (verbose) cout << "\t\t" << client_name
<< "-rebalance: this is the only node, "
<< "so aborting" << std::endl;
return -EUCLEAN;
} else if (err < 0) {
return err;
}
//read the first object
err = read_object(idata1.obj, &args2);
if (err < 0) {
if (verbose) cout << "reading " << idata1.obj << " failed with " << err
<< std::endl;
if (err == -ENOENT) {
return -ECANCELED;
}
return err;
}
args2.odata.min_kdata = idata1.min_kdata;
args2.odata.max_kdata = idata1.kdata;
//read the second object
args1.bound = 2 * k + 1;
err = read_object(idata2.obj, &args1);
if (err < 0) {
if (verbose) cout << "reading " << idata1.obj << " failed with " << err
<< std::endl;
return err;
}
args1.odata.min_kdata = idata2.min_kdata;
args1.odata.max_kdata = idata2.kdata;
if (verbose) cout << "\t\t" << client_name << "-rebalance: read "
<< idata2.obj
<< ". size: " << args1.odata.size << " version: "
<< args1.odata.version
<< std::endl;
} else {
    ceph_assert(next_idata.obj != "");
//there is a next key, so get it.
err = read_object(idata1.obj, &args1);
if (err < 0) {
if (verbose) cout << "reading " << idata1.obj << " failed with " << err
<< std::endl;
return err;
}
args1.odata.min_kdata = idata1.min_kdata;
args1.odata.max_kdata = idata1.kdata;
args2.bound = 2 * k + 1;
err = read_object(idata2.obj, &args2);
if (err < 0) {
if (verbose) cout << "reading " << idata1.obj << " failed with " << err
<< std::endl;
if (err == -ENOENT) {
return -ECANCELED;
}
return err;
}
args2.odata.min_kdata = idata2.min_kdata;
args2.odata.max_kdata = idata2.kdata;
if (verbose) cout << "\t\t" << client_name << "-rebalance: read "
<< idata2.obj
<< ". size: " << args2.odata.size << " version: "
<< args2.odata.version
<< std::endl;
}
if (verbose) cout << "\t\t" << client_name << "-rebalance: o1 is "
<< args1.odata.max_kdata.encoded() << ","
<< args1.odata.name << " with size " << args1.odata.size
<< " , o2 is " << args2.odata.max_kdata.encoded()
<< "," << args2.odata.name << " with size " << args2.odata.size
<< std::endl;
//calculations
if ((int)args1.odata.size > k && (int)args1.odata.size <= 2*k
&& (int)args2.odata.size > k
&& (int)args2.odata.size <= 2*k) {
//nothing to do
if (verbose) cout << "\t\t" << client_name
<< "-rebalance: both sizes in range, so"
<< " aborting " << std::endl;
return -EBALANCE;
} else if (idata1.prefix != "" || idata2.prefix != "") {
return -EPREFIX;
}
//this is the high object. it gets created regardless of rebalance or merge.
client_index_lock.lock();
string o2w = to_string(client_name, client_index++);
client_index_lock.unlock();
index_data idata;
vector<object_data> to_create;
vector<object_data> to_delete;
librados::ObjectWriteOperation create[2];//possibly only 1 will be used
librados::ObjectWriteOperation other_ops[6];
vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > ops;
ops.push_back(std::make_pair(
std::pair<int, string>(ADD_PREFIX, index_name),
&other_ops[0]));
if ((int)args1.odata.size + (int)args2.odata.size <= 2*k) {
//merge
if (verbose) cout << "\t\t" << client_name << "-rebalance: merging "
<< args1.odata.name
<< " and " << args2.odata.name << " to get " << o2w
<< std::endl;
std::map<string, bufferlist> write2_map;
write2_map.insert(args1.odata.omap.begin(), args1.odata.omap.end());
write2_map.insert(args2.odata.omap.begin(), args2.odata.omap.end());
to_create.push_back(object_data(args1.odata.min_kdata,
args2.odata.max_kdata, o2w, write2_map));
ops.push_back(std::make_pair(
std::pair<int, std::string>(MAKE_OBJECT, o2w),
&create[0]));
ceph_assert((int)write2_map.size() <= 2*k);
} else {
//rebalance
if (verbose) cout << "\t\t" << client_name << "-rebalance: rebalancing "
<< args1.odata.name
<< " and " << args2.odata.name << std::endl;
std::map<std::string, bufferlist> write1_map;
std::map<std::string, bufferlist> write2_map;
std::map<std::string, bufferlist>::iterator it;
client_index_lock.lock();
string o1w = to_string(client_name, client_index++);
client_index_lock.unlock();
int target_size_1 = ceil(((int)args1.odata.size + (int)args2.odata.size)
/ 2.0);
if (args1.odata.max_kdata != idata1.kdata) {
//this should be true if idata1 is the high object
target_size_1 = floor(((int)args1.odata.size + (int)args2.odata.size)
/ 2.0);
}
for (it = args1.odata.omap.begin();
it != args1.odata.omap.end() && (int)write1_map.size()
< target_size_1;
++it) {
write1_map.insert(*it);
}
if (it != args1.odata.omap.end()){
//write1_map is full, so put the rest in write2_map
write2_map.insert(it, args1.odata.omap.end());
write2_map.insert(args2.odata.omap.begin(), args2.odata.omap.end());
} else {
//args1.odata.omap was small, and write2_map still needs more
std::map<std::string, bufferlist>::iterator it2;
for(it2 = args2.odata.omap.begin();
(it2 != args2.odata.omap.end()) && ((int)write1_map.size()
< target_size_1);
++it2) {
write1_map.insert(*it2);
}
write2_map.insert(it2, args2.odata.omap.end());
}
if (verbose) cout << "\t\t" << client_name
<< "-rebalance: write1_map has size "
<< write1_map.size() << ", write2_map.size() is " << write2_map.size()
<< std::endl;
//at this point, write1_map and write2_map should have the correct pairs
to_create.push_back(object_data(args1.odata.min_kdata,
key_data(write1_map.rbegin()->first),
o1w,write1_map));
to_create.push_back(object_data( key_data(write1_map.rbegin()->first),
args2.odata.max_kdata, o2w, write2_map));
ops.push_back(std::make_pair(
std::pair<int, std::string>(MAKE_OBJECT, o1w),
&create[0]));
ops.push_back(std::make_pair(
std::pair<int, std::string>(MAKE_OBJECT, o2w),
&create[1]));
}
to_delete.push_back(object_data(args1.odata.min_kdata,
args1.odata.max_kdata, args1.odata.name, args1.odata.version));
to_delete.push_back(object_data(args2.odata.min_kdata,
args2.odata.max_kdata, args2.odata.name, args2.odata.version));
for (int i = 1; i < 6; i++) {
ops.push_back(std::make_pair(std::make_pair(0,""), &other_ops[i]));
}
index_data out_data;
set_up_prefix_index(to_create, to_delete, &other_ops[0], &out_data, &err);
set_up_ops(to_create, to_delete, &ops, out_data, &err);
//at this point, all operations should be completely set up.
/////BEGIN CRITICAL SECTION/////
err = perform_ops("\t\t" + client_name + "-rebalance:", out_data, &ops);
if (err < 0) {
return err;
}
icache_lock.lock();
for (vector<delete_data>::iterator it = out_data.to_delete.begin();
it != out_data.to_delete.end(); ++it) {
icache.erase(it->max);
}
for (vector<create_data>::iterator it = out_data.to_create.begin();
it != out_data.to_create.end(); ++it) {
icache.push(index_data(*it));
}
icache_lock.unlock();
if (verbose) cout << "\t\t" << client_name << "-rebalance: done rebalancing."
<< std::endl;
/////END CRITICAL SECTION/////
return err;
}
int KvFlatBtreeAsync::read_object(const string &obj, object_data * odata) {
librados::ObjectReadOperation get_obj;
librados::AioCompletion * obj_aioc = rados.aio_create_completion();
int err;
bufferlist unw_bl;
odata->name = obj;
get_obj.omap_get_vals2("", LONG_MAX, &odata->omap, nullptr, &err);
get_obj.getxattr("unwritable", &unw_bl, &err);
io_ctx.aio_operate(obj, obj_aioc, &get_obj, NULL);
obj_aioc->wait_for_complete();
err = obj_aioc->get_return_value();
if (err < 0){
//possibly -ENOENT, meaning someone else deleted it.
obj_aioc->release();
return err;
}
odata->unwritable = string(unw_bl.c_str(), unw_bl.length()) == "1";
odata->version = obj_aioc->get_version64();
odata->size = odata->omap.size();
obj_aioc->release();
return 0;
}
int KvFlatBtreeAsync::read_object(const string &obj, rebalance_args * args) {
bufferlist inbl;
args->encode(inbl);
bufferlist outbl;
int err;
librados::AioCompletion * a = rados.aio_create_completion();
io_ctx.aio_exec(obj, a, "kvs", "maybe_read_for_balance", inbl, &outbl);
a->wait_for_complete();
err = a->get_return_value();
if (err < 0) {
if (verbose) cout << "\t\t" << client_name
<< "-read_object: reading failed with "
<< err << std::endl;
a->release();
return err;
}
auto it = outbl.cbegin();
args->decode(it);
args->odata.name = obj;
args->odata.version = a->get_version64();
a->release();
return err;
}
void KvFlatBtreeAsync::set_up_prefix_index(
const vector<object_data> &to_create,
const vector<object_data> &to_delete,
librados::ObjectWriteOperation * owo,
index_data * idata,
int * err) {
std::map<std::string, std::pair<bufferlist, int> > assertions;
std::map<string, bufferlist> to_insert;
idata->prefix = "1";
idata->ts = ceph_clock_now();
for(vector<object_data>::const_iterator it = to_create.begin();
it != to_create.end();
++it) {
create_data c(it->min_kdata, it->max_kdata, it->name);
idata->to_create.push_back(c);
}
for(vector<object_data>::const_iterator it = to_delete.begin();
it != to_delete.end();
++it) {
delete_data d(it->min_kdata, it->max_kdata, it->name, it->version);
idata->to_delete.push_back(d);
}
for(vector<object_data>::const_iterator it = to_delete.begin();
it != to_delete.end();
++it) {
idata->obj = it->name;
idata->min_kdata = it->min_kdata;
idata->kdata = it->max_kdata;
bufferlist insert;
idata->encode(insert);
to_insert[it->max_kdata.encoded()] = insert;
index_data this_entry;
this_entry.min_kdata = idata->min_kdata;
this_entry.kdata = idata->kdata;
this_entry.obj = idata->obj;
assertions[it->max_kdata.encoded()] = std::pair<bufferlist, int>
(to_bl(this_entry), CEPH_OSD_CMPXATTR_OP_EQ);
if (verbose) cout << "\t\t\t" << client_name
<< "-setup_prefix: will assert "
<< this_entry.str() << std::endl;
}
ceph_assert(*err == 0);
owo->omap_cmp(assertions, err);
if (to_create.size() <= 2) {
owo->omap_set(to_insert);
}
}
//some args can be empty if there are no corresponding entries in the ops vector
void KvFlatBtreeAsync::set_up_ops(
const vector<object_data> &create_vector,
const vector<object_data> &delete_vector,
vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > * ops,
const index_data &idata,
int * err) {
vector<std::pair<std::pair<int, string>,
librados::ObjectWriteOperation* > >::iterator it;
//skip the prefixing part
for(it = ops->begin(); it->first.first == ADD_PREFIX; ++it) {}
std::map<string, bufferlist> to_insert;
std::set<string> to_remove;
std::map<string, std::pair<bufferlist, int> > assertions;
if (create_vector.size() > 0) {
for (int i = 0; i < (int)idata.to_delete.size(); ++i) {
it->first = std::pair<int, string>(UNWRITE_OBJECT, idata.to_delete[i].obj);
set_up_unwrite_object(delete_vector[i].version, it->second);
++it;
}
}
for (int i = 0; i < (int)idata.to_create.size(); ++i) {
index_data this_entry(idata.to_create[i].max, idata.to_create[i].min,
idata.to_create[i].obj);
to_insert[idata.to_create[i].max.encoded()] = to_bl(this_entry);
if (idata.to_create.size() <= 2) {
it->first = std::pair<int, string>(MAKE_OBJECT, idata.to_create[i].obj);
} else {
it->first = std::pair<int, string>(AIO_MAKE_OBJECT, idata.to_create[i].obj);
}
set_up_make_object(create_vector[i].omap, it->second);
++it;
}
for (int i = 0; i < (int)idata.to_delete.size(); ++i) {
index_data this_entry = idata;
this_entry.obj = idata.to_delete[i].obj;
this_entry.min_kdata = idata.to_delete[i].min;
this_entry.kdata = idata.to_delete[i].max;
if (verbose) cout << "\t\t\t" << client_name << "-setup_ops: will assert "
<< this_entry.str() << std::endl;
assertions[idata.to_delete[i].max.encoded()] = std::pair<bufferlist, int>(
to_bl(this_entry), CEPH_OSD_CMPXATTR_OP_EQ);
to_remove.insert(idata.to_delete[i].max.encoded());
it->first = std::pair<int, string>(REMOVE_OBJECT, idata.to_delete[i].obj);
set_up_delete_object(it->second);
++it;
}
if ((int)idata.to_create.size() <= 2) {
it->second->omap_cmp(assertions, err);
}
it->second->omap_rm_keys(to_remove);
it->second->omap_set(to_insert);
it->first = std::pair<int, string>(REMOVE_PREFIX, index_name);
}
void KvFlatBtreeAsync::set_up_make_object(
const std::map<std::string, bufferlist> &to_set,
librados::ObjectWriteOperation *owo) {
bufferlist inbl;
encode(to_set, inbl);
owo->exec("kvs", "create_with_omap", inbl);
}
void KvFlatBtreeAsync::set_up_unwrite_object(
const int &ver, librados::ObjectWriteOperation *owo) {
if (ver > 0) {
owo->assert_version(ver);
}
owo->cmpxattr("unwritable", CEPH_OSD_CMPXATTR_OP_EQ, to_bl("0"));
owo->setxattr("unwritable", to_bl("1"));
}
void KvFlatBtreeAsync::set_up_restore_object(
librados::ObjectWriteOperation *owo) {
owo->cmpxattr("unwritable", CEPH_OSD_CMPXATTR_OP_EQ, to_bl("1"));
owo->setxattr("unwritable", to_bl("0"));
}
void KvFlatBtreeAsync::set_up_delete_object(
librados::ObjectWriteOperation *owo) {
owo->cmpxattr("unwritable", CEPH_OSD_CMPXATTR_OP_EQ, to_bl("1"));
owo->remove();
}
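// perform_ops() walks the (op code, object name, write op) pairs in order. The
// codes handled below are ADD_PREFIX (prefix the affected index entries),
// UNWRITE_OBJECT (mark an old object unwritable, asserting its version),
// MAKE_OBJECT / AIO_MAKE_OBJECT (create the replacement objects, synchronously
// or via aio), REMOVE_OBJECT (delete the marked objects), RESTORE_OBJECT
// (clear the unwritable flag again during cleanup), and REMOVE_PREFIX (rewrite
// the index and drop the prefix). The injection hook runs before every op so
// the test harness can simulate a client dying at any interruption point.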
int KvFlatBtreeAsync::perform_ops(const string &debug_prefix,
const index_data &idata,
vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > *ops) {
int err = 0;
vector<librados::AioCompletion*> aiocs(idata.to_create.size());
int count = 0;
for (vector<std::pair<std::pair<int, string>,
librados::ObjectWriteOperation*> >::iterator it = ops->begin();
it != ops->end(); ++it) {
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
return -ESUICIDE;
}
switch (it->first.first) {
case ADD_PREFIX://prefixing
if (verbose) cout << debug_prefix << " adding prefix" << std::endl;
err = io_ctx.operate(index_name, it->second);
if (err < 0) {
if (verbose) cout << debug_prefix << " prefixing the index failed with "
<< err << std::endl;
return -EPREFIX;
}
if (verbose) cout << debug_prefix << " prefix added." << std::endl;
break;
case UNWRITE_OBJECT://marking
if (verbose) cout << debug_prefix << " marking " << it->first.second
<< std::endl;
err = io_ctx.operate(it->first.second, it->second);
if (err < 0) {
//most likely because it changed, in which case it will be -ERANGE
if (verbose) cout << debug_prefix << " marking " << it->first.second
<< "failed with code" << err << std::endl;
if (it->first.second == (*idata.to_delete.begin()).max.encoded()) {
if (cleanup(idata, -EFIRSTOBJ) == -ESUICIDE) {
return -ESUICIDE;
}
} else {
if (cleanup(idata, -ERANGE) == -ESUICIDE) {
return -ESUICIDE;
}
}
return err;
}
if (verbose) cout << debug_prefix << " marked " << it->first.second
<< std::endl;
break;
case MAKE_OBJECT://creating
if (verbose) cout << debug_prefix << " creating " << it->first.second
<< std::endl;
err = io_ctx.operate(it->first.second, it->second);
if (err < 0) {
//this can happen if someone else was cleaning up after us.
if (verbose) cout << debug_prefix << " creating " << it->first.second
<< " failed"
<< " with code " << err << std::endl;
if (err == -EEXIST) {
//someone thinks we died, so die
if (verbose) cout << client_name << " is suiciding!" << std::endl;
return -ESUICIDE;
} else {
ceph_abort();
}
return err;
}
if (verbose || idata.to_create.size() > 2) {
cout << debug_prefix << " created object " << it->first.second
<< std::endl;
}
break;
case AIO_MAKE_OBJECT:
cout << debug_prefix << " launching asynchronous create "
<< it->first.second << std::endl;
aiocs[count] = rados.aio_create_completion();
io_ctx.aio_operate(it->first.second, aiocs[count], it->second);
count++;
if ((int)idata.to_create.size() == count) {
cout << "starting aiowrite waiting loop" << std::endl;
for (count -= 1; count >= 0; count--) {
aiocs[count]->wait_for_complete();
err = aiocs[count]->get_return_value();
if (err < 0) {
//this can happen if someone else was cleaning up after us.
cerr << debug_prefix << " a create failed"
<< " with code " << err << std::endl;
if (err == -EEXIST) {
//someone thinks we died, so die
cerr << client_name << " is suiciding!" << std::endl;
return -ESUICIDE;
} else {
ceph_abort();
}
return err;
}
if (verbose || idata.to_create.size() > 2) {
cout << debug_prefix << " completed aio " << aiocs.size() - count
<< "/" << aiocs.size() << std::endl;
}
}
}
break;
case REMOVE_OBJECT://deleting
if (verbose) cout << debug_prefix << " deleting " << it->first.second
<< std::endl;
err = io_ctx.operate(it->first.second, it->second);
if (err < 0) {
//if someone else called cleanup on this prefix first
if (verbose) cout << debug_prefix << " deleting " << it->first.second
<< "failed with code" << err << std::endl;
}
if (verbose) cout << debug_prefix << " deleted " << it->first.second
<< std::endl;
break;
case REMOVE_PREFIX://rewriting index
if (verbose) cout << debug_prefix << " updating index " << std::endl;
err = io_ctx.operate(index_name, it->second);
if (err < 0) {
if (verbose) cout << debug_prefix
<< " rewriting the index failed with code " << err
<< ". someone else must have thought we died, so dying" << std::endl;
return -ETIMEDOUT;
}
if (verbose) cout << debug_prefix << " updated index." << std::endl;
break;
case RESTORE_OBJECT:
if (verbose) cout << debug_prefix << " restoring " << it->first.second
<< std::endl;
err = io_ctx.operate(it->first.second, it->second);
if (err < 0) {
if (verbose) cout << debug_prefix << "restoring " << it->first.second
<< " failed"
<< " with " << err << std::endl;
return err;
}
if (verbose) cout << debug_prefix << " restored " << it->first.second
<< std::endl;
break;
default:
if (verbose) cout << debug_prefix << " performing unknown op on "
<< it->first.second
<< std::endl;
err = io_ctx.operate(index_name, it->second);
if (err < 0) {
if (verbose) cout << debug_prefix << " unknown op on "
<< it->first.second
<< " failed with " << err << std::endl;
return err;
}
if (verbose) cout << debug_prefix << " unknown op on "
<< it->first.second
<< " succeeded." << std::endl;
break;
}
}
return err;
}
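// cleanup() repairs the structure after another client is presumed dead, based
// on how far that client got: -EFIRSTOBJ (it never marked the first object, so
// only the index prefix has to be undone), -ERANGE (a later object failed to
// be marked, so restore the first one and rewrite the index), -ENOENT (the new
// objects already exist, so roll the operation forward), and any other error
// rolls the whole operation back - mark and remove the half-created objects,
// restore the old ones and rewrite the index.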
int KvFlatBtreeAsync::cleanup(const index_data &idata, const int &error) {
if (verbose) cout << "\t\t" << client_name << ": cleaning up after "
<< idata.str()
<< std::endl;
int err = 0;
ceph_assert(idata.prefix != "");
std::map<std::string,bufferlist> new_index;
std::map<std::string, std::pair<bufferlist, int> > assertions;
switch (error) {
case -EFIRSTOBJ: {
//this happens if the split or rebalance failed to mark the first object,
//meaning only the index needs to be changed.
//restore objects that had been marked unwritable.
for(vector<delete_data >::const_iterator it =
idata.to_delete.begin();
it != idata.to_delete.end(); ++it) {
index_data this_entry;
this_entry.obj = (*it).obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
new_index[it->max.encoded()] = to_bl(this_entry);
this_entry = idata;
this_entry.obj = it->obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: will assert index contains "
<< this_entry.str() << std::endl;
assertions[it->max.encoded()] =
std::pair<bufferlist, int>(to_bl(this_entry),
CEPH_OSD_CMPXATTR_OP_EQ);
}
//update the index
librados::ObjectWriteOperation update_index;
update_index.omap_cmp(assertions, &err);
update_index.omap_set(new_index);
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: updating index"
<< std::endl;
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
return -ESUICIDE;
}
err = io_ctx.operate(index_name, &update_index);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: rewriting failed with "
<< err << ". returning -ECANCELED" << std::endl;
return -ECANCELED;
}
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: updated index. cleanup done."
<< std::endl;
break;
}
case -ERANGE: {
//this happens if a split or rebalance fails to mark an object. It is a
//special case of rolling back that does not have to deal with new objects.
//restore objects that had been marked unwritable.
vector<delete_data >::const_iterator it;
for(it = idata.to_delete.begin();
it != idata.to_delete.end(); ++it) {
index_data this_entry;
this_entry.obj = (*it).obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
new_index[it->max.encoded()] = to_bl(this_entry);
this_entry = idata;
this_entry.obj = it->obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: will assert index contains "
<< this_entry.str() << std::endl;
assertions[it->max.encoded()] =
std::pair<bufferlist, int>(to_bl(this_entry),
CEPH_OSD_CMPXATTR_OP_EQ);
}
it = idata.to_delete.begin();
librados::ObjectWriteOperation restore;
set_up_restore_object(&restore);
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
return -ESUICIDE;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restoring "
<< it->obj
<< std::endl;
err = io_ctx.operate(it->obj, &restore);
if (err < 0) {
//i.e., -ECANCELED because the object was already restored by someone
//else
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restoring "
<< it->obj
<< " failed with " << err << std::endl;
} else {
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restored "
<< it->obj
<< std::endl;
}
//update the index
librados::ObjectWriteOperation update_index;
update_index.omap_cmp(assertions, &err);
update_index.omap_set(new_index);
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: updating index"
<< std::endl;
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
return -ESUICIDE;
}
err = io_ctx.operate(index_name, &update_index);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: rewriting failed with "
<< err << ". returning -ECANCELED" << std::endl;
return -ECANCELED;
}
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: updated index. cleanup done."
<< std::endl;
break;
}
case -ENOENT: {
if (verbose) cout << "\t\t" << client_name << "-cleanup: rolling forward"
<< std::endl;
//all changes were created except for updating the index and possibly
//deleting the objects. roll forward.
vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > ops;
vector<librados::ObjectWriteOperation> owos(idata.to_delete.size() + 1);
for (int i = 0; i <= (int)idata.to_delete.size(); ++i) {
ops.push_back(std::make_pair(std::pair<int, std::string>(0, ""), &owos[i]));
}
set_up_ops(vector<object_data>(),
vector<object_data>(), &ops, idata, &err);
err = perform_ops("\t\t" + client_name + "-cleanup:", idata, &ops);
if (err < 0) {
if (err == -ESUICIDE) {
return -ESUICIDE;
}
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: rewriting failed with "
<< err << ". returning -ECANCELED" << std::endl;
return -ECANCELED;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: updated index"
<< std::endl;
break;
}
default: {
//roll back all changes.
if (verbose) cout << "\t\t" << client_name << "-cleanup: rolling back"
<< std::endl;
std::map<std::string,bufferlist> new_index;
std::set<std::string> to_remove;
std::map<std::string, std::pair<bufferlist, int> > assertions;
//mark the objects to be created. if someone else already has, die.
for(vector<create_data >::const_reverse_iterator it =
idata.to_create.rbegin();
it != idata.to_create.rend(); ++it) {
librados::ObjectWriteOperation rm;
set_up_unwrite_object(0, &rm);
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 )
{
return -ESUICIDE;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: marking "
<< it->obj
<< std::endl;
err = io_ctx.operate(it->obj, &rm);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: marking "
<< it->obj
<< " failed with " << err << std::endl;
} else {
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: marked "
<< it->obj
<< std::endl;
}
}
//restore objects that had been marked unwritable.
for(vector<delete_data >::const_iterator it =
idata.to_delete.begin();
it != idata.to_delete.end(); ++it) {
index_data this_entry;
this_entry.obj = (*it).obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
new_index[it->max.encoded()] = to_bl(this_entry);
this_entry = idata;
this_entry.obj = it->obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: will assert index contains "
<< this_entry.str() << std::endl;
assertions[it->max.encoded()] =
std::pair<bufferlist, int>(to_bl(this_entry),
CEPH_OSD_CMPXATTR_OP_EQ);
librados::ObjectWriteOperation restore;
set_up_restore_object(&restore);
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: will assert index contains "
<< this_entry.str() << std::endl;
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 )
{
return -ESUICIDE;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restoring "
<< it->obj
<< std::endl;
err = io_ctx.operate(it->obj, &restore);
if (err == -ENOENT) {
//it had gotten far enough to be rolled forward - unmark the objects
//and roll forward.
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: roll forward instead"
<< std::endl;
for(vector<create_data >::const_iterator cit =
idata.to_create.begin();
cit != idata.to_create.end(); ++cit) {
librados::ObjectWriteOperation res;
set_up_restore_object(&res);
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)()
== 1 ) {
return -ECANCELED;
}
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: restoring " << cit->obj
<< std::endl;
err = io_ctx.operate(cit->obj, &res);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: restoring "
<< cit->obj << " failed with " << err << std::endl;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restored "
<< cit->obj
<< std::endl;
}
return cleanup(idata, -ENOENT);
} else if (err < 0) {
//i.e., -ECANCELED because the object was already restored by someone
//else
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: restoring " << it->obj
<< " failed with " << err << std::endl;
} else {
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restored "
<< it->obj
<< std::endl;
}
}
//remove the new objects
for(vector<create_data >::const_reverse_iterator it =
idata.to_create.rbegin();
it != idata.to_create.rend(); ++it) {
to_remove.insert(it->max.encoded());
librados::ObjectWriteOperation rm;
rm.remove();
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 )
{
return -ESUICIDE;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: removing "
<< it->obj
<< std::endl;
err = io_ctx.operate(it->obj, &rm);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: failed to remove "
<< it->obj << std::endl;
} else {
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: removed "
<< it->obj
<< std::endl;
}
}
//update the index
librados::ObjectWriteOperation update_index;
update_index.omap_cmp(assertions, &err);
update_index.omap_rm_keys(to_remove);
update_index.omap_set(new_index);
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: updating index"
<< std::endl;
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
return -ESUICIDE;
}
err = io_ctx.operate(index_name, &update_index);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: rewriting failed with "
<< err << ". returning -ECANCELED" << std::endl;
return -ECANCELED;
}
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: updated index. cleanup done."
<< std::endl;
break;
}
}
return err;
}
string KvFlatBtreeAsync::to_string(string s, int i) {
stringstream ret;
ret << s << i;
return ret.str();
}
string KvFlatBtreeAsync::get_name() {
return rados_id;
}
void KvFlatBtreeAsync::set_inject(injection_t inject, int wait_time) {
interrupt = inject;
wait_ms = wait_time;
}
int KvFlatBtreeAsync::setup(int argc, const char** argv) {
int r = rados.init(rados_id.c_str());
if (r < 0) {
cerr << "error during init" << r << std::endl;
return r;
}
r = rados.conf_parse_argv(argc, argv);
if (r < 0) {
cerr << "error during parsing args" << r << std::endl;
return r;
}
r = rados.conf_parse_env(NULL);
if (r < 0) {
cerr << "error during parsing env" << r << std::endl;
return r;
}
r = rados.conf_read_file(NULL);
if (r < 0) {
cerr << "error during read file: " << r << std::endl;
return r;
}
r = rados.connect();
if (r < 0) {
cerr << "error during connect: " << r << std::endl;
return r;
}
r = rados.ioctx_create(pool_name.c_str(), io_ctx);
if (r < 0) {
cerr << "error creating io ctx: " << r << std::endl;
rados.shutdown();
return r;
}
librados::ObjectWriteOperation make_index;
make_index.create(true);
std::map<std::string,bufferlist> index_map;
index_data idata;
idata.obj = client_name;
idata.min_kdata.raw_key = "";
idata.kdata = key_data("");
index_map["1"] = to_bl(idata);
make_index.omap_set(index_map);
r = io_ctx.operate(index_name, &make_index);
if (r < 0) {
if (verbose) cout << client_name << ": Making the index failed with code "
<< r
<< std::endl;
return 0;
}
if (verbose) cout << client_name << ": created index object" << std::endl;
librados::ObjectWriteOperation make_max_obj;
make_max_obj.create(true);
make_max_obj.setxattr("unwritable", to_bl("0"));
make_max_obj.setxattr("size", to_bl("0"));
r = io_ctx.operate(client_name, &make_max_obj);
if (r < 0) {
if (verbose) cout << client_name << ": Setting xattr failed with code "
<< r
<< std::endl;
}
return 0;
}
int KvFlatBtreeAsync::set(const string &key, const bufferlist &val,
bool update_on_existing) {
if (verbose) cout << client_name << " is "
<< (update_on_existing? "updating " : "setting ")
<< key << std::endl;
int err = 0;
utime_t mytime;
index_data idata(key);
if (verbose) cout << "\t" << client_name << ": finding oid" << std::endl;
err = read_index(key, &idata, NULL, false);
if (err < 0) {
if (verbose) cout << "\t" << client_name
<< ": getting oid failed with code "
<< err << std::endl;
return err;
}
if (verbose) cout << "\t" << client_name << ": index data is " << idata.str()
<< ", object is " << idata.obj << std::endl;
err = set_op(key, val, update_on_existing, idata);
if (verbose) cout << "\t" << client_name << ": finished set with " << err
<< std::endl;
return err;
}
int KvFlatBtreeAsync::set_op(const string &key, const bufferlist &val,
bool update_on_existing, index_data &idata) {
//write
bufferlist inbl;
omap_set_args args;
args.bound = 2 * k;
args.exclusive = !update_on_existing;
args.omap[key] = val;
args.encode(inbl);
librados::ObjectWriteOperation owo;
owo.exec("kvs", "omap_insert", inbl);
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
if (verbose) cout << client_name << " IS SUICIDING!" << std::endl;
return -ESUICIDE;
}
if (verbose) cout << "\t" << client_name << ": inserting " << key
<< " into object "
<< idata.obj << std::endl;
int err = io_ctx.operate(idata.obj, &owo);
if (err < 0) {
switch (err) {
case -EEXIST: {
//the key already exists and this is an exclusive insert.
cerr << "\t" << client_name << ": writing key failed with "
<< err << std::endl;
return err;
}
case -EKEYREJECTED: {
//the object needs to be split.
do {
if (verbose) cout << "\t" << client_name << ": running split on "
<< idata.obj
<< std::endl;
err = read_index(key, &idata, NULL, true);
if (err < 0) {
if (verbose) cout << "\t" << client_name
<< ": getting oid failed with code "
<< err << std::endl;
return err;
}
err = split(idata);
if (err < 0 && err != -ENOENT && err != -EBALANCE) {
if (verbose) cerr << "\t" << client_name << ": split failed with "
<< err << std::endl;
int ret = handle_set_rm_errors(err, idata.obj, key, &idata, NULL);
switch (ret) {
case -ESUICIDE:
if (verbose) cout << client_name << " IS SUICIDING!" << std::endl;
return ret;
case 1:
return set_op(key, val, update_on_existing, idata);
case 2:
return err;
}
}
} while (err < 0 && err != -EBALANCE && err != -ENOENT);
err = read_index(key, &idata, NULL, true);
if (err < 0) {
if (verbose) cout << "\t" << client_name
<< ": getting oid failed with code "
<< err << std::endl;
return err;
}
return set_op(key, val, update_on_existing, idata);
}
default:
if (verbose) cerr << "\t" << client_name << ": writing obj failed with "
<< err << std::endl;
if (err == -ENOENT || err == -EACCES) {
if (err == -ENOENT) {
if (verbose) cout << "CACHE FAILURE" << std::endl;
}
err = read_index(key, &idata, NULL, true);
if (err < 0) {
if (verbose) cout << "\t" << client_name
<< ": getting oid failed with code "
<< err << std::endl;
return err;
}
if (verbose) cout << "\t" << client_name << ": index data is "
<< idata.str()
<< ", object is " << idata.obj << std::endl;
return set_op(key, val, update_on_existing, idata);
} else {
return err;
}
}
}
return 0;
}
int KvFlatBtreeAsync::remove(const string &key) {
if (verbose) cout << client_name << ": removing " << key << std::endl;
int err = 0;
string obj;
utime_t mytime;
index_data idata;
index_data next_idata;
if (verbose) cout << "\t" << client_name << ": finding oid" << std::endl;
err = read_index(key, &idata, &next_idata, false);
if (err < 0) {
if (verbose) cout << "getting oid failed with code " << err << std::endl;
return err;
}
obj = idata.obj;
if (verbose) cout << "\t" << client_name << ": idata is " << idata.str()
<< ", next_idata is " << next_idata.str()
<< ", obj is " << obj << std::endl;
err = remove_op(key, idata, next_idata);
if (verbose) cout << "\t" << client_name << ": finished remove with " << err
<< " and exiting" << std::endl;
return err;
}
int KvFlatBtreeAsync::remove_op(const string &key, index_data &idata,
index_data &next_idata) {
//write
bufferlist inbl;
omap_rm_args args;
args.bound = k;
args.omap.insert(key);
args.encode(inbl);
librados::ObjectWriteOperation owo;
owo.exec("kvs", "omap_remove", inbl);
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
if (verbose) cout << client_name << " IS SUICIDING!" << std::endl;
return -ESUICIDE;
}
if (verbose) cout << "\t" << client_name << ": removing " << key << " from "
<< idata.obj
<< std::endl;
int err = io_ctx.operate(idata.obj, &owo);
if (err < 0) {
if (verbose) cout << "\t" << client_name << ": writing obj failed with "
<< err << std::endl;
switch (err) {
case -ENODATA: {
//the key does not exist in the object
return err;
}
case -EKEYREJECTED: {
//the object needs to be split.
do {
if (verbose) cerr << "\t" << client_name << ": running rebalance on "
<< idata.obj << std::endl;
err = read_index(key, &idata, &next_idata, true);
if (err < 0) {
if (verbose) cout << "\t" << client_name
<< ": getting oid failed with code "
<< err << std::endl;
return err;
}
err = rebalance(idata, next_idata);
if (err < 0 && err != -ENOENT && err != -EBALANCE) {
if (verbose) cerr << "\t" << client_name << ": rebalance returned "
<< err << std::endl;
int ret = handle_set_rm_errors(err, idata.obj, key, &idata,
&next_idata);
switch (ret) {
case -ESUICIDE:
if (verbose) cout << client_name << " IS SUICIDING!" << std::endl;
return err;
case 1:
return remove_op(key, idata, next_idata);
case 2:
return err;
break;
case -EUCLEAN:
//this is the only node, so it's ok to go below k.
librados::ObjectWriteOperation owo;
bufferlist inbl;
omap_rm_args args;
args.bound = 0;
args.omap.insert(key);
args.encode(inbl);
owo.exec("kvs", "omap_remove", inbl);
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)()
== 1 ) {
if (verbose) cout << client_name << " IS SUICIDING!"
<< std::endl;
return -ESUICIDE;
}
if (verbose) cout << "\t" << client_name << ": removing " << key
<< " from "
<< idata.obj
<< std::endl;
int err = io_ctx.operate(idata.obj, &owo);
if (err == 0) {
return 0;
}
}
}
} while (err < 0 && err != -EBALANCE && err != -ENOENT);
err = read_index(key, &idata, &next_idata, true);
if (err < 0) {
if (verbose) cout << "\t" << client_name
<< ": getting oid failed with code "
<< err << std::endl;
return err;
}
return remove(key);
}
default:
if (err == -ENOENT || err == -EACCES) {
err = read_index(key, &idata, &next_idata, true);
if (err < 0) {
if (verbose) cout << "\t" << client_name
<< ": getting oid failed with code "
<< err << std::endl;
return err;
}
if (verbose) cout << "\t" << client_name << ": index data is "
<< idata.str()
<< ", object is " << idata.obj << std::endl;
//idea: we read the time every time we read the index anyway - store it.
return remove_op(key, idata, next_idata);
} else {
return err;
}
}
}
return 0;
}
int KvFlatBtreeAsync::handle_set_rm_errors(int &err, string obj,
string key,
index_data * idata, index_data * next_idata) {
if (err == -ESUICIDE) {
return err;
} else if (err == -ECANCELED //if an object was unwritable or index changed
|| err == -EPREFIX //if there is currently a prefix
|| err == -ETIMEDOUT// if the index changes during the op - i.e. cleanup
|| err == -EACCES) //possible if we were acting on old index data
{
err = read_index(key, idata, next_idata, true);
if (err < 0) {
return err;
}
if (verbose) cout << "\t" << client_name << ": prefix is " << idata->str()
<< std::endl;
if (idata->obj != obj) {
//someone else has split or cleaned up or something. start over.
return 1;//meaning repeat
}
} else if (err != -ETIMEDOUT && err != -ERANGE && err != -EACCES
&& err != -EUCLEAN){
if (verbose) cout << "\t" << client_name
<< ": split encountered an unexpected error: " << err
<< std::endl;
return 2;
}
return err;
}
int KvFlatBtreeAsync::get(const string &key, bufferlist *val) {
opmap['g']++;
if (verbose) cout << client_name << ": getting " << key << std::endl;
int err = 0;
index_data idata;
utime_t mytime;
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
return -ESUICIDE;
}
err = read_index(key, &idata, NULL, false);
mytime = ceph_clock_now();
if (err < 0) {
if (verbose) cout << "getting oid failed with code " << err << std::endl;
return err;
}
err = get_op(key, val, idata);
if (verbose) cout << client_name << ": got " << key << " with " << err
<< std::endl;
return err;
}
int KvFlatBtreeAsync::get_op(const string &key, bufferlist *val,
index_data &idata) {
int err = 0;
std::set<std::string> key_set;
key_set.insert(key);
std::map<std::string,bufferlist> omap;
librados::ObjectReadOperation read;
read.omap_get_vals_by_keys(key_set, &omap, &err);
err = io_ctx.operate(idata.obj, &read, NULL);
if (err < 0) {
if (err == -ENOENT) {
err = read_index(key, &idata, NULL, true);
if (err < 0) {
if (verbose) cout << "\t" << client_name
<< ": getting oid failed with code "
<< err << std::endl;
return err;
}
if (verbose) cout << "\t" << client_name << ": index data is "
<< idata.str()
<< ", object is " << idata.obj << std::endl;
return get_op(key, val, idata);
} else {
if (verbose) cout << client_name
<< ": get encountered an unexpected error: " << err
<< std::endl;
return err;
}
}
*val = omap[key];
return err;
}
void *KvFlatBtreeAsync::pset(void *ptr) {
struct aio_set_args *args = (struct aio_set_args *)ptr;
*args->err =
args->kvba->KvFlatBtreeAsync::set((string)args->key,
(bufferlist)args->val, (bool)args->exc);
args->cb(args->err, args->cb_args);
delete args;
return NULL;
}
void KvFlatBtreeAsync::aio_set(const string &key, const bufferlist &val,
bool exclusive, callback cb, void * cb_args, int * err) {
aio_set_args *args = new aio_set_args();
args->kvba = this;
args->key = key;
args->val = val;
args->exc = exclusive;
args->cb = cb;
args->cb_args = cb_args;
args->err = err;
pthread_t t;
int r = pthread_create(&t, NULL, pset, (void*)args);
if (r < 0) {
*args->err = r;
return;
}
pthread_detach(t);
}
void *KvFlatBtreeAsync::prm(void *ptr) {
struct aio_rm_args *args = (struct aio_rm_args *)ptr;
*args->err =
args->kvba->KvFlatBtreeAsync::remove((string)args->key);
args->cb(args->err, args->cb_args);
delete args;
return NULL;
}
void KvFlatBtreeAsync::aio_remove(const string &key,
callback cb, void * cb_args, int * err) {
aio_rm_args * args = new aio_rm_args();
args->kvba = this;
args->key = key;
args->cb = cb;
args->cb_args = cb_args;
args->err = err;
pthread_t t;
int r = pthread_create(&t, NULL, prm, (void*)args);
if (r < 0) {
*args->err = r;
return;
}
pthread_detach(t);
}
void *KvFlatBtreeAsync::pget(void *ptr) {
struct aio_get_args *args = (struct aio_get_args *)ptr;
*args->err =
args->kvba->KvFlatBtreeAsync::get((string)args->key,
(bufferlist *)args->val);
args->cb(args->err, args->cb_args);
delete args;
return NULL;
}
void KvFlatBtreeAsync::aio_get(const string &key, bufferlist *val,
callback cb, void * cb_args, int * err) {
aio_get_args * args = new aio_get_args();
args->kvba = this;
args->key = key;
args->val = val;
args->cb = cb;
args->cb_args = cb_args;
args->err = err;
pthread_t t;
int r = pthread_create(&t, NULL, pget, (void*)args);
if (r < 0) {
*args->err = r;
return;
}
pthread_detach(t);
}
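// Illustrative sketch (kvs/done_cb are assumed names, not part of this file):
// the aio_* methods above run the blocking call on a detached pthread and then
// invoke the supplied callback, e.g.
//
//   void done_cb(int *err, void *arg) {
//     std::cout << "aio op finished with " << *err << std::endl;
//   }
//
//   int rc = 0;
//   bufferlist v;
//   v.append("value");
//   kvs.aio_set("key", v, false /*exclusive*/, done_cb, nullptr, &rc);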
int KvFlatBtreeAsync::set_many(const std::map<string, bufferlist> &in_map) {
int err = 0;
bufferlist inbl;
bufferlist outbl;
std::set<string> keys;
std::map<string, bufferlist> big_map;
for (map<string, bufferlist>::const_iterator it = in_map.begin();
it != in_map.end(); ++it) {
keys.insert(it->first);
big_map.insert(*it);
}
if (verbose) cout << "created key set and big_map" << std::endl;
encode(keys, inbl);
librados::AioCompletion * aioc = rados.aio_create_completion();
io_ctx.aio_exec(index_name, aioc, "kvs", "read_many", inbl, &outbl);
aioc->wait_for_complete();
err = aioc->get_return_value();
aioc->release();
if (err < 0) {
cerr << "getting index failed with " << err << std::endl;
return err;
}
std::map<string, bufferlist> imap;//read from the index
auto blit = outbl.cbegin();
decode(imap, blit);
if (verbose) cout << "finished reading index for objects. there are "
<< imap.size() << " entries that need to be changed. " << std::endl;
vector<object_data> to_delete;
vector<object_data> to_create;
if (verbose) cout << "setting up to_delete and to_create vectors from index "
<< "map" << std::endl;
//set up to_delete from index map
for (map<string, bufferlist>::iterator it = imap.begin(); it != imap.end();
++it){
index_data idata;
blit = it->second.begin();
idata.decode(blit);
to_delete.push_back(object_data(idata.min_kdata, idata.kdata, idata.obj));
err = read_object(idata.obj, &to_delete[to_delete.size() - 1]);
if (err < 0) {
if (verbose) cout << "reading " << idata.obj << " failed with " << err
<< std::endl;
return set_many(in_map);
}
big_map.insert(to_delete[to_delete.size() - 1].omap.begin(),
to_delete[to_delete.size() - 1].omap.end());
}
to_create.push_back(object_data(
to_string(client_name, client_index++)));
to_create[0].min_kdata = to_delete[0].min_kdata;
for(map<string, bufferlist>::iterator it = big_map.begin();
it != big_map.end(); ++it) {
    if (to_create[to_create.size() - 1].omap.size() >= 1.5 * k) {
to_create[to_create.size() - 1].max_kdata =
key_data(to_create[to_create.size() - 1]
.omap.rbegin()->first);
to_create.push_back(object_data(
to_string(client_name, client_index++)));
to_create[to_create.size() - 1].min_kdata =
to_create[to_create.size() - 2].max_kdata;
}
to_create[to_create.size() - 1].omap.insert(*it);
}
to_create[to_create.size() - 1].max_kdata =
to_delete[to_delete.size() - 1].max_kdata;
vector<librados::ObjectWriteOperation> owos(2 + 2 * to_delete.size()
+ to_create.size());
vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > ops;
index_data idata;
set_up_prefix_index(to_create, to_delete, &owos[0], &idata, &err);
if (verbose) cout << "finished making to_create and to_delete. "
<< std::endl;
ops.push_back(std::make_pair(
std::pair<int, string>(ADD_PREFIX, index_name),
&owos[0]));
for (int i = 1; i < 2 + 2 * (int)to_delete.size() + (int)to_create.size();
i++) {
ops.push_back(std::make_pair(std::make_pair(0,""), &owos[i]));
}
set_up_ops(to_create, to_delete, &ops, idata, &err);
cout << "finished setting up ops. Starting critical section..." << std::endl;
/////BEGIN CRITICAL SECTION/////
//put prefix on index entry for idata.val
err = perform_ops("\t\t" + client_name + "-set_many:", idata, &ops);
if (err < 0) {
return set_many(in_map);
}
if (verbose) cout << "\t\t" << client_name << "-split: done splitting."
<< std::endl;
/////END CRITICAL SECTION/////
std::scoped_lock l{icache_lock};
for (vector<delete_data>::iterator it = idata.to_delete.begin();
it != idata.to_delete.end(); ++it) {
icache.erase(it->max);
}
for (vector<create_data>::iterator it = idata.to_create.begin();
it != idata.to_create.end(); ++it) {
icache.push(index_data(*it));
}
return err;
}
int KvFlatBtreeAsync::remove_all() {
if (verbose) cout << client_name << ": removing all" << std::endl;
int err = 0;
librados::ObjectReadOperation oro;
librados::AioCompletion * oro_aioc = rados.aio_create_completion();
std::map<std::string, bufferlist> index_set;
oro.omap_get_vals2("",LONG_MAX,&index_set, nullptr, &err);
err = io_ctx.aio_operate(index_name, oro_aioc, &oro, NULL);
if (err < 0){
if (err == -ENOENT) {
return 0;
}
if (verbose) cout << "getting keys failed with error " << err << std::endl;
return err;
}
oro_aioc->wait_for_complete();
oro_aioc->release();
librados::ObjectWriteOperation rm_index;
librados::AioCompletion * rm_index_aioc = rados.aio_create_completion();
std::map<std::string,bufferlist> new_index;
new_index["1"] = index_set["1"];
rm_index.omap_clear();
rm_index.omap_set(new_index);
io_ctx.aio_operate(index_name, rm_index_aioc, &rm_index);
err = rm_index_aioc->get_return_value();
rm_index_aioc->release();
if (err < 0) {
if (verbose) cout << "rm index aioc failed with " << err
<< std::endl;
return err;
}
if (!index_set.empty()) {
for (std::map<std::string,bufferlist>::iterator it = index_set.begin();
it != index_set.end(); ++it){
librados::ObjectWriteOperation sub;
if (it->first == "1") {
sub.omap_clear();
} else {
sub.remove();
}
index_data idata;
auto b = it->second.cbegin();
idata.decode(b);
io_ctx.operate(idata.obj, &sub);
}
}
icache.clear();
return 0;
}
int KvFlatBtreeAsync::get_all_keys(std::set<std::string> *keys) {
if (verbose) cout << client_name << ": getting all keys" << std::endl;
int err = 0;
librados::ObjectReadOperation oro;
std::map<std::string,bufferlist> index_set;
oro.omap_get_vals2("",LONG_MAX,&index_set, nullptr, &err);
io_ctx.operate(index_name, &oro, NULL);
if (err < 0){
if (verbose) cout << "getting keys failed with error " << err << std::endl;
return err;
}
for (std::map<std::string,bufferlist>::iterator it = index_set.begin();
it != index_set.end(); ++it){
librados::ObjectReadOperation sub;
std::set<std::string> ret;
sub.omap_get_keys2("",LONG_MAX,&ret, nullptr, &err);
index_data idata;
auto b = it->second.cbegin();
idata.decode(b);
io_ctx.operate(idata.obj, &sub, NULL);
keys->insert(ret.begin(), ret.end());
}
return err;
}
int KvFlatBtreeAsync::get_all_keys_and_values(
std::map<std::string,bufferlist> *kv_map) {
if (verbose) cout << client_name << ": getting all keys and values"
<< std::endl;
int err = 0;
librados::ObjectReadOperation first_read;
std::set<std::string> index_set;
first_read.omap_get_keys2("",LONG_MAX,&index_set, nullptr, &err);
io_ctx.operate(index_name, &first_read, NULL);
if (err < 0){
if (verbose) cout << "getting keys failed with error " << err << std::endl;
return err;
}
for (std::set<std::string>::iterator it = index_set.begin();
it != index_set.end(); ++it){
librados::ObjectReadOperation sub;
std::map<std::string, bufferlist> ret;
sub.omap_get_vals2("",LONG_MAX,&ret, nullptr, &err);
io_ctx.operate(*it, &sub, NULL);
kv_map->insert(ret.begin(), ret.end());
}
return err;
}
bool KvFlatBtreeAsync::is_consistent() {
int err;
bool ret = true;
if (verbose) cout << client_name << ": checking consistency" << std::endl;
std::map<std::string,bufferlist> index;
std::map<std::string, std::set<std::string> > sub_objs;
librados::ObjectReadOperation oro;
oro.omap_get_vals2("",LONG_MAX,&index, nullptr, &err);
io_ctx.operate(index_name, &oro, NULL);
if (err < 0){
//probably because the index doesn't exist - this might be ok.
for (librados::NObjectIterator oit = io_ctx.nobjects_begin();
oit != io_ctx.nobjects_end(); ++oit) {
//if this executes, there are floating objects.
cerr << "Not consistent! found floating object " << oit->get_oid()
<< std::endl;
ret = false;
}
return ret;
}
std::map<std::string, string> parsed_index;
std::set<std::string> onames;
std::set<std::string> special_names;
for (map<std::string,bufferlist>::iterator it = index.begin();
it != index.end(); ++it) {
if (it->first != "") {
index_data idata;
auto b = it->second.cbegin();
idata.decode(b);
if (idata.prefix != "") {
for(vector<delete_data>::iterator dit = idata.to_delete.begin();
dit != idata.to_delete.end(); ++dit) {
librados::ObjectReadOperation oro;
librados::AioCompletion * aioc = rados.aio_create_completion();
bufferlist un;
oro.getxattr("unwritable", &un, &err);
io_ctx.aio_operate(dit->obj, aioc, &oro, NULL);
aioc->wait_for_complete();
err = aioc->get_return_value();
if (ceph_clock_now() - idata.ts > timeout) {
if (err < 0) {
aioc->release();
if (err == -ENOENT) {
continue;
} else {
cerr << "Not consistent! reading object " << dit->obj
<< "returned " << err << std::endl;
ret = false;
break;
}
}
if (atoi(string(un.c_str(), un.length()).c_str()) != 1 &&
aioc->get_version64() != dit->version) {
cerr << "Not consistent! object " << dit->obj << " has been "
<< " modified since the client died was not cleaned up."
<< std::endl;
ret = false;
}
}
special_names.insert(dit->obj);
aioc->release();
}
for(vector<create_data >::iterator cit = idata.to_create.begin();
cit != idata.to_create.end(); ++cit) {
special_names.insert(cit->obj);
}
}
parsed_index.insert(std::make_pair(it->first, idata.obj));
onames.insert(idata.obj);
}
}
//make sure that an object exists iff it either is the index
//or is listed in the index
for (librados::NObjectIterator oit = io_ctx.nobjects_begin();
oit != io_ctx.nobjects_end(); ++oit) {
string name = oit->get_oid();
if (name != index_name && onames.count(name) == 0
&& special_names.count(name) == 0) {
cerr << "Not consistent! found floating object " << name << std::endl;
ret = false;
}
}
//check objects
string prev = "";
for (std::map<std::string, string>::iterator it = parsed_index.begin();
it != parsed_index.end();
++it) {
librados::ObjectReadOperation read;
read.omap_get_keys2("", LONG_MAX, &sub_objs[it->second], nullptr, &err);
err = io_ctx.operate(it->second, &read, NULL);
int size_int = (int)sub_objs[it->second].size();
//check that size is in the right range
if (it->first != "1" && special_names.count(it->second) == 0 &&
err != -ENOENT && (size_int > 2*k || size_int < k)
&& parsed_index.size() > 1) {
cerr << "Not consistent! Object " << *it << " has size " << size_int
<< ", which is outside the acceptable range." << std::endl;
ret = false;
}
//check that all keys belong in that object
for(std::set<std::string>::iterator subit = sub_objs[it->second].begin();
subit != sub_objs[it->second].end(); ++subit) {
if ((it->first != "1"
&& *subit > it->first.substr(1,it->first.length()))
|| *subit <= prev) {
cerr << "Not consistent! key " << *subit << " does not belong in "
<< *it << std::endl;
cerr << "not last element, i.e. " << it->first << " not equal to 1? "
<< (it->first != "1") << std::endl
<< "greater than " << it->first.substr(1,it->first.length())
<<"? " << (*subit > it->first.substr(1,it->first.length()))
<< std::endl
<< "less than or equal to " << prev << "? "
<< (*subit <= prev) << std::endl;
ret = false;
}
}
prev = it->first.substr(1,it->first.length());
}
if (!ret) {
if (verbose) cout << "failed consistency test - see error log"
<< std::endl;
cerr << str();
} else {
if (verbose) cout << "passed consistency test" << std::endl;
}
return ret;
}
string KvFlatBtreeAsync::str() {
stringstream ret;
ret << "Top-level map:" << std::endl;
int err = 0;
std::set<std::string> keys;
std::map<std::string,bufferlist> index;
librados::ObjectReadOperation oro;
librados::AioCompletion * top_aioc = rados.aio_create_completion();
oro.omap_get_vals2("",LONG_MAX,&index, nullptr, &err);
io_ctx.aio_operate(index_name, top_aioc, &oro, NULL);
top_aioc->wait_for_complete();
err = top_aioc->get_return_value();
top_aioc->release();
if (err < 0 && err != -5) { // -5 == -EIO
if (verbose) cout << "getting keys failed with error " << err << std::endl;
return ret.str();
}
if(index.empty()) {
ret << "There are no objects!" << std::endl;
return ret.str();
}
for (map<std::string,bufferlist>::iterator it = index.begin();
it != index.end(); ++it) {
keys.insert(string(it->second.c_str(), it->second.length())
.substr(1,it->second.length()));
}
vector<std::string> all_names;
vector<int> all_sizes(index.size());
vector<int> all_versions(index.size());
vector<bufferlist> all_unwrit(index.size());
vector<map<std::string,bufferlist> > all_maps(keys.size());
vector<map<std::string,bufferlist>::iterator> its(keys.size());
unsigned done = 0;
vector<bool> dones(keys.size());
ret << std::endl << string(150,'-') << std::endl;
for (map<std::string,bufferlist>::iterator it = index.begin();
it != index.end(); ++it){
index_data idata;
auto b = it->second.cbegin();
idata.decode(b);
string s = idata.str();
ret << "|" << string((148 -
((*it).first.length()+s.length()+3))/2,' ');
ret << (*it).first;
ret << " | ";
ret << string(idata.str());
ret << string((148 -
((*it).first.length()+s.length()+3))/2,' ');
ret << "|\t";
all_names.push_back(idata.obj);
ret << std::endl << string(150,'-') << std::endl;
}
int indexer = 0;
//get the object names and sizes
for(vector<std::string>::iterator it = all_names.begin(); it
!= all_names.end();
++it) {
librados::ObjectReadOperation oro;
librados::AioCompletion *aioc = rados.aio_create_completion();
oro.omap_get_vals2("", LONG_MAX, &all_maps[indexer], nullptr, &err);
oro.getxattr("unwritable", &all_unwrit[indexer], &err);
io_ctx.aio_operate(*it, aioc, &oro, NULL);
aioc->wait_for_complete();
if (aioc->get_return_value() < 0) {
ret << "reading" << *it << "failed: " << err << std::endl;
//return ret.str();
}
all_sizes[indexer] = all_maps[indexer].size();
all_versions[indexer] = aioc->get_version64();
indexer++;
aioc->release();
}
ret << "///////////////////OBJECT NAMES////////////////" << std::endl;
//HEADERS
ret << std::endl;
for (int i = 0; i < indexer; i++) {
ret << "---------------------------\t";
}
ret << std::endl;
for (int i = 0; i < indexer; i++) {
ret << "|" << string((25 -
(string("Bucket: ").length() + all_names[i].length()))/2, ' ');
ret << "Bucket: " << all_names[i];
ret << string((25 -
(string("Bucket: ").length() + all_names[i].length()))/2, ' ') << "|\t";
}
ret << std::endl;
for (int i = 0; i < indexer; i++) {
its[i] = all_maps[i].begin();
ret << "|" << string((25 - (string("size: ").length()
+ to_string("",all_sizes[i]).length()))/2, ' ');
ret << "size: " << all_sizes[i];
ret << string((25 - (string("size: ").length()
+ to_string("",all_sizes[i]).length()))/2, ' ') << "|\t";
}
ret << std::endl;
for (int i = 0; i < indexer; i++) {
its[i] = all_maps[i].begin();
ret << "|" << string((25 - (string("version: ").length()
+ to_string("",all_versions[i]).length()))/2, ' ');
ret << "version: " << all_versions[i];
ret << string((25 - (string("version: ").length()
+ to_string("",all_versions[i]).length()))/2, ' ') << "|\t";
}
ret << std::endl;
for (int i = 0; i < indexer; i++) {
its[i] = all_maps[i].begin();
ret << "|" << string((25 - (string("unwritable? ").length()
+ 1))/2, ' ');
ret << "unwritable? " << string(all_unwrit[i].c_str(),
all_unwrit[i].length());
ret << string((25 - (string("unwritable? ").length()
+ 1))/2, ' ') << "|\t";
}
ret << std::endl;
for (int i = 0; i < indexer; i++) {
ret << "---------------------------\t";
}
ret << std::endl;
ret << "///////////////////THE ACTUAL BLOCKS////////////////" << std::endl;
ret << std::endl;
for (int i = 0; i < indexer; i++) {
ret << "---------------------------\t";
}
ret << std::endl;
//each time through this part is two lines
while(done < keys.size()) {
for(int i = 0; i < indexer; i++) {
if(dones[i]){
ret << " \t";
} else {
if (its[i] == all_maps[i].end()){
done++;
dones[i] = true;
ret << " \t";
} else {
ret << "|" << string((25 -
((*its[i]).first.length()+its[i]->second.length()+3))/2,' ');
ret << (*its[i]).first;
ret << " | ";
ret << string(its[i]->second.c_str(), its[i]->second.length());
ret << string((25 -
((*its[i]).first.length()+its[i]->second.length()+3))/2,' ');
ret << "|\t";
++(its[i]);
}
}
}
ret << std::endl;
for (int i = 0; i < indexer; i++) {
if(dones[i]){
ret << " \t";
} else {
ret << "---------------------------\t";
}
}
ret << std::endl;
}
return ret.str();
}
| 74,925 | 31.019658 | 87 |
cc
|
null |
ceph-main/src/key_value_store/kv_flat_btree_async.h
|
/*
* Uses a two-level B-tree to store a set of key-value pairs.
*
* September 2, 2012
* Eleanor Cawthon
* [email protected]
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef KVFLATBTREEASYNC_H_
#define KVFLATBTREEASYNC_H_
#define ESUICIDE 134
#define EPREFIX 136
#define EFIRSTOBJ 138
#include "key_value_store/key_value_structure.h"
#include "include/utime.h"
#include "include/types.h"
#include "include/encoding.h"
#include "common/ceph_mutex.h"
#include "common/Clock.h"
#include "common/Formatter.h"
#include "global/global_context.h"
#include "include/rados/librados.hpp"
#include <cfloat>
#include <queue>
#include <sstream>
#include <stdarg.h>
using ceph::bufferlist;
enum {
ADD_PREFIX = 1,
MAKE_OBJECT = 2,
UNWRITE_OBJECT = 3,
RESTORE_OBJECT = 4,
REMOVE_OBJECT = 5,
REMOVE_PREFIX = 6,
AIO_MAKE_OBJECT = 7
};
struct rebalance_args;
/**
* stores information about a key in the index.
*
* prefix is "0" unless key is "", in which case it is "1". This ensures that
* the object with key "" will always be the highest key in the index.
*/
struct key_data {
std::string raw_key;
std::string prefix;
key_data()
{}
/**
* @pre: key is a raw key (does not contain a prefix)
*/
key_data(std::string key)
: raw_key(key)
{
raw_key == "" ? prefix = "1" : prefix = "0";
}
bool operator==(key_data k) const {
return ((raw_key == k.raw_key) && (prefix == k.prefix));
}
bool operator!=(key_data k) const {
return ((raw_key != k.raw_key) || (prefix != k.prefix));
}
bool operator<(key_data k) const {
return this->encoded() < k.encoded();
}
bool operator>(key_data k) const {
return this->encoded() > k.encoded();
}
/**
* parses the prefix from encoded and stores the data in this.
*
* @pre: encoded has a prefix
*/
void parse(std::string encoded) {
prefix = encoded[0];
raw_key = encoded.substr(1,encoded.length());
}
/**
* returns a string containing the encoded (prefixed) key
*/
std::string encoded() const {
return prefix + raw_key;
}
void encode(bufferlist &bl) const {
ENCODE_START(1,1,bl);
encode(raw_key, bl);
encode(prefix, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
DECODE_START(1, p);
decode(raw_key, p);
decode(prefix, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(key_data)
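// Example (illustrative sketch): with the prefix convention above, every
// ordinary key sorts below the empty key, so "" always lands in the last
// (highest) index entry.
//
//   key_data a("foo");   // raw_key "foo", prefix "0"
//   key_data b("");      // raw_key "",    prefix "1"
//   a.encoded();         // "0foo"
//   b.encoded();         // "1" -- sorts after every "0..."-prefixed key
//   key_data c;
//   c.parse("0foo");     // c == a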
/**
* Stores information read from a librados object.
*/
struct object_data {
key_data min_kdata; //the max key from the previous index entry
key_data max_kdata; //the max key, from the index
std::string name; //the object's name
std::map<std::string, bufferlist> omap; // the omap of the object
bool unwritable; // an xattr that, if false, means an op is in
// progress and other clients should not write to it.
uint64_t version; //the version at time of read
uint64_t size; //the number of elements in the omap
object_data()
: unwritable(false),
version(0),
size(0)
{}
object_data(std::string the_name)
: name(the_name),
unwritable(false),
version(0),
size(0)
{}
object_data(key_data min, key_data kdat, std::string the_name)
: min_kdata(min),
max_kdata(kdat),
name(the_name),
unwritable(false),
version(0),
size(0)
{}
object_data(key_data min, key_data kdat, std::string the_name,
std::map<std::string, bufferlist> the_omap)
: min_kdata(min),
max_kdata(kdat),
name(the_name),
omap(the_omap),
unwritable(false),
version(0),
size(0)
{}
object_data(key_data min, key_data kdat, std::string the_name, int the_version)
: min_kdata(min),
max_kdata(kdat),
name(the_name),
unwritable(false),
version(the_version),
size(0)
{}
void encode(bufferlist &bl) const {
ENCODE_START(1,1,bl);
encode(min_kdata, bl);
encode(max_kdata, bl);
encode(name, bl);
encode(omap, bl);
encode(unwritable, bl);
encode(version, bl);
encode(size, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
DECODE_START(1, p);
decode(min_kdata, p);
decode(max_kdata, p);
decode(name, p);
decode(omap, p);
decode(unwritable, p);
decode(version, p);
decode(size, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(object_data)
/**
* information about objects to be created by a split or merge - stored in the
* index_data.
*/
struct create_data {
key_data min;
key_data max;
std::string obj;
create_data()
{}
create_data(key_data n, key_data x, std::string o)
: min(n),
max(x),
obj(o)
{}
create_data(object_data o)
: min(o.min_kdata),
max(o.max_kdata),
obj(o.name)
{}
create_data & operator=(const create_data &c) {
min = c.min;
max = c.max;
obj = c.obj;
return *this;
}
void encode(bufferlist &bl) const {
ENCODE_START(1,1,bl);
encode(min, bl);
encode(max, bl);
encode(obj, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
DECODE_START(1, p);
decode(min, p);
decode(max, p);
decode(obj, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(create_data)
/**
* information about objects to be deleted by a split or merge - stored in the
* index_data.
*/
struct delete_data {
key_data min;
key_data max;
std::string obj;
uint64_t version;
delete_data()
: version(0)
{}
delete_data(key_data n, key_data x, std::string o, uint64_t v)
: min(n),
max(x),
obj(o),
version(v)
{}
delete_data & operator=(const delete_data &d) {
min = d.min;
max = d.max;
obj = d.obj;
version = d.version;
return *this;
}
void encode(bufferlist &bl) const {
ENCODE_START(1,1,bl);
encode(min, bl);
encode(max, bl);
encode(obj, bl);
encode(version, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
DECODE_START(1, p);
decode(min, p);
decode(max, p);
decode(obj, p);
decode(version, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(delete_data)
/**
* The index object is a key value map that stores
* the highest key stored in an object as keys, and an index_data
* as the corresponding value. The index_data contains the encoded
* high and low keys (where keys in this object are > min_kdata and
* <= kdata), the name of the librados object where keys containing
* that range of keys are located, and information about split and
* merge operations that may need to be cleaned up if a client dies.
*/
struct index_data {
//the encoded key corresponding to the object
key_data kdata;
//"1" if there is a prefix (because a split or merge is
//in progress), otherwise ""
std::string prefix;
//the kdata of the previous index entry
key_data min_kdata;
utime_t ts; //time that a split/merge started
//objects to be created
std::vector<create_data > to_create;
//objects to be deleted
std::vector<delete_data > to_delete;
//the name of the object where the key range is located.
std::string obj;
index_data()
{}
index_data(std::string raw_key)
: kdata(raw_key)
{}
index_data(key_data max, key_data min, std::string o)
: kdata(max),
min_kdata(min),
obj(o)
{}
index_data(create_data c)
: kdata(c.max),
min_kdata(c.min),
obj(c.obj)
{}
bool operator<(const index_data &other) const {
return (kdata.encoded() < other.kdata.encoded());
}
//true if there is a prefix and now - ts > timeout.
bool is_timed_out(utime_t now, utime_t timeout) const;
void encode(bufferlist &bl) const {
ENCODE_START(1,1,bl);
encode(prefix, bl);
encode(min_kdata, bl);
encode(kdata, bl);
encode(ts, bl);
encode(to_create, bl);
encode(to_delete, bl);
encode(obj, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
DECODE_START(1, p);
decode(prefix, p);
decode(min_kdata, p);
decode(kdata, p);
decode(ts, p);
decode(to_create, p);
decode(to_delete, p);
decode(obj, p);
DECODE_FINISH(p);
}
/*
* Prints a string representation of the information, in the following format:
* (min_kdata/
* kdata,
* prefix
* ts
* elements of to_create, organized into (high key| obj name)
* ;
* elements of to_delete, organized into (high key| obj name | version number)
* :
* val)
*/
std::string str() const {
std::stringstream strm;
strm << '(' << min_kdata.encoded() << "/" << kdata.encoded() << ','
<< prefix;
if (prefix == "1") {
strm << ts.sec() << '.' << ts.usec();
for(std::vector<create_data>::const_iterator it = to_create.begin();
it != to_create.end(); ++it) {
strm << '(' << it->min.encoded() << '/' << it->max.encoded() << '|'
<< it->obj << ')';
}
strm << ';';
for(std::vector<delete_data >::const_iterator it = to_delete.begin();
it != to_delete.end(); ++it) {
strm << '(' << it->min.encoded() << '/' << it->max.encoded() << '|'
<< it->obj << '|'
<< it->version << ')';
}
strm << ':';
}
strm << obj << ')';
return strm.str();
}
};
WRITE_CLASS_ENCODER(index_data)
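// Example (illustrative sketch; the object name "client1.4" is a
// placeholder): an index entry for an object holding keys in ("aaa", "mmm"]
// with no split/merge in progress stringifies as follows.
//
//   index_data idata(key_data("mmm"), key_data("aaa"), "client1.4");
//   idata.str();   // "(0aaa/0mmm,client1.4)"
//
// When a split or merge is in progress, prefix is "1" and str() also prints
// the timestamp and the to_create/to_delete entries described above.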
/**
* Structure to store information read from the index for reuse.
*/
class IndexCache {
protected:
std::map<key_data, std::pair<index_data, utime_t> > k2itmap;
std::map<utime_t, key_data> t2kmap;
int cache_size;
public:
IndexCache(int n)
: cache_size(n)
{}
/**
* Inserts idata into the cache and removes whatever key mapped to before.
* If the cache is full, pops the oldest entry.
*/
void push(const std::string &key, const index_data &idata);
/**
* Inserts idata into the cache. If idata.kdata is already in the cache,
* replaces the old one. Pops the oldest entry if the cache is full.
*/
void push(const index_data &idata);
/**
* Removes the oldest entry from the cache
*/
void pop();
/**
* Removes the value associated with kdata from both maps
*/
void erase(key_data kdata);
/**
* gets the idata where key belongs. If none, returns -ENODATA.
*/
int get(const std::string &key, index_data *idata) const;
/**
* Gets the idata where key goes and the one after it. If there are not
* valid entries for both of them, returns -ENODATA.
*/
int get(const std::string &key, index_data *idata, index_data * next_idata) const;
void clear();
};
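// Illustrative usage sketch ("my_key" is a placeholder; callers such as
// KvFlatBtreeAsync guard the cache with their own mutex, e.g. icache_lock):
//
//   IndexCache cache(100);
//   cache.push(idata);                   // after reading the index object
//   index_data hit;
//   if (cache.get("my_key", &hit) == 0) {
//     // hit.obj can be used without re-reading the index
//   } else {
//     // -ENODATA: fall back to reading the index object
//   }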
class KvFlatBtreeAsync;
/**
* These are used internally to translate aio operations into useful thread
* arguments.
*/
struct aio_set_args {
KvFlatBtreeAsync * kvba;
std::string key;
bufferlist val;
bool exc;
callback cb;
void * cb_args;
int * err;
};
struct aio_rm_args {
KvFlatBtreeAsync * kvba;
std::string key;
callback cb;
void * cb_args;
int * err;
};
struct aio_get_args {
KvFlatBtreeAsync * kvba;
std::string key;
bufferlist * val;
bool exc;
callback cb;
void * cb_args;
int * err;
};
class KvFlatBtreeAsync : public KeyValueStructure {
protected:
//don't change these once operations start being called - they are not
//protected with mutexes!
int k;
std::string index_name;
librados::IoCtx io_ctx;
std::string rados_id;
std::string client_name;
librados::Rados rados;
std::string pool_name;
injection_t interrupt;
int wait_ms;
utime_t timeout; //declare a client dead if it goes this long without
//finishing a split/merge
int cache_size;
double cache_refresh; //read cache_size / cache_refresh entries each time the
//index is read
bool verbose;//if true, display lots of debug output
//shared variables protected with mutexes
ceph::mutex client_index_lock = ceph::make_mutex("client_index_lock");
int client_index; //names of new objects are client_name.client_index
ceph::mutex icache_lock = ceph::make_mutex("icache_lock");
IndexCache icache;
friend struct index_data;
/**
* finds the object in the index with the lowest key value that is greater
* than idata.kdata. If idata.kdata is the max key, returns -EOVERFLOW. If
* idata has a prefix and has timed out, cleans up.
*
* @param idata: idata for the object to search for.
* @param out_data: the idata for the next object.
*
* @pre: idata must contain a key_data.
* @post: out_data contains complete information
*/
int next(const index_data &idata, index_data * out_data);
/**
* finds the object in the index with the highest key value that is less
* than idata.kdata. If idata.kdata is the lowest key, returns -ERANGE. If
* idata has a prefix and has timed out, cleans up.
*
* @param idata: idata for the object to search for.
* @param out_data: the idata for the next object.
*
* @pre: idata must contain a key_data.
* @post: out_data contains complete information
*/
int prev(const index_data &idata, index_data * out_data);
/**
* finds the index_data where a key belongs, from cache if possible. If it
* reads the index object, it will read the first cache_size entries after
* key and put them in the cache.
*
* @param key: the key to search for
* @param idata: the index_data for the first index value such that idata.key
* is greater than key.
* @param next_idata: if not NULL, this will be set to the idata after idata
* @param force_update: if false, will try to read from cache first.
*
* @pre: key is not encoded
* @post: idata contains complete information
*/
int read_index(const std::string &key, index_data * idata,
index_data * next_idata, bool force_update);
/**
* Reads obj and generates information about it. Iff the object has >= 2k
* entries, reads the whole omap and then splits it.
*
* @param idata: index data for the object being split
* @pre: idata contains a key and an obj
* @post: idata.obj has been split and icache has been updated
* @return -EBALANCE if obj does not need to be split, 0 if split successful,
* error from read_object or perform_ops if there is one.
*/
int split(const index_data &idata);
/**
* reads the object referred to by idata1 and the next object after it and,
* if necessary, rebalances them. If idata1 refers to the highest key in the
* index, calls rebalance on the next highest key instead.
*
* @param idata: index data for the object being rebalanced
* @param next_idata: index data for the next object. If blank, will read.
* @pre: idata contains a key and an obj
* @post: idata.obj has been rebalanced and icache has been updated
* @return -EBALANCE if no change needed, -ENOENT if the first object does not exist,
* -ECANCELED if second object does not exist, otherwise, error from
* perform_ops
*/
int rebalance(const index_data &idata1, const index_data &next_idata);
/**
* performs an ObjectReadOperation to populate odata
*
* @post: odata has all information about obj except for key (which is "")
*/
int read_object(const std::string &obj, object_data * odata);
/**
* performs a maybe_read_for_balance ObjectOperation so the omap is only
* read if the object is out of bounds.
*/
int read_object(const std::string &obj, rebalance_args * args);
/**
* sets up owo to change the index in preparation for a split/merge.
*
* @param to_create: vector of object_data to be created.
* @param to_delete: vector of object_data to be deleted.
* @param owo: the ObjectWriteOperation to set up
* @param idata: will be populated by index data for this op.
* @param err: error code reference to pass to omap_cmp
* @pre: entries in to_create and to_delete must have keys and names.
*/
void set_up_prefix_index(
const std::vector<object_data> &to_create,
const std::vector<object_data> &to_delete,
librados::ObjectWriteOperation * owo,
index_data * idata,
int * err);
/**
* sets up all make, mark, restore, and delete ops, as well as the remove
* prefix op, based on idata.
*
* @param create_vector: vector of data about the objects to be created.
* @pre: entries in create_vector must have names and omaps and be in idata
* order
* @param delete_vector: vector of data about the objects to be deleted
* @pre: entries in delete_vector must have versions and be in idata order
* @param ops: the owos to set up. the pair is a pair of op identifiers
* and names of objects - set_up_ops fills these in.
* @pre: ops must be the correct size and the ObjectWriteOperation pointers
* must be valid.
* @param idata: the idata with information about how to set up the ops
* @pre: idata has valid to_create and to_delete
* @param err: the int to get the error value for omap_cmp
*/
void set_up_ops(
const std::vector<object_data> &create_vector,
const std::vector<object_data> &delete_vector,
std::vector<std::pair<std::pair<int, std::string>, librados::ObjectWriteOperation*> > * ops,
const index_data &idata,
int * err);
/**
* sets up owo to exclusively create the object, set its omap to to_set, and
* set unwritable to "0"
*/
void set_up_make_object(
const std::map<std::string, bufferlist> &to_set,
librados::ObjectWriteOperation *owo);
/**
* sets up owo to assert the object version and that the object is
* writable, then mark it unwritable.
*
* @param ver: if this is 0, no version is asserted.
*/
void set_up_unwrite_object(
const int &ver, librados::ObjectWriteOperation *owo);
/**
* sets up owo to assert that an object is unwritable and then mark it
* writable
*/
void set_up_restore_object(
librados::ObjectWriteOperation *owo);
/**
* sets up owo to assert that the object is unwritable and then remove it
*/
void set_up_delete_object(
librados::ObjectWriteOperation *owo);
/**
* perform the operations in ops and handles errors.
*
* @param debug_prefix: what to print at the beginning of debug output
* @param idata: the idata for the object being operated on, to be
* passed to cleanup if necessary
* @param ops: this contains an int identifying the type of op,
* a string that is the name of the object to operate on, and a pointer
* to the ObjectWriteOperation to use. All of this must be complete.
* @post: all operations are performed and most errors are handled
* (e.g., cleans up if an assertion fails). If an unknown error is found,
* returns it.
*/
int perform_ops( const std::string &debug_prefix,
const index_data &idata,
std::vector<std::pair<std::pair<int, std::string>, librados::ObjectWriteOperation*> > * ops);
/**
* Called when a client discovers that another client has died during a
* split or a merge. cleans up after that client.
*
* @param idata: the index data parsed from the index entry left by the dead
* client.
* @param error: the error that caused the client to realize the other client
* died (should be -ENOENT or -ETIMEDOUT)
* @post: rolls forward if -ENOENT, otherwise rolls back.
*/
int cleanup(const index_data &idata, const int &error);
/**
* does the ObjectWriteOperation and splits, reads the index, and/or retries
* until success.
*/
int set_op(const std::string &key, const bufferlist &val,
bool update_on_existing, index_data &idata);
/**
* does the ObjectWriteOperation and merges, reads the index, and/or retries
* until success.
*/
int remove_op(const std::string &key, index_data &idata, index_data &next_idata);
/**
* does the read and re-reads the index and/or retries
* until success.
*/
int get_op(const std::string &key, bufferlist * val, index_data &idata);
/**
* handles errors encountered while setting or removing a key, re-reading
* the index and/or retrying as needed.
*/
int handle_set_rm_errors(int &err, std::string key, std::string obj,
index_data * idata, index_data * next_idata);
/**
* called by aio_set, aio_remove, and aio_get, respectively.
*/
static void* pset(void *ptr);
static void* prm(void *ptr);
static void* pget(void *ptr);
public:
//interruption methods, for correctness testing
/**
* returns 0
*/
int nothing() override;
/**
* 10% chance of waiting wait_ms seconds
*/
int wait() override;
/**
* 10% chance of killing the client.
*/
int suicide() override;
KvFlatBtreeAsync(int k_val, std::string name, int cache, double cache_r,
bool verb)
: k(k_val),
index_name("index_object"),
rados_id(name),
client_name(std::string(name).append(".")),
pool_name("rbd"),
interrupt(&KeyValueStructure::nothing),
wait_ms(0),
timeout(100000,0),
cache_size(cache),
cache_refresh(cache_r),
verbose(verb),
client_index(0),
icache(cache)
{}
/**
* creates a string with an int at the end.
*
* @param s: the string on the left
* @param i: the int to be appended to the string
* @return the string
*/
static std::string to_string(std::string s, int i);
/**
* returns a bufferlist containing in
*/
static bufferlist to_bl(const std::string &in) {
bufferlist bl;
bl.append(in);
return bl;
}
/**
* returns a bufferlist containing idata, encoded
*/
static bufferlist to_bl(const index_data &idata) {
bufferlist bl;
idata.encode(bl);
return bl;
}
/**
* returns the rados_id of this KvFlatBtreeAsync
*/
std::string get_name();
/**
* sets this kvba to call inject before every ObjectWriteOperation.
* If inject is wait and wait_time is set, wait will have a 10% chance of
* sleeping for wait_time milliseconds.
*/
void set_inject(injection_t inject, int wait_time) override;
/**
* sets up the rados and io_ctx of this KvFlatBtreeAsync. If they don't already
* exist, creates the index and max object.
*/
int setup(int argc, const char** argv) override;
int set(const std::string &key, const bufferlist &val,
bool update_on_existing) override;
int remove(const std::string &key) override;
/**
* returns true if all of the following are true:
*
* all objects are accounted for in the index or a prefix
* (i.e., no floating objects)
* all objects have k <= size <= 2k
* all keys in an object are within the range predicted by the index
*
* if any of those checks fails, states what the problem(s) are, and prints str().
*
* @pre: no operations are in progress
*/
bool is_consistent() override;
/**
* returns an ASCII representation of the index and sub objects, showing
* stats about each object and all omaps. Don't use if you have more than
* about 10 objects.
*/
std::string str() override;
int get(const std::string &key, bufferlist *val) override;
//async versions of these methods
void aio_get(const std::string &key, bufferlist *val, callback cb,
void *cb_args, int * err) override;
void aio_set(const std::string &key, const bufferlist &val, bool exclusive,
callback cb, void * cb_args, int * err) override;
void aio_remove(const std::string &key, callback cb, void *cb_args, int * err) override;
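// Illustrative usage sketch (the callback signature is inferred from how cb
// is invoked in pset/prm/pget; "done" and "rc" are placeholder names). Note
// that the callback fires on a detached worker thread.
//
//   void done(int *err, void *arg) {
//     // *err holds the result of the operation
//   }
//   ...
//   int rc = 0;
//   bufferlist val;
//   kvs.aio_get("my_key", &val, done, nullptr, &rc);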
//these methods that deal with multiple keys at once are efficient, but make
//no guarantees about atomicity!
/**
* Removes all objects and resets the store as if setup had just run. Makes no
* attempt to do this safely - make sure this is the only operation running
* when it is called!
*/
int remove_all() override;
/**
* This does not add prefixes to the index and therefore DOES NOT guarantee
* consistency! It is ONLY safe if there is only one instance at a time.
* It follows the same general logic as a rebalance, but
* with all objects that contain any of the keys in in_map. It is O(n), where
* n is the number of librados objects it has to change. Higher object sizes
* (i.e., k values) also decrease the efficiency of this method because it
* copies all of the entries in each object it modifies. Writing new objects
* is done in parallel.
*
* This is efficient if:
* * other clients are very unlikely to be modifying any of the objects while
* this operation is in progress
* * The entries in in_map are close together
* * It is especially efficient for initially entering lots of entries into
* an empty structure.
*
* It is very inefficient compared to setting one key and/or will starve if:
* * other clients are modifying the objects it tries to modify
* * The keys are distributed across the range of keys in the store
* * there is a small number of keys compared to k
*/
int set_many(const std::map<std::string, bufferlist> &in_map) override;
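// Illustrative usage sketch (keys and values are placeholders):
//
//   std::map<std::string, bufferlist> batch;
//   batch["a"] = KvFlatBtreeAsync::to_bl("1");
//   batch["b"] = KvFlatBtreeAsync::to_bl("2");
//   int r = kvs.set_many(batch);   // no cross-client atomicity guarantee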
int get_all_keys(std::set<std::string> *keys) override;
int get_all_keys_and_values(std::map<std::string,bufferlist> *kv_map) override;
};
#endif /* KVFLATBTREEASYNC_H_ */
| 25,031 | 26.875278 | 99 |
h
|
null |
ceph-main/src/key_value_store/kvs_arg_types.h
|
/*
* Argument types used by cls_kvs.cc
*
* Created on: Aug 10, 2012
* Author: eleanor
*/
#ifndef CLS_KVS_H_
#define CLS_KVS_H_
#define EBALANCE 137
#include "include/encoding.h"
#include "key_value_store/kv_flat_btree_async.h"
using ceph::bufferlist;
struct assert_size_args {
uint64_t bound; //the size to compare to - should be k or 2k
uint64_t comparator; //should be CEPH_OSD_CMPXATTR_OP_EQ,
//CEPH_OSD_CMPXATTR_OP_LT, or
//CEPH_OSD_CMPXATTR_OP_GT
void encode(bufferlist &bl) const {
ENCODE_START(1,1,bl);
encode(bound, bl);
encode(comparator, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
DECODE_START(1, p);
decode(bound, p);
decode(comparator, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(assert_size_args)
struct idata_from_key_args {
std::string key;
index_data idata;
index_data next_idata;
void encode(bufferlist &bl) const {
ENCODE_START(1,1,bl);
encode(key, bl);
encode(idata, bl);
encode(next_idata, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
DECODE_START(1, p);
decode(key, p);
decode(idata, p);
decode(next_idata, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(idata_from_key_args)
struct idata_from_idata_args {
index_data idata;
index_data next_idata;
void encode(bufferlist &bl) const {
ENCODE_START(1,1,bl);
encode(idata, bl);
encode(next_idata, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
DECODE_START(1, p);
decode(idata, p);
decode(next_idata, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(idata_from_idata_args)
struct omap_set_args {
std::map<std::string, bufferlist> omap;
uint64_t bound;
bool exclusive;
void encode(bufferlist &bl) const {
ENCODE_START(1,1,bl);
encode(omap, bl);
encode(bound, bl);
encode(exclusive, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
DECODE_START(1, p);
decode(omap, p);
decode(bound, p);
decode(exclusive, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(omap_set_args)
struct omap_rm_args {
std::set<std::string> omap;
uint64_t bound;
void encode(bufferlist &bl) const {
ENCODE_START(1,1,bl);
encode(omap, bl);
encode(bound, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
DECODE_START(1, p);
decode(omap, p);
decode(bound, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(omap_rm_args)
struct rebalance_args {
object_data odata;
uint64_t bound;
uint64_t comparator;
void encode(bufferlist &bl) const {
ENCODE_START(1,1,bl);
encode(odata, bl);
encode(bound, bl);
encode(comparator, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
DECODE_START(1, p);
decode(odata,p);
decode(bound, p);
decode(comparator, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(rebalance_args)
#endif /* CLS_KVS_H_ */
| 3,030 | 19.903448 | 62 |
h
|
null |
ceph-main/src/kv/KeyValueDB.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "KeyValueDB.h"
#include "RocksDBStore.h"
using std::map;
using std::string;
KeyValueDB *KeyValueDB::create(CephContext *cct, const string& type,
const string& dir,
map<string,string> options,
void *p)
{
if (type == "rocksdb") {
return new RocksDBStore(cct, dir, options, p);
}
return NULL;
}
int KeyValueDB::test_init(const string& type, const string& dir)
{
if (type == "rocksdb") {
return RocksDBStore::_test_init(dir);
}
return -EINVAL;
}
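// Illustrative usage sketch ("cct" and the on-disk path are placeholders):
//
//   KeyValueDB *db = KeyValueDB::create(cct, "rocksdb", "/path/to/db");
//   if (!db || db->init() < 0 || db->create_and_open(std::cerr) < 0) {
//     // handle the error
//   }
//   KeyValueDB::Transaction t = db->get_transaction();
//   ceph::buffer::list bl;
//   bl.append("value");
//   t->set("prefix", "key", bl);
//   db->submit_transaction_sync(t);
//   delete db;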
| 603 | 20.571429 | 70 |
cc
|
null |
ceph-main/src/kv/KeyValueDB.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef KEY_VALUE_DB_H
#define KEY_VALUE_DB_H
#include "include/buffer.h"
#include <ostream>
#include <set>
#include <map>
#include <optional>
#include <string>
#include <boost/scoped_ptr.hpp>
#include "include/encoding.h"
#include "common/Formatter.h"
#include "common/perf_counters.h"
#include "common/PriorityCache.h"
/**
* Defines virtual interface to be implemented by key value store
*
* Kyoto Cabinet should implement this
*/
class KeyValueDB {
public:
class TransactionImpl {
public:
/// Set Keys
void set(
const std::string &prefix, ///< [in] Prefix for keys, or CF name
const std::map<std::string, ceph::buffer::list> &to_set ///< [in] keys/values to set
) {
for (auto it = to_set.cbegin(); it != to_set.cend(); ++it)
set(prefix, it->first, it->second);
}
/// Set Keys (via encoded ceph::buffer::list)
void set(
const std::string &prefix, ///< [in] prefix, or CF name
ceph::buffer::list& to_set_bl ///< [in] encoded key/values to set
) {
using ceph::decode;
auto p = std::cbegin(to_set_bl);
uint32_t num;
decode(num, p);
while (num--) {
std::string key;
ceph::buffer::list value;
decode(key, p);
decode(value, p);
set(prefix, key, value);
}
}
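// Illustrative sketch: to_set_bl must mirror the decode loop above, i.e. a
// u32 pair count followed by that many (key, value) pairs ("prefix", "key"
// and the value are placeholders).
//
//   using ceph::encode;
//   ceph::buffer::list to_set_bl, val;
//   val.append("value");
//   encode((uint32_t)1, to_set_bl);          // number of pairs
//   encode(std::string("key"), to_set_bl);
//   encode(val, to_set_bl);
//   t->set("prefix", to_set_bl);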
/// Set Key
virtual void set(
const std::string &prefix, ///< [in] Prefix or CF for the key
const std::string &k, ///< [in] Key to set
const ceph::buffer::list &bl ///< [in] Value to set
) = 0;
virtual void set(
const std::string &prefix,
const char *k,
size_t keylen,
const ceph::buffer::list& bl) {
set(prefix, std::string(k, keylen), bl);
}
/// Removes Keys (via encoded ceph::buffer::list)
void rmkeys(
const std::string &prefix, ///< [in] Prefix or CF to search for
ceph::buffer::list &keys_bl ///< [in] Keys to remove
) {
using ceph::decode;
auto p = std::cbegin(keys_bl);
uint32_t num;
decode(num, p);
while (num--) {
std::string key;
decode(key, p);
rmkey(prefix, key);
}
}
/// Removes Keys
void rmkeys(
const std::string &prefix, ///< [in] Prefix/CF to search for
const std::set<std::string> &keys ///< [in] Keys to remove
) {
for (auto it = keys.cbegin(); it != keys.cend(); ++it)
rmkey(prefix, *it);
}
/// Remove Key
virtual void rmkey(
const std::string &prefix, ///< [in] Prefix/CF to search for
const std::string &k ///< [in] Key to remove
) = 0;
virtual void rmkey(
const std::string &prefix, ///< [in] Prefix to search for
const char *k, ///< [in] Key to remove
size_t keylen
) {
rmkey(prefix, std::string(k, keylen));
}
/// Remove Single Key which exists and was not overwritten.
/// This API is only related to performance optimization, and should only be
/// re-implemented by log-structured merge-tree based key-value stores (such as RocksDB).
/// If a key is overwritten (by calling set multiple times), then the result
/// of calling rm_single_key on this key is undefined.
virtual void rm_single_key(
const std::string &prefix, ///< [in] Prefix/CF to search for
const std::string &k ///< [in] Key to remove
) { return rmkey(prefix, k);}
/// Removes keys beginning with prefix
virtual void rmkeys_by_prefix(
const std::string &prefix ///< [in] Prefix/CF by which to remove keys
) = 0;
virtual void rm_range_keys(
const std::string &prefix, ///< [in] Prefix by which to remove keys
const std::string &start,      ///< [in] The start bound of the keys to remove
const std::string &end         ///< [in] The end bound of the keys to remove
) = 0;
/// Merge value into key
virtual void merge(
const std::string &prefix, ///< [in] Prefix/CF ==> MUST match some established merge operator
const std::string &key, ///< [in] Key to be merged
const ceph::buffer::list &value ///< [in] value to be merged into key
) { ceph_abort_msg("Not implemented"); }
virtual ~TransactionImpl() {}
};
typedef std::shared_ptr< TransactionImpl > Transaction;
/// create a new instance
static KeyValueDB *create(CephContext *cct, const std::string& type,
const std::string& dir,
std::map<std::string,std::string> options = {},
void *p = NULL);
/// test whether we can successfully initialize; may have side effects (e.g., create)
static int test_init(const std::string& type, const std::string& dir);
virtual int init(std::string option_str="") = 0;
virtual int open(std::ostream &out, const std::string& cfs="") = 0;
// cfs names the column families to be created when the db is created.
virtual int create_and_open(std::ostream &out, const std::string& cfs="") = 0;
virtual int open_read_only(std::ostream &out, const std::string& cfs="") {
return -ENOTSUP;
}
virtual void close() { }
/// Try to repair the K/V database. rocksdb requires that the database not be open.
virtual int repair(std::ostream &out) { return 0; }
virtual Transaction get_transaction() = 0;
virtual int submit_transaction(Transaction) = 0;
virtual int submit_transaction_sync(Transaction t) {
return submit_transaction(t);
}
/// Retrieve Keys
virtual int get(
const std::string &prefix, ///< [in] Prefix/CF for key
const std::set<std::string> &key, ///< [in] Key to retrieve
std::map<std::string, ceph::buffer::list> *out ///< [out] Key value retrieved
) = 0;
virtual int get(const std::string &prefix, ///< [in] prefix or CF name
const std::string &key, ///< [in] key
ceph::buffer::list *value) { ///< [out] value
std::set<std::string> ks;
ks.insert(key);
std::map<std::string,ceph::buffer::list> om;
int r = get(prefix, ks, &om);
if (om.find(key) != om.end()) {
*value = std::move(om[key]);
} else {
*value = ceph::buffer::list();
r = -ENOENT;
}
return r;
}
virtual int get(const std::string &prefix,
const char *key, size_t keylen,
ceph::buffer::list *value) {
return get(prefix, std::string(key, keylen), value);
}
// This superclass is used both by kv iterators *and* by the ObjectMap
// omap iterator. The class hierarchies are unfortunately tied together
// by the legacy DBObjectMap implementation :(.
class SimplestIteratorImpl {
public:
virtual int seek_to_first() = 0;
virtual int upper_bound(const std::string &after) = 0;
virtual int lower_bound(const std::string &to) = 0;
virtual bool valid() = 0;
virtual int next() = 0;
virtual std::string key() = 0;
virtual std::string tail_key() {
return "";
}
virtual ceph::buffer::list value() = 0;
virtual int status() = 0;
virtual ~SimplestIteratorImpl() {}
};
class IteratorImpl : public SimplestIteratorImpl {
public:
virtual ~IteratorImpl() {}
virtual int seek_to_last() = 0;
virtual int prev() = 0;
virtual std::pair<std::string, std::string> raw_key() = 0;
virtual ceph::buffer::ptr value_as_ptr() {
ceph::buffer::list bl = value();
if (bl.length() == 1) {
return *bl.buffers().begin();
} else if (bl.length() == 0) {
return ceph::buffer::ptr();
} else {
ceph_abort();
}
}
};
typedef std::shared_ptr< IteratorImpl > Iterator;
// This is the low-level iterator implemented by the underlying KV store.
class WholeSpaceIteratorImpl {
public:
virtual int seek_to_first() = 0;
virtual int seek_to_first(const std::string &prefix) = 0;
virtual int seek_to_last() = 0;
virtual int seek_to_last(const std::string &prefix) = 0;
virtual int upper_bound(const std::string &prefix, const std::string &after) = 0;
virtual int lower_bound(const std::string &prefix, const std::string &to) = 0;
virtual bool valid() = 0;
virtual int next() = 0;
virtual int prev() = 0;
virtual std::string key() = 0;
virtual std::pair<std::string,std::string> raw_key() = 0;
virtual bool raw_key_is_prefixed(const std::string &prefix) = 0;
virtual ceph::buffer::list value() = 0;
virtual ceph::buffer::ptr value_as_ptr() {
ceph::buffer::list bl = value();
if (bl.length()) {
return *bl.buffers().begin();
} else {
return ceph::buffer::ptr();
}
}
virtual int status() = 0;
virtual size_t key_size() {
return 0;
}
virtual size_t value_size() {
return 0;
}
virtual ~WholeSpaceIteratorImpl() { }
};
typedef std::shared_ptr< WholeSpaceIteratorImpl > WholeSpaceIterator;
private:
// This class filters a WholeSpaceIterator by a prefix.
// Performs as a dummy wrapper over WholeSpaceIterator
// if prefix is empty
class PrefixIteratorImpl : public IteratorImpl {
const std::string prefix;
WholeSpaceIterator generic_iter;
public:
PrefixIteratorImpl(const std::string &prefix, WholeSpaceIterator iter) :
prefix(prefix), generic_iter(iter) { }
~PrefixIteratorImpl() override { }
int seek_to_first() override {
return prefix.empty() ?
generic_iter->seek_to_first() :
generic_iter->seek_to_first(prefix);
}
int seek_to_last() override {
return prefix.empty() ?
generic_iter->seek_to_last() :
generic_iter->seek_to_last(prefix);
}
int upper_bound(const std::string &after) override {
return generic_iter->upper_bound(prefix, after);
}
int lower_bound(const std::string &to) override {
return generic_iter->lower_bound(prefix, to);
}
bool valid() override {
  if (!generic_iter->valid())
    return false;
  if (prefix.empty())
    return true;
  return generic_iter->raw_key_is_prefixed(prefix);
}
int next() override {
return generic_iter->next();
}
int prev() override {
return generic_iter->prev();
}
std::string key() override {
return generic_iter->key();
}
std::pair<std::string, std::string> raw_key() override {
return generic_iter->raw_key();
}
ceph::buffer::list value() override {
return generic_iter->value();
}
ceph::buffer::ptr value_as_ptr() override {
return generic_iter->value_as_ptr();
}
int status() override {
return generic_iter->status();
}
};
protected:
Iterator make_iterator(const std::string &prefix, WholeSpaceIterator w_iter) {
return std::make_shared<PrefixIteratorImpl>(
prefix,
w_iter);
}
public:
typedef uint32_t IteratorOpts;
static const uint32_t ITERATOR_NOCACHE = 1;
struct IteratorBounds {
std::optional<std::string> lower_bound;
std::optional<std::string> upper_bound;
};
virtual WholeSpaceIterator get_wholespace_iterator(IteratorOpts opts = 0) = 0;
virtual Iterator get_iterator(const std::string &prefix, IteratorOpts opts = 0, IteratorBounds bounds = IteratorBounds()) {
return make_iterator(prefix,
get_wholespace_iterator(opts));
}
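// Illustrative usage sketch ("prefix" is a placeholder prefix/CF name):
//
//   KeyValueDB::Iterator it = db->get_iterator("prefix");
//   for (it->seek_to_first(); it->valid(); it->next()) {
//     std::string k = it->key();
//     ceph::buffer::list v = it->value();
//     // ...
//   }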
virtual uint64_t get_estimated_size(std::map<std::string,uint64_t> &extra) = 0;
virtual int get_statfs(struct store_statfs_t *buf) {
return -EOPNOTSUPP;
}
virtual int set_cache_size(uint64_t) {
return -EOPNOTSUPP;
}
virtual int set_cache_high_pri_pool_ratio(double ratio) {
return -EOPNOTSUPP;
}
virtual int64_t get_cache_usage() const {
return -EOPNOTSUPP;
}
virtual int64_t get_cache_usage(std::string prefix) const {
return -EOPNOTSUPP;
}
virtual std::shared_ptr<PriorityCache::PriCache> get_priority_cache() const {
return nullptr;
}
virtual std::shared_ptr<PriorityCache::PriCache> get_priority_cache(std::string prefix) const {
return nullptr;
}
virtual ~KeyValueDB() {}
/// estimate space utilization for a prefix (in bytes)
virtual int64_t estimate_prefix_size(const std::string& prefix,
const std::string& key_prefix) {
return 0;
}
/// compact the underlying store
virtual void compact() {}
/// compact the underlying store in async mode
virtual void compact_async() {}
/// compact db for all keys with a given prefix
virtual void compact_prefix(const std::string& prefix) {}
/// compact db for all keys with a given prefix, async
virtual void compact_prefix_async(const std::string& prefix) {}
virtual void compact_range(const std::string& prefix,
const std::string& start, const std::string& end) {}
virtual void compact_range_async(const std::string& prefix,
const std::string& start, const std::string& end) {}
// See the RocksDB merge operator definition; we support only the basic
// associative merge right now.
class MergeOperator {
public:
/// Merge into a key that doesn't exist
virtual void merge_nonexistent(
const char *rdata, size_t rlen,
std::string *new_value) = 0;
/// Merge into a key that does exist
virtual void merge(
const char *ldata, size_t llen,
const char *rdata, size_t rlen,
std::string *new_value) = 0;
/// Each operator name and prefix is used to construct the overall RocksDB operator name for the consistency check at open time.
virtual const char *name() const = 0;
virtual ~MergeOperator() {}
};
/// Set up one or more operators; this needs to be done BEFORE the DB is opened.
virtual int set_merge_operator(const std::string& prefix,
std::shared_ptr<MergeOperator> mop) {
return -EOPNOTSUPP;
}
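// Illustrative sketch: a minimal associative "append" operator. "AppendOp"
// and the prefix "p" are hypothetical names; registration must happen
// before the DB is opened.
//
//   struct AppendOp : public KeyValueDB::MergeOperator {
//     void merge_nonexistent(const char *rdata, size_t rlen,
//                            std::string *new_value) override {
//       *new_value = std::string(rdata, rlen);
//     }
//     void merge(const char *ldata, size_t llen,
//                const char *rdata, size_t rlen,
//                std::string *new_value) override {
//       *new_value = std::string(ldata, llen) + std::string(rdata, rlen);
//     }
//     const char *name() const override { return "AppendOp"; }
//   };
//
//   db->set_merge_operator("p", std::make_shared<AppendOp>());
//   // ...later, inside a transaction:
//   t->merge("p", "key", bl);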
virtual void get_statistics(ceph::Formatter *f) {
return;
}
/**
* Return your perf counters if you have any. Subclasses are not
* required to implement this, and callers must respect a null return
* value.
*/
virtual PerfCounters *get_perf_counters() {
return nullptr;
}
/**
* Accesses an implementation-specific integral property corresponding
* to the passed property and prefix.
* Returns true if the property is valid for the prefix and populates out.
*/
virtual bool get_property(
const std::string &property,
uint64_t *out) {
return false;
}
protected:
/// List of matching prefixes/ColumnFamilies and merge operators
std::vector<std::pair<std::string,
std::shared_ptr<MergeOperator> > > merge_ops;
};
#endif
| 14,552 | 30.914474 | 132 |
h
|
null |
ceph-main/src/kv/KeyValueHistogram.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/stringify.h"
#include "KeyValueHistogram.h"
using std::map;
using std::string;
using ceph::Formatter;
#define KEY_SLAB 32
#define VALUE_SLAB 64
int KeyValueHistogram::get_key_slab(size_t sz)
{
return (sz / KEY_SLAB);
}
string KeyValueHistogram::get_key_slab_to_range(int slab)
{
int lower_bound = slab * KEY_SLAB;
int upper_bound = (slab + 1) * KEY_SLAB;
string ret = "[" + stringify(lower_bound) + "," + stringify(upper_bound) + ")";
return ret;
}
int KeyValueHistogram::get_value_slab(size_t sz)
{
return (sz / VALUE_SLAB);
}
string KeyValueHistogram::get_value_slab_to_range(int slab)
{
int lower_bound = slab * VALUE_SLAB;
int upper_bound = (slab + 1) * VALUE_SLAB;
string ret = "[" + stringify(lower_bound) + "," + stringify(upper_bound) + ")";
return ret;
}
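// Worked example: with KEY_SLAB = 32 and VALUE_SLAB = 64, a 40-byte key
// falls in key slab 40 / 32 = 1, reported as "[32,64)", and a 130-byte
// value falls in value slab 130 / 64 = 2, reported as "[128,192)".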
void KeyValueHistogram::update_hist_entry(map<string, map<int, struct key_dist> >& key_hist,
const string& prefix, size_t key_size, size_t value_size)
{
uint32_t key_slab = get_key_slab(key_size);
uint32_t value_slab = get_value_slab(value_size);
key_hist[prefix][key_slab].count++;
key_hist[prefix][key_slab].max_len =
std::max<size_t>(key_size, key_hist[prefix][key_slab].max_len);
key_hist[prefix][key_slab].val_map[value_slab].count++;
key_hist[prefix][key_slab].val_map[value_slab].max_len =
std::max<size_t>(value_size,
key_hist[prefix][key_slab].val_map[value_slab].max_len);
}
void KeyValueHistogram::dump(Formatter* f)
{
f->open_object_section("rocksdb_value_distribution");
for (auto i : value_hist) {
f->dump_unsigned(get_value_slab_to_range(i.first).data(), i.second);
}
f->close_section();
f->open_object_section("rocksdb_key_value_histogram");
for (auto i : key_hist) {
f->dump_string("prefix", i.first);
f->open_object_section("key_hist");
for (auto k : i.second) {
f->dump_unsigned(get_key_slab_to_range(k.first).data(), k.second.count);
f->dump_unsigned("max_len", k.second.max_len);
f->open_object_section("value_hist");
for (auto j : k.second.val_map) {
f->dump_unsigned(get_value_slab_to_range(j.first).data(), j.second.count);
f->dump_unsigned("max_len", j.second.max_len);
}
f->close_section();
}
f->close_section();
}
f->close_section();
}
| 2,392 | 29.291139 | 92 |
cc
|
null |
ceph-main/src/kv/KeyValueHistogram.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef KeyValueHistogram_H
#define KeyValueHistogram_H
#include <map>
#include <string>
#include "common/Formatter.h"
/**
*
* Key Value DB Histogram generator
*
*/
struct KeyValueHistogram {
struct value_dist {
uint64_t count;
uint32_t max_len;
};
struct key_dist {
uint64_t count;
uint32_t max_len;
std::map<int, struct value_dist> val_map; ///< value slab id to count and max value length
};
std::map<std::string, std::map<int, struct key_dist> > key_hist;
std::map<int, uint64_t> value_hist;
int get_key_slab(size_t sz);
std::string get_key_slab_to_range(int slab);
int get_value_slab(size_t sz);
std::string get_value_slab_to_range(int slab);
void update_hist_entry(std::map<std::string, std::map<int, struct key_dist> >& key_hist,
const std::string& prefix, size_t key_size, size_t value_size);
void dump(ceph::Formatter* f);
};
#endif
| 999 | 24.641026 | 96 |
h
|
null |
ceph-main/src/kv/RocksDBStore.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <filesystem>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "rocksdb/db.h"
#include "rocksdb/table.h"
#include "rocksdb/env.h"
#include "rocksdb/slice.h"
#include "rocksdb/cache.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/utilities/convenience.h"
#include "rocksdb/utilities/table_properties_collectors.h"
#include "rocksdb/merge_operator.h"
#include "common/perf_counters.h"
#include "common/PriorityCache.h"
#include "include/common_fwd.h"
#include "include/scope_guard.h"
#include "include/str_list.h"
#include "include/stringify.h"
#include "include/str_map.h"
#include "KeyValueDB.h"
#include "RocksDBStore.h"
#include "common/debug.h"
#define dout_context cct
#define dout_subsys ceph_subsys_rocksdb
#undef dout_prefix
#define dout_prefix *_dout << "rocksdb: "
namespace fs = std::filesystem;
using std::function;
using std::list;
using std::map;
using std::ostream;
using std::pair;
using std::set;
using std::string;
using std::unique_ptr;
using std::vector;
using ceph::bufferlist;
using ceph::bufferptr;
using ceph::Formatter;
static const char* sharding_def_dir = "sharding";
static const char* sharding_def_file = "sharding/def";
static const char* sharding_recreate = "sharding/recreate_columns";
static const char* resharding_column_lock = "reshardingXcommencingXlocked";
static bufferlist to_bufferlist(rocksdb::Slice in) {
bufferlist bl;
bl.append(bufferptr(in.data(), in.size()));
return bl;
}
static rocksdb::SliceParts prepare_sliceparts(const bufferlist &bl,
vector<rocksdb::Slice> *slices)
{
unsigned n = 0;
for (auto& buf : bl.buffers()) {
(*slices)[n].data_ = buf.c_str();
(*slices)[n].size_ = buf.length();
n++;
}
return rocksdb::SliceParts(slices->data(), slices->size());
}
//
// One of these for the default rocksdb column family, routing each prefix
// to the appropriate MergeOperator.
//
class RocksDBStore::MergeOperatorRouter
: public rocksdb::AssociativeMergeOperator
{
RocksDBStore& store;
public:
const char *Name() const override {
// Construct a name that rocksDB will validate against. We want to
// do this in a way that doesn't constrain the ordering of calls
// to set_merge_operator, so sort the merge operators and then
// construct a name from all of those parts.
store.assoc_name.clear();
map<std::string,std::string> names;
for (auto& p : store.merge_ops) {
names[p.first] = p.second->name();
}
for (auto& p : names) {
store.assoc_name += '.';
store.assoc_name += p.first;
store.assoc_name += ':';
store.assoc_name += p.second;
}
return store.assoc_name.c_str();
}
explicit MergeOperatorRouter(RocksDBStore &_store) : store(_store) {}
bool Merge(const rocksdb::Slice& key,
const rocksdb::Slice* existing_value,
const rocksdb::Slice& value,
std::string* new_value,
rocksdb::Logger* logger) const override {
// for default column family
// extract prefix from key and compare against each registered merge op;
// even though merge operator for explicit CF is included in merge_ops,
// it won't be picked up, since it won't match.
for (auto& p : store.merge_ops) {
if (p.first.compare(0, p.first.length(),
key.data(), p.first.length()) == 0 &&
key.data()[p.first.length()] == 0) {
if (existing_value) {
p.second->merge(existing_value->data(), existing_value->size(),
value.data(), value.size(),
new_value);
} else {
p.second->merge_nonexistent(value.data(), value.size(), new_value);
}
break;
}
}
return true; // OK :)
}
};
//
// One of these per non-default column family, linked directly to the
// merge operator for that CF/prefix (if any).
//
class RocksDBStore::MergeOperatorLinker
: public rocksdb::AssociativeMergeOperator
{
private:
std::shared_ptr<KeyValueDB::MergeOperator> mop;
public:
explicit MergeOperatorLinker(const std::shared_ptr<KeyValueDB::MergeOperator> &o) : mop(o) {}
const char *Name() const override {
return mop->name();
}
bool Merge(const rocksdb::Slice& key,
const rocksdb::Slice* existing_value,
const rocksdb::Slice& value,
std::string* new_value,
rocksdb::Logger* logger) const override {
if (existing_value) {
mop->merge(existing_value->data(), existing_value->size(),
value.data(), value.size(),
new_value);
} else {
mop->merge_nonexistent(value.data(), value.size(), new_value);
}
return true;
}
};
int RocksDBStore::set_merge_operator(
const string& prefix,
std::shared_ptr<KeyValueDB::MergeOperator> mop)
{
// If you fail here, it's because you can't do this on an open database
ceph_assert(db == nullptr);
merge_ops.push_back(std::make_pair(prefix,mop));
return 0;
}
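// Illustrative sketch (not part of the original code): merge operators have to
// be registered before the database is opened (hence the assert above). The
// prefix "b" and the operator below are hypothetical.
//
// std::shared_ptr<KeyValueDB::MergeOperator> op = ...; // some existing operator
// store->set_merge_operator("b", op); // register while db == nullptr
// store->create_and_open(out, ""); // only now is RocksDB opened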
class CephRocksdbLogger : public rocksdb::Logger {
CephContext *cct;
public:
explicit CephRocksdbLogger(CephContext *c) : cct(c) {
cct->get();
}
~CephRocksdbLogger() override {
cct->put();
}
// Write an entry to the log file with the specified format.
void Logv(const char* format, va_list ap) override {
Logv(rocksdb::INFO_LEVEL, format, ap);
}
// Write an entry to the log file with the specified log level
// and format. Any log with level under the internal log level
// of *this (see @SetInfoLogLevel and @GetInfoLogLevel) will not be
// printed.
void Logv(const rocksdb::InfoLogLevel log_level, const char* format,
va_list ap) override {
int v = rocksdb::NUM_INFO_LOG_LEVELS - log_level - 1;
dout(ceph::dout::need_dynamic(v));
char buf[65536];
vsnprintf(buf, sizeof(buf), format, ap);
*_dout << buf << dendl;
}
};
rocksdb::Logger *create_rocksdb_ceph_logger()
{
return new CephRocksdbLogger(g_ceph_context);
}
static int string2bool(const string &val, bool &b_val)
{
if (strcasecmp(val.c_str(), "false") == 0) {
b_val = false;
return 0;
} else if (strcasecmp(val.c_str(), "true") == 0) {
b_val = true;
return 0;
} else {
std::string err;
int b = strict_strtol(val.c_str(), 10, &err);
if (!err.empty())
return -EINVAL;
b_val = !!b;
return 0;
}
}
namespace rocksdb {
extern std::string trim(const std::string& str);
}
// this function is a modification of rocksdb's StringToMap:
// 1) accepts ' ', '\n', ';' as separators
// 2) leaves compound options with enclosing { and }
rocksdb::Status StringToMap(const std::string& opts_str,
std::unordered_map<std::string, std::string>* opts_map)
{
using rocksdb::Status;
using rocksdb::trim;
assert(opts_map);
// Example:
// opts_str = "write_buffer_size=1024;max_write_buffer_number=2;"
// "nested_opt={opt1=1;opt2=2};max_bytes_for_level_base=100"
size_t pos = 0;
std::string opts = trim(opts_str);
while (pos < opts.size()) {
size_t eq_pos = opts.find('=', pos);
if (eq_pos == std::string::npos) {
return Status::InvalidArgument("Mismatched key value pair, '=' expected");
}
std::string key = trim(opts.substr(pos, eq_pos - pos));
if (key.empty()) {
return Status::InvalidArgument("Empty key found");
}
// skip space after '=' and look for '{' for possible nested options
pos = eq_pos + 1;
while (pos < opts.size() && isspace(opts[pos])) {
++pos;
}
// Empty value at the end
if (pos >= opts.size()) {
(*opts_map)[key] = "";
break;
}
if (opts[pos] == '{') {
int count = 1;
size_t brace_pos = pos + 1;
while (brace_pos < opts.size()) {
if (opts[brace_pos] == '{') {
++count;
} else if (opts[brace_pos] == '}') {
--count;
if (count == 0) {
break;
}
}
++brace_pos;
}
// found the matching closing brace
if (count == 0) {
//include both '{' and '}'
(*opts_map)[key] = trim(opts.substr(pos, brace_pos - pos + 1));
// skip all whitespace and move to the next ';,'
// brace_pos points to the matching '}'
pos = brace_pos + 1;
while (pos < opts.size() && isspace(opts[pos])) {
++pos;
}
if (pos < opts.size() && opts[pos] != ';' && opts[pos] != ',') {
return Status::InvalidArgument(
"Unexpected chars after nested options");
}
++pos;
} else {
return Status::InvalidArgument(
"Mismatched curly braces for nested options");
}
} else {
size_t sc_pos = opts.find_first_of(",;", pos);
if (sc_pos == std::string::npos) {
(*opts_map)[key] = trim(opts.substr(pos));
// It either ends with a trailing , ; or the last key-value pair
break;
} else {
(*opts_map)[key] = trim(opts.substr(pos, sc_pos - pos));
}
pos = sc_pos + 1;
}
}
return Status::OK();
}
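// Illustrative sketch (not part of the original code): what this parser is
// expected to produce for a string containing a nested option. For an input
// such as
//   "write_buffer_size=1048576;nested_opt={opt1=1;opt2=2};disableWAL=true"
// the resulting map should be roughly
//   { "write_buffer_size" -> "1048576",
//     "nested_opt"        -> "{opt1=1;opt2=2}",   // braces are kept, see 2) above
//     "disableWAL"        -> "true" }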
int RocksDBStore::tryInterpret(const string &key, const string &val, rocksdb::Options &opt)
{
if (key == "compaction_threads") {
std::string err;
int f = strict_iecstrtoll(val, &err);
if (!err.empty())
return -EINVAL;
//Low priority threadpool is used for compaction
opt.env->SetBackgroundThreads(f, rocksdb::Env::Priority::LOW);
} else if (key == "flusher_threads") {
std::string err;
int f = strict_iecstrtoll(val, &err);
if (!err.empty())
return -EINVAL;
//High priority threadpool is used for flusher
opt.env->SetBackgroundThreads(f, rocksdb::Env::Priority::HIGH);
} else if (key == "compact_on_mount") {
int ret = string2bool(val, compact_on_mount);
if (ret != 0)
return ret;
} else if (key == "disableWAL") {
int ret = string2bool(val, disableWAL);
if (ret != 0)
return ret;
} else {
//unrecognized config option.
return -EINVAL;
}
return 0;
}
int RocksDBStore::ParseOptionsFromString(const string &opt_str, rocksdb::Options &opt)
{
return ParseOptionsFromStringStatic(cct, opt_str, opt,
[&](const string& k, const string& v, rocksdb::Options& o) {
return tryInterpret(k, v, o);
}
);
}
int RocksDBStore::ParseOptionsFromStringStatic(
CephContext *cct,
const string& opt_str,
rocksdb::Options& opt,
function<int(const string&, const string&, rocksdb::Options&)> interp)
{
// keep aligned with func tryInterpret
const set<string> need_interp_keys = {"compaction_threads", "flusher_threads", "compact_on_mount", "disableWAL"};
rocksdb::Status status;
std::unordered_map<std::string, std::string> str_map;
status = StringToMap(opt_str, &str_map);
if (!status.ok()) {
dout(5) << __func__ << " error '" << status.getState() <<
"' while parsing options '" << opt_str << "'" << dendl;
return -EINVAL;
}
for (auto it = str_map.begin(); it != str_map.end(); ++it) {
string this_opt = it->first + "=" + it->second;
rocksdb::Status status =
rocksdb::GetOptionsFromString(opt, this_opt, &opt);
int r = 0;
if (!status.ok()) {
if (interp != nullptr) {
r = interp(it->first, it->second, opt);
} else if (!need_interp_keys.count(it->first)) {
r = -1;
}
if (r < 0) {
derr << status.ToString() << dendl;
return -EINVAL;
}
}
lgeneric_dout(cct, 1) << " set rocksdb option " << it->first
<< " = " << it->second << dendl;
}
return 0;
}
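// Illustrative sketch (not part of the original code): a plausible options
// string mixing native RocksDB options with the Ceph-specific keys handled by
// tryInterpret(). The values are arbitrary; g_ceph_context stands in for
// whatever CephContext the caller has.
//
// rocksdb::Options opt;
// int r = RocksDBStore::ParseOptionsFromStringStatic(
//     g_ceph_context,
//     "write_buffer_size=33554432;compaction_threads=4;compact_on_mount=true",
//     opt, nullptr);
// // With interp == nullptr the Ceph-specific keys are merely tolerated
// // (skipped); the instance method passes tryInterpret() so they take effect.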
int RocksDBStore::init(string _options_str)
{
options_str = _options_str;
rocksdb::Options opt;
//try parse options
if (options_str.length()) {
int r = ParseOptionsFromString(options_str, opt);
if (r != 0) {
return -EINVAL;
}
}
return 0;
}
int RocksDBStore::create_db_dir()
{
if (env) {
unique_ptr<rocksdb::Directory> dir;
env->NewDirectory(path, &dir);
} else {
if (!fs::exists(path)) {
std::error_code ec;
if (!fs::create_directory(path, ec)) {
derr << __func__ << " failed to create " << path
<< ": " << ec.message() << dendl;
return -ec.value();
}
fs::permissions(path,
fs::perms::owner_all |
fs::perms::group_read | fs::perms::group_exec |
fs::perms::others_read | fs::perms::others_exec);
}
}
return 0;
}
int RocksDBStore::install_cf_mergeop(
const string &key_prefix,
rocksdb::ColumnFamilyOptions *cf_opt)
{
ceph_assert(cf_opt != nullptr);
cf_opt->merge_operator.reset();
for (auto& i : merge_ops) {
if (i.first == key_prefix) {
cf_opt->merge_operator.reset(new MergeOperatorLinker(i.second));
}
}
return 0;
}
int RocksDBStore::create_and_open(ostream &out,
const std::string& cfs)
{
int r = create_db_dir();
if (r < 0)
return r;
return do_open(out, true, false, cfs);
}
std::shared_ptr<rocksdb::Cache> RocksDBStore::create_block_cache(
const std::string& cache_type, size_t cache_size, double cache_prio_high) {
std::shared_ptr<rocksdb::Cache> cache;
auto shard_bits = cct->_conf->rocksdb_cache_shard_bits;
if (cache_type == "binned_lru") {
cache = rocksdb_cache::NewBinnedLRUCache(cct, cache_size, shard_bits, false, cache_prio_high);
} else if (cache_type == "lru") {
cache = rocksdb::NewLRUCache(cache_size, shard_bits);
} else if (cache_type == "clock") {
cache = rocksdb::NewClockCache(cache_size, shard_bits);
if (!cache) {
derr << "rocksdb_cache_type '" << cache
<< "' chosen, but RocksDB not compiled with LibTBB. "
<< dendl;
}
} else {
derr << "unrecognized rocksdb_cache_type '" << cache_type << "'" << dendl;
}
return cache;
}
int RocksDBStore::load_rocksdb_options(bool create_if_missing, rocksdb::Options& opt)
{
rocksdb::Status status;
if (options_str.length()) {
int r = ParseOptionsFromString(options_str, opt);
if (r != 0) {
return -EINVAL;
}
}
if (cct->_conf->rocksdb_perf) {
dbstats = rocksdb::CreateDBStatistics();
opt.statistics = dbstats;
}
opt.create_if_missing = create_if_missing;
if (kv_options.count("separate_wal_dir")) {
opt.wal_dir = path + ".wal";
}
// Since ceph::for_each_substr doesn't return a value and
// std::stoull does throw, we may as well just catch everything here.
try {
if (kv_options.count("db_paths")) {
list<string> paths;
get_str_list(kv_options["db_paths"], "; \t", paths);
for (auto& p : paths) {
size_t pos = p.find(',');
if (pos == std::string::npos) {
derr << __func__ << " invalid db path item " << p << " in "
<< kv_options["db_paths"] << dendl;
return -EINVAL;
}
string path = p.substr(0, pos);
string size_str = p.substr(pos + 1);
uint64_t size = atoll(size_str.c_str());
if (!size) {
derr << __func__ << " invalid db path item " << p << " in "
<< kv_options["db_paths"] << dendl;
return -EINVAL;
}
opt.db_paths.push_back(rocksdb::DbPath(path, size));
dout(10) << __func__ << " db_path " << path << " size " << size << dendl;
}
}
} catch (const std::system_error& e) {
return -e.code().value();
}
if (cct->_conf->rocksdb_log_to_ceph_log) {
opt.info_log.reset(new CephRocksdbLogger(cct));
}
if (priv) {
dout(10) << __func__ << " using custom Env " << priv << dendl;
opt.env = static_cast<rocksdb::Env*>(priv);
} else {
env = opt.env;
}
opt.env->SetAllowNonOwnerAccess(false);
// caches
if (!set_cache_flag) {
cache_size = cct->_conf->rocksdb_cache_size;
}
uint64_t row_cache_size = cache_size * cct->_conf->rocksdb_cache_row_ratio;
uint64_t block_cache_size = cache_size - row_cache_size;
bbt_opts.block_cache = create_block_cache(cct->_conf->rocksdb_cache_type, block_cache_size);
if (!bbt_opts.block_cache) {
return -EINVAL;
}
bbt_opts.block_size = cct->_conf->rocksdb_block_size;
if (row_cache_size > 0)
opt.row_cache = rocksdb::NewLRUCache(row_cache_size,
cct->_conf->rocksdb_cache_shard_bits);
uint64_t bloom_bits = cct->_conf.get_val<uint64_t>("rocksdb_bloom_bits_per_key");
if (bloom_bits > 0) {
dout(10) << __func__ << " set bloom filter bits per key to "
<< bloom_bits << dendl;
bbt_opts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(bloom_bits));
}
using std::placeholders::_1;
if (cct->_conf.with_val<std::string>("rocksdb_index_type",
std::bind(std::equal_to<std::string>(), _1,
"binary_search")))
bbt_opts.index_type = rocksdb::BlockBasedTableOptions::IndexType::kBinarySearch;
if (cct->_conf.with_val<std::string>("rocksdb_index_type",
std::bind(std::equal_to<std::string>(), _1,
"hash_search")))
bbt_opts.index_type = rocksdb::BlockBasedTableOptions::IndexType::kHashSearch;
if (cct->_conf.with_val<std::string>("rocksdb_index_type",
std::bind(std::equal_to<std::string>(), _1,
"two_level")))
bbt_opts.index_type = rocksdb::BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
if (!bbt_opts.no_block_cache) {
bbt_opts.cache_index_and_filter_blocks =
cct->_conf.get_val<bool>("rocksdb_cache_index_and_filter_blocks");
bbt_opts.cache_index_and_filter_blocks_with_high_priority =
cct->_conf.get_val<bool>("rocksdb_cache_index_and_filter_blocks_with_high_priority");
bbt_opts.pin_l0_filter_and_index_blocks_in_cache =
cct->_conf.get_val<bool>("rocksdb_pin_l0_filter_and_index_blocks_in_cache");
}
bbt_opts.partition_filters = cct->_conf.get_val<bool>("rocksdb_partition_filters");
if (cct->_conf.get_val<Option::size_t>("rocksdb_metadata_block_size") > 0)
bbt_opts.metadata_block_size = cct->_conf.get_val<Option::size_t>("rocksdb_metadata_block_size");
opt.table_factory.reset(rocksdb::NewBlockBasedTableFactory(bbt_opts));
dout(10) << __func__ << " block size " << cct->_conf->rocksdb_block_size
<< ", block_cache size " << byte_u_t(block_cache_size)
<< ", row_cache size " << byte_u_t(row_cache_size)
<< "; shards "
<< (1 << cct->_conf->rocksdb_cache_shard_bits)
<< ", type " << cct->_conf->rocksdb_cache_type
<< dendl;
opt.merge_operator.reset(new MergeOperatorRouter(*this));
comparator = opt.comparator;
return 0;
}
void RocksDBStore::add_column_family(const std::string& cf_name, uint32_t hash_l, uint32_t hash_h,
size_t shard_idx, rocksdb::ColumnFamilyHandle *handle) {
dout(10) << __func__ << " column_name=" << cf_name << " shard_idx=" << shard_idx <<
" hash_l=" << hash_l << " hash_h=" << hash_h << " handle=" << (void*) handle << dendl;
bool exists = cf_handles.count(cf_name) > 0;
auto& column = cf_handles[cf_name];
if (exists) {
ceph_assert(hash_l == column.hash_l);
ceph_assert(hash_h == column.hash_h);
} else {
ceph_assert(hash_l < hash_h);
column.hash_l = hash_l;
column.hash_h = hash_h;
}
if (column.handles.size() <= shard_idx)
column.handles.resize(shard_idx + 1);
column.handles[shard_idx] = handle;
cf_ids_to_prefix.emplace(handle->GetID(), cf_name);
}
bool RocksDBStore::is_column_family(const std::string& prefix) {
return cf_handles.count(prefix);
}
std::string_view RocksDBStore::get_key_hash_view(const prefix_shards& shards, const char* key, const size_t keylen) {
uint32_t hash_l = std::min<uint32_t>(shards.hash_l, keylen);
uint32_t hash_h = std::min<uint32_t>(shards.hash_h, keylen);
return { key + hash_l, hash_h - hash_l };
}
rocksdb::ColumnFamilyHandle *RocksDBStore::get_key_cf(const prefix_shards& shards, const char* key, const size_t keylen) {
auto sv = get_key_hash_view(shards, key, keylen);
uint32_t hash = ceph_str_hash_rjenkins(sv.data(), sv.size());
return shards.handles[hash % shards.handles.size()];
}
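// Illustrative sketch (not part of the original code) of shard selection for a
// sharded column. With hash_l=0 and hash_h=8 only the first 8 bytes of the key
// are hashed, so keys sharing that prefix map to the same shard handle:
//
// // prefix_shards shards with handles {h0, h1, h2}, hash_l=0, hash_h=8:
// // get_key_cf(shards, "ABCDEFGH-1", 10) and get_key_cf(shards, "ABCDEFGH-2", 10)
// // both hash the 8-byte view "ABCDEFGH" and therefore return the same handle.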
rocksdb::ColumnFamilyHandle *RocksDBStore::get_cf_handle(const std::string& prefix, const std::string& key) {
auto iter = cf_handles.find(prefix);
if (iter == cf_handles.end()) {
return nullptr;
} else {
if (iter->second.handles.size() == 1) {
return iter->second.handles[0];
} else {
return get_key_cf(iter->second, key.data(), key.size());
}
}
}
rocksdb::ColumnFamilyHandle *RocksDBStore::get_cf_handle(const std::string& prefix, const char* key, size_t keylen) {
auto iter = cf_handles.find(prefix);
if (iter == cf_handles.end()) {
return nullptr;
} else {
if (iter->second.handles.size() == 1) {
return iter->second.handles[0];
} else {
return get_key_cf(iter->second, key, keylen);
}
}
}
/**
* If the specified IteratorBounds arg has both an upper and a lower bound defined, and they have equal placement hash
* strings, we can be sure that the entire iteration range exists in a single CF. In that case, we return the relevant
* CF handle. In all other cases, we return a nullptr to indicate that the specified bounds cannot necessarily be mapped
* to a single CF.
*/
rocksdb::ColumnFamilyHandle *RocksDBStore::check_cf_handle_bounds(const cf_handles_iterator& iter, const IteratorBounds& bounds) {
if (!bounds.lower_bound || !bounds.upper_bound) {
return nullptr;
}
ceph_assert(iter != cf_handles.end());
ceph_assert(iter->second.handles.size() != 1);
if (iter->second.hash_l != 0) {
return nullptr;
}
auto lower_bound_hash_str = get_key_hash_view(iter->second, bounds.lower_bound->data(), bounds.lower_bound->size());
auto upper_bound_hash_str = get_key_hash_view(iter->second, bounds.upper_bound->data(), bounds.upper_bound->size());
if (lower_bound_hash_str == upper_bound_hash_str) {
auto key = *bounds.lower_bound;
return get_key_cf(iter->second, key.data(), key.size());
} else {
return nullptr;
}
}
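// Illustrative sketch (not part of the original code): with hash_l=0, hash_h=4
// and bounds ["OBJ1-a", "OBJ1-z"), both bounds hash the same 4-byte view
// "OBJ1", so the whole iteration range lives in one shard and that handle is
// returned. Bounds like ["OBJ1-a", "OBJ2-a") hash different views, so nullptr
// is returned and the caller presumably has to consult every shard.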
/**
* Definition of sharding:
* space-separated list of: column_def [ '=' options ]
* column_def := column_name '(' shard_count ')'
* column_def := column_name '(' shard_count ',' hash_begin '-' ')'
* column_def := column_name '(' shard_count ',' hash_begin '-' hash_end ')'
* I=write_buffer_size=1048576 O(6) m(7,10-) prefix(4,0-10)=disable_auto_compactions=true,max_bytes_for_level_base=1048576
*/
bool RocksDBStore::parse_sharding_def(const std::string_view text_def_in,
std::vector<ColumnFamily>& sharding_def,
char const* *error_position,
std::string *error_msg)
{
std::string_view text_def = text_def_in;
char const* error_position_local = nullptr;
std::string error_msg_local;
if (error_position == nullptr) {
error_position = &error_position_local;
}
*error_position = nullptr;
if (error_msg == nullptr) {
error_msg = &error_msg_local;
error_msg->clear();
}
sharding_def.clear();
while (!text_def.empty()) {
std::string_view options;
std::string_view name;
size_t shard_cnt = 1;
uint32_t l_bound = 0;
uint32_t h_bound = std::numeric_limits<uint32_t>::max();
std::string_view column_def;
size_t spos = text_def.find(' ');
if (spos == std::string_view::npos) {
column_def = text_def;
text_def = std::string_view(text_def.end(), 0);
} else {
column_def = text_def.substr(0, spos);
text_def = text_def.substr(spos + 1);
}
size_t eqpos = column_def.find('=');
if (eqpos != std::string_view::npos) {
options = column_def.substr(eqpos + 1);
column_def = column_def.substr(0, eqpos);
}
size_t bpos = column_def.find('(');
if (bpos != std::string_view::npos) {
name = column_def.substr(0, bpos);
const char* nptr = &column_def[bpos + 1];
char* endptr;
shard_cnt = strtol(nptr, &endptr, 10);
if (nptr == endptr) {
*error_position = nptr;
*error_msg = "expecting integer";
break;
}
nptr = endptr;
if (*nptr == ',') {
nptr++;
l_bound = strtol(nptr, &endptr, 10);
if (nptr == endptr) {
*error_position = nptr;
*error_msg = "expecting integer";
break;
}
nptr = endptr;
if (*nptr != '-') {
*error_position = nptr;
*error_msg = "expecting '-'";
break;
}
nptr++;
h_bound = strtol(nptr, &endptr, 10);
if (nptr == endptr) {
h_bound = std::numeric_limits<uint32_t>::max();
}
nptr = endptr;
}
if (*nptr != ')') {
*error_position = nptr;
*error_msg = "expecting ')'";
break;
}
} else {
name = column_def;
}
sharding_def.emplace_back(std::string(name), shard_cnt, std::string(options), l_bound, h_bound);
}
return *error_position == nullptr;
}
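// Illustrative sketch (not part of the original code): expected parse of a
// definition following the grammar documented above. The text
//   "O(6) m(7,10-) prefix(4,0-10)=max_bytes_for_level_base=1048576"
// should yield three ColumnFamily entries roughly equivalent to
//   { "O",      6, "",                                  0, UINT32_MAX }
//   { "m",      7, "",                                 10, UINT32_MAX }
//   { "prefix", 4, "max_bytes_for_level_base=1048576",  0, 10         }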
void RocksDBStore::sharding_def_to_columns(const std::vector<ColumnFamily>& sharding_def,
std::vector<std::string>& columns)
{
columns.clear();
for (size_t i = 0; i < sharding_def.size(); i++) {
if (sharding_def[i].shard_cnt == 1) {
columns.push_back(sharding_def[i].name);
} else {
for (size_t j = 0; j < sharding_def[i].shard_cnt; j++) {
columns.push_back(sharding_def[i].name + "-" + std::to_string(j));
}
}
}
}
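// Illustrative sketch (not part of the original code): for a sharding
// definition like "O(3) p" this helper is expected to emit the physical column
// family names
//   "O-0", "O-1", "O-2", "p"
// i.e. multi-shard columns get a "-<idx>" suffix, single-shard columns keep
// their bare name.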
int RocksDBStore::create_shards(const rocksdb::Options& opt,
const std::vector<ColumnFamily>& sharding_def)
{
for (auto& p : sharding_def) {
// copy default CF settings, block cache, merge operators as
// the base for new CF
rocksdb::ColumnFamilyOptions cf_opt(opt);
rocksdb::Status status;
// apply options to column family
int r = update_column_family_options(p.name, p.options, &cf_opt);
if (r != 0) {
return r;
}
for (size_t idx = 0; idx < p.shard_cnt; idx++) {
std::string cf_name;
if (p.shard_cnt == 1)
cf_name = p.name;
else
cf_name = p.name + "-" + std::to_string(idx);
rocksdb::ColumnFamilyHandle *cf;
status = db->CreateColumnFamily(cf_opt, cf_name, &cf);
if (!status.ok()) {
derr << __func__ << " Failed to create rocksdb column family: "
<< cf_name << dendl;
return -EINVAL;
}
// store the new CF handle
add_column_family(p.name, p.hash_l, p.hash_h, idx, cf);
}
}
return 0;
}
int RocksDBStore::apply_sharding(const rocksdb::Options& opt,
const std::string& sharding_text)
{
// create and open column families
if (!sharding_text.empty()) {
bool b;
int r;
rocksdb::Status status;
std::vector<ColumnFamily> sharding_def;
char const* error_position;
std::string error_msg;
b = parse_sharding_def(sharding_text, sharding_def, &error_position, &error_msg);
if (!b) {
dout(1) << __func__ << " bad sharding: " << dendl;
dout(1) << __func__ << sharding_text << dendl;
dout(1) << __func__ << std::string(error_position - &sharding_text[0], ' ') << "^" << error_msg << dendl;
return -EINVAL;
}
r = create_shards(opt, sharding_def);
if (r != 0 ) {
derr << __func__ << " create_shards failed error=" << r << dendl;
return r;
}
opt.env->CreateDir(sharding_def_dir);
status = rocksdb::WriteStringToFile(opt.env, sharding_text,
sharding_def_file, true);
if (!status.ok()) {
derr << __func__ << " cannot write to " << sharding_def_file << dendl;
return -EIO;
}
} else {
opt.env->DeleteFile(sharding_def_file);
}
return 0;
}
// linking to rocksdb function defined in options_helper.cc
// it can parse nested params like "nested_opt={opt1=1;opt2=2}"
extern rocksdb::Status rocksdb::StringToMap(const std::string& opts_str,
std::unordered_map<std::string, std::string>* opts_map);
// Splits column family options from single string into name->value column_opts_map.
// The split is done using RocksDB parser that understands "{" and "}", so it
// properly extracts compound options.
// If non-RocksDB option "block_cache" is defined it is extracted to block_cache_opt.
int RocksDBStore::split_column_family_options(const std::string& options,
std::unordered_map<std::string, std::string>* opt_map,
std::string* block_cache_opt)
{
dout(20) << __func__ << " options=" << options << dendl;
rocksdb::Status status = rocksdb::StringToMap(options, opt_map);
if (!status.ok()) {
dout(5) << __func__ << " error '" << status.getState()
<< "' while parsing options '" << options << "'" << dendl;
return -EINVAL;
}
// if "block_cache" option exists, then move it to separate string
if (auto it = opt_map->find("block_cache"); it != opt_map->end()) {
*block_cache_opt = it->second;
opt_map->erase(it);
} else {
block_cache_opt->clear();
}
return 0;
}
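// Illustrative sketch (not part of the original code): expected behaviour for
// an options string carrying the Ceph-specific "block_cache" key. The keys
// inside block_cache (type/size) are the ones consumed later by
// apply_block_cache_options().
//
//   input:  "write_buffer_size=16777216;block_cache={type=binned_lru;size=1073741824}"
//   opt_map:         { "write_buffer_size" -> "16777216" }
//   block_cache_opt: the value of the block_cache={...} entry, parsed again by
//                    apply_block_cache_options()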
// Updates column family options.
// Take options from more_options and apply them to cf_opt.
// Allowed options are exactly the same as allowed for column families in RocksDB.
// Ceph addition is "block_cache" option that is translated to block_cache and
// allows to specialize separate block cache for O column family.
//
// base_name - name of column without shard suffix: "-"+number
// options - additional options to apply
// cf_opt - column family options to update
int RocksDBStore::update_column_family_options(const std::string& base_name,
const std::string& more_options,
rocksdb::ColumnFamilyOptions* cf_opt)
{
std::unordered_map<std::string, std::string> options_map;
std::string block_cache_opt;
rocksdb::Status status;
int r = split_column_family_options(more_options, &options_map, &block_cache_opt);
if (r != 0) {
dout(5) << __func__ << " failed to parse options; column family=" << base_name
<< " options=" << more_options << dendl;
return r;
}
status = rocksdb::GetColumnFamilyOptionsFromMap(*cf_opt, options_map, cf_opt);
if (!status.ok()) {
dout(5) << __func__ << " invalid column family options; column family="
<< base_name << " options=" << more_options << dendl;
dout(5) << __func__ << " RocksDB error='" << status.getState() << "'" << dendl;
return -EINVAL;
}
if (base_name != rocksdb::kDefaultColumnFamilyName) {
// default cf has its merge operator defined in load_rocksdb_options, should not override it
install_cf_mergeop(base_name, cf_opt);
}
if (!block_cache_opt.empty()) {
r = apply_block_cache_options(base_name, block_cache_opt, cf_opt);
if (r != 0) {
// apply_block_cache_options already does all necessary douts
return r;
}
}
// Set Compact on Deletion Factory
if (cct->_conf->rocksdb_cf_compact_on_deletion) {
size_t sliding_window = cct->_conf->rocksdb_cf_compact_on_deletion_sliding_window;
size_t trigger = cct->_conf->rocksdb_cf_compact_on_deletion_trigger;
cf_opt->table_properties_collector_factories.emplace_back(
rocksdb::NewCompactOnDeletionCollectorFactory(sliding_window, trigger));
}
return 0;
}
int RocksDBStore::apply_block_cache_options(const std::string& column_name,
const std::string& block_cache_opt,
rocksdb::ColumnFamilyOptions* cf_opt)
{
rocksdb::Status status;
std::unordered_map<std::string, std::string> cache_options_map;
status = rocksdb::StringToMap(block_cache_opt, &cache_options_map);
if (!status.ok()) {
dout(5) << __func__ << " invalid block cache options; column=" << column_name
<< " options=" << block_cache_opt << dendl;
dout(5) << __func__ << " RocksDB error='" << status.getState() << "'" << dendl;
return -EINVAL;
}
bool require_new_block_cache = false;
std::string cache_type = cct->_conf->rocksdb_cache_type;
if (const auto it = cache_options_map.find("type"); it != cache_options_map.end()) {
cache_type = it->second;
cache_options_map.erase(it);
require_new_block_cache = true;
}
size_t cache_size = cct->_conf->rocksdb_cache_size;
if (auto it = cache_options_map.find("size"); it != cache_options_map.end()) {
std::string error;
cache_size = strict_iecstrtoll(it->second.c_str(), &error);
if (!error.empty()) {
dout(10) << __func__ << " invalid size: '" << it->second << "'" << dendl;
return -EINVAL;
}
cache_options_map.erase(it);
require_new_block_cache = true;
}
double high_pri_pool_ratio = 0.0;
if (auto it = cache_options_map.find("high_ratio"); it != cache_options_map.end()) {
std::string error;
high_pri_pool_ratio = strict_strtod(it->second.c_str(), &error);
if (!error.empty()) {
dout(10) << __func__ << " invalid high_pri (float): '" << it->second << "'" << dendl;
return -EINVAL;
}
cache_options_map.erase(it);
require_new_block_cache = true;
}
rocksdb::BlockBasedTableOptions column_bbt_opts;
status = GetBlockBasedTableOptionsFromMap(bbt_opts, cache_options_map, &column_bbt_opts);
if (!status.ok()) {
dout(5) << __func__ << " invalid block cache options; column=" << column_name
<< " options=" << block_cache_opt << dendl;
dout(5) << __func__ << " RocksDB error='" << status.getState() << "'" << dendl;
return -EINVAL;
}
std::shared_ptr<rocksdb::Cache> block_cache;
if (column_bbt_opts.no_block_cache) {
// clear all settings except no_block_cache;
// rocksdb does not like them otherwise
column_bbt_opts = rocksdb::BlockBasedTableOptions();
column_bbt_opts.no_block_cache = true;
} else {
if (require_new_block_cache) {
block_cache = create_block_cache(cache_type, cache_size, high_pri_pool_ratio);
if (!block_cache) {
dout(5) << __func__ << " failed to create block cache for params: " << block_cache_opt << dendl;
return -EINVAL;
}
} else {
block_cache = bbt_opts.block_cache;
}
}
column_bbt_opts.block_cache = block_cache;
cf_bbt_opts[column_name] = column_bbt_opts;
cf_opt->table_factory.reset(NewBlockBasedTableFactory(cf_bbt_opts[column_name]));
return 0;
}
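// Illustrative sketch (not part of the original code): the non-RocksDB keys
// recognized above and what they map to; the values are arbitrary examples.
//
//   "type=binned_lru;size=512M;high_ratio=0.5"
//     type       -> cache implementation passed to create_block_cache()
//     size       -> cache size (strict_iecstrtoll, so IEC suffixes like M/G work)
//     high_ratio -> high-priority pool ratio for the binned_lru cache
//   Any remaining keys are handed to GetBlockBasedTableOptionsFromMap() as
//   ordinary BlockBasedTableOptions fields.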
int RocksDBStore::verify_sharding(const rocksdb::Options& opt,
std::vector<rocksdb::ColumnFamilyDescriptor>& existing_cfs,
std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> >& existing_cfs_shard,
std::vector<rocksdb::ColumnFamilyDescriptor>& missing_cfs,
std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> >& missing_cfs_shard)
{
rocksdb::Status status;
std::string stored_sharding_text;
status = opt.env->FileExists(sharding_def_file);
if (status.ok()) {
status = rocksdb::ReadFileToString(opt.env,
sharding_def_file,
&stored_sharding_text);
if(!status.ok()) {
derr << __func__ << " cannot read from " << sharding_def_file << dendl;
return -EIO;
}
dout(20) << __func__ << " sharding=" << stored_sharding_text << dendl;
} else {
dout(30) << __func__ << " no sharding" << dendl;
//no "sharding_def" present
}
//check if sharding_def matches stored_sharding_def
std::vector<ColumnFamily> stored_sharding_def;
parse_sharding_def(stored_sharding_text, stored_sharding_def);
std::sort(stored_sharding_def.begin(), stored_sharding_def.end(),
[](ColumnFamily& a, ColumnFamily& b) { return a.name < b.name; } );
std::vector<string> rocksdb_cfs;
status = rocksdb::DB::ListColumnFamilies(rocksdb::DBOptions(opt),
path, &rocksdb_cfs);
if (!status.ok()) {
derr << __func__ << " unable to list column families: " << status.ToString() << dendl;
return -EIO;
}
dout(5) << __func__ << " column families from rocksdb: " << rocksdb_cfs << dendl;
auto emplace_cf = [&] (const RocksDBStore::ColumnFamily& column,
int32_t shard_id,
const std::string& shard_name,
const rocksdb::ColumnFamilyOptions& opt) {
if (std::find(rocksdb_cfs.begin(), rocksdb_cfs.end(), shard_name) != rocksdb_cfs.end()) {
existing_cfs.emplace_back(shard_name, opt);
existing_cfs_shard.emplace_back(shard_id, column);
} else {
missing_cfs.emplace_back(shard_name, opt);
missing_cfs_shard.emplace_back(shard_id, column);
}
};
for (auto& column : stored_sharding_def) {
rocksdb::ColumnFamilyOptions cf_opt(opt);
int r = update_column_family_options(column.name, column.options, &cf_opt);
if (r != 0) {
return r;
}
if (column.shard_cnt == 1) {
emplace_cf(column, 0, column.name, cf_opt);
} else {
for (size_t i = 0; i < column.shard_cnt; i++) {
std::string cf_name = column.name + "-" + std::to_string(i);
emplace_cf(column, i, cf_name, cf_opt);
}
}
}
existing_cfs.emplace_back("default", opt);
if (existing_cfs.size() != rocksdb_cfs.size()) {
std::vector<std::string> columns_from_stored;
sharding_def_to_columns(stored_sharding_def, columns_from_stored);
derr << __func__ << " extra columns in rocksdb. rocksdb columns = " << rocksdb_cfs
<< " target columns = " << columns_from_stored << dendl;
return -EIO;
}
return 0;
}
std::ostream& operator<<(std::ostream& out, const RocksDBStore::ColumnFamily& cf)
{
out << "(";
out << cf.name;
out << ",";
out << cf.shard_cnt;
out << ",";
out << cf.hash_l;
out << "-";
if (cf.hash_h != std::numeric_limits<uint32_t>::max()) {
out << cf.hash_h;
}
out << ",";
out << cf.options;
out << ")";
return out;
}
int RocksDBStore::do_open(ostream &out,
bool create_if_missing,
bool open_readonly,
const std::string& sharding_text)
{
ceph_assert(!(create_if_missing && open_readonly));
rocksdb::Options opt;
int r = load_rocksdb_options(create_if_missing, opt);
if (r) {
dout(1) << __func__ << " load rocksdb options failed" << dendl;
return r;
}
rocksdb::Status status;
if (create_if_missing) {
status = rocksdb::DB::Open(opt, path, &db);
if (!status.ok()) {
derr << status.ToString() << dendl;
return -EINVAL;
}
r = apply_sharding(opt, sharding_text);
if (r < 0) {
return r;
}
default_cf = db->DefaultColumnFamily();
} else {
std::vector<rocksdb::ColumnFamilyDescriptor> existing_cfs;
std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> > existing_cfs_shard;
std::vector<rocksdb::ColumnFamilyDescriptor> missing_cfs;
std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> > missing_cfs_shard;
r = verify_sharding(opt,
existing_cfs, existing_cfs_shard,
missing_cfs, missing_cfs_shard);
if (r < 0) {
return r;
}
std::string sharding_recreate_text;
status = rocksdb::ReadFileToString(opt.env,
sharding_recreate,
&sharding_recreate_text);
bool recreate_mode = status.ok() && sharding_recreate_text == "1";
ceph_assert(!recreate_mode || !open_readonly);
if (recreate_mode == false && missing_cfs.size() != 0) {
// We do not accept missing column families, except when we are in the
// middle of resharding. We can get into this state if resharding was
// interrupted; it gives us a chance to continue. Opening the DB is then
// only allowed in read-only mode.
if (open_readonly == false &&
std::find_if(missing_cfs.begin(), missing_cfs.end(),
[](const rocksdb::ColumnFamilyDescriptor& c) { return c.name == resharding_column_lock; }
) != missing_cfs.end()) {
derr << __func__ << " missing column families: " << missing_cfs_shard << dendl;
return -EIO;
}
}
if (existing_cfs.empty()) {
// no column families
if (open_readonly) {
status = rocksdb::DB::OpenForReadOnly(opt, path, &db);
} else {
status = rocksdb::DB::Open(opt, path, &db);
}
if (!status.ok()) {
derr << status.ToString() << dendl;
return -EINVAL;
}
default_cf = db->DefaultColumnFamily();
} else {
std::vector<rocksdb::ColumnFamilyHandle*> handles;
if (open_readonly) {
status = rocksdb::DB::OpenForReadOnly(rocksdb::DBOptions(opt),
path, existing_cfs,
&handles, &db);
} else {
status = rocksdb::DB::Open(rocksdb::DBOptions(opt),
path, existing_cfs, &handles, &db);
}
if (!status.ok()) {
derr << status.ToString() << dendl;
return -EINVAL;
}
ceph_assert(existing_cfs.size() == existing_cfs_shard.size() + 1);
ceph_assert(handles.size() == existing_cfs.size());
dout(10) << __func__ << " existing_cfs=" << existing_cfs.size() << dendl;
for (size_t i = 0; i < existing_cfs_shard.size(); i++) {
add_column_family(existing_cfs_shard[i].second.name,
existing_cfs_shard[i].second.hash_l,
existing_cfs_shard[i].second.hash_h,
existing_cfs_shard[i].first,
handles[i]);
}
default_cf = handles[handles.size() - 1];
must_close_default_cf = true;
if (missing_cfs.size() > 0 &&
std::find_if(missing_cfs.begin(), missing_cfs.end(),
[](const rocksdb::ColumnFamilyDescriptor& c) { return c.name == resharding_column_lock; }
) == missing_cfs.end())
{
dout(10) << __func__ << " missing_cfs=" << missing_cfs.size() << dendl;
ceph_assert(recreate_mode);
ceph_assert(missing_cfs.size() == missing_cfs_shard.size());
for (size_t i = 0; i < missing_cfs.size(); i++) {
rocksdb::ColumnFamilyHandle *cf;
status = db->CreateColumnFamily(missing_cfs[i].options, missing_cfs[i].name, &cf);
if (!status.ok()) {
derr << __func__ << " Failed to create rocksdb column family: "
<< missing_cfs[i].name << dendl;
return -EINVAL;
}
add_column_family(missing_cfs_shard[i].second.name,
missing_cfs_shard[i].second.hash_l,
missing_cfs_shard[i].second.hash_h,
missing_cfs_shard[i].first,
cf);
}
opt.env->DeleteFile(sharding_recreate);
}
}
}
ceph_assert(default_cf != nullptr);
PerfCountersBuilder plb(cct, "rocksdb", l_rocksdb_first, l_rocksdb_last);
plb.add_time_avg(l_rocksdb_get_latency, "get_latency", "Get latency");
plb.add_time_avg(l_rocksdb_submit_latency, "submit_latency", "Submit Latency");
plb.add_time_avg(l_rocksdb_submit_sync_latency, "submit_sync_latency", "Submit Sync Latency");
plb.add_u64_counter(l_rocksdb_compact, "compact", "Compactions");
plb.add_u64_counter(l_rocksdb_compact_range, "compact_range", "Compactions by range");
plb.add_u64_counter(l_rocksdb_compact_queue_merge, "compact_queue_merge", "Merges of ranges in compaction queue");
plb.add_u64(l_rocksdb_compact_queue_len, "compact_queue_len", "Length of compaction queue");
plb.add_time_avg(l_rocksdb_write_wal_time, "rocksdb_write_wal_time", "Rocksdb write wal time");
plb.add_time_avg(l_rocksdb_write_memtable_time, "rocksdb_write_memtable_time", "Rocksdb write memtable time");
plb.add_time_avg(l_rocksdb_write_delay_time, "rocksdb_write_delay_time", "Rocksdb write delay time");
plb.add_time_avg(l_rocksdb_write_pre_and_post_process_time,
"rocksdb_write_pre_and_post_time", "total time spent on writing a record, excluding write process");
logger = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
if (compact_on_mount) {
derr << "Compacting rocksdb store..." << dendl;
compact();
derr << "Finished compacting rocksdb store" << dendl;
}
return 0;
}
int RocksDBStore::_test_init(const string& dir)
{
rocksdb::Options options;
options.create_if_missing = true;
rocksdb::DB *db;
rocksdb::Status status = rocksdb::DB::Open(options, dir, &db);
delete db;
db = nullptr;
return status.ok() ? 0 : -EIO;
}
RocksDBStore::~RocksDBStore()
{
close();
if (priv) {
delete static_cast<rocksdb::Env*>(priv);
}
}
void RocksDBStore::close()
{
// stop compaction thread
compact_queue_lock.lock();
if (compact_thread.is_started()) {
dout(1) << __func__ << " waiting for compaction thread to stop" << dendl;
compact_queue_stop = true;
compact_queue_cond.notify_all();
compact_queue_lock.unlock();
compact_thread.join();
dout(1) << __func__ << " compaction thread stopped" << dendl;
} else {
compact_queue_lock.unlock();
}
if (logger) {
cct->get_perfcounters_collection()->remove(logger);
delete logger;
logger = nullptr;
}
// Ensure db is destroyed before dependent db_cache and filterpolicy
for (auto& p : cf_handles) {
for (size_t i = 0; i < p.second.handles.size(); i++) {
db->DestroyColumnFamilyHandle(p.second.handles[i]);
}
}
cf_handles.clear();
if (must_close_default_cf) {
db->DestroyColumnFamilyHandle(default_cf);
must_close_default_cf = false;
}
default_cf = nullptr;
delete db;
db = nullptr;
}
int RocksDBStore::repair(std::ostream &out)
{
rocksdb::Status status;
rocksdb::Options opt;
int r = load_rocksdb_options(false, opt);
if (r) {
dout(1) << __func__ << " load rocksdb options failed" << dendl;
out << "load rocksdb options failed" << std::endl;
return r;
}
//need to save the sharding definition; RepairDB will delete files it does not know about
std::string stored_sharding_text;
status = opt.env->FileExists(sharding_def_file);
if (status.ok()) {
status = rocksdb::ReadFileToString(opt.env,
sharding_def_file,
&stored_sharding_text);
if (!status.ok()) {
stored_sharding_text.clear();
}
}
dout(10) << __func__ << " stored_sharding: " << stored_sharding_text << dendl;
status = rocksdb::RepairDB(path, opt);
bool repaired = status.ok();
if (!stored_sharding_text.empty()) {
//recreate markers even if repair failed
opt.env->CreateDir(sharding_def_dir);
status = rocksdb::WriteStringToFile(opt.env, stored_sharding_text,
sharding_def_file, true);
if (!status.ok()) {
derr << __func__ << " cannot write to " << sharding_def_file << dendl;
return -1;
}
status = rocksdb::WriteStringToFile(opt.env, "1",
sharding_recreate, true);
if (!status.ok()) {
derr << __func__ << " cannot write to " << sharding_recreate << dendl;
return -1;
}
// finalize sharding recreate
if (do_open(out, false, false)) {
derr << __func__ << " cannot finalize repair" << dendl;
return -1;
}
close();
}
if (repaired && status.ok()) {
return 0;
} else {
out << "repair rocksdb failed : " << status.ToString() << std::endl;
return -1;
}
}
void RocksDBStore::split_stats(const std::string &s, char delim, std::vector<std::string> &elems) {
std::stringstream ss;
ss.str(s);
std::string item;
while (std::getline(ss, item, delim)) {
elems.push_back(item);
}
}
bool RocksDBStore::get_property(
const std::string &property,
uint64_t *out)
{
return db->GetIntProperty(property, out);
}
int64_t RocksDBStore::estimate_prefix_size(const string& prefix,
const string& key_prefix)
{
uint64_t size = 0;
auto p_iter = cf_handles.find(prefix);
if (p_iter != cf_handles.end()) {
for (auto cf : p_iter->second.handles) {
uint64_t s = 0;
string start = key_prefix + string(1, '\x00');
string limit = key_prefix + string("\xff\xff\xff\xff");
rocksdb::Range r(start, limit);
db->GetApproximateSizes(cf, &r, 1, &s);
size += s;
}
} else {
string start = combine_strings(prefix , key_prefix);
string limit = combine_strings(prefix , key_prefix + "\xff\xff\xff\xff");
rocksdb::Range r(start, limit);
db->GetApproximateSizes(default_cf, &r, 1, &size);
}
return size;
}
void RocksDBStore::get_statistics(Formatter *f)
{
if (!cct->_conf->rocksdb_perf) {
dout(20) << __func__ << " RocksDB perf is disabled, can't probe for stats"
<< dendl;
return;
}
if (cct->_conf->rocksdb_collect_compaction_stats) {
std::string stat_str;
bool status = db->GetProperty("rocksdb.stats", &stat_str);
if (status) {
f->open_object_section("rocksdb_statistics");
f->dump_string("rocksdb_compaction_statistics", "");
vector<string> stats;
split_stats(stat_str, '\n', stats);
for (auto st :stats) {
f->dump_string("", st);
}
f->close_section();
}
}
if (cct->_conf->rocksdb_collect_extended_stats) {
if (dbstats) {
f->open_object_section("rocksdb_extended_statistics");
string stat_str = dbstats->ToString();
vector<string> stats;
split_stats(stat_str, '\n', stats);
f->dump_string("rocksdb_extended_statistics", "");
for (auto st :stats) {
f->dump_string(".", st);
}
f->close_section();
}
f->open_object_section("rocksdbstore_perf_counters");
logger->dump_formatted(f, false, false);
f->close_section();
}
if (cct->_conf->rocksdb_collect_memory_stats) {
f->open_object_section("rocksdb_memtable_statistics");
std::string str;
if (!bbt_opts.no_block_cache) {
str.append(stringify(bbt_opts.block_cache->GetUsage()));
f->dump_string("block_cache_usage", str.data());
str.clear();
str.append(stringify(bbt_opts.block_cache->GetPinnedUsage()));
f->dump_string("block_cache_pinned_blocks_usage", str);
str.clear();
}
db->GetProperty("rocksdb.cur-size-all-mem-tables", &str);
f->dump_string("rocksdb_memtable_usage", str);
str.clear();
db->GetProperty("rocksdb.estimate-table-readers-mem", &str);
f->dump_string("rocksdb_index_filter_blocks_usage", str);
f->close_section();
}
}
struct RocksDBStore::RocksWBHandler: public rocksdb::WriteBatch::Handler {
RocksWBHandler(const RocksDBStore& db) : db(db) {}
const RocksDBStore& db;
std::stringstream seen;
int num_seen = 0;
void dump(const char* op_name,
uint32_t column_family_id,
const rocksdb::Slice& key_in,
const rocksdb::Slice* value = nullptr) {
string prefix;
string key;
ssize_t size = value ? value->size() : -1;
seen << std::endl << op_name << "(";
if (column_family_id == 0) {
db.split_key(key_in, &prefix, &key);
} else {
auto it = db.cf_ids_to_prefix.find(column_family_id);
ceph_assert(it != db.cf_ids_to_prefix.end());
prefix = it->second;
key = key_in.ToString();
}
seen << " prefix = " << prefix;
seen << " key = " << pretty_binary_string(key);
if (size != -1)
seen << " value size = " << std::to_string(size);
seen << ")";
num_seen++;
}
void Put(const rocksdb::Slice& key,
const rocksdb::Slice& value) override {
dump("Put", 0, key, &value);
}
rocksdb::Status PutCF(uint32_t column_family_id, const rocksdb::Slice& key,
const rocksdb::Slice& value) override {
dump("PutCF", column_family_id, key, &value);
return rocksdb::Status::OK();
}
void SingleDelete(const rocksdb::Slice& key) override {
dump("SingleDelete", 0, key);
}
rocksdb::Status SingleDeleteCF(uint32_t column_family_id, const rocksdb::Slice& key) override {
dump("SingleDeleteCF", column_family_id, key);
return rocksdb::Status::OK();
}
void Delete(const rocksdb::Slice& key) override {
dump("Delete", 0, key);
}
rocksdb::Status DeleteCF(uint32_t column_family_id, const rocksdb::Slice& key) override {
dump("DeleteCF", column_family_id, key);
return rocksdb::Status::OK();
}
void Merge(const rocksdb::Slice& key,
const rocksdb::Slice& value) override {
dump("Merge", 0, key, &value);
}
rocksdb::Status MergeCF(uint32_t column_family_id, const rocksdb::Slice& key,
const rocksdb::Slice& value) override {
dump("MergeCF", column_family_id, key, &value);
return rocksdb::Status::OK();
}
bool Continue() override { return num_seen < 50; }
};
int RocksDBStore::submit_common(rocksdb::WriteOptions& woptions, KeyValueDB::Transaction t)
{
// enable rocksdb breakdown
// considering performance overhead, default is disabled
if (cct->_conf->rocksdb_perf) {
rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableTimeExceptForMutex);
rocksdb::get_perf_context()->Reset();
}
RocksDBTransactionImpl * _t =
static_cast<RocksDBTransactionImpl *>(t.get());
woptions.disableWAL = disableWAL;
lgeneric_subdout(cct, rocksdb, 30) << __func__;
RocksWBHandler bat_txc(*this);
_t->bat.Iterate(&bat_txc);
*_dout << " Rocksdb transaction: " << bat_txc.seen.str() << dendl;
rocksdb::Status s = db->Write(woptions, &_t->bat);
if (!s.ok()) {
RocksWBHandler rocks_txc(*this);
_t->bat.Iterate(&rocks_txc);
derr << __func__ << " error: " << s.ToString() << " code = " << s.code()
<< " Rocksdb transaction: " << rocks_txc.seen.str() << dendl;
}
if (cct->_conf->rocksdb_perf) {
utime_t write_memtable_time;
utime_t write_delay_time;
utime_t write_wal_time;
utime_t write_pre_and_post_process_time;
write_wal_time.set_from_double(
static_cast<double>(rocksdb::get_perf_context()->write_wal_time)/1000000000);
write_memtable_time.set_from_double(
static_cast<double>(rocksdb::get_perf_context()->write_memtable_time)/1000000000);
write_delay_time.set_from_double(
static_cast<double>(rocksdb::get_perf_context()->write_delay_time)/1000000000);
write_pre_and_post_process_time.set_from_double(
static_cast<double>(rocksdb::get_perf_context()->write_pre_and_post_process_time)/1000000000);
logger->tinc(l_rocksdb_write_memtable_time, write_memtable_time);
logger->tinc(l_rocksdb_write_delay_time, write_delay_time);
logger->tinc(l_rocksdb_write_wal_time, write_wal_time);
logger->tinc(l_rocksdb_write_pre_and_post_process_time, write_pre_and_post_process_time);
}
return s.ok() ? 0 : -1;
}
int RocksDBStore::submit_transaction(KeyValueDB::Transaction t)
{
utime_t start = ceph_clock_now();
rocksdb::WriteOptions woptions;
woptions.sync = false;
int result = submit_common(woptions, t);
utime_t lat = ceph_clock_now() - start;
logger->tinc(l_rocksdb_submit_latency, lat);
return result;
}
int RocksDBStore::submit_transaction_sync(KeyValueDB::Transaction t)
{
utime_t start = ceph_clock_now();
rocksdb::WriteOptions woptions;
// if disableWAL is set, sync cannot be used
woptions.sync = !disableWAL;
int result = submit_common(woptions, t);
utime_t lat = ceph_clock_now() - start;
logger->tinc(l_rocksdb_submit_sync_latency, lat);
return result;
}
RocksDBStore::RocksDBTransactionImpl::RocksDBTransactionImpl(RocksDBStore *_db)
{
db = _db;
}
void RocksDBStore::RocksDBTransactionImpl::put_bat(
rocksdb::WriteBatch& bat,
rocksdb::ColumnFamilyHandle *cf,
const string &key,
const bufferlist &to_set_bl)
{
// bufferlist::c_str() is non-constant, so we can't call c_str()
if (to_set_bl.is_contiguous() && to_set_bl.length() > 0) {
bat.Put(cf,
rocksdb::Slice(key),
rocksdb::Slice(to_set_bl.buffers().front().c_str(),
to_set_bl.length()));
} else {
rocksdb::Slice key_slice(key);
vector<rocksdb::Slice> value_slices(to_set_bl.get_num_buffers());
bat.Put(cf,
rocksdb::SliceParts(&key_slice, 1),
prepare_sliceparts(to_set_bl, &value_slices));
}
}
void RocksDBStore::RocksDBTransactionImpl::set(
const string &prefix,
const string &k,
const bufferlist &to_set_bl)
{
auto cf = db->get_cf_handle(prefix, k);
if (cf) {
put_bat(bat, cf, k, to_set_bl);
} else {
string key = combine_strings(prefix, k);
put_bat(bat, db->default_cf, key, to_set_bl);
}
}
void RocksDBStore::RocksDBTransactionImpl::set(
const string &prefix,
const char *k, size_t keylen,
const bufferlist &to_set_bl)
{
auto cf = db->get_cf_handle(prefix, k, keylen);
if (cf) {
string key(k, keylen); // fixme?
put_bat(bat, cf, key, to_set_bl);
} else {
string key;
combine_strings(prefix, k, keylen, &key);
put_bat(bat, db->default_cf, key, to_set_bl);
}
}
void RocksDBStore::RocksDBTransactionImpl::rmkey(const string &prefix,
const string &k)
{
auto cf = db->get_cf_handle(prefix, k);
if (cf) {
bat.Delete(cf, rocksdb::Slice(k));
} else {
bat.Delete(db->default_cf, combine_strings(prefix, k));
}
}
void RocksDBStore::RocksDBTransactionImpl::rmkey(const string &prefix,
const char *k,
size_t keylen)
{
auto cf = db->get_cf_handle(prefix, k, keylen);
if (cf) {
bat.Delete(cf, rocksdb::Slice(k, keylen));
} else {
string key;
combine_strings(prefix, k, keylen, &key);
bat.Delete(db->default_cf, rocksdb::Slice(key));
}
}
void RocksDBStore::RocksDBTransactionImpl::rm_single_key(const string &prefix,
const string &k)
{
auto cf = db->get_cf_handle(prefix, k);
if (cf) {
bat.SingleDelete(cf, k);
} else {
bat.SingleDelete(db->default_cf, combine_strings(prefix, k));
}
}
void RocksDBStore::RocksDBTransactionImpl::rmkeys_by_prefix(const string &prefix)
{
auto p_iter = db->cf_handles.find(prefix);
if (p_iter == db->cf_handles.end()) {
uint64_t cnt = db->get_delete_range_threshold();
bat.SetSavePoint();
auto it = db->get_iterator(prefix);
for (it->seek_to_first(); it->valid() && (--cnt) != 0; it->next()) {
bat.Delete(db->default_cf, combine_strings(prefix, it->key()));
}
if (cnt == 0) {
bat.RollbackToSavePoint();
string endprefix = prefix;
endprefix.push_back('\x01');
bat.DeleteRange(db->default_cf,
combine_strings(prefix, string()),
combine_strings(endprefix, string()));
} else {
bat.PopSavePoint();
}
} else {
ceph_assert(p_iter->second.handles.size() >= 1);
for (auto cf : p_iter->second.handles) {
uint64_t cnt = db->get_delete_range_threshold();
bat.SetSavePoint();
auto it = db->new_shard_iterator(cf);
for (it->seek_to_first(); it->valid() && (--cnt) != 0; it->next()) {
bat.Delete(cf, it->key());
}
if (cnt == 0) {
bat.RollbackToSavePoint();
string endprefix = "\xff\xff\xff\xff"; // FIXME: this is cheating...
bat.DeleteRange(cf, string(), endprefix);
} else {
bat.PopSavePoint();
}
}
}
}
void RocksDBStore::RocksDBTransactionImpl::rm_range_keys(const string &prefix,
const string &start,
const string &end)
{
ldout(db->cct, 10) << __func__
<< " enter prefix=" << prefix
<< " start=" << pretty_binary_string(start)
<< " end=" << pretty_binary_string(end) << dendl;
auto p_iter = db->cf_handles.find(prefix);
uint64_t cnt = db->get_delete_range_threshold();
if (p_iter == db->cf_handles.end()) {
uint64_t cnt0 = cnt;
bat.SetSavePoint();
auto it = db->get_iterator(prefix);
for (it->lower_bound(start);
it->valid() && db->comparator->Compare(it->key(), end) < 0 && (--cnt) != 0;
it->next()) {
bat.Delete(db->default_cf, combine_strings(prefix, it->key()));
}
ldout(db->cct, 15) << __func__
<< " count = " << cnt0 - cnt
<< dendl;
if (cnt == 0) {
ldout(db->cct, 10) << __func__ << " p_iter == end(), resorting to DeleteRange"
<< dendl;
bat.RollbackToSavePoint();
bat.DeleteRange(db->default_cf,
rocksdb::Slice(combine_strings(prefix, start)),
rocksdb::Slice(combine_strings(prefix, end)));
} else {
bat.PopSavePoint();
}
} else if (cnt == 0) {
ceph_assert(p_iter->second.handles.size() >= 1);
for (auto cf : p_iter->second.handles) {
ldout(db->cct, 10) << __func__ << " p_iter != end(), resorting to DeleteRange"
<< dendl;
bat.DeleteRange(cf, rocksdb::Slice(start), rocksdb::Slice(end));
}
} else {
auto bounds = KeyValueDB::IteratorBounds();
bounds.lower_bound = start;
bounds.upper_bound = end;
ceph_assert(p_iter->second.handles.size() >= 1);
for (auto cf : p_iter->second.handles) {
cnt = db->get_delete_range_threshold();
uint64_t cnt0 = cnt;
bat.SetSavePoint();
auto it = db->new_shard_iterator(cf, prefix, bounds);
for (it->lower_bound(start);
it->valid() && (--cnt) != 0;
it->next()) {
bat.Delete(cf, it->key());
}
ldout(db->cct, 10) << __func__
<< " count = " << cnt0 - cnt
<< dendl;
if (cnt == 0) {
ldout(db->cct, 10) << __func__ << " p_iter != end(), resorting to DeleteRange"
<< dendl;
bat.RollbackToSavePoint();
bat.DeleteRange(cf, rocksdb::Slice(start), rocksdb::Slice(end));
} else {
bat.PopSavePoint();
}
}
}
ldout(db->cct, 10) << __func__ << " end" << dendl;
}
void RocksDBStore::RocksDBTransactionImpl::merge(
const string &prefix,
const string &k,
const bufferlist &to_set_bl)
{
auto cf = db->get_cf_handle(prefix, k);
if (cf) {
// bufferlist::c_str() is non-constant, so we can't call c_str()
if (to_set_bl.is_contiguous() && to_set_bl.length() > 0) {
bat.Merge(
cf,
rocksdb::Slice(k),
rocksdb::Slice(to_set_bl.buffers().front().c_str(), to_set_bl.length()));
} else {
// make a copy
rocksdb::Slice key_slice(k);
vector<rocksdb::Slice> value_slices(to_set_bl.get_num_buffers());
bat.Merge(cf, rocksdb::SliceParts(&key_slice, 1),
prepare_sliceparts(to_set_bl, &value_slices));
}
} else {
string key = combine_strings(prefix, k);
// bufferlist::c_str() is non-constant, so we can't call c_str()
if (to_set_bl.is_contiguous() && to_set_bl.length() > 0) {
bat.Merge(
db->default_cf,
rocksdb::Slice(key),
rocksdb::Slice(to_set_bl.buffers().front().c_str(), to_set_bl.length()));
} else {
// make a copy
rocksdb::Slice key_slice(key);
vector<rocksdb::Slice> value_slices(to_set_bl.get_num_buffers());
bat.Merge(
db->default_cf,
rocksdb::SliceParts(&key_slice, 1),
prepare_sliceparts(to_set_bl, &value_slices));
}
}
}
int RocksDBStore::get(
const string &prefix,
const std::set<string> &keys,
std::map<string, bufferlist> *out)
{
rocksdb::PinnableSlice value;
utime_t start = ceph_clock_now();
if (cf_handles.count(prefix) > 0) {
for (auto& key : keys) {
auto cf_handle = get_cf_handle(prefix, key);
auto status = db->Get(rocksdb::ReadOptions(),
cf_handle,
rocksdb::Slice(key),
&value);
if (status.ok()) {
(*out)[key].append(value.data(), value.size());
} else if (status.IsIOError()) {
ceph_abort_msg(status.getState());
}
value.Reset();
}
} else {
for (auto& key : keys) {
string k = combine_strings(prefix, key);
auto status = db->Get(rocksdb::ReadOptions(),
default_cf,
rocksdb::Slice(k),
&value);
if (status.ok()) {
(*out)[key].append(value.data(), value.size());
} else if (status.IsIOError()) {
ceph_abort_msg(status.getState());
}
value.Reset();
}
}
utime_t lat = ceph_clock_now() - start;
logger->tinc(l_rocksdb_get_latency, lat);
return 0;
}
int RocksDBStore::get(
const string &prefix,
const string &key,
bufferlist *out)
{
ceph_assert(out && (out->length() == 0));
utime_t start = ceph_clock_now();
int r = 0;
rocksdb::PinnableSlice value;
rocksdb::Status s;
auto cf = get_cf_handle(prefix, key);
if (cf) {
s = db->Get(rocksdb::ReadOptions(),
cf,
rocksdb::Slice(key),
&value);
} else {
string k = combine_strings(prefix, key);
s = db->Get(rocksdb::ReadOptions(),
default_cf,
rocksdb::Slice(k),
&value);
}
if (s.ok()) {
out->append(value.data(), value.size());
} else if (s.IsNotFound()) {
r = -ENOENT;
} else {
ceph_abort_msg(s.getState());
}
utime_t lat = ceph_clock_now() - start;
logger->tinc(l_rocksdb_get_latency, lat);
return r;
}
int RocksDBStore::get(
const string& prefix,
const char *key,
size_t keylen,
bufferlist *out)
{
ceph_assert(out && (out->length() == 0));
utime_t start = ceph_clock_now();
int r = 0;
rocksdb::PinnableSlice value;
rocksdb::Status s;
auto cf = get_cf_handle(prefix, key, keylen);
if (cf) {
s = db->Get(rocksdb::ReadOptions(),
cf,
rocksdb::Slice(key, keylen),
&value);
} else {
string k;
combine_strings(prefix, key, keylen, &k);
s = db->Get(rocksdb::ReadOptions(),
default_cf,
rocksdb::Slice(k),
&value);
}
if (s.ok()) {
out->append(value.data(), value.size());
} else if (s.IsNotFound()) {
r = -ENOENT;
} else {
ceph_abort_msg(s.getState());
}
utime_t lat = ceph_clock_now() - start;
logger->tinc(l_rocksdb_get_latency, lat);
return r;
}
int RocksDBStore::split_key(rocksdb::Slice in, string *prefix, string *key)
{
size_t prefix_len = 0;
// Find separator inside Slice
char* separator = (char*) memchr(in.data(), 0, in.size());
if (separator == NULL)
return -EINVAL;
prefix_len = size_t(separator - in.data());
if (prefix_len >= in.size())
return -EINVAL;
// Fetch prefix and/or key directly from Slice
if (prefix)
*prefix = string(in.data(), prefix_len);
if (key)
*key = string(separator+1, in.size()-prefix_len-1);
return 0;
}
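// Illustrative sketch (assumes only the declarations above and in
// RocksDBStore.h): combine_strings() and split_key() round-trip a
// (prefix, key) pair through the single '\0' separator used for the default
// column family:
//
//   std::string raw = RocksDBStore::combine_strings("P", "foo"); // "P\0foo"
//   std::string prefix, key;
//   int r = RocksDBStore::split_key(rocksdb::Slice(raw), &prefix, &key);
//   // r == 0, prefix == "P", key == "foo"
//
// A raw key containing no '\0' byte makes split_key() return -EINVAL.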
void RocksDBStore::compact()
{
logger->inc(l_rocksdb_compact);
rocksdb::CompactRangeOptions options;
db->CompactRange(options, default_cf, nullptr, nullptr);
for (auto cf : cf_handles) {
for (auto shard_cf : cf.second.handles) {
db->CompactRange(
options,
shard_cf,
nullptr, nullptr);
}
}
}
void RocksDBStore::compact_thread_entry()
{
std::unique_lock l{compact_queue_lock};
dout(10) << __func__ << " enter" << dendl;
while (!compact_queue_stop) {
if (!compact_queue.empty()) {
auto range = compact_queue.front();
compact_queue.pop_front();
logger->set(l_rocksdb_compact_queue_len, compact_queue.size());
l.unlock();
logger->inc(l_rocksdb_compact_range);
if (range.first.empty() && range.second.empty()) {
compact();
} else {
compact_range(range.first, range.second);
}
l.lock();
continue;
}
dout(10) << __func__ << " waiting" << dendl;
compact_queue_cond.wait(l);
}
dout(10) << __func__ << " exit" << dendl;
}
void RocksDBStore::compact_range_async(const string& start, const string& end)
{
std::lock_guard l(compact_queue_lock);
// try to merge adjacent ranges. this is O(n), but the queue should
// be short. note that we do not cover all overlap cases and merge
// opportunities here, but we capture the ones we currently need.
list< pair<string,string> >::iterator p = compact_queue.begin();
while (p != compact_queue.end()) {
if (p->first == start && p->second == end) {
// dup; no-op
return;
}
if (start <= p->first && p->first <= end) {
// new region crosses start of existing range
      // select the larger right bound
compact_queue.push_back(make_pair(start, end > p->second ? end : p->second));
compact_queue.erase(p);
logger->inc(l_rocksdb_compact_queue_merge);
break;
}
if (start <= p->second && p->second <= end) {
// new region crosses end of existing range
//p->first < p->second and p->second <= end, so p->first <= end.
      //We would have taken the previous branch if start <= p->first, so here start > p->first.
compact_queue.push_back(make_pair(p->first, end));
compact_queue.erase(p);
logger->inc(l_rocksdb_compact_queue_merge);
break;
}
++p;
}
if (p == compact_queue.end()) {
// no merge, new entry.
compact_queue.push_back(make_pair(start, end));
logger->set(l_rocksdb_compact_queue_len, compact_queue.size());
}
compact_queue_cond.notify_all();
if (!compact_thread.is_started()) {
compact_thread.create("rstore_compact");
}
}
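// Worked example of the merge logic above (illustrative only): with ("b","d")
// already queued, a new request ("a","c") matches the first case
// ("a" <= "b" <= "c") and the queued entry becomes ("a","d"); a request
// ("e","f") overlaps neither bound and is appended as a separate entry.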
bool RocksDBStore::check_omap_dir(string &omap_dir)
{
rocksdb::Options options;
options.create_if_missing = true;
rocksdb::DB *db;
rocksdb::Status status = rocksdb::DB::Open(options, omap_dir, &db);
delete db;
db = nullptr;
return status.ok();
}
void RocksDBStore::compact_range(const string& start, const string& end)
{
rocksdb::CompactRangeOptions options;
rocksdb::Slice cstart(start);
rocksdb::Slice cend(end);
string prefix_start, key_start;
string prefix_end, key_end;
string key_highest = "\xff\xff\xff\xff"; //cheating
string key_lowest = "";
auto compact_range = [&] (const decltype(cf_handles)::iterator column_it,
const std::string& start,
const std::string& end) {
rocksdb::Slice cstart(start);
rocksdb::Slice cend(end);
for (const auto& shard_it : column_it->second.handles) {
db->CompactRange(options, shard_it, &cstart, &cend);
}
};
db->CompactRange(options, default_cf, &cstart, &cend);
split_key(cstart, &prefix_start, &key_start);
split_key(cend, &prefix_end, &key_end);
if (prefix_start == prefix_end) {
const auto& column = cf_handles.find(prefix_start);
if (column != cf_handles.end()) {
compact_range(column, key_start, key_end);
}
} else {
auto column = cf_handles.find(prefix_start);
if (column != cf_handles.end()) {
compact_range(column, key_start, key_highest);
++column;
}
const auto& column_end = cf_handles.find(prefix_end);
while (column != column_end) {
compact_range(column, key_lowest, key_highest);
column++;
}
if (column != cf_handles.end()) {
compact_range(column, key_lowest, key_end);
}
}
}
RocksDBStore::RocksDBWholeSpaceIteratorImpl::~RocksDBWholeSpaceIteratorImpl()
{
delete dbiter;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_first()
{
dbiter->SeekToFirst();
ceph_assert(!dbiter->status().IsIOError());
return dbiter->status().ok() ? 0 : -1;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_first(const string &prefix)
{
rocksdb::Slice slice_prefix(prefix);
dbiter->Seek(slice_prefix);
ceph_assert(!dbiter->status().IsIOError());
return dbiter->status().ok() ? 0 : -1;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_last()
{
dbiter->SeekToLast();
ceph_assert(!dbiter->status().IsIOError());
return dbiter->status().ok() ? 0 : -1;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_last(const string &prefix)
{
string limit = past_prefix(prefix);
rocksdb::Slice slice_limit(limit);
dbiter->Seek(slice_limit);
if (!dbiter->Valid()) {
dbiter->SeekToLast();
} else {
dbiter->Prev();
}
return dbiter->status().ok() ? 0 : -1;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::upper_bound(const string &prefix, const string &after)
{
lower_bound(prefix, after);
if (valid()) {
pair<string,string> key = raw_key();
if (key.first == prefix && key.second == after)
next();
}
return dbiter->status().ok() ? 0 : -1;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::lower_bound(const string &prefix, const string &to)
{
string bound = combine_strings(prefix, to);
rocksdb::Slice slice_bound(bound);
dbiter->Seek(slice_bound);
return dbiter->status().ok() ? 0 : -1;
}
bool RocksDBStore::RocksDBWholeSpaceIteratorImpl::valid()
{
return dbiter->Valid();
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::next()
{
if (valid()) {
dbiter->Next();
}
ceph_assert(!dbiter->status().IsIOError());
return dbiter->status().ok() ? 0 : -1;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::prev()
{
if (valid()) {
dbiter->Prev();
}
ceph_assert(!dbiter->status().IsIOError());
return dbiter->status().ok() ? 0 : -1;
}
string RocksDBStore::RocksDBWholeSpaceIteratorImpl::key()
{
string out_key;
split_key(dbiter->key(), 0, &out_key);
return out_key;
}
pair<string,string> RocksDBStore::RocksDBWholeSpaceIteratorImpl::raw_key()
{
string prefix, key;
split_key(dbiter->key(), &prefix, &key);
return make_pair(prefix, key);
}
bool RocksDBStore::RocksDBWholeSpaceIteratorImpl::raw_key_is_prefixed(const string &prefix) {
  // Look for "prefix\0" right in the rocksdb::Slice
rocksdb::Slice key = dbiter->key();
if ((key.size() > prefix.length()) && (key[prefix.length()] == '\0')) {
return memcmp(key.data(), prefix.c_str(), prefix.length()) == 0;
} else {
return false;
}
}
bufferlist RocksDBStore::RocksDBWholeSpaceIteratorImpl::value()
{
return to_bufferlist(dbiter->value());
}
size_t RocksDBStore::RocksDBWholeSpaceIteratorImpl::key_size()
{
return dbiter->key().size();
}
size_t RocksDBStore::RocksDBWholeSpaceIteratorImpl::value_size()
{
return dbiter->value().size();
}
bufferptr RocksDBStore::RocksDBWholeSpaceIteratorImpl::value_as_ptr()
{
rocksdb::Slice val = dbiter->value();
return bufferptr(val.data(), val.size());
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::status()
{
return dbiter->status().ok() ? 0 : -1;
}
string RocksDBStore::past_prefix(const string &prefix)
{
string limit = prefix;
limit.push_back(1);
return limit;
}
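// Illustrative note: combine_strings() joins prefix and key with a '\0' byte,
// so appending '\x01' to the prefix yields the smallest string sorting after
// every combined key of that prefix; e.g. past_prefix("P") == "P\x01", and any
// raw key "P\0<key>" sorts strictly below it.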
class CFIteratorImpl : public KeyValueDB::IteratorImpl {
protected:
string prefix;
rocksdb::Iterator *dbiter;
const KeyValueDB::IteratorBounds bounds;
const rocksdb::Slice iterate_lower_bound;
const rocksdb::Slice iterate_upper_bound;
public:
explicit CFIteratorImpl(const RocksDBStore* db,
const std::string& p,
rocksdb::ColumnFamilyHandle* cf,
KeyValueDB::IteratorBounds bounds_)
: prefix(p), bounds(std::move(bounds_)),
iterate_lower_bound(make_slice(bounds.lower_bound)),
iterate_upper_bound(make_slice(bounds.upper_bound))
{
auto options = rocksdb::ReadOptions();
if (db->cct->_conf->osd_rocksdb_iterator_bounds_enabled) {
if (bounds.lower_bound) {
options.iterate_lower_bound = &iterate_lower_bound;
}
if (bounds.upper_bound) {
options.iterate_upper_bound = &iterate_upper_bound;
}
}
dbiter = db->db->NewIterator(options, cf);
}
~CFIteratorImpl() {
delete dbiter;
}
int seek_to_first() override {
dbiter->SeekToFirst();
return dbiter->status().ok() ? 0 : -1;
}
int seek_to_last() override {
dbiter->SeekToLast();
return dbiter->status().ok() ? 0 : -1;
}
int upper_bound(const string &after) override {
lower_bound(after);
if (valid() && (key() == after)) {
next();
}
return dbiter->status().ok() ? 0 : -1;
}
int lower_bound(const string &to) override {
rocksdb::Slice slice_bound(to);
dbiter->Seek(slice_bound);
return dbiter->status().ok() ? 0 : -1;
}
int next() override {
if (valid()) {
dbiter->Next();
}
return dbiter->status().ok() ? 0 : -1;
}
int prev() override {
if (valid()) {
dbiter->Prev();
}
return dbiter->status().ok() ? 0 : -1;
}
bool valid() override {
return dbiter->Valid();
}
string key() override {
return dbiter->key().ToString();
}
std::pair<std::string, std::string> raw_key() override {
return make_pair(prefix, key());
}
bufferlist value() override {
return to_bufferlist(dbiter->value());
}
bufferptr value_as_ptr() override {
rocksdb::Slice val = dbiter->value();
return bufferptr(val.data(), val.size());
}
int status() override {
return dbiter->status().ok() ? 0 : -1;
}
};
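// Note on the bounds handling above (illustrative): when
// osd_rocksdb_iterator_bounds_enabled is set, the caller-supplied
// IteratorBounds are handed to RocksDB as iterate_lower_bound /
// iterate_upper_bound, restricting iteration to [lower_bound, upper_bound)
// inside RocksDB itself rather than filtering keys after the fact.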
//merge the column family iterators with the default-cf ("rest") iterator
class WholeMergeIteratorImpl : public KeyValueDB::WholeSpaceIteratorImpl {
private:
RocksDBStore* db;
KeyValueDB::WholeSpaceIterator main;
std::map<std::string, KeyValueDB::Iterator> shards;
std::map<std::string, KeyValueDB::Iterator>::iterator current_shard;
enum {on_main, on_shard} smaller;
public:
WholeMergeIteratorImpl(RocksDBStore* db)
: db(db)
, main(db->get_default_cf_iterator())
{
for (auto& e : db->cf_handles) {
shards.emplace(e.first, db->get_iterator(e.first));
}
}
  // returns true if the value in main is smaller than the one in shards
  // an invalid iterator compares larger than any actual value
bool is_main_smaller() {
if (main->valid()) {
if (current_shard != shards.end()) {
auto main_rk = main->raw_key();
ceph_assert(current_shard->second->valid());
auto shards_rk = current_shard->second->raw_key();
if (main_rk.first < shards_rk.first)
return true;
if (main_rk.first > shards_rk.first)
return false;
return main_rk.second < shards_rk.second;
} else {
return true;
}
} else {
if (current_shard != shards.end()) {
return false;
} else {
//this means that neither is valid
//we select main to be smaller, so valid() will signal properly
return true;
}
}
}
int seek_to_first() override {
int r0 = main->seek_to_first();
int r1 = 0;
// find first shard that has some data
current_shard = shards.begin();
while (current_shard != shards.end()) {
r1 = current_shard->second->seek_to_first();
if (r1 != 0 || current_shard->second->valid()) {
//this is the first shard that will yield some keys
break;
}
++current_shard;
}
smaller = is_main_smaller() ? on_main : on_shard;
return r0 == 0 && r1 == 0 ? 0 : -1;
}
int seek_to_first(const std::string &prefix) override {
int r0 = main->seek_to_first(prefix);
int r1 = 0;
// find first shard that has some data
current_shard = shards.lower_bound(prefix);
while (current_shard != shards.end()) {
r1 = current_shard->second->seek_to_first();
if (r1 != 0 || current_shard->second->valid()) {
//this is the first shard that will yield some keys
break;
}
++current_shard;
}
smaller = is_main_smaller() ? on_main : on_shard;
return r0 == 0 && r1 == 0 ? 0 : -1;
};
int seek_to_last() override {
int r0 = main->seek_to_last();
int r1 = 0;
r1 = shards_seek_to_last();
//if we have 2 candidates, we need to select
if (main->valid()) {
if (shards_valid()) {
if (is_main_smaller()) {
smaller = on_shard;
main->next();
} else {
smaller = on_main;
shards_next();
}
} else {
smaller = on_main;
}
} else {
if (shards_valid()) {
smaller = on_shard;
} else {
smaller = on_main;
}
}
return r0 == 0 && r1 == 0 ? 0 : -1;
}
int seek_to_last(const std::string &prefix) override {
int r0 = main->seek_to_last(prefix);
int r1 = 0;
// find last shard that has some data
bool found = false;
current_shard = shards.lower_bound(prefix);
while (current_shard != shards.begin()) {
r1 = current_shard->second->seek_to_last();
if (r1 != 0)
break;
if (current_shard->second->valid()) {
found = true;
break;
}
}
//if we have 2 candidates, we need to select
if (main->valid() && found) {
if (is_main_smaller()) {
main->next();
} else {
shards_next();
}
}
if (!found) {
//set shards state that properly represents eof
current_shard = shards.end();
}
smaller = is_main_smaller() ? on_main : on_shard;
return r0 == 0 && r1 == 0 ? 0 : -1;
}
int upper_bound(const std::string &prefix, const std::string &after) override {
int r0 = main->upper_bound(prefix, after);
int r1 = 0;
if (r0 != 0)
return r0;
current_shard = shards.lower_bound(prefix);
if (current_shard != shards.end()) {
bool located = false;
if (current_shard->first == prefix) {
r1 = current_shard->second->upper_bound(after);
if (r1 != 0)
return r1;
if (current_shard->second->valid()) {
located = true;
}
}
if (!located) {
while (current_shard != shards.end()) {
r1 = current_shard->second->seek_to_first();
if (r1 != 0)
return r1;
if (current_shard->second->valid())
break;
++current_shard;
}
}
}
smaller = is_main_smaller() ? on_main : on_shard;
return 0;
}
int lower_bound(const std::string &prefix, const std::string &to) override {
int r0 = main->lower_bound(prefix, to);
int r1 = 0;
if (r0 != 0)
return r0;
current_shard = shards.lower_bound(prefix);
if (current_shard != shards.end()) {
bool located = false;
if (current_shard->first == prefix) {
r1 = current_shard->second->lower_bound(to);
if (r1 != 0)
return r1;
if (current_shard->second->valid()) {
located = true;
}
}
if (!located) {
while (current_shard != shards.end()) {
r1 = current_shard->second->seek_to_first();
if (r1 != 0)
return r1;
if (current_shard->second->valid())
break;
++current_shard;
}
}
}
smaller = is_main_smaller() ? on_main : on_shard;
return 0;
}
bool valid() override {
if (smaller == on_main) {
return main->valid();
} else {
if (current_shard == shards.end())
return false;
return current_shard->second->valid();
}
};
int next() override {
int r;
if (smaller == on_main) {
r = main->next();
} else {
r = shards_next();
}
if (r != 0)
return r;
smaller = is_main_smaller() ? on_main : on_shard;
return 0;
}
int prev() override {
int r;
bool main_was_valid = false;
if (main->valid()) {
main_was_valid = true;
r = main->prev();
} else {
r = main->seek_to_last();
}
if (r != 0)
return r;
bool shards_was_valid = false;
if (shards_valid()) {
shards_was_valid = true;
r = shards_prev();
} else {
r = shards_seek_to_last();
}
if (r != 0)
return r;
if (!main->valid() && !shards_valid()) {
//end, no previous. set marker so valid() can work
smaller = on_main;
return 0;
}
    //if only one is valid, select it
    //if both are valid, select the larger and advance the other
if (main->valid()) {
if (shards_valid()) {
if (is_main_smaller()) {
smaller = on_shard;
if (main_was_valid) {
if (main->valid()) {
r = main->next();
} else {
r = main->seek_to_first();
}
} else {
//if we have resurrected main, kill it
if (main->valid()) {
main->next();
}
}
} else {
smaller = on_main;
if (shards_was_valid) {
if (shards_valid()) {
r = shards_next();
} else {
r = shards_seek_to_first();
}
} else {
          //if we have resurrected shards, kill it
if (shards_valid()) {
shards_next();
}
}
}
} else {
smaller = on_main;
r = shards_seek_to_first();
}
} else {
smaller = on_shard;
r = main->seek_to_first();
}
return r;
}
std::string key() override
{
if (smaller == on_main) {
return main->key();
} else {
return current_shard->second->key();
}
}
std::pair<std::string,std::string> raw_key() override
{
if (smaller == on_main) {
return main->raw_key();
} else {
return { current_shard->first, current_shard->second->key() };
}
}
bool raw_key_is_prefixed(const std::string &prefix) override
{
if (smaller == on_main) {
return main->raw_key_is_prefixed(prefix);
} else {
return current_shard->first == prefix;
}
}
ceph::buffer::list value() override
{
if (smaller == on_main) {
return main->value();
} else {
return current_shard->second->value();
}
}
int status() override
{
//because we already had to inspect key, it must be ok
return 0;
}
size_t key_size() override
{
if (smaller == on_main) {
return main->key_size();
} else {
return current_shard->second->key().size();
}
}
size_t value_size() override
{
if (smaller == on_main) {
return main->value_size();
} else {
return current_shard->second->value().length();
}
}
int shards_valid() {
if (current_shard == shards.end())
return false;
return current_shard->second->valid();
}
int shards_next() {
if (current_shard == shards.end()) {
//illegal to next() on !valid()
return -1;
}
int r = 0;
r = current_shard->second->next();
if (r != 0)
return r;
if (current_shard->second->valid())
return 0;
    //current shard exhausted, search for the next key
++current_shard;
while (current_shard != shards.end()) {
r = current_shard->second->seek_to_first();
if (r != 0)
return r;
if (current_shard->second->valid())
break;
++current_shard;
}
    //whether or not we found a key, this is a success
return 0;
}
int shards_prev() {
if (current_shard == shards.end()) {
//illegal to prev() on !valid()
return -1;
}
int r = current_shard->second->prev();
while (r == 0) {
if (current_shard->second->valid()) {
break;
}
if (current_shard == shards.begin()) {
//we have reached pre-first element
//this makes it !valid(), but guarantees next() moves to first element
break;
}
--current_shard;
r = current_shard->second->seek_to_last();
}
return r;
}
int shards_seek_to_last() {
int r = 0;
current_shard = shards.end();
if (current_shard == shards.begin()) {
//no shards at all
return 0;
}
while (current_shard != shards.begin()) {
--current_shard;
r = current_shard->second->seek_to_last();
if (r != 0)
return r;
if (current_shard->second->valid()) {
return 0;
}
}
//no keys at all
current_shard = shards.end();
return r;
}
int shards_seek_to_first() {
int r = 0;
current_shard = shards.begin();
while (current_shard != shards.end()) {
r = current_shard->second->seek_to_first();
if (r != 0)
break;
if (current_shard->second->valid()) {
//this is the first shard that will yield some keys
break;
}
++current_shard;
}
return r;
}
};
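// Illustrative note on the merge order above: raw keys compare first by
// prefix and then by key, so with main positioned at ("p1","a") and the
// current shard at ("p1","b"), is_main_smaller() returns true and the main
// entry is yielded first; an exhausted side always loses to a valid one.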
class ShardMergeIteratorImpl : public KeyValueDB::IteratorImpl {
private:
struct KeyLess {
private:
const rocksdb::Comparator* comparator;
public:
KeyLess(const rocksdb::Comparator* comparator) : comparator(comparator) { };
bool operator()(rocksdb::Iterator* a, rocksdb::Iterator* b) const
{
if (a->Valid()) {
if (b->Valid()) {
return comparator->Compare(a->key(), b->key()) < 0;
} else {
return true;
}
} else {
if (b->Valid()) {
return false;
} else {
return false;
}
}
}
};
const RocksDBStore* db;
KeyLess keyless;
string prefix;
const KeyValueDB::IteratorBounds bounds;
const rocksdb::Slice iterate_lower_bound;
const rocksdb::Slice iterate_upper_bound;
std::vector<rocksdb::Iterator*> iters;
public:
explicit ShardMergeIteratorImpl(const RocksDBStore* db,
const std::string& prefix,
const std::vector<rocksdb::ColumnFamilyHandle*>& shards,
KeyValueDB::IteratorBounds bounds_)
: db(db), keyless(db->comparator), prefix(prefix), bounds(std::move(bounds_)),
iterate_lower_bound(make_slice(bounds.lower_bound)),
iterate_upper_bound(make_slice(bounds.upper_bound))
{
iters.reserve(shards.size());
auto options = rocksdb::ReadOptions();
if (db->cct->_conf->osd_rocksdb_iterator_bounds_enabled) {
if (bounds.lower_bound) {
options.iterate_lower_bound = &iterate_lower_bound;
}
if (bounds.upper_bound) {
options.iterate_upper_bound = &iterate_upper_bound;
}
}
for (auto& s : shards) {
iters.push_back(db->db->NewIterator(options, s));
}
}
~ShardMergeIteratorImpl() {
for (auto& it : iters) {
delete it;
}
}
int seek_to_first() override {
for (auto& it : iters) {
it->SeekToFirst();
if (!it->status().ok()) {
return -1;
}
}
    //all iterators positioned; sort
std::sort(iters.begin(), iters.end(), keyless);
return 0;
}
int seek_to_last() override {
for (auto& it : iters) {
it->SeekToLast();
if (!it->status().ok()) {
return -1;
}
}
for (size_t i = 1; i < iters.size(); i++) {
if (iters[0]->Valid()) {
if (iters[i]->Valid()) {
if (keyless(iters[0], iters[i])) {
std::swap(iters[0], iters[i]);
}
} else {
//iters[i] empty
}
} else {
if (iters[i]->Valid()) {
std::swap(iters[0], iters[i]);
}
}
//it might happen that cf was empty
if (iters[i]->Valid()) {
iters[i]->Next();
}
}
//no need to sort, as at most 1 iterator is valid now
return 0;
}
int upper_bound(const string &after) override {
rocksdb::Slice slice_bound(after);
for (auto& it : iters) {
it->Seek(slice_bound);
if (it->Valid() && it->key() == after) {
it->Next();
}
if (!it->status().ok()) {
return -1;
}
}
std::sort(iters.begin(), iters.end(), keyless);
return 0;
}
int lower_bound(const string &to) override {
rocksdb::Slice slice_bound(to);
for (auto& it : iters) {
it->Seek(slice_bound);
if (!it->status().ok()) {
return -1;
}
}
std::sort(iters.begin(), iters.end(), keyless);
return 0;
}
int next() override {
int r = -1;
if (iters[0]->Valid()) {
iters[0]->Next();
if (iters[0]->status().ok()) {
r = 0;
//bubble up
for (size_t i = 0; i < iters.size() - 1; i++) {
if (keyless(iters[i], iters[i + 1])) {
//matches, fixed
break;
}
std::swap(iters[i], iters[i + 1]);
}
}
}
return r;
}
// iters are sorted, so
// a[0] < b[0] < c[0] < d[0]
// a[0] > a[-1], a[0] > b[-1], a[0] > c[-1], a[0] > d[-1]
// so, prev() will be one of:
// a[-1], b[-1], c[-1], d[-1]
// prev() will be the one that is *largest* of them
//
// alg:
// 1. go prev() on each iterator we can
// 2. select largest key from those iterators
// 3. go next() on all iterators except (2)
// 4. sort
int prev() override {
std::vector<rocksdb::Iterator*> prev_done;
//1
for (auto it: iters) {
if (it->Valid()) {
it->Prev();
if (it->Valid()) {
prev_done.push_back(it);
} else {
it->SeekToFirst();
}
} else {
it->SeekToLast();
if (it->Valid()) {
prev_done.push_back(it);
}
}
}
if (prev_done.size() == 0) {
/* there is no previous element */
if (iters[0]->Valid()) {
iters[0]->Prev();
ceph_assert(!iters[0]->Valid());
}
return 0;
}
//2,3
rocksdb::Iterator* highest = prev_done[0];
for (size_t i = 1; i < prev_done.size(); i++) {
if (keyless(highest, prev_done[i])) {
highest->Next();
highest = prev_done[i];
} else {
prev_done[i]->Next();
}
}
//4
    //insert highest at the beginning, shifting the others right until we
    //reach highest's old slot; the untouched rest stays sorted because we
    //only stepped it with prev()/next()
rocksdb::Iterator* hold = highest;
for (size_t i = 0; i < iters.size(); i++) {
std::swap(hold, iters[i]);
if (hold == highest) break;
}
ceph_assert(hold == highest);
return 0;
}
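  // Worked example of the steps above (illustrative): with iters positioned at
  // a=5, b=7, c=9, step 1 moves each back to, say, a=3, b=6, c=8; step 2 picks
  // the largest of those, c=8, as the new current element; step 3 re-advances
  // a and b to 5 and 7; step 4 places c at iters[0], and the remaining
  // iterators are still mutually sorted.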
bool valid() override {
return iters[0]->Valid();
}
string key() override {
return iters[0]->key().ToString();
}
std::pair<std::string, std::string> raw_key() override {
return make_pair(prefix, key());
}
bufferlist value() override {
return to_bufferlist(iters[0]->value());
}
bufferptr value_as_ptr() override {
rocksdb::Slice val = iters[0]->value();
return bufferptr(val.data(), val.size());
}
int status() override {
return iters[0]->status().ok() ? 0 : -1;
}
};
KeyValueDB::Iterator RocksDBStore::get_iterator(const std::string& prefix, IteratorOpts opts, IteratorBounds bounds)
{
auto cf_it = cf_handles.find(prefix);
if (cf_it != cf_handles.end()) {
rocksdb::ColumnFamilyHandle* cf = nullptr;
if (cf_it->second.handles.size() == 1) {
cf = cf_it->second.handles[0];
} else if (cct->_conf->osd_rocksdb_iterator_bounds_enabled) {
cf = check_cf_handle_bounds(cf_it, bounds);
}
if (cf) {
return std::make_shared<CFIteratorImpl>(
this,
prefix,
cf,
std::move(bounds));
} else {
return std::make_shared<ShardMergeIteratorImpl>(
this,
prefix,
cf_it->second.handles,
std::move(bounds));
}
} else {
// use wholespace engine if no cfs are configured
// or use default cf otherwise as there is no
// matching cf for the specified prefix.
auto w_it = cf_handles.size() == 0 || prefix.empty() ?
get_wholespace_iterator(opts) :
get_default_cf_iterator();
return KeyValueDB::make_iterator(prefix, w_it);
}
}
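// Minimal usage sketch (illustrative; 'store' is any KeyValueDB pointer and
// process() stands in for caller code):
//
//   KeyValueDB::Iterator it = store->get_iterator("P");
//   for (it->seek_to_first(); it->valid(); it->next()) {
//     process(it->key(), it->value());
//   }
//
// Depending on the configured sharding this returns a CFIteratorImpl (single
// shard), a ShardMergeIteratorImpl (several shards) or a prefix-scoped wrapper
// around a wholespace iterator.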
RocksDBStore::WholeSpaceIterator RocksDBStore::new_shard_iterator(rocksdb::ColumnFamilyHandle* cf)
{
return std::make_shared<RocksDBWholeSpaceIteratorImpl>(
this,
cf,
0);
}
KeyValueDB::Iterator RocksDBStore::new_shard_iterator(rocksdb::ColumnFamilyHandle* cf,
const std::string& prefix,
IteratorBounds bounds)
{
return std::make_shared<CFIteratorImpl>(
this,
prefix,
cf,
std::move(bounds));
}
RocksDBStore::WholeSpaceIterator RocksDBStore::get_wholespace_iterator(IteratorOpts opts)
{
if (cf_handles.size() == 0) {
return std::make_shared<RocksDBWholeSpaceIteratorImpl>(
this, default_cf, opts);
} else {
return std::make_shared<WholeMergeIteratorImpl>(this);
}
}
RocksDBStore::WholeSpaceIterator RocksDBStore::get_default_cf_iterator()
{
return std::make_shared<RocksDBWholeSpaceIteratorImpl>(this, default_cf, 0);
}
int RocksDBStore::prepare_for_reshard(const std::string& new_sharding,
RocksDBStore::columns_t& to_process_columns)
{
//0. lock db from opening
//1. list existing columns
//2. apply merge operator to (main + columns) opts
  //3. prepare std::vector<rocksdb::ColumnFamilyDescriptor> cfs_to_open
//4. open db, acquire existing column handles
//5. calculate missing columns
//6. create missing columns
//7. construct cf_handles according to new sharding
  //8. check if all cf_handles are filled
bool b;
std::vector<ColumnFamily> new_sharding_def;
char const* error_position;
std::string error_msg;
b = parse_sharding_def(new_sharding, new_sharding_def, &error_position, &error_msg);
if (!b) {
dout(1) << __func__ << " bad sharding: " << dendl;
dout(1) << __func__ << new_sharding << dendl;
dout(1) << __func__ << std::string(error_position - &new_sharding[0], ' ') << "^" << error_msg << dendl;
return -EINVAL;
}
//0. lock db from opening
std::string stored_sharding_text;
rocksdb::ReadFileToString(env,
sharding_def_file,
&stored_sharding_text);
if (stored_sharding_text.find(resharding_column_lock) == string::npos) {
rocksdb::Status status;
if (stored_sharding_text.size() != 0)
stored_sharding_text += " ";
stored_sharding_text += resharding_column_lock;
env->CreateDir(sharding_def_dir);
status = rocksdb::WriteStringToFile(env, stored_sharding_text,
sharding_def_file, true);
if (!status.ok()) {
derr << __func__ << " cannot write to " << sharding_def_file << dendl;
return -EIO;
}
}
//1. list existing columns
rocksdb::Status status;
std::vector<std::string> existing_columns;
rocksdb::Options opt;
int r = load_rocksdb_options(false, opt);
if (r) {
dout(1) << __func__ << " load rocksdb options failed" << dendl;
return r;
}
status = rocksdb::DB::ListColumnFamilies(rocksdb::DBOptions(opt), path, &existing_columns);
if (!status.ok()) {
derr << "Unable to list column families: " << status.ToString() << dendl;
return -EINVAL;
}
dout(5) << "existing columns = " << existing_columns << dendl;
//2. apply merge operator to (main + columns) opts
//3. prepare std::vector<rocksdb::ColumnFamilyDescriptor> cfs_to_open
std::vector<rocksdb::ColumnFamilyDescriptor> cfs_to_open;
for (const auto& full_name : existing_columns) {
    //split col_name into <prefix>-<number>
std::string base_name;
size_t pos = full_name.find('-');
if (std::string::npos == pos)
base_name = full_name;
else
base_name = full_name.substr(0,pos);
rocksdb::ColumnFamilyOptions cf_opt(opt);
// search if we have options for this column
std::string options;
for (const auto& nsd : new_sharding_def) {
if (nsd.name == base_name) {
options = nsd.options;
break;
}
}
int r = update_column_family_options(base_name, options, &cf_opt);
if (r != 0) {
return r;
}
cfs_to_open.emplace_back(full_name, cf_opt);
}
//4. open db, acquire existing column handles
std::vector<rocksdb::ColumnFamilyHandle*> handles;
status = rocksdb::DB::Open(rocksdb::DBOptions(opt),
path, cfs_to_open, &handles, &db);
if (!status.ok()) {
derr << status.ToString() << dendl;
return -EINVAL;
}
for (size_t i = 0; i < cfs_to_open.size(); i++) {
dout(10) << "column " << cfs_to_open[i].name << " handle " << (void*)handles[i] << dendl;
}
//5. calculate missing columns
std::vector<std::string> new_sharding_columns;
std::vector<std::string> missing_columns;
sharding_def_to_columns(new_sharding_def,
new_sharding_columns);
dout(5) << "target columns = " << new_sharding_columns << dendl;
for (const auto& n : new_sharding_columns) {
bool found = false;
for (const auto& e : existing_columns) {
if (n == e) {
found = true;
break;
}
}
if (!found) {
missing_columns.push_back(n);
}
}
dout(5) << "missing columns = " << missing_columns << dendl;
//6. create missing columns
for (const auto& full_name : missing_columns) {
std::string base_name;
size_t pos = full_name.find('-');
if (std::string::npos == pos)
base_name = full_name;
else
base_name = full_name.substr(0,pos);
rocksdb::ColumnFamilyOptions cf_opt(opt);
// search if we have options for this column
std::string options;
for (const auto& nsd : new_sharding_def) {
if (nsd.name == base_name) {
options = nsd.options;
break;
}
}
int r = update_column_family_options(base_name, options, &cf_opt);
if (r != 0) {
return r;
}
rocksdb::ColumnFamilyHandle *cf;
status = db->CreateColumnFamily(cf_opt, full_name, &cf);
if (!status.ok()) {
derr << __func__ << " Failed to create rocksdb column family: "
<< full_name << dendl;
return -EINVAL;
}
dout(10) << "created column " << full_name << " handle = " << (void*)cf << dendl;
existing_columns.push_back(full_name);
handles.push_back(cf);
}
//7. construct cf_handles according to new sharding
for (size_t i = 0; i < existing_columns.size(); i++) {
std::string full_name = existing_columns[i];
rocksdb::ColumnFamilyHandle *cf = handles[i];
std::string base_name;
size_t shard_idx = 0;
size_t pos = full_name.find('-');
dout(10) << "processing column " << full_name << dendl;
if (std::string::npos == pos) {
base_name = full_name;
} else {
base_name = full_name.substr(0,pos);
shard_idx = atoi(full_name.substr(pos+1).c_str());
}
if (rocksdb::kDefaultColumnFamilyName == base_name) {
default_cf = handles[i];
must_close_default_cf = true;
std::unique_ptr<rocksdb::ColumnFamilyHandle, cf_deleter_t> ptr{
cf, [](rocksdb::ColumnFamilyHandle*) {}};
to_process_columns.emplace(full_name, std::move(ptr));
} else {
for (const auto& nsd : new_sharding_def) {
if (nsd.name == base_name) {
if (shard_idx < nsd.shard_cnt) {
add_column_family(base_name, nsd.hash_l, nsd.hash_h, shard_idx, cf);
} else {
          //ignore columns with an index larger than the shard count
}
break;
}
}
std::unique_ptr<rocksdb::ColumnFamilyHandle, cf_deleter_t> ptr{
cf, [this](rocksdb::ColumnFamilyHandle* handle) {
db->DestroyColumnFamilyHandle(handle);
}};
to_process_columns.emplace(full_name, std::move(ptr));
}
}
//8. check if all cf_handles are filled
for (const auto& col : cf_handles) {
for (size_t i = 0; i < col.second.handles.size(); i++) {
if (col.second.handles[i] == nullptr) {
derr << "missing handle for column " << col.first << " shard " << i << dendl;
return -EIO;
}
}
}
return 0;
}
int RocksDBStore::reshard_cleanup(const RocksDBStore::columns_t& current_columns)
{
std::vector<std::string> new_sharding_columns;
for (const auto& [name, handle] : cf_handles) {
if (handle.handles.size() == 1) {
new_sharding_columns.push_back(name);
} else {
for (size_t i = 0; i < handle.handles.size(); i++) {
new_sharding_columns.push_back(name + "-" + std::to_string(i));
}
}
}
for (auto& [name, handle] : current_columns) {
auto found = std::find(new_sharding_columns.begin(),
new_sharding_columns.end(),
name) != new_sharding_columns.end();
if (found || name == rocksdb::kDefaultColumnFamilyName) {
dout(5) << "Column " << name << " is part of new sharding." << dendl;
continue;
}
dout(5) << "Column " << name << " not part of new sharding. Deleting." << dendl;
// verify that column is empty
std::unique_ptr<rocksdb::Iterator> it{
db->NewIterator(rocksdb::ReadOptions(), handle.get())};
ceph_assert(it);
it->SeekToFirst();
ceph_assert(!it->Valid());
if (rocksdb::Status status = db->DropColumnFamily(handle.get()); !status.ok()) {
derr << __func__ << " Failed to drop column: " << name << dendl;
return -EINVAL;
}
}
return 0;
}
int RocksDBStore::reshard(const std::string& new_sharding, const RocksDBStore::resharding_ctrl* ctrl_in)
{
resharding_ctrl ctrl = ctrl_in ? *ctrl_in : resharding_ctrl();
size_t bytes_in_batch = 0;
size_t keys_in_batch = 0;
size_t bytes_per_iterator = 0;
size_t keys_per_iterator = 0;
size_t keys_processed = 0;
size_t keys_moved = 0;
auto flush_batch = [&](rocksdb::WriteBatch* batch) {
dout(10) << "flushing batch, " << keys_in_batch << " keys, for "
<< bytes_in_batch << " bytes" << dendl;
rocksdb::WriteOptions woptions;
woptions.sync = true;
rocksdb::Status s = db->Write(woptions, batch);
ceph_assert(s.ok());
bytes_in_batch = 0;
keys_in_batch = 0;
batch->Clear();
};
auto process_column = [&](rocksdb::ColumnFamilyHandle* handle,
const std::string& fixed_prefix)
{
dout(5) << " column=" << (void*)handle << " prefix=" << fixed_prefix << dendl;
std::unique_ptr<rocksdb::Iterator> it{
db->NewIterator(rocksdb::ReadOptions(), handle)};
ceph_assert(it);
rocksdb::WriteBatch bat;
for (it->SeekToFirst(); it->Valid(); it->Next()) {
rocksdb::Slice raw_key = it->key();
dout(30) << "key=" << pretty_binary_string(raw_key.ToString()) << dendl;
      //check if we need to refresh the iterator
if (bytes_per_iterator >= ctrl.bytes_per_iterator ||
keys_per_iterator >= ctrl.keys_per_iterator) {
dout(8) << "refreshing iterator" << dendl;
bytes_per_iterator = 0;
keys_per_iterator = 0;
std::string raw_key_str = raw_key.ToString();
it.reset(db->NewIterator(rocksdb::ReadOptions(), handle));
ceph_assert(it);
it->Seek(raw_key_str);
ceph_assert(it->Valid());
raw_key = it->key();
}
rocksdb::Slice value = it->value();
std::string prefix, key;
if (fixed_prefix.size() == 0) {
split_key(raw_key, &prefix, &key);
} else {
prefix = fixed_prefix;
key = raw_key.ToString();
}
keys_processed++;
if ((keys_processed % 10000) == 0) {
dout(10) << "processed " << keys_processed << " keys, moved " << keys_moved << dendl;
}
rocksdb::ColumnFamilyHandle* new_handle = get_cf_handle(prefix, key);
if (new_handle == nullptr) {
new_handle = default_cf;
}
if (handle == new_handle) {
continue;
}
std::string new_raw_key;
if (new_handle == default_cf) {
new_raw_key = combine_strings(prefix, key);
} else {
new_raw_key = key;
}
bat.Delete(handle, raw_key);
bat.Put(new_handle, new_raw_key, value);
dout(25) << "moving " << (void*)handle << "/" << pretty_binary_string(raw_key.ToString()) <<
" to " << (void*)new_handle << "/" << pretty_binary_string(new_raw_key) <<
" size " << value.size() << dendl;
keys_moved++;
bytes_in_batch += new_raw_key.size() * 2 + value.size();
keys_in_batch++;
bytes_per_iterator += new_raw_key.size() * 2 + value.size();
keys_per_iterator++;
      //check if we need to write the batch
if (bytes_in_batch >= ctrl.bytes_per_batch ||
keys_in_batch >= ctrl.keys_per_batch) {
flush_batch(&bat);
if (ctrl.unittest_fail_after_first_batch) {
return -1000;
}
}
}
if (bat.Count() > 0) {
flush_batch(&bat);
}
return 0;
};
auto close_column_handles = make_scope_guard([this] {
cf_handles.clear();
close();
});
columns_t to_process_columns;
int r = prepare_for_reshard(new_sharding, to_process_columns);
if (r != 0) {
dout(1) << "failed to prepare db for reshard" << dendl;
return r;
}
for (auto& [name, handle] : to_process_columns) {
dout(5) << "Processing column=" << name
<< " handle=" << handle.get() << dendl;
if (name == rocksdb::kDefaultColumnFamilyName) {
ceph_assert(handle.get() == default_cf);
r = process_column(default_cf, std::string());
} else {
std::string fixed_prefix = name.substr(0, name.find('-'));
dout(10) << "Prefix: " << fixed_prefix << dendl;
r = process_column(handle.get(), fixed_prefix);
}
if (r != 0) {
derr << "Error processing column " << name << dendl;
return r;
}
if (ctrl.unittest_fail_after_processing_column) {
return -1001;
}
}
r = reshard_cleanup(to_process_columns);
if (r != 0) {
dout(5) << "failed to cleanup after reshard" << dendl;
return r;
}
if (ctrl.unittest_fail_after_successful_processing) {
return -1002;
}
env->CreateDir(sharding_def_dir);
if (auto status = rocksdb::WriteStringToFile(env, new_sharding,
sharding_def_file, true);
!status.ok()) {
derr << __func__ << " cannot write to " << sharding_def_file << dendl;
return -EIO;
}
return r;
}
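// Usage sketch (illustrative): an offline caller could apply a new sharding
// scheme roughly as
//
//   RocksDBStore::resharding_ctrl ctrl;          // defaults are usually fine
//   int r = store.reshard("m(3) p(3,0-12) L P", &ctrl);
//
// where the sharding string is only an assumed example; parse_sharding_def()
// defines the authoritative grammar. reshard() opens the database itself via
// prepare_for_reshard(), so it is meant to run while the store is otherwise
// closed.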
bool RocksDBStore::get_sharding(std::string& sharding) {
rocksdb::Status status;
std::string stored_sharding_text;
bool result = false;
sharding.clear();
status = env->FileExists(sharding_def_file);
if (status.ok()) {
status = rocksdb::ReadFileToString(env,
sharding_def_file,
&stored_sharding_text);
if(status.ok()) {
result = true;
sharding = stored_sharding_text;
}
}
return result;
}
| 105,861 | 29.341645 | 130 |
cc
|
null |
ceph-main/src/kv/RocksDBStore.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef ROCKS_DB_STORE_H
#define ROCKS_DB_STORE_H
#include "include/types.h"
#include "include/buffer_fwd.h"
#include "KeyValueDB.h"
#include <set>
#include <map>
#include <string>
#include <memory>
#include <boost/scoped_ptr.hpp>
#include "rocksdb/write_batch.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/iostats_context.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "rocksdb/db.h"
#include "kv/rocksdb_cache/BinnedLRUCache.h"
#include <errno.h>
#include "common/errno.h"
#include "common/dout.h"
#include "include/ceph_assert.h"
#include "include/common_fwd.h"
#include "common/Formatter.h"
#include "common/Cond.h"
#include "common/ceph_context.h"
#include "common/PriorityCache.h"
#include "common/pretty_binary.h"
enum {
l_rocksdb_first = 34300,
l_rocksdb_get_latency,
l_rocksdb_submit_latency,
l_rocksdb_submit_sync_latency,
l_rocksdb_compact,
l_rocksdb_compact_range,
l_rocksdb_compact_queue_merge,
l_rocksdb_compact_queue_len,
l_rocksdb_write_wal_time,
l_rocksdb_write_memtable_time,
l_rocksdb_write_delay_time,
l_rocksdb_write_pre_and_post_process_time,
l_rocksdb_last,
};
namespace rocksdb{
class DB;
class Env;
class Cache;
class FilterPolicy;
class Snapshot;
class Slice;
class WriteBatch;
class Iterator;
class Logger;
class ColumnFamilyHandle;
struct Options;
struct BlockBasedTableOptions;
struct DBOptions;
struct ColumnFamilyOptions;
}
extern rocksdb::Logger *create_rocksdb_ceph_logger();
inline rocksdb::Slice make_slice(const std::optional<std::string>& bound) {
if (bound) {
return {*bound};
} else {
return {};
}
}
/**
* Uses RocksDB to implement the KeyValueDB interface
*/
class RocksDBStore : public KeyValueDB {
CephContext *cct;
PerfCounters *logger;
std::string path;
std::map<std::string,std::string> kv_options;
void *priv;
rocksdb::DB *db;
rocksdb::Env *env;
const rocksdb::Comparator* comparator;
std::shared_ptr<rocksdb::Statistics> dbstats;
rocksdb::BlockBasedTableOptions bbt_opts;
std::string options_str;
uint64_t cache_size = 0;
bool set_cache_flag = false;
friend class ShardMergeIteratorImpl;
friend class CFIteratorImpl;
friend class WholeMergeIteratorImpl;
/*
   * See RocksDB's definition of a column family (CF) and how to use it.
   * The KeyValueDB interface is extended when column families are created.
   * The KeyValueDB prefix is used as the name of the column family.
*/
public:
struct ColumnFamily {
std::string name; //< name of this individual column family
size_t shard_cnt; //< count of shards
std::string options; //< configure option string for this CF
uint32_t hash_l; //< first character to take for hash calc.
uint32_t hash_h; //< last character to take for hash calc.
ColumnFamily(const std::string &name, size_t shard_cnt, const std::string &options,
uint32_t hash_l, uint32_t hash_h)
: name(name), shard_cnt(shard_cnt), options(options), hash_l(hash_l), hash_h(hash_h) {}
};
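  // Illustrative example (assumed, not authoritative): a sharding definition
  // such as "m(3) p(3,0-12) L P", as parsed by parse_sharding_def(), would
  // yield ColumnFamily entries for "m" and "p" with shard_cnt == 3 (hash_l and
  // hash_h for "p" taken from "0-12"), plus unsharded "L" and "P" entries.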
private:
friend std::ostream& operator<<(std::ostream& out, const ColumnFamily& cf);
bool must_close_default_cf = false;
rocksdb::ColumnFamilyHandle *default_cf = nullptr;
/// column families in use, name->handles
struct prefix_shards {
uint32_t hash_l; //< first character to take for hash calc.
uint32_t hash_h; //< last character to take for hash calc.
std::vector<rocksdb::ColumnFamilyHandle *> handles;
};
std::unordered_map<std::string, prefix_shards> cf_handles;
typedef decltype(cf_handles)::iterator cf_handles_iterator;
std::unordered_map<uint32_t, std::string> cf_ids_to_prefix;
std::unordered_map<std::string, rocksdb::BlockBasedTableOptions> cf_bbt_opts;
void add_column_family(const std::string& cf_name, uint32_t hash_l, uint32_t hash_h,
size_t shard_idx, rocksdb::ColumnFamilyHandle *handle);
bool is_column_family(const std::string& prefix);
std::string_view get_key_hash_view(const prefix_shards& shards, const char* key, const size_t keylen);
rocksdb::ColumnFamilyHandle *get_key_cf(const prefix_shards& shards, const char* key, const size_t keylen);
rocksdb::ColumnFamilyHandle *get_cf_handle(const std::string& prefix, const std::string& key);
rocksdb::ColumnFamilyHandle *get_cf_handle(const std::string& prefix, const char* key, size_t keylen);
rocksdb::ColumnFamilyHandle *check_cf_handle_bounds(const cf_handles_iterator& it, const IteratorBounds& bounds);
int submit_common(rocksdb::WriteOptions& woptions, KeyValueDB::Transaction t);
int install_cf_mergeop(const std::string &cf_name, rocksdb::ColumnFamilyOptions *cf_opt);
int create_db_dir();
int do_open(std::ostream &out, bool create_if_missing, bool open_readonly,
const std::string& cfs="");
int load_rocksdb_options(bool create_if_missing, rocksdb::Options& opt);
public:
static bool parse_sharding_def(const std::string_view text_def,
std::vector<ColumnFamily>& sharding_def,
char const* *error_position = nullptr,
std::string *error_msg = nullptr);
const rocksdb::Comparator* get_comparator() const {
return comparator;
}
private:
static void sharding_def_to_columns(const std::vector<ColumnFamily>& sharding_def,
std::vector<std::string>& columns);
int create_shards(const rocksdb::Options& opt,
const std::vector<ColumnFamily>& sharding_def);
int apply_sharding(const rocksdb::Options& opt,
const std::string& sharding_text);
int verify_sharding(const rocksdb::Options& opt,
std::vector<rocksdb::ColumnFamilyDescriptor>& existing_cfs,
std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> >& existing_cfs_shard,
std::vector<rocksdb::ColumnFamilyDescriptor>& missing_cfs,
std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> >& missing_cfs_shard);
std::shared_ptr<rocksdb::Cache> create_block_cache(const std::string& cache_type, size_t cache_size, double cache_prio_high = 0.0);
int split_column_family_options(const std::string& opts_str,
std::unordered_map<std::string, std::string>* column_opts_map,
std::string* block_cache_opt);
int apply_block_cache_options(const std::string& column_name,
const std::string& block_cache_opt,
rocksdb::ColumnFamilyOptions* cf_opt);
int update_column_family_options(const std::string& base_name,
const std::string& more_options,
rocksdb::ColumnFamilyOptions* cf_opt);
// manage async compactions
ceph::mutex compact_queue_lock =
ceph::make_mutex("RocksDBStore::compact_thread_lock");
ceph::condition_variable compact_queue_cond;
std::list<std::pair<std::string,std::string>> compact_queue;
bool compact_queue_stop;
class CompactThread : public Thread {
RocksDBStore *db;
public:
explicit CompactThread(RocksDBStore *d) : db(d) {}
void *entry() override {
db->compact_thread_entry();
return NULL;
}
friend class RocksDBStore;
} compact_thread;
void compact_thread_entry();
void compact_range(const std::string& start, const std::string& end);
void compact_range_async(const std::string& start, const std::string& end);
int tryInterpret(const std::string& key, const std::string& val,
rocksdb::Options& opt);
public:
/// compact the underlying rocksdb store
bool compact_on_mount;
bool disableWAL;
uint64_t get_delete_range_threshold() const {
return cct->_conf.get_val<uint64_t>("rocksdb_delete_range_threshold");
}
void compact() override;
void compact_async() override {
compact_range_async({}, {});
}
int ParseOptionsFromString(const std::string& opt_str, rocksdb::Options& opt);
static int ParseOptionsFromStringStatic(
CephContext* cct,
const std::string& opt_str,
rocksdb::Options &opt,
std::function<int(const std::string&, const std::string&, rocksdb::Options&)> interp);
static int _test_init(const std::string& dir);
int init(std::string options_str) override;
/// compact rocksdb for all keys with a given prefix
void compact_prefix(const std::string& prefix) override {
compact_range(prefix, past_prefix(prefix));
}
void compact_prefix_async(const std::string& prefix) override {
compact_range_async(prefix, past_prefix(prefix));
}
void compact_range(const std::string& prefix, const std::string& start,
const std::string& end) override {
compact_range(combine_strings(prefix, start), combine_strings(prefix, end));
}
void compact_range_async(const std::string& prefix, const std::string& start,
const std::string& end) override {
compact_range_async(combine_strings(prefix, start), combine_strings(prefix, end));
}
RocksDBStore(CephContext *c, const std::string &path, std::map<std::string,std::string> opt, void *p) :
cct(c),
logger(NULL),
path(path),
kv_options(opt),
priv(p),
db(NULL),
env(static_cast<rocksdb::Env*>(p)),
comparator(nullptr),
dbstats(NULL),
compact_queue_stop(false),
compact_thread(this),
compact_on_mount(false),
disableWAL(false)
{}
~RocksDBStore() override;
static bool check_omap_dir(std::string &omap_dir);
/// Opens underlying db
int open(std::ostream &out, const std::string& cfs="") override {
return do_open(out, false, false, cfs);
}
/// Creates underlying db if missing and opens it
int create_and_open(std::ostream &out,
const std::string& cfs="") override;
int open_read_only(std::ostream &out, const std::string& cfs="") override {
return do_open(out, false, true, cfs);
}
void close() override;
int repair(std::ostream &out) override;
void split_stats(const std::string &s, char delim, std::vector<std::string> &elems);
void get_statistics(ceph::Formatter *f) override;
PerfCounters *get_perf_counters() override
{
return logger;
}
bool get_property(
const std::string &property,
uint64_t *out) final;
int64_t estimate_prefix_size(const std::string& prefix,
const std::string& key_prefix) override;
struct RocksWBHandler;
class RocksDBTransactionImpl : public KeyValueDB::TransactionImpl {
public:
rocksdb::WriteBatch bat;
RocksDBStore *db;
explicit RocksDBTransactionImpl(RocksDBStore *_db);
private:
void put_bat(
rocksdb::WriteBatch& bat,
rocksdb::ColumnFamilyHandle *cf,
const std::string &k,
const ceph::bufferlist &to_set_bl);
public:
void set(
const std::string &prefix,
const std::string &k,
const ceph::bufferlist &bl) override;
void set(
const std::string &prefix,
const char *k,
size_t keylen,
const ceph::bufferlist &bl) override;
void rmkey(
const std::string &prefix,
const std::string &k) override;
void rmkey(
const std::string &prefix,
const char *k,
size_t keylen) override;
void rm_single_key(
const std::string &prefix,
const std::string &k) override;
void rmkeys_by_prefix(
const std::string &prefix
) override;
void rm_range_keys(
const std::string &prefix,
const std::string &start,
const std::string &end) override;
void merge(
const std::string& prefix,
const std::string& k,
const ceph::bufferlist &bl) override;
};
KeyValueDB::Transaction get_transaction() override {
return std::make_shared<RocksDBTransactionImpl>(this);
}
int submit_transaction(KeyValueDB::Transaction t) override;
int submit_transaction_sync(KeyValueDB::Transaction t) override;
int get(
const std::string &prefix,
const std::set<std::string> &key,
std::map<std::string, ceph::bufferlist> *out
) override;
int get(
const std::string &prefix,
const std::string &key,
ceph::bufferlist *out
) override;
int get(
const std::string &prefix,
const char *key,
size_t keylen,
ceph::bufferlist *out) override;
class RocksDBWholeSpaceIteratorImpl :
public KeyValueDB::WholeSpaceIteratorImpl {
protected:
rocksdb::Iterator *dbiter;
public:
explicit RocksDBWholeSpaceIteratorImpl(const RocksDBStore* db,
rocksdb::ColumnFamilyHandle* cf,
const KeyValueDB::IteratorOpts opts)
{
rocksdb::ReadOptions options = rocksdb::ReadOptions();
if (opts & ITERATOR_NOCACHE)
options.fill_cache=false;
dbiter = db->db->NewIterator(options, cf);
}
~RocksDBWholeSpaceIteratorImpl() override;
int seek_to_first() override;
int seek_to_first(const std::string &prefix) override;
int seek_to_last() override;
int seek_to_last(const std::string &prefix) override;
int upper_bound(const std::string &prefix, const std::string &after) override;
int lower_bound(const std::string &prefix, const std::string &to) override;
bool valid() override;
int next() override;
int prev() override;
std::string key() override;
std::pair<std::string,std::string> raw_key() override;
bool raw_key_is_prefixed(const std::string &prefix) override;
ceph::bufferlist value() override;
ceph::bufferptr value_as_ptr() override;
int status() override;
size_t key_size() override;
size_t value_size() override;
};
Iterator get_iterator(const std::string& prefix, IteratorOpts opts = 0, IteratorBounds = IteratorBounds()) override;
private:
  /// this iterator spans a single cf
WholeSpaceIterator new_shard_iterator(rocksdb::ColumnFamilyHandle* cf);
Iterator new_shard_iterator(rocksdb::ColumnFamilyHandle* cf,
const std::string& prefix, IteratorBounds bound);
public:
/// Utility
static std::string combine_strings(const std::string &prefix, const std::string &value) {
std::string out = prefix;
out.push_back(0);
out.append(value);
return out;
}
static void combine_strings(const std::string &prefix,
const char *key, size_t keylen,
std::string *out) {
out->reserve(prefix.size() + 1 + keylen);
*out = prefix;
out->push_back(0);
out->append(key, keylen);
}
static int split_key(rocksdb::Slice in, std::string *prefix, std::string *key);
static std::string past_prefix(const std::string &prefix);
class MergeOperatorRouter;
class MergeOperatorLinker;
friend class MergeOperatorRouter;
int set_merge_operator(
const std::string& prefix,
std::shared_ptr<KeyValueDB::MergeOperator> mop) override;
std::string assoc_name; ///< Name of associative operator
uint64_t get_estimated_size(std::map<std::string,uint64_t> &extra) override {
DIR *store_dir = opendir(path.c_str());
if (!store_dir) {
lderr(cct) << __func__ << " something happened opening the store: "
<< cpp_strerror(errno) << dendl;
return 0;
}
uint64_t total_size = 0;
uint64_t sst_size = 0;
uint64_t log_size = 0;
uint64_t misc_size = 0;
struct dirent *entry = NULL;
while ((entry = readdir(store_dir)) != NULL) {
std::string n(entry->d_name);
if (n == "." || n == "..")
continue;
std::string fpath = path + '/' + n;
struct stat s;
int err = stat(fpath.c_str(), &s);
if (err < 0)
err = -errno;
// we may race against rocksdb while reading files; this should only
// happen when those files are being updated, data is being shuffled
// and files get removed, in which case there's not much of a problem
// as we'll get to them next time around.
if (err == -ENOENT) {
continue;
}
if (err < 0) {
lderr(cct) << __func__ << " error obtaining stats for " << fpath
<< ": " << cpp_strerror(err) << dendl;
goto err;
}
size_t pos = n.find_last_of('.');
if (pos == std::string::npos) {
misc_size += s.st_size;
continue;
}
std::string ext = n.substr(pos+1);
if (ext == "sst") {
sst_size += s.st_size;
} else if (ext == "log") {
log_size += s.st_size;
} else {
misc_size += s.st_size;
}
}
total_size = sst_size + log_size + misc_size;
extra["sst"] = sst_size;
extra["log"] = log_size;
extra["misc"] = misc_size;
extra["total"] = total_size;
err:
closedir(store_dir);
return total_size;
}
virtual int64_t get_cache_usage() const override {
return static_cast<int64_t>(bbt_opts.block_cache->GetUsage());
}
virtual int64_t get_cache_usage(std::string prefix) const override {
auto it = cf_bbt_opts.find(prefix);
if (it != cf_bbt_opts.end() && it->second.block_cache) {
return static_cast<int64_t>(it->second.block_cache->GetUsage());
}
return -EINVAL;
}
int set_cache_size(uint64_t s) override {
cache_size = s;
set_cache_flag = true;
return 0;
}
virtual std::shared_ptr<PriorityCache::PriCache>
get_priority_cache() const override {
return std::dynamic_pointer_cast<PriorityCache::PriCache>(
bbt_opts.block_cache);
}
virtual std::shared_ptr<PriorityCache::PriCache>
get_priority_cache(std::string prefix) const override {
auto it = cf_bbt_opts.find(prefix);
if (it != cf_bbt_opts.end()) {
return std::dynamic_pointer_cast<PriorityCache::PriCache>(
it->second.block_cache);
}
return nullptr;
}
WholeSpaceIterator get_wholespace_iterator(IteratorOpts opts = 0) override;
private:
WholeSpaceIterator get_default_cf_iterator();
using cf_deleter_t = std::function<void(rocksdb::ColumnFamilyHandle*)>;
using columns_t = std::map<std::string,
std::unique_ptr<rocksdb::ColumnFamilyHandle,
cf_deleter_t>>;
int prepare_for_reshard(const std::string& new_sharding,
columns_t& to_process_columns);
int reshard_cleanup(const columns_t& current_columns);
public:
struct resharding_ctrl {
size_t bytes_per_iterator = 10000000; /// amount of data to process before refreshing iterator
size_t keys_per_iterator = 10000;
size_t bytes_per_batch = 1000000; /// amount of data before submitting batch
size_t keys_per_batch = 1000;
bool unittest_fail_after_first_batch = false;
bool unittest_fail_after_processing_column = false;
bool unittest_fail_after_successful_processing = false;
};
int reshard(const std::string& new_sharding, const resharding_ctrl* ctrl = nullptr);
bool get_sharding(std::string& sharding);
};
#endif
| 18,549 | 32.605072 | 133 |
h
|
null |
ceph-main/src/kv/rocksdb_cache/BinnedLRUCache.cc
|
// Copyright (c) 2018-Present Red Hat Inc. All rights reserved.
//
// Copyright (c) 2011-2018, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 and Apache 2.0 License
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include "BinnedLRUCache.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#define dout_context cct
#define dout_subsys ceph_subsys_rocksdb
#undef dout_prefix
#define dout_prefix *_dout << "rocksdb: "
namespace rocksdb_cache {
BinnedLRUHandleTable::BinnedLRUHandleTable() : list_(nullptr), length_(0), elems_(0) {
Resize();
}
BinnedLRUHandleTable::~BinnedLRUHandleTable() {
ApplyToAllCacheEntries([](BinnedLRUHandle* h) {
if (h->refs == 1) {
h->Free();
}
});
delete[] list_;
}
BinnedLRUHandle* BinnedLRUHandleTable::Lookup(const rocksdb::Slice& key, uint32_t hash) {
return *FindPointer(key, hash);
}
BinnedLRUHandle* BinnedLRUHandleTable::Insert(BinnedLRUHandle* h) {
BinnedLRUHandle** ptr = FindPointer(h->key(), h->hash);
BinnedLRUHandle* old = *ptr;
h->next_hash = (old == nullptr ? nullptr : old->next_hash);
*ptr = h;
if (old == nullptr) {
++elems_;
if (elems_ > length_) {
// Since each cache entry is fairly large, we aim for a small
// average linked list length (<= 1).
Resize();
}
}
return old;
}
BinnedLRUHandle* BinnedLRUHandleTable::Remove(const rocksdb::Slice& key, uint32_t hash) {
BinnedLRUHandle** ptr = FindPointer(key, hash);
BinnedLRUHandle* result = *ptr;
if (result != nullptr) {
*ptr = result->next_hash;
--elems_;
}
return result;
}
BinnedLRUHandle** BinnedLRUHandleTable::FindPointer(const rocksdb::Slice& key, uint32_t hash) {
BinnedLRUHandle** ptr = &list_[hash & (length_ - 1)];
while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
ptr = &(*ptr)->next_hash;
}
return ptr;
}
void BinnedLRUHandleTable::Resize() {
uint32_t new_length = 16;
while (new_length < elems_ * 1.5) {
new_length *= 2;
}
BinnedLRUHandle** new_list = new BinnedLRUHandle*[new_length];
memset(new_list, 0, sizeof(new_list[0]) * new_length);
uint32_t count = 0;
for (uint32_t i = 0; i < length_; i++) {
BinnedLRUHandle* h = list_[i];
while (h != nullptr) {
BinnedLRUHandle* next = h->next_hash;
uint32_t hash = h->hash;
BinnedLRUHandle** ptr = &new_list[hash & (new_length - 1)];
h->next_hash = *ptr;
*ptr = h;
h = next;
count++;
}
}
ceph_assert(elems_ == count);
delete[] list_;
list_ = new_list;
length_ = new_length;
}
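// Illustrative note: the table doubles until the average chain length stays at
// or below ~1; e.g. with elems_ == 20, new_length grows from 16 to 32
// (16 < 20 * 1.5, 32 >= 20 * 1.5), i.e. 20 entries over 32 buckets.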
BinnedLRUCacheShard::BinnedLRUCacheShard(CephContext *c, size_t capacity, bool strict_capacity_limit,
double high_pri_pool_ratio)
: cct(c),
capacity_(0),
high_pri_pool_usage_(0),
strict_capacity_limit_(strict_capacity_limit),
high_pri_pool_ratio_(high_pri_pool_ratio),
high_pri_pool_capacity_(0),
usage_(0),
lru_usage_(0),
age_bins(1) {
shift_bins();
// Make empty circular linked list
lru_.next = &lru_;
lru_.prev = &lru_;
lru_low_pri_ = &lru_;
SetCapacity(capacity);
}
BinnedLRUCacheShard::~BinnedLRUCacheShard() {}
bool BinnedLRUCacheShard::Unref(BinnedLRUHandle* e) {
ceph_assert(e->refs > 0);
e->refs--;
return e->refs == 0;
}
// Call deleter and free
void BinnedLRUCacheShard::EraseUnRefEntries() {
ceph::autovector<BinnedLRUHandle*> last_reference_list;
{
std::lock_guard<std::mutex> l(mutex_);
while (lru_.next != &lru_) {
BinnedLRUHandle* old = lru_.next;
ceph_assert(old->InCache());
ceph_assert(old->refs ==
1); // LRU list contains elements which may be evicted
LRU_Remove(old);
table_.Remove(old->key(), old->hash);
old->SetInCache(false);
Unref(old);
usage_ -= old->charge;
last_reference_list.push_back(old);
}
}
for (auto entry : last_reference_list) {
entry->Free();
}
}
void BinnedLRUCacheShard::ApplyToAllCacheEntries(
const std::function<void(const rocksdb::Slice& key,
void* value,
size_t charge,
DeleterFn)>& callback,
bool thread_safe)
{
if (thread_safe) {
mutex_.lock();
}
table_.ApplyToAllCacheEntries(
[callback](BinnedLRUHandle* h) {
callback(h->key(), h->value, h->charge, h->deleter);
});
if (thread_safe) {
mutex_.unlock();
}
}
void BinnedLRUCacheShard::TEST_GetLRUList(BinnedLRUHandle** lru, BinnedLRUHandle** lru_low_pri) {
*lru = &lru_;
*lru_low_pri = lru_low_pri_;
}
size_t BinnedLRUCacheShard::TEST_GetLRUSize() {
BinnedLRUHandle* lru_handle = lru_.next;
size_t lru_size = 0;
while (lru_handle != &lru_) {
lru_size++;
lru_handle = lru_handle->next;
}
return lru_size;
}
double BinnedLRUCacheShard::GetHighPriPoolRatio() const {
std::lock_guard<std::mutex> l(mutex_);
return high_pri_pool_ratio_;
}
size_t BinnedLRUCacheShard::GetHighPriPoolUsage() const {
std::lock_guard<std::mutex> l(mutex_);
return high_pri_pool_usage_;
}
void BinnedLRUCacheShard::LRU_Remove(BinnedLRUHandle* e) {
ceph_assert(e->next != nullptr);
ceph_assert(e->prev != nullptr);
if (lru_low_pri_ == e) {
lru_low_pri_ = e->prev;
}
e->next->prev = e->prev;
e->prev->next = e->next;
e->prev = e->next = nullptr;
lru_usage_ -= e->charge;
if (e->InHighPriPool()) {
ceph_assert(high_pri_pool_usage_ >= e->charge);
high_pri_pool_usage_ -= e->charge;
} else {
ceph_assert(*(e->age_bin) >= e->charge);
*(e->age_bin) -= e->charge;
}
}
void BinnedLRUCacheShard::LRU_Insert(BinnedLRUHandle* e) {
ceph_assert(e->next == nullptr);
ceph_assert(e->prev == nullptr);
e->age_bin = age_bins.front();
if (high_pri_pool_ratio_ > 0 && e->IsHighPri()) {
// Inset "e" to head of LRU list.
e->next = &lru_;
e->prev = lru_.prev;
e->prev->next = e;
e->next->prev = e;
e->SetInHighPriPool(true);
high_pri_pool_usage_ += e->charge;
MaintainPoolSize();
} else {
// Insert "e" to the head of low-pri pool. Note that when
// high_pri_pool_ratio is 0, head of low-pri pool is also head of LRU list.
e->next = lru_low_pri_->next;
e->prev = lru_low_pri_;
e->prev->next = e;
e->next->prev = e;
e->SetInHighPriPool(false);
lru_low_pri_ = e;
*(e->age_bin) += e->charge;
}
lru_usage_ += e->charge;
}
uint64_t BinnedLRUCacheShard::sum_bins(uint32_t start, uint32_t end) const {
std::lock_guard<std::mutex> l(mutex_);
auto size = age_bins.size();
if (size < start) {
return 0;
}
uint64_t bytes = 0;
end = (size < end) ? size : end;
for (auto i = start; i < end; i++) {
bytes += *(age_bins[i]);
}
return bytes;
}
void BinnedLRUCacheShard::MaintainPoolSize() {
while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
// Overflow last entry in high-pri pool to low-pri pool.
lru_low_pri_ = lru_low_pri_->next;
ceph_assert(lru_low_pri_ != &lru_);
lru_low_pri_->SetInHighPriPool(false);
high_pri_pool_usage_ -= lru_low_pri_->charge;
*(lru_low_pri_->age_bin) += lru_low_pri_->charge;
}
}
void BinnedLRUCacheShard::EvictFromLRU(size_t charge,
ceph::autovector<BinnedLRUHandle*>* deleted) {
while (usage_ + charge > capacity_ && lru_.next != &lru_) {
BinnedLRUHandle* old = lru_.next;
ceph_assert(old->InCache());
ceph_assert(old->refs == 1); // LRU list contains elements which may be evicted
LRU_Remove(old);
table_.Remove(old->key(), old->hash);
old->SetInCache(false);
Unref(old);
usage_ -= old->charge;
deleted->push_back(old);
}
}
void BinnedLRUCacheShard::SetCapacity(size_t capacity) {
ceph::autovector<BinnedLRUHandle*> last_reference_list;
{
std::lock_guard<std::mutex> l(mutex_);
capacity_ = capacity;
high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
EvictFromLRU(0, &last_reference_list);
}
// we free the entries here outside of mutex for
// performance reasons
for (auto entry : last_reference_list) {
entry->Free();
}
}
void BinnedLRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
std::lock_guard<std::mutex> l(mutex_);
strict_capacity_limit_ = strict_capacity_limit;
}
rocksdb::Cache::Handle* BinnedLRUCacheShard::Lookup(const rocksdb::Slice& key, uint32_t hash) {
std::lock_guard<std::mutex> l(mutex_);
BinnedLRUHandle* e = table_.Lookup(key, hash);
if (e != nullptr) {
ceph_assert(e->InCache());
if (e->refs == 1) {
LRU_Remove(e);
}
e->refs++;
e->SetHit();
}
return reinterpret_cast<rocksdb::Cache::Handle*>(e);
}
bool BinnedLRUCacheShard::Ref(rocksdb::Cache::Handle* h) {
BinnedLRUHandle* handle = reinterpret_cast<BinnedLRUHandle*>(h);
std::lock_guard<std::mutex> l(mutex_);
if (handle->InCache() && handle->refs == 1) {
LRU_Remove(handle);
}
handle->refs++;
return true;
}
void BinnedLRUCacheShard::SetHighPriPoolRatio(double high_pri_pool_ratio) {
std::lock_guard<std::mutex> l(mutex_);
high_pri_pool_ratio_ = high_pri_pool_ratio;
high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
MaintainPoolSize();
}
bool BinnedLRUCacheShard::Release(rocksdb::Cache::Handle* handle, bool force_erase) {
if (handle == nullptr) {
return false;
}
BinnedLRUHandle* e = reinterpret_cast<BinnedLRUHandle*>(handle);
bool last_reference = false;
{
std::lock_guard<std::mutex> l(mutex_);
last_reference = Unref(e);
if (last_reference) {
usage_ -= e->charge;
}
if (e->refs == 1 && e->InCache()) {
// The item is still in cache, and nobody else holds a reference to it
if (usage_ > capacity_ || force_erase) {
// the cache is full
// The LRU list must be empty since the cache is full
ceph_assert(!(usage_ > capacity_) || lru_.next == &lru_);
// take this opportunity and remove the item
table_.Remove(e->key(), e->hash);
e->SetInCache(false);
Unref(e);
usage_ -= e->charge;
last_reference = true;
} else {
// put the item on the list to be potentially freed
LRU_Insert(e);
}
}
}
// free outside of mutex
if (last_reference) {
e->Free();
}
return last_reference;
}
rocksdb::Status BinnedLRUCacheShard::Insert(const rocksdb::Slice& key, uint32_t hash, void* value,
size_t charge,
DeleterFn deleter,
rocksdb::Cache::Handle** handle, rocksdb::Cache::Priority priority) {
auto e = new BinnedLRUHandle();
rocksdb::Status s;
ceph::autovector<BinnedLRUHandle*> last_reference_list;
e->value = value;
e->deleter = deleter;
e->charge = charge;
e->key_length = key.size();
e->key_data = new char[e->key_length];
e->flags = 0;
e->hash = hash;
e->refs = (handle == nullptr
? 1
: 2); // One from BinnedLRUCache, one for the returned handle
e->next = e->prev = nullptr;
e->SetInCache(true);
e->SetPriority(priority);
std::copy_n(key.data(), e->key_length, e->key_data);
{
std::lock_guard<std::mutex> l(mutex_);
// Free the space following strict LRU policy until enough space
// is freed or the lru list is empty
EvictFromLRU(charge, &last_reference_list);
if (usage_ - lru_usage_ + charge > capacity_ &&
(strict_capacity_limit_ || handle == nullptr)) {
if (handle == nullptr) {
        // Don't insert the entry but still return ok, as if the entry were
        // inserted into the cache and evicted immediately.
last_reference_list.push_back(e);
} else {
delete e;
*handle = nullptr;
s = rocksdb::Status::Incomplete("Insert failed due to LRU cache being full.");
}
} else {
// insert into the cache
// note that the cache might get larger than its capacity if not enough
// space was freed
BinnedLRUHandle* old = table_.Insert(e);
usage_ += e->charge;
if (old != nullptr) {
old->SetInCache(false);
if (Unref(old)) {
usage_ -= old->charge;
// old is on LRU because it's in cache and its reference count
          // was just 1 (Unref returned true)
LRU_Remove(old);
last_reference_list.push_back(old);
}
}
if (handle == nullptr) {
LRU_Insert(e);
} else {
*handle = reinterpret_cast<rocksdb::Cache::Handle*>(e);
}
s = rocksdb::Status::OK();
}
}
// we free the entries here outside of mutex for
// performance reasons
for (auto entry : last_reference_list) {
entry->Free();
}
return s;
}
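// Caller-side note (illustrative sketch, not from the original source): with
// strict_capacity_limit_ set and a handle requested, a full cache surfaces as
// an Incomplete status rather than an immediate eviction of the new entry:
//
//   rocksdb::Cache::Handle* h = nullptr;
//   rocksdb::Status s = shard.Insert(key, hash, value, charge, deleter, &h,
//                                    rocksdb::Cache::Priority::LOW);
//   if (s.IsIncomplete()) { /* cache full; nothing was inserted */ }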
void BinnedLRUCacheShard::Erase(const rocksdb::Slice& key, uint32_t hash) {
BinnedLRUHandle* e;
bool last_reference = false;
{
std::lock_guard<std::mutex> l(mutex_);
e = table_.Remove(key, hash);
if (e != nullptr) {
last_reference = Unref(e);
if (last_reference) {
usage_ -= e->charge;
}
if (last_reference && e->InCache()) {
LRU_Remove(e);
}
e->SetInCache(false);
}
}
// mutex not held here
// last_reference will only be true if e != nullptr
if (last_reference) {
e->Free();
}
}
size_t BinnedLRUCacheShard::GetUsage() const {
std::lock_guard<std::mutex> l(mutex_);
return usage_;
}
size_t BinnedLRUCacheShard::GetPinnedUsage() const {
std::lock_guard<std::mutex> l(mutex_);
ceph_assert(usage_ >= lru_usage_);
return usage_ - lru_usage_;
}
void BinnedLRUCacheShard::shift_bins() {
std::lock_guard<std::mutex> l(mutex_);
age_bins.push_front(std::make_shared<uint64_t>(0));
}
uint32_t BinnedLRUCacheShard::get_bin_count() const {
std::lock_guard<std::mutex> l(mutex_);
return age_bins.capacity();
}
void BinnedLRUCacheShard::set_bin_count(uint32_t count) {
std::lock_guard<std::mutex> l(mutex_);
age_bins.set_capacity(count);
}
std::string BinnedLRUCacheShard::GetPrintableOptions() const {
const int kBufferSize = 200;
char buffer[kBufferSize];
{
std::lock_guard<std::mutex> l(mutex_);
snprintf(buffer, kBufferSize, " high_pri_pool_ratio: %.3lf\n",
high_pri_pool_ratio_);
}
return std::string(buffer);
}
DeleterFn BinnedLRUCacheShard::GetDeleter(rocksdb::Cache::Handle* h) const
{
auto* handle = reinterpret_cast<BinnedLRUHandle*>(h);
return handle->deleter;
}
BinnedLRUCache::BinnedLRUCache(CephContext *c,
size_t capacity,
int num_shard_bits,
bool strict_capacity_limit,
double high_pri_pool_ratio)
: ShardedCache(capacity, num_shard_bits, strict_capacity_limit), cct(c) {
num_shards_ = 1 << num_shard_bits;
// TODO: Switch over to use mempool
int rc = posix_memalign((void**) &shards_,
CACHE_LINE_SIZE,
sizeof(BinnedLRUCacheShard) * num_shards_);
if (rc != 0) {
throw std::bad_alloc();
}
size_t per_shard = (capacity + (num_shards_ - 1)) / num_shards_;
for (int i = 0; i < num_shards_; i++) {
new (&shards_[i])
BinnedLRUCacheShard(c, per_shard, strict_capacity_limit, high_pri_pool_ratio);
}
}
BinnedLRUCache::~BinnedLRUCache() {
for (int i = 0; i < num_shards_; i++) {
shards_[i].~BinnedLRUCacheShard();
}
aligned_free(shards_);
}
CacheShard* BinnedLRUCache::GetShard(int shard) {
return reinterpret_cast<CacheShard*>(&shards_[shard]);
}
const CacheShard* BinnedLRUCache::GetShard(int shard) const {
return reinterpret_cast<CacheShard*>(&shards_[shard]);
}
void* BinnedLRUCache::Value(Handle* handle) {
return reinterpret_cast<const BinnedLRUHandle*>(handle)->value;
}
size_t BinnedLRUCache::GetCharge(Handle* handle) const {
return reinterpret_cast<const BinnedLRUHandle*>(handle)->charge;
}
uint32_t BinnedLRUCache::GetHash(Handle* handle) const {
return reinterpret_cast<const BinnedLRUHandle*>(handle)->hash;
}
void BinnedLRUCache::DisownData() {
// Do not drop data if compiled with ASAN, to suppress the leak warning.
#ifndef __SANITIZE_ADDRESS__
shards_ = nullptr;
#endif // !__SANITIZE_ADDRESS__
}
#if (ROCKSDB_MAJOR >= 7 || (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22))
DeleterFn BinnedLRUCache::GetDeleter(Handle* handle) const
{
return reinterpret_cast<const BinnedLRUHandle*>(handle)->deleter;
}
#endif
size_t BinnedLRUCache::TEST_GetLRUSize() {
size_t lru_size_of_all_shards = 0;
for (int i = 0; i < num_shards_; i++) {
lru_size_of_all_shards += shards_[i].TEST_GetLRUSize();
}
return lru_size_of_all_shards;
}
void BinnedLRUCache::SetHighPriPoolRatio(double high_pri_pool_ratio) {
for (int i = 0; i < num_shards_; i++) {
shards_[i].SetHighPriPoolRatio(high_pri_pool_ratio);
}
}
double BinnedLRUCache::GetHighPriPoolRatio() const {
double result = 0.0;
if (num_shards_ > 0) {
result = shards_[0].GetHighPriPoolRatio();
}
return result;
}
size_t BinnedLRUCache::GetHighPriPoolUsage() const {
// We will not lock the cache when getting the usage from shards.
size_t usage = 0;
for (int s = 0; s < num_shards_; s++) {
usage += shards_[s].GetHighPriPoolUsage();
}
return usage;
}
// PriCache
int64_t BinnedLRUCache::request_cache_bytes(PriorityCache::Priority pri, uint64_t total_cache) const
{
int64_t assigned = get_cache_bytes(pri);
int64_t request = 0;
switch(pri) {
// PRI0 is for rocksdb's high priority items (indexes/filters)
case PriorityCache::Priority::PRI0:
{
// Because we want the high pri cache to grow independently of the low
// pri cache, request a chunky allocation independent of the other
// priorities.
request = PriorityCache::get_chunk(GetHighPriPoolUsage(), total_cache);
break;
}
case PriorityCache::Priority::LAST:
{
auto max = get_bin_count();
request = GetUsage();
request -= GetHighPriPoolUsage();
request -= sum_bins(0, max);
break;
}
default:
{
ceph_assert(pri > 0 && pri < PriorityCache::Priority::LAST);
auto prev_pri = static_cast<PriorityCache::Priority>(pri - 1);
uint64_t start = get_bins(prev_pri);
uint64_t end = get_bins(pri);
request = sum_bins(start, end);
break;
}
}
request = (request > assigned) ? request - assigned : 0;
ldout(cct, 10) << __func__ << " Priority: " << static_cast<uint32_t>(pri)
<< " Request: " << request << dendl;
return request;
}
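// Worked sketch of the binned request (assumed bin layout, for illustration
// only): if set_bins() placed PRI1 at end bin 2 and PRI2 at end bin 4, a PRI2
// request is sum_bins(2, 4) less whatever is already assigned to PRI2; PRI0
// instead asks for a chunk sized from the high-pri (index/filter) usage, and
// LAST picks up the usage covered by neither the bins nor the high-pri pool.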
int64_t BinnedLRUCache::commit_cache_size(uint64_t total_bytes)
{
size_t old_bytes = GetCapacity();
int64_t new_bytes = PriorityCache::get_chunk(
get_cache_bytes(), total_bytes);
ldout(cct, 10) << __func__ << " old: " << old_bytes
<< " new: " << new_bytes << dendl;
SetCapacity((size_t) new_bytes);
double ratio = 0;
if (new_bytes > 0) {
int64_t pri0_bytes = get_cache_bytes(PriorityCache::Priority::PRI0);
ratio = (double) pri0_bytes / new_bytes;
}
ldout(cct, 5) << __func__ << " High Pri Pool Ratio set to " << ratio << dendl;
SetHighPriPoolRatio(ratio);
return new_bytes;
}
void BinnedLRUCache::shift_bins() {
for (int s = 0; s < num_shards_; s++) {
shards_[s].shift_bins();
}
}
uint64_t BinnedLRUCache::sum_bins(uint32_t start, uint32_t end) const {
uint64_t bytes = 0;
for (int s = 0; s < num_shards_; s++) {
bytes += shards_[s].sum_bins(start, end);
}
return bytes;
}
uint32_t BinnedLRUCache::get_bin_count() const {
uint32_t result = 0;
if (num_shards_ > 0) {
result = shards_[0].get_bin_count();
}
return result;
}
void BinnedLRUCache::set_bin_count(uint32_t count) {
for (int s = 0; s < num_shards_; s++) {
shards_[s].set_bin_count(count);
}
}
std::shared_ptr<rocksdb::Cache> NewBinnedLRUCache(
CephContext *c,
size_t capacity,
int num_shard_bits,
bool strict_capacity_limit,
double high_pri_pool_ratio) {
if (num_shard_bits >= 20) {
return nullptr; // the cache cannot be sharded into too many fine pieces
}
if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
// invalid high_pri_pool_ratio
return nullptr;
}
if (num_shard_bits < 0) {
num_shard_bits = GetDefaultCacheShardBits(capacity);
}
return std::make_shared<BinnedLRUCache>(
c, capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio);
}
} // namespace rocksdb_cache
| 20,729 | 27.791667 | 101 |
cc
|
null |
ceph-main/src/kv/rocksdb_cache/BinnedLRUCache.h
|
// Copyright (c) 2018-Present Red Hat Inc. All rights reserved.
//
// Copyright (c) 2011-2018, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 and Apache 2.0 License
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef ROCKSDB_BINNED_LRU_CACHE
#define ROCKSDB_BINNED_LRU_CACHE
#include <string>
#include <mutex>
#include <boost/circular_buffer.hpp>
#include "ShardedCache.h"
#include "common/autovector.h"
#include "common/dout.h"
#include "include/ceph_assert.h"
#include "common/ceph_context.h"
namespace rocksdb_cache {
// LRU cache implementation
// An entry is a variable length heap-allocated structure.
// Entries are referenced by cache and/or by any external entity.
// The cache keeps all its entries in table. Some elements
// are also stored on LRU list.
//
// BinnedLRUHandle can be in these states:
// 1. Referenced externally AND in hash table.
// In that case the entry is *not* in the LRU. (refs > 1 && in_cache == true)
// 2. Not referenced externally and in hash table. In that case the entry is
// in the LRU and can be freed. (refs == 1 && in_cache == true)
// 3. Referenced externally and not in hash table. In that case the entry is
//    not on the LRU list and not in the hash table. (refs >= 1 && in_cache == false)
//
// All newly created BinnedLRUHandles are in state 1. If you call
// BinnedLRUCacheShard::Release
// on entry in state 1, it will go into state 2. To move from state 1 to
// state 3, either call BinnedLRUCacheShard::Erase or BinnedLRUCacheShard::Insert with the
// same key.
// To move from state 2 to state 1, use BinnedLRUCacheShard::Lookup.
// Before destruction, make sure that no handles are in state 1. This means
// that any successful BinnedLRUCacheShard::Lookup/BinnedLRUCacheShard::Insert have a
// matching
// BinnedLRUCacheShard::Release (to move into state 2) or BinnedLRUCacheShard::Erase (for state 3)
std::shared_ptr<rocksdb::Cache> NewBinnedLRUCache(
CephContext *c,
size_t capacity,
int num_shard_bits = -1,
bool strict_capacity_limit = false,
double high_pri_pool_ratio = 0.0);
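// Illustrative usage sketch (not part of the original header; cct, my_value,
// my_charge and my_deleter are placeholders a caller would supply):
//
//   std::shared_ptr<rocksdb::Cache> cache =
//     rocksdb_cache::NewBinnedLRUCache(cct, 64 * 1024 * 1024);
//   rocksdb::Cache::Handle* h = nullptr;
//   rocksdb::Status s =
//     cache->Insert(rocksdb::Slice("key"), my_value, my_charge, my_deleter, &h);
//   if (s.ok() && h) {
//     void* v = cache->Value(h);   // entry stays pinned while referenced
//     cache->Release(h);           // back onto the LRU list (state 1 -> 2)
//   }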
struct BinnedLRUHandle {
std::shared_ptr<uint64_t> age_bin;
void* value;
DeleterFn deleter;
BinnedLRUHandle* next_hash;
BinnedLRUHandle* next;
BinnedLRUHandle* prev;
size_t charge; // TODO(opt): Only allow uint32_t?
size_t key_length;
uint32_t refs; // a number of refs to this entry
// cache itself is counted as 1
// Include the following flags:
// in_cache: whether this entry is referenced by the hash table.
// is_high_pri: whether this entry is high priority entry.
// in_high_pri_pool: whether this entry is in high-pri pool.
char flags;
uint32_t hash; // Hash of key(); used for fast sharding and comparisons
char* key_data = nullptr; // Beginning of key
rocksdb::Slice key() const {
// For cheaper lookups, we allow a temporary Handle object
// to store a pointer to a key in "value".
if (next == this) {
return *(reinterpret_cast<rocksdb::Slice*>(value));
} else {
return rocksdb::Slice(key_data, key_length);
}
}
bool InCache() { return flags & 1; }
bool IsHighPri() { return flags & 2; }
bool InHighPriPool() { return flags & 4; }
bool HasHit() { return flags & 8; }
void SetInCache(bool in_cache) {
if (in_cache) {
flags |= 1;
} else {
flags &= ~1;
}
}
void SetPriority(rocksdb::Cache::Priority priority) {
if (priority == rocksdb::Cache::Priority::HIGH) {
flags |= 2;
} else {
flags &= ~2;
}
}
void SetInHighPriPool(bool in_high_pri_pool) {
if (in_high_pri_pool) {
flags |= 4;
} else {
flags &= ~4;
}
}
void SetHit() { flags |= 8; }
void Free() {
ceph_assert((refs == 1 && InCache()) || (refs == 0 && !InCache()));
if (deleter) {
(*deleter)(key(), value);
}
delete[] key_data;
delete this;
}
};
// We provide our own simple hash table since it removes a whole bunch
// of porting hacks and is also faster than some of the built-in hash
// table implementations in some of the compiler/runtime combinations
// we have tested. E.g., readrandom speeds up by ~5% over the g++
// 4.4.3's builtin hashtable.
class BinnedLRUHandleTable {
public:
BinnedLRUHandleTable();
~BinnedLRUHandleTable();
BinnedLRUHandle* Lookup(const rocksdb::Slice& key, uint32_t hash);
BinnedLRUHandle* Insert(BinnedLRUHandle* h);
BinnedLRUHandle* Remove(const rocksdb::Slice& key, uint32_t hash);
template <typename T>
void ApplyToAllCacheEntries(T func) {
for (uint32_t i = 0; i < length_; i++) {
BinnedLRUHandle* h = list_[i];
while (h != nullptr) {
auto n = h->next_hash;
ceph_assert(h->InCache());
func(h);
h = n;
}
}
}
private:
// Return a pointer to slot that points to a cache entry that
// matches key/hash. If there is no such cache entry, return a
// pointer to the trailing slot in the corresponding linked list.
BinnedLRUHandle** FindPointer(const rocksdb::Slice& key, uint32_t hash);
void Resize();
// The table consists of an array of buckets where each bucket is
// a linked list of cache entries that hash into the bucket.
BinnedLRUHandle** list_;
uint32_t length_;
uint32_t elems_;
};
// A single shard of sharded cache.
class alignas(CACHE_LINE_SIZE) BinnedLRUCacheShard : public CacheShard {
public:
BinnedLRUCacheShard(CephContext *c, size_t capacity, bool strict_capacity_limit,
double high_pri_pool_ratio);
virtual ~BinnedLRUCacheShard();
// Separate from constructor so caller can easily make an array of BinnedLRUCache
// if current usage is more than new capacity, the function will attempt to
// free the needed space
virtual void SetCapacity(size_t capacity) override;
  // Set the flag to reject insertion if the cache is full.
virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;
// Set percentage of capacity reserved for high-pri cache entries.
void SetHighPriPoolRatio(double high_pri_pool_ratio);
// Like Cache methods, but with an extra "hash" parameter.
virtual rocksdb::Status Insert(const rocksdb::Slice& key, uint32_t hash, void* value,
size_t charge,
DeleterFn deleter,
rocksdb::Cache::Handle** handle,
rocksdb::Cache::Priority priority) override;
virtual rocksdb::Cache::Handle* Lookup(const rocksdb::Slice& key, uint32_t hash) override;
virtual bool Ref(rocksdb::Cache::Handle* handle) override;
virtual bool Release(rocksdb::Cache::Handle* handle,
bool force_erase = false) override;
virtual void Erase(const rocksdb::Slice& key, uint32_t hash) override;
  // Although on some platforms the update of size_t is atomic, to make sure
// GetUsage() and GetPinnedUsage() work correctly under any platform, we'll
// protect them with mutex_.
virtual size_t GetUsage() const override;
virtual size_t GetPinnedUsage() const override;
virtual void ApplyToAllCacheEntries(
const std::function<void(const rocksdb::Slice& key,
void* value,
size_t charge,
DeleterFn)>& callback,
bool thread_safe) override;
virtual void EraseUnRefEntries() override;
virtual std::string GetPrintableOptions() const override;
virtual DeleterFn GetDeleter(rocksdb::Cache::Handle* handle) const override;
void TEST_GetLRUList(BinnedLRUHandle** lru, BinnedLRUHandle** lru_low_pri);
// Retrieves number of elements in LRU, for unit test purpose only
// not threadsafe
size_t TEST_GetLRUSize();
// Retrieves high pri pool ratio
double GetHighPriPoolRatio() const;
// Retrieves high pri pool usage
size_t GetHighPriPoolUsage() const;
// Rotate the bins
void shift_bins();
// Get the bin count
uint32_t get_bin_count() const;
// Set the bin count
void set_bin_count(uint32_t count);
// Get the byte counts for a range of age bins
uint64_t sum_bins(uint32_t start, uint32_t end) const;
private:
CephContext *cct;
void LRU_Remove(BinnedLRUHandle* e);
void LRU_Insert(BinnedLRUHandle* e);
// Overflow the last entry in high-pri pool to low-pri pool until size of
  // high-pri pool is no larger than the size specified by high_pri_pool_ratio.
void MaintainPoolSize();
// Just reduce the reference count by 1.
// Return true if last reference
bool Unref(BinnedLRUHandle* e);
// Free some space following strict LRU policy until enough space
// to hold (usage_ + charge) is freed or the lru list is empty
// This function is not thread safe - it needs to be executed while
// holding the mutex_
void EvictFromLRU(size_t charge, ceph::autovector<BinnedLRUHandle*>* deleted);
// Initialized before use.
size_t capacity_;
// Memory size for entries in high-pri pool.
size_t high_pri_pool_usage_;
// Whether to reject insertion if cache reaches its full capacity.
bool strict_capacity_limit_;
// Ratio of capacity reserved for high priority cache entries.
double high_pri_pool_ratio_;
// High-pri pool size, equals to capacity * high_pri_pool_ratio.
// Remember the value to avoid recomputing each time.
double high_pri_pool_capacity_;
// Dummy head of LRU list.
// lru.prev is newest entry, lru.next is oldest entry.
  // LRU contains items which can be evicted, i.e. referenced only by the cache
BinnedLRUHandle lru_;
// Pointer to head of low-pri pool in LRU list.
BinnedLRUHandle* lru_low_pri_;
// ------------^^^^^^^^^^^^^-----------
// Not frequently modified data members
// ------------------------------------
//
// We separate data members that are updated frequently from the ones that
// are not frequently updated so that they don't share the same cache line
  // which would lead to false sharing
//
// ------------------------------------
// Frequently modified data members
// ------------vvvvvvvvvvvvv-----------
BinnedLRUHandleTable table_;
// Memory size for entries residing in the cache
size_t usage_;
// Memory size for entries residing only in the LRU list
size_t lru_usage_;
// mutex_ protects the following state.
// We don't count mutex_ as the cache's internal state so semantically we
// don't mind mutex_ invoking the non-const actions.
mutable std::mutex mutex_;
// Circular buffer of byte counters for age binning
boost::circular_buffer<std::shared_ptr<uint64_t>> age_bins;
};
class BinnedLRUCache : public ShardedCache {
public:
BinnedLRUCache(CephContext *c, size_t capacity, int num_shard_bits,
bool strict_capacity_limit, double high_pri_pool_ratio);
virtual ~BinnedLRUCache();
virtual const char* Name() const override { return "BinnedLRUCache"; }
virtual CacheShard* GetShard(int shard) override;
virtual const CacheShard* GetShard(int shard) const override;
virtual void* Value(Handle* handle) override;
virtual size_t GetCharge(Handle* handle) const override;
virtual uint32_t GetHash(Handle* handle) const override;
virtual void DisownData() override;
#if (ROCKSDB_MAJOR >= 7 || (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22))
virtual DeleterFn GetDeleter(Handle* handle) const override;
#endif
// Retrieves number of elements in LRU, for unit test purpose only
size_t TEST_GetLRUSize();
// Sets the high pri pool ratio
void SetHighPriPoolRatio(double high_pri_pool_ratio);
// Retrieves high pri pool ratio
double GetHighPriPoolRatio() const;
// Retrieves high pri pool usage
size_t GetHighPriPoolUsage() const;
// PriorityCache
virtual int64_t request_cache_bytes(
PriorityCache::Priority pri, uint64_t total_cache) const;
virtual int64_t commit_cache_size(uint64_t total_cache);
virtual int64_t get_committed_size() const {
return GetCapacity();
}
virtual void shift_bins();
uint64_t sum_bins(uint32_t start, uint32_t end) const;
uint32_t get_bin_count() const;
void set_bin_count(uint32_t count);
virtual std::string get_cache_name() const {
return "RocksDB Binned LRU Cache";
}
private:
CephContext *cct;
BinnedLRUCacheShard* shards_;
int num_shards_ = 0;
};
} // namespace rocksdb_cache
#endif // ROCKSDB_BINNED_LRU_CACHE
| 12,504 | 33.073569 | 92 |
h
|
null |
ceph-main/src/kv/rocksdb_cache/ShardedCache.cc
|
// Copyright (c) 2018-Present Red Hat Inc. All rights reserved.
//
// Copyright (c) 2011-2018, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 and Apache 2.0 License
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include "ShardedCache.h"
#include <string>
namespace rocksdb_cache {
ShardedCache::ShardedCache(size_t capacity, int num_shard_bits,
bool strict_capacity_limit)
: num_shard_bits_(num_shard_bits),
capacity_(capacity),
strict_capacity_limit_(strict_capacity_limit),
last_id_(1) {}
void ShardedCache::SetCapacity(size_t capacity) {
int num_shards = 1 << num_shard_bits_;
const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
std::lock_guard<std::mutex> l(capacity_mutex_);
for (int s = 0; s < num_shards; s++) {
GetShard(s)->SetCapacity(per_shard);
}
capacity_ = capacity;
}
void ShardedCache::SetStrictCapacityLimit(bool strict_capacity_limit) {
int num_shards = 1 << num_shard_bits_;
std::lock_guard<std::mutex> l(capacity_mutex_);
for (int s = 0; s < num_shards; s++) {
GetShard(s)->SetStrictCapacityLimit(strict_capacity_limit);
}
strict_capacity_limit_ = strict_capacity_limit;
}
rocksdb::Status ShardedCache::Insert(const rocksdb::Slice& key, void* value, size_t charge,
DeleterFn deleter,
rocksdb::Cache::Handle** handle, Priority priority) {
uint32_t hash = HashSlice(key);
return GetShard(Shard(hash))
->Insert(key, hash, value, charge, deleter, handle, priority);
}
rocksdb::Cache::Handle* ShardedCache::Lookup(const rocksdb::Slice& key, rocksdb::Statistics* /*stats*/) {
uint32_t hash = HashSlice(key);
return GetShard(Shard(hash))->Lookup(key, hash);
}
bool ShardedCache::Ref(rocksdb::Cache::Handle* handle) {
uint32_t hash = GetHash(handle);
return GetShard(Shard(hash))->Ref(handle);
}
bool ShardedCache::Release(rocksdb::Cache::Handle* handle, bool force_erase) {
uint32_t hash = GetHash(handle);
return GetShard(Shard(hash))->Release(handle, force_erase);
}
void ShardedCache::Erase(const rocksdb::Slice& key) {
uint32_t hash = HashSlice(key);
GetShard(Shard(hash))->Erase(key, hash);
}
uint64_t ShardedCache::NewId() {
return last_id_.fetch_add(1, std::memory_order_relaxed);
}
size_t ShardedCache::GetCapacity() const {
std::lock_guard<std::mutex> l(capacity_mutex_);
return capacity_;
}
bool ShardedCache::HasStrictCapacityLimit() const {
std::lock_guard<std::mutex> l(capacity_mutex_);
return strict_capacity_limit_;
}
size_t ShardedCache::GetUsage() const {
// We will not lock the cache when getting the usage from shards.
int num_shards = 1 << num_shard_bits_;
size_t usage = 0;
for (int s = 0; s < num_shards; s++) {
usage += GetShard(s)->GetUsage();
}
return usage;
}
size_t ShardedCache::GetUsage(rocksdb::Cache::Handle* handle) const {
return GetCharge(handle);
}
size_t ShardedCache::GetPinnedUsage() const {
// We will not lock the cache when getting the usage from shards.
int num_shards = 1 << num_shard_bits_;
size_t usage = 0;
for (int s = 0; s < num_shards; s++) {
usage += GetShard(s)->GetPinnedUsage();
}
return usage;
}
#if (ROCKSDB_MAJOR >= 7 || (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22))
DeleterFn ShardedCache::GetDeleter(Handle* handle) const
{
uint32_t hash = GetHash(handle);
return GetShard(Shard(hash))->GetDeleter(handle);
}
void ShardedCache::ApplyToAllEntries(
const std::function<void(const rocksdb::Slice& key, void* value, size_t charge,
DeleterFn deleter)>& callback,
const ApplyToAllEntriesOptions& opts)
{
int num_shards = 1 << num_shard_bits_;
for (int s = 0; s < num_shards; s++) {
GetShard(s)->ApplyToAllCacheEntries(callback, true /* thread_safe */);
}
}
#else
void ShardedCache::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
bool thread_safe) {
int num_shards = 1 << num_shard_bits_;
for (int s = 0; s < num_shards; s++) {
GetShard(s)->ApplyToAllCacheEntries(
[callback](const rocksdb::Slice&, void* value, size_t charge, DeleterFn) {
callback(value, charge);
},
thread_safe);
}
}
#endif
void ShardedCache::EraseUnRefEntries() {
int num_shards = 1 << num_shard_bits_;
for (int s = 0; s < num_shards; s++) {
GetShard(s)->EraseUnRefEntries();
}
}
std::string ShardedCache::GetPrintableOptions() const {
std::string ret;
ret.reserve(20000);
const int kBufferSize = 200;
char buffer[kBufferSize];
{
std::lock_guard<std::mutex> l(capacity_mutex_);
snprintf(buffer, kBufferSize, " capacity : %zu\n",
capacity_);
ret.append(buffer);
snprintf(buffer, kBufferSize, " num_shard_bits : %d\n", num_shard_bits_);
ret.append(buffer);
snprintf(buffer, kBufferSize, " strict_capacity_limit : %d\n",
strict_capacity_limit_);
ret.append(buffer);
}
ret.append(GetShard(0)->GetPrintableOptions());
return ret;
}
int GetDefaultCacheShardBits(size_t capacity) {
int num_shard_bits = 0;
size_t min_shard_size = 512L * 1024L; // Every shard is at least 512KB.
size_t num_shards = capacity / min_shard_size;
while (num_shards >>= 1) {
if (++num_shard_bits >= 6) {
// No more than 6.
return num_shard_bits;
}
}
return num_shard_bits;
}
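// Worked example (illustrative): with capacity = 64 MiB and the 512 KiB
// minimum shard size, num_shards starts at 128; the loop stops once the bit
// count reaches the cap of 6, i.e. 2^6 = 64 shards of roughly 1 MiB each.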
} // namespace rocksdb_cache
| 5,696 | 30.131148 | 105 |
cc
|
null |
ceph-main/src/kv/rocksdb_cache/ShardedCache.h
|
// Copyright (c) 2018-Present Red Hat Inc. All rights reserved.
//
// Copyright (c) 2011-2018, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 and Apache 2.0 License
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef ROCKSDB_SHARDED_CACHE
#define ROCKSDB_SHARDED_CACHE
#include <atomic>
#include <string>
#include <mutex>
#include "rocksdb/version.h"
#include "rocksdb/cache.h"
#include "include/ceph_hash.h"
#include "common/PriorityCache.h"
//#include "hash.h"
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64 // XXX arch-specific define
#endif
namespace rocksdb_cache {
using DeleterFn = void (*)(const rocksdb::Slice& key, void* value);
// Single cache shard interface.
class CacheShard {
public:
CacheShard() = default;
virtual ~CacheShard() = default;
virtual rocksdb::Status Insert(const rocksdb::Slice& key, uint32_t hash, void* value,
size_t charge,
DeleterFn deleter,
rocksdb::Cache::Handle** handle, rocksdb::Cache::Priority priority) = 0;
virtual rocksdb::Cache::Handle* Lookup(const rocksdb::Slice& key, uint32_t hash) = 0;
virtual bool Ref(rocksdb::Cache::Handle* handle) = 0;
virtual bool Release(rocksdb::Cache::Handle* handle, bool force_erase = false) = 0;
virtual void Erase(const rocksdb::Slice& key, uint32_t hash) = 0;
virtual void SetCapacity(size_t capacity) = 0;
virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;
virtual size_t GetUsage() const = 0;
virtual size_t GetPinnedUsage() const = 0;
virtual void ApplyToAllCacheEntries(
const std::function<void(const rocksdb::Slice& key,
void* value,
size_t charge,
DeleterFn)>& callback,
bool thread_safe) = 0;
virtual void EraseUnRefEntries() = 0;
virtual std::string GetPrintableOptions() const { return ""; }
virtual DeleterFn GetDeleter(rocksdb::Cache::Handle* handle) const = 0;
};
// Generic cache interface which shards cache by hash of keys. 2^num_shard_bits
// shards will be created, with capacity split evenly to each of the shards.
// Keys are sharded by the highest num_shard_bits bits of hash value.
class ShardedCache : public rocksdb::Cache, public PriorityCache::PriCache {
public:
ShardedCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit);
virtual ~ShardedCache() = default;
// rocksdb::Cache
virtual const char* Name() const override = 0;
virtual rocksdb::Status Insert(const rocksdb::Slice& key, void* value, size_t charge,
DeleterFn,
rocksdb::Cache::Handle** handle, Priority priority) override;
virtual rocksdb::Cache::Handle* Lookup(const rocksdb::Slice& key, rocksdb::Statistics* stats) override;
virtual bool Ref(rocksdb::Cache::Handle* handle) override;
virtual bool Release(rocksdb::Cache::Handle* handle, bool force_erase = false) override;
virtual void* Value(Handle* handle) override = 0;
virtual void Erase(const rocksdb::Slice& key) override;
virtual uint64_t NewId() override;
virtual void SetCapacity(size_t capacity) override;
virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;
virtual bool HasStrictCapacityLimit() const override;
virtual size_t GetCapacity() const override;
virtual size_t GetUsage() const override;
virtual size_t GetUsage(rocksdb::Cache::Handle* handle) const override;
virtual size_t GetPinnedUsage() const override;
virtual size_t GetCharge(Handle* handle) const = 0;
#if (ROCKSDB_MAJOR >= 7 || (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22))
virtual DeleterFn GetDeleter(Handle* handle) const override;
#endif
virtual void DisownData() override = 0;
#if (ROCKSDB_MAJOR >= 7 || (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22))
virtual void ApplyToAllEntries(
const std::function<void(const rocksdb::Slice& key, void* value, size_t charge,
DeleterFn deleter)>& callback,
const ApplyToAllEntriesOptions& opts) override;
#else
virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
bool thread_safe) override;
#endif
virtual void EraseUnRefEntries() override;
virtual std::string GetPrintableOptions() const override;
virtual CacheShard* GetShard(int shard) = 0;
virtual const CacheShard* GetShard(int shard) const = 0;
virtual uint32_t GetHash(Handle* handle) const = 0;
int GetNumShardBits() const { return num_shard_bits_; }
virtual uint32_t get_bin_count() const = 0;
virtual void set_bin_count(uint32_t count) = 0;
// PriCache
virtual int64_t get_cache_bytes(PriorityCache::Priority pri) const {
return cache_bytes[pri];
}
virtual int64_t get_cache_bytes() const {
int64_t total = 0;
for (int i = 0; i < PriorityCache::Priority::LAST + 1; i++) {
PriorityCache::Priority pri = static_cast<PriorityCache::Priority>(i);
total += get_cache_bytes(pri);
}
return total;
}
virtual void set_cache_bytes(PriorityCache::Priority pri, int64_t bytes) {
cache_bytes[pri] = bytes;
}
virtual void add_cache_bytes(PriorityCache::Priority pri, int64_t bytes) {
cache_bytes[pri] += bytes;
}
virtual double get_cache_ratio() const {
return cache_ratio;
}
virtual void set_cache_ratio(double ratio) {
cache_ratio = ratio;
}
virtual uint64_t get_bins(PriorityCache::Priority pri) const {
if (pri > PriorityCache::Priority::PRI0 &&
pri < PriorityCache::Priority::LAST) {
return bins[pri];
}
return 0;
}
virtual void set_bins(PriorityCache::Priority pri, uint64_t end_bin) {
if (pri <= PriorityCache::Priority::PRI0 ||
pri >= PriorityCache::Priority::LAST) {
return;
}
bins[pri] = end_bin;
uint64_t max = 0;
for (int pri = 1; pri < PriorityCache::Priority::LAST; pri++) {
if (bins[pri] > max) {
max = bins[pri];
}
}
set_bin_count(max);
}
virtual void import_bins(const std::vector<uint64_t> &bins_v) {
uint64_t max = 0;
for (int pri = 1; pri < PriorityCache::Priority::LAST; pri++) {
unsigned i = (unsigned) pri - 1;
if (i < bins_v.size()) {
bins[pri] = bins_v[i];
if (bins[pri] > max) {
max = bins[pri];
}
} else {
bins[pri] = 0;
}
}
set_bin_count(max);
}
virtual std::string get_cache_name() const = 0;
private:
static inline uint32_t HashSlice(const rocksdb::Slice& s) {
return ceph_str_hash(CEPH_STR_HASH_RJENKINS, s.data(), s.size());
// return Hash(s.data(), s.size(), 0);
}
uint32_t Shard(uint32_t hash) const {
// Note, hash >> 32 yields hash in gcc, not the zero we expect!
return (num_shard_bits_ > 0) ? (hash >> (32 - num_shard_bits_)) : 0;
}
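  // Example (illustrative): with num_shard_bits_ = 4, the top four bits of
  // the 32-bit hash pick one of 16 shards, e.g. 0xA3000000 >> 28 == 0xA, so
  // that key lands in shard 10.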
uint64_t bins[PriorityCache::Priority::LAST+1] = {0};
int64_t cache_bytes[PriorityCache::Priority::LAST+1] = {0};
double cache_ratio = 0;
int num_shard_bits_;
mutable std::mutex capacity_mutex_;
size_t capacity_;
bool strict_capacity_limit_;
std::atomic<uint64_t> last_id_;
};
extern int GetDefaultCacheShardBits(size_t capacity);
} // namespace rocksdb_cache
#endif // ROCKSDB_SHARDED_CACHE
| 7,499 | 36.878788 | 105 |
h
|
null |
ceph-main/src/librados/AioCompletionImpl.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_AIOCOMPLETIONIMPL_H
#define CEPH_LIBRADOS_AIOCOMPLETIONIMPL_H
#include "common/ceph_mutex.h"
#include "include/buffer.h"
#include "include/xlist.h"
#include "osd/osd_types.h"
class IoCtxImpl;
struct librados::AioCompletionImpl {
ceph::mutex lock = ceph::make_mutex("AioCompletionImpl lock", false);
ceph::condition_variable cond;
int ref = 1, rval = 0;
bool released = false;
bool complete = false;
version_t objver = 0;
ceph_tid_t tid = 0;
rados_callback_t callback_complete = nullptr, callback_safe = nullptr;
void *callback_complete_arg = nullptr, *callback_safe_arg = nullptr;
// for read
bool is_read = false;
bufferlist bl;
bufferlist *blp = nullptr;
char *out_buf = nullptr;
IoCtxImpl *io = nullptr;
ceph_tid_t aio_write_seq = 0;
xlist<AioCompletionImpl*>::item aio_write_list_item;
AioCompletionImpl() : aio_write_list_item(this) { }
int set_complete_callback(void *cb_arg, rados_callback_t cb) {
std::scoped_lock l{lock};
callback_complete = cb;
callback_complete_arg = cb_arg;
return 0;
}
int set_safe_callback(void *cb_arg, rados_callback_t cb) {
std::scoped_lock l{lock};
callback_safe = cb;
callback_safe_arg = cb_arg;
return 0;
}
int wait_for_complete() {
std::unique_lock l{lock};
cond.wait(l, [this] { return complete; });
return 0;
}
int wait_for_safe() {
return wait_for_complete();
}
int is_complete() {
std::scoped_lock l{lock};
return complete;
}
int is_safe() {
return is_complete();
}
int wait_for_complete_and_cb() {
std::unique_lock l{lock};
cond.wait(l, [this] { return complete && !callback_complete && !callback_safe; });
return 0;
}
int wait_for_safe_and_cb() {
return wait_for_complete_and_cb();
}
int is_complete_and_cb() {
std::scoped_lock l{lock};
return complete && !callback_complete && !callback_safe;
}
int is_safe_and_cb() {
return is_complete_and_cb();
}
int get_return_value() {
std::scoped_lock l{lock};
return rval;
}
uint64_t get_version() {
std::scoped_lock l{lock};
return objver;
}
void get() {
std::scoped_lock l{lock};
_get();
}
void _get() {
ceph_assert(ceph_mutex_is_locked(lock));
ceph_assert(ref > 0);
++ref;
}
void release() {
lock.lock();
ceph_assert(!released);
released = true;
put_unlock();
}
void put() {
lock.lock();
put_unlock();
}
void put_unlock() {
ceph_assert(ref > 0);
int n = --ref;
lock.unlock();
if (!n)
delete this;
}
};
namespace librados {
struct CB_AioComplete {
AioCompletionImpl *c;
explicit CB_AioComplete(AioCompletionImpl *cc) : c(cc) {
c->_get();
}
void operator()() {
rados_callback_t cb_complete = c->callback_complete;
void *cb_complete_arg = c->callback_complete_arg;
if (cb_complete)
cb_complete(c, cb_complete_arg);
rados_callback_t cb_safe = c->callback_safe;
void *cb_safe_arg = c->callback_safe_arg;
if (cb_safe)
cb_safe(c, cb_safe_arg);
c->lock.lock();
c->callback_complete = NULL;
c->callback_safe = NULL;
c->cond.notify_all();
c->put_unlock();
}
};
/**
* Fills in all completed request data, and calls both
* complete and safe callbacks if they exist.
*
* Not useful for usual I/O, but for special things like
* flush where we only want to wait for things to be safe,
* but allow users to specify any of the callbacks.
*/
struct CB_AioCompleteAndSafe {
AioCompletionImpl *c;
explicit CB_AioCompleteAndSafe(AioCompletionImpl *cc) : c(cc) {
c->get();
}
CB_AioCompleteAndSafe(const CB_AioCompleteAndSafe&) = delete;
CB_AioCompleteAndSafe& operator =(const CB_AioCompleteAndSafe&) = delete;
CB_AioCompleteAndSafe(CB_AioCompleteAndSafe&& rhs) {
c = rhs.c;
rhs.c = nullptr;
}
CB_AioCompleteAndSafe& operator =(CB_AioCompleteAndSafe&& rhs) {
c = rhs.c;
rhs.c = nullptr;
return *this;
}
void operator()(int r = 0) {
c->lock.lock();
c->rval = r;
c->complete = true;
c->lock.unlock();
rados_callback_t cb_complete = c->callback_complete;
void *cb_complete_arg = c->callback_complete_arg;
if (cb_complete)
cb_complete(c, cb_complete_arg);
rados_callback_t cb_safe = c->callback_safe;
void *cb_safe_arg = c->callback_safe_arg;
if (cb_safe)
cb_safe(c, cb_safe_arg);
c->lock.lock();
c->callback_complete = NULL;
c->callback_safe = NULL;
c->cond.notify_all();
c->put_unlock();
}
};
}
#endif
| 5,013 | 22.990431 | 86 |
h
|
null |
ceph-main/src/librados/IoCtxImpl.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <limits.h>
#include "IoCtxImpl.h"
#include "librados/librados_c.h"
#include "librados/AioCompletionImpl.h"
#include "librados/PoolAsyncCompletionImpl.h"
#include "librados/RadosClient.h"
#include "include/ceph_assert.h"
#include "common/valgrind.h"
#include "common/EventTrace.h"
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "librados: "
using std::string;
using std::map;
using std::unique_lock;
using std::vector;
namespace bs = boost::system;
namespace ca = ceph::async;
namespace cb = ceph::buffer;
namespace librados {
namespace {
struct CB_notify_Finish {
CephContext *cct;
Context *ctx;
Objecter *objecter;
Objecter::LingerOp *linger_op;
bufferlist *preply_bl;
char **preply_buf;
size_t *preply_buf_len;
CB_notify_Finish(CephContext *_cct, Context *_ctx, Objecter *_objecter,
Objecter::LingerOp *_linger_op, bufferlist *_preply_bl,
char **_preply_buf, size_t *_preply_buf_len)
: cct(_cct), ctx(_ctx), objecter(_objecter), linger_op(_linger_op),
preply_bl(_preply_bl), preply_buf(_preply_buf),
preply_buf_len(_preply_buf_len) {}
// move-only
CB_notify_Finish(const CB_notify_Finish&) = delete;
CB_notify_Finish& operator =(const CB_notify_Finish&) = delete;
CB_notify_Finish(CB_notify_Finish&&) = default;
CB_notify_Finish& operator =(CB_notify_Finish&&) = default;
void operator()(bs::error_code ec, bufferlist&& reply_bl) {
ldout(cct, 10) << __func__ << " completed notify (linger op "
<< linger_op << "), ec = " << ec << dendl;
// pass result back to user
// NOTE: we do this regardless of what error code we return
if (preply_buf) {
if (reply_bl.length()) {
*preply_buf = (char*)malloc(reply_bl.length());
memcpy(*preply_buf, reply_bl.c_str(), reply_bl.length());
} else {
*preply_buf = NULL;
}
}
if (preply_buf_len)
*preply_buf_len = reply_bl.length();
if (preply_bl)
*preply_bl = std::move(reply_bl);
ctx->complete(ceph::from_error_code(ec));
}
};
struct CB_aio_linger_cancel {
Objecter *objecter;
Objecter::LingerOp *linger_op;
CB_aio_linger_cancel(Objecter *_objecter, Objecter::LingerOp *_linger_op)
: objecter(_objecter), linger_op(_linger_op)
{
}
void operator()() {
objecter->linger_cancel(linger_op);
}
};
struct C_aio_linger_Complete : public Context {
AioCompletionImpl *c;
Objecter::LingerOp *linger_op;
bool cancel;
C_aio_linger_Complete(AioCompletionImpl *_c, Objecter::LingerOp *_linger_op, bool _cancel)
: c(_c), linger_op(_linger_op), cancel(_cancel)
{
c->get();
}
void finish(int r) override {
if (cancel || r < 0)
boost::asio::defer(c->io->client->finish_strand,
CB_aio_linger_cancel(c->io->objecter,
linger_op));
c->lock.lock();
c->rval = r;
c->complete = true;
c->cond.notify_all();
if (c->callback_complete ||
c->callback_safe) {
boost::asio::defer(c->io->client->finish_strand, CB_AioComplete(c));
}
c->put_unlock();
}
};
struct C_aio_notify_Complete : public C_aio_linger_Complete {
ceph::mutex lock = ceph::make_mutex("C_aio_notify_Complete::lock");
bool acked = false;
bool finished = false;
int ret_val = 0;
C_aio_notify_Complete(AioCompletionImpl *_c, Objecter::LingerOp *_linger_op)
: C_aio_linger_Complete(_c, _linger_op, false) {
}
void handle_ack(int r) {
// invoked by C_aio_notify_Ack
lock.lock();
acked = true;
complete_unlock(r);
}
void complete(int r) override {
// invoked by C_notify_Finish
lock.lock();
finished = true;
complete_unlock(r);
}
void complete_unlock(int r) {
if (ret_val == 0 && r < 0) {
ret_val = r;
}
if (acked && finished) {
lock.unlock();
cancel = true;
C_aio_linger_Complete::complete(ret_val);
} else {
lock.unlock();
}
}
};
struct C_aio_notify_Ack : public Context {
CephContext *cct;
C_aio_notify_Complete *oncomplete;
C_aio_notify_Ack(CephContext *_cct,
C_aio_notify_Complete *_oncomplete)
: cct(_cct), oncomplete(_oncomplete)
{
}
void finish(int r) override
{
ldout(cct, 10) << __func__ << " linger op " << oncomplete->linger_op << " "
<< "acked (" << r << ")" << dendl;
oncomplete->handle_ack(r);
}
};
struct C_aio_selfmanaged_snap_op_Complete : public Context {
librados::RadosClient *client;
librados::AioCompletionImpl *c;
C_aio_selfmanaged_snap_op_Complete(librados::RadosClient *client,
librados::AioCompletionImpl *c)
: client(client), c(c) {
c->get();
}
void finish(int r) override {
c->lock.lock();
c->rval = r;
c->complete = true;
c->cond.notify_all();
if (c->callback_complete || c->callback_safe) {
boost::asio::defer(client->finish_strand, librados::CB_AioComplete(c));
}
c->put_unlock();
}
};
struct C_aio_selfmanaged_snap_create_Complete : public C_aio_selfmanaged_snap_op_Complete {
snapid_t snapid;
uint64_t *dest_snapid;
C_aio_selfmanaged_snap_create_Complete(librados::RadosClient *client,
librados::AioCompletionImpl *c,
uint64_t *dest_snapid)
: C_aio_selfmanaged_snap_op_Complete(client, c),
dest_snapid(dest_snapid) {
}
void finish(int r) override {
if (r >= 0) {
*dest_snapid = snapid;
}
C_aio_selfmanaged_snap_op_Complete::finish(r);
}
};
} // anonymous namespace
} // namespace librados
librados::IoCtxImpl::IoCtxImpl() = default;
librados::IoCtxImpl::IoCtxImpl(RadosClient *c, Objecter *objecter,
int64_t poolid, snapid_t s)
: client(c), poolid(poolid), snap_seq(s),
notify_timeout(c->cct->_conf->client_notify_timeout),
oloc(poolid),
aio_write_seq(0), objecter(objecter)
{
}
void librados::IoCtxImpl::set_snap_read(snapid_t s)
{
if (!s)
s = CEPH_NOSNAP;
ldout(client->cct, 10) << "set snap read " << snap_seq << " -> " << s << dendl;
snap_seq = s;
}
int librados::IoCtxImpl::set_snap_write_context(snapid_t seq, vector<snapid_t>& snaps)
{
::SnapContext n;
ldout(client->cct, 10) << "set snap write context: seq = " << seq
<< " and snaps = " << snaps << dendl;
n.seq = seq;
n.snaps = snaps;
if (!n.is_valid())
return -EINVAL;
snapc = n;
return 0;
}
int librados::IoCtxImpl::get_object_hash_position(
const std::string& oid, uint32_t *hash_position)
{
int64_t r = objecter->get_object_hash_position(poolid, oid, oloc.nspace);
if (r < 0)
return r;
*hash_position = (uint32_t)r;
return 0;
}
int librados::IoCtxImpl::get_object_pg_hash_position(
const std::string& oid, uint32_t *pg_hash_position)
{
int64_t r = objecter->get_object_pg_hash_position(poolid, oid, oloc.nspace);
if (r < 0)
return r;
*pg_hash_position = (uint32_t)r;
return 0;
}
void librados::IoCtxImpl::queue_aio_write(AioCompletionImpl *c)
{
get();
std::scoped_lock l{aio_write_list_lock};
ceph_assert(c->io == this);
c->aio_write_seq = ++aio_write_seq;
ldout(client->cct, 20) << "queue_aio_write " << this << " completion " << c
<< " write_seq " << aio_write_seq << dendl;
aio_write_list.push_back(&c->aio_write_list_item);
}
void librados::IoCtxImpl::complete_aio_write(AioCompletionImpl *c)
{
ldout(client->cct, 20) << "complete_aio_write " << c << dendl;
aio_write_list_lock.lock();
ceph_assert(c->io == this);
c->aio_write_list_item.remove_myself();
map<ceph_tid_t, std::list<AioCompletionImpl*> >::iterator waiters = aio_write_waiters.begin();
while (waiters != aio_write_waiters.end()) {
if (!aio_write_list.empty() &&
aio_write_list.front()->aio_write_seq <= waiters->first) {
ldout(client->cct, 20) << " next outstanding write is " << aio_write_list.front()->aio_write_seq
<< " <= waiter " << waiters->first
<< ", stopping" << dendl;
break;
}
ldout(client->cct, 20) << " waking waiters on seq " << waiters->first << dendl;
for (std::list<AioCompletionImpl*>::iterator it = waiters->second.begin();
it != waiters->second.end(); ++it) {
boost::asio::defer(client->finish_strand, CB_AioCompleteAndSafe(*it));
(*it)->put();
}
aio_write_waiters.erase(waiters++);
}
aio_write_cond.notify_all();
aio_write_list_lock.unlock();
put();
}
void librados::IoCtxImpl::flush_aio_writes_async(AioCompletionImpl *c)
{
ldout(client->cct, 20) << "flush_aio_writes_async " << this
<< " completion " << c << dendl;
std::lock_guard l(aio_write_list_lock);
ceph_tid_t seq = aio_write_seq;
if (aio_write_list.empty()) {
ldout(client->cct, 20) << "flush_aio_writes_async no writes. (tid "
<< seq << ")" << dendl;
boost::asio::defer(client->finish_strand, CB_AioCompleteAndSafe(c));
} else {
ldout(client->cct, 20) << "flush_aio_writes_async " << aio_write_list.size()
<< " writes in flight; waiting on tid " << seq << dendl;
c->get();
aio_write_waiters[seq].push_back(c);
}
}
void librados::IoCtxImpl::flush_aio_writes()
{
ldout(client->cct, 20) << "flush_aio_writes" << dendl;
std::unique_lock l{aio_write_list_lock};
aio_write_cond.wait(l, [seq=aio_write_seq, this] {
return (aio_write_list.empty() ||
aio_write_list.front()->aio_write_seq > seq);
});
}
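// Sequencing note (illustrative, not from the original source): the flush only
// waits for writes queued before aio_write_seq was sampled above, so writes
// issued concurrently after that snapshot cannot stall the flush indefinitely.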
string librados::IoCtxImpl::get_cached_pool_name()
{
std::string pn;
client->pool_get_name(get_id(), &pn);
return pn;
}
// SNAPS
int librados::IoCtxImpl::snap_create(const char *snapName)
{
int reply;
string sName(snapName);
ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::snap_create::mylock");
ceph::condition_variable cond;
bool done;
Context *onfinish = new C_SafeCond(mylock, cond, &done, &reply);
objecter->create_pool_snap(poolid, sName, onfinish);
std::unique_lock l{mylock};
cond.wait(l, [&done] { return done; });
return reply;
}
int librados::IoCtxImpl::selfmanaged_snap_create(uint64_t *psnapid)
{
int reply;
ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::selfmanaged_snap_create::mylock");
ceph::condition_variable cond;
bool done;
Context *onfinish = new C_SafeCond(mylock, cond, &done, &reply);
snapid_t snapid;
objecter->allocate_selfmanaged_snap(poolid, &snapid, onfinish);
{
std::unique_lock l{mylock};
cond.wait(l, [&done] { return done; });
}
if (reply == 0)
*psnapid = snapid;
return reply;
}
void librados::IoCtxImpl::aio_selfmanaged_snap_create(uint64_t *snapid,
AioCompletionImpl *c)
{
C_aio_selfmanaged_snap_create_Complete *onfinish =
new C_aio_selfmanaged_snap_create_Complete(client, c, snapid);
objecter->allocate_selfmanaged_snap(poolid, &onfinish->snapid,
onfinish);
}
int librados::IoCtxImpl::snap_remove(const char *snapName)
{
int reply;
string sName(snapName);
ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::snap_remove::mylock");
ceph::condition_variable cond;
bool done;
Context *onfinish = new C_SafeCond(mylock, cond, &done, &reply);
objecter->delete_pool_snap(poolid, sName, onfinish);
unique_lock l{mylock};
cond.wait(l, [&done] { return done; });
return reply;
}
int librados::IoCtxImpl::selfmanaged_snap_rollback_object(const object_t& oid,
::SnapContext& snapc,
uint64_t snapid)
{
int reply;
ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::snap_rollback::mylock");
ceph::condition_variable cond;
bool done;
Context *onack = new C_SafeCond(mylock, cond, &done, &reply);
::ObjectOperation op;
prepare_assert_ops(&op);
op.rollback(snapid);
objecter->mutate(oid, oloc,
op, snapc, ceph::real_clock::now(),
extra_op_flags,
onack, NULL);
std::unique_lock l{mylock};
cond.wait(l, [&done] { return done; });
return reply;
}
int librados::IoCtxImpl::rollback(const object_t& oid, const char *snapName)
{
snapid_t snap;
int r = objecter->pool_snap_by_name(poolid, snapName, &snap);
if (r < 0) {
return r;
}
return selfmanaged_snap_rollback_object(oid, snapc, snap);
}
int librados::IoCtxImpl::selfmanaged_snap_remove(uint64_t snapid)
{
int reply;
ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::selfmanaged_snap_remove::mylock");
ceph::condition_variable cond;
bool done;
objecter->delete_selfmanaged_snap(poolid, snapid_t(snapid),
new C_SafeCond(mylock, cond, &done, &reply));
std::unique_lock l{mylock};
cond.wait(l, [&done] { return done; });
return (int)reply;
}
void librados::IoCtxImpl::aio_selfmanaged_snap_remove(uint64_t snapid,
AioCompletionImpl *c)
{
Context *onfinish = new C_aio_selfmanaged_snap_op_Complete(client, c);
objecter->delete_selfmanaged_snap(poolid, snapid, onfinish);
}
int librados::IoCtxImpl::snap_list(vector<uint64_t> *snaps)
{
return objecter->pool_snap_list(poolid, snaps);
}
int librados::IoCtxImpl::snap_lookup(const char *name, uint64_t *snapid)
{
return objecter->pool_snap_by_name(poolid, name, (snapid_t *)snapid);
}
int librados::IoCtxImpl::snap_get_name(uint64_t snapid, std::string *s)
{
pool_snap_info_t info;
int ret = objecter->pool_snap_get_info(poolid, snapid, &info);
if (ret < 0) {
return ret;
}
*s = info.name.c_str();
return 0;
}
int librados::IoCtxImpl::snap_get_stamp(uint64_t snapid, time_t *t)
{
pool_snap_info_t info;
int ret = objecter->pool_snap_get_info(poolid, snapid, &info);
if (ret < 0) {
return ret;
}
*t = info.stamp.sec();
return 0;
}
// IO
int librados::IoCtxImpl::nlist(Objecter::NListContext *context, int max_entries)
{
bool done;
int r = 0;
ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::nlist::mylock");
ceph::condition_variable cond;
if (context->at_end())
return 0;
context->max_entries = max_entries;
context->nspace = oloc.nspace;
objecter->list_nobjects(context, new C_SafeCond(mylock, cond, &done, &r));
std::unique_lock l{mylock};
cond.wait(l, [&done] { return done; });
return r;
}
uint32_t librados::IoCtxImpl::nlist_seek(Objecter::NListContext *context,
uint32_t pos)
{
context->list.clear();
return objecter->list_nobjects_seek(context, pos);
}
uint32_t librados::IoCtxImpl::nlist_seek(Objecter::NListContext *context,
const rados_object_list_cursor& cursor)
{
context->list.clear();
return objecter->list_nobjects_seek(context, *(const hobject_t *)cursor);
}
rados_object_list_cursor librados::IoCtxImpl::nlist_get_cursor(Objecter::NListContext *context)
{
hobject_t *c = new hobject_t;
objecter->list_nobjects_get_cursor(context, c);
return (rados_object_list_cursor)c;
}
int librados::IoCtxImpl::create(const object_t& oid, bool exclusive)
{
::ObjectOperation op;
prepare_assert_ops(&op);
op.create(exclusive);
return operate(oid, &op, NULL);
}
/*
* add any version assert operations that are appropriate given the
 * state in the IoCtx, either the target version assert or any src
 * object asserts. These affect a single ioctx operation, so clear
 * the ioctx state when we're done.
*
* return a pointer to the ObjectOperation if we added any events;
* this is convenient for passing the extra_ops argument into Objecter
* methods.
*/
::ObjectOperation *librados::IoCtxImpl::prepare_assert_ops(::ObjectOperation *op)
{
::ObjectOperation *pop = NULL;
if (assert_ver) {
op->assert_version(assert_ver);
assert_ver = 0;
pop = op;
}
return pop;
}
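// Hedged illustration (not part of the original source): how the single-shot
// assert consumed above is typically armed from the public API. The object
// name and version are hypothetical, and IoCtx::set_assert_version() is
// assumed to be the public wrapper that stores assert_ver.
//
//   io_ctx.set_assert_version(123);            // stored in assert_ver
//   bufferlist bl;
//   bl.append("payload");
//   int r = io_ctx.write_full("hypothetical-object", bl);
//   // the write carries an assert_version(123) op and assert_ver is reset,
//   // so a following write would not re-apply the assertion; the OSD rejects
//   // the op if the object's version does not satisfy the assert.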
int librados::IoCtxImpl::write(const object_t& oid, bufferlist& bl,
size_t len, uint64_t off)
{
if (len > UINT_MAX/2)
return -E2BIG;
::ObjectOperation op;
prepare_assert_ops(&op);
bufferlist mybl;
mybl.substr_of(bl, 0, len);
op.write(off, mybl);
return operate(oid, &op, NULL);
}
int librados::IoCtxImpl::append(const object_t& oid, bufferlist& bl, size_t len)
{
if (len > UINT_MAX/2)
return -E2BIG;
::ObjectOperation op;
prepare_assert_ops(&op);
bufferlist mybl;
mybl.substr_of(bl, 0, len);
op.append(mybl);
return operate(oid, &op, NULL);
}
int librados::IoCtxImpl::write_full(const object_t& oid, bufferlist& bl)
{
if (bl.length() > UINT_MAX/2)
return -E2BIG;
::ObjectOperation op;
prepare_assert_ops(&op);
op.write_full(bl);
return operate(oid, &op, NULL);
}
int librados::IoCtxImpl::writesame(const object_t& oid, bufferlist& bl,
size_t write_len, uint64_t off)
{
if ((bl.length() > UINT_MAX/2) || (write_len > UINT_MAX/2))
return -E2BIG;
if ((bl.length() == 0) || (write_len % bl.length()))
return -EINVAL;
::ObjectOperation op;
prepare_assert_ops(&op);
bufferlist mybl;
mybl.substr_of(bl, 0, bl.length());
op.writesame(off, write_len, mybl);
return operate(oid, &op, NULL);
}
int librados::IoCtxImpl::operate(const object_t& oid, ::ObjectOperation *o,
ceph::real_time *pmtime, int flags)
{
ceph::real_time ut = (pmtime ? *pmtime :
ceph::real_clock::now());
/* can't write to a snapshot */
if (snap_seq != CEPH_NOSNAP)
return -EROFS;
if (!o->size())
return 0;
ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::operate::mylock");
ceph::condition_variable cond;
bool done;
int r;
version_t ver;
Context *oncommit = new C_SafeCond(mylock, cond, &done, &r);
int op = o->ops[0].op.op;
ldout(client->cct, 10) << ceph_osd_op_name(op) << " oid=" << oid
<< " nspace=" << oloc.nspace << dendl;
Objecter::Op *objecter_op = objecter->prepare_mutate_op(
oid, oloc,
*o, snapc, ut,
flags | extra_op_flags,
oncommit, &ver);
objecter->op_submit(objecter_op);
{
std::unique_lock l{mylock};
cond.wait(l, [&done] { return done;});
}
ldout(client->cct, 10) << "Objecter returned from "
<< ceph_osd_op_name(op) << " r=" << r << dendl;
set_sync_op_version(ver);
return r;
}
int librados::IoCtxImpl::operate_read(const object_t& oid,
::ObjectOperation *o,
bufferlist *pbl,
int flags)
{
if (!o->size())
return 0;
ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::operate_read::mylock");
ceph::condition_variable cond;
bool done;
int r;
version_t ver;
Context *onack = new C_SafeCond(mylock, cond, &done, &r);
int op = o->ops[0].op.op;
ldout(client->cct, 10) << ceph_osd_op_name(op) << " oid=" << oid << " nspace=" << oloc.nspace << dendl;
Objecter::Op *objecter_op = objecter->prepare_read_op(
oid, oloc,
*o, snap_seq, pbl,
flags | extra_op_flags,
onack, &ver);
objecter->op_submit(objecter_op);
{
std::unique_lock l{mylock};
cond.wait(l, [&done] { return done; });
}
ldout(client->cct, 10) << "Objecter returned from "
<< ceph_osd_op_name(op) << " r=" << r << dendl;
set_sync_op_version(ver);
return r;
}
int librados::IoCtxImpl::aio_operate_read(const object_t &oid,
::ObjectOperation *o,
AioCompletionImpl *c,
int flags,
bufferlist *pbl,
const blkin_trace_info *trace_info)
{
FUNCTRACE(client->cct);
Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) oncomplete)->oid = oid;
#endif
c->is_read = true;
c->io = this;
ZTracer::Trace trace;
if (trace_info) {
ZTracer::Trace parent_trace("", nullptr, trace_info);
trace.init("rados operate read", &objecter->trace_endpoint, &parent_trace);
}
trace.event("init root span");
Objecter::Op *objecter_op = objecter->prepare_read_op(
oid, oloc,
*o, snap_seq, pbl, flags | extra_op_flags,
oncomplete, &c->objver, nullptr, 0, &trace);
objecter->op_submit(objecter_op, &c->tid);
trace.event("rados operate read submitted");
return 0;
}
int librados::IoCtxImpl::aio_operate(const object_t& oid,
::ObjectOperation *o, AioCompletionImpl *c,
const SnapContext& snap_context,
const ceph::real_time *pmtime, int flags,
const blkin_trace_info *trace_info)
{
FUNCTRACE(client->cct);
OID_EVENT_TRACE(oid.name.c_str(), "RADOS_WRITE_OP_BEGIN");
const ceph::real_time ut = (pmtime ? *pmtime : ceph::real_clock::now());
/* can't write to a snapshot */
if (snap_seq != CEPH_NOSNAP)
return -EROFS;
Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) oncomplete)->oid = oid;
#endif
c->io = this;
queue_aio_write(c);
ZTracer::Trace trace;
if (trace_info) {
ZTracer::Trace parent_trace("", nullptr, trace_info);
trace.init("rados operate", &objecter->trace_endpoint, &parent_trace);
}
trace.event("init root span");
Objecter::Op *op = objecter->prepare_mutate_op(
oid, oloc, *o, snap_context, ut, flags | extra_op_flags,
oncomplete, &c->objver, osd_reqid_t(), &trace);
objecter->op_submit(op, &c->tid);
trace.event("rados operate op submitted");
return 0;
}
int librados::IoCtxImpl::aio_read(const object_t oid, AioCompletionImpl *c,
bufferlist *pbl, size_t len, uint64_t off,
uint64_t snapid, const blkin_trace_info *info)
{
FUNCTRACE(client->cct);
if (len > (size_t) INT_MAX)
return -EDOM;
OID_EVENT_TRACE(oid.name.c_str(), "RADOS_READ_OP_BEGIN");
Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) oncomplete)->oid = oid;
#endif
c->is_read = true;
c->io = this;
c->blp = pbl;
ZTracer::Trace trace;
if (info)
trace.init("rados read", &objecter->trace_endpoint, info);
Objecter::Op *o = objecter->prepare_read_op(
oid, oloc,
off, len, snapid, pbl, extra_op_flags,
oncomplete, &c->objver, nullptr, 0, &trace);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::aio_read(const object_t oid, AioCompletionImpl *c,
char *buf, size_t len, uint64_t off,
uint64_t snapid, const blkin_trace_info *info)
{
FUNCTRACE(client->cct);
if (len > (size_t) INT_MAX)
return -EDOM;
OID_EVENT_TRACE(oid.name.c_str(), "RADOS_READ_OP_BEGIN");
Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) oncomplete)->oid = oid;
#endif
c->is_read = true;
c->io = this;
c->bl.clear();
c->bl.push_back(buffer::create_static(len, buf));
c->blp = &c->bl;
c->out_buf = buf;
ZTracer::Trace trace;
if (info)
trace.init("rados read", &objecter->trace_endpoint, info);
Objecter::Op *o = objecter->prepare_read_op(
oid, oloc,
off, len, snapid, &c->bl, extra_op_flags,
oncomplete, &c->objver, nullptr, 0, &trace);
objecter->op_submit(o, &c->tid);
return 0;
}
class C_ObjectOperation : public Context {
public:
::ObjectOperation m_ops;
explicit C_ObjectOperation(Context *c) : m_ctx(c) {}
void finish(int r) override {
m_ctx->complete(r);
}
private:
Context *m_ctx;
};
int librados::IoCtxImpl::aio_sparse_read(const object_t oid,
AioCompletionImpl *c,
std::map<uint64_t,uint64_t> *m,
bufferlist *data_bl, size_t len,
uint64_t off, uint64_t snapid)
{
FUNCTRACE(client->cct);
if (len > (size_t) INT_MAX)
return -EDOM;
Context *nested = new C_aio_Complete(c);
C_ObjectOperation *onack = new C_ObjectOperation(nested);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) nested)->oid = oid;
#endif
c->is_read = true;
c->io = this;
onack->m_ops.sparse_read(off, len, m, data_bl, NULL);
Objecter::Op *o = objecter->prepare_read_op(
oid, oloc,
onack->m_ops, snapid, NULL, extra_op_flags,
onack, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::aio_cmpext(const object_t& oid,
AioCompletionImpl *c,
uint64_t off,
bufferlist& cmp_bl)
{
if (cmp_bl.length() > UINT_MAX/2)
return -E2BIG;
Context *onack = new C_aio_Complete(c);
c->is_read = true;
c->io = this;
Objecter::Op *o = objecter->prepare_cmpext_op(
oid, oloc, off, cmp_bl, snap_seq, extra_op_flags,
onack, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
/* use m_ops.cmpext() + prepare_read_op() for non-bufferlist C API */
int librados::IoCtxImpl::aio_cmpext(const object_t& oid,
AioCompletionImpl *c,
const char *cmp_buf,
size_t cmp_len,
uint64_t off)
{
if (cmp_len > UINT_MAX/2)
return -E2BIG;
bufferlist cmp_bl;
cmp_bl.append(cmp_buf, cmp_len);
Context *nested = new C_aio_Complete(c);
C_ObjectOperation *onack = new C_ObjectOperation(nested);
c->is_read = true;
c->io = this;
onack->m_ops.cmpext(off, cmp_len, cmp_buf, NULL);
Objecter::Op *o = objecter->prepare_read_op(
oid, oloc, onack->m_ops, snap_seq, NULL, extra_op_flags, onack, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
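// Hedged usage sketch (not part of the original source): the two overloads
// above back the C++ (bufferlist) and C (raw buffer) cmpext paths. The sketch
// uses the public C++ IoCtx::aio_cmpext(); object name and bytes are
// hypothetical.
//
//   bufferlist expected;
//   expected.append("abcd");
//   librados::AioCompletion *comp = librados::Rados::aio_create_completion();
//   io_ctx.aio_cmpext("hypothetical-object", comp, 0 /* offset */, expected);
//   comp->wait_for_complete();
//   int r = comp->get_return_value(); // 0 on match; negative on mismatch,
//                                     // encoding the first differing offset
//   comp->release();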
int librados::IoCtxImpl::aio_write(const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl, size_t len,
uint64_t off, const blkin_trace_info *info)
{
FUNCTRACE(client->cct);
auto ut = ceph::real_clock::now();
ldout(client->cct, 20) << "aio_write " << oid << " " << off << "~" << len << " snapc=" << snapc << " snap_seq=" << snap_seq << dendl;
OID_EVENT_TRACE(oid.name.c_str(), "RADOS_WRITE_OP_BEGIN");
if (len > UINT_MAX/2)
return -E2BIG;
/* can't write to a snapshot */
if (snap_seq != CEPH_NOSNAP)
return -EROFS;
Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) oncomplete)->oid = oid;
#endif
ZTracer::Trace trace;
if (info)
trace.init("rados write", &objecter->trace_endpoint, info);
c->io = this;
queue_aio_write(c);
Objecter::Op *o = objecter->prepare_write_op(
oid, oloc,
off, len, snapc, bl, ut, extra_op_flags,
oncomplete, &c->objver, nullptr, 0, &trace);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::aio_append(const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl, size_t len)
{
FUNCTRACE(client->cct);
auto ut = ceph::real_clock::now();
if (len > UINT_MAX/2)
return -E2BIG;
/* can't write to a snapshot */
if (snap_seq != CEPH_NOSNAP)
return -EROFS;
Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) oncomplete)->oid = oid;
#endif
c->io = this;
queue_aio_write(c);
Objecter::Op *o = objecter->prepare_append_op(
oid, oloc,
len, snapc, bl, ut, extra_op_flags,
oncomplete, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::aio_write_full(const object_t &oid,
AioCompletionImpl *c,
const bufferlist& bl)
{
FUNCTRACE(client->cct);
auto ut = ceph::real_clock::now();
if (bl.length() > UINT_MAX/2)
return -E2BIG;
/* can't write to a snapshot */
if (snap_seq != CEPH_NOSNAP)
return -EROFS;
Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) oncomplete)->oid = oid;
#endif
c->io = this;
queue_aio_write(c);
Objecter::Op *o = objecter->prepare_write_full_op(
oid, oloc,
snapc, bl, ut, extra_op_flags,
oncomplete, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::aio_writesame(const object_t &oid,
AioCompletionImpl *c,
const bufferlist& bl,
size_t write_len,
uint64_t off)
{
FUNCTRACE(client->cct);
auto ut = ceph::real_clock::now();
if ((bl.length() > UINT_MAX/2) || (write_len > UINT_MAX/2))
return -E2BIG;
if ((bl.length() == 0) || (write_len % bl.length()))
return -EINVAL;
/* can't write to a snapshot */
if (snap_seq != CEPH_NOSNAP)
return -EROFS;
Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) oncomplete)->oid = oid;
#endif
c->io = this;
queue_aio_write(c);
Objecter::Op *o = objecter->prepare_writesame_op(
oid, oloc,
write_len, off,
snapc, bl, ut, extra_op_flags,
oncomplete, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::aio_remove(const object_t &oid, AioCompletionImpl *c, int flags)
{
FUNCTRACE(client->cct);
auto ut = ceph::real_clock::now();
/* can't write to a snapshot */
if (snap_seq != CEPH_NOSNAP)
return -EROFS;
Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) oncomplete)->oid = oid;
#endif
c->io = this;
queue_aio_write(c);
Objecter::Op *o = objecter->prepare_remove_op(
oid, oloc,
snapc, ut, flags | extra_op_flags,
oncomplete, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::aio_stat(const object_t& oid, AioCompletionImpl *c,
uint64_t *psize, time_t *pmtime)
{
C_aio_stat_Ack *onack = new C_aio_stat_Ack(c, pmtime);
c->is_read = true;
c->io = this;
Objecter::Op *o = objecter->prepare_stat_op(
oid, oloc,
snap_seq, psize, &onack->mtime, extra_op_flags,
onack, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::aio_stat2(const object_t& oid, AioCompletionImpl *c,
uint64_t *psize, struct timespec *pts)
{
C_aio_stat2_Ack *onack = new C_aio_stat2_Ack(c, pts);
c->is_read = true;
c->io = this;
Objecter::Op *o = objecter->prepare_stat_op(
oid, oloc,
snap_seq, psize, &onack->mtime, extra_op_flags,
onack, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::aio_getxattr(const object_t& oid, AioCompletionImpl *c,
const char *name, bufferlist& bl)
{
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.getxattr(name, &bl, NULL);
int r = aio_operate_read(oid, &rd, c, 0, &bl);
return r;
}
int librados::IoCtxImpl::aio_rmxattr(const object_t& oid, AioCompletionImpl *c,
const char *name)
{
::ObjectOperation op;
prepare_assert_ops(&op);
op.rmxattr(name);
return aio_operate(oid, &op, c, snapc, nullptr, 0);
}
int librados::IoCtxImpl::aio_setxattr(const object_t& oid, AioCompletionImpl *c,
const char *name, bufferlist& bl)
{
::ObjectOperation op;
prepare_assert_ops(&op);
op.setxattr(name, bl);
return aio_operate(oid, &op, c, snapc, nullptr, 0);
}
namespace {
struct AioGetxattrsData {
AioGetxattrsData(librados::AioCompletionImpl *c, map<string, bufferlist>* attrset,
librados::RadosClient *_client) :
user_completion(c), user_attrset(attrset), client(_client) {}
struct librados::CB_AioCompleteAndSafe user_completion;
map<string, bufferlist> result_attrset;
map<std::string, bufferlist>* user_attrset;
librados::RadosClient *client;
};
}
static void aio_getxattrs_complete(rados_completion_t c, void *arg) {
AioGetxattrsData *cdata = reinterpret_cast<AioGetxattrsData*>(arg);
int rc = rados_aio_get_return_value(c);
cdata->user_attrset->clear();
if (rc >= 0) {
for (map<string,bufferlist>::iterator p = cdata->result_attrset.begin();
p != cdata->result_attrset.end();
++p) {
ldout(cdata->client->cct, 10) << "IoCtxImpl::getxattrs: xattr=" << p->first << dendl;
(*cdata->user_attrset)[p->first] = p->second;
}
}
cdata->user_completion(rc);
((librados::AioCompletionImpl*)c)->put();
delete cdata;
}
int librados::IoCtxImpl::aio_getxattrs(const object_t& oid, AioCompletionImpl *c,
map<std::string, bufferlist>& attrset)
{
AioGetxattrsData *cdata = new AioGetxattrsData(c, &attrset, client);
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.getxattrs(&cdata->result_attrset, NULL);
librados::AioCompletionImpl *comp = new librados::AioCompletionImpl;
comp->set_complete_callback(cdata, aio_getxattrs_complete);
return aio_operate_read(oid, &rd, comp, 0, NULL);
}
int librados::IoCtxImpl::aio_cancel(AioCompletionImpl *c)
{
return objecter->op_cancel(c->tid, -ECANCELED);
}
int librados::IoCtxImpl::hit_set_list(uint32_t hash, AioCompletionImpl *c,
std::list< std::pair<time_t, time_t> > *pls)
{
Context *oncomplete = new C_aio_Complete(c);
c->is_read = true;
c->io = this;
::ObjectOperation rd;
rd.hit_set_ls(pls, NULL);
object_locator_t oloc(poolid);
Objecter::Op *o = objecter->prepare_pg_read_op(
hash, oloc, rd, NULL, extra_op_flags, oncomplete, NULL, NULL);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::hit_set_get(uint32_t hash, AioCompletionImpl *c,
time_t stamp,
bufferlist *pbl)
{
Context *oncomplete = new C_aio_Complete(c);
c->is_read = true;
c->io = this;
::ObjectOperation rd;
rd.hit_set_get(ceph::real_clock::from_time_t(stamp), pbl, 0);
object_locator_t oloc(poolid);
Objecter::Op *o = objecter->prepare_pg_read_op(
hash, oloc, rd, NULL, extra_op_flags, oncomplete, NULL, NULL);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::remove(const object_t& oid)
{
::ObjectOperation op;
prepare_assert_ops(&op);
op.remove();
return operate(oid, &op, nullptr, librados::OPERATION_FULL_FORCE);
}
int librados::IoCtxImpl::remove(const object_t& oid, int flags)
{
::ObjectOperation op;
prepare_assert_ops(&op);
op.remove();
return operate(oid, &op, NULL, flags);
}
int librados::IoCtxImpl::trunc(const object_t& oid, uint64_t size)
{
::ObjectOperation op;
prepare_assert_ops(&op);
op.truncate(size);
return operate(oid, &op, NULL);
}
int librados::IoCtxImpl::get_inconsistent_objects(const pg_t& pg,
const librados::object_id_t& start_after,
uint64_t max_to_get,
AioCompletionImpl *c,
std::vector<inconsistent_obj_t>* objects,
uint32_t* interval)
{
Context *oncomplete = new C_aio_Complete(c);
c->is_read = true;
c->io = this;
::ObjectOperation op;
op.scrub_ls(start_after, max_to_get, objects, interval, &c->rval);
object_locator_t oloc{poolid, pg.ps()};
Objecter::Op *o = objecter->prepare_pg_read_op(
oloc.hash, oloc, op, nullptr, CEPH_OSD_FLAG_PGOP | extra_op_flags, oncomplete,
nullptr, nullptr);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::get_inconsistent_snapsets(const pg_t& pg,
const librados::object_id_t& start_after,
uint64_t max_to_get,
AioCompletionImpl *c,
std::vector<inconsistent_snapset_t>* snapsets,
uint32_t* interval)
{
Context *oncomplete = new C_aio_Complete(c);
c->is_read = true;
c->io = this;
::ObjectOperation op;
op.scrub_ls(start_after, max_to_get, snapsets, interval, &c->rval);
object_locator_t oloc{poolid, pg.ps()};
Objecter::Op *o = objecter->prepare_pg_read_op(
oloc.hash, oloc, op, nullptr, CEPH_OSD_FLAG_PGOP | extra_op_flags, oncomplete,
nullptr, nullptr);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::tmap_update(const object_t& oid, bufferlist& cmdbl)
{
::ObjectOperation wr;
prepare_assert_ops(&wr);
wr.tmap_update(cmdbl);
return operate(oid, &wr, NULL);
}
int librados::IoCtxImpl::exec(const object_t& oid,
const char *cls, const char *method,
bufferlist& inbl, bufferlist& outbl)
{
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.call(cls, method, inbl);
return operate_read(oid, &rd, &outbl);
}
int librados::IoCtxImpl::aio_exec(const object_t& oid, AioCompletionImpl *c,
const char *cls, const char *method,
bufferlist& inbl, bufferlist *outbl)
{
FUNCTRACE(client->cct);
Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) oncomplete)->oid = oid;
#endif
c->is_read = true;
c->io = this;
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.call(cls, method, inbl);
Objecter::Op *o = objecter->prepare_read_op(
oid, oloc, rd, snap_seq, outbl, extra_op_flags, oncomplete, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::aio_exec(const object_t& oid, AioCompletionImpl *c,
const char *cls, const char *method,
bufferlist& inbl, char *buf, size_t out_len)
{
FUNCTRACE(client->cct);
Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) oncomplete)->oid = oid;
#endif
c->is_read = true;
c->io = this;
c->bl.clear();
c->bl.push_back(buffer::create_static(out_len, buf));
c->blp = &c->bl;
c->out_buf = buf;
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.call(cls, method, inbl);
Objecter::Op *o = objecter->prepare_read_op(
oid, oloc, rd, snap_seq, &c->bl, extra_op_flags, oncomplete, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::read(const object_t& oid,
bufferlist& bl, size_t len, uint64_t off)
{
if (len > (size_t) INT_MAX)
return -EDOM;
OID_EVENT_TRACE(oid.name.c_str(), "RADOS_READ_OP_BEGIN");
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.read(off, len, &bl, NULL, NULL);
int r = operate_read(oid, &rd, &bl);
if (r < 0)
return r;
if (bl.length() < len) {
ldout(client->cct, 10) << "Returned length " << bl.length()
<< " less than original length "<< len << dendl;
}
return bl.length();
}
int librados::IoCtxImpl::cmpext(const object_t& oid, uint64_t off,
bufferlist& cmp_bl)
{
if (cmp_bl.length() > UINT_MAX/2)
return -E2BIG;
::ObjectOperation op;
prepare_assert_ops(&op);
op.cmpext(off, cmp_bl, NULL);
return operate_read(oid, &op, NULL);
}
int librados::IoCtxImpl::mapext(const object_t& oid,
uint64_t off, size_t len,
std::map<uint64_t,uint64_t>& m)
{
bufferlist bl;
ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::read::mylock");
ceph::condition_variable cond;
bool done;
int r;
Context *onack = new C_SafeCond(mylock, cond, &done, &r);
objecter->mapext(oid, oloc,
off, len, snap_seq, &bl, extra_op_flags,
onack);
{
unique_lock l{mylock};
cond.wait(l, [&done] { return done;});
}
ldout(client->cct, 10) << "Objecter returned from read r=" << r << dendl;
if (r < 0)
return r;
auto iter = bl.cbegin();
decode(m, iter);
return m.size();
}
int librados::IoCtxImpl::sparse_read(const object_t& oid,
std::map<uint64_t,uint64_t>& m,
bufferlist& data_bl, size_t len,
uint64_t off)
{
if (len > (size_t) INT_MAX)
return -EDOM;
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.sparse_read(off, len, &m, &data_bl, NULL);
int r = operate_read(oid, &rd, NULL);
if (r < 0)
return r;
return m.size();
}
int librados::IoCtxImpl::checksum(const object_t& oid, uint8_t type,
const bufferlist &init_value, size_t len,
uint64_t off, size_t chunk_size,
bufferlist *pbl)
{
if (len > (size_t) INT_MAX) {
return -EDOM;
}
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.checksum(type, init_value, off, len, chunk_size, pbl, nullptr, nullptr);
int r = operate_read(oid, &rd, nullptr);
if (r < 0) {
return r;
}
return 0;
}
int librados::IoCtxImpl::stat(const object_t& oid, uint64_t *psize, time_t *pmtime)
{
uint64_t size;
real_time mtime;
if (!psize)
psize = &size;
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.stat(psize, &mtime, nullptr);
int r = operate_read(oid, &rd, NULL);
if (r >= 0 && pmtime) {
*pmtime = real_clock::to_time_t(mtime);
}
return r;
}
int librados::IoCtxImpl::stat2(const object_t& oid, uint64_t *psize, struct timespec *pts)
{
uint64_t size;
ceph::real_time mtime;
if (!psize)
psize = &size;
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.stat(psize, &mtime, nullptr);
int r = operate_read(oid, &rd, NULL);
if (r < 0) {
return r;
}
if (pts) {
*pts = ceph::real_clock::to_timespec(mtime);
}
return 0;
}
int librados::IoCtxImpl::getxattr(const object_t& oid,
const char *name, bufferlist& bl)
{
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.getxattr(name, &bl, NULL);
int r = operate_read(oid, &rd, &bl);
if (r < 0)
return r;
return bl.length();
}
int librados::IoCtxImpl::rmxattr(const object_t& oid, const char *name)
{
::ObjectOperation op;
prepare_assert_ops(&op);
op.rmxattr(name);
return operate(oid, &op, NULL);
}
int librados::IoCtxImpl::setxattr(const object_t& oid,
const char *name, bufferlist& bl)
{
::ObjectOperation op;
prepare_assert_ops(&op);
op.setxattr(name, bl);
return operate(oid, &op, NULL);
}
int librados::IoCtxImpl::getxattrs(const object_t& oid,
map<std::string, bufferlist>& attrset)
{
map<string, bufferlist> aset;
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.getxattrs(&aset, NULL);
int r = operate_read(oid, &rd, NULL);
attrset.clear();
if (r >= 0) {
for (map<string,bufferlist>::iterator p = aset.begin(); p != aset.end(); ++p) {
ldout(client->cct, 10) << "IoCtxImpl::getxattrs: xattr=" << p->first << dendl;
attrset[p->first.c_str()] = p->second;
}
}
return r;
}
void librados::IoCtxImpl::set_sync_op_version(version_t ver)
{
ANNOTATE_BENIGN_RACE_SIZED(&last_objver, sizeof(last_objver),
"IoCtxImpl last_objver");
last_objver = ver;
}
namespace librados {
void intrusive_ptr_add_ref(IoCtxImpl *p) { p->get(); }
void intrusive_ptr_release(IoCtxImpl *p) { p->put(); }
}
struct WatchInfo {
boost::intrusive_ptr<librados::IoCtxImpl> ioctx;
object_t oid;
librados::WatchCtx *ctx;
librados::WatchCtx2 *ctx2;
WatchInfo(librados::IoCtxImpl *io, object_t o,
librados::WatchCtx *c, librados::WatchCtx2 *c2)
: ioctx(io), oid(o), ctx(c), ctx2(c2) {}
void handle_notify(uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl) {
ldout(ioctx->client->cct, 10) << __func__ << " " << notify_id
<< " cookie " << cookie
<< " notifier_id " << notifier_id
<< " len " << bl.length()
<< dendl;
if (ctx2)
ctx2->handle_notify(notify_id, cookie, notifier_id, bl);
if (ctx) {
ctx->notify(0, 0, bl);
// send ACK back to OSD if using legacy protocol
bufferlist empty;
ioctx->notify_ack(oid, notify_id, cookie, empty);
}
}
void handle_error(uint64_t cookie, int err) {
ldout(ioctx->client->cct, 10) << __func__ << " cookie " << cookie
<< " err " << err
<< dendl;
if (ctx2)
ctx2->handle_error(cookie, err);
}
void operator()(bs::error_code ec,
uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist&& bl) {
if (ec) {
handle_error(cookie, ceph::from_error_code(ec));
} else {
handle_notify(notify_id, cookie, notifier_id, bl);
}
}
};
// internal WatchInfo that owns the context memory
struct InternalWatchInfo : public WatchInfo {
std::unique_ptr<librados::WatchCtx> ctx;
std::unique_ptr<librados::WatchCtx2> ctx2;
InternalWatchInfo(librados::IoCtxImpl *io, object_t o,
librados::WatchCtx *c, librados::WatchCtx2 *c2)
: WatchInfo(io, o, c, c2), ctx(c), ctx2(c2) {}
};
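// Hedged illustration (not part of the original source) of the ownership
// split above: WatchInfo only borrows ctx/ctx2, so a caller going through the
// public watch2() keeps its callback object alive until unwatch, while
// InternalWatchInfo owns the contexts and frees them with the linger op.
// The watcher class and object name below are hypothetical.
//
//   struct MyWatcher : public librados::WatchCtx2 {
//     void handle_notify(uint64_t notify_id, uint64_t cookie,
//                        uint64_t notifier_id, bufferlist& bl) override {
//       // reply so the notifier's notify() can complete:
//       // io_ctx.notify_ack("hypothetical-object", notify_id, cookie, reply);
//     }
//     void handle_error(uint64_t cookie, int err) override {}
//   };
//
//   MyWatcher watcher;               // must outlive the watch
//   uint64_t handle = 0;
//   int r = io_ctx.watch2("hypothetical-object", &handle, &watcher);
//   ...
//   io_ctx.unwatch2(handle);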
int librados::IoCtxImpl::watch(const object_t& oid, uint64_t *handle,
librados::WatchCtx *ctx,
librados::WatchCtx2 *ctx2,
bool internal)
{
return watch(oid, handle, ctx, ctx2, 0, internal);
}
int librados::IoCtxImpl::watch(const object_t& oid, uint64_t *handle,
librados::WatchCtx *ctx,
librados::WatchCtx2 *ctx2,
uint32_t timeout,
bool internal)
{
::ObjectOperation wr;
version_t objver;
C_SaferCond onfinish;
Objecter::LingerOp *linger_op = objecter->linger_register(oid, oloc,
extra_op_flags);
*handle = linger_op->get_cookie();
if (internal) {
linger_op->handle = InternalWatchInfo(this, oid, ctx, ctx2);
} else {
linger_op->handle = WatchInfo(this, oid, ctx, ctx2);
}
prepare_assert_ops(&wr);
wr.watch(*handle, CEPH_OSD_WATCH_OP_WATCH, timeout);
bufferlist bl;
objecter->linger_watch(linger_op, wr,
snapc, ceph::real_clock::now(), bl,
&onfinish,
&objver);
int r = onfinish.wait();
set_sync_op_version(objver);
if (r < 0) {
objecter->linger_cancel(linger_op);
*handle = 0;
}
return r;
}
int librados::IoCtxImpl::aio_watch(const object_t& oid,
AioCompletionImpl *c,
uint64_t *handle,
librados::WatchCtx *ctx,
librados::WatchCtx2 *ctx2,
bool internal) {
return aio_watch(oid, c, handle, ctx, ctx2, 0, internal);
}
int librados::IoCtxImpl::aio_watch(const object_t& oid,
AioCompletionImpl *c,
uint64_t *handle,
librados::WatchCtx *ctx,
librados::WatchCtx2 *ctx2,
uint32_t timeout,
bool internal)
{
Objecter::LingerOp *linger_op = objecter->linger_register(oid, oloc,
extra_op_flags);
c->io = this;
Context *oncomplete = new C_aio_linger_Complete(c, linger_op, false);
::ObjectOperation wr;
*handle = linger_op->get_cookie();
if (internal) {
linger_op->handle = InternalWatchInfo(this, oid, ctx, ctx2);
} else {
linger_op->handle = WatchInfo(this, oid, ctx, ctx2);
}
prepare_assert_ops(&wr);
wr.watch(*handle, CEPH_OSD_WATCH_OP_WATCH, timeout);
bufferlist bl;
objecter->linger_watch(linger_op, wr,
snapc, ceph::real_clock::now(), bl,
oncomplete, &c->objver);
return 0;
}
int librados::IoCtxImpl::notify_ack(
const object_t& oid,
uint64_t notify_id,
uint64_t cookie,
bufferlist& bl)
{
::ObjectOperation rd;
prepare_assert_ops(&rd);
rd.notify_ack(notify_id, cookie, bl);
objecter->read(oid, oloc, rd, snap_seq, (bufferlist*)NULL, extra_op_flags, 0, 0);
return 0;
}
int librados::IoCtxImpl::watch_check(uint64_t cookie)
{
auto linger_op = reinterpret_cast<Objecter::LingerOp*>(cookie);
auto r = objecter->linger_check(linger_op);
if (r)
return 1 + std::chrono::duration_cast<
std::chrono::milliseconds>(*r).count();
else
return ceph::from_error_code(r.error());
}
int librados::IoCtxImpl::unwatch(uint64_t cookie)
{
Objecter::LingerOp *linger_op = reinterpret_cast<Objecter::LingerOp*>(cookie);
C_SaferCond onfinish;
version_t ver = 0;
::ObjectOperation wr;
prepare_assert_ops(&wr);
wr.watch(cookie, CEPH_OSD_WATCH_OP_UNWATCH);
objecter->mutate(linger_op->target.base_oid, oloc, wr,
snapc, ceph::real_clock::now(), extra_op_flags,
&onfinish, &ver);
objecter->linger_cancel(linger_op);
int r = onfinish.wait();
set_sync_op_version(ver);
return r;
}
int librados::IoCtxImpl::aio_unwatch(uint64_t cookie, AioCompletionImpl *c)
{
c->io = this;
Objecter::LingerOp *linger_op = reinterpret_cast<Objecter::LingerOp*>(cookie);
Context *oncomplete = new C_aio_linger_Complete(c, linger_op, true);
::ObjectOperation wr;
prepare_assert_ops(&wr);
wr.watch(cookie, CEPH_OSD_WATCH_OP_UNWATCH);
objecter->mutate(linger_op->target.base_oid, oloc, wr,
snapc, ceph::real_clock::now(), extra_op_flags,
oncomplete, &c->objver);
return 0;
}
int librados::IoCtxImpl::notify(const object_t& oid, bufferlist& bl,
uint64_t timeout_ms,
bufferlist *preply_bl,
char **preply_buf, size_t *preply_buf_len)
{
Objecter::LingerOp *linger_op = objecter->linger_register(oid, oloc,
extra_op_flags);
C_SaferCond notify_finish_cond;
linger_op->on_notify_finish =
Objecter::LingerOp::OpComp::create(
objecter->service.get_executor(),
CB_notify_Finish(client->cct, ¬ify_finish_cond,
objecter, linger_op, preply_bl,
preply_buf, preply_buf_len));
uint32_t timeout = notify_timeout;
if (timeout_ms)
timeout = timeout_ms / 1000;
// Construct RADOS op
::ObjectOperation rd;
prepare_assert_ops(&rd);
bufferlist inbl;
rd.notify(linger_op->get_cookie(), 1, timeout, bl, &inbl);
// Issue RADOS op
C_SaferCond onack;
version_t objver;
objecter->linger_notify(linger_op,
rd, snap_seq, inbl, NULL,
&onack, &objver);
ldout(client->cct, 10) << __func__ << " issued linger op " << linger_op << dendl;
int r = onack.wait();
ldout(client->cct, 10) << __func__ << " linger op " << linger_op
<< " acked (" << r << ")" << dendl;
if (r == 0) {
ldout(client->cct, 10) << __func__ << " waiting for watch_notify finish "
<< linger_op << dendl;
r = notify_finish_cond.wait();
} else {
ldout(client->cct, 10) << __func__ << " failed to initiate notify, r = "
<< r << dendl;
notify_finish_cond.wait();
}
objecter->linger_cancel(linger_op);
set_sync_op_version(objver);
return r;
}
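// Hedged usage sketch (not part of the original source): the public
// IoCtx::notify2() lands in the synchronous path above; object name, payload
// and timeout are hypothetical.
//
//   bufferlist payload, replies;
//   payload.append("ping");
//   int r = io_ctx.notify2("hypothetical-object", payload,
//                          10000 /* ms; 0 falls back to notify_timeout */,
//                          &replies);
//   // `replies` carries the per-watcher reply payloads plus the list of
//   // watchers that timed out, as assembled by CB_notify_Finish.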
int librados::IoCtxImpl::aio_notify(const object_t& oid, AioCompletionImpl *c,
bufferlist& bl, uint64_t timeout_ms,
bufferlist *preply_bl, char **preply_buf,
size_t *preply_buf_len)
{
Objecter::LingerOp *linger_op = objecter->linger_register(oid, oloc,
extra_op_flags);
c->io = this;
C_aio_notify_Complete *oncomplete = new C_aio_notify_Complete(c, linger_op);
linger_op->on_notify_finish =
Objecter::LingerOp::OpComp::create(
objecter->service.get_executor(),
CB_notify_Finish(client->cct, oncomplete,
objecter, linger_op,
preply_bl, preply_buf,
preply_buf_len));
Context *onack = new C_aio_notify_Ack(client->cct, oncomplete);
uint32_t timeout = notify_timeout;
if (timeout_ms)
timeout = timeout_ms / 1000;
// Construct RADOS op
::ObjectOperation rd;
prepare_assert_ops(&rd);
bufferlist inbl;
rd.notify(linger_op->get_cookie(), 1, timeout, bl, &inbl);
// Issue RADOS op
objecter->linger_notify(linger_op,
rd, snap_seq, inbl, NULL,
onack, &c->objver);
return 0;
}
int librados::IoCtxImpl::set_alloc_hint(const object_t& oid,
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags)
{
::ObjectOperation wr;
prepare_assert_ops(&wr);
wr.set_alloc_hint(expected_object_size, expected_write_size, flags);
return operate(oid, &wr, NULL);
}
version_t librados::IoCtxImpl::last_version()
{
return last_objver;
}
void librados::IoCtxImpl::set_assert_version(uint64_t ver)
{
assert_ver = ver;
}
void librados::IoCtxImpl::set_notify_timeout(uint32_t timeout)
{
notify_timeout = timeout;
}
int librados::IoCtxImpl::cache_pin(const object_t& oid)
{
::ObjectOperation wr;
prepare_assert_ops(&wr);
wr.cache_pin();
return operate(oid, &wr, NULL);
}
int librados::IoCtxImpl::cache_unpin(const object_t& oid)
{
::ObjectOperation wr;
prepare_assert_ops(&wr);
wr.cache_unpin();
return operate(oid, &wr, NULL);
}
///////////////////////////// C_aio_stat_Ack ////////////////////////////
librados::IoCtxImpl::C_aio_stat_Ack::C_aio_stat_Ack(AioCompletionImpl *_c,
time_t *pm)
: c(_c), pmtime(pm)
{
ceph_assert(!c->io);
c->get();
}
void librados::IoCtxImpl::C_aio_stat_Ack::finish(int r)
{
c->lock.lock();
c->rval = r;
c->complete = true;
c->cond.notify_all();
if (r >= 0 && pmtime) {
*pmtime = real_clock::to_time_t(mtime);
}
if (c->callback_complete) {
boost::asio::defer(c->io->client->finish_strand, CB_AioComplete(c));
}
c->put_unlock();
}
///////////////////////////// C_aio_stat2_Ack ////////////////////////////
librados::IoCtxImpl::C_aio_stat2_Ack::C_aio_stat2_Ack(AioCompletionImpl *_c,
struct timespec *pt)
: c(_c), pts(pt)
{
ceph_assert(!c->io);
c->get();
}
void librados::IoCtxImpl::C_aio_stat2_Ack::finish(int r)
{
c->lock.lock();
c->rval = r;
c->complete = true;
c->cond.notify_all();
if (r >= 0 && pts) {
*pts = real_clock::to_timespec(mtime);
}
if (c->callback_complete) {
boost::asio::defer(c->io->client->finish_strand, CB_AioComplete(c));
}
c->put_unlock();
}
//////////////////////////// C_aio_Complete ////////////////////////////////
librados::IoCtxImpl::C_aio_Complete::C_aio_Complete(AioCompletionImpl *_c)
: c(_c)
{
c->get();
}
void librados::IoCtxImpl::C_aio_Complete::finish(int r)
{
c->lock.lock();
// Leave an existing rval unless r != 0
if (r)
c->rval = r; // This clears the error set in C_ObjectOperation_scrub_ls::finish()
c->complete = true;
c->cond.notify_all();
if (r == 0 && c->blp && c->blp->length() > 0) {
if (c->out_buf && !c->blp->is_contiguous()) {
c->rval = -ERANGE;
} else {
if (c->out_buf && !c->blp->is_provided_buffer(c->out_buf))
c->blp->begin().copy(c->blp->length(), c->out_buf);
c->rval = c->blp->length();
}
}
if (c->callback_complete ||
c->callback_safe) {
boost::asio::defer(c->io->client->finish_strand, CB_AioComplete(c));
}
if (c->aio_write_seq) {
c->io->complete_aio_write(c);
}
#if defined(WITH_EVENTTRACE)
OID_EVENT_TRACE(oid.name.c_str(), "RADOS_OP_COMPLETE");
#endif
c->put_unlock();
}
void librados::IoCtxImpl::object_list_slice(
const hobject_t start,
const hobject_t finish,
const size_t n,
const size_t m,
hobject_t *split_start,
hobject_t *split_finish)
{
if (start.is_max()) {
*split_start = hobject_t::get_max();
*split_finish = hobject_t::get_max();
return;
}
uint64_t start_hash = hobject_t::_reverse_bits(start.get_hash());
uint64_t finish_hash =
finish.is_max() ? 0x100000000 :
hobject_t::_reverse_bits(finish.get_hash());
uint64_t diff = finish_hash - start_hash;
uint64_t rev_start = start_hash + (diff * n / m);
uint64_t rev_finish = start_hash + (diff * (n + 1) / m);
if (n == 0) {
*split_start = start;
} else {
*split_start = hobject_t(
object_t(), string(), CEPH_NOSNAP,
hobject_t::_reverse_bits(rev_start), poolid, string());
}
if (n == m - 1)
*split_finish = finish;
else if (rev_finish >= 0x100000000)
*split_finish = hobject_t::get_max();
else
*split_finish = hobject_t(
object_t(), string(), CEPH_NOSNAP,
hobject_t::_reverse_bits(rev_finish), poolid, string());
}
int librados::IoCtxImpl::application_enable(const std::string& app_name,
bool force)
{
auto c = new PoolAsyncCompletionImpl();
application_enable_async(app_name, force, c);
int r = c->wait();
ceph_assert(r == 0);
r = c->get_return_value();
c->release();
c->put();
if (r < 0) {
return r;
}
return client->wait_for_latest_osdmap();
}
void librados::IoCtxImpl::application_enable_async(const std::string& app_name,
bool force,
PoolAsyncCompletionImpl *c)
{
// pre-Luminous clusters will return -EINVAL and the application won't be
// preserved until Luminous is configured as the minimum version.
if (!client->get_required_monitor_features().contains_all(
ceph::features::mon::FEATURE_LUMINOUS)) {
boost::asio::defer(client->finish_strand,
[cb = CB_PoolAsync_Safe(c)]() mutable {
cb(-EOPNOTSUPP);
});
return;
}
std::stringstream cmd;
cmd << "{"
<< "\"prefix\": \"osd pool application enable\","
<< "\"pool\": \"" << get_cached_pool_name() << "\","
<< "\"app\": \"" << app_name << "\"";
if (force) {
cmd << ",\"yes_i_really_mean_it\": true";
}
cmd << "}";
std::vector<std::string> cmds;
cmds.push_back(cmd.str());
bufferlist inbl;
client->mon_command_async(cmds, inbl, nullptr, nullptr,
make_lambda_context(CB_PoolAsync_Safe(c)));
}
int librados::IoCtxImpl::application_list(std::set<std::string> *app_names)
{
int r = 0;
app_names->clear();
objecter->with_osdmap([&](const OSDMap& o) {
auto pg_pool = o.get_pg_pool(poolid);
if (pg_pool == nullptr) {
r = -ENOENT;
return;
}
for (auto &pair : pg_pool->application_metadata) {
app_names->insert(pair.first);
}
});
return r;
}
int librados::IoCtxImpl::application_metadata_get(const std::string& app_name,
const std::string &key,
std::string* value)
{
int r = 0;
objecter->with_osdmap([&](const OSDMap& o) {
auto pg_pool = o.get_pg_pool(poolid);
if (pg_pool == nullptr) {
r = -ENOENT;
return;
}
auto app_it = pg_pool->application_metadata.find(app_name);
if (app_it == pg_pool->application_metadata.end()) {
r = -ENOENT;
return;
}
auto it = app_it->second.find(key);
if (it == app_it->second.end()) {
r = -ENOENT;
return;
}
*value = it->second;
});
return r;
}
int librados::IoCtxImpl::application_metadata_set(const std::string& app_name,
const std::string &key,
const std::string& value)
{
std::stringstream cmd;
cmd << "{"
<< "\"prefix\":\"osd pool application set\","
<< "\"pool\":\"" << get_cached_pool_name() << "\","
<< "\"app\":\"" << app_name << "\","
<< "\"key\":\"" << key << "\","
<< "\"value\":\"" << value << "\""
<< "}";
std::vector<std::string> cmds;
cmds.push_back(cmd.str());
bufferlist inbl;
int r = client->mon_command(cmds, inbl, nullptr, nullptr);
if (r < 0) {
return r;
}
// ensure we have the latest osd map epoch before proceeding
return client->wait_for_latest_osdmap();
}
int librados::IoCtxImpl::application_metadata_remove(const std::string& app_name,
const std::string &key)
{
std::stringstream cmd;
cmd << "{"
<< "\"prefix\":\"osd pool application rm\","
<< "\"pool\":\"" << get_cached_pool_name() << "\","
<< "\"app\":\"" << app_name << "\","
<< "\"key\":\"" << key << "\""
<< "}";
std::vector<std::string> cmds;
cmds.push_back(cmd.str());
bufferlist inbl;
int r = client->mon_command(cmds, inbl, nullptr, nullptr);
if (r < 0) {
return r;
}
// ensure we have the latest osd map epoch before proceeding
return client->wait_for_latest_osdmap();
}
int librados::IoCtxImpl::application_metadata_list(const std::string& app_name,
std::map<std::string, std::string> *values)
{
int r = 0;
values->clear();
objecter->with_osdmap([&](const OSDMap& o) {
auto pg_pool = o.get_pg_pool(poolid);
if (pg_pool == nullptr) {
r = -ENOENT;
return;
}
auto it = pg_pool->application_metadata.find(app_name);
if (it == pg_pool->application_metadata.end()) {
r = -ENOENT;
return;
}
*values = it->second;
});
return r;
}
| 59,825 | 25.90018 | 135 |
cc
|
null |
ceph-main/src/librados/IoCtxImpl.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_IOCTXIMPL_H
#define CEPH_LIBRADOS_IOCTXIMPL_H
#include <atomic>
#include "common/Cond.h"
#include "common/ceph_mutex.h"
#include "common/snap_types.h"
#include "common/zipkin_trace.h"
#include "include/types.h"
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/xlist.h"
#include "osd/osd_types.h"
#include "osdc/Objecter.h"
class RadosClient;
struct librados::IoCtxImpl {
std::atomic<uint64_t> ref_cnt = { 0 };
RadosClient *client = nullptr;
int64_t poolid = 0;
snapid_t snap_seq;
::SnapContext snapc;
uint64_t assert_ver = 0;
version_t last_objver = 0;
uint32_t notify_timeout = 30;
object_locator_t oloc;
int extra_op_flags = 0;
ceph::mutex aio_write_list_lock =
ceph::make_mutex("librados::IoCtxImpl::aio_write_list_lock");
ceph_tid_t aio_write_seq = 0;
ceph::condition_variable aio_write_cond;
xlist<AioCompletionImpl*> aio_write_list;
std::map<ceph_tid_t, std::list<AioCompletionImpl*> > aio_write_waiters;
Objecter *objecter = nullptr;
IoCtxImpl();
IoCtxImpl(RadosClient *c, Objecter *objecter,
int64_t poolid, snapid_t s);
void dup(const IoCtxImpl& rhs) {
// Copy everything except the ref count
client = rhs.client;
poolid = rhs.poolid;
snap_seq = rhs.snap_seq;
snapc = rhs.snapc;
assert_ver = rhs.assert_ver;
last_objver = rhs.last_objver;
notify_timeout = rhs.notify_timeout;
oloc = rhs.oloc;
extra_op_flags = rhs.extra_op_flags;
objecter = rhs.objecter;
}
void set_snap_read(snapid_t s);
int set_snap_write_context(snapid_t seq, std::vector<snapid_t>& snaps);
void get() {
ref_cnt++;
}
void put() {
if (--ref_cnt == 0)
delete this;
}
void queue_aio_write(struct AioCompletionImpl *c);
void complete_aio_write(struct AioCompletionImpl *c);
void flush_aio_writes_async(AioCompletionImpl *c);
void flush_aio_writes();
int64_t get_id() {
return poolid;
}
std::string get_cached_pool_name();
int get_object_hash_position(const std::string& oid, uint32_t *hash_position);
int get_object_pg_hash_position(const std::string& oid, uint32_t *pg_hash_position);
::ObjectOperation *prepare_assert_ops(::ObjectOperation *op);
// snaps
int snap_list(std::vector<uint64_t> *snaps);
int snap_lookup(const char *name, uint64_t *snapid);
int snap_get_name(uint64_t snapid, std::string *s);
int snap_get_stamp(uint64_t snapid, time_t *t);
int snap_create(const char* snapname);
int selfmanaged_snap_create(uint64_t *snapid);
void aio_selfmanaged_snap_create(uint64_t *snapid, AioCompletionImpl *c);
int snap_remove(const char* snapname);
int rollback(const object_t& oid, const char *snapName);
int selfmanaged_snap_remove(uint64_t snapid);
void aio_selfmanaged_snap_remove(uint64_t snapid, AioCompletionImpl *c);
int selfmanaged_snap_rollback_object(const object_t& oid,
::SnapContext& snapc, uint64_t snapid);
// io
int nlist(Objecter::NListContext *context, int max_entries);
uint32_t nlist_seek(Objecter::NListContext *context, uint32_t pos);
uint32_t nlist_seek(Objecter::NListContext *context, const rados_object_list_cursor& cursor);
rados_object_list_cursor nlist_get_cursor(Objecter::NListContext *context);
void object_list_slice(
const hobject_t start,
const hobject_t finish,
const size_t n,
const size_t m,
hobject_t *split_start,
hobject_t *split_finish);
int create(const object_t& oid, bool exclusive);
int write(const object_t& oid, bufferlist& bl, size_t len, uint64_t off);
int append(const object_t& oid, bufferlist& bl, size_t len);
int write_full(const object_t& oid, bufferlist& bl);
int writesame(const object_t& oid, bufferlist& bl,
size_t write_len, uint64_t offset);
int read(const object_t& oid, bufferlist& bl, size_t len, uint64_t off);
int mapext(const object_t& oid, uint64_t off, size_t len,
std::map<uint64_t,uint64_t>& m);
int sparse_read(const object_t& oid, std::map<uint64_t,uint64_t>& m,
bufferlist& bl, size_t len, uint64_t off);
int checksum(const object_t& oid, uint8_t type, const bufferlist &init_value,
size_t len, uint64_t off, size_t chunk_size, bufferlist *pbl);
int remove(const object_t& oid);
int remove(const object_t& oid, int flags);
int stat(const object_t& oid, uint64_t *psize, time_t *pmtime);
int stat2(const object_t& oid, uint64_t *psize, struct timespec *pts);
int trunc(const object_t& oid, uint64_t size);
int cmpext(const object_t& oid, uint64_t off, bufferlist& cmp_bl);
int tmap_update(const object_t& oid, bufferlist& cmdbl);
int exec(const object_t& oid, const char *cls, const char *method, bufferlist& inbl, bufferlist& outbl);
int getxattr(const object_t& oid, const char *name, bufferlist& bl);
int setxattr(const object_t& oid, const char *name, bufferlist& bl);
int getxattrs(const object_t& oid, std::map<std::string, bufferlist>& attrset);
int rmxattr(const object_t& oid, const char *name);
int operate(const object_t& oid, ::ObjectOperation *o, ceph::real_time *pmtime, int flags=0);
int operate_read(const object_t& oid, ::ObjectOperation *o, bufferlist *pbl, int flags=0);
int aio_operate(const object_t& oid, ::ObjectOperation *o,
AioCompletionImpl *c, const SnapContext& snap_context,
const ceph::real_time *pmtime, int flags,
const blkin_trace_info *trace_info = nullptr);
int aio_operate_read(const object_t& oid, ::ObjectOperation *o,
AioCompletionImpl *c, int flags, bufferlist *pbl, const blkin_trace_info *trace_info = nullptr);
struct C_aio_stat_Ack : public Context {
librados::AioCompletionImpl *c;
time_t *pmtime;
ceph::real_time mtime;
C_aio_stat_Ack(AioCompletionImpl *_c, time_t *pm);
void finish(int r) override;
};
struct C_aio_stat2_Ack : public Context {
librados::AioCompletionImpl *c;
struct timespec *pts;
ceph::real_time mtime;
C_aio_stat2_Ack(AioCompletionImpl *_c, struct timespec *pts);
void finish(int r) override;
};
struct C_aio_Complete : public Context {
#if defined(WITH_EVENTTRACE)
object_t oid;
#endif
AioCompletionImpl *c;
explicit C_aio_Complete(AioCompletionImpl *_c);
void finish(int r) override;
};
int aio_read(const object_t oid, AioCompletionImpl *c,
bufferlist *pbl, size_t len, uint64_t off, uint64_t snapid,
const blkin_trace_info *info = nullptr);
int aio_read(object_t oid, AioCompletionImpl *c,
char *buf, size_t len, uint64_t off, uint64_t snapid,
const blkin_trace_info *info = nullptr);
int aio_sparse_read(const object_t oid, AioCompletionImpl *c,
std::map<uint64_t,uint64_t> *m, bufferlist *data_bl,
size_t len, uint64_t off, uint64_t snapid);
int aio_cmpext(const object_t& oid, AioCompletionImpl *c, uint64_t off,
bufferlist& cmp_bl);
int aio_cmpext(const object_t& oid, AioCompletionImpl *c,
const char *cmp_buf, size_t cmp_len, uint64_t off);
int aio_write(const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl, size_t len, uint64_t off,
const blkin_trace_info *info = nullptr);
int aio_append(const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl, size_t len);
int aio_write_full(const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl);
int aio_writesame(const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl, size_t write_len, uint64_t off);
int aio_remove(const object_t &oid, AioCompletionImpl *c, int flags=0);
int aio_exec(const object_t& oid, AioCompletionImpl *c, const char *cls,
const char *method, bufferlist& inbl, bufferlist *outbl);
int aio_exec(const object_t& oid, AioCompletionImpl *c, const char *cls,
const char *method, bufferlist& inbl, char *buf, size_t out_len);
int aio_stat(const object_t& oid, AioCompletionImpl *c, uint64_t *psize, time_t *pmtime);
int aio_stat2(const object_t& oid, AioCompletionImpl *c, uint64_t *psize, struct timespec *pts);
int aio_getxattr(const object_t& oid, AioCompletionImpl *c,
const char *name, bufferlist& bl);
int aio_setxattr(const object_t& oid, AioCompletionImpl *c,
const char *name, bufferlist& bl);
int aio_getxattrs(const object_t& oid, AioCompletionImpl *c,
std::map<std::string, bufferlist>& attrset);
int aio_rmxattr(const object_t& oid, AioCompletionImpl *c,
const char *name);
int aio_cancel(AioCompletionImpl *c);
int hit_set_list(uint32_t hash, AioCompletionImpl *c,
std::list< std::pair<time_t, time_t> > *pls);
int hit_set_get(uint32_t hash, AioCompletionImpl *c, time_t stamp,
bufferlist *pbl);
int get_inconsistent_objects(const pg_t& pg,
const librados::object_id_t& start_after,
uint64_t max_to_get,
AioCompletionImpl *c,
std::vector<inconsistent_obj_t>* objects,
uint32_t* interval);
int get_inconsistent_snapsets(const pg_t& pg,
const librados::object_id_t& start_after,
uint64_t max_to_get,
AioCompletionImpl *c,
std::vector<inconsistent_snapset_t>* snapsets,
uint32_t* interval);
void set_sync_op_version(version_t ver);
int watch(const object_t& oid, uint64_t *cookie, librados::WatchCtx *ctx,
librados::WatchCtx2 *ctx2, bool internal = false);
int watch(const object_t& oid, uint64_t *cookie, librados::WatchCtx *ctx,
librados::WatchCtx2 *ctx2, uint32_t timeout, bool internal = false);
int aio_watch(const object_t& oid, AioCompletionImpl *c, uint64_t *cookie,
librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2,
bool internal = false);
int aio_watch(const object_t& oid, AioCompletionImpl *c, uint64_t *cookie,
librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2,
uint32_t timeout, bool internal = false);
int watch_check(uint64_t cookie);
int unwatch(uint64_t cookie);
int aio_unwatch(uint64_t cookie, AioCompletionImpl *c);
int notify(const object_t& oid, bufferlist& bl, uint64_t timeout_ms,
bufferlist *preplybl, char **preply_buf, size_t *preply_buf_len);
int notify_ack(const object_t& oid, uint64_t notify_id, uint64_t cookie,
bufferlist& bl);
int aio_notify(const object_t& oid, AioCompletionImpl *c, bufferlist& bl,
uint64_t timeout_ms, bufferlist *preplybl, char **preply_buf,
size_t *preply_buf_len);
int set_alloc_hint(const object_t& oid,
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags);
version_t last_version();
void set_assert_version(uint64_t ver);
void set_notify_timeout(uint32_t timeout);
int cache_pin(const object_t& oid);
int cache_unpin(const object_t& oid);
int application_enable(const std::string& app_name, bool force);
void application_enable_async(const std::string& app_name, bool force,
PoolAsyncCompletionImpl *c);
int application_list(std::set<std::string> *app_names);
int application_metadata_get(const std::string& app_name,
const std::string &key,
std::string* value);
int application_metadata_set(const std::string& app_name,
const std::string &key,
const std::string& value);
int application_metadata_remove(const std::string& app_name,
const std::string &key);
int application_metadata_list(const std::string& app_name,
std::map<std::string, std::string> *values);
};
#endif
| 12,110 | 39.23588 | 106 |
h
|
null |
ceph-main/src/librados/ListObjectImpl.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 David Zafman <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_LISTOBJECTIMPL_H
#define CEPH_LIBRADOS_LISTOBJECTIMPL_H
#include <string>
#include <include/rados/librados.hpp>
namespace librados {
struct ListObjectImpl {
std::string nspace;
std::string oid;
std::string locator;
ListObjectImpl() {}
ListObjectImpl(std::string n, std::string o, std::string l):
nspace(n), oid(o), locator(l) {}
auto operator<=>(const ListObjectImpl&) const = default;
const std::string& get_nspace() const { return nspace; }
const std::string& get_oid() const { return oid; }
const std::string& get_locator() const { return locator; }
};
inline std::ostream& operator<<(std::ostream& out, const struct ListObjectImpl& lop) {
out << (lop.nspace.size() ? lop.nspace + "/" : "") << lop.oid
<< (lop.locator.size() ? "@" + lop.locator : "");
return out;
}
class NObjectIteratorImpl {
public:
NObjectIteratorImpl() {}
~NObjectIteratorImpl();
NObjectIteratorImpl(const NObjectIteratorImpl &rhs);
NObjectIteratorImpl& operator=(const NObjectIteratorImpl& rhs);
bool operator==(const NObjectIteratorImpl& rhs) const;
bool operator!=(const NObjectIteratorImpl& rhs) const;
const ListObject& operator*() const;
const ListObject* operator->() const;
NObjectIteratorImpl &operator++(); // Preincrement
NObjectIteratorImpl operator++(int); // Postincrement
const ListObject *get_listobjectp() { return &cur_obj; }
/// get current hash position of the iterator, rounded to the current pg
uint32_t get_pg_hash_position() const;
/// move the iterator to a given hash position. This may (will!) be rounded to the nearest pg.
uint32_t seek(uint32_t pos);
/// move the iterator to a given cursor position
uint32_t seek(const librados::ObjectCursor& cursor);
/// get current cursor position
librados::ObjectCursor get_cursor();
void set_filter(const bufferlist &bl);
NObjectIteratorImpl(ObjListCtx *ctx_);
void get_next();
std::shared_ptr<ObjListCtx> ctx;
ListObject cur_obj;
};
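// Hedged usage sketch (not part of the original header): the public
// librados::NObjectIterator forwards to this impl; the pool contents are
// hypothetical.
//
//   for (librados::NObjectIterator it = io_ctx.nobjects_begin();
//        it != io_ctx.nobjects_end(); ++it) {
//     std::cout << it->get_oid() << std::endl;
//   }
//   // resuming later from a saved cursor:
//   librados::NObjectIterator it2 = io_ctx.nobjects_begin();
//   librados::ObjectCursor cur = it2.get_cursor();
//   it2.seek(cur);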
}
#endif
| 2,482 | 30.0375 | 99 |
h
|
null |
ceph-main/src/librados/ObjectOperationImpl.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "common/ceph_time.h"
#include "osdc/Objecter.h"
namespace librados {
// Wraps Objecter's ObjectOperation with storage for an optional mtime argument.
struct ObjectOperationImpl {
::ObjectOperation o;
ceph::real_time rt;
ceph::real_time *prt = nullptr;
};
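// Hedged sketch (not part of the original header) of the intended use: prt
// stays null unless a caller supplies an mtime, in which case it points at
// the embedded rt, e.g.
//
//   ObjectOperationImpl impl;
//   impl.rt = ceph::real_clock::from_time_t(mtime); // mtime: caller-provided
//   impl.prt = &impl.rt;
//   // downstream, operate() receives impl.prt; nullptr means "use now()".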
} // namespace librados
| 677 | 23.214286 | 80 |
h
|
null |
ceph-main/src/librados/PoolAsyncCompletionImpl.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_POOLASYNCCOMPLETIONIMPL_H
#define CEPH_LIBRADOS_POOLASYNCCOMPLETIONIMPL_H
#include "common/ceph_mutex.h"
#include <boost/intrusive_ptr.hpp>
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
namespace librados {
struct PoolAsyncCompletionImpl {
ceph::mutex lock = ceph::make_mutex("PoolAsyncCompletionImpl lock");
ceph::condition_variable cond;
int ref = 1;
int rval = 0;
bool released = false;
bool done = false;
rados_callback_t callback = nullptr;
void *callback_arg = nullptr;
PoolAsyncCompletionImpl() = default;
int set_callback(void *cb_arg, rados_callback_t cb) {
std::scoped_lock l(lock);
callback = cb;
callback_arg = cb_arg;
return 0;
}
int wait() {
std::unique_lock l(lock);
while (!done)
cond.wait(l);
return 0;
}
int is_complete() {
std::scoped_lock l(lock);
return done;
}
int get_return_value() {
std::scoped_lock l(lock);
return rval;
}
void get() {
std::scoped_lock l(lock);
ceph_assert(ref > 0);
ref++;
}
void release() {
std::scoped_lock l(lock);
ceph_assert(!released);
released = true;
}
void put() {
std::unique_lock l(lock);
int n = --ref;
l.unlock();
if (!n)
delete this;
}
};
inline void intrusive_ptr_add_ref(PoolAsyncCompletionImpl* p) {
p->get();
}
inline void intrusive_ptr_release(PoolAsyncCompletionImpl* p) {
p->put();
}
class CB_PoolAsync_Safe {
boost::intrusive_ptr<PoolAsyncCompletionImpl> p;
public:
explicit CB_PoolAsync_Safe(boost::intrusive_ptr<PoolAsyncCompletionImpl> p)
: p(p) {}
~CB_PoolAsync_Safe() = default;
void operator()(int r) {
auto c(std::move(p));
std::unique_lock l(c->lock);
c->rval = r;
c->done = true;
c->cond.notify_all();
if (c->callback) {
rados_callback_t cb = c->callback;
void *cb_arg = c->callback_arg;
l.unlock();
cb(c.get(), cb_arg);
l.lock();
}
}
};
}
#endif
| 2,529 | 21.792793 | 79 |
h
|
null |
ceph-main/src/librados/RadosClient.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <iostream>
#include <string>
#include <sstream>
#include <pthread.h>
#include <errno.h>
#include "common/ceph_context.h"
#include "common/config.h"
#include "common/common_init.h"
#include "common/ceph_json.h"
#include "common/errno.h"
#include "common/ceph_json.h"
#include "common/async/blocked_completion.h"
#include "include/buffer.h"
#include "include/stringify.h"
#include "include/util.h"
#include "msg/Messenger.h"
// needed for static_cast
#include "messages/MLog.h"
#include "AioCompletionImpl.h"
#include "IoCtxImpl.h"
#include "PoolAsyncCompletionImpl.h"
#include "RadosClient.h"
#include "include/ceph_assert.h"
#include "common/EventTrace.h"
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "librados: "
using std::ostringstream;
using std::string;
using std::map;
using std::vector;
namespace bc = boost::container;
namespace bs = boost::system;
namespace ca = ceph::async;
namespace cb = ceph::buffer;
librados::RadosClient::RadosClient(CephContext *cct_)
: Dispatcher(cct_->get()),
cct_deleter{cct, [](CephContext *p) {p->put();}}
{
auto& conf = cct->_conf;
conf.add_observer(this);
rados_mon_op_timeout = conf.get_val<std::chrono::seconds>("rados_mon_op_timeout");
}
int64_t librados::RadosClient::lookup_pool(const char *name)
{
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
int64_t ret = objecter->with_osdmap(std::mem_fn(&OSDMap::lookup_pg_pool_name),
name);
if (-ENOENT == ret) {
// Make sure we have the latest map
int r = wait_for_latest_osdmap();
if (r < 0)
return r;
ret = objecter->with_osdmap(std::mem_fn(&OSDMap::lookup_pg_pool_name),
name);
}
return ret;
}
bool librados::RadosClient::pool_requires_alignment(int64_t pool_id)
{
bool required;
int r = pool_requires_alignment2(pool_id, &required);
if (r < 0) {
    // Fall back to false; this is a little problematic since we don't
    // actually know the answer yet.
return false;
}
return required;
}
// a safer version of pool_requires_alignment
int librados::RadosClient::pool_requires_alignment2(int64_t pool_id,
bool *req)
{
if (!req)
return -EINVAL;
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
return objecter->with_osdmap([req, pool_id](const OSDMap& o) {
if (!o.have_pg_pool(pool_id)) {
return -ENOENT;
}
*req = o.get_pg_pool(pool_id)->requires_aligned_append();
return 0;
});
}
uint64_t librados::RadosClient::pool_required_alignment(int64_t pool_id)
{
uint64_t alignment;
int r = pool_required_alignment2(pool_id, &alignment);
if (r < 0) {
return 0;
}
return alignment;
}
// a safer version of pool_required_alignment
int librados::RadosClient::pool_required_alignment2(int64_t pool_id,
uint64_t *alignment)
{
if (!alignment)
return -EINVAL;
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
return objecter->with_osdmap([alignment, pool_id](const OSDMap &o) {
if (!o.have_pg_pool(pool_id)) {
return -ENOENT;
}
*alignment = o.get_pg_pool(pool_id)->required_alignment();
return 0;
});
}
int librados::RadosClient::pool_get_name(uint64_t pool_id, std::string *s, bool wait_latest_map)
{
int r = wait_for_osdmap();
if (r < 0)
return r;
retry:
objecter->with_osdmap([&](const OSDMap& o) {
if (!o.have_pg_pool(pool_id)) {
r = -ENOENT;
} else {
r = 0;
*s = o.get_pool_name(pool_id);
}
});
if (r == -ENOENT && wait_latest_map) {
r = wait_for_latest_osdmap();
if (r < 0)
return r;
wait_latest_map = false;
goto retry;
}
return r;
}
int librados::RadosClient::get_fsid(std::string *s)
{
if (!s)
return -EINVAL;
std::lock_guard l(lock);
ostringstream oss;
oss << monclient.get_fsid();
*s = oss.str();
return 0;
}
int librados::RadosClient::ping_monitor(const string mon_id, string *result)
{
int err = 0;
/* If we haven't yet connected, we have no way of telling whether we
   * already built monc's initial monmap.  If we are in CONNECTED state,
* then it is safe to assume that we went through connect(), which does
* build a monmap.
*/
if (state != CONNECTED) {
ldout(cct, 10) << __func__ << " build monmap" << dendl;
err = monclient.build_initial_monmap();
}
if (err < 0) {
return err;
}
err = monclient.ping_monitor(mon_id, result);
return err;
}
int librados::RadosClient::connect()
{
int err;
// already connected?
if (state == CONNECTING)
return -EINPROGRESS;
if (state == CONNECTED)
return -EISCONN;
state = CONNECTING;
if (!cct->_log->is_started()) {
cct->_log->start();
}
{
MonClient mc_bootstrap(cct, poolctx);
err = mc_bootstrap.get_monmap_and_config();
if (err < 0)
return err;
}
common_init_finish(cct);
poolctx.start(cct->_conf.get_val<std::uint64_t>("librados_thread_count"));
// get monmap
err = monclient.build_initial_monmap();
if (err < 0)
goto out;
err = -ENOMEM;
messenger = Messenger::create_client_messenger(cct, "radosclient");
if (!messenger)
goto out;
// require OSDREPLYMUX feature. this means we will fail to talk to
// old servers. this is necessary because otherwise we won't know
// how to decompose the reply data into its constituent pieces.
messenger->set_default_policy(Messenger::Policy::lossy_client(CEPH_FEATURE_OSDREPLYMUX));
ldout(cct, 1) << "starting msgr at " << messenger->get_myaddrs() << dendl;
ldout(cct, 1) << "starting objecter" << dendl;
objecter = new (std::nothrow) Objecter(cct, messenger, &monclient, poolctx);
if (!objecter)
goto out;
objecter->set_balanced_budget();
monclient.set_messenger(messenger);
mgrclient.set_messenger(messenger);
objecter->init();
messenger->add_dispatcher_head(&mgrclient);
messenger->add_dispatcher_tail(objecter);
messenger->add_dispatcher_tail(this);
messenger->start();
ldout(cct, 1) << "setting wanted keys" << dendl;
monclient.set_want_keys(
CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MGR);
ldout(cct, 1) << "calling monclient init" << dendl;
err = monclient.init();
if (err) {
ldout(cct, 0) << conf->name << " initialization error " << cpp_strerror(-err) << dendl;
shutdown();
goto out;
}
err = monclient.authenticate(std::chrono::duration<double>(conf.get_val<std::chrono::seconds>("client_mount_timeout")).count());
if (err) {
ldout(cct, 0) << conf->name << " authentication error " << cpp_strerror(-err) << dendl;
shutdown();
goto out;
}
messenger->set_myname(entity_name_t::CLIENT(monclient.get_global_id()));
// Detect older cluster, put mgrclient into compatible mode
mgrclient.set_mgr_optional(
!get_required_monitor_features().contains_all(
ceph::features::mon::FEATURE_LUMINOUS));
// MgrClient needs this (it doesn't have MonClient reference itself)
monclient.sub_want("mgrmap", 0, 0);
monclient.renew_subs();
if (service_daemon) {
ldout(cct, 10) << __func__ << " registering as " << service_name << "."
<< daemon_name << dendl;
mgrclient.service_daemon_register(service_name, daemon_name,
daemon_metadata);
}
mgrclient.init();
objecter->set_client_incarnation(0);
objecter->start();
lock.lock();
state = CONNECTED;
instance_id = monclient.get_global_id();
lock.unlock();
ldout(cct, 1) << "init done" << dendl;
err = 0;
out:
if (err) {
state = DISCONNECTED;
if (objecter) {
delete objecter;
objecter = NULL;
}
if (messenger) {
delete messenger;
messenger = NULL;
}
}
return err;
}
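// Illustrative sketch (not part of the original file): the public C API drives
// the connect sequence above roughly as follows (error handling omitted):
//
//   rados_t cluster;
//   rados_create(&cluster, NULL);         // allocates a RadosClient
//   rados_conf_read_file(cluster, NULL);  // optional: load ceph.conf
//   rados_connect(cluster);               // ends up in RadosClient::connect()
//   ...
//   rados_shutdown(cluster);              // RadosClient::shutdown() + cleanup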
void librados::RadosClient::shutdown()
{
std::unique_lock l{lock};
if (state == DISCONNECTED) {
return;
}
bool need_objecter = false;
if (objecter && objecter->initialized) {
need_objecter = true;
}
if (state == CONNECTED) {
if (need_objecter) {
// make sure watch callbacks are flushed
watch_flush();
}
}
state = DISCONNECTED;
instance_id = 0;
l.unlock();
if (need_objecter) {
objecter->shutdown();
}
mgrclient.shutdown();
monclient.shutdown();
if (messenger) {
messenger->shutdown();
messenger->wait();
}
poolctx.stop();
ldout(cct, 1) << "shutdown" << dendl;
}
int librados::RadosClient::watch_flush()
{
ldout(cct, 10) << __func__ << " enter" << dendl;
objecter->linger_callback_flush(ca::use_blocked);
ldout(cct, 10) << __func__ << " exit" << dendl;
return 0;
}
struct CB_aio_watch_flush_Complete {
librados::RadosClient *client;
librados::AioCompletionImpl *c;
CB_aio_watch_flush_Complete(librados::RadosClient *_client, librados::AioCompletionImpl *_c)
: client(_client), c(_c) {
c->get();
}
CB_aio_watch_flush_Complete(const CB_aio_watch_flush_Complete&) = delete;
CB_aio_watch_flush_Complete operator =(const CB_aio_watch_flush_Complete&) = delete;
CB_aio_watch_flush_Complete(CB_aio_watch_flush_Complete&& rhs) {
client = rhs.client;
c = rhs.c;
}
CB_aio_watch_flush_Complete& operator =(CB_aio_watch_flush_Complete&& rhs) {
client = rhs.client;
c = rhs.c;
return *this;
}
void operator()() {
c->lock.lock();
c->rval = 0;
c->complete = true;
c->cond.notify_all();
if (c->callback_complete ||
c->callback_safe) {
boost::asio::defer(client->finish_strand, librados::CB_AioComplete(c));
}
c->put_unlock();
}
};
int librados::RadosClient::async_watch_flush(AioCompletionImpl *c)
{
ldout(cct, 10) << __func__ << " enter" << dendl;
objecter->linger_callback_flush(CB_aio_watch_flush_Complete(this, c));
ldout(cct, 10) << __func__ << " exit" << dendl;
return 0;
}
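// Illustrative sketch (not part of the original file): the C API reaches
// async_watch_flush() via rados_aio_watch_flush(); a minimal caller might do:
//
//   rados_completion_t comp;
//   rados_aio_create_completion(NULL, NULL, NULL, &comp);
//   rados_aio_watch_flush(cluster, comp);
//   rados_aio_wait_for_complete(comp);
//   rados_aio_release(comp);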
uint64_t librados::RadosClient::get_instance_id()
{
return instance_id;
}
int librados::RadosClient::get_min_compatible_osd(int8_t* require_osd_release)
{
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
objecter->with_osdmap(
[require_osd_release](const OSDMap& o) {
*require_osd_release = to_integer<int8_t>(o.require_osd_release);
});
return 0;
}
int librados::RadosClient::get_min_compatible_client(int8_t* min_compat_client,
int8_t* require_min_compat_client)
{
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
objecter->with_osdmap(
[min_compat_client, require_min_compat_client](const OSDMap& o) {
*min_compat_client = to_integer<int8_t>(o.get_min_compat_client());
*require_min_compat_client =
to_integer<int8_t>(o.get_require_min_compat_client());
});
return 0;
}
librados::RadosClient::~RadosClient()
{
cct->_conf.remove_observer(this);
if (messenger)
delete messenger;
if (objecter)
delete objecter;
cct = NULL;
}
int librados::RadosClient::create_ioctx(const char *name, IoCtxImpl **io)
{
int64_t poolid = lookup_pool(name);
if (poolid < 0) {
return (int)poolid;
}
*io = new librados::IoCtxImpl(this, objecter, poolid, CEPH_NOSNAP);
return 0;
}
int librados::RadosClient::create_ioctx(int64_t pool_id, IoCtxImpl **io)
{
std::string pool_name;
int r = pool_get_name(pool_id, &pool_name, true);
if (r < 0)
return r;
*io = new librados::IoCtxImpl(this, objecter, pool_id, CEPH_NOSNAP);
return 0;
}
bool librados::RadosClient::ms_dispatch(Message *m)
{
bool ret;
std::lock_guard l(lock);
if (state == DISCONNECTED) {
ldout(cct, 10) << "disconnected, discarding " << *m << dendl;
m->put();
ret = true;
} else {
ret = _dispatch(m);
}
return ret;
}
void librados::RadosClient::ms_handle_connect(Connection *con)
{
}
bool librados::RadosClient::ms_handle_reset(Connection *con)
{
return false;
}
void librados::RadosClient::ms_handle_remote_reset(Connection *con)
{
}
bool librados::RadosClient::ms_handle_refused(Connection *con)
{
return false;
}
bool librados::RadosClient::_dispatch(Message *m)
{
ceph_assert(ceph_mutex_is_locked(lock));
switch (m->get_type()) {
// OSD
case CEPH_MSG_OSD_MAP:
cond.notify_all();
m->put();
break;
case CEPH_MSG_MDS_MAP:
m->put();
break;
case MSG_LOG:
handle_log(static_cast<MLog *>(m));
break;
default:
return false;
}
return true;
}
int librados::RadosClient::wait_for_osdmap()
{
ceph_assert(ceph_mutex_is_not_locked_by_me(lock));
if (state != CONNECTED) {
return -ENOTCONN;
}
bool need_map = false;
objecter->with_osdmap([&](const OSDMap& o) {
if (o.get_epoch() == 0) {
need_map = true;
}
});
if (need_map) {
std::unique_lock l(lock);
ceph::timespan timeout = rados_mon_op_timeout;
if (objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch)) == 0) {
ldout(cct, 10) << __func__ << " waiting" << dendl;
while (objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch)) == 0) {
if (timeout == timeout.zero()) {
cond.wait(l);
} else {
if (cond.wait_for(l, timeout) == std::cv_status::timeout) {
lderr(cct) << "timed out waiting for first osdmap from monitors"
<< dendl;
return -ETIMEDOUT;
}
}
}
ldout(cct, 10) << __func__ << " done waiting" << dendl;
}
return 0;
} else {
return 0;
}
}
int librados::RadosClient::wait_for_latest_osdmap()
{
bs::error_code ec;
objecter->wait_for_latest_osdmap(ca::use_blocked[ec]);
return ceph::from_error_code(ec);
}
int librados::RadosClient::pool_list(std::list<std::pair<int64_t, string> >& v)
{
int r = wait_for_osdmap();
if (r < 0)
return r;
objecter->with_osdmap([&](const OSDMap& o) {
for (auto p : o.get_pools())
v.push_back(std::make_pair(p.first, o.get_pool_name(p.first)));
});
return 0;
}
int librados::RadosClient::get_pool_stats(std::list<string>& pools,
map<string,::pool_stat_t> *result,
bool *pper_pool)
{
bs::error_code ec;
std::vector<std::string> v(pools.begin(), pools.end());
auto [res, per_pool] = objecter->get_pool_stats(v, ca::use_blocked[ec]);
if (ec)
return ceph::from_error_code(ec);
if (per_pool)
*pper_pool = per_pool;
if (result)
result->insert(res.begin(), res.end());
return 0;
}
bool librados::RadosClient::get_pool_is_selfmanaged_snaps_mode(
const std::string& pool)
{
bool ret = false;
objecter->with_osdmap([&](const OSDMap& osdmap) {
int64_t poolid = osdmap.lookup_pg_pool_name(pool);
if (poolid >= 0)
ret = osdmap.get_pg_pool(poolid)->is_unmanaged_snaps_mode();
});
return ret;
}
int librados::RadosClient::get_fs_stats(ceph_statfs& stats)
{
ceph::mutex mylock = ceph::make_mutex("RadosClient::get_fs_stats::mylock");
ceph::condition_variable cond;
bool done;
int ret = 0;
{
std::lock_guard l{mylock};
objecter->get_fs_stats(stats, std::optional<int64_t> (),
new C_SafeCond(mylock, cond, &done, &ret));
}
{
std::unique_lock l{mylock};
cond.wait(l, [&done] { return done;});
}
return ret;
}
void librados::RadosClient::get() {
std::lock_guard l(lock);
ceph_assert(refcnt > 0);
refcnt++;
}
bool librados::RadosClient::put() {
std::lock_guard l(lock);
ceph_assert(refcnt > 0);
refcnt--;
return (refcnt == 0);
}
int librados::RadosClient::pool_create(string& name,
int16_t crush_rule)
{
if (!name.length())
return -EINVAL;
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
ceph::mutex mylock = ceph::make_mutex("RadosClient::pool_create::mylock");
int reply;
ceph::condition_variable cond;
bool done;
Context *onfinish = new C_SafeCond(mylock, cond, &done, &reply);
objecter->create_pool(name, onfinish, crush_rule);
std::unique_lock l{mylock};
cond.wait(l, [&done] { return done; });
return reply;
}
int librados::RadosClient::pool_create_async(string& name,
PoolAsyncCompletionImpl *c,
int16_t crush_rule)
{
int r = wait_for_osdmap();
if (r < 0)
return r;
Context *onfinish = make_lambda_context(CB_PoolAsync_Safe(c));
objecter->create_pool(name, onfinish, crush_rule);
return r;
}
int librados::RadosClient::pool_get_base_tier(int64_t pool_id, int64_t* base_tier)
{
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
objecter->with_osdmap([&](const OSDMap& o) {
const pg_pool_t* pool = o.get_pg_pool(pool_id);
if (pool) {
if (pool->tier_of < 0) {
*base_tier = pool_id;
} else {
*base_tier = pool->tier_of;
}
r = 0;
} else {
r = -ENOENT;
}
});
return r;
}
int librados::RadosClient::pool_delete(const char *name)
{
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
ceph::mutex mylock = ceph::make_mutex("RadosClient::pool_delete::mylock");
ceph::condition_variable cond;
bool done;
int ret;
Context *onfinish = new C_SafeCond(mylock, cond, &done, &ret);
objecter->delete_pool(name, onfinish);
std::unique_lock l{mylock};
cond.wait(l, [&done] { return done;});
return ret;
}
int librados::RadosClient::pool_delete_async(const char *name, PoolAsyncCompletionImpl *c)
{
int r = wait_for_osdmap();
if (r < 0)
return r;
Context *onfinish = make_lambda_context(CB_PoolAsync_Safe(c));
objecter->delete_pool(name, onfinish);
return r;
}
void librados::RadosClient::blocklist_self(bool set) {
std::lock_guard l(lock);
objecter->blocklist_self(set);
}
std::string librados::RadosClient::get_addrs() const {
CachedStackStringStream cos;
*cos << messenger->get_myaddrs();
return std::string(cos->strv());
}
int librados::RadosClient::blocklist_add(const string& client_address,
uint32_t expire_seconds)
{
entity_addr_t addr;
if (!addr.parse(client_address)) {
lderr(cct) << "unable to parse address " << client_address << dendl;
return -EINVAL;
}
std::stringstream cmd;
cmd << "{"
<< "\"prefix\": \"osd blocklist\", "
<< "\"blocklistop\": \"add\", "
<< "\"addr\": \"" << client_address << "\"";
if (expire_seconds != 0) {
cmd << ", \"expire\": " << expire_seconds << ".0";
}
cmd << "}";
std::vector<std::string> cmds;
cmds.push_back(cmd.str());
bufferlist inbl;
int r = mon_command(cmds, inbl, NULL, NULL);
if (r == -EINVAL) {
// try legacy blacklist command
std::stringstream cmd;
cmd << "{"
<< "\"prefix\": \"osd blacklist\", "
<< "\"blacklistop\": \"add\", "
<< "\"addr\": \"" << client_address << "\"";
if (expire_seconds != 0) {
cmd << ", \"expire\": " << expire_seconds << ".0";
}
cmd << "}";
cmds.clear();
cmds.push_back(cmd.str());
r = mon_command(cmds, inbl, NULL, NULL);
}
if (r < 0) {
return r;
}
// ensure we have the latest osd map epoch before proceeding
r = wait_for_latest_osdmap();
return r;
}
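// Example of the monitor command JSON built above, assuming client_address
// "192.168.0.10:0/123" and expire_seconds 3600:
//
//   {"prefix": "osd blocklist", "blocklistop": "add",
//    "addr": "192.168.0.10:0/123", "expire": 3600.0}
//
// If the monitors predate the blocklist rename, the -EINVAL fallback resends
// the same payload with the legacy "osd blacklist"/"blacklistop" keys.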
int librados::RadosClient::mon_command(const vector<string>& cmd,
const bufferlist &inbl,
bufferlist *outbl, string *outs)
{
C_SaferCond ctx;
mon_command_async(cmd, inbl, outbl, outs, &ctx);
return ctx.wait();
}
void librados::RadosClient::mon_command_async(const vector<string>& cmd,
const bufferlist &inbl,
bufferlist *outbl, string *outs,
Context *on_finish)
{
std::lock_guard l{lock};
monclient.start_mon_command(cmd, inbl,
[outs, outbl,
on_finish = std::unique_ptr<Context>(on_finish)]
(bs::error_code e,
std::string&& s,
ceph::bufferlist&& b) mutable {
if (outs)
*outs = std::move(s);
if (outbl)
*outbl = std::move(b);
if (on_finish)
on_finish.release()->complete(
ceph::from_error_code(e));
});
}
int librados::RadosClient::mgr_command(const vector<string>& cmd,
const bufferlist &inbl,
bufferlist *outbl, string *outs)
{
std::lock_guard l(lock);
C_SaferCond cond;
int r = mgrclient.start_command(cmd, inbl, outbl, outs, &cond);
if (r < 0)
return r;
lock.unlock();
if (rados_mon_op_timeout.count() > 0) {
r = cond.wait_for(rados_mon_op_timeout);
} else {
r = cond.wait();
}
lock.lock();
return r;
}
int librados::RadosClient::mgr_command(
const string& name,
const vector<string>& cmd,
const bufferlist &inbl,
bufferlist *outbl, string *outs)
{
std::lock_guard l(lock);
C_SaferCond cond;
int r = mgrclient.start_tell_command(name, cmd, inbl, outbl, outs, &cond);
if (r < 0)
return r;
lock.unlock();
if (rados_mon_op_timeout.count() > 0) {
r = cond.wait_for(rados_mon_op_timeout);
} else {
r = cond.wait();
}
lock.lock();
return r;
}
int librados::RadosClient::mon_command(int rank, const vector<string>& cmd,
const bufferlist &inbl,
bufferlist *outbl, string *outs)
{
bs::error_code ec;
auto&& [s, bl] = monclient.start_mon_command(rank, cmd, inbl,
ca::use_blocked[ec]);
if (outs)
*outs = std::move(s);
if (outbl)
*outbl = std::move(bl);
return ceph::from_error_code(ec);
}
int librados::RadosClient::mon_command(string name, const vector<string>& cmd,
const bufferlist &inbl,
bufferlist *outbl, string *outs)
{
bs::error_code ec;
auto&& [s, bl] = monclient.start_mon_command(name, cmd, inbl,
ca::use_blocked[ec]);
if (outs)
*outs = std::move(s);
if (outbl)
*outbl = std::move(bl);
return ceph::from_error_code(ec);
}
int librados::RadosClient::osd_command(int osd, vector<string>& cmd,
const bufferlist& inbl,
bufferlist *poutbl, string *prs)
{
ceph_tid_t tid;
if (osd < 0)
return -EINVAL;
// XXX do anything with tid?
bs::error_code ec;
auto [s, bl] = objecter->osd_command(osd, std::move(cmd), cb::list(inbl),
&tid, ca::use_blocked[ec]);
if (poutbl)
*poutbl = std::move(bl);
if (prs)
*prs = std::move(s);
return ceph::from_error_code(ec);
}
int librados::RadosClient::pg_command(pg_t pgid, vector<string>& cmd,
const bufferlist& inbl,
bufferlist *poutbl, string *prs)
{
ceph_tid_t tid;
bs::error_code ec;
auto [s, bl] = objecter->pg_command(pgid, std::move(cmd), inbl, &tid,
ca::use_blocked[ec]);
if (poutbl)
*poutbl = std::move(bl);
if (prs)
*prs = std::move(s);
return ceph::from_error_code(ec);
}
int librados::RadosClient::monitor_log(const string& level,
rados_log_callback_t cb,
rados_log_callback2_t cb2,
void *arg)
{
std::lock_guard l(lock);
if (state != CONNECTED) {
return -ENOTCONN;
}
if (cb == NULL && cb2 == NULL) {
// stop watch
ldout(cct, 10) << __func__ << " removing cb " << (void*)log_cb
<< " " << (void*)log_cb2 << dendl;
monclient.sub_unwant(log_watch);
log_watch.clear();
log_cb = NULL;
log_cb2 = NULL;
log_cb_arg = NULL;
return 0;
}
string watch_level;
if (level == "debug") {
watch_level = "log-debug";
} else if (level == "info") {
watch_level = "log-info";
} else if (level == "warn" || level == "warning") {
watch_level = "log-warn";
} else if (level == "err" || level == "error") {
watch_level = "log-error";
} else if (level == "sec") {
watch_level = "log-sec";
} else {
ldout(cct, 10) << __func__ << " invalid level " << level << dendl;
return -EINVAL;
}
if (log_cb || log_cb2)
monclient.sub_unwant(log_watch);
// (re)start watch
ldout(cct, 10) << __func__ << " add cb " << (void*)cb << " " << (void*)cb2
<< " level " << level << dendl;
monclient.sub_want(watch_level, 0, 0);
monclient.renew_subs();
log_cb = cb;
log_cb2 = cb2;
log_cb_arg = arg;
log_watch = watch_level;
return 0;
}
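// Illustrative sketch (not part of the original file): the C API exposes this
// through rados_monitor_log(); callback and variable names are assumptions.
//
//   static void log_cb(void *arg, const char *line, const char *who,
//                      uint64_t sec, uint64_t nsec, uint64_t seq,
//                      const char *level, const char *msg) { /* ... */ }
//   ...
//   rados_monitor_log(cluster, "info", log_cb, NULL);  // start watching
//   rados_monitor_log(cluster, "info", NULL, NULL);    // stop watching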
void librados::RadosClient::handle_log(MLog *m)
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 10) << __func__ << " version " << m->version << dendl;
if (log_last_version < m->version) {
log_last_version = m->version;
if (log_cb || log_cb2) {
for (std::deque<LogEntry>::iterator it = m->entries.begin(); it != m->entries.end(); ++it) {
LogEntry e = *it;
ostringstream ss;
ss << e.stamp << " " << e.name << " " << e.prio << " " << e.msg;
string line = ss.str();
string who = stringify(e.rank) + " " + stringify(e.addrs);
string name = stringify(e.name);
string level = stringify(e.prio);
struct timespec stamp;
e.stamp.to_timespec(&stamp);
ldout(cct, 20) << __func__ << " delivering " << ss.str() << dendl;
if (log_cb)
log_cb(log_cb_arg, line.c_str(), who.c_str(),
stamp.tv_sec, stamp.tv_nsec,
e.seq, level.c_str(), e.msg.c_str());
if (log_cb2)
log_cb2(log_cb_arg, line.c_str(),
e.channel.c_str(),
who.c_str(), name.c_str(),
stamp.tv_sec, stamp.tv_nsec,
e.seq, level.c_str(), e.msg.c_str());
}
}
monclient.sub_got(log_watch, log_last_version);
}
m->put();
}
int librados::RadosClient::service_daemon_register(
const std::string& service, ///< service name (e.g., 'rgw')
const std::string& name, ///< daemon name (e.g., 'gwfoo')
const std::map<std::string,std::string>& metadata)
{
if (service_daemon) {
return -EEXIST;
}
if (service == "osd" ||
service == "mds" ||
service == "client" ||
service == "mon" ||
service == "mgr") {
// normal ceph entity types are not allowed!
return -EINVAL;
}
if (service.empty() || name.empty()) {
return -EINVAL;
}
collect_sys_info(&daemon_metadata, cct);
ldout(cct,10) << __func__ << " " << service << "." << name << dendl;
service_daemon = true;
service_name = service;
daemon_name = name;
daemon_metadata.insert(metadata.begin(), metadata.end());
if (state == DISCONNECTED) {
return 0;
}
if (state == CONNECTING) {
return -EBUSY;
}
mgrclient.service_daemon_register(service_name, daemon_name,
daemon_metadata);
return 0;
}
int librados::RadosClient::service_daemon_update_status(
std::map<std::string,std::string>&& status)
{
if (state != CONNECTED) {
return -ENOTCONN;
}
return mgrclient.service_daemon_update_status(std::move(status));
}
mon_feature_t librados::RadosClient::get_required_monitor_features() const
{
return monclient.with_monmap([](const MonMap &monmap) {
return monmap.get_required_features(); } );
}
int librados::RadosClient::get_inconsistent_pgs(int64_t pool_id,
std::vector<std::string>* pgs)
{
vector<string> cmd = {
"{\"prefix\": \"pg ls\","
"\"pool\": " + std::to_string(pool_id) + ","
"\"states\": [\"inconsistent\"],"
"\"format\": \"json\"}"
};
bufferlist inbl, outbl;
string outstring;
if (auto ret = mgr_command(cmd, inbl, &outbl, &outstring); ret) {
return ret;
}
if (!outbl.length()) {
// no pg returned
return 0;
}
JSONParser parser;
if (!parser.parse(outbl.c_str(), outbl.length())) {
return -EINVAL;
}
vector<string> v;
if (!parser.is_array()) {
JSONObj *pgstat_obj = parser.find_obj("pg_stats");
if (!pgstat_obj)
return 0;
auto s = pgstat_obj->get_data();
JSONParser pg_stats;
if (!pg_stats.parse(s.c_str(), s.length())) {
return -EINVAL;
}
v = pg_stats.get_array_elements();
} else {
v = parser.get_array_elements();
}
for (auto i : v) {
JSONParser pg_json;
if (!pg_json.parse(i.c_str(), i.length())) {
return -EINVAL;
}
string pgid;
JSONDecoder::decode_json("pgid", pgid, &pg_json);
pgs->emplace_back(std::move(pgid));
}
return 0;
}
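// Example of the "pg ls" JSON shapes accepted by the parser above (abridged):
//
//   {"pg_stats": [{"pgid": "2.a", ...}, {"pgid": "2.1f", ...}]}   // object form
//   [{"pgid": "2.a", ...}, {"pgid": "2.1f", ...}]                 // bare array
//
// Only the "pgid" field of each element is extracted into *pgs.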
const char** librados::RadosClient::get_tracked_conf_keys() const
{
static const char *config_keys[] = {
"librados_thread_count",
"rados_mon_op_timeout",
nullptr
};
return config_keys;
}
void librados::RadosClient::handle_conf_change(const ConfigProxy& conf,
const std::set<std::string> &changed)
{
if (changed.count("librados_thread_count")) {
poolctx.stop();
poolctx.start(conf.get_val<std::uint64_t>("librados_thread_count"));
}
if (changed.count("rados_mon_op_timeout")) {
rados_mon_op_timeout = conf.get_val<std::chrono::seconds>("rados_mon_op_timeout");
}
}
| 28,810 | 23.29258 | 130 |
cc
|
null |
ceph-main/src/librados/RadosClient.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_RADOSCLIENT_H
#define CEPH_LIBRADOS_RADOSCLIENT_H
#include <functional>
#include <memory>
#include <string>
#include "msg/Dispatcher.h"
#include "common/async/context_pool.h"
#include "common/config_fwd.h"
#include "common/Cond.h"
#include "common/ceph_mutex.h"
#include "common/ceph_time.h"
#include "common/config_obs.h"
#include "include/common_fwd.h"
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "mon/MonClient.h"
#include "mgr/MgrClient.h"
#include "IoCtxImpl.h"
struct Context;
class Message;
class MLog;
class Messenger;
class AioCompletionImpl;
namespace neorados { namespace detail { class RadosClient; }}
class librados::RadosClient : public Dispatcher,
public md_config_obs_t
{
friend neorados::detail::RadosClient;
public:
using Dispatcher::cct;
private:
std::unique_ptr<CephContext,
std::function<void(CephContext*)>> cct_deleter;
public:
const ConfigProxy& conf{cct->_conf};
ceph::async::io_context_pool poolctx;
private:
enum {
DISCONNECTED,
CONNECTING,
CONNECTED,
} state{DISCONNECTED};
MonClient monclient{cct, poolctx};
MgrClient mgrclient{cct, nullptr, &monclient.monmap};
Messenger *messenger{nullptr};
uint64_t instance_id{0};
bool _dispatch(Message *m);
bool ms_dispatch(Message *m) override;
void ms_handle_connect(Connection *con) override;
bool ms_handle_reset(Connection *con) override;
void ms_handle_remote_reset(Connection *con) override;
bool ms_handle_refused(Connection *con) override;
Objecter *objecter{nullptr};
ceph::mutex lock = ceph::make_mutex("librados::RadosClient::lock");
ceph::condition_variable cond;
int refcnt{1};
version_t log_last_version{0};
rados_log_callback_t log_cb{nullptr};
rados_log_callback2_t log_cb2{nullptr};
void *log_cb_arg{nullptr};
std::string log_watch;
bool service_daemon = false;
std::string daemon_name, service_name;
std::map<std::string,std::string> daemon_metadata;
ceph::timespan rados_mon_op_timeout{};
int wait_for_osdmap();
public:
boost::asio::io_context::strand finish_strand{poolctx.get_io_context()};
explicit RadosClient(CephContext *cct);
~RadosClient() override;
int ping_monitor(std::string mon_id, std::string *result);
int connect();
void shutdown();
int watch_flush();
int async_watch_flush(AioCompletionImpl *c);
uint64_t get_instance_id();
int get_min_compatible_osd(int8_t* require_osd_release);
int get_min_compatible_client(int8_t* min_compat_client,
int8_t* require_min_compat_client);
int wait_for_latest_osdmap();
int create_ioctx(const char *name, IoCtxImpl **io);
int create_ioctx(int64_t, IoCtxImpl **io);
int get_fsid(std::string *s);
int64_t lookup_pool(const char *name);
bool pool_requires_alignment(int64_t pool_id);
int pool_requires_alignment2(int64_t pool_id, bool *req);
uint64_t pool_required_alignment(int64_t pool_id);
int pool_required_alignment2(int64_t pool_id, uint64_t *alignment);
int pool_get_name(uint64_t pool_id, std::string *name,
bool wait_latest_map = false);
int pool_list(std::list<std::pair<int64_t, std::string> >& ls);
int get_pool_stats(std::list<std::string>& ls, std::map<std::string,::pool_stat_t> *result,
bool *per_pool);
int get_fs_stats(ceph_statfs& result);
bool get_pool_is_selfmanaged_snaps_mode(const std::string& pool);
  /*
    -1 is the default value; with it the monitor picks the crush rule in the
    following order:
      a) the osd pool default crush replicated rule
      b) the first rule
      c) error out if no rule is found
  */
int pool_create(std::string& name, int16_t crush_rule=-1);
int pool_create_async(std::string& name, PoolAsyncCompletionImpl *c,
int16_t crush_rule=-1);
int pool_get_base_tier(int64_t pool_id, int64_t* base_tier);
int pool_delete(const char *name);
int pool_delete_async(const char *name, PoolAsyncCompletionImpl *c);
int blocklist_add(const std::string& client_address, uint32_t expire_seconds);
int mon_command(const std::vector<std::string>& cmd, const bufferlist &inbl,
bufferlist *outbl, std::string *outs);
void mon_command_async(const std::vector<std::string>& cmd, const bufferlist &inbl,
bufferlist *outbl, std::string *outs, Context *on_finish);
int mon_command(int rank,
const std::vector<std::string>& cmd, const bufferlist &inbl,
bufferlist *outbl, std::string *outs);
int mon_command(std::string name,
const std::vector<std::string>& cmd, const bufferlist &inbl,
bufferlist *outbl, std::string *outs);
int mgr_command(const std::vector<std::string>& cmd, const bufferlist &inbl,
bufferlist *outbl, std::string *outs);
int mgr_command(
const std::string& name,
const std::vector<std::string>& cmd, const bufferlist &inbl,
bufferlist *outbl, std::string *outs);
int osd_command(int osd, std::vector<std::string>& cmd, const bufferlist& inbl,
bufferlist *poutbl, std::string *prs);
int pg_command(pg_t pgid, std::vector<std::string>& cmd, const bufferlist& inbl,
bufferlist *poutbl, std::string *prs);
void handle_log(MLog *m);
int monitor_log(const std::string& level, rados_log_callback_t cb,
rados_log_callback2_t cb2, void *arg);
void get();
bool put();
void blocklist_self(bool set);
std::string get_addrs() const;
int service_daemon_register(
const std::string& service, ///< service name (e.g., 'rgw')
const std::string& name, ///< daemon name (e.g., 'gwfoo')
const std::map<std::string,std::string>& metadata); ///< static metadata about daemon
int service_daemon_update_status(
std::map<std::string,std::string>&& status);
mon_feature_t get_required_monitor_features() const;
int get_inconsistent_pgs(int64_t pool_id, std::vector<std::string>* pgs);
const char** get_tracked_conf_keys() const override;
void handle_conf_change(const ConfigProxy& conf,
const std::set <std::string> &changed) override;
};
#endif
| 6,525 | 31.79397 | 96 |
h
|
null |
ceph-main/src/librados/RadosXattrIter.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <stdlib.h>
#include "RadosXattrIter.h"
librados::RadosXattrsIter::RadosXattrsIter()
: val(NULL)
{
i = attrset.end();
}
librados::RadosXattrsIter::~RadosXattrsIter()
{
free(val);
val = NULL;
}
| 645 | 20.533333 | 70 |
cc
|
null |
ceph-main/src/librados/RadosXattrIter.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_XATTRITER_H
#define CEPH_LIBRADOS_XATTRITER_H
#include <string>
#include <map>
#include "include/buffer.h" // for bufferlist
namespace librados {
/**
* iterator object used in implementation of the external
* attributes part of the C interface of librados
*/
struct RadosXattrsIter {
RadosXattrsIter();
~RadosXattrsIter();
std::map<std::string, bufferlist> attrset;
std::map<std::string, bufferlist>::iterator i;
char *val;
};
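  // Illustrative sketch (not part of the original header): the C xattr
  // iteration API is the intended consumer of this struct. rados_getxattrs()
  // fills 'attrset' and positions 'i'; each rados_getxattrs_next() call hands
  // back the current name/value (with 'val' holding a heap copy that is freed
  // later) and advances 'i'. Typical C usage (names assumed):
  //
  //   rados_xattrs_iter_t it;
  //   rados_getxattrs(ioctx, "oid", &it);
  //   const char *name, *val; size_t len;
  //   while (rados_getxattrs_next(it, &name, &val, &len) == 0 && name)
  //     ; /* consume name/val */
  //   rados_getxattrs_end(it);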
};
#endif
| 933 | 22.948718 | 70 |
h
|
null |
ceph-main/src/librados/librados_asio.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef LIBRADOS_ASIO_H
#define LIBRADOS_ASIO_H
#include "include/rados/librados.hpp"
#include "common/async/completion.h"
/// Defines asynchronous librados operations that satisfy all of the
/// "Requirements on asynchronous operations" imposed by the C++ Networking TS
/// in section 13.2.7. Many of the type and variable names below are taken
/// directly from those requirements.
///
/// The current draft of the Networking TS (as of 2017-11-27) is available here:
/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/n4711.pdf
///
/// The boost::asio documentation duplicates these requirements here:
/// http://www.boost.org/doc/libs/1_66_0/doc/html/boost_asio/reference/asynchronous_operations.html
namespace librados {
namespace detail {
#ifndef _WIN32
constexpr auto err_category = boost::system::system_category;
#else
// librados uses "errno.h" error codes. On Windows,
// boost::system::system_category refers to errors from winerror.h.
// That being considered, we'll use boost::system::generic_category.
constexpr auto err_category = boost::system::generic_category;
#endif
/// unique_ptr with custom deleter for AioCompletion
struct AioCompletionDeleter {
void operator()(AioCompletion *c) { c->release(); }
};
using unique_aio_completion_ptr =
std::unique_ptr<AioCompletion, AioCompletionDeleter>;
/// Invokes the given completion handler. When the type of Result is not void,
/// storage is provided for it and that result is passed as an additional
/// argument to the handler.
template <typename Result>
struct Invoker {
using Signature = void(boost::system::error_code, Result);
Result result;
template <typename Completion>
void dispatch(Completion&& completion, boost::system::error_code ec) {
ceph::async::dispatch(std::move(completion), ec, std::move(result));
}
};
// specialization for Result=void
template <>
struct Invoker<void> {
using Signature = void(boost::system::error_code);
template <typename Completion>
void dispatch(Completion&& completion, boost::system::error_code ec) {
ceph::async::dispatch(std::move(completion), ec);
}
};
template <typename Result>
struct AsyncOp : Invoker<Result> {
unique_aio_completion_ptr aio_completion;
using Signature = typename Invoker<Result>::Signature;
using Completion = ceph::async::Completion<Signature, AsyncOp<Result>>;
static void aio_dispatch(completion_t cb, void *arg) {
// reclaim ownership of the completion
auto p = std::unique_ptr<Completion>{static_cast<Completion*>(arg)};
// move result out of Completion memory being freed
auto op = std::move(p->user_data);
const int ret = op.aio_completion->get_return_value();
boost::system::error_code ec;
if (ret < 0) {
ec.assign(-ret, librados::detail::err_category());
}
op.dispatch(std::move(p), ec);
}
template <typename Executor1, typename CompletionHandler>
static auto create(const Executor1& ex1, CompletionHandler&& handler) {
auto p = Completion::create(ex1, std::move(handler));
p->user_data.aio_completion.reset(
Rados::aio_create_completion(p.get(), aio_dispatch));
return p;
}
};
} // namespace detail
/// Calls IoCtx::aio_read() and arranges for the AioCompletion to call a
/// given handler with signature (boost::system::error_code, bufferlist).
template <typename ExecutionContext, typename CompletionToken>
auto async_read(ExecutionContext& ctx, IoCtx& io, const std::string& oid,
size_t len, uint64_t off, CompletionToken&& token)
{
using Op = detail::AsyncOp<bufferlist>;
using Signature = typename Op::Signature;
boost::asio::async_completion<CompletionToken, Signature> init(token);
auto p = Op::create(ctx.get_executor(), init.completion_handler);
auto& op = p->user_data;
int ret = io.aio_read(oid, op.aio_completion.get(), &op.result, len, off);
if (ret < 0) {
auto ec = boost::system::error_code{-ret, librados::detail::err_category()};
ceph::async::post(std::move(p), ec, bufferlist{});
} else {
p.release(); // release ownership until completion
}
return init.result.get();
}
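/// Illustrative sketch (not part of the original header): invoking async_read
/// with a plain lambda as the completion token. The io_context must be run by
/// the caller for the handler to fire; 'example_async_read' is an assumed name.
inline void example_async_read(boost::asio::io_context& ctx, IoCtx& io,
                               const std::string& oid)
{
  async_read(ctx, io, oid, 4096, 0,
      [](boost::system::error_code ec, bufferlist bl) {
        // on success 'bl' holds up to 4096 bytes read from offset 0
      });
}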
/// Calls IoCtx::aio_write() and arranges for the AioCompletion to call a
/// given handler with signature (boost::system::error_code).
template <typename ExecutionContext, typename CompletionToken>
auto async_write(ExecutionContext& ctx, IoCtx& io, const std::string& oid,
bufferlist &bl, size_t len, uint64_t off,
CompletionToken&& token)
{
using Op = detail::AsyncOp<void>;
using Signature = typename Op::Signature;
boost::asio::async_completion<CompletionToken, Signature> init(token);
auto p = Op::create(ctx.get_executor(), init.completion_handler);
auto& op = p->user_data;
int ret = io.aio_write(oid, op.aio_completion.get(), bl, len, off);
if (ret < 0) {
auto ec = boost::system::error_code{-ret, librados::detail::err_category()};
ceph::async::post(std::move(p), ec);
} else {
p.release(); // release ownership until completion
}
return init.result.get();
}
/// Calls IoCtx::aio_operate() and arranges for the AioCompletion to call a
/// given handler with signature (boost::system::error_code, bufferlist).
template <typename ExecutionContext, typename CompletionToken>
auto async_operate(ExecutionContext& ctx, IoCtx& io, const std::string& oid,
ObjectReadOperation *read_op, int flags,
CompletionToken&& token)
{
using Op = detail::AsyncOp<bufferlist>;
using Signature = typename Op::Signature;
boost::asio::async_completion<CompletionToken, Signature> init(token);
auto p = Op::create(ctx.get_executor(), init.completion_handler);
auto& op = p->user_data;
int ret = io.aio_operate(oid, op.aio_completion.get(), read_op,
flags, &op.result);
if (ret < 0) {
auto ec = boost::system::error_code{-ret, librados::detail::err_category()};
ceph::async::post(std::move(p), ec, bufferlist{});
} else {
p.release(); // release ownership until completion
}
return init.result.get();
}
/// Calls IoCtx::aio_operate() and arranges for the AioCompletion to call a
/// given handler with signature (boost::system::error_code).
template <typename ExecutionContext, typename CompletionToken>
auto async_operate(ExecutionContext& ctx, IoCtx& io, const std::string& oid,
ObjectWriteOperation *write_op, int flags,
CompletionToken &&token)
{
using Op = detail::AsyncOp<void>;
using Signature = typename Op::Signature;
boost::asio::async_completion<CompletionToken, Signature> init(token);
auto p = Op::create(ctx.get_executor(), init.completion_handler);
auto& op = p->user_data;
int ret = io.aio_operate(oid, op.aio_completion.get(), write_op, flags);
if (ret < 0) {
auto ec = boost::system::error_code{-ret, librados::detail::err_category()};
ceph::async::post(std::move(p), ec);
} else {
p.release(); // release ownership until completion
}
return init.result.get();
}
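/// Illustrative sketch (not part of the original header): a write through
/// ObjectWriteOperation using boost::asio::use_future as the completion token;
/// blocking on the future here is for demonstration only.
///
///   ObjectWriteOperation op;
///   op.write_full(bl);
///   std::future<void> f = async_operate(ctx, io, oid, &op, 0,
///                                        boost::asio::use_future);
///   f.get();  // throws boost::system::system_error on failure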
/// Calls IoCtx::aio_notify() and arranges for the AioCompletion to call a
/// given handler with signature (boost::system::error_code, bufferlist).
template <typename ExecutionContext, typename CompletionToken>
auto async_notify(ExecutionContext& ctx, IoCtx& io, const std::string& oid,
bufferlist& bl, uint64_t timeout_ms, CompletionToken &&token)
{
using Op = detail::AsyncOp<bufferlist>;
using Signature = typename Op::Signature;
boost::asio::async_completion<CompletionToken, Signature> init(token);
auto p = Op::create(ctx.get_executor(), init.completion_handler);
auto& op = p->user_data;
int ret = io.aio_notify(oid, op.aio_completion.get(),
bl, timeout_ms, &op.result);
if (ret < 0) {
auto ec = boost::system::error_code{-ret, librados::detail::err_category()};
ceph::async::post(std::move(p), ec, bufferlist{});
} else {
p.release(); // release ownership until completion
}
return init.result.get();
}
} // namespace librados
#endif // LIBRADOS_ASIO_H
| 8,461 | 36.946188 | 99 |
h
|