ceph-main/src/cls/timeindex/cls_timeindex_client.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include "cls/timeindex/cls_timeindex_ops.h"
#include "cls/timeindex/cls_timeindex_client.h"
#include "include/compat.h"
void cls_timeindex_add(
librados::ObjectWriteOperation& op,
std::list<cls_timeindex_entry>& entries)
{
librados::bufferlist in;
cls_timeindex_add_op call;
call.entries = entries;
encode(call, in);
op.exec("timeindex", "add", in);
}
void cls_timeindex_add(
librados::ObjectWriteOperation& op,
cls_timeindex_entry& entry)
{
librados::bufferlist in;
cls_timeindex_add_op call;
call.entries.push_back(entry);
encode(call, in);
op.exec("timeindex", "add", in);
}
void cls_timeindex_add_prepare_entry(
cls_timeindex_entry& entry,
const utime_t& key_timestamp,
const std::string& key_ext,
const librados::bufferlist& bl)
{
entry.key_ts = key_timestamp;
entry.key_ext = key_ext;
entry.value = bl;
}
void cls_timeindex_add(
librados::ObjectWriteOperation& op,
const utime_t& key_timestamp,
const std::string& key_ext,
const librados::bufferlist& bl)
{
cls_timeindex_entry entry;
cls_timeindex_add_prepare_entry(entry, key_timestamp, key_ext, bl);
cls_timeindex_add(op, entry);
}
void cls_timeindex_trim(
librados::ObjectWriteOperation& op,
const utime_t& from_time,
const utime_t& to_time,
const std::string& from_marker,
const std::string& to_marker)
{
librados::bufferlist in;
cls_timeindex_trim_op call;
call.from_time = from_time;
call.to_time = to_time;
call.from_marker = from_marker;
call.to_marker = to_marker;
encode(call, in);
op.exec("timeindex", "trim", in);
}
int cls_timeindex_trim(
librados::IoCtx& io_ctx,
const std::string& oid,
const utime_t& from_time,
const utime_t& to_time,
const std::string& from_marker,
const std::string& to_marker)
{
bool done = false;
do {
librados::ObjectWriteOperation op;
cls_timeindex_trim(op, from_time, to_time, from_marker, to_marker);
int r = io_ctx.operate(oid, &op);
if (r == -ENODATA)
done = true;
else if (r < 0)
return r;
} while (!done);
return 0;
}
void cls_timeindex_list(
librados::ObjectReadOperation& op,
const utime_t& from,
const utime_t& to,
const std::string& in_marker,
const int max_entries,
std::list<cls_timeindex_entry>& entries,
std::string *out_marker,
bool *truncated)
{
librados::bufferlist in;
cls_timeindex_list_op call;
call.from_time = from;
call.to_time = to;
call.marker = in_marker;
call.max_entries = max_entries;
encode(call, in);
op.exec("timeindex", "list", in,
new TimeindexListCtx(&entries, out_marker, truncated));
}
ceph-main/src/cls/timeindex/cls_timeindex_client.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_TIMEINDEX_CLIENT_H
#define CEPH_CLS_TIMEINDEX_CLIENT_H
#include "include/rados/librados.hpp"
#include "cls_timeindex_ops.h"
/**
* timeindex objclass
*/
class TimeindexListCtx : public librados::ObjectOperationCompletion {
std::list<cls_timeindex_entry> *entries;
std::string *marker;
bool *truncated;
public:
/// ctor
TimeindexListCtx(
std::list<cls_timeindex_entry> *_entries,
std::string *_marker,
bool *_truncated)
: entries(_entries), marker(_marker), truncated(_truncated) {}
/// dtor
~TimeindexListCtx() {}
void handle_completion(int r, ceph::buffer::list& bl) override {
if (r >= 0) {
cls_timeindex_list_ret ret;
try {
auto iter = bl.cbegin();
decode(ret, iter);
if (entries)
*entries = ret.entries;
if (truncated)
*truncated = ret.truncated;
if (marker)
*marker = ret.marker;
} catch (ceph::buffer::error& err) {
// nothing we can do about it atm
}
}
}
};
void cls_timeindex_add_prepare_entry(
cls_timeindex_entry& entry,
const utime_t& key_timestamp,
const std::string& key_ext,
ceph::buffer::list& bl);
void cls_timeindex_add(
librados::ObjectWriteOperation& op,
const std::list<cls_timeindex_entry>& entry);
void cls_timeindex_add(
librados::ObjectWriteOperation& op,
const cls_timeindex_entry& entry);
void cls_timeindex_add(
librados::ObjectWriteOperation& op,
const utime_t& timestamp,
const std::string& name,
const ceph::buffer::list& bl);
void cls_timeindex_list(
librados::ObjectReadOperation& op,
const utime_t& from,
const utime_t& to,
const std::string& in_marker,
const int max_entries,
std::list<cls_timeindex_entry>& entries,
std::string *out_marker,
bool *truncated);
void cls_timeindex_trim(
librados::ObjectWriteOperation& op,
const utime_t& from_time,
const utime_t& to_time,
const std::string& from_marker = std::string(),
const std::string& to_marker = std::string());
// These overloads, which call io_ctx.operate() directly, should not be used from
// within RGW; RGW code should build the operation with the overloads above and
// submit it through rgw_rados_operate() instead.
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_timeindex_trim(
librados::IoCtx& io_ctx,
const std::string& oid,
const utime_t& from_time,
const utime_t& to_time,
const std::string& from_marker = std::string(),
const std::string& to_marker = std::string());
#endif
#endif
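// Usage sketch (illustration only, not part of the Ceph tree): how a caller
// outside RGW might append one entry and then list a time range with the helpers
// declared above. The io_ctx, the object name and the timestamps are assumptions;
// RGW code would submit the same operations through rgw_rados_operate() rather
// than io_ctx.operate(), as the comment above points out.
static int example_timeindex_roundtrip(librados::IoCtx& io_ctx)
{
  const std::string oid = "timeindex_example_obj";  // assumed object name

  // queue an "add" of a single keyed blob and submit it
  librados::bufferlist val;
  val.append("payload");
  librados::ObjectWriteOperation wop;
  cls_timeindex_add(wop, utime_t(1700000000, 0), "ext", val);
  int r = io_ctx.operate(oid, &wop);
  if (r < 0)
    return r;

  // list everything between two timestamps; out-params are filled on completion
  std::list<cls_timeindex_entry> entries;
  std::string next_marker;
  bool truncated = false;
  librados::ObjectReadOperation rop;
  cls_timeindex_list(rop, utime_t(0, 0), utime_t(1800000000, 0), std::string(), 100,
                     entries, &next_marker, &truncated);
  return io_ctx.operate(oid, &rop, nullptr);
}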
ceph-main/src/cls/timeindex/cls_timeindex_ops.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_TIMEINDEX_OPS_H
#define CEPH_CLS_TIMEINDEX_OPS_H
#include "cls_timeindex_types.h"
struct cls_timeindex_add_op {
std::list<cls_timeindex_entry> entries;
cls_timeindex_add_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(entries, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(entries, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_timeindex_add_op)
struct cls_timeindex_list_op {
utime_t from_time;
std::string marker; /* if not empty, overrides from_time */
utime_t to_time; /* not inclusive */
int max_entries; /* upper bound on the number of returned entries;
                    the call may return fewer and still be truncated */
cls_timeindex_list_op() : max_entries(0) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(from_time, bl);
encode(marker, bl);
encode(to_time, bl);
encode(max_entries, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(from_time, bl);
decode(marker, bl);
decode(to_time, bl);
decode(max_entries, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_timeindex_list_op)
struct cls_timeindex_list_ret {
std::list<cls_timeindex_entry> entries;
std::string marker;
bool truncated;
cls_timeindex_list_ret() : truncated(false) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(entries, bl);
encode(marker, bl);
encode(truncated, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(entries, bl);
decode(marker, bl);
decode(truncated, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_timeindex_list_ret)
/*
 * The trim operation returns 0 when it removed entries but more may remain, and
 * -ENODATA once there is nothing left to trim, so the caller needs to resend the
 * request until -ENODATA is returned.
 */
struct cls_timeindex_trim_op {
utime_t from_time;
utime_t to_time; /* inclusive */
std::string from_marker;
std::string to_marker;
cls_timeindex_trim_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(from_time, bl);
encode(to_time, bl);
encode(from_marker, bl);
encode(to_marker, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(from_time, bl);
decode(to_time, bl);
decode(from_marker, bl);
decode(to_marker, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_timeindex_trim_op)
#endif /* CEPH_CLS_TIMEINDEX_OPS_H */
ceph-main/src/cls/timeindex/cls_timeindex_types.cc
#include "cls_timeindex_types.h"
#include "common/Formatter.h"
void cls_timeindex_entry::dump(Formatter *f) const
{
f->dump_stream("key_ts") << key_ts;
f->dump_string("key_ext", key_ext);
f->dump_string("value", value.to_str());
}
void cls_timeindex_entry::generate_test_instances(std::list<cls_timeindex_entry*>& o)
{
cls_timeindex_entry *i = new cls_timeindex_entry;
i->key_ts = utime_t(0,0);
i->key_ext = "foo";
bufferlist bl;
bl.append("bar");
i->value = bl;
o.push_back(i);
o.push_back(new cls_timeindex_entry);
}
ceph-main/src/cls/timeindex/cls_timeindex_types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_TIMEINDEX_TYPES_H
#define CEPH_CLS_TIMEINDEX_TYPES_H
#include "include/encoding.h"
#include "include/types.h"
#include "include/utime.h"
class JSONObj;
struct cls_timeindex_entry {
/* Mandatory timestamp. Will be part of the key. */
utime_t key_ts;
/* Not mandatory. The key_ext field, if not empty, forms the second
 * part of the key. */
std::string key_ext;
/* Becomes the value of the OMAP-based mapping. */
ceph::buffer::list value;
cls_timeindex_entry() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(key_ts, bl);
encode(key_ext, bl);
encode(value, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(key_ts, bl);
decode(key_ext, bl);
decode(value, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_timeindex_entry*>& o);
};
WRITE_CLASS_ENCODER(cls_timeindex_entry)
#endif /* CEPH_CLS_TIMEINDEX_TYPES_H */
ceph-main/src/cls/user/cls_user.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include "include/utime.h"
#include "objclass/objclass.h"
#include "cls_user_ops.h"
using std::map;
using std::string;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
CLS_VER(1,0)
CLS_NAME(user)
static int write_entry(cls_method_context_t hctx, const string& key, const cls_user_bucket_entry& entry)
{
bufferlist bl;
encode(entry, bl);
int ret = cls_cxx_map_set_val(hctx, key, &bl);
if (ret < 0)
return ret;
return 0;
}
static int remove_entry(cls_method_context_t hctx, const string& key)
{
int ret = cls_cxx_map_remove_key(hctx, key);
if (ret < 0)
return ret;
return 0;
}
static void get_key_by_bucket_name(const string& bucket_name, string *key)
{
*key = bucket_name;
}
static int get_existing_bucket_entry(cls_method_context_t hctx, const string& bucket_name,
cls_user_bucket_entry& entry)
{
if (bucket_name.empty()) {
return -EINVAL;
}
string key;
get_key_by_bucket_name(bucket_name, &key);
bufferlist bl;
int rc = cls_cxx_map_get_val(hctx, key, &bl);
if (rc < 0) {
CLS_LOG(10, "could not read entry %s", key.c_str());
return rc;
}
try {
auto iter = bl.cbegin();
decode(entry, iter);
} catch (ceph::buffer::error& err) {
CLS_LOG(0, "ERROR: failed to decode entry %s", key.c_str());
return -EIO;
}
return 0;
}
static int read_header(cls_method_context_t hctx, cls_user_header *header)
{
bufferlist bl;
int ret = cls_cxx_map_read_header(hctx, &bl);
if (ret < 0)
return ret;
if (bl.length() == 0) {
*header = cls_user_header();
return 0;
}
try {
decode(*header, bl);
} catch (ceph::buffer::error& err) {
CLS_LOG(0, "ERROR: failed to decode user header");
return -EIO;
}
return 0;
}
static void add_header_stats(cls_user_stats *stats, cls_user_bucket_entry& entry)
{
stats->total_entries += entry.count;
stats->total_bytes += entry.size;
stats->total_bytes_rounded += entry.size_rounded;
}
static void dec_header_stats(cls_user_stats *stats, cls_user_bucket_entry& entry)
{
stats->total_bytes -= entry.size;
stats->total_bytes_rounded -= entry.size_rounded;
stats->total_entries -= entry.count;
}
static void apply_entry_stats(const cls_user_bucket_entry& src_entry, cls_user_bucket_entry *target_entry)
{
target_entry->size = src_entry.size;
target_entry->size_rounded = src_entry.size_rounded;
target_entry->count = src_entry.count;
}
static int cls_user_set_buckets_info(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
{
auto in_iter = in->cbegin();
cls_user_set_buckets_op op;
try {
decode(op, in_iter);
} catch (ceph::buffer::error& err) {
CLS_LOG(1, "ERROR: cls_user_add_op(): failed to decode op");
return -EINVAL;
}
cls_user_header header;
int ret = read_header(hctx, &header);
if (ret < 0) {
CLS_LOG(0, "ERROR: failed to read user info header ret=%d", ret);
return ret;
}
for (auto iter = op.entries.begin(); iter != op.entries.end(); ++iter) {
cls_user_bucket_entry& update_entry = *iter;
string key;
get_key_by_bucket_name(update_entry.bucket.name, &key);
cls_user_bucket_entry entry;
ret = get_existing_bucket_entry(hctx, key, entry);
if (ret == -ENOENT) {
if (!op.add)
continue; /* racing bucket removal */
entry = update_entry;
ret = 0;
} else if (op.add) {
// bucket id may have changed (ie reshard)
entry.bucket.bucket_id = update_entry.bucket.bucket_id;
// creation date may have changed (ie delete/recreate bucket)
entry.creation_time = update_entry.creation_time;
}
if (ret < 0) {
CLS_LOG(0, "ERROR: get_existing_bucket_entry() key=%s returned %d", key.c_str(), ret);
return ret;
} else if (ret >= 0 && entry.user_stats_sync) {
dec_header_stats(&header.stats, entry);
}
CLS_LOG(20, "storing entry for key=%s size=%lld count=%lld",
key.c_str(), (long long)update_entry.size, (long long)update_entry.count);
// Sync entry stats only when this is not an op.add: in the op.add case a new
// entry has already been copied from update_entry above, while for an existing
// entry applying the stats here would clobber those already stored for the bucket.
if (!op.add){
apply_entry_stats(update_entry, &entry);
}
entry.user_stats_sync = true;
ret = write_entry(hctx, key, entry);
if (ret < 0)
return ret;
add_header_stats(&header.stats, entry);
}
bufferlist bl;
CLS_LOG(20, "header: total bytes=%lld entries=%lld", (long long)header.stats.total_bytes, (long long)header.stats.total_entries);
if (header.last_stats_update < op.time)
header.last_stats_update = op.time;
encode(header, bl);
ret = cls_cxx_map_write_header(hctx, &bl);
if (ret < 0)
return ret;
return 0;
}
static int cls_user_complete_stats_sync(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
{
auto in_iter = in->cbegin();
cls_user_complete_stats_sync_op op;
try {
decode(op, in_iter);
} catch (ceph::buffer::error& err) {
CLS_LOG(1, "ERROR: cls_user_add_op(): failed to decode op");
return -EINVAL;
}
cls_user_header header;
int ret = read_header(hctx, &header);
if (ret < 0) {
CLS_LOG(0, "ERROR: failed to read user info header ret=%d", ret);
return ret;
}
if (header.last_stats_sync < op.time)
header.last_stats_sync = op.time;
bufferlist bl;
encode(header, bl);
ret = cls_cxx_map_write_header(hctx, &bl);
if (ret < 0)
return ret;
return 0;
}
static int cls_user_remove_bucket(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
{
auto in_iter = in->cbegin();
cls_user_remove_bucket_op op;
try {
decode(op, in_iter);
} catch (ceph::buffer::error& err) {
CLS_LOG(1, "ERROR: cls_user_add_op(): failed to decode op");
return -EINVAL;
}
cls_user_header header;
int ret = read_header(hctx, &header);
if (ret < 0) {
CLS_LOG(0, "ERROR: failed to read user info header ret=%d", ret);
return ret;
}
string key;
get_key_by_bucket_name(op.bucket.name, &key);
cls_user_bucket_entry entry;
ret = get_existing_bucket_entry(hctx, key, entry);
if (ret == -ENOENT) {
return 0; /* idempotent removal */
}
if (ret < 0) {
CLS_LOG(0, "ERROR: get existing bucket entry, key=%s ret=%d", key.c_str(), ret);
return ret;
}
CLS_LOG(20, "removing entry at %s", key.c_str());
ret = remove_entry(hctx, key);
if (ret < 0)
return ret;
if (!entry.user_stats_sync) {
return 0;
}
dec_header_stats(&header.stats, entry);
CLS_LOG(20, "header: total bytes=%lld entries=%lld", (long long)header.stats.total_bytes, (long long)header.stats.total_entries);
bufferlist bl;
encode(header, bl);
return cls_cxx_map_write_header(hctx, &bl);
}
static int cls_user_list_buckets(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
{
auto in_iter = in->cbegin();
cls_user_list_buckets_op op;
try {
decode(op, in_iter);
} catch (ceph::buffer::error& err) {
CLS_LOG(1, "ERROR: cls_user_list_op(): failed to decode op");
return -EINVAL;
}
map<string, bufferlist> keys;
const string& from_index = op.marker;
const string& to_index = op.end_marker;
const bool to_index_valid = !to_index.empty();
#define MAX_ENTRIES 1000
size_t max_entries = op.max_entries;
if (max_entries > MAX_ENTRIES)
max_entries = MAX_ENTRIES;
string match_prefix;
cls_user_list_buckets_ret ret;
int rc = cls_cxx_map_get_vals(hctx, from_index, match_prefix, max_entries, &keys, &ret.truncated);
if (rc < 0)
return rc;
CLS_LOG(20, "from_index=%s to_index=%s match_prefix=%s",
from_index.c_str(),
to_index.c_str(),
match_prefix.c_str());
auto& entries = ret.entries;
auto iter = keys.begin();
string marker;
for (; iter != keys.end(); ++iter) {
const string& index = iter->first;
marker = index;
if (to_index_valid && to_index.compare(index) <= 0) {
ret.truncated = false;
break;
}
bufferlist& bl = iter->second;
auto biter = bl.cbegin();
try {
cls_user_bucket_entry e;
decode(e, biter);
entries.push_back(e);
} catch (ceph::buffer::error& err) {
CLS_LOG(0, "ERROR: cls_user_list: could not decode entry, index=%s", index.c_str());
}
}
if (ret.truncated) {
ret.marker = marker;
}
encode(ret, *out);
return 0;
}
static int cls_user_get_header(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
{
auto in_iter = in->cbegin();
cls_user_get_header_op op;
try {
decode(op, in_iter);
} catch (ceph::buffer::error& err) {
CLS_LOG(1, "ERROR: cls_user_get_header_op(): failed to decode op");
return -EINVAL;
}
cls_user_get_header_ret op_ret;
int ret = read_header(hctx, &op_ret.header);
if (ret < 0)
return ret;
encode(op_ret, *out);
return 0;
}
/// A method to reset the user.buckets header stats in accordance with
/// the values seen in the user.buckets omap keys. This is not
/// equivalent to --sync-stats, which also re-calculates the stats for
/// each bucket.
static int cls_user_reset_stats(cls_method_context_t hctx,
bufferlist *in, bufferlist *out /*ignore*/)
{
cls_user_reset_stats_op op;
try {
auto bliter = in->cbegin();
decode(op, bliter);
} catch (ceph::buffer::error& err) {
CLS_LOG(0, "ERROR: %s failed to decode op", __func__);
return -EINVAL;
}
cls_user_header header;
bool truncated = false;
string from_index, prefix;
do {
map<string, bufferlist> keys;
int rc = cls_cxx_map_get_vals(hctx, from_index, prefix, MAX_ENTRIES,
&keys, &truncated);
if (rc < 0) {
CLS_LOG(0, "ERROR: %s failed to retrieve omap key-values", __func__);
return rc;
}
CLS_LOG(20, "%s: read %lu key-values, truncated=%d",
__func__, keys.size(), truncated);
for (const auto& kv : keys) {
cls_user_bucket_entry e;
try {
auto bl = kv.second;
auto bliter = bl.cbegin();
decode(e, bliter);
} catch (ceph::buffer::error& err) {
CLS_LOG(0, "ERROR: %s failed to decode bucket entry for %s",
__func__, kv.first.c_str());
return -EIO;
}
add_header_stats(&header.stats, e);
}
if (!keys.empty()) {
from_index = keys.rbegin()->first;
}
} while (truncated);
bufferlist bl;
header.last_stats_update = op.time;
encode(header, bl);
CLS_LOG(20, "%s: updating header", __func__);
return cls_cxx_map_write_header(hctx, &bl);
} /* legacy cls_user_reset_stats */
/// A method to reset the user.buckets header stats in accordance with
/// the values seen in the user.buckets omap keys. This is not
/// equivalent to --sync-stats, which also re-calculates the stats for
/// each bucket.
static int cls_user_reset_stats2(cls_method_context_t hctx,
buffer::list *in, buffer::list *out)
{
cls_user_reset_stats2_op op;
try {
auto bliter = in->cbegin();
decode(op, bliter);
} catch (ceph::buffer::error& err) {
CLS_LOG(0, "ERROR: %s failed to decode op", __func__);
return -EINVAL;
}
cls_user_header header;
string from_index{op.marker}, prefix;
cls_user_reset_stats2_ret ret;
map<string, buffer::list> keys;
int rc = cls_cxx_map_get_vals(hctx, from_index, prefix, MAX_ENTRIES,
&keys, &ret.truncated);
if (rc < 0) {
CLS_LOG(0, "ERROR: %s failed to retrieve omap key-values", __func__);
return rc;
}
CLS_LOG(20, "%s: read %lu key-values, truncated=%d",
__func__, keys.size(), ret.truncated);
for (const auto& kv : keys) {
cls_user_bucket_entry e;
try {
auto& bl = kv.second;
auto bliter = bl.cbegin();
decode(e, bliter);
} catch (ceph::buffer::error& err) {
CLS_LOG(0, "ERROR: %s failed to decode bucket entry for %s",
__func__, kv.first.c_str());
return -EIO;
}
add_header_stats(&ret.acc_stats, e);
}
/* try-update marker */
if(!keys.empty())
ret.marker = (--keys.cend())->first;
if (! ret.truncated) {
buffer::list bl;
header.last_stats_update = op.time;
header.stats = ret.acc_stats;
encode(header, bl);
CLS_LOG(20, "%s: updating header", __func__);
rc = cls_cxx_map_write_header(hctx, &bl);
/* return final result */
encode(ret, *out);
return rc;
}
/* return partial result */
encode(ret, *out);
return 0;
} /* cls_user_reset_stats2 */
CLS_INIT(user)
{
CLS_LOG(1, "Loaded user class!");
cls_handle_t h_class;
cls_method_handle_t h_user_set_buckets_info;
cls_method_handle_t h_user_complete_stats_sync;
cls_method_handle_t h_user_remove_bucket;
cls_method_handle_t h_user_list_buckets;
cls_method_handle_t h_user_get_header;
cls_method_handle_t h_user_reset_stats;
cls_method_handle_t h_user_reset_stats2;
cls_register("user", &h_class);
/* log */
cls_register_cxx_method(h_class, "set_buckets_info", CLS_METHOD_RD | CLS_METHOD_WR,
cls_user_set_buckets_info, &h_user_set_buckets_info);
cls_register_cxx_method(h_class, "complete_stats_sync", CLS_METHOD_RD | CLS_METHOD_WR,
cls_user_complete_stats_sync, &h_user_complete_stats_sync);
cls_register_cxx_method(h_class, "remove_bucket", CLS_METHOD_RD | CLS_METHOD_WR, cls_user_remove_bucket, &h_user_remove_bucket);
cls_register_cxx_method(h_class, "list_buckets", CLS_METHOD_RD, cls_user_list_buckets, &h_user_list_buckets);
cls_register_cxx_method(h_class, "get_header", CLS_METHOD_RD, cls_user_get_header, &h_user_get_header);
cls_register_cxx_method(h_class, "reset_user_stats", CLS_METHOD_RD | CLS_METHOD_WR, cls_user_reset_stats, &h_user_reset_stats);
cls_register_cxx_method(h_class, "reset_user_stats2", CLS_METHOD_RD | CLS_METHOD_WR, cls_user_reset_stats2, &h_user_reset_stats2);
return;
}
ceph-main/src/cls/user/cls_user_client.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include "cls/user/cls_user_client.h"
#include "include/rados/librados.hpp"
using std::list;
using std::string;
using ceph::bufferlist;
using ceph::real_clock;
using librados::IoCtx;
using librados::ObjectOperationCompletion;
using librados::ObjectReadOperation;
void cls_user_set_buckets(librados::ObjectWriteOperation& op, list<cls_user_bucket_entry>& entries, bool add)
{
bufferlist in;
cls_user_set_buckets_op call;
call.entries = entries;
call.add = add;
call.time = real_clock::now();
encode(call, in);
op.exec("user", "set_buckets_info", in);
}
void cls_user_complete_stats_sync(librados::ObjectWriteOperation& op)
{
bufferlist in;
cls_user_complete_stats_sync_op call;
call.time = real_clock::now();
encode(call, in);
op.exec("user", "complete_stats_sync", in);
}
void cls_user_remove_bucket(librados::ObjectWriteOperation& op, const cls_user_bucket& bucket)
{
bufferlist in;
cls_user_remove_bucket_op call;
call.bucket = bucket;
encode(call, in);
op.exec("user", "remove_bucket", in);
}
class ClsUserListCtx : public ObjectOperationCompletion {
list<cls_user_bucket_entry> *entries;
string *marker;
bool *truncated;
int *pret;
public:
ClsUserListCtx(list<cls_user_bucket_entry> *_entries, string *_marker, bool *_truncated, int *_pret) :
entries(_entries), marker(_marker), truncated(_truncated), pret(_pret) {}
void handle_completion(int r, bufferlist& outbl) override {
if (r >= 0) {
cls_user_list_buckets_ret ret;
try {
auto iter = outbl.cbegin();
decode(ret, iter);
if (entries)
*entries = ret.entries;
if (truncated)
*truncated = ret.truncated;
if (marker)
*marker = ret.marker;
} catch (ceph::buffer::error& err) {
r = -EIO;
}
}
if (pret) {
*pret = r;
}
}
};
void cls_user_bucket_list(librados::ObjectReadOperation& op,
const string& in_marker,
const string& end_marker,
int max_entries,
list<cls_user_bucket_entry>& entries,
string *out_marker,
bool *truncated,
int *pret)
{
bufferlist inbl;
cls_user_list_buckets_op call;
call.marker = in_marker;
call.end_marker = end_marker;
call.max_entries = max_entries;
encode(call, inbl);
op.exec("user", "list_buckets", inbl, new ClsUserListCtx(&entries, out_marker, truncated, pret));
}
class ClsUserGetHeaderCtx : public ObjectOperationCompletion {
cls_user_header *header;
RGWGetUserHeader_CB *ret_ctx;
int *pret;
public:
ClsUserGetHeaderCtx(cls_user_header *_h, RGWGetUserHeader_CB *_ctx, int *_pret) : header(_h), ret_ctx(_ctx), pret(_pret) {}
~ClsUserGetHeaderCtx() override {
if (ret_ctx) {
ret_ctx->put();
}
}
void handle_completion(int r, bufferlist& outbl) override {
if (r >= 0) {
cls_user_get_header_ret ret;
try {
auto iter = outbl.cbegin();
decode(ret, iter);
if (header)
*header = ret.header;
} catch (ceph::buffer::error& err) {
r = -EIO;
}
if (ret_ctx) {
ret_ctx->handle_response(r, ret.header);
}
}
if (pret) {
*pret = r;
}
}
};
void cls_user_get_header(librados::ObjectReadOperation& op,
cls_user_header *header, int *pret)
{
bufferlist inbl;
cls_user_get_header_op call;
encode(call, inbl);
op.exec("user", "get_header", inbl, new ClsUserGetHeaderCtx(header, NULL, pret));
}
void cls_user_reset_stats(librados::ObjectWriteOperation &op)
{
bufferlist inbl;
cls_user_reset_stats_op call;
call.time = real_clock::now();
encode(call, inbl);
op.exec("user", "reset_user_stats", inbl);
}
int cls_user_get_header_async(IoCtx& io_ctx, string& oid, RGWGetUserHeader_CB *ctx)
{
bufferlist in, out;
cls_user_get_header_op call;
encode(call, in);
ObjectReadOperation op;
op.exec("user", "get_header", in, new ClsUserGetHeaderCtx(NULL, ctx, NULL)); /* no need to pass pret, as we'll call ctx->handle_response() with correct error */
auto c = librados::Rados::aio_create_completion(nullptr, nullptr);
int r = io_ctx.aio_operate(oid, c, &op, NULL);
c->release();
if (r < 0)
return r;
return 0;
}
ceph-main/src/cls/user/cls_user_client.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_USER_CLIENT_H
#define CEPH_CLS_USER_CLIENT_H
#include "include/rados/librados_fwd.hpp"
#include "cls_user_ops.h"
#include "common/RefCountedObj.h"
class RGWGetUserHeader_CB : public RefCountedObject {
public:
~RGWGetUserHeader_CB() override {}
virtual void handle_response(int r, cls_user_header& header) = 0;
};
/*
* user objclass
*/
void cls_user_set_buckets(librados::ObjectWriteOperation& op, std::list<cls_user_bucket_entry>& entries, bool add);
void cls_user_complete_stats_sync(librados::ObjectWriteOperation& op);
void cls_user_remove_bucket(librados::ObjectWriteOperation& op, const cls_user_bucket& bucket);
void cls_user_bucket_list(librados::ObjectReadOperation& op,
const std::string& in_marker,
const std::string& end_marker,
int max_entries,
std::list<cls_user_bucket_entry>& entries,
std::string *out_marker,
bool *truncated,
int *pret);
void cls_user_get_header(librados::ObjectReadOperation& op, cls_user_header *header, int *pret);
int cls_user_get_header_async(librados::IoCtx& io_ctx, std::string& oid, RGWGetUserHeader_CB *ctx);
void cls_user_reset_stats(librados::ObjectWriteOperation& op);
#endif
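// Usage sketch (illustration only, not part of the Ceph tree): paging through a
// user's bucket list with the helpers above. The page size of 1000 and the
// synchronous io_ctx.operate() call are assumptions; RGW drives the same
// operation through its own rados wrappers.
static int example_list_all_buckets(librados::IoCtx& io_ctx,
                                    const std::string& user_obj,
                                    std::list<cls_user_bucket_entry>& all)
{
  std::string marker;   // empty marker means "start from the beginning"
  bool truncated = true;
  while (truncated) {
    std::list<cls_user_bucket_entry> page;
    std::string next;
    int op_ret = 0;
    librados::ObjectReadOperation op;
    cls_user_bucket_list(op, marker, std::string(), 1000,
                         page, &next, &truncated, &op_ret);
    int r = io_ctx.operate(user_obj, &op, nullptr);
    if (r < 0)
      return r;
    if (op_ret < 0)
      return op_ret;
    all.splice(all.end(), page);  // keep this page of entries, move to the next
    marker = next;
  }
  return 0;
}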
ceph-main/src/cls/user/cls_user_ops.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cls/user/cls_user_ops.h"
#include "common/Formatter.h"
#include "common/ceph_json.h"
using std::list;
using ceph::Formatter;
void cls_user_set_buckets_op::dump(Formatter *f) const
{
encode_json("entries", entries, f);
encode_json("add", add, f);
encode_json("time", utime_t(time), f);
}
void cls_user_set_buckets_op::generate_test_instances(list<cls_user_set_buckets_op*>& ls)
{
ls.push_back(new cls_user_set_buckets_op);
cls_user_set_buckets_op *op = new cls_user_set_buckets_op;
for (int i = 0; i < 3; i++) {
cls_user_bucket_entry e;
cls_user_gen_test_bucket_entry(&e, i);
op->entries.push_back(e);
}
op->add = true;
op->time = utime_t(1, 0).to_real_time();
ls.push_back(op);
}
void cls_user_remove_bucket_op::dump(Formatter *f) const
{
encode_json("bucket", bucket, f);
}
void cls_user_remove_bucket_op::generate_test_instances(list<cls_user_remove_bucket_op*>& ls)
{
ls.push_back(new cls_user_remove_bucket_op);
cls_user_remove_bucket_op *op = new cls_user_remove_bucket_op;
cls_user_gen_test_bucket(&op->bucket, 0);
ls.push_back(op);
}
void cls_user_list_buckets_op::dump(Formatter *f) const
{
encode_json("marker", marker, f);
encode_json("max_entries", max_entries, f);
}
void cls_user_list_buckets_op::generate_test_instances(list<cls_user_list_buckets_op*>& ls)
{
ls.push_back(new cls_user_list_buckets_op);
cls_user_list_buckets_op *op = new cls_user_list_buckets_op;
op->marker = "marker";
op->max_entries = 1000;
ls.push_back(op);
}
void cls_user_list_buckets_ret::dump(Formatter *f) const
{
encode_json("entries", entries, f);
encode_json("marker", marker, f);
encode_json("truncated", truncated, f);
}
void cls_user_list_buckets_ret::generate_test_instances(list<cls_user_list_buckets_ret*>& ls)
{
ls.push_back(new cls_user_list_buckets_ret);
cls_user_list_buckets_ret *ret = new cls_user_list_buckets_ret;
for (int i = 0; i < 3; i++) {
cls_user_bucket_entry e;
cls_user_gen_test_bucket_entry(&e, i);
ret->entries.push_back(e);
}
ret->marker = "123";
ret->truncated = true;
ls.push_back(ret);
}
void cls_user_get_header_op::dump(Formatter *f) const
{
// empty!
}
void cls_user_get_header_op::generate_test_instances(list<cls_user_get_header_op*>& ls)
{
ls.push_back(new cls_user_get_header_op);
}
void cls_user_get_header_ret::dump(Formatter *f) const
{
encode_json("header", header, f);
}
void cls_user_get_header_ret::generate_test_instances(list<cls_user_get_header_ret*>& ls)
{
ls.push_back(new cls_user_get_header_ret);
cls_user_get_header_ret *ret = new cls_user_get_header_ret;
cls_user_gen_test_header(&ret->header);
ls.push_back(ret);
}
void cls_user_complete_stats_sync_op::dump(Formatter *f) const
{
encode_json("time", utime_t(time), f);
}
void cls_user_complete_stats_sync_op::generate_test_instances(list<cls_user_complete_stats_sync_op*>& ls)
{
ls.push_back(new cls_user_complete_stats_sync_op);
cls_user_complete_stats_sync_op *op = new cls_user_complete_stats_sync_op;
op->time = utime_t(12345, 0).to_real_time();
ls.push_back(op);
}
ceph-main/src/cls/user/cls_user_ops.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_USER_OPS_H
#define CEPH_CLS_USER_OPS_H
#include "cls_user_types.h"
struct cls_user_set_buckets_op {
std::list<cls_user_bucket_entry> entries;
bool add;
ceph::real_time time; /* op time */
cls_user_set_buckets_op() : add(false) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(entries, bl);
encode(add, bl);
encode(time, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(entries, bl);
decode(add, bl);
decode(time, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_set_buckets_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_set_buckets_op)
struct cls_user_remove_bucket_op {
cls_user_bucket bucket;
cls_user_remove_bucket_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(bucket, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(bucket, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_remove_bucket_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_remove_bucket_op)
struct cls_user_list_buckets_op {
std::string marker;
std::string end_marker;
int max_entries; /* upper bound on the number of returned entries;
                    the call may return fewer and still be truncated */
cls_user_list_buckets_op()
: max_entries(0) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(2, 1, bl);
encode(marker, bl);
encode(max_entries, bl);
encode(end_marker, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(2, bl);
decode(marker, bl);
decode(max_entries, bl);
if (struct_v >= 2) {
decode(end_marker, bl);
}
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_list_buckets_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_list_buckets_op)
struct cls_user_list_buckets_ret {
std::list<cls_user_bucket_entry> entries;
std::string marker;
bool truncated;
cls_user_list_buckets_ret() : truncated(false) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(entries, bl);
encode(marker, bl);
encode(truncated, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(entries, bl);
decode(marker, bl);
decode(truncated, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_list_buckets_ret*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_list_buckets_ret)
struct cls_user_get_header_op {
cls_user_get_header_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_get_header_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_get_header_op)
struct cls_user_reset_stats_op {
ceph::real_time time;
cls_user_reset_stats_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(time, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(time, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_reset_stats_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_reset_stats_op);
struct cls_user_reset_stats2_op {
ceph::real_time time;
std::string marker;
cls_user_stats acc_stats;
cls_user_reset_stats2_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(time, bl);
encode(marker, bl);
encode(acc_stats, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(time, bl);
decode(marker, bl);
decode(acc_stats, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_reset_stats2_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_reset_stats2_op);
struct cls_user_reset_stats2_ret {
std::string marker;
cls_user_stats acc_stats; /* 0-initialized */
bool truncated;
cls_user_reset_stats2_ret()
: truncated(false) {}
void update_call(cls_user_reset_stats2_op& call) {
call.marker = marker;
call.acc_stats = acc_stats;
}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(marker, bl);
encode(acc_stats, bl);
encode(truncated, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(marker, bl);
decode(acc_stats, bl);
decode(truncated, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(
std::list<cls_user_reset_stats2_ret*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_reset_stats2_ret);
struct cls_user_get_header_ret {
cls_user_header header;
cls_user_get_header_ret() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(header, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(header, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_get_header_ret*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_get_header_ret)
struct cls_user_complete_stats_sync_op {
ceph::real_time time;
cls_user_complete_stats_sync_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(time, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(time, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_complete_stats_sync_op*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_complete_stats_sync_op)
#endif
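// Usage sketch (illustration only, not part of the Ceph tree): the reset_stats2
// call is paginated, and cls_user_reset_stats2_ret::update_call() above exists so
// the caller can feed the returned marker and accumulated stats back into the
// next request until the reply is no longer truncated. A minimal synchronous loop
// using io_ctx.exec() might look like this, assuming librados.hpp is available;
// the object name is an assumption.
static int example_reset_user_stats2(librados::IoCtx& io_ctx,
                                     const std::string& user_obj)
{
  cls_user_reset_stats2_op call;
  call.time = ceph::real_clock::now();
  cls_user_reset_stats2_ret ret;
  do {
    ceph::buffer::list in, out;
    encode(call, in);
    int r = io_ctx.exec(user_obj, "user", "reset_user_stats2", in, out);
    if (r < 0)
      return r;
    try {
      auto p = out.cbegin();
      decode(ret, p);
    } catch (const ceph::buffer::error&) {
      return -EIO;
    }
    ret.update_call(call);  // carry marker + acc_stats into the next round
  } while (ret.truncated);
  return 0;
}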
ceph-main/src/cls/user/cls_user_types.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cls/user/cls_user_types.h"
#include "common/Formatter.h"
#include "common/ceph_json.h"
#include "include/utime.h"
using std::list;
using std::string;
using ceph::Formatter;
using ceph::bufferlist;
using ceph::real_clock;
void cls_user_gen_test_bucket(cls_user_bucket *bucket, int i)
{
char buf[16];
snprintf(buf, sizeof(buf), ".%d", i);
bucket->name = string("buck") + buf;
bucket->marker = string("mark") + buf;
bucket->bucket_id = string("bucket.id") + buf;
}
void cls_user_bucket::dump(Formatter *f) const
{
encode_json("name", name, f);
encode_json("marker", marker,f);
encode_json("bucket_id", bucket_id,f);
}
void cls_user_bucket::generate_test_instances(list<cls_user_bucket*>& ls)
{
ls.push_back(new cls_user_bucket);
cls_user_bucket *b = new cls_user_bucket;
cls_user_gen_test_bucket(b, 0);
ls.push_back(b);
}
void cls_user_bucket_entry::dump(Formatter *f) const
{
encode_json("bucket", bucket, f);
encode_json("size", size, f);
encode_json("size_rounded", size_rounded, f);
encode_json("creation_time", utime_t(creation_time), f);
encode_json("count", count, f);
encode_json("user_stats_sync", user_stats_sync, f);
}
void cls_user_gen_test_bucket_entry(cls_user_bucket_entry *entry, int i)
{
cls_user_gen_test_bucket(&entry->bucket, i);
entry->size = i + 1;
entry->size_rounded = i + 2;
entry->creation_time = real_clock::from_time_t(i + 3);
entry->count = i + 4;
entry->user_stats_sync = true;
}
void cls_user_bucket_entry::generate_test_instances(list<cls_user_bucket_entry*>& ls)
{
ls.push_back(new cls_user_bucket_entry);
cls_user_bucket_entry *entry = new cls_user_bucket_entry;
cls_user_gen_test_bucket_entry(entry, 0);
ls.push_back(entry);
}
void cls_user_gen_test_stats(cls_user_stats *s)
{
s->total_entries = 1;
s->total_bytes = 2;
s->total_bytes_rounded = 3;
}
void cls_user_stats::dump(Formatter *f) const
{
f->dump_int("total_entries", total_entries);
f->dump_int("total_bytes", total_bytes);
f->dump_int("total_bytes_rounded", total_bytes_rounded);
}
void cls_user_stats::generate_test_instances(list<cls_user_stats*>& ls)
{
ls.push_back(new cls_user_stats);
cls_user_stats *s = new cls_user_stats;
cls_user_gen_test_stats(s);
ls.push_back(s);
}
void cls_user_gen_test_header(cls_user_header *h)
{
cls_user_gen_test_stats(&h->stats);
h->last_stats_sync = utime_t(1, 0).to_real_time();
h->last_stats_update = utime_t(2, 0).to_real_time();
}
void cls_user_header::dump(Formatter *f) const
{
encode_json("stats", stats, f);
encode_json("last_stats_sync", utime_t(last_stats_sync), f);
encode_json("last_stats_update", utime_t(last_stats_update), f);
}
void cls_user_header::generate_test_instances(list<cls_user_header*>& ls)
{
ls.push_back(new cls_user_header);
cls_user_header *h = new cls_user_header;
cls_user_gen_test_header(h);
ls.push_back(h);
}
ceph-main/src/cls/user/cls_user_types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_USER_TYPES_H
#define CEPH_CLS_USER_TYPES_H
#include "include/encoding.h"
#include "include/types.h"
#include "include/utime.h"
#include "common/ceph_time.h"
/*
* this needs to be compatible with rgw_bucket, as it replaces it
*/
struct cls_user_bucket {
std::string name;
std::string marker;
std::string bucket_id;
std::string placement_id;
struct {
std::string data_pool;
std::string index_pool;
std::string data_extra_pool;
} explicit_placement;
void encode(ceph::buffer::list& bl) const {
/* Since the new version of this structure is not backward compatible,
 * an older rgw running against a newer osd would fail to decode it if
 * we always encoded the new way. Only encode the newer version if
 * placement_id is not empty; otherwise keep encoding it as before.
 */
if (!placement_id.empty()) {
ENCODE_START(9, 8, bl);
encode(name, bl);
encode(marker, bl);
encode(bucket_id, bl);
encode(placement_id, bl);
ENCODE_FINISH(bl);
} else {
ENCODE_START(7, 3, bl);
encode(name, bl);
encode(explicit_placement.data_pool, bl);
encode(marker, bl);
encode(bucket_id, bl);
encode(explicit_placement.index_pool, bl);
encode(explicit_placement.data_extra_pool, bl);
ENCODE_FINISH(bl);
}
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(8, 3, 3, bl);
decode(name, bl);
if (struct_v < 8) {
decode(explicit_placement.data_pool, bl);
}
if (struct_v >= 2) {
decode(marker, bl);
if (struct_v <= 3) {
uint64_t id;
decode(id, bl);
char buf[16];
snprintf(buf, sizeof(buf), "%llu", (long long)id);
bucket_id = buf;
} else {
decode(bucket_id, bl);
}
}
if (struct_v < 8) {
if (struct_v >= 5) {
decode(explicit_placement.index_pool, bl);
} else {
explicit_placement.index_pool = explicit_placement.data_pool;
}
if (struct_v >= 7) {
decode(explicit_placement.data_extra_pool, bl);
}
} else {
decode(placement_id, bl);
if (struct_v == 8 && placement_id.empty()) {
decode(explicit_placement.data_pool, bl);
decode(explicit_placement.index_pool, bl);
decode(explicit_placement.data_extra_pool, bl);
}
}
DECODE_FINISH(bl);
}
bool operator<(const cls_user_bucket& b) const {
return name.compare(b.name) < 0;
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_bucket*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_bucket)
/*
* this structure overrides RGWBucketEnt
*/
struct cls_user_bucket_entry {
cls_user_bucket bucket;
size_t size;
size_t size_rounded;
ceph::real_time creation_time;
uint64_t count;
bool user_stats_sync;
cls_user_bucket_entry() : size(0), size_rounded(0), count(0), user_stats_sync(false) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(9, 5, bl);
uint64_t s = size;
__u32 mt = ceph::real_clock::to_time_t(creation_time);
std::string empty_str; // originally had the bucket name here, but we encode bucket later
encode(empty_str, bl);
encode(s, bl);
encode(mt, bl);
encode(count, bl);
encode(bucket, bl);
s = size_rounded;
encode(s, bl);
encode(user_stats_sync, bl);
encode(creation_time, bl);
//::encode(placement_rule, bl); removed in v9
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(9, 5, 5, bl);
__u32 mt;
uint64_t s;
std::string empty_str; // backward compatibility
decode(empty_str, bl);
decode(s, bl);
decode(mt, bl);
size = s;
if (struct_v < 7) {
creation_time = ceph::real_clock::from_time_t(mt);
}
if (struct_v >= 2)
decode(count, bl);
if (struct_v >= 3)
decode(bucket, bl);
if (struct_v >= 4)
decode(s, bl);
size_rounded = s;
if (struct_v >= 6)
decode(user_stats_sync, bl);
if (struct_v >= 7)
decode(creation_time, bl);
if (struct_v == 8) { // added in v8, removed in v9
std::string placement_rule;
decode(placement_rule, bl);
}
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_bucket_entry*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_bucket_entry)
struct cls_user_stats {
uint64_t total_entries;
uint64_t total_bytes;
uint64_t total_bytes_rounded;
cls_user_stats()
: total_entries(0),
total_bytes(0),
total_bytes_rounded(0) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(total_entries, bl);
encode(total_bytes, bl);
encode(total_bytes_rounded, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(total_entries, bl);
decode(total_bytes, bl);
decode(total_bytes_rounded, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_stats*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_stats)
/*
* this needs to be compatible with rgw_bucket, as it replaces it
*/
struct cls_user_header {
cls_user_stats stats;
ceph::real_time last_stats_sync; /* last time a full stats sync completed */
ceph::real_time last_stats_update; /* last time a stats update was done */
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(stats, bl);
encode(last_stats_sync, bl);
encode(last_stats_update, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(stats, bl);
decode(last_stats_sync, bl);
decode(last_stats_update, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<cls_user_header*>& ls);
};
WRITE_CLASS_ENCODER(cls_user_header)
void cls_user_gen_test_bucket(cls_user_bucket *bucket, int i);
void cls_user_gen_test_bucket_entry(cls_user_bucket_entry *entry, int i);
void cls_user_gen_test_stats(cls_user_stats *stats);
void cls_user_gen_test_header(cls_user_header *h);
#endif
ceph-main/src/cls/version/cls_version.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include "objclass/objclass.h"
#include "cls/version/cls_version_ops.h"
#include "include/compat.h"
using std::list;
using ceph::bufferlist;
CLS_VER(1,0)
CLS_NAME(version)
#define VERSION_ATTR "ceph.objclass.version"
static int set_version(cls_method_context_t hctx, struct obj_version *objv)
{
bufferlist bl;
encode(*objv, bl);
CLS_LOG(20, "cls_version: set_version %s:%d", objv->tag.c_str(), (int)objv->ver);
int ret = cls_cxx_setxattr(hctx, VERSION_ATTR, &bl);
if (ret < 0)
return ret;
return 0;
}
static int init_version(cls_method_context_t hctx, struct obj_version *objv)
{
#define TAG_LEN 24
char buf[TAG_LEN + 1];
int ret = cls_gen_rand_base64(buf, sizeof(buf));
if (ret < 0)
return ret;
objv->ver = 1;
objv->tag = buf;
CLS_LOG(20, "cls_version: init_version %s:%d", objv->tag.c_str(), (int)objv->ver);
return set_version(hctx, objv);
}
/* implicit create should be true only if called from a write operation (set, inc), never from a read operation (read, check) */
static int read_version(cls_method_context_t hctx, obj_version *objv, bool implicit_create)
{
bufferlist bl;
int ret = cls_cxx_getxattr(hctx, VERSION_ATTR, &bl);
if (ret == -ENOENT || ret == -ENODATA) {
objv->ver = 0;
if (implicit_create) {
return init_version(hctx, objv);
}
return 0;
}
if (ret < 0)
return ret;
try {
auto iter = bl.cbegin();
decode(*objv, iter);
} catch (ceph::buffer::error& err) {
CLS_LOG(0, "ERROR: read_version(): failed to decode version entry\n");
return -EIO;
}
CLS_LOG(20, "cls_version: read_version %s:%d", objv->tag.c_str(), (int)objv->ver);
return 0;
}
static int cls_version_set(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
{
auto in_iter = in->cbegin();
cls_version_set_op op;
try {
decode(op, in_iter);
} catch (ceph::buffer::error& err) {
CLS_LOG(1, "ERROR: cls_version_get(): failed to decode entry\n");
return -EINVAL;
}
int ret = set_version(hctx, &op.objv);
if (ret < 0)
return ret;
return 0;
}
static bool check_conds(list<obj_version_cond>& conds, obj_version& objv)
{
if (conds.empty())
return true;
for (list<obj_version_cond>::iterator iter = conds.begin(); iter != conds.end(); ++iter) {
obj_version_cond& cond = *iter;
obj_version& v = cond.ver;
CLS_LOG(20, "cls_version: check_version %s:%d (cond=%d)", v.tag.c_str(), (int)v.ver, (int)cond.cond);
switch (cond.cond) {
case VER_COND_NONE:
break;
case VER_COND_EQ:
if (!objv.compare(&v))
return false;
break;
case VER_COND_GT:
if (!(objv.ver > v.ver))
return false;
break;
case VER_COND_GE:
if (!(objv.ver >= v.ver))
return false;
break;
case VER_COND_LT:
if (!(objv.ver < v.ver))
return false;
break;
case VER_COND_LE:
if (!(objv.ver <= v.ver))
return false;
break;
case VER_COND_TAG_EQ:
if (objv.tag.compare(v.tag) != 0)
return false;
break;
case VER_COND_TAG_NE:
if (objv.tag.compare(v.tag) == 0)
return false;
break;
}
}
return true;
}
static int cls_version_inc(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
{
auto in_iter = in->cbegin();
cls_version_inc_op op;
try {
decode(op, in_iter);
} catch (ceph::buffer::error& err) {
CLS_LOG(1, "ERROR: cls_version_get(): failed to decode entry\n");
return -EINVAL;
}
obj_version objv;
int ret = read_version(hctx, &objv, true);
if (ret < 0)
return ret;
if (!check_conds(op.conds, objv)) {
return -ECANCELED;
}
objv.inc();
ret = set_version(hctx, &objv);
if (ret < 0)
return ret;
return 0;
}
static int cls_version_check(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
{
auto in_iter = in->cbegin();
cls_version_check_op op;
try {
decode(op, in_iter);
} catch (ceph::buffer::error& err) {
CLS_LOG(1, "ERROR: cls_version_get(): failed to decode entry\n");
return -EINVAL;
}
obj_version objv;
int ret = read_version(hctx, &objv, false);
if (ret < 0)
return ret;
if (!check_conds(op.conds, objv)) {
CLS_LOG(20, "cls_version: failed condition check");
return -ECANCELED;
}
return 0;
}
static int cls_version_read(cls_method_context_t hctx, bufferlist *in, bufferlist *out)
{
obj_version objv;
cls_version_read_ret read_ret;
int ret = read_version(hctx, &read_ret.objv, false);
if (ret < 0)
return ret;
encode(read_ret, *out);
return 0;
}
CLS_INIT(version)
{
CLS_LOG(1, "Loaded version class!");
cls_handle_t h_class;
cls_method_handle_t h_version_set;
cls_method_handle_t h_version_inc;
cls_method_handle_t h_version_inc_conds;
cls_method_handle_t h_version_read;
cls_method_handle_t h_version_check_conds;
cls_register("version", &h_class);
/* version */
cls_register_cxx_method(h_class, "set", CLS_METHOD_RD | CLS_METHOD_WR, cls_version_set, &h_version_set);
cls_register_cxx_method(h_class, "inc", CLS_METHOD_RD | CLS_METHOD_WR, cls_version_inc, &h_version_inc);
cls_register_cxx_method(h_class, "inc_conds", CLS_METHOD_RD | CLS_METHOD_WR, cls_version_inc, &h_version_inc_conds);
cls_register_cxx_method(h_class, "read", CLS_METHOD_RD, cls_version_read, &h_version_read);
cls_register_cxx_method(h_class, "check_conds", CLS_METHOD_RD, cls_version_check, &h_version_check_conds);
return;
}
ceph-main/src/cls/version/cls_version_client.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include "cls/version/cls_version_client.h"
#include "include/rados/librados.hpp"
using namespace librados;
void cls_version_set(librados::ObjectWriteOperation& op, obj_version& objv)
{
bufferlist in;
cls_version_set_op call;
call.objv = objv;
encode(call, in);
op.exec("version", "set", in);
}
void cls_version_inc(librados::ObjectWriteOperation& op)
{
bufferlist in;
cls_version_inc_op call;
encode(call, in);
op.exec("version", "inc", in);
}
void cls_version_inc(librados::ObjectWriteOperation& op, obj_version& objv, VersionCond cond)
{
bufferlist in;
cls_version_inc_op call;
call.objv = objv;
obj_version_cond c;
c.cond = cond;
c.ver = objv;
call.conds.push_back(c);
encode(call, in);
op.exec("version", "inc_conds", in);
}
void cls_version_check(librados::ObjectOperation& op, obj_version& objv, VersionCond cond)
{
bufferlist in;
cls_version_check_op call;
call.objv = objv;
obj_version_cond c;
c.cond = cond;
c.ver = objv;
call.conds.push_back(c);
encode(call, in);
op.exec("version", "check_conds", in);
}
class VersionReadCtx : public ObjectOperationCompletion {
obj_version *objv;
public:
explicit VersionReadCtx(obj_version *_objv) : objv(_objv) {}
void handle_completion(int r, bufferlist& outbl) override {
if (r >= 0) {
cls_version_read_ret ret;
try {
auto iter = outbl.cbegin();
decode(ret, iter);
*objv = ret.objv;
} catch (ceph::buffer::error& err) {
// nothing we can do about it atm
}
}
}
};
void cls_version_read(librados::ObjectReadOperation& op, obj_version *objv)
{
bufferlist inbl;
op.exec("version", "read", inbl, new VersionReadCtx(objv));
}
int cls_version_read(librados::IoCtx& io_ctx, std::string& oid, obj_version *ver)
{
bufferlist in, out;
int r = io_ctx.exec(oid, "version", "read", in, out);
if (r < 0)
return r;
cls_version_read_ret ret;
try {
auto iter = out.cbegin();
decode(ret, iter);
} catch (ceph::buffer::error& err) {
return -EIO;
}
*ver = ret.objv;
return r;
}
ceph-main/src/cls/version/cls_version_client.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_VERSION_CLIENT_H
#define CEPH_CLS_VERSION_CLIENT_H
#include "include/rados/librados_fwd.hpp"
#include "cls_version_ops.h"
/*
* version objclass
*/
void cls_version_set(librados::ObjectWriteOperation& op, obj_version& ver);
/* increase anyway */
void cls_version_inc(librados::ObjectWriteOperation& op);
/* conditional increase; the operation fails with -ECANCELED if the condition is not met */
void cls_version_inc(librados::ObjectWriteOperation& op, obj_version& ver, VersionCond cond);
void cls_version_read(librados::ObjectReadOperation& op, obj_version *objv);
// These overloads, which call io_ctx.operate() or io_ctx.exec() directly, should
// not be used from within RGW; RGW code should build the operation with the
// overloads that do not touch the IoCtx and submit it through rgw_rados_operate().
#ifndef CLS_CLIENT_HIDE_IOCTX
int cls_version_read(librados::IoCtx& io_ctx, std::string& oid, obj_version *ver);
#endif
void cls_version_check(librados::ObjectOperation& op, obj_version& ver, VersionCond cond);
#endif
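// Usage sketch (illustration only, not part of the Ceph tree): the conditional
// helpers above are typically used for optimistic concurrency. Read the current
// version, make the write conditional on it, and bump it in the same operation;
// the whole op then fails with -ECANCELED if another writer got there first.
// The object name handling and the full-object write are assumptions, and
// librados.hpp is assumed to be included.
static int example_guarded_write(librados::IoCtx& io_ctx, std::string oid,
                                 ceph::buffer::list& new_data)
{
  obj_version ver;
  int r = cls_version_read(io_ctx, oid, &ver);
  if (r < 0 && r != -ENOENT)
    return r;

  librados::ObjectWriteOperation op;
  cls_version_check(op, ver, VER_COND_EQ);  // abort if the version moved
  op.write_full(new_data);
  cls_version_inc(op);                      // bump the version in the same op
  r = io_ctx.operate(oid, &op);
  if (r == -ECANCELED) {
    // lost the race; a real caller would typically re-read and retry
  }
  return r;
}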
ceph-main/src/cls/version/cls_version_ops.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_VERSION_OPS_H
#define CEPH_CLS_VERSION_OPS_H
#include "cls_version_types.h"
struct cls_version_set_op {
obj_version objv;
cls_version_set_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(objv, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(objv, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_version_set_op)
struct cls_version_inc_op {
obj_version objv;
std::list<obj_version_cond> conds;
cls_version_inc_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(objv, bl);
encode(conds, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(objv, bl);
decode(conds, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_version_inc_op)
struct cls_version_check_op {
obj_version objv;
std::list<obj_version_cond> conds;
cls_version_check_op() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(objv, bl);
encode(conds, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(objv, bl);
decode(conds, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_version_check_op)
struct cls_version_read_ret {
obj_version objv;
cls_version_read_ret() {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(objv, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(objv, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(cls_version_read_ret)
#endif
ceph-main/src/cls/version/cls_version_types.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cls/version/cls_version_types.h"
#include "common/Formatter.h"
#include "common/ceph_json.h"
void obj_version::dump(ceph::Formatter *f) const
{
f->dump_int("ver", ver);
f->dump_string("tag", tag);
}
void obj_version::decode_json(JSONObj *obj)
{
JSONDecoder::decode_json("ver", ver, obj);
JSONDecoder::decode_json("tag", tag, obj);
}
ceph-main/src/cls/version/cls_version_types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_CLS_VERSION_TYPES_H
#define CEPH_CLS_VERSION_TYPES_H
#include "include/encoding.h"
#include "include/types.h"
class JSONObj;
struct obj_version {
uint64_t ver;
std::string tag;
obj_version() : ver(0) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(ver, bl);
encode(tag, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(ver, bl);
decode(tag, bl);
DECODE_FINISH(bl);
}
void inc() {
ver++;
}
void clear() {
ver = 0;
tag.clear();
}
bool empty() const {
return tag.empty();
}
bool compare(struct obj_version *v) const {
return (ver == v->ver &&
tag.compare(v->tag) == 0);
}
bool operator==(const struct obj_version& v) const {
return (ver == v.ver &&
tag.compare(v.tag) == 0);
}
void dump(ceph::Formatter *f) const;
void decode_json(JSONObj *obj);
static void generate_test_instances(std::list<obj_version*>& o);
};
WRITE_CLASS_ENCODER(obj_version)
enum VersionCond {
VER_COND_NONE = 0,
VER_COND_EQ, /* equal */
VER_COND_GT, /* greater than */
VER_COND_GE, /* greater or equal */
VER_COND_LT, /* less than */
VER_COND_LE, /* less or equal */
VER_COND_TAG_EQ,
VER_COND_TAG_NE,
};
struct obj_version_cond {
struct obj_version ver;
VersionCond cond;
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(ver, bl);
uint32_t c = (uint32_t)cond;
encode(c, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(ver, bl);
uint32_t c;
decode(c, bl);
cond = (VersionCond)c;
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(obj_version_cond)
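// Illustrative sketch (not part of the original header): building a condition
// from a previously read version, e.g. to guard an update. "cached" stands for
// an obj_version obtained earlier via cls_version_read(); the exact comparison
// semantics are applied server-side by the version objclass.
//
//   obj_version_cond cond;
//   cond.ver = cached;
//   cond.cond = VER_COND_GE;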
#endif
| 1,926 | 18.464646 | 70 | h |
null | ceph-main/src/common/AsyncOpTracker.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/AsyncOpTracker.h"
#include "include/Context.h"
AsyncOpTracker::AsyncOpTracker()
{
}
AsyncOpTracker::~AsyncOpTracker() {
std::lock_guard locker(m_lock);
ceph_assert(m_pending_ops == 0);
}
void AsyncOpTracker::start_op() {
std::lock_guard locker(m_lock);
++m_pending_ops;
}
void AsyncOpTracker::finish_op() {
Context *on_finish = nullptr;
{
std::lock_guard locker(m_lock);
ceph_assert(m_pending_ops > 0);
if (--m_pending_ops == 0) {
std::swap(on_finish, m_on_finish);
}
}
if (on_finish != nullptr) {
on_finish->complete(0);
}
}
void AsyncOpTracker::wait_for_ops(Context *on_finish) {
{
std::lock_guard locker(m_lock);
ceph_assert(m_on_finish == nullptr);
if (m_pending_ops > 0) {
m_on_finish = on_finish;
return;
}
}
on_finish->complete(0);
}
bool AsyncOpTracker::empty() {
std::lock_guard locker(m_lock);
return (m_pending_ops == 0);
}
| 1,041 | 18.660377 | 70 | cc |
null | ceph-main/src/common/AsyncOpTracker.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_ASYNC_OP_TRACKER_H
#define CEPH_ASYNC_OP_TRACKER_H
#include "common/ceph_mutex.h"
#include "include/Context.h"
class AsyncOpTracker {
public:
AsyncOpTracker();
~AsyncOpTracker();
void start_op();
void finish_op();
void wait_for_ops(Context *on_finish);
bool empty();
private:
ceph::mutex m_lock = ceph::make_mutex("AsyncOpTracker::m_lock");
uint32_t m_pending_ops = 0;
Context *m_on_finish = nullptr;
};
class C_TrackedOp : public Context {
public:
C_TrackedOp(AsyncOpTracker& async_op_tracker, Context* on_finish)
: m_async_op_tracker(async_op_tracker), m_on_finish(on_finish) {
m_async_op_tracker.start_op();
}
void finish(int r) override {
if (m_on_finish != nullptr) {
m_on_finish->complete(r);
}
m_async_op_tracker.finish_op();
}
private:
AsyncOpTracker& m_async_op_tracker;
Context* m_on_finish;
};
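// Usage sketch (illustrative, not part of the original header): wrap a completion
// callback in C_TrackedOp so the tracker counts the operation while it is in
// flight, then drain all outstanding ops before shutdown. start_background_work()
// is a hypothetical async API taking a Context*; C_SaferCond comes from
// common/Cond.h.
//
//   AsyncOpTracker tracker;
//   start_background_work(new C_TrackedOp(tracker, on_work_done));
//   ...
//   C_SaferCond drained;
//   tracker.wait_for_ops(&drained);  // fires immediately if nothing is pending
//   drained.wait();                  // returns once every tracked op finished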
#endif // CEPH_ASYNC_OP_TRACKER_H
| 1,018 | 19.795918 | 70 | h |
null | ceph-main/src/common/AsyncReserver.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef ASYNC_RESERVER_H
#define ASYNC_RESERVER_H
#include "common/Formatter.h"
#define rdout(x) lgeneric_subdout(cct,reserver,x)
/**
* Manages a configurable number of asynchronous reservations.
*
* Memory usage is linear with the number of items queued and
* linear with respect to the total number of priorities used
* over all time.
*/
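//
// Usage sketch (illustrative, not part of the original header). "Finisher" stands
// for any type providing queue(Context*) (the F parameter); pgid, grant_ctx,
// preempt_ctx and prio are caller-supplied values, and pg_t is just an example
// key type.
//
//   AsyncReserver<pg_t, Finisher> reserver(cct, &finisher, 5 /* max_allowed */);
//   reserver.request_reservation(pgid, grant_ctx, prio, preempt_ctx);
//   // grant_ctx is queued once a slot is free; preempt_ctx is queued instead
//   // if a higher-priority request later takes the slot away.
//   reserver.cancel_reservation(pgid);  // always call this to release the slot
//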
template <typename T, typename F>
class AsyncReserver {
CephContext *cct;
F *f;
unsigned max_allowed;
unsigned min_priority;
ceph::mutex lock = ceph::make_mutex("AsyncReserver::lock");
struct Reservation {
T item;
unsigned prio = 0;
Context *grant = 0;
Context *preempt = 0;
Reservation() {}
Reservation(T i, unsigned pr, Context *g, Context *p = 0)
: item(i), prio(pr), grant(g), preempt(p) {}
void dump(ceph::Formatter *f) const {
f->dump_stream("item") << item;
f->dump_unsigned("prio", prio);
f->dump_bool("can_preempt", !!preempt);
}
friend std::ostream& operator<<(std::ostream& out, const Reservation& r) {
return out << r.item << "(prio " << r.prio << " grant " << r.grant
<< " preempt " << r.preempt << ")";
}
};
std::map<unsigned, std::list<Reservation>> queues;
std::map<T, std::pair<unsigned, typename std::list<Reservation>::iterator>> queue_pointers;
std::map<T,Reservation> in_progress;
std::set<std::pair<unsigned,T>> preempt_by_prio; ///< in_progress that can be preempted
void preempt_one() {
ceph_assert(!preempt_by_prio.empty());
auto q = in_progress.find(preempt_by_prio.begin()->second);
ceph_assert(q != in_progress.end());
Reservation victim = q->second;
rdout(10) << __func__ << " preempt " << victim << dendl;
f->queue(victim.preempt);
victim.preempt = nullptr;
in_progress.erase(q);
preempt_by_prio.erase(preempt_by_prio.begin());
}
void do_queues() {
rdout(20) << __func__ << ":\n";
ceph::JSONFormatter jf(true);
jf.open_object_section("queue");
_dump(&jf);
jf.close_section();
jf.flush(*_dout);
*_dout << dendl;
// in case min_priority was adjusted up or max_allowed was adjusted down
while (!preempt_by_prio.empty() &&
(in_progress.size() > max_allowed ||
preempt_by_prio.begin()->first < min_priority)) {
preempt_one();
}
while (!queues.empty()) {
// choose highest priority queue
auto it = queues.end();
--it;
ceph_assert(!it->second.empty());
if (it->first < min_priority) {
break;
}
if (in_progress.size() >= max_allowed &&
!preempt_by_prio.empty() &&
it->first > preempt_by_prio.begin()->first) {
preempt_one();
}
if (in_progress.size() >= max_allowed) {
break; // no room
}
// grant
Reservation p = it->second.front();
rdout(10) << __func__ << " grant " << p << dendl;
queue_pointers.erase(p.item);
it->second.pop_front();
if (it->second.empty()) {
queues.erase(it);
}
f->queue(p.grant);
p.grant = nullptr;
in_progress[p.item] = p;
if (p.preempt) {
preempt_by_prio.insert(std::make_pair(p.prio, p.item));
}
}
}
public:
AsyncReserver(
CephContext *cct,
F *f,
unsigned max_allowed,
unsigned min_priority = 0)
: cct(cct),
f(f),
max_allowed(max_allowed),
min_priority(min_priority) {}
void set_max(unsigned max) {
std::lock_guard l(lock);
max_allowed = max;
do_queues();
}
void set_min_priority(unsigned min) {
std::lock_guard l(lock);
min_priority = min;
do_queues();
}
/**
* Update the priority of a reservation
*
* Note, on_reserved may be called following update_priority. Thus,
* the callback must be safe in that case. Callback will be called
* with no locks held. cancel_reservation must be called to release the
* reservation slot.
*
* Cases
* 1. Item is queued, re-queue with new priority
* 2. Item is queued, re-queue and preempt if new priority higher than an in progress item
* 3. Item is in progress, just adjust priority if no higher priority waiting
* 4. Item is in progress, adjust priority if higher priority items waiting preempt item
*
*/
void update_priority(T item, unsigned newprio) {
std::lock_guard l(lock);
auto i = queue_pointers.find(item);
if (i != queue_pointers.end()) {
unsigned prio = i->second.first;
if (newprio == prio)
return;
Reservation r = *i->second.second;
rdout(10) << __func__ << " update " << r << " (was queued)" << dendl;
// Like cancel_reservation() without preempting
queues[prio].erase(i->second.second);
if (queues[prio].empty()) {
queues.erase(prio);
}
queue_pointers.erase(i);
// Like request_reservation() to re-queue it but with new priority
ceph_assert(!queue_pointers.count(item) &&
!in_progress.count(item));
r.prio = newprio;
queues[newprio].push_back(r);
queue_pointers.insert(std::make_pair(item,
std::make_pair(newprio,--(queues[newprio]).end())));
} else {
auto p = in_progress.find(item);
if (p != in_progress.end()) {
if (p->second.prio == newprio)
return;
rdout(10) << __func__ << " update " << p->second
<< " (in progress)" << dendl;
// We want to preempt if priority goes down
// and smaller then highest priority waiting
if (p->second.preempt) {
if (newprio < p->second.prio && !queues.empty()) {
// choose highest priority queue
auto it = queues.end();
--it;
ceph_assert(!it->second.empty());
if (it->first > newprio) {
rdout(10) << __func__ << " update " << p->second
<< " lowered priority let do_queues() preempt it" << dendl;
}
}
preempt_by_prio.erase(std::make_pair(p->second.prio, p->second.item));
p->second.prio = newprio;
preempt_by_prio.insert(std::make_pair(p->second.prio, p->second.item));
} else {
p->second.prio = newprio;
}
} else {
rdout(10) << __func__ << " update " << item << " (not found)" << dendl;
}
}
do_queues();
return;
}
void dump(ceph::Formatter *f) {
std::lock_guard l(lock);
_dump(f);
}
void _dump(ceph::Formatter *f) {
f->dump_unsigned("max_allowed", max_allowed);
f->dump_unsigned("min_priority", min_priority);
f->open_array_section("queues");
for (auto& p : queues) {
f->open_object_section("queue");
f->dump_unsigned("priority", p.first);
f->open_array_section("items");
for (auto& q : p.second) {
f->dump_object("item", q);
}
f->close_section();
f->close_section();
}
f->close_section();
f->open_array_section("in_progress");
for (auto& p : in_progress) {
f->dump_object("item", p.second);
}
f->close_section();
}
/**
* Requests a reservation
*
* Note, on_reserved may be called following cancel_reservation. Thus,
* the callback must be safe in that case. Callback will be called
* with no locks held. cancel_reservation must be called to release the
* reservation slot.
*/
void request_reservation(
T item, ///< [in] reservation key
Context *on_reserved, ///< [in] callback to be called on reservation
unsigned prio, ///< [in] priority
Context *on_preempt = 0 ///< [in] callback to be called if we are preempted (optional)
) {
std::lock_guard l(lock);
Reservation r(item, prio, on_reserved, on_preempt);
rdout(10) << __func__ << " queue " << r << dendl;
ceph_assert(!queue_pointers.count(item) &&
!in_progress.count(item));
queues[prio].push_back(r);
queue_pointers.insert(std::make_pair(item,
std::make_pair(prio,--(queues[prio]).end())));
do_queues();
}
/**
* Cancels reservation
*
* Frees the reservation under key for use.
* Note, after cancel_reservation, the reservation_callback may or
* may not still be called.
*/
void cancel_reservation(
T item ///< [in] key for reservation to cancel
) {
std::lock_guard l(lock);
auto i = queue_pointers.find(item);
if (i != queue_pointers.end()) {
unsigned prio = i->second.first;
const Reservation& r = *i->second.second;
rdout(10) << __func__ << " cancel " << r << " (was queued)" << dendl;
delete r.grant;
delete r.preempt;
queues[prio].erase(i->second.second);
if (queues[prio].empty()) {
queues.erase(prio);
}
queue_pointers.erase(i);
} else {
auto p = in_progress.find(item);
if (p != in_progress.end()) {
rdout(10) << __func__ << " cancel " << p->second
<< " (was in progress)" << dendl;
if (p->second.preempt) {
preempt_by_prio.erase(std::make_pair(p->second.prio, p->second.item));
delete p->second.preempt;
}
in_progress.erase(p);
} else {
rdout(10) << __func__ << " cancel " << item << " (not found)" << dendl;
}
}
do_queues();
}
/**
* Has reservations
*
* Return true if there are reservations in progress
*/
bool has_reservation() {
std::lock_guard l(lock);
return !in_progress.empty();
}
static const unsigned MAX_PRIORITY = (unsigned)-1;
};
#undef rdout
#endif
| 9,780 | 29.470405 | 93 | h |
null | ceph-main/src/common/BackTrace.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <ostream>
#include <cxxabi.h>
#include <string.h>
#include "BackTrace.h"
#include "common/version.h"
#include "common/Formatter.h"
namespace ceph {
void ClibBackTrace::print(std::ostream& out) const
{
out << " " << pretty_version_to_str() << std::endl;
for (size_t i = skip; i < size; i++) {
out << " " << (i-skip+1) << ": " << demangle(strings[i]) << std::endl;
}
}
void ClibBackTrace::dump(Formatter *f) const
{
f->open_array_section("backtrace");
for (size_t i = skip; i < size; i++) {
// out << " " << (i-skip+1) << ": " << strings[i] << std::endl;
f->dump_string("frame", demangle(strings[i]));
}
f->close_section();
}
std::string ClibBackTrace::demangle(const char* name)
{
// find the parentheses and address offset surrounding the mangled name
#ifdef __FreeBSD__
static constexpr char OPEN = '<';
#else
static constexpr char OPEN = '(';
#endif
const char* begin = nullptr;
const char* end = nullptr;
for (const char *j = name; *j; ++j) {
if (*j == OPEN) {
begin = j + 1;
} else if (*j == '+') {
end = j;
}
}
if (begin && end && begin < end) {
std::string mangled(begin, end);
int status;
// only demangle a C++ mangled name
if (mangled.compare(0, 2, "_Z") == 0) {
// let __cxa_demangle do the malloc
char* demangled = abi::__cxa_demangle(mangled.c_str(), nullptr, nullptr, &status);
if (!status) {
std::string full_name{OPEN};
full_name += demangled;
full_name += end;
// buf could be reallocated, so free(demangled) instead
free(demangled);
return full_name;
}
// demangle failed, just pretend it's a C function with no args
}
// C function
return mangled + "()";
} else {
// didn't find the mangled name, just print the whole line
return name;
}
}
void PyBackTrace::dump(Formatter *f) const
{
f->open_array_section("backtrace");
for (auto& i : strings) {
f->dump_string("frame", i);
}
f->close_section();
}
void PyBackTrace::print(std::ostream& out) const
{
for (auto& i : strings) {
out << i << std::endl;
}
}
}
| 2,248 | 23.714286 | 88 | cc |
null | ceph-main/src/common/BackTrace.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_BACKTRACE_H
#define CEPH_BACKTRACE_H
#include "acconfig.h"
#include <iosfwd>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
#include <stdlib.h>
#include <list>
#include <string>
namespace ceph {
class Formatter;
struct BackTrace {
virtual ~BackTrace() {}
virtual void print(std::ostream& out) const = 0;
virtual void dump(Formatter *f) const = 0;
};
inline std::ostream& operator<<(std::ostream& out, const BackTrace& bt) {
bt.print(out);
return out;
}
struct ClibBackTrace : public BackTrace {
const static int max = 32;
int skip;
void *array[max]{};
size_t size;
char **strings;
explicit ClibBackTrace(int s) {
#ifdef HAVE_EXECINFO_H
skip = s;
size = backtrace(array, max);
strings = backtrace_symbols(array, size);
#else
skip = 0;
size = 0;
strings = nullptr;
#endif
}
~ClibBackTrace() {
free(strings);
}
ClibBackTrace(const ClibBackTrace& other);
const ClibBackTrace& operator=(const ClibBackTrace& other);
void print(std::ostream& out) const override;
void dump(Formatter *f) const override;
static std::string demangle(const char* name);
};
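// Usage sketch (illustrative): capture the current call stack, skipping the
// innermost frame, and print it; operator<< calls print(), which demangles
// C++ symbols.
//
//   ceph::ClibBackTrace bt(1);
//   std::cerr << bt << std::endl;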
struct PyBackTrace : public BackTrace {
std::list<std::string> strings;
explicit PyBackTrace(std::list<std::string>& s) : strings(s) {}
void dump(Formatter *f) const override;
void print(std::ostream& out) const override;
};
}
#endif
| 1,492 | 17.898734 | 73 | h |
null | ceph-main/src/common/CDC.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <random>
#include "CDC.h"
#include "FastCDC.h"
#include "FixedCDC.h"
std::unique_ptr<CDC> CDC::create(
const std::string& type,
int bits,
int windowbits)
{
if (type == "fastcdc") {
return std::unique_ptr<CDC>(new FastCDC(bits, windowbits));
}
if (type == "fixed") {
return std::unique_ptr<CDC>(new FixedCDC(bits, windowbits));
}
return nullptr;
}
void generate_buffer(int size, bufferlist *outbl, int seed)
{
std::mt19937_64 engine, engine2;
engine.seed(seed);
engine2.seed(seed);
// assemble from randomly-sized segments!
outbl->clear();
auto left = size;
while (left) {
size_t l = std::min<size_t>((engine2() & 0xffff0) + 16, left);
left -= l;
bufferptr p(l);
p.set_length(l);
char *b = p.c_str();
for (size_t i = 0; i < l / sizeof(uint64_t); ++i) {
((ceph_le64 *)b)[i] = ceph_le64(engine());
}
outbl->append(p);
}
}
| 1,010 | 20.978261 | 70 | cc |
null | ceph-main/src/common/CDC.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <vector>
#include <string>
#include "include/types.h"
#include "include/buffer.h"
class CDC {
public:
virtual ~CDC() = default;
/// calculate chunk boundaries as vector of (offset, length) pairs
virtual void calc_chunks(
const bufferlist& inputdata,
std::vector<std::pair<uint64_t, uint64_t>> *chunks) const = 0;
/// set target chunk size as a power of 2, and number of bits for hard min/max
virtual void set_target_bits(int bits, int windowbits = 2) = 0;
static std::unique_ptr<CDC> create(
const std::string& type,
int bits,
int windowbits = 0);
};
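// Usage sketch (illustrative, not part of the original header): split a
// bufferlist into content-defined chunks of roughly 16 KiB (2^14 bytes) using
// FastCDC. "bl" is assumed to hold the input data.
//
//   std::unique_ptr<CDC> cdc = CDC::create("fastcdc", 14);
//   std::vector<std::pair<uint64_t, uint64_t>> chunks;
//   cdc->calc_chunks(bl, &chunks);
//   for (auto& [off, len] : chunks) {
//     // hash/dedup the byte range [off, off + len) of bl
//   }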
void generate_buffer(int size, bufferlist *outbl, int seed = 0);
| 776 | 24.064516 | 80 | h |
null | ceph-main/src/common/Checksummer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_OS_BLUESTORE_CHECKSUMMER
#define CEPH_OS_BLUESTORE_CHECKSUMMER
#include "include/buffer.h"
#include "include/byteorder.h"
#include "include/ceph_assert.h"
#include "xxHash/xxhash.h"
class Checksummer {
public:
enum CSumType {
    CSUM_NONE = 1, // intentionally set to 1 to be aligned with OSDMonitor's pool_opts_t handling - it treats 0 as unset while we need to distinguish none and unset cases
CSUM_XXHASH32 = 2,
CSUM_XXHASH64 = 3,
CSUM_CRC32C = 4,
CSUM_CRC32C_16 = 5, // low 16 bits of crc32c
CSUM_CRC32C_8 = 6, // low 8 bits of crc32c
CSUM_MAX,
};
static const char *get_csum_type_string(unsigned t) {
switch (t) {
case CSUM_NONE: return "none";
case CSUM_XXHASH32: return "xxhash32";
case CSUM_XXHASH64: return "xxhash64";
case CSUM_CRC32C: return "crc32c";
case CSUM_CRC32C_16: return "crc32c_16";
case CSUM_CRC32C_8: return "crc32c_8";
default: return "???";
}
}
static int get_csum_string_type(const std::string &s) {
if (s == "none")
return CSUM_NONE;
if (s == "xxhash32")
return CSUM_XXHASH32;
if (s == "xxhash64")
return CSUM_XXHASH64;
if (s == "crc32c")
return CSUM_CRC32C;
if (s == "crc32c_16")
return CSUM_CRC32C_16;
if (s == "crc32c_8")
return CSUM_CRC32C_8;
return -EINVAL;
}
static size_t get_csum_init_value_size(int csum_type) {
switch (csum_type) {
case CSUM_NONE: return 0;
case CSUM_XXHASH32: return sizeof(xxhash32::init_value_t);
case CSUM_XXHASH64: return sizeof(xxhash64::init_value_t);
case CSUM_CRC32C: return sizeof(crc32c::init_value_t);
case CSUM_CRC32C_16: return sizeof(crc32c_16::init_value_t);
case CSUM_CRC32C_8: return sizeof(crc32c_8::init_value_t);
default: return 0;
}
}
static size_t get_csum_value_size(int csum_type) {
switch (csum_type) {
case CSUM_NONE: return 0;
case CSUM_XXHASH32: return 4;
case CSUM_XXHASH64: return 8;
case CSUM_CRC32C: return 4;
case CSUM_CRC32C_16: return 2;
case CSUM_CRC32C_8: return 1;
default: return 0;
}
}
struct crc32c {
typedef uint32_t init_value_t;
typedef ceph_le32 value_t;
// we have no execution context/state.
typedef int state_t;
static void init(state_t *state) {
}
static void fini(state_t *state) {
}
static init_value_t calc(
state_t state,
init_value_t init_value,
size_t len,
ceph::buffer::list::const_iterator& p
) {
return p.crc32c(len, init_value);
}
};
struct crc32c_16 {
typedef uint32_t init_value_t;
typedef ceph_le16 value_t;
// we have no execution context/state.
typedef int state_t;
static void init(state_t *state) {
}
static void fini(state_t *state) {
}
static init_value_t calc(
state_t state,
init_value_t init_value,
size_t len,
ceph::buffer::list::const_iterator& p
) {
return p.crc32c(len, init_value) & 0xffff;
}
};
struct crc32c_8 {
typedef uint32_t init_value_t;
typedef __u8 value_t;
// we have no execution context/state.
typedef int state_t;
static void init(state_t *state) {
}
static void fini(state_t *state) {
}
static init_value_t calc(
state_t state,
init_value_t init_value,
size_t len,
ceph::buffer::list::const_iterator& p
) {
return p.crc32c(len, init_value) & 0xff;
}
};
struct xxhash32 {
typedef uint32_t init_value_t;
typedef ceph_le32 value_t;
typedef XXH32_state_t *state_t;
static void init(state_t *s) {
*s = XXH32_createState();
}
static void fini(state_t *s) {
XXH32_freeState(*s);
}
static init_value_t calc(
state_t state,
init_value_t init_value,
size_t len,
ceph::buffer::list::const_iterator& p
) {
XXH32_reset(state, init_value);
while (len > 0) {
const char *data;
size_t l = p.get_ptr_and_advance(len, &data);
XXH32_update(state, data, l);
len -= l;
}
return XXH32_digest(state);
}
};
struct xxhash64 {
typedef uint64_t init_value_t;
typedef ceph_le64 value_t;
typedef XXH64_state_t *state_t;
static void init(state_t *s) {
*s = XXH64_createState();
}
static void fini(state_t *s) {
XXH64_freeState(*s);
}
static init_value_t calc(
state_t state,
init_value_t init_value,
size_t len,
ceph::buffer::list::const_iterator& p
) {
XXH64_reset(state, init_value);
while (len > 0) {
const char *data;
size_t l = p.get_ptr_and_advance(len, &data);
XXH64_update(state, data, l);
len -= l;
}
return XXH64_digest(state);
}
};
template<class Alg>
static int calculate(
size_t csum_block_size,
size_t offset,
size_t length,
const ceph::buffer::list &bl,
ceph::buffer::ptr* csum_data
) {
return calculate<Alg>(-1, csum_block_size, offset, length, bl, csum_data);
}
template<class Alg>
static int calculate(
typename Alg::init_value_t init_value,
size_t csum_block_size,
size_t offset,
size_t length,
const ceph::buffer::list &bl,
ceph::buffer::ptr* csum_data) {
ceph_assert(length % csum_block_size == 0);
size_t blocks = length / csum_block_size;
ceph::buffer::list::const_iterator p = bl.begin();
ceph_assert(bl.length() >= length);
typename Alg::state_t state;
Alg::init(&state);
ceph_assert(csum_data->length() >= (offset + length) / csum_block_size *
sizeof(typename Alg::value_t));
typename Alg::value_t *pv =
reinterpret_cast<typename Alg::value_t*>(csum_data->c_str());
pv += offset / csum_block_size;
while (blocks--) {
*pv = Alg::calc(state, init_value, csum_block_size, p);
++pv;
}
Alg::fini(&state);
return 0;
}
template<class Alg>
static int verify(
size_t csum_block_size,
size_t offset,
size_t length,
const ceph::buffer::list &bl,
const ceph::buffer::ptr& csum_data,
uint64_t *bad_csum=0
) {
ceph_assert(length % csum_block_size == 0);
ceph::buffer::list::const_iterator p = bl.begin();
ceph_assert(bl.length() >= length);
typename Alg::state_t state;
Alg::init(&state);
const typename Alg::value_t *pv =
reinterpret_cast<const typename Alg::value_t*>(csum_data.c_str());
pv += offset / csum_block_size;
size_t pos = offset;
while (length > 0) {
typename Alg::init_value_t v = Alg::calc(state, -1, csum_block_size, p);
if (*pv != v) {
if (bad_csum) {
*bad_csum = v;
}
Alg::fini(&state);
return pos;
}
++pv;
pos += csum_block_size;
length -= csum_block_size;
}
Alg::fini(&state);
return -1; // no errors
}
};
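// Usage sketch (illustrative, not part of the original header): compute one
// crc32c value per 4 KiB block of a bufferlist and verify it later. The length
// must be a multiple of the block size and csum must hold one value_t per block.
//
//   ceph::buffer::ptr csum(bl.length() / 4096 * sizeof(ceph_le32));
//   Checksummer::calculate<Checksummer::crc32c>(4096, 0, bl.length(), bl, &csum);
//   uint64_t bad_csum;
//   int pos = Checksummer::verify<Checksummer::crc32c>(4096, 0, bl.length(), bl,
//                                                      csum, &bad_csum);
//   // pos < 0: all blocks verified; otherwise pos is the byte offset of the
//   // first mismatching block and bad_csum is the value computed there.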
#endif
| 6,958 | 24.305455 | 168 | h |
null | ceph-main/src/common/Clock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CLOCK_H
#define CEPH_CLOCK_H
#include "include/utime.h"
#include <time.h>
static inline utime_t ceph_clock_now()
{
#if defined(__linux__)
struct timespec tp;
clock_gettime(CLOCK_REALTIME, &tp);
utime_t n(tp);
#else
struct timeval tv;
gettimeofday(&tv, nullptr);
utime_t n(&tv);
#endif
return n;
}
#endif
| 761 | 19.594595 | 70 | h |
null | ceph-main/src/common/CommandTable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef COMMAND_TABLE_H_
#define COMMAND_TABLE_H_
#include "messages/MCommand.h"
#include "messages/MMgrCommand.h"
class CommandOp
{
public:
ConnectionRef con;
ceph_tid_t tid;
std::vector<std::string> cmd;
ceph::buffer::list inbl;
Context *on_finish;
ceph::buffer::list *outbl;
std::string *outs;
MessageRef get_message(const uuid_d &fsid,
bool mgr=false) const
{
if (mgr) {
auto m = ceph::make_message<MMgrCommand>(fsid);
m->cmd = cmd;
m->set_data(inbl);
m->set_tid(tid);
return m;
} else {
auto m = ceph::make_message<MCommand>(fsid);
m->cmd = cmd;
m->set_data(inbl);
m->set_tid(tid);
return m;
}
}
CommandOp(const ceph_tid_t t) : tid(t), on_finish(nullptr),
outbl(nullptr), outs(nullptr) {}
CommandOp() : tid(0), on_finish(nullptr), outbl(nullptr), outs(nullptr) {}
};
/**
* Hold client-side state for a collection of in-flight commands
* to a remote service.
*/
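//
// Usage sketch (illustrative, not part of the original header): "con", "fsid",
// "on_finish" and the reply tid "t" are caller-provided values.
//
//   CommandTable<CommandOp> tbl;
//   CommandOp& op = tbl.start_command();
//   op.cmd = {"{\"prefix\": \"status\"}"};
//   op.on_finish = on_finish;
//   con->send_message2(op.get_message(fsid));
//   ...
//   // when the reply for tid t arrives:
//   if (tbl.exists(t)) {
//     tbl.get_command(t).on_finish->complete(0);
//     tbl.erase(t);
//   }
//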
template<typename T>
class CommandTable
{
protected:
ceph_tid_t last_tid;
std::map<ceph_tid_t, T> commands;
public:
CommandTable()
: last_tid(0)
{}
~CommandTable()
{
ceph_assert(commands.empty());
}
T& start_command()
{
ceph_tid_t tid = last_tid++;
commands.insert(std::make_pair(tid, T(tid)) );
return commands.at(tid);
}
const std::map<ceph_tid_t, T> &get_commands() const
{
return commands;
}
bool exists(ceph_tid_t tid) const
{
return commands.count(tid) > 0;
}
T& get_command(ceph_tid_t tid)
{
return commands.at(tid);
}
void erase(ceph_tid_t tid)
{
commands.erase(tid);
}
void clear() {
commands.clear();
}
};
#endif
| 2,151 | 18.044248 | 76 | h |
null | ceph-main/src/common/Cond.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COND_H
#define CEPH_COND_H
#include "common/Clock.h"
#include "common/ceph_mutex.h"
#include "include/Context.h"
/**
* context to signal a cond
*
* Generic context to signal a cond and store the return value. We
* assume the caller is holding the appropriate lock.
*/
class C_Cond : public Context {
ceph::condition_variable& cond; ///< Cond to signal
bool *done; ///< true if finish() has been called
int *rval; ///< return value
public:
C_Cond(ceph::condition_variable &c, bool *d, int *r) : cond(c), done(d), rval(r) {
*done = false;
}
void finish(int r) override {
*done = true;
*rval = r;
cond.notify_all();
}
};
/**
* context to signal a cond, protected by a lock
*
* Generic context to signal a cond under a specific lock. We take the
* lock in the finish() callback, so the finish() caller must not
* already hold it.
*/
class C_SafeCond : public Context {
ceph::mutex& lock; ///< Mutex to take
ceph::condition_variable& cond; ///< Cond to signal
bool *done; ///< true after finish() has been called
int *rval; ///< return value (optional)
public:
C_SafeCond(ceph::mutex& l, ceph::condition_variable& c, bool *d, int *r=0)
: lock(l), cond(c), done(d), rval(r) {
*done = false;
}
void finish(int r) override {
std::lock_guard l{lock};
if (rval)
*rval = r;
*done = true;
cond.notify_all();
}
};
/**
* Context providing a simple wait() mechanism to wait for completion
*
* The context will not be deleted as part of complete and must live
* until wait() returns.
*/
class C_SaferCond : public Context {
ceph::mutex lock; ///< Mutex to take
ceph::condition_variable cond; ///< Cond to signal
bool done = false; ///< true after finish() has been called
int rval = 0; ///< return value
public:
C_SaferCond() :
C_SaferCond("C_SaferCond")
{}
explicit C_SaferCond(const std::string &name)
: lock(ceph::make_mutex(name)) {}
void finish(int r) override { complete(r); }
/// We overload complete in order to not delete the context
void complete(int r) override {
std::lock_guard l(lock);
done = true;
rval = r;
cond.notify_all();
}
/// Returns rval once the Context is called
int wait() {
std::unique_lock l{lock};
cond.wait(l, [this] { return done;});
return rval;
}
/// Wait until the \c secs expires or \c complete() is called
int wait_for(double secs) {
return wait_for(ceph::make_timespan(secs));
}
int wait_for(ceph::timespan secs) {
std::unique_lock l{lock};
if (done) {
return rval;
}
if (cond.wait_for(l, secs, [this] { return done; })) {
return rval;
} else {
return ETIMEDOUT;
}
}
};
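// Usage sketch (illustrative, not part of the original header): wait
// synchronously for an asynchronous call that takes a Context*.
// start_async_flush() is a hypothetical API that eventually calls complete(r)
// on the supplied context.
//
//   C_SaferCond cond;
//   start_async_flush(&cond);
//   int r = cond.wait();  // blocks until complete(r) is called, then returns r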
#endif
| 3,193 | 24.96748 | 84 | h |
null | ceph-main/src/common/ConfUtils.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
// #define BOOST_SPIRIT_DEBUG
#include <algorithm>
#include <cctype>
#include <experimental/iterator>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <iterator>
#include <map>
#include <sstream>
#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/trim_all.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/phoenix.hpp>
#include <boost/spirit/include/support_line_pos_iterator.hpp>
#include "include/buffer.h"
#include "common/errno.h"
#include "common/utf8.h"
#include "common/ConfUtils.h"
namespace fs = std::filesystem;
using std::ostringstream;
using std::string;
#define MAX_CONFIG_FILE_SZ 0x40000000
conf_line_t::conf_line_t(const std::string& key, const std::string& val)
: key{ConfFile::normalize_key_name(key)},
val{boost::algorithm::trim_copy_if(
val,
[](unsigned char c) {
return std::isspace(c);
})}
{}
bool conf_line_t::operator<(const conf_line_t &rhs) const
{
// We only compare keys.
// If you have more than one line with the same key in a given section, the
// last one wins.
return key < rhs.key;
}
std::ostream &operator<<(std::ostream& oss, const conf_line_t &l)
{
oss << "conf_line_t(key = '" << l.key << "', val='" << l.val << "')";
return oss;
}
conf_section_t::conf_section_t(const std::string& heading,
const std::vector<conf_line_t>& lines)
: heading{heading}
{
for (auto& line : lines) {
auto [where, inserted] = insert(line);
if (!inserted) {
erase(where);
insert(line);
}
}
}
///////////////////////// ConfFile //////////////////////////
ConfFile::ConfFile(const std::vector<conf_section_t>& sections)
{
for (auto& section : sections) {
auto [old_sec, sec_inserted] = emplace(section.heading, section);
if (!sec_inserted) {
// merge lines in section into old_sec
for (auto& line : section) {
auto [old_line, line_inserted] = old_sec->second.emplace(line);
// and replace the existing ones if any
if (!line_inserted) {
old_sec->second.erase(old_line);
old_sec->second.insert(line);
}
}
}
}
}
/* We load the whole file into memory and then parse it. Although this is not
* the optimal approach, it does mean that most of this code can be shared with
* the bufferlist loading function. Since bufferlists are always in-memory, the
* load_from_buffer interface works well for them.
* In general, configuration files should be a few kilobytes at maximum, so
* loading the whole configuration into memory shouldn't be a problem.
*/
int ConfFile::parse_file(const std::string &fname,
std::ostream *warnings)
{
clear();
try {
if (auto file_size = fs::file_size(fname); file_size > MAX_CONFIG_FILE_SZ) {
*warnings << __func__ << ": config file '" << fname
<< "' is " << file_size << " bytes, "
<< "but the maximum is " << MAX_CONFIG_FILE_SZ;
return -EINVAL;
}
} catch (const fs::filesystem_error& e) {
std::error_code ec;
auto is_other = fs::is_other(fname, ec);
if (!ec && is_other) {
// /dev/null?
return 0;
} else {
*warnings << __func__ << ": " << e.what();
return -e.code().value();
}
}
std::ifstream ifs{fname};
std::string buffer{std::istreambuf_iterator<char>(ifs),
std::istreambuf_iterator<char>()};
if (parse_buffer(buffer, warnings)) {
return 0;
} else {
return -EINVAL;
}
}
namespace {
namespace qi = boost::spirit::qi;
namespace phoenix = boost::phoenix;
template<typename Iterator, typename Skipper>
struct IniGrammer : qi::grammar<Iterator, ConfFile(), Skipper>
{
struct error_handler_t {
std::ostream& os;
template<typename Iter>
auto operator()(Iter first, Iter last, Iter where,
const boost::spirit::info& what) const {
auto line_start = boost::spirit::get_line_start(first, where);
os << "parse error: expected '" << what
<< "' in line " << boost::spirit::get_line(where)
<< " at position " << boost::spirit::get_column(line_start, where) << "\n";
return qi::fail;
}
};
IniGrammer(Iterator begin, std::ostream& err)
: IniGrammer::base_type{conf_file},
report_error{error_handler_t{err}}
{
using qi::_1;
using qi::_2;
using qi::_val;
using qi::char_;
using qi::eoi;
using qi::eol;
using qi::blank;
using qi::lexeme;
using qi::lit;
using qi::raw;
blanks = *blank;
comment_start = lit('#') | lit(';');
continue_marker = lit('\\') >> eol;
text_char %=
(lit('\\') >> (char_ - eol)) |
(char_ - (comment_start | eol));
key %= raw[+(text_char - char_("=[ ")) % +blank];
quoted_value %=
lexeme[lit('"') >> *(text_char - '"') > '"'] |
lexeme[lit('\'') >> *(text_char - '\'') > '\''];
unquoted_value %= *text_char;
comment = *blank >> comment_start > *(char_ - eol);
empty_line = -(blanks|comment) >> eol;
value %= quoted_value | unquoted_value;
key_val =
(blanks >> key >> blanks >> '=' > blanks > value > +empty_line)
[_val = phoenix::construct<conf_line_t>(_1, _2)];
heading %= lit('[') > +(text_char - ']') > ']' > +empty_line;
section =
(heading >> *(key_val - heading) >> *eol)
[_val = phoenix::construct<conf_section_t>(_1, _2)];
conf_file =
(key_val [_val = phoenix::construct<ConfFile>(_1)]
|
(*eol >> (*section)[_val = phoenix::construct<ConfFile>(_1)])
) > eoi;
empty_line.name("empty_line");
key.name("key");
quoted_value.name("quoted value");
unquoted_value.name("unquoted value");
key_val.name("key=val");
heading.name("section name");
section.name("section");
qi::on_error<qi::fail>(
conf_file,
report_error(qi::_1, qi::_2, qi::_3, qi::_4));
BOOST_SPIRIT_DEBUG_NODE(heading);
BOOST_SPIRIT_DEBUG_NODE(section);
BOOST_SPIRIT_DEBUG_NODE(key);
BOOST_SPIRIT_DEBUG_NODE(quoted_value);
BOOST_SPIRIT_DEBUG_NODE(unquoted_value);
BOOST_SPIRIT_DEBUG_NODE(key_val);
BOOST_SPIRIT_DEBUG_NODE(conf_file);
}
qi::rule<Iterator> blanks;
qi::rule<Iterator> empty_line;
qi::rule<Iterator> comment_start;
qi::rule<Iterator> continue_marker;
qi::rule<Iterator, char()> text_char;
qi::rule<Iterator, std::string(), Skipper> key;
qi::rule<Iterator, std::string(), Skipper> quoted_value;
qi::rule<Iterator, std::string(), Skipper> unquoted_value;
qi::rule<Iterator> comment;
qi::rule<Iterator, std::string(), Skipper> value;
qi::rule<Iterator, conf_line_t(), Skipper> key_val;
qi::rule<Iterator, std::string(), Skipper> heading;
qi::rule<Iterator, conf_section_t(), Skipper> section;
qi::rule<Iterator, ConfFile(), Skipper> conf_file;
boost::phoenix::function<error_handler_t> report_error;
};
}
bool ConfFile::parse_buffer(std::string_view buf, std::ostream* err)
{
assert(err);
#ifdef _WIN32
// We'll need to ensure that there's a new line at the end of the buffer,
// otherwise the config parsing will fail.
std::string _buf = std::string(buf) + "\n";
#else
std::string_view _buf = buf;
#endif
if (int err_pos = check_utf8(_buf.data(), _buf.size()); err_pos > 0) {
*err << "parse error: invalid UTF-8 found at line "
<< std::count(_buf.begin(), std::next(_buf.begin(), err_pos), '\n') + 1;
return false;
}
using iter_t = boost::spirit::line_pos_iterator<decltype(_buf.begin())>;
iter_t first{_buf.begin()};
using skipper_t = qi::rule<iter_t>;
IniGrammer<iter_t, skipper_t> grammar{first, *err};
skipper_t skipper = grammar.continue_marker | grammar.comment;
return qi::phrase_parse(first, iter_t{_buf.end()},
grammar, skipper, *this);
}
int ConfFile::parse_bufferlist(ceph::bufferlist *bl,
std::ostream *warnings)
{
clear();
ostringstream oss;
if (!warnings) {
warnings = &oss;
}
return parse_buffer({bl->c_str(), bl->length()}, warnings) ? 0 : -EINVAL;
}
int ConfFile::read(std::string_view section_name,
std::string_view key,
std::string &val) const
{
string k(normalize_key_name(key));
if (auto s = base_type::find(section_name); s != end()) {
conf_line_t exemplar{k, {}};
if (auto line = s->second.find(exemplar); line != s->second.end()) {
val = line->val;
return 0;
}
}
return -ENOENT;
}
/* Normalize a key name.
*
* Normalized key names have no leading or trailing whitespace, and all
* whitespace is stored as underscores. The main reason for selecting this
* normal form is so that in common/config.cc, we can use a macro to stringify
* the field names of md_config_t and get a key in normal form.
*/
std::string ConfFile::normalize_key_name(std::string_view key)
{
std::string k{key};
boost::algorithm::trim_fill_if(k, "_", isspace);
return k;
}
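// Example (illustrative): normalize_key_name("  osd max  backfills ") returns
// "osd_max_backfills" - leading/trailing whitespace is dropped and each inner
// run of whitespace becomes a single underscore.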
void ConfFile::check_old_style_section_names(const std::vector<std::string>& prefixes,
std::ostream& os)
{
// Warn about section names that look like old-style section names
std::vector<std::string> old_style_section_names;
for (auto& [name, section] : *this) {
for (auto& prefix : prefixes) {
if (name.find(prefix) == 0 && name.size() > 3 && name[3] != '.') {
old_style_section_names.push_back(name);
}
}
}
if (!old_style_section_names.empty()) {
os << "ERROR! old-style section name(s) found: ";
std::copy(std::begin(old_style_section_names),
std::end(old_style_section_names),
std::experimental::make_ostream_joiner(os, ", "));
os << ". Please use the new style section names that include a period.";
}
}
std::ostream &operator<<(std::ostream &oss, const ConfFile &cf)
{
for (auto& [name, section] : cf) {
oss << "[" << name << "]\n";
for (auto& [key, val] : section) {
if (!key.empty()) {
oss << "\t" << key << " = \"" << val << "\"\n";
}
}
}
return oss;
}
| 10,243 | 29.041056 | 86 | cc |
null | ceph-main/src/common/ConfUtils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CONFUTILS_H
#define CEPH_CONFUTILS_H
#include <deque>
#include <map>
#include <set>
#include <string>
#include <string_view>
#include <vector>
#include "include/buffer_fwd.h"
/*
* Ceph configuration file support.
*
* This class loads an INI-style configuration from a file or bufferlist, and
* holds it in memory. In general, an INI configuration file is composed of
* sections, which contain key/value pairs. You can put comments on the end of
* lines by using either a hash mark (#) or the semicolon (;).
*
 * You can get information out of ConfFile by calling read() or by examining
* individual sections.
*
* This class could be extended to support modifying configuration files and
* writing them back out without too much difficulty. Currently, this is not
* implemented, and the file is read-only.
*/
struct conf_line_t {
conf_line_t() = default;
conf_line_t(const std::string& key, const std::string& val);
bool operator<(const conf_line_t& rhs) const;
std::string key;
std::string val;
};
std::ostream &operator<<(std::ostream& oss, const conf_line_t& line);
class conf_section_t : public std::set<conf_line_t> {
public:
conf_section_t() = default;
conf_section_t(const std::string& heading,
const std::vector<conf_line_t>& lines);
std::string heading;
friend std::ostream& operator<<(std::ostream& os, const conf_section_t&);
};
class ConfFile : public std::map<std::string, conf_section_t, std::less<>> {
using base_type = std::map<std::string, conf_section_t, std::less<>>;
public:
ConfFile()
: ConfFile{std::vector<conf_section_t>{}}
{}
ConfFile(const conf_line_t& line)
: ConfFile{{conf_section_t{"global", {line}}}}
{}
ConfFile(const std::vector<conf_section_t>& sections);
int parse_file(const std::string &fname, std::ostream *warnings);
int parse_bufferlist(ceph::bufferlist *bl, std::ostream *warnings);
bool parse_buffer(std::string_view buf, std::ostream* warning);
int read(std::string_view section, std::string_view key,
std::string &val) const;
static std::string normalize_key_name(std::string_view key);
// print warnings to os if any old-style section name is found
//
// consider a section name as old-style name if it starts with any of the
// given prefixes, but does not follow with a "."
void check_old_style_section_names(const std::vector<std::string>& prefixes,
std::ostream& os);
};
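// Usage sketch (illustrative, not part of the original header): parse a config
// file and read back a single value. The path, section and key are examples only.
//
//   ConfFile cf;
//   std::ostringstream warnings;
//   if (cf.parse_file("/etc/ceph/ceph.conf", &warnings) == 0) {
//     std::string fsid;
//     if (cf.read("global", "fsid", fsid) == 0) {
//       // fsid now holds the value from the [global] section
//     }
//   }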
std::ostream &operator<<(std::ostream& oss, const ConfFile& cf);
#endif
| 2,911 | 31.719101 | 78 | h |
null | ceph-main/src/common/ContextCompletion.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ContextCompletion.h"
namespace ceph
{
ContextCompletion::ContextCompletion(Context *ctx, bool ignore_enoent)
: m_ctx(ctx),
m_ignore_enoent(ignore_enoent), m_ret(0), m_building(true), m_current_ops(0)
{
}
void ContextCompletion::finish_adding_requests() {
bool complete;
{
std::lock_guard l(m_lock);
m_building = false;
complete = (m_current_ops == 0);
}
if (complete) {
m_ctx->complete(m_ret);
delete this;
}
}
void ContextCompletion::start_op() {
std::lock_guard l(m_lock);
++m_current_ops;
}
void ContextCompletion::finish_op(int r) {
bool complete;
{
std::lock_guard l(m_lock);
if (r < 0 && m_ret == 0 && (!m_ignore_enoent || r != -ENOENT)) {
m_ret = r;
}
--m_current_ops;
complete = (m_current_ops == 0 && !m_building);
}
if (complete) {
m_ctx->complete(m_ret);
delete this;
}
}
} // namespace ceph
| 1,009 | 19.2 | 80 | cc |
null | ceph-main/src/common/ContextCompletion.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_ASYNC_COMPLETION_H
#define CEPH_ASYNC_COMPLETION_H
#include "include/Context.h"
namespace ceph {
class ContextCompletion {
public:
ContextCompletion(Context *ctx, bool ignore_enoent);
void finish_adding_requests();
void start_op();
void finish_op(int r);
private:
ceph::mutex m_lock = ceph::make_mutex("ContextCompletion::m_lock");
Context *m_ctx;
bool m_ignore_enoent;
int m_ret;
bool m_building;
uint64_t m_current_ops;
};
class C_ContextCompletion : public Context {
public:
C_ContextCompletion(ContextCompletion &context_completion)
: m_context_completion(context_completion)
{
m_context_completion.start_op();
}
void finish(int r) override {
m_context_completion.finish_op(r);
}
private:
ContextCompletion &m_context_completion;
};
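// Usage sketch (illustrative, not part of the original header): fan out several
// sub-requests and have on_finish complete once all of them are done.
// issue_sub_request() is a hypothetical async call taking a Context*.
//
//   auto* completion = new ContextCompletion(on_finish, true /* ignore ENOENT */);
//   for (auto& extent : extents) {
//     issue_sub_request(extent, new C_ContextCompletion(*completion));
//   }
//   completion->finish_adding_requests();  // deletes itself once all ops finish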
} // namespace ceph
#endif // CEPH_ASYNC_COMPLETION_H
| 957 | 19.382979 | 70 | h |
null | ceph-main/src/common/Continuation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/Context.h"
/**
* The Continuation interface is designed to help easily create multi-step
* operations that share data without having to pass it around or create
* custom Context classes for each step. To write a Continuation:
* 1) create a child class with a function for each stage.
* 2) Put all your shared data members into the class.
* 3) In the constructor, register each function stage with set_callback().
* 4) Whenever you need to provide a Context callback that activates the next
* stage, call get_callback(stage_number). If you need to proceed to another
* stage immediately, call immediate(stage, retcode) and return its result.
*
* To use a class:
* 1) Construct the child class on the heap.
* 2) Call begin().
* 3) The destructor will be called once one of your functions returns true to
* indicate it is done.
*
* Please note that while you can skip stages and get multiple Callback
* objects at once, you *cannot* have any stage report that the Continuation
* is completed while any other stage Callbacks are outstanding. It's best to
* be serial unless you want to maintain your own metadata about which stages
* are still pending.
*
* In fact, there are only two situations in which a stage should return
* true while others are running:
* 1) A Callback was issued and completed in the same thread,
* 2) you called immediate(stage) and it is returning true.
*/
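/* Minimal sketch (illustrative, not part of the original header) of a two-stage
 * child class. do_first_async_call() is a hypothetical asynchronous API that
 * takes a Context*.
 *
 *   class MyOperation : public Continuation {
 *     enum { STAGE_FIRST, STAGE_SECOND };
 *     bool stage_first(int r) {
 *       do_first_async_call(get_callback(STAGE_SECOND));
 *       return false;  // not done yet; the STAGE_SECOND callback is outstanding
 *     }
 *     bool stage_second(int r) {
 *       set_rval(r);
 *       return true;   // done; on_finish will be completed with rval
 *     }
 *    public:
 *     explicit MyOperation(Context *c) : Continuation(c) {
 *       set_callback(STAGE_FIRST,
 *                    static_cast<stagePtr>(&MyOperation::stage_first));
 *       set_callback(STAGE_SECOND,
 *                    static_cast<stagePtr>(&MyOperation::stage_second));
 *     }
 *   };
 *
 *   // start it; the object deletes itself once a stage reports completion
 *   (new MyOperation(on_finish))->begin();
 */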
class Continuation {
std::set<int> stages_in_flight;
std::set<int> stages_processing;
int rval;
Context *on_finish;
bool reported_done;
class Callback : public Context {
Continuation *continuation;
int stage_to_activate;
public:
Callback(Continuation *c, int stage) :
continuation(c),
stage_to_activate(stage) {}
void finish(int r) override {
continuation->continue_function(r, stage_to_activate);
}
};
protected:
typedef bool (Continuation::*stagePtr)(int r);
/**
* Continue immediately to the given stage. It will be executed
* immediately, in the given thread.
* @pre You are in a callback function.
* @param stage The stage to execute
* @param r The return code that will be provided to the next stage
*/
bool immediate(int stage, int r) {
ceph_assert(!stages_in_flight.count(stage));
ceph_assert(!stages_processing.count(stage));
stages_in_flight.insert(stage);
stages_processing.insert(stage);
return _continue_function(r, stage);
}
/**
* Obtain a Context * that when complete()ed calls back into the given stage.
* @pre You are in a callback function.
* @param stage The stage this Context should activate
*/
Context *get_callback(int stage) {
stages_in_flight.insert(stage);
return new Callback(this, stage);
}
/**
* Set the return code that is passed to the finally-activated Context.
* @param new_rval The return code to use.
*/
void set_rval(int new_rval) { rval = new_rval; }
int get_rval() { return rval; }
/**
* Register member functions as associated with a given stage. Start
* your stage IDs at 0 and make that one the setup phase.
* @pre There are no other functions associated with the stage.
* @param stage The stage to associate this function with
* @param func The function to use
*/
void set_callback(int stage, stagePtr func) {
ceph_assert(callbacks.find(stage) == callbacks.end());
callbacks[stage] = func;
}
/**
* Called when the Continuation is done, as determined by a stage returning
* true and us having finished all the currently-processing ones.
*/
virtual void _done() {
on_finish->complete(rval);
on_finish = NULL;
return;
}
private:
std::map<int, Continuation::stagePtr> callbacks;
bool _continue_function(int r, int n) {
std::set<int>::iterator in_flight_iter = stages_in_flight.find(n);
ceph_assert(in_flight_iter != stages_in_flight.end());
ceph_assert(callbacks.count(n));
stagePtr p = callbacks[n];
[[maybe_unused]] auto [processing_iter, inserted] =
stages_processing.insert(n);
bool done = (this->*p)(r);
if (done)
reported_done = true;
stages_processing.erase(processing_iter);
stages_in_flight.erase(in_flight_iter);
return done;
}
void continue_function(int r, int stage) {
bool done = _continue_function(r, stage);
assert (!done ||
stages_in_flight.size() == stages_processing.size());
if ((done || reported_done) && stages_processing.empty()) {
_done();
delete this;
}
}
public:
/**
* Construct a new Continuation object. Call this from your child class,
* obviously.
*
   * @param c The Context which should be complete()ed when this Continuation
* is done.
*/
Continuation(Context *c) :
rval(0), on_finish(c), reported_done(false) {}
/**
* Clean up.
*/
virtual ~Continuation() { ceph_assert(on_finish == NULL); }
/**
* Begin running the Continuation.
*/
void begin() { stages_in_flight.insert(0); continue_function(0, 0); }
};
| 5,470 | 30.262857 | 79 | h |
null | ceph-main/src/common/Cycles.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* Copyright (c) 2011-2014 Stanford University
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "debug.h"
#include "Cycles.h"
double Cycles::cycles_per_sec = 0;
/**
* Perform once-only overall initialization for the Cycles class, such
* as calibrating the clock frequency. This method must be called
* before using the Cycles module.
*
* It is not initialized by default because the timing loops cause
* general process startup times to balloon
* (http://tracker.ceph.com/issues/15225).
*/
void Cycles::init()
{
if (cycles_per_sec != 0)
return;
  // Skip initialization if rdtsc is not implemented
if (rdtsc() == 0)
return;
// Compute the frequency of the fine-grained CPU timer: to do this,
// take parallel time readings using both rdtsc and gettimeofday.
// After 10ms have elapsed, take the ratio between these readings.
struct timeval start_time, stop_time;
uint64_t micros;
double old_cycles;
// There is one tricky aspect, which is that we could get interrupted
// between calling gettimeofday and reading the cycle counter, in which
// case we won't have corresponding readings. To handle this (unlikely)
// case, compute the overall result repeatedly, and wait until we get
// two successive calculations that are within 0.1% of each other.
old_cycles = 0;
while (1) {
if (gettimeofday(&start_time, NULL) != 0) {
ceph_abort_msg("couldn't read clock");
}
uint64_t start_cycles = rdtsc();
while (1) {
if (gettimeofday(&stop_time, NULL) != 0) {
ceph_abort_msg("couldn't read clock");
}
uint64_t stop_cycles = rdtsc();
micros = (stop_time.tv_usec - start_time.tv_usec) +
(stop_time.tv_sec - start_time.tv_sec)*1000000;
if (micros > 10000) {
cycles_per_sec = static_cast<double>(stop_cycles - start_cycles);
cycles_per_sec = 1000000.0*cycles_per_sec/ static_cast<double>(micros);
break;
}
}
double delta = cycles_per_sec/1000.0;
if ((old_cycles > (cycles_per_sec - delta)) &&
(old_cycles < (cycles_per_sec + delta))) {
return;
}
old_cycles = cycles_per_sec;
}
}
/**
* Return the number of CPU cycles per second.
*/
double Cycles::per_second()
{
return get_cycles_per_sec();
}
/**
* Given an elapsed time measured in cycles, return a floating-point number
* giving the corresponding time in seconds.
* \param cycles
* Difference between the results of two calls to rdtsc.
* \param cycles_per_sec
* Optional parameter to specify the frequency of the counter that #cycles
* was taken from. Useful when converting a remote machine's tick counter
* to seconds. The default value of 0 will use the local processor's
* computed counter frequency.
* \return
* The time in seconds corresponding to cycles.
*/
double Cycles::to_seconds(uint64_t cycles, double cycles_per_sec)
{
if (cycles_per_sec == 0)
cycles_per_sec = get_cycles_per_sec();
return static_cast<double>(cycles)/cycles_per_sec;
}
/**
* Given a time in seconds, return the number of cycles that it
* corresponds to.
* \param seconds
* Time in seconds.
* \param cycles_per_sec
* Optional parameter to specify the frequency of the counter that #cycles
* was taken from. Useful when converting a remote machine's tick counter
* to seconds. The default value of 0 will use the local processor's
* computed counter frequency.
* \return
* The approximate number of cycles corresponding to #seconds.
*/
uint64_t Cycles::from_seconds(double seconds, double cycles_per_sec)
{
if (cycles_per_sec == 0)
cycles_per_sec = get_cycles_per_sec();
return (uint64_t) (seconds*cycles_per_sec + 0.5);
}
/**
* Given an elapsed time measured in cycles, return an integer
* giving the corresponding time in microseconds. Note: to_seconds()
* is faster than this method.
* \param cycles
* Difference between the results of two calls to rdtsc.
* \param cycles_per_sec
* Optional parameter to specify the frequency of the counter that #cycles
* was taken from. Useful when converting a remote machine's tick counter
* to seconds. The default value of 0 will use the local processor's
* computed counter frequency.
* \return
* The time in microseconds corresponding to cycles (rounded).
*/
uint64_t Cycles::to_microseconds(uint64_t cycles, double cycles_per_sec)
{
return to_nanoseconds(cycles, cycles_per_sec) / 1000;
}
/**
* Given an elapsed time measured in cycles, return an integer
* giving the corresponding time in nanoseconds. Note: to_seconds()
* is faster than this method.
* \param cycles
* Difference between the results of two calls to rdtsc.
* \param cycles_per_sec
* Optional parameter to specify the frequency of the counter that #cycles
* was taken from. Useful when converting a remote machine's tick counter
* to seconds. The default value of 0 will use the local processor's
* computed counter frequency.
* \return
* The time in nanoseconds corresponding to cycles (rounded).
*/
uint64_t Cycles::to_nanoseconds(uint64_t cycles, double cycles_per_sec)
{
if (cycles_per_sec == 0)
cycles_per_sec = get_cycles_per_sec();
return (uint64_t) (1e09*static_cast<double>(cycles)/cycles_per_sec + 0.5);
}
/**
* Given a number of nanoseconds, return an approximate number of
* cycles for an equivalent time length.
* \param ns
* Number of nanoseconds.
* \param cycles_per_sec
* Optional parameter to specify the frequency of the counter that #cycles
* was taken from. Useful when converting a remote machine's tick counter
* to seconds. The default value of 0 will use the local processor's
* computed counter frequency.
* \return
* The approximate number of cycles for the same time length.
*/
uint64_t
Cycles::from_nanoseconds(uint64_t ns, double cycles_per_sec)
{
if (cycles_per_sec == 0)
cycles_per_sec = get_cycles_per_sec();
return (uint64_t) (static_cast<double>(ns)*cycles_per_sec/1e09 + 0.5);
}
/**
* Busy wait for a given number of microseconds.
* Callers should use this method in most reasonable cases as opposed to
 * usleep for accurate measurements. Calling usleep may put the processor
* in a low power mode/sleep state which reduces the clock frequency.
* So, each time the process/thread wakes up from usleep, it takes some time
 * to ramp up to maximum frequency. Thus measurements often incur higher
* latencies.
* \param us
* Number of microseconds.
*/
void
Cycles::sleep(uint64_t us)
{
uint64_t stop = Cycles::rdtsc() + Cycles::from_nanoseconds(1000*us);
while (Cycles::rdtsc() < stop);
}
| 7,888 | 34.696833 | 79 | cc |
null | ceph-main/src/common/Cycles.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* Copyright (c) 2011-2014 Stanford University
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef CEPH_CYCLES_H
#define CEPH_CYCLES_H
#include <cstdint>
/**
* This class provides static methods that read the fine-grain CPU
* cycle counter and translate between cycle-level times and absolute
* times.
*/
class Cycles {
public:
static void init();
/**
* Return the current value of the fine-grain CPU cycle counter
* (accessed via the RDTSC instruction).
*/
static __inline __attribute__((always_inline)) uint64_t rdtsc() {
#if defined(__i386__)
int64_t ret;
__asm__ volatile ("rdtsc" : "=A" (ret) );
return ret;
#elif defined(__x86_64__) || defined(__amd64__)
uint32_t lo, hi;
__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
return (((uint64_t)hi << 32) | lo);
#elif defined(__aarch64__)
//
// arch/arm64/include/asm/arch_timer.h
//
// static inline u64 arch_counter_get_cntvct(void)
// {
// u64 cval;
//
// isb();
// asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
//
// return cval;
// }
//
// https://github.com/cloudius-systems/osv/blob/master/arch/aarch64/arm-clock.cc
uint64_t cntvct;
asm volatile ("isb; mrs %0, cntvct_el0; isb; " : "=r" (cntvct) :: "memory");
return cntvct;
#elif defined(__powerpc__) || defined (__powerpc64__)
// Based on:
// https://github.com/randombit/botan/blob/net.randombit.botan/src/lib/entropy/hres_timer/hres_timer.cpp
uint32_t lo = 0, hi = 0;
asm volatile("mftbu %0; mftb %1" : "=r" (hi), "=r" (lo));
return (((uint64_t)hi << 32) | lo);
#elif defined(__s390__)
uint64_t tsc;
asm volatile("stck %0" : "=Q" (tsc) : : "cc");
return tsc;
#else
#warning No high-precision counter available for your OS/arch
return 0;
#endif
}
static double per_second();
static double to_seconds(uint64_t cycles, double cycles_per_sec = 0);
static uint64_t from_seconds(double seconds, double cycles_per_sec = 0);
static uint64_t to_microseconds(uint64_t cycles, double cycles_per_sec = 0);
static uint64_t to_nanoseconds(uint64_t cycles, double cycles_per_sec = 0);
static uint64_t from_nanoseconds(uint64_t ns, double cycles_per_sec = 0);
static void sleep(uint64_t us);
private:
Cycles();
  /// Conversion factor between cycles and seconds; computed by
/// Cycles::init.
static double cycles_per_sec;
/**
   * Returns the conversion factor between cycles and seconds, using
* a mock value for testing when appropriate.
*/
static __inline __attribute__((always_inline)) double get_cycles_per_sec() {
return cycles_per_sec;
}
};
#endif // CEPH_CYCLES_H
| 3,877 | 31.864407 | 108 | h |
null | ceph-main/src/common/DecayCounter.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "DecayCounter.h"
#include "Formatter.h"
#include "include/encoding.h"
void DecayCounter::encode(ceph::buffer::list& bl) const
{
decay();
ENCODE_START(5, 4, bl);
encode(val, bl);
ENCODE_FINISH(bl);
}
void DecayCounter::decode(ceph::buffer::list::const_iterator &p)
{
DECODE_START_LEGACY_COMPAT_LEN(5, 4, 4, p);
if (struct_v < 2) {
double k = 0.0;
decode(k, p);
}
if (struct_v < 3) {
double k = 0.0;
decode(k, p);
}
decode(val, p);
if (struct_v < 5) {
double delta, _;
decode(delta, p);
val += delta;
decode(_, p); /* velocity */
}
last_decay = clock::now();
DECODE_FINISH(p);
}
void DecayCounter::dump(ceph::Formatter *f) const
{
decay();
f->dump_float("value", val);
f->dump_float("halflife", rate.get_halflife());
}
void DecayCounter::generate_test_instances(std::list<DecayCounter*>& ls)
{
DecayCounter *counter = new DecayCounter();
counter->val = 3.0;
ls.push_back(counter);
counter = new DecayCounter();
ls.push_back(counter);
}
void DecayCounter::decay(double delta) const
{
auto now = clock::now();
double el = std::chrono::duration<double>(now - last_decay).count();
// calculate new value
double newval = val * exp(el * rate.k) + delta;
if (newval < .01) {
newval = 0.0;
}
val = newval;
last_decay = now;
}
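/*
 * Worked example (illustrative): with a half-life of 5 seconds, rate.k is
 * ln(0.5)/5. After el = 5 seconds, exp(el * rate.k) = exp(ln(0.5)) = 0.5, so a
 * value of 10.0 decays to 5.0 (plus whatever delta was just added). Values
 * that decay below .01 are clamped to zero by the check above.
 */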
| 1,755 | 20.95 | 72 | cc |
null | ceph-main/src/common/DecayCounter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_DECAYCOUNTER_H
#define CEPH_DECAYCOUNTER_H
#include "include/buffer.h"
#include "common/Formatter.h"
#include "common/StackStringStream.h"
#include "common/ceph_time.h"
#include <cmath>
#include <list>
#include <sstream>
/**
*
* TODO: normalize value based on some function of half_life,
* so that it can be interpreted as an approximation of a
 * moving average of N seconds. Currently, changing half-life
* skews the scale of the value, even at steady state.
*
*/
class DecayRate {
public:
friend class DecayCounter;
DecayRate() {}
// cppcheck-suppress noExplicitConstructor
DecayRate(double hl) { set_halflife(hl); }
DecayRate(const DecayRate &dr) : k(dr.k) {}
void set_halflife(double hl) {
k = log(.5) / hl;
}
double get_halflife() const {
return log(.5) / k;
}
private:
double k = 0; // k = ln(.5)/half_life
};
class DecayCounter {
public:
using time = ceph::coarse_mono_time;
using clock = ceph::coarse_mono_clock;
DecayCounter() : DecayCounter(DecayRate()) {}
explicit DecayCounter(const DecayRate &rate) : last_decay(clock::now()), rate(rate) {}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& p);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<DecayCounter*>& ls);
/**
* reading
*/
double get() const {
decay();
return val;
}
double get_last() const {
return val;
}
time get_last_decay() const {
return last_decay;
}
/**
* adjusting
*/
double hit(double v = 1.0) {
decay(v);
return val;
}
void adjust(double v = 1.0) {
decay(v);
}
void scale(double f) {
val *= f;
}
/**
* decay etc.
*/
void reset() {
last_decay = clock::now();
val = 0;
}
protected:
void decay(double delta) const;
void decay() const {decay(0.0);}
private:
mutable double val = 0.0; // value
mutable time last_decay = clock::zero(); // time of last decay
DecayRate rate;
};
inline void encode(const DecayCounter &c, ceph::buffer::list &bl) {
c.encode(bl);
}
inline void decode(DecayCounter &c, ceph::buffer::list::const_iterator &p) {
c.decode(p);
}
inline std::ostream& operator<<(std::ostream& out, const DecayCounter& d) {
CachedStackStringStream css;
css->precision(2);
double val = d.get();
*css << "[C " << std::scientific << val << "]";
return out << css->strv();
}
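/*
 * Usage sketch (illustrative; the names below are placeholders): a counter
 * whose value halves every 10 seconds.
 *
 *   DecayCounter hits{DecayRate(10.0)};
 *   hits.hit();                   // record one event
 *   double current = hits.get();  // decayed value as of now
 */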
#endif
| 2,902 | 20.189781 | 88 | h |
null | ceph-main/src/common/EventTrace.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) Intel Corporation.
* All rights reserved.
*
* Author: Anjaneya Chagam <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/EventTrace.h"
#include "common/TracepointProvider.h"
#include "messages/MOSDOpReply.h"
#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/eventtrace.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#else
#define tracepoint(...)
#endif
TracepointProvider::Traits event_tracepoint_traits("libeventtrace_tp.so", "event_tracing");
bool EventTrace::tpinit = false;
void EventTrace::init_tp(CephContext *_ctx)
{
if (unlikely(!_ctx))
return;
if (unlikely(!tpinit)) {
TracepointProvider::initialize<event_tracepoint_traits>(_ctx);
tpinit = true;
}
}
void EventTrace::set_message_attrs(const Message *m, string& oid, string& context, bool incl_oid)
{
// arg1 = oid, arg2 = message type, arg3 = source!source_addr!tid!sequence
if (m && (m->get_type() == CEPH_MSG_OSD_OP || m->get_type() == CEPH_MSG_OSD_OPREPLY)) {
if (incl_oid) {
if (m->get_type() == CEPH_MSG_OSD_OP)
oid = ((MOSDOp *)m)->get_oid().name;
else
oid = ((MOSDOpReply *)m)->get_oid().name;
}
ostringstream buf;
buf << m->get_source() << "!" << m->get_source_addr() << "!"
<< m->get_tid() << "!" << m->get_seq() << "!" << m->get_type();
context = buf.str();
}
}
EventTrace::EventTrace(CephContext *_ctx, const char *_file, const char *_func, int _line) :
ctx(_ctx),
file(_file),
func(_func),
line(_line)
{
if (unlikely(!ctx))
return;
last_ts = ceph_clock_now();
init_tp(ctx);
lsubdout(ctx, eventtrace, LOG_LEVEL) << "ENTRY (" << func << ") " << file << ":" << line << dendl;
tracepoint(eventtrace, func_enter, file.c_str(), func.c_str(), line);
}
EventTrace::~EventTrace()
{
if (unlikely(!ctx))
return;
lsubdout(ctx, eventtrace, LOG_LEVEL) << "EXIT (" << func << ") " << file << dendl;
tracepoint(eventtrace, func_exit, file.c_str(), func.c_str());
}
void EventTrace::log_event_latency(const char *event)
{
utime_t now = ceph_clock_now();
double usecs = (now.to_nsec()-last_ts.to_nsec())/1000;
OID_ELAPSED("", usecs, event);
last_ts = now;
}
void EventTrace::trace_oid_event(const char *oid, const char *event, const char *context,
const char *file, const char *func, int line)
{
if (unlikely(!g_ceph_context))
return;
init_tp(g_ceph_context);
tracepoint(eventtrace, oid_event, oid, event, context, file, func, line);
}
void EventTrace::trace_oid_event(const Message *m, const char *event, const char *file,
const char *func, int line, bool incl_oid)
{
string oid, context;
set_message_attrs(m, oid, context, incl_oid);
trace_oid_event(oid.c_str(), event, context.c_str(), file, func, line);
}
void EventTrace::trace_oid_elapsed(const char *oid, const char *event, const char *context,
double elapsed, const char *file, const char *func, int line)
{
if (unlikely(!g_ceph_context))
return;
init_tp(g_ceph_context);
tracepoint(eventtrace, oid_elapsed, oid, event, context, elapsed, file, func, line);
}
void EventTrace::trace_oid_elapsed(const Message *m, const char *event, double elapsed,
const char *file, const char *func, int line, bool incl_oid)
{
string oid, context;
set_message_attrs(m, oid, context, incl_oid);
trace_oid_elapsed(oid.c_str(), event, context.c_str(), elapsed, file, func, line);
}
| 3,808 | 28.757813 | 101 | cc |
null | ceph-main/src/common/EventTrace.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Intel Corporation.
* All rights reserved.
*
* Author: Anjaneya Chagam <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef _EventTrace_h_
#define _EventTrace_h_
#include "msg/Message.h"
#if defined(WITH_EVENTTRACE)
#define OID_EVENT_TRACE(oid, event) \
EventTrace::trace_oid_event(oid, event, "", __FILE__, __func__, __LINE__)
#define OID_EVENT_TRACE_WITH_MSG(msg, event, incl_oid) \
EventTrace::trace_oid_event(msg, event, __FILE__, __func__, __LINE__, incl_oid)
#define OID_ELAPSED(oid, elapsed, event) \
EventTrace::trace_oid_elapsed(oid, event, "", elapsed, __FILE__, __func__, __LINE__)
#define OID_ELAPSED_WITH_MSG(m, elapsed, event, incl_oid) \
EventTrace::trace_oid_elapsed(m, event, elapsed, __FILE__, __func__, __LINE__, incl_oid)
#define FUNCTRACE(cct) EventTrace _t1(cct, __FILE__, __func__, __LINE__)
#define OID_ELAPSED_FUNC_EVENT(event) _t1.log_event_latency(event)
#else
#define OID_EVENT_TRACE(oid, event)
#define OID_EVENT_TRACE_WITH_MSG(msg, event, incl_oid)
#define OID_ELAPSED(oid, elapsed, event)
#define OID_ELAPSED_WITH_MSG(m, elapsed, event, incl_oid)
#define FUNCTRACE(cct)
#define OID_ELAPSED_FUNC_EVENT(event)
#endif
#define LOG_LEVEL 30
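/*
 * Usage sketch (illustrative): the macros above are meant to be dropped into a
 * function body and compile away unless WITH_EVENTTRACE is defined.
 * handle_op() and the "op_queued" event name are placeholders.
 *
 *   void handle_op(CephContext *cct, const Message *m) {
 *     FUNCTRACE(cct); // traces function entry/exit
 *     OID_EVENT_TRACE_WITH_MSG(m, "op_queued", true);
 *   }
 */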
class EventTrace {
private:
CephContext *ctx;
std::string file;
std::string func;
int line;
utime_t last_ts;
static bool tpinit;
static void init_tp(CephContext *_ctx);
static void set_message_attrs(const Message *m, std::string& oid, std::string& context, bool incl_oid);
public:
EventTrace(CephContext *_ctx, const char *_file, const char *_func, int line);
~EventTrace();
void log_event_latency(const char *tag);
static void trace_oid_event(const char *oid, const char *event, const char *context,
const char *file, const char *func, int line);
static void trace_oid_event(const Message *m, const char *event, const char *file,
const char *func, int line, bool incl_oid);
static void trace_oid_elapsed(const char *oid, const char *event, const char *context,
double elapsed, const char *file, const char *func, int line);
static void trace_oid_elapsed(const Message *m, const char *event, double elapsed,
const char *file, const char *func, int line, bool incl_oid);
};
#endif
| 2,594 | 31.4375 | 105 | h |
null | ceph-main/src/common/FastCDC.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <random>
#include "FastCDC.h"
// Unlike FastCDC described in the paper, if we are close to the
// target, use the target mask. If we are very small or very large,
// use an adjusted mask--like the paper. This tries to keep more
// cut points using the same mask, and fewer using the small or large
// masks.
// How many more/fewer bits to set in the small/large masks.
//
// This is the "normalization level" or "NC level" in the FastCDC
// paper.
#define TARGET_WINDOW_MASK_BITS 2
// How big the 'target window' is (in which we use the target mask).
//
// In the FastCDC paper, this is always 0: there is no "target
// window," and either small_mask (maskS) or large_mask (maskL) is
// used--never target_mask (maskA).
#define TARGET_WINDOW_BITS 1
// How many bits larger/smaller than target for hard limits on chunk
// size.
//
// We assume the min and max sizes are always this many bits
// larger/smaller than the target. (Note that the FastCDC paper 8KB
// example has a min of 2KB (2 bits smaller) and max of 64 KB (3 bits
// larger), although it is not clear why they chose those values.)
#define SIZE_WINDOW_BITS 2
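// Worked example (illustrative): with target_bits = 18 (256 KiB average
// target) and the default SIZE_WINDOW_BITS = 2, _setup() below computes
// min_bits = 16 (64 KiB hard minimum) and max_bits = 20 (1 MiB hard maximum).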
void FastCDC::_setup(int target, int size_window_bits)
{
target_bits = target;
if (!size_window_bits) {
size_window_bits = SIZE_WINDOW_BITS;
}
min_bits = target - size_window_bits;
max_bits = target + size_window_bits;
std::mt19937_64 engine;
// prefill table
for (unsigned i = 0; i < 256; ++i) {
table[i] = engine();
}
// set mask
int did = 0;
uint64_t m = 0;
while (did < target_bits + TARGET_WINDOW_MASK_BITS) {
uint64_t bit = 1ull << (engine() & 63);
if (m & bit) {
continue; // this bit is already set
}
m |= bit;
++did;
if (did == target_bits - TARGET_WINDOW_MASK_BITS) {
large_mask = m;
} else if (did == target_bits) {
target_mask = m;
} else if (did == target_bits + TARGET_WINDOW_MASK_BITS) {
small_mask = m;
}
}
}
static inline bool _scan(
  // these are our cursor/position...
bufferlist::buffers_t::const_iterator *p,
const char **pp, const char **pe,
size_t& pos,
size_t max, // how much to read
uint64_t& fp, // fingerprint
uint64_t mask, const uint64_t *table)
{
while (pos < max) {
if (*pp == *pe) {
++(*p);
*pp = (*p)->c_str();
*pe = *pp + (*p)->length();
}
const char *te = std::min(*pe, *pp + max - pos);
for (; *pp < te; ++(*pp), ++pos) {
if ((fp & mask) == mask) {
return false;
}
fp = (fp << 1) ^ table[*(unsigned char*)*pp];
}
if (pos >= max) {
return true;
}
}
return true;
}
void FastCDC::calc_chunks(
const bufferlist& bl,
std::vector<std::pair<uint64_t, uint64_t>> *chunks) const
{
if (bl.length() == 0) {
return;
}
auto p = bl.buffers().begin();
const char *pp = p->c_str();
const char *pe = pp + p->length();
size_t pos = 0;
size_t len = bl.length();
while (pos < len) {
size_t cstart = pos;
uint64_t fp = 0;
// are we left with a min-sized (or smaller) chunk?
if (len - pos <= (1ul << min_bits)) {
chunks->push_back(std::pair<uint64_t,uint64_t>(pos, len - pos));
break;
}
// skip forward to the min chunk size cut point (minus the window, so
// we can initialize the rolling fingerprint).
size_t skip = (1 << min_bits) - window;
pos += skip;
while (skip) {
size_t s = std::min<size_t>(pe - pp, skip);
skip -= s;
pp += s;
if (pp == pe) {
++p;
pp = p->c_str();
pe = pp + p->length();
}
}
// first fill the window
size_t max = pos + window;
while (pos < max) {
if (pp == pe) {
++p;
pp = p->c_str();
pe = pp + p->length();
}
const char *te = std::min(pe, pp + (max - pos));
for (; pp < te; ++pp, ++pos) {
fp = (fp << 1) ^ table[*(unsigned char*)pp];
}
}
ceph_assert(pos < len);
// find an end marker
if (
// for the first "small" region
_scan(&p, &pp, &pe, pos,
std::min(len, cstart + (1 << (target_bits - TARGET_WINDOW_BITS))),
fp, small_mask, table) &&
// for the middle range (close to our target)
(TARGET_WINDOW_BITS == 0 ||
_scan(&p, &pp, &pe, pos,
std::min(len, cstart + (1 << (target_bits + TARGET_WINDOW_BITS))),
fp, target_mask, table)) &&
// we're past target, use large_mask!
_scan(&p, &pp, &pe, pos,
std::min(len,
cstart + (1 << max_bits)),
fp, large_mask, table))
;
chunks->push_back(std::pair<uint64_t,uint64_t>(cstart, pos - cstart));
}
}
| 4,705 | 25.587571 | 74 | cc |
null | ceph-main/src/common/FastCDC.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "CDC.h"
// Based on this paper:
// https://www.usenix.org/system/files/conference/atc16/atc16-paper-xia.pdf
//
// Changes:
// - window size fixed at 64 bytes (to match our word size)
// - use XOR instead of +
// - match mask instead of 0
// - use target mask when close to target size (instead of
// small/large mask). The idea here is to try to use a consistent (target)
// mask for most cut points if we can, and only resort to small/large mask
// when we are (very) small or (very) large.
// Note about the target_bits: The goal is an average chunk size of 1
// << target_bits. However, in reality the average is ~1.25x that
// because of the hard minimum chunk size.
class FastCDC : public CDC {
private:
int target_bits; ///< target chunk size bits (1 << target_bits)
int min_bits; ///< hard minimum chunk size bits (1 << min_bits)
int max_bits; ///< hard maximum chunk size bits (1 << max_bits)
uint64_t target_mask; ///< maskA in the paper (target_bits set)
uint64_t small_mask; ///< maskS in the paper (more bits set)
uint64_t large_mask; ///< maskL in the paper (fewer bits set)
/// lookup table with pseudorandom values for each byte
uint64_t table[256];
/// window size in bytes
const size_t window = sizeof(uint64_t)*8; // bits in uint64_t
void _setup(int target, int window_bits);
public:
FastCDC(int target = 18, int window_bits = 0) {
_setup(target, window_bits);
};
void set_target_bits(int target, int window_bits) override {
_setup(target, window_bits);
}
void calc_chunks(
const bufferlist& bl,
std::vector<std::pair<uint64_t, uint64_t>> *chunks) const override;
};
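/*
 * Usage sketch (illustrative; bl is a placeholder bufferlist): chunking with a
 * ~256 KiB average target.
 *
 *   FastCDC cdc(18);
 *   std::vector<std::pair<uint64_t, uint64_t>> chunks; // (offset, length)
 *   cdc.calc_chunks(bl, &chunks);
 */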
| 1,803 | 31.8 | 79 | h |
null | ceph-main/src/common/Finisher.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Finisher.h"
#define dout_subsys ceph_subsys_finisher
#undef dout_prefix
#define dout_prefix *_dout << "finisher(" << this << ") "
void Finisher::start()
{
ldout(cct, 10) << __func__ << dendl;
finisher_thread.create(thread_name.c_str());
}
void Finisher::stop()
{
ldout(cct, 10) << __func__ << dendl;
finisher_lock.lock();
finisher_stop = true;
// we don't have any new work to do, but we want the worker to wake up anyway
// to process the stop condition.
finisher_cond.notify_all();
finisher_lock.unlock();
finisher_thread.join(); // wait until the worker exits completely
ldout(cct, 10) << __func__ << " finish" << dendl;
}
void Finisher::wait_for_empty()
{
std::unique_lock ul(finisher_lock);
while (!finisher_queue.empty() || finisher_running) {
ldout(cct, 10) << "wait_for_empty waiting" << dendl;
finisher_empty_wait = true;
finisher_empty_cond.wait(ul);
}
ldout(cct, 10) << "wait_for_empty empty" << dendl;
finisher_empty_wait = false;
}
void *Finisher::finisher_thread_entry()
{
std::unique_lock ul(finisher_lock);
ldout(cct, 10) << "finisher_thread start" << dendl;
utime_t start;
uint64_t count = 0;
while (!finisher_stop) {
/// Every time we are woken up, we process the queue until it is empty.
while (!finisher_queue.empty()) {
// To reduce lock contention, we swap out the queue to process.
// This way other threads can submit new contexts to complete
// while we are working.
in_progress_queue.swap(finisher_queue);
finisher_running = true;
ul.unlock();
ldout(cct, 10) << "finisher_thread doing " << in_progress_queue << dendl;
if (logger) {
start = ceph_clock_now();
count = in_progress_queue.size();
}
// Now actually process the contexts.
for (auto p : in_progress_queue) {
p.first->complete(p.second);
}
ldout(cct, 10) << "finisher_thread done with " << in_progress_queue
<< dendl;
in_progress_queue.clear();
if (logger) {
logger->dec(l_finisher_queue_len, count);
logger->tinc(l_finisher_complete_lat, ceph_clock_now() - start);
}
ul.lock();
finisher_running = false;
}
ldout(cct, 10) << "finisher_thread empty" << dendl;
if (unlikely(finisher_empty_wait))
finisher_empty_cond.notify_all();
if (finisher_stop)
break;
ldout(cct, 10) << "finisher_thread sleeping" << dendl;
finisher_cond.wait(ul);
}
// If we are exiting, we signal the thread waiting in stop(),
// otherwise it would never unblock
finisher_empty_cond.notify_all();
ldout(cct, 10) << "finisher_thread stop" << dendl;
finisher_stop = false;
return 0;
}
| 2,808 | 27.958763 | 79 | cc |
null | ceph-main/src/common/Finisher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_FINISHER_H
#define CEPH_FINISHER_H
#include "include/Context.h"
#include "include/common_fwd.h"
#include "common/Thread.h"
#include "common/ceph_mutex.h"
#include "common/perf_counters.h"
#include "common/Cond.h"
/// Finisher queue length performance counter ID.
enum {
l_finisher_first = 997082,
l_finisher_queue_len,
l_finisher_complete_lat,
l_finisher_last
};
/** @brief Asynchronous cleanup class.
* Finisher asynchronously completes Contexts, which are simple classes
* representing callbacks, in a dedicated worker thread. Enqueuing
* contexts to complete is thread-safe.
*/
class Finisher {
CephContext *cct;
ceph::mutex finisher_lock; ///< Protects access to queues and finisher_running.
ceph::condition_variable finisher_cond; ///< Signaled when there is something to process.
ceph::condition_variable finisher_empty_cond; ///< Signaled when the finisher has nothing more to process.
bool finisher_stop; ///< Set when the finisher should stop.
bool finisher_running; ///< True when the finisher is currently executing contexts.
  bool finisher_empty_wait; ///< True means someone is waiting for the finisher to drain.
/// Queue for contexts for which complete(0) will be called.
std::vector<std::pair<Context*,int>> finisher_queue;
std::vector<std::pair<Context*,int>> in_progress_queue;
std::string thread_name;
/// Performance counter for the finisher's queue length.
/// Only active for named finishers.
PerfCounters *logger;
void *finisher_thread_entry();
struct FinisherThread : public Thread {
Finisher *fin;
explicit FinisherThread(Finisher *f) : fin(f) {}
void* entry() override { return fin->finisher_thread_entry(); }
} finisher_thread;
public:
/// Add a context to complete, optionally specifying a parameter for the complete function.
void queue(Context *c, int r = 0) {
std::unique_lock ul(finisher_lock);
bool was_empty = finisher_queue.empty();
finisher_queue.push_back(std::make_pair(c, r));
if (was_empty) {
finisher_cond.notify_one();
}
if (logger)
logger->inc(l_finisher_queue_len);
}
void queue(std::list<Context*>& ls) {
{
std::unique_lock ul(finisher_lock);
if (finisher_queue.empty()) {
finisher_cond.notify_all();
}
for (auto i : ls) {
finisher_queue.push_back(std::make_pair(i, 0));
}
if (logger)
logger->inc(l_finisher_queue_len, ls.size());
}
ls.clear();
}
void queue(std::deque<Context*>& ls) {
{
std::unique_lock ul(finisher_lock);
if (finisher_queue.empty()) {
finisher_cond.notify_all();
}
for (auto i : ls) {
finisher_queue.push_back(std::make_pair(i, 0));
}
if (logger)
logger->inc(l_finisher_queue_len, ls.size());
}
ls.clear();
}
void queue(std::vector<Context*>& ls) {
{
std::unique_lock ul(finisher_lock);
if (finisher_queue.empty()) {
finisher_cond.notify_all();
}
for (auto i : ls) {
finisher_queue.push_back(std::make_pair(i, 0));
}
if (logger)
logger->inc(l_finisher_queue_len, ls.size());
}
ls.clear();
}
/// Start the worker thread.
void start();
/** @brief Stop the worker thread.
*
* Does not wait until all outstanding contexts are completed.
* To ensure that everything finishes, you should first shut down
* all sources that can add contexts to this finisher and call
* wait_for_empty() before calling stop(). */
void stop();
/** @brief Blocks until the finisher has nothing left to process.
* This function will also return when a concurrent call to stop()
* finishes, but this class should never be used in this way. */
void wait_for_empty();
/// Construct an anonymous Finisher.
/// Anonymous finishers do not log their queue length.
explicit Finisher(CephContext *cct_) :
cct(cct_), finisher_lock(ceph::make_mutex("Finisher::finisher_lock")),
finisher_stop(false), finisher_running(false), finisher_empty_wait(false),
thread_name("fn_anonymous"), logger(0),
finisher_thread(this) {}
/// Construct a named Finisher that logs its queue length.
Finisher(CephContext *cct_, std::string name, std::string tn) :
cct(cct_), finisher_lock(ceph::make_mutex("Finisher::" + name)),
finisher_stop(false), finisher_running(false), finisher_empty_wait(false),
thread_name(tn), logger(0),
finisher_thread(this) {
PerfCountersBuilder b(cct, std::string("finisher-") + name,
l_finisher_first, l_finisher_last);
b.add_u64(l_finisher_queue_len, "queue_len");
b.add_time_avg(l_finisher_complete_lat, "complete_latency");
logger = b.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
logger->set(l_finisher_queue_len, 0);
logger->set(l_finisher_complete_lat, 0);
}
~Finisher() {
if (logger && cct) {
cct->get_perfcounters_collection()->remove(logger);
delete logger;
}
}
};
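/*
 * Usage sketch (illustrative): completing callbacks asynchronously. MyContext
 * stands in for any Context subclass supplied by the caller, and cct for the
 * process CephContext.
 *
 *   Finisher fin(cct);
 *   fin.start();
 *   fin.queue(new MyContext(), 0); // MyContext::finish(0) runs in the worker
 *   fin.wait_for_empty();
 *   fin.stop();
 */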
/// Context that is completed asynchronously on the supplied finisher.
class C_OnFinisher : public Context {
Context *con;
Finisher *fin;
public:
C_OnFinisher(Context *c, Finisher *f) : con(c), fin(f) {
ceph_assert(fin != NULL);
ceph_assert(con != NULL);
}
~C_OnFinisher() override {
if (con != nullptr) {
delete con;
con = nullptr;
}
}
void finish(int r) override {
fin->queue(con, r);
con = nullptr;
}
};
class ContextQueue {
std::list<Context *> q;
std::mutex q_mutex;
ceph::mutex& mutex;
ceph::condition_variable& cond;
std::atomic_bool q_empty = true;
public:
ContextQueue(ceph::mutex& mut,
ceph::condition_variable& con)
: mutex(mut), cond(con) {}
void queue(std::list<Context *>& ls) {
bool was_empty = false;
{
std::scoped_lock l(q_mutex);
if (q.empty()) {
q.swap(ls);
was_empty = true;
} else {
q.insert(q.end(), ls.begin(), ls.end());
}
q_empty = q.empty();
}
if (was_empty) {
std::scoped_lock l{mutex};
cond.notify_all();
}
ls.clear();
}
void move_to(std::list<Context *>& ls) {
ls.clear();
std::scoped_lock l(q_mutex);
if (!q.empty()) {
q.swap(ls);
}
q_empty = true;
}
bool empty() {
return q_empty;
}
};
#endif
| 6,719 | 27 | 108 | h |
null | ceph-main/src/common/FixedCDC.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "FixedCDC.h"
void FixedCDC::calc_chunks(
const bufferlist& bl,
std::vector<std::pair<uint64_t, uint64_t>> *chunks) const
{
size_t len = bl.length();
if (!len) {
return;
}
for (size_t pos = 0; pos < len; pos += chunk_size) {
chunks->push_back(std::pair<uint64_t,uint64_t>(pos, std::min(chunk_size,
len - pos)));
}
}
| 464 | 21.142857 | 76 | cc |
null | ceph-main/src/common/FixedCDC.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "CDC.h"
class FixedCDC : public CDC {
private:
size_t chunk_size;
public:
FixedCDC(int target = 18, int window_bits = 0) {
set_target_bits(target, window_bits);
};
void set_target_bits(int target, int window_bits) override {
chunk_size = 1ul << target;
}
void calc_chunks(
const bufferlist& bl,
std::vector<std::pair<uint64_t, uint64_t>> *chunks) const override;
};
| 519 | 20.666667 | 71 | h |
null | ceph-main/src/common/Formatter.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#define LARGE_SIZE 1024
#include "HTMLFormatter.h"
#include "common/escape.h"
#include "include/buffer.h"
#include <fmt/format.h>
#include <algorithm>
#include <set>
#include <limits>
// -----------------------
namespace ceph {
std::string
fixed_u_to_string(uint64_t num, int scale)
{
std::ostringstream t;
t.fill('0');
t.width(scale + 1);
t << num;
int len = t.str().size();
return t.str().substr(0,len - scale) + "." + t.str().substr(len - scale);
}
std::string
fixed_to_string(int64_t num, int scale)
{
std::ostringstream t;
bool neg = num < 0;
if (neg) num = -num;
t.fill('0');
t.width(scale + 1);
t << num;
int len = t.str().size();
return (neg ? "-" : "") + t.str().substr(0,len - scale) + "." + t.str().substr(len - scale);
}
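// Examples (illustrative): fixed_u_to_string(123456, 3) yields "123.456";
// fixed_to_string(-5, 2) yields "-0.05".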
/*
* FormatterAttrs(const char *attr, ...)
*
* Requires a list of attrs followed by NULL. The attrs should be char *
* pairs, first one is the name, second one is the value. E.g.,
*
* FormatterAttrs("name1", "value1", "name2", "value2", NULL);
*/
FormatterAttrs::FormatterAttrs(const char *attr, ...)
{
const char *s = attr;
va_list ap;
va_start(ap, attr);
do {
const char *val = va_arg(ap, char *);
if (!val)
break;
attrs.push_back(make_pair(std::string(s), std::string(val)));
s = va_arg(ap, char *);
} while (s);
va_end(ap);
}
void Formatter::write_bin_data(const char*, int){}
Formatter::Formatter() { }
Formatter::~Formatter() { }
Formatter *Formatter::create(std::string_view type,
std::string_view default_type,
std::string_view fallback)
{
std::string_view mytype(type);
if (mytype.empty()) {
mytype = default_type;
}
if (mytype == "json")
return new JSONFormatter(false);
else if (mytype == "json-pretty")
return new JSONFormatter(true);
else if (mytype == "xml")
return new XMLFormatter(false);
else if (mytype == "xml-pretty")
return new XMLFormatter(true);
else if (mytype == "table")
return new TableFormatter();
else if (mytype == "table-kv")
return new TableFormatter(true);
else if (mytype == "html")
return new HTMLFormatter(false);
else if (mytype == "html-pretty")
return new HTMLFormatter(true);
else if (fallback != "")
return create(fallback, "", "");
else
return (Formatter *) NULL;
}
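/*
 * Usage sketch (illustrative): building and emitting a small JSON document;
 * std::cout stands in for any std::ostream.
 *
 *   std::unique_ptr<Formatter> f(Formatter::create("json-pretty"));
 *   f->open_object_section("status");
 *   f->dump_int("count", 3);
 *   f->close_section();
 *   f->flush(std::cout);
 */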
void Formatter::flush(bufferlist &bl)
{
std::stringstream os;
flush(os);
bl.append(os.str());
}
void Formatter::dump_format(std::string_view name, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
dump_format_va(name, NULL, true, fmt, ap);
va_end(ap);
}
void Formatter::dump_format_ns(std::string_view name, const char *ns, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
dump_format_va(name, ns, true, fmt, ap);
va_end(ap);
}
void Formatter::dump_format_unquoted(std::string_view name, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
dump_format_va(name, NULL, false, fmt, ap);
va_end(ap);
}
// -----------------------
JSONFormatter::JSONFormatter(bool p)
: m_pretty(p), m_is_pending_string(false)
{
reset();
}
void JSONFormatter::flush(std::ostream& os)
{
finish_pending_string();
os << m_ss.str();
if (m_line_break_enabled)
os << "\n";
m_ss.clear();
m_ss.str("");
}
void JSONFormatter::reset()
{
m_stack.clear();
m_ss.clear();
m_ss.str("");
m_pending_string.clear();
m_pending_string.str("");
}
void JSONFormatter::print_comma(json_formatter_stack_entry_d& entry)
{
if (entry.size) {
if (m_pretty) {
m_ss << ",\n";
for (unsigned i = 1; i < m_stack.size(); i++)
m_ss << " ";
} else {
m_ss << ",";
}
} else if (m_pretty) {
m_ss << "\n";
for (unsigned i = 1; i < m_stack.size(); i++)
m_ss << " ";
}
if (m_pretty && entry.is_array)
m_ss << " ";
}
void JSONFormatter::print_quoted_string(std::string_view s)
{
m_ss << '\"' << json_stream_escaper(s) << '\"';
}
void JSONFormatter::print_name(std::string_view name)
{
finish_pending_string();
if (m_stack.empty())
return;
struct json_formatter_stack_entry_d& entry = m_stack.back();
print_comma(entry);
if (!entry.is_array) {
if (m_pretty) {
m_ss << " ";
}
m_ss << "\"" << name << "\"";
if (m_pretty)
m_ss << ": ";
else
m_ss << ':';
}
++entry.size;
}
void JSONFormatter::open_section(std::string_view name, const char *ns, bool is_array)
{
if (handle_open_section(name, ns, is_array)) {
return;
}
if (ns) {
std::ostringstream oss;
oss << name << " " << ns;
print_name(oss.str().c_str());
} else {
print_name(name);
}
if (is_array)
m_ss << '[';
else
m_ss << '{';
json_formatter_stack_entry_d n;
n.is_array = is_array;
m_stack.push_back(n);
}
void JSONFormatter::open_array_section(std::string_view name)
{
open_section(name, nullptr, true);
}
void JSONFormatter::open_array_section_in_ns(std::string_view name, const char *ns)
{
open_section(name, ns, true);
}
void JSONFormatter::open_object_section(std::string_view name)
{
open_section(name, nullptr, false);
}
void JSONFormatter::open_object_section_in_ns(std::string_view name, const char *ns)
{
open_section(name, ns, false);
}
void JSONFormatter::close_section()
{
if (handle_close_section()) {
return;
}
ceph_assert(!m_stack.empty());
finish_pending_string();
struct json_formatter_stack_entry_d& entry = m_stack.back();
if (m_pretty && entry.size) {
m_ss << "\n";
for (unsigned i = 1; i < m_stack.size(); i++)
m_ss << " ";
}
m_ss << (entry.is_array ? ']' : '}');
m_stack.pop_back();
if (m_pretty && m_stack.empty())
m_ss << "\n";
}
void JSONFormatter::finish_pending_string()
{
if (m_is_pending_string) {
m_is_pending_string = false;
add_value(m_pending_name.c_str(), m_pending_string.str(), true);
m_pending_string.str("");
}
}
template <class T>
void JSONFormatter::add_value(std::string_view name, T val)
{
std::stringstream ss;
ss.precision(std::numeric_limits<T>::max_digits10);
ss << val;
add_value(name, ss.str(), false);
}
void JSONFormatter::add_value(std::string_view name, std::string_view val, bool quoted)
{
if (handle_value(name, val, quoted)) {
return;
}
print_name(name);
if (!quoted) {
m_ss << val;
} else {
print_quoted_string(val);
}
}
void JSONFormatter::dump_unsigned(std::string_view name, uint64_t u)
{
add_value(name, u);
}
void JSONFormatter::dump_int(std::string_view name, int64_t s)
{
add_value(name, s);
}
void JSONFormatter::dump_float(std::string_view name, double d)
{
add_value(name, d);
}
void JSONFormatter::dump_string(std::string_view name, std::string_view s)
{
add_value(name, s, true);
}
std::ostream& JSONFormatter::dump_stream(std::string_view name)
{
finish_pending_string();
m_pending_name = name;
m_is_pending_string = true;
return m_pending_string;
}
void JSONFormatter::dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap)
{
char buf[LARGE_SIZE];
vsnprintf(buf, LARGE_SIZE, fmt, ap);
add_value(name, buf, quoted);
}
int JSONFormatter::get_len() const
{
return m_ss.str().size();
}
void JSONFormatter::write_raw_data(const char *data)
{
m_ss << data;
}
const char *XMLFormatter::XML_1_DTD =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>";
XMLFormatter::XMLFormatter(bool pretty, bool lowercased, bool underscored)
: m_pretty(pretty),
m_lowercased(lowercased),
m_underscored(underscored)
{
reset();
}
void XMLFormatter::flush(std::ostream& os)
{
finish_pending_string();
std::string m_ss_str = m_ss.str();
os << m_ss_str;
/* There is a small catch here. If the rest of the formatter had NO output,
* we should NOT output a newline. This primarily triggers on HTTP redirects */
if (m_pretty && !m_ss_str.empty())
os << "\n";
else if (m_line_break_enabled)
os << "\n";
m_ss.clear();
m_ss.str("");
}
void XMLFormatter::reset()
{
m_ss.clear();
m_ss.str("");
m_pending_string.clear();
m_pending_string.str("");
m_sections.clear();
m_pending_string_name.clear();
m_header_done = false;
}
void XMLFormatter::output_header()
{
if(!m_header_done) {
m_header_done = true;
write_raw_data(XMLFormatter::XML_1_DTD);
if (m_pretty)
m_ss << "\n";
}
}
void XMLFormatter::output_footer()
{
while(!m_sections.empty()) {
close_section();
}
}
void XMLFormatter::open_object_section(std::string_view name)
{
open_section_in_ns(name, NULL, NULL);
}
void XMLFormatter::open_object_section_with_attrs(std::string_view name, const FormatterAttrs& attrs)
{
open_section_in_ns(name, NULL, &attrs);
}
void XMLFormatter::open_object_section_in_ns(std::string_view name, const char *ns)
{
open_section_in_ns(name, ns, NULL);
}
void XMLFormatter::open_array_section(std::string_view name)
{
open_section_in_ns(name, NULL, NULL);
}
void XMLFormatter::open_array_section_with_attrs(std::string_view name, const FormatterAttrs& attrs)
{
open_section_in_ns(name, NULL, &attrs);
}
void XMLFormatter::open_array_section_in_ns(std::string_view name, const char *ns)
{
open_section_in_ns(name, ns, NULL);
}
void XMLFormatter::close_section()
{
ceph_assert(!m_sections.empty());
finish_pending_string();
std::string section = m_sections.back();
std::transform(section.begin(), section.end(), section.begin(),
[this](char c) { return this->to_lower_underscore(c); });
m_sections.pop_back();
print_spaces();
m_ss << "</" << section << ">";
if (m_pretty)
m_ss << "\n";
}
template <class T>
void XMLFormatter::add_value(std::string_view name, T val)
{
std::string e(name);
std::transform(e.begin(), e.end(), e.begin(),
[this](char c) { return this->to_lower_underscore(c); });
print_spaces();
m_ss.precision(std::numeric_limits<T>::max_digits10);
m_ss << "<" << e << ">" << val << "</" << e << ">";
if (m_pretty)
m_ss << "\n";
}
void XMLFormatter::dump_unsigned(std::string_view name, uint64_t u)
{
add_value(name, u);
}
void XMLFormatter::dump_int(std::string_view name, int64_t s)
{
add_value(name, s);
}
void XMLFormatter::dump_float(std::string_view name, double d)
{
add_value(name, d);
}
void XMLFormatter::dump_string(std::string_view name, std::string_view s)
{
std::string e(name);
std::transform(e.begin(), e.end(), e.begin(),
[this](char c) { return this->to_lower_underscore(c); });
print_spaces();
m_ss << "<" << e << ">" << xml_stream_escaper(s) << "</" << e << ">";
if (m_pretty)
m_ss << "\n";
}
void XMLFormatter::dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs)
{
std::string e(name);
std::transform(e.begin(), e.end(), e.begin(),
[this](char c) { return this->to_lower_underscore(c); });
std::string attrs_str;
get_attrs_str(&attrs, attrs_str);
print_spaces();
m_ss << "<" << e << attrs_str << ">" << xml_stream_escaper(s) << "</" << e << ">";
if (m_pretty)
m_ss << "\n";
}
std::ostream& XMLFormatter::dump_stream(std::string_view name)
{
print_spaces();
m_pending_string_name = name;
m_ss << "<" << m_pending_string_name << ">";
return m_pending_string;
}
void XMLFormatter::dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap)
{
  char buf[LARGE_SIZE];
  size_t len = vsnprintf(buf, LARGE_SIZE, fmt, ap);
  if (len >= LARGE_SIZE) {
    len = LARGE_SIZE - 1; // vsnprintf truncated; only the written bytes are valid
  }
std::string e(name);
std::transform(e.begin(), e.end(), e.begin(),
[this](char c) { return this->to_lower_underscore(c); });
print_spaces();
if (ns) {
m_ss << "<" << e << " xmlns=\"" << ns << "\">" << xml_stream_escaper(std::string_view(buf, len)) << "</" << e << ">";
} else {
m_ss << "<" << e << ">" << xml_stream_escaper(std::string_view(buf, len)) << "</" << e << ">";
}
if (m_pretty)
m_ss << "\n";
}
int XMLFormatter::get_len() const
{
return m_ss.str().size();
}
void XMLFormatter::write_raw_data(const char *data)
{
m_ss << data;
}
void XMLFormatter::write_bin_data(const char* buff, int buf_len)
{
std::stringbuf *pbuf = m_ss.rdbuf();
pbuf->sputn(buff, buf_len);
m_ss.seekg(buf_len);
}
void XMLFormatter::get_attrs_str(const FormatterAttrs *attrs, std::string& attrs_str)
{
std::stringstream attrs_ss;
for (std::list<std::pair<std::string, std::string> >::const_iterator iter = attrs->attrs.begin();
iter != attrs->attrs.end(); ++iter) {
std::pair<std::string, std::string> p = *iter;
attrs_ss << " " << p.first << "=" << "\"" << p.second << "\"";
}
attrs_str = attrs_ss.str();
}
void XMLFormatter::open_section_in_ns(std::string_view name, const char *ns, const FormatterAttrs *attrs)
{
print_spaces();
std::string attrs_str;
if (attrs) {
get_attrs_str(attrs, attrs_str);
}
std::string e(name);
std::transform(e.begin(), e.end(), e.begin(),
[this](char c) { return this->to_lower_underscore(c); });
if (ns) {
m_ss << "<" << e << attrs_str << " xmlns=\"" << ns << "\">";
} else {
m_ss << "<" << e << attrs_str << ">";
}
if (m_pretty)
m_ss << "\n";
m_sections.push_back(std::string(name));
}
void XMLFormatter::finish_pending_string()
{
if (!m_pending_string_name.empty()) {
m_ss << xml_stream_escaper(m_pending_string.str())
<< "</" << m_pending_string_name << ">";
m_pending_string_name.clear();
m_pending_string.str(std::string());
if (m_pretty) {
m_ss << "\n";
}
}
}
void XMLFormatter::print_spaces()
{
finish_pending_string();
if (m_pretty) {
std::string spaces(m_sections.size(), ' ');
m_ss << spaces;
}
}
char XMLFormatter::to_lower_underscore(char c) const
{
if (m_underscored && c == ' ') {
return '_';
} else if (m_lowercased) {
return std::tolower(c);
}
return c;
}
TableFormatter::TableFormatter(bool keyval) : m_keyval(keyval)
{
reset();
}
void TableFormatter::flush(std::ostream& os)
{
finish_pending_string();
std::vector<size_t> column_size = m_column_size;
std::vector<std::string> column_name = m_column_name;
std::set<int> need_header_set;
// auto-sizing columns
for (size_t i = 0; i < m_vec.size(); i++) {
for (size_t j = 0; j < m_vec[i].size(); j++) {
column_size.resize(m_vec[i].size());
column_name.resize(m_vec[i].size());
if (i > 0) {
if (m_vec[i - 1][j] != m_vec[i][j]) {
          // changing row labels requires showing the header
need_header_set.insert(i);
column_name[i] = m_vec[i][j].first;
}
} else {
column_name[i] = m_vec[i][j].first;
}
if (m_vec[i][j].second.length() > column_size[j])
column_size[j] = m_vec[i][j].second.length();
if (m_vec[i][j].first.length() > column_size[j])
column_size[j] = m_vec[i][j].first.length();
}
}
bool need_header = false;
if ((column_size.size() == m_column_size.size())) {
for (size_t i = 0; i < column_size.size(); i++) {
if (column_size[i] != m_column_size[i]) {
need_header = true;
break;
}
}
} else {
need_header = true;
}
if (need_header) {
// first row always needs a header if there wasn't one before
need_header_set.insert(0);
}
m_column_size = column_size;
for (size_t i = 0; i < m_vec.size(); i++) {
if (i == 0) {
if (need_header_set.count(i)) {
// print the header
if (!m_keyval) {
os << "+";
for (size_t j = 0; j < m_vec[i].size(); j++) {
for (size_t v = 0; v < m_column_size[j] + 3; v++)
os << "-";
os << "+";
}
os << "\n";
os << "|";
for (size_t j = 0; j < m_vec[i].size(); j++) {
os << fmt::format(" {:<{}}|",
m_vec[i][j].first, m_column_size[j] + 2);
}
os << "\n";
os << "+";
for (size_t j = 0; j < m_vec[i].size(); j++) {
for (size_t v = 0; v < m_column_size[j] + 3; v++)
os << "-";
os << "+";
}
os << "\n";
}
}
}
// print body
if (!m_keyval)
os << "|";
for (size_t j = 0; j < m_vec[i].size(); j++) {
if (!m_keyval)
os << " ";
if (m_keyval) {
os << "key::";
os << m_vec[i][j].first;
os << "=";
os << "\"";
os << m_vec[i][j].second;
os << "\" ";
} else {
os << fmt::format("{:<{}}|", m_vec[i][j].second, m_column_size[j] + 2);
}
}
os << "\n";
if (!m_keyval) {
if (i == (m_vec.size() - 1)) {
// print trailer
os << "+";
for (size_t j = 0; j < m_vec[i].size(); j++) {
for (size_t v = 0; v < m_column_size[j] + 3; v++)
os << "-";
os << "+";
}
os << "\n";
}
}
m_vec[i].clear();
}
m_vec.clear();
}
void TableFormatter::reset()
{
m_ss.clear();
m_ss.str("");
m_section_cnt.clear();
m_column_size.clear();
m_section_open = 0;
}
void TableFormatter::open_object_section(std::string_view name)
{
open_section_in_ns(name, NULL, NULL);
}
void TableFormatter::open_object_section_with_attrs(std::string_view name, const FormatterAttrs& attrs)
{
open_section_in_ns(name, NULL, NULL);
}
void TableFormatter::open_object_section_in_ns(std::string_view name, const char *ns)
{
open_section_in_ns(name, NULL, NULL);
}
void TableFormatter::open_array_section(std::string_view name)
{
open_section_in_ns(name, NULL, NULL);
}
void TableFormatter::open_array_section_with_attrs(std::string_view name, const FormatterAttrs& attrs)
{
open_section_in_ns(name, NULL, NULL);
}
void TableFormatter::open_array_section_in_ns(std::string_view name, const char *ns)
{
open_section_in_ns(name, NULL, NULL);
}
void TableFormatter::open_section_in_ns(std::string_view name, const char *ns, const FormatterAttrs *attrs)
{
m_section.push_back(std::string(name));
m_section_open++;
}
void TableFormatter::close_section()
{
//
m_section_open--;
if (m_section.size()) {
m_section_cnt[m_section.back()] = 0;
m_section.pop_back();
}
}
size_t TableFormatter::m_vec_index(std::string_view name)
{
std::string key(name);
size_t i = m_vec.size();
if (i)
i--;
// make sure there are vectors to push back key/val pairs
if (!m_vec.size())
m_vec.resize(1);
if (m_vec.size()) {
if (m_vec[i].size()) {
if (m_vec[i][0].first == key) {
        // start a new row if a key is repeated
m_vec.resize(m_vec.size() + 1);
i++;
}
}
}
return i;
}
std::string TableFormatter::get_section_name(std::string_view name)
{
std::string t_name{name};
for (size_t i = 0; i < m_section.size(); i++) {
t_name.insert(0, ":");
t_name.insert(0, m_section[i]);
}
if (m_section_open) {
std::stringstream lss;
lss << t_name;
lss << "[";
lss << m_section_cnt[t_name]++;
lss << "]";
return lss.str();
} else {
return t_name;
}
}
template <class T>
void TableFormatter::add_value(std::string_view name, T val) {
finish_pending_string();
size_t i = m_vec_index(name);
m_ss.precision(std::numeric_limits<double>::max_digits10);
m_ss << val;
m_vec[i].push_back(std::make_pair(get_section_name(name), m_ss.str()));
m_ss.clear();
m_ss.str("");
}
void TableFormatter::dump_unsigned(std::string_view name, uint64_t u)
{
add_value(name, u);
}
void TableFormatter::dump_int(std::string_view name, int64_t s)
{
add_value(name, s);
}
void TableFormatter::dump_float(std::string_view name, double d)
{
add_value(name, d);
}
void TableFormatter::dump_string(std::string_view name, std::string_view s)
{
finish_pending_string();
size_t i = m_vec_index(name);
m_ss << s;
m_vec[i].push_back(std::make_pair(get_section_name(name), m_ss.str()));
m_ss.clear();
m_ss.str("");
}
void TableFormatter::dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs)
{
finish_pending_string();
size_t i = m_vec_index(name);
std::string attrs_str;
get_attrs_str(&attrs, attrs_str);
m_ss << attrs_str << s;
m_vec[i].push_back(std::make_pair(get_section_name(name), m_ss.str()));
m_ss.clear();
m_ss.str("");
}
void TableFormatter::dump_format_va(std::string_view name,
const char *ns, bool quoted,
const char *fmt, va_list ap)
{
finish_pending_string();
char buf[LARGE_SIZE];
vsnprintf(buf, LARGE_SIZE, fmt, ap);
size_t i = m_vec_index(name);
if (ns) {
m_ss << ns << "." << buf;
} else
m_ss << buf;
m_vec[i].push_back(std::make_pair(get_section_name(name), m_ss.str()));
m_ss.clear();
m_ss.str("");
}
std::ostream& TableFormatter::dump_stream(std::string_view name)
{
finish_pending_string();
// we don't support this
m_pending_name = name;
return m_ss;
}
int TableFormatter::get_len() const
{
// we don't know the size until flush is called
return 0;
}
void TableFormatter::write_raw_data(const char *data) {
// not supported
}
void TableFormatter::get_attrs_str(const FormatterAttrs *attrs, std::string& attrs_str)
{
std::stringstream attrs_ss;
for (std::list<std::pair<std::string, std::string> >::const_iterator iter = attrs->attrs.begin();
iter != attrs->attrs.end(); ++iter) {
std::pair<std::string, std::string> p = *iter;
attrs_ss << " " << p.first << "=" << "\"" << p.second << "\"";
}
attrs_str = attrs_ss.str();
}
void TableFormatter::finish_pending_string()
{
if (m_pending_name.length()) {
std::string ss = m_ss.str();
m_ss.clear();
m_ss.str("");
std::string pending_name = m_pending_name;
m_pending_name = "";
dump_string(pending_name.c_str(), ss);
}
}
}
| 22,116 | 22.086639 | 121 | cc |
null | ceph-main/src/common/Formatter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_FORMATTER_H
#define CEPH_FORMATTER_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include <deque>
#include <list>
#include <memory>
#include <vector>
#include <stdarg.h>
#include <sstream>
#include <map>
namespace ceph {
struct FormatterAttrs {
std::list< std::pair<std::string, std::string> > attrs;
FormatterAttrs(const char *attr, ...);
};
class Formatter {
public:
class ObjectSection {
Formatter& formatter;
public:
ObjectSection(Formatter& f, std::string_view name) : formatter(f) {
formatter.open_object_section(name);
}
ObjectSection(Formatter& f, std::string_view name, const char *ns) : formatter(f) {
formatter.open_object_section_in_ns(name, ns);
}
~ObjectSection() {
formatter.close_section();
}
};
class ArraySection {
Formatter& formatter;
public:
ArraySection(Formatter& f, std::string_view name) : formatter(f) {
formatter.open_array_section(name);
}
ArraySection(Formatter& f, std::string_view name, const char *ns) : formatter(f) {
formatter.open_array_section_in_ns(name, ns);
}
~ArraySection() {
formatter.close_section();
}
};
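  /*
   * Usage sketch (illustrative): the RAII helpers above close their section
   * automatically, keeping open/close calls balanced even on early returns.
   *
   *   void dump(Formatter *f) {
   *     Formatter::ObjectSection s(*f, "stats");
   *     f->dump_unsigned("count", 42);
   *   } // "stats" section closed when s goes out of scope
   */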
static Formatter *create(std::string_view type,
std::string_view default_type,
std::string_view fallback);
static Formatter *create(std::string_view type,
std::string_view default_type) {
return create(type, default_type, "");
}
static Formatter *create(std::string_view type) {
return create(type, "json-pretty", "");
}
template <typename... Params>
static std::unique_ptr<Formatter> create_unique(Params &&...params)
{
return std::unique_ptr<Formatter>(
Formatter::create(std::forward<Params>(params)...));
}
Formatter();
virtual ~Formatter();
virtual void enable_line_break() = 0;
virtual void flush(std::ostream& os) = 0;
void flush(bufferlist &bl);
virtual void reset() = 0;
virtual void set_status(int status, const char* status_name) = 0;
virtual void output_header() = 0;
virtual void output_footer() = 0;
virtual void open_array_section(std::string_view name) = 0;
virtual void open_array_section_in_ns(std::string_view name, const char *ns) = 0;
virtual void open_object_section(std::string_view name) = 0;
virtual void open_object_section_in_ns(std::string_view name, const char *ns) = 0;
virtual void close_section() = 0;
virtual void dump_unsigned(std::string_view name, uint64_t u) = 0;
virtual void dump_int(std::string_view name, int64_t s) = 0;
virtual void dump_float(std::string_view name, double d) = 0;
virtual void dump_string(std::string_view name, std::string_view s) = 0;
virtual void dump_bool(std::string_view name, bool b)
{
dump_format_unquoted(name, "%s", (b ? "true" : "false"));
}
template<typename T>
void dump_object(std::string_view name, const T& foo) {
open_object_section(name);
foo.dump(this);
close_section();
}
virtual std::ostream& dump_stream(std::string_view name) = 0;
virtual void dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap) = 0;
virtual void dump_format(std::string_view name, const char *fmt, ...);
virtual void dump_format_ns(std::string_view name, const char *ns, const char *fmt, ...);
virtual void dump_format_unquoted(std::string_view name, const char *fmt, ...);
virtual int get_len() const = 0;
virtual void write_raw_data(const char *data) = 0;
/* with attrs */
virtual void open_array_section_with_attrs(std::string_view name, const FormatterAttrs& attrs)
{
open_array_section(name);
}
virtual void open_object_section_with_attrs(std::string_view name, const FormatterAttrs& attrs)
{
open_object_section(name);
}
virtual void dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs)
{
dump_string(name, s);
}
virtual void *get_external_feature_handler(const std::string& feature) {
return nullptr;
}
virtual void write_bin_data(const char* buff, int buf_len);
};
class copyable_sstream : public std::stringstream {
public:
copyable_sstream() {}
copyable_sstream(const copyable_sstream& rhs) {
str(rhs.str());
}
copyable_sstream& operator=(const copyable_sstream& rhs) {
str(rhs.str());
return *this;
}
};
class JSONFormatter : public Formatter {
public:
explicit JSONFormatter(bool p = false);
void set_status(int status, const char* status_name) override {};
void output_header() override {};
void output_footer() override {};
void enable_line_break() override { m_line_break_enabled = true; }
void flush(std::ostream& os) override;
using Formatter::flush; // don't hide Formatter::flush(bufferlist &bl)
void reset() override;
void open_array_section(std::string_view name) override;
void open_array_section_in_ns(std::string_view name, const char *ns) override;
void open_object_section(std::string_view name) override;
void open_object_section_in_ns(std::string_view name, const char *ns) override;
void close_section() override;
void dump_unsigned(std::string_view name, uint64_t u) override;
void dump_int(std::string_view name, int64_t s) override;
void dump_float(std::string_view name, double d) override;
void dump_string(std::string_view name, std::string_view s) override;
std::ostream& dump_stream(std::string_view name) override;
void dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap) override;
int get_len() const override;
void write_raw_data(const char *data) override;
protected:
virtual bool handle_value(std::string_view name, std::string_view s, bool quoted) {
return false; /* is handling done? */
}
virtual bool handle_open_section(std::string_view name, const char *ns, bool is_array) {
return false; /* is handling done? */
}
virtual bool handle_close_section() {
return false; /* is handling done? */
}
int stack_size() { return m_stack.size(); }
private:
struct json_formatter_stack_entry_d {
int size;
bool is_array;
json_formatter_stack_entry_d() : size(0), is_array(false) { }
};
bool m_pretty;
void open_section(std::string_view name, const char *ns, bool is_array);
void print_quoted_string(std::string_view s);
void print_name(std::string_view name);
void print_comma(json_formatter_stack_entry_d& entry);
void finish_pending_string();
template <class T>
void add_value(std::string_view name, T val);
void add_value(std::string_view name, std::string_view val, bool quoted);
copyable_sstream m_ss;
copyable_sstream m_pending_string;
std::string m_pending_name;
std::list<json_formatter_stack_entry_d> m_stack;
bool m_is_pending_string;
bool m_line_break_enabled = false;
};
template <class T>
void add_value(std::string_view name, T val);
class XMLFormatter : public Formatter {
public:
static const char *XML_1_DTD;
XMLFormatter(bool pretty = false, bool lowercased = false, bool underscored = true);
void set_status(int status, const char* status_name) override {}
void output_header() override;
void output_footer() override;
void enable_line_break() override { m_line_break_enabled = true; }
void flush(std::ostream& os) override;
using Formatter::flush; // don't hide Formatter::flush(bufferlist &bl)
void reset() override;
void open_array_section(std::string_view name) override;
void open_array_section_in_ns(std::string_view name, const char *ns) override;
void open_object_section(std::string_view name) override;
void open_object_section_in_ns(std::string_view name, const char *ns) override;
void close_section() override;
void dump_unsigned(std::string_view name, uint64_t u) override;
void dump_int(std::string_view name, int64_t s) override;
void dump_float(std::string_view name, double d) override;
void dump_string(std::string_view name, std::string_view s) override;
std::ostream& dump_stream(std::string_view name) override;
void dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap) override;
int get_len() const override;
void write_raw_data(const char *data) override;
void write_bin_data(const char* buff, int len) override;
/* with attrs */
void open_array_section_with_attrs(std::string_view name, const FormatterAttrs& attrs) override;
void open_object_section_with_attrs(std::string_view name, const FormatterAttrs& attrs) override;
void dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs) override;
protected:
void open_section_in_ns(std::string_view name, const char *ns, const FormatterAttrs *attrs);
void finish_pending_string();
void print_spaces();
void get_attrs_str(const FormatterAttrs *attrs, std::string& attrs_str);
char to_lower_underscore(char c) const;
std::stringstream m_ss, m_pending_string;
std::deque<std::string> m_sections;
const bool m_pretty;
const bool m_lowercased;
const bool m_underscored;
std::string m_pending_string_name;
bool m_header_done;
bool m_line_break_enabled = false;
private:
template <class T>
void add_value(std::string_view name, T val);
};
class TableFormatter : public Formatter {
public:
explicit TableFormatter(bool keyval = false);
void set_status(int status, const char* status_name) override {};
void output_header() override {};
void output_footer() override {};
void enable_line_break() override {};
void flush(std::ostream& os) override;
using Formatter::flush; // don't hide Formatter::flush(bufferlist &bl)
void reset() override;
void open_array_section(std::string_view name) override;
void open_array_section_in_ns(std::string_view name, const char *ns) override;
void open_object_section(std::string_view name) override;
void open_object_section_in_ns(std::string_view name, const char *ns) override;
void open_array_section_with_attrs(std::string_view name, const FormatterAttrs& attrs) override;
void open_object_section_with_attrs(std::string_view name, const FormatterAttrs& attrs) override;
void close_section() override;
void dump_unsigned(std::string_view name, uint64_t u) override;
void dump_int(std::string_view name, int64_t s) override;
void dump_float(std::string_view name, double d) override;
void dump_string(std::string_view name, std::string_view s) override;
void dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap) override;
void dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs) override;
std::ostream& dump_stream(std::string_view name) override;
int get_len() const override;
void write_raw_data(const char *data) override;
void get_attrs_str(const FormatterAttrs *attrs, std::string& attrs_str);
private:
template <class T>
void add_value(std::string_view name, T val);
void open_section_in_ns(std::string_view name, const char *ns, const FormatterAttrs *attrs);
std::vector< std::vector<std::pair<std::string, std::string> > > m_vec;
std::stringstream m_ss;
size_t m_vec_index(std::string_view name);
std::string get_section_name(std::string_view name);
void finish_pending_string();
std::string m_pending_name;
bool m_keyval;
int m_section_open;
std::vector< std::string > m_section;
std::map<std::string, int> m_section_cnt;
std::vector<size_t> m_column_size;
std::vector< std::string > m_column_name;
};
std::string fixed_to_string(int64_t num, int scale);
std::string fixed_u_to_string(uint64_t num, int scale);
}
#endif
| 12,259 | 37.074534 | 117 | h |
null | ceph-main/src/common/Graylog.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Graylog.h"
#include "common/Formatter.h"
#include "common/LogEntry.h"
#include "log/Entry.h"
#include "log/SubsystemMap.h"
using std::cerr;
namespace ceph::logging {
Graylog::Graylog(const SubsystemMap * const s, const std::string &logger)
: m_subs(s),
m_logger(std::move(logger)),
m_ostream_compressed(std::stringstream::in |
std::stringstream::out |
std::stringstream::binary)
{
m_formatter = std::unique_ptr<Formatter>(Formatter::create("json"));
m_formatter_section = std::unique_ptr<Formatter>(Formatter::create("json"));
}
Graylog::Graylog(const std::string &logger)
: Graylog(nullptr, logger)
{}
Graylog::~Graylog()
{
}
void Graylog::set_destination(const std::string& host, int port)
{
try {
boost::asio::ip::udp::resolver resolver(m_io_service);
boost::asio::ip::udp::resolver::query query(host, std::to_string(port));
m_endpoint = *resolver.resolve(query);
m_log_dst_valid = true;
} catch (boost::system::system_error const& e) {
cerr << "Error resolving graylog destination: " << e.what() << std::endl;
m_log_dst_valid = false;
}
}
void Graylog::set_hostname(const std::string& host)
{
assert(!host.empty());
m_hostname = host;
}
void Graylog::set_fsid(const uuid_d& fsid)
{
std::vector<char> buf(40);
fsid.print(&buf[0]);
m_fsid = std::string(&buf[0]);
}
void Graylog::log_entry(const Entry& e)
{
if (m_log_dst_valid) {
auto s = e.strv();
m_formatter->open_object_section("");
m_formatter->dump_string("version", "1.1");
m_formatter->dump_string("host", m_hostname);
m_formatter->dump_string("short_message", s);
m_formatter->dump_string("_app", "ceph");
auto t = ceph::logging::log_clock::to_timeval(e.m_stamp);
m_formatter->dump_float("timestamp", t.tv_sec + (t.tv_usec / 1000000.0));
m_formatter->dump_unsigned("_thread", (uint64_t)e.m_thread);
m_formatter->dump_int("_level", e.m_prio);
if (m_subs != NULL)
m_formatter->dump_string("_subsys_name", m_subs->get_name(e.m_subsys));
m_formatter->dump_int("_subsys_id", e.m_subsys);
m_formatter->dump_string("_fsid", m_fsid);
m_formatter->dump_string("_logger", m_logger);
m_formatter->close_section();
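// Serialize the GELF document and zlib-compress it: Graylog accepts a
// zlib-compressed GELF message as a single UDP datagram.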
m_ostream_compressed.clear();
m_ostream_compressed.str("");
m_ostream.reset();
m_ostream.push(m_compressor);
m_ostream.push(m_ostream_compressed);
m_formatter->flush(m_ostream);
m_ostream << std::endl;
m_ostream.reset();
try {
boost::asio::ip::udp::socket socket(m_io_service);
socket.open(m_endpoint.protocol());
socket.send_to(boost::asio::buffer(m_ostream_compressed.str()), m_endpoint);
} catch (boost::system::system_error const& e) {
cerr << "Error sending graylog message: " << e.what() << std::endl;
}
}
}
void Graylog::log_log_entry(LogEntry const * const e)
{
if (m_log_dst_valid) {
m_formatter->open_object_section("");
m_formatter->dump_string("version", "1.1");
m_formatter->dump_string("host", m_hostname);
m_formatter->dump_string("short_message", e->msg);
m_formatter->dump_float("timestamp", e->stamp.sec() + (e->stamp.usec() / 1000000.0));
m_formatter->dump_string("_app", "ceph");
m_formatter->dump_string("name", e->name.to_str());
m_formatter_section->open_object_section("rank");
e->rank.dump(m_formatter_section.get());
m_formatter_section->close_section();
m_formatter_section->open_object_section("addrs");
e->addrs.dump(m_formatter_section.get());
m_formatter_section->close_section();
m_ostream_section.clear();
m_ostream_section.str("");
m_formatter_section->flush(m_ostream_section);
m_formatter->dump_string("_who", m_ostream_section.str());
m_formatter->dump_int("_seq", e->seq);
m_formatter->dump_string("_prio", clog_type_to_string(e->prio));
m_formatter->dump_string("_channel", e->channel);
m_formatter->dump_string("_fsid", m_fsid);
m_formatter->dump_string("_logger", m_logger);
m_formatter->close_section();
m_ostream_compressed.clear();
m_ostream_compressed.str("");
m_ostream.reset();
m_ostream.push(m_compressor);
m_ostream.push(m_ostream_compressed);
m_formatter->flush(m_ostream);
m_ostream << std::endl;
m_ostream.reset();
try {
boost::asio::ip::udp::socket socket(m_io_service);
socket.open(m_endpoint.protocol());
socket.send_to(boost::asio::buffer(m_ostream_compressed.str()), m_endpoint);
} catch (boost::system::system_error const& e) {
cerr << "Error sending graylog message: " << e.what() << std::endl;
}
}
}
} // namespace ceph::logging
| 4,802 | 29.207547 | 89 | cc |
null | ceph-main/src/common/Graylog.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef __CEPH_LOG_GRAYLOG_H
#define __CEPH_LOG_GRAYLOG_H
#include <boost/asio.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include "include/ceph_assert.h" // boost clobbers this
struct uuid_d;
class LogEntry;
namespace ceph {
class Formatter;
namespace logging {
class Entry;
class SubsystemMap;
// Graylog logging backend: Convert log data structures (LogEntry, Entry) to
// GELF (http://www.graylog2.org/resources/gelf/specification) and send it
// to a GELF UDP receiver
class Graylog
{
public:
/**
* Create Graylog with SubsystemMap. log_entry will resolve the subsystem
* id to string. Logging will not be ready until set_destination is called
* @param s SubsystemMap
* @param logger Value for key "_logger" in GELF
*/
Graylog(const SubsystemMap * const s, const std::string &logger);
/**
* Create Graylog without SubsystemMap. Logging will not be ready
* until set_destination is called
* @param logger Value for key "_logger" in GELF
*/
explicit Graylog(const std::string &logger);
virtual ~Graylog();
void set_hostname(const std::string& host);
void set_fsid(const uuid_d& fsid);
void set_destination(const std::string& host, int port);
void log_entry(const Entry& e);
void log_log_entry(LogEntry const * const e);
typedef std::shared_ptr<Graylog> Ref;
private:
SubsystemMap const * const m_subs;
bool m_log_dst_valid = false;
std::string m_hostname;
std::string m_fsid;
std::string m_logger;
boost::asio::ip::udp::endpoint m_endpoint;
boost::asio::io_service m_io_service;
std::unique_ptr<Formatter> m_formatter;
std::unique_ptr<Formatter> m_formatter_section;
std::stringstream m_ostream_section;
std::stringstream m_ostream_compressed;
boost::iostreams::filtering_ostream m_ostream;
boost::iostreams::zlib_compressor m_compressor;
};
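// Usage sketch (illustrative only -- the host name and port below are
// assumptions, not Ceph defaults):
//
//   auto gl = std::make_shared<ceph::logging::Graylog>(subs, "dout");
//   gl->set_fsid(fsid);
//   gl->set_hostname("myhost");
//   gl->set_destination("graylog.example.com", 12201);  // a GELF UDP input
//   gl->log_entry(entry);  // dropped silently until a destination resolves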
}
}
#endif
| 2,008 | 22.916667 | 76 | h |
null | ceph-main/src/common/HBHandle.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
class HBHandle {
public:
virtual void reset_tp_timeout() = 0;
virtual void suspend_tp_timeout() = 0;
virtual ~HBHandle() {}
};
| 246 | 19.583333 | 70 | h |
null | ceph-main/src/common/HTMLFormatter.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#define LARGE_SIZE 1024
#include "HTMLFormatter.h"
#include "Formatter.h"
#include <sstream>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <string.h> // for strdup
#include "common/escape.h"
// -----------------------
namespace ceph {
HTMLFormatter::HTMLFormatter(bool pretty)
: XMLFormatter(pretty), m_status(0), m_status_name(NULL)
{
}
HTMLFormatter::~HTMLFormatter()
{
if (m_status_name) {
free((void*)m_status_name);
m_status_name = NULL;
}
}
void HTMLFormatter::reset()
{
XMLFormatter::reset();
m_header_done = false;
m_status = 0;
if (m_status_name) {
free((void*)m_status_name);
m_status_name = NULL;
}
}
void HTMLFormatter::set_status(int status, const char* status_name)
{
m_status = status;
if (status_name) {
if (m_status_name) {
free((void*)m_status_name);
}
m_status_name = strdup(status_name);
}
};
void HTMLFormatter::output_header() {
if (!m_header_done) {
m_header_done = true;
char buf[16];
snprintf(buf, sizeof(buf), "%d", m_status);
std::string status_line(buf);
if (m_status_name) {
status_line += " ";
status_line += m_status_name;
}
open_object_section("html");
print_spaces();
m_ss << "<head><title>" << status_line << "</title></head>";
if (m_pretty)
m_ss << "\n";
open_object_section("body");
print_spaces();
m_ss << "<h1>" << status_line << "</h1>";
if (m_pretty)
m_ss << "\n";
open_object_section("ul");
}
}
template <typename T>
void HTMLFormatter::dump_template(std::string_view name, T arg)
{
print_spaces();
m_ss << "<li>" << name << ": " << arg << "</li>";
if (m_pretty)
m_ss << "\n";
}
void HTMLFormatter::dump_unsigned(std::string_view name, uint64_t u)
{
dump_template(name, u);
}
void HTMLFormatter::dump_int(std::string_view name, int64_t u)
{
dump_template(name, u);
}
void HTMLFormatter::dump_float(std::string_view name, double d)
{
dump_template(name, d);
}
void HTMLFormatter::dump_string(std::string_view name, std::string_view s)
{
dump_template(name, xml_stream_escaper(s));
}
void HTMLFormatter::dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs)
{
std::string e(name);
std::string attrs_str;
get_attrs_str(&attrs, attrs_str);
print_spaces();
m_ss << "<li>" << e << ": " << xml_stream_escaper(s) << attrs_str << "</li>";
if (m_pretty)
m_ss << "\n";
}
std::ostream& HTMLFormatter::dump_stream(std::string_view name)
{
print_spaces();
m_pending_string_name = "li";
m_ss << "<li>" << name << ": ";
return m_pending_string;
}
void HTMLFormatter::dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap)
{
char buf[LARGE_SIZE];
size_t len = vsnprintf(buf, LARGE_SIZE, fmt, ap);
std::string e(name);
print_spaces();
if (ns) {
m_ss << "<li xmlns=\"" << ns << "\">" << e << ": "
<< xml_stream_escaper(std::string_view(buf, len)) << "</li>";
} else {
m_ss << "<li>" << e << ": "
<< xml_stream_escaper(std::string_view(buf, len)) << "</li>";
}
if (m_pretty)
m_ss << "\n";
}
} // namespace ceph
| 3,603 | 21.666667 | 115 | cc |
null | ceph-main/src/common/HTMLFormatter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_HTML_FORMATTER_H
#define CEPH_HTML_FORMATTER_H
#include "Formatter.h"
namespace ceph {
class HTMLFormatter : public XMLFormatter {
public:
explicit HTMLFormatter(bool pretty = false);
~HTMLFormatter() override;
void reset() override;
void set_status(int status, const char* status_name) override;
void output_header() override;
void dump_unsigned(std::string_view name, uint64_t u) override;
void dump_int(std::string_view name, int64_t u) override;
void dump_float(std::string_view name, double d) override;
void dump_string(std::string_view name, std::string_view s) override;
std::ostream& dump_stream(std::string_view name) override;
void dump_format_va(std::string_view name, const char *ns, bool quoted, const char *fmt, va_list ap) override;
/* with attrs */
void dump_string_with_attrs(std::string_view name, std::string_view s, const FormatterAttrs& attrs) override;
private:
template <typename T> void dump_template(std::string_view name, T arg);
int m_status;
const char* m_status_name;
};
}
#endif
| 1,201 | 31.486486 | 114 | h |
null | ceph-main/src/common/HeartbeatMap.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <utime.h>
#include <signal.h>
#include "HeartbeatMap.h"
#include "ceph_context.h"
#include "common/errno.h"
#include "common/valgrind.h"
#include "debug.h"
#define dout_subsys ceph_subsys_heartbeatmap
#undef dout_prefix
#define dout_prefix *_dout << "heartbeat_map "
using std::chrono::duration_cast;
using std::chrono::seconds;
using std::string;
namespace ceph {
HeartbeatMap::HeartbeatMap(CephContext *cct)
: m_cct(cct),
m_unhealthy_workers(0),
m_total_workers(0)
{
}
HeartbeatMap::~HeartbeatMap()
{
ceph_assert(m_workers.empty());
}
heartbeat_handle_d *HeartbeatMap::add_worker(const string& name, pthread_t thread_id)
{
std::unique_lock locker{m_rwlock};
ldout(m_cct, 10) << "add_worker '" << name << "'" << dendl;
heartbeat_handle_d *h = new heartbeat_handle_d(name);
ANNOTATE_BENIGN_RACE_SIZED(&h->timeout, sizeof(h->timeout),
"heartbeat_handle_d timeout");
ANNOTATE_BENIGN_RACE_SIZED(&h->suicide_timeout, sizeof(h->suicide_timeout),
"heartbeat_handle_d suicide_timeout");
m_workers.push_front(h);
h->list_item = m_workers.begin();
h->thread_id = thread_id;
return h;
}
void HeartbeatMap::remove_worker(const heartbeat_handle_d *h)
{
std::unique_lock locker{m_rwlock};
ldout(m_cct, 10) << "remove_worker '" << h->name << "'" << dendl;
m_workers.erase(h->list_item);
delete h;
}
bool HeartbeatMap::_check(const heartbeat_handle_d *h, const char *who,
ceph::coarse_mono_time now)
{
bool healthy = true;
if (auto was = h->timeout.load(std::memory_order_relaxed);
!clock::is_zero(was) && was < now) {
ldout(m_cct, 1) << who << " '" << h->name << "'"
<< " had timed out after " << h->grace << dendl;
healthy = false;
}
if (auto was = h->suicide_timeout.load(std::memory_order_relaxed);
!clock::is_zero(was) && was < now) {
ldout(m_cct, 1) << who << " '" << h->name << "'"
<< " had suicide timed out after " << h->suicide_grace << dendl;
pthread_kill(h->thread_id, SIGABRT);
sleep(1);
ceph_abort_msg("hit suicide timeout");
}
return healthy;
}
void HeartbeatMap::reset_timeout(heartbeat_handle_d *h,
ceph::timespan grace,
ceph::timespan suicide_grace)
{
ldout(m_cct, 20) << "reset_timeout '" << h->name << "' grace " << grace
<< " suicide " << suicide_grace << dendl;
const auto now = clock::now();
_check(h, "reset_timeout", now);
h->timeout.store(now + grace, std::memory_order_relaxed);
h->grace = grace;
if (suicide_grace > ceph::timespan::zero()) {
h->suicide_timeout.store(now + suicide_grace, std::memory_order_relaxed);
} else {
h->suicide_timeout.store(clock::zero(), std::memory_order_relaxed);
}
h->suicide_grace = suicide_grace;
}
void HeartbeatMap::clear_timeout(heartbeat_handle_d *h)
{
ldout(m_cct, 20) << "clear_timeout '" << h->name << "'" << dendl;
auto now = clock::now();
_check(h, "clear_timeout", now);
h->timeout.store(clock::zero(), std::memory_order_relaxed);
h->suicide_timeout.store(clock::zero(), std::memory_order_relaxed);
}
bool HeartbeatMap::is_healthy()
{
int unhealthy = 0;
int total = 0;
m_rwlock.lock_shared();
auto now = ceph::coarse_mono_clock::now();
if (m_cct->_conf->heartbeat_inject_failure) {
ldout(m_cct, 0) << "is_healthy injecting failure for next " << m_cct->_conf->heartbeat_inject_failure << " seconds" << dendl;
m_inject_unhealthy_until = now + std::chrono::seconds(m_cct->_conf->heartbeat_inject_failure);
m_cct->_conf.set_val("heartbeat_inject_failure", "0");
}
bool healthy = true;
if (now < m_inject_unhealthy_until) {
auto sec = std::chrono::duration_cast<std::chrono::seconds>(m_inject_unhealthy_until - now).count();
ldout(m_cct, 0) << "is_healthy = false, injected failure for next "
<< sec << " seconds" << dendl;
healthy = false;
}
for (auto p = m_workers.begin();
p != m_workers.end();
++p) {
heartbeat_handle_d *h = *p;
if (!_check(h, "is_healthy", now)) {
healthy = false;
unhealthy++;
}
total++;
}
m_rwlock.unlock_shared();
m_unhealthy_workers = unhealthy;
m_total_workers = total;
ldout(m_cct, 20) << "is_healthy = " << (healthy ? "healthy" : "NOT HEALTHY")
<< ", total workers: " << total << ", number of unhealthy: " << unhealthy << dendl;
return healthy;
}
int HeartbeatMap::get_unhealthy_workers() const
{
return m_unhealthy_workers;
}
int HeartbeatMap::get_total_workers() const
{
return m_total_workers;
}
void HeartbeatMap::check_touch_file()
{
string path = m_cct->_conf->heartbeat_file;
if (path.length() && is_healthy()) {
int fd = ::open(path.c_str(), O_WRONLY|O_CREAT|O_CLOEXEC, 0644);
if (fd >= 0) {
::utime(path.c_str(), NULL);
::close(fd);
} else {
ldout(m_cct, 0) << "unable to touch " << path << ": "
<< cpp_strerror(errno) << dendl;
}
}
}
}
| 5,379 | 28.081081 | 129 | cc |
null | ceph-main/src/common/HeartbeatMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_HEARTBEATMAP_H
#define CEPH_HEARTBEATMAP_H
#include <list>
#include <atomic>
#include <string>
#include <pthread.h>
#include "common/ceph_time.h"
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
namespace ceph {
/*
* HeartbeatMap -
*
* Maintain a set of handles for internal subsystems to periodically
* check in with a health check and timeout. Each user can register
* and get a handle they can use to set or reset a timeout.
*
* A simple is_healthy() method checks for any users who are not within
* their grace period for a heartbeat.
*/
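/*
 * Minimal usage sketch (illustrative; the grace periods are made-up values):
 *
 *   heartbeat_handle_d *h = hbmap->add_worker("worker_thread", pthread_self());
 *   while (keep_working) {
 *     // promise to check in again within 15s; suicide after 150s of silence
 *     hbmap->reset_timeout(h, ceph::make_timespan(15), ceph::make_timespan(150));
 *     do_work();
 *   }
 *   hbmap->clear_timeout(h);
 *   hbmap->remove_worker(h);
 */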
struct heartbeat_handle_d {
const std::string name;
pthread_t thread_id = 0;
using clock = ceph::coarse_mono_clock;
using time = ceph::coarse_mono_time;
std::atomic<time> timeout = clock::zero();
std::atomic<time> suicide_timeout = clock::zero();
ceph::timespan grace = ceph::timespan::zero();
ceph::timespan suicide_grace = ceph::timespan::zero();
std::list<heartbeat_handle_d*>::iterator list_item;
explicit heartbeat_handle_d(const std::string& n)
: name(n)
{ }
};
class HeartbeatMap {
public:
// register/unregister
heartbeat_handle_d *add_worker(const std::string& name, pthread_t thread_id);
void remove_worker(const heartbeat_handle_d *h);
// reset the timeout so that it expects another touch within grace amount of time
void reset_timeout(heartbeat_handle_d *h,
ceph::timespan grace,
ceph::timespan suicide_grace);
// clear the timeout so that it's not checked on
void clear_timeout(heartbeat_handle_d *h);
// return false if any of the timeouts are currently expired.
bool is_healthy();
// touch cct->_conf->heartbeat_file if is_healthy()
void check_touch_file();
// get the number of unhealthy workers
int get_unhealthy_workers() const;
// get the number of total workers
int get_total_workers() const;
explicit HeartbeatMap(CephContext *cct);
~HeartbeatMap();
private:
using clock = ceph::coarse_mono_clock;
CephContext *m_cct;
ceph::shared_mutex m_rwlock =
ceph::make_shared_mutex("HeartbeatMap::m_rwlock");
clock::time_point m_inject_unhealthy_until;
std::list<heartbeat_handle_d*> m_workers;
std::atomic<unsigned> m_unhealthy_workers = { 0 };
std::atomic<unsigned> m_total_workers = { 0 };
bool _check(const heartbeat_handle_d *h, const char *who,
ceph::coarse_mono_time now);
};
}
#endif
| 2,828 | 27.29 | 83 | h |
null | ceph-main/src/common/Initialize.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* Copyright (c) 2011 Stanford University
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef CEPH_INITIALIZE_H
#define CEPH_INITIALIZE_H
/**
* This class is used to manage once-only initialization that should occur
* before main() is invoked, such as the creation of static variables. It
* also provides a mechanism for handling dependencies (where one class
* needs to perform its once-only initialization before another).
*
* The simplest way to use an Initialize object is to define a static
* initialization method for a class, say Foo::init(). Then, declare
* a static Initialize object in the class:
* "static Initialize initializer(Foo::init);".
* The result is that Foo::init will be invoked when the object is
* constructed (before main() is invoked). Foo::init can create static
* objects and perform any other once-only initialization needed by the
* class. Furthermore, if some other class needs to ensure that Foo has
* been initialized (e.g. as part of its own initialization) it can invoke
* Foo::init directly (Foo::init should contain an internal guard so that
* it only performs its functions once, even if invoked several times).
*
* There is also a second form of constructor for Initialize that causes a
* new object to be dynamically allocated and assigned to a pointer, instead
* of invoking a function. This form allows for the creation of static objects
* that are never destructed (thereby avoiding issues with the order of
* destruction).
*/
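/*
 * Usage sketch (illustrative; "Foo" and "g_foo_table" are hypothetical names,
 * not part of this header):
 *
 *   class Foo {
 *   public:
 *     static void init() {
 *       static bool done = false;   // internal guard: run the body only once
 *       if (done) return;
 *       done = true;
 *       // ... create static objects, initialize dependencies ...
 *     }
 *   };
 *   static Initialize fooInitializer(Foo::init);        // runs before main()
 *
 *   // Second form: allocate a never-destructed static object.
 *   static std::vector<int>* g_foo_table = nullptr;
 *   static Initialize g_foo_table_initializer(g_foo_table);
 */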
class Initialize {
public:
/**
* This form of constructor causes its function argument to be invoked
* when the object is constructed. When used with a static Initialize
* object, this will cause \p func to run before main() runs, so that
* \p func can perform once-only initialization.
*
* \param func
* This function is invoked with no arguments when the object is
* constructed. Typically the function will create static
* objects and/or invoke other initialization functions. The
* function should normally contain an internal guard so that it
* only performs its initialization the first time it is invoked.
*/
explicit Initialize(void (*func)()) {
(*func)();
}
/**
* This form of constructor causes a new object of a particular class
* to be constructed with a no-argument constructor and assigned to a
* given pointer. This form is typically used with a static Initialize
* object: the result is that the object will be created and assigned
* to the pointer before main() runs.
*
* \param p
* Pointer to an object of any type. If the pointer is NULL then
* it is replaced with a pointer to a newly allocated object of
* the given type.
*/
template<typename T>
explicit Initialize(T*& p) {
if (p == NULL) {
p = new T;
}
}
};
#endif // CEPH_INITIALIZE_H
| 4,046 | 40.721649 | 78 | h |
null | ceph-main/src/common/Journald.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Journald.h"
#include <endian.h>
#include <fcntl.h>
#include <iterator>
#include <memory>
#include <string>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <syslog.h>
#include <unistd.h>
#include <fmt/format.h>
#include <fmt/ostream.h>
#include "include/ceph_assert.h"
#include "common/LogEntry.h"
#include "log/Entry.h"
#include "log/SubsystemMap.h"
#include "msg/msg_fmt.h"
namespace ceph::logging {
namespace {
const struct sockaddr_un sockaddr = {
AF_UNIX,
"/run/systemd/journal/socket",
};
ssize_t sendmsg_fd(int transport_fd, int fd)
{
constexpr size_t control_len = CMSG_LEN(sizeof(int));
char control[control_len];
struct msghdr mh = {
(struct sockaddr*)&sockaddr, // msg_name
sizeof(sockaddr), // msg_namelen
nullptr, // msg_iov
0, // msg_iovlen
&control, // msg_control
control_len, // msg_controllen
};
ceph_assert(transport_fd >= 0);
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&mh);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
*reinterpret_cast<int *>(CMSG_DATA(cmsg)) = fd;
return sendmsg(transport_fd, &mh, MSG_NOSIGNAL);
}
char map_prio(short ceph_prio)
{
if (ceph_prio < 0)
return LOG_ERR;
if (ceph_prio == 0)
return LOG_WARNING;
if (ceph_prio < 5)
return LOG_NOTICE;
if (ceph_prio < 10)
return LOG_INFO;
return LOG_DEBUG;
}
}
namespace detail {
class EntryEncoderBase {
public:
EntryEncoderBase():
m_msg_vec {
{}, {}, {}, { (char *)"\n", 1 },
}
{
std::string id = program_invocation_short_name;
for (auto& c : id) {
if (c == '\n')
c = '_';
}
static_segment = "SYSLOG_IDENTIFIER=" + id + "\n";
m_msg_vec[0].iov_base = static_segment.data();
m_msg_vec[0].iov_len = static_segment.size();
}
constexpr struct iovec *iovec() { return this->m_msg_vec; }
constexpr std::size_t iovec_len()
{
return sizeof(m_msg_vec) / sizeof(m_msg_vec[0]);
}
private:
struct iovec m_msg_vec[4];
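  // iovec layout: [0] the static "SYSLOG_IDENTIFIER=..." segment, [1] the
  // per-entry metadata built by the encoder, [2] the raw message payload,
  // [3] the trailing "\n" terminating the length-prefixed MESSAGE field.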
std::string static_segment;
protected:
fmt::memory_buffer meta_buf;
struct iovec &meta_vec() { return m_msg_vec[1]; }
struct iovec &msg_vec() { return m_msg_vec[2]; }
};
class EntryEncoder : public EntryEncoderBase {
public:
void encode(const Entry& e, const SubsystemMap *s)
{
meta_buf.clear();
fmt::format_to(std::back_inserter(meta_buf),
R"(PRIORITY={:d}
CEPH_SUBSYS={}
TIMESTAMP={}
CEPH_PRIO={}
THREAD={:016x}
MESSAGE
)",
map_prio(e.m_prio),
s->get_name(e.m_subsys),
                 e.m_stamp.time_since_epoch().count(),
e.m_prio,
e.m_thread);
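    // journald's native protocol encodes a binary-safe field as
    // NAME '\n' <64-bit little-endian length> <data> '\n'.  "MESSAGE\n" is
    // already in meta_buf; the length is appended below, and the data plus
    // trailing newline follow via the remaining iovec slots.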
uint64_t msg_len = htole64(e.size());
meta_buf.resize(meta_buf.size() + sizeof(msg_len));
    *(reinterpret_cast<uint64_t*>(meta_buf.end()) - 1) = msg_len;
meta_vec().iov_base = meta_buf.data();
meta_vec().iov_len = meta_buf.size();
msg_vec().iov_base = (void *)e.strv().data();
msg_vec().iov_len = e.size();
}
};
class LogEntryEncoder : public EntryEncoderBase {
public:
void encode(const LogEntry& le)
{
meta_buf.clear();
fmt::format_to(std::back_inserter(meta_buf),
R"(PRIORITY={:d}
TIMESTAMP={}
CEPH_NAME={}
CEPH_RANK={}
CEPH_SEQ={}
CEPH_CHANNEL={}
MESSAGE
)",
clog_type_to_syslog_level(le.prio),
le.stamp.to_nsec(),
le.name.to_str(),
le.rank,
le.seq,
le.channel);
uint64_t msg_len = htole64(le.msg.size());
meta_buf.resize(meta_buf.size() + sizeof(msg_len));
    *(reinterpret_cast<uint64_t*>(meta_buf.end()) - 1) = msg_len;
meta_vec().iov_base = meta_buf.data();
meta_vec().iov_len = meta_buf.size();
msg_vec().iov_base = (void *)le.msg.data();
msg_vec().iov_len = le.msg.size();
}
};
enum class JournaldClient::MemFileMode {
MEMFD_CREATE,
OPEN_TMPFILE,
OPEN_UNLINK,
};
constexpr const char *mem_file_dir = "/dev/shm";
void JournaldClient::detect_mem_file_mode()
{
int memfd = memfd_create("ceph-journald", MFD_ALLOW_SEALING | MFD_CLOEXEC);
if (memfd >= 0) {
mem_file_mode = MemFileMode::MEMFD_CREATE;
close(memfd);
return;
}
memfd = open(mem_file_dir, O_TMPFILE | O_EXCL | O_CLOEXEC, S_IRUSR | S_IWUSR);
if (memfd >= 0) {
mem_file_mode = MemFileMode::OPEN_TMPFILE;
close(memfd);
return;
}
mem_file_mode = MemFileMode::OPEN_UNLINK;
}
int JournaldClient::open_mem_file()
{
switch (mem_file_mode) {
case MemFileMode::MEMFD_CREATE:
return memfd_create("ceph-journald", MFD_ALLOW_SEALING | MFD_CLOEXEC);
case MemFileMode::OPEN_TMPFILE:
return open(mem_file_dir, O_TMPFILE | O_EXCL | O_CLOEXEC, S_IRUSR | S_IWUSR);
case MemFileMode::OPEN_UNLINK:
char mem_file_template[] = "/dev/shm/ceph-journald-XXXXXX";
int fd = mkostemp(mem_file_template, O_CLOEXEC);
unlink(mem_file_template);
return fd;
}
ceph_abort("Unexpected mem_file_mode");
}
JournaldClient::JournaldClient() :
m_msghdr({
(struct sockaddr*)&sockaddr, // msg_name
sizeof(sockaddr), // msg_namelen
})
{
fd = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, 0);
ceph_assertf(fd > 0, "socket creation failed: %s", strerror(errno));
int sendbuf = 2 * 1024 * 1024;
setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sendbuf, sizeof(sendbuf));
detect_mem_file_mode();
}
JournaldClient::~JournaldClient()
{
close(fd);
}
int JournaldClient::send()
{
int ret = sendmsg(fd, &m_msghdr, MSG_NOSIGNAL);
if (ret >= 0)
return 0;
/* Fail silently if the journal is not available */
if (errno == ENOENT)
return -1;
if (errno != EMSGSIZE && errno != ENOBUFS) {
std::cerr << "Failed to send log to journald: " << strerror(errno) << std::endl;
return -1;
}
/* Message doesn't fit... Let's dump the data in a memfd and
* just pass a file descriptor of it to the other side.
*/
int buffer_fd = open_mem_file();
if (buffer_fd < 0) {
std::cerr << "Failed to open buffer_fd while sending log to journald: " << strerror(errno) << std::endl;
return -1;
}
ret = writev(buffer_fd, m_msghdr.msg_iov, m_msghdr.msg_iovlen);
if (ret < 0) {
std::cerr << "Failed to write to buffer_fd while sending log to journald: " << strerror(errno) << std::endl;
goto err_close_buffer_fd;
}
if (mem_file_mode == MemFileMode::MEMFD_CREATE) {
ret = fcntl(buffer_fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL);
if (ret) {
std::cerr << "Failed to seal buffer_fd while sending log to journald: " << strerror(errno) << std::endl;
goto err_close_buffer_fd;
}
}
ret = sendmsg_fd(fd, buffer_fd);
if (ret < 0) {
/* Fail silently if the journal is not available */
if (errno == ENOENT)
goto err_close_buffer_fd;
std::cerr << "Failed to send fd while sending log to journald: " << strerror(errno) << std::endl;
goto err_close_buffer_fd;
}
close(buffer_fd);
return 0;
err_close_buffer_fd:
close(buffer_fd);
return -1;
}
} // namespace ceph::logging::detail
JournaldLogger::JournaldLogger(const SubsystemMap *s) :
m_entry_encoder(std::make_unique<detail::EntryEncoder>()),
m_subs(s)
{
client.m_msghdr.msg_iov = m_entry_encoder->iovec();
client.m_msghdr.msg_iovlen = m_entry_encoder->iovec_len();
}
JournaldLogger::~JournaldLogger() = default;
int JournaldLogger::log_entry(const Entry& e)
{
m_entry_encoder->encode(e, m_subs);
return client.send();
}
JournaldClusterLogger::JournaldClusterLogger() :
m_log_entry_encoder(std::make_unique<detail::LogEntryEncoder>())
{
client.m_msghdr.msg_iov = m_log_entry_encoder->iovec();
client.m_msghdr.msg_iovlen = m_log_entry_encoder->iovec_len();
}
JournaldClusterLogger::~JournaldClusterLogger() = default;
int JournaldClusterLogger::log_log_entry(const LogEntry &le)
{
m_log_entry_encoder->encode(le);
return client.send();
}
}
| 8,058 | 24.184375 | 112 | cc |
null | ceph-main/src/common/Journald.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_COMMON_JOURNALD_H
#define CEPH_COMMON_JOURNALD_H
#include "acconfig.h"
#include <memory>
#include <sys/types.h>
#include <sys/socket.h>
struct LogEntry;
namespace ceph::logging {
class Entry;
class SubsystemMap;
#ifdef WITH_SYSTEMD
namespace detail {
class EntryEncoder;
class LogEntryEncoder;
class JournaldClient {
public:
JournaldClient();
~JournaldClient();
int send();
struct msghdr m_msghdr;
private:
int fd;
enum class MemFileMode;
MemFileMode mem_file_mode;
void detect_mem_file_mode();
int open_mem_file();
};
}
/**
* Logger to send local logs to journald
*
 * "local logs" means @code dout(0) << ... @endcode and the like
*
* @see JournaldClusterLogger
*/
class JournaldLogger {
public:
JournaldLogger(const SubsystemMap *s);
~JournaldLogger();
/**
* @returns 0 if log entry is successfully sent, -1 otherwise.
*/
int log_entry(const Entry &e);
private:
detail::JournaldClient client;
std::unique_ptr<detail::EntryEncoder> m_entry_encoder;
const SubsystemMap * m_subs;
};
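// Minimal sketch of wiring the logger up (illustrative; error handling elided):
//
//   JournaldLogger jlog(&subsystem_map);
//   if (jlog.log_entry(entry) < 0) {
//     // journald was unavailable; the entry is simply dropped
//   }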
/**
* Logger to send cluster log recieved by MON to journald
*
* @see JournaldLogger
*/
class JournaldClusterLogger {
public:
JournaldClusterLogger();
~JournaldClusterLogger();
/**
* @returns 0 if log entry is successfully sent, -1 otherwise.
*/
int log_log_entry(const LogEntry &le);
private:
detail::JournaldClient client;
std::unique_ptr<detail::LogEntryEncoder> m_log_entry_encoder;
};
#else // WITH_SYSTEMD
class JournaldLogger {
public:
JournaldLogger(const SubsystemMap *) {}
int log_entry(const Entry &) {
return 0;
}
};
class JournaldClusterLogger {
public:
int log_log_entry(const LogEntry &le) {
return 0;
}
};
#endif // WITH_SYSTEMD
} // ceph::logging
#endif
| 1,881 | 15.954955 | 70 | h |
null | ceph-main/src/common/LRUSet.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <functional>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/unordered_set.hpp>
#include "include/encoding.h"
/// Combination of an LRU with fast hash-based membership lookup
template<class T, int NUM_BUCKETS=128>
class LRUSet {
/// internal node
struct Node
: boost::intrusive::unordered_set_base_hook<> {
// actual payload
T value;
// for the lru
boost::intrusive::list_member_hook<> lru_item;
Node(const T& v) : value(v) {}
friend std::size_t hash_value(const Node &node) {
return std::hash<T>{}(node.value);
}
friend bool operator<(const Node &a, const Node &b) {
return a.value < b.value;
}
friend bool operator>(const Node &a, const Node &b) {
return a.value > b.value;
}
friend bool operator==(const Node &a, const Node &b) {
return a.value == b.value;
}
};
struct NodeDeleteDisposer {
void operator()(Node *n) { delete n; }
};
// lru
boost::intrusive::list<
Node,
boost::intrusive::member_hook<Node,
boost::intrusive::list_member_hook<>,
&Node::lru_item>
> lru;
// hash-based set
typename boost::intrusive::unordered_set<Node>::bucket_type base_buckets[NUM_BUCKETS];
boost::intrusive::unordered_set<Node> set;
public:
LRUSet()
: set(typename boost::intrusive::unordered_set<Node>::bucket_traits(base_buckets,
NUM_BUCKETS))
{}
~LRUSet() {
clear();
}
LRUSet(const LRUSet& other)
: set(typename boost::intrusive::unordered_set<Node>::bucket_traits(base_buckets,
NUM_BUCKETS)) {
for (auto & i : other.lru) {
insert(i.value);
}
}
const LRUSet& operator=(const LRUSet& other) {
clear();
for (auto& i : other.lru) {
insert(i.value);
}
return *this;
}
size_t size() const {
return set.size();
}
bool empty() const {
return set.empty();
}
bool contains(const T& item) const {
return set.count(item) > 0;
}
void clear() {
prune(0);
}
void insert(const T& item) {
erase(item);
Node *n = new Node(item);
lru.push_back(*n);
set.insert(*n);
}
bool erase(const T& item) {
auto p = set.find(item);
if (p == set.end()) {
return false;
}
lru.erase(lru.iterator_to(*p));
set.erase_and_dispose(p, NodeDeleteDisposer());
return true;
}
void prune(size_t max) {
while (set.size() > max) {
auto p = lru.begin();
set.erase(*p);
lru.erase_and_dispose(p, NodeDeleteDisposer());
}
}
void encode(bufferlist& bl) const {
using ceph::encode;
ENCODE_START(1, 1, bl);
uint32_t n = set.size();
encode(n, bl);
auto p = set.begin();
while (n--) {
encode(p->value, bl);
++p;
}
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& p) {
using ceph::decode;
DECODE_START(1, p);
uint32_t n;
decode(n, p);
while (n--) {
T v;
decode(v, p);
insert(v);
}
DECODE_FINISH(p);
}
};
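// Usage sketch (illustrative): remember roughly the 100 most recently seen ids.
//
//   LRUSet<uint64_t> seen;
//   seen.insert(42);               // (re)inserting moves the id to the MRU end
//   if (seen.contains(42)) { /* recently seen */ }
//   seen.prune(100);               // evict the oldest entries beyond 100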
| 3,133 | 20.465753 | 88 | h |
null | ceph-main/src/common/LogClient.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/LogClient.h"
#include "include/str_map.h"
#include "messages/MLog.h"
#include "messages/MLogAck.h"
#include "msg/Messenger.h"
#include "mon/MonMap.h"
#include "common/Graylog.h"
#define dout_subsys ceph_subsys_monc
using std::map;
using std::ostream;
using std::ostringstream;
using std::string;
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
static ostream& _prefix(std::ostream *_dout, LogClient *logc) {
return *_dout << "log_client ";
}
static ostream& _prefix(std::ostream *_dout, LogChannel *lc) {
return *_dout << "log_channel(" << lc->get_log_channel() << ") ";
}
LogChannel::LogChannel(CephContext *cct, LogClient *lc, const string &channel)
: cct(cct), parent(lc),
log_channel(channel), log_to_syslog(false), log_to_monitors(false)
{
}
LogChannel::LogChannel(CephContext *cct, LogClient *lc,
const string &channel, const string &facility,
const string &prio)
: cct(cct), parent(lc),
log_channel(channel), log_prio(prio), syslog_facility(facility),
log_to_syslog(false), log_to_monitors(false)
{
}
LogClient::LogClient(CephContext *cct, Messenger *m, MonMap *mm,
enum logclient_flag_t flags)
: cct(cct), messenger(m), monmap(mm), is_mon(flags & FLAG_MON),
last_log_sent(0), last_log(0)
{
}
void LogChannel::set_log_to_monitors(bool v)
{
if (log_to_monitors != v) {
parent->reset();
log_to_monitors = v;
}
}
void LogChannel::update_config(const clog_targets_conf_t& conf_strings)
{
ldout(cct, 20) << __func__ << " log_to_monitors " << conf_strings.log_to_monitors
<< " log_to_syslog " << conf_strings.log_to_syslog
<< " log_channels " << conf_strings.log_channels
<< " log_prios " << conf_strings.log_prios
<< dendl;
bool to_monitors = (conf_strings.log_to_monitors == "true");
bool to_syslog = (conf_strings.log_to_syslog == "true");
bool to_graylog = (conf_strings.log_to_graylog == "true");
auto graylog_port = atoi(conf_strings.log_to_graylog_port.c_str());
set_log_to_monitors(to_monitors);
set_log_to_syslog(to_syslog);
set_syslog_facility(conf_strings.log_channels);
set_log_prio(conf_strings.log_prios);
if (to_graylog && !graylog) { /* should but isn't */
graylog = std::make_shared<ceph::logging::Graylog>("clog");
} else if (!to_graylog && graylog) { /* shouldn't but is */
graylog.reset();
}
if (to_graylog && graylog) {
graylog->set_fsid(conf_strings.fsid);
graylog->set_hostname(conf_strings.host);
}
if (graylog && !conf_strings.log_to_graylog_host.empty() && (graylog_port != 0)) {
graylog->set_destination(conf_strings.log_to_graylog_host, graylog_port);
}
ldout(cct, 10) << __func__
<< " to_monitors: " << (to_monitors ? "true" : "false")
<< " to_syslog: " << (to_syslog ? "true" : "false")
<< " syslog_facility: " << conf_strings.log_channels
<< " prio: " << conf_strings.log_prios
<< " to_graylog: " << (to_graylog ? "true" : "false")
<< " graylog_host: " << conf_strings.log_to_graylog_host
<< " graylog_port: " << graylog_port
<< ")" << dendl;
}
clog_targets_conf_t LogChannel::parse_client_options(CephContext* conf_cct)
{
auto parsed_options = parse_log_client_options(conf_cct);
update_config(parsed_options);
return parsed_options;
}
clog_targets_conf_t LogChannel::parse_log_client_options(CephContext* cct)
{
clog_targets_conf_t targets;
targets.log_to_monitors =
get_value_via_strmap(cct->_conf.get_val<string>("clog_to_monitors"),
log_channel, CLOG_CONFIG_DEFAULT_KEY);
targets.log_to_syslog =
get_value_via_strmap(cct->_conf.get_val<string>("clog_to_syslog"),
log_channel, CLOG_CONFIG_DEFAULT_KEY);
targets.log_channels =
get_value_via_strmap(cct->_conf.get_val<string>("clog_to_syslog_facility"),
log_channel, CLOG_CONFIG_DEFAULT_KEY);
targets.log_prios =
get_value_via_strmap(cct->_conf.get_val<string>("clog_to_syslog_level"),
log_channel, CLOG_CONFIG_DEFAULT_KEY);
targets.log_to_graylog =
get_value_via_strmap(cct->_conf.get_val<string>("clog_to_graylog"),
log_channel, CLOG_CONFIG_DEFAULT_KEY);
targets.log_to_graylog_host =
get_value_via_strmap(cct->_conf.get_val<string>("clog_to_graylog_host"),
log_channel, CLOG_CONFIG_DEFAULT_KEY);
targets.log_to_graylog_port =
get_value_via_strmap(cct->_conf.get_val<string>("clog_to_graylog_port"),
log_channel, CLOG_CONFIG_DEFAULT_KEY);
targets.fsid = cct->_conf.get_val<uuid_d>("fsid");
targets.host = cct->_conf->host;
return targets;
}
void LogChannel::do_log(clog_type prio, std::stringstream& ss)
{
while (!ss.eof()) {
string s;
getline(ss, s);
if (!s.empty())
do_log(prio, s);
}
}
void LogChannel::do_log(clog_type prio, const std::string& s)
{
std::lock_guard l(channel_lock);
if (CLOG_ERROR == prio) {
ldout(cct,-1) << "log " << prio << " : " << s << dendl;
} else {
ldout(cct,0) << "log " << prio << " : " << s << dendl;
}
LogEntry e;
e.stamp = ceph_clock_now();
// seq and who should be set for syslog/graylog/log_to_mon
e.addrs = parent->get_myaddrs();
e.name = parent->get_myname();
e.rank = parent->get_myrank();
e.prio = prio;
e.msg = s;
e.channel = get_log_channel();
// log to monitor?
if (log_to_monitors) {
e.seq = parent->queue(e);
} else {
e.seq = parent->get_next_seq();
}
// log to syslog?
if (do_log_to_syslog()) {
ldout(cct,0) << __func__ << " log to syslog" << dendl;
e.log_to_syslog(get_log_prio(), get_syslog_facility());
}
// log to graylog?
if (do_log_to_graylog()) {
ldout(cct,0) << __func__ << " log to graylog" << dendl;
graylog->log_log_entry(&e);
}
}
ceph::ref_t<Message> LogClient::get_mon_log_message(bool flush)
{
std::lock_guard l(log_lock);
if (flush) {
if (log_queue.empty())
return nullptr;
// reset session
last_log_sent = log_queue.front().seq;
}
return _get_mon_log_message();
}
bool LogClient::are_pending()
{
std::lock_guard l(log_lock);
return last_log > last_log_sent;
}
ceph::ref_t<Message> LogClient::_get_mon_log_message()
{
ceph_assert(ceph_mutex_is_locked(log_lock));
if (log_queue.empty())
return {};
// only send entries that haven't been sent yet during this mon
// session! monclient needs to call reset_session() on mon session
// reset for this to work right.
if (last_log_sent == last_log)
return {};
// limit entries per message
unsigned num_unsent = last_log - last_log_sent;
unsigned num_send;
if (cct->_conf->mon_client_max_log_entries_per_message > 0)
num_send = std::min(num_unsent, (unsigned)cct->_conf->mon_client_max_log_entries_per_message);
else
num_send = num_unsent;
ldout(cct,10) << " log_queue is " << log_queue.size() << " last_log " << last_log << " sent " << last_log_sent
<< " num " << log_queue.size()
<< " unsent " << num_unsent
<< " sending " << num_send << dendl;
ceph_assert(num_unsent <= log_queue.size());
std::deque<LogEntry>::iterator p = log_queue.begin();
std::deque<LogEntry> o;
while (p->seq <= last_log_sent) {
++p;
ceph_assert(p != log_queue.end());
}
while (num_send--) {
ceph_assert(p != log_queue.end());
o.push_back(*p);
last_log_sent = p->seq;
ldout(cct,10) << " will send " << *p << dendl;
++p;
}
return ceph::make_message<MLog>(monmap->get_fsid(),
std::move(o));
}
void LogClient::_send_to_mon()
{
ceph_assert(ceph_mutex_is_locked(log_lock));
ceph_assert(is_mon);
ceph_assert(messenger->get_myname().is_mon());
ldout(cct,10) << __func__ << " log to self" << dendl;
auto log = _get_mon_log_message();
messenger->get_loopback_connection()->send_message2(std::move(log));
}
version_t LogClient::queue(LogEntry &entry)
{
std::lock_guard l(log_lock);
entry.seq = ++last_log;
log_queue.push_back(entry);
if (is_mon) {
_send_to_mon();
}
return entry.seq;
}
void LogClient::reset()
{
std::lock_guard l(log_lock);
if (log_queue.size()) {
log_queue.clear();
}
last_log_sent = last_log;
}
uint64_t LogClient::get_next_seq()
{
std::lock_guard l(log_lock);
return ++last_log;
}
entity_addrvec_t LogClient::get_myaddrs()
{
return messenger->get_myaddrs();
}
entity_name_t LogClient::get_myrank()
{
return messenger->get_myname();
}
const EntityName& LogClient::get_myname()
{
return cct->_conf->name;
}
bool LogClient::handle_log_ack(MLogAck *m)
{
std::lock_guard l(log_lock);
ldout(cct,10) << "handle_log_ack " << *m << dendl;
version_t last = m->last;
auto q = log_queue.begin();
while (q != log_queue.end()) {
const LogEntry &entry(*q);
if (entry.seq > last)
break;
ldout(cct,10) << " logged " << entry << dendl;
q = log_queue.erase(q);
}
return true;
}
| 9,363 | 27.204819 | 112 | cc |
null | ceph-main/src/common/LogClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LOGCLIENT_H
#define CEPH_LOGCLIENT_H
#include <atomic>
#include "common/LogEntry.h"
#include "common/ceph_mutex.h"
#include "common/ostream_temp.h"
#include "common/ref.h"
#include "include/health.h"
class LogClient;
class MLog;
class MLogAck;
class Messenger;
class MonMap;
class Message;
struct uuid_d;
struct Connection;
class LogChannel;
namespace ceph {
namespace logging {
class Graylog;
}
}
struct clog_targets_conf_t {
std::string log_to_monitors;
std::string log_to_syslog;
std::string log_channels;
std::string log_prios;
std::string log_to_graylog;
std::string log_to_graylog_host;
std::string log_to_graylog_port;
uuid_d fsid; // only 16B. Simpler as a copy.
std::string host;
};
/** Manage where we output to and at which priority
*
* Not to be confused with the LogClient, which is the almighty coordinator
* of channels. We just deal with the boring part of the logging: send to
* syslog, send to file, generate LogEntry and queue it for the LogClient.
*
* Past queueing the LogEntry, the LogChannel is done with the whole thing.
* LogClient will deal with sending and handling of LogEntries.
*/
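/*
 * Typical daemon-side use (sketch; "whoami" is a placeholder):
 *
 *   LogChannelRef clog = log_client.create_channel(CLOG_CHANNEL_CLUSTER);
 *   clog->parse_client_options(g_ceph_context);  // pick up clog_to_* options
 *   clog->info() << "osd." << whoami << " boot";
 *   clog->error() << "something went wrong";
 */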
class LogChannel : public LoggerSinkSet
{
public:
LogChannel(CephContext *cct, LogClient *lc, const std::string &channel);
LogChannel(CephContext *cct, LogClient *lc,
const std::string &channel,
const std::string &facility,
const std::string &prio);
OstreamTemp debug() final {
return OstreamTemp(CLOG_DEBUG, this);
}
void debug(std::stringstream &s) final {
do_log(CLOG_DEBUG, s);
}
/**
* Convenience function mapping health status to
* the appropriate cluster log severity.
*/
OstreamTemp health(health_status_t health) {
switch(health) {
case HEALTH_OK:
return info();
case HEALTH_WARN:
return warn();
case HEALTH_ERR:
return error();
default:
// Invalid health_status_t value
ceph_abort();
}
}
OstreamTemp info() final {
return OstreamTemp(CLOG_INFO, this);
}
void info(std::stringstream &s) final {
do_log(CLOG_INFO, s);
}
OstreamTemp warn() final {
return OstreamTemp(CLOG_WARN, this);
}
void warn(std::stringstream &s) final {
do_log(CLOG_WARN, s);
}
OstreamTemp error() final {
return OstreamTemp(CLOG_ERROR, this);
}
void error(std::stringstream &s) final {
do_log(CLOG_ERROR, s);
}
OstreamTemp sec() final {
return OstreamTemp(CLOG_SEC, this);
}
void sec(std::stringstream &s) final {
do_log(CLOG_SEC, s);
}
void set_log_to_monitors(bool v);
void set_log_to_syslog(bool v) {
log_to_syslog = v;
}
void set_log_channel(const std::string& v) {
log_channel = v;
}
void set_log_prio(const std::string& v) {
log_prio = v;
}
void set_syslog_facility(const std::string& v) {
syslog_facility = v;
}
std::string get_log_prio() { return log_prio; }
std::string get_log_channel() { return log_channel; }
std::string get_syslog_facility() { return syslog_facility; }
bool must_log_to_syslog() { return log_to_syslog; }
/**
* Do we want to log to syslog?
*
* @return true if log_to_syslog is true and both channel and prio
* are not empty; false otherwise.
*/
bool do_log_to_syslog() {
return must_log_to_syslog() &&
!log_prio.empty() && !log_channel.empty();
}
bool must_log_to_monitors() { return log_to_monitors; }
bool do_log_to_graylog() {
return (graylog != nullptr);
}
typedef std::shared_ptr<LogChannel> Ref;
/**
* Query the configuration database in conf_cct for configuration
* parameters. Pick out the relevant values based on our channel name.
* Update the logger configuration based on these values.
*
* Return a collection of configuration strings.
*/
clog_targets_conf_t parse_client_options(CephContext* conf_cct);
void do_log(clog_type prio, std::stringstream& ss) final;
void do_log(clog_type prio, const std::string& s) final;
private:
CephContext *cct;
LogClient *parent;
ceph::mutex channel_lock = ceph::make_mutex("LogChannel::channel_lock");
std::string log_channel;
std::string log_prio;
std::string syslog_facility;
bool log_to_syslog;
bool log_to_monitors;
std::shared_ptr<ceph::logging::Graylog> graylog;
/**
* update config values from parsed k/v std::map for each config option
*/
void update_config(const clog_targets_conf_t& conf_strings);
clog_targets_conf_t parse_log_client_options(CephContext* conf_cct);
};
typedef LogChannel::Ref LogChannelRef;
class LogClient
{
public:
enum logclient_flag_t {
NO_FLAGS = 0,
FLAG_MON = 0x1,
};
LogClient(CephContext *cct, Messenger *m, MonMap *mm,
logclient_flag_t flags);
virtual ~LogClient() {
channels.clear();
}
bool handle_log_ack(MLogAck *m);
ceph::ref_t<Message> get_mon_log_message(bool flush);
bool are_pending();
LogChannelRef create_channel() {
return create_channel(CLOG_CHANNEL_DEFAULT);
}
LogChannelRef create_channel(const std::string& name) {
LogChannelRef c;
if (channels.count(name))
c = channels[name];
else {
c = std::make_shared<LogChannel>(cct, this, name);
channels[name] = c;
}
return c;
}
void destroy_channel(const std::string& name) {
if (channels.count(name))
channels.erase(name);
}
void shutdown() {
channels.clear();
}
uint64_t get_next_seq();
entity_addrvec_t get_myaddrs();
const EntityName& get_myname();
entity_name_t get_myrank();
version_t queue(LogEntry &entry);
void reset();
private:
ceph::ref_t<Message> _get_mon_log_message();
void _send_to_mon();
CephContext *cct;
Messenger *messenger;
MonMap *monmap;
bool is_mon;
ceph::mutex log_lock = ceph::make_mutex("LogClient::log_lock");
version_t last_log_sent;
version_t last_log;
std::deque<LogEntry> log_queue;
std::map<std::string, LogChannelRef> channels;
};
#endif
| 6,440 | 24.160156 | 75 | h |
null | ceph-main/src/common/LogEntry.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
//
#include <syslog.h>
#include <boost/algorithm/string/predicate.hpp>
#include "LogEntry.h"
#include "Formatter.h"
#include "include/stringify.h"
using std::list;
using std::map;
using std::make_pair;
using std::pair;
using std::string;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
using ceph::Formatter;
// ----
// LogEntryKey
void LogEntryKey::dump(Formatter *f) const
{
f->dump_stream("rank") << rank;
f->dump_stream("stamp") << stamp;
f->dump_unsigned("seq", seq);
}
void LogEntryKey::generate_test_instances(list<LogEntryKey*>& o)
{
o.push_back(new LogEntryKey);
o.push_back(new LogEntryKey(entity_name_t::CLIENT(1234), utime_t(1,2), 34));
}
clog_type LogEntry::str_to_level(std::string const &str)
{
std::string level_str = str;
std::transform(level_str.begin(), level_str.end(), level_str.begin(),
[](char c) {return std::tolower(c);});
if (level_str == "debug") {
return CLOG_DEBUG;
} else if (level_str == "info") {
return CLOG_INFO;
} else if (level_str == "sec") {
return CLOG_SEC;
} else if (level_str == "warn" || level_str == "warning") {
return CLOG_WARN;
} else if (level_str == "error" || level_str == "err") {
return CLOG_ERROR;
} else {
return CLOG_UNKNOWN;
}
}
// ----
int clog_type_to_syslog_level(clog_type t)
{
switch (t) {
case CLOG_DEBUG:
return LOG_DEBUG;
case CLOG_INFO:
return LOG_INFO;
case CLOG_WARN:
return LOG_WARNING;
case CLOG_ERROR:
return LOG_ERR;
case CLOG_SEC:
return LOG_CRIT;
default:
ceph_abort();
return 0;
}
}
clog_type string_to_clog_type(const string& s)
{
if (boost::iequals(s, "debug") ||
boost::iequals(s, "dbg"))
return CLOG_DEBUG;
if (boost::iequals(s, "info") ||
boost::iequals(s, "inf"))
return CLOG_INFO;
if (boost::iequals(s, "warning") ||
boost::iequals(s, "warn") ||
boost::iequals(s, "wrn"))
return CLOG_WARN;
if (boost::iequals(s, "error") ||
boost::iequals(s, "err"))
return CLOG_ERROR;
if (boost::iequals(s, "security") ||
boost::iequals(s, "sec"))
return CLOG_SEC;
return CLOG_UNKNOWN;
}
int string_to_syslog_level(string s)
{
if (boost::iequals(s, "debug"))
return LOG_DEBUG;
if (boost::iequals(s, "info") ||
boost::iequals(s, "notice"))
return LOG_INFO;
if (boost::iequals(s, "warning") ||
boost::iequals(s, "warn"))
return LOG_WARNING;
if (boost::iequals(s, "error") ||
boost::iequals(s, "err"))
return LOG_ERR;
if (boost::iequals(s, "crit") ||
boost::iequals(s, "critical") ||
boost::iequals(s, "emerg"))
return LOG_CRIT;
// err on the side of noise!
return LOG_DEBUG;
}
int string_to_syslog_facility(string s)
{
if (boost::iequals(s, "auth"))
return LOG_AUTH;
if (boost::iequals(s, "authpriv"))
return LOG_AUTHPRIV;
if (boost::iequals(s, "cron"))
return LOG_CRON;
if (boost::iequals(s, "daemon"))
return LOG_DAEMON;
if (boost::iequals(s, "ftp"))
return LOG_FTP;
if (boost::iequals(s, "kern"))
return LOG_KERN;
if (boost::iequals(s, "local0"))
return LOG_LOCAL0;
if (boost::iequals(s, "local1"))
return LOG_LOCAL1;
if (boost::iequals(s, "local2"))
return LOG_LOCAL2;
if (boost::iequals(s, "local3"))
return LOG_LOCAL3;
if (boost::iequals(s, "local4"))
return LOG_LOCAL4;
if (boost::iequals(s, "local5"))
return LOG_LOCAL5;
if (boost::iequals(s, "local6"))
return LOG_LOCAL6;
if (boost::iequals(s, "local7"))
return LOG_LOCAL7;
if (boost::iequals(s, "lpr"))
return LOG_LPR;
if (boost::iequals(s, "mail"))
return LOG_MAIL;
if (boost::iequals(s, "news"))
return LOG_NEWS;
if (boost::iequals(s, "syslog"))
return LOG_SYSLOG;
if (boost::iequals(s, "user"))
return LOG_USER;
if (boost::iequals(s, "uucp"))
return LOG_UUCP;
// default to USER
return LOG_USER;
}
string clog_type_to_string(clog_type t)
{
switch (t) {
case CLOG_DEBUG:
return "debug";
case CLOG_INFO:
return "info";
case CLOG_WARN:
return "warn";
case CLOG_ERROR:
return "err";
case CLOG_SEC:
return "crit";
default:
ceph_abort();
    return "";  // unreachable, but avoid constructing std::string from a null pointer
}
}
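// Forward this entry to syslog(3) if its priority is at least as severe as
// the 'level' threshold; 'facility' names the syslog facility to log under.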
void LogEntry::log_to_syslog(string level, string facility) const
{
int min = string_to_syslog_level(level);
int l = clog_type_to_syslog_level(prio);
if (l <= min) {
int f = string_to_syslog_facility(facility);
syslog(l | f, "%s %s %llu : %s",
name.to_cstr(),
stringify(rank).c_str(),
(long long unsigned)seq,
msg.c_str());
}
}
void LogEntry::encode(bufferlist& bl, uint64_t features) const
{
assert(HAVE_FEATURE(features, SERVER_NAUTILUS));
ENCODE_START(5, 5, bl);
__u16 t = prio;
encode(name, bl);
encode(rank, bl);
encode(addrs, bl, features);
encode(stamp, bl);
encode(seq, bl);
encode(t, bl);
encode(msg, bl);
encode(channel, bl);
ENCODE_FINISH(bl);
}
void LogEntry::decode(bufferlist::const_iterator& bl)
{
DECODE_START_LEGACY_COMPAT_LEN(5, 2, 2, bl);
if (struct_v < 5) {
__u16 t;
entity_inst_t who;
decode(who, bl);
rank = who.name;
addrs.v.clear();
addrs.v.push_back(who.addr);
decode(stamp, bl);
decode(seq, bl);
decode(t, bl);
prio = (clog_type)t;
decode(msg, bl);
if (struct_v >= 3) {
decode(channel, bl);
} else {
// prior to having logging channels we only had a cluster log.
// Ensure we keep that appearance when the other party has no
// clue of what a 'channel' is.
channel = CLOG_CHANNEL_CLUSTER;
}
if (struct_v >= 4) {
decode(name, bl);
}
} else {
__u16 t;
decode(name, bl);
decode(rank, bl);
decode(addrs, bl);
decode(stamp, bl);
decode(seq, bl);
decode(t, bl);
prio = (clog_type)t;
decode(msg, bl);
decode(channel, bl);
}
DECODE_FINISH(bl);
}
void LogEntry::dump(Formatter *f) const
{
f->dump_stream("name") << name;
f->dump_stream("rank") << rank;
f->dump_object("addrs", addrs);
f->dump_stream("stamp") << stamp;
f->dump_unsigned("seq", seq);
f->dump_string("channel", channel);
f->dump_stream("priority") << prio;
f->dump_string("message", msg);
}
void LogEntry::generate_test_instances(list<LogEntry*>& o)
{
o.push_back(new LogEntry);
}
// -----
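// Merge the per-channel tails into one list ordered by the global sequence
// number that add_legacy() assigned to each entry.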
void LogSummary::build_ordered_tail_legacy(list<LogEntry> *tail) const
{
tail->clear();
// channel -> (begin, end)
map<string,pair<list<pair<uint64_t,LogEntry>>::const_iterator,
list<pair<uint64_t,LogEntry>>::const_iterator>> pos;
for (auto& i : tail_by_channel) {
pos.emplace(i.first, make_pair(i.second.begin(), i.second.end()));
}
while (true) {
uint64_t min_seq = 0;
list<pair<uint64_t,LogEntry>>::const_iterator *minp = 0;
for (auto& i : pos) {
if (i.second.first == i.second.second) {
continue;
}
if (min_seq == 0 || i.second.first->first < min_seq) {
min_seq = i.second.first->first;
minp = &i.second.first;
}
}
if (min_seq == 0) {
break; // done
}
tail->push_back((*minp)->second);
++(*minp);
}
}
void LogSummary::encode(bufferlist& bl, uint64_t features) const
{
assert(HAVE_FEATURE(features, SERVER_MIMIC));
ENCODE_START(4, 3, bl);
encode(version, bl);
encode(seq, bl);
encode(tail_by_channel, bl, features);
encode(channel_info, bl);
recent_keys.encode(bl);
ENCODE_FINISH(bl);
}
void LogSummary::decode(bufferlist::const_iterator& bl)
{
DECODE_START_LEGACY_COMPAT_LEN(4, 2, 2, bl);
decode(version, bl);
decode(seq, bl);
decode(tail_by_channel, bl);
if (struct_v >= 4) {
decode(channel_info, bl);
recent_keys.decode(bl);
}
DECODE_FINISH(bl);
keys.clear();
for (auto& i : tail_by_channel) {
for (auto& e : i.second) {
keys.insert(e.second.key());
}
}
}
void LogSummary::dump(Formatter *f) const
{
f->dump_unsigned("version", version);
f->open_object_section("tail_by_channel");
for (auto& i : tail_by_channel) {
f->open_object_section(i.first.c_str());
for (auto& j : i.second) {
string s = stringify(j.first);
f->dump_object(s.c_str(), j.second);
}
f->close_section();
}
f->close_section();
}
void LogSummary::generate_test_instances(list<LogSummary*>& o)
{
o.push_back(new LogSummary);
// more!
}
| 8,459 | 22.434903 | 78 | cc |
null | ceph-main/src/common/LogEntry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LOGENTRY_H
#define CEPH_LOGENTRY_H
#include <fmt/format.h>
#include "include/utime.h"
#include "msg/msg_fmt.h"
#include "msg/msg_types.h"
#include "common/entity_name.h"
#include "ostream_temp.h"
#include "LRUSet.h"
namespace ceph {
class Formatter;
}
static const std::string CLOG_CHANNEL_NONE = "none";
static const std::string CLOG_CHANNEL_DEFAULT = "cluster";
static const std::string CLOG_CHANNEL_CLUSTER = "cluster";
static const std::string CLOG_CHANNEL_AUDIT = "audit";
// this is the key name used in the config options for the default, e.g.
// default=true foo=false bar=false
static const std::string CLOG_CONFIG_DEFAULT_KEY = "default";
/*
* Given a clog log_type, return the equivalent syslog priority
*/
int clog_type_to_syslog_level(clog_type t);
clog_type string_to_clog_type(const std::string& s);
int string_to_syslog_level(std::string s);
int string_to_syslog_facility(std::string s);
std::string clog_type_to_string(clog_type t);
struct LogEntryKey {
private:
uint64_t _hash = 0;
void _calc_hash() {
std::hash<entity_name_t> h;
_hash = seq + h(rank);
}
entity_name_t rank;
utime_t stamp;
uint64_t seq = 0;
public:
LogEntryKey() {}
LogEntryKey(const entity_name_t& w, utime_t t, uint64_t s)
: rank(w), stamp(t), seq(s) {
_calc_hash();
}
uint64_t get_hash() const {
return _hash;
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<LogEntryKey*>& o);
friend bool operator==(const LogEntryKey& l, const LogEntryKey& r) {
return l.rank == r.rank && l.stamp == r.stamp && l.seq == r.seq;
}
void encode(bufferlist& bl) const {
using ceph::encode;
encode(rank, bl);
encode(stamp, bl);
encode(seq, bl);
}
void decode(bufferlist::const_iterator &p) {
using ceph::decode;
decode(rank, p);
decode(stamp, p);
decode(seq, p);
}
};
WRITE_CLASS_ENCODER(LogEntryKey)
namespace std {
template<> struct hash<LogEntryKey> {
size_t operator()(const LogEntryKey& r) const {
return r.get_hash();
}
};
} // namespace std
struct LogEntry {
EntityName name;
entity_name_t rank;
entity_addrvec_t addrs;
utime_t stamp;
uint64_t seq;
clog_type prio;
std::string msg;
std::string channel;
LogEntry() : seq(0), prio(CLOG_DEBUG) {}
LogEntryKey key() const { return LogEntryKey(rank, stamp, seq); }
void log_to_syslog(std::string level, std::string facility) const;
void encode(ceph::buffer::list& bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<LogEntry*>& o);
static clog_type str_to_level(std::string const &str);
};
WRITE_CLASS_ENCODER_FEATURES(LogEntry)
struct LogSummary {
version_t version;
// ---- pre-quincy ----
// channel -> [(seq#, entry), ...]
std::map<std::string,std::list<std::pair<uint64_t,LogEntry>>> tail_by_channel;
uint64_t seq = 0;
ceph::unordered_set<LogEntryKey> keys;
// ---- quincy+ ----
LRUSet<LogEntryKey> recent_keys;
std::map<std::string, std::pair<uint64_t,uint64_t>> channel_info; // channel -> [begin, end)
LogSummary() : version(0) {}
void build_ordered_tail_legacy(std::list<LogEntry> *tail) const;
void add_legacy(const LogEntry& e) {
keys.insert(e.key());
tail_by_channel[e.channel].push_back(std::make_pair(++seq, e));
}
void prune(size_t max) {
for (auto& i : tail_by_channel) {
while (i.second.size() > max) {
keys.erase(i.second.front().second.key());
i.second.pop_front();
}
}
recent_keys.prune(max);
}
bool contains(const LogEntryKey& k) const {
return keys.count(k) || recent_keys.contains(k);
}
void encode(ceph::buffer::list& bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<LogSummary*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(LogSummary)
inline std::ostream& operator<<(std::ostream& out, const clog_type t)
{
switch (t) {
case CLOG_DEBUG:
return out << "[DBG]";
case CLOG_INFO:
return out << "[INF]";
case CLOG_SEC:
return out << "[SEC]";
case CLOG_WARN:
return out << "[WRN]";
case CLOG_ERROR:
return out << "[ERR]";
default:
return out << "[???]";
}
}
inline std::ostream& operator<<(std::ostream& out, const LogEntry& e)
{
return out << e.stamp << " " << e.name << " (" << e.rank << ") "
<< e.seq << " : "
<< e.channel << " " << e.prio << " " << e.msg;
}
template <> struct fmt::formatter<EntityName> : fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(const EntityName& e, FormatContext& ctx) {
return formatter<std::string_view>::format(e.to_str(), ctx);
}
};
template <> struct fmt::formatter<LogEntry> : fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(const LogEntry& e, FormatContext& ctx) {
return fmt::format_to(ctx.out(), "{} {} ({}) {} : {} {} {}",
e.stamp, e.name, e.rank, e.seq, e.channel, e.prio, e.msg);
}
};
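// e.g. fmt::format("{}", entry) produces the same layout as operator<< above.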
#endif
| 5,587 | 25.234742 | 94 | h |
null | ceph-main/src/common/MemoryModel.cc | #include "MemoryModel.h"
#include "include/compat.h"
#include "debug.h"
#if defined(__linux__)
#include <malloc.h>
#endif
#include <fstream>
#define dout_subsys ceph_subsys_
using namespace std;
MemoryModel::MemoryModel(CephContext *cct_)
: cct(cct_)
{
}
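// Fill *psnap by parsing /proc/self/status (VmPeak/VmSize/VmHWM/VmRSS/VmData/
// VmLib, all reported in kB) and by summing anonymous rw mappings from
// /proc/self/maps as a rough estimate of the heap.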
void MemoryModel::_sample(snap *psnap)
{
ifstream f;
f.open(PROCPREFIX "/proc/self/status");
if (!f.is_open()) {
ldout(cct, 0) << "check_memory_usage unable to open " PROCPREFIX "/proc/self/status" << dendl;
return;
}
while (!f.eof()) {
string line;
getline(f, line);
if (strncmp(line.c_str(), "VmSize:", 7) == 0)
psnap->size = atol(line.c_str() + 7);
else if (strncmp(line.c_str(), "VmRSS:", 6) == 0)
psnap->rss = atol(line.c_str() + 7);
else if (strncmp(line.c_str(), "VmHWM:", 6) == 0)
psnap->hwm = atol(line.c_str() + 7);
else if (strncmp(line.c_str(), "VmLib:", 6) == 0)
psnap->lib = atol(line.c_str() + 7);
else if (strncmp(line.c_str(), "VmPeak:", 7) == 0)
psnap->peak = atol(line.c_str() + 7);
else if (strncmp(line.c_str(), "VmData:", 7) == 0)
psnap->data = atol(line.c_str() + 7);
}
f.close();
f.open(PROCPREFIX "/proc/self/maps");
if (!f.is_open()) {
ldout(cct, 0) << "check_memory_usage unable to open " PROCPREFIX "/proc/self/maps" << dendl;
return;
}
long heap = 0;
while (f.is_open() && !f.eof()) {
string line;
getline(f, line);
//ldout(cct, 0) << "line is " << line << dendl;
const char *start = line.c_str();
const char *dash = start;
while (*dash && *dash != '-') dash++;
if (!*dash)
continue;
const char *end = dash + 1;
while (*end && *end != ' ') end++;
if (!*end)
continue;
unsigned long long as = strtoll(start, 0, 16);
unsigned long long ae = strtoll(dash+1, 0, 16);
//ldout(cct, 0) << std::hex << as << " to " << ae << std::dec << dendl;
end++;
const char *mode = end;
int skip = 4;
while (skip--) {
end++;
while (*end && *end != ' ') end++;
}
if (*end)
end++;
long size = ae - as;
//ldout(cct, 0) << "size " << size << " mode is '" << mode << "' end is '" << end << "'" << dendl;
/*
* anything 'rw' and anon is assumed to be heap.
*/
if (mode[0] == 'r' && mode[1] == 'w' && !*end)
heap += size;
}
psnap->heap = heap >> 10;
}
| 2,380 | 23.546392 | 102 | cc |
null | ceph-main/src/common/MemoryModel.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_MEMORYMODEL_H
#define CEPH_MEMORYMODEL_H
#include "include/common_fwd.h"
class MemoryModel {
public:
struct snap {
long peak;
long size;
long hwm;
long rss;
long data;
long lib;
long heap;
snap() : peak(0), size(0), hwm(0), rss(0), data(0), lib(0),
heap(0)
{}
long get_total() { return size; }
long get_rss() { return rss; }
long get_heap() { return heap; }
} last;
private:
CephContext *cct;
void _sample(snap *p);
public:
explicit MemoryModel(CephContext *cct);
void sample(snap *p = 0) {
_sample(&last);
if (p)
*p = last;
}
};
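/*
 * Minimal usage sketch (illustrative only; assumes a valid CephContext *cct):
 *
 *   MemoryModel mm(cct);
 *   MemoryModel::snap s;
 *   mm.sample(&s);             // parses /proc/self/{status,maps}
 *   long rss_kb = s.get_rss();
 *   long heap_kb = s.get_heap();
 */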
#endif
| 1,073 | 18.527273 | 71 | h |
null | ceph-main/src/common/OpQueue.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef OP_QUEUE_H
#define OP_QUEUE_H
#include "include/msgr.h"
#include <list>
#include <functional>
namespace ceph {
class Formatter;
}
/**
* Abstract class for all Op Queues
*
* In order to provide optimized code, be sure to declare all
* virtual functions as final in the derived class.
*/
template <typename T, typename K>
class OpQueue {
public:
// Ops of this class should be deleted immediately. If out isn't
// nullptr then items should be added to the front in
// front-to-back order. The typical strategy is to visit items in
// the queue in *reverse* order and to use *push_front* to insert
// them into out.
virtual void remove_by_class(K k, std::list<T> *out) = 0;
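  // (PrioritizedQueue<T,K>::SubQueue::remove_by_class in this directory is one
  // concrete example of that reverse-iterate / push_front strategy.)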
// Enqueue op in the back of the strict queue
virtual void enqueue_strict(K cl, unsigned priority, T &&item) = 0;
// Enqueue op in the front of the strict queue
virtual void enqueue_strict_front(K cl, unsigned priority, T &&item) = 0;
// Enqueue op in the back of the regular queue
virtual void enqueue(K cl, unsigned priority, unsigned cost, T &&item) = 0;
// Enqueue the op in the front of the regular queue
virtual void enqueue_front(
K cl, unsigned priority, unsigned cost, T &&item) = 0;
// Returns if the queue is empty
virtual bool empty() const = 0;
// Return an op to be dispatch
virtual T dequeue() = 0;
// Formatted output of the queue
virtual void dump(ceph::Formatter *f) const = 0;
// Human readable brief description of queue and relevant parameters
virtual void print(std::ostream &f) const = 0;
// Don't leak resources on destruction
virtual ~OpQueue() {};
};
#endif
| 2,063 | 26.891892 | 77 | h |
null | ceph-main/src/common/OutputDataSocket.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <poll.h>
#include <sys/un.h>
#include <unistd.h>
#include "common/OutputDataSocket.h"
#include "common/errno.h"
#include "common/debug.h"
#include "common/safe_io.h"
#include "include/compat.h"
#include "include/sock_compat.h"
// re-include our assert to clobber the system one; fix dout:
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_asok
#undef dout_prefix
#define dout_prefix *_dout << "asok(" << (void*)m_cct << ") "
using std::ostringstream;
/*
* UNIX domain sockets created by an application persist even after that
* application closes, unless they're explicitly unlinked. This is because the
* directory containing the socket keeps a reference to the socket.
*
* This code makes things a little nicer by unlinking those dead sockets when
* the application exits normally.
*/
static pthread_mutex_t cleanup_lock = PTHREAD_MUTEX_INITIALIZER;
static std::vector <const char*> cleanup_files;
static bool cleanup_atexit = false;
static void remove_cleanup_file(const char *file)
{
pthread_mutex_lock(&cleanup_lock);
VOID_TEMP_FAILURE_RETRY(unlink(file));
for (std::vector <const char*>::iterator i = cleanup_files.begin();
i != cleanup_files.end(); ++i) {
if (strcmp(file, *i) == 0) {
free((void*)*i);
cleanup_files.erase(i);
break;
}
}
pthread_mutex_unlock(&cleanup_lock);
}
static void remove_all_cleanup_files()
{
pthread_mutex_lock(&cleanup_lock);
for (std::vector <const char*>::iterator i = cleanup_files.begin();
i != cleanup_files.end(); ++i) {
VOID_TEMP_FAILURE_RETRY(unlink(*i));
free((void*)*i);
}
cleanup_files.clear();
pthread_mutex_unlock(&cleanup_lock);
}
static void add_cleanup_file(const char *file)
{
char *fname = strdup(file);
if (!fname)
return;
pthread_mutex_lock(&cleanup_lock);
cleanup_files.push_back(fname);
if (!cleanup_atexit) {
atexit(remove_all_cleanup_files);
cleanup_atexit = true;
}
pthread_mutex_unlock(&cleanup_lock);
}
OutputDataSocket::OutputDataSocket(CephContext *cct, uint64_t _backlog)
: m_cct(cct),
data_max_backlog(_backlog),
m_sock_fd(-1),
m_shutdown_rd_fd(-1),
m_shutdown_wr_fd(-1),
going_down(false),
data_size(0),
skipped(0)
{
}
OutputDataSocket::~OutputDataSocket()
{
shutdown();
}
/*
* This thread listens on the UNIX domain socket for incoming connections.
 * It only handles one connection at a time at the moment. Ideally all I/O
 * would be nonblocking so that we can implement sensible timeouts.
 * [TODO: make all I/O nonblocking]
*
* This thread also listens to m_shutdown_rd_fd. If there is any data sent to this
* pipe, the thread terminates itself gracefully, allowing the
* OutputDataSocketConfigObs class to join() it.
*/
#define PFL_SUCCESS ((void*)(intptr_t)0)
#define PFL_FAIL ((void*)(intptr_t)1)
std::string OutputDataSocket::create_shutdown_pipe(int *pipe_rd, int *pipe_wr)
{
int pipefd[2];
if (pipe_cloexec(pipefd, 0) < 0) {
int e = errno;
ostringstream oss;
oss << "OutputDataSocket::create_shutdown_pipe error: " << cpp_strerror(e);
return oss.str();
}
*pipe_rd = pipefd[0];
*pipe_wr = pipefd[1];
return "";
}
std::string OutputDataSocket::bind_and_listen(const std::string &sock_path, int *fd)
{
ldout(m_cct, 5) << "bind_and_listen " << sock_path << dendl;
struct sockaddr_un address;
if (sock_path.size() > sizeof(address.sun_path) - 1) {
ostringstream oss;
oss << "OutputDataSocket::bind_and_listen: "
<< "The UNIX domain socket path " << sock_path << " is too long! The "
<< "maximum length on this system is "
<< (sizeof(address.sun_path) - 1);
return oss.str();
}
int sock_fd = socket_cloexec(PF_UNIX, SOCK_STREAM, 0);
if (sock_fd < 0) {
int err = errno;
ostringstream oss;
oss << "OutputDataSocket::bind_and_listen: "
<< "failed to create socket: " << cpp_strerror(err);
return oss.str();
}
// FIPS zeroization audit 20191115: this memset is not security related.
memset(&address, 0, sizeof(struct sockaddr_un));
address.sun_family = AF_UNIX;
snprintf(address.sun_path, sizeof(address.sun_path),
"%s", sock_path.c_str());
if (::bind(sock_fd, (struct sockaddr*)&address,
sizeof(struct sockaddr_un)) != 0) {
int err = errno;
if (err == EADDRINUSE) {
// The old UNIX domain socket must still be there.
// Let's unlink it and try again.
VOID_TEMP_FAILURE_RETRY(unlink(sock_path.c_str()));
if (::bind(sock_fd, (struct sockaddr*)&address,
sizeof(struct sockaddr_un)) == 0) {
err = 0;
}
else {
err = errno;
}
}
if (err != 0) {
ostringstream oss;
oss << "OutputDataSocket::bind_and_listen: "
<< "failed to bind the UNIX domain socket to '" << sock_path
<< "': " << cpp_strerror(err);
close(sock_fd);
return oss.str();
}
}
if (listen(sock_fd, 5) != 0) {
int err = errno;
ostringstream oss;
oss << "OutputDataSocket::bind_and_listen: "
<< "failed to listen to socket: " << cpp_strerror(err);
close(sock_fd);
VOID_TEMP_FAILURE_RETRY(unlink(sock_path.c_str()));
return oss.str();
}
*fd = sock_fd;
return "";
}
void* OutputDataSocket::entry()
{
ldout(m_cct, 5) << "entry start" << dendl;
while (true) {
struct pollfd fds[2];
// FIPS zeroization audit 20191115: this memset is not security related.
memset(fds, 0, sizeof(fds));
fds[0].fd = m_sock_fd;
fds[0].events = POLLIN | POLLRDBAND;
fds[1].fd = m_shutdown_rd_fd;
fds[1].events = POLLIN | POLLRDBAND;
int ret = poll(fds, 2, -1);
if (ret < 0) {
int err = errno;
if (err == EINTR) {
continue;
}
lderr(m_cct) << "OutputDataSocket: poll(2) error: '"
<< cpp_strerror(err) << dendl;
return PFL_FAIL;
}
if (fds[0].revents & POLLIN) {
// Send out some data
do_accept();
}
if (fds[1].revents & POLLIN) {
// Parent wants us to shut down
return PFL_SUCCESS;
}
}
ldout(m_cct, 5) << "entry exit" << dendl;
return PFL_SUCCESS; // unreachable
}
bool OutputDataSocket::do_accept()
{
struct sockaddr_un address;
socklen_t address_length = sizeof(address);
ldout(m_cct, 30) << "OutputDataSocket: calling accept" << dendl;
int connection_fd = accept_cloexec(m_sock_fd, (struct sockaddr*) &address,
&address_length);
if (connection_fd < 0) {
int err = errno;
lderr(m_cct) << "OutputDataSocket: do_accept error: '"
<< cpp_strerror(err) << dendl;
return false;
}
ldout(m_cct, 30) << "OutputDataSocket: finished accept" << dendl;
handle_connection(connection_fd);
close_connection(connection_fd);
  return true;
}
void OutputDataSocket::handle_connection(int fd)
{
ceph::buffer::list bl;
m_lock.lock();
init_connection(bl);
m_lock.unlock();
if (bl.length()) {
/* need to special case the connection init buffer output, as it needs
* to be dumped before any data, including older data that was sent
* before the connection was established, or before we identified
* older connection was broken
*/
int ret = safe_write(fd, bl.c_str(), bl.length());
if (ret < 0) {
return;
}
}
int ret = dump_data(fd);
if (ret < 0)
return;
do {
{
std::unique_lock l(m_lock);
if (!going_down) {
cond.wait(l);
}
if (going_down) {
break;
}
}
ret = dump_data(fd);
} while (ret >= 0);
}
int OutputDataSocket::dump_data(int fd)
{
m_lock.lock();
auto l = std::move(data);
data.clear();
data_size = 0;
m_lock.unlock();
for (auto iter = l.begin(); iter != l.end(); ++iter) {
ceph::buffer::list& bl = *iter;
int ret = safe_write(fd, bl.c_str(), bl.length());
if (ret >= 0) {
ret = safe_write(fd, delim.c_str(), delim.length());
}
if (ret < 0) {
std::scoped_lock lock(m_lock);
for (; iter != l.end(); ++iter) {
ceph::buffer::list& bl = *iter;
data.push_back(bl);
data_size += bl.length();
}
return ret;
}
}
return 0;
}
void OutputDataSocket::close_connection(int fd)
{
VOID_TEMP_FAILURE_RETRY(close(fd));
}
bool OutputDataSocket::init(const std::string &path)
{
ldout(m_cct, 5) << "init " << path << dendl;
/* Set up things for the new thread */
std::string err;
int pipe_rd = -1, pipe_wr = -1;
err = create_shutdown_pipe(&pipe_rd, &pipe_wr);
if (!err.empty()) {
lderr(m_cct) << "OutputDataSocketConfigObs::init: error: " << err << dendl;
return false;
}
int sock_fd;
err = bind_and_listen(path, &sock_fd);
if (!err.empty()) {
lderr(m_cct) << "OutputDataSocketConfigObs::init: failed: " << err << dendl;
close(pipe_rd);
close(pipe_wr);
return false;
}
/* Create new thread */
m_sock_fd = sock_fd;
m_shutdown_rd_fd = pipe_rd;
m_shutdown_wr_fd = pipe_wr;
m_path = path;
create("out_data_socket");
add_cleanup_file(m_path.c_str());
return true;
}
void OutputDataSocket::shutdown()
{
m_lock.lock();
going_down = true;
cond.notify_all();
m_lock.unlock();
if (m_shutdown_wr_fd < 0)
return;
ldout(m_cct, 5) << "shutdown" << dendl;
// Send a byte to the shutdown pipe that the thread is listening to
char buf[1] = { 0x0 };
int ret = safe_write(m_shutdown_wr_fd, buf, sizeof(buf));
VOID_TEMP_FAILURE_RETRY(close(m_shutdown_wr_fd));
m_shutdown_wr_fd = -1;
if (ret == 0) {
join();
} else {
lderr(m_cct) << "OutputDataSocket::shutdown: failed to write "
"to thread shutdown pipe: error " << ret << dendl;
}
remove_cleanup_file(m_path.c_str());
m_path.clear();
}
void OutputDataSocket::append_output(ceph::buffer::list& bl)
{
std::lock_guard l(m_lock);
if (data_size + bl.length() > data_max_backlog) {
if (skipped % 100 == 0) {
ldout(m_cct, 0) << "dropping data output, max backlog reached (skipped=="
<< skipped << ")"
<< dendl;
skipped = 1;
} else
++skipped;
cond.notify_all();
return;
}
data.push_back(bl);
data_size += bl.length();
cond.notify_all();
}
| 10,488 | 24.708333 | 84 | cc |
null | ceph-main/src/common/OutputDataSocket.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_OUTPUTDATASOCKET_H
#define CEPH_COMMON_OUTPUTDATASOCKET_H
#include "common/ceph_mutex.h"
#include "common/Thread.h"
#include "include/common_fwd.h"
#include "include/buffer.h"
class OutputDataSocket : public Thread
{
public:
OutputDataSocket(CephContext *cct, uint64_t _backlog);
~OutputDataSocket() override;
bool init(const std::string &path);
void append_output(ceph::buffer::list& bl);
protected:
virtual void init_connection(ceph::buffer::list& bl) {}
void shutdown();
std::string create_shutdown_pipe(int *pipe_rd, int *pipe_wr);
std::string bind_and_listen(const std::string &sock_path, int *fd);
void *entry() override;
bool do_accept();
void handle_connection(int fd);
void close_connection(int fd);
int dump_data(int fd);
CephContext *m_cct;
uint64_t data_max_backlog;
std::string m_path;
int m_sock_fd;
int m_shutdown_rd_fd;
int m_shutdown_wr_fd;
bool going_down;
uint64_t data_size;
uint32_t skipped;
std::vector<ceph::buffer::list> data;
ceph::mutex m_lock = ceph::make_mutex("OutputDataSocket::m_lock");
ceph::condition_variable cond;
ceph::buffer::list delim;
};
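/*
 * Rough usage sketch (illustrative only; error handling omitted and the
 * socket path is just an example):
 *
 *   OutputDataSocket sock(cct, 100 << 20);        // ~100MB of backlog
 *   if (sock.init("/var/run/ceph/my.data.asok")) {
 *     ceph::buffer::list bl;
 *     bl.append("some payload");
 *     sock.append_output(bl);   // buffered until a reader connects
 *   }
 */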
#endif
| 1,586 | 22.338235 | 71 | h |
null | ceph-main/src/common/PluginRegistry.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "PluginRegistry.h"
#include "ceph_ver.h"
#include "common/ceph_context.h"
#include "common/errno.h"
#include "common/debug.h"
#include "include/dlfcn_compat.h"
#define PLUGIN_PREFIX "libceph_"
#define PLUGIN_SUFFIX SHARED_LIB_SUFFIX
#define PLUGIN_INIT_FUNCTION "__ceph_plugin_init"
#define PLUGIN_VERSION_FUNCTION "__ceph_plugin_version"
#define dout_subsys ceph_subsys_context
using std::map;
using std::string;
namespace ceph {
PluginRegistry::PluginRegistry(CephContext *cct) :
cct(cct),
loading(false),
disable_dlclose(false)
{
}
PluginRegistry::~PluginRegistry()
{
if (disable_dlclose)
return;
for (std::map<std::string,std::map<std::string, Plugin*> >::iterator i =
plugins.begin();
i != plugins.end();
++i) {
for (std::map<std::string,Plugin*>::iterator j = i->second.begin();
j != i->second.end(); ++j) {
void *library = j->second->library;
delete j->second;
dlclose(library);
}
}
}
int PluginRegistry::remove(const std::string& type, const std::string& name)
{
ceph_assert(ceph_mutex_is_locked(lock));
std::map<std::string,std::map<std::string,Plugin*> >::iterator i =
plugins.find(type);
if (i == plugins.end())
return -ENOENT;
std::map<std::string,Plugin*>::iterator j = i->second.find(name);
if (j == i->second.end())
return -ENOENT;
ldout(cct, 1) << __func__ << " " << type << " " << name << dendl;
void *library = j->second->library;
delete j->second;
dlclose(library);
i->second.erase(j);
if (i->second.empty())
plugins.erase(i);
return 0;
}
int PluginRegistry::add(const std::string& type,
const std::string& name,
Plugin* plugin)
{
ceph_assert(ceph_mutex_is_locked(lock));
if (plugins.count(type) &&
plugins[type].count(name)) {
return -EEXIST;
}
ldout(cct, 1) << __func__ << " " << type << " " << name
<< " " << plugin << dendl;
plugins[type][name] = plugin;
return 0;
}
Plugin *PluginRegistry::get_with_load(const std::string& type,
const std::string& name)
{
std::lock_guard l(lock);
Plugin* ret = get(type, name);
if (!ret) {
int err = load(type, name);
if (err == 0)
ret = get(type, name);
}
return ret;
}
Plugin *PluginRegistry::get(const std::string& type,
const std::string& name)
{
ceph_assert(ceph_mutex_is_locked(lock));
Plugin *ret = 0;
std::map<std::string,Plugin*>::iterator j;
std::map<std::string,map<std::string,Plugin*> >::iterator i =
plugins.find(type);
if (i == plugins.end())
goto out;
j = i->second.find(name);
if (j == i->second.end())
goto out;
ret = j->second;
out:
ldout(cct, 1) << __func__ << " " << type << " " << name
<< " = " << ret << dendl;
return ret;
}
int PluginRegistry::load(const std::string &type,
const std::string &name)
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 1) << __func__ << " " << type << " " << name << dendl;
std::string fname = cct->_conf.get_val<std::string>("plugin_dir") + "/" + type + "/" + PLUGIN_PREFIX
+ name + PLUGIN_SUFFIX;
void *library = dlopen(fname.c_str(), RTLD_NOW);
if (!library) {
string err1(dlerror());
// fall back to plugin_dir
fname = cct->_conf.get_val<std::string>("plugin_dir") + "/" + PLUGIN_PREFIX +
name + PLUGIN_SUFFIX;
library = dlopen(fname.c_str(), RTLD_NOW);
if (!library) {
lderr(cct) << __func__
<< " failed dlopen(): \"" << err1.c_str()
<< "\" or \"" << dlerror() << "\""
<< dendl;
return -EIO;
}
}
const char * (*code_version)() =
(const char *(*)())dlsym(library, PLUGIN_VERSION_FUNCTION);
if (code_version == NULL) {
lderr(cct) << __func__ << " code_version == NULL" << dlerror() << dendl;
return -EXDEV;
}
if (code_version() != string(CEPH_GIT_NICE_VER)) {
lderr(cct) << __func__ << " plugin " << fname << " version "
<< code_version() << " != expected "
<< CEPH_GIT_NICE_VER << dendl;
dlclose(library);
return -EXDEV;
}
int (*code_init)(CephContext *,
const std::string& type,
const std::string& name) =
(int (*)(CephContext *,
const std::string& type,
const std::string& name))dlsym(library, PLUGIN_INIT_FUNCTION);
if (code_init) {
int r = code_init(cct, type, name);
if (r != 0) {
lderr(cct) << __func__ << " " << fname << " "
<< PLUGIN_INIT_FUNCTION << "(" << cct
<< "," << type << "," << name << "): " << cpp_strerror(r)
<< dendl;
dlclose(library);
return r;
}
} else {
lderr(cct) << __func__ << " " << fname << " dlsym(" << PLUGIN_INIT_FUNCTION
<< "): " << dlerror() << dendl;
dlclose(library);
return -ENOENT;
}
Plugin *plugin = get(type, name);
if (plugin == 0) {
lderr(cct) << __func__ << " " << fname << " "
<< PLUGIN_INIT_FUNCTION << "()"
<< "did not register plugin type " << type << " name " << name
<< dendl;
dlclose(library);
return -EBADF;
}
plugin->library = library;
ldout(cct, 1) << __func__ << ": " << type << " " << name
<< " loaded and registered" << dendl;
return 0;
}
}
/*
int ErasureCodePluginRegistry::preload(const std::string &plugins,
const std::string &directory,
ostream &ss)
{
std::lock_guard l(lock);
list<string> plugins_list;
get_str_list(plugins, plugins_list);
for (list<string>::iterator i = plugins_list.begin();
i != plugins_list.end();
++i) {
ErasureCodePlugin *plugin;
int r = load(*i, directory, &plugin, ss);
if (r)
return r;
}
return 0;
}
*/
| 6,167 | 25.472103 | 102 | cc |
null | ceph-main/src/common/PluginRegistry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_COMMON_PLUGINREGISTRY_H
#define CEPH_COMMON_PLUGINREGISTRY_H
#include <map>
#include <string>
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
extern "C" {
const char *__ceph_plugin_version();
int __ceph_plugin_init(CephContext *cct,
const std::string& type,
const std::string& name);
}
namespace ceph {
class Plugin {
public:
void *library;
CephContext *cct;
explicit Plugin(CephContext *cct) : library(NULL), cct(cct) {}
virtual ~Plugin() {}
};
class PluginRegistry {
public:
CephContext *cct;
  ceph::mutex lock = ceph::make_mutex("PluginRegistry::lock");
bool loading;
bool disable_dlclose;
std::map<std::string,std::map<std::string,Plugin*> > plugins;
explicit PluginRegistry(CephContext *cct);
~PluginRegistry();
int add(const std::string& type, const std::string& name,
Plugin *factory);
int remove(const std::string& type, const std::string& name);
Plugin *get(const std::string& type, const std::string& name);
Plugin *get_with_load(const std::string& type, const std::string& name);
int load(const std::string& type,
const std::string& name);
int preload();
int preload(const std::string& type);
};
}
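/*
 * Sketch of what a loadable plugin is expected to export (illustrative only;
 * "ExamplePlugin" is a made-up placeholder and we assume the context exposes
 * its registry via cct->get_plugin_registry()). PluginRegistry::load()
 * dlopen()s libceph_<name>, checks that __ceph_plugin_version() matches
 * CEPH_GIT_NICE_VER, and then expects __ceph_plugin_init() to register the
 * plugin so that a subsequent get(type, name) succeeds:
 *
 *   const char *__ceph_plugin_version() { return CEPH_GIT_NICE_VER; }
 *
 *   int __ceph_plugin_init(CephContext *cct,
 *                          const std::string& type,
 *                          const std::string& name) {
 *     return cct->get_plugin_registry()->add(type, name,
 *                                            new ExamplePlugin(cct));
 *   }
 */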
#endif
| 1,844 | 25.73913 | 76 | h |
null | ceph-main/src/common/Preforker.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_COMMON_PREFORKER_H
#define CEPH_COMMON_PREFORKER_H
#include <signal.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <unistd.h>
#include <sstream>
#include "common/errno.h"
#include "common/safe_io.h"
#include "include/ceph_assert.h"
#include "include/compat.h"
#include "include/sock_compat.h"
/**
* pre-fork fork/daemonize helper class
*
* Hide the details of letting a process fork early, do a bunch of
* initialization work that may spam stdout or exit with an error, and
* then daemonize. The exit() method will either exit directly (if we
* haven't forked) or pass a message to the parent with the error if
* we have.
*/
class Preforker {
pid_t childpid;
bool forked;
int fd[2]; // parent's, child's
public:
Preforker()
: childpid(0),
forked(false)
{}
int prefork(std::string &err) {
ceph_assert(!forked);
std::ostringstream oss;
int r = socketpair_cloexec(AF_UNIX, SOCK_STREAM, 0, fd);
if (r < 0) {
int e = errno;
oss << "[" << getpid() << "]: unable to create socketpair: " << cpp_strerror(e);
err = oss.str();
return (errno = e, -1);
}
struct sigaction sa;
sa.sa_handler = SIG_IGN;
sigemptyset(&sa.sa_mask);
sa.sa_flags = 0;
if (sigaction(SIGHUP, &sa, nullptr) != 0) {
int e = errno;
oss << "[" << getpid() << "]: unable to ignore SIGHUP: " << cpp_strerror(e);
err = oss.str();
return (errno = e, -1);
}
forked = true;
childpid = fork();
if (childpid < 0) {
int e = errno;
oss << "[" << getpid() << "]: unable to fork: " << cpp_strerror(e);
err = oss.str();
return (errno = e, -1);
}
if (is_child()) {
::close(fd[0]);
} else {
::close(fd[1]);
}
return 0;
}
int get_signal_fd() const {
return forked ? fd[1] : 0;
}
bool is_child() {
return childpid == 0;
}
bool is_parent() {
return childpid != 0;
}
int parent_wait(std::string &err_msg) {
ceph_assert(forked);
int r = -1;
std::ostringstream oss;
int err = safe_read_exact(fd[0], &r, sizeof(r));
if (err == 0 && r == -1) {
// daemonize
::close(0);
::close(1);
::close(2);
} else if (err) {
oss << "[" << getpid() << "]: " << cpp_strerror(err);
} else {
// wait for child to exit
int status;
err = waitpid(childpid, &status, 0);
if (err < 0) {
oss << "[" << getpid() << "]" << " waitpid error: " << cpp_strerror(err);
} else if (WIFSIGNALED(status)) {
oss << "[" << getpid() << "]" << " exited with a signal";
} else if (!WIFEXITED(status)) {
oss << "[" << getpid() << "]" << " did not exit normally";
} else {
err = WEXITSTATUS(status);
if (err != 0)
oss << "[" << getpid() << "]" << " returned exit_status " << cpp_strerror(err);
}
}
err_msg = oss.str();
return err;
}
int signal_exit(int r) {
if (forked) {
/* If we get an error here, it's too late to do anything reasonable about it. */
[[maybe_unused]] auto n = safe_write(fd[1], &r, sizeof(r));
}
return r;
}
void exit(int r) {
if (is_child())
signal_exit(r);
::exit(r);
}
void daemonize() {
ceph_assert(forked);
static int r = -1;
int r2 = ::write(fd[1], &r, sizeof(r));
r += r2; // make the compiler shut up about the unused return code from ::write(2).
}
};
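/*
 * Typical call sequence (illustrative sketch only; real daemons interleave
 * option parsing, logging setup, etc. between these steps):
 *
 *   Preforker forker;
 *   std::string err;
 *   if (forker.prefork(err) < 0) {
 *     std::cerr << err << std::endl;
 *     return 1;
 *   }
 *   if (forker.is_parent()) {
 *     return forker.parent_wait(err);  // wait for child to daemonize or die
 *   }
 *   // child: perform initialization that may fail, then detach
 *   forker.daemonize();
 *   // ... run the daemon ...
 *   forker.exit(0);
 */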
#endif
| 3,578 | 23.682759 | 88 | h |
null | ceph-main/src/common/PrioritizedQueue.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef PRIORITY_QUEUE_H
#define PRIORITY_QUEUE_H
#include "include/ceph_assert.h"
#include "common/Formatter.h"
#include "common/OpQueue.h"
/**
* Manages queue for normal and strict priority items
*
* On dequeue, the queue will select the lowest priority queue
* such that the q has bucket > cost of front queue item.
*
* If there is no such queue, we choose the next queue item for
* the highest priority queue.
*
* Before returning a dequeued item, we place into each bucket
* cost * (priority/total_priority) tokens.
*
* enqueue_strict and enqueue_strict_front queue items into queues
* which are serviced in strict priority order before items queued
* with enqueue and enqueue_front
*
* Within a priority class, we schedule round robin based on the class
* of type K used to enqueue items. e.g. you could use entity_inst_t
* to provide fairness for different clients.
*/
template <typename T, typename K>
class PrioritizedQueue : public OpQueue <T, K> {
int64_t total_priority;
int64_t max_tokens_per_subqueue;
int64_t min_cost;
typedef std::list<std::pair<unsigned, T> > ListPairs;
struct SubQueue {
private:
typedef std::map<K, ListPairs> Classes;
Classes q;
unsigned tokens, max_tokens;
int64_t size;
typename Classes::iterator cur;
public:
SubQueue(const SubQueue &other)
: q(other.q),
tokens(other.tokens),
max_tokens(other.max_tokens),
size(other.size),
cur(q.begin()) {}
SubQueue()
: tokens(0),
max_tokens(0),
size(0), cur(q.begin()) {}
void set_max_tokens(unsigned mt) {
max_tokens = mt;
}
unsigned get_max_tokens() const {
return max_tokens;
}
unsigned num_tokens() const {
return tokens;
}
void put_tokens(unsigned t) {
tokens += t;
if (tokens > max_tokens) {
tokens = max_tokens;
}
}
void take_tokens(unsigned t) {
if (tokens > t) {
tokens -= t;
} else {
tokens = 0;
}
}
void enqueue(K cl, unsigned cost, T &&item) {
q[cl].push_back(std::make_pair(cost, std::move(item)));
if (cur == q.end())
cur = q.begin();
size++;
}
void enqueue_front(K cl, unsigned cost, T &&item) {
q[cl].push_front(std::make_pair(cost, std::move(item)));
if (cur == q.end())
cur = q.begin();
size++;
}
std::pair<unsigned, T> &front() const {
ceph_assert(!(q.empty()));
ceph_assert(cur != q.end());
return cur->second.front();
}
T pop_front() {
ceph_assert(!(q.empty()));
ceph_assert(cur != q.end());
T ret = std::move(cur->second.front().second);
cur->second.pop_front();
if (cur->second.empty()) {
q.erase(cur++);
} else {
++cur;
}
if (cur == q.end()) {
cur = q.begin();
}
size--;
return ret;
}
unsigned length() const {
ceph_assert(size >= 0);
return (unsigned)size;
}
bool empty() const {
return q.empty();
}
void remove_by_class(K k, std::list<T> *out) {
typename Classes::iterator i = q.find(k);
if (i == q.end()) {
return;
}
size -= i->second.size();
if (i == cur) {
++cur;
}
if (out) {
for (typename ListPairs::reverse_iterator j =
i->second.rbegin();
j != i->second.rend();
++j) {
out->push_front(std::move(j->second));
}
}
q.erase(i);
if (cur == q.end()) {
cur = q.begin();
}
}
void dump(ceph::Formatter *f) const {
f->dump_int("tokens", tokens);
f->dump_int("max_tokens", max_tokens);
f->dump_int("size", size);
f->dump_int("num_keys", q.size());
if (!empty()) {
f->dump_int("first_item_cost", front().first);
}
}
};
typedef std::map<unsigned, SubQueue> SubQueues;
SubQueues high_queue;
SubQueues queue;
SubQueue *create_queue(unsigned priority) {
typename SubQueues::iterator p = queue.find(priority);
if (p != queue.end()) {
return &p->second;
}
total_priority += priority;
SubQueue *sq = &queue[priority];
sq->set_max_tokens(max_tokens_per_subqueue);
return sq;
}
void remove_queue(unsigned priority) {
ceph_assert(queue.count(priority));
queue.erase(priority);
total_priority -= priority;
ceph_assert(total_priority >= 0);
}
void distribute_tokens(unsigned cost) {
if (total_priority == 0) {
return;
}
for (typename SubQueues::iterator i = queue.begin();
i != queue.end();
++i) {
i->second.put_tokens(((i->first * cost) / total_priority) + 1);
}
}
public:
PrioritizedQueue(unsigned max_per, unsigned min_c)
: total_priority(0),
max_tokens_per_subqueue(max_per),
min_cost(min_c)
{}
unsigned length() const {
unsigned total = 0;
for (typename SubQueues::const_iterator i = queue.begin();
i != queue.end();
++i) {
ceph_assert(i->second.length());
total += i->second.length();
}
for (typename SubQueues::const_iterator i = high_queue.begin();
i != high_queue.end();
++i) {
ceph_assert(i->second.length());
total += i->second.length();
}
return total;
}
void remove_by_class(K k, std::list<T> *out = 0) final {
for (typename SubQueues::iterator i = queue.begin();
i != queue.end();
) {
i->second.remove_by_class(k, out);
if (i->second.empty()) {
unsigned priority = i->first;
++i;
remove_queue(priority);
} else {
++i;
}
}
for (typename SubQueues::iterator i = high_queue.begin();
i != high_queue.end();
) {
i->second.remove_by_class(k, out);
if (i->second.empty()) {
high_queue.erase(i++);
} else {
++i;
}
}
}
void enqueue_strict(K cl, unsigned priority, T&& item) final {
high_queue[priority].enqueue(cl, 0, std::move(item));
}
void enqueue_strict_front(K cl, unsigned priority, T&& item) final {
high_queue[priority].enqueue_front(cl, 0, std::move(item));
}
void enqueue(K cl, unsigned priority, unsigned cost, T&& item) final {
if (cost < min_cost)
cost = min_cost;
if (cost > max_tokens_per_subqueue)
cost = max_tokens_per_subqueue;
create_queue(priority)->enqueue(cl, cost, std::move(item));
}
void enqueue_front(K cl, unsigned priority, unsigned cost, T&& item) final {
if (cost < min_cost)
cost = min_cost;
if (cost > max_tokens_per_subqueue)
cost = max_tokens_per_subqueue;
create_queue(priority)->enqueue_front(cl, cost, std::move(item));
}
bool empty() const final {
ceph_assert(total_priority >= 0);
ceph_assert((total_priority == 0) || !(queue.empty()));
return queue.empty() && high_queue.empty();
}
T dequeue() final {
ceph_assert(!empty());
if (!(high_queue.empty())) {
T ret = std::move(high_queue.rbegin()->second.front().second);
high_queue.rbegin()->second.pop_front();
if (high_queue.rbegin()->second.empty()) {
high_queue.erase(high_queue.rbegin()->first);
}
return ret;
}
// if there are multiple buckets/subqueues with sufficient tokens,
// we behave like a strict priority queue among all subqueues that
// are eligible to run.
for (typename SubQueues::iterator i = queue.begin();
i != queue.end();
++i) {
ceph_assert(!(i->second.empty()));
if (i->second.front().first < i->second.num_tokens()) {
unsigned cost = i->second.front().first;
i->second.take_tokens(cost);
T ret = std::move(i->second.front().second);
i->second.pop_front();
if (i->second.empty()) {
remove_queue(i->first);
}
distribute_tokens(cost);
return ret;
}
}
// if no subqueues have sufficient tokens, we behave like a strict
// priority queue.
unsigned cost = queue.rbegin()->second.front().first;
T ret = std::move(queue.rbegin()->second.front().second);
queue.rbegin()->second.pop_front();
if (queue.rbegin()->second.empty()) {
remove_queue(queue.rbegin()->first);
}
distribute_tokens(cost);
return ret;
}
void dump(ceph::Formatter *f) const final {
f->dump_int("total_priority", total_priority);
f->dump_int("max_tokens_per_subqueue", max_tokens_per_subqueue);
f->dump_int("min_cost", min_cost);
f->open_array_section("high_queues");
for (typename SubQueues::const_iterator p = high_queue.begin();
p != high_queue.end();
++p) {
f->open_object_section("subqueue");
f->dump_int("priority", p->first);
p->second.dump(f);
f->close_section();
}
f->close_section();
f->open_array_section("queues");
for (typename SubQueues::const_iterator p = queue.begin();
p != queue.end();
++p) {
f->open_object_section("subqueue");
f->dump_int("priority", p->first);
p->second.dump(f);
f->close_section();
}
f->close_section();
}
void print(std::ostream &ostream) const final {
ostream << "PrioritizedQueue";
}
};
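/*
 * Minimal usage sketch (illustrative only; int items keyed by a client id):
 *
 *   PrioritizedQueue<int, unsigned> q(4194304, 1024);  // max tokens, min cost
 *   q.enqueue(1u, 63, 1024, 42);     // client 1, priority 63, cost 1024
 *   q.enqueue_strict(2u, 127, 7);    // strict items bypass token accounting
 *   while (!q.empty()) {
 *     int item = q.dequeue();        // strict queues are drained first
 *     // ... process item ...
 *   }
 */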
#endif
| 9,370 | 25.546742 | 78 | h |
null | ceph-main/src/common/PriorityCache.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "PriorityCache.h"
#include "common/dout.h"
#include "perfglue/heap_profiler.h"
#define dout_context cct
#define dout_subsys ceph_subsys_prioritycache
#undef dout_prefix
#define dout_prefix *_dout << "prioritycache "
namespace PriorityCache
{
int64_t get_chunk(uint64_t usage, uint64_t total_bytes)
{
uint64_t chunk = total_bytes;
  // Round total_bytes up to the nearest power of 2
chunk -= 1;
chunk |= chunk >> 1;
chunk |= chunk >> 2;
chunk |= chunk >> 4;
chunk |= chunk >> 8;
chunk |= chunk >> 16;
chunk |= chunk >> 32;
chunk += 1;
// shrink it to 1/256 of the rounded up cache size
chunk /= 256;
// bound the chunk size to be between 4MB and 64MB
chunk = (chunk > 4ul*1024*1024) ? chunk : 4ul*1024*1024;
chunk = (chunk < 64ul*1024*1024) ? chunk : 64ul*1024*1024;
/* FIXME: Hardcoded to force get_chunk to never drop below 64MB.
* if RocksDB is used, it's a good idea to have N MB of headroom where
* N is the target_file_size_base value. RocksDB will read SST files
* into the block cache during compaction which potentially can force out
* all existing cached data. Once compaction is finished, the SST data is
* released leaving an empty cache. Having enough headroom to absorb
* compaction reads allows the kv cache grow even during extremely heavy
* compaction workloads.
*/
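  // Illustrative example: with total_bytes = 4GiB the chunk works out to
  // 16MiB, so usage = 100MiB yields (100MiB + 64MiB of headroom) rounded up
  // to the next 16MiB boundary, i.e. 176MiB.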
uint64_t val = usage + 64*1024*1024;
uint64_t r = (val) % chunk;
if (r > 0)
val = val + chunk - r;
return val;
}
Manager::Manager(CephContext *c,
uint64_t min,
uint64_t max,
uint64_t target,
bool reserve_extra,
const std::string& name) :
cct(c),
caches{},
min_mem(min),
max_mem(max),
target_mem(target),
tuned_mem(min),
reserve_extra(reserve_extra),
name(name.empty() ? "prioritycache" : name)
{
PerfCountersBuilder b(cct, this->name, MallocStats::M_FIRST, MallocStats::M_LAST);
b.add_u64(MallocStats::M_TARGET_BYTES, "target_bytes",
"target process memory usage in bytes", "t",
PerfCountersBuilder::PRIO_INTERESTING, unit_t(UNIT_BYTES));
b.add_u64(MallocStats::M_MAPPED_BYTES, "mapped_bytes",
"total bytes mapped by the process", "m",
PerfCountersBuilder::PRIO_INTERESTING, unit_t(UNIT_BYTES));
b.add_u64(MallocStats::M_UNMAPPED_BYTES, "unmapped_bytes",
"unmapped bytes that the kernel has yet to reclaim", "u",
PerfCountersBuilder::PRIO_INTERESTING, unit_t(UNIT_BYTES));
b.add_u64(MallocStats::M_HEAP_BYTES, "heap_bytes",
"aggregate bytes in use by the heap", "h",
PerfCountersBuilder::PRIO_INTERESTING, unit_t(UNIT_BYTES));
b.add_u64(MallocStats::M_CACHE_BYTES, "cache_bytes",
"current memory available for caches.", "c",
PerfCountersBuilder::PRIO_INTERESTING, unit_t(UNIT_BYTES));
logger = b.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
tune_memory();
}
Manager::~Manager()
{
clear();
cct->get_perfcounters_collection()->remove(logger);
delete logger;
}
void Manager::tune_memory()
{
size_t heap_size = 0;
size_t unmapped = 0;
uint64_t mapped = 0;
ceph_heap_release_free_memory();
ceph_heap_get_numeric_property("generic.heap_size", &heap_size);
ceph_heap_get_numeric_property("tcmalloc.pageheap_unmapped_bytes", &unmapped);
mapped = heap_size - unmapped;
uint64_t new_size = tuned_mem;
new_size = (new_size < max_mem) ? new_size : max_mem;
new_size = (new_size > min_mem) ? new_size : min_mem;
// Approach the min/max slowly, but bounce away quickly.
if ((uint64_t) mapped < target_mem) {
double ratio = 1 - ((double) mapped / target_mem);
new_size += ratio * (max_mem - new_size);
} else {
double ratio = 1 - ((double) target_mem / mapped);
new_size -= ratio * (new_size - min_mem);
}
ldout(cct, 5) << __func__
<< " target: " << target_mem
<< " mapped: " << mapped
<< " unmapped: " << unmapped
<< " heap: " << heap_size
<< " old mem: " << tuned_mem
<< " new mem: " << new_size << dendl;
tuned_mem = new_size;
logger->set(MallocStats::M_TARGET_BYTES, target_mem);
logger->set(MallocStats::M_MAPPED_BYTES, mapped);
logger->set(MallocStats::M_UNMAPPED_BYTES, unmapped);
logger->set(MallocStats::M_HEAP_BYTES, heap_size);
logger->set(MallocStats::M_CACHE_BYTES, new_size);
}
void Manager::insert(const std::string& name, std::shared_ptr<PriCache> c,
bool enable_perf_counters)
{
ceph_assert(!caches.count(name));
ceph_assert(!indexes.count(name));
caches.emplace(name, c);
if (!enable_perf_counters) {
return;
}
// TODO: If we ever assign more than
// PERF_COUNTER_MAX_BOUND - PERF_COUNTER_LOWER_BOUND perf counters for
// priority caching we could run out of slots. Recycle them some day?
// Also note that start and end are *exclusive*.
int start = cur_index++;
int end = cur_index + Extra::E_LAST + 1;
ceph_assert(end < PERF_COUNTER_MAX_BOUND);
indexes.emplace(name, std::vector<int>(Extra::E_LAST + 1));
PerfCountersBuilder b(cct, this->name + ":" + name, start, end);
b.add_u64(cur_index + Priority::PRI0, "pri0_bytes",
"bytes allocated to pri0", "p0",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Priority::PRI1, "pri1_bytes",
"bytes allocated to pri1", "p1",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Priority::PRI2, "pri2_bytes",
"bytes allocated to pri2", "p2",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Priority::PRI3, "pri3_bytes",
"bytes allocated to pri3", "p3",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Priority::PRI4, "pri4_bytes",
"bytes allocated to pri4", "p4",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Priority::PRI5, "pri5_bytes",
"bytes allocated to pri5", "p5",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Priority::PRI6, "pri6_bytes",
"bytes allocated to pri6", "p6",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Priority::PRI7, "pri7_bytes",
"bytes allocated to pri7", "p7",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Priority::PRI8, "pri8_bytes",
"bytes allocated to pri8", "p8",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Priority::PRI9, "pri9_bytes",
"bytes allocated to pri9", "p9",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Priority::PRI10, "pri10_bytes",
"bytes allocated to pri10", "p10",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Priority::PRI11, "pri11_bytes",
"bytes allocated to pri11", "p11",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Extra::E_RESERVED, "reserved_bytes",
"bytes reserved for future growth.", "r",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
b.add_u64(cur_index + Extra::E_COMMITTED, "committed_bytes",
"total bytes committed,", "c",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
for (int i = 0; i < Extra::E_LAST+1; i++) {
indexes[name][i] = cur_index + i;
}
auto l = b.create_perf_counters();
loggers.emplace(name, l);
cct->get_perfcounters_collection()->add(l);
cur_index = end;
}
void Manager::erase(const std::string& name)
{
auto li = loggers.find(name);
if (li != loggers.end()) {
cct->get_perfcounters_collection()->remove(li->second);
delete li->second;
loggers.erase(li);
}
indexes.erase(name);
caches.erase(name);
}
void Manager::clear()
{
auto li = loggers.begin();
while (li != loggers.end()) {
cct->get_perfcounters_collection()->remove(li->second);
delete li->second;
li = loggers.erase(li);
}
indexes.clear();
caches.clear();
}
void Manager::balance()
{
int64_t mem_avail = tuned_mem;
// Each cache is going to get a little extra from get_chunk, so shrink the
// available memory here to compensate.
if (reserve_extra) {
mem_avail -= get_chunk(1, tuned_mem) * caches.size();
}
if (mem_avail < 0) {
// There's so little memory available that just assigning a chunk per
// cache pushes us over the limit. Set mem_avail to 0 and continue to
// ensure each priority's byte counts are zeroed in balance_priority.
mem_avail = 0;
}
// Assign memory for each priority level
for (int i = 0; i < Priority::LAST+1; i++) {
ldout(cct, 10) << __func__ << " assigning cache bytes for PRI: " << i << dendl;
auto pri = static_cast<Priority>(i);
balance_priority(&mem_avail, pri);
// Update the per-priority perf counters
for (auto &l : loggers) {
auto it = caches.find(l.first);
ceph_assert(it != caches.end());
auto bytes = it->second->get_cache_bytes(pri);
l.second->set(indexes[it->first][pri], bytes);
}
}
// assert if we assigned more memory than is available.
ceph_assert(mem_avail >= 0);
for (auto &l : loggers) {
auto it = caches.find(l.first);
ceph_assert(it != caches.end());
// Commit the new cache size
int64_t committed = it->second->commit_cache_size(tuned_mem);
// Update the perf counters
int64_t alloc = it->second->get_cache_bytes();
l.second->set(indexes[it->first][Extra::E_RESERVED], committed - alloc);
l.second->set(indexes[it->first][Extra::E_COMMITTED], committed);
}
}
void Manager::shift_bins()
{
for (auto &l : loggers) {
auto it = caches.find(l.first);
it->second->shift_bins();
}
}
void Manager::balance_priority(int64_t *mem_avail, Priority pri)
{
std::unordered_map<std::string, std::shared_ptr<PriCache>> tmp_caches = caches;
double cur_ratios = 0;
double new_ratios = 0;
uint64_t round = 0;
// First, zero this priority's bytes, sum the initial ratios.
for (auto it = caches.begin(); it != caches.end(); it++) {
it->second->set_cache_bytes(pri, 0);
cur_ratios += it->second->get_cache_ratio();
}
  // For other priorities, loop until caches are satisfied or we run out of
// memory (stop if we can't guarantee a full byte allocation).
while (!tmp_caches.empty() && *mem_avail > static_cast<int64_t>(tmp_caches.size())) {
uint64_t total_assigned = 0;
for (auto it = tmp_caches.begin(); it != tmp_caches.end();) {
int64_t cache_wants = it->second->request_cache_bytes(pri, tuned_mem);
// Usually the ratio should be set to the fraction of the current caches'
// assigned ratio compared to the total ratio of all caches that still
// want memory. There is a special case where the only caches left are
// all assigned 0% ratios but still want memory. In that case, give
// them an equal shot at the remaining memory for this priority.
double ratio = 1.0 / tmp_caches.size();
if (cur_ratios > 0) {
ratio = it->second->get_cache_ratio() / cur_ratios;
}
int64_t fair_share = static_cast<int64_t>(*mem_avail * ratio);
ldout(cct, 10) << __func__ << " " << it->first
<< " pri: " << (int) pri
<< " round: " << round
<< " wanted: " << cache_wants
<< " ratio: " << it->second->get_cache_ratio()
<< " cur_ratios: " << cur_ratios
<< " fair_share: " << fair_share
<< " mem_avail: " << *mem_avail
<< dendl;
if (cache_wants > fair_share) {
// If we want too much, take what we can get but stick around for more
it->second->add_cache_bytes(pri, fair_share);
total_assigned += fair_share;
new_ratios += it->second->get_cache_ratio();
++it;
} else {
// Otherwise assign only what we want
if (cache_wants > 0) {
it->second->add_cache_bytes(pri, cache_wants);
total_assigned += cache_wants;
}
// Either the cache didn't want anything or got what it wanted, so
// remove it from the tmp list.
it = tmp_caches.erase(it);
}
}
// Reset the ratios
*mem_avail -= total_assigned;
cur_ratios = new_ratios;
new_ratios = 0;
++round;
}
// If this is the last priority, divide up any remaining memory based
// solely on the ratios.
if (pri == Priority::LAST) {
uint64_t total_assigned = 0;
for (auto it = caches.begin(); it != caches.end(); it++) {
double ratio = it->second->get_cache_ratio();
int64_t fair_share = static_cast<int64_t>(*mem_avail * ratio);
it->second->set_cache_bytes(Priority::LAST, fair_share);
total_assigned += fair_share;
}
*mem_avail -= total_assigned;
return;
}
}
PriCache::~PriCache()
{
}
}
| 14,280 | 34.088452 | 89 | cc |
null | ceph-main/src/common/PriorityCache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_PRIORITY_CACHE_H
#define CEPH_PRIORITY_CACHE_H
#include <stdint.h>
#include <string>
#include <vector>
#include <memory>
#include <unordered_map>
#include "common/perf_counters.h"
#include "include/ceph_assert.h"
namespace PriorityCache {
// Reserve 16384 slots for PriorityCache perf counters
const int PERF_COUNTER_LOWER_BOUND = 1073741824;
const int PERF_COUNTER_MAX_BOUND = 1073758208;
enum MallocStats {
M_FIRST = PERF_COUNTER_LOWER_BOUND,
M_TARGET_BYTES,
M_MAPPED_BYTES,
M_UNMAPPED_BYTES,
M_HEAP_BYTES,
M_CACHE_BYTES,
M_LAST,
};
enum Priority {
PRI0,
PRI1,
PRI2,
PRI3,
PRI4,
PRI5,
PRI6,
PRI7,
PRI8,
PRI9,
PRI10,
PRI11,
LAST = PRI11,
};
enum Extra {
E_RESERVED = Priority::LAST+1,
E_COMMITTED,
E_LAST = E_COMMITTED,
};
int64_t get_chunk(uint64_t usage, uint64_t total_bytes);
struct PriCache {
virtual ~PriCache();
/* Ask the cache to request memory for the given priority. Note that the
* cache may ultimately be allocated less memory than it requests here.
*/
virtual int64_t request_cache_bytes(PriorityCache::Priority pri, uint64_t total_cache) const = 0;
// Get the number of bytes currently allocated to the given priority.
virtual int64_t get_cache_bytes(PriorityCache::Priority pri) const = 0;
// Get the number of bytes currently allocated to all priorities.
virtual int64_t get_cache_bytes() const = 0;
// Allocate bytes for a given priority.
virtual void set_cache_bytes(PriorityCache::Priority pri, int64_t bytes) = 0;
// Allocate additional bytes for a given priority.
virtual void add_cache_bytes(PriorityCache::Priority pri, int64_t bytes) = 0;
/* Commit the current number of bytes allocated to the cache. Space is
* allocated in chunks based on the allocation size and current total size
* of memory available for caches. */
virtual int64_t commit_cache_size(uint64_t total_cache) = 0;
    /* Get the current number of bytes allocated to the cache. This may be
     * larger than the value returned by get_cache_bytes as it includes extra
     * space for future growth. */
virtual int64_t get_committed_size() const = 0;
// Get the ratio of available memory this cache should target.
virtual double get_cache_ratio() const = 0;
// Set the ratio of available memory this cache should target.
virtual void set_cache_ratio(double ratio) = 0;
// Get the name of this cache.
virtual std::string get_cache_name() const = 0;
// Rotate the bins
virtual void shift_bins() = 0;
// Import user bins (from PRI1 to LAST-1)
virtual void import_bins(const std::vector<uint64_t> &bins) = 0;
// Set bins (PRI0 and LAST should be ignored)
virtual void set_bins(PriorityCache::Priority pri, uint64_t end_bin) = 0;
// Get bins
virtual uint64_t get_bins(PriorityCache::Priority pri) const = 0;
};
class Manager {
CephContext* cct = nullptr;
PerfCounters* logger;
std::unordered_map<std::string, PerfCounters*> loggers;
std::unordered_map<std::string, std::vector<int>> indexes;
std::unordered_map<std::string, std::shared_ptr<PriCache>> caches;
// Start perf counter slots after the malloc stats.
int cur_index = MallocStats::M_LAST;
uint64_t min_mem = 0;
uint64_t max_mem = 0;
uint64_t target_mem = 0;
uint64_t tuned_mem = 0;
bool reserve_extra;
std::string name;
public:
Manager(CephContext *c, uint64_t min, uint64_t max, uint64_t target,
bool reserve_extra, const std::string& name = std::string());
~Manager();
void set_min_memory(uint64_t min) {
min_mem = min;
}
void set_max_memory(uint64_t max) {
max_mem = max;
}
void set_target_memory(uint64_t target) {
target_mem = target;
}
uint64_t get_tuned_mem() const {
return tuned_mem;
}
void insert(const std::string& name, const std::shared_ptr<PriCache> c,
bool enable_perf_counters);
void erase(const std::string& name);
void clear();
void tune_memory();
void balance();
void shift_bins();
private:
void balance_priority(int64_t *mem_avail, Priority pri);
};
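  /* Illustrative usage sketch (not part of the original header; names such
   * as my_cache are hypothetical). A typical owner constructs a Manager with
   * its memory bounds, registers PriCache implementations, and periodically
   * lets the manager rebalance:
   *
   *   PriorityCache::Manager mgr(cct, min_bytes, max_bytes, target_bytes,
   *                              true, "my_manager");
   *   mgr.insert("my_cache", my_cache, true);  // my_cache: shared_ptr<PriCache>
   *   // on a heartbeat/timer:
   *   mgr.tune_memory();
   *   mgr.balance();
   *   // when shutting the cache down:
   *   mgr.erase("my_cache");
   */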
}
#endif
| 4,692 | 27.969136 | 101 | h |
null | ceph-main/src/common/QueueRing.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef QUEUE_RING_H
#define QUEUE_RING_H
#include "common/ceph_mutex.h"
#include <list>
#include <atomic>
#include <vector>
template <class T>
class QueueRing {
struct QueueBucket {
ceph::mutex lock = ceph::make_mutex("QueueRing::QueueBucket::lock");
ceph::condition_variable cond;
typename std::list<T> entries;
QueueBucket() {}
QueueBucket(const QueueBucket& rhs) {
entries = rhs.entries;
}
void enqueue(const T& entry) {
lock.lock();
if (entries.empty()) {
cond.notify_all();
}
entries.push_back(entry);
lock.unlock();
}
void dequeue(T *entry) {
std::unique_lock l(lock);
while (entries.empty()) {
cond.wait(l);
      }
ceph_assert(!entries.empty());
*entry = entries.front();
entries.pop_front();
};
};
std::vector<QueueBucket> buckets;
int num_buckets;
std::atomic<int64_t> cur_read_bucket = { 0 };
std::atomic<int64_t> cur_write_bucket = { 0 };
public:
QueueRing(int n) : buckets(n), num_buckets(n) {
}
void enqueue(const T& entry) {
buckets[++cur_write_bucket % num_buckets].enqueue(entry);
};
void dequeue(T *entry) {
buckets[++cur_read_bucket % num_buckets].dequeue(entry);
}
};
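/* Illustrative usage sketch (not part of the original header; the element
 * type and values are hypothetical). QueueRing spreads producers and
 * consumers across N internally-locked buckets to reduce contention on a
 * single queue lock; each bucket is drained in FIFO order:
 *
 *   QueueRing<int> qr(8);   // 8 buckets
 *   qr.enqueue(42);         // producer thread(s)
 *   int item;
 *   qr.dequeue(&item);      // consumer thread(s); blocks until data arrives
 */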
#endif
| 1,362 | 19.969231 | 72 | h |
null | ceph-main/src/common/RWLock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_RWLock_Posix__H
#define CEPH_RWLock_Posix__H
#include <pthread.h>
#include <string>
#include "include/ceph_assert.h"
#include "acconfig.h"
#include "lockdep.h"
#include "common/valgrind.h"
#include <atomic>
class RWLock final
{
mutable pthread_rwlock_t L;
std::string name;
mutable int id;
mutable std::atomic<unsigned> nrlock = { 0 }, nwlock = { 0 };
bool track, lockdep;
std::string unique_name(const char* name) const;
public:
RWLock(const RWLock& other) = delete;
const RWLock& operator=(const RWLock& other) = delete;
RWLock(const std::string &n, bool track_lock=true, bool ld=true, bool prioritize_write=false)
: name(n), id(-1), track(track_lock),
lockdep(ld) {
#if defined(HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP)
if (prioritize_write) {
pthread_rwlockattr_t attr;
pthread_rwlockattr_init(&attr);
// PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
      // Setting the lock kind to this avoids writer starvation as long as
      // any read locking is not done in a recursive fashion.
pthread_rwlockattr_setkind_np(&attr,
PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
pthread_rwlock_init(&L, &attr);
pthread_rwlockattr_destroy(&attr);
} else
#endif
    // The next block is braced so that it can attach to the `if` above when
    // that code path is compiled in.
{
pthread_rwlock_init(&L, NULL);
}
ANNOTATE_BENIGN_RACE_SIZED(&id, sizeof(id), "RWLock lockdep id");
ANNOTATE_BENIGN_RACE_SIZED(&nrlock, sizeof(nrlock), "RWlock nrlock");
ANNOTATE_BENIGN_RACE_SIZED(&nwlock, sizeof(nwlock), "RWlock nwlock");
if (lockdep && g_lockdep) id = lockdep_register(name.c_str());
}
bool is_locked() const {
ceph_assert(track);
return (nrlock > 0) || (nwlock > 0);
}
bool is_wlocked() const {
ceph_assert(track);
return (nwlock > 0);
}
~RWLock() {
// The following check is racy but we are about to destroy
// the object and we assume that there are no other users.
if (track)
ceph_assert(!is_locked());
pthread_rwlock_destroy(&L);
if (lockdep && g_lockdep) {
lockdep_unregister(id);
}
}
void unlock(bool lockdep=true) const {
if (track) {
if (nwlock > 0) {
nwlock--;
} else {
ceph_assert(nrlock > 0);
nrlock--;
}
}
if (lockdep && this->lockdep && g_lockdep)
id = lockdep_will_unlock(name.c_str(), id);
int r = pthread_rwlock_unlock(&L);
ceph_assert(r == 0);
}
// read
void get_read() const {
if (lockdep && g_lockdep) id = lockdep_will_lock(name.c_str(), id);
int r = pthread_rwlock_rdlock(&L);
ceph_assert(r == 0);
if (lockdep && g_lockdep) id = lockdep_locked(name.c_str(), id);
if (track)
nrlock++;
}
bool try_get_read() const {
if (pthread_rwlock_tryrdlock(&L) == 0) {
if (track)
nrlock++;
if (lockdep && g_lockdep) id = lockdep_locked(name.c_str(), id);
return true;
}
return false;
}
void put_read() const {
unlock();
}
void lock_shared() {
get_read();
}
void unlock_shared() {
put_read();
}
// write
void get_write(bool lockdep=true) {
if (lockdep && this->lockdep && g_lockdep)
id = lockdep_will_lock(name.c_str(), id);
int r = pthread_rwlock_wrlock(&L);
ceph_assert(r == 0);
if (lockdep && this->lockdep && g_lockdep)
id = lockdep_locked(name.c_str(), id);
if (track)
nwlock++;
}
bool try_get_write(bool lockdep=true) {
if (pthread_rwlock_trywrlock(&L) == 0) {
if (lockdep && this->lockdep && g_lockdep)
id = lockdep_locked(name.c_str(), id);
if (track)
nwlock++;
return true;
}
return false;
}
void put_write() {
unlock();
}
void lock() {
get_write();
}
void get(bool for_write) {
if (for_write) {
get_write();
} else {
get_read();
}
}
public:
class RLocker {
const RWLock &m_lock;
bool locked;
public:
explicit RLocker(const RWLock& lock) : m_lock(lock) {
m_lock.get_read();
locked = true;
}
void unlock() {
ceph_assert(locked);
m_lock.unlock();
locked = false;
}
~RLocker() {
if (locked) {
m_lock.unlock();
}
}
};
class WLocker {
RWLock &m_lock;
bool locked;
public:
explicit WLocker(RWLock& lock) : m_lock(lock) {
m_lock.get_write();
locked = true;
}
void unlock() {
ceph_assert(locked);
m_lock.unlock();
locked = false;
}
~WLocker() {
if (locked) {
m_lock.unlock();
}
}
};
class Context {
RWLock& lock;
public:
enum LockState {
Untaken = 0,
TakenForRead = 1,
TakenForWrite = 2,
};
private:
LockState state;
public:
explicit Context(RWLock& l) : lock(l), state(Untaken) {}
Context(RWLock& l, LockState s) : lock(l), state(s) {}
void get_write() {
ceph_assert(state == Untaken);
lock.get_write();
state = TakenForWrite;
}
void get_read() {
ceph_assert(state == Untaken);
lock.get_read();
state = TakenForRead;
}
void unlock() {
ceph_assert(state != Untaken);
lock.unlock();
state = Untaken;
}
void promote() {
ceph_assert(state == TakenForRead);
unlock();
get_write();
}
LockState get_state() { return state; }
void set_state(LockState s) {
state = s;
}
bool is_locked() {
return (state != Untaken);
}
bool is_rlocked() {
return (state == TakenForRead);
}
bool is_wlocked() {
return (state == TakenForWrite);
}
};
};
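/* Illustrative usage sketch (not part of the original header; the lock name
 * is hypothetical). The scoped RLocker/WLocker helpers release the lock on
 * scope exit and avoid unbalanced get/put calls:
 *
 *   RWLock lock("MyComponent::lock");
 *   {
 *     RWLock::RLocker rl(lock);   // shared (read) access
 *     // ... read state ...
 *   }
 *   {
 *     RWLock::WLocker wl(lock);   // exclusive (write) access
 *     // ... mutate state ...
 *   }
 */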
#endif // !CEPH_RWLock_Posix__H
| 6,180 | 21.476364 | 95 | h |
null | ceph-main/src/common/Readahead.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/Readahead.h"
#include "common/Cond.h"
using std::vector;
Readahead::Readahead()
: m_trigger_requests(10),
m_readahead_min_bytes(0),
m_readahead_max_bytes(NO_LIMIT),
m_alignments(),
m_nr_consec_read(0),
m_consec_read_bytes(0),
m_last_pos(0),
m_readahead_pos(0),
m_readahead_trigger_pos(0),
m_readahead_size(0),
m_pending(0) {
}
Readahead::~Readahead() {
}
Readahead::extent_t Readahead::update(const vector<extent_t>& extents, uint64_t limit) {
m_lock.lock();
for (vector<extent_t>::const_iterator p = extents.begin(); p != extents.end(); ++p) {
_observe_read(p->first, p->second);
}
  if (m_readahead_pos >= limit || m_last_pos >= limit) {
m_lock.unlock();
return extent_t(0, 0);
}
std::pair<uint64_t, uint64_t> extent = _compute_readahead(limit);
m_lock.unlock();
return extent;
}
Readahead::extent_t Readahead::update(uint64_t offset, uint64_t length, uint64_t limit) {
m_lock.lock();
_observe_read(offset, length);
if (m_readahead_pos >= limit || m_last_pos >= limit) {
m_lock.unlock();
return extent_t(0, 0);
}
extent_t extent = _compute_readahead(limit);
m_lock.unlock();
return extent;
}
void Readahead::_observe_read(uint64_t offset, uint64_t length) {
if (offset == m_last_pos) {
m_nr_consec_read++;
m_consec_read_bytes += length;
} else {
m_nr_consec_read = 0;
m_consec_read_bytes = 0;
m_readahead_trigger_pos = 0;
m_readahead_size = 0;
m_readahead_pos = 0;
}
m_last_pos = offset + length;
}
Readahead::extent_t Readahead::_compute_readahead(uint64_t limit) {
uint64_t readahead_offset = 0;
uint64_t readahead_length = 0;
if (m_nr_consec_read >= m_trigger_requests) {
// currently reading sequentially
if (m_last_pos >= m_readahead_trigger_pos) {
// need to read ahead
if (m_readahead_size == 0) {
// initial readahead trigger
m_readahead_size = m_consec_read_bytes;
m_readahead_pos = m_last_pos;
} else {
// continuing readahead trigger
m_readahead_size *= 2;
if (m_last_pos > m_readahead_pos) {
m_readahead_pos = m_last_pos;
}
}
m_readahead_size = std::max(m_readahead_size, m_readahead_min_bytes);
m_readahead_size = std::min(m_readahead_size, m_readahead_max_bytes);
readahead_offset = m_readahead_pos;
readahead_length = m_readahead_size;
// Snap to the first alignment possible
uint64_t readahead_end = readahead_offset + readahead_length;
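      // Worked example (hypothetical numbers): with readahead_offset = 1000,
      // readahead_length = 4000 (so readahead_end = 5000) and an alignment of
      // 4096, align_prev = 4096 and dist_prev = 904 < length/2, so the
      // request is trimmed to end on the boundary: readahead_length becomes
      // 3096.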
for (vector<uint64_t>::iterator p = m_alignments.begin(); p != m_alignments.end(); ++p) {
// Align the readahead, if possible.
uint64_t alignment = *p;
uint64_t align_prev = readahead_end / alignment * alignment;
uint64_t align_next = align_prev + alignment;
uint64_t dist_prev = readahead_end - align_prev;
uint64_t dist_next = align_next - readahead_end;
if (dist_prev < readahead_length / 2 && dist_prev < dist_next) {
// we can snap to the previous alignment point by a less than 50% reduction in size
ceph_assert(align_prev > readahead_offset);
readahead_length = align_prev - readahead_offset;
break;
        } else if (dist_next < readahead_length / 2) {
// we can snap to the next alignment point by a less than 50% increase in size
ceph_assert(align_next > readahead_offset);
readahead_length = align_next - readahead_offset;
break;
}
// Note that m_readahead_size should remain unadjusted.
}
if (m_readahead_pos + readahead_length > limit) {
readahead_length = limit - m_readahead_pos;
}
m_readahead_trigger_pos = m_readahead_pos + readahead_length / 2;
m_readahead_pos += readahead_length;
}
}
return extent_t(readahead_offset, readahead_length);
}
void Readahead::inc_pending(int count) {
ceph_assert(count > 0);
m_pending_lock.lock();
m_pending += count;
m_pending_lock.unlock();
}
void Readahead::dec_pending(int count) {
ceph_assert(count > 0);
m_pending_lock.lock();
ceph_assert(m_pending >= count);
m_pending -= count;
if (m_pending == 0) {
std::list<Context *> pending_waiting(std::move(m_pending_waiting));
m_pending_lock.unlock();
for (auto ctx : pending_waiting) {
ctx->complete(0);
}
} else {
m_pending_lock.unlock();
}
}
void Readahead::wait_for_pending() {
C_SaferCond ctx;
wait_for_pending(&ctx);
ctx.wait();
}
void Readahead::wait_for_pending(Context *ctx) {
m_pending_lock.lock();
if (m_pending > 0) {
m_pending_lock.unlock();
m_pending_waiting.push_back(ctx);
return;
}
m_pending_lock.unlock();
ctx->complete(0);
}
void Readahead::set_trigger_requests(int trigger_requests) {
m_lock.lock();
m_trigger_requests = trigger_requests;
m_lock.unlock();
}
uint64_t Readahead::get_min_readahead_size(void) {
std::lock_guard lock(m_lock);
return m_readahead_min_bytes;
}
uint64_t Readahead::get_max_readahead_size(void) {
std::lock_guard lock(m_lock);
return m_readahead_max_bytes;
}
void Readahead::set_min_readahead_size(uint64_t min_readahead_size) {
m_lock.lock();
m_readahead_min_bytes = min_readahead_size;
m_lock.unlock();
}
void Readahead::set_max_readahead_size(uint64_t max_readahead_size) {
m_lock.lock();
m_readahead_max_bytes = max_readahead_size;
m_lock.unlock();
}
void Readahead::set_alignments(const vector<uint64_t> &alignments) {
m_lock.lock();
m_alignments = alignments;
m_lock.unlock();
}
| 5,508 | 26.964467 | 95 | cc |
null | ceph-main/src/common/Readahead.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_READAHEAD_H
#define CEPH_READAHEAD_H
#include <list>
#include <vector>
#include "include/Context.h"
#include "common/ceph_mutex.h"
/**
This class provides common state and logic for code that needs to perform readahead
on linear things such as RBD images or files.
Unless otherwise specified, all methods are thread-safe.
Minimum and maximum readahead sizes may be violated by up to 50\% if alignment is enabled.
Minimum readahead size may be violated if the end of the readahead target is reached.
*/
class Readahead {
public:
typedef std::pair<uint64_t, uint64_t> extent_t;
// equal to UINT64_MAX
static const uint64_t NO_LIMIT = 18446744073709551615ULL;
Readahead();
~Readahead();
/**
Update state with new reads and return readahead to be performed.
If the length of the returned extent is 0, no readahead should be performed.
The readahead extent is guaranteed not to pass \c limit.
Note that passing in NO_LIMIT as the limit and truncating the returned extent
is not the same as passing in the correct limit, because the internal state
will differ in the two cases.
@param extents read operations since last call to update
@param limit size of the thing readahead is being applied to
*/
extent_t update(const std::vector<extent_t>& extents, uint64_t limit);
/**
Update state with a new read and return readahead to be performed.
If the length of the returned extent is 0, no readahead should be performed.
The readahead extent is guaranteed not to pass \c limit.
Note that passing in NO_LIMIT as the limit and truncating the returned extent
is not the same as passing in the correct limit, because the internal state
will differ in the two cases.
@param offset offset of the read operation
@param length length of the read operation
@param limit size of the thing readahead is being applied to
*/
extent_t update(uint64_t offset, uint64_t length, uint64_t limit);
/**
Increment the pending counter.
*/
void inc_pending(int count = 1);
/**
Decrement the pending counter.
The counter must not be decremented below 0.
*/
void dec_pending(int count = 1);
/**
Waits until the pending count reaches 0.
*/
void wait_for_pending();
void wait_for_pending(Context *ctx);
/**
Sets the number of sequential requests necessary to trigger readahead.
*/
void set_trigger_requests(int trigger_requests);
/**
Gets the minimum size of a readahead request, in bytes.
*/
uint64_t get_min_readahead_size(void);
/**
Gets the maximum size of a readahead request, in bytes.
*/
uint64_t get_max_readahead_size(void);
/**
Sets the minimum size of a readahead request, in bytes.
*/
void set_min_readahead_size(uint64_t min_readahead_size);
/**
Sets the maximum size of a readahead request, in bytes.
*/
void set_max_readahead_size(uint64_t max_readahead_size);
/**
Sets the alignment units.
If the end point of a readahead request can be aligned to an alignment unit
by increasing or decreasing the size of the request by 50\% or less, it will.
Alignments are tested in order, so larger numbers should almost always come first.
*/
void set_alignments(const std::vector<uint64_t> &alignments);
private:
/**
Records that a read request has been received.
m_lock must be held while calling.
*/
void _observe_read(uint64_t offset, uint64_t length);
/**
Computes the next readahead request.
m_lock must be held while calling.
*/
extent_t _compute_readahead(uint64_t limit);
/// Number of sequential requests necessary to trigger readahead
int m_trigger_requests;
/// Minimum size of a readahead request, in bytes
uint64_t m_readahead_min_bytes;
/// Maximum size of a readahead request, in bytes
uint64_t m_readahead_max_bytes;
/// Alignment units, in bytes
std::vector<uint64_t> m_alignments;
/// Held while reading/modifying any state except m_pending
ceph::mutex m_lock = ceph::make_mutex("Readahead::m_lock");
/// Number of consecutive read requests in the current sequential stream
int m_nr_consec_read;
  /// Number of bytes read in the current sequential stream
uint64_t m_consec_read_bytes;
/// Position of the read stream
uint64_t m_last_pos;
/// Position of the readahead stream
uint64_t m_readahead_pos;
/// When readahead is already triggered and the read stream crosses this point, readahead is continued
uint64_t m_readahead_trigger_pos;
/// Size of the next readahead request (barring changes due to alignment, etc.)
uint64_t m_readahead_size;
/// Number of pending readahead requests, as determined by inc_pending() and dec_pending()
int m_pending;
/// Lock for m_pending
ceph::mutex m_pending_lock = ceph::make_mutex("Readahead::m_pending_lock");
/// Waiters for pending readahead
std::list<Context *> m_pending_waiting;
};
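/* Illustrative usage sketch (not part of the original header; the variable
 * names and readahead_from() callback are hypothetical). A typical consumer
 * feeds every completed read into update() and issues whatever extent comes
 * back:
 *
 *   Readahead ra;
 *   ra.set_trigger_requests(10);
 *   ra.set_max_readahead_size(4 << 20);
 *   Readahead::extent_t ext = ra.update(offset, length, object_size);
 *   if (ext.second > 0) {
 *     ra.inc_pending();
 *     readahead_from(ext.first, ext.second); // async; dec_pending() on completion
 *   }
 *   // before teardown:
 *   ra.wait_for_pending();
 */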
#endif
| 5,113 | 29.440476 | 104 | h |
null | ceph-main/src/common/RefCountedObj.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
//
#include "include/ceph_assert.h"
#include "common/RefCountedObj.h"
#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/valgrind.h"
namespace TOPNSPC::common {
RefCountedObject::~RefCountedObject()
{
ceph_assert(nref == 0);
}
void RefCountedObject::put() const {
CephContext *local_cct = cct;
auto v = --nref;
if (local_cct) {
lsubdout(local_cct, refs, 1) << "RefCountedObject::put " << this << " "
<< (v + 1) << " -> " << v
<< dendl;
}
if (v == 0) {
ANNOTATE_HAPPENS_AFTER(&nref);
ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&nref);
delete this;
} else {
ANNOTATE_HAPPENS_BEFORE(&nref);
}
}
void RefCountedObject::_get() const {
auto v = ++nref;
ceph_assert(v > 1); /* it should never happen that _get() sees nref == 0 */
if (cct) {
lsubdout(cct, refs, 1) << "RefCountedObject::get " << this << " "
<< (v - 1) << " -> " << v << dendl;
}
}
}
| 1,025 | 22.318182 | 77 | cc |
null | ceph-main/src/common/RefCountedObj.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_REFCOUNTEDOBJ_H
#define CEPH_REFCOUNTEDOBJ_H
#include "common/ceph_mutex.h"
#include "common/ref.h"
#include "include/common_fwd.h"
#include <atomic>
/* This class provides mechanisms to make a sub-class work with
* boost::intrusive_ptr (aka ceph::ref_t).
*
* Generally, you'll want to inherit from RefCountedObjectSafe and not from
* RefCountedObject directly. This is because the ::get and ::put methods are
* public and can be used to create/delete references outside of the
* ceph::ref_t pointers with the potential to leak memory.
*
* It is also suggested that you make constructors and destructors private in
* your final class. This prevents instantiation of the object with assignment
* to a raw pointer. Consequently, you'll want to use ceph::make_ref<> to
* create a ceph::ref_t<> holding your object:
*
* auto ptr = ceph::make_ref<Foo>(...);
*
* Use FRIEND_MAKE_REF(ClassName) to allow ceph::make_ref to call the private
* constructors.
*
*/
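/* For example (an illustrative sketch, not part of the original header):
 *
 *   class Foo : public RefCountedObjectSafe {
 *   public:
 *     void bar();
 *   private:
 *     FRIEND_MAKE_REF(Foo);
 *     explicit Foo(int x) : x(x) {}
 *     ~Foo() override = default;
 *     int x;
 *   };
 *
 *   auto foo = ceph::make_ref<Foo>(42);   // ceph::ref_t<Foo>
 *   foo->bar();                           // freed automatically when the
 *                                         // last ref_t goes out of scope
 */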
namespace TOPNSPC::common {
class RefCountedObject {
public:
void set_cct(CephContext *c) {
cct = c;
}
uint64_t get_nref() const {
return nref;
}
const RefCountedObject *get() const {
_get();
return this;
}
RefCountedObject *get() {
_get();
return this;
}
void put() const;
protected:
RefCountedObject() = default;
RefCountedObject(const RefCountedObject& o) : cct(o.cct) {}
RefCountedObject& operator=(const RefCountedObject& o) = delete;
RefCountedObject(RefCountedObject&&) = delete;
RefCountedObject& operator=(RefCountedObject&&) = delete;
RefCountedObject(CephContext* c) : cct(c) {}
virtual ~RefCountedObject();
private:
void _get() const;
mutable std::atomic<uint64_t> nref{1};
CephContext *cct{nullptr};
};
class RefCountedObjectSafe : public RefCountedObject {
public:
RefCountedObject *get() = delete;
const RefCountedObject *get() const = delete;
void put() const = delete;
protected:
template<typename... Args>
RefCountedObjectSafe(Args&&... args) : RefCountedObject(std::forward<Args>(args)...) {}
virtual ~RefCountedObjectSafe() override {}
};
#if !defined(WITH_SEASTAR) || defined(WITH_ALIEN)
/**
* RefCountedCond
*
* a refcounted condition, will be removed when all references are dropped
*/
struct RefCountedCond : public RefCountedObject {
RefCountedCond() = default;
~RefCountedCond() = default;
int wait() {
std::unique_lock l(lock);
while (!complete) {
cond.wait(l);
}
return rval;
}
void done(int r) {
std::lock_guard l(lock);
rval = r;
complete = true;
cond.notify_all();
}
void done() {
done(0);
}
private:
bool complete = false;
ceph::mutex lock = ceph::make_mutex("RefCountedCond::lock");
ceph::condition_variable cond;
int rval = 0;
};
/**
* RefCountedWaitObject
*
* refcounted object that allows waiting for the object's last reference.
* Any referrer can either put or put_wait(). A simple put() will return
* immediately, a put_wait() will return only when the object is destroyed.
* e.g., useful when we want to wait for a specific event completion. We
* use RefCountedCond, as the condition can be referenced after the object
* destruction.
*
*/
struct RefCountedWaitObject {
std::atomic<uint64_t> nref = { 1 };
RefCountedCond *c;
RefCountedWaitObject() {
c = new RefCountedCond;
}
virtual ~RefCountedWaitObject() {
c->put();
}
RefCountedWaitObject *get() {
nref++;
return this;
}
bool put() {
bool ret = false;
RefCountedCond *cond = c;
cond->get();
if (--nref == 0) {
cond->done();
delete this;
ret = true;
}
cond->put();
return ret;
}
void put_wait() {
RefCountedCond *cond = c;
cond->get();
if (--nref == 0) {
cond->done();
delete this;
} else {
cond->wait();
}
cond->put();
}
};
#endif // !defined(WITH_SEASTAR) || defined(WITH_ALIEN)
static inline void intrusive_ptr_add_ref(const RefCountedObject *p) {
p->get();
}
static inline void intrusive_ptr_release(const RefCountedObject *p) {
p->put();
}
struct UniquePtrDeleter
{
void operator()(RefCountedObject *p) const
{
// Don't expect a call to `get()` in the ctor as we manually set nref to 1
p->put();
}
};
}
using RefCountedPtr = ceph::ref_t<TOPNSPC::common::RefCountedObject>;
#endif
| 4,821 | 22.753695 | 89 | h |
null | ceph-main/src/common/Semaphore.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_Sem_Posix__H
#define CEPH_Sem_Posix__H
#include "common/ceph_mutex.h"
class Semaphore
{
ceph::mutex m = ceph::make_mutex("Semaphore::m");
ceph::condition_variable c;
int count = 0;
public:
void Put()
{
std::lock_guard l(m);
count++;
c.notify_all();
}
void Get()
{
std::unique_lock l(m);
while(count <= 0) {
c.wait(l);
}
count--;
}
};
#endif // !CEPH_Sem_Posix__H
| 864 | 17.404255 | 71 | h |
null | ceph-main/src/common/SloppyCRCMap.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/SloppyCRCMap.h"
#include "common/Formatter.h"
using namespace std;
using ceph::bufferlist;
void SloppyCRCMap::write(uint64_t offset, uint64_t len, const bufferlist& bl,
std::ostream *out)
{
int64_t left = len;
uint64_t pos = offset;
unsigned o = offset % block_size;
if (o) {
crc_map.erase(offset - o);
if (out)
*out << "write invalidate " << (offset - o) << "\n";
pos += (block_size - o);
left -= (block_size - o);
}
while (left >= block_size) {
bufferlist t;
t.substr_of(bl, pos - offset, block_size);
crc_map[pos] = t.crc32c(crc_iv);
if (out)
*out << "write set " << pos << " " << crc_map[pos] << "\n";
pos += block_size;
left -= block_size;
}
if (left > 0) {
crc_map.erase(pos);
if (out)
*out << "write invalidate " << pos << "\n";
}
}
int SloppyCRCMap::read(uint64_t offset, uint64_t len, const bufferlist& bl,
std::ostream *err)
{
int errors = 0;
int64_t left = len;
uint64_t pos = offset;
unsigned o = offset % block_size;
if (o) {
pos += (block_size - o);
left -= (block_size - o);
}
while (left >= block_size) {
// FIXME: this could be more efficient if we avoid doing a find()
// on each iteration
std::map<uint64_t,uint32_t>::iterator p = crc_map.find(pos);
if (p != crc_map.end()) {
bufferlist t;
t.substr_of(bl, pos - offset, block_size);
uint32_t crc = t.crc32c(crc_iv);
if (p->second != crc) {
errors++;
if (err)
*err << "offset " << pos << " len " << block_size
<< " has crc " << crc << " expected " << p->second << "\n";
}
}
pos += block_size;
left -= block_size;
}
return errors;
}
void SloppyCRCMap::truncate(uint64_t offset)
{
offset -= offset % block_size;
std::map<uint64_t,uint32_t>::iterator p = crc_map.lower_bound(offset);
while (p != crc_map.end())
crc_map.erase(p++);
}
void SloppyCRCMap::zero(uint64_t offset, uint64_t len)
{
int64_t left = len;
uint64_t pos = offset;
unsigned o = offset % block_size;
if (o) {
crc_map.erase(offset - o);
pos += (block_size - o);
left -= (block_size - o);
}
while (left >= block_size) {
crc_map[pos] = zero_crc;
pos += block_size;
left -= block_size;
}
if (left > 0)
crc_map.erase(pos);
}
void SloppyCRCMap::clone_range(uint64_t offset, uint64_t len,
uint64_t srcoff, const SloppyCRCMap& src,
std::ostream *out)
{
int64_t left = len;
uint64_t pos = offset;
uint64_t srcpos = srcoff;
unsigned o = offset % block_size;
if (o) {
crc_map.erase(offset - o);
pos += (block_size - o);
srcpos += (block_size - o);
left -= (block_size - o);
if (out)
*out << "clone_range invalidate " << (offset - o) << "\n";
}
while (left >= block_size) {
// FIXME: this could be more efficient.
if (block_size == src.block_size) {
map<uint64_t,uint32_t>::const_iterator p = src.crc_map.find(srcpos);
if (p != src.crc_map.end()) {
crc_map[pos] = p->second;
if (out)
*out << "clone_range copy " << pos << " " << p->second << "\n";
} else {
crc_map.erase(pos);
if (out)
*out << "clone_range invalidate " << pos << "\n";
}
} else {
crc_map.erase(pos);
if (out)
*out << "clone_range invalidate " << pos << "\n";
}
pos += block_size;
srcpos += block_size;
left -= block_size;
}
if (left > 0) {
crc_map.erase(pos);
if (out)
*out << "clone_range invalidate " << pos << "\n";
}
}
void SloppyCRCMap::encode(bufferlist& bl) const
{
ENCODE_START(1, 1, bl);
encode(block_size, bl);
encode(crc_map, bl);
ENCODE_FINISH(bl);
}
void SloppyCRCMap::decode(bufferlist::const_iterator& bl)
{
DECODE_START(1, bl);
uint32_t bs;
decode(bs, bl);
set_block_size(bs);
decode(crc_map, bl);
DECODE_FINISH(bl);
}
void SloppyCRCMap::dump(ceph::Formatter *f) const
{
f->dump_unsigned("block_size", block_size);
f->open_array_section("crc_map");
for (map<uint64_t,uint32_t>::const_iterator p = crc_map.begin(); p != crc_map.end(); ++p) {
f->open_object_section("crc");
f->dump_unsigned("offset", p->first);
f->dump_unsigned("crc", p->second);
f->close_section();
}
f->close_section();
}
void SloppyCRCMap::generate_test_instances(list<SloppyCRCMap*>& ls)
{
ls.push_back(new SloppyCRCMap);
ls.push_back(new SloppyCRCMap(2));
bufferlist bl;
bl.append("some data");
ls.back()->write(1, bl.length(), bl);
ls.back()->write(10, bl.length(), bl);
ls.back()->zero(4, 2);
}
| 4,663 | 24.347826 | 93 | cc |
null | ceph-main/src/common/SloppyCRCMap.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_COMMON_SLOPPYCRCMAP_H
#define CEPH_COMMON_SLOPPYCRCMAP_H
#include "include/encoding.h"
namespace ceph {
class Formatter;
}
/**
* SloppyCRCMap
*
* Opportunistically track CRCs on any reads or writes that cover full
* blocks. Verify read results when we have CRC data available for
* the given extent.
*/
class SloppyCRCMap {
static const int crc_iv = 0xffffffff;
std::map<uint64_t, uint32_t> crc_map; // offset -> crc(-1)
uint32_t block_size;
uint32_t zero_crc;
public:
SloppyCRCMap(uint32_t b=0) {
set_block_size(b);
}
void set_block_size(uint32_t b) {
block_size = b;
//zero_crc = ceph_crc32c(0xffffffff, NULL, block_size);
if (b) {
ceph::buffer::list bl;
bl.append_zero(block_size);
zero_crc = bl.crc32c(crc_iv);
} else {
zero_crc = crc_iv;
}
}
/// update based on a write
void write(uint64_t offset, uint64_t len, const ceph::buffer::list& bl,
std::ostream *out = NULL);
/// update based on a truncate
void truncate(uint64_t offset);
/// update based on a zero/punch_hole
void zero(uint64_t offset, uint64_t len);
  /// update based on a clone_range
void clone_range(uint64_t offset, uint64_t len, uint64_t srcoff, const SloppyCRCMap& src,
std::ostream *out = NULL);
/**
* validate a read result
*
   * @param offset offset of the read
   * @param len length of the read
   * @param bl data read
   * @param err optional ostream to describe errors in detail
* @returns error count, 0 for success
*/
int read(uint64_t offset, uint64_t len, const ceph::buffer::list& bl, std::ostream *err);
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<SloppyCRCMap*>& ls);
};
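/* Illustrative usage sketch (not part of the original header; the block size
 * and bufferlist bl are hypothetical). Writes populate CRCs for fully
 * covered blocks and later reads are checked against whatever CRCs are
 * known:
 *
 *   SloppyCRCMap crcs(65536);
 *   crcs.write(0, bl.length(), bl);          // record CRCs for full blocks
 *   // ...
 *   std::ostringstream err;
 *   int bad = crcs.read(0, bl.length(), bl, &err);
 *   if (bad > 0)
 *     std::cerr << "crc mismatch: " << err.str() << std::endl;
 */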
WRITE_CLASS_ENCODER(SloppyCRCMap)
#endif
| 1,964 | 24.519481 | 91 | h |
null | ceph-main/src/common/StackStringStream.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef COMMON_STACKSTRINGSTREAM_H
#define COMMON_STACKSTRINGSTREAM_H
#include <boost/container/small_vector.hpp>
#include <algorithm>
#include <iostream>
#include <memory>
#include <ostream>
#include <sstream>
#include <string_view>
#include <vector>
#include "include/inline_memory.h"
template<std::size_t SIZE>
class StackStringBuf : public std::basic_streambuf<char>
{
public:
StackStringBuf()
: vec{SIZE, boost::container::default_init_t{}}
{
setp(vec.data(), vec.data() + vec.size());
}
StackStringBuf(const StackStringBuf&) = delete;
StackStringBuf& operator=(const StackStringBuf&) = delete;
StackStringBuf(StackStringBuf&& o) = delete;
StackStringBuf& operator=(StackStringBuf&& o) = delete;
~StackStringBuf() override = default;
void clear()
{
vec.resize(SIZE);
setp(vec.data(), vec.data() + SIZE);
}
std::string_view strv() const
{
return std::string_view(pbase(), pptr() - pbase());
}
protected:
std::streamsize xsputn(const char *s, std::streamsize n) final
{
std::streamsize capacity = epptr() - pptr();
std::streamsize left = n;
if (capacity >= left) {
maybe_inline_memcpy(pptr(), s, left, 32);
pbump(left);
} else {
maybe_inline_memcpy(pptr(), s, capacity, 64);
s += capacity;
left -= capacity;
vec.insert(vec.end(), s, s + left);
setp(vec.data(), vec.data() + vec.size());
pbump(vec.size());
}
return n;
}
int overflow(int c) final
{
if (traits_type::not_eof(c)) {
char str = traits_type::to_char_type(c);
vec.push_back(str);
return c;
} else {
return traits_type::eof();
}
}
private:
boost::container::small_vector<char, SIZE> vec;
};
template<std::size_t SIZE>
class StackStringStream : public std::basic_ostream<char>
{
public:
StackStringStream() : basic_ostream<char>(&ssb), default_fmtflags(flags()) {}
StackStringStream(const StackStringStream& o) = delete;
StackStringStream& operator=(const StackStringStream& o) = delete;
StackStringStream(StackStringStream&& o) = delete;
StackStringStream& operator=(StackStringStream&& o) = delete;
~StackStringStream() override = default;
void reset() {
clear(); /* reset state flags */
flags(default_fmtflags); /* reset fmtflags to constructor defaults */
ssb.clear();
}
std::string_view strv() const {
return ssb.strv();
}
std::string str() const {
return std::string(ssb.strv());
}
private:
StackStringBuf<SIZE> ssb;
fmtflags const default_fmtflags;
};
/* In an ideal world, we could use StackStringStream indiscriminately, but alas
* it's very expensive to construct/destruct. So, we cache them in a
* thread_local vector. DO NOT share these with other threads. The copy/move
* constructors are deliberately restrictive to make this more difficult to
* accidentally do.
*/
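/* Illustrative usage sketch (not part of the original header; osd_id, state
 * and consume() are hypothetical):
 *
 *   CachedStackStringStream css;
 *   *css << "osd." << osd_id << " is " << state;
 *   std::string_view sv = css->strv();   // view stays valid while css lives
 *   consume(sv);
 */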
class CachedStackStringStream {
public:
using sss = StackStringStream<4096>;
using osptr = std::unique_ptr<sss>;
CachedStackStringStream() {
if (cache.destructed || cache.c.empty()) {
osp = std::make_unique<sss>();
} else {
osp = std::move(cache.c.back());
cache.c.pop_back();
osp->reset();
}
}
CachedStackStringStream(const CachedStackStringStream&) = delete;
CachedStackStringStream& operator=(const CachedStackStringStream&) = delete;
CachedStackStringStream(CachedStackStringStream&&) = delete;
CachedStackStringStream& operator=(CachedStackStringStream&&) = delete;
~CachedStackStringStream() {
if (!cache.destructed && cache.c.size() < max_elems) {
cache.c.emplace_back(std::move(osp));
}
}
sss& operator*() {
return *osp;
}
sss const& operator*() const {
return *osp;
}
sss* operator->() {
return osp.get();
}
sss const* operator->() const {
return osp.get();
}
sss const* get() const {
return osp.get();
}
sss* get() {
return osp.get();
}
private:
static constexpr std::size_t max_elems = 8;
/* The thread_local cache may be destructed before other static structures.
* If those destructors try to create a CachedStackStringStream (e.g. for
* logging) and access this cache, that access will be undefined. So note if
* the cache has been destructed and check before use.
*/
struct Cache {
using container = std::vector<osptr>;
Cache() {}
~Cache() { destructed = true; }
container c;
bool destructed = false;
};
inline static thread_local Cache cache;
osptr osp;
};
#endif
| 4,934 | 24.569948 | 79 | h |
null | ceph-main/src/common/SubProcess.cc | #include "SubProcess.h"
#if defined(__FreeBSD__) || defined(__APPLE__)
#include <sys/types.h>
#include <signal.h>
#endif
#include <stdarg.h>
#include <fcntl.h>
#include <unistd.h>
#include <iostream>
#include "common/errno.h"
#include "include/ceph_assert.h"
#include "include/compat.h"
SubProcess::SubProcess(const char *cmd_, std_fd_op stdin_op_, std_fd_op stdout_op_, std_fd_op stderr_op_) :
cmd(cmd_),
cmd_args(),
stdin_op(stdin_op_),
stdout_op(stdout_op_),
stderr_op(stderr_op_),
stdin_pipe_out_fd(-1),
stdout_pipe_in_fd(-1),
stderr_pipe_in_fd(-1),
pid(-1),
errstr() {
}
SubProcess::~SubProcess() {
ceph_assert(!is_spawned());
ceph_assert(stdin_pipe_out_fd == -1);
ceph_assert(stdout_pipe_in_fd == -1);
ceph_assert(stderr_pipe_in_fd == -1);
}
void SubProcess::add_cmd_args(const char *arg, ...) {
ceph_assert(!is_spawned());
va_list ap;
va_start(ap, arg);
const char *p = arg;
do {
add_cmd_arg(p);
p = va_arg(ap, const char*);
} while (p != NULL);
va_end(ap);
}
void SubProcess::add_cmd_arg(const char *arg) {
ceph_assert(!is_spawned());
cmd_args.push_back(arg);
}
int SubProcess::get_stdin() const {
ceph_assert(is_spawned());
ceph_assert(stdin_op == PIPE);
return stdin_pipe_out_fd;
}
int SubProcess::get_stdout() const {
ceph_assert(is_spawned());
ceph_assert(stdout_op == PIPE);
return stdout_pipe_in_fd;
}
int SubProcess::get_stderr() const {
ceph_assert(is_spawned());
ceph_assert(stderr_op == PIPE);
return stderr_pipe_in_fd;
}
void SubProcess::close(int &fd) {
if (fd == -1)
return;
::close(fd);
fd = -1;
}
void SubProcess::close_stdin() {
ceph_assert(is_spawned());
ceph_assert(stdin_op == PIPE);
close(stdin_pipe_out_fd);
}
void SubProcess::close_stdout() {
ceph_assert(is_spawned());
ceph_assert(stdout_op == PIPE);
close(stdout_pipe_in_fd);
}
void SubProcess::close_stderr() {
ceph_assert(is_spawned());
ceph_assert(stderr_op == PIPE);
close(stderr_pipe_in_fd);
}
void SubProcess::kill(int signo) const {
ceph_assert(is_spawned());
int ret = ::kill(pid, signo);
ceph_assert(ret == 0);
}
const std::string SubProcess::err() const {
return errstr.str();
}
class fd_buf : public std::streambuf {
int fd;
public:
fd_buf (int fd) : fd(fd)
{}
protected:
int_type overflow (int_type c) override {
if (c == EOF) return EOF;
char buf = c;
if (write (fd, &buf, 1) != 1) {
return EOF;
}
return c;
}
std::streamsize xsputn (const char* s, std::streamsize count) override {
return write(fd, s, count);
}
};
int SubProcess::spawn() {
ceph_assert(!is_spawned());
ceph_assert(stdin_pipe_out_fd == -1);
ceph_assert(stdout_pipe_in_fd == -1);
ceph_assert(stderr_pipe_in_fd == -1);
enum { IN = 0, OUT = 1 };
int ipipe[2], opipe[2], epipe[2];
ipipe[0] = ipipe[1] = opipe[0] = opipe[1] = epipe[0] = epipe[1] = -1;
int ret = 0;
if ((stdin_op == PIPE && pipe_cloexec(ipipe, 0) == -1) ||
(stdout_op == PIPE && pipe_cloexec(opipe, 0) == -1) ||
(stderr_op == PIPE && pipe_cloexec(epipe, 0) == -1)) {
ret = -errno;
errstr << "pipe failed: " << cpp_strerror(errno);
goto fail;
}
pid = fork();
if (pid > 0) { // Parent
stdin_pipe_out_fd = ipipe[OUT]; close(ipipe[IN ]);
stdout_pipe_in_fd = opipe[IN ]; close(opipe[OUT]);
stderr_pipe_in_fd = epipe[IN ]; close(epipe[OUT]);
return 0;
}
if (pid == 0) { // Child
close(ipipe[OUT]);
close(opipe[IN ]);
close(epipe[IN ]);
if (ipipe[IN] >= 0) {
if (ipipe[IN] == STDIN_FILENO) {
::fcntl(STDIN_FILENO, F_SETFD, 0); /* clear FD_CLOEXEC */
} else {
::dup2(ipipe[IN], STDIN_FILENO);
::close(ipipe[IN]);
}
}
if (opipe[OUT] >= 0) {
if (opipe[OUT] == STDOUT_FILENO) {
::fcntl(STDOUT_FILENO, F_SETFD, 0); /* clear FD_CLOEXEC */
} else {
::dup2(opipe[OUT], STDOUT_FILENO);
::close(opipe[OUT]);
static fd_buf buf(STDOUT_FILENO);
std::cout.rdbuf(&buf);
}
}
if (epipe[OUT] >= 0) {
if (epipe[OUT] == STDERR_FILENO) {
::fcntl(STDERR_FILENO, F_SETFD, 0); /* clear FD_CLOEXEC */
} else {
::dup2(epipe[OUT], STDERR_FILENO);
::close(epipe[OUT]);
static fd_buf buf(STDERR_FILENO);
std::cerr.rdbuf(&buf);
}
}
int maxfd = sysconf(_SC_OPEN_MAX);
if (maxfd == -1)
maxfd = 16384;
for (int fd = 0; fd <= maxfd; fd++) {
if (fd == STDIN_FILENO && stdin_op != CLOSE)
continue;
if (fd == STDOUT_FILENO && stdout_op != CLOSE)
continue;
if (fd == STDERR_FILENO && stderr_op != CLOSE)
continue;
::close(fd);
}
exec();
ceph_abort(); // Never reached
}
ret = -errno;
errstr << "fork failed: " << cpp_strerror(errno);
fail:
close(ipipe[0]);
close(ipipe[1]);
close(opipe[0]);
close(opipe[1]);
close(epipe[0]);
close(epipe[1]);
return ret;
}
void SubProcess::exec() {
ceph_assert(is_child());
std::vector<const char *> args;
args.push_back(cmd.c_str());
for (std::vector<std::string>::iterator i = cmd_args.begin();
i != cmd_args.end();
i++) {
args.push_back(i->c_str());
}
args.push_back(NULL);
int ret = execvp(cmd.c_str(), (char * const *)&args[0]);
ceph_assert(ret == -1);
std::cerr << cmd << ": exec failed: " << cpp_strerror(errno) << "\n";
_exit(EXIT_FAILURE);
}
int SubProcess::join() {
ceph_assert(is_spawned());
close(stdin_pipe_out_fd);
close(stdout_pipe_in_fd);
close(stderr_pipe_in_fd);
int status;
while (waitpid(pid, &status, 0) == -1)
ceph_assert(errno == EINTR);
pid = -1;
if (WIFEXITED(status)) {
if (WEXITSTATUS(status) != EXIT_SUCCESS)
errstr << cmd << ": exit status: " << WEXITSTATUS(status);
return WEXITSTATUS(status);
}
if (WIFSIGNALED(status)) {
errstr << cmd << ": got signal: " << WTERMSIG(status);
return 128 + WTERMSIG(status);
}
errstr << cmd << ": waitpid: unknown status returned\n";
return EXIT_FAILURE;
}
SubProcessTimed::SubProcessTimed(const char *cmd, std_fd_op stdin_op,
std_fd_op stdout_op, std_fd_op stderr_op,
int timeout_, int sigkill_) :
SubProcess(cmd, stdin_op, stdout_op, stderr_op),
timeout(timeout_),
sigkill(sigkill_) {
}
static bool timedout = false; // only used after fork
void timeout_sighandler(int sig) {
timedout = true;
}
static void dummy_sighandler(int sig) {}
void SubProcessTimed::exec() {
ceph_assert(is_child());
if (timeout <= 0) {
SubProcess::exec();
ceph_abort(); // Never reached
}
sigset_t mask, oldmask;
int pid;
// Restore default action for SIGTERM in case the parent process decided
// to ignore it.
if (signal(SIGTERM, SIG_DFL) == SIG_ERR) {
std::cerr << cmd << ": signal failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
  // Because SIGCHLD is ignored by default, set up a dummy handler for it
  // so that we can mask it.
if (signal(SIGCHLD, dummy_sighandler) == SIG_ERR) {
std::cerr << cmd << ": signal failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
// Setup timeout handler.
if (signal(SIGALRM, timeout_sighandler) == SIG_ERR) {
std::cerr << cmd << ": signal failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
// Block interesting signals.
sigemptyset(&mask);
sigaddset(&mask, SIGINT);
sigaddset(&mask, SIGTERM);
sigaddset(&mask, SIGCHLD);
sigaddset(&mask, SIGALRM);
if (sigprocmask(SIG_SETMASK, &mask, &oldmask) == -1) {
std::cerr << cmd << ": sigprocmask failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
pid = fork();
if (pid == -1) {
std::cerr << cmd << ": fork failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
if (pid == 0) { // Child
// Restore old sigmask.
if (sigprocmask(SIG_SETMASK, &oldmask, NULL) == -1) {
std::cerr << cmd << ": sigprocmask failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
(void)setpgid(0, 0); // Become process group leader.
SubProcess::exec();
ceph_abort(); // Never reached
}
// Parent
(void)alarm(timeout);
for (;;) {
int signo;
if (sigwait(&mask, &signo) == -1) {
std::cerr << cmd << ": sigwait failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
switch (signo) {
case SIGCHLD:
int status;
if (waitpid(pid, &status, WNOHANG) == -1) {
std::cerr << cmd << ": waitpid failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
if (WIFEXITED(status))
_exit(WEXITSTATUS(status));
if (WIFSIGNALED(status))
_exit(128 + WTERMSIG(status));
std::cerr << cmd << ": unknown status returned\n";
goto fail_exit;
case SIGINT:
case SIGTERM:
// Pass SIGINT and SIGTERM, which are usually used to terminate
// a process, to the child.
if (::kill(pid, signo) == -1) {
std::cerr << cmd << ": kill failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
continue;
case SIGALRM:
std::cerr << cmd << ": timed out (" << timeout << " sec)\n";
if (::killpg(pid, sigkill) == -1) {
std::cerr << cmd << ": kill failed: " << cpp_strerror(errno) << "\n";
goto fail_exit;
}
continue;
default:
std::cerr << cmd << ": sigwait: invalid signal: " << signo << "\n";
goto fail_exit;
}
}
fail_exit:
_exit(EXIT_FAILURE);
}
| 9,437 | 22.893671 | 107 | cc |
null | ceph-main/src/common/SubProcess.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2015 Mirantis Inc
*
* Author: Mykola Golub <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef SUB_PROCESS_H
#define SUB_PROCESS_H
#if defined(__FreeBSD__) || defined(__APPLE__)
#include <signal.h>
#endif
#ifndef _WIN32
#include <sys/wait.h>
#endif
#include <sstream>
#include <vector>
#include "include/compat.h"
/**
* SubProcess:
* A helper class to spawn a subprocess.
*
* Example:
*
* SubProcess cat("cat", SubProcess::PIPE, SubProcess::PIPE);
* if (cat.spawn() != 0) {
* std::cerr << "cat failed: " << cat.err() << std::endl;
* return false;
* }
 *   write_to_fd(cat.get_stdin(), "hello world!\n");
 *   cat.close_stdin();
 *   read_from_fd(cat.get_stdout(), buf);
* if (cat.join() != 0) {
* std::cerr << cat.err() << std::endl;
* return false;
* }
*/
class SubProcess {
public:
enum std_fd_op{
KEEP,
CLOSE,
PIPE
};
public:
SubProcess(const char *cmd,
std_fd_op stdin_op = CLOSE,
std_fd_op stdout_op = CLOSE,
std_fd_op stderr_op = CLOSE);
virtual ~SubProcess();
void add_cmd_args(const char *arg, ...);
void add_cmd_arg(const char *arg);
virtual int spawn(); // Returns 0 on success or -errno on failure.
virtual int join(); // Returns exit code (0 on success).
bool is_spawned() const { return pid > 0; }
int get_stdin() const;
int get_stdout() const;
int get_stderr() const;
void close_stdin();
void close_stdout();
void close_stderr();
void kill(int signo = SIGTERM) const;
const std::string err() const;
protected:
bool is_child() const { return pid == 0; }
virtual void exec();
void close(int &fd);
#ifdef _WIN32
void close_h(HANDLE &handle);
#endif
protected:
std::string cmd;
std::vector<std::string> cmd_args;
std_fd_op stdin_op;
std_fd_op stdout_op;
std_fd_op stderr_op;
int stdin_pipe_out_fd;
int stdout_pipe_in_fd;
int stderr_pipe_in_fd;
int pid;
std::ostringstream errstr;
#ifdef _WIN32
HANDLE proc_handle = INVALID_HANDLE_VALUE;
#endif
};
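/**
 * SubProcessTimed:
 * Same interface as SubProcess, but (per the implementation in
 * SubProcess.cc) the command runs under a watchdog: if it has not exited
 * within `timeout` seconds, its process group is sent `sigkill` (SIGKILL by
 * default). A timeout of 0 disables the watchdog. Example (illustrative
 * only; the script name is hypothetical):
 *
 *   SubProcessTimed scrub("myscript.sh", SubProcess::CLOSE,
 *                         SubProcess::PIPE, SubProcess::CLOSE,
 *                         30, SIGKILL);
 *   if (scrub.spawn() == 0) {
 *     // ... read from scrub.get_stdout() ...
 *     int rc = scrub.join();
 *   }
 */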
class SubProcessTimed : public SubProcess {
public:
SubProcessTimed(const char *cmd, std_fd_op stdin_op = CLOSE,
std_fd_op stdout_op = CLOSE, std_fd_op stderr_op = CLOSE,
int timeout = 0, int sigkill = SIGKILL);
#ifdef _WIN32
int spawn() override;
int join() override;
#endif
protected:
void exec() override;
private:
int timeout;
int sigkill;
#ifdef _WIN32
std::thread waiter;
#endif
};
void timeout_sighandler(int sig);
#endif
| 2,902 | 19.884892 | 70 | h |
null | ceph-main/src/common/TextTable.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "TextTable.h"
using namespace std;
void TextTable::define_column(const string &heading,
enum TextTable::Align hd_align,
enum TextTable::Align col_align)
{
TextTableColumn def(heading, heading.length(), hd_align, col_align);
col.push_back(def);
}
void TextTable::clear() {
currow = 0;
curcol = 0;
indent = 0;
row.clear();
// reset widths to heading widths
for (unsigned int i = 0; i < col.size(); i++)
col[i].width = col[i].heading.size();
}
/**
* Pad s with space to appropriate alignment
*
* @param s string to pad
* @param width width of field to contain padded string
* @param align desired alignment (LEFT, CENTER, RIGHT)
*
* @return padded string
*/
static string
pad(string s, int width, TextTable::Align align)
{
int lpad, rpad;
lpad = 0;
rpad = 0;
switch (align) {
case TextTable::LEFT:
rpad = width - s.length();
break;
case TextTable::CENTER:
lpad = width / 2 - s.length() / 2;
rpad = width - lpad - s.length();
break;
case TextTable::RIGHT:
lpad = width - s.length();
break;
}
return string(lpad, ' ') + s + string(rpad, ' ');
}
std::ostream &operator<<(std::ostream &out, const TextTable &t)
{
for (unsigned int i = 0; i < t.col.size(); i++) {
TextTable::TextTableColumn col = t.col[i];
if (i) {
out << t.column_separation;
}
out << string(t.indent, ' ')
<< pad(col.heading, col.width, col.hd_align);
}
out << endl;
for (unsigned int i = 0; i < t.row.size(); i++) {
for (unsigned int j = 0; j < t.row[i].size(); j++) {
TextTable::TextTableColumn col = t.col[j];
if (j) {
out << t.column_separation;
}
out << string(t.indent, ' ')
<< pad(t.row[i][j], col.width, col.col_align);
}
out << endl;
}
return out;
}
| 2,242 | 23.380435 | 70 | cc |
null | ceph-main/src/common/TextTable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef TEXT_TABLE_H_
#define TEXT_TABLE_H_
#include <vector>
#include <sstream>
#include "include/ceph_assert.h"
/**
* TextTable:
* Manage tabular output of data. Caller defines heading of each column
* and alignment of heading and column data,
* then inserts rows of data including tuples of
* length (ncolumns) terminated by TextTable::endrow. When all rows
* are inserted, caller asks for output with ostream <<
* which sizes/pads/dumps the table to ostream.
*
* Columns autosize to largest heading or datum. One space is printed
* between columns.
*/
class TextTable {
public:
enum Align {LEFT = 1, CENTER, RIGHT};
private:
struct TextTableColumn {
std::string heading;
int width;
Align hd_align;
Align col_align;
TextTableColumn() {}
TextTableColumn(const std::string &h, int w, Align ha, Align ca) :
heading(h), width(w), hd_align(ha), col_align(ca) { }
~TextTableColumn() {}
};
std::vector<TextTableColumn> col; // column definitions
unsigned int curcol, currow; // col, row being inserted into
unsigned int indent; // indent width when rendering
std::string column_separation = {" "};
protected:
std::vector<std::vector<std::string> > row; // row data array
public:
TextTable(): curcol(0), currow(0), indent(0) {}
~TextTable() {}
/**
* Define a column in the table.
*
* @param heading Column heading string (or "")
* @param hd_align Alignment for heading in column
* @param col_align Data alignment
*
* @note alignment is of type TextTable::Align; values are
* TextTable::LEFT, TextTable::CENTER, or TextTable::RIGHT
*
*/
void define_column(const std::string& heading, Align hd_align,
Align col_align);
/**
* Set indent for table. Only affects table output.
*
* @param i Number of spaces to indent
*/
void set_indent(int i) { indent = i; }
/**
* Set column separation
*
* @param s String to separate columns
*/
void set_column_separation(const std::string& s) {
column_separation = s;
}
/**
* Add item to table, perhaps on new row.
* table << val1 << val2 << TextTable::endrow;
*
   * @param item value to output.
*
* @note: Numerics are output in decimal; strings are not truncated.
* Output formatting choice is limited to alignment in define_column().
*
* @return TextTable& for chaining.
*/
template<typename T> TextTable& operator<<(const T& item)
{
if (row.size() < currow + 1)
row.resize(currow + 1);
/**
* col.size() is a good guess for how big row[currow] needs to be,
* so just expand it out now
*/
if (row[currow].size() < col.size()) {
row[currow].resize(col.size());
}
// inserting more items than defined columns is a coding error
ceph_assert(curcol + 1 <= col.size());
// get rendered width of item alone
std::ostringstream oss;
oss << item;
int width = oss.str().length();
oss.seekp(0);
// expand column width if necessary
if (width > col[curcol].width) {
col[curcol].width = width;
}
// now store the rendered item with its proper width
row[currow][curcol] = oss.str();
curcol++;
return *this;
}
/**
* Degenerate type/variable here is just to allow selection of the
* following operator<< for "<< TextTable::endrow"
*/
struct endrow_t {};
static constexpr endrow_t endrow{};
/**
* Implements TextTable::endrow
*/
TextTable &operator<<(endrow_t)
{
curcol = 0;
currow++;
return *this;
}
/**
* Render table to ostream (i.e. cout << table)
*/
friend std::ostream &operator<<(std::ostream &out, const TextTable &t);
/**
* clear: Reset everything in a TextTable except column defs
* resize cols to heading widths, clear indent
*/
void clear();
};
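/* Illustrative usage sketch (not part of the original header; the column
 * names and values are hypothetical):
 *
 *   TextTable t;
 *   t.define_column("NAME", TextTable::LEFT, TextTable::LEFT);
 *   t.define_column("USED", TextTable::RIGHT, TextTable::RIGHT);
 *   t << "osd.0" << 1234 << TextTable::endrow;
 *   t << "osd.1" << 56 << TextTable::endrow;
 *   std::cout << t;
 */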
#endif
| 4,269 | 23.261364 | 73 | h |
null | ceph-main/src/common/Thread.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <signal.h>
#include <unistd.h>
#ifdef __linux__
#include <sys/syscall.h> /* For SYS_xxx definitions */
#endif
#ifdef WITH_SEASTAR
#include "crimson/os/alienstore/alien_store.h"
#endif
#include "common/Thread.h"
#include "common/code_environment.h"
#include "common/debug.h"
#include "common/signal.h"
#ifdef HAVE_SCHED
#include <sched.h>
#endif
pid_t ceph_gettid(void)
{
#ifdef __linux__
return syscall(SYS_gettid);
#else
return -ENOSYS;
#endif
}
static int _set_affinity(int id)
{
#ifdef HAVE_SCHED
if (id >= 0 && id < CPU_SETSIZE) {
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(id, &cpuset);
if (sched_setaffinity(0, sizeof(cpuset), &cpuset) < 0)
return -errno;
/* guaranteed to take effect immediately */
sched_yield();
}
#endif
return 0;
}
Thread::Thread()
: thread_id(0),
pid(0),
cpuid(-1)
{
}
Thread::~Thread()
{
}
void *Thread::_entry_func(void *arg) {
void *r = ((Thread*)arg)->entry_wrapper();
return r;
}
void *Thread::entry_wrapper()
{
int p = ceph_gettid(); // may return -ENOSYS on other platforms
if (p > 0)
pid = p;
if (pid && cpuid >= 0)
_set_affinity(cpuid);
ceph_pthread_setname(pthread_self(), thread_name.c_str());
return entry();
}
const pthread_t &Thread::get_thread_id() const
{
return thread_id;
}
bool Thread::is_started() const
{
return thread_id != 0;
}
bool Thread::am_self() const
{
return (pthread_self() == thread_id);
}
int Thread::kill(int signal)
{
if (thread_id)
return pthread_kill(thread_id, signal);
else
return -EINVAL;
}
int Thread::try_create(size_t stacksize)
{
pthread_attr_t *thread_attr = NULL;
pthread_attr_t thread_attr_loc;
stacksize &= CEPH_PAGE_MASK; // must be multiple of page
if (stacksize) {
thread_attr = &thread_attr_loc;
pthread_attr_init(thread_attr);
pthread_attr_setstacksize(thread_attr, stacksize);
}
int r;
  // The child thread will inherit our signal mask.  Set our signal mask to
  // the set of signals we want to block.  (It's ok to block more signals
  // than usual for a little while -- they will just be delivered to
  // another thread or delivered to this thread later.)
#ifndef _WIN32
sigset_t old_sigset;
if (g_code_env == CODE_ENVIRONMENT_LIBRARY) {
block_signals(NULL, &old_sigset);
}
else {
int to_block[] = { SIGPIPE , 0 };
block_signals(to_block, &old_sigset);
}
r = pthread_create(&thread_id, thread_attr, _entry_func, (void*)this);
restore_sigset(&old_sigset);
#else
r = pthread_create(&thread_id, thread_attr, _entry_func, (void*)this);
#endif
if (thread_attr) {
pthread_attr_destroy(thread_attr);
}
return r;
}
void Thread::create(const char *name, size_t stacksize)
{
ceph_assert(strlen(name) < 16);
thread_name = name;
int ret = try_create(stacksize);
if (ret != 0) {
char buf[256];
snprintf(buf, sizeof(buf), "Thread::try_create(): pthread_create "
"failed with error %d", ret);
dout_emergency(buf);
ceph_assert(ret == 0);
}
}
int Thread::join(void **prval)
{
if (thread_id == 0) {
ceph_abort_msg("join on thread that was never started");
return -EINVAL;
}
int status = pthread_join(thread_id, prval);
if (status != 0) {
char buf[256];
snprintf(buf, sizeof(buf), "Thread::join(): pthread_join "
"failed with error %d\n", status);
dout_emergency(buf);
ceph_assert(status == 0);
}
thread_id = 0;
return status;
}
int Thread::detach()
{
return pthread_detach(thread_id);
}
int Thread::set_affinity(int id)
{
int r = 0;
cpuid = id;
if (pid && ceph_gettid() == pid)
r = _set_affinity(id);
return r;
}
// Functions for std::thread
// =========================
void set_thread_name(std::thread& t, const std::string& s) {
int r = ceph_pthread_setname(t.native_handle(), s.c_str());
if (r != 0) {
throw std::system_error(r, std::generic_category());
}
}
std::string get_thread_name(const std::thread& t) {
std::string s(256, '\0');
int r = ceph_pthread_getname(const_cast<std::thread&>(t).native_handle(),
s.data(), s.length());
if (r != 0) {
throw std::system_error(r, std::generic_category());
}
s.resize(std::strlen(s.data()));
return s;
}
void kill(std::thread& t, int signal)
{
auto r = pthread_kill(t.native_handle(), signal);
if (r != 0) {
throw std::system_error(r, std::generic_category());
}
}
| 4,856 | 20.025974 | 75 | cc |
null | ceph-main/src/common/Thread.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_THREAD_H
#define CEPH_THREAD_H
#include <functional>
#include <string_view>
#include <system_error>
#include <thread>
#include <pthread.h>
#include <sys/types.h>
#include "include/compat.h"
extern pid_t ceph_gettid();
class Thread {
private:
pthread_t thread_id;
pid_t pid;
int cpuid;
std::string thread_name;
void *entry_wrapper();
public:
Thread(const Thread&) = delete;
Thread& operator=(const Thread&) = delete;
Thread();
virtual ~Thread();
protected:
virtual void *entry() = 0;
private:
static void *_entry_func(void *arg);
public:
const pthread_t &get_thread_id() const;
pid_t get_pid() const { return pid; }
bool is_started() const;
bool am_self() const;
int kill(int signal);
int try_create(size_t stacksize);
void create(const char *name, size_t stacksize = 0);
int join(void **prval = 0);
int detach();
int set_affinity(int cpuid);
};
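/*
 * Illustrative usage sketch (editor's addition): derive from Thread and
 * implement entry(); create() names the thread (the name must be shorter
 * than 16 characters, see the assert in Thread::create()).
 *
 *   class Worker : public Thread {
 *     void *entry() override {
 *       // ... do the work ...
 *       return nullptr;
 *     }
 *   };
 *
 *   Worker w;
 *   w.create("worker");
 *   // ...
 *   w.join();
 */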
// Functions for use with std::thread
void set_thread_name(std::thread& t, const std::string& s);
std::string get_thread_name(const std::thread& t);
void kill(std::thread& t, int signal);
template<typename Fun, typename... Args>
std::thread make_named_thread(std::string_view n,
Fun&& fun,
Args&& ...args) {
return std::thread([n = std::string(n)](auto&& fun, auto&& ...args) {
ceph_pthread_setname(pthread_self(), n.data());
std::invoke(std::forward<Fun>(fun),
std::forward<Args>(args)...);
}, std::forward<Fun>(fun), std::forward<Args>(args)...);
}
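/*
 * Illustrative usage sketch (editor's addition): the callable and its
 * arguments are forwarded to the new thread after its name has been set.
 *
 *   std::thread t = make_named_thread("ex_worker", [](int n) {
 *     // ... do n units of work ...
 *   }, 42);
 *   t.join();
 */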
#endif
| 1,941 | 22.119048 | 71 | h |
null | ceph-main/src/common/Throttle.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/scope_guard.h"
#include "common/Throttle.h"
#include "common/ceph_time.h"
#include "common/perf_counters.h"
// re-include our assert to clobber the system one; fix dout:
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_throttle
#undef dout_prefix
#define dout_prefix *_dout << "throttle(" << name << " " << (void*)this << ") "
using std::list;
using std::ostream;
using std::string;
using ceph::mono_clock;
using ceph::mono_time;
using ceph::timespan;
enum {
l_throttle_first = 532430,
l_throttle_val,
l_throttle_max,
l_throttle_get_started,
l_throttle_get,
l_throttle_get_sum,
l_throttle_get_or_fail_fail,
l_throttle_get_or_fail_success,
l_throttle_take,
l_throttle_take_sum,
l_throttle_put,
l_throttle_put_sum,
l_throttle_wait,
l_throttle_last,
};
Throttle::Throttle(CephContext *cct, const std::string& n, int64_t m,
bool _use_perf)
: cct(cct), name(n), max(m),
use_perf(_use_perf)
{
ceph_assert(m >= 0);
if (!use_perf)
return;
if (cct->_conf->throttler_perf_counter) {
PerfCountersBuilder b(cct, string("throttle-") + name, l_throttle_first, l_throttle_last);
b.add_u64(l_throttle_val, "val", "Currently taken slots");
b.add_u64(l_throttle_max, "max", "Max value for throttle");
b.add_u64_counter(l_throttle_get_started, "get_started", "Number of get calls, increased before wait");
b.add_u64_counter(l_throttle_get, "get", "Gets");
b.add_u64_counter(l_throttle_get_sum, "get_sum", "Got data");
b.add_u64_counter(l_throttle_get_or_fail_fail, "get_or_fail_fail", "Get blocked during get_or_fail");
b.add_u64_counter(l_throttle_get_or_fail_success, "get_or_fail_success", "Successful get during get_or_fail");
b.add_u64_counter(l_throttle_take, "take", "Takes");
b.add_u64_counter(l_throttle_take_sum, "take_sum", "Taken data");
b.add_u64_counter(l_throttle_put, "put", "Puts");
b.add_u64_counter(l_throttle_put_sum, "put_sum", "Put data");
b.add_time_avg(l_throttle_wait, "wait", "Waiting latency");
logger = { b.create_perf_counters(), cct };
cct->get_perfcounters_collection()->add(logger.get());
logger->set(l_throttle_max, max);
}
}
Throttle::~Throttle()
{
std::lock_guard l(lock);
ceph_assert(conds.empty());
}
void Throttle::_reset_max(int64_t m)
{
// lock must be held.
if (max == m)
return;
if (!conds.empty())
conds.front().notify_one();
if (logger)
logger->set(l_throttle_max, m);
max = m;
}
bool Throttle::_wait(int64_t c, std::unique_lock<std::mutex>& l)
{
mono_time start;
bool waited = false;
if (_should_wait(c) || !conds.empty()) { // always wait behind other waiters.
{
auto cv = conds.emplace(conds.end());
auto w = make_scope_guard([this, cv]() {
conds.erase(cv);
});
waited = true;
ldout(cct, 2) << "_wait waiting..." << dendl;
if (logger)
start = mono_clock::now();
cv->wait(l, [this, c, cv]() { return (!_should_wait(c) &&
cv == conds.begin()); });
ldout(cct, 2) << "_wait finished waiting" << dendl;
if (logger) {
logger->tinc(l_throttle_wait, mono_clock::now() - start);
}
}
// wake up the next guy
if (!conds.empty())
conds.front().notify_one();
}
return waited;
}
bool Throttle::wait(int64_t m)
{
if (0 == max && 0 == m) {
return false;
}
std::unique_lock l(lock);
if (m) {
ceph_assert(m > 0);
_reset_max(m);
}
ldout(cct, 10) << "wait" << dendl;
return _wait(0, l);
}
int64_t Throttle::take(int64_t c)
{
if (0 == max) {
return 0;
}
ceph_assert(c >= 0);
ldout(cct, 10) << "take " << c << dendl;
count += c;
if (logger) {
logger->inc(l_throttle_take);
logger->inc(l_throttle_take_sum, c);
logger->set(l_throttle_val, count);
}
return count;
}
bool Throttle::get(int64_t c, int64_t m)
{
if (0 == max && 0 == m) {
count += c;
return false;
}
ceph_assert(c >= 0);
ldout(cct, 10) << "get " << c << " (" << count.load() << " -> " << (count.load() + c) << ")" << dendl;
if (logger) {
logger->inc(l_throttle_get_started);
}
bool waited = false;
{
std::unique_lock l(lock);
if (m) {
ceph_assert(m > 0);
_reset_max(m);
}
waited = _wait(c, l);
count += c;
}
if (logger) {
logger->inc(l_throttle_get);
logger->inc(l_throttle_get_sum, c);
logger->set(l_throttle_val, count);
}
return waited;
}
/* Returns true if it successfully got the requested amount,
* or false if it would block.
*/
bool Throttle::get_or_fail(int64_t c)
{
if (0 == max) {
count += c;
return true;
}
  ceph_assert(c >= 0);
bool result = false;
{
std::lock_guard l(lock);
if (_should_wait(c) || !conds.empty()) {
ldout(cct, 10) << "get_or_fail " << c << " failed" << dendl;
result = false;
} else {
ldout(cct, 10) << "get_or_fail " << c << " success (" << count.load()
<< " -> " << (count.load() + c) << ")" << dendl;
count += c;
result = true;
}
}
if (logger) {
if (result) {
logger->inc(l_throttle_get_or_fail_success);
logger->inc(l_throttle_get);
logger->inc(l_throttle_get_sum, c);
logger->set(l_throttle_val, count);
} else {
logger->inc(l_throttle_get_or_fail_fail);
}
}
return result;
}
int64_t Throttle::put(int64_t c)
{
if (0 == max) {
count -= c;
return 0;
}
ceph_assert(c >= 0);
ldout(cct, 10) << "put " << c << " (" << count.load() << " -> "
<< (count.load()-c) << ")" << dendl;
int64_t new_count;
{
std::lock_guard l(lock);
new_count = count;
if (c) {
if (!conds.empty())
conds.front().notify_one();
// if count goes negative, we failed somewhere!
ceph_assert(count >= c);
new_count = count -= c;
}
}
if (logger) {
logger->inc(l_throttle_put);
logger->inc(l_throttle_put_sum, c);
logger->set(l_throttle_val, count);
}
return new_count;
}
void Throttle::reset()
{
std::lock_guard l(lock);
if (!conds.empty())
conds.front().notify_one();
count = 0;
if (logger) {
logger->set(l_throttle_val, 0);
}
}
enum {
l_backoff_throttle_first = l_throttle_last + 1,
l_backoff_throttle_val,
l_backoff_throttle_max,
l_backoff_throttle_get,
l_backoff_throttle_get_sum,
l_backoff_throttle_take,
l_backoff_throttle_take_sum,
l_backoff_throttle_put,
l_backoff_throttle_put_sum,
l_backoff_throttle_wait,
l_backoff_throttle_last,
};
BackoffThrottle::BackoffThrottle(CephContext *cct, const std::string& n,
unsigned expected_concurrency, bool _use_perf)
: name(n),
conds(expected_concurrency),///< [in] determines size of conds
use_perf(_use_perf)
{
if (!use_perf)
return;
if (cct->_conf->throttler_perf_counter) {
PerfCountersBuilder b(cct, string("throttle-") + name,
l_backoff_throttle_first, l_backoff_throttle_last);
b.add_u64(l_backoff_throttle_val, "val", "Currently available throttle");
b.add_u64(l_backoff_throttle_max, "max", "Max value for throttle");
b.add_u64_counter(l_backoff_throttle_get, "get", "Gets");
b.add_u64_counter(l_backoff_throttle_get_sum, "get_sum", "Got data");
b.add_u64_counter(l_backoff_throttle_take, "take", "Takes");
b.add_u64_counter(l_backoff_throttle_take_sum, "take_sum", "Taken data");
b.add_u64_counter(l_backoff_throttle_put, "put", "Puts");
b.add_u64_counter(l_backoff_throttle_put_sum, "put_sum", "Put data");
b.add_time_avg(l_backoff_throttle_wait, "wait", "Waiting latency");
logger = { b.create_perf_counters(), cct };
cct->get_perfcounters_collection()->add(logger.get());
logger->set(l_backoff_throttle_max, max);
}
}
BackoffThrottle::~BackoffThrottle()
{
std::lock_guard l(lock);
ceph_assert(waiters.empty());
}
bool BackoffThrottle::set_params(
double _low_threshold,
double _high_threshold,
double _expected_throughput,
double _high_multiple,
double _max_multiple,
uint64_t _throttle_max,
ostream *errstream)
{
bool valid = true;
if (_low_threshold > _high_threshold) {
valid = false;
if (errstream) {
*errstream << "low_threshold (" << _low_threshold
<< ") > high_threshold (" << _high_threshold
<< ")" << std::endl;
}
}
if (_high_multiple > _max_multiple) {
valid = false;
if (errstream) {
*errstream << "_high_multiple (" << _high_multiple
<< ") > _max_multiple (" << _max_multiple
<< ")" << std::endl;
}
}
if (_low_threshold > 1 || _low_threshold < 0) {
valid = false;
if (errstream) {
*errstream << "invalid low_threshold (" << _low_threshold << ")"
<< std::endl;
}
}
if (_high_threshold > 1 || _high_threshold < 0) {
valid = false;
if (errstream) {
*errstream << "invalid high_threshold (" << _high_threshold << ")"
<< std::endl;
}
}
if (_max_multiple < 0) {
valid = false;
if (errstream) {
*errstream << "invalid _max_multiple ("
<< _max_multiple << ")"
<< std::endl;
}
}
if (_high_multiple < 0) {
valid = false;
if (errstream) {
*errstream << "invalid _high_multiple ("
<< _high_multiple << ")"
<< std::endl;
}
}
if (_expected_throughput < 0) {
valid = false;
if (errstream) {
*errstream << "invalid _expected_throughput("
<< _expected_throughput << ")"
<< std::endl;
}
}
if (!valid)
return false;
locker l(lock);
low_threshold = _low_threshold;
high_threshold = _high_threshold;
high_delay_per_count = _high_multiple / _expected_throughput;
max_delay_per_count = _max_multiple / _expected_throughput;
max = _throttle_max;
if (logger)
logger->set(l_backoff_throttle_max, max);
if (high_threshold - low_threshold > 0) {
s0 = high_delay_per_count / (high_threshold - low_threshold);
} else {
low_threshold = high_threshold;
s0 = 0;
}
if (1 - high_threshold > 0) {
s1 = (max_delay_per_count - high_delay_per_count)
/ (1 - high_threshold);
} else {
high_threshold = 1;
s1 = 0;
}
_kick_waiters();
return true;
}
ceph::timespan BackoffThrottle::_get_delay(uint64_t c) const
{
if (max == 0)
return ceph::timespan(0);
double r = ((double)current) / ((double)max);
if (r < low_threshold) {
return ceph::timespan(0);
} else if (r < high_threshold) {
return c * ceph::make_timespan(
(r - low_threshold) * s0);
} else {
return c * ceph::make_timespan(
high_delay_per_count + ((r - high_threshold) * s1));
}
}
ceph::timespan BackoffThrottle::get(uint64_t c)
{
locker l(lock);
auto delay = _get_delay(c);
if (logger) {
logger->inc(l_backoff_throttle_get);
logger->inc(l_backoff_throttle_get_sum, c);
}
// fast path
if (delay.count() == 0 &&
waiters.empty() &&
((max == 0) || (current == 0) || ((current + c) <= max))) {
current += c;
if (logger) {
logger->set(l_backoff_throttle_val, current);
}
return ceph::make_timespan(0);
}
auto ticket = _push_waiter();
auto wait_from = mono_clock::now();
bool waited = false;
while (waiters.begin() != ticket) {
(*ticket)->wait(l);
waited = true;
}
auto start = mono_clock::now();
delay = _get_delay(c);
while (true) {
if (max != 0 && current != 0 && (current + c) > max) {
(*ticket)->wait(l);
waited = true;
} else if (delay.count() > 0) {
(*ticket)->wait_for(l, delay);
waited = true;
} else {
break;
}
ceph_assert(ticket == waiters.begin());
delay = _get_delay(c);
auto elapsed = mono_clock::now() - start;
if (delay <= elapsed) {
delay = timespan::zero();
} else {
delay -= elapsed;
}
}
waiters.pop_front();
_kick_waiters();
current += c;
if (logger) {
logger->set(l_backoff_throttle_val, current);
if (waited) {
logger->tinc(l_backoff_throttle_wait, mono_clock::now() - wait_from);
}
}
return mono_clock::now() - start;
}
uint64_t BackoffThrottle::put(uint64_t c)
{
locker l(lock);
ceph_assert(current >= c);
current -= c;
_kick_waiters();
if (logger) {
logger->inc(l_backoff_throttle_put);
logger->inc(l_backoff_throttle_put_sum, c);
logger->set(l_backoff_throttle_val, current);
}
return current;
}
uint64_t BackoffThrottle::take(uint64_t c)
{
locker l(lock);
current += c;
if (logger) {
logger->inc(l_backoff_throttle_take);
logger->inc(l_backoff_throttle_take_sum, c);
logger->set(l_backoff_throttle_val, current);
}
return current;
}
uint64_t BackoffThrottle::get_current()
{
locker l(lock);
return current;
}
uint64_t BackoffThrottle::get_max()
{
locker l(lock);
return max;
}
SimpleThrottle::SimpleThrottle(uint64_t max, bool ignore_enoent)
: m_max(max), m_ignore_enoent(ignore_enoent) {}
SimpleThrottle::~SimpleThrottle()
{
std::lock_guard l(m_lock);
ceph_assert(m_current == 0);
ceph_assert(waiters == 0);
}
void SimpleThrottle::start_op()
{
std::unique_lock l(m_lock);
waiters++;
m_cond.wait(l, [this]() { return m_max != m_current; });
waiters--;
++m_current;
}
void SimpleThrottle::end_op(int r)
{
std::lock_guard l(m_lock);
--m_current;
if (r < 0 && !m_ret && !(r == -ENOENT && m_ignore_enoent))
m_ret = r;
m_cond.notify_all();
}
bool SimpleThrottle::pending_error() const
{
std::lock_guard l(m_lock);
return (m_ret < 0);
}
int SimpleThrottle::wait_for_ret()
{
std::unique_lock l(m_lock);
waiters++;
m_cond.wait(l, [this]() { return m_current == 0; });
waiters--;
return m_ret;
}
void C_OrderedThrottle::finish(int r) {
m_ordered_throttle->finish_op(m_tid, r);
}
OrderedThrottle::OrderedThrottle(uint64_t max, bool ignore_enoent)
: m_max(max), m_ignore_enoent(ignore_enoent) {}
OrderedThrottle::~OrderedThrottle() {
std::lock_guard l(m_lock);
ceph_assert(waiters == 0);
}
C_OrderedThrottle *OrderedThrottle::start_op(Context *on_finish) {
ceph_assert(on_finish);
std::unique_lock l(m_lock);
uint64_t tid = m_next_tid++;
m_tid_result[tid] = Result(on_finish);
auto ctx = std::make_unique<C_OrderedThrottle>(this, tid);
complete_pending_ops(l);
while (m_max == m_current) {
++waiters;
m_cond.wait(l);
--waiters;
complete_pending_ops(l);
}
++m_current;
return ctx.release();
}
void OrderedThrottle::end_op(int r) {
std::lock_guard l(m_lock);
ceph_assert(m_current > 0);
if (r < 0 && m_ret_val == 0 && (r != -ENOENT || !m_ignore_enoent)) {
m_ret_val = r;
}
--m_current;
m_cond.notify_all();
}
void OrderedThrottle::finish_op(uint64_t tid, int r) {
std::lock_guard l(m_lock);
auto it = m_tid_result.find(tid);
ceph_assert(it != m_tid_result.end());
it->second.finished = true;
it->second.ret_val = r;
m_cond.notify_all();
}
bool OrderedThrottle::pending_error() const {
std::lock_guard l(m_lock);
return (m_ret_val < 0);
}
int OrderedThrottle::wait_for_ret() {
std::unique_lock l(m_lock);
complete_pending_ops(l);
while (m_current > 0) {
++waiters;
m_cond.wait(l);
--waiters;
complete_pending_ops(l);
}
return m_ret_val;
}
void OrderedThrottle::complete_pending_ops(std::unique_lock<std::mutex>& l) {
while (true) {
auto it = m_tid_result.begin();
if (it == m_tid_result.end() || it->first != m_complete_tid ||
!it->second.finished) {
break;
}
Result result = it->second;
m_tid_result.erase(it);
l.unlock();
result.on_finish->complete(result.ret_val);
l.lock();
++m_complete_tid;
}
}
#undef dout_prefix
#define dout_prefix *_dout << "TokenBucketThrottle(" << m_name << " " \
<< (void*)this << ") "
uint64_t TokenBucketThrottle::Bucket::get(uint64_t c) {
if (0 == max) {
return 0;
}
uint64_t got = 0;
if (available >= c) {
    // There are enough tokens in the bucket; take c.
got = c;
available -= c;
remain -= c;
} else {
    // Not enough tokens; take everything that is available.
got = available;
remain -= available;
available = 0;
}
return got;
}
uint64_t TokenBucketThrottle::Bucket::put(uint64_t tokens, double burst_ratio) {
if (0 == max) {
return 0;
}
if (tokens) {
// put tokens into bucket
uint64_t current = remain;
if ((current + tokens) <= capacity) {
remain += tokens;
} else {
remain = capacity;
}
// available tokens increase at burst speed
uint64_t available_inc = tokens;
if (burst_ratio > 1) {
available_inc = (uint64_t)(tokens * burst_ratio);
}
uint64_t inc_upper_limit = remain > max ? max : remain;
    if ((available + available_inc) <= inc_upper_limit) {
      available += available_inc;
    } else {
      available = inc_upper_limit;
    }
}
return remain;
}
void TokenBucketThrottle::Bucket::set_max(uint64_t max, uint64_t burst_seconds) {
  // the capacity of the bucket should not be less than max
  if (burst_seconds < 1) {
    burst_seconds = 1;
  }
  uint64_t new_capacity = max * burst_seconds;
  if (capacity != new_capacity) {
capacity = new_capacity;
remain = capacity;
}
if (available > max || 0 == max) {
available = max;
}
this->max = max;
}
TokenBucketThrottle::TokenBucketThrottle(
CephContext *cct,
const std::string &name,
uint64_t burst,
uint64_t avg,
SafeTimer *timer,
ceph::mutex *timer_lock)
: m_cct(cct), m_name(name),
m_throttle(m_cct, name + "_bucket", burst),
m_burst(burst), m_avg(avg), m_timer(timer), m_timer_lock(timer_lock),
m_lock(ceph::make_mutex(name + "_lock"))
{}
TokenBucketThrottle::~TokenBucketThrottle() {
// cancel the timer events.
{
std::lock_guard timer_locker(*m_timer_lock);
cancel_timer();
}
list<Blocker> tmp_blockers;
{
std::lock_guard blockers_lock(m_lock);
tmp_blockers.splice(tmp_blockers.begin(), m_blockers, m_blockers.begin(), m_blockers.end());
}
for (auto b : tmp_blockers) {
b.ctx->complete(0);
}
}
int TokenBucketThrottle::set_limit(uint64_t average, uint64_t burst, uint64_t burst_seconds) {
{
std::lock_guard lock{m_lock};
if (0 < burst && burst < average) {
      // the burst should never be less than the average.
return -EINVAL;
}
m_avg = average;
m_burst = burst;
if (0 == average) {
// The limit is not set, and no tokens will be put into the bucket.
// So, we can schedule the timer slowly, or even cancel it.
m_tick = 1000;
} else {
      // calculate the tick (ms); don't let it fall below the minimum.
m_tick = 1000 / average;
if (m_tick < m_tick_min) {
m_tick = m_tick_min;
}
      // this handles the case where the average is not evenly divisible
      // by the number of ticks per second.
m_ticks_per_second = 1000 / m_tick;
m_current_tick = 0;
      // if burst is not configured (0), fall back to the average.
m_throttle.set_max(0 == burst ? average : burst, burst_seconds);
}
    // convert milliseconds to seconds
m_schedule_tick = m_tick / 1000.0;
}
// The schedule period will be changed when the average rate is set.
{
std::lock_guard timer_locker{*m_timer_lock};
cancel_timer();
schedule_timer();
}
return 0;
}
void TokenBucketThrottle::set_schedule_tick_min(uint64_t tick) {
std::lock_guard lock(m_lock);
if (tick != 0) {
m_tick_min = tick;
}
}
uint64_t TokenBucketThrottle::tokens_filled(double tick) {
return (0 == m_avg) ? 0 : (tick / m_ticks_per_second * m_avg);
}
uint64_t TokenBucketThrottle::tokens_this_tick() {
if (0 == m_avg) {
return 0;
}
if (m_current_tick >= m_ticks_per_second) {
m_current_tick = 0;
}
m_current_tick++;
return tokens_filled(m_current_tick) - tokens_filled(m_current_tick - 1);
}
void TokenBucketThrottle::add_tokens() {
list<Blocker> tmp_blockers;
{
std::lock_guard lock(m_lock);
// put tokens into bucket.
double burst_ratio = 1.0;
    if (m_throttle.max > m_avg && m_avg > 0) {
      burst_ratio = (double)m_throttle.max / m_avg;
}
m_throttle.put(tokens_this_tick(), burst_ratio);
if (0 == m_avg || 0 == m_throttle.max)
tmp_blockers.swap(m_blockers);
    // walk m_blockers from head to tail; if a blocker can get
    // enough tokens, let it go.
while (!m_blockers.empty()) {
Blocker &blocker = m_blockers.front();
uint64_t got = m_throttle.get(blocker.tokens_requested);
if (got == blocker.tokens_requested) {
// got enough tokens for front.
tmp_blockers.splice(tmp_blockers.end(), m_blockers, m_blockers.begin());
} else {
        // there are no more tokens.
blocker.tokens_requested -= got;
break;
}
}
}
for (auto b : tmp_blockers) {
b.ctx->complete(0);
}
}
void TokenBucketThrottle::schedule_timer() {
m_token_ctx = new LambdaContext(
[this](int r) {
schedule_timer();
});
m_timer->add_event_after(m_schedule_tick, m_token_ctx);
add_tokens();
}
void TokenBucketThrottle::cancel_timer() {
m_timer->cancel_event(m_token_ctx);
}
| 21,092 | 22.753378 | 114 | cc |
null | ceph-main/src/common/Throttle.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_THROTTLE_H
#define CEPH_THROTTLE_H
#include <atomic>
#include <chrono>
#include <iostream>
#include <list>
#include <map>
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "common/ThrottleInterface.h"
#include "common/Timer.h"
#include "common/convenience.h"
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
#include "crimson/common/perf_counters_collection.h"
#else
#include "common/perf_counters_collection.h"
#endif
/**
* @class Throttle
* Throttles the maximum number of active requests.
*
 * This class bounds the number of slots that can be taken at any one time.
 * Requests that would exceed the limit are delayed until enough slots are
 * put back and @p get_current() drops below the limit again, at which point
 * the waiting requests are fulfilled.
*/
class Throttle final : public ThrottleInterface {
CephContext *cct;
const std::string name;
PerfCountersRef logger;
std::atomic<int64_t> count = { 0 }, max = { 0 };
std::mutex lock;
std::list<std::condition_variable> conds;
const bool use_perf;
public:
Throttle(CephContext *cct, const std::string& n, int64_t m = 0, bool _use_perf = true);
~Throttle() override;
private:
void _reset_max(int64_t m);
bool _should_wait(int64_t c) const {
int64_t m = max;
int64_t cur = count;
return
m &&
((c <= m && cur + c > m) || // normally stay under max
(c >= m && cur > m)); // except for large c
}
bool _wait(int64_t c, std::unique_lock<std::mutex>& l);
public:
/**
* gets the number of currently taken slots
* @returns the number of taken slots
*/
int64_t get_current() const {
return count;
}
/**
* get the max number of slots
* @returns the max number of slots
*/
int64_t get_max() const { return max; }
/**
* return true if past midpoint
*/
bool past_midpoint() const {
return count >= max / 2;
}
/**
* set the new max number, and wait until the number of taken slots drains
* and drops below this limit.
*
* @param m the new max number
   * @returns true if this method blocked, false if it returned immediately
*/
bool wait(int64_t m = 0);
/**
   * take the specified number of slots from the stock regardless of the throttling
* @param c number of slots to take
* @returns the total number of taken slots
*/
int64_t take(int64_t c = 1) override;
/**
   * get the specified number of slots from the stock, but wait if the
   * total number taken by consumers would exceed the maximum.
* @param c number of slots to get
* @param m new maximum number to set, ignored if it is 0
* @returns true if this request is blocked due to the throttling, false
* otherwise
*/
bool get(int64_t c = 1, int64_t m = 0);
/**
   * the non-blocking version of @p get()
* @returns true if it successfully got the requested amount,
* or false if it would block.
*/
bool get_or_fail(int64_t c = 1);
/**
* put slots back to the stock
* @param c number of slots to return
   * @returns the number of slots still taken after this
*/
int64_t put(int64_t c = 1) override;
/**
   * reset the count of taken slots to zero
*/
void reset();
void reset_max(int64_t m) {
std::lock_guard l(lock);
_reset_max(m);
}
};
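/*
 * Illustrative usage sketch (editor's addition): bound in-flight work to 10
 * units; the "example" name and the counts are arbitrary.
 *
 *   Throttle throttle(cct, "example", 10);
 *
 *   throttle.get(3);          // may block until 3 slots fit under the max
 *   // ... do the work ...
 *   throttle.put(3);          // release the slots, waking any waiters
 *
 *   if (throttle.get_or_fail(5)) {
 *     // got 5 slots without blocking
 *     throttle.put(5);
 *   }
 */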
/**
* BackoffThrottle
*
* Creates a throttle which gradually induces delays when get() is called
* based on params low_threshold, high_threshold, expected_throughput,
* high_multiple, and max_multiple.
*
* In [0, low_threshold), we want no delay.
*
* In [low_threshold, high_threshold), delays should be injected based
* on a line from 0 at low_threshold to
* high_multiple * (1/expected_throughput) at high_threshold.
*
* In [high_threshold, 1), we want delays injected based on a line from
* (high_multiple * (1/expected_throughput)) at high_threshold to
* (high_multiple * (1/expected_throughput)) +
* (max_multiple * (1/expected_throughput)) at 1.
*
* Let the current throttle ratio (current/max) be r, low_threshold be l,
* high_threshold be h, high_delay (high_multiple / expected_throughput) be e,
* and max_delay (max_multiple / expected_throughput) be m.
*
* delay = 0, r \in [0, l)
* delay = (r - l) * (e / (h - l)), r \in [l, h)
 * delay = e + (r - h) * ((m - e) / (1 - h)), r \in [h, 1]
*/
class BackoffThrottle {
const std::string name;
PerfCountersRef logger;
std::mutex lock;
using locker = std::unique_lock<std::mutex>;
unsigned next_cond = 0;
/// allocated once to avoid constantly allocating new ones
std::vector<std::condition_variable> conds;
const bool use_perf;
/// pointers into conds
std::list<std::condition_variable*> waiters;
std::list<std::condition_variable*>::iterator _push_waiter() {
unsigned next = next_cond++;
if (next_cond == conds.size())
next_cond = 0;
return waiters.insert(waiters.end(), &(conds[next]));
}
void _kick_waiters() {
if (!waiters.empty())
waiters.front()->notify_all();
}
/// see above, values are in [0, 1].
double low_threshold = 0;
double high_threshold = 1;
/// see above, values are in seconds
double high_delay_per_count = 0;
double max_delay_per_count = 0;
  /// Filled in by set_params
double s0 = 0; ///< e / (h - l), l != h, 0 otherwise
double s1 = 0; ///< (m - e)/(1 - h), 1 != h, 0 otherwise
/// max
uint64_t max = 0;
uint64_t current = 0;
ceph::timespan _get_delay(uint64_t c) const;
public:
/**
* set_params
*
* Sets params. If the params are invalid, returns false
* and populates errstream (if non-null) with a user comprehensible
* explanation.
*/
bool set_params(
double _low_threshold,
double _high_threshold,
double expected_throughput,
double high_multiple,
double max_multiple,
uint64_t throttle_max,
std::ostream *errstream);
ceph::timespan get(uint64_t c = 1);
ceph::timespan wait() {
return get(0);
}
uint64_t put(uint64_t c = 1);
uint64_t take(uint64_t c = 1);
uint64_t get_current();
uint64_t get_max();
BackoffThrottle(CephContext *cct, const std::string& n,
unsigned expected_concurrency, ///< [in] determines size of conds
bool _use_perf = true);
~BackoffThrottle();
};
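/*
 * Illustrative sketch (editor's addition), working through the formulas
 * above: with low_threshold = 0.5, high_threshold = 0.9,
 * expected_throughput = 1000 and high_multiple = 2, max_multiple = 10,
 * the per-count delay ramps from 0 at 50% full to 2ms at 90% full and on
 * up to 10ms when completely full.  The names below are arbitrary.
 *
 *   BackoffThrottle throttle(cct, "example", 128);
 *   std::ostringstream err;
 *   if (!throttle.set_params(0.5, 0.9, 1000, 2, 10, 1024, &err)) {
 *     // report err.str()
 *   }
 *   throttle.get(4);   // sleeps according to the current fill ratio
 *   // ... do the work ...
 *   throttle.put(4);
 */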
/**
* @class SimpleThrottle
* This is a simple way to bound the number of concurrent operations.
*
* It tracks the first error encountered, and makes it available
* when all requests are complete. wait_for_ret() should be called
* before the instance is destroyed.
*
* Re-using the same instance isn't safe if you want to check each set
* of operations for errors, since the return value is not reset.
*/
class SimpleThrottle {
public:
SimpleThrottle(uint64_t max, bool ignore_enoent);
~SimpleThrottle();
void start_op();
void end_op(int r);
bool pending_error() const;
int wait_for_ret();
private:
mutable std::mutex m_lock;
std::condition_variable m_cond;
uint64_t m_max;
uint64_t m_current = 0;
int m_ret = 0;
bool m_ignore_enoent;
uint32_t waiters = 0;
};
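/*
 * Illustrative usage sketch (editor's addition): cap concurrency at 8 and
 * collect the first error.  issue_async() and ops are placeholders for any
 * asynchronous call that eventually reports a result; they are not part of
 * this codebase.
 *
 *   SimpleThrottle throttle(8, false);
 *   for (auto& op : ops) {
 *     throttle.start_op();             // blocks while 8 ops are in flight
 *     issue_async(op, [&throttle](int r) {
 *       throttle.end_op(r);
 *     });
 *   }
 *   int r = throttle.wait_for_ret();   // waits for all ops, first error
 */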
class OrderedThrottle;
class C_OrderedThrottle : public Context {
public:
C_OrderedThrottle(OrderedThrottle *ordered_throttle, uint64_t tid)
: m_ordered_throttle(ordered_throttle), m_tid(tid) {
}
protected:
void finish(int r) override;
private:
OrderedThrottle *m_ordered_throttle;
uint64_t m_tid;
};
/**
* @class OrderedThrottle
* Throttles the maximum number of active requests and completes them in order
*
* Operations can complete out-of-order but their associated Context callback
 * will be completed in-order during invocations of start_op() and wait_for_ret()
*/
class OrderedThrottle {
public:
OrderedThrottle(uint64_t max, bool ignore_enoent);
~OrderedThrottle();
C_OrderedThrottle *start_op(Context *on_finish);
void end_op(int r);
bool pending_error() const;
int wait_for_ret();
protected:
friend class C_OrderedThrottle;
void finish_op(uint64_t tid, int r);
private:
struct Result {
bool finished;
int ret_val;
Context *on_finish;
Result(Context *_on_finish = NULL)
: finished(false), ret_val(0), on_finish(_on_finish) {
}
};
typedef std::map<uint64_t, Result> TidResult;
mutable std::mutex m_lock;
std::condition_variable m_cond;
uint64_t m_max;
uint64_t m_current = 0;
int m_ret_val = 0;
bool m_ignore_enoent;
uint64_t m_next_tid = 0;
uint64_t m_complete_tid = 0;
TidResult m_tid_result;
void complete_pending_ops(std::unique_lock<std::mutex>& l);
uint32_t waiters = 0;
};
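/*
 * Illustrative usage sketch (editor's addition), derived from the
 * implementation in Throttle.cc: the context returned by start_op() is
 * completed by the asynchronous operation (out of order is fine), while
 * end_op() releases the concurrency slot; the original on_finish contexts
 * are then invoked in start_op() order.  ops, op.on_finish and
 * issue_async() are placeholders, not part of this codebase.
 *
 *   OrderedThrottle throttle(4, false);
 *   for (auto& op : ops) {
 *     C_OrderedThrottle *ctx = throttle.start_op(op.on_finish);
 *     issue_async(op, [&throttle, ctx](int r) {
 *       ctx->complete(r);      // records the in-order result
 *       throttle.end_op(r);    // frees the slot for the next start_op()
 *     });
 *   }
 *   int r = throttle.wait_for_ret();
 */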
class TokenBucketThrottle {
struct Bucket {
CephContext *cct;
const std::string name;
uint64_t remain;
uint64_t max;
uint64_t capacity;
uint64_t available;
Bucket(CephContext *cct, const std::string &name, uint64_t m)
: cct(cct), name(name), remain(m), max(m), capacity(m), available(m) {}
uint64_t get(uint64_t c);
uint64_t put(uint64_t tokens, double burst_ratio);
void set_max(uint64_t max, uint64_t burst_seconds);
};
struct Blocker {
uint64_t tokens_requested;
Context *ctx;
Blocker(uint64_t _tokens_requested, Context* _ctx)
: tokens_requested(_tokens_requested), ctx(_ctx) {}
};
CephContext *m_cct;
const std::string m_name;
Bucket m_throttle;
uint64_t m_burst = 0;
uint64_t m_avg = 0;
SafeTimer *m_timer;
ceph::mutex *m_timer_lock;
Context *m_token_ctx = nullptr;
std::list<Blocker> m_blockers;
ceph::mutex m_lock;
  // minimum length of the token-filling period (milliseconds).
uint64_t m_tick_min = 50;
  // token-filling period, in milliseconds.
uint64_t m_tick = 0;
/**
* These variables are used to calculate how many tokens need to be put into
* the bucket within each tick.
*
   * In actual use, the number of tokens to be put per tick (m_avg /
   * m_ticks_per_second) may be a floating point number, but we need a
   * 'uint64_t' to put into the bucket.
*
   * For example, suppose the rate is set to 950, meaning 950 iops (or bps).
   *
   * In this case, the filling period (m_tick) would be 1000 / 950 = 1.052 ms,
   * which is too small for the SafeTimer. So we set the period (m_tick)
   * to 50 (m_tick_min), giving 20 ticks per second (m_ticks_per_second).
   * The number of tokens to fill per tick is 950 / 20 = 47.5, not an integer.
*
* To resolve this, we use a method called tokens_filled(m_current_tick) to
   * calculate how many tokens will have been put so far (until m_current_tick):
*
* tokens_filled = m_current_tick / m_ticks_per_second * m_avg
*
* And the difference between two ticks will be the result we expect.
* tokens in tick 0: (1 / 20 * 950) - (0 / 20 * 950) = 47 - 0 = 47
* tokens in tick 1: (2 / 20 * 950) - (1 / 20 * 950) = 95 - 47 = 48
* tokens in tick 2: (3 / 20 * 950) - (2 / 20 * 950) = 142 - 95 = 47
*
   * As a result, the tokens filled in one second are distributed as follows:
* tick | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16|17|18|19|20|
* tokens |47|48|47|48|47|48|47|48|47|48|47|48|47|48|47|48|47|48|47|48|
*/
uint64_t m_ticks_per_second = 0;
uint64_t m_current_tick = 0;
  // period for filling the bucket with tokens, in seconds.
double m_schedule_tick = 1.0;
public:
TokenBucketThrottle(CephContext *cct, const std::string &name,
uint64_t burst, uint64_t avg,
SafeTimer *timer, ceph::mutex *timer_lock);
~TokenBucketThrottle();
const std::string &get_name() {
return m_name;
}
template <typename T, typename MF, typename I>
void add_blocker(uint64_t c, T&& t, MF&& mf, I&& item, uint64_t flag) {
auto ctx = new LambdaContext(
[t, mf, item=std::forward<I>(item), flag](int) mutable {
(t->*mf)(std::forward<I>(item), flag);
});
m_blockers.emplace_back(c, ctx);
}
template <typename T, typename MF, typename I>
bool get(uint64_t c, T&& t, MF&& mf, I&& item, uint64_t flag) {
bool wait = false;
uint64_t got = 0;
std::lock_guard lock(m_lock);
if (!m_blockers.empty()) {
// Keep the order of requests, add item after previous blocked requests.
wait = true;
} else {
if (0 == m_throttle.max || 0 == m_avg)
return false;
got = m_throttle.get(c);
if (got < c) {
// Not enough tokens, add a blocker for it.
wait = true;
}
}
if (wait) {
add_blocker(c - got, std::forward<T>(t), std::forward<MF>(mf),
std::forward<I>(item), flag);
}
return wait;
}
int set_limit(uint64_t average, uint64_t burst, uint64_t burst_seconds);
void set_schedule_tick_min(uint64_t tick);
private:
uint64_t tokens_filled(double tick);
uint64_t tokens_this_tick();
void add_tokens();
void schedule_timer();
void cancel_timer();
};
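/*
 * Illustrative usage sketch (editor's addition): limit a queue to 100
 * tokens/s on average with a burst of 200 sustained for up to 2 seconds.
 * MyDispatcher::handle_throttle_ready() is a hypothetical callback, not
 * part of this codebase; it is invoked once enough tokens have been
 * refilled for a request that had to be blocked.
 *
 *   TokenBucketThrottle *tbt =
 *     new TokenBucketThrottle(cct, "example_iops", 0, 0, timer, timer_lock);
 *   tbt->set_limit(100, 200, 2);
 *
 *   if (tbt->get(1, this, &MyDispatcher::handle_throttle_ready, req, flag)) {
 *     return;   // blocked; the callback resumes the request later
 *   }
 *   // enough tokens were available, continue immediately
 */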
#endif
| 12,846 | 26.334043 | 89 | h |
null | ceph-main/src/common/ThrottleInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstdint>
class ThrottleInterface {
public:
virtual ~ThrottleInterface() {}
/**
   * take the specified number of slots from the stock regardless of the throttling
* @param c number of slots to take
* @returns the total number of taken slots
*/
virtual int64_t take(int64_t c = 1) = 0;
/**
* put slots back to the stock
* @param c number of slots to return
   * @returns the number of slots still taken after this
*/
virtual int64_t put(int64_t c = 1) = 0;
};
| 606 | 24.291667 | 80 | h |
null | ceph-main/src/common/Timer.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "Cond.h"
#include "Timer.h"
#define dout_subsys ceph_subsys_timer
#undef dout_prefix
#define dout_prefix *_dout << "timer(" << this << ")."
using std::pair;
using ceph::operator <<;
template <class Mutex>
class CommonSafeTimerThread : public Thread {
CommonSafeTimer<Mutex> *parent;
public:
explicit CommonSafeTimerThread(CommonSafeTimer<Mutex> *s) : parent(s) {}
void *entry() override {
parent->timer_thread();
return NULL;
}
};
template <class Mutex>
CommonSafeTimer<Mutex>::CommonSafeTimer(CephContext *cct_, Mutex &l, bool safe_callbacks)
: cct(cct_), lock(l),
safe_callbacks(safe_callbacks),
thread(NULL),
stopping(false)
{
}
template <class Mutex>
CommonSafeTimer<Mutex>::~CommonSafeTimer()
{
ceph_assert(thread == NULL);
}
template <class Mutex>
void CommonSafeTimer<Mutex>::init()
{
ldout(cct,10) << "init" << dendl;
thread = new CommonSafeTimerThread<Mutex>(this);
thread->create("safe_timer");
}
template <class Mutex>
void CommonSafeTimer<Mutex>::shutdown()
{
ldout(cct,10) << "shutdown" << dendl;
if (thread) {
ceph_assert(ceph_mutex_is_locked(lock));
cancel_all_events();
stopping = true;
cond.notify_all();
lock.unlock();
thread->join();
lock.lock();
delete thread;
thread = NULL;
}
}
template <class Mutex>
void CommonSafeTimer<Mutex>::timer_thread()
{
std::unique_lock l{lock};
ldout(cct,10) << "timer_thread starting" << dendl;
while (!stopping) {
auto now = clock_t::now();
while (!schedule.empty()) {
auto p = schedule.begin();
// is the future now?
if (p->first > now)
break;
Context *callback = p->second;
events.erase(callback);
schedule.erase(p);
ldout(cct,10) << "timer_thread executing " << callback << dendl;
if (!safe_callbacks) {
l.unlock();
callback->complete(0);
l.lock();
} else {
callback->complete(0);
}
}
// recheck stopping if we dropped the lock
if (!safe_callbacks && stopping)
break;
ldout(cct,20) << "timer_thread going to sleep" << dendl;
if (schedule.empty()) {
cond.wait(l);
} else {
auto when = schedule.begin()->first;
cond.wait_until(l, when);
}
ldout(cct,20) << "timer_thread awake" << dendl;
}
ldout(cct,10) << "timer_thread exiting" << dendl;
}
template <class Mutex>
Context* CommonSafeTimer<Mutex>::add_event_after(double seconds, Context *callback)
{
return add_event_after(ceph::make_timespan(seconds), callback);
}
template <class Mutex>
Context* CommonSafeTimer<Mutex>::add_event_after(ceph::timespan duration, Context *callback)
{
ceph_assert(ceph_mutex_is_locked(lock));
auto when = clock_t::now() + duration;
return add_event_at(when, callback);
}
template <class Mutex>
Context* CommonSafeTimer<Mutex>::add_event_at(CommonSafeTimer<Mutex>::clock_t::time_point when, Context *callback)
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct,10) << __func__ << " " << when << " -> " << callback << dendl;
if (stopping) {
ldout(cct,5) << __func__ << " already shutdown, event not added" << dendl;
delete callback;
return nullptr;
}
scheduled_map_t::value_type s_val(when, callback);
scheduled_map_t::iterator i = schedule.insert(s_val);
event_lookup_map_t::value_type e_val(callback, i);
pair < event_lookup_map_t::iterator, bool > rval(events.insert(e_val));
/* If you hit this, you tried to insert the same Context* twice. */
ceph_assert(rval.second);
/* If the event we have just inserted comes before everything else, we need to
* adjust our timeout. */
if (i == schedule.begin())
cond.notify_all();
return callback;
}
template <class Mutex>
Context* CommonSafeTimer<Mutex>::add_event_at(ceph::real_clock::time_point when, Context *callback)
{
ceph_assert(ceph_mutex_is_locked(lock));
// convert from real_clock to mono_clock
auto mono_now = ceph::mono_clock::now();
auto real_now = ceph::real_clock::now();
const auto delta = when - real_now;
const auto mono_atime = (mono_now +
std::chrono::ceil<clock_t::duration>(delta));
return add_event_at(mono_atime, callback);
}
template <class Mutex>
bool CommonSafeTimer<Mutex>::cancel_event(Context *callback)
{
ceph_assert(ceph_mutex_is_locked(lock));
auto p = events.find(callback);
if (p == events.end()) {
ldout(cct,10) << "cancel_event " << callback << " not found" << dendl;
return false;
}
ldout(cct,10) << "cancel_event " << p->second->first << " -> " << callback << dendl;
delete p->first;
schedule.erase(p->second);
events.erase(p);
return true;
}
template <class Mutex>
void CommonSafeTimer<Mutex>::cancel_all_events()
{
ldout(cct,10) << "cancel_all_events" << dendl;
ceph_assert(ceph_mutex_is_locked(lock));
while (!events.empty()) {
auto p = events.begin();
ldout(cct,10) << " cancelled " << p->second->first << " -> " << p->first << dendl;
delete p->first;
schedule.erase(p->second);
events.erase(p);
}
}
template <class Mutex>
void CommonSafeTimer<Mutex>::dump(const char *caller) const
{
if (!caller)
caller = "";
ldout(cct,10) << "dump " << caller << dendl;
for (scheduled_map_t::const_iterator s = schedule.begin();
s != schedule.end();
++s)
ldout(cct,10) << " " << s->first << "->" << s->second << dendl;
}
template class CommonSafeTimer<ceph::mutex>;
template class CommonSafeTimer<ceph::fair_mutex>;
| 5,874 | 24.995575 | 114 | cc |
null | ceph-main/src/common/Timer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_TIMER_H
#define CEPH_TIMER_H
#include <map>
#include "include/common_fwd.h"
#include "ceph_time.h"
#include "ceph_mutex.h"
#include "fair_mutex.h"
#include <condition_variable>
class Context;
template <class Mutex> class CommonSafeTimerThread;
template <class Mutex>
class CommonSafeTimer
{
CephContext *cct;
Mutex& lock;
std::condition_variable_any cond;
bool safe_callbacks;
friend class CommonSafeTimerThread<Mutex>;
class CommonSafeTimerThread<Mutex> *thread;
void timer_thread();
void _shutdown();
using clock_t = ceph::mono_clock;
using scheduled_map_t = std::multimap<clock_t::time_point, Context*>;
scheduled_map_t schedule;
using event_lookup_map_t = std::map<Context*, scheduled_map_t::iterator>;
event_lookup_map_t events;
bool stopping;
void dump(const char *caller = 0) const;
public:
// This class isn't supposed to be copied
CommonSafeTimer(const CommonSafeTimer&) = delete;
CommonSafeTimer& operator=(const CommonSafeTimer&) = delete;
/* Safe callbacks determines whether callbacks are called with the lock
* held.
*
* safe_callbacks = true (default option) guarantees that a cancelled
* event's callback will never be called.
*
* Under some circumstances, holding the lock can cause lock cycles.
* If you are able to relax requirements on cancelled callbacks, then
* setting safe_callbacks = false eliminates the lock cycle issue.
* */
CommonSafeTimer(CephContext *cct, Mutex &l, bool safe_callbacks=true);
virtual ~CommonSafeTimer();
  /* init(): start the timer thread.
   *
   * shutdown(): cancel all events and stop the timer thread.  Call with the
   * event_lock LOCKED: the lock is dropped while joining the timer thread,
   * so any callbacks still in flight can take it and finish first. */
  void init();
  void shutdown();
/* Schedule an event in the future
* Call with the event_lock LOCKED */
Context* add_event_after(ceph::timespan duration, Context *callback);
Context* add_event_after(double seconds, Context *callback);
Context* add_event_at(clock_t::time_point when, Context *callback);
Context* add_event_at(ceph::real_clock::time_point when, Context *callback);
/* Cancel an event.
* Call with the event_lock LOCKED
*
* Returns true if the callback was cancelled.
* Returns false if you never added the callback in the first place.
*/
bool cancel_event(Context *callback);
/* Cancel all events.
* Call with the event_lock LOCKED
*
* When this function returns, all events have been cancelled, and there are no
* more in progress.
*/
void cancel_all_events();
};
extern template class CommonSafeTimer<ceph::mutex>;
extern template class CommonSafeTimer<ceph::fair_mutex>;
using SafeTimer = class CommonSafeTimer<ceph::mutex>;
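/*
 * Illustrative usage sketch (editor's addition): the timer shares the
 * caller's mutex, which must be held when adding or cancelling events and
 * when shutting down.  LambdaContext comes from include/Context.h.
 *
 *   ceph::mutex lock = ceph::make_mutex("example::timer_lock");
 *   SafeTimer timer(cct, lock);
 *   timer.init();
 *   {
 *     std::lock_guard l(lock);
 *     timer.add_event_after(5.0, new LambdaContext([](int) {
 *       // with safe_callbacks (the default) this runs with `lock` held
 *     }));
 *   }
 *   // ...
 *   {
 *     std::lock_guard l(lock);
 *     timer.shutdown();
 *   }
 */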
#endif
| 3,185 | 28.5 | 81 | h |
null | ceph-main/src/common/TracepointProvider.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/TracepointProvider.h"
#include "common/config.h"
TracepointProvider::TracepointProvider(CephContext *cct, const char *library,
const char *config_key)
: m_cct(cct), m_library(library), m_config_keys{config_key, NULL}
{
m_cct->_conf.add_observer(this);
verify_config(m_cct->_conf);
}
TracepointProvider::~TracepointProvider() {
m_cct->_conf.remove_observer(this);
if (m_handle) {
dlclose(m_handle);
}
}
void TracepointProvider::handle_conf_change(
const ConfigProxy& conf, const std::set<std::string> &changed) {
if (changed.count(m_config_keys[0])) {
verify_config(conf);
}
}
void TracepointProvider::verify_config(const ConfigProxy& conf) {
std::lock_guard locker(m_lock);
if (m_handle) {
return;
}
char buf[10];
char *pbuf = buf;
if (conf.get_val(m_config_keys[0], &pbuf, sizeof(buf)) != 0 ||
strncmp(buf, "true", 5) != 0) {
return;
}
m_handle = dlopen(m_library.c_str(), RTLD_NOW | RTLD_NODELETE);
ceph_assert(m_handle);
}
| 1,148 | 23.978261 | 77 | cc |
null | ceph-main/src/common/TracepointProvider.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TRACEPOINT_PROVIDER_H
#define CEPH_TRACEPOINT_PROVIDER_H
#include "common/ceph_context.h"
#include "common/config_obs.h"
#include "common/ceph_mutex.h"
#include "include/dlfcn_compat.h"
class TracepointProvider : public md_config_obs_t {
public:
struct Traits {
const char *library;
const char *config_key;
Traits(const char *library, const char *config_key)
: library(library), config_key(config_key) {
}
};
class Singleton {
public:
Singleton(CephContext *cct, const char *library, const char *config_key)
: tracepoint_provider(new TracepointProvider(cct, library, config_key)) {
}
~Singleton() {
delete tracepoint_provider;
}
inline bool is_enabled() const {
return tracepoint_provider->m_handle != nullptr;
}
private:
TracepointProvider *tracepoint_provider;
};
template <const Traits &traits>
class TypedSingleton : public Singleton {
public:
explicit TypedSingleton(CephContext *cct)
: Singleton(cct, traits.library, traits.config_key) {
}
};
TracepointProvider(CephContext *cct, const char *library,
const char *config_key);
~TracepointProvider() override;
TracepointProvider(const TracepointProvider&) = delete;
TracepointProvider operator =(const TracepointProvider&) = delete;
TracepointProvider(TracepointProvider&&) = delete;
TracepointProvider operator =(TracepointProvider&&) = delete;
template <const Traits &traits>
static void initialize(CephContext *cct) {
#ifdef WITH_LTTNG
cct->lookup_or_create_singleton_object<TypedSingleton<traits>>(
traits.library, false, cct);
#endif
}
protected:
const char** get_tracked_conf_keys() const override {
return m_config_keys;
}
void handle_conf_change(const ConfigProxy& conf,
const std::set <std::string> &changed) override;
private:
CephContext *m_cct;
std::string m_library;
mutable const char* m_config_keys[2];
ceph::mutex m_lock = ceph::make_mutex("TracepointProvider::m_lock");
void* m_handle = nullptr;
void verify_config(const ConfigProxy& conf);
};
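/*
 * Illustrative usage sketch (editor's addition): a library defines a Traits
 * instance naming its tracepoint .so and the config key that enables it,
 * then calls initialize<>() once per CephContext.  The names below are
 * hypothetical.
 *
 *   TracepointProvider::Traits example_traits(
 *     "libexample_tp.so", "example_tracing");
 *
 *   void init_tracing(CephContext *cct) {
 *     TracepointProvider::initialize<example_traits>(cct);
 *   }
 */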
#endif // CEPH_TRACEPOINT_PROVIDER_H
| 2,258 | 26.216867 | 79 | h |
null | ceph-main/src/common/TrackedOp.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
* Copyright 2013 Inktank
*/
#include "TrackedOp.h"
#define dout_context cct
#define dout_subsys ceph_subsys_optracker
#undef dout_prefix
#define dout_prefix _prefix(_dout)
using std::list;
using std::make_pair;
using std::ostream;
using std::pair;
using std::set;
using std::string;
using std::stringstream;
using ceph::Formatter;
static ostream& _prefix(std::ostream* _dout)
{
return *_dout << "-- op tracker -- ";
}
void OpHistoryServiceThread::break_thread() {
queue_spinlock.lock();
_external_queue.clear();
_break_thread = true;
queue_spinlock.unlock();
}
void* OpHistoryServiceThread::entry() {
int sleep_time = 1000;
list<pair<utime_t, TrackedOpRef>> internal_queue;
while (1) {
queue_spinlock.lock();
if (_break_thread) {
queue_spinlock.unlock();
break;
}
internal_queue.swap(_external_queue);
queue_spinlock.unlock();
if (internal_queue.empty()) {
usleep(sleep_time);
if (sleep_time < 128000) {
sleep_time <<= 2;
}
} else {
sleep_time = 1000;
}
while (!internal_queue.empty()) {
pair<utime_t, TrackedOpRef> op = internal_queue.front();
_ophistory->_insert_delayed(op.first, op.second);
internal_queue.pop_front();
}
}
return nullptr;
}
void OpHistory::on_shutdown()
{
opsvc.break_thread();
opsvc.join();
std::lock_guard history_lock(ops_history_lock);
arrived.clear();
duration.clear();
slow_op.clear();
shutdown = true;
}
void OpHistory::_insert_delayed(const utime_t& now, TrackedOpRef op)
{
std::lock_guard history_lock(ops_history_lock);
if (shutdown)
return;
double opduration = op->get_duration();
duration.insert(make_pair(opduration, op));
arrived.insert(make_pair(op->get_initiated(), op));
if (opduration >= history_slow_op_threshold.load()) {
slow_op.insert(make_pair(op->get_initiated(), op));
logger->inc(l_osd_slow_op_count);
}
cleanup(now);
}
void OpHistory::cleanup(utime_t now)
{
while (arrived.size() &&
(now - arrived.begin()->first >
(double)(history_duration.load()))) {
duration.erase(make_pair(
arrived.begin()->second->get_duration(),
arrived.begin()->second));
arrived.erase(arrived.begin());
}
while (duration.size() > history_size.load()) {
arrived.erase(make_pair(
duration.begin()->second->get_initiated(),
duration.begin()->second));
duration.erase(duration.begin());
}
while (slow_op.size() > history_slow_op_size.load()) {
slow_op.erase(make_pair(
slow_op.begin()->second->get_initiated(),
slow_op.begin()->second));
}
}
void OpHistory::dump_ops(utime_t now, Formatter *f, set<string> filters, bool by_duration)
{
std::lock_guard history_lock(ops_history_lock);
cleanup(now);
f->open_object_section("op_history");
f->dump_int("size", history_size.load());
f->dump_int("duration", history_duration.load());
{
f->open_array_section("ops");
auto dump_fn = [&f, &now, &filters](auto begin_iter, auto end_iter) {
for (auto i=begin_iter; i!=end_iter; ++i) {
if (!i->second->filter_out(filters))
continue;
f->open_object_section("op");
i->second->dump(now, f);
f->close_section();
}
};
if (by_duration) {
dump_fn(duration.rbegin(), duration.rend());
} else {
dump_fn(arrived.begin(), arrived.end());
}
f->close_section();
}
f->close_section();
}
struct ShardedTrackingData {
ceph::mutex ops_in_flight_lock_sharded;
TrackedOp::tracked_op_list_t ops_in_flight_sharded;
explicit ShardedTrackingData(string lock_name)
: ops_in_flight_lock_sharded(ceph::make_mutex(lock_name)) {}
};
OpTracker::OpTracker(CephContext *cct_, bool tracking, uint32_t num_shards):
seq(0),
history(cct_),
num_optracker_shards(num_shards),
complaint_time(0), log_threshold(0),
tracking_enabled(tracking),
cct(cct_) {
for (uint32_t i = 0; i < num_optracker_shards; i++) {
char lock_name[34] = {0};
snprintf(lock_name, sizeof(lock_name), "%s:%" PRIu32, "OpTracker::ShardedLock", i);
ShardedTrackingData* one_shard = new ShardedTrackingData(lock_name);
sharded_in_flight_list.push_back(one_shard);
}
}
OpTracker::~OpTracker() {
while (!sharded_in_flight_list.empty()) {
ShardedTrackingData* sdata = sharded_in_flight_list.back();
ceph_assert(NULL != sdata);
while (!sdata->ops_in_flight_sharded.empty()) {
{
std::lock_guard locker(sdata->ops_in_flight_lock_sharded);
sdata->ops_in_flight_sharded.pop_back();
}
}
ceph_assert((sharded_in_flight_list.back())->ops_in_flight_sharded.empty());
delete sharded_in_flight_list.back();
sharded_in_flight_list.pop_back();
}
}
bool OpTracker::dump_historic_ops(Formatter *f, bool by_duration, set<string> filters)
{
if (!tracking_enabled)
return false;
std::shared_lock l{lock};
utime_t now = ceph_clock_now();
history.dump_ops(now, f, filters, by_duration);
return true;
}
void OpHistory::dump_slow_ops(utime_t now, Formatter *f, set<string> filters)
{
std::lock_guard history_lock(ops_history_lock);
cleanup(now);
f->open_object_section("OpHistory slow ops");
f->dump_int("num to keep", history_slow_op_size.load());
f->dump_int("threshold to keep", history_slow_op_threshold.load());
{
f->open_array_section("Ops");
for (set<pair<utime_t, TrackedOpRef> >::const_iterator i =
slow_op.begin();
i != slow_op.end();
++i) {
if (!i->second->filter_out(filters))
continue;
f->open_object_section("Op");
i->second->dump(now, f);
f->close_section();
}
f->close_section();
}
f->close_section();
}
bool OpTracker::dump_historic_slow_ops(Formatter *f, set<string> filters)
{
if (!tracking_enabled)
return false;
std::shared_lock l{lock};
utime_t now = ceph_clock_now();
history.dump_slow_ops(now, f, filters);
return true;
}
bool OpTracker::dump_ops_in_flight(Formatter *f, bool print_only_blocked, set<string> filters, bool count_only)
{
if (!tracking_enabled)
return false;
std::shared_lock l{lock};
f->open_object_section("ops_in_flight"); // overall dump
uint64_t total_ops_in_flight = 0;
if (!count_only) {
f->open_array_section("ops"); // list of TrackedOps
}
utime_t now = ceph_clock_now();
for (uint32_t i = 0; i < num_optracker_shards; i++) {
ShardedTrackingData* sdata = sharded_in_flight_list[i];
ceph_assert(NULL != sdata);
std::lock_guard locker(sdata->ops_in_flight_lock_sharded);
for (auto& op : sdata->ops_in_flight_sharded) {
if (print_only_blocked && (now - op.get_initiated() <= complaint_time))
break;
if (!op.filter_out(filters))
continue;
if (!count_only) {
f->open_object_section("op");
op.dump(now, f);
f->close_section(); // this TrackedOp
}
total_ops_in_flight++;
}
}
if (!count_only) {
f->close_section(); // list of TrackedOps
}
if (print_only_blocked) {
f->dump_float("complaint_time", complaint_time);
f->dump_int("num_blocked_ops", total_ops_in_flight);
} else {
f->dump_int("num_ops", total_ops_in_flight);
}
f->close_section(); // overall dump
return true;
}
bool OpTracker::register_inflight_op(TrackedOp *i)
{
if (!tracking_enabled)
return false;
std::shared_lock l{lock};
uint64_t current_seq = ++seq;
uint32_t shard_index = current_seq % num_optracker_shards;
ShardedTrackingData* sdata = sharded_in_flight_list[shard_index];
ceph_assert(NULL != sdata);
{
std::lock_guard locker(sdata->ops_in_flight_lock_sharded);
sdata->ops_in_flight_sharded.push_back(*i);
i->seq = current_seq;
}
return true;
}
void OpTracker::unregister_inflight_op(TrackedOp* const i)
{
// caller checks;
ceph_assert(i->state);
uint32_t shard_index = i->seq % num_optracker_shards;
ShardedTrackingData* sdata = sharded_in_flight_list[shard_index];
ceph_assert(NULL != sdata);
{
std::lock_guard locker(sdata->ops_in_flight_lock_sharded);
auto p = sdata->ops_in_flight_sharded.iterator_to(*i);
sdata->ops_in_flight_sharded.erase(p);
}
}
void OpTracker::record_history_op(TrackedOpRef&& i)
{
std::shared_lock l{lock};
history.insert(ceph_clock_now(), std::move(i));
}
bool OpTracker::visit_ops_in_flight(utime_t* oldest_secs,
std::function<bool(TrackedOp&)>&& visit)
{
if (!tracking_enabled)
return false;
const utime_t now = ceph_clock_now();
utime_t oldest_op = now;
// single representation of all inflight operations reunified
// from OpTracker's shards. TrackedOpRef extends the lifetime
// to carry the ops outside of the critical section, and thus
  // allows calling the visitor without any lock being held.
  // This simplifies the API contract at the price of plenty of
  // additional moves and atomic ref-counting. This seems OK as
// `visit_ops_in_flight()` is definitely not intended for any
// hot path.
std::vector<TrackedOpRef> ops_in_flight;
std::shared_lock l{lock};
for (const auto sdata : sharded_in_flight_list) {
ceph_assert(sdata);
std::lock_guard locker(sdata->ops_in_flight_lock_sharded);
if (!sdata->ops_in_flight_sharded.empty()) {
utime_t oldest_op_tmp =
sdata->ops_in_flight_sharded.front().get_initiated();
if (oldest_op_tmp < oldest_op) {
oldest_op = oldest_op_tmp;
}
}
std::transform(std::begin(sdata->ops_in_flight_sharded),
std::end(sdata->ops_in_flight_sharded),
std::back_inserter(ops_in_flight),
[] (TrackedOp& op) { return TrackedOpRef(&op); });
}
if (ops_in_flight.empty())
return false;
*oldest_secs = now - oldest_op;
dout(10) << "ops_in_flight.size: " << ops_in_flight.size()
<< "; oldest is " << *oldest_secs
<< " seconds old" << dendl;
if (*oldest_secs < complaint_time)
return false;
l.unlock();
for (auto& op : ops_in_flight) {
    // Neither `lock` nor `ops_in_flight_lock_sharded` should be held
    // when calling the visitor. Otherwise `OSD::get_health_metrics()`
    // can deadlock, because `~TrackedOp()` may call `record_history_op()`
    // or `unregister_inflight_op()`.
if (!visit(*op))
break;
}
return true;
}
bool OpTracker::with_slow_ops_in_flight(utime_t* oldest_secs,
int* num_slow_ops,
int* num_warned_ops,
std::function<void(TrackedOp&)>&& on_warn)
{
const utime_t now = ceph_clock_now();
auto too_old = now;
too_old -= complaint_time;
int slow = 0;
int warned = 0;
auto check = [&](TrackedOp& op) {
if (op.get_initiated() >= too_old) {
// no more slow ops in flight
return false;
}
if (!op.warn_interval_multiplier)
return true;
slow++;
if (warned >= log_threshold) {
// enough samples of slow ops
return true;
}
auto time_to_complain = (op.get_initiated() +
complaint_time * op.warn_interval_multiplier);
if (time_to_complain >= now) {
// complain later if the op is still in flight
return true;
}
// will warn, increase counter
warned++;
on_warn(op);
return true;
};
if (visit_ops_in_flight(oldest_secs, check)) {
if (num_slow_ops) {
*num_slow_ops = slow;
*num_warned_ops = warned;
}
return true;
} else {
return false;
}
}
bool OpTracker::check_ops_in_flight(std::string* summary,
std::vector<string> &warnings,
int *num_slow_ops)
{
const utime_t now = ceph_clock_now();
auto too_old = now;
too_old -= complaint_time;
int warned = 0;
utime_t oldest_secs;
auto warn_on_slow_op = [&](TrackedOp& op) {
stringstream ss;
utime_t age = now - op.get_initiated();
ss << "slow request " << age << " seconds old, received at "
<< op.get_initiated() << ": " << op.get_desc()
<< " currently "
<< op.state_string();
warnings.push_back(ss.str());
    // only those that have been shown will back off
op.warn_interval_multiplier *= 2;
};
int slow = 0;
if (with_slow_ops_in_flight(&oldest_secs, &slow, &warned, warn_on_slow_op) &&
slow > 0) {
stringstream ss;
ss << slow << " slow requests, "
<< warned << " included below; oldest blocked for > "
<< oldest_secs << " secs";
*summary = ss.str();
if (num_slow_ops) {
*num_slow_ops = slow;
}
return true;
} else {
return false;
}
}
void OpTracker::get_age_ms_histogram(pow2_hist_t *h)
{
h->clear();
utime_t now = ceph_clock_now();
for (uint32_t iter = 0; iter < num_optracker_shards; iter++) {
ShardedTrackingData* sdata = sharded_in_flight_list[iter];
ceph_assert(NULL != sdata);
std::lock_guard locker(sdata->ops_in_flight_lock_sharded);
for (auto& i : sdata->ops_in_flight_sharded) {
utime_t age = now - i.get_initiated();
uint32_t ms = (long)(age * 1000.0);
h->add(ms);
}
}
}
#undef dout_context
#define dout_context tracker->cct
void TrackedOp::mark_event(std::string_view event, utime_t stamp)
{
if (!state)
return;
{
std::lock_guard l(lock);
events.emplace_back(stamp, event);
}
dout(6) << " seq: " << seq
<< ", time: " << stamp
<< ", event: " << event
<< ", op: " << get_desc()
<< dendl;
_event_marked();
}
void TrackedOp::dump(utime_t now, Formatter *f) const
{
// Ignore if still in the constructor
if (!state)
return;
f->dump_string("description", get_desc());
f->dump_stream("initiated_at") << get_initiated();
f->dump_float("age", now - get_initiated());
f->dump_float("duration", get_duration());
{
f->open_object_section("type_data");
_dump(f);
f->close_section();
}
}
| 14,052 | 26.340467 | 111 | cc |
null | ceph-main/src/common/TrackedOp.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 New Dream Network/Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef TRACKEDREQUEST_H_
#define TRACKEDREQUEST_H_
#include <atomic>
#include "common/ceph_mutex.h"
#include "common/histogram.h"
#include "common/Thread.h"
#include "common/Clock.h"
#include "include/spinlock.h"
#include "msg/Message.h"
#define OPTRACKER_PREALLOC_EVENTS 20
class TrackedOp;
class OpHistory;
typedef boost::intrusive_ptr<TrackedOp> TrackedOpRef;
class OpHistoryServiceThread : public Thread
{
private:
std::list<std::pair<utime_t, TrackedOpRef>> _external_queue;
OpHistory* _ophistory;
mutable ceph::spinlock queue_spinlock;
bool _break_thread;
public:
explicit OpHistoryServiceThread(OpHistory* parent)
: _ophistory(parent),
_break_thread(false) { }
void break_thread();
void insert_op(const utime_t& now, TrackedOpRef op) {
queue_spinlock.lock();
_external_queue.emplace_back(now, op);
queue_spinlock.unlock();
}
void *entry() override;
};
enum {
l_osd_slow_op_first = 1000,
l_osd_slow_op_count,
l_osd_slow_op_last,
};
class OpHistory {
CephContext* cct = nullptr;
std::set<std::pair<utime_t, TrackedOpRef> > arrived;
std::set<std::pair<double, TrackedOpRef> > duration;
std::set<std::pair<utime_t, TrackedOpRef> > slow_op;
ceph::mutex ops_history_lock = ceph::make_mutex("OpHistory::ops_history_lock");
void cleanup(utime_t now);
std::atomic_size_t history_size{0};
std::atomic_uint32_t history_duration{0};
std::atomic_size_t history_slow_op_size{0};
std::atomic_uint32_t history_slow_op_threshold{0};
std::atomic_bool shutdown{false};
OpHistoryServiceThread opsvc;
friend class OpHistoryServiceThread;
std::unique_ptr<PerfCounters> logger;
public:
OpHistory(CephContext *c) : cct(c), opsvc(this) {
PerfCountersBuilder b(cct, "osd-slow-ops",
l_osd_slow_op_first, l_osd_slow_op_last);
    b.add_u64_counter(l_osd_slow_op_count, "slow_ops_count",
                      "Number of operations taking over ten seconds");
logger.reset(b.create_perf_counters());
cct->get_perfcounters_collection()->add(logger.get());
opsvc.create("OpHistorySvc");
}
~OpHistory() {
ceph_assert(arrived.empty());
ceph_assert(duration.empty());
ceph_assert(slow_op.empty());
if(logger) {
cct->get_perfcounters_collection()->remove(logger.get());
logger.reset();
}
}
void insert(const utime_t& now, TrackedOpRef op)
{
if (shutdown)
return;
opsvc.insert_op(now, op);
}
void _insert_delayed(const utime_t& now, TrackedOpRef op);
void dump_ops(utime_t now, ceph::Formatter *f, std::set<std::string> filters = {""}, bool by_duration=false);
void dump_slow_ops(utime_t now, ceph::Formatter *f, std::set<std::string> filters = {""});
void on_shutdown();
void set_size_and_duration(size_t new_size, uint32_t new_duration) {
history_size = new_size;
history_duration = new_duration;
}
void set_slow_op_size_and_threshold(size_t new_size, uint32_t new_threshold) {
history_slow_op_size = new_size;
history_slow_op_threshold = new_threshold;
}
};
struct ShardedTrackingData;
class OpTracker {
friend class OpHistory;
std::atomic<int64_t> seq = { 0 };
std::vector<ShardedTrackingData*> sharded_in_flight_list;
OpHistory history;
uint32_t num_optracker_shards;
float complaint_time;
int log_threshold;
std::atomic<bool> tracking_enabled;
ceph::shared_mutex lock = ceph::make_shared_mutex("OpTracker::lock");
public:
CephContext *cct;
OpTracker(CephContext *cct_, bool tracking, uint32_t num_shards);
void set_complaint_and_threshold(float time, int threshold) {
complaint_time = time;
log_threshold = threshold;
}
void set_history_size_and_duration(uint32_t new_size, uint32_t new_duration) {
history.set_size_and_duration(new_size, new_duration);
}
void set_history_slow_op_size_and_threshold(uint32_t new_size, uint32_t new_threshold) {
history.set_slow_op_size_and_threshold(new_size, new_threshold);
}
bool is_tracking() const {
return tracking_enabled;
}
void set_tracking(bool enable) {
tracking_enabled = enable;
}
bool dump_ops_in_flight(ceph::Formatter *f, bool print_only_blocked = false, std::set<std::string> filters = {""}, bool count_only = false);
bool dump_historic_ops(ceph::Formatter *f, bool by_duration = false, std::set<std::string> filters = {""});
bool dump_historic_slow_ops(ceph::Formatter *f, std::set<std::string> filters = {""});
bool register_inflight_op(TrackedOp *i);
void unregister_inflight_op(TrackedOp *i);
void record_history_op(TrackedOpRef&& i);
void get_age_ms_histogram(pow2_hist_t *h);
  /**
   * walk through ops in flight
   *
   * @param oldest_secs the amount of time since the oldest op was initiated
   * @param visit a function consuming tracked ops; it returns false when it
   *              does not want to be fed any more ops
   * @return true if there are any ops to warn on, false otherwise
   */
bool visit_ops_in_flight(utime_t* oldest_secs,
std::function<bool(TrackedOp&)>&& visit);
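  // Illustrative only: counting the ops currently in flight, given an
  // OpTracker `t`; the visitor is only run once the oldest op has already
  // exceeded the complaint time:
  //   utime_t oldest;
  //   int n = 0;
  //   t.visit_ops_in_flight(&oldest, [&](TrackedOp& op) {
  //     ++n;
  //     return true;  // keep visiting
  //   });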
  /**
   * walk through slow ops in flight
   *
   * @param[out] oldest_secs the amount of time since the oldest op was initiated
   * @param[out] num_slow_ops total number of slow ops
   * @param[out] num_warned_ops total number of warned ops
   * @param on_warn a function called on each slow op that is selected for
   *                warning (limited by the log threshold)
   * @return true if there are any ops to warn on, false otherwise
   */
bool with_slow_ops_in_flight(utime_t* oldest_secs,
int* num_slow_ops,
int* num_warned_ops,
std::function<void(TrackedOp&)>&& on_warn);
  /**
   * Look for ops which are too old, and insert warning
   * strings for each op that is too old.
   *
   * @param[out] summary a std::string summarizing slow ops.
   * @param[out] warning_strings a std::vector<std::string> filled with a
   *             warning std::string for each old op.
   * @param[out] slow total number of slow ops
   * @return true if there are any ops to warn on, false otherwise.
   */
bool check_ops_in_flight(std::string* summary,
std::vector<std::string> &warning_strings,
int* slow = nullptr);
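  // Illustrative only: typical health-check usage, assuming an OpTracker
  // member `op_tracker` and some logging sink:
  //   std::string summary;
  //   std::vector<std::string> warnings;
  //   int num_slow = 0;
  //   if (op_tracker.check_ops_in_flight(&summary, warnings, &num_slow)) {
  //     log_warning(summary);          // hypothetical sink
  //     for (const auto& w : warnings)
  //       log_warning(w);
  //   }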
void on_shutdown() {
history.on_shutdown();
}
~OpTracker();
template <typename T, typename U>
typename T::Ref create_request(U params)
{
typename T::Ref retval(new T(params, this));
retval->tracking_start();
if (is_tracking()) {
retval->mark_event("header_read", params->get_recv_stamp());
retval->mark_event("throttled", params->get_throttle_stamp());
retval->mark_event("all_read", params->get_recv_complete_stamp());
retval->mark_event("dispatched", params->get_dispatch_stamp());
}
return retval;
}
};
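// Illustrative usage sketch (the values and the surrounding daemon are
// hypothetical; in practice this is driven by e.g. the OSD's op_tracker):
//
//   OpTracker tracker(cct, /*tracking=*/true, /*num_shards=*/32);
//   tracker.set_complaint_and_threshold(30.0, 5);
//   tracker.set_history_size_and_duration(20, 600);
//   // ops are created via create_request<T>(msg); once the last reference
//   // is dropped they move into the history and can be inspected with
//   // dump_historic_ops().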
class TrackedOp : public boost::intrusive::list_base_hook<> {
private:
friend class OpHistory;
friend class OpTracker;
boost::intrusive::list_member_hook<> tracker_item;
public:
typedef boost::intrusive::list<
TrackedOp,
boost::intrusive::member_hook<
TrackedOp,
boost::intrusive::list_member_hook<>,
&TrackedOp::tracker_item> > tracked_op_list_t;
// for use when clearing lists. e.g.,
// ls.clear_and_dispose(TrackedOp::Putter());
struct Putter {
void operator()(TrackedOp *op) {
op->put();
}
};
protected:
OpTracker *tracker; ///< the tracker we are associated with
std::atomic_int nref = {0}; ///< ref count
utime_t initiated_at;
struct Event {
utime_t stamp;
std::string str;
Event(utime_t t, std::string_view s) : stamp(t), str(s) {}
int compare(const char *s) const {
return str.compare(s);
}
const char *c_str() const {
return str.c_str();
}
void dump(ceph::Formatter *f) const {
f->dump_stream("time") << stamp;
f->dump_string("event", str);
}
};
  std::vector<Event> events; ///< list of events and their times
  mutable ceph::mutex lock = ceph::make_mutex("TrackedOp::lock"); ///< to protect the events list
  uint64_t seq = 0; ///< a unique value set by the OpTracker
  uint32_t warn_interval_multiplier = 1; ///< limits output of a given op warning
enum {
STATE_UNTRACKED = 0,
STATE_LIVE,
STATE_HISTORY
};
std::atomic<int> state = {STATE_UNTRACKED};
mutable std::string desc_str; ///< protected by lock
mutable const char *desc = nullptr; ///< readable without lock
mutable std::atomic<bool> want_new_desc = {false};
TrackedOp(OpTracker *_tracker, const utime_t& initiated) :
tracker(_tracker),
initiated_at(initiated)
{
events.reserve(OPTRACKER_PREALLOC_EVENTS);
}
/// output any type-specific data you want to get when dump() is called
virtual void _dump(ceph::Formatter *f) const {}
  /// if you want something else to happen when events are marked, implement this
  virtual void _event_marked() {}
  /// return a unique descriptor of the Op; e.g. the message it's attached to
virtual void _dump_op_descriptor_unlocked(std::ostream& stream) const = 0;
/// called when the last non-OpTracker reference is dropped
virtual void _unregistered() {}
virtual bool filter_out(const std::set<std::string>& filters) { return true; }
public:
ZTracer::Trace osd_trace;
ZTracer::Trace pg_trace;
ZTracer::Trace store_trace;
ZTracer::Trace journal_trace;
virtual ~TrackedOp() {}
void get() {
++nref;
}
void put() {
again:
auto nref_snap = nref.load();
if (nref_snap == 1) {
switch (state.load()) {
case STATE_UNTRACKED:
_unregistered();
delete this;
break;
case STATE_LIVE:
mark_event("done");
tracker->unregister_inflight_op(this);
_unregistered();
if (!tracker->is_tracking()) {
delete this;
} else {
state = TrackedOp::STATE_HISTORY;
tracker->record_history_op(
TrackedOpRef(this, /* add_ref = */ false));
}
break;
case STATE_HISTORY:
delete this;
break;
default:
ceph_abort();
}
} else if (!nref.compare_exchange_weak(nref_snap, nref_snap - 1)) {
goto again;
}
}
const char *get_desc() const {
if (!desc || want_new_desc.load()) {
std::lock_guard l(lock);
_gen_desc();
}
return desc;
}
private:
void _gen_desc() const {
std::ostringstream ss;
_dump_op_descriptor_unlocked(ss);
desc_str = ss.str();
desc = desc_str.c_str();
want_new_desc = false;
}
public:
void reset_desc() {
want_new_desc = true;
}
const utime_t& get_initiated() const {
return initiated_at;
}
double get_duration() const {
std::lock_guard l(lock);
if (!events.empty() && events.rbegin()->compare("done") == 0)
return events.rbegin()->stamp - get_initiated();
else
return ceph_clock_now() - get_initiated();
}
void mark_event(std::string_view event, utime_t stamp=ceph_clock_now());
void mark_nowarn() {
warn_interval_multiplier = 0;
}
virtual std::string_view state_string() const {
std::lock_guard l(lock);
return events.empty() ? std::string_view() : std::string_view(events.rbegin()->str);
}
void dump(utime_t now, ceph::Formatter *f) const;
void tracking_start() {
if (tracker->register_inflight_op(this)) {
events.emplace_back(initiated_at, "initiated");
state = STATE_LIVE;
}
}
// ref counting via intrusive_ptr, with special behavior on final
// put for historical op tracking
friend void intrusive_ptr_add_ref(TrackedOp *o) {
o->get();
}
friend void intrusive_ptr_release(TrackedOp *o) {
o->put();
}
};
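// A minimal TrackedOp subclass sketch (hypothetical; real subclasses include
// e.g. the OSD's OpRequest). Only _dump_op_descriptor_unlocked() must be
// provided; the other hooks are optional:
//
//   struct ExampleOp : public TrackedOp {
//     ExampleOp(OpTracker *tracker, const utime_t& started)
//       : TrackedOp(tracker, started) {}
//     void _dump_op_descriptor_unlocked(std::ostream& s) const override {
//       s << "example_op";
//     }
//   };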
#endif
| 12,029 | 28.128329 | 142 | h |
null | ceph-main/src/common/WeightedPriorityQueue.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef WP_QUEUE_H
#define WP_QUEUE_H
#include "OpQueue.h"
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/rbtree.hpp>
#include <boost/intrusive/avl_set.hpp>
#include "include/ceph_assert.h"
namespace bi = boost::intrusive;
template <typename T, typename S>
class MapKey
{
public:
bool operator()(const S i, const T &k) const
{
return i < k.key;
}
bool operator()(const T &k, const S i) const
{
return k.key < i;
}
};
template <typename T>
class DelItem
{
public:
void operator()(T* delete_this)
{ delete delete_this; }
};
template <typename T, typename K>
class WeightedPriorityQueue : public OpQueue <T, K>
{
private:
class ListPair : public bi::list_base_hook<>
{
public:
unsigned cost;
T item;
ListPair(unsigned c, T&& i) :
cost(c),
item(std::move(i))
{}
};
class Klass : public bi::set_base_hook<>
{
typedef bi::list<ListPair> ListPairs;
typedef typename ListPairs::iterator Lit;
public:
K key; // klass
ListPairs lp;
Klass(K& k) :
key(k) {
}
~Klass() {
lp.clear_and_dispose(DelItem<ListPair>());
}
friend bool operator< (const Klass &a, const Klass &b)
{ return a.key < b.key; }
friend bool operator> (const Klass &a, const Klass &b)
{ return a.key > b.key; }
friend bool operator== (const Klass &a, const Klass &b)
{ return a.key == b.key; }
void insert(unsigned cost, T&& item, bool front) {
if (front) {
lp.push_front(*new ListPair(cost, std::move(item)));
} else {
lp.push_back(*new ListPair(cost, std::move(item)));
}
}
    // Get the cost of the next item to dequeue
unsigned get_cost() const {
ceph_assert(!empty());
return lp.begin()->cost;
}
T pop() {
ceph_assert(!lp.empty());
T ret = std::move(lp.begin()->item);
lp.erase_and_dispose(lp.begin(), DelItem<ListPair>());
return ret;
}
bool empty() const {
return lp.empty();
}
unsigned get_size() const {
return lp.size();
}
void filter_class(std::list<T>* out) {
for (Lit i = --lp.end();; --i) {
if (out) {
out->push_front(std::move(i->item));
}
i = lp.erase_and_dispose(i, DelItem<ListPair>());
if (i == lp.begin()) {
break;
}
}
}
};
class SubQueue : public bi::set_base_hook<>
{
typedef bi::rbtree<Klass> Klasses;
typedef typename Klasses::iterator Kit;
void check_end() {
if (next == klasses.end()) {
next = klasses.begin();
}
}
public:
unsigned key; // priority
Klasses klasses;
Kit next;
SubQueue(unsigned& p) :
key(p),
next(klasses.begin()) {
}
~SubQueue() {
klasses.clear_and_dispose(DelItem<Klass>());
}
friend bool operator< (const SubQueue &a, const SubQueue &b)
{ return a.key < b.key; }
friend bool operator> (const SubQueue &a, const SubQueue &b)
{ return a.key > b.key; }
friend bool operator== (const SubQueue &a, const SubQueue &b)
{ return a.key == b.key; }
bool empty() const {
return klasses.empty();
}
void insert(K cl, unsigned cost, T&& item, bool front = false) {
typename Klasses::insert_commit_data insert_data;
std::pair<Kit, bool> ret =
klasses.insert_unique_check(cl, MapKey<Klass, K>(), insert_data);
if (ret.second) {
ret.first = klasses.insert_unique_commit(*new Klass(cl), insert_data);
check_end();
}
ret.first->insert(cost, std::move(item), front);
}
unsigned get_cost() const {
ceph_assert(!empty());
return next->get_cost();
}
T pop() {
T ret = next->pop();
if (next->empty()) {
next = klasses.erase_and_dispose(next, DelItem<Klass>());
} else {
++next;
}
check_end();
return ret;
}
void filter_class(K& cl, std::list<T>* out) {
Kit i = klasses.find(cl, MapKey<Klass, K>());
if (i != klasses.end()) {
i->filter_class(out);
Kit tmp = klasses.erase_and_dispose(i, DelItem<Klass>());
if (next == i) {
next = tmp;
}
check_end();
}
}
    // this is intended for unit tests and should never be used on hot paths
unsigned get_size_slow() const {
unsigned count = 0;
for (const auto& klass : klasses) {
count += klass.get_size();
}
return count;
}
void dump(ceph::Formatter *f) const {
f->dump_int("num_keys", next->get_size());
if (!empty()) {
f->dump_int("first_item_cost", next->get_cost());
}
}
};
class Queue {
typedef bi::rbtree<SubQueue> SubQueues;
typedef typename SubQueues::iterator Sit;
SubQueues queues;
unsigned total_prio;
unsigned max_cost;
public:
Queue() :
total_prio(0),
max_cost(0) {
}
~Queue() {
queues.clear_and_dispose(DelItem<SubQueue>());
}
bool empty() const {
return queues.empty();
}
void insert(unsigned p, K cl, unsigned cost, T&& item, bool front = false) {
typename SubQueues::insert_commit_data insert_data;
std::pair<typename SubQueues::iterator, bool> ret =
queues.insert_unique_check(p, MapKey<SubQueue, unsigned>(), insert_data);
if (ret.second) {
ret.first = queues.insert_unique_commit(*new SubQueue(p), insert_data);
total_prio += p;
}
ret.first->insert(cl, cost, std::move(item), front);
if (cost > max_cost) {
max_cost = cost;
}
}
T pop(bool strict = false) {
Sit i = --queues.end();
if (strict) {
T ret = i->pop();
if (i->empty()) {
queues.erase_and_dispose(i, DelItem<SubQueue>());
}
return ret;
}
if (queues.size() > 1) {
while (true) {
// Pick a new priority out of the total priority.
unsigned prio = rand() % total_prio + 1;
unsigned tp = total_prio - i->key;
        // Find the priority corresponding to the picked number.
        // Walk from the highest priority down, subtracting each priority
        // from the remaining total, until the picked number exceeds the
        // remaining total, then try to dequeue from that priority.
        // The walk runs from high to low (the reverse of the previous
        // implementation) because a high-priority op is more likely to be
        // chosen, so less time is spent spinning.
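        // Worked example (illustrative): with priorities {10, 5, 1},
        // total_prio == 16 and i starts at priority 10 with tp == 6.
        // A draw of prio in [7,16] keeps priority 10 (10/16 chance), a
        // draw in [2,6] walks down to priority 5 (5/16), and a draw of 1
        // walks down to priority 1 (1/16), i.e. the selection probability
        // is proportional to the priority value.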
while (prio <= tp) {
--i;
tp -= i->key;
}
// Flip a coin to see if this priority gets to run based on cost.
// The next op's cost is multiplied by .9 and subtracted from the
        // max cost seen. Ops with lower costs yield a larger value,
        // making them easier to select than ops with high costs.
if (max_cost == 0 || rand() % max_cost <=
(max_cost - ((i->get_cost() * 9) / 10))) {
break;
}
i = --queues.end();
}
}
T ret = i->pop();
if (i->empty()) {
total_prio -= i->key;
queues.erase_and_dispose(i, DelItem<SubQueue>());
}
return ret;
}
void filter_class(K& cl, std::list<T>* out) {
for (Sit i = queues.begin(); i != queues.end();) {
i->filter_class(cl, out);
if (i->empty()) {
total_prio -= i->key;
i = queues.erase_and_dispose(i, DelItem<SubQueue>());
} else {
++i;
}
}
}
    // this is intended for unit tests and should never be used on hot paths
unsigned get_size_slow() const {
unsigned count = 0;
for (const auto& queue : queues) {
count += queue.get_size_slow();
}
return count;
}
void dump(ceph::Formatter *f) const {
for (typename SubQueues::const_iterator i = queues.begin();
i != queues.end(); ++i) {
f->dump_int("total_priority", total_prio);
f->dump_int("max_cost", max_cost);
f->open_object_section("subqueue");
f->dump_int("priority", i->key);
i->dump(f);
f->close_section();
}
}
};
Queue strict;
Queue normal;
public:
WeightedPriorityQueue(unsigned max_per, unsigned min_c) :
strict(),
normal()
{
std::srand(time(0));
}
void remove_by_class(K cl, std::list<T>* removed = 0) final {
strict.filter_class(cl, removed);
normal.filter_class(cl, removed);
}
bool empty() const final {
return strict.empty() && normal.empty();
}
void enqueue_strict(K cl, unsigned p, T&& item) final {
strict.insert(p, cl, 0, std::move(item));
}
void enqueue_strict_front(K cl, unsigned p, T&& item) final {
strict.insert(p, cl, 0, std::move(item), true);
}
void enqueue(K cl, unsigned p, unsigned cost, T&& item) final {
normal.insert(p, cl, cost, std::move(item));
}
void enqueue_front(K cl, unsigned p, unsigned cost, T&& item) final {
normal.insert(p, cl, cost, std::move(item), true);
}
T dequeue() override {
ceph_assert(!empty());
if (!strict.empty()) {
return strict.pop(true);
}
return normal.pop();
}
unsigned get_size_slow() {
return strict.get_size_slow() + normal.get_size_slow();
}
void dump(ceph::Formatter *f) const override {
f->open_array_section("high_queues");
strict.dump(f);
f->close_section();
f->open_array_section("queues");
normal.dump(f);
f->close_section();
}
void print(std::ostream &ostream) const final {
ostream << "WeightedPriorityQueue";
}
};
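// Minimal usage sketch (illustrative; the item and class-key types below are
// chosen for the example only):
//
//   WeightedPriorityQueue<std::string, int> q(0, 0);
//   q.enqueue(/*cl=*/1, /*priority=*/10, /*cost=*/100, std::string("work"));
//   q.enqueue_strict(/*cl=*/1, /*priority=*/200, std::string("urgent"));
//   while (!q.empty()) {
//     // strict items come out first; normal items are then picked
//     // weighted-randomly by priority and biased towards lower cost.
//     std::string next = q.dequeue();
//   }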
#endif
| 10,003 | 27.259887 | 86 | h |