| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
null | ceph-main/examples/rgw/java/ceph-s3-upload/src/test/java/org/example/cephs3upload/AppTest.java | package org.example.cephs3upload;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
/**
* Unit test for simple App.
*/
public class AppTest
extends TestCase
{
/**
* Create the test case
*
* @param testName name of the test case
*/
public AppTest( String testName )
{
super( testName );
}
/**
* @return the suite of tests being tested
*/
public static Test suite()
{
return new TestSuite( AppTest.class );
}
/**
     * Rigorous Test :-)
*/
public void testApp()
{
assertTrue( true );
}
}
| 652 | 15.74359 | 46 | java |
null | ceph-main/examples/rgw/lua/elasticsearch_adapter.lua | local elasticsearch = require ("elasticsearch")
local json = require ("lunajson")
local client = elasticsearch.client{
hosts = {
{
host = "localhost",
port = "9200"
}
}
}
local copyfrom = {}
if (Request.CopyFrom ~= nil) then
copyfrom = {
Tenant = Request.CopyFrom.Tenant,
Bucket = Request.CopyFrom.Bucket,
Object = {
Name = Request.CopyFrom.Object.Name,
Instance = Request.CopyFrom.Object.Instance,
Id = Request.CopyFrom.Object.Id,
Size = Request.CopyFrom.Object.Size,
MTime = Request.CopyFrom.Object.MTime
}
}
end
local res, status = client:index{
index = "rgw",
type = "Request",
id = Request.Id,
body =
{
RGWOp = Request.RGWOp,
DecodedURI = Request.DecodedURI,
ContentLength = Request.ContentLength,
GenericAttributes = json.encode(Request.GenericAttributes),
Response = {
HTTPStatusCode = Request.Response.HTTPStatusCode,
HTTPStatus = Request.Response.HTTPStatus,
RGWCode = Request.Response.RGWCode,
Message = Request.Response.Message
},
SwiftAccountName = Request.SwiftAccountName,
Bucket = {
Tenant = Request.Bucket.Tenant,
Name = Request.Bucket.Name,
Marker = Request.Bucket.Marker,
Id = Request.Bucket.Id,
Count = Request.Bucket.Count,
Size = Request.Bucket.Size,
ZoneGroupId = Request.Bucket.ZoneGroupId,
CreationTime = Request.Bucket.CreationTime,
MTime = Request.Bucket.MTime,
Quota = {
MaxSize = Request.Bucket.Quota.MaxSize,
MaxObjects = Request.Bucket.Quota.MaxObjects,
Enabled = Request.Bucket.Quota.Enabled,
Rounded = Request.Bucket.Quota.Rounded
},
PlacementRule = {
Name = Request.Bucket.PlacementRule.Name,
StorageClass = Request.Bucket.PlacementRule.StorageClass
},
User = {
Tenant = Request.Bucket.User.Tenant,
Id = Request.Bucket.User.Id
}
},
Object = {
Name = Request.Object.Name,
Instance = Request.Object.Instance,
Id = Request.Object.Id,
Size = Request.Object.Size,
MTime = Request.Object.MTime
},
CopyFrom = copyfrom,
ObjectOwner = {
DisplayName = Request.ObjectOwner.DisplayName,
User = {
Tenant = Request.ObjectOwner.User.Tenant,
Id = Request.ObjectOwner.User.Id
}
},
ZoneGroup = {
Name = Request.ZoneGroup.Name,
Endpoint = Request.ZoneGroup.Endpoint
},
Environment = json.encode(Request.Environment),
Policy = json.encode(Request.Policy),
UserPolicies = json.encode(Request.UserPolicies),
RGWId = Request.RGWId,
HTTP = {
Parameters = json.encode(Request.HTTP.Parameters),
Resources = json.encode(Request.HTTP.Resources),
Metadata = json.encode(Request.HTTP.Metadata),
Host = Request.HTTP.Host,
Method = Request.HTTP.Method,
URI = Request.HTTP.URI,
QueryString = Request.HTTP.QueryString,
Domain = Request.HTTP.Domain
},
Time = Request.Time,
Dialect = Request.Dialect,
Id = Request.Id,
TransactionId = Request.TransactionId,
Tags = json.encode(Request.Tags),
User = {
Tenant = Request.User.Tenant,
Id = Request.User.Id
}
}
}
| 3,276 | 27.495652 | 64 | lua |
null | ceph-main/examples/rgw/lua/elasticsearch_adapter.md | # Introduction
This directory contains an example script, `elasticsearch_adapter.lua`, showing how to
use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/)
to push fields of the RGW requests
to [Elasticsearch](https://www.elastic.co/elasticsearch/).
## Elasticsearch
Install and run Elasticsearch using docker:
```bash
docker network create elastic
docker pull elasticsearch:2.4.6
docker run --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:2.4.6
```
[Full documentation for Elasticsearch installation](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html)
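Before uploading the script, you can optionally verify that Elasticsearch is reachable; the port below matches the docker command above:
```bash
curl http://localhost:9200
```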
## Usage
* Upload the script:
```bash
radosgw-admin script put --infile=elasticsearch_adapter.lua --context=postRequest
```
* Add the packages used in the script:
```bash
radosgw-admin script-package add --package='elasticsearch 1.0.0-1' --allow-compilation
radosgw-admin script-package add --package='lunajson' --allow-compilation
radosgw-admin script-package add --package='lua-cjson 2.1.0-1' --allow-compilation
```
* Restart radosgw.
* Send a request:
```bash
s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== mb s3://mybucket
s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== put -P /etc/hosts s3://mybucket
curl http://localhost:8000/mybucket/hosts
```
* Search by bucket id from Elasticsearch:
```bash
curl -X GET "localhost:9200/rgw/_search?pretty" -H 'Content-Type: application/json' -d'
{
"query": {
"match": {
"Bucket.Id": "05382336-b2db-409f-82dc-f28ab5fef978.4471.4471"
}
}
}
'
```
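The script indexes the other request fields as well, so similar queries work against them. For example, to search by object name (the value here matches the `hosts` object uploaded above):
```bash
curl -X GET "localhost:9200/rgw/_search?pretty" -H 'Content-Type: application/json' -d'
{
    "query": {
        "match": {
            "Object.Name": "hosts"
        }
    }
}
'
```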
## Requirements
* Lua 5.3
| 1,831 | 29.533333 | 204 | md |
null | ceph-main/examples/rgw/lua/nats_adapter.lua | local json = require ("lunajson")
local nats = require ("nats")
function nats_connect(nats_host, nats_port)
local nats_params = {
host = nats_host,
port = nats_port,
}
client = nats.connect(nats_params)
client:connect()
end
function toJson(request, eventName, opaqueData, configure)
supported_event = true
local notification = {
["Records"] = {
["eventVersion"] = "2.1",
["eventSource"] = "ceph:s3",
["awsRegion"] = request.ZoneGroup.Name,
["eventTime"] = request.Time,
["eventName"] = eventName,
["userIdentity"] = {
["principalId"] = request.User.Id
},
["requestParameters"] = {
["sourceIPAddress"] = ""
},
["responseElements"] = {
["x-amz-request-id"] = request.Id,
["x-amz-id-2"] = request.RGWId
},
["s3"] = {
["s3SchemaVersion"] = "1.0",
["configurationId"] = configure,
["bucket"] = {
["name"] = request.Bucket.Name,
["ownerIdentity"] = {
["principalId"] = request.Bucket.User.Id
},
["arn"] = "arn:aws:s3:" .. request.ZoneGroup.Name .. "::" .. request.Bucket.Name,
["id"] = request.Bucket.Id
},
["object"] = {
["key"] = request.Object.Name,
["size"] = request.Object.Size,
["eTag"] = "", -- eTag is not supported yet
["versionId"] = request.Object.Instance,
["sequencer"] = string.format("%x", os.time()),
["metadata"] = {
json.encode(request.HTTP.Metadata)
},
["tags"] = {
json.encode(request.Tags)
}
}
},
["eventId"] = "",
["opaqueData"] = opaqueData
}
}
return notification
end
supported_event = false
configure = "mynotif1"
opaqueData = "[email protected]"
topic = "Bucket_Notification"
bucket_name = "mybucket"
nats_host = '0.0.0.0'
nats_port = 4222
if bucket_name == Request.Bucket.Name then
--Object Created
if Request.RGWOp == "put_obj" then
notification = toJson(Request ,'ObjectCreated:Put', opaqueData, configure)
elseif Request.RGWOp == "post_obj" then
notification = toJson(Request ,'ObjectCreated:Post', opaqueData, configure)
elseif Request.RGWOp == "copy_obj" then
notification = toJson(Request ,'ObjectCreated:Copy', opaqueData, configure)
--Object Removed
elseif Request.RGWOp == "delete_obj" then
notification = toJson(Request ,'ObjectRemoved:Delete', opaqueData, configure)
end
if supported_event == true then
        nats_connect(nats_host, nats_port) -- pass the configured host and port explicitly
local payload = json.encode(notification)
client:publish(topic, payload)
RGWDebugLog("bucket notification sent to nats://" .. nats_host .. ":" .. nats_port .. "/" .. topic)
end
end
| 3,874 | 40.223404 | 115 | lua |
null | ceph-main/examples/rgw/lua/nats_adapter.md | # Introduction
This directory contains examples on how to use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/) together with a [NATS Lua client](https://github.com/dawnangel/lua-nats) to add NATS to the list of bucket notifications endpoints.
## NATS
To test your setup:
* Install [NATS](https://docs.nats.io/nats-server/installation) and start a nats-server.
* Subscribe to the NATS server using a [nats subscriber](https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe), choosing the topic to be 'Bucket_Notification' (as defined in the [script](nats_adapter.lua))
```bash
nats-sub "Bucket_Notification"
```
[Full documentation for subscribing](https://docs.nats.io/nats-server/clients).
Alternatively, configure the script to point to an existing NATS broker by editing the following part of the script to match the parameters of your existing NATS server (these are plain top-level assignments in the script, so no trailing commas):
```
nats_host = '{host}'
nats_port = {port}
```
```
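To quickly verify that the broker is reachable from the RGW host, you can, for example, probe the NATS port (the hostname is a placeholder; 4222 is the default port used in the script):
```bash
nc -zv nats.example.com 4222
```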
## Usage
* Upload the [script](nats_adapter.lua):
```bash
radosgw-admin script put --infile=nats_adapter.lua --context=postRequest
```
* Add the packages used in the script:
```bash
radosgw-admin script-package add --package=nats --allow-compilation
radosgw-admin script-package add --package=lunajson --allow-compilation
radosgw-admin script-package add --package='lua-cjson 2.1.0-1' --allow-compilation
```
* Restart radosgw.
* Create a bucket:
```
s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" mb s3://mybucket
```
* Upload a file to the bucket and make sure that the NATS server received the notification:
```
s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" put hello.txt s3://mybucket
```
Expected output:
```
Received on [Bucket_Notification]:
{"Records":[
{
"eventVersion":"2.1",
"eventSource":"ceph:s3",
"awsRegion":"default",
"eventTime":"2019-11-22T13:47:35.124724Z",
"eventName":"ObjectCreated:Put",
"userIdentity":{
"principalId":"tester"
},
"requestParameters":{
"sourceIPAddress":""
},
"responseElements":{
"x-amz-request-id":"503a4c37-85eb-47cd-8681-2817e80b4281.5330.903595",
"x-amz-id-2":"14d2-zone1-zonegroup1"
},
"s3":{
"s3SchemaVersion":"1.0",
"configurationId":"mynotif1",
"bucket":{
"name":"mybucket",
"ownerIdentity":{
"principalId":"tester"
},
"arn":"arn:aws:s3:us-east-1::mybucket1",
"id":"503a4c37-85eb-47cd-8681-2817e80b4281.5332.38"
},
"object":{
"key":"hello.txt",
"size":"1024",
"eTag":"",
"versionId":"",
"sequencer": "F7E6D75DC742D108",
"metadata":[],
"tags":[]
}
},
"eventId":"",
"opaqueData":"[email protected]"
}
]}
```
## Requirements
* Lua 5.3 (or higher)
* Luarocks
| 3,178 | 30.166667 | 250 | md |
null | ceph-main/examples/rgw/lua/prometheus_adapter.lua | local http = require("socket.http")
local ltn12 = require("ltn12")
local respbody = {}
local op = "rgw_other_request_content_length"
if (Request.RGWOp == "put_obj") then
op = "rgw_put_request_content_length"
elseif (Request.RGWOp == "get_obj") then
op = "rgw_get_request_content_length"
end
local field = op .. " " .. tostring(Request.ContentLength) .. "\n"
local body, code, headers, status = http.request{
url = "http://127.0.0.1:9091/metrics/job/rgw",
method = "POST",
headers = {
["Content-Type"] = "application/x-www-form-urlencoded",
["Content-Length"] = string.len(field)
},
source = ltn12.source.string(field),
sink = ltn12.sink.table(respbody),
}
| 685 | 27.583333 | 66 | lua |
null | ceph-main/examples/rgw/lua/prometheus_adapter.md | # Introduction
This directory contains an example script, `prometheus_adapter.lua`, showing how to
use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/)
to push metrics from the RGW requests to [Prometheus](https://prometheus.io/),
specifically to collect information on object sizes.
## Prometheus
Because every single run of a Lua script is short-lived,
[Pushgateway](https://github.com/prometheus/pushgateway)
should be used as an intermediate service to enable Prometheus to scrape data
from RGW.
* Install and run Pushgateway using docker:
```bash
docker pull prom/pushgateway
docker run -p 9091:9091 -it prom/pushgateway
```
* Install and run Prometheus using docker:
```bash
docker pull prom/prometheus
docker run --network host -v ${CEPH_DIR}/examples/lua/config/prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus
```
[Full documentation for Prometheus installation](https://prometheus.io/docs/prometheus/latest/installation/)
## Usage
* Upload the script:
```bash
radosgw-admin script put --infile=prometheus_adapter.lua --context=postRequest
```
* Add the packages used in the script:
```bash
radosgw-admin script-package add --package='luasocket' --allow-compilation
```
* Restart radosgw.
* Send a request:
```bash
s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== mb s3://mybucket
s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== put -P /etc/hosts s3://mybucket
curl http://localhost:8000/mybucket/hosts
```
* Open `http://localhost:9090` by browser and search for `rgw_request_content_length`

## Requirements
* Lua 5.3 or higher
| 1,850 | 29.85 | 204 | md |
null | ceph-main/examples/rgw/lua/storage_class.lua | local function isempty(input)
return input == nil or input == ''
end
if Request.RGWOp == 'put_obj' then
RGWDebugLog("Put_Obj with StorageClass: " .. Request.HTTP.StorageClass )
if (isempty(Request.HTTP.StorageClass)) then
if (Request.ContentLength >= 65536) then
RGWDebugLog("No StorageClass for Object and size >= threshold: " .. Request.Object.Name .. " adding QLC StorageClass")
Request.HTTP.StorageClass = "QLC_CLASS"
else
RGWDebugLog("No StorageClass for Object and size < threshold: " .. Request.Object.Name .. " adding STANDARD StorageClass")
Request.HTTP.StorageClass = "STANDARD"
end
else
RGWDebugLog("Storage Class Header Present on Object: " .. Request.Object.Name .. " with StorageClass: " .. Request.HTTP.StorageClass)
end
end
| 792 | 38.65 | 137 | lua |
null | ceph-main/examples/rgw/lua/storage_class.md | # Introduction
This directory contains an example script, `storage_class.lua`, showing how to
use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/)
to read and write the Storage Class field of a put request.
## Usage - the following examples are based on a vstart environment built in ceph/build, with commands invoked from ceph/build
* Create Zonegroup placement info for a Storage Class (QLC_CLASS in this example) and point the class to a data pool (qlc_pool in this example)
NOTE: RGW will need to be restarted due to the Zonegroup placement info change.
See: https://docs.ceph.com/en/latest/radosgw/placement/#zonegroup-zone-configuration for more information.
```bash
# Create Storage Class
./bin/radosgw-admin zonegroup placement add --rgw-zonegroup default --placement-id default-placement --storage-class QLC_CLASS
# Steer objects in QLC_CLASS to the qlc_pool data pool
./bin/radosgw-admin zone placement add --rgw-zone default --placement-id default-placement --storage-class QLC_CLASS --data-pool qlc_pool
```
* Restart radosgw for Zone/ZoneGroup placement changes to take effect.
* Upload the script:
```bash
./bin/radosgw-admin script put --infile=storage_class.lua --context=preRequest
```
* Create a bucket and put an object with a Storage Class header (no modification will occur):
```bash
aws --profile=ceph --endpoint=http://localhost:8000 s3api create-bucket --bucket test-bucket
aws --profile=ceph --endpoint=http://localhost:8000 s3api put-object --bucket test-bucket --key truv-0 --body ./64KiB_object.bin --storage-class STANDARD
```
* Send a request without a Storage Class header (Storage Class will be changed to QLC_CLASS by Lua script):
```bash
aws --profile=ceph --endpoint=http://localhost:8000 s3api put-object --bucket test-bucket --key truv-0 --body ./64KiB_object.bin
```
NOTE: If you use s3cmd instead of aws command-line, s3cmd adds "STANDARD" StorageClass to any put request so the example Lua script will not modify it.
* Verify S3 object had its StorageClass header added
```bash
grep Lua ceph/build/out/radosgw.8000.log
2021-11-01T17:10:14.048-0400 7f9c7f697640 20 Lua INFO: Put_Obj with StorageClass:
2021-11-01T17:10:14.048-0400 7f9c7f697640 20 Lua INFO: No StorageClass for Object and size >= threshold: truv-0 adding QLC StorageClass
```
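As an additional check, you can head the object to confirm which storage class was applied (same profile and endpoint as the examples above; S3 reports a StorageClass field for non-STANDARD classes):
```bash
aws --profile=ceph --endpoint=http://localhost:8000 s3api head-object --bucket test-bucket --key truv-0
```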
## Requirements
* Lua 5.3
| 2,307 | 45.16 | 153 | md |
null | ceph-main/examples/rgw/lua/config/prometheus.yml | global:
scrape_interval: 2s # By default, scrape targets every 15 seconds.
# Attach these labels to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager).
external_labels:
monitor: 'codelab-monitor'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: 'rgw'
# Override the global default and scrape targets from this job every 5 seconds.
scrape_interval: 1s
static_configs:
- targets: ['127.0.0.1:9091'] | 669 | 34.263158 | 97 | yml |
null | ceph-main/examples/rgw/rgw-cache/nginx-lua-file.lua | local check = ngx.req.get_headers()["AUTHORIZATION"]
local uri = ngx.var.request_uri
local ngx_re = require "ngx.re"
local hdrs = ngx.req.get_headers()
--Take all signedheaders names, this for creating the X-Amz-Cache which is necessary to override range header to be able to readahead an object
local res, err = ngx_re.split(check,"SignedHeaders=")
local res2, err2 = ngx_re.split(res[2],",")
local res3, err3 = ngx_re.split(res2[1],";")
local t = {}
local concathdrs = string.char(0x00)
for i = 1, #res3, 1 do
if hdrs[res3[i]] ~= nil then
--0xB1 is the separator between header name and value
t[i] = res3[i] .. string.char(0xB1) .. hdrs[res3[i]]
--0xB2 is the separator between headers
concathdrs = concathdrs .. string.char(0xB2) .. t[i]
end
end
-- check if the authorization header is not empty
if check ~= nil then
local xamzcache = concathdrs:sub(2)
xamzcache = xamzcache .. string.char(0xB2) .. "Authorization" .. string.char(0xB1) .. check
if xamzcache:find("aws4_request") ~= nil and uri ~= "/" and uri:find("?") == nil and hdrs["if-match"] == nil then
ngx.var.authvar = xamzcache
end
end
| 1,161 | 42.037037 | 143 | lua |
null | ceph-main/fusetrace/fusetrace_ll.cc | // -*- mode:C++; tab-width:8; c-basic-offset:4; indent-tabs-mode:t -*-
// vim: ts=8 sw=4 smarttab
/*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2007 Miklos Szeredi <[email protected]>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
gcc -Wall `pkg-config fuse --cflags --libs` -lulockmgr fusexmp_fh.c -o fusexmp_fh
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <fuse/fuse_lowlevel.h>
#include <ulockmgr.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <errno.h>
#include <sys/time.h>
#ifdef HAVE_SETXATTR
#include <sys/xattr.h>
#endif
#include <time.h>
#include "include/unordered_map.h"
#include "include/hash_namespace.h"
#ifndef __LP64__
CEPH_HASH_NAMESPACE_START
template<> struct hash<uint64_t> {
size_t operator()(uint64_t __x) const {
static hash<uint32_t> H;
return H((__x >> 32) ^ (__x & 0xffffffff));
}
};
CEPH_HASH_NAMESPACE_END
#endif
#include <iostream>
#include <fstream>
#include <map>
#include <set>
using namespace std;
#include "common/ceph_mutex.h"
ceph::mutex trace_lock;
ofstream tracefile;
#define traceout (tracefile.is_open() ? tracefile : cout)
char *basedir = 0;
int debug = 0;
bool do_timestamps = true;
#define dout if (debug) cout
ceph::mutex lock;
struct Inode {
struct stat stbuf;
int ref;
set<int> fds;
map<pair<string,ino_t>,Inode*> parents;
// if dir,
map<string,Inode*> dentries;
Inode() : ref(0) {}
Inode *lookup(const string& dname) {
if (dentries.count(dname))
return dentries[dname];
return 0;
}
};
Inode *root = 0;
ceph::unordered_map<ino_t, Inode*> inode_map;
bool make_inode_path(string &buf, Inode *in)
{
if (!in->parents.empty()) {
if (!make_inode_path(buf, in->parents.begin()->second))
return false;
buf += "/";
buf += in->parents.begin()->first.first;
} else {
if (in != root) return false;
assert(in->stbuf.st_ino == 1);
buf = basedir;
buf += "/";
}
return true;
//dout << "path: " << in->stbuf.st_ino << " -> " << buf << endl;
}
bool make_inode_path(string &buf, Inode *in, const char *name)
{
if (!make_inode_path(buf, in)) return false;
buf += "/";
buf += name;
return true;
}
bool make_ino_path(string &buf, ino_t ino)
{
Inode *in = inode_map[ino];
assert(in);
return make_inode_path(buf, in);
}
bool make_ino_path(string &buf, ino_t ino, const char *name)
{
Inode *in = inode_map[ino];
assert(in);
if (!make_inode_path(buf, in))
return false;
buf += "/";
buf += name;
return true;
}
void remove_dentry(Inode *pin, const string& dname)
{
dout << "remove_dentry " << pin->stbuf.st_ino << " " << dname << endl;
Inode *in = pin->lookup(dname);
assert(in);
pin->dentries.erase(dname);
in->parents.erase(pair<string,ino_t>(dname,pin->stbuf.st_ino));
dout << "remove_dentry " << pin->stbuf.st_ino << " " << dname
<< " ... inode " << in->stbuf.st_ino << " ref " << in->ref
<< endl;
}
void add_dentry(Inode *parent, const string& dname, Inode *in)
{
dout << "add_dentry " << parent->stbuf.st_ino << " " << dname << " to " << in->stbuf.st_ino << endl;
if (parent->dentries.count(dname))
remove_dentry(parent, dname); // e.g., when renaming over another file..
parent->dentries[dname] = in;
in->parents[pair<string,ino_t>(dname,parent->stbuf.st_ino)] = parent;
}
void unlink_inode(Inode *in)
{
dout << "unlink_inode " << in->stbuf.st_ino << " ref " << in->ref << endl;
// remove parent links
while (!in->parents.empty()) {
Inode *parent = in->parents.begin()->second;
string dname = in->parents.begin()->first.first;
remove_dentry(parent, dname);
}
// remove children
while (!in->dentries.empty())
remove_dentry(in, in->dentries.begin()->first);
while (!in->fds.empty()) {
int fd = *in->fds.begin();
::close(fd);
in->fds.erase(in->fds.begin());
dout << "remove_inode closeing stray fd " << fd << endl;
}
}
void remove_inode(Inode *in)
{
dout << "remove_inode " << in->stbuf.st_ino << " ref " << in->ref << endl;
unlink_inode(in);
inode_map.erase(in->stbuf.st_ino);
dout << "remove_inode " << in->stbuf.st_ino << " done" << endl;
delete in;
}
Inode *add_inode(Inode *parent, const char *name, struct stat *attr)
{
dout << "add_inode " << parent->stbuf.st_ino << " " << name << " " << attr->st_ino << endl;
Inode *in;
if (inode_map.count(attr->st_ino)) {
// reuse inode
in = inode_map[attr->st_ino];
unlink_inode(in); // hrm.. should this close open fds? probably.
dout << "** REUSING INODE **" << endl;
} else {
inode_map[attr->st_ino] = in = new Inode;
}
memcpy(&in->stbuf, attr, sizeof(*attr));
string dname(name);
add_dentry(parent, dname, in);
return in;
}
void print_time()
{
if (do_timestamps) {
struct timeval tv;
gettimeofday(&tv, 0);
traceout << "@" << endl
<< tv.tv_sec << endl
<< tv.tv_usec << endl;
}
}
bool has_perm(int mask, Inode *in, int uid, int gid)
{
dout << "hash_perm " << uid << "." << gid << " " << oct << mask << " in " << in->stbuf.st_mode
<< " " << in->stbuf.st_uid << "." << in->stbuf.st_gid << endl;
if (in->stbuf.st_mode & mask) return true;
if (in->stbuf.st_gid == gid && in->stbuf.st_mode & (mask << 3)) return true;
if (in->stbuf.st_uid == uid && in->stbuf.st_mode & (mask << 6)) return true;
return false;
}
static void ft_ll_lookup(fuse_req_t req, fuse_ino_t pino, const char *name)
{
int res = 0;
//dout << "lookup " << pino << " " << name << endl;
struct fuse_entry_param fe;
memset(&fe, 0, sizeof(fe));
lock.lock();
Inode *parent = inode_map[pino];
assert(parent);
// check permissions
string dname(name);
string path;
Inode *in = 0;
if (!has_perm(0001, parent, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid)) {
res = EPERM;
}
else if (!make_inode_path(path, parent, name)) {
res = ENOENT;
} else {
in = parent->lookup(dname);
if (in && res == 0) {
// re-stat, for good measure
res = ::lstat(path.c_str(), &in->stbuf);
// hrm!
if (res != 0) {
dout << "** WEIRD ** lookup on " << pino << " " << name << " inode went away!" << endl;
in = 0;
res = errno;
}
//dout << "have " << in->stbuf.st_ino << endl;
} else {
in = new Inode;
res = ::lstat(path.c_str(), &in->stbuf);
//dout << "stat " << path << " res = " << res << endl;
if (res == 0) {
inode_map[in->stbuf.st_ino] = in;
add_dentry(parent, dname, in);
} else {
delete in;
in = 0;
res = errno;
}
}
if (in) {
in->ref++;
fe.ino = in->stbuf.st_ino;
memcpy(&fe.attr, &in->stbuf, sizeof(in->stbuf));
}
}
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_lookup" << endl << pino << endl << name << endl << fe.attr.st_ino << endl;
trace_lock.unlock();
if (in)
fuse_reply_entry(req, &fe);
else
fuse_reply_err(req, res);
}
static void ft_ll_forget(fuse_req_t req, fuse_ino_t ino, long unsigned nlookup)
{
if (ino != 1) {
std::scoped_lock l{lock};
Inode *in = inode_map[ino];
if (in) {
dout << "forget on " << ino << " ref " << in->ref << ", forget " << nlookup << endl;
if (in->ref < nlookup)
dout << "**** BAD **** forget on " << ino << " ref " << in->ref << ", forget " << nlookup << endl;
in->ref -= nlookup;
if (in->ref <= 0)
remove_inode(in);
} else {
dout << "**** BAD **** forget " << nlookup << " on nonexistent inode " << ino << endl;
}
}
{
std::scoped_lock l{trace_lock};
print_time();
traceout << "ll_forget" << endl << ino << endl << nlookup << endl;
}
fuse_reply_none(req);
}
static void ft_ll_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
int res = 0;
string path;
int fd = 0;
Inode *in = 0;
struct stat attr;
lock.lock();
in = inode_map[ino];
if (in->fds.empty()) {
if (!make_inode_path(path, in))
res = ENOENT;
} else
fd = *in->fds.begin();
lock.unlock();
if (fd > 0) {
res = ::fstat(fd, &attr);
dout << "getattr fstat on fd " << fd << " res " << res << endl;
} else if (res == 0) {
res = ::lstat(path.c_str(), &attr);
dout << "getattr lstat on " << path << " res " << res << endl;
}
if (res < 0) res = errno;
if (ino == 1) attr.st_ino = 1;
trace_lock.lock();
print_time();
traceout << "ll_getattr" << endl << ino << endl;
trace_lock.unlock();
if (res == 0) {
lock.lock();
memcpy(&in->stbuf, &attr, sizeof(attr));
lock.unlock();
fuse_reply_attr(req, &attr, 0);
} else
fuse_reply_err(req, res);
}
static void ft_ll_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr,
int to_set, struct fuse_file_info *fi)
{
string path;
Inode *in = 0;
int fd = 0;
int res = 0;
lock.lock();
in = inode_map[ino];
if (in->fds.empty() || (to_set & FUSE_SET_ATTR_MTIME)) {
if (!make_inode_path(path, in))
res = ENOENT;
} else
fd = *in->fds.begin();
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_setattr" << endl << ino << endl;
traceout << attr->st_mode << endl;
traceout << attr->st_uid << endl << attr->st_gid << endl;
traceout << attr->st_size << endl;
traceout << attr->st_mtime << endl;
traceout << attr->st_atime << endl;
traceout << to_set << endl;
trace_lock.unlock();
if (res == 0 && !has_perm(0010, in, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid)) {
res = EPERM;
} else if (res == 0) {
if (to_set & FUSE_SET_ATTR_MODE) {
if (fd > 0)
res = ::fchmod(fd, attr->st_mode);
else
res = ::chmod(path.c_str(), attr->st_mode);
}
if (!res && to_set & FUSE_SET_ATTR_UID) {
if (fd > 0)
res = ::fchown(fd, attr->st_uid, attr->st_gid);
else
res = ::chown(path.c_str(), attr->st_uid, attr->st_gid);
}
if (!res && to_set & FUSE_SET_ATTR_SIZE) {
if (fd > 0)
res = ::ftruncate(fd, attr->st_size);
else
res = ::truncate(path.c_str(), attr->st_size);
}
if (!res && to_set & FUSE_SET_ATTR_MTIME) {
struct utimbuf ut;
ut.actime = attr->st_atime;
ut.modtime = attr->st_mtime;
res = ::utime(path.c_str(), &ut);
}
if (res < 0) res = errno;
}
if (res == 0) {
lock.lock();
::lstat(path.c_str(), &in->stbuf);
if (ino == 1) in->stbuf.st_ino = 1;
memcpy(attr, &in->stbuf, sizeof(*attr));
lock.unlock();
fuse_reply_attr(req, attr, 0);
} else
fuse_reply_err(req, res);
}
static void ft_ll_readlink(fuse_req_t req, fuse_ino_t ino)
{
string path;
int res = 0;
lock.lock();
if (!make_ino_path(path, ino))
res = ENOENT;
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_readlink" << endl << ino << endl;
trace_lock.unlock();
char buf[256];
if (res == 0) res = readlink(path.c_str(), buf, 255);
if (res < 0) res = errno;
if (res >= 0) {
buf[res] = 0;
fuse_reply_readlink(req, buf);
} else {
fuse_reply_err(req, res);
}
}
static void ft_ll_opendir(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
string path;
int res = 0;
lock.lock();
Inode *in = inode_map[ino];
if (!make_inode_path(path, in))
res = ENOENT;
lock.unlock();
DIR *dir = 0;
if (res == 0 && !has_perm(0100, in, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) dir = opendir(path.c_str());
if (res < 0) res = errno;
trace_lock.lock();
print_time();
traceout << "ll_opendir" << endl << ino << endl << (unsigned long)dir << endl;
trace_lock.unlock();
if (dir) {
fi->fh = (long)dir;
fuse_reply_open(req, fi);
} else
fuse_reply_err(req, res);
}
static void ft_ll_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
off_t off, struct fuse_file_info *fi)
{
struct dirent *de;
DIR *dp = (DIR*)fi->fh;
// buffer
char *buf;
size_t pos = 0;
buf = new char[size];
if (!buf) {
fuse_reply_err(req, ENOMEM);
return;
}
seekdir(dp, off);
while ((de = readdir(dp)) != NULL) {
struct stat st;
memset(&st, 0, sizeof(st));
st.st_ino = de->d_ino;
st.st_mode = de->d_type << 12;
size_t entrysize = fuse_add_direntry(req, buf + pos, size - pos,
de->d_name, &st, telldir(dp));
if (entrysize > size - pos)
break; // didn't fit, done for now.
pos += entrysize;
}
fuse_reply_buf(req, buf, pos);
delete[] buf;
}
static void ft_ll_releasedir(fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi)
{
DIR *dir = (DIR*)fi->fh;
trace_lock.lock();
print_time();
traceout << "ll_releasedir" << endl << (unsigned long)dir << endl;
trace_lock.unlock();
closedir(dir);
fuse_reply_err(req, 0);
}
static void ft_ll_mknod(fuse_req_t req, fuse_ino_t parent, const char *name,
mode_t mode, dev_t rdev)
{
string path;
Inode *pin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
dout << "mknod " << path << endl;
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) res = ::mknod(path.c_str(), mode, rdev);
if (res < 0)
res = errno;
else
::chown(path.c_str(), fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid);
struct fuse_entry_param fe;
if (res == 0) {
memset(&fe, 0, sizeof(fe));
::lstat(path.c_str(), &fe.attr);
fe.ino = fe.attr.st_ino;
lock.lock();
Inode *in = add_inode(pin, name, &fe.attr);
in->ref++;
lock.unlock();
}
trace_lock.lock();
print_time();
traceout << "ll_mknod" << endl << parent << endl << name << endl << mode << endl << rdev << endl;
traceout << (res == 0 ? fe.ino:0) << endl;
trace_lock.unlock();
if (res == 0)
fuse_reply_entry(req, &fe);
else
fuse_reply_err(req, res);
}
static void ft_ll_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name,
mode_t mode)
{
string path;
Inode *pin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) res = ::mkdir(path.c_str(), mode);
if (res < 0)
res = errno;
else
::chown(path.c_str(), fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid);
struct fuse_entry_param fe;
if (res == 0) {
memset(&fe, 0, sizeof(fe));
::lstat(path.c_str(), &fe.attr);
fe.ino = fe.attr.st_ino;
lock.lock();
Inode *in = add_inode(pin, name, &fe.attr);
in->ref++;
lock.unlock();
}
trace_lock.lock();
print_time();
traceout << "ll_mkdir" << endl << parent << endl << name << endl << mode << endl;
traceout << (res == 0 ? fe.ino:0) << endl;
trace_lock.unlock();
if (res == 0)
fuse_reply_entry(req, &fe);
else
fuse_reply_err(req, res);
}
static void ft_ll_symlink(fuse_req_t req, const char *value, fuse_ino_t parent, const char *name)
{
string path;
Inode *pin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) res = ::symlink(value, path.c_str());
if (res < 0)
res = errno;
else
::chown(path.c_str(), fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid);
struct fuse_entry_param fe;
if (res == 0) {
memset(&fe, 0, sizeof(fe));
::lstat(path.c_str(), &fe.attr);
fe.ino = fe.attr.st_ino;
lock.lock();
Inode *in = add_inode(pin, name, &fe.attr);
in->ref++;
lock.unlock();
}
trace_lock.lock();
print_time();
traceout << "ll_symlink" << endl << parent << endl << name << endl << value << endl;
traceout << (res == 0 ? fe.ino:0) << endl;
trace_lock.unlock();
if (res == 0)
fuse_reply_entry(req, &fe);
else
fuse_reply_err(req, res);
}
static void ft_ll_create(fuse_req_t req, fuse_ino_t parent, const char *name,
mode_t mode, struct fuse_file_info *fi)
{
string path;
Inode *pin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
dout << "create " << path << endl;
int fd = 0;
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) {
fd = ::open(path.c_str(), fi->flags|O_CREAT, mode);
if (fd < 0) {
res = errno;
} else {
::fchown(fd, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid);
}
}
struct fuse_entry_param fe;
memset(&fe, 0, sizeof(fe));
if (res == 0) {
::lstat(path.c_str(), &fe.attr);
fe.ino = fe.attr.st_ino;
lock.lock();
Inode *in = add_inode(pin, name, &fe.attr);
in->ref++;
in->fds.insert(fd);
lock.unlock();
fi->fh = fd;
}
trace_lock.lock();
print_time();
traceout << "ll_create" << endl
<< parent << endl
<< name << endl
<< mode << endl
<< fi->flags << endl
<< (res == 0 ? fd:0) << endl
<< fe.ino << endl;
trace_lock.unlock();
if (res == 0)
fuse_reply_create(req, &fe, fi);
else
fuse_reply_err(req, res);
}
static void ft_ll_statfs(fuse_req_t req, fuse_ino_t ino)
{
string path;
int res = 0;
if (ino) {
lock.lock();
if (!make_ino_path(path, ino))
res = ENOENT;
lock.unlock();
} else {
path = basedir;
}
trace_lock.lock();
print_time();
traceout << "ll_statfs" << endl << ino << endl;
trace_lock.unlock();
struct statvfs stbuf;
if (res == 0) res = statvfs(path.c_str(), &stbuf);
if (res < 0) res = errno;
if (res == 0)
fuse_reply_statfs(req, &stbuf);
else
fuse_reply_err(req, res);
}
static void ft_ll_unlink(fuse_req_t req, fuse_ino_t parent, const char *name)
{
string path;
Inode *pin = 0;
Inode *in = 0;
string dname(name);
int res = 0;
lock.lock();
pin = inode_map[parent];
in = pin->lookup(dname);
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_unlink" << endl << parent << endl << name << endl;
trace_lock.unlock();
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) {
if (in && in->fds.empty()) {
int fd = ::open(path.c_str(), O_RDWR);
if (fd > 0)
in->fds.insert(fd); // for slow getattrs.. wtf
dout << "unlink opening paranoia fd " << fd << endl;
}
res = ::unlink(path.c_str());
if (res < 0) res = errno;
}
if (res == 0) {
// remove from out cache
lock.lock();
string dname(name);
if (pin->lookup(dname))
remove_dentry(pin, dname);
lock.unlock();
fuse_reply_err(req, 0);
} else
fuse_reply_err(req, res);
}
static void ft_ll_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name)
{
string path;
Inode *pin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_rmdir" << endl << parent << endl << name << endl;
trace_lock.unlock();
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) res = ::rmdir(path.c_str());
if (res < 0) res = errno;
if (res == 0) {
// remove from out cache
lock.lock();
string dname(name);
if (pin->lookup(dname))
remove_dentry(pin, dname);
lock.unlock();
fuse_reply_err(req, 0);
} else
fuse_reply_err(req, res);
}
static void ft_ll_rename(fuse_req_t req, fuse_ino_t parent, const char *name,
fuse_ino_t newparent, const char *newname)
{
string path;
string newpath;
Inode *pin = 0;
Inode *newpin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
newpin = inode_map[newparent];
if (!make_inode_path(newpath, newpin, newname))
res = ENOENT;
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_rename" << endl
<< parent << endl
<< name << endl
<< newparent << endl
<< newname << endl;
trace_lock.unlock();
if (res == 0 && (!has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid) ||
!has_perm(0010, newpin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid)))
res = EPERM;
else if (res == 0) res = ::rename(path.c_str(), newpath.c_str());
if (res < 0) res = errno;
if (res == 0) {
string dname(name);
string newdname(newname);
lock.lock();
Inode *in = pin->lookup(dname);
if (in) {
add_dentry(newpin, newdname, in);
remove_dentry(pin, dname);
} else {
dout << "hrm, rename didn't have renamed inode.. " << path << " to " << newpath << endl;
}
lock.unlock();
fuse_reply_err(req, 0);
} else
fuse_reply_err(req, res);
}
static void ft_ll_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent,
const char *newname)
{
string path;
string newpath;
Inode *in = 0;
Inode *newpin = 0;
int res = 0;
lock.lock();
in = inode_map[ino];
if (!make_inode_path(path, in))
res = ENOENT;
newpin = inode_map[newparent];
if (!make_inode_path(newpath, newpin, newname))
res = ENOENT;
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_link" << endl
<< ino << endl
<< newparent << endl
<< newname << endl;
trace_lock.unlock();
//cout << "link " << path << " newpath " << newpath << endl;
if (res == 0 && (!has_perm(0010, in, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid) ||
!has_perm(0010, newpin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid)))
res = EPERM;
else if (res == 0) res = ::link(path.c_str(), newpath.c_str());
if (res < 0) res = errno;
if (res == 0) {
struct fuse_entry_param fe;
memset(&fe, 0, sizeof(fe));
::lstat(newpath.c_str(), &fe.attr);
lock.lock();
string newdname(newname);
add_dentry(newpin, newdname, in);
in->ref++;
memcpy(&in->stbuf, &fe.attr, sizeof(fe.attr)); // re-read, bc we changed the link count
lock.unlock();
fe.ino = fe.attr.st_ino;
fuse_reply_entry(req, &fe);
} else
fuse_reply_err(req, res);
}
static void ft_ll_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
string path;
Inode *in = 0;
int res = 0;
lock.lock();
in = inode_map[ino];
if (!make_inode_path(path, in))
res = ENOENT;
lock.unlock();
int want = 0100;
if (fi->flags & O_RDWR) want |= 0010;
if (fi->flags == O_WRONLY) want = 0010;
int fd = 0;
if (res == 0 && !has_perm(want, in, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) {
fd = ::open(path.c_str(), fi->flags);
if (fd <= 0) res = errno;
}
trace_lock.lock();
print_time();
traceout << "ll_open" << endl
<< ino << endl
<< fi->flags << endl
<< (fd > 0 ? fd:0) << endl;
trace_lock.unlock();
if (res == 0) {
lock.lock();
in->fds.insert(fd);
lock.unlock();
fi->fh = fd;
fuse_reply_open(req, fi);
} else
fuse_reply_err(req, res);
}
static void ft_ll_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
struct fuse_file_info *fi)
{
char *buf = new char[size];
int res = ::pread(fi->fh, buf, size, off);
//cout << "read " << path << " " << off << "~" << size << endl;
trace_lock.lock();
print_time();
traceout << "ll_read" << endl
<< fi->fh << endl
<< off << endl
<< size << endl;
trace_lock.unlock();
if (res >= 0)
fuse_reply_buf(req, buf, res);
else
fuse_reply_err(req, errno);
delete[] buf;
}
static void ft_ll_write(fuse_req_t req, fuse_ino_t ino, const char *buf,
size_t size, off_t off, struct fuse_file_info *fi)
{
int res = ::pwrite(fi->fh, buf, size, off);
trace_lock.lock();
print_time();
traceout << "ll_write" << endl
<< fi->fh << endl
<< off << endl
<< size << endl;
trace_lock.unlock();
if (res >= 0)
fuse_reply_write(req, res);
else
fuse_reply_err(req, errno);
}
static void ft_ll_flush(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
trace_lock.lock();
print_time();
traceout << "ll_flush" << endl << fi->fh << endl;
trace_lock.unlock();
int res = ::fdatasync(fi->fh);
//int res = ::close(dup(fi->fh));
if (res >= 0)
fuse_reply_err(req, 0);
else
fuse_reply_err(req, errno);
}
static void ft_ll_release(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
trace_lock.lock();
print_time();
traceout << "ll_release" << endl << fi->fh << endl;
trace_lock.unlock();
lock.lock();
Inode *in = inode_map[ino];
in->fds.erase(fi->fh);
lock.unlock();
int res = ::close(fi->fh);
if (res >= 0)
fuse_reply_err(req, 0);
else
fuse_reply_err(req, errno);
}
static void ft_ll_fsync(fuse_req_t req, fuse_ino_t ino, int datasync,
struct fuse_file_info *fi)
{
trace_lock.lock();
print_time();
traceout << "ll_fsync" << endl << fi->fh << endl;
trace_lock.unlock();
int res = ::fsync(fi->fh);
if (res >= 0)
fuse_reply_err(req, 0);
else
fuse_reply_err(req, errno);
}
static struct fuse_lowlevel_ops ft_ll_oper = {
init: 0,
destroy: 0,
lookup: ft_ll_lookup,
forget: ft_ll_forget,
getattr: ft_ll_getattr,
setattr: ft_ll_setattr,
readlink: ft_ll_readlink,
mknod: ft_ll_mknod,
mkdir: ft_ll_mkdir,
unlink: ft_ll_unlink,
rmdir: ft_ll_rmdir,
symlink: ft_ll_symlink,
rename: ft_ll_rename,
link: ft_ll_link,
open: ft_ll_open,
read: ft_ll_read,
write: ft_ll_write,
flush: ft_ll_flush,
release: ft_ll_release,
fsync: ft_ll_fsync,
opendir: ft_ll_opendir,
readdir: ft_ll_readdir,
releasedir: ft_ll_releasedir,
fsyncdir: 0,
statfs: ft_ll_statfs,
setxattr: 0,
getxattr: 0,
listxattr: 0,
removexattr: 0,
access: 0,
create: ft_ll_create,
getlk: 0,
setlk: 0,
bmap: 0
};
int main(int argc, char *argv[])
{
// open trace
// figure base dir
char *newargv[100];
int newargc = 0;
for (int i=0; i<argc; i++) {
if (strcmp(argv[i], "--basedir") == 0) {
basedir = argv[++i];
} else if (strcmp(argv[i], "--timestamps") == 0) {
do_timestamps = atoi(argv[++i]);
} else if (strcmp(argv[i], "--trace") == 0) {
tracefile.open(argv[++i], ios::out|ios::trunc);
if (!tracefile.is_open())
cerr << "** couldn't open trace file " << argv[i] << endl;
} else if (strcmp(argv[i], "--debug") == 0) {
debug = 1;
} else {
cout << "arg: " << newargc << " " << argv[i] << endl;
newargv[newargc++] = argv[i];
}
}
newargv[newargc++] = "-o";
newargv[newargc++] = "allow_other";
// newargv[newargc++] = "-o";
// newargv[newargc++] = "default_permissions";
if (!basedir) return 1;
cout << "basedir is " << basedir << endl;
// create root ino
root = new Inode;
::lstat(basedir, &root->stbuf);
root->stbuf.st_ino = 1;
inode_map[1] = root;
root->ref++;
umask(0);
// go go gadget fuse
struct fuse_args args = FUSE_ARGS_INIT(newargc, newargv);
struct fuse_chan *ch;
char *mountpoint;
if (fuse_parse_cmdline(&args, &mountpoint, NULL, NULL) != -1 &&
(ch = fuse_mount(mountpoint, &args)) != NULL) {
struct fuse_session *se;
// init fuse
se = fuse_lowlevel_new(&args, &ft_ll_oper, sizeof(ft_ll_oper),
NULL);
if (se != NULL) {
if (fuse_set_signal_handlers(se) != -1) {
fuse_session_add_chan(se, ch);
if (fuse_session_loop(se) <= -1) {
cout << "Failed fuse_session_loop() call." << endl;
return 1;
}
fuse_remove_signal_handlers(se);
fuse_session_remove_chan(ch);
}
fuse_session_destroy(se);
}
fuse_unmount(mountpoint, ch);
}
fuse_opt_free_args(&args);
}
| 28,326 | 22.904641 | 104 | cc |
null | ceph-main/man/conf.py | import os
import sys
project = u'Ceph'
copyright = u'2010-2014, Inktank Storage, Inc. and contributors. Licensed under Creative Commons Attribution Share Alike 3.0 (CC-BY-SA-3.0)'
version = 'dev'
release = 'dev'
exclude_patterns = ['**/.#*', '**/*~']
def _get_description(fname, base):
with open(fname) as f:
one = None
while True:
line = f.readline().rstrip('\n')
if not line:
continue
if line.startswith(':') and line.endswith(':'):
continue
if line.startswith('.. '):
continue
one = line
break
two = f.readline().rstrip('\n')
three = f.readline().rstrip('\n')
assert one == three
assert all(c=='=' for c in one)
name, description = two.split('--', 1)
assert name.strip() == base
return description.strip()
def _get_manpages():
src_dir = os.path.dirname(__file__)
top_srcdir = os.path.dirname(src_dir)
man_dir = os.path.join(top_srcdir, 'doc', 'man')
sections = os.listdir(man_dir)
for section in sections:
section_dir = os.path.join(man_dir, section)
if not os.path.isdir(section_dir):
continue
for filename in os.listdir(section_dir):
base, ext = os.path.splitext(filename)
if ext != '.rst':
continue
if base == 'index':
continue
path = os.path.join(section_dir, filename)
try:
description = _get_description(path, base)
except UnicodeDecodeError as e:
print(f"unable to decode {path}", file=sys.stderr)
raise e
yield (
os.path.join(section, base),
base,
description,
'',
section,
)
man_pages = list(_get_manpages())
# sphinx warns if no toc is found, so feed it with a random file
# which is also rendered in this run.
master_doc = '8/ceph'
| 2,065 | 29.835821 | 140 | py |
null | ceph-main/mirroring/README.md | # Mirroring Ceph
Ceph is primarily distributed from download.ceph.com which is based in the US.
However, there are multiple mirrors around the globe which offer the same content, often
faster than downloading from the primary source.
Using the script found in this directory you can easily mirror Ceph to your local
datacenter and serve packages from there to your servers.
## Guidelines
If you want to mirror Ceph please follow these guidelines:
* Please use a mirror close to you
* Do not sync in a shorter interval than 3 hours
* Avoid syncing at minute 0 of the hour, use something between 0 and 59.
## Mirror script
The 'mirror-ceph.sh' script is written in Bash and will use rsync to mirror
all the contents to a local directory.
Usage is simple:
<pre>
./mirror-ceph.sh -q -s eu -t /srv/mirrors/ceph
</pre>
This example will mirror all contents from the source 'eu' which is *eu.ceph.com*.
### Running with CRON
The script can easily be run with CRON:
<pre>
13 1,5,9,13,17,21 * * * /home/ceph/mirror-ceph.sh -q -s eu -t /srv/mirrors/ceph
</pre>
This will sync from *eu.ceph.com* on 01:13, 05:13, 09:13, 13:13, 17:13 and 21:13.
## Becoming a mirror source
If you have spare hardware and resources available you can opt for becoming a mirror
source for others.
A few things which are required:
* 1Gbit connection or more
* Native IPv4 **and** IPv6
* HTTP access
* rsync access
* 2TB of storage or more
* Monitoring of the mirror/source
You can then run the *mirror-ceph.sh* script and mirror all the contents.
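Once the initial sync has completed you can use the *test-mirrors.sh* script from this directory to verify that your mirror responds over HTTP and rsync on both IPv4 and IPv6 (the hostname below is a placeholder):
<pre>
./test-mirrors.sh my-mirror.example.com
</pre>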
Mirror maintainers should sign up to the [Ceph-mirrors mailing list](https://lists.ceph.io/postorius/lists/ceph-mirrors.ceph.io/).
### Logs
The project analyzes the downloads of Ceph a few times a year. Mirrors are
expected to store HTTP access logs for at least 6 months so they can be
used for this analysis.
### DNS
Using a DNS CNAME record a XX.ceph.com entry can be forwarded to the server and
added to the mirror script.
You can request such a DNS entry on the Ceph mailing lists.
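To verify that the entry is in place and that the host resolves over both IPv4 and IPv6 (as required above), you can for example query it with dig; *xx.ceph.com* is used as a placeholder here:
<pre>
dig +short xx.ceph.com CNAME
dig +short xx.ceph.com A
dig +short xx.ceph.com AAAA
</pre>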
### Apache configuration
An Apache 2.4 VirtualHost example configuration can be found in the Git repository
under the name *apache2.vhost.conf*.
| 2,161 | 31.268657 | 130 | md |
null | ceph-main/mirroring/mirror-ceph.sh | #!/usr/bin/env bash
set -e
#
# Script to mirror Ceph locally
#
# Please, choose a local source and do not sync in a shorter interval than
# 3 hours.
#
SILENT=0
# All available source mirrors
declare -A SOURCES
SOURCES[eu]="eu.ceph.com"
SOURCES[de]="de.ceph.com"
SOURCES[se]="se.ceph.com"
SOURCES[au]="au.ceph.com"
SOURCES[us]="download.ceph.com"
SOURCES[fr]="fr.ceph.com"
SOURCES[ca]="ca.ceph.com"
SOURCES[us-west]="us-west.ceph.com"
SOURCES[global]="download.ceph.com"
function print_usage() {
echo "$0 [-q ] -s <source mirror> -t <target directory>"
}
while getopts ":qhs:t:" opt; do
case $opt in
q)
SILENT=1
;;
s)
SOURCE=$OPTARG
;;
t)
TARGET=$OPTARG
;;
h)
HELP=1
;;
\?)
print_usage
exit 1
;;
esac
done
if [ ! -z "$HELP" ] || [ -z "$TARGET" ] || [ -z "$SOURCE" ]; then
print_usage
exit 1
fi
if [ ! -d "$TARGET" ]; then
echo "$TARGET is not a valid target directory"
exit 1
fi
for i in "${!SOURCES[@]}"; do
if [ "$i" == "$SOURCE" ]; then
SOURCE_HOST=${SOURCES[$i]}
fi
done
if [ -z "$SOURCE_HOST" ]; then
echo -n "Please select one of the following sources:"
for i in "${!SOURCES[@]}"; do
echo -n " $i"
done
echo ""
exit 1
fi
RSYNC_OPTS="--stats --progress"
if [ $SILENT -eq 1 ]; then
RSYNC_OPTS="--quiet"
fi
# We start a two-stage sync here for DEB and RPM
# Based on: https://www.debian.org/mirror/ftpmirror
#
# The idea is to prevent temporary situations where metadata points to files
# which do not exist
#
# Exclude all metadata files
rsync ${RSYNC_OPTS} ${SOURCE_HOST}::ceph --recursive --times --links \
--hard-links \
--exclude Packages* \
--exclude Sources* \
--exclude Release* \
--exclude InRelease \
--exclude i18n/* \
--exclude ls-lR* \
--exclude repodata/* \
${TARGET}
# Now also transfer the metadata and delete afterwards
rsync ${RSYNC_OPTS} ${SOURCE_HOST}::ceph --recursive --times --links \
--hard-links --delete-after \
${TARGET}
| 2,560 | 24.356436 | 76 | sh |
null | ceph-main/mirroring/test-mirrors.sh | #!/usr/bin/env bash
#
# Simple script which performs a HTTP and rsync check on
# all Ceph mirrors over IPv4 and IPv6 to see if they are online
#
# Requires IPv4, IPv6, rsync and curl
#
# Example usage:
# - ./test-mirrors.sh eu.ceph.com,de.ceph.com,au.ceph.com
# - cat MIRRORS |cut -d ':' -f 1|xargs -n 1 ./test-mirrors.sh
#
function print_usage {
echo "Usage: $0 mirror1,mirror2,mirror3,mirror4,etc"
}
function test_http {
HOST=$1
echo -n "$HOST HTTP IPv4: "
curl -s -I -4 -o /dev/null http://$HOST
if [ "$?" -ne 0 ]; then
echo "FAIL"
else
echo "OK"
fi
echo -n "$HOST HTTP IPv6: "
curl -s -I -6 -o /dev/null http://$HOST
if [ "$?" -ne 0 ]; then
echo "FAIL"
else
echo "OK"
fi
}
function test_rsync {
HOST=$1
echo -n "$HOST RSYNC IPv4: "
rsync -4 -avrqn ${HOST}::ceph /tmp 2>/dev/null
if [ "$?" -ne 0 ]; then
echo "FAIL"
else
echo "OK"
fi
echo -n "$HOST RSYNC IPv6: "
rsync -6 -avrqn ${HOST}::ceph /tmp 2>/dev/null
if [ "$?" -ne 0 ]; then
echo "FAIL"
else
echo "OK"
fi
}
MIRRORS=$1
if [ -z "$MIRRORS" ]; then
print_usage
exit 1
fi
IFS=', ' read -r -a array <<< "$MIRRORS"
for MIRROR in "${array[@]}"; do
test_http $MIRROR
test_rsync $MIRROR
done
| 1,327 | 17.971429 | 63 | sh |
null | ceph-main/monitoring/ceph-mixin/README.md | ## Prometheus Monitoring Mixin for Ceph
A set of Grafana dashboards and Prometheus alerts for Ceph.
All the Grafana dashboards are already generated in the `dashboards_out`
directory and alerts in the `prometheus_alerts.yml` file.
You can use the Grafana dashboards and alerts with Jsonnet like any other
prometheus mixin. You can find more resources about mixins in general on
[monitoring.mixins.dev](https://monitoring.mixins.dev/).
### Grafana dashboards for Ceph
In `dashboards_out` you can find a collection of
[Grafana](https://grafana.com/grafana) dashboards for Ceph Monitoring.
These dashboards are based on metrics collected
from [prometheus](https://prometheus.io/) scraping the [prometheus mgr
plugin](http://docs.ceph.com/en/latest/mgr/prometheus/) and the
[node_exporter (0.17.0)](https://github.com/prometheus/node_exporter).
##### Recommended versions:
- grafana 8.3.5
- grafana-piechart-panel 1.6.2
- grafana-status-panel 1.0.11
#### Requirements
- [Status Panel](https://grafana.com/plugins/vonage-status-panel) installed on
your Grafana instance
- [Pie Chart Panel](https://grafana.com/grafana/plugins/grafana-piechart-panel/)
installed on your Grafana instance
### Prometheus alerts
In `prometheus_alerts.libsonnet` you'll find a set of Prometheus
alert rules that should provide a decent set of default alerts for a
Ceph cluster. After building them with jsonnet put this file in place according to your Prometheus
configuration (wherever the `rules` configuration stanza points).
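If `promtool` is available, the generated rules file can optionally be validated before it is deployed:
```
promtool check rules prometheus_alerts.yml
```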
### Multi-cluster support
Ceph-mixin supports dashboards and alerts across multiple clusters.
To enable this feature you need to configure the following in `config.libsonnet`:
```
showMultiCluster: true,
clusterLabel: '<your cluster label>',
```
##### Recommended versions:
- prometheus v2.33.4
#### SNMP
Ceph provides a MIB (CEPH-PROMETHEUS-ALERT-MIB.txt) to support sending
Prometheus alerts to an SNMP management platform. The translation from
Prometheus alert to SNMP trap requires the Prometheus alert to contain an OID
that maps to a definition within the MIB. When making changes to the Prometheus
alert rules file, developers should include any necessary changes to the MIB.
##### Recommended:
- alertmanager 0.16.2
### Building from Jsonnet
- Install [jsonnet](https://jsonnet.org/) (at least v0.18.0)
- By installing the package `jsonnet` on most distributions, or `golang-github-google-jsonnet` on Fedora
- Install [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler)
To rebuild all the generated files, you can run `tox -egrafonnet-fix`.
The jsonnet code located in this directory depends on some Jsonnet third party
libraries. To update those libraries you can run `jb update` and then update
the generated files using `tox -egrafonnet-fix`.
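Put together, a typical dependency-update and regeneration run looks like this (both commands are the ones referenced above):
```
jb update
tox -egrafonnet-fix
```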
##### Any upgrade or downgrade to different major versions of the recommended tools mentioned above is not supported.
| 2,933 | 37.103896 | 117 | md |
null | ceph-main/monitoring/ceph-mixin/jsonnet-bundler-build.sh | #!/bin/sh -ex
JSONNET_VERSION="v0.4.0"
OUTPUT_DIR=${1:-$(pwd)}
git clone -b ${JSONNET_VERSION} --depth 1 https://github.com/jsonnet-bundler/jsonnet-bundler
make -C jsonnet-bundler build
mv jsonnet-bundler/_output/jb ${OUTPUT_DIR}
| 233 | 25 | 92 | sh |
null | ceph-main/monitoring/ceph-mixin/lint-jsonnet.sh | #!/bin/sh -e
JSONNETS_FILES=$(find . -name 'vendor' -prune -o \
-name '*.jsonnet' -print -o -name '*.libsonnet' -print)
jsonnetfmt "$@" ${JSONNETS_FILES}
| 179 | 29 | 79 | sh |
null | ceph-main/monitoring/ceph-mixin/prometheus_alerts.yml | groups:
- name: "cluster health"
rules:
- alert: "CephHealthError"
annotations:
description: "The cluster state has been HEALTH_ERROR for more than 5 minutes. Please check 'ceph health detail' for more information."
summary: "Ceph is in the ERROR state"
expr: "ceph_health_status == 2"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.2.1"
severity: "critical"
type: "ceph_default"
- alert: "CephHealthWarning"
annotations:
description: "The cluster state has been HEALTH_WARN for more than 15 minutes. Please check 'ceph health detail' for more information."
summary: "Ceph is in the WARNING state"
expr: "ceph_health_status == 1"
for: "15m"
labels:
severity: "warning"
type: "ceph_default"
- name: "mon"
rules:
- alert: "CephMonDownQuorumAtRisk"
annotations:
description: "{{ $min := query \"floor(count(ceph_mon_metadata) / 2) + 1\" | first | value }}Quorum requires a majority of monitors (x {{ $min }}) to be active. Without quorum the cluster will become inoperable, affecting all services and connected clients. The following monitors are down: {{- range query \"(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)\" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down"
summary: "Monitor quorum is at risk"
expr: |
(
(ceph_health_detail{name="MON_DOWN"} == 1) * on() (
count(ceph_mon_quorum_status == 1) == bool (floor(count(ceph_mon_metadata) / 2) + 1)
)
) == 1
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.3.1"
severity: "critical"
type: "ceph_default"
- alert: "CephMonDown"
annotations:
description: |
{{ $down := query "count(ceph_mon_quorum_status == 0)" | first | value }}{{ $s := "" }}{{ if gt $down 1.0 }}{{ $s = "s" }}{{ end }}You have {{ $down }} monitor{{ $s }} down. Quorum is still intact, but the loss of an additional monitor will make your cluster inoperable. The following monitors are down: {{- range query "(ceph_mon_quorum_status == 0) + on(ceph_daemon) group_left(hostname) (ceph_mon_metadata * 0)" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down"
summary: "One or more monitors down"
expr: |
count(ceph_mon_quorum_status == 0) <= (count(ceph_mon_metadata) - floor(count(ceph_mon_metadata) / 2) + 1)
for: "30s"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephMonDiskspaceCritical"
annotations:
description: "The free space available to a monitor's store is critically low. You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; {{- range query \"ceph_mon_metadata\"}} - {{ .Labels.hostname }} {{- end }}"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-crit"
summary: "Filesystem space on at least one monitor is critically low"
expr: "ceph_health_detail{name=\"MON_DISK_CRIT\"} == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.3.2"
severity: "critical"
type: "ceph_default"
- alert: "CephMonDiskspaceLow"
annotations:
description: "The space available to a monitor's store is approaching full (>70% is the default). You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; {{- range query \"ceph_mon_metadata\"}} - {{ .Labels.hostname }} {{- end }}"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-low"
summary: "Drive space on at least one monitor is approaching full"
expr: "ceph_health_detail{name=\"MON_DISK_LOW\"} == 1"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephMonClockSkew"
annotations:
description: "Ceph monitors rely on closely synchronized time to maintain quorum and cluster consistency. This event indicates that the time on at least one mon has drifted too far from the lead mon. Review cluster status with ceph -s. This will show which monitors are affected. Check the time sync status on each monitor host with 'ceph time-sync-status' and the state and peers of your ntpd or chrony daemon."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-clock-skew"
summary: "Clock skew detected among monitors"
expr: "ceph_health_detail{name=\"MON_CLOCK_SKEW\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- name: "osd"
rules:
- alert: "CephOSDDownHigh"
annotations:
description: "{{ $value | humanize }}% or {{ with query \"count(ceph_osd_up == 0)\" }}{{ . | first | value }}{{ end }} of {{ with query \"count(ceph_osd_up)\" }}{{ . | first | value }}{{ end }} OSDs are down (>= 10%). The following OSDs are down: {{- range query \"(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0\" }} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}"
summary: "More than 10% of OSDs are down"
expr: "count(ceph_osd_up == 0) / count(ceph_osd_up) * 100 >= 10"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.1"
severity: "critical"
type: "ceph_default"
- alert: "CephOSDHostDown"
annotations:
description: "The following OSDs are down: {{- range query \"(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0\" }} - {{ .Labels.hostname }} : {{ .Labels.ceph_daemon }} {{- end }}"
summary: "An OSD host is offline"
expr: "ceph_health_detail{name=\"OSD_HOST_DOWN\"} == 1"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.8"
severity: "warning"
type: "ceph_default"
- alert: "CephOSDDown"
annotations:
description: |
{{ $num := query "count(ceph_osd_up == 0)" | first | value }}{{ $s := "" }}{{ if gt $num 1.0 }}{{ $s = "s" }}{{ end }}{{ $num }} OSD{{ $s }} down for over 5mins. The following OSD{{ $s }} {{ if eq $s "" }}is{{ else }}are{{ end }} down: {{- range query "(ceph_osd_up * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) == 0"}} - {{ .Labels.ceph_daemon }} on {{ .Labels.hostname }} {{- end }}
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-down"
summary: "An OSD has been marked down"
expr: "ceph_health_detail{name=\"OSD_DOWN\"} == 1"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.2"
severity: "warning"
type: "ceph_default"
- alert: "CephOSDNearFull"
annotations:
description: "One or more OSDs have reached the NEARFULL threshold. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-nearfull"
summary: "OSD(s) running low on free space (NEARFULL)"
expr: "ceph_health_detail{name=\"OSD_NEARFULL\"} == 1"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.3"
severity: "warning"
type: "ceph_default"
- alert: "CephOSDFull"
annotations:
description: "An OSD has reached the FULL threshold. Writes to pools that share the affected OSD will be blocked. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-full"
summary: "OSD full, writes blocked"
expr: "ceph_health_detail{name=\"OSD_FULL\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.6"
severity: "critical"
type: "ceph_default"
- alert: "CephOSDBackfillFull"
annotations:
description: "An OSD has reached the BACKFILL FULL threshold. This will prevent rebalance operations from completing. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-backfillfull"
summary: "OSD(s) too full for backfill operations"
expr: "ceph_health_detail{name=\"OSD_BACKFILLFULL\"} > 0"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephOSDTooManyRepairs"
annotations:
description: "Reads from an OSD have used a secondary PG to return data to the client, indicating a potential failing drive."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-too-many-repairs"
summary: "OSD reports a high number of read errors"
expr: "ceph_health_detail{name=\"OSD_TOO_MANY_REPAIRS\"} == 1"
for: "30s"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephOSDTimeoutsPublicNetwork"
annotations:
description: "OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network for latency or loss issues. Use 'ceph health detail' to show the affected OSDs."
summary: "Network issues delaying OSD heartbeats (public network)"
expr: "ceph_health_detail{name=\"OSD_SLOW_PING_TIME_FRONT\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephOSDTimeoutsClusterNetwork"
annotations:
description: "OSD heartbeats on the cluster's 'cluster' network (backend) are slow. Investigate the network for latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs."
summary: "Network issues delaying OSD heartbeats (cluster network)"
expr: "ceph_health_detail{name=\"OSD_SLOW_PING_TIME_BACK\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephOSDInternalDiskSizeMismatch"
annotations:
description: "One or more OSDs have an internal inconsistency between metadata and the size of the device. This could lead to the OSD(s) crashing in future. You should redeploy the affected OSDs."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-disk-size-mismatch"
summary: "OSD size inconsistency error"
expr: "ceph_health_detail{name=\"BLUESTORE_DISK_SIZE_MISMATCH\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephDeviceFailurePredicted"
annotations:
description: "The device health module has determined that one or more devices will fail soon. To review device status use 'ceph device ls'. To show a specific device use 'ceph device info <dev id>'. Mark the OSD out so that data may migrate to other OSDs. Once the OSD has drained, destroy the OSD, replace the device, and redeploy the OSD."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#id2"
summary: "Device(s) predicted to fail soon"
expr: "ceph_health_detail{name=\"DEVICE_HEALTH\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephDeviceFailurePredictionTooHigh"
annotations:
        description: "The device health module has determined that devices predicted to fail cannot be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availability. Prevent data integrity issues by adding new OSDs so that data may be relocated."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany"
summary: "Too many devices are predicted to fail, unable to resolve"
expr: "ceph_health_detail{name=\"DEVICE_HEALTH_TOOMANY\"} == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.7"
severity: "critical"
type: "ceph_default"
- alert: "CephDeviceFailureRelocationIncomplete"
annotations:
description: "The device health module has determined that one or more devices will fail soon, but the normal process of relocating the data on the device to other OSDs in the cluster is blocked. \nEnsure that the cluster has available free space. It may be necessary to add capacity to the cluster to allow data from the failing device to successfully migrate, or to enable the balancer."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-in-use"
summary: "Device failure is predicted, but unable to relocate data"
expr: "ceph_health_detail{name=\"DEVICE_HEALTH_IN_USE\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephOSDFlapping"
annotations:
description: "OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked down and back up {{ $value | humanize }} times once a minute for 5 minutes. This may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster network, or the public network if no cluster network is deployed. Check the network stats on the listed host(s)."
documentation: "https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds"
summary: "Network issues are causing OSDs to flap (mark each other down)"
expr: "(rate(ceph_osd_up[5m]) * on(ceph_daemon) group_left(hostname) ceph_osd_metadata) * 60 > 1"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.4"
severity: "warning"
type: "ceph_default"
- alert: "CephOSDReadErrors"
annotations:
description: "An OSD has encountered read errors, but the OSD has recovered by retrying the reads. This may indicate an issue with hardware or the kernel."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-spurious-read-errors"
summary: "Device read errors detected"
expr: "ceph_health_detail{name=\"BLUESTORE_SPURIOUS_READ_ERRORS\"} == 1"
for: "30s"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephPGImbalance"
annotations:
description: "OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} deviates by more than 30% from average PG count."
summary: "PGs are not balanced across OSDs"
expr: |
abs(
((ceph_osd_numpg > 0) - on (job) group_left avg(ceph_osd_numpg > 0) by (job)) /
on (job) group_left avg(ceph_osd_numpg > 0) by (job)
) * on (ceph_daemon) group_left(hostname) ceph_osd_metadata > 0.30
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.4.5"
severity: "warning"
type: "ceph_default"
- name: "mds"
rules:
- alert: "CephFilesystemDamaged"
annotations:
description: "Filesystem metadata has been corrupted. Data may be inaccessible. Analyze metrics from the MDS daemon admin socket, or escalate to support."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages"
summary: "CephFS filesystem is damaged."
expr: "ceph_health_detail{name=\"MDS_DAMAGE\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.5.1"
severity: "critical"
type: "ceph_default"
- alert: "CephFilesystemOffline"
annotations:
description: "All MDS ranks are unavailable. The MDS daemons managing metadata are down, rendering the filesystem offline."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-all-down"
summary: "CephFS filesystem is offline"
expr: "ceph_health_detail{name=\"MDS_ALL_DOWN\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.5.3"
severity: "critical"
type: "ceph_default"
- alert: "CephFilesystemDegraded"
annotations:
description: "One or more metadata daemons (MDS ranks) are failed or in a damaged state. At best the filesystem is partially available, at worst the filesystem is completely unusable."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-degraded"
summary: "CephFS filesystem is degraded"
expr: "ceph_health_detail{name=\"FS_DEGRADED\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.5.4"
severity: "critical"
type: "ceph_default"
- alert: "CephFilesystemMDSRanksLow"
annotations:
description: "The filesystem's 'max_mds' setting defines the number of MDS ranks in the filesystem. The current number of active MDS daemons is less than this value."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-up-less-than-max"
summary: "Ceph MDS daemon count is lower than configured"
expr: "ceph_health_detail{name=\"MDS_UP_LESS_THAN_MAX\"} > 0"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephFilesystemInsufficientStandby"
annotations:
        description: "The number of standby daemons available is less than the number required by standby_count_wanted. Adjust the standby count or increase the number of MDS daemons."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-insufficient-standby"
summary: "Ceph filesystem standby daemons too few"
expr: "ceph_health_detail{name=\"MDS_INSUFFICIENT_STANDBY\"} > 0"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephFilesystemFailureNoStandby"
annotations:
description: "An MDS daemon has failed, leaving only one active rank and no available standby. Investigate the cause of the failure or add a standby MDS."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-with-failed-mds"
summary: "MDS daemon failed, no further standby available"
expr: "ceph_health_detail{name=\"FS_WITH_FAILED_MDS\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.5.5"
severity: "critical"
type: "ceph_default"
- alert: "CephFilesystemReadOnly"
annotations:
description: "The filesystem has switched to READ ONLY due to an unexpected error when writing to the metadata pool. Either analyze the output from the MDS daemon admin socket, or escalate to support."
documentation: "https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages"
summary: "CephFS filesystem in read only mode due to write error(s)"
expr: "ceph_health_detail{name=\"MDS_HEALTH_READ_ONLY\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.5.2"
severity: "critical"
type: "ceph_default"
- name: "mgr"
rules:
- alert: "CephMgrModuleCrash"
annotations:
description: "One or more mgr modules have crashed and have yet to be acknowledged by an administrator. A crashed module may impact functionality within the cluster. Use the 'ceph crash' command to determine which module has failed, and archive it to acknowledge the failure."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#recent-mgr-module-crash"
summary: "A manager module has recently crashed"
expr: "ceph_health_detail{name=\"RECENT_MGR_MODULE_CRASH\"} == 1"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.6.1"
severity: "critical"
type: "ceph_default"
- alert: "CephMgrPrometheusModuleInactive"
annotations:
        description: "The mgr/prometheus module at {{ $labels.instance }} is unreachable. This could mean that the module has been disabled or the mgr daemon itself is down. Without the mgr/prometheus module, metrics and alerts will no longer function. Open a shell to an admin node or toolbox pod and use 'ceph -s' to determine whether the mgr is active. If the mgr is not active, restart it; otherwise you can determine module status with 'ceph mgr module ls'. If it is not listed as enabled, enable it with 'ceph mgr module enable prometheus'."
summary: "The mgr/prometheus module is not available"
expr: "up{job=\"ceph\"} == 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.6.2"
severity: "critical"
type: "ceph_default"
- name: "pgs"
rules:
- alert: "CephPGsInactive"
annotations:
description: "{{ $value }} PGs have been inactive for more than 5 minutes in pool {{ $labels.name }}. Inactive placement groups are not able to serve read/write requests."
summary: "One or more placement groups are inactive"
expr: "ceph_pool_metadata * on(pool_id,instance) group_left() (ceph_pg_total - ceph_pg_active) > 0"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.1"
severity: "critical"
type: "ceph_default"
- alert: "CephPGsUnclean"
annotations:
description: "{{ $value }} PGs have been unclean for more than 15 minutes in pool {{ $labels.name }}. Unclean PGs have not recovered from a previous failure."
summary: "One or more placement groups are marked unclean"
expr: "ceph_pool_metadata * on(pool_id,instance) group_left() (ceph_pg_total - ceph_pg_clean) > 0"
for: "15m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.2"
severity: "warning"
type: "ceph_default"
- alert: "CephPGsDamaged"
annotations:
description: "During data consistency checks (scrub), at least one PG has been flagged as being damaged or inconsistent. Check to see which PG is affected, and attempt a manual repair if necessary. To list problematic placement groups, use 'rados list-inconsistent-pg <pool>'. To repair PGs use the 'ceph pg repair <pg_num>' command."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-damaged"
summary: "Placement group damaged, manual intervention needed"
expr: "ceph_health_detail{name=~\"PG_DAMAGED|OSD_SCRUB_ERRORS\"} == 1"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.4"
severity: "critical"
type: "ceph_default"
- alert: "CephPGRecoveryAtRisk"
annotations:
description: "Data redundancy is at risk since one or more OSDs are at or above the 'full' threshold. Add more capacity to the cluster, restore down/out OSDs, or delete unwanted data."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-recovery-full"
summary: "OSDs are too full for recovery"
expr: "ceph_health_detail{name=\"PG_RECOVERY_FULL\"} == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.5"
severity: "critical"
type: "ceph_default"
- alert: "CephPGUnavilableBlockingIO"
annotations:
description: "Data availability is reduced, impacting the cluster's ability to service I/O. One or more placement groups (PGs) are in a state that blocks I/O."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability"
summary: "PG is unavailable, blocking I/O"
expr: "((ceph_health_detail{name=\"PG_AVAILABILITY\"} == 1) - scalar(ceph_health_detail{name=\"OSD_DOWN\"})) == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.3"
severity: "critical"
type: "ceph_default"
- alert: "CephPGBackfillAtRisk"
annotations:
description: "Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs have reached the 'backfillfull' threshold. Add more capacity, or delete unwanted data."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-backfill-full"
summary: "Backfill operations are blocked due to lack of free space"
expr: "ceph_health_detail{name=\"PG_BACKFILL_FULL\"} == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.7.6"
severity: "critical"
type: "ceph_default"
- alert: "CephPGNotScrubbed"
annotations:
description: "One or more PGs have not been scrubbed recently. Scrubs check metadata integrity, protecting against bit-rot. They check that metadata is consistent across data replicas. When PGs miss their scrub interval, it may indicate that the scrub window is too small, or PGs were not in a 'clean' state during the scrub window. You can manually initiate a scrub with: ceph pg scrub <pgid>"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-scrubbed"
summary: "Placement group(s) have not been scrubbed"
expr: "ceph_health_detail{name=\"PG_NOT_SCRUBBED\"} == 1"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephPGsHighPerOSD"
annotations:
description: "The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting).\n Check that the pg_autoscaler has not been disabled for any pools with 'ceph osd pool autoscale-status', and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide the autoscaler based on the expected relative size of the pool ('ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') or set the pg_autoscaler mode to 'warn' and adjust pg_num appropriately for one or more pools."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks/#too-many-pgs"
summary: "Placement groups per OSD is too high"
expr: "ceph_health_detail{name=\"TOO_MANY_PGS\"} == 1"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephPGNotDeepScrubbed"
annotations:
description: "One or more PGs have not been deep scrubbed recently. Deep scrubs protect against bit-rot. They compare data replicas to ensure consistency. When PGs miss their deep scrub interval, it may indicate that the window is too small or PGs were not in a 'clean' state during the deep-scrub window."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-deep-scrubbed"
summary: "Placement group(s) have not been deep scrubbed"
expr: "ceph_health_detail{name=\"PG_NOT_DEEP_SCRUBBED\"} == 1"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
- name: "nodes"
rules:
- alert: "CephNodeRootFilesystemFull"
annotations:
description: "Root volume is dangerously full: {{ $value | humanize }}% free."
summary: "Root filesystem is dangerously full"
expr: "node_filesystem_avail_bytes{mountpoint=\"/\"} / node_filesystem_size_bytes{mountpoint=\"/\"} * 100 < 5"
for: "5m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.8.1"
severity: "critical"
type: "ceph_default"
- alert: "CephNodeNetworkPacketDrops"
annotations:
description: "Node {{ $labels.instance }} experiences packet drop > 0.5% or > 10 packets/s on interface {{ $labels.device }}."
summary: "One or more NICs reports packet drops"
expr: |
(
rate(node_network_receive_drop_total{device!="lo"}[1m]) +
rate(node_network_transmit_drop_total{device!="lo"}[1m])
) / (
rate(node_network_receive_packets_total{device!="lo"}[1m]) +
rate(node_network_transmit_packets_total{device!="lo"}[1m])
) >= 0.0050000000000000001 and (
rate(node_network_receive_drop_total{device!="lo"}[1m]) +
rate(node_network_transmit_drop_total{device!="lo"}[1m])
) >= 10
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.8.2"
severity: "warning"
type: "ceph_default"
- alert: "CephNodeNetworkPacketErrors"
annotations:
description: "Node {{ $labels.instance }} experiences packet errors > 0.01% or > 10 packets/s on interface {{ $labels.device }}."
summary: "One or more NICs reports packet errors"
expr: |
(
rate(node_network_receive_errs_total{device!="lo"}[1m]) +
rate(node_network_transmit_errs_total{device!="lo"}[1m])
) / (
rate(node_network_receive_packets_total{device!="lo"}[1m]) +
rate(node_network_transmit_packets_total{device!="lo"}[1m])
) >= 0.0001 or (
rate(node_network_receive_errs_total{device!="lo"}[1m]) +
rate(node_network_transmit_errs_total{device!="lo"}[1m])
) >= 10
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.8.3"
severity: "warning"
type: "ceph_default"
- alert: "CephNodeNetworkBondDegraded"
annotations:
summary: "Degraded Bond on Node {{ $labels.instance }}"
description: "Bond {{ $labels.master }} is degraded on Node {{ $labels.instance }}."
expr: |
node_bonding_slaves - node_bonding_active != 0
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephNodeDiskspaceWarning"
annotations:
description: "Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will be full in less than 5 days based on the 48 hour trailing fill rate."
summary: "Host filesystem free space is getting low"
expr: "predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5) *on(instance) group_left(nodename) node_uname_info < 0"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.8.4"
severity: "warning"
type: "ceph_default"
- alert: "CephNodeInconsistentMTU"
annotations:
description: "Node {{ $labels.instance }} has a different MTU size ({{ $value }}) than the median of devices named {{ $labels.device }}."
summary: "MTU settings across Ceph hosts are inconsistent"
expr: "node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0) == scalar( max by (device) (node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) != quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) )or node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0) == scalar( min by (device) (node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) != quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!=\"lo\"} > 0)) )"
labels:
severity: "warning"
type: "ceph_default"
- name: "pools"
rules:
- alert: "CephPoolGrowthWarning"
annotations:
description: "Pool '{{ $labels.name }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours."
summary: "Pool growth rate may soon exceed capacity"
expr: "(predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id, instance) group_right() ceph_pool_metadata) >= 95"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.9.2"
severity: "warning"
type: "ceph_default"
- alert: "CephPoolBackfillFull"
annotations:
description: "A pool is approaching the near full threshold, which will prevent recovery/backfill operations from completing. Consider adding more capacity."
summary: "Free space in a pool is too low for recovery/backfill"
expr: "ceph_health_detail{name=\"POOL_BACKFILLFULL\"} > 0"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephPoolFull"
annotations:
description: "A pool has reached its MAX quota, or OSDs supporting the pool have reached the FULL threshold. Until this is resolved, writes to the pool will be blocked. Pool Breakdown (top 5) {{- range query \"topk(5, sort_desc(ceph_pool_percent_used * on(pool_id) group_right ceph_pool_metadata))\" }} - {{ .Labels.name }} at {{ .Value }}% {{- end }} Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>)"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#pool-full"
summary: "Pool is full - writes are blocked"
expr: "ceph_health_detail{name=\"POOL_FULL\"} > 0"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.9.1"
severity: "critical"
type: "ceph_default"
- alert: "CephPoolNearFull"
annotations:
description: "A pool has exceeded the warning (percent full) threshold, or OSDs supporting the pool have reached the NEARFULL threshold. Writes may continue, but you are at risk of the pool going read-only if more capacity isn't made available. Determine the affected pool with 'ceph df detail', looking at QUOTA BYTES and STORED. Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>). Also ensure that the balancer is active."
summary: "One or more Ceph pools are nearly full"
expr: "ceph_health_detail{name=\"POOL_NEAR_FULL\"} > 0"
for: "5m"
labels:
severity: "warning"
type: "ceph_default"
- name: "healthchecks"
rules:
- alert: "CephSlowOps"
annotations:
description: "{{ $value }} OSD requests are taking too long to process (osd_op_complaint_time exceeded)"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops"
summary: "OSD operations are slow to complete"
expr: "ceph_healthcheck_slow_ops > 0"
for: "30s"
labels:
severity: "warning"
type: "ceph_default"
- alert: "CephDaemonSlowOps"
for: "30s"
expr: "ceph_daemon_health_metrics{type=\"SLOW_OPS\"} > 0"
labels:
        severity: "warning"
        type: "ceph_default"
annotations:
summary: "{{ $labels.ceph_daemon }} operations are slow to complete"
description: "{{ $labels.ceph_daemon }} operations are taking too long to process (complaint time exceeded)"
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops"
- name: "cephadm"
rules:
- alert: "CephadmUpgradeFailed"
annotations:
        description: "The cephadm cluster upgrade process has failed. The cluster remains in an undetermined state. Please review the cephadm logs to understand the nature of the issue."
summary: "Ceph version upgrade has failed"
expr: "ceph_health_detail{name=\"UPGRADE_EXCEPTION\"} > 0"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.11.2"
severity: "critical"
type: "ceph_default"
- alert: "CephadmDaemonFailed"
annotations:
        description: "A daemon managed by cephadm is no longer active. Determine which daemon is down with 'ceph health detail'. You may start daemons with 'ceph orch daemon start <daemon_id>'."
        summary: "A ceph daemon managed by cephadm is down"
expr: "ceph_health_detail{name=\"CEPHADM_FAILED_DAEMON\"} > 0"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.11.1"
severity: "critical"
type: "ceph_default"
- alert: "CephadmPaused"
annotations:
        description: "Cluster management has been paused manually. This prevents the orchestrator from performing service management and reconciliation. If this is not intentional, resume cephadm operations with 'ceph orch resume'."
documentation: "https://docs.ceph.com/en/latest/cephadm/operations#cephadm-paused"
summary: "Orchestration tasks via cephadm are PAUSED"
expr: "ceph_health_detail{name=\"CEPHADM_PAUSED\"} > 0"
for: "1m"
labels:
severity: "warning"
type: "ceph_default"
- name: "PrometheusServer"
rules:
- alert: "PrometheusJobMissing"
annotations:
        description: "The prometheus job that scrapes from Ceph is no longer defined. This effectively means that you will have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance."
summary: "The scrape job for Ceph is missing from Prometheus"
expr: "absent(up{job=\"ceph\"})"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.12.1"
severity: "critical"
type: "ceph_default"
- name: "rados"
rules:
- alert: "CephObjectMissing"
annotations:
description: "The latest version of a RADOS object can not be found, even though all OSDs are up. I/O requests for this object from clients will block (hang). Resolving this issue may require the object to be rolled back to a prior version manually, and manually verified."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#object-unfound"
summary: "Object(s) marked UNFOUND"
expr: "(ceph_health_detail{name=\"OBJECT_UNFOUND\"} == 1) * on() (count(ceph_osd_up == 1) == bool count(ceph_osd_metadata)) == 1"
for: "30s"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.10.1"
severity: "critical"
type: "ceph_default"
- name: "generic"
rules:
- alert: "CephDaemonCrash"
annotations:
description: "One or more daemons have crashed recently, and need to be acknowledged. This notification ensures that software crashes do not go unseen. To acknowledge a crash, use the 'ceph crash archive <id>' command."
documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks/#recent-crash"
summary: "One or more Ceph daemons have crashed, and are pending acknowledgement"
expr: "ceph_health_detail{name=\"RECENT_CRASH\"} == 1"
for: "1m"
labels:
oid: "1.3.6.1.4.1.50495.1.2.1.1.2"
severity: "critical"
type: "ceph_default"
| 40,452 | 60.760305 | 658 | yml |
null | ceph-main/monitoring/ceph-mixin/test-jsonnet.sh | #!/bin/sh -e
TEMPDIR=$(mktemp -d)
BASEDIR=$(dirname "$0")
jsonnet -J vendor -m ${TEMPDIR} $BASEDIR/dashboards.jsonnet
truncate -s 0 ${TEMPDIR}/json_difference.log
for file in ${BASEDIR}/dashboards_out/*.json
do
file_name="$(basename $file)"
for generated_file in ${TEMPDIR}/*.json
do
generated_file_name="$(basename $generated_file)"
    if [ "$file_name" = "$generated_file_name" ]; then
jsondiff --indent 2 "${generated_file}" "${file}" \
| tee -a ${TEMPDIR}/json_difference.log
fi
done
done
jsonnet -J vendor -S alerts.jsonnet -o ${TEMPDIR}/prometheus_alerts.yml
jsondiff --indent 2 "prometheus_alerts.yml" "${TEMPDIR}/prometheus_alerts.yml" \
| tee -a ${TEMPDIR}/json_difference.log
err=0
if [ $(wc -l < ${TEMPDIR}/json_difference.log) -eq 0 ]
then
rm -rf ${TEMPDIR}
echo "Congratulations! Grafonnet Check Passed"
else
rm -rf ${TEMPDIR}
  echo "Grafonnet Check Failed: generated files differ from the committed files"
exit 1
fi
| 1,022 | 27.416667 | 80 | sh |
null | ceph-main/monitoring/ceph-mixin/tests_alerts/README.md |
## Alert Rule Standards
The alert rules should adhere to the following principles (a sketch of a conforming rule follows this list):
- each alert must have a unique name
- each alert should define a common structure
- labels : must contain severity and type
- annotations : must provide description
- expr : must define the promql expression
- alert : defines the alert name
- alerts that have a corresponding section within docs.ceph.com must include a
documentation field in the annotations section
- critical alerts should declare an oid in the labels section
- critical alerts should have a corresponding entry in the Ceph MIB
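As a quick reference, the sketch below shows a rule that follows these
conventions. It simply mirrors the layout of the generated rules in
`prometheus_alerts.yml`; the alert name, expression, and OID shown here are
placeholders rather than a real health check.
```yaml
groups:
  - name: "example group"
    rules:
      - alert: "CephExampleAlert"    # unique alert name (placeholder)
        annotations:
          description: "Explain what went wrong and how an operator can resolve it."
          documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks"
          summary: "One-line summary of the problem"
        expr: "ceph_health_detail{name=\"EXAMPLE_CHECK\"} == 1"    # promql expression (placeholder health check)
        for: "5m"
        labels:
          oid: "1.3.6.1.4.1.50495.1.2.1.x.y"    # critical alerts only; must have an entry in the Ceph MIB
          severity: "critical"
          type: "ceph_default"
```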
## Testing Prometheus Rules
Once you have updated the `prometheus_alerts.yml` file, you should run the
`validate_rules.py` script, either directly or via `tox`, to ensure that any
update or change aligns with our rule structure guidelines. The validate_rules.py
script processes the rules, looks for any configuration anomalies, and outputs a
report if problems are detected.
Here's an example run to illustrate the format and the kinds of issues detected.
```
[paul@myhost tests]$ ./validate_rules.py
Checking rule groups
cluster health : ..
mon : E.W..
osd : E...W......W.E..
mds : WW
mgr : WW
pgs : ..WWWW..
nodes : .EEEE
pools : EEEW.
healthchecks : .
cephadm : WW.
prometheus : W
rados : W
Summary
Rule file : ../alerts/ceph_default_alerts.yml
Unit Test file : test_alerts.yml
Rule groups processed : 12
Rules processed : 51
Rule errors : 10
Rule warnings : 16
Rule name duplicates : 0
Unit tests missing : 4
Problem Report
Group Severity Alert Name Problem Description
----- -------- ---------- -------------------
cephadm Warning Cluster upgrade has failed critical level alert is missing an SNMP oid entry
cephadm Warning A daemon managed by cephadm is down critical level alert is missing an SNMP oid entry
mds Warning Ceph Filesystem damage detected critical level alert is missing an SNMP oid entry
mds Warning Ceph Filesystem switched to READ ONLY critical level alert is missing an SNMP oid entry
mgr Warning mgr module failure critical level alert is missing an SNMP oid entry
mgr Warning mgr prometheus module is not active critical level alert is missing an SNMP oid entry
mon Error Monitor down, quorum is at risk documentation link error: #mon-downwah not found on the page
mon Warning Ceph mon disk space critically low critical level alert is missing an SNMP oid entry
nodes Error network packets dropped invalid alert structure. Missing field: for
nodes Error network packet errors invalid alert structure. Missing field: for
nodes Error storage filling up invalid alert structure. Missing field: for
nodes Error MTU Mismatch invalid alert structure. Missing field: for
osd Error 10% OSDs down invalid alert structure. Missing field: for
osd Error Flapping OSD invalid alert structure. Missing field: for
osd Warning OSD Full critical level alert is missing an SNMP oid entry
osd Warning Too many devices predicted to fail critical level alert is missing an SNMP oid entry
pgs Warning Placement Group (PG) damaged critical level alert is missing an SNMP oid entry
pgs Warning Recovery at risk, cluster too full critical level alert is missing an SNMP oid entry
pgs Warning I/O blocked to some data critical level alert is missing an SNMP oid entry
pgs Warning Cluster too full, automatic data recovery impaired critical level alert is missing an SNMP oid entry
pools Error pool full invalid alert structure. Missing field: for
pools Error pool filling up (growth forecast) invalid alert structure. Missing field: for
pools Error Ceph pool is too full for recovery/rebalance invalid alert structure. Missing field: for
pools Warning Ceph pool is full - writes blocked critical level alert is missing an SNMP oid entry
prometheus Warning Scrape job is missing critical level alert is missing an SNMP oid entry
rados Warning Data not found/missing critical level alert is missing an SNMP oid entry
Unit tests are incomplete. Tests missing for the following alerts;
- Placement Group (PG) damaged
- OSD Full
- storage filling up
- pool filling up (growth forecast)
```
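When the report lists missing unit tests, add a matching case under the `tests:`
section of `test_alerts.yml`. A minimal skeleton, modelled on the existing cases
in that file (the metric, values, and alert name below are placeholders), looks
like this:
```yaml
  - interval: 1m
    input_series:
      - series: 'ceph_health_detail{name="EXAMPLE_CHECK"}'
        values: '1 1 1 1 1 1'
    promql_expr_test:
      - expr: ceph_health_detail{name="EXAMPLE_CHECK"} == 1
        eval_time: 1m
        exp_samples:
          - labels: 'ceph_health_detail{name="EXAMPLE_CHECK"}'
            value: 1
    alert_rule_test:
      - eval_time: 5m
        alertname: CephExampleAlert
        exp_alerts:
          - exp_labels:
              severity: critical
              type: ceph_default
            exp_annotations:
              summary: "One-line summary of the problem"
```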
| 5,399 | 57.064516 | 136 | md |
null | ceph-main/monitoring/ceph-mixin/tests_alerts/__init__.py | 0 | 0 | 0 | py |
|
null | ceph-main/monitoring/ceph-mixin/tests_alerts/settings.py | import os
ALERTS_FILE = '../prometheus_alerts.yml'
UNIT_TESTS_FILE = 'test_alerts.yml'
MIB_FILE = '../../snmp/CEPH-MIB.txt'
current_dir = os.path.dirname(os.path.abspath(__file__))
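# Re-anchor the relative paths above to this file's directory so they resolve
# correctly regardless of the current working directory.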
ALERTS_FILE = os.path.join(current_dir, ALERTS_FILE)
UNIT_TESTS_FILE = os.path.join(current_dir, UNIT_TESTS_FILE)
MIB_FILE = os.path.join(current_dir, MIB_FILE)
| 345 | 27.833333 | 60 | py |
null | ceph-main/monitoring/ceph-mixin/tests_alerts/test_alerts.yml | rule_files:
- ../prometheus_alerts.yml
evaluation_interval: 5m
tests:
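 # Each case below follows the Prometheus rule unit-test layout: synthetic
 # 'input_series' feed the rules loaded from ../prometheus_alerts.yml,
 # 'promql_expr_test' entries assert on the raw expression results, and
 # 'alert_rule_test' entries assert on the labels and annotations of the
 # alerts that fire at the given eval_time.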
# health error
- interval: 5m
input_series:
- series: 'ceph_health_status{instance="ceph:9283",job="ceph"}'
values: '2 2 2 2 2 2 2'
promql_expr_test:
- expr: ceph_health_status == 2
eval_time: 5m
exp_samples:
- labels: 'ceph_health_status{instance="ceph:9283",job="ceph"}'
value: 2
alert_rule_test:
- eval_time: 1m
alertname: CephHealthError
- eval_time: 6m
alertname: CephHealthError
exp_alerts:
- exp_labels:
instance: ceph:9283
job: ceph
oid: 1.3.6.1.4.1.50495.1.2.1.2.1
type: ceph_default
severity: critical
exp_annotations:
summary: Ceph is in the ERROR state
description: The cluster state has been HEALTH_ERROR for more than 5 minutes. Please check 'ceph health detail' for more information.
# health warning
- interval: 5m
input_series:
- series: 'ceph_health_status{instance="ceph:9283",job="ceph"}'
values: '1 1 1 1 1 1 1 1 1 1'
promql_expr_test:
- expr: ceph_health_status == 1
eval_time: 15m
exp_samples:
- labels: 'ceph_health_status{instance="ceph:9283",job="ceph"}'
value: 1
alert_rule_test:
- eval_time: 10m
alertname: CephHealthWarning
- eval_time: 20m
alertname: CephHealthWarning
exp_alerts:
- exp_labels:
instance: ceph:9283
job: ceph
type: ceph_default
severity: warning
exp_annotations:
summary: Ceph is in the WARNING state
description: The cluster state has been HEALTH_WARN for more than 15 minutes. Please check 'ceph health detail' for more information.
# 10% OSDs down
- interval: 1m
input_series:
- series: 'ceph_osd_up{ceph_daemon="osd.0",instance="ceph:9283",job="ceph"}'
values: '1 1 1 1 1'
- series: 'ceph_osd_up{ceph_daemon="osd.1",instance="ceph:9283",job="ceph"}'
values: '0 0 0 0 0'
- series: 'ceph_osd_up{ceph_daemon="osd.2",instance="ceph:9283",job="ceph"}'
values: '1 1 1 1 1'
- series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.0",
ceph_version="ceph version 17.0.0-189-g3558fd72
(3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",
cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",
public_addr="172.20.0.2"}'
values: '1 1 1 1 1'
- series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.1",
ceph_version="ceph version 17.0.0-189-g3558fd72
(3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",
cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",
public_addr="172.20.0.2"}'
values: '1 1 1 1 1'
- series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.2",
ceph_version="ceph version 17.0.0-189-g3558fd72
(3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",
cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",
public_addr="172.20.0.2"}'
values: '1 1 1 1 1'
promql_expr_test:
- expr: count(ceph_osd_up == 0) / count(ceph_osd_up) * 100 >= 10
eval_time: 1m
exp_samples:
- labels: '{}'
value: 3.333333333333333E+01
alert_rule_test:
- eval_time: 1m
alertname: CephOSDDownHigh
exp_alerts:
- exp_labels:
oid: 1.3.6.1.4.1.50495.1.2.1.4.1
type: ceph_default
severity: critical
exp_annotations:
summary: More than 10% of OSDs are down
description: "33.33% or 1 of 3 OSDs are down (>= 10%). The following OSDs are down: - osd.1 on ceph"
# flapping OSD
- interval: 1s
input_series:
- series: 'ceph_osd_up{ceph_daemon="osd.0",instance="ceph:9283",job="ceph"}'
values: '1+1x100'
- series: 'ceph_osd_up{ceph_daemon="osd.1",instance="ceph:9283",job="ceph"}'
values: '1+0x100'
- series: 'ceph_osd_up{ceph_daemon="osd.2",instance="ceph:9283",job="ceph"}'
values: '1+0x100'
- series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.0",
ceph_version="ceph version 17.0.0-189-g3558fd72
(3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",
cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",
public_addr="172.20.0.2"}'
values: '1 1 1 1 1 1'
- series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.1",
ceph_version="ceph version 17.0.0-189-g3558fd72
(3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",
cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",
public_addr="172.20.0.2"}'
values: '1 1 1 1 1 1'
- series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.2",
ceph_version="ceph version 17.0.0-189-g3558fd72
(3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",
cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",
public_addr="172.20.0.2"}'
values: '1 1 1 1 1 1'
promql_expr_test:
- expr: |
(
rate(ceph_osd_up[5m])
* on(ceph_daemon) group_left(hostname) ceph_osd_metadata
) * 60 > 1
eval_time: 1m
exp_samples:
- labels: '{ceph_daemon="osd.0", hostname="ceph", instance="ceph:9283",
job="ceph"}'
value: 1.2200000000000001E+01
alert_rule_test:
- eval_time: 5m
alertname: CephOSDFlapping
exp_alerts:
- exp_labels:
ceph_daemon: osd.0
hostname: ceph
instance: ceph:9283
job: ceph
oid: 1.3.6.1.4.1.50495.1.2.1.4.4
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds
summary: Network issues are causing OSDs to flap (mark each other down)
description: "OSD osd.0 on ceph was marked down and back up 20.1 times once a minute for 5 minutes. This may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster network, or the public network if no cluster network is deployed. Check the network stats on the listed host(s)."
# high pg count deviation
- interval: 1m
input_series:
- series: 'ceph_osd_numpg{ceph_daemon="osd.0",instance="ceph:9283",
job="ceph"}'
values: '100 100 100 100 100 160'
- series: 'ceph_osd_numpg{ceph_daemon="osd.1",instance="ceph:9283",
job="ceph"}'
values: '100 100 100 100 100 320'
- series: 'ceph_osd_numpg{ceph_daemon="osd.2",instance="ceph:9283",
job="ceph"}'
values: '100 100 100 100 100 160'
- series: 'ceph_osd_numpg{ceph_daemon="osd.3",instance="ceph:9283",
job="ceph"}'
values: '100 100 100 100 100 160'
- series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.0",
ceph_version="ceph version 17.0.0-189-g3558fd72
(3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",
cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",
public_addr="172.20.0.2"}'
values: '1 1 1 1 1 1'
- series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.1",
ceph_version="ceph version 17.0.0-189-g3558fd72
(3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",
cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",
public_addr="172.20.0.2"}'
values: '1 1 1 1 1 1'
- series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.2",
ceph_version="ceph version 17.0.0-189-g3558fd72
(3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",
cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",
public_addr="172.20.0.2"}'
values: '1 1 1 1 1 1'
- series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.3",
ceph_version="ceph version 17.0.0-189-g3558fd72
(3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",
cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",
public_addr="172.20.0.2"}'
values: '1 1 1 1 1 1'
promql_expr_test:
- expr: |
abs(
(
(ceph_osd_numpg > 0) - on (job) group_left avg(ceph_osd_numpg > 0)
by (job)
) / on (job) group_left avg(ceph_osd_numpg > 0) by (job)
) * on(ceph_daemon) group_left(hostname) ceph_osd_metadata > 0.30
eval_time: 5m
exp_samples:
- labels: '{ceph_daemon="osd.1", hostname="ceph", instance="ceph:9283",
job="ceph"}'
value: 6E-01
alert_rule_test:
- eval_time: 10m
alertname: CephPGImbalance
exp_alerts:
- exp_labels:
ceph_daemon: osd.1
hostname: ceph
instance: ceph:9283
job: ceph
oid: 1.3.6.1.4.1.50495.1.2.1.4.5
severity: warning
type: ceph_default
exp_annotations:
summary: PGs are not balanced across OSDs
description: "OSD osd.1 on ceph deviates by more than 30% from average PG count."
# pgs inactive
- interval: 1m
input_series:
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="device_health_metrics",pool_id="1"}'
values: '1 1 1 1 1 1 1 1'
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="device_health_metrics",pool_id="2"}'
values: '1 1 1 1 1 1 1 1'
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="device_health_metrics",pool_id="3"}'
values: '1 1 1 1 1 1 1 1'
- series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="1"}'
values: '1 1 1 1 1 1 1 1'
- series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="2"}'
values: '32 32 32 32 32 32 32 32'
- series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="3"}'
values: '33 32 32 32 32 33 33 32'
- series: 'ceph_pg_active{instance="ceph:9283",job="ceph",pool_id="1"}'
values: '1 1 1 1 1 1 1 1 1'
- series: 'ceph_pg_active{instance="ceph:9283",job="ceph",pool_id="2"}'
values: '32 32 32 32 32 32 32 32'
- series: 'ceph_pg_active{instance="ceph:9283",job="ceph",pool_id="3"}'
values: '32 32 32 32 32 32 32 32'
promql_expr_test:
- expr: ceph_pool_metadata * on(pool_id,instance) group_left()
(ceph_pg_total - ceph_pg_active) > 0
eval_time: 5m
exp_samples:
- labels: '{instance="ceph:9283", job="ceph",
name="device_health_metrics",
pool_id="3"}'
value: 1
alert_rule_test:
- eval_time: 5m
alertname: CephPGsInactive
exp_alerts:
- exp_labels:
instance: ceph:9283
job: ceph
name: device_health_metrics
oid: 1.3.6.1.4.1.50495.1.2.1.7.1
pool_id: 3
severity: critical
type: ceph_default
exp_annotations:
summary: One or more placement groups are inactive
description: "1 PGs have been inactive for more than 5 minutes in pool device_health_metrics. Inactive placement groups are not able to serve read/write requests."
 # pgs unclean
- interval: 1m
input_series:
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="device_health_metrics",pool_id="1"}'
values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="device_health_metrics",pool_id="2"}'
values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="device_health_metrics",pool_id="3"}'
values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="1"}'
values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="2"}'
values: '32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32
32 32 32'
- series: 'ceph_pg_total{instance="ceph:9283",job="ceph",pool_id="3"}'
values: '33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33
33 33'
- series: 'ceph_pg_clean{instance="ceph:9283",job="ceph",pool_id="1"}'
values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_pg_clean{instance="ceph:9283",job="ceph",pool_id="2"}'
values: '32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32
32 32'
- series: 'ceph_pg_clean{instance="ceph:9283",job="ceph",pool_id="3"}'
values: '32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32
32 32'
promql_expr_test:
- expr: ceph_pool_metadata * on(pool_id,instance) group_left()
(ceph_pg_total - ceph_pg_clean) > 0
eval_time: 15m
exp_samples:
- labels: '{instance="ceph:9283", job="ceph",
name="device_health_metrics", pool_id="3"}'
value: 1
alert_rule_test:
- eval_time: 16m
alertname: CephPGsUnclean
exp_alerts:
- exp_labels:
instance: ceph:9283
job: ceph
name: device_health_metrics
oid: 1.3.6.1.4.1.50495.1.2.1.7.2
pool_id: 3
severity: warning
type: ceph_default
exp_annotations:
summary: One or more placement groups are marked unclean
description: "1 PGs have been unclean for more than 15 minutes in pool device_health_metrics. Unclean PGs have not recovered from a previous failure."
# root volume full
- interval: 1m
input_series:
- series: 'node_filesystem_avail_bytes{device="/dev/mapper/fedora_localhost
--live-home",fstype="ext4",instance="node-exporter",job="node-exporter",
mountpoint="/"}'
values: '35336400896 35336400896 35336400896 35336400896 35336400896
3525385519.104 3533640089'
- series: 'node_filesystem_size_bytes{device="/dev/mapper/fedora_localhost
--live-home",fstype="ext4",instance="node-exporter",job="node-exporter",
mountpoint="/"}'
values: '73445531648 73445531648 73445531648 73445531648 73445531648
73445531648 73445531648'
promql_expr_test:
- expr: node_filesystem_avail_bytes{mountpoint="/"} /
node_filesystem_size_bytes{mountpoint="/"} * 100 < 5
eval_time: 5m
exp_samples:
- labels: '{device="/dev/mapper/fedora_localhost --live-home",
fstype="ext4", instance="node-exporter", job="node-exporter",
mountpoint="/"}'
value: 4.8E+00
alert_rule_test:
- eval_time: 10m
alertname: CephNodeRootFilesystemFull
exp_alerts:
- exp_labels:
device: /dev/mapper/fedora_localhost --live-home
fstype: ext4
instance: node-exporter
job: node-exporter
mountpoint: /
oid: 1.3.6.1.4.1.50495.1.2.1.8.1
severity: critical
type: ceph_default
exp_annotations:
summary: Root filesystem is dangerously full
description: "Root volume is dangerously full: 4.811% free."
# network packets dropped
- interval: 1m
input_series:
- series: 'node_network_receive_drop_total{device="eth0",
instance="node-exporter",job="node-exporter"}'
values: '0+600x10'
- series: 'node_network_transmit_drop_total{device="eth0",
instance="node-exporter",job="node-exporter"}'
values: '0+600x10'
- series: 'node_network_receive_packets_total{device="eth0",
instance="node-exporter",job="node-exporter"}'
values: '0+750x10'
- series: 'node_network_transmit_packets_total{device="eth0",
instance="node-exporter",job="node-exporter"}'
values: '0+750x10'
promql_expr_test:
- expr: |
(
rate(node_network_receive_drop_total{device!="lo"}[1m]) +
rate(node_network_transmit_drop_total{device!="lo"}[1m])
) / (
rate(node_network_receive_packets_total{device!="lo"}[1m]) +
rate(node_network_transmit_packets_total{device!="lo"}[1m])
) >= 0.0050000000000000001 and (
rate(node_network_receive_drop_total{device!="lo"}[1m]) +
rate(node_network_transmit_drop_total{device!="lo"}[1m])
) >= 10
eval_time: 5m
exp_samples:
- labels: '{device="eth0", instance="node-exporter",
job="node-exporter"}'
value: 8E-1
alert_rule_test:
- eval_time: 5m
alertname: CephNodeNetworkPacketDrops
exp_alerts:
- exp_labels:
device: eth0
instance: node-exporter
job: node-exporter
oid: 1.3.6.1.4.1.50495.1.2.1.8.2
severity: warning
type: ceph_default
exp_annotations:
summary: One or more NICs reports packet drops
description: "Node node-exporter experiences packet drop > 0.5% or > 10 packets/s on interface eth0."
# network packets errors
- interval: 1m
input_series:
- series: 'node_network_receive_errs_total{device="eth0",
instance="node-exporter",job="node-exporter"}'
values: '0+600x10'
- series: 'node_network_transmit_errs_total{device="eth0",
instance="node-exporter",job="node-exporter"}'
values: '0+600x10'
- series: 'node_network_transmit_packets_total{device="eth0",
instance="node-exporter",job="node-exporter"}'
values: '0+750x10'
- series: 'node_network_receive_packets_total{device="eth0",
instance="node-exporter",job="node-exporter"}'
values: '0+750x10'
promql_expr_test:
- expr: |
(
rate(node_network_receive_errs_total{device!="lo"}[1m]) +
rate(node_network_transmit_errs_total{device!="lo"}[1m])
) / (
rate(node_network_receive_packets_total{device!="lo"}[1m]) +
rate(node_network_transmit_packets_total{device!="lo"}[1m])
) >= 0.0001 or (
rate(node_network_receive_errs_total{device!="lo"}[1m]) +
rate(node_network_transmit_errs_total{device!="lo"}[1m])
) >= 10
eval_time: 5m
exp_samples:
- labels: '{device="eth0", instance="node-exporter",
job="node-exporter"}'
value: 8E-01
alert_rule_test:
- eval_time: 5m
alertname: CephNodeNetworkPacketErrors
exp_alerts:
- exp_labels:
device: eth0
instance: node-exporter
job: node-exporter
oid: 1.3.6.1.4.1.50495.1.2.1.8.3
severity: warning
type: ceph_default
exp_annotations:
summary: One or more NICs reports packet errors
description: "Node node-exporter experiences packet errors > 0.01% or > 10 packets/s on interface eth0."
# Bond is missing a peer
- interval: 1m
input_series:
- series: 'node_bonding_active{master="bond0",
instance="node-exporter",job="node-exporter"}'
values: '3'
- series: 'node_bonding_slaves{master="bond0",
instance="node-exporter",job="node-exporter"}'
values: '4'
promql_expr_test:
- expr: |
node_bonding_slaves - node_bonding_active != 0
eval_time: 5m
exp_samples:
- labels: '{master="bond0", instance="node-exporter",
job="node-exporter"}'
value: 1
alert_rule_test:
- eval_time: 5m
alertname: CephNodeNetworkBondDegraded
exp_alerts:
- exp_labels:
master: bond0
instance: node-exporter
job: node-exporter
severity: warning
type: ceph_default
exp_annotations:
summary: Degraded Bond on Node node-exporter
description: "Bond bond0 is degraded on Node node-exporter."
# Node Storage disk space filling up
- interval: 1m
# 20GB = 21474836480, 256MB = 268435456
input_series:
- series: 'node_filesystem_free_bytes{device="/dev/mapper/vg-root",
fstype="xfs",instance="node-1",mountpoint="/rootfs"}'
values: '21474836480-268435456x48'
- series: 'node_filesystem_free_bytes{device="/dev/mapper/vg-root",
fstype="xfs",instance="node-2",mountpoint="/rootfs"}'
values: '21474836480+0x48'
- series: 'node_uname_info{instance="node-1", nodename="node-1.unittests.com"}'
values: 1+0x48
- series: 'node_uname_info{instance="node-2", nodename="node-2.unittests.com"}'
values: 1+0x48
promql_expr_test:
- expr: |
predict_linear(node_filesystem_free_bytes{device=~"/.*"}[2d], 3600 * 24 * 5) *
on(instance) group_left(nodename) node_uname_info < 0
eval_time: 5m
exp_samples:
- labels: '{device="/dev/mapper/vg-root",instance="node-1",fstype="xfs",
mountpoint="/rootfs",nodename="node-1.unittests.com"}'
value: -1.912602624E+12
alert_rule_test:
- eval_time: 5m
alertname: CephNodeDiskspaceWarning
exp_alerts:
- exp_labels:
severity: warning
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.8.4
device: /dev/mapper/vg-root
fstype: xfs
instance: node-1
mountpoint: /rootfs
nodename: node-1.unittests.com
exp_annotations:
summary: Host filesystem free space is getting low
description: "Mountpoint /rootfs on node-1.unittests.com will be full in less than 5 days based on the 48 hour trailing fill rate."
# MTU Mismatch
- interval: 1m
input_series:
- series: 'node_network_mtu_bytes{device="eth0",instance="node-exporter",
job="node-exporter"}'
values: '1500 1500 1500 1500 1500'
- series: 'node_network_mtu_bytes{device="eth1",instance="node-exporter",
job="node-exporter"}'
values: '1500 1500 1500 1500 1500'
- series: 'node_network_mtu_bytes{device="eth2",instance="node-exporter",
job="node-exporter"}'
values: '1500 1500 1500 1500 1500'
- series: 'node_network_mtu_bytes{device="eth3",instance="node-exporter",
job="node-exporter"}'
values: '1500 1500 1500 1500 1500'
- series: 'node_network_mtu_bytes{device="eth4",instance="node-exporter",
job="node-exporter"}'
values: '9000 9000 9000 9000 9000'
- series: 'node_network_mtu_bytes{device="eth4",instance="hostname1",
job="node-exporter"}'
values: '2200 2200 2200 2200 2200'
- series: 'node_network_mtu_bytes{device="eth4",instance="hostname2",
job="node-exporter"}'
values: '2400 2400 2400 2400 2400'
- series: 'node_network_up{device="eth0",instance="node-exporter",
job="node-exporter"}'
values: '0 0 0 0 0'
- series: 'node_network_up{device="eth1",instance="node-exporter",
job="node-exporter"}'
values: '0 0 0 0 0'
- series: 'node_network_up{device="eth2",instance="node-exporter",
job="node-exporter"}'
values: '1 1 1 1 1'
- series: 'node_network_up{device="eth3",instance="node-exporter",
job="node-exporter"}'
values: '1 1 1 1 1'
- series: 'node_network_up{device="eth4",instance="node-exporter",
job="node-exporter"}'
values: '1 1 1 1 1'
- series: 'node_network_up{device="eth4",instance="hostname1",
job="node-exporter"}'
values: '1 1 1 1 1'
- series: 'node_network_up{device="eth4",instance="hostname2",
job="node-exporter"}'
values: '0 0 0 0 0'
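      # Only interfaces reported as up are compared: eth0/eth1 and hostname2's eth4
      # (down, MTU 2400) are excluded, while the two up eth4 devices with MTU 9000 and
      # 2200 deviate from the eth4 median and should both fire.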
promql_expr_test:
- expr: |
node_network_mtu_bytes * (node_network_up{device!="lo"} > 0) ==
scalar(
max by (device) (node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) !=
quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!="lo"} > 0))
)
or
node_network_mtu_bytes * (node_network_up{device!="lo"} > 0) ==
scalar(
min by (device) (node_network_mtu_bytes * (node_network_up{device!="lo"} > 0)) !=
quantile by (device) (.5, node_network_mtu_bytes * (node_network_up{device!="lo"} > 0))
)
eval_time: 1m
exp_samples:
- labels: '{device="eth4", instance="node-exporter", job="node-exporter"}'
value: 9000
- labels: '{device="eth4", instance="hostname1", job="node-exporter"}'
value: 2200
alert_rule_test:
- eval_time: 1m
alertname: CephNodeInconsistentMTU
exp_alerts:
- exp_labels:
device: eth4
instance: hostname1
job: node-exporter
severity: warning
type: ceph_default
exp_annotations:
summary: MTU settings across Ceph hosts are inconsistent
description: "Node hostname1 has a different MTU size (2200) than the median of devices named eth4."
- exp_labels:
device: eth4
instance: node-exporter
job: node-exporter
severity: warning
type: ceph_default
exp_annotations:
summary: MTU settings across Ceph hosts are inconsistent
description: "Node node-exporter has a different MTU size (9000) than the median of devices named eth4."
  # Pool full. The input data has 6 pools but the alert description only lists the
  # topk(5), so this also verifies that the smallest pool is excluded from the breakdown.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="POOL_FULL"}'
values: '0 0 0 1 1 1 1 1 1 1 1'
- series: 'ceph_pool_percent_used{pool_id="1"}'
values: '32+0x10'
- series: 'ceph_pool_percent_used{pool_id="2"}'
values: '96+0x10'
- series: 'ceph_pool_percent_used{pool_id="3"}'
values: '90+0x10'
- series: 'ceph_pool_percent_used{pool_id="4"}'
values: '72+0x10'
- series: 'ceph_pool_percent_used{pool_id="5"}'
values: '19+0x10'
- series: 'ceph_pool_percent_used{pool_id="6"}'
values: '10+0x10'
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="cephfs_data",pool_id="1"}'
values: '1 1 1 1 1 1 1 1 1'
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="rbd",pool_id="2"}'
values: '1 1 1 1 1 1 1 1 1'
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="iscsi",pool_id="3"}'
values: '1 1 1 1 1 1 1 1 1'
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="default.rgw.index",pool_id="4"}'
values: '1 1 1 1 1 1 1 1 1'
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="default.rgw.log",pool_id="5"}'
values: '1 1 1 1 1 1 1 1 1'
- series: 'ceph_pool_metadata{instance="ceph:9283",job="ceph",
name="dummy",pool_id="6"}'
values: '1 1 1 1 1 1 1 1 1'
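      # 'dummy' (pool 6, 10% used) is the smallest pool and must be dropped from the
      # topk(5) breakdown in the alert description.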
promql_expr_test:
- expr: ceph_health_detail{name="POOL_FULL"} > 0
eval_time: 5m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="POOL_FULL"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephPoolFull
- eval_time: 10m
alertname: CephPoolFull
exp_alerts:
- exp_labels:
name: POOL_FULL
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.9.1
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pool-full
summary: Pool is full - writes are blocked
description: "A pool has reached its MAX quota, or OSDs supporting the pool have reached the FULL threshold. Until this is resolved, writes to the pool will be blocked. Pool Breakdown (top 5) - rbd at 96% - iscsi at 90% - default.rgw.index at 72% - cephfs_data at 32% - default.rgw.log at 19% Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>)"
# slow OSD ops
- interval : 1m
input_series:
- series: 'ceph_healthcheck_slow_ops{instance="ceph:9283",job="ceph"}'
values: '1+0x120'
promql_expr_test:
- expr: ceph_healthcheck_slow_ops > 0
eval_time: 1m
exp_samples:
- labels: '{__name__="ceph_healthcheck_slow_ops", instance="ceph:9283",
job="ceph"}'
value: 1
alert_rule_test:
- eval_time: 20m
alertname: CephSlowOps
exp_alerts:
- exp_labels:
instance: ceph:9283
job: ceph
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops
summary: OSD operations are slow to complete
description: "1 OSD requests are taking too long to process (osd_op_complaint_time exceeded)"
# slow daemon ops
- interval : 1m
input_series:
- series: 'ceph_daemon_health_metrics{ceph_daemon="osd.1", instance="ceph:9283",job="ceph", type="SLOW_OPS"}'
values: '1+0x120'
promql_expr_test:
- expr: 'ceph_daemon_health_metrics{type="SLOW_OPS"} > 0'
eval_time: 1m
exp_samples:
- labels: '{__name__="ceph_daemon_health_metrics", ceph_daemon="osd.1",instance="ceph:9283",
job="ceph", type="SLOW_OPS"}'
value: 1
alert_rule_test:
- eval_time: 20m
alertname: CephDaemonSlowOps
exp_alerts:
- exp_labels:
instance: ceph:9283
ceph_daemon: "osd.1"
job: ceph
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops
summary: osd.1 operations are slow to complete
description: "osd.1 operations are taking too long to process (complaint time exceeded)"
# CEPHADM orchestrator alert triggers
- interval: 30s
input_series:
- series: 'ceph_health_detail{name="UPGRADE_EXCEPTION"}'
values: '1+0x40'
promql_expr_test:
- expr: ceph_health_detail{name="UPGRADE_EXCEPTION"} > 0
eval_time: 2m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="UPGRADE_EXCEPTION"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephadmUpgradeFailed
- eval_time: 5m
alertname: CephadmUpgradeFailed
exp_alerts:
- exp_labels:
name: UPGRADE_EXCEPTION
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.11.2
exp_annotations:
summary: Ceph version upgrade has failed
description: "The cephadm cluster upgrade process has failed. The cluster remains in an undetermined state. Please review the cephadm logs, to understand the nature of the issue"
- interval: 30s
input_series:
- series: 'ceph_health_detail{name="CEPHADM_FAILED_DAEMON"}'
values: '1+0x40'
promql_expr_test:
- expr: ceph_health_detail{name="CEPHADM_FAILED_DAEMON"} > 0
eval_time: 2m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="CEPHADM_FAILED_DAEMON"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephadmDaemonFailed
- eval_time: 5m
alertname: CephadmDaemonFailed
exp_alerts:
- exp_labels:
name: CEPHADM_FAILED_DAEMON
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.11.1
exp_annotations:
summary: A ceph daemon manged by cephadm is down
description: "A daemon managed by cephadm is no longer active. Determine, which daemon is down with 'ceph health detail'. you may start daemons with the 'ceph orch daemon start <daemon_id>'"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="CEPHADM_PAUSED"}'
values: '1 1 1 1 1 1 1 1 1'
promql_expr_test:
- expr: ceph_health_detail{name="CEPHADM_PAUSED"} > 0
eval_time: 2m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="CEPHADM_PAUSED"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephadmPaused
- eval_time: 5m
alertname: CephadmPaused
exp_alerts:
- exp_labels:
name: CEPHADM_PAUSED
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephadm/operations#cephadm-paused
summary: Orchestration tasks via cephadm are PAUSED
description: "Cluster management has been paused manually. This will prevent the orchestrator from service management and reconciliation. If this is not intentional, resume cephadm operations with 'ceph orch resume'"
# MDS
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MDS_DAMAGE"}'
values: '1 1 1 1 1 1 1 1 1'
promql_expr_test:
- expr: ceph_health_detail{name="MDS_DAMAGE"} > 0
eval_time: 2m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="MDS_DAMAGE"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephFilesystemDamaged
- eval_time: 5m
alertname: CephFilesystemDamaged
exp_alerts:
- exp_labels:
name: MDS_DAMAGE
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.5.1
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages
summary: CephFS filesystem is damaged.
description: "Filesystem metadata has been corrupted. Data may be inaccessible. Analyze metrics from the MDS daemon admin socket, or escalate to support."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MDS_HEALTH_READ_ONLY"}'
values: '1 1 1 1 1 1 1 1 1'
promql_expr_test:
- expr: ceph_health_detail{name="MDS_HEALTH_READ_ONLY"} > 0
eval_time: 2m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="MDS_HEALTH_READ_ONLY"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephFilesystemReadOnly
- eval_time: 5m
alertname: CephFilesystemReadOnly
exp_alerts:
- exp_labels:
name: MDS_HEALTH_READ_ONLY
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.5.2
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages#cephfs-health-messages
summary: CephFS filesystem in read only mode due to write error(s)
description: "The filesystem has switched to READ ONLY due to an unexpected error when writing to the metadata pool. Either analyze the output from the MDS daemon admin socket, or escalate to support."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MDS_ALL_DOWN"}'
values: '0 0 1 1 1 1 1 1 1 1 1'
promql_expr_test:
- expr: ceph_health_detail{name="MDS_ALL_DOWN"} > 0
eval_time: 2m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="MDS_ALL_DOWN"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephFilesystemOffline
- eval_time: 10m
alertname: CephFilesystemOffline
exp_alerts:
- exp_labels:
name: MDS_ALL_DOWN
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.5.3
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-all-down
summary: CephFS filesystem is offline
description: "All MDS ranks are unavailable. The MDS daemons managing metadata are down, rendering the filesystem offline."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="FS_DEGRADED"}'
values: '0 0 1 1 1 1 1 1 1 1 1'
promql_expr_test:
- expr: ceph_health_detail{name="FS_DEGRADED"} > 0
eval_time: 2m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="FS_DEGRADED"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephFilesystemDegraded
- eval_time: 10m
alertname: CephFilesystemDegraded
exp_alerts:
- exp_labels:
name: FS_DEGRADED
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.5.4
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-degraded
summary: CephFS filesystem is degraded
description: "One or more metadata daemons (MDS ranks) are failed or in a damaged state. At best the filesystem is partially available, at worst the filesystem is completely unusable."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MDS_INSUFFICIENT_STANDBY"}'
values: '0 0 1 1 1 1 1 1 1 1 1'
promql_expr_test:
- expr: ceph_health_detail{name="MDS_INSUFFICIENT_STANDBY"} > 0
eval_time: 2m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="MDS_INSUFFICIENT_STANDBY"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephFilesystemInsufficientStandby
- eval_time: 10m
alertname: CephFilesystemInsufficientStandby
exp_alerts:
- exp_labels:
name: MDS_INSUFFICIENT_STANDBY
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-insufficient-standby
summary: Ceph filesystem standby daemons too few
description: "The minimum number of standby daemons required by standby_count_wanted is less than the current number of standby daemons. Adjust the standby count or increase the number of MDS daemons."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="FS_WITH_FAILED_MDS"}'
values: '0 0 1 1 1 1 1 1 1 1 1'
promql_expr_test:
- expr: ceph_health_detail{name="FS_WITH_FAILED_MDS"} > 0
eval_time: 2m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="FS_WITH_FAILED_MDS"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephFilesystemFailureNoStandby
- eval_time: 10m
alertname: CephFilesystemFailureNoStandby
exp_alerts:
- exp_labels:
name: FS_WITH_FAILED_MDS
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.5.5
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#fs-with-failed-mds
summary: MDS daemon failed, no further standby available
description: "An MDS daemon has failed, leaving only one active rank and no available standby. Investigate the cause of the failure or add a standby MDS."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MDS_UP_LESS_THAN_MAX"}'
values: '0 0 1 1 1 1 1 1 1 1 1'
promql_expr_test:
- expr: ceph_health_detail{name="MDS_UP_LESS_THAN_MAX"} > 0
eval_time: 2m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="MDS_UP_LESS_THAN_MAX"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephFilesystemMDSRanksLow
- eval_time: 10m
alertname: CephFilesystemMDSRanksLow
exp_alerts:
- exp_labels:
name: MDS_UP_LESS_THAN_MAX
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/cephfs/health-messages/#mds-up-less-than-max
summary: Ceph MDS daemon count is lower than configured
description: "The filesystem's 'max_mds' setting defines the number of MDS ranks in the filesystem. The current number of active MDS daemons is less than this value."
# MGR
- interval: 1m
input_series:
- series: 'up{job="ceph", instance="ceph-mgr:9283"}'
values: '1+0x2 0+0x10'
promql_expr_test:
- expr: up{job="ceph"} == 0
eval_time: 3m
exp_samples:
- labels: '{__name__="up", job="ceph", instance="ceph-mgr:9283"}'
value: 0
alert_rule_test:
- eval_time: 1m
alertname: CephMgrPrometheusModuleInactive
- eval_time: 10m
alertname: CephMgrPrometheusModuleInactive
exp_alerts:
- exp_labels:
instance: ceph-mgr:9283
job: ceph
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.6.2
exp_annotations:
summary: The mgr/prometheus module is not available
description: "The mgr/prometheus module at ceph-mgr:9283 is unreachable. This could mean that the module has been disabled or the mgr daemon itself is down. Without the mgr/prometheus module metrics and alerts will no longer function. Open a shell to an admin node or toolbox pod and use 'ceph -s' to to determine whether the mgr is active. If the mgr is not active, restart it, otherwise you can determine module status with 'ceph mgr module ls'. If it is not listed as enabled, enable it with 'ceph mgr module enable prometheus'."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="RECENT_MGR_MODULE_CRASH"}'
values: '0+0x2 1+0x20'
promql_expr_test:
- expr: ceph_health_detail{name="RECENT_MGR_MODULE_CRASH"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="RECENT_MGR_MODULE_CRASH"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephMgrModuleCrash
- eval_time: 15m
alertname: CephMgrModuleCrash
exp_alerts:
- exp_labels:
name: RECENT_MGR_MODULE_CRASH
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.6.1
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#recent-mgr-module-crash
summary: A manager module has recently crashed
description: "One or more mgr modules have crashed and have yet to be acknowledged by an administrator. A crashed module may impact functionality within the cluster. Use the 'ceph crash' command to determine which module has failed, and archive it to acknowledge the failure."
# MON
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MON_DISK_CRIT"}'
values: '0+0x2 1+0x10'
- series: 'ceph_mon_metadata{ceph_daemon="mon.a", hostname="ceph-mon-a"}'
values: '1+0x13'
promql_expr_test:
- expr: ceph_health_detail{name="MON_DISK_CRIT"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="MON_DISK_CRIT"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephMonDiskspaceCritical
- eval_time: 10m
alertname: CephMonDiskspaceCritical
exp_alerts:
- exp_labels:
name: "MON_DISK_CRIT"
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.3.2
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-crit
summary: Filesystem space on at least one monitor is critically low
description: "The free space available to a monitor's store is critically low. You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; - ceph-mon-a"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MON_DISK_LOW"}'
values: '0+0x2 1+0x10'
- series: 'ceph_mon_metadata{ceph_daemon="mon.a", hostname="ceph-mon-a"}'
values: '1+0x13'
promql_expr_test:
- expr: ceph_health_detail{name="MON_DISK_LOW"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="MON_DISK_LOW"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephMonDiskspaceLow
- eval_time: 10m
alertname: CephMonDiskspaceLow
exp_alerts:
- exp_labels:
name: "MON_DISK_LOW"
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-disk-low
summary: Drive space on at least one monitor is approaching full
description: "The space available to a monitor's store is approaching full (>70% is the default). You should increase the space available to the monitor(s). The default directory is /var/lib/ceph/mon-*/data/store.db on traditional deployments, and /var/lib/rook/mon-*/data/store.db on the mon pod's worker node for Rook. Look for old, rotated versions of *.log and MANIFEST*. Do NOT touch any *.sst files. Also check any other directories under /var/lib/rook and other directories on the same filesystem, often /var/log and /var/tmp are culprits. Your monitor hosts are; - ceph-mon-a"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MON_CLOCK_SKEW"}'
values: '0+0x2 1+0x10'
promql_expr_test:
- expr: ceph_health_detail{name="MON_CLOCK_SKEW"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="MON_CLOCK_SKEW"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephMonClockSkew
- eval_time: 10m
alertname: CephMonClockSkew
exp_alerts:
- exp_labels:
name: "MON_CLOCK_SKEW"
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-clock-skew
summary: Clock skew detected among monitors
description: "Ceph monitors rely on closely synchronized time to maintain quorum and cluster consistency. This event indicates that the time on at least one mon has drifted too far from the lead mon. Review cluster status with ceph -s. This will show which monitors are affected. Check the time sync status on each monitor host with 'ceph time-sync-status' and the state and peers of your ntpd or chrony daemon."
# Check 3 mons one down, quorum at risk
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="MON_DOWN"}'
values: '0+0x2 1+0x12'
- series: 'ceph_mon_quorum_status{ceph_daemon="mon.a"}'
values: '1+0x14'
- series: 'ceph_mon_quorum_status{ceph_daemon="mon.b"}'
values: '1+0x14'
- series: 'ceph_mon_quorum_status{ceph_daemon="mon.c"}'
values: '1+0x2 0+0x12'
- series: 'ceph_mon_metadata{ceph_daemon="mon.a", hostname="ceph-mon-1"}'
values: '1+0x14'
- series: 'ceph_mon_metadata{ceph_daemon="mon.b", hostname="ceph-mon-2"}'
values: '1+0x14'
- series: 'ceph_mon_metadata{ceph_daemon="mon.c", hostname="ceph-mon-3"}'
values: '1+0x14'
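      # With 3 monitors, quorum needs floor(3/2)+1 = 2 in-quorum mons; mon.c dropping
      # out leaves exactly that minimum, so the critical quorum-at-risk alert fires.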
promql_expr_test:
- expr: ((ceph_health_detail{name="MON_DOWN"} == 1) * on() (count(ceph_mon_quorum_status == 1) == bool (floor(count(ceph_mon_metadata) / 2) + 1))) == 1
eval_time: 3m
exp_samples:
- labels: '{}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephMonDownQuorumAtRisk
        # no exp_alerts here: the alert must not fire at 1m (it fires at 10m below)
- eval_time: 10m
alertname: CephMonDownQuorumAtRisk
exp_alerts:
- exp_labels:
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.3.1
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down
summary: Monitor quorum is at risk
description: "Quorum requires a majority of monitors (x 2) to be active. Without quorum the cluster will become inoperable, affecting all services and connected clients. The following monitors are down: - mon.c on ceph-mon-3"
# check 5 mons, 1 down - warning only
- interval: 1m
input_series:
- series: 'ceph_mon_quorum_status{ceph_daemon="mon.a"}'
values: '1+0x14'
- series: 'ceph_mon_quorum_status{ceph_daemon="mon.b"}'
values: '1+0x14'
- series: 'ceph_mon_quorum_status{ceph_daemon="mon.c"}'
values: '1+0x14'
- series: 'ceph_mon_quorum_status{ceph_daemon="mon.d"}'
values: '1+0x14'
- series: 'ceph_mon_quorum_status{ceph_daemon="mon.e"}'
values: '1+0x2 0+0x12'
- series: 'ceph_mon_metadata{ceph_daemon="mon.a", hostname="ceph-mon-1"}'
values: '1+0x14'
- series: 'ceph_mon_metadata{ceph_daemon="mon.b", hostname="ceph-mon-2"}'
values: '1+0x14'
- series: 'ceph_mon_metadata{ceph_daemon="mon.c", hostname="ceph-mon-3"}'
values: '1+0x14'
- series: 'ceph_mon_metadata{ceph_daemon="mon.d", hostname="ceph-mon-4"}'
values: '1+0x14'
- series: 'ceph_mon_metadata{ceph_daemon="mon.e", hostname="ceph-mon-5"}'
values: '1+0x14'
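      # With 5 monitors a single failure leaves quorum intact, so only the
      # warning-level CephMonDown alert should fire here, not the quorum-at-risk one.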
promql_expr_test:
- expr: (count(ceph_mon_quorum_status == 0) <= (count(ceph_mon_metadata) - floor(count(ceph_mon_metadata) / 2) + 1))
eval_time: 3m
exp_samples:
- labels: '{}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephMonDown
- eval_time: 10m
alertname: CephMonDown
exp_alerts:
- exp_labels:
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#mon-down
summary: One or more monitors down
description: "You have 1 monitor down. Quorum is still intact, but the loss of an additional monitor will make your cluster inoperable. The following monitors are down: - mon.e on ceph-mon-5\n"
# Device Health
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="DEVICE_HEALTH"}'
values: '0+0x2 1+0x10'
promql_expr_test:
- expr: ceph_health_detail{name="DEVICE_HEALTH"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="DEVICE_HEALTH"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephDeviceFailurePredicted
- eval_time: 10m
alertname: CephDeviceFailurePredicted
exp_alerts:
- exp_labels:
name: "DEVICE_HEALTH"
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#id2
summary: Device(s) predicted to fail soon
description: "The device health module has determined that one or more devices will fail soon. To review device status use 'ceph device ls'. To show a specific device use 'ceph device info <dev id>'. Mark the OSD out so that data may migrate to other OSDs. Once the OSD has drained, destroy the OSD, replace the device, and redeploy the OSD."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="DEVICE_HEALTH_TOOMANY"}'
values: '0+0x2 1+0x10'
promql_expr_test:
- expr: ceph_health_detail{name="DEVICE_HEALTH_TOOMANY"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="DEVICE_HEALTH_TOOMANY"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephDeviceFailurePredictionTooHigh
- eval_time: 10m
alertname: CephDeviceFailurePredictionTooHigh
exp_alerts:
- exp_labels:
name: "DEVICE_HEALTH_TOOMANY"
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.4.7
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-toomany
summary: Too many devices are predicted to fail, unable to resolve
description: "The device health module has determined that devices predicted to fail can not be remediated automatically, since too many OSDs would be removed from the cluster to ensure performance and availabililty. Prevent data integrity issues by adding new OSDs so that data may be relocated."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="DEVICE_HEALTH_IN_USE"}'
values: '0+0x2 1+0x10'
promql_expr_test:
- expr: ceph_health_detail{name="DEVICE_HEALTH_IN_USE"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="DEVICE_HEALTH_IN_USE"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephDeviceFailureRelocationIncomplete
- eval_time: 10m
alertname: CephDeviceFailureRelocationIncomplete
exp_alerts:
- exp_labels:
name: "DEVICE_HEALTH_IN_USE"
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#device-health-in-use
summary: Device failure is predicted, but unable to relocate data
description: "The device health module has determined that one or more devices will fail soon, but the normal process of relocating the data on the device to other OSDs in the cluster is blocked. \nEnsure that the cluster has available free space. It may be necessary to add capacity to the cluster to allow data from the failing device to successfully migrate, or to enable the balancer."
# OSD
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_HOST_DOWN"}'
values: '0+0x2 1+0x10'
- series: 'ceph_osd_up{ceph_daemon="osd.0"}'
values: '1+0x2 0+0x10'
- series: 'ceph_osd_metadata{ceph_daemon="osd.0", hostname="ceph-osd-1"}'
values: '1+0x12'
promql_expr_test:
- expr: ceph_health_detail{name="OSD_HOST_DOWN"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="OSD_HOST_DOWN"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephOSDHostDown
- eval_time: 10m
alertname: CephOSDHostDown
exp_alerts:
- exp_labels:
name: "OSD_HOST_DOWN"
severity: warning
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.4.8
exp_annotations:
summary: An OSD host is offline
description: "The following OSDs are down: - ceph-osd-1 : osd.0"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_SLOW_PING_TIME_FRONT"}'
values: '0+0x2 1+0x20'
promql_expr_test:
- expr: ceph_health_detail{name="OSD_SLOW_PING_TIME_FRONT"} == 0
eval_time: 1m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="OSD_SLOW_PING_TIME_FRONT"}'
value: 0
alert_rule_test:
- eval_time: 1m
alertname: CephOSDTimeoutsPublicNetwork
- eval_time: 10m
alertname: CephOSDTimeoutsPublicNetwork
exp_alerts:
- exp_labels:
name: "OSD_SLOW_PING_TIME_FRONT"
severity: warning
type: ceph_default
exp_annotations:
summary: Network issues delaying OSD heartbeats (public network)
description: "OSD heartbeats on the cluster's 'public' network (frontend) are running slow. Investigate the network for latency or loss issues. Use 'ceph health detail' to show the affected OSDs."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_SLOW_PING_TIME_BACK"}'
values: '0+0x2 1+0x20'
promql_expr_test:
- expr: ceph_health_detail{name="OSD_SLOW_PING_TIME_BACK"} == 0
eval_time: 1m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="OSD_SLOW_PING_TIME_BACK"}'
value: 0
alert_rule_test:
- eval_time: 1m
alertname: CephOSDTimeoutsClusterNetwork
- eval_time: 10m
alertname: CephOSDTimeoutsClusterNetwork
exp_alerts:
- exp_labels:
name: "OSD_SLOW_PING_TIME_BACK"
severity: warning
type: ceph_default
exp_annotations:
summary: Network issues delaying OSD heartbeats (cluster network)
description: "OSD heartbeats on the cluster's 'cluster' network (backend) are slow. Investigate the network for latency issues on this subnet. Use 'ceph health detail' to show the affected OSDs."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="BLUESTORE_DISK_SIZE_MISMATCH"}'
values: '0+0x2 1+0x20'
promql_expr_test:
- expr: ceph_health_detail{name="BLUESTORE_DISK_SIZE_MISMATCH"} == 0
eval_time: 1m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="BLUESTORE_DISK_SIZE_MISMATCH"}'
value: 0
alert_rule_test:
- eval_time: 1m
alertname: CephOSDInternalDiskSizeMismatch
- eval_time: 10m
alertname: CephOSDInternalDiskSizeMismatch
exp_alerts:
- exp_labels:
name: "BLUESTORE_DISK_SIZE_MISMATCH"
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-disk-size-mismatch
summary: OSD size inconsistency error
description: "One or more OSDs have an internal inconsistency between metadata and the size of the device. This could lead to the OSD(s) crashing in future. You should redeploy the affected OSDs."
- interval: 30s
input_series:
- series: 'ceph_health_detail{name="BLUESTORE_SPURIOUS_READ_ERRORS"}'
values: '0+0x2 1+0x20'
promql_expr_test:
- expr: ceph_health_detail{name="BLUESTORE_SPURIOUS_READ_ERRORS"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="BLUESTORE_SPURIOUS_READ_ERRORS"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephOSDReadErrors
- eval_time: 10m
alertname: CephOSDReadErrors
exp_alerts:
- exp_labels:
name: "BLUESTORE_SPURIOUS_READ_ERRORS"
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#bluestore-spurious-read-errors
summary: Device read errors detected
description: "An OSD has encountered read errors, but the OSD has recovered by retrying the reads. This may indicate an issue with hardware or the kernel."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_DOWN"}'
values: '0+0x2 1+0x10'
- series: 'ceph_osd_up{ceph_daemon="osd.0"}'
values: '1+0x12'
- series: 'ceph_osd_up{ceph_daemon="osd.1"}'
values: '1+0x2 0+0x10'
- series: 'ceph_osd_up{ceph_daemon="osd.2"}'
values: '1+0x12'
- series: 'ceph_osd_metadata{ceph_daemon="osd.0", hostname="ceph-osd-1"}'
values: '1+0x12'
- series: 'ceph_osd_metadata{ceph_daemon="osd.1", hostname="ceph-osd-2"}'
values: '1+0x12'
- series: 'ceph_osd_metadata{ceph_daemon="osd.2", hostname="ceph-osd-3"}'
values: '1+0x12'
promql_expr_test:
- expr: ceph_health_detail{name="OSD_DOWN"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="OSD_DOWN"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephOSDDown
- eval_time: 10m
alertname: CephOSDDown
exp_alerts:
- exp_labels:
name: "OSD_DOWN"
severity: warning
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.4.2
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-down
summary: An OSD has been marked down
description: "1 OSD down for over 5mins. The following OSD is down: - osd.1 on ceph-osd-2\n"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_NEARFULL"}'
values: '0+0x2 1+0x10'
promql_expr_test:
- expr: ceph_health_detail{name="OSD_NEARFULL"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="OSD_NEARFULL"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephOSDNearFull
- eval_time: 10m
alertname: CephOSDNearFull
exp_alerts:
- exp_labels:
name: "OSD_NEARFULL"
severity: warning
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.4.3
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-nearfull
summary: OSD(s) running low on free space (NEARFULL)
description: One or more OSDs have reached the NEARFULL threshold. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_FULL"}'
values: '0+0x2 1+0x10'
promql_expr_test:
- expr: ceph_health_detail{name="OSD_FULL"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="OSD_FULL"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephOSDFull
- eval_time: 10m
alertname: CephOSDFull
exp_alerts:
- exp_labels:
name: "OSD_FULL"
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.4.6
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-full
summary: OSD full, writes blocked
description: An OSD has reached the FULL threshold. Writes to pools that share the affected OSD will be blocked. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OSD_BACKFILLFULL"}'
values: '0+0x2 1+0x10'
promql_expr_test:
- expr: ceph_health_detail{name="OSD_BACKFILLFULL"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="OSD_BACKFILLFULL"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephOSDBackfillFull
- eval_time: 10m
alertname: CephOSDBackfillFull
exp_alerts:
- exp_labels:
name: "OSD_BACKFILLFULL"
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-backfillfull
summary: OSD(s) too full for backfill operations
description: "An OSD has reached the BACKFILL FULL threshold. This will prevent rebalance operations from completing. Use 'ceph health detail' and 'ceph osd df' to identify the problem. To resolve, add capacity to the affected OSD's failure domain, restore down/out OSDs, or delete unwanted data."
- interval: 30s
input_series:
- series: 'ceph_health_detail{name="OSD_TOO_MANY_REPAIRS"}'
values: '0+0x2 1+0x20'
promql_expr_test:
- expr: ceph_health_detail{name="OSD_TOO_MANY_REPAIRS"} == 0
eval_time: 1m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="OSD_TOO_MANY_REPAIRS"}'
value: 0
alert_rule_test:
- eval_time: 1m
alertname: CephOSDTooManyRepairs
- eval_time: 10m
alertname: CephOSDTooManyRepairs
exp_alerts:
- exp_labels:
name: "OSD_TOO_MANY_REPAIRS"
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#osd-too-many-repairs
summary: OSD reports a high number of read errors
description: Reads from an OSD have used a secondary PG to return data to the client, indicating a potential failing drive.
# Pools
# trigger percent full prediction on pools 1 and 2 only
- interval: 12h
input_series:
- series: 'ceph_pool_percent_used{pool_id="1", instance="9090"}'
values: '1 1 1 1 1'
- series: 'ceph_pool_percent_used{pool_id="1", instance="8090"}'
values: '78 89 79 98 78'
- series: 'ceph_pool_percent_used{pool_id="2", instance="9090"}'
values: '1 1 1 1 1'
- series: 'ceph_pool_percent_used{pool_id="2", instance="8090"}'
values: '22 22 23 23 24'
- series: 'ceph_pool_metadata{pool_id="1" , instance="9090" ,name="rbd",type="replicated"}'
values: '1 1 1 1 1'
- series: 'ceph_pool_metadata{pool_id="1", instance="8090",name="default.rgw.index",type="replicated"}'
values: '1 1 1 1 1'
- series: 'ceph_pool_metadata{pool_id="2" , instance="9090" ,name="rbd",type="replicated"}'
values: '1 1 1 1 1'
- series: 'ceph_pool_metadata{pool_id="2", instance="8090",name="default.rgw.index",type="replicated"}'
values: '1 1 1 1 1'
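      # At a 12h interval the [2d] range only holds a handful of samples; the fitted
      # trend for pool 1 on instance 8090 projects ~143% used five days out, tripping
      # the >= 95% threshold, while pool 2 grows far too slowly to alert.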
promql_expr_test:
- expr: |
(predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id, instance)
group_right() ceph_pool_metadata) >= 95
eval_time: 36h
exp_samples:
- labels: '{instance="8090",name="default.rgw.index",pool_id="1",type="replicated"}'
value: 1.435E+02 # 142%
alert_rule_test:
- eval_time: 48h
alertname: CephPoolGrowthWarning
exp_alerts:
- exp_labels:
instance: 8090
name: default.rgw.index
pool_id: 1
severity: warning
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.9.2
exp_annotations:
summary: Pool growth rate may soon exceed capacity
description: Pool 'default.rgw.index' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="POOL_BACKFILLFULL"}'
values: '0+0x2 1+0x10'
promql_expr_test:
- expr: ceph_health_detail{name="POOL_BACKFILLFULL"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="POOL_BACKFILLFULL"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephPoolBackfillFull
- eval_time: 5m
alertname: CephPoolBackfillFull
exp_alerts:
- exp_labels:
name: "POOL_BACKFILLFULL"
severity: warning
type: ceph_default
exp_annotations:
summary: Free space in a pool is too low for recovery/backfill
description: A pool is approaching the near full threshold, which will prevent recovery/backfill operations from completing. Consider adding more capacity.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="POOL_NEAR_FULL"}'
values: '0+0x2 1+0x10'
promql_expr_test:
- expr: ceph_health_detail{name="POOL_NEAR_FULL"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="POOL_NEAR_FULL"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephPoolNearFull
- eval_time: 10m
alertname: CephPoolNearFull
exp_alerts:
- exp_labels:
name: "POOL_NEAR_FULL"
severity: warning
type: ceph_default
exp_annotations:
summary: One or more Ceph pools are nearly full
description: "A pool has exceeded the warning (percent full) threshold, or OSDs supporting the pool have reached the NEARFULL threshold. Writes may continue, but you are at risk of the pool going read-only if more capacity isn't made available. Determine the affected pool with 'ceph df detail', looking at QUOTA BYTES and STORED. Increase the pool's quota, or add capacity to the cluster first then increase the pool's quota (e.g. ceph osd pool set quota <pool_name> max_bytes <bytes>). Also ensure that the balancer is active."
# PGs
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="PG_NOT_SCRUBBED"}'
values: '0+0x2 1+0x10'
promql_expr_test:
- expr: ceph_health_detail{name="PG_NOT_SCRUBBED"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="PG_NOT_SCRUBBED"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephPGNotScrubbed
- eval_time: 10m
alertname: CephPGNotScrubbed
exp_alerts:
- exp_labels:
name: "PG_NOT_SCRUBBED"
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-scrubbed
summary: Placement group(s) have not been scrubbed
description: "One or more PGs have not been scrubbed recently. Scrubs check metadata integrity, protecting against bit-rot. They check that metadata is consistent across data replicas. When PGs miss their scrub interval, it may indicate that the scrub window is too small, or PGs were not in a 'clean' state during the scrub window. You can manually initiate a scrub with: ceph pg scrub <pgid>"
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="PG_DAMAGED"}'
values: '0+0x4 1+0x20'
promql_expr_test:
- expr: ceph_health_detail{name=~"PG_DAMAGED|OSD_SCRUB_ERRORS"} == 1
eval_time: 5m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="PG_DAMAGED"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephPGsDamaged
- eval_time: 10m
alertname: CephPGsDamaged
exp_alerts:
- exp_labels:
name: "PG_DAMAGED"
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.7.4
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-damaged
summary: Placement group damaged, manual intervention needed
description: During data consistency checks (scrub), at least one PG has been flagged as being damaged or inconsistent. Check to see which PG is affected, and attempt a manual repair if necessary. To list problematic placement groups, use 'rados list-inconsistent-pg <pool>'. To repair PGs use the 'ceph pg repair <pg_num>' command.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="TOO_MANY_PGS"}'
values: '0+0x4 1+0x20'
promql_expr_test:
- expr: ceph_health_detail{name="TOO_MANY_PGS"} == 1
eval_time: 5m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="TOO_MANY_PGS"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephPGsHighPerOSD
- eval_time: 10m
alertname: CephPGsHighPerOSD
exp_alerts:
- exp_labels:
name: "TOO_MANY_PGS"
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks/#too-many-pgs
summary: Placement groups per OSD is too high
description: "The number of placement groups per OSD is too high (exceeds the mon_max_pg_per_osd setting).\n Check that the pg_autoscaler has not been disabled for any pools with 'ceph osd pool autoscale-status', and that the profile selected is appropriate. You may also adjust the target_size_ratio of a pool to guide the autoscaler based on the expected relative size of the pool ('ceph osd pool set cephfs.cephfs.meta target_size_ratio .1') or set the pg_autoscaler mode to 'warn' and adjust pg_num appropriately for one or more pools."
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="PG_RECOVERY_FULL"}'
values: '0+0x2 1+0x20'
promql_expr_test:
- expr: ceph_health_detail{name="PG_RECOVERY_FULL"} == 0
eval_time: 1m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="PG_RECOVERY_FULL"}'
value: 0
alert_rule_test:
- eval_time: 1m
alertname: CephPGRecoveryAtRisk
- eval_time: 10m
alertname: CephPGRecoveryAtRisk
exp_alerts:
- exp_labels:
name: "PG_RECOVERY_FULL"
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.7.5
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-recovery-full
summary: OSDs are too full for recovery
description: Data redundancy is at risk since one or more OSDs are at or above the 'full' threshold. Add more capacity to the cluster, restore down/out OSDs, or delete unwanted data.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="PG_BACKFILL_FULL"}'
values: '0+0x2 1+0x20'
promql_expr_test:
- expr: ceph_health_detail{name="PG_BACKFILL_FULL"} == 0
eval_time: 1m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="PG_BACKFILL_FULL"}'
value: 0
alert_rule_test:
- eval_time: 1m
alertname: CephPGBackfillAtRisk
- eval_time: 10m
alertname: CephPGBackfillAtRisk
exp_alerts:
- exp_labels:
name: "PG_BACKFILL_FULL"
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.7.6
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-backfill-full
summary: Backfill operations are blocked due to lack of free space
description: Data redundancy may be at risk due to lack of free space within the cluster. One or more OSDs have reached the 'backfillfull' threshold. Add more capacity, or delete unwanted data.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="PG_AVAILABILITY"}'
values: '0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_health_detail{name="OSD_DOWN"}'
values: '0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0'
promql_expr_test:
- expr: ((ceph_health_detail{name="PG_AVAILABILITY"} == 1) - scalar(ceph_health_detail{name="OSD_DOWN"}))
eval_time: 1m
# empty set at 1m
exp_samples:
alert_rule_test:
# PG_AVAILABILITY and OSD_DOWN not firing .. no alert
- eval_time: 1m
alertname: CephPGUnavilableBlockingIO
exp_alerts:
# PG_AVAILABILITY firing, but osd_down is active .. no alert
- eval_time: 5m
alertname: CephPGUnavilableBlockingIO
exp_alerts:
# PG_AVAILABILITY firing, AND OSD_DOWN is not active...raise the alert
- eval_time: 15m
alertname: CephPGUnavilableBlockingIO
exp_alerts:
- exp_labels:
name: "PG_AVAILABILITY"
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.7.3
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-availability
summary: PG is unavailable, blocking I/O
description: Data availability is reduced, impacting the cluster's ability to service I/O. One or more placement groups (PGs) are in a state that blocks I/O.
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="PG_NOT_DEEP_SCRUBBED"}'
values: '0+0x2 1+0x10'
promql_expr_test:
- expr: ceph_health_detail{name="PG_NOT_DEEP_SCRUBBED"} == 1
eval_time: 3m
exp_samples:
- labels: '{__name__="ceph_health_detail", name="PG_NOT_DEEP_SCRUBBED"}'
value: 1
alert_rule_test:
- eval_time: 1m
alertname: CephPGNotDeepScrubbed
- eval_time: 10m
alertname: CephPGNotDeepScrubbed
exp_alerts:
- exp_labels:
name: "PG_NOT_DEEP_SCRUBBED"
severity: warning
type: ceph_default
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#pg-not-deep-scrubbed
summary: Placement group(s) have not been deep scrubbed
description: One or more PGs have not been deep scrubbed recently. Deep scrubs protect against bit-rot. They compare data replicas to ensure consistency. When PGs miss their deep scrub interval, it may indicate that the window is too small or PGs were not in a 'clean' state during the deep-scrub window.
# Prometheus
- interval: 1m
input_series:
- series: 'up{job="myjob"}'
values: '1+0x10'
promql_expr_test:
- expr: absent(up{job="ceph"})
eval_time: 1m
exp_samples:
- labels: '{job="ceph"}'
value: 1
alert_rule_test:
- eval_time: 5m
alertname: PrometheusJobMissing
exp_alerts:
- exp_labels:
job: ceph
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.12.1
exp_annotations:
summary: The scrape job for Ceph is missing from Prometheus
description: The prometheus job that scrapes from Ceph is no longer defined, this will effectively mean you'll have no metrics or alerts for the cluster. Please review the job definitions in the prometheus.yml file of the prometheus instance.
# RADOS
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="OBJECT_UNFOUND"}'
values: '0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_osd_up{ceph_daemon="osd.0"}'
values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_osd_up{ceph_daemon="osd.1"}'
values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_osd_up{ceph_daemon="osd.2"}'
values: '1 1 1 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_osd_metadata{ceph_daemon="osd.0"}'
values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_osd_metadata{ceph_daemon="osd.1"}'
values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
- series: 'ceph_osd_metadata{ceph_daemon="osd.2"}'
values: '1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1'
promql_expr_test:
- expr: (ceph_health_detail{name="OBJECT_UNFOUND"} == 1) * on() (count(ceph_osd_up == 1) == bool count(ceph_osd_metadata)) == 1
eval_time: 1m
exp_samples:
alert_rule_test:
# OBJECT_UNFOUND but osd.2 is down, so don't fire
- eval_time: 5m
alertname: CephObjectMissing
exp_alerts:
# OBJECT_UNFOUND and all osd's are online, so fire
- eval_time: 15m
alertname: CephObjectMissing
exp_alerts:
- exp_labels:
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.10.1
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#object-unfound
summary: Object(s) marked UNFOUND
description: The latest version of a RADOS object can not be found, even though all OSDs are up. I/O requests for this object from clients will block (hang). Resolving this issue may require the object to be rolled back to a prior version manually, and manually verified.
# Generic Alerts
- interval: 1m
input_series:
- series: 'ceph_health_detail{name="RECENT_CRASH"}'
values: '0 0 0 1 1 1 1 1 1 1 1'
promql_expr_test:
- expr: ceph_health_detail{name="RECENT_CRASH"} == 1
eval_time: 1m
exp_samples:
alert_rule_test:
# not firing
- eval_time: 1m
alertname: CephDaemonCrash
exp_alerts:
# firing
- eval_time: 10m
alertname: CephDaemonCrash
exp_alerts:
- exp_labels:
name: RECENT_CRASH
severity: critical
type: ceph_default
oid: 1.3.6.1.4.1.50495.1.2.1.1.2
exp_annotations:
documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks/#recent-crash
summary: One or more Ceph daemons have crashed, and are pending acknowledgement
description: One or more daemons have crashed recently, and need to be acknowledged. This notification ensures that software crashes do not go unseen. To acknowledge a crash, use the 'ceph crash archive <id>' command.
| 81,203 | 41.987824 | 595 | yml |
null | ceph-main/monitoring/ceph-mixin/tests_alerts/test_syntax.py | import pytest
import os
import yaml
from .utils import promtool_available, call
from .settings import ALERTS_FILE, UNIT_TESTS_FILE
def load_yaml(file_name):
yaml_data = None
with open(file_name, 'r') as alert_file:
raw = alert_file.read()
try:
yaml_data = yaml.safe_load(raw)
        except yaml.YAMLError:
            pass
return yaml_data
def test_alerts_present():
assert os.path.exists(ALERTS_FILE), f"{ALERTS_FILE} not found"
def test_unittests_present():
assert os.path.exists(UNIT_TESTS_FILE), f"{UNIT_TESTS_FILE} not found"
@pytest.mark.skipif(not os.path.exists(ALERTS_FILE), reason=f"{ALERTS_FILE} missing")
def test_rules_format():
assert load_yaml(ALERTS_FILE)
@pytest.mark.skipif(not os.path.exists(UNIT_TESTS_FILE), reason=f"{UNIT_TESTS_FILE} missing")
def test_unittests_format():
assert load_yaml(UNIT_TESTS_FILE)
@pytest.mark.skipif(not promtool_available(), reason="promtool is not installed. Unable to check syntax")
def test_rule_syntax():
completion = call(f"promtool check rules {ALERTS_FILE}")
assert completion.returncode == 0
assert b"SUCCESS" in completion.stdout
| 1,176 | 26.372093 | 105 | py |
null | ceph-main/monitoring/ceph-mixin/tests_alerts/test_unittests.py | import pytest
import os
from .utils import promtool_available, call
from .settings import ALERTS_FILE, UNIT_TESTS_FILE
def test_alerts_present():
assert os.path.exists(ALERTS_FILE), f"{ALERTS_FILE} not found"
def test_unittests_present():
assert os.path.exists(UNIT_TESTS_FILE), f"{UNIT_TESTS_FILE} not found"
@pytest.mark.skipif(not promtool_available(), reason="promtool is not installed. Unable to run unit tests")
def test_run_unittests():
completion = call(f"promtool test rules {UNIT_TESTS_FILE}")
assert completion.returncode == 0
assert b"SUCCESS" in completion.stdout
| 603 | 29.2 | 107 | py |
null | ceph-main/monitoring/ceph-mixin/tests_alerts/utils.py | import pytest
import shutil
import subprocess
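# Shared helpers for the alert test modules: promtool_available() reports whether
# the 'promtool' binary can be found on PATH, and call() runs a command string,
# capturing stdout so callers can assert on promtool's output.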
def promtool_available() -> bool:
return shutil.which('promtool') is not None
def call(cmd):
completion = subprocess.run(cmd.split(), stdout=subprocess.PIPE)
return completion
| 238 | 17.384615 | 68 | py |
null | ceph-main/monitoring/ceph-mixin/tests_alerts/validate_rules.py | #!/usr/bin/env python3
#
# Check the Prometheus rules for format, and integration
# with the unit tests. This script has the following exit
# codes:
# 0 .. Everything worked
# 4 .. rule problems or missing unit tests
# 8 .. Missing fields in YAML
# 12 .. Invalid YAML - unable to load
# 16 .. Missing input files
#
# Externals
# snmptranslate .. used to determine the OIDs in the MIB, to verify that the rule -> MIB mapping is correct
#
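# Typical invocation (assumed): run from this directory so that settings.py can
# resolve ALERTS_FILE, MIB_FILE and UNIT_TESTS_FILE, e.g.:
#   python3 validate_rules.py
#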
import re
import os
import sys
import yaml
import shutil
import string
from bs4 import BeautifulSoup
from typing import List, Any, Dict, Set, Optional, Tuple
import subprocess
import urllib.request
import urllib.error
from urllib.parse import urlparse
from settings import ALERTS_FILE, MIB_FILE, UNIT_TESTS_FILE
DOCLINK_NAME = 'documentation'
def isascii(s: str) -> bool:
try:
s.encode('ascii')
except UnicodeEncodeError:
return False
return True
def read_file(file_name: str) -> Tuple[str, str]:
try:
with open(file_name, 'r') as input_file:
raw_data = input_file.read()
except OSError:
return '', f"Unable to open {file_name}"
return raw_data, ''
def load_yaml(file_name: str) -> Tuple[Dict[str, Any], str]:
data = {}
errs = ''
raw_data, err = read_file(file_name)
if not err:
try:
data = yaml.safe_load(raw_data)
        except yaml.YAMLError:
            errs = f"filename '{file_name}' is not a valid YAML file"
return data, errs
def run_command(command: str):
c = command.split()
completion = subprocess.run(c, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return (completion.returncode,
completion.stdout.decode('utf-8').split('\n'),
completion.stderr.decode('utf-8').split('\n'))
class HTMLCache:
def __init__(self) -> None:
self.cache: Dict[str, Tuple[int, str]] = {}
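    # Pages are cached per scheme://netloc/path (any #fragment is stripped), so rules
    # that point at different anchors of the same page only trigger a single fetch.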
def fetch(self, url_str: str) -> None:
parsed = urlparse(url_str)
url = f"{parsed.scheme}://{parsed.netloc}{parsed.path}"
if url in self.cache:
return self.cache[url]
req = urllib.request.Request(url)
try:
r = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
self.cache[url] = e.code, e.reason
return self.cache[url]
except urllib.error.URLError as e:
self.cache[url] = 400, e.reason
return self.cache[url]
if r.status == 200:
html = r.read().decode('utf-8')
self.cache[url] = 200, html
return self.cache[url]
self.cache[url] = r.status, r.reason
return r.status, r.reason
@property
def cached_pages(self) -> List[str]:
return self.cache.keys()
@property
def cached_pages_total(self) -> int:
return len(self.cache.keys())
class PrometheusRule:
expected_attrs = [
'alert',
'expr',
'labels',
'annotations'
]
def __init__(self, rule_group, rule_data: Dict[str, Any]):
assert 'alert' in rule_data
self.group: RuleGroup = rule_group
self.name = rule_data.get('alert')
self.rule = rule_data
self.errors: List[str] = []
self.warnings: List[str] = []
self.validate()
@property
def has_oid(self):
        return bool(self.rule.get('labels', {}).get('oid', ''))
@property
def labels(self) -> Dict[str, str]:
return self.rule.get('labels', {})
@property
def annotations(self) -> Dict[str, str]:
return self.rule.get('annotations', {})
def _check_alert_name(self):
# this is simplistic, but works in the context of the alert name
if self.name[0] in string.ascii_uppercase and \
self.name != self.name.lower() and \
self.name != self.name.upper() and \
" " not in self.name and \
"_" not in self.name:
return
self.warnings.append("Alert name is not in CamelCase format")
def _check_structure(self):
rule_attrs = self.rule.keys()
missing_attrs = [a for a in PrometheusRule.expected_attrs if a not in rule_attrs]
if missing_attrs:
self.errors.append(
f"invalid alert structure. Missing field{'s' if len(missing_attrs) > 1 else ''}"
f": {','.join(missing_attrs)}")
def _check_labels(self):
for rqd in ['severity', 'type']:
if rqd not in self.labels.keys():
self.errors.append(f"rule is missing {rqd} label definition")
def _check_annotations(self):
for rqd in ['summary', 'description']:
if rqd not in self.annotations:
self.errors.append(f"rule is missing {rqd} annotation definition")
def _check_doclink(self):
doclink = self.annotations.get(DOCLINK_NAME, '')
if doclink:
url = urlparse(doclink)
status, content = self.group.fetch_html_page(doclink)
if status == 200:
if url.fragment:
soup = BeautifulSoup(content, 'html.parser')
if not soup.find(id=url.fragment):
self.errors.append(f"documentation link error: {url.fragment} anchor not found on the page")
else:
# catch all
self.errors.append(f"documentation link error: {status} {content}")
def _check_snmp(self):
oid = self.labels.get('oid', '')
if self.labels.get('severity', '') == 'critical' and not oid:
self.warnings.append("critical level alert is missing an SNMP oid entry")
        # OIDs must sit under the Ceph enterprise prefix 1.3.6.1.4.1.50495.1.2;
        # the dots are escaped so they only match literal '.' characters.
        if oid and not re.search(r'^1\.3\.6\.1\.4\.1\.50495\.1\.2\.\d+\.\d+\.\d+$', oid):
self.errors.append("invalid OID format provided")
if self.group.get_oids():
if oid and oid not in self.group.get_oids():
self.errors.append(f"rule defines an OID {oid} that is missing from the MIB file({os.path.basename(MIB_FILE)})")
def _check_ascii(self):
if 'oid' not in self.labels:
return
desc = self.annotations.get('description', '')
summary = self.annotations.get('summary', '')
if not isascii(desc):
self.errors.append(f"non-ascii characters found in 'description' field will cause issues in associated snmp trap.")
if not isascii(summary):
self.errors.append(f"non-ascii characters found in 'summary' field will cause issues in associated snmp trap.")
def validate(self):
self._check_alert_name()
self._check_structure()
self._check_labels()
self._check_annotations()
self._check_doclink()
self._check_snmp()
self._check_ascii()
char = '.'
if self.errors:
char = 'E'
self.group.update('error', self.name)
elif self.warnings:
char = 'W'
self.group.update('warning', self.name)
sys.stdout.write(char)
class RuleGroup:
def __init__(self, rule_file, group_name: str, group_name_width: int):
self.rule_file: RuleFile = rule_file
self.group_name = group_name
self.rules: Dict[str, PrometheusRule] = {}
self.problems = {
"error": [],
"warning": [],
}
sys.stdout.write(f"\n\t{group_name:<{group_name_width}} : ")
def add_rule(self, rule_data:Dict[str, Any]):
alert_name = rule_data.get('alert')
self.rules[alert_name] = PrometheusRule(self, rule_data)
def update(self, problem_type:str, alert_name:str):
assert problem_type in ['error', 'warning']
self.problems[problem_type].append(alert_name)
self.rule_file.update(self.group_name)
def fetch_html_page(self, url):
return self.rule_file.fetch_html_page(url)
def get_oids(self):
return self.rule_file.oid_list
@property
def error_count(self):
return len(self.problems['error'])
    @property
    def warning_count(self):
return len(self.problems['warning'])
@property
def count(self):
return len(self.rules)
class RuleFile:
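    """Parses the Prometheus rule file and validates every alerting rule it contains."""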
def __init__(self, parent, file_name, rules, oid_list):
self.parent = parent
self.file_name = file_name
self.rules: Dict[str, Any] = rules
self.oid_list = oid_list
self.problems: Set[str] = set()
self.group: Dict[str, RuleGroup] = {}
self.alert_names_seen: Set[str] = set()
self.duplicate_alert_names:List[str] = []
self.html_cache = HTMLCache()
assert 'groups' in self.rules
self.max_group_name_width = self.get_max_group_name()
self.load_groups()
def update(self, group_name):
self.problems.add(group_name)
self.parent.mark_invalid()
def fetch_html_page(self, url):
return self.html_cache.fetch(url)
@property
def group_count(self):
return len(self.rules['groups'])
@property
def rule_count(self):
rule_count = 0
for _group_name, rule_group in self.group.items():
rule_count += rule_group.count
return rule_count
@property
def oid_count(self):
oid_count = 0
for _group_name, rule_group in self.group.items():
for _rule_name, rule in rule_group.rules.items():
if rule.has_oid:
oid_count += 1
return oid_count
@property
def group_names(self):
return self.group.keys()
@property
def problem_count(self):
return len(self.problems)
def get_max_group_name(self):
group_name_list = []
for group in self.rules.get('groups'):
group_name_list.append(group['name'])
return max([len(g) for g in group_name_list])
def load_groups(self):
sys.stdout.write("\nChecking rule groups")
for group in self.rules.get('groups'):
group_name = group['name']
rules = group['rules']
self.group[group_name] = RuleGroup(self, group_name, self.max_group_name_width)
for rule_data in rules:
if 'alert' in rule_data:
alert_name = rule_data.get('alert')
if alert_name in self.alert_names_seen:
self.duplicate_alert_names.append(alert_name)
else:
self.alert_names_seen.add(alert_name)
self.group[group_name].add_rule(rule_data)
else:
# skipped recording rule
pass
def report(self):
def max_width(item_list: Set[str], min_width: int = 0) -> int:
return max([len(i) for i in item_list] + [min_width])
if not self.problems and not self.duplicate_alert_names:
print("\nNo problems detected in the rule file")
return
print("\nProblem Report\n")
group_width = max_width(self.problems, 5)
alert_names = set()
for g in self.problems:
group = self.group[g]
alert_names.update(group.problems.get('error', []))
alert_names.update(group.problems.get('warning', []))
alert_width = max_width(alert_names, 10)
template = " {group:<{group_width}} {severity:<8} {alert_name:<{alert_width}} {description}"
print(template.format(
group="Group",
group_width=group_width,
severity="Severity",
alert_name="Alert Name",
alert_width=alert_width,
description="Problem Description"))
print(template.format(
group="-----",
group_width=group_width,
severity="--------",
alert_name="----------",
alert_width=alert_width,
description="-------------------"))
for group_name in sorted(self.problems):
group = self.group[group_name]
rules = group.rules
for alert_name in group.problems.get('error', []):
for desc in rules[alert_name].errors:
print(template.format(
group=group_name,
group_width=group_width,
severity="Error",
alert_name=alert_name,
alert_width=alert_width,
description=desc))
for alert_name in group.problems.get('warning', []):
for desc in rules[alert_name].warnings:
print(template.format(
group=group_name,
group_width=group_width,
severity="Warning",
alert_name=alert_name,
alert_width=alert_width,
description=desc))
if self.duplicate_alert_names:
print("Duplicate alert names detected:")
for a in self.duplicate_alert_names:
print(f" - {a}")
class UnitTests:
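    """Checks that every alert defined in the rule file has a corresponding promtool unit test."""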
expected_attrs = [
'rule_files',
'tests',
'evaluation_interval'
]
def __init__(self, filename):
self.filename = filename
self.unit_test_data: Dict[str, Any] = {}
self.alert_names_seen: Set[str] = set()
self.problems: List[str] = []
self.load()
def load(self):
self.unit_test_data, errs = load_yaml(self.filename)
if errs:
print(f"\n\nError in unit tests file: {errs}")
sys.exit(12)
missing_attr = [a for a in UnitTests.expected_attrs if a not in self.unit_test_data.keys()]
if missing_attr:
print(f"\nMissing attributes in unit tests: {','.join(missing_attr)}")
sys.exit(8)
def _check_alert_names(self, alert_names: List[str]):
alerts_tested: Set[str] = set()
for t in self.unit_test_data.get('tests'):
test_cases = t.get('alert_rule_test', [])
if not test_cases:
continue
for case in test_cases:
alertname = case.get('alertname', '')
if alertname:
alerts_tested.add(alertname)
alerts_defined = set(alert_names)
self.problems = list(alerts_defined.difference(alerts_tested))
def process(self, defined_alert_names: List[str]):
self._check_alert_names(defined_alert_names)
def report(self) -> None:
if not self.problems:
print("\nNo problems detected in unit tests file")
return
print("\nUnit tests are incomplete. Tests missing for the following alerts;")
for p in self.problems:
print(f" - {p}")
class RuleChecker:
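    """Top-level driver: loads the rule and unit test files, runs all checks and reports the results."""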
def __init__(self, rules_filename: str = None, test_filename: str = None):
self.rules_filename = rules_filename or ALERTS_FILE
self.test_filename = test_filename or UNIT_TESTS_FILE
self.rule_file: Optional[RuleFile] = None
self.unit_tests: Optional[UnitTests] = None
self.rule_file_problems: bool = False
self.errors = {}
self.warnings = {}
self.error_count = 0
self.warning_count = 0
self.oid_count = 0
self.oid_list = self.build_oid_list()
def build_oid_list(self) -> List[str]:
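        # Use snmptranslate (when available) to enumerate the OIDs declared in CEPH-MIB;
        # an empty list simply disables the MIB cross-check in _check_snmp.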
cmd = shutil.which('snmptranslate')
if not cmd:
return []
rc, stdout, stderr = run_command(f"{cmd} -Pu -Tz -M ../../snmp:/usr/share/snmp/mibs -m CEPH-MIB")
if rc != 0:
return []
oid_list: List[str] = []
for line in stdout[:-1]:
_label, oid = line.replace('"', '').replace('\t', ' ').split()
oid_list.append(oid)
return oid_list
@property
def status(self):
if self.rule_file_problems or self.unit_tests.problems:
return 4
return 0
def mark_invalid(self):
self.rule_file_problems = True
def summarise_rule_file(self):
for group_name in self.rule_file.problems:
group = self.rule_file.group[group_name]
self.error_count += len(group.problems['error'])
self.warning_count += len(group.problems['warning'])
def ready(self):
errs: List[str] = []
ready_state = True
if not os.path.exists(self.rules_filename):
errs.append(f"rule file '{self.rules_filename}' not found")
ready_state = False
if not os.path.exists(self.test_filename):
errs.append(f"test file '{self.test_filename}' not found")
ready_state = False
return ready_state, errs
def run(self):
ready, errs = self.ready()
if not ready:
print("Unable to start:")
for e in errs:
print(f"- {e}")
sys.exit(16)
rules, errs = load_yaml(self.rules_filename)
if errs:
print(errs)
sys.exit(12)
self.rule_file = RuleFile(self, self.rules_filename, rules, self.oid_list)
self.summarise_rule_file()
self.unit_tests = UnitTests(self.test_filename)
self.unit_tests.process(self.rule_file.alert_names_seen)
def report(self):
print("\n\nSummary\n")
print(f"Rule file : {self.rules_filename}")
print(f"Unit Test file : {self.test_filename}")
print(f"\nRule groups processed : {self.rule_file.group_count:>3}")
print(f"Rules processed : {self.rule_file.rule_count:>3}")
print(f"SNMP OIDs declared : {self.rule_file.oid_count:>3} {'(snmptranslate missing, unable to cross check)' if not self.oid_list else ''}")
print(f"Rule errors : {self.error_count:>3}")
print(f"Rule warnings : {self.warning_count:>3}")
print(f"Rule name duplicates : {len(self.rule_file.duplicate_alert_names):>3}")
print(f"Unit tests missing : {len(self.unit_tests.problems):>3}")
self.rule_file.report()
self.unit_tests.report()
def main():
checker = RuleChecker()
checker.run()
checker.report()
print()
sys.exit(checker.status)
if __name__ == '__main__':
main()
| 18,342 | 31.068182 | 151 | py |
null | ceph-main/monitoring/ceph-mixin/tests_dashboards/__init__.py | import re
import subprocess
import sys
import tempfile
from dataclasses import asdict, dataclass, field
from typing import Any, List
import yaml
from .util import replace_grafana_expr_variables
@dataclass
class InputSeries:
series: str = ''
values: str = ''
@dataclass
class ExprSample:
labels: str = ''
value: float = -1
@dataclass
class PromqlExprTest:
expr: str = ''
eval_time: str = '1m'
exp_samples: List[ExprSample] = field(default_factory=list)
@dataclass
class Test:
interval: str = '1m'
input_series: List[InputSeries] = field(default_factory=list)
promql_expr_test: List[PromqlExprTest] = field(default_factory=list)
@dataclass
class TestFile:
evaluation_interval: str = '1m'
tests: List[Test] = field(default_factory=list)
class PromqlTest:
"""
Base class to provide prometheus query test capabilities. After setting up
the query test with its input and expected output it's expected to run promtool.
https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#test-yml
The workflow of testing would be something like:
# add prometheus query to test
self.set_expression('bonding_slaves > 0')
# add some prometheus input series
self.add_series('bonding_slaves{master="bond0"}', '2')
self.add_series('bonding_slaves{master="bond1"}', '3')
self.add_series('node_network_receive_bytes{instance="127.0.0.1",
device="eth1"}', "10 100 230 22")
# expected output of the query
self.add_exp_samples('bonding_slaves{master="bond0"}', 2)
self.add_exp_samples('bonding_slaves{master="bond1"}', 3)
# at last, always call promtool with:
self.assertTrue(self.run_promtool())
# assertTrue means it expect promtool to succeed
"""
def __init__(self):
self.test_output_file = tempfile.NamedTemporaryFile('w+')
self.test_file = TestFile()
self.test = Test()
self.promql_expr_test = PromqlExprTest()
self.test.promql_expr_test.append(self.promql_expr_test)
self.test_file.tests.append(self.test)
self.variables = {}
def __del__(self):
self.test_output_file.close()
def set_evaluation_interval(self, interval: int, unit: str = 'm') -> None:
"""
Set the evaluation interval of the time series
Args:
interval (int): number of units.
unit (str): unit type: 'ms', 's', 'm', etc...
"""
self.test_file.evaluation_interval = f'{interval}{unit}'
def set_interval(self, interval: int, unit: str = 'm') -> None:
"""
Set the duration of the time series
Args:
interval (int): number of units.
unit (str): unit type: 'ms', 's', 'm', etc...
"""
self.test.interval = f'{interval}{unit}'
def set_expression(self, expr: str) -> None:
"""
Set the prometheus expression/query used to filter data.
Args:
expr(str): expression/query.
"""
self.promql_expr_test.expr = expr
def add_series(self, series: str, values: str) -> None:
"""
Add a series to the input.
Args:
series(str): Prometheus series.
Notation: '<metric name>{<label name>=<label value>, ...}'
values(str): Value of the series.
"""
input_series = InputSeries(series=series, values=values)
self.test.input_series.append(input_series)
def set_eval_time(self, eval_time: int, unit: str = 'm') -> None:
"""
Set the time when the expression will be evaluated
Args:
interval (int): number of units.
unit (str): unit type: 'ms', 's', 'm', etc...
"""
self.promql_expr_test.eval_time = f'{eval_time}{unit}'
def add_exp_samples(self, sample: str, values: Any) -> None:
"""
Add an expected sample/output of the query given the series/input
Args:
sample(str): Expected sample.
Notation: '<metric name>{<label name>=<label value>, ...}'
values(Any): Value of the sample.
"""
expr_sample = ExprSample(labels=sample, value=values)
self.promql_expr_test.exp_samples.append(expr_sample)
def set_variable(self, variable: str, value: str):
"""
If a query makes use of grafonnet variables, for example
'$osd_hosts', you should change this to a real value. Example:
> self.set_expression('bonding_slaves{master="$osd_hosts"} > 0')
> self.set_variable('osd_hosts', '127.0.0.1')
> print(self.query)
> bonding_slaves{master="127.0.0.1"} > 0
Args:
variable(str): Variable name
value(str): Value to replace variable with
"""
self.variables[variable] = value
def run_promtool(self):
"""
Run promtool to test the query after setting up the input, output
and extra parameters.
Returns:
bool: True if successful, False otherwise.
"""
for variable, value in self.variables.items():
expr = self.promql_expr_test.expr
new_expr = replace_grafana_expr_variables(expr, variable, value)
self.set_expression(new_expr)
test_as_dict = asdict(self.test_file)
yaml.dump(test_as_dict, self.test_output_file)
args = f'promtool test rules {self.test_output_file.name}'.split()
try:
subprocess.run(args, check=True)
return True
except subprocess.CalledProcessError as process_error:
print(yaml.dump(test_as_dict))
print(process_error.stderr)
return False
| 5,809 | 29.578947 | 91 | py |
null | ceph-main/monitoring/ceph-mixin/tests_dashboards/util.py | import json
import re
from pathlib import Path
from typing import Any, Dict, Tuple, Union
from termcolor import cprint
UNITS = ['ms', 's', 'm', 'h', 'd', 'w', 'y']
def resolve_time_and_unit(time: str) -> Union[Tuple[int, str], Tuple[None, None]]:
"""
Divide time with its unit and return a tuple like (10, 'm')
    Return (None, None) if it is an invalid prometheus time.
Valid units are inside UNITS.
"""
if time[-1] in UNITS:
return int(time[:-1]), time[-1]
if time[-2:] in UNITS:
return int(time[:-2]), time[-2:]
return None, None
def get_dashboards_data() -> Dict[str, Any]:
data: Dict[str, Any] = {'queries': {}, 'variables': {}, 'stats': {}}
for file in Path(__file__).parent.parent \
.joinpath('dashboards_out').glob('*.json'):
with open(file, 'r') as f:
dashboard_data = json.load(f)
data['stats'][str(file)] = {'total': 0, 'tested': 0}
add_dashboard_queries(data, dashboard_data, str(file))
add_dashboard_variables(data, dashboard_data)
add_default_dashboards_variables(data)
return data
def add_dashboard_queries(data: Dict[str, Any], dashboard_data: Dict[str, Any], path: str) -> None:
"""
Grafana panels can have more than one target/query, in order to identify each
query in the panel we append the "legendFormat" of the target to the panel name.
format: panel_name-legendFormat
"""
if 'panels' not in dashboard_data:
return
error = 0
for panel in dashboard_data['panels']:
if (
'title' in panel
and 'targets' in panel
and len(panel['targets']) > 0
and 'expr' in panel['targets'][0]
):
for target in panel['targets']:
title = panel['title']
legend_format = target['legendFormat'] if 'legendFormat' in target else ""
query_id = f'{title}-{legend_format}'
if query_id in data['queries']:
# NOTE: If two or more panels have the same name and legend it
# might suggest a refactoring is needed or add something else
# to identify each query.
conflict_file = Path(data['queries'][query_id]['path']).name
file = Path(path).name
cprint((f'ERROR: Query in panel "{title}" with legend "{legend_format}"'
f' already exists. Conflict "{conflict_file}" '
f'with: "{file}"'), 'red')
error = 1
data['queries'][query_id] = {'query': target['expr'], 'path': path}
data['stats'][path]['total'] += 1
if error:
raise ValueError('Missing legend_format in queries, please add a proper value.')
def add_dashboard_variables(data: Dict[str, Any], dashboard_data: Dict[str, Any]) -> None:
if 'templating' not in dashboard_data or 'list' not in dashboard_data['templating']:
return
for variable in dashboard_data['templating']['list']:
if 'name' in variable:
data['variables'][variable['name']] = 'UNSET VARIABLE'
def add_default_dashboards_variables(data: Dict[str, Any]) -> None:
data['variables']['job'] = 'ceph'
data['variables']['job_haproxy'] = 'haproxy'
data['variables']['__rate_interval'] = '1m'
def replace_grafana_expr_variables(expr: str, variable: str, value: Any) -> str:
""" Replace grafana variables in expression with a value
    It should match the whole word: 'osd' mustn't match the 'osd' prefix in 'osd_hosts'
>>> replace_grafana_expr_variables('metric{name~="$osd_hosts|$other|$osd"}', \
'osd', 'replacement')
'metric{name~="$osd_hosts|$other|replacement"}'
>>> replace_grafana_expr_variables('metric{name~="$osd_hosts|$other|$osd"}', \
'other', 'replacement')
'metric{name~="$osd_hosts|replacement|$osd"}'
    It only replaces words that have the dollar prefix
>>> replace_grafana_expr_variables('metric{name~="no_dollar|$other|$osd"}', \
'no_dollar', 'replacement')
'metric{name~="no_dollar|$other|$osd"}'
It shouldn't replace the next char after the variable (positive lookahead test).
>>> replace_grafana_expr_variables('metric{name~="$osd"}', \
'osd', 'replacement')
'metric{name~="replacement"}'
"""
regex = fr'\${variable}(?=\W)'
new_expr = re.sub(regex, fr'{value}', expr)
return new_expr
| 4,549 | 40.363636 | 99 | py |
null | ceph-main/monitoring/ceph-mixin/tests_dashboards/features/__init__.py | 0 | 0 | 0 | py |
|
null | ceph-main/monitoring/ceph-mixin/tests_dashboards/features/environment.py | # type: ignore[no-redef]
# pylint: disable=E0611,W0613,E0102
import copy
from behave import given, then, when
from prettytable import PrettyTable
from tests_dashboards import PromqlTest
from tests_dashboards.util import get_dashboards_data, resolve_time_and_unit
class GlobalContext:
def __init__(self):
self.tested_queries_count = 0
self.promql_expr_test = None
self.data = get_dashboards_data()
self.query_map = self.data['queries']
def reset_promql_test(self):
self.promql_expr_test = PromqlTest()
self.promql_expr_test.variables = copy.copy(self.data['variables'])
def print_query_stats(self):
total = len(self.query_map)
table = PrettyTable()
table.field_names = ['Name', 'Queries', 'Tested', 'Cover']
def percent(tested, total):
return str(round((tested / total) * 100, 2)) + '%'
def file_name(path):
return path.split('/')[-1]
total = 0
tested = 0
for path, stat in self.data['stats'].items():
assert stat['total']
table.add_row([file_name(path), stat['total'], stat['tested'],
percent(stat['tested'], stat['total'])])
total += stat['total']
tested += stat['tested']
assert total
table.add_row(['Total', total, tested, percent(tested, total)])
print(table)
global_context = GlobalContext()
# Behave function overloading
# ===========================
def before_scenario(context, scenario):
global_context.reset_promql_test()
def after_scenario(context, scenario):
assert global_context.promql_expr_test.run_promtool()
def after_all(context):
global_context.print_query_stats()
@given("the following series")
def step_impl(context):
for row in context.table:
metric = row['metrics']
value = row['values']
global_context.promql_expr_test.add_series(metric, value)
@when('evaluation interval is `{interval}`')
def step_impl(context, interval):
interval_without_unit, unit = resolve_time_and_unit(interval)
if interval_without_unit is None:
        raise ValueError(f'Invalid interval time: {interval}. ' +
'A valid time looks like "1m" where you have a number plus a unit')
global_context.promql_expr_test.set_evaluation_interval(interval_without_unit, unit)
@when('interval is `{interval}`')
def step_impl(context, interval):
interval_without_unit, unit = resolve_time_and_unit(interval)
if interval_without_unit is None:
        raise ValueError(f'Invalid interval time: {interval}. ' +
'A valid time looks like "1m" where you have a number plus a unit')
global_context.promql_expr_test.set_interval(interval_without_unit, unit)
@when('evaluation time is `{eval_time}`')
def step_impl(context, eval_time):
eval_time_without_unit, unit = resolve_time_and_unit(eval_time)
if eval_time_without_unit is None:
        raise ValueError(f'Invalid evaluation time: {eval_time}. ' +
'A valid time looks like "1m" where you have a number plus a unit')
global_context.promql_expr_test.set_eval_time(eval_time_without_unit, unit)
@when('variable `{variable}` is `{value}`')
def step_impl(context, variable, value):
global_context.promql_expr_test.set_variable(variable, value)
@then('Grafana panel `{panel_name}` with legend `{legend}` shows')
def step_impl(context, panel_name, legend):
"""
This step can have an empty legend. As 'behave' doesn't provide a way
to say it's empty we use EMPTY to mark as empty.
"""
if legend == "EMPTY":
legend = ''
query_id = panel_name + '-' + legend
if query_id not in global_context.query_map:
raise KeyError((f'Query with legend {legend} in panel "{panel_name}"'
'couldn\'t be found'))
expr = global_context.query_map[query_id]['query']
global_context.promql_expr_test.set_expression(expr)
for row in context.table:
metric = row['metrics']
value = row['values']
global_context.promql_expr_test.add_exp_samples(metric, float(value))
path = global_context.query_map[query_id]['path']
global_context.data['stats'][path]['tested'] += 1
@then('query `{query}` produces')
def step_impl(context, query):
global_context.promql_expr_test.set_expression(query)
for row in context.table:
metric = row['metrics']
value = row['values']
global_context.promql_expr_test.add_exp_samples(metric, float(value))
| 4,649 | 33.191176 | 94 | py |
null | ceph-main/monitoring/ceph-mixin/tests_dashboards/features/steps/__init__.py | # This file and steps files is needed even if its empty because of 'behave' :(
| 79 | 39 | 78 | py |
null | ceph-main/monitoring/grafana/build/README.md | # Building the ceph-grafana container image
From Nautilus onwards, grafana is embedded into the mgr/dashboard UI and uses two discrete grafana plugins to provide visualisations within the UI. To better support disconnected installs, and provide a more tested configuration you may use the Makefile, in this directory, to (re)generate the grafana containers based on each Ceph release.
The versions of grafana, and the plugins are defined in the script so testing can be done against a known configuration.
## Container
The current implementation uses buildah with a CentOS8 base image.
## Dependencies
Ensure you have the following dependencies installed on your system, before attempting to build the image(s)
- podman or docker
- buildah
- jq
- make
## Build Process
The Makefile supports the following invocations:
```
# make <-- create container with dashboards from master
# make all
# make ceph_version=octopus
# make ceph_version=nautilus
```
Once complete, a ```make all``` execution will provide the following containers on your system.
```
# podman images
REPOSITORY TAG IMAGE ID CREATED SIZE
localhost/ceph/ceph-grafana master 606fa5444fc6 14 minutes ago 497 MB
localhost/ceph-grafana master 606fa5444fc6 14 minutes ago 497 MB
localhost/ceph-grafana octopus 580b089c14e7 15 minutes ago 497 MB
localhost/ceph/ceph-grafana octopus 580b089c14e7 15 minutes ago 497 MB
localhost/ceph-grafana nautilus 3c91712dd26f 17 minutes ago 497 MB
localhost/ceph/ceph-grafana nautilus 3c91712dd26f 17 minutes ago 497 MB
registry.centos.org/centos 8 29d8fb6c94af 30 hours ago 223 MB
```
| 1,744 | 44.921053 | 340 | md |
null | ceph-main/monitoring/snmp/README.md | # SNMP schema
To show the [OID](https://en.wikipedia.org/wiki/Object_identifier)'s supported by the MIB, use the snmptranslate command. Here's an example:
```
snmptranslate -Pu -Tz -M ~/git/ceph/monitoring/snmp:/usr/share/snmp/mibs -m CEPH-MIB
```
*The `snmptranslate` command is in the net-snmp-utils package*
The MIB provides a NOTIFICATION only implementation since ceph doesn't have an SNMP
agent feature.
## Integration
The SNMP MIB is has been aligned to the Prometheus rules. Any rule that defines a
critical alert should have a corresponding oid in the CEPH-MIB.txt file. To generate
an SNMP notification, you must use an SNMP gateway that the Prometheus Alertmanager
service can forward alerts through to, via it's webhooks feature.
## SNMP Gateway
The recommended SNMP gateway is https://github.com/maxwo/snmp_notifier. This is a widely
used and generic SNMP gateway implementation written in go. It's usage (syntax and
parameters) is very similar to Prometheus, AlertManager and even node-exporter.
## SNMP OIDs
The main components of the Ceph MIB is can be broken down into discrete areas
```
internet private enterprise ceph ceph Notifications Prometheus Notification
org cluster (alerts) source Category
1.3.6.1 .4 .1 .50495 .1 .2 .1 .2 (Ceph Health)
.3 (MON)
.4 (OSD)
.5 (MDS)
.6 (MGR)
.7 (PGs)
.8 (Nodes)
.9 (Pools)
.10 (Rados)
.11 (cephadm)
.12 (prometheus)
```
Individual alerts are placed within the appropriate alert category. For example, to add
a notification relating to a MGR issue, you would use the oid 1.3.6.1.4.1.50495.1.2.1.6.x
The SNMP gateway also adds additional components to the SNMP notification ;
| Suffix | Description |
|--------|-------------|
| .1 | The oid |
| .2 | Severity of the alert. When an alert is resolved, severity is 'info', and the description is set to Status:OK|
| .3 | Text of the alert(s) |
| 2,746 | 48.945455 | 140 | md |
null | ceph-main/qa/find-used-ports.sh | #!/bin/bash
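# List every port number that appears exactly once as a hard-coded 127.0.0.1:<port> in the tree.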
git --no-pager grep -n '127.0.0.1:[0-9]\+' | sed -n 's/.*127.0.0.1:\([0-9]\+\).*/\1/p' | sort -n | uniq -u
| 120 | 29.25 | 106 | sh |
null | ceph-main/qa/loopall.sh | #!/usr/bin/env bash
set -ex
basedir=`echo $0 | sed 's/[^/]*$//g'`.
testdir="$1"
[ -n "$2" ] && logdir=$2 || logdir=$1
[ ${basedir:0:1} == "." ] && basedir=`pwd`/${basedir:1}
PATH="$basedir/src:$PATH"
[ -z "$testdir" ] || [ ! -d "$testdir" ] && echo "specify test dir" && exit 1
cd $testdir
while true
do
for test in `cd $basedir/workunits && find . -executable -type f | $basedir/../src/script/permute`
do
echo "------ running test $test ------"
pwd
[ -d $test ] && rm -r $test
mkdir -p $test
mkdir -p `dirname $logdir/$test.log`
test -e $logdir/$test.log && rm $logdir/$test.log
sh -c "cd $test && $basedir/workunits/$test" 2>&1 | tee $logdir/$test.log
done
done
| 689 | 22.793103 | 102 | sh |
null | ceph-main/qa/run-standalone.sh | #!/usr/bin/env bash
set -e
if [ ! -e CMakeCache.txt -o ! -d bin ]; then
echo 'run this from the build dir'
exit 1
fi
function get_cmake_variable() {
local variable=$1
grep "$variable" CMakeCache.txt | cut -d "=" -f 2
}
function get_python_path() {
python_common=$(realpath ../src/python-common)
echo $(realpath ../src/pybind):$(pwd)/lib/cython_modules/lib.3:$python_common
}
if [ `uname` = FreeBSD ]; then
# otherwise module prettytable will not be found
export PYTHONPATH=$(get_python_path):/usr/local/lib/python3.6/site-packages
exec_mode=+111
KERNCORE="kern.corefile"
COREPATTERN="core.%N.%P"
else
export PYTHONPATH=$(get_python_path)
exec_mode=/111
KERNCORE="kernel.core_pattern"
COREPATTERN="core.%e.%p.%t"
fi
function cleanup() {
if [ -n "$precore" ]; then
sudo sysctl -w "${KERNCORE}=${precore}"
fi
}
function finish() {
cleanup
exit 0
}
trap finish TERM HUP INT
PATH=$(pwd)/bin:$PATH
# add /sbin and /usr/sbin to PATH to find sysctl in those cases where the
# user's PATH does not get these directories by default (e.g., tumbleweed)
PATH=$PATH:/sbin:/usr/sbin
export LD_LIBRARY_PATH="$(pwd)/lib"
# TODO: Use getops
dryrun=false
if [[ "$1" = "--dry-run" ]]; then
dryrun=true
shift
fi
all=false
if [ "$1" = "" ]; then
all=true
fi
select=("$@")
location="../qa/standalone"
count=0
errors=0
userargs=""
precore="$(sysctl -n $KERNCORE)"
# If corepattern already set, avoid having to use sudo
if [ "$precore" = "$COREPATTERN" ]; then
precore=""
else
sudo sysctl -w "${KERNCORE}=${COREPATTERN}"
fi
# Clean out any cores in core target directory (currently .)
if ls $(dirname $(sysctl -n $KERNCORE)) | grep -q '^core\|core$' ; then
mkdir found.cores.$$ 2> /dev/null || true
for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
mv $i found.cores.$$
done
echo "Stray cores put in $(pwd)/found.cores.$$"
fi
ulimit -c unlimited
for f in $(cd $location ; find . -mindepth 2 -perm $exec_mode -type f)
do
f=$(echo $f | sed 's/\.\///')
if [[ "$all" = "false" ]]; then
found=false
for c in "${!select[@]}"
do
# Get command and any arguments of subset of tests to run
allargs="${select[$c]}"
arg1=$(echo "$allargs" | cut --delimiter " " --field 1)
# Get user args for this selection for use below
userargs="$(echo $allargs | cut -s --delimiter " " --field 2-)"
if [[ "$arg1" = $(basename $f) ]] || [[ "$arg1" = $(dirname $f) ]]; then
found=true
break
fi
if [[ "$arg1" = "$f" ]]; then
found=true
break
fi
done
if [[ "$found" = "false" ]]; then
continue
fi
fi
# Don't run test-failure.sh unless explicitly specified
if [ "$all" = "true" -a "$f" = "special/test-failure.sh" ]; then
continue
fi
cmd="$location/$f $userargs"
count=$(expr $count + 1)
echo "--- $cmd ---"
if [[ "$dryrun" != "true" ]]; then
if ! PATH=$PATH:bin \
CEPH_ROOT=.. \
CEPH_LIB=lib \
LOCALRUN=yes \
time -f "Elapsed %E (%e seconds)" $cmd ; then
echo "$f .............. FAILED"
errors=$(expr $errors + 1)
fi
fi
done
cleanup
if [ "$errors" != "0" ]; then
echo "$errors TESTS FAILED, $count TOTAL TESTS"
exit 1
fi
echo "ALL $count TESTS PASSED"
exit 0
| 3,517 | 23.774648 | 85 | sh |
null | ceph-main/qa/run_xfstests-obsolete.sh | #!/usr/bin/env bash
# Copyright (C) 2012 Dreamhost, LLC
#
# This is free software; see the source for copying conditions.
# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as
# published by the Free Software Foundation version 2.
# Usage:
# run_xfs_tests -t /dev/<testdev> -s /dev/<scratchdev> -f <fstype> <tests>
# - test device and scratch device will both get trashed
# - fstypes can be xfs, ext4, or btrfs (xfs default)
# - tests can be listed individually or in ranges: 1 3-5 8
# tests can also be specified by group: -g quick
#
# Exit status:
# 0: success
# 1: usage error
# 2: other runtime error
# 99: argument count error (programming error)
# 100: getopt error (internal error)
# Alex Elder <[email protected]>
# April 13, 2012
set -e
PROGNAME=$(basename $0)
# xfstests is downloaded from this git repository and then built.
# XFSTESTS_REPO="git://oss.sgi.com/xfs/cmds/xfstests.git"
XFSTESTS_REPO="git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git"
# Default command line option values
COUNT="1"
FS_TYPE="xfs"
SCRATCH_DEV="" # MUST BE SPECIFIED
TEST_DEV="" # MUST BE SPECIFIED
TESTS="-g auto" # The "auto" group is supposed to be "known good"
# rbd presents geometry information that causes mkfs.xfs to
# issue a warning. This option avoids this class of problems.
XFS_MKFS_OPTIONS="-l su=32k"
# Override the default test list with a list of tests known to pass
# until we can work through getting them all passing reliably.
TESTS="1-7 9 11-15 17 19-21 26-29 31-34 41 46-48 50-54 56 61 63-67 69-70 74-76"
TESTS="${TESTS} 78 79 84-89 91-92 100 103 105 108 110 116-121 124 126"
TESTS="${TESTS} 129-135 137-141 164-167 182 184 187-190 192 194"
TESTS="${TESTS} 196 199 201 203 214-216 220-227 234 236-238 241 243-249"
TESTS="${TESTS} 253 257-259 261 262 269 273 275 277 278 280 285 286"
# 275 was the highest available test as of 4/10/12.
# 289 was the highest available test as of 11/15/12.
######
# Some explanation of why tests have been excluded above:
#
# Test 008 was pulled because it contained a race condition leading to
# spurious failures.
#
# Test 049 was pulled because it caused a kernel fault.
# http://tracker.newdream.net/issues/2260
# Test 232 was pulled because it caused an XFS error
# http://tracker.newdream.net/issues/2302
#
# This test passes but takes a LONG time (1+ hours): 127
#
# These were not run for one (anticipated) reason or another:
# 010 016 030 035 040 044 057 058-060 072 077 090 093-095 097-099 104
# 112 113 122 123 125 128 142 147-163 168 175-178 180 185 191 193
# 195 197 198 207-213 217 228 230-233 235 239 240 252 254 255 264-266
# 270-272 276 278-279 281-284 288 289
#
# These tests all failed (produced output different from golden):
# 042 073 083 096 109 169 170 200 202 204-206 218 229 240 242 250
# 263 276 277 279 287
#
# The rest were not part of the "auto" group:
# 018 022 023 024 025 036 037 038 039 043 055 071 080 081 082 101
# 102 106 107 111 114 115 136 171 172 173 251 267 268
######
# print an error message and quit with non-zero status
function err() {
if [ $# -gt 0 ]; then
echo "" >&2
echo "${PROGNAME}: ${FUNCNAME[1]}: $@" >&2
fi
exit 2
}
# routine used to validate argument counts to all shell functions
function arg_count() {
local func
local want
local got
if [ $# -eq 2 ]; then
func="${FUNCNAME[1]}" # calling function
want=$1
got=$2
else
func="${FUNCNAME[0]}" # i.e., arg_count
want=2
got=$#
fi
[ "${want}" -eq "${got}" ] && return 0
echo "${PROGNAME}: ${func}: arg count bad (want ${want} got ${got})" >&2
exit 99
}
# validation function for repeat count argument
function count_valid() {
arg_count 1 $#
test "$1" -gt 0 # 0 is pointless; negative is wrong
}
# validation function for filesystem type argument
function fs_type_valid() {
arg_count 1 $#
case "$1" in
xfs|ext4|btrfs) return 0 ;;
*) return 1 ;;
esac
}
# validation function for device arguments
function device_valid() {
arg_count 1 $#
# Very simple testing--really should try to be more careful...
test -b "$1"
}
# print a usage message and quit
#
# if a message is supplied, print that first, and then exit
# with non-zero status
function usage() {
if [ $# -gt 0 ]; then
echo "" >&2
echo "$@" >&2
fi
echo "" >&2
echo "Usage: ${PROGNAME} <options> <tests>" >&2
echo "" >&2
echo " options:" >&2
echo " -h or --help" >&2
echo " show this message" >&2
echo " -c or --count" >&2
echo " iteration count (1 or more)" >&2
echo " -f or --fs-type" >&2
echo " one of: xfs, ext4, btrfs" >&2
echo " (default fs-type: xfs)" >&2
echo " -s or --scratch-dev (REQUIRED)" >&2
echo " name of device used for scratch filesystem" >&2
echo " -t or --test-dev (REQUIRED)" >&2
echo " name of device used for test filesystem" >&2
echo " tests:" >&2
echo " list of test numbers or ranges, e.g.:" >&2
echo " 1-9 11-15 17 19-21 26-28 31-34 41" >&2
echo " or possibly an xfstests test group, e.g.:" >&2
echo " -g quick" >&2
echo " (default tests: -g auto)" >&2
echo "" >&2
[ $# -gt 0 ] && exit 1
exit 0 # This is used for a --help
}
# parse command line arguments
function parseargs() {
# Short option flags
SHORT_OPTS=""
SHORT_OPTS="${SHORT_OPTS},h"
SHORT_OPTS="${SHORT_OPTS},c:"
SHORT_OPTS="${SHORT_OPTS},f:"
SHORT_OPTS="${SHORT_OPTS},s:"
SHORT_OPTS="${SHORT_OPTS},t:"
# Short option flags
LONG_OPTS=""
LONG_OPTS="${LONG_OPTS},help"
LONG_OPTS="${LONG_OPTS},count:"
LONG_OPTS="${LONG_OPTS},fs-type:"
LONG_OPTS="${LONG_OPTS},scratch-dev:"
LONG_OPTS="${LONG_OPTS},test-dev:"
TEMP=$(getopt --name "${PROGNAME}" \
--options "${SHORT_OPTS}" \
--longoptions "${LONG_OPTS}" \
-- "$@")
eval set -- "$TEMP"
while [ "$1" != "--" ]; do
case "$1" in
-h|--help)
usage
;;
-c|--count)
count_valid "$2" ||
usage "invalid count '$2'"
COUNT="$2"
shift
;;
-f|--fs-type)
fs_type_valid "$2" ||
usage "invalid fs_type '$2'"
FS_TYPE="$2"
shift
;;
-s|--scratch-dev)
device_valid "$2" ||
usage "invalid scratch-dev '$2'"
SCRATCH_DEV="$2"
shift
;;
-t|--test-dev)
device_valid "$2" ||
usage "invalid test-dev '$2'"
TEST_DEV="$2"
shift
;;
*)
exit 100 # Internal error
;;
esac
shift
done
shift
[ -n "${TEST_DEV}" ] || usage "test-dev must be supplied"
[ -n "${SCRATCH_DEV}" ] || usage "scratch-dev must be supplied"
[ $# -eq 0 ] || TESTS="$@"
}
################################################################
[ -z "$TESTDIR" ] && export TESTDIR="/tmp/cephtest"
# Set up some environment for normal teuthology test setup.
# This really should not be necessary but I found it was.
export CEPH_ARGS="--conf ${TESTDIR}/ceph.conf"
export CEPH_ARGS="${CEPH_ARGS} --keyring ${TESTDIR}/data/client.0.keyring"
export CEPH_ARGS="${CEPH_ARGS} --name client.0"
export LD_LIBRARY_PATH="${TESTDIR}/binary/usr/local/lib:${LD_LIBRARY_PATH}"
export PATH="${TESTDIR}/binary/usr/local/bin:${PATH}"
export PATH="${TESTDIR}/binary/usr/local/sbin:${PATH}"
################################################################
# Filesystem-specific mkfs options--set if not supplied
export XFS_MKFS_OPTIONS="${XFS_MKFS_OPTIONS:--f -l su=65536}"
export EXT4_MKFS_OPTIONS="${EXT4_MKFS_OPTIONS:--F}"
export BTRFS_MKFS_OPTION # No defaults
XFSTESTS_DIR="/var/lib/xfstests" # Where the tests live
# download, build, and install xfstests
function install_xfstests() {
arg_count 0 $#
local multiple=""
local ncpu
pushd "${TESTDIR}"
git clone "${XFSTESTS_REPO}"
cd xfstests-dev
# FIXME: use an older version before the tests were rearranged!
git reset --hard e5f1a13792f20cfac097fef98007610b422f2cac
ncpu=$(getconf _NPROCESSORS_ONLN 2>&1)
[ -n "${ncpu}" -a "${ncpu}" -gt 1 ] && multiple="-j ${ncpu}"
make realclean
make ${multiple}
make -k install
popd
}
# remove previously-installed xfstests files
function remove_xfstests() {
arg_count 0 $#
rm -rf "${TESTDIR}/xfstests-dev"
rm -rf "${XFSTESTS_DIR}"
}
# create a host options file that uses the specified devices
function setup_host_options() {
arg_count 0 $#
# Create mount points for the test and scratch filesystems
local test_dir="$(mktemp -d ${TESTDIR}/test_dir.XXXXXXXXXX)"
local scratch_dir="$(mktemp -d ${TESTDIR}/scratch_mnt.XXXXXXXXXX)"
# Write a host options file that uses these devices.
# xfstests uses the file defined by HOST_OPTIONS as the
# place to get configuration variables for its run, and
# all (or most) of the variables set here are required.
export HOST_OPTIONS="$(mktemp ${TESTDIR}/host_options.XXXXXXXXXX)"
cat > "${HOST_OPTIONS}" <<-!
# Created by ${PROGNAME} on $(date)
# HOST_OPTIONS="${HOST_OPTIONS}"
TEST_DEV="${TEST_DEV}"
SCRATCH_DEV="${SCRATCH_DEV}"
TEST_DIR="${test_dir}"
SCRATCH_MNT="${scratch_dir}"
FSTYP="${FS_TYPE}"
export TEST_DEV SCRATCH_DEV TEST_DIR SCRATCH_MNT FSTYP
#
export XFS_MKFS_OPTIONS="${XFS_MKFS_OPTIONS}"
!
# Now ensure we are using the same values
. "${HOST_OPTIONS}"
}
# remove the host options file, plus the directories it refers to
function cleanup_host_options() {
arg_count 0 $#
rm -rf "${TEST_DIR}" "${SCRATCH_MNT}"
rm -f "${HOST_OPTIONS}"
}
# run mkfs on the given device using the specified filesystem type
function do_mkfs() {
arg_count 1 $#
local dev="${1}"
local options
case "${FSTYP}" in
xfs) options="${XFS_MKFS_OPTIONS}" ;;
ext4) options="${EXT4_MKFS_OPTIONS}" ;;
btrfs) options="${BTRFS_MKFS_OPTIONS}" ;;
esac
"mkfs.${FSTYP}" ${options} "${dev}" ||
err "unable to make ${FSTYP} file system on device \"${dev}\""
}
# mount the given device on the given mount point
function do_mount() {
arg_count 2 $#
local dev="${1}"
local dir="${2}"
mount "${dev}" "${dir}" ||
err "unable to mount file system \"${dev}\" on \"${dir}\""
}
# unmount a previously-mounted device
function do_umount() {
arg_count 1 $#
local dev="${1}"
if mount | grep "${dev}" > /dev/null; then
if ! umount "${dev}"; then
err "unable to unmount device \"${dev}\""
fi
else
# Report it but don't error out
echo "device \"${dev}\" was not mounted" >&2
fi
}
# do basic xfstests setup--make and mount the test and scratch filesystems
function setup_xfstests() {
arg_count 0 $#
# TEST_DEV can persist across test runs, but for now we
# don't bother. I believe xfstests prefers its devices to
# have been already been formatted for the desired
# filesystem type--it uses blkid to identify things or
# something. So we mkfs both here for a fresh start.
do_mkfs "${TEST_DEV}"
do_mkfs "${SCRATCH_DEV}"
# I believe the test device is expected to be mounted; the
# scratch doesn't need to be (but it doesn't hurt).
do_mount "${TEST_DEV}" "${TEST_DIR}"
do_mount "${SCRATCH_DEV}" "${SCRATCH_MNT}"
}
# clean up changes made by setup_xfstests
function cleanup_xfstests() {
arg_count 0 $#
# Unmount these in case a test left them mounted (plus
# the corresponding setup function mounted them...)
do_umount "${TEST_DEV}"
do_umount "${SCRATCH_DEV}"
}
# top-level setup routine
function setup() {
arg_count 0 $#
setup_host_options
install_xfstests
setup_xfstests
}
# top-level (final) cleanup routine
function cleanup() {
arg_count 0 $#
cd /
cleanup_xfstests
remove_xfstests
cleanup_host_options
}
trap cleanup EXIT ERR HUP INT QUIT
# ################################################################
start_date="$(date)"
parseargs "$@"
setup
pushd "${XFSTESTS_DIR}"
for (( i = 1 ; i <= "${COUNT}" ; i++ )); do
[ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" starting at: $(date)"
./check ${TESTS} # Here we actually run the tests
status=$?
[ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" complete at: $(date)"
done
popd
# cleanup is called via the trap call, above
echo "This xfstests run started at: ${start_date}"
echo "xfstests run completed at: $(date)"
[ "${COUNT}" -gt 1 ] && echo "xfstests run consisted of ${COUNT} iterations"
exit "${status}"
| 12,288 | 25.77342 | 79 | sh |
null | ceph-main/qa/run_xfstests.sh | #!/usr/bin/env bash
# Copyright (C) 2012 Dreamhost, LLC
#
# This is free software; see the source for copying conditions.
# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as
# published by the Free Software Foundation version 2.
# Usage:
# run_xfstests -t /dev/<testdev> -s /dev/<scratchdev> [-f <fstype>] -- <tests>
# - test device and scratch device will both get trashed
# - fstypes can be xfs, ext4, or btrfs (xfs default)
# - tests can be listed individually: generic/001 xfs/008 xfs/009
# tests can also be specified by group: -g quick
#
# Exit status:
# 0: success
# 1: usage error
# 2: other runtime error
# 99: argument count error (programming error)
# 100: getopt error (internal error)
# Alex Elder <[email protected]>
# April 13, 2012
set -e
PROGNAME=$(basename $0)
# Default command line option values
COUNT="1"
EXPUNGE_FILE=""
DO_RANDOMIZE="" # false
FSTYP="xfs"
SCRATCH_DEV="" # MUST BE SPECIFIED
TEST_DEV="" # MUST BE SPECIFIED
TESTS="-g auto" # The "auto" group is supposed to be "known good"
# print an error message and quit with non-zero status
function err() {
if [ $# -gt 0 ]; then
echo "" >&2
echo "${PROGNAME}: ${FUNCNAME[1]}: $@" >&2
fi
exit 2
}
# routine used to validate argument counts to all shell functions
function arg_count() {
local func
local want
local got
if [ $# -eq 2 ]; then
func="${FUNCNAME[1]}" # calling function
want=$1
got=$2
else
func="${FUNCNAME[0]}" # i.e., arg_count
want=2
got=$#
fi
[ "${want}" -eq "${got}" ] && return 0
echo "${PROGNAME}: ${func}: arg count bad (want ${want} got ${got})" >&2
exit 99
}
# validation function for repeat count argument
function count_valid() {
arg_count 1 $#
test "$1" -gt 0 # 0 is pointless; negative is wrong
}
# validation function for filesystem type argument
function fs_type_valid() {
arg_count 1 $#
case "$1" in
xfs|ext4|btrfs) return 0 ;;
*) return 1 ;;
esac
}
# validation function for device arguments
function device_valid() {
arg_count 1 $#
# Very simple testing--really should try to be more careful...
test -b "$1"
}
# validation function for expunge file argument
function expunge_file_valid() {
arg_count 1 $#
test -s "$1"
}
# print a usage message and quit
#
# if a message is supplied, print that first, and then exit
# with non-zero status
function usage() {
if [ $# -gt 0 ]; then
echo "" >&2
echo "$@" >&2
fi
echo "" >&2
echo "Usage: ${PROGNAME} <options> -- <tests>" >&2
echo "" >&2
echo " options:" >&2
echo " -h or --help" >&2
echo " show this message" >&2
echo " -c or --count" >&2
echo " iteration count (1 or more)" >&2
echo " -f or --fs-type" >&2
echo " one of: xfs, ext4, btrfs" >&2
echo " (default fs-type: xfs)" >&2
echo " -r or --randomize" >&2
echo " randomize test order" >&2
echo " -s or --scratch-dev (REQUIRED)" >&2
echo " name of device used for scratch filesystem" >&2
echo " -t or --test-dev (REQUIRED)" >&2
echo " name of device used for test filesystem" >&2
echo " -x or --expunge-file" >&2
echo " name of file with list of tests to skip" >&2
echo " tests:" >&2
echo " list of test numbers, e.g.:" >&2
echo " generic/001 xfs/008 shared/032 btrfs/009" >&2
echo " or possibly an xfstests test group, e.g.:" >&2
echo " -g quick" >&2
echo " (default tests: -g auto)" >&2
echo "" >&2
[ $# -gt 0 ] && exit 1
exit 0 # This is used for a --help
}
# parse command line arguments
function parseargs() {
# Short option flags
SHORT_OPTS=""
SHORT_OPTS="${SHORT_OPTS},h"
SHORT_OPTS="${SHORT_OPTS},c:"
SHORT_OPTS="${SHORT_OPTS},f:"
SHORT_OPTS="${SHORT_OPTS},r"
SHORT_OPTS="${SHORT_OPTS},s:"
SHORT_OPTS="${SHORT_OPTS},t:"
SHORT_OPTS="${SHORT_OPTS},x:"
# Long option flags
LONG_OPTS=""
LONG_OPTS="${LONG_OPTS},help"
LONG_OPTS="${LONG_OPTS},count:"
LONG_OPTS="${LONG_OPTS},fs-type:"
LONG_OPTS="${LONG_OPTS},randomize"
LONG_OPTS="${LONG_OPTS},scratch-dev:"
LONG_OPTS="${LONG_OPTS},test-dev:"
LONG_OPTS="${LONG_OPTS},expunge-file:"
TEMP=$(getopt --name "${PROGNAME}" \
--options "${SHORT_OPTS}" \
--longoptions "${LONG_OPTS}" \
-- "$@")
eval set -- "$TEMP"
while [ "$1" != "--" ]; do
case "$1" in
-h|--help)
usage
;;
-c|--count)
count_valid "$2" ||
usage "invalid count '$2'"
COUNT="$2"
shift
;;
-f|--fs-type)
fs_type_valid "$2" ||
usage "invalid fs_type '$2'"
FSTYP="$2"
shift
;;
-r|--randomize)
DO_RANDOMIZE="t"
;;
-s|--scratch-dev)
device_valid "$2" ||
usage "invalid scratch-dev '$2'"
SCRATCH_DEV="$2"
shift
;;
-t|--test-dev)
device_valid "$2" ||
usage "invalid test-dev '$2'"
TEST_DEV="$2"
shift
;;
-x|--expunge-file)
expunge_file_valid "$2" ||
usage "invalid expunge-file '$2'"
EXPUNGE_FILE="$2"
shift
;;
*)
exit 100 # Internal error
;;
esac
shift
done
shift
[ -n "${TEST_DEV}" ] || usage "test-dev must be supplied"
[ -n "${SCRATCH_DEV}" ] || usage "scratch-dev must be supplied"
[ $# -eq 0 ] || TESTS="$@"
}
################################################################
# run mkfs on the given device using the specified filesystem type
function do_mkfs() {
arg_count 1 $#
local dev="${1}"
local options
case "${FSTYP}" in
xfs) options="-f" ;;
ext4) options="-F" ;;
btrfs) options="-f" ;;
esac
"mkfs.${FSTYP}" ${options} "${dev}" ||
err "unable to make ${FSTYP} file system on device \"${dev}\""
}
# top-level setup routine
function setup() {
arg_count 0 $#
wget -P "${TESTDIR}" http://download.ceph.com/qa/xfstests.tar.gz
tar zxf "${TESTDIR}/xfstests.tar.gz" -C "$(dirname "${XFSTESTS_DIR}")"
mkdir "${TEST_DIR}"
mkdir "${SCRATCH_MNT}"
do_mkfs "${TEST_DEV}"
}
# top-level (final) cleanup routine
function cleanup() {
arg_count 0 $#
# ensure teuthology can clean up the logs
chmod -R a+rw "${TESTDIR}/archive"
findmnt "${TEST_DEV}" && umount "${TEST_DEV}"
[ -d "${SCRATCH_MNT}" ] && rmdir "${SCRATCH_MNT}"
[ -d "${TEST_DIR}" ] && rmdir "${TEST_DIR}"
rm -rf "${XFSTESTS_DIR}"
rm -f "${TESTDIR}/xfstests.tar.gz"
}
# ################################################################
start_date="$(date)"
parseargs "$@"
[ -n "${TESTDIR}" ] || usage "TESTDIR env variable must be set"
[ -d "${TESTDIR}/archive" ] || usage "\$TESTDIR/archive directory must exist"
TESTDIR="$(readlink -e "${TESTDIR}")"
[ -n "${EXPUNGE_FILE}" ] && EXPUNGE_FILE="$(readlink -e "${EXPUNGE_FILE}")"
XFSTESTS_DIR="/var/lib/xfstests" # hardcoded into dbench binary
TEST_DIR="/mnt/test_dir"
SCRATCH_MNT="/mnt/scratch_mnt"
MKFS_OPTIONS=""
EXT_MOUNT_OPTIONS="-o block_validity,dioread_nolock"
trap cleanup EXIT ERR HUP INT QUIT
setup
export TEST_DEV
export TEST_DIR
export SCRATCH_DEV
export SCRATCH_MNT
export FSTYP
export MKFS_OPTIONS
export EXT_MOUNT_OPTIONS
pushd "${XFSTESTS_DIR}"
for (( i = 1 ; i <= "${COUNT}" ; i++ )); do
[ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" starting at: $(date)"
RESULT_BASE="${TESTDIR}/archive/results-${i}"
mkdir "${RESULT_BASE}"
export RESULT_BASE
EXPUNGE=""
[ -n "${EXPUNGE_FILE}" ] && EXPUNGE="-E ${EXPUNGE_FILE}"
RANDOMIZE=""
[ -n "${DO_RANDOMIZE}" ] && RANDOMIZE="-r"
# -T output timestamps
PATH="${PWD}/bin:${PATH}" ./check -T ${RANDOMIZE} ${EXPUNGE} ${TESTS}
findmnt "${TEST_DEV}" && umount "${TEST_DEV}"
[ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" complete at: $(date)"
done
popd
# cleanup is called via the trap call, above
echo "This xfstests run started at: ${start_date}"
echo "xfstests run completed at: $(date)"
[ "${COUNT}" -gt 1 ] && echo "xfstests run consisted of ${COUNT} iterations"
echo OK
| 8,000 | 23.694444 | 78 | sh |
null | ceph-main/qa/run_xfstests_qemu.sh | #!/usr/bin/env bash
#
# TODO switch to run_xfstests.sh (see run_xfstests_krbd.sh)
set -x
[ -n "${TESTDIR}" ] || export TESTDIR="/tmp/cephtest"
[ -d "${TESTDIR}" ] || mkdir "${TESTDIR}"
URL_BASE="https://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa"
SCRIPT="run_xfstests-obsolete.sh"
cd "${TESTDIR}"
curl -O "${URL_BASE}/${SCRIPT}"
# mark executable only if the file isn't empty since ./"${SCRIPT}"
# on an empty file would succeed
if [[ -s "${SCRIPT}" ]]; then
chmod +x "${SCRIPT}"
fi
TEST_DEV="/dev/vdb"
if [[ ! -b "${TEST_DEV}" ]]; then
TEST_DEV="/dev/sdb"
fi
SCRATCH_DEV="/dev/vdc"
if [[ ! -b "${SCRATCH_DEV}" ]]; then
SCRATCH_DEV="/dev/sdc"
fi
# tests excluded fail in the current testing vm regardless of whether
# rbd is used
./"${SCRIPT}" -c 1 -f xfs -t "${TEST_DEV}" -s "${SCRATCH_DEV}" \
1-7 9-17 19-26 28-49 51-61 63 66-67 69-79 83 85-105 108-110 112-135 \
137-170 174-191 193-204 206-217 220-227 230-231 233 235-241 243-249 \
252-259 261-262 264-278 281-286 289
STATUS=$?
rm -f "${SCRIPT}"
exit "${STATUS}"
| 1,050 | 23.44186 | 73 | sh |
null | ceph-main/qa/runallonce.sh | #!/usr/bin/env bash
set -ex
basedir=`echo $0 | sed 's/[^/]*$//g'`.
testdir="$1"
[ -n "$2" ] && logdir=$2 || logdir=$1
[ ${basedir:0:1} == "." ] && basedir=`pwd`/${basedir:1}
PATH="$basedir/src:$PATH"
[ -z "$testdir" ] || [ ! -d "$testdir" ] && echo "specify test dir" && exit 1
cd $testdir
for test in `cd $basedir/workunits && find . -executable -type f | $basedir/../src/script/permute`
do
echo "------ running test $test ------"
pwd
[ -d $test ] && rm -r $test
mkdir -p $test
mkdir -p `dirname $logdir/$test.log`
test -e $logdir/$test.log && rm $logdir/$test.log
sh -c "cd $test && $basedir/workunits/$test" 2>&1 | tee $logdir/$test.log
done
| 665 | 24.615385 | 98 | sh |
null | ceph-main/qa/runoncfuse.sh | #!/usr/bin/env bash
set -x
mkdir -p testspace
ceph-fuse testspace -m $1
./runallonce.sh testspace
killall ceph-fuse
| 118 | 12.222222 | 25 | sh |
null | ceph-main/qa/runonkclient.sh | #!/usr/bin/env bash
set -x
mkdir -p testspace
/bin/mount -t ceph $1 testspace
./runallonce.sh testspace
/bin/umount testspace
| 129 | 12 | 31 | sh |
null | ceph-main/qa/setup-chroot.sh | #!/usr/bin/env bash
die() {
echo ${@}
exit 1
}
usage()
{
cat << EOF
$0: sets up a chroot environment for building the ceph server
usage:
-h Show this message
-r [install_dir] location of the root filesystem to install to
example: -r /images/sepia/
-s [src_dir] location of the directory with the source code
example: -s ./src/ceph
EOF
}
cleanup() {
umount -l "${INSTALL_DIR}/mnt/tmp"
umount -l "${INSTALL_DIR}/proc"
umount -l "${INSTALL_DIR}/sys"
}
INSTALL_DIR=
SRC_DIR=
while getopts "hr:s:" OPTION; do
case $OPTION in
h) usage; exit 1 ;;
r) INSTALL_DIR=$OPTARG ;;
s) SRC_DIR=$OPTARG ;;
?) usage; exit
;;
esac
done
[ $EUID -eq 0 ] || die "This script uses chroot, which requires root permissions."
[ -d "${INSTALL_DIR}" ] || die "No such directory as '${INSTALL_DIR}'. \
You must specify an install directory with -r"
[ -d "${SRC_DIR}" ] || die "no such directory as '${SRC_DIR}'. \
You must specify a source directory with -s"
readlink -f ${SRC_DIR} || die "readlink failed on ${SRC_DIR}"
ABS_SRC_DIR=`readlink -f ${SRC_DIR}`
trap cleanup INT TERM EXIT
mount --bind "${ABS_SRC_DIR}" "${INSTALL_DIR}/mnt/tmp" || die "bind mount failed"
mount -t proc none "${INSTALL_DIR}/proc" || die "mounting proc failed"
mount -t sysfs none "${INSTALL_DIR}/sys" || die "mounting sys failed"
echo "$0: starting chroot."
echo "cd /mnt/tmp before building"
echo
chroot ${INSTALL_DIR} env HOME=/mnt/tmp /bin/bash
echo "$0: exiting chroot."
exit 0
| 1,636 | 23.80303 | 82 | sh |
null | ceph-main/qa/test_import.py | # try to import all .py files from a given directory
import glob
import os
import importlib
import importlib.util
import pytest
def _module_name(path):
task = os.path.splitext(path)[0]
parts = task.split(os.path.sep)
package = parts[0]
name = ''.join('.' + c for c in parts[1:])
return package, name
def _import_file(path):
package, mod_name = _module_name(path)
line = f'Importing {package}{mod_name} from {path}'
print(f'{line:<80}', end='')
mod_spec = importlib.util.find_spec(mod_name, package)
mod = mod_spec.loader.load_module(f'{package}{mod_name}')
if mod is None:
result = 'FAIL'
else:
result = 'DONE'
print(f'{result:>6}')
mod_spec.loader.exec_module(mod)
return result
def get_paths():
for g in ['tasks/**/*.py']:
for p in glob.glob(g, recursive=True):
yield p
@pytest.mark.parametrize("path", list(sorted(get_paths())))
def test_import(path):
assert _import_file(path) == 'DONE'
| 1,001 | 24.692308 | 61 | py |
null | ceph-main/qa/archs/aarch64.yaml | arch: aarch64
| 14 | 6.5 | 13 | yaml |
null | ceph-main/qa/archs/armv7.yaml | arch: armv7l
| 13 | 6 | 12 | yaml |
null | ceph-main/qa/archs/i686.yaml | arch: i686
| 11 | 5 | 10 | yaml |
null | ceph-main/qa/archs/x86_64.yaml | arch: x86_64
| 13 | 6 | 12 | yaml |
null | ceph-main/qa/btrfs/clone_range.c | #include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <string.h>
#include <linux/types.h>
#include "../../src/os/btrfs_ioctl.h"
#include <stdio.h>
#include <errno.h>
int main(int argc, char **argv)
{
struct btrfs_ioctl_clone_range_args ca;
int dfd;
int r;
if (argc < 6) {
printf("usage: %s <srcfn> <srcoffset> <srclen> <destfn> <destoffset>\n", argv[0]);
exit(1);
}
ca.src_fd = open(argv[1], O_RDONLY);
ca.src_offset = atoi(argv[2]);
ca.src_length = atoi(argv[3]);
	dfd = open(argv[4], O_WRONLY|O_CREAT, 0644);	/* O_CREAT needs an explicit mode */
ca.dest_offset = atoi(argv[5]);
r = ioctl(dfd, BTRFS_IOC_CLONE_RANGE, &ca);
printf("clone_range %s %lld %lld~%lld to %s %d %lld = %d %s\n",
argv[1], ca.src_fd,
ca.src_offset, ca.src_length,
argv[4], dfd,
ca.dest_offset, r, strerror(errno));
return r;
}
| 919 | 24.555556 | 84 | c |
null | ceph-main/qa/btrfs/create_async_snap.c | #include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <string.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include "../../src/os/btrfs_ioctl.h"
struct btrfs_ioctl_vol_args_v2 va;
int main(int argc, char **argv)
{
int fd;
int r;
if (argc != 3) {
printf("usage: %s <source subvol> <name>\n", argv[0]);
return 1;
}
printf("creating snap ./%s from %s\n", argv[2], argv[1]);
fd = open(".", O_RDONLY);
va.fd = open(argv[1], O_RDONLY);
va.flags = BTRFS_SUBVOL_CREATE_ASYNC;
strcpy(va.name, argv[2]);
r = ioctl(fd, BTRFS_IOC_SNAP_CREATE_V2, (unsigned long long)&va);
printf("result %d\n", r ? -errno:0);
return r;
}
| 757 | 20.657143 | 66 | c |
null | ceph-main/qa/btrfs/test_async_snap.c | #include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <string.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include "../../src/os/btrfs_ioctl.h"
struct btrfs_ioctl_vol_args_v2 va;
struct btrfs_ioctl_vol_args vold;
int max = 4;
void check_return(int r)
{
if (r < 0) {
printf("********* failed with %d %s ********\n", errno, strerror(errno));
exit(1);
}
}
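/* Randomly create (synchronously or asynchronously) and destroy snapshots named test.<N>
 * of the current directory, occasionally forcing and waiting on a transaction commit. */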
int main(int argc, char **argv)
{
int num = 1000;
if (argc > 1)
num = atoi(argv[1]);
printf("will do %d iterations\n", num);
int cwd = open(".", O_RDONLY);
printf("cwd = %d\n", cwd);
while (num-- > 0) {
if (rand() % 10 == 0) {
__u64 transid;
int r;
printf("sync starting\n");
r = ioctl(cwd, BTRFS_IOC_START_SYNC, &transid);
check_return(r);
printf("sync started, transid %lld, waiting\n", transid);
r = ioctl(cwd, BTRFS_IOC_WAIT_SYNC, &transid);
check_return(r);
printf("sync finished\n");
}
int i = rand() % max;
struct stat st;
va.fd = cwd;
sprintf(va.name, "test.%d", i);
va.transid = 0;
int r = stat(va.name, &st);
if (r < 0) {
if (rand() % 3 == 0) {
printf("snap create (sync) %s\n", va.name);
va.flags = 0;
r = ioctl(cwd, BTRFS_IOC_SNAP_CREATE_V2, &va);
check_return(r);
} else {
printf("snap create (async) %s\n", va.name);
va.flags = BTRFS_SUBVOL_CREATE_ASYNC;
r = ioctl(cwd, BTRFS_IOC_SNAP_CREATE_V2, &va);
check_return(r);
printf("snap created, transid %lld\n", va.transid);
if (rand() % 2 == 0) {
printf("waiting for async snap create\n");
r = ioctl(cwd, BTRFS_IOC_WAIT_SYNC, &va.transid);
check_return(r);
}
}
} else {
printf("snap remove %s\n", va.name);
vold.fd = va.fd;
strcpy(vold.name, va.name);
r = ioctl(cwd, BTRFS_IOC_SNAP_DESTROY, &vold);
check_return(r);
}
}
return 0;
}
| 2,148 | 24.583333 | 75 | c |
null | ceph-main/qa/btrfs/test_rmdir_async_snap.c | #include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <string.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include "../../src/os/btrfs_ioctl.h"
struct btrfs_ioctl_vol_args_v2 va;
struct btrfs_ioctl_vol_args vold;
int main(int argc, char **argv)
{
int num = 1000;
int i, r, fd;
char buf[30];
if (argc > 1)
num = atoi(argv[1]);
printf("will do %d iterations\n", num);
fd = open(".", O_RDONLY);
vold.fd = 0;
strcpy(vold.name, "current");
r = ioctl(fd, BTRFS_IOC_SUBVOL_CREATE, (unsigned long int)&vold);
printf("create current ioctl got %d\n", r ? errno:0);
if (r)
return 1;
for (i=0; i<num; i++) {
sprintf(buf, "current/dir.%d", i);
r = mkdir(buf, 0755);
printf("mkdir got %d\n", r ? errno:0);
if (r)
return 1;
}
va.fd = open("current", O_RDONLY);
va.flags = BTRFS_SUBVOL_CREATE_ASYNC;
for (i=0; i<num; i++) {
system("/bin/cp /boot/vmlinuz-3.2.0-ceph-00142-g9e98323 current/foo");
sprintf(buf, "current/dir.%d", i);
r = rmdir(buf);
printf("rmdir got %d\n", r ? errno:0);
if (r)
return 1;
if (i % 10) continue;
sprintf(va.name, "snap.%d", i);
r = ioctl(fd, BTRFS_IOC_SNAP_CREATE_V2, (unsigned long long)&va);
printf("ioctl got %d\n", r ? errno:0);
if (r)
return 1;
}
return 0;
}
| 1,373 | 20.809524 | 72 | c |
null | ceph-main/qa/cephfs/unshare_ns_mount.sh | #!/usr/bin/env bash
# This is a helper for mounting ceph-fuse/kernel clients inside an
# unshared network namespace; let's call it a netns container.
# With the netns container, you can easily suspend or resume the
# virtual network interface to simulate a hard shutdown of the
# client node for some test cases.
#
# netnsX netnsY netnsZ
# -------------- -------------- --------------
# | mount client | | mount client | | mount client |
# | default | ... | default | ... | default |
# |192.168.0.1/16| |192.168.0.2/16| |192.168.0.3/16|
# | veth0 | | veth0 | | veth0 |
# -------------- -------------- -------------
# | | |
# \ | brx.Y /
# \ ---------------------- /
# \ brx.X | ceph-brx | brx.Z /
# \------>| default |<------/
# | | 192.168.255.254/16 | |
# | ---------------------- |
# (suspend/resume) | (suspend/resume)
# -----------
# | Physical |
# | A.B.C.D/M |
# -----------
#
# By default it will use the 192.168.X.Y/16 private network IPs for
# the ceph-brx and netnses as above. You can also specify your own
# new ip/mask for the ceph-brx, like:
#
# $ unshare_ns_mount.sh --fuse /mnt/cephfs --brxip 172.19.100.100/12
#
# Then each netns will get a new ip from the ranges:
# [172.16.0.1 ~ 172.19.100.99]/12 and [172.19.100.101 ~ 172.31.255.254]/12
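#
# A typical end-to-end flow with this helper might look like the
# following (an illustrative sketch only; the monitor address and the
# /mnt/cephfs mountpoint are example values, not script defaults):
#
#   $ unshare_ns_mount.sh --fuse -m 192.168.0.1:6789 /mnt/cephfs
#   $ unshare_ns_mount.sh --suspend /mnt/cephfs   # cut the client off
#   $ unshare_ns_mount.sh --resume /mnt/cephfs    # reconnect it
#   $ unshare_ns_mount.sh --umount /mnt/cephfs    # unmount, delete netns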
usage() {
echo ""
echo "This will help to isolate the network namespace from OS for the mount client!"
echo ""
echo "usage: unshare_ns_mount.sh [OPTIONS [paramters]] [--brxip <ip_address/mask>]"
echo "OPTIONS:"
echo -e " --fuse <ceph-fuse options>"
echo -e "\tThe ceph-fuse command options"
echo -e "\t $ unshare_ns_mount.sh --fuse -m 192.168.0.1:6789 /mnt/cephfs -o nonempty"
echo ""
echo -e " --kernel <mount options>"
echo -e "\tThe mount command options"
echo -e "\t $ unshare_ns_mount.sh --kernel -t ceph 192.168.0.1:6789:/ /mnt/cephfs -o fs=a"
echo ""
echo -e " --suspend <mountpoint>"
echo -e "\tDown the veth interface in the network namespace"
echo -e "\t $ unshare_ns_mount.sh --suspend /mnt/cephfs"
echo ""
echo -e " --resume <mountpoint>"
echo -e "\tUp the veth interface in the network namespace"
echo -e "\t $ unshare_ns_mount.sh --resume /mnt/cephfs"
echo ""
echo -e " --umount <mountpoint>"
echo -e "\tUmount and delete the network namespace"
echo -e "\t $ unshare_ns_mount.sh --umount /mnt/cephfs"
echo ""
echo -e " --brxip <ip_address/mask>"
echo -e "\tSpecify ip/mask for ceph-brx and it only makes sense for --fuse/--kernel options"
echo -e "\t(default: 192.168.255.254/16, netns ip: 192.168.0.1/16 ~ 192.168.255.253/16)"
echo -e "\t $ unshare_ns_mount.sh --fuse -m 192.168.0.1:6789 /mnt/cephfs --brxip 172.19.255.254/12"
echo -e "\t $ unshare_ns_mount.sh --kernel 192.168.0.1:6789:/ /mnt/cephfs --brxip 172.19.255.254/12"
echo ""
echo -e " -h, --help"
echo -e "\tPrint help"
echo ""
}
CEPH_BRX=ceph-brx
CEPH_BRX_IP_DEF=192.168.255.254
NET_MASK_DEF=16
BRD_DEF=192.168.255.255
CEPH_BRX_IP=$CEPH_BRX_IP_DEF
NET_MASK=$NET_MASK_DEF
BRD=$BRD_DEF
mountpoint=""
new_netns=""
fuse_type=false
function get_mountpoint() {
for param in $@
do
if [ -d $param ]; then
# skipping "--client_mountpoint/-r root_directory"
# option for ceph-fuse command
if [ "$last" == "-r" -o "$last" == "--client_mountpoint" ]; then
last=$param
continue
fi
if [ "0$mountpoint" != "0" ]; then
echo "Oops: too many mountpiont options!"
exit 1
fi
mountpoint=$param
fi
last=$param
done
if [ "0$mountpoint" == "0" ]; then
echo "Oops: mountpoint path is not a directory or no mountpoint specified!"
exit 1
fi
}
function get_new_netns() {
# prune the repeating slashes:
# "/mnt///cephfs///" --> "/mnt/cephfs/"
__mountpoint=`echo "$mountpoint" | sed 's/\/\+/\//g'`
# prune the leading slashes
while [ ${__mountpoint:0:1} == "/" ]
do
__mountpoint=${__mountpoint:1}
done
# prune the last slashes
while [ ${__mountpoint: -1} == "/" ]
do
__mountpoint=${__mountpoint:0:-1}
done
# replace '/' with '-'
__mountpoint=${__mountpoint//\//-}
# "mnt/cephfs" --> "ceph-fuse-mnt-cephfs"
if [ "$1" == "--fuse" ]; then
new_netns=`echo ceph-fuse-$__mountpoint`
fuse_type=true
return
fi
# "mnt/cephfs" --> "ceph-kernel-mnt-cephfs"
if [ "$1" == "--kernel" ]; then
new_netns=`echo ceph-kernel-$__mountpoint`
return
fi
# we are in umount/suspend/resume routines
for ns in `ip netns list | awk '{print $1}'`
do
if [ "$ns" == "ceph-fuse-$__mountpoint" ]; then
new_netns=$ns
fuse_type=true
return
fi
if [ "$ns" == "ceph-kernel-$__mountpoint" ]; then
new_netns=$ns
return
fi
done
if [ "0$new_netns" == "0" ]; then
echo "Oops, netns 'ceph-{fuse/kernel}-$__mountpoint' does not exists!"
exit 1
fi
}
# the peer veth name will be "brx.$nsid" on host node
function get_netns_brx() {
get_new_netns
nsid=`ip netns list-id | grep "$new_netns" | awk '{print $2}'`
netns_veth=brx.$nsid
eval $1="$netns_veth"
}
function suspend_netns_veth() {
get_mountpoint $@
get_netns_brx brx
ip link set $brx down
exit 0
}
function resume_netns_veth() {
get_mountpoint $@
get_netns_brx brx
ip link set $brx up
exit 0
}
# help and usage
if [ $# == 0 -o "$1" == "-h" -o "$1" == "--help" ]; then
usage
exit 0
fi
# suspend the veth from network namespace
if [ $1 == "--suspend" ]; then
suspend_netns_veth $@
exit 0
fi
# resume the veth from network namespace
if [ $1 == "--resume" ]; then
resume_netns_veth $@
exit 0
fi
function ceph_umount() {
get_mountpoint $@
get_new_netns
if [ $fuse_type == true ]; then
nsenter --net=/var/run/netns/$new_netns fusermount -u $mountpoint 2>/dev/null
else
nsenter --net=/var/run/netns/$new_netns umount $mountpoint 2>/dev/null
fi
# let's wait for a while to let the umount operation
# to finish before deleting the netns
while [ 1 ]
do
for pid in `ip netns pids $new_netns 2>/dev/null`
do
name=`cat /proc/$pid/comm 2>/dev/null`
if [ "$name" == "ceph-fuse" ]; then
break
fi
done
if [ "$name" == "ceph-fuse" ]; then
name=""
usleep 100000
continue
fi
break
done
nsid=`ip netns list-id | grep "$new_netns" | awk '{print $2}'`
netns_brx=brx.$nsid
# brctl delif $CEPH_BRX $netns_brx 2>/dev/null
    nmcli connection down $netns_brx 2>/dev/null
nmcli connection delete $netns_brx 2>/dev/null
ip netns delete $new_netns 2>/dev/null
# if this is the last netns_brx, will delete
# the $CEPH_BRX and restore the OS configure
# rc=`brctl show ceph-brx 2>/dev/null | grep 'brx\.'|wc -l`
rc=`nmcli connection show 2>/dev/null | grep 'brx\.' | wc -l`
if [ $rc == 0 ]; then
ip link set $CEPH_BRX down 2>/dev/null
# brctl delbr $CEPH_BRX 2>/dev/null
nmcli connection delete $CEPH_BRX 2>/dev/null
# restore the ip forward
tmpfile=`ls /tmp/ | grep "$CEPH_BRX\."`
tmpfile=/tmp/$tmpfile
if [ ! -f $tmpfile ]; then
echo "Oops, the $CEPH_BRX.XXX temp file does not exist!"
else
save=`cat $tmpfile`
echo $save > /proc/sys/net/ipv4/ip_forward
rm -rf $tmpfile
fi
# drop the iptables NAT rules
host_nic=`route | grep default | awk '{print $8}'`
iptables -D FORWARD -o $host_nic -i $CEPH_BRX -j ACCEPT
iptables -D FORWARD -i $host_nic -o $CEPH_BRX -j ACCEPT
iptables -t nat -D POSTROUTING -s $CEPH_BRX_IP/$NET_MASK -o $host_nic -j MASQUERADE
fi
}
function get_brd_mask() {
first=`echo "$CEPH_BRX_IP" | awk -F. '{print $1}'`
second=`echo "$CEPH_BRX_IP" | awk -F. '{print $2}'`
third=`echo "$CEPH_BRX_IP" | awk -F. '{print $3}'`
fourth=`echo "$CEPH_BRX_IP" | awk -F. '{print $4}'`
if [ "$first" == "172" ]; then
second_max=31
else
second_max=255
fi
third_max=255
fourth_max=255
if [ $NET_MASK -lt 16 ]; then
let power=16-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
second=$((second&~m))
let second_max=$second+$m
elif [ $NET_MASK -lt 24 ]; then
let power=24-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
third=$((third&~m))
let third_max=$third+$m
second_max=$second
elif [ $NET_MASK -lt 32 ]; then
let power=32-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
fourth=$((fourth&~m))
let fourth_max=$fourth+$m
second_max=$second
third_max=$third
fi
BRD=$first.$second_max.$third_max.$fourth_max
}
# By default:
# The netns IPs will be 192.168.0.1 ~ 192.168.255.253,
# and 192.168.255.254 is reserved for $CEPH_BRX
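#
# For instance, under the defaults the first client netns would get
# veth0 = 192.168.0.1/16, the next 192.168.0.2/16, and so on, skipping
# any address already held by an existing netns. To check which IP a
# particular netns ended up with, something like the following works
# (the netns name "ceph-fuse-mnt-cephfs" is just an example for a
# ceph-fuse mount at /mnt/cephfs):
#
#   $ ip netns exec ceph-fuse-mnt-cephfs ip addr show veth0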
function get_new_ns_ip() {
first=`echo "$CEPH_BRX_IP" | awk -F. '{print $1}'`
second=`echo "$CEPH_BRX_IP" | awk -F. '{print $2}'`
third=`echo "$CEPH_BRX_IP" | awk -F. '{print $3}'`
fourth=`echo "$CEPH_BRX_IP" | awk -F. '{print $4}'`
if [ "$first" == ""172 ]; then
second_max=31
else
second_max=255
fi
third_max=255
fourth_max=254
if [ $NET_MASK -lt 16 ]; then
let power=16-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
second=$((second&~m))
let second_max=$second+$m
third=0
fourth=1
elif [ $NET_MASK -lt 24 ]; then
let power=24-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
third=$((third&~m))
let third_max=$third+$m
second_max=$second
fourth=1
elif [ $NET_MASK -lt 32 ]; then
let power=32-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
fourth=$((fourth&~m))
let fourth+=1
let fourth_max=$fourth+$m-1
second_max=$second
third_max=$third
fi
while [ $second -le $second_max -a $third -le $third_max -a $fourth -le $fourth_max ]
do
conflict=false
# check from the existing network namespaces
for netns in `ip netns list | awk '{print $1}'`
do
ip=`ip netns exec $netns ip addr | grep "inet " | grep "veth0"`
ip=`echo "$ip" | awk '{print $2}' | awk -F/ '{print $1}'`
if [ "0$ip" == "0" ]; then
continue
fi
if [ "$first.$second.$third.$fourth" == "$ip" ]; then
conflict=true
let fourth+=1
if [ $fourth -le $fourth_max ]; then
break
fi
fourth=0
let third+=1
if [ $third -le $third_max ]; then
break
fi
third=0
let second+=1
if [ $second -le $second_max ]; then
break
fi
echo "Oops: we have ran out of the ip addresses!"
exit 1
fi
done
# have we found one ?
if [ $conflict == false ]; then
break
fi
done
ip=$first.$second.$third.$fourth
max=$first.$second_max.$third_max.$fourth_max
if [ "$ip" == "$max" ]; then
echo "Oops: we have ran out of the ip addresses!"
exit 1
fi
eval $1="$ip"
}
function check_valid_private_ip() {
first=`echo "$1" | awk -F. '{print $1}'`
second=`echo "$1" | awk -F. '{print $2}'`
# private network class A 10.0.0.0 - 10.255.255.255
if [ "$first" == "10" -a $NET_MASK -ge 8 ]; then
return
fi
# private network class B 172.16.0.0 - 172.31.255.255
if [ "$first" == "172" -a $second -ge 16 -a $second -le 31 -a $NET_MASK -ge 12 ]; then
return
fi
# private network class C 192.168.0.0 - 192.168.255.255
if [ "$first" == "192" -a "$second" == "168" -a $NET_MASK -ge 16 ]; then
return
fi
echo "Oops: invalid private ip address '$CEPH_BRX_IP/$NET_MASK'!"
exit 1
}
function setup_bridge_and_nat() {
# check and parse the --brxip parameter
is_brxip=false
for ip in $@
do
if [ "$ip" == "--brxip" ]; then
is_brxip=true
continue
fi
if [ $is_brxip == true ]; then
new_brxip=$ip
break
fi
done
    # if the $CEPH_BRX already exists, check the new brxip against it;
    # if they do not match, fail without doing anything.
rc=`ip addr | grep "inet " | grep " $CEPH_BRX"`
if [ "0$rc" != "0" ]; then
existing_brxip=`echo "$rc" | awk '{print $2}'`
if [ "0$new_brxip" != "0" -a "$existing_brxip" != "$new_brxip" ]; then
echo "Oops: conflict with the existing $CEPH_BRX ip '$existing_brxip', new '$new_brxip'!"
exit 1
fi
CEPH_BRX_IP=`echo "$existing_brxip" | awk -F/ '{print $1}'`
NET_MASK=`echo "$existing_brxip" | awk -F/ '{print $2}'`
get_brd_mask
return
fi
    # if this is the first time the script is run, or no network
    # namespace exists yet, we need to set up the $CEPH_BRX; if no
    # --brxip is specified we will use the default
    # $CEPH_BRX_IP/$NET_MASK
if [ "0$new_brxip" != "0" ]; then
CEPH_BRX_IP=`echo "$new_brxip" | awk -F/ '{print $1}'`
NET_MASK=`echo "$new_brxip" | awk -F/ '{print $2}'`
get_brd_mask
check_valid_private_ip $CEPH_BRX_IP
fi
# brctl addbr $CEPH_BRX
nmcli connection add type bridge con-name $CEPH_BRX ifname $CEPH_BRX stp no
# ip link set $CEPH_BRX up
# ip addr add $CEPH_BRX_IP/$NET_MASK brd $BRD dev $CEPH_BRX
nmcli connection modify $CEPH_BRX ipv4.addresses $CEPH_BRX_IP/$NET_MASK ipv4.method manual
nmcli connection up $CEPH_BRX
# setup the NAT
rm -rf /tmp/ceph-brx.*
tmpfile=$(mktemp /tmp/ceph-brx.XXXXXXXX)
save=`cat /proc/sys/net/ipv4/ip_forward`
echo $save > $tmpfile
echo 1 > /proc/sys/net/ipv4/ip_forward
host_nic=`route | grep default | awk '{print $8}'`
iptables -A FORWARD -o $host_nic -i $CEPH_BRX -j ACCEPT
iptables -A FORWARD -i $host_nic -o $CEPH_BRX -j ACCEPT
iptables -t nat -A POSTROUTING -s $CEPH_BRX_IP/$NET_MASK -o $host_nic -j MASQUERADE
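    # (debugging hint, not required by the script: the rules added
    # above can be inspected later with "iptables -S FORWARD" and
    # "iptables -t nat -S POSTROUTING")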
}
function __ceph_mount() {
    # for some options, like '-t' in the mount command, the nsenter
    # command would take them over, so it is hard to pass them
    # directly to the netns.
    # here we will create one temp file with the execute bit set
tmpfile=$(mktemp /tmp/ceph-nsenter.XXXXXXXX)
chmod +x $tmpfile
if [ "$1" == "--kernel" ]; then
cmd=`echo "$@" | sed 's/--kernel/mount/'`
else
cmd=`echo "$@" | sed 's/--fuse/ceph-fuse/'`
fi
# remove the --brxip parameter
cmd=`echo "$cmd" | sed 's/--brxip.*\/[0-9]* //'`
    # enter $new_netns and run the ceph client mount;
    # we can't use 'ip netns exec' here because it
    # would unshare the mount namespace.
echo "$cmd" > $tmpfile
nsenter --net=/var/run/netns/$new_netns /bin/bash $tmpfile ; echo $? > $tmpfile
rc=`cat $tmpfile`
rm -f $tmpfile
# fall back
if [ $rc != 0 ]; then
m=$mountpoint
mountpoint=""
ceph_umount $m
fi
}
function get_new_nsid() {
    # get one unique netns id
uniq_id=0
while [ 1 ]
do
rc=`ip netns list-id | grep "nsid $uniq_id "`
if [ "0$rc" == "0" ]; then
break
fi
let uniq_id+=1
done
eval $1="$uniq_id"
}
function ceph_mount() {
get_mountpoint $@
setup_bridge_and_nat $@
get_new_netns $1
rc=`ip netns list | grep "$new_netns" | awk '{print $1}'`
if [ "0$rc" != "0" ]; then
echo "Oops: the netns "$new_netns" already exists!"
exit 1
fi
get_new_nsid new_nsid
# create a new network namespace
ip netns add $new_netns
ip netns set $new_netns $new_nsid
get_new_ns_ip ns_ip
if [ 0"$ns_ip" == "0" ]; then
echo "Oops: there is no ip address could be used any more!"
exit 1
fi
# veth interface in netns
ns_veth=veth0
netns_brx=brx.$new_nsid
# setup veth interfaces
ip link add $ns_veth netns $new_netns type veth peer name $netns_brx
ip netns exec $new_netns ip addr add $ns_ip/$NET_MASK brd $BRD dev $ns_veth
ip netns exec $new_netns ip link set $ns_veth up
ip netns exec $new_netns ip link set lo up
ip netns exec $new_netns ip route add default via $CEPH_BRX_IP
# bring up the bridge interface and join it to $CEPH_BRX
# brctl addif $CEPH_BRX $netns_brx
nmcli connection add type bridge-slave con-name $netns_brx ifname $netns_brx master $CEPH_BRX
nmcli connection up $netns_brx
# ip link set $netns_brx up
__ceph_mount $@
}
if [ "$1" == "--umount" ]; then
ceph_umount $@
exit 0
fi
# mount in the netns
if [ "$1" != "--kernel" -a "$1" != "--fuse" ]; then
echo "Oops: invalid mount options '$1'!"
exit 1
fi
ceph_mount $@
| 17,928 | 29.132773 | 105 | sh |
null | ceph-main/qa/cephfs/begin/0-install.yaml | tasks:
- install:
extra_packages:
rpm:
- python3-cephfs
- cephfs-top
- cephfs-mirror
deb:
- python3-cephfs
- cephfs-shell
- cephfs-top
- cephfs-mirror
# For kernel_untar_build workunit
extra_system_packages:
deb:
- bison
- flex
- libelf-dev
- libssl-dev
- network-manager
- iproute2
- util-linux
# for xfstests-dev
- dump
- indent
# for fsx
- libaio-dev
- libtool-bin
- uuid-dev
- xfslibs-dev
# for postgres
- postgresql
- postgresql-client
- postgresql-common
- postgresql-contrib
rpm:
- bison
- flex
- elfutils-libelf-devel
- openssl-devel
- NetworkManager
- iproute
- util-linux
# for xfstests-dev
- libacl-devel
- libaio-devel
- libattr-devel
- libtool
- libuuid-devel
- xfsdump
- xfsprogs
- xfsprogs-devel
# for fsx
- libaio-devel
- libtool
- libuuid-devel
- xfsprogs-devel
# for postgres
- postgresql
- postgresql-server
- postgresql-contrib
syslog:
ignorelist:
- WARNING*.*check_session_state
- WARNING*.*__ceph_remove_cap
| 1,408 | 20.348485 | 39 | yaml |
null | ceph-main/qa/cephfs/begin/1-ceph.yaml | log-rotate:
ceph-mds: 10G
ceph-osd: 10G
tasks:
- ceph:
| 61 | 9.333333 | 15 | yaml |
null | ceph-main/qa/cephfs/begin/2-logrotate.yaml | log-rotate:
ceph-mds: 10G
ceph-osd: 10G
| 44 | 10.25 | 15 | yaml |
null | ceph-main/qa/cephfs/clusters/1-mds-1-client-coloc.yaml | roles:
- [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3, client.0]
- [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7]
openstack:
- volumes: # attached to each instance
count: 4
size: 20 # GB
- machine:
disk: 200 # GB
| 239 | 23 | 61 | yaml |
null | ceph-main/qa/cephfs/clusters/1-mds-1-client-micro.yaml | roles:
- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2, osd.3]
- [client.0]
openstack:
- volumes: # attached to each instance
count: 4
size: 10 # GB
| 167 | 20 | 65 | yaml |
null | ceph-main/qa/cephfs/clusters/1-mds-1-client.yaml | roles:
- [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3]
- [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7]
- [client.0]
openstack:
- volumes: # attached to each instance
count: 4
size: 20 # GB
- machine:
disk: 200 # GB
| 249 | 21.727273 | 58 | yaml |
null | ceph-main/qa/cephfs/clusters/1-mds-2-client-coloc.yaml | roles:
- [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3, client.0]
- [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7, client.1]
openstack:
- volumes: # attached to each instance
count: 4
size: 20 # GB
- machine:
disk: 200 # GB
| 249 | 24 | 68 | yaml |
null | ceph-main/qa/cephfs/clusters/1-mds-2-client-micro.yaml | roles:
- [mon.a, mon.b, mon.c, mgr.x, mgr.y, mds.a, mds.b, mds.c, osd.0, osd.1, osd.2, osd.3]
- [client.0]
- [client.1]
openstack:
- volumes: # attached to each instance
count: 4
size: 10 # GB
| 201 | 21.444444 | 86 | yaml |
null | ceph-main/qa/cephfs/clusters/1-mds-2-client.yaml | roles:
- [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3]
- [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7]
- [client.0]
- [client.1]
openstack:
- volumes: # attached to each instance
count: 4
size: 30 # GB
- machine:
disk: 200 # GB
| 262 | 20.916667 | 58 | yaml |
null | ceph-main/qa/cephfs/clusters/1-mds-3-client.yaml | roles:
- [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
- [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7]
- [client.0]
- [client.1]
- [client.2]
openstack:
- volumes: # attached to each instance
count: 4
size: 30 # GB
- machine:
disk: 200 # GB
| 268 | 19.692308 | 58 | yaml |
null | ceph-main/qa/cephfs/clusters/1-mds-4-client-coloc.yaml | roles:
- [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3, client.0, client.1]
- [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7, client.2, client.3]
openstack:
- volumes: # attached to each instance
count: 4
size: 30 # GB
- machine:
disk: 200 # GB
| 269 | 26 | 78 | yaml |
null | ceph-main/qa/cephfs/clusters/1-mds-4-client.yaml | roles:
- [mon.a, mgr.y, mds.a, mds.b, osd.0, osd.1, osd.2, osd.3]
- [mon.b, mon.c, mgr.x, mds.c, osd.4, osd.5, osd.6, osd.7]
- [client.0]
- [client.1]
- [client.2]
- [client.3]
openstack:
- volumes: # attached to each instance
count: 4
size: 30 # GB
- machine:
disk: 200 # GB
| 288 | 19.642857 | 58 | yaml |
null | ceph-main/qa/cephfs/clusters/1-node-1-mds-1-osd.yaml | roles:
- [mon.a, mgr.x, mds.a, osd.0, client.0]
openstack:
- volumes: # attached to each instance
count: 1
size: 5 # GB
- machine:
disk: 10 # GB
| 157 | 16.555556 | 40 | yaml |
null | ceph-main/qa/cephfs/clusters/1a11s-mds-1c-client-3node.yaml | roles:
- [mon.a, mgr.x, mds.a, mds.d, mds.g, mds.j, osd.0, osd.3, osd.6, osd.9, client.0]
- [mon.b, mgr.y, mds.b, mds.e, mds.h, mds.k, osd.1, osd.4, osd.7, osd.10]
- [mon.c, mgr.z, mds.c, mds.f, mds.i, mds.l, osd.2, osd.5, osd.8, osd.11]
openstack:
- volumes: # attached to each instance
count: 4
size: 20 # GB
- machine:
disk: 200 # GB
| 349 | 30.818182 | 82 | yaml |
null | ceph-main/qa/cephfs/clusters/1a2s-mds-1c-client-3node.yaml | roles:
- [mon.a, mgr.x, mds.a, osd.0, osd.3, osd.6, osd.9, client.0]
- [mon.b, mgr.y, mds.b, osd.1, osd.4, osd.7, osd.10]
- [mon.c, mgr.z, mds.c, osd.2, osd.5, osd.8, osd.11]
openstack:
- volumes: # attached to each instance
count: 4
size: 20 # GB
- machine:
disk: 200 # GB
| 286 | 25.090909 | 61 | yaml |
null | ceph-main/qa/cephfs/clusters/1a3s-mds-1c-client.yaml | roles:
- [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3, client.0]
- [mon.b, mon.c, mgr.x, mds.b, mds.d, osd.4, osd.5, osd.6, osd.7]
openstack:
- volumes: # attached to each instance
count: 4
size: 20 # GB
- machine:
disk: 200 # GB
| 253 | 24.4 | 68 | yaml |
null | ceph-main/qa/cephfs/clusters/1a3s-mds-2c-client.yaml | roles:
- [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3, client.0]
- [mon.b, mon.c, mgr.x, mds.b, mds.d, osd.4, osd.5, osd.6, osd.7, client.1]
openstack:
- volumes: # attached to each instance
count: 4
size: 20 # GB
- machine:
disk: 200 # GB
| 263 | 25.4 | 75 | yaml |
null | ceph-main/qa/cephfs/clusters/1a3s-mds-4c-client.yaml | roles:
- [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3, client.0, client.2]
- [mon.b, mon.c, mgr.x, mds.b, mds.d, osd.4, osd.5, osd.6, osd.7, client.1, client.3]
openstack:
- volumes: # attached to each instance
count: 4
size: 20 # GB
- machine:
disk: 200 # GB
| 283 | 27.4 | 85 | yaml |
null | ceph-main/qa/cephfs/clusters/1a5s-mds-1c-client-3node.yaml | roles:
- [mon.a, mgr.x, mds.a, mds.d, osd.0, osd.3, osd.6, osd.9, client.0]
- [mon.b, mgr.y, mds.b, mds.e, osd.1, osd.4, osd.7, osd.10]
- [mon.c, mgr.z, mds.c, mds.f, osd.2, osd.5, osd.8, osd.11]
openstack:
- volumes: # attached to each instance
count: 4
size: 20 # GB
- machine:
disk: 200 # GB
| 307 | 27 | 68 | yaml |
null | ceph-main/qa/cephfs/clusters/1a5s-mds-1c-client.yaml | roles:
- [mon.a, mgr.y, mds.a, mds.c, mds.e, osd.0, osd.1, osd.2, osd.3, client.0]
- [mon.b, mon.c, mgr.x, mds.b, mds.d, mds.f, osd.4, osd.5, osd.6, osd.7]
openstack:
- volumes: # attached to each instance
count: 4
size: 20 # GB
- machine:
disk: 200 # GB
| 267 | 25.8 | 75 | yaml |
null | ceph-main/qa/cephfs/clusters/3-mds.yaml | roles:
- [mon.a, mon.c, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
- [mon.b, mgr.x, mds.b, mds.c, osd.4, osd.5, osd.6, osd.7]
- [client.0, client.1]
overrides:
ceph:
cephfs:
max_mds: 3
openstack:
- volumes: # attached to each instance
count: 4
size: 30 # GB
- machine:
disk: 200 # GB
| 307 | 19.533333 | 58 | yaml |
null | ceph-main/qa/cephfs/clusters/9-mds.yaml | roles:
- [mon.a, mon.c, mgr.y, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2, osd.3]
- [mon.b, mgr.x, mds.e, mds.f, mds.g, mds.h, mds.i, osd.4, osd.5, osd.6, osd.7]
- [client.0, client.1]
overrides:
ceph:
cephfs:
max_mds: 9
openstack:
- volumes: # attached to each instance
count: 4
size: 30 # GB
- machine:
disk: 200 # GB
| 349 | 22.333333 | 79 | yaml |
null | ceph-main/qa/cephfs/clusters/fixed-2-ucephfs.yaml | roles:
- [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3, client.0]
- [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7]
openstack:
- volumes: # attached to each instance
count: 4
size: 30 # GB
- machine:
disk: 200 # GB
| 239 | 23 | 61 | yaml |
null | ceph-main/qa/cephfs/conf/client.yaml | overrides:
ceph:
conf:
client:
client mount timeout: 600
debug ms: 1
debug client: 20
rados mon op timeout: 900
rados osd op timeout: 900
| 190 | 18.1 | 33 | yaml |
null | ceph-main/qa/cephfs/conf/mds.yaml | overrides:
ceph:
conf:
mds:
debug mds: 20
debug mds balancer: 20
debug ms: 1
mds debug frag: true
mds debug scatterstat: true
mds op complaint time: 180
mds verify scatter: true
osd op complaint time: 180
rados mon op timeout: 900
rados osd op timeout: 900
| 349 | 22.333333 | 35 | yaml |
null | ceph-main/qa/cephfs/conf/mon.yaml | overrides:
ceph:
conf:
mon:
mon op complaint time: 120
| 75 | 11.666667 | 34 | yaml |
null | ceph-main/qa/cephfs/conf/osd.yaml | overrides:
ceph:
conf:
osd:
osd op complaint time: 180
| 75 | 11.666667 | 34 | yaml |
null | ceph-main/qa/cephfs/mount/fuse.yaml | teuthology:
postmerge:
- local function is_kupstream()
return false
end
- local function is_kdistro()
return false
end
- local function is_fuse()
return true
end
- local function syntax_version()
return ''
end
tasks:
- ceph-fuse:
| 282 | 15.647059 | 35 | yaml |
null | ceph-main/qa/cephfs/mount/kclient/mount.yaml | teuthology:
postmerge:
- local function is_kupstream()
return yaml.ktype == 'upstream'
end
- local function is_kdistro()
return yaml.ktype == 'distro'
end
- local function is_fuse()
return false
end
- local function syntax_version()
return yaml.overrides.kclient.syntax
end
tasks:
- kclient:
| 342 | 19.176471 | 42 | yaml |
null | ceph-main/qa/cephfs/mount/kclient/mount-syntax/v1.yaml | overrides:
kclient:
syntax: 'v1'
| 41 | 9.5 | 18 | yaml |
null | ceph-main/qa/cephfs/mount/kclient/mount-syntax/v2.yaml | overrides:
kclient:
syntax: 'v2'
| 41 | 9.5 | 18 | yaml |
null | ceph-main/qa/cephfs/mount/kclient/overrides/ms-die-on-skipped.yaml | overrides:
ceph:
conf:
global:
ms die on skipped message: false
| 84 | 13.166667 | 40 | yaml |
null | ceph-main/qa/cephfs/mount/kclient/overrides/distro/stock/k-stock.yaml | kernel:
client:
sha1: distro
ktype: distro
| 49 | 9 | 16 | yaml |
null | ceph-main/qa/cephfs/mount/kclient/overrides/distro/stock/rhel_8.yaml | .qa/distros/all/rhel_8.yaml | 27 | 27 | 27 | yaml |
null | ceph-main/qa/cephfs/mount/kclient/overrides/distro/testing/k-testing.yaml | kernel:
client:
branch: testing
ktype: upstream
| 54 | 10 | 19 | yaml |
null | ceph-main/qa/cephfs/objectstore-ec/bluestore-bitmap.yaml | ../../objectstore/bluestore-bitmap.yaml | 39 | 39 | 39 | yaml |
null | ceph-main/qa/cephfs/objectstore-ec/bluestore-comp-ec-root.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
cephfs:
ec_profile:
- m=2
- k=2
- crush-failure-domain=osd
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
debug rocksdb: 10
bluestore compression mode: aggressive
bluestore fsck on mount: true
# lower the full ratios since we can fill up a 100gb osd so quickly
mon osd full ratio: .9
mon osd backfillfull_ratio: .85
mon osd nearfull ratio: .8
osd failsafe full ratio: .95
# this doesn't work with failures bc the log writes are not atomic across the two backends
# bluestore bluefs env mirror: true
| 818 | 26.3 | 90 | yaml |
null | ceph-main/qa/cephfs/objectstore-ec/bluestore-comp.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
debug rocksdb: 10
bluestore compression mode: aggressive
bluestore fsck on mount: true
# lower the full ratios since we can fill up a 100gb osd so quickly
mon osd full ratio: .9
mon osd backfillfull_ratio: .85
mon osd nearfull ratio: .8
osd failsafe full ratio: .95
# this doesn't work with failures bc the log writes are not atomic across the two backends
# bluestore bluefs env mirror: true
| 724 | 29.208333 | 90 | yaml |
null | ceph-main/qa/cephfs/objectstore-ec/bluestore-ec-root.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
cephfs:
ec_profile:
- m=2
- k=2
- crush-failure-domain=osd
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
debug rocksdb: 10
bluestore fsck on mount: true
# lower the full ratios since we can fill up a 100gb osd so quickly
mon osd full ratio: .9
mon osd backfillfull_ratio: .85
mon osd nearfull ratio: .8
osd failsafe full ratio: .95
# this doesn't work with failures bc the log writes are not atomic across the two backends
# bluestore bluefs env mirror: true
ceph-deploy:
fs: xfs
bluestore: yes
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
debug rocksdb: 10
bluestore fsck on mount: true
# lower the full ratios since we can fill up a 100gb osd so quickly
mon osd full ratio: .9
mon osd backfillfull_ratio: .85
mon osd nearfull ratio: .8
osd failsafe full ratio: .95
| 1,250 | 27.431818 | 90 | yaml |
null | ceph-main/qa/cephfs/overrides/frag.yaml | overrides:
ceph:
conf:
mds:
mds bal fragment size max: 10000
mds bal merge size: 5
mds bal split bits: 3
mds bal split size: 100
| 174 | 16.5 | 40 | yaml |
null | ceph-main/qa/cephfs/overrides/ignorelist_health.yaml | overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(FS_DEGRADED\)
- \(MDS_FAILED\)
- \(MDS_DEGRADED\)
- \(FS_WITH_FAILED_MDS\)
- \(MDS_DAMAGE\)
- \(MDS_ALL_DOWN\)
- \(MDS_UP_LESS_THAN_MAX\)
- \(FS_INLINE_DATA_DEPRECATED\)
| 285 | 21 | 37 | yaml |
null | ceph-main/qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml | overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(OSD_DOWN\)
- \(OSD_
- but it is still running
# MDS daemon 'b' is not responding, replacing it as rank 0 with standby 'a'
- is not responding
| 233 | 22.4 | 75 | yaml |
null | ceph-main/qa/cephfs/overrides/osd-asserts.yaml | overrides:
ceph:
conf:
osd:
osd shutdown pgref assert: true
| 80 | 12.5 | 39 | yaml |
null | ceph-main/qa/cephfs/overrides/session_timeout.yaml | overrides:
ceph:
cephfs:
session_timeout: 300
| 58 | 10.8 | 26 | yaml |