Dataset columns:
repo: string (lengths 2 to 152)
file: string (lengths 15 to 239)
code: string (lengths 0 to 58.4M)
file_length: int64 (0 to 58.4M)
avg_line_length: float64 (0 to 1.81M)
max_line_length: int64 (0 to 12.7M)
extension_type: string (364 classes)
null
ceph-main/qa/suites/rados/objectstore/backends/objectcacher-stress.yaml
roles: - [mon.a, mgr.x, osd.0, osd.1, client.0] openstack: - volumes: # attached to each instance count: 2 size: 10 # GB tasks: - install: - ceph: fs: xfs - workunit: clients: all: - osdc/stress_objectcacher.sh
241
15.133333
40
yaml
null
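The objectcacher-stress.yaml row above stores the whole suite file flattened onto one line. A probable multi-line reading, with line breaks and indentation inferred from the usual teuthology suite layout rather than taken from the source, is:

```yaml
roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
openstack:
- volumes: # attached to each instance
    count: 2
    size: 10 # GB
tasks:
- install:
- ceph:
    fs: xfs
- workunit:
    clients:
      all:
        - osdc/stress_objectcacher.sh
```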
ceph-main/qa/suites/rados/objectstore/backends/objectstore-bluestore-a.yaml
roles: - [mon.a, mgr.x, osd.0, osd.1, client.0] openstack: - volumes: # attached to each instance count: 2 size: 10 # GB tasks: - install: - exec: client.0: - mkdir $TESTDIR/archive/ostest && cd $TESTDIR/archive/ostest && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-bluestore 20" ceph_test_objectstore --gtest_filter=*/1:-*SyntheticMatrixC* --gtest_catch_exceptions=0 - rm -rf $TESTDIR/archive/ostest
493
37
284
yaml
null
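Likewise, the objectstore-bluestore-a.yaml cell reads more naturally with its structure restored; the nesting below is inferred, while the keys and the long exec command are copied from the cell:

```yaml
roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
openstack:
- volumes: # attached to each instance
    count: 2
    size: 10 # GB
tasks:
- install:
- exec:
    client.0:
      - mkdir $TESTDIR/archive/ostest && cd $TESTDIR/archive/ostest && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-bluestore 20" ceph_test_objectstore --gtest_filter=*/1:-*SyntheticMatrixC* --gtest_catch_exceptions=0
      - rm -rf $TESTDIR/archive/ostest
```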
ceph-main/qa/suites/rados/objectstore/backends/objectstore-bluestore-b.yaml
roles: - [mon.a, mgr.x, osd.0, osd.1, client.0] openstack: - volumes: # attached to each instance count: 2 size: 10 # GB tasks: - install: - exec: client.0: - mkdir $TESTDIR/archive/ostest && cd $TESTDIR/archive/ostest && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-bluestore 20" ceph_test_objectstore --gtest_filter=*SyntheticMatrixC*/2 --gtest_catch_exceptions=0 - rm -rf $TESTDIR/archive/ostest
490
36.769231
281
yaml
null
ceph-main/qa/suites/rados/objectstore/backends/objectstore-memstore.yaml
roles: - [mon.a, mgr.x, osd.0, osd.1, client.0] openstack: - volumes: # attached to each instance count: 2 size: 10 # GB tasks: - install: - exec: client.0: - mkdir $TESTDIR/archive/ostest && cd $TESTDIR/archive/ostest && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-bluestore 20" ceph_test_objectstore --gtest_filter=*/0 --gtest_catch_exceptions=0 - rm -rf $TESTDIR/archive/ostest
473
35.461538
264
yaml
null
ceph-main/qa/suites/rados/perf/ceph.yaml
overrides: ceph: conf: global: osd client message cap: 5000 roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] tasks: - install: - ceph: fs: xfs wait-for-scrub: false log-ignorelist: - \(PG_ - \(OSD_ - \(OBJECT_ - overall HEALTH - ssh_keys:
301
14.894737
47
yaml
null
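A sketch of how the perf/ceph.yaml cell likely splits into overrides, roles, and tasks; the indentation is assumed, not authoritative:

```yaml
overrides:
  ceph:
    conf:
      global:
        osd client message cap: 5000
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
tasks:
- install:
- ceph:
    fs: xfs
    wait-for-scrub: false
    log-ignorelist:
      - \(PG_
      - \(OSD_
      - \(OBJECT_
      - overall HEALTH
- ssh_keys:
```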
ceph-main/qa/suites/rados/perf/openstack.yaml
openstack: - volumes: # attached to each instance count: 3 size: 30 # GB
87
16.6
40
yaml
null
ceph-main/qa/suites/rados/perf/ubuntu_latest.yaml
.qa/distros/supported/ubuntu_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/rados/perf/objectstore/bluestore-basic-min-osd-mem-target.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore osd memory target: 2147483648 # min recommended is 2_G bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true bdev enable discard: true bdev async discard: true
807
30.076923
90
yaml
null
ceph-main/qa/suites/rados/perf/objectstore/bluestore-bitmap.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true bluestore allocator: bitmap # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true bdev enable discard: true bdev async discard: true ceph-deploy: fs: xfs bluestore: yes conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 bdev enable discard: true bdev async discard: true
1,327
29.181818
90
yaml
null
ceph-main/qa/suites/rados/perf/objectstore/bluestore-comp.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore compression mode: aggressive bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true
724
29.208333
90
yaml
null
ceph-main/qa/suites/rados/perf/objectstore/bluestore-low-osd-mem-target.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore osd memory target: 1610612736 # reduced to 1.5_G bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true bdev enable discard: true bdev async discard: true
801
29.846154
90
yaml
null
ceph-main/qa/suites/rados/perf/objectstore/bluestore-stupid.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true bluestore allocator: stupid # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true bdev enable discard: true bdev async discard: true ceph-deploy: fs: xfs bluestore: yes conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 bdev enable discard: true bdev async discard: true
1,327
29.181818
90
yaml
null
ceph-main/qa/suites/rados/perf/scheduler/dmclock_1Shard_16Threads.yaml
overrides: ceph: conf: osd: osd op num shards: 1 osd op num threads per shard: 16 osd op queue: mclock_scheduler
149
17.75
40
yaml
null
ceph-main/qa/suites/rados/perf/scheduler/dmclock_default_shards.yaml
overrides: ceph: conf: osd: osd op queue: mclock_scheduler
79
12.333333
38
yaml
null
ceph-main/qa/suites/rados/perf/scheduler/wpq_default_shards.yaml
overrides: ceph: conf: osd: osd op queue: wpq
66
10.166667
25
yaml
null
ceph-main/qa/suites/rados/perf/settings/optimized.yaml
overrides: ceph: conf: mon: debug mon: "0/0" debug ms: "0/0" debug paxos: "0/0" osd: debug filestore: "0/0" debug journal: "0/0" debug ms: "0/0" debug osd: "0/0" global: auth client required: none auth cluster required: none auth service required: none auth supported: none debug lockdep: "0/0" debug context: "0/0" debug crush: "0/0" debug mds: "0/0" debug mds balancer: "0/0" debug mds locker: "0/0" debug mds log: "0/0" debug mds log expire: "0/0" debug mds migrator: "0/0" debug buffer: "0/0" debug timer: "0/0" debug filer: "0/0" debug striper: "0/0" debug objecter: "0/0" debug rados: "0/0" debug rbd: "0/0" debug rbd mirror: "0/0" debug rbd replay: "0/0" debug journaler: "0/0" debug objectcacher: "0/0" debug client: "0/0" debug osd: "0/0" debug optracker: "0/0" debug objclass: "0/0" debug filestore: "0/0" debug journal: "0/0" debug ms: "0/0" debug mon: "0/0" debug monc: "0/0" debug paxos: "0/0" debug tp: "0/0" debug auth: "0/0" debug crypto: "0/0" debug finisher: "0/0" debug heartbeatmap: "0/0" debug perfcounter: "0/0" debug rgw: "0/0" debug rgw sync: "0/0" debug civetweb: "0/0" debug javaclient: "0/0" debug asok: "0/0" debug throttle: "0/0" debug refs: "0/0" debug compressor: "0/0" debug bluestore: "0/0" debug bluefs: "0/0" debug bdev: "0/0" debug kstore: "0/0" debug rocksdb: "0/0" debug memdb: "0/0" debug fuse: "0/0" debug mgr: "0/0" debug mgrc: "0/0" debug dpdk: "0/0" debug eventtrace: "0/0"
1,970
25.635135
35
yaml
null
ceph-main/qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml
tasks: - cbt: benchmarks: librbdfio: op_size: [4096] time: 60 mode: ['randread'] norandommap: True vol_size: 4096 procs_per_volume: [1] volumes_per_client: [2] iodepth: [32] osd_ra: [4096] pool_profile: 'rbd' log_avg_msec: 100 cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: rbd: pg_size: 128 pgp_size: 128 replication: 3
508
19.36
31
yaml
null
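The cbt workload cells all follow one repeating shape. Reading fio_4K_rand_read.yaml with inferred indentation (benchmarks and cluster assumed to be siblings under the cbt task) gives roughly:

```yaml
tasks:
- cbt:
    benchmarks:
      librbdfio:
        op_size: [4096]
        time: 60
        mode: ['randread']
        norandommap: True
        vol_size: 4096
        procs_per_volume: [1]
        volumes_per_client: [2]
        iodepth: [32]
        osd_ra: [4096]
        pool_profile: 'rbd'
        log_avg_msec: 100
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        rbd:
          pg_size: 128
          pgp_size: 128
          replication: 3
```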
ceph-main/qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml
tasks: - cbt: benchmarks: librbdfio: op_size: [4096] time: 60 mode: ['randrw'] norandommap: True vol_size: 4096 procs_per_volume: [1] volumes_per_client: [2] iodepth: [32] osd_ra: [4096] pool_profile: 'rbd' log_avg_msec: 100 cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: rbd: pg_size: 128 pgp_size: 128 replication: 3
506
19.28
31
yaml
null
ceph-main/qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml
tasks: - cbt: benchmarks: librbdfio: op_size: [4194304] time: 60 mode: ['randread'] norandommap: True vol_size: 4096 procs_per_volume: [1] volumes_per_client: [2] iodepth: [32] osd_ra: [4096] pool_profile: 'rbd' log_avg_msec: 100 cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: rbd: pg_size: 128 pgp_size: 128 replication: 3
511
19.48
31
yaml
null
ceph-main/qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml
tasks: - cbt: benchmarks: librbdfio: op_size: [4194304] time: 60 mode: ['randrw'] norandommap: True vol_size: 4096 procs_per_volume: [1] volumes_per_client: [2] iodepth: [32] osd_ra: [4096] pool_profile: 'rbd' log_avg_msec: 100 cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: rbd: pg_size: 128 pgp_size: 128 replication: 3
509
19.4
31
yaml
null
ceph-main/qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml
tasks: - cbt: benchmarks: librbdfio: op_size: [4194304] time: 60 mode: ['randwrite'] norandommap: True vol_size: 4096 procs_per_volume: [1] volumes_per_client: [2] iodepth: [32] osd_ra: [4096] pool_profile: 'rbd' log_avg_msec: 100 cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: rbd: pg_size: 128 pgp_size: 128 replication: 3
512
19.52
31
yaml
null
ceph-main/qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml
tasks: - cbt: benchmarks: radosbench: concurrent_ops: 4 concurrent_procs: 2 op_size: [4096] pool_monitoring_list: - collectl pool_profile: 'replicated' run_monitoring_list: - collectl time: 60 write_only: false readmode: 'rand' cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: replicated: pg_size: 256 pgp_size: 256 replication: 'replicated'
527
20.12
35
yaml
null
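The radosbench workloads differ from the fio ones mainly in the benchmark block; a similarly inferred layout for radosbench_4K_rand_read.yaml:

```yaml
tasks:
- cbt:
    benchmarks:
      radosbench:
        concurrent_ops: 4
        concurrent_procs: 2
        op_size: [4096]
        pool_monitoring_list:
          - collectl
        pool_profile: 'replicated'
        run_monitoring_list:
          - collectl
        time: 60
        write_only: false
        readmode: 'rand'
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        replicated:
          pg_size: 256
          pgp_size: 256
          replication: 'replicated'
```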
ceph-main/qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml
tasks: - cbt: benchmarks: radosbench: concurrent_ops: 4 concurrent_procs: 2 op_size: [4096] pool_monitoring_list: - collectl pool_profile: 'replicated' run_monitoring_list: - collectl time: 60 write_only: false cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: replicated: pg_size: 256 pgp_size: 256 replication: 'replicated'
502
19.958333
35
yaml
null
ceph-main/qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml
tasks: - cbt: benchmarks: radosbench: concurrent_ops: 4 concurrent_procs: 2 op_size: [4194304] pool_monitoring_list: - collectl pool_profile: 'replicated' run_monitoring_list: - collectl time: 60 write_only: false readmode: 'rand' cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: replicated: pg_size: 256 pgp_size: 256 replication: 'replicated'
530
20.24
35
yaml
null
ceph-main/qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml
tasks: - cbt: benchmarks: radosbench: concurrent_ops: 4 concurrent_procs: 2 op_size: [4194304] pool_monitoring_list: - collectl pool_profile: 'replicated' run_monitoring_list: - collectl time: 60 write_only: false cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: replicated: pg_size: 256 pgp_size: 256 replication: 'replicated'
505
20.083333
35
yaml
null
ceph-main/qa/suites/rados/perf/workloads/radosbench_4M_write.yaml
tasks: - cbt: benchmarks: radosbench: concurrent_ops: 4 concurrent_procs: 2 op_size: [4194304] pool_monitoring_list: - collectl pool_profile: 'replicated' run_monitoring_list: - collectl time: 60 write_only: true cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: replicated: pg_size: 256 pgp_size: 256 replication: 'replicated'
504
20.041667
35
yaml
null
ceph-main/qa/suites/rados/perf/workloads/radosbench_omap_write.yaml
tasks: - radosbench: clients: [client.0] write-omap: True objectsize: 4096 size: 4096 time: 300
116
13.625
23
yaml
null
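radosbench_omap_write.yaml is small enough to restore almost mechanically; only the indentation below is assumed:

```yaml
tasks:
- radosbench:
    clients: [client.0]
    write-omap: True
    objectsize: 4096
    size: 4096
    time: 300
```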
ceph-main/qa/suites/rados/perf/workloads/sample_fio.yaml
tasks: - cbt: benchmarks: librbdfio: op_size: [4096] time: 60 mode: ['randwrite'] norandommap: True vol_size: 4096 procs_per_volume: [1] volumes_per_client: [2] iodepth: [32] osd_ra: [4096] pool_profile: 'rbd' log_avg_msec: 100 cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: rbd: pg_size: 128 pgp_size: 128 replication: 3
509
19.4
31
yaml
null
ceph-main/qa/suites/rados/perf/workloads/sample_radosbench.yaml
tasks: - cbt: benchmarks: radosbench: concurrent_ops: 4 concurrent_procs: 2 op_size: [4096] pool_monitoring_list: - collectl pool_profile: 'replicated' run_monitoring_list: - collectl time: 60 write_only: true cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: replicated: pg_size: 256 pgp_size: 256 replication: 'replicated'
501
19.916667
35
yaml
null
ceph-main/qa/suites/rados/rest/mgr-restful.yaml
openstack: - volumes: # attached to each instance count: 3 size: 10 # GB roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, mds.a, client.a] tasks: - install: - ceph: log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ - \(OSD_ - \(OBJECT_ - \(OSDMAP_FLAGS\) - exec: mon.a: - ceph restful create-key admin - ceph restful create-self-signed-cert - ceph restful restart - workunit: clients: client.a: - rest/test-restful.sh - exec: mon.a: - ceph restful delete-key admin - ceph restful list-keys | jq ".admin" | grep null
624
19.16129
56
yaml
null
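For mgr-restful.yaml, the task order in the cell (install, ceph, exec, workunit, exec) suggests the following structure; again, the nesting is inferred:

```yaml
openstack:
- volumes: # attached to each instance
    count: 3
    size: 10 # GB
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, mds.a, client.a]
tasks:
- install:
- ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(MGR_DOWN\)
      - \(PG_
      - \(OSD_
      - \(OBJECT_
      - \(OSDMAP_FLAGS\)
- exec:
    mon.a:
      - ceph restful create-key admin
      - ceph restful create-self-signed-cert
      - ceph restful restart
- workunit:
    clients:
      client.a:
        - rest/test-restful.sh
- exec:
    mon.a:
      - ceph restful delete-key admin
      - ceph restful list-keys | jq ".admin" | grep null
```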
ceph-main/qa/suites/rados/singleton-bluestore/rados.yaml
.qa/config/rados.yaml
21
21
21
yaml
null
ceph-main/qa/suites/rados/singleton-bluestore/all/cephtool.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: log-ignorelist: - but it is still running - had wrong client addr - had wrong cluster addr - must scrub before tier agent can activate - failsafe engaged, dropping updates - failsafe disengaged, no longer dropping updates - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(SMALLER_PG_NUM\) - \(SMALLER_PGP_NUM\) - \(CACHE_POOL_NO_HIT_SET\) - \(CACHE_POOL_NEAR_FULL\) - \(FS_WITH_FAILED_MDS\) - \(FS_DEGRADED\) - \(POOL_BACKFILLFULL\) - \(POOL_FULL\) - \(SMALLER_PGP_NUM\) - \(POOL_NEARFULL\) - \(POOL_APP_NOT_ENABLED\) - \(AUTH_BAD_CAPS\) - \(FS_INLINE_DATA_DEPRECATED\) - \(MON_DOWN\) - \(SLOW_OPS\) - slow request - workunit: clients: all: - cephtool - mon/pool_ops.sh
1,015
19.734694
53
yaml
null
ceph-main/qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml
.qa/objectstore_debug/bluestore-bitmap.yaml
43
43
43
yaml
null
ceph-main/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml
.qa/objectstore_debug/bluestore-comp-lz4.yaml
45
45
45
yaml
null
ceph-main/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml
.qa/objectstore_debug/bluestore-comp-snappy.yaml
48
48
48
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/rados.yaml
.qa/config/rados.yaml
21
21
21
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
openstack: - volumes: # attached to each instance count: 2 size: 10 # GB roles: - [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0] overrides: ceph: log-ignorelist: - MDS in read-only mode - force file system read-only - overall HEALTH_ - \(FS_DEGRADED\) - \(OSDMAP_FLAGS\) - \(OSD_FULL\) - \(MDS_READ_ONLY\) - \(POOL_FULL\) tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force - rgw: - client.0 - exec: client.0: - ceph_test_admin_socket_output --all
579
19.714286
55
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force fs: xfs log-ignorelist: - \(PG_AVAILABILITY\) - cram: clients: client.0: - src/test/cli-integration/balancer/misplaced.t
310
19.733333
55
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
openstack: - volumes: # attached to each instance count: 3 size: 10 # GB roles: - [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1] tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) conf: global: osd max object name len: 460 osd max object namespace len: 64 debug client: 20 debug mds: 20 debug ms: 1 - exec: client.0: - ceph osd pool create data_cache 4 - ceph osd tier add cephfs_data data_cache - ceph osd tier cache-mode data_cache writeback - ceph osd tier set-overlay cephfs_data data_cache - ceph osd pool set data_cache hit_set_type bloom - ceph osd pool set data_cache hit_set_count 8 - ceph osd pool set data_cache hit_set_period 3600 - ceph osd pool set data_cache min_read_recency_for_promote 0 - ceph-fuse: - exec: client.0: - sudo chmod 777 $TESTDIR/mnt.0/ - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5 - ls -al $TESTDIR/mnt.0/foo - truncate --size 0 $TESTDIR/mnt.0/foo - ls -al $TESTDIR/mnt.0/foo - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5 - ls -al $TESTDIR/mnt.0/foo - cp $TESTDIR/mnt.0/foo /tmp/foo - sync - rados -p data_cache ls - - sleep 10 - rados -p data_cache ls - - rados -p data_cache cache-flush-evict-all - rados -p data_cache ls - - sleep 1 - exec: client.1: - hexdump -C /tmp/foo | head - hexdump -C $TESTDIR/mnt.1/foo | head - cmp $TESTDIR/mnt.1/foo /tmp/foo
1,675
29.472727
65
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
openstack: - volumes: # attached to each instance count: 3 size: 10 # GB roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] overrides: ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) tasks: - install: - ceph: - workunit: clients: all: - cephtool/test_kvstore_tool.sh
446
17.625
55
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
openstack: - volumes: # attached to each instance count: 3 size: 10 # GB roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] tasks: - install: - workunit: clients: all: - post-file.sh
219
15.923077
47
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/crushdiff.yaml
openstack: - volumes: # attached to each instance count: 4 size: 10 # GB roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, client.0] overrides: ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) - \(PG_DEGRADED\) tasks: - install: - ceph: - workunit: clients: all: - rados/test_crushdiff.sh
469
17.8
55
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
openstack: - volumes: # attached to each instance count: 3 size: 10 # GB roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) conf: global: osd max object name len: 460 osd max object namespace len: 64 - exec: client.0: - ceph osd pool create base-pool 4 - ceph osd pool application enable base-pool rados - ceph osd pool create cache-pool 4 - ceph osd tier add base-pool cache-pool - ceph osd tier cache-mode cache-pool writeback - ceph osd tier set-overlay base-pool cache-pool - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1 - rbd import --image-format 2 $TESTDIR/foo base-pool/bar - rbd snap create base-pool/bar@snap - rados -p base-pool cache-flush-evict-all - rbd export base-pool/bar $TESTDIR/bar - rbd export base-pool/bar@snap $TESTDIR/snap - cmp $TESTDIR/foo $TESTDIR/bar - cmp $TESTDIR/foo $TESTDIR/snap - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
1,167
27.487805
60
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
# verify #13098 fix openstack: - volumes: # attached to each instance count: 3 size: 10 # GB roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] overrides: ceph: log-ignorelist: - is full - overall HEALTH_ - \(POOL_FULL\) - \(POOL_NEAR_FULL\) - \(CACHE_POOL_NO_HIT_SET\) - \(CACHE_POOL_NEAR_FULL\) tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force conf: global: osd max object name len: 460 osd max object namespace len: 64 - exec: client.0: - ceph osd pool create ec-ca 1 1 - ceph osd pool create ec 1 1 erasure default - ceph osd pool application enable ec rados - ceph osd tier add ec ec-ca - ceph osd tier cache-mode ec-ca readproxy - ceph osd tier set-overlay ec ec-ca - ceph osd pool set ec-ca hit_set_type bloom - ceph osd pool set-quota ec-ca max_bytes 20480000 - ceph osd pool set-quota ec max_bytes 20480000 - ceph osd pool set ec-ca target_max_bytes 20480000 - timeout 30 rados -p ec-ca bench 30 write || true - ceph osd pool set-quota ec-ca max_bytes 0 - ceph osd pool set-quota ec max_bytes 0
1,227
28.95122
57
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, client.0] tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force conf: osd: # we may land on ext4 osd max object name len: 400 osd max object namespace len: 64 log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - workunit: clients: all: - rados/test_health_warnings.sh
535
22.304348
96
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
openstack: - volumes: # attached to each instance count: 2 size: 10 # GB roles: - [mon.a, mgr.x, osd.0, osd.1, client.0] overrides: ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - \(OSDMAP_FLAGS\) - \(OSD_FULL\) - \(MDS_READ_ONLY\) - \(POOL_APP_NOT_ENABLED\) - large omap objects - Large omap object found - application not enabled conf: osd: osd scrub backoff ratio: 0 osd deep scrub large omap object value sum threshold: 8800000 osd deep scrub large omap object key threshold: 20000 tasks: - install: - ceph: - workunit: clients: all: - rados/test_large_omap_detection.py
743
23
69
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
openstack: - volumes: # attached to each instance count: 2 size: 10 # GB roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] overrides: ceph: log-ignorelist: - \(POOL_APP_NOT_ENABLED\) tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force - exec: client.0: - ceph_test_lazy_omap_stats
374
18.736842
55
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
roles: - [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0] overrides: ceph: log-ignorelist: - \(POOL_APP_NOT_ENABLED\) tasks: - install: extra_packages: deb: - libradosstriper-dev - librados-dev - libradospp-dev rpm: - libradosstriper-devel - librados-devel - libradospp-devel - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force - workunit: clients: all: - rados/test_librados_build.sh
510
19.44
55
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
roles: - [mon.a, mgr.x, osd.0, osd.1, client.0] tasks: - install: - exec: client.0: - ceph_test_async_driver - ceph_test_msgr openstack: - machine: disk: 40 # GB ram: 15000 # MB cpus: 1 volumes: # attached to each instance count: 0 size: 1 # GB overrides: ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force conf: client: debug ms: 20
447
17.666667
55
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
openstack: - volumes: # attached to each instance count: 3 size: 10 # GB roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 - - osd.3 - osd.4 - osd.5 tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - overall HEALTH_ - \(PG_ - \(OSD_ - \(OBJECT_ conf: osd: osd debug reject backfill probability: .3 osd min pg log entries: 25 osd max pg log entries: 100 osd max object name len: 460 osd max object namespace len: 64 - exec: client.0: - sudo ceph osd pool create foo 64 - sudo ceph osd pool application enable foo rados - rados -p foo bench 60 write -b 1024 --no-cleanup - sudo ceph osd pool set foo size 3 - sudo ceph osd out 0 1 - sleep: duration: 60 - exec: client.0: - sudo ceph osd in 0 1 - sleep: duration: 60 - exec: client.0: - sudo ceph osd pool set foo size 2 - sleep: duration: 300
1,044
19.490196
56
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml
openstack: - volumes: # attached to each instance count: 3 size: 10 # GB roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] overrides: ceph: log-ignorelist: - \(OSD_DOWN\) - \(POOL_APP_NOT_ENABLED\) - \(SLOW_OPS\) - \(PG_AVAILABILITY\) - \(PG_DEGRADED\) - application not enabled - slow request conf: osd: osd scrub backoff ratio: 0 osd deep scrub large omap object value sum threshold: 8800000 osd deep scrub large omap object key threshold: 20000 tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force - exec: client.0: - ceph_test_osd_stale_read
708
22.633333
69
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
openstack: - volumes: # attached to each instance count: 2 size: 10 # GB roles: - [mon.a, mgr.x, osd.0, osd.1, client.0] tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force - workunit: clients: all: - rados/test_pool_access.sh
311
18.5
55
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 openstack: - volumes: # attached to each instance count: 2 size: 20 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force fs: xfs conf: osd: osd recovery sleep: .1 osd objectstore: bluestore log-ignorelist: - \(POOL_APP_NOT_ENABLED\) - \(OSDMAP_FLAGS\) - \(OSD_ - \(OBJECT_ - \(PG_ - overall HEALTH - exec: osd.0: - ceph osd pool create foo 32 - ceph osd pool application enable foo foo - rados -p foo bench 30 write -b 4096 --no-cleanup - ceph osd set noup - ceph.restart: daemons: [osd.0] wait-for-up: false wait-for-healthy: false - exec: osd.0: - sleep 5 - rados -p foo bench 3 write -b 4096 --no-cleanup - ceph osd unset noup - sleep 10 - ceph osd set noup - ceph.restart: daemons: [osd.1] wait-for-up: false wait-for-healthy: false - exec: osd.0: - ceph osd out 0 - sleep 10 - ceph osd unset noup - ceph.healthy: wait-for-healthy: false # only wait for osds up and pgs clean, ignore misplaced - exec: osd.0: - ceph osd in 0 - ceph.healthy:
1,276
19.934426
85
yaml
null
ceph-main/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml
roles: - [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0] overrides: ceph: log-ignorelist: - \(POOL_APP_NOT_ENABLED\) tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force - workunit: clients: all: - rados/version_number_sanity.sh
308
18.3125
55
yaml
null
ceph-main/qa/suites/rados/singleton/rados.yaml
.qa/config/rados.yaml
21
21
21
yaml
null
ceph-main/qa/suites/rados/singleton/all/admin-socket.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - client.a openstack: - volumes: # attached to each instance count: 2 size: 10 # GB tasks: - install: - ceph: - admin_socket: osd.0: version: git_version: help: config show: config help: config set bluestore_csum_type xxhash64: perf dump: perf schema: get_heap_property tcmalloc.max_total_thread_cache_byte || dump_metrics memory: set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864 || dump_metrics memory: set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432 || dump_metrics memory:
642
22.814815
94
yaml
null
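admin-socket.yaml appears to list admin-socket commands as mapping keys with empty values (each command in the cell ends in a colon); a possible reconstruction under that assumption:

```yaml
roles:
- - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - client.a
openstack:
- volumes: # attached to each instance
    count: 2
    size: 10 # GB
tasks:
- install:
- ceph:
- admin_socket:
    osd.0:
      version:
      git_version:
      help:
      config show:
      config help:
      config set bluestore_csum_type xxhash64:
      perf dump:
      perf schema:
      get_heap_property tcmalloc.max_total_thread_cache_byte || dump_metrics memory:
      set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864 || dump_metrics memory:
      set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432 || dump_metrics memory:
```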
ceph-main/qa/suites/rados/singleton/all/backfill-toofull.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 - osd.3 openstack: - volumes: # attached to each instance count: 4 size: 10 # GB tasks: - install: - ceph: create_rbd_pool: false pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force log-ignorelist: - Error - overall HEALTH_ - \(OBJECT_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(POOL_BACKFILLFULL\) - \(POOL_NEARFULL\) - \(SLOW_OPS\) - \(TOO_FEW_PGS\) - Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running - slow request conf: osd: osd min pg log entries: 5 osd max pg log entries: 5 - backfill_toofull:
770
19.289474
81
yaml
null
ceph-main/qa/suites/rados/singleton/all/deduptool.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 2 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - had wrong client addr - had wrong cluster addr - reached quota - overall HEALTH_ - \(POOL_FULL\) - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: - rados/test_dedup_tool.sh
532
17.37931
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/divergent_priors.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB overrides: ceph: log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(OBJECT_ - \(POOL_APP_NOT_ENABLED\) tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force - divergent_priors:
451
14.586207
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/divergent_priors2.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB overrides: ceph: log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(OBJECT_ - \(POOL_APP_NOT_ENABLED\) tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force - divergent_priors2:
452
14.62069
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/dump-stuck.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 openstack: - volumes: # attached to each instance count: 2 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - dump_stuck:
382
16.409091
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 - osd.3 openstack: - volumes: # attached to each instance count: 4 size: 10 # GB tasks: - install: - ceph: create_rbd_pool: false pre-mgr-commands: - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force log-ignorelist: - \(OBJECT_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(SLOW_OPS\) - deep-scrub - missing - overall HEALTH_ - repair - slow request - unfound conf: osd: osd min pg log entries: 5 osd max pg log entries: 5 - ec_inconsistent_hinfo:
669
17.108108
81
yaml
null
ceph-main/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 - osd.3 openstack: - volumes: # attached to each instance count: 4 size: 10 # GB tasks: - install: - ceph: create_rbd_pool: false pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(OBJECT_ - \(SLOW_OPS\) - slow request - ec_lost_unfound:
526
16.566667
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - workunit: clients: all: - erasure-code/encode-decode-non-regression.sh
266
13.833333
54
yaml
null
ceph-main/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(OBJECT_ - \(SLOW_OPS\) - slow request - rep_lost_unfound_delete:
497
16.785714
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/lost-unfound.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(OBJECT_ - \(SLOW_OPS\) - slow request - lost_unfound:
486
16.392857
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 openstack: - volumes: # attached to each instance count: 2 size: 10 # GB overrides: ceph: create_rbd_pool: False pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force conf: mon: osd pool default size: 2 osd: mon max pg per osd : 2 osd max pg per osd hard ratio : 1 log-ignorelist: - \(TOO_FEW_PGS\) - \(PENDING_CREATING_PGS\) tasks: - install: - ceph: - osd_max_pg_per_osd: test_create_from_mon: True pg_num: 2
566
17.9
55
yaml
null
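max-pg-per-osd.from-mon.yaml seems to split into roles, openstack, overrides, and tasks; the grouping below (including which keys sit under overrides.ceph) is inferred from the token order in the cell:

```yaml
roles:
- - mon.a
  - mgr.x
  - osd.0
  - osd.1
openstack:
- volumes: # attached to each instance
    count: 2
    size: 10 # GB
overrides:
  ceph:
    create_rbd_pool: False
    pre-mgr-commands:
      - sudo ceph config set mgr mgr_pool false --force
    conf:
      mon:
        osd pool default size: 2
      osd:
        mon max pg per osd : 2
        osd max pg per osd hard ratio : 1
    log-ignorelist:
      - \(TOO_FEW_PGS\)
      - \(PENDING_CREATING_PGS\)
tasks:
- install:
- ceph:
- osd_max_pg_per_osd:
    test_create_from_mon: True
    pg_num: 2
```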
ceph-main/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - osd.3 openstack: - volumes: # attached to each instance count: 4 size: 10 # GB overrides: ceph: create_rbd_pool: False pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force conf: mon: osd pool default size: 2 osd: mon max pg per osd : 1 osd max pg per osd hard ratio : 1 log-ignorelist: - \(TOO_FEW_PGS\) - \(PG_ - \(PENDING_CREATING_PGS\) tasks: - install: - ceph: - osd_max_pg_per_osd: test_create_from_mon: False pg_num: 1 pool_size: 2 from_primary: True
641
17.342857
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - osd.3 openstack: - volumes: # attached to each instance count: 4 size: 10 # GB overrides: ceph: create_rbd_pool: False pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force conf: mon: osd pool default size: 2 osd: mon max pg per osd : 1 osd max pg per osd hard ratio : 1 log-ignorelist: - \(TOO_FEW_PGS\) - \(PG_ - \(PENDING_CREATING_PGS\) tasks: - install: - ceph: - osd_max_pg_per_osd: test_create_from_mon: False pg_num: 1 pool_size: 2 from_primary: False
642
17.371429
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/mon-auth-caps.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - overall HEALTH_ - \(AUTH_BAD_CAPS\) - workunit: clients: all: - mon/auth_caps.sh - mon/auth_key_rotation.sh
338
15.142857
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/mon-config-key-caps.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - overall HEALTH_ - \(AUTH_BAD_CAPS\) - workunit: clients: all: - mon/test_config_key_caps.sh
314
14.75
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/mon-config-keys.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force - workunit: clients: all: - mon/test_mon_config_key.py
354
14.434783
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/mon-config.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force - workunit: clients: all: - mon/config.sh
341
13.869565
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/osd-backfill.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(OBJECT_ conf: osd: osd min pg log entries: 5 - osd_backfill:
487
15.827586
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 - osd.3 openstack: - volumes: # attached to each instance count: 4 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(OBJECT_ conf: osd: osd min pg log entries: 5 osd_fast_fail_on_connection_refused: false - osd_recovery.test_incomplete_pgs:
568
17.354839
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/osd-recovery.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(OBJECT_DEGRADED\) - \(SLOW_OPS\) - slow request conf: osd: osd min pg log entries: 5 osd pg log trim min: 0 osd_fast_fail_on_connection_refused: false - osd_recovery:
621
17.848485
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/peer.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force config: global: osd pool default min size : 1 log-ignorelist: - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - peer:
482
16.25
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - osd.3 - client.0 - - mon.b - mon.c - osd.4 - osd.5 - osd.6 - osd.7 openstack: - volumes: # attached to each instance count: 4 size: 10 # GB tasks: - install: - ceph: create_rbd_pool: false pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(POOL_ - \(CACHE_POOL_ - \(OBJECT_ - \(SLOW_OPS\) - \(REQUEST_SLOW\) - \(TOO_FEW_PGS\) - slow request - exec: client.0: - ceph progress off - workunit: clients: all: - mon/pg_autoscaler.sh
717
14.955556
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/pg-autoscaler.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - osd.3 - osd.4 - osd.5 - client.0 openstack: - volumes: # attached to each instance count: 4 size: 10 # GB tasks: - install: - ceph: create_rbd_pool: false pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(POOL_ - \(CACHE_POOL_ - \(OBJECT_ - \(SLOW_OPS\) - \(REQUEST_SLOW\) - \(TOO_FEW_PGS\) - slow request - workunit: clients: all: - mon/pg_autoscaler.sh
630
16.054054
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - slow request - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - exec: client.0: - sudo ceph osd pool create foo 128 128 - sudo ceph osd pool application enable foo rados - sleep 5 - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it - ceph.wait_for_failure: [osd.0] - exec: client.0: - sudo ceph osd down 0 - ceph.restart: [osd.0] - ceph.healthy:
826
21.351351
77
yaml
null
ceph-main/qa/suites/rados/singleton/all/radostool.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 2 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - had wrong client addr - had wrong cluster addr - reached quota - overall HEALTH_ - \(POOL_FULL\) - \(POOL_APP_NOT_ENABLED\) - workunit: clients: all: - rados/test_rados_tool.sh
532
17.37931
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/random-eio.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - - osd.3 - osd.4 - osd.5 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - missing primary copy of - objects unfound and apparently lost - had a read error - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) - \(PG_DEGRADED\) - \(OSD_TOO_MANY_REPAIRS\) - full_sequential: - exec: client.0: - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.33 - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.33 - sudo ceph osd pool create test 16 16 - sudo ceph osd pool set test size 3 - sudo ceph pg dump pgs --format=json-pretty - radosbench: clients: [client.0] time: 360 type: rand objectsize: 1048576 pool: test create_pool: false - exec: client.0: - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.0 - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.0
1,206
24.680851
83
yaml
null
ceph-main/qa/suites/rados/singleton/all/rebuild-mondb.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - no reply from - overall HEALTH_ - \(MON_DOWN\) - \(MGR_DOWN\) - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ conf: mon: debug auth: 30 - full_sequential: - radosbench: clients: [client.0] time: 30 - rebuild_mondb: - radosbench: clients: [client.0] time: 30
639
15.842105
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/recovery-preemption.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 - osd.3 openstack: - volumes: # attached to each instance count: 3 size: 20 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force conf: osd: osd recovery sleep: .1 osd min pg log entries: 10 osd max pg log entries: 1000 osd_target_pg_log_entries_per_osd: 0 osd pg log trim min: 10 log-ignorelist: - \(POOL_APP_NOT_ENABLED\) - \(OSDMAP_FLAGS\) - \(OSD_ - \(OBJECT_ - \(PG_ - \(SLOW_OPS\) - overall HEALTH - slow request - exec: osd.0: - ceph osd pool create foo 128 - ceph osd pool application enable foo foo - sleep 5 - ceph.healthy: - exec: osd.0: - rados -p foo bench 30 write -b 4096 --no-cleanup - ceph osd out 0 - sleep 5 - ceph osd set noup - ceph.restart: daemons: [osd.1] wait-for-up: false wait-for-healthy: false - exec: osd.0: - rados -p foo bench 3 write -b 4096 --no-cleanup - ceph osd unset noup - sleep 10 - for f in 0 1 2 3 ; do sudo ceph daemon osd.$f config set osd_recovery_sleep 0 ; sudo ceph daemon osd.$f config set osd_recovery_max_active 20 ; done - ceph.healthy: - exec: osd.0: - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log
1,424
22.360656
156
yaml
null
ceph-main/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
roles: - [mon.a, mgr.x] - [osd.0, osd.1, osd.2, client.0] tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force fs: xfs log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(OBJECT_DEGRADED\) - \(POOL_APP_NOT_ENABLED\) - resolve_stuck_peering:
360
17.05
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/test-crash.yaml
roles: - [client.0, mon.a, mgr.x, osd.0, osd.1, osd.2] tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - Reduced data availability - OSD_.*DOWN - \(RECENT_CRASH\) - workunit: clients: client.0: - rados/test_crash.sh - ceph.restart: [osd.*] - exec: mon.a: - find $TESTDIR/archive/coredump -type f -exec rm -f {} \;
473
21.571429
66
yaml
null
ceph-main/qa/suites/rados/singleton/all/test-noautoscale-flag.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - osd.3 - client.0 openstack: - volumes: # attached to each instance count: 4 size: 10 # GB overrides: ceph: create_rbd_pool: false pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force conf: mon: osd pool default pg autoscale mode: on log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(POOL_ - \(CACHE_POOL_ - \(OBJECT_ - \(SLOW_OPS\) - \(REQUEST_SLOW\) - \(TOO_FEW_PGS\) - slow request tasks: - install: - ceph: - workunit: clients: all: - mon/test_noautoscale_flag.sh
705
16.65
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/thrash-backfill-full.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - - osd.3 - osd.4 - osd.5 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB overrides: ceph: conf: mon: osd pool default size: 3 osd min pg log entries: 5 osd max pg log entries: 10 tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - missing primary copy of - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(SLOW_OPS\) - \(PG_ - \(OBJECT_MISPLACED\) - \(OSD_ - \(OBJECT_ - \(TOO_FEW_PGS\) - \(POOL_BACKFILLFULL\) - slow request - thrashosds: op_delay: 30 clean_interval: 120 chance_down: .75 min_live: 5 min_in: 5 chance_test_backfill_full: .5 - radosbench: clients: [client.0] time: 1800 type: rand objectsize: 1048576
988
17.660377
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/thrash-eio.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - - osd.3 - osd.4 - osd.5 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB overrides: ceph: conf: mon: osd pool default size: 3 tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - missing primary copy of - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(SLOW_OPS\) - \(PG_ - \(OBJECT_MISPLACED\) - \(OSD_ - \(OBJECT_ - \(TOO_FEW_PGS\) - slow request - thrashosds: op_delay: 30 clean_interval: 120 chance_down: .5 random_eio: .33 min_live: 5 min_in: 5 - radosbench: clients: [client.0] time: 720 type: rand objectsize: 1048576
875
16.52
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - - osd.3 - osd.4 - osd.5 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 30 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force log-ignorelist: - but it is still running - slow request - overall HEALTH_ - \(CACHE_POOL_ - exec: client.0: - sudo ceph osd pool create base 4 - sudo ceph osd pool application enable base rados - sudo ceph osd pool create cache 4 - sudo ceph osd tier add base cache - sudo ceph osd tier cache-mode cache writeback - sudo ceph osd tier set-overlay base cache - sudo ceph osd pool set cache hit_set_type bloom - sudo ceph osd pool set cache hit_set_count 8 - sudo ceph osd pool set cache hit_set_period 60 - sudo ceph osd pool set cache target_max_objects 500 - background_exec: mon.a: - while true - do sleep 30 - sudo ceph osd pool set cache cache_target_full_ratio .001 - echo cache-try-flush-evict-all - rados -p cache cache-try-flush-evict-all - sleep 5 - echo cache-flush-evict-all - rados -p cache cache-flush-evict-all - sleep 5 - echo remove overlay - sudo ceph osd tier remove-overlay base - sleep 20 # Disabled due to https://tracker.ceph.com/issues/46323 #- echo add writeback overlay #- sudo ceph osd tier cache-mode cache writeback #- sudo ceph osd pool set cache cache_target_full_ratio .8 #- sudo ceph osd tier set-overlay base cache #- sleep 30 #- sudo ceph osd tier cache-mode cache readproxy - done - rados: clients: [client.0] pools: [base] max_seconds: 600 ops: 400000 objects: 10000 size: 1024 op_weights: read: 100 write: 100 delete: 50 copy_from: 50
1,932
26.225352
65
yaml
null
ceph-main/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
roles: - - mon.a - mon.b - mon.c - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force config: global: osd pool default min size : 1 client: debug ms: 1 debug objecter: 20 debug rados: 20 log-ignorelist: - objects unfound and apparently lost - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(OBJECT_DEGRADED\) - watch_notify_same_primary: clients: [client.0]
653
17.685714
55
yaml
null
ceph-main/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - - osd.3 - osd.4 - osd.5 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - ceph: log-ignorelist: - but it is still running - thrashosds: op_delay: 30 clean_interval: 120 chance_down: .5 - workunit: clients: all: - rados/load-gen-mix-small.sh
412
13.75
40
yaml
null
ceph-main/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
.qa/tasks/thrashosds-health.yaml
32
32
32
yaml
null
ceph-main/qa/suites/rados/singleton/msgr-failures/few.yaml
overrides: ceph: conf: global: ms inject socket failures: 5000 mon client directed command retry: 5 log-ignorelist: - \(OSD_SLOW_PING_TIME - \(MON_DOWN\)
198
18.9
44
yaml
null
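The msgr-failures/few.yaml cell is a short overrides-only fragment; with assumed indentation it reads as:

```yaml
overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 5000
        mon client directed command retry: 5
    log-ignorelist:
      - \(OSD_SLOW_PING_TIME
      - \(MON_DOWN\)
```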
ceph-main/qa/suites/rados/singleton/msgr-failures/many.yaml
overrides: ceph: conf: global: ms inject socket failures: 1000 mon mgr beacon grace: 90 mon client hunt interval max multiple: 2 mon client directed command retry: 5 mgr: debug monc: 10 log-ignorelist: - \(OSD_SLOW_PING_TIME - \(MON_DOWN\)
314
21.5
48
yaml
null
ceph-main/qa/suites/rados/singleton/msgr-failures/none.yaml
0
0
0
yaml
null
ceph-main/qa/suites/rados/standalone/workloads/c2c.yaml
arch: x86_64 roles: - - mon.a - mgr.x - osd.0 - client.0 tasks: - install: extra_system_packages: rpm: - perf deb: - linux-tools-generic - workunit: basedir: qa/standalone clients: all: - c2c
252
12.315789
29
yaml
null
ceph-main/qa/suites/rados/standalone/workloads/crush.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - workunit: basedir: qa/standalone clients: all: - crush
254
12.421053
40
yaml
null
ceph-main/qa/suites/rados/standalone/workloads/erasure-code.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - workunit: basedir: qa/standalone clients: all: - erasure-code
261
12.789474
40
yaml
null
ceph-main/qa/suites/rados/standalone/workloads/mgr.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - workunit: basedir: qa/standalone clients: all: - mgr
252
12.315789
40
yaml