Dataset schema (one row per source file):

  repo             string   (length 2 to 152)
  file             string   (length 15 to 239)
  code             string   (length 0 to 58.4M)
  file_length      int64    (0 to 58.4M)
  avg_line_length  float64  (0 to 1.81M)
  max_line_length  int64    (0 to 12.7M)
  extension_type   string   (364 distinct values)

A sketch of how the per-file length statistics are likely derived follows below.
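The listing itself does not say how file_length, avg_line_length, and max_line_length are computed. The following minimal Python sketch shows one plausible definition, assuming file_length is the total character count of the code cell (newlines included), lines are split on newline characters, and extension_type is taken from the file suffix; the compute_row helper and these definitions are illustrative assumptions, not documented behaviour of the dataset.

from pathlib import Path

def compute_row(repo: str, path: str) -> dict:
    """Hypothetical reconstruction of one dataset row; the exact
    definitions used by the dataset are assumed, not documented."""
    code = Path(path).read_text()
    lines = code.split("\n")  # a trailing newline yields a final empty element
    return {
        "repo": repo,
        "file": path,
        "code": code,
        "file_length": len(code),                            # total characters, newlines included
        "avg_line_length": sum(len(l) for l in lines) / len(lines),
        "max_line_length": max((len(l) for l in lines), default=0),
        "extension_type": Path(path).suffix.lstrip("."),     # e.g. "yaml"
    }

# Example usage (assumes the file exists locally):
# compute_row("ceph", "ceph-main/qa/suites/orch/rook/smoke/2-workload/radosbench.yaml")
# If the definitions match, this should roughly reproduce the corresponding row
# below: file_length 68, avg_line_length 10.5, max_line_length 23.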
repo: null
ceph-main/qa/suites/orch/rook/smoke/2-workload/none.yaml
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: yaml

repo: null
ceph-main/qa/suites/orch/rook/smoke/2-workload/radosbench.yaml
tasks: - install: host.a: - radosbench: clients: [client.a]
file_length: 68 | avg_line_length: 10.5 | max_line_length: 23 | extension_type: yaml

repo: null
ceph-main/qa/suites/orch/rook/smoke/cluster/1-node.yaml
overrides: ceph: conf: global: osd crush chooseleaf type: 0 roles: - - host.a - client.a
file_length: 112 | avg_line_length: 10.3 | max_line_length: 36 | extension_type: yaml

repo: null
ceph-main/qa/suites/orch/rook/smoke/cluster/3-node.yaml
roles: - - host.a - client.a - - host.b - client.b - - host.c - client.c
file_length: 79 | avg_line_length: 9 | max_line_length: 12 | extension_type: yaml

repo: null
ceph-main/qa/suites/orch/rook/smoke/k8s/1.21.yaml
overrides: kubeadm: version: "1.21"
file_length: 42 | avg_line_length: 9.75 | max_line_length: 19 | extension_type: yaml

repo: null
ceph-main/qa/suites/orch/rook/smoke/net/calico.yaml
overrides: kubeadm: pod_network: calico
file_length: 46 | avg_line_length: 10.75 | max_line_length: 23 | extension_type: yaml

repo: null
ceph-main/qa/suites/orch/rook/smoke/net/flannel.yaml
overrides: kubeadm: pod_network: flannel
file_length: 47 | avg_line_length: 11 | max_line_length: 24 | extension_type: yaml

repo: null
ceph-main/qa/suites/orch/rook/smoke/net/host.yaml
overrides: ceph: spec: mon: allowMultiplePerNode: false network: provider: host
file_length: 114 | avg_line_length: 13.375 | max_line_length: 35 | extension_type: yaml

repo: null
ceph-main/qa/suites/orch/rook/smoke/rook/1.7.2.yaml
overrides: rook: rook_image: rook/ceph:v1.7.2 rook_branch: v1.7.2
file_length: 76 | avg_line_length: 14.4 | max_line_length: 32 | extension_type: yaml

repo: null
ceph-main/qa/suites/orch/rook/smoke/rook/master.yaml
overrides: rook: rook_image: rook/ceph:master
file_length: 52 | avg_line_length: 12.25 | max_line_length: 32 | extension_type: yaml

repo: null
ceph-main/qa/suites/perf-basic/ceph.yaml
meta: - desc: | perf-basic is a basic performance suite. Must be run on bare-metal machines. On VMs performance results will be inconsistent and can't be compared across runs. Run ceph on a single node. Use xfs beneath the osds. Setup rgw on client.0 roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] tasks: - install: - ceph: fs: xfs wait-for-scrub: false log-ignorelist: - \(PG_ - \(OSD_ - \(OBJECT_ - overall HEALTH - ssh_keys:
file_length: 494 | avg_line_length: 19.625 | max_line_length: 50 | extension_type: yaml

repo: null
ceph-main/qa/suites/perf-basic/ubuntu_latest.yaml
.qa/distros/supported/ubuntu_latest.yaml
file_length: 40 | avg_line_length: 40 | max_line_length: 40 | extension_type: yaml

repo: null
ceph-main/qa/suites/perf-basic/objectstore/bluestore.yaml
overrides: ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 ceph-deploy: fs: xfs bluestore: yes conf: osd: osd objectstore: bluestore bluestore block size: 96636764160
file_length: 274 | avg_line_length: 16.1875 | max_line_length: 41 | extension_type: yaml

repo: null
ceph-main/qa/suites/perf-basic/settings/optimized.yaml
meta: - desc: | Use debug level 0/0 for performance tests. overrides: ceph: conf: mon: debug mon: "0/0" debug ms: "0/0" debug paxos: "0/0" osd: debug filestore: "0/0" debug journal: "0/0" debug ms: "0/0" debug osd: "0/0" global: auth client required: none auth cluster required: none auth service required: none auth supported: none debug lockdep: "0/0" debug context: "0/0" debug crush: "0/0" debug mds: "0/0" debug mds balancer: "0/0" debug mds locker: "0/0" debug mds log: "0/0" debug mds log expire: "0/0" debug mds migrator: "0/0" debug buffer: "0/0" debug timer: "0/0" debug filer: "0/0" debug striper: "0/0" debug objecter: "0/0" debug rados: "0/0" debug rbd: "0/0" debug rbd mirror: "0/0" debug rbd replay: "0/0" debug journaler: "0/0" debug objectcacher: "0/0" debug client: "0/0" debug osd: "0/0" debug optracker: "0/0" debug objclass: "0/0" debug filestore: "0/0" debug journal: "0/0" debug ms: "0/0" debug mon: "0/0" debug monc: "0/0" debug paxos: "0/0" debug tp: "0/0" debug auth: "0/0" debug crypto: "0/0" debug finisher: "0/0" debug heartbeatmap: "0/0" debug perfcounter: "0/0" debug rgw: "0/0" debug rgw sync: "0/0" debug civetweb: "0/0" debug javaclient: "0/0" debug asok: "0/0" debug throttle: "0/0" debug refs: "0/0" debug compressor: "0/0" debug bluestore: "0/0" debug bluefs: "0/0" debug bdev: "0/0" debug kstore: "0/0" debug rocksdb: "0/0" debug memdb: "0/0" debug fuse: "0/0" debug mgr: "0/0" debug mgrc: "0/0" debug dpdk: "0/0" debug eventtrace: "0/0"
file_length: 2,033 | avg_line_length: 25.076923 | max_line_length: 45 | extension_type: yaml

repo: null
ceph-main/qa/suites/perf-basic/workloads/client_endpoint_rbd_4K_rand_write.yaml
meta: - desc: | Run librbdfio benchmark using cbt client endpoint for rbd. 4K randwrite workload. tasks: - cbt: benchmarks: fio: client_endpoints: 'fiotest' op_size: [4096] time: 300 mode: ['randwrite'] norandommap: True size: 4096 iodepth: [32] osd_ra: [4096] pool_profile: 'rbd' log_avg_msec: 100 cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: rbd: pg_size: 256 pgp_size: 256 replication: 3 client_endpoints: fiotest: driver: 'librbd'
file_length: 642 | avg_line_length: 18.484848 | max_line_length: 61 | extension_type: yaml

repo: null
ceph-main/qa/suites/perf-basic/workloads/fio_4K_rand_write.yaml
meta: - desc: | Run librbdfio benchmark using cbt. 4K randwrite workload. tasks: - cbt: benchmarks: librbdfio: op_size: [4096] time: 300 mode: ['randwrite'] norandommap: True vol_size: 4096 procs_per_volume: [1] volumes_per_client: [2] iodepth: [32] osd_ra: [4096] pool_profile: 'rbd' log_avg_msec: 100 cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: rbd: pg_size: 256 pgp_size: 256 replication: 3
file_length: 591 | avg_line_length: 18.733333 | max_line_length: 37 | extension_type: yaml

repo: null
ceph-main/qa/suites/perf-basic/workloads/radosbench_4K_write.yaml
meta: - desc: | Run radosbench benchmark using cbt. 4K write workload. tasks: - cbt: benchmarks: radosbench: concurrent_ops: 4 concurrent_procs: 2 op_size: [4096] pool_monitoring_list: - collectl pool_profile: 'replicated' run_monitoring_list: - collectl time: 300 write_only: true cluster: user: 'ubuntu' osds_per_node: 3 iterations: 1 pool_profiles: replicated: pg_size: 256 pgp_size: 256 replication: 'replicated'
file_length: 580 | avg_line_length: 19.034483 | max_line_length: 38 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/ignorelist_health.yaml
overrides: ceph: log-ignorelist: - \(MDS_TRIM\) - \(MDS_SLOW_REQUEST\) - MDS_SLOW_METADATA_IO - Behind on trimming
file_length: 145 | avg_line_length: 17.25 | max_line_length: 28 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/thrashosds-health.yaml
.qa/tasks/thrashosds-health.yaml
file_length: 32 | avg_line_length: 32 | max_line_length: 32 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml
roles: - [mon.a, mon.b, mon.c, mgr.x, mgr.y, mds.a, client.0] - [osd.0] - [osd.1] - [osd.2]
file_length: 92 | avg_line_length: 14.5 | max_line_length: 54 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/powercycle/default.yaml
tasks: - install: extra_system_packages: deb: - bison - flex - libelf-dev - libssl-dev - libaio-dev - libtool-bin - uuid-dev - xfslibs-dev rpm: - bison - flex - elfutils-libelf-devel - openssl-devel - libaio-devel - libtool - libuuid-devel - xfsprogs-devel - ceph: - thrashosds: chance_down: 1.0 powercycle: true timeout: 600
file_length: 450 | avg_line_length: 15.703704 | max_line_length: 29 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml
overrides: ceph: conf: client.0: admin socket: /var/run/ceph/ceph-$name.asok tasks: - radosbench: clients: [client.0] time: 60 - admin_socket: client.0: objecter_requests: test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
file_length: 323 | avg_line_length: 22.142857 | max_line_length: 112 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml
overrides: ceph: conf: client: fuse_default_permissions: 0 tasks: - ceph-fuse: - workunit: timeout: 6h clients: all: - kernel_untar_build.sh
file_length: 183 | avg_line_length: 13.153846 | max_line_length: 35 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml
tasks: - ceph-fuse: - workunit: timeout: 6h clients: all: - fs/misc
file_length: 90 | avg_line_length: 10.375 | max_line_length: 17 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml
overrides: ceph: conf: osd: filestore flush min: 0 mds: debug ms: 1 debug mds: 20 tasks: - ceph-fuse: - workunit: clients: all: - suites/ffsb.sh
file_length: 205 | avg_line_length: 12.733333 | max_line_length: 30 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml
tasks: - ceph-fuse: - workunit: clients: all: - suites/fsstress.sh
file_length: 85 | avg_line_length: 11.285714 | max_line_length: 28 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml
tasks: - ceph-fuse: - workunit: timeout: 6h clients: all: - suites/fsx.sh
file_length: 96 | avg_line_length: 11.125 | max_line_length: 23 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml
overrides: ceph: conf: global: osd_pg_log_dups_tracked: 10000 tasks: - ceph-fuse: - workunit: clients: all: - suites/fsync-tester.sh
file_length: 172 | avg_line_length: 12.307692 | max_line_length: 38 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml
overrides: ceph: conf: client: fuse set user groups: true tasks: - ceph-fuse: - workunit: clients: all: - suites/pjd.sh
file_length: 158 | avg_line_length: 12.25 | max_line_length: 34 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml
overrides: ceph: conf: client: ms_inject_delay_probability: 1 ms_inject_delay_type: osd ms_inject_delay_max: 5 client_oc_max_dirty_age: 1 tasks: - ceph-fuse: - exec: client.0: - dd if=/dev/zero of=./foo count=100 - sleep 2 - truncate --size 0 ./foo
file_length: 315 | avg_line_length: 18.75 | max_line_length: 42 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml
overrides: ceph: log-ignorelist: - reached quota - \(POOL_APP_NOT_ENABLED\) - \(PG_AVAILABILITY\) conf: mon: mon warn on pool no app: false osd: osd class load list: "*" osd class default list: "*" tasks: - ceph-fuse: - workunit: clients: client.0: - rados/test.sh
file_length: 347 | avg_line_length: 17.315789 | max_line_length: 38 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/radosbench.yaml
tasks: - full_sequential: - radosbench: clients: [client.0] time: 90 - radosbench: clients: [client.0] time: 90 - radosbench: clients: [client.0] time: 90 - radosbench: clients: [client.0] time: 90 - radosbench: clients: [client.0] time: 90 - radosbench: clients: [client.0] time: 90 - radosbench: clients: [client.0] time: 90 - radosbench: clients: [client.0] time: 90 - radosbench: clients: [client.0] time: 90 - radosbench: clients: [client.0] time: 90 - radosbench: clients: [client.0] time: 90 - radosbench: clients: [client.0] time: 90
file_length: 710 | avg_line_length: 17.230769 | max_line_length: 25 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/readwrite.yaml
tasks: - rados: clients: [client.0] ops: 4000 objects: 500 op_weights: read: 45 write: 45 delete: 10
file_length: 135 | avg_line_length: 12.6 | max_line_length: 23 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml
tasks: - rados: clients: [client.0] ops: 4000 objects: 50 op_weights: read: 100 write: 100 delete: 50 snap_create: 50 snap_remove: 50 rollback: 50 copy_from: 50
file_length: 219 | avg_line_length: 14.714286 | max_line_length: 23 | extension_type: yaml

repo: null
ceph-main/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml
tasks: - rados: clients: [client.0] ops: 4000 objects: 500 op_weights: read: 100 write: 100 delete: 50 snap_create: 50 snap_remove: 50 rollback: 50 copy_from: 50
file_length: 220 | avg_line_length: 14.785714 | max_line_length: 23 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/ceph.yaml
overrides: ceph: conf: mon: mon min osdmap epochs: 50 paxos service trim min: 10 # prune full osdmaps regularly mon osdmap full prune min: 15 mon osdmap full prune interval: 2 mon osdmap full prune txsize: 2 tasks: - install: extra_system_packages: rpm: - sqlite-devel deb: - sqlite3 - ceph:
file_length: 380 | avg_line_length: 19.052632 | max_line_length: 41 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/rados.yaml
.qa/config/rados.yaml
file_length: 21 | avg_line_length: 21 | max_line_length: 21 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/clusters/fixed-2.yaml
.qa/clusters/fixed-2.yaml
file_length: 25 | avg_line_length: 25 | max_line_length: 25 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/clusters/openstack.yaml
openstack: - volumes: # attached to each instance count: 4 size: 10 # GB
file_length: 87 | avg_line_length: 16.6 | max_line_length: 40 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/msgr-failures/few.yaml
overrides: ceph: conf: global: ms inject socket failures: 5000 mon client directed command retry: 5 log-ignorelist: - \(OSD_SLOW_PING_TIME
file_length: 177 | avg_line_length: 18.777778 | max_line_length: 44 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/msgr-failures/many.yaml
overrides: ceph: conf: global: ms inject socket failures: 1500 mon client directed command retry: 5 log-ignorelist: - \(OSD_SLOW_PING_TIME
file_length: 177 | avg_line_length: 18.777778 | max_line_length: 44 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/libcephsqlite.yaml
overrides: ceph: conf: client: debug ms: 1 debug client: 20 debug cephsqlite: 20 log-ignorelist: - POOL_APP_NOT_ENABLED - do not have an application enabled tasks: - exec: client.0: - ceph osd pool create cephsqlite - ceph auth get-or-create client.libcephsqlite mon 'profile simple-rados-client-with-blocklist' osd 'allow rwx pool=cephsqlite' >> /etc/ceph/ceph.keyring - exec: client.0: - ceph_test_libcephsqlite --id libcephsqlite --no-log-to-stderr - workunit: clients: client.0: - rados/test_libcephsqlite.sh cephsqlite env: CEPH_ARGS: --id libcephsqlite --no-log-to-stderr
file_length: 662 | avg_line_length: 25.52 | max_line_length: 156 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/rados_api_tests.yaml
overrides: ceph: log-ignorelist: - reached quota - but it is still running - overall HEALTH_ - \(POOL_FULL\) - \(SMALLER_PGP_NUM\) - \(CACHE_POOL_NO_HIT_SET\) - \(CACHE_POOL_NEAR_FULL\) - \(POOL_APP_NOT_ENABLED\) - \(PG_AVAILABILITY\) - \(PG_DEGRADED\) conf: client: debug ms: 1 mon: mon warn on pool no app: false osd: osd class load list: "*" osd class default list: "*" tasks: - workunit: clients: client.0: - rados/test.sh - rados/test_pool_quota.sh
file_length: 581 | avg_line_length: 19.068966 | max_line_length: 38 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/rados_cls_all.yaml
overrides: ceph: log-ignorelist: - \(PG_AVAILABILITY\) - \(POOL_APP_NOT_ENABLED\) conf: osd: osd_class_load_list: "*" osd_class_default_list: "*" tasks: - workunit: clients: client.0: - cls
file_length: 248 | avg_line_length: 15.6 | max_line_length: 35 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/rados_python.yaml
overrides: ceph: log-ignorelist: - but it is still running - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(PG_ - \(OSD_ - \(OBJECT_ - \(POOL_APP_NOT_ENABLED\) tasks: - workunit: timeout: 1h clients: client.0: - rados/test_python.sh
file_length: 281 | avg_line_length: 15.588235 | max_line_length: 30 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/rados_stress_watch.yaml
overrides: ceph: log-ignorelist: - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(TOO_FEW_PGS\) tasks: - workunit: clients: client.0: - rados/stress_watch.sh
file_length: 201 | avg_line_length: 15.833333 | max_line_length: 33 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/rados_striper.yaml
tasks: - exec: client.0: - ceph_test_rados_striper_api_io - ceph_test_rados_striper_api_aio - ceph_test_rados_striper_api_striping
file_length: 144 | avg_line_length: 17.125 | max_line_length: 41 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml
overrides: ceph: log-ignorelist: - but it is still running - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) tasks: - workunit: clients: all: - rados/load-gen-big.sh
file_length: 197 | avg_line_length: 15.5 | max_line_length: 31 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml
overrides: ceph: log-ignorelist: - but it is still running - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) tasks: - workunit: clients: all: - rados/load-gen-mix.sh
file_length: 197 | avg_line_length: 15.5 | max_line_length: 31 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml
overrides: ceph: log-ignorelist: - but it is still running - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) tasks: - workunit: clients: all: - rados/load-gen-mostlyread.sh
file_length: 204 | avg_line_length: 16.083333 | max_line_length: 38 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/readwrite.yaml
overrides: ceph: crush_tunables: optimal conf: mon: mon osd initial require min compat client: luminous osd: osd_discard_disconnected_ops: false tasks: - rados: clients: [client.0] ops: 4000 objects: 500 op_weights: read: 45 write: 45 delete: 10
file_length: 318 | avg_line_length: 16.722222 | max_line_length: 59 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/repair_test.yaml
overrides: ceph: wait-for-scrub: false log-ignorelist: - candidate had a stat error - candidate had a read error - deep-scrub 0 missing, 1 inconsistent objects - deep-scrub 0 missing, 4 inconsistent objects - deep-scrub [0-9]+ errors - '!= omap_digest' - '!= data_digest' - repair 0 missing, 1 inconsistent objects - repair 0 missing, 4 inconsistent objects - repair [0-9]+ errors, [0-9]+ fixed - scrub 0 missing, 1 inconsistent objects - scrub [0-9]+ errors - 'size 1 != size' - attr name mismatch - Regular scrub request, deep-scrub details will be lost - candidate size [0-9]+ info size [0-9]+ mismatch - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ conf: osd: filestore debug inject read err: true bluestore debug inject read err: true tasks: - repair_test:
file_length: 926 | avg_line_length: 27.96875 | max_line_length: 62 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/basic/tasks/scrub_test.yaml
overrides: ceph: wait-for-scrub: false log-ignorelist: - '!= data_digest' - '!= omap_digest' - '!= size' - 'deep-scrub 0 missing, 1 inconsistent objects' - 'deep-scrub [0-9]+ errors' - 'repair 0 missing, 1 inconsistent objects' - 'repair [0-9]+ errors, [0-9]+ fixed' - 'shard [0-9]+ .* : missing' - 'deep-scrub 1 missing, 1 inconsistent objects' - 'does not match object info size' - 'attr name mistmatch' - 'deep-scrub 1 missing, 0 inconsistent objects' - 'failed to pick suitable auth object' - 'candidate size [0-9]+ info size [0-9]+ mismatch' - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(OSD_SCRUB_ERRORS\) - \(TOO_FEW_PGS\) conf: osd: osd deep scrub update digest min age: 0 osd skip data digest: false tasks: - scrub_test:
file_length: 858 | avg_line_length: 26.709677 | max_line_length: 55 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/dashboard/0-single-container-host.yaml
.qa/distros/single-container-host.yaml
file_length: 38 | avg_line_length: 38 | max_line_length: 38 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/dashboard/debug/mgr.yaml
.qa/debug/mgr.yaml
file_length: 18 | avg_line_length: 18 | max_line_length: 18 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/dashboard/tasks/dashboard.yaml
roles: - [mgr.x, mon.a, mon.c, mds.a, mds.c, osd.0, client.0] - [mgr.y, mgr.z, mon.b, mds.b, osd.1, osd.2, osd.3, client.1] overrides: ceph: conf: osd: osd mclock override recovery settings: true tasks: - install: - ceph: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ - replacing it with standby - No standby daemons available - \(FS_DEGRADED\) - \(MDS_FAILED\) - \(MDS_DEGRADED\) - \(FS_WITH_FAILED_MDS\) - \(MDS_DAMAGE\) - \(MDS_ALL_DOWN\) - \(MDS_UP_LESS_THAN_MAX\) - \(OSD_DOWN\) - \(OSD_HOST_DOWN\) - \(POOL_APP_NOT_ENABLED\) - \(OSDMAP_FLAGS\) - \(OSD_FLAGS\) - \(TELEMETRY_CHANGED\) - pauserd,pausewr flag\(s\) set - Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running - evicting unresponsive client .+ - MON_DOWN - rgw: [client.0] - cephfs_test_runner: fail_on_skip: false modules: - tasks.mgr.test_dashboard - tasks.mgr.dashboard.test_api - tasks.mgr.dashboard.test_auth - tasks.mgr.dashboard.test_cephfs - tasks.mgr.dashboard.test_cluster - tasks.mgr.dashboard.test_cluster_configuration - tasks.mgr.dashboard.test_crush_rule - tasks.mgr.dashboard.test_erasure_code_profile - tasks.mgr.dashboard.test_health - tasks.mgr.dashboard.test_host - tasks.mgr.dashboard.test_logs - tasks.mgr.dashboard.test_mgr_module - tasks.mgr.dashboard.test_monitor - tasks.mgr.dashboard.test_motd - tasks.mgr.dashboard.test_orchestrator - tasks.mgr.dashboard.test_osd - tasks.mgr.dashboard.test_perf_counters - tasks.mgr.dashboard.test_pool - tasks.mgr.dashboard.test_rbd - tasks.mgr.dashboard.test_rbd_mirroring - tasks.mgr.dashboard.test_requests - tasks.mgr.dashboard.test_rgw - tasks.mgr.dashboard.test_role - tasks.mgr.dashboard.test_settings - tasks.mgr.dashboard.test_summary - tasks.mgr.dashboard.test_telemetry - tasks.mgr.dashboard.test_user
file_length: 2,356 | avg_line_length: 32.197183 | max_line_length: 79 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/dashboard/tasks/e2e.yaml
roles: # 3 osd roles on host.a is required for cephadm task. It checks if the cluster is healthy. # More daemons will be deployed on both hosts in e2e tests. - - host.a - osd.0 - osd.1 - osd.2 - mon.a - mgr.a - client.0 - - host.b - client.1 tasks: - install: - cephadm: - workunit: clients: client.1: - cephadm/create_iscsi_disks.sh - workunit: clients: client.0: - cephadm/test_dashboard_e2e.sh
file_length: 447 | avg_line_length: 17.666667 | max_line_length: 90 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/clusters/2-node-mgr.yaml
.qa/clusters/2-node-mgr.yaml
file_length: 28 | avg_line_length: 28 | max_line_length: 28 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/debug/mgr.yaml
.qa/debug/mgr.yaml
file_length: 18 | avg_line_length: 18 | max_line_length: 18 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/mgr_ttl_cache/disable.yaml
.qa/mgr_ttl_cache/disable.yaml
file_length: 30 | avg_line_length: 30 | max_line_length: 30 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/mgr_ttl_cache/enable.yaml
.qa/mgr_ttl_cache/enable.yaml
file_length: 29 | avg_line_length: 29 | max_line_length: 29 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/tasks/crash.yaml
tasks: - install: - ceph: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ - \(RECENT_CRASH\) - replacing it with standby - No standby daemons available - cephfs_test_runner: modules: - tasks.mgr.test_crash
file_length: 435 | avg_line_length: 23.222222 | max_line_length: 68 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/tasks/failover.yaml
tasks: - install: - ceph: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ - replacing it with standby - No standby daemons available - cephfs_test_runner: modules: - tasks.mgr.test_failover
file_length: 411 | avg_line_length: 23.235294 | max_line_length: 68 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/tasks/insights.yaml
tasks: - install: - ceph: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(MGR_INSIGHTS_WARNING\) - \(insights_health_check - \(PG_ - \(RECENT_CRASH\) - replacing it with standby - No standby daemons available - cephfs_test_runner: modules: - tasks.mgr.test_insights
file_length: 507 | avg_line_length: 24.4 | max_line_length: 68 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/tasks/module_selftest.yaml
tasks: - install: - ceph: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ - replacing it with standby - No standby daemons available - Reduced data availability - Degraded data redundancy - objects misplaced - Synthetic exception in serve - influxdb python module not found - \(MGR_ZABBIX_ - foo bar - Failed to open Telegraf - evicting unresponsive client - 1 mgr modules have recently crashed \(RECENT_MGR_MODULE_CRASH\) - cephfs_test_runner: modules: - tasks.mgr.test_module_selftest fail_on_skip: false
file_length: 814 | avg_line_length: 28.107143 | max_line_length: 73 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/tasks/per_module_finisher_stats.yaml
tasks: - install: - ceph: wait-for-scrub: false - check-counter: counters: mgr: - name: "finisher-balancer.complete_latency.avgcount" min: 1 - name: "finisher-balancer.queue_len" expected_val: 0 - name: "finisher-crash.complete_latency.avgcount" min: 2 - name: "finisher-crash.queue_len" expected_val: 0 - name: "finisher-devicehealth.complete_latency.avgcount" min: 1 - name: "finisher-devicehealth.queue_len" expected_val: 0 - name: "finisher-iostat.complete_latency.avgcount" min: 1 - name: "finisher-iostat.queue_len" expected_val: 0 - name: "finisher-pg_autoscaler.complete_latency.avgcount" min: 1 - name: "finisher-pg_autoscaler.queue_len" expected_val: 0 - name: "finisher-progress.complete_latency.avgcount" min: 2 - name: "finisher-progress.queue_len" expected_val: 0 - name: "finisher-status.complete_latency.avgcount" min: 2 - name: "finisher-status.queue_len" expected_val: 0 - name: "finisher-telemetry.complete_latency.avgcount" min: 2 - name: "finisher-telemetry.queue_len" expected_val: 0 - workunit: clients: client.0: - mgr/test_per_module_finisher.sh
file_length: 1,539 | avg_line_length: 34 | max_line_length: 70 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/tasks/progress.yaml
overrides: ceph: conf: osd: osd mclock profile: high_recovery_ops tasks: - install: - ceph: config: global: osd pool default size : 3 osd pool default min size : 2 # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(MDS_ALL_DOWN\) - \(MDS_UP_LESS_THAN_MAX\) - \(FS_WITH_FAILED_MDS\) - \(FS_DEGRADED\) - \(PG_ - \(OSDMAP_FLAGS\) - replacing it with standby - No standby daemons available - cephfs_test_runner: modules: - tasks.mgr.test_progress
file_length: 750 | avg_line_length: 24.033333 | max_line_length: 68 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/tasks/prometheus.yaml
tasks: - install: - ceph: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ - replacing it with standby - No standby daemons available - cephfs_test_runner: modules: - tasks.mgr.test_prometheus
file_length: 413 | avg_line_length: 23.352941 | max_line_length: 68 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/mgr/tasks/workunits.yaml
tasks: - install: - ceph: # tests may leave mgrs broken, so don't try and call into them # to invoke e.g. pg dump during teardown. wait-for-scrub: false log-ignorelist: - overall HEALTH_ - \(MGR_DOWN\) - \(PG_ - replacing it with standby - No standby daemons available - workunit: clients: client.0: - mgr/test_localpool.sh
file_length: 418 | avg_line_length: 23.647059 | max_line_length: 68 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/ceph.yaml
overrides: ceph: conf: client: debug monc: 20 debug ms: 1 mon: mon min osdmap epochs: 25 paxos service trim min: 5 # prune full osdmaps regularly mon osdmap full prune min: 15 mon osdmap full prune interval: 2 mon osdmap full prune txsize: 2 mon scrub inject crc mismatch: 0.01 mon scrub inject missing keys: 0.05 # thrashing monitors may make mgr have trouble w/ its keepalive log-ignorelist: - ScrubResult - scrub mismatch - overall HEALTH_ - \(MGR_DOWN\) # slow mons -> slow peering -> PG_AVAILABILITY - \(PG_AVAILABILITY\) - \(SLOW_OPS\) - slow request tasks: - install: - ceph:
file_length: 727 | avg_line_length: 24.103448 | max_line_length: 63 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/rados.yaml
.qa/config/rados.yaml
file_length: 21 | avg_line_length: 21 | max_line_length: 21 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/clusters/3-mons.yaml
roles: - [mon.a, mon.c, osd.0, osd.1, osd.2] - [mon.b, mgr.x, osd.3, osd.4, osd.5, client.0] openstack: - volumes: # attached to each instance count: 3 size: 10 # GB
file_length: 180 | avg_line_length: 21.625 | max_line_length: 47 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/clusters/9-mons.yaml
roles: - [mon.a, mon.b, mon.c, mon.d, mon.e, osd.0, osd.1, osd.2] - [mon.f, mon.g, mon.h, mon.i, mgr.x, osd.3, osd.4, osd.5, client.0] openstack: - volumes: # attached to each instance count: 3 size: 10 # GB
file_length: 222 | avg_line_length: 26.875 | max_line_length: 68 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/msgr-failures/few.yaml
overrides: ceph: conf: global: ms inject socket failures: 5000 mon client directed command retry: 5 log-ignorelist: - \(OSD_SLOW_PING_TIME
file_length: 177 | avg_line_length: 18.777778 | max_line_length: 44 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml
overrides: ceph: conf: global: ms inject socket failures: 2500 ms inject delay type: mon ms inject delay probability: .005 ms inject delay max: 1 ms inject internal delays: .002 mon client directed command retry: 5 mgr: debug monc: 10 log-ignorelist: - \(OSD_SLOW_PING_TIME
file_length: 358 | avg_line_length: 22.933333 | max_line_length: 44 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml
overrides: ceph: log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) - \(TOO_FEW_PGS\) tasks: - mon_thrash: revive_delay: 90 thrash_delay: 1 store_thrash: true thrash_many: true
file_length: 215 | avg_line_length: 15.615385 | max_line_length: 23 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/thrashers/many.yaml
overrides: ceph: log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) conf: osd: mon client ping interval: 4 mon client ping timeout: 12 tasks: - mon_thrash: revive_delay: 20 thrash_delay: 1 thrash_many: true freeze_mon_duration: 20 freeze_mon_probability: 10
file_length: 320 | avg_line_length: 17.882353 | max_line_length: 35 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/thrashers/one.yaml
overrides: ceph: log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) tasks: - mon_thrash: revive_delay: 20 thrash_delay: 1
file_length: 146 | avg_line_length: 13.7 | max_line_length: 23 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/thrashers/sync-many.yaml
overrides: ceph: log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) conf: mon: paxos min: 10 paxos trim min: 10 tasks: - mon_thrash: revive_delay: 90 thrash_delay: 1 thrash_many: true
file_length: 238 | avg_line_length: 14.933333 | max_line_length: 26 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/thrashers/sync.yaml
overrides: ceph: log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) conf: mon: paxos min: 10 paxos trim min: 10 tasks: - mon_thrash: revive_delay: 90 thrash_delay: 1
file_length: 216 | avg_line_length: 14.5 | max_line_length: 26 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml
overrides: ceph: log-ignorelist: - slow request - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) tasks: - exec: client.0: - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel - ceph_test_rados_delete_pools_parallel
file_length: 2,400 | avg_line_length: 39.694915 | max_line_length: 45 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/workloads/rados_5925.yaml
overrides: ceph: log-ignorelist: - overall HEALTH_ - \(POOL_APP_NOT_ENABLED\) tasks: - exec: client.0: - ceph_test_rados_delete_pools_parallel --debug_objecter 20 --debug_ms 1 --debug_rados 20 --debug_monc 20
file_length: 240 | avg_line_length: 23.1 | max_line_length: 111 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml
overrides: ceph: log-ignorelist: - reached quota - overall HEALTH_ - \(CACHE_POOL_NO_HIT_SET\) - \(CACHE_POOL_NEAR_FULL\) - \(POOL_FULL\) - \(SLOW_OPS\) - \(MON_DOWN\) - \(PG_ - \(POOL_APP_NOT_ENABLED\) - \(SMALLER_PGP_NUM\) - slow request conf: global: debug objecter: 20 debug rados: 20 debug ms: 1 mon: mon warn on pool no app: false osd: osd class load list: "*" osd class default list: "*" tasks: - workunit: clients: client.0: - rados/test.sh
file_length: 609 | avg_line_length: 19.333333 | max_line_length: 38 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml
overrides: ceph: conf: mon: mon debug extra checks: true mon min osdmap epochs: 100 mon osdmap full prune enabled: true mon osdmap full prune min: 200 mon osdmap full prune interval: 10 mon osdmap full prune txsize: 100 osd: osd beacon report interval: 10 log-ignorelist: # setting/unsetting noup will trigger health warns, # causing tests to fail due to health warns, even if # the tests themselves are successful. - \(OSDMAP_FLAGS\) tasks: - workunit: clients: client.0: - mon/test_mon_osdmap_prune.sh
file_length: 624 | avg_line_length: 26.173913 | max_line_length: 58 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml
overrides: ceph: log-ignorelist: - but it is still running - overall HEALTH_ - \(PG_ - \(MON_DOWN\) - \(AUTH_BAD_CAPS\) tasks: - workunit: clients: client.0: - mon/pool_ops.sh - mon/crush_ops.sh - mon/osd.sh - mon/caps.sh
file_length: 291 | avg_line_length: 15.222222 | max_line_length: 29 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml
tasks: - rados: clients: [client.0] ops: 4000 objects: 50 op_weights: read: 100 write: 100 delete: 50 snap_create: 50 snap_remove: 50 rollback: 50 copy_from: 50
file_length: 219 | avg_line_length: 14.714286 | max_line_length: 23 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/multimon/no_pools.yaml
overrides: ceph: create_rbd_pool: false pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force
file_length: 124 | avg_line_length: 19.833333 | max_line_length: 55 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/multimon/rados.yaml
.qa/config/rados.yaml
file_length: 21 | avg_line_length: 21 | max_line_length: 21 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/multimon/clusters/21.yaml
roles: - [mon.a, mon.d, mon.g, mon.j, mon.m, mon.p, mon.s] - [mon.b, mon.e, mon.h, mon.k, mon.n, mon.q, mon.t, mgr.x] - [mon.c, mon.f, mon.i, mon.l, mon.o, mon.r, mon.u] openstack: - volumes: # attached to each instance count: 1 size: 10 # GB
file_length: 251 | avg_line_length: 27 | max_line_length: 58 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/multimon/clusters/3.yaml
roles: - [mon.a, mon.c] - [mon.b, mgr.x] openstack: - volumes: # attached to each instance count: 2 size: 10 # GB
file_length: 122 | avg_line_length: 14.375 | max_line_length: 38 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/multimon/clusters/6.yaml
roles: - [mon.a, mon.c, mon.e, mgr.x] - [mon.b, mon.d, mon.f, mgr.y] openstack: - volumes: # attached to each instance count: 1 size: 10 # GB
file_length: 150 | avg_line_length: 17.875 | max_line_length: 38 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/multimon/clusters/9.yaml
roles: - [mon.a, mon.d, mon.g] - [mon.b, mon.e, mon.h, mgr.x] - [mon.c, mon.f, mon.i] openstack: - volumes: # attached to each instance count: 1 size: 10 # GB
file_length: 167 | avg_line_length: 17.666667 | max_line_length: 38 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/multimon/msgr-failures/few.yaml
overrides: ceph: conf: global: ms inject socket failures: 5000 mon client directed command retry: 5 log-ignorelist: - \(OSD_SLOW_PING_TIME
file_length: 177 | avg_line_length: 18.777778 | max_line_length: 44 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/multimon/msgr-failures/many.yaml
overrides: ceph: conf: global: ms inject socket failures: 1000 mon client directed command retry: 5 mon mgr beacon grace: 90 log-ignorelist: - \(OSD_SLOW_PING_TIME
file_length: 210 | avg_line_length: 20.1 | max_line_length: 44 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml
tasks: - install: - ceph: log-ignorelist: - slow request - .*clock.*skew.* - clocks not synchronized - overall HEALTH_ - \(MON_CLOCK_SKEW\) - mon_clock_skew_check: expect-skew: false
file_length: 211 | avg_line_length: 16.666667 | max_line_length: 29 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
tasks: - install: - exec: mon.b: - sudo systemctl stop chronyd.service || true - sudo systemctl stop systemd-timesync.service || true - sudo systemctl stop ntpd.service || true - sudo systemctl stop ntp.service || true - date -u -s @$(expr $(date -u +%s) + 2) - ceph: wait-for-healthy: false log-ignorelist: - .*clock.*skew.* - clocks not synchronized - overall HEALTH_ - \(MON_CLOCK_SKEW\) - \(MGR_DOWN\) - \(MON_DOWN\) - \(PG_ - \(SLOW_OPS\) - No standby daemons available - slow request - mon_clock_skew_check: expect-skew: true
file_length: 608 | avg_line_length: 23.36 | max_line_length: 58 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/multimon/tasks/mon_recovery.yaml
tasks: - install: - ceph: log-ignorelist: - overall HEALTH_ - \(MON_DOWN\) - \(PG_AVAILABILITY\) - \(SLOW_OPS\) - slow request - mon_recovery:
file_length: 177 | avg_line_length: 15.181818 | max_line_length: 27 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml
roles: - [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, client.0] openstack: - volumes: # attached to each instance count: 6 size: 10 # GB tasks: - install: - ceph: fs: xfs conf: global: osd max object name len: 460 osd max object namespace len: 64 osd: osd objectstore: bluestore osd op queue: wpq log-ignorelist: - overall HEALTH_ - \(OSDMAP_FLAGS\) - \(OSD_ - \(PG_ - \(TOO_FEW_PGS\) - \(POOL_APP_NOT_ENABLED\) - ceph_objectstore_tool: objects: 20
file_length: 565 | avg_line_length: 19.962963 | max_line_length: 68 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/objectstore/backends/fusestore.yaml
roles: - [mon.a, mgr.x, osd.0, osd.1, client.0] tasks: - install: - workunit: clients: all: - objectstore/test_fuse.sh
file_length: 138 | avg_line_length: 12.9 | max_line_length: 40 | extension_type: yaml

repo: null
ceph-main/qa/suites/rados/objectstore/backends/keyvaluedb.yaml
roles: - [mon.a, mgr.x, osd.0, osd.1, client.0] tasks: - install: - exec: client.0: - mkdir $TESTDIR/kvtest && cd $TESTDIR/kvtest && ceph_test_keyvaluedb - rm -rf $TESTDIR/kvtest
file_length: 195 | avg_line_length: 20.777778 | max_line_length: 75 | extension_type: yaml