Dataset columns (name, dtype, observed range):

  repo             string   lengths 2 to 152
  file             string   lengths 15 to 239
  code             string   lengths 0 to 58.4M
  file_length      int64    0 to 58.4M
  avg_line_length  float64  0 to 1.81M
  max_line_length  int64    0 to 12.7M
  extension_type   string   364 distinct classes
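Each record below is one file: its path, its raw text in "code", and the size metrics above. As a minimal, hedged sketch of streaming a few rows with the Hugging Face datasets library (the dataset identifier is a placeholder, since the real name is not shown on this page):

from itertools import islice
from datasets import load_dataset

# Placeholder dataset id -- the real identifier is not given here.
ds = load_dataset("someuser/ceph-code-files", split="train", streaming=True)

for row in islice(ds, 3):
    # Each row exposes the columns listed above.
    print(row["file"], row["extension_type"], row["file_length"])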
null
ceph-main/qa/suites/fs/verify/validater/valgrind.yaml
overrides:
  install:
    ceph:
      debuginfo: true
  ceph:
    # Valgrind makes everything slow, so ignore slow requests and extend heartbeat grace
    log-ignorelist:
      - slow request
      - SLOW_OPS
      - MON_DOWN
    conf:
      global:
        osd heartbeat grace: 60
        mds heartbeat grace: 60
        mds beacon grace: 60
      mds:
        mds valgrind exit: true
      mon:
        mon osd crush smoke test: false
      osd:
        osd fast shutdown: false
    valgrind:
      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
      mds: [--tool=memcheck]
    watchdog:
      daemon_restart: normal
  ceph-fuse:
    client.0:
      valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
739
23.666667
88
yaml
null
ceph-main/qa/suites/fs/volumes/clusters/1a3s-mds-4c-client.yaml
.qa/cephfs/clusters/1a3s-mds-4c-client.yaml
43
43
43
yaml
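The row above is one of many whose "code" field is just a relative ".qa/..." path whose length equals file_length; these appear to be repository symlinks whose link target was captured as the file text. A hedged sketch of a heuristic to flag such rows (the helper name and heuristic are mine, for illustration only):

def is_symlink_row(row: dict) -> bool:
    # Single line, no newline, and it looks like a relative path into .qa/
    code = row["code"]
    return "\n" not in code and code.startswith(".qa/") and row["file_length"] == len(code)

row = {
    "file": "ceph-main/qa/suites/fs/volumes/clusters/1a3s-mds-4c-client.yaml",
    "code": ".qa/cephfs/clusters/1a3s-mds-4c-client.yaml",
    "file_length": 43,
}
print(is_symlink_row(row))  # True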
null
ceph-main/qa/suites/fs/volumes/objectstore/bluestore-bitmap.yaml
.qa/objectstore/bluestore-bitmap.yaml
37
37
37
yaml
null
ceph-main/qa/suites/fs/volumes/overrides/ignorelist_health.yaml
.qa/cephfs/overrides/ignorelist_health.yaml
43
43
43
yaml
null
ceph-main/qa/suites/fs/volumes/overrides/ignorelist_wrongly_marked_down.yaml
.qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
56
56
56
yaml
null
ceph-main/qa/suites/fs/volumes/overrides/no_client_pidfile.yaml
.qa/overrides/no_client_pidfile.yaml
36
36
36
yaml
null
ceph-main/qa/suites/fs/volumes/tasks/volumes/overrides.yaml
overrides:
  ceph:
    conf:
      mgr:
        debug client: 20
        debug ms: 1
        debug finisher: 20
        debug mgr: 20
    log-ignorelist:
      - OSD full dropping all updates
      - OSD near full
      - pausewr flag
      - failsafe engaged, dropping updates
      - failsafe disengaged, no longer dropping
      - is full \(reached quota
      - POOL_FULL
      - POOL_BACKFILLFULL
402
21.388889
47
yaml
null
ceph-main/qa/suites/fs/volumes/tasks/volumes/test/basic.yaml
tasks:
  - cephfs_test_runner:
      fail_on_skip: false
      modules:
        - tasks.cephfs.test_volumes.TestVolumes
        - tasks.cephfs.test_volumes.TestSubvolumeGroups
        - tasks.cephfs.test_volumes.TestSubvolumes
        - tasks.cephfs.test_subvolume.TestSubvolume
279
30.111111
55
yaml
null
ceph-main/qa/suites/fs/volumes/tasks/volumes/test/clone.yaml
tasks:
  - cephfs_test_runner:
      fail_on_skip: false
      modules:
        - tasks.cephfs.test_volumes.TestSubvolumeSnapshotClones
136
21.833333
63
yaml
null
ceph-main/qa/suites/fs/volumes/tasks/volumes/test/finisher_per_module.yaml
tasks:
  - check-counter:
      counters:
        mgr:
          - name: "finisher-volumes.complete_latency.avgcount"
            min: 4
          - name: "finisher-volumes.queue_len"
            expected_val: 0
  - cephfs_test_runner:
      fail_on_skip: false
      modules:
        - tasks.cephfs.test_volumes.TestPerModuleFinsherThread
349
24
64
yaml
null
ceph-main/qa/suites/fs/volumes/tasks/volumes/test/misc.yaml
tasks:
  - cephfs_test_runner:
      fail_on_skip: false
      modules:
        - tasks.cephfs.test_volumes.TestMisc
117
18.666667
44
yaml
null
ceph-main/qa/suites/fs/volumes/tasks/volumes/test/snapshot.yaml
tasks:
  - cephfs_test_runner:
      fail_on_skip: false
      modules:
        - tasks.cephfs.test_volumes.TestSubvolumeGroupSnapshots
        - tasks.cephfs.test_volumes.TestSubvolumeSnapshots
195
27
63
yaml
null
ceph-main/qa/suites/fs/workload/0-rhel_8.yaml
.qa/distros/podman/rhel_8.6_container_tools_rhel8.yaml
54
54
54
yaml
null
ceph-main/qa/suites/fs/workload/standby-replay.yaml
overrides: ceph: cephfs: standby_replay: true
58
10.8
26
yaml
null
ceph-main/qa/suites/fs/workload/begin/0-install.yaml
.qa/cephfs/begin/0-install.yaml
31
31
31
yaml
null
ceph-main/qa/suites/fs/workload/begin/1-cephadm.yaml
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
tasks:
  - cephadm:
      roleless: false
  - cephadm.shell:
      mon.a:
        - ceph orch status
        - ceph orch ps
        - ceph orch ls
        - ceph orch host ls
        - ceph orch device ls
  - cephadm.shell:
      mon.a:
        - ceph fs dump
        - ceph osd dump
  - fs.ready:
      timeout: 300
367
15.727273
39
yaml
null
ceph-main/qa/suites/fs/workload/begin/2-logrotate.yaml
.qa/cephfs/begin/2-logrotate.yaml
33
33
33
yaml
null
ceph-main/qa/suites/fs/workload/clusters/1a11s-mds-1c-client-3node.yaml
.qa/cephfs/clusters/1a11s-mds-1c-client-3node.yaml
50
50
50
yaml
null
ceph-main/qa/suites/fs/workload/mount/fuse.yaml
.qa/cephfs/mount/fuse.yaml
26
26
26
yaml
null
ceph-main/qa/suites/fs/workload/mount/kclient/ms_mode/crc.yaml
overrides: kclient: mntopts: ["ms_mode=crc"]
51
12
28
yaml
null
ceph-main/qa/suites/fs/workload/mount/kclient/ms_mode/legacy.yaml
overrides: kclient: mntopts: ["ms_mode=legacy"]
54
12.75
31
yaml
null
ceph-main/qa/suites/fs/workload/mount/kclient/ms_mode/secure.yaml
overrides: kclient: mntopts: ["ms_mode=secure"]
54
12.75
31
yaml
null
ceph-main/qa/suites/fs/workload/mount/kclient/wsync/no.yaml
overrides: kclient: mntopts: ["nowsync"]
47
11
24
yaml
null
ceph-main/qa/suites/fs/workload/mount/kclient/wsync/yes.yaml
overrides: kclient: mntopts: ["wsync"]
45
10.5
22
yaml
null
ceph-main/qa/suites/fs/workload/omap_limit/10.yaml
overrides: ceph: conf: osd: osd_max_omap_entries_per_request: 10
85
13.333333
44
yaml
null
ceph-main/qa/suites/fs/workload/omap_limit/10000.yaml
overrides: ceph: conf: osd: osd_max_omap_entries_per_request: 10000
88
13.833333
47
yaml
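Fragments like the two omap_limit files above carry only a slice of a job's configuration; teuthology builds a job by picking one fragment per facet directory and deep-merging the YAML documents. The sketch below is a rough illustration of that layering using my own naive merge helper, not teuthology's real implementation; the second fragment reuses the osd-shutdown override seen earlier in this listing.

import yaml  # PyYAML; illustration only

def deep_merge(base: dict, extra: dict) -> dict:
    # Recursively overlay "extra" on top of "base".
    out = dict(base)
    for key, value in extra.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = deep_merge(out[key], value)
        else:
            out[key] = value
    return out

omap_fragment = yaml.safe_load(
    "overrides:\n  ceph:\n    conf:\n      osd:\n        osd_max_omap_entries_per_request: 10\n"
)
pgref_fragment = yaml.safe_load(
    "overrides:\n  ceph:\n    conf:\n      osd:\n        osd shutdown pgref assert: true\n"
)
print(deep_merge(omap_fragment, pgref_fragment))
# -> both osd settings end up under overrides.ceph.conf.osd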
null
ceph-main/qa/suites/fs/workload/overrides/cephsqlite-timeout.yaml
# increase lock renewal timeout: OSD stress from small clusters may cause
# spurious timeouts
overrides:
  ceph:
    conf:
      mgr:
        cephsqlite lock renewal timeout: 900000
182
21.875
73
yaml
null
ceph-main/qa/suites/fs/workload/overrides/frag.yaml
.qa/cephfs/overrides/frag.yaml
30
30
30
yaml
null
ceph-main/qa/suites/fs/workload/overrides/ignorelist_health.yaml
.qa/cephfs/overrides/ignorelist_health.yaml
43
43
43
yaml
null
ceph-main/qa/suites/fs/workload/overrides/ignorelist_wrongly_marked_down.yaml
.qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
56
56
56
yaml
null
ceph-main/qa/suites/fs/workload/overrides/osd-asserts.yaml
.qa/cephfs/overrides/osd-asserts.yaml
37
37
37
yaml
null
ceph-main/qa/suites/fs/workload/overrides/session_timeout.yaml
.qa/cephfs/overrides/session_timeout.yaml
41
41
41
yaml
null
ceph-main/qa/suites/fs/workload/ranks/1.yaml
0
0
0
yaml
null
ceph-main/qa/suites/fs/workload/ranks/multi/export-check.yaml
overrides:
  check-counter:
    counters:
      mds:
        - mds.exported
        - mds.imported
99
13.285714
22
yaml
null
ceph-main/qa/suites/fs/workload/ranks/multi/n/3.yaml
overrides: ceph: cephfs: max_mds: 3
48
8.8
16
yaml
null
ceph-main/qa/suites/fs/workload/ranks/multi/n/5.yaml
overrides: ceph: cephfs: max_mds: 5
48
8.8
16
yaml
null
ceph-main/qa/suites/fs/workload/ranks/multi/replication/always.yaml
# To exercise lock/witness code paths more regularly, try to get all
# directories replicated.
overrides:
  ceph:
    conf:
      mds:
        mds_bal_replicate_threshold: 1
  # Note: dir_update is only sent by an MDS trying to replicate a dir.
  # dir_update is always sent for root, so the count should be more than 2
  check-counter:
    counters:
      mds:
        - name: mds_cache.dir_update
          min: 3
        - name: mds_cache.dir_update_receipt
          min: 3
494
25.052632
72
yaml
null
ceph-main/qa/suites/fs/workload/ranks/multi/replication/default.yaml
# Use default (8000)
21
10
20
yaml
null
ceph-main/qa/suites/fs/workload/tasks/1-check-counter.yaml
tasks: - check-counter: {}
27
8.333333
19
yaml
null
ceph-main/qa/suites/fs/workload/tasks/0-subvolume/no-subvolume.yaml
0
0
0
yaml
null
ceph-main/qa/suites/fs/workload/tasks/0-subvolume/with-namespace-isolated-and-quota.yaml
overrides:
  ceph:
    subvols:
      create: 2
      subvol_options: "--namespace-isolated --size 25000000000"
  ceph-fuse:
    client.0:
      mount_subvol_num: 0
  kclient:
    client.0:
      mount_subvol_num: 1
216
17.083333
63
yaml
null
ceph-main/qa/suites/fs/workload/tasks/0-subvolume/with-namespace-isolated.yaml
overrides:
  ceph:
    subvols:
      create: 2
      subvol_options: "--namespace-isolated"
  ceph-fuse:
    client.0:
      mount_subvol_num: 0
  kclient:
    client.0:
      mount_subvol_num: 1
197
15.5
44
yaml
null
ceph-main/qa/suites/fs/workload/tasks/0-subvolume/with-no-extra-options.yaml
overrides:
  ceph:
    subvols:
      create: 2
  ceph-fuse:
    client.0:
      mount_subvol_num: 0
  kclient:
    client.0:
      mount_subvol_num: 1
152
12.909091
25
yaml
null
ceph-main/qa/suites/fs/workload/tasks/0-subvolume/with-quota.yaml
overrides:
  ceph:
    subvols:
      create: 2
      subvol_options: "--size 25000000000"
  ceph-fuse:
    client.0:
      mount_subvol_num: 0
  kclient:
    client.0:
      mount_subvol_num: 1
195
15.333333
42
yaml
null
ceph-main/qa/suites/fs/workload/tasks/2-scrub/no.yaml
0
0
0
yaml
null
ceph-main/qa/suites/fs/workload/tasks/2-scrub/yes.yaml
overrides:
  ceph:
    log-ignorelist:
      - slow metadata IO
      - SLOW_OPS
      - slow request
tasks:
  - fwd_scrub:
      scrub_timeout: 900
      sleep_between_iterations: 1
177
15.181818
31
yaml
null
ceph-main/qa/suites/fs/workload/tasks/3-snaps/no.yaml
0
0
0
yaml
null
ceph-main/qa/suites/fs/workload/tasks/3-snaps/yes.yaml
overrides:
  ceph:
    conf:
      mgr:
        debug cephsqlite: 20
  check-counter:
    counters:
      mds:
        - mds.root_rsnaps
        - mds_server.req_mksnap_latency.avgcount
        - mds_server.req_rmsnap_latency.avgcount
tasks:
  - exec:
      mon.a:
        - ceph mgr module enable snap_schedule
        - ceph config set mgr mgr/snap_schedule/allow_m_granularity true
        - ceph config set mgr mgr/snap_schedule/dump_on_update true
        - ceph fs snap-schedule add --fs=cephfs --path=/ --snap_schedule=1M
        - ceph fs snap-schedule retention add --fs=cephfs --path=/ --retention-spec-or-period=6M3h
        - ceph fs snap-schedule status --fs=cephfs --path=/
        - ceph fs snap-schedule list --fs=cephfs --path=/ --recursive=true
        - date +%s > START_TIME
  - full_sequential_finally:
      - exec:
          mon.a:
            # Ensure that we have some snaps which get deleted (so check-counters does not fail)
            - date +%s > END_TIME
            - START_TIME=$(cat START_TIME); END_TIME=$(cat END_TIME); DIFF_TIME=$((600-(END_TIME-START_TIME))); if [ "$DIFF_TIME" -gt 0 ]; then sleep "$DIFF_TIME"; fi
            - ceph fs snap-schedule status --fs=cephfs --path=/
            - ceph fs snap-schedule list --fs=cephfs --path=/ --recursive=true
1,246
39.225806
162
yaml
null
ceph-main/qa/suites/fs/workload/tasks/4-flush/no.yaml
0
0
0
yaml
null
ceph-main/qa/suites/fs/workload/tasks/4-flush/yes.yaml
tasks:
  - background_exec:
      mon.a:
        - while sleep 13; do ceph tell mds.cephfs:0 flush journal; done
107
20.6
69
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/direct_io.yaml
overrides:
  check-counter:
    dry_run: true
tasks:
  - workunit:
      clients:
        all:
          - direct_io
110
10.1
19
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/kernel_untar_build.yaml
overrides:
  check-counter:
    counters:
      mds:
        - "mds.dir_split"
tasks:
  - workunit:
      clients:
        all:
          - kernel_untar_build.sh
154
13.090909
31
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/postgres.yaml
# I would expect setting the context on the postgresql database directories
# would correctly trickle down to the files created by the postgresql daemon,
# but this does not appear to work. I would still see denials like:
# type=AVC msg=audit(1655861665.521:21354): avc: denied { create } for pid=131994 comm="postmaster" name="replorigin_checkpoint.tmp" scontext=system_u:system_r:postgresql_t:s0 tcontext=system_u:object_r:cephfs_t:s0 tclass=file permissive=1'
# Instead, we'll just set the context for the mount and be done with it. I've
# left in the context setting for the directories below.
overrides:
  ceph-fuse:
    client.0:
      mountpoint: /tmp/cephfs
      mntopts: ["context=system_u:object_r:postgresql_db_t:s0"]
  kclient:
    client.0:
      mountpoint: /tmp/cephfs
      mntopts: ["context=system_u:object_r:postgresql_db_t:s0"]
tasks:
  - exec:
      client.0:
        - sudo ls -l /tmp/cephfs/ && sudo df -h /tmp/cephfs/
        - sudo mkdir -m 755 --context=system_u:system_r:postgresql_t:s0 /tmp/cephfs/postgres && sudo chown postgres:postgres /tmp/cephfs/postgres
        - sudo -u postgres -- mkdir -m 700 --context=system_u:system_r:postgresql_t:s0 /tmp/cephfs/postgres/data
        - sudo semanage fcontext -a -t postgresql_db_t "/tmp/cephfs/postgres(/.*)?"
        - sudo grep -i postgresql /etc/selinux/targeted/contexts/files/file_contexts.local
        - sudo restorecon -R -v /tmp/cephfs/postgres
        - sudo ls -lZaR /tmp/cephfs/postgres/
        - sudo mkdir -p /etc/systemd/system/postgresql.service.d/ && printf '[Service]\nEnvironment=PGDATA=/tmp/cephfs/postgres/data\nEnvironment=PGLOG=/tmp/cephfs/postgres/pgstartup.log\n' | sudo tee /etc/systemd/system/postgresql.service.d/env.conf
        - sudo -u postgres -- postgresql-setup --initdb
        - sudo ls -lZaR /tmp/cephfs/postgres/
        - sudo systemctl start postgresql
        - sudo -u postgres -- pgbench -s 500 -i
        - sudo -u postgres -- pgbench -c 100 -j 4 --progress=5 --time=900
        - sudo systemctl stop postgresql
        - sudo ls -lZaR /tmp/cephfs/postgres/
2,056
54.594595
248
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/fs/misc.yaml
overrides:
  check-counter:
    counters:
      mds:
        - "mds.dir_split"
tasks:
  - workunit:
      clients:
        all:
          - fs/misc
140
11.818182
25
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/fs/norstats.yaml
overrides:
  check-counter:
    counters:
      mds:
        - "mds.dir_split"
tasks:
  - workunit:
      clients:
        all:
          - fs/norstats
overrides:
  kclient:
    rbytes: false
  ceph:
    conf:
      client:
        client dirsize rbytes: false
253
13.111111
36
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/fs/test_o_trunc.yaml
overrides:
  check-counter:
    dry_run: true
tasks:
  - workunit:
      clients:
        all:
          - fs/test_o_trunc.sh
118
12.222222
28
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/suites/blogbench.yaml
overrides:
  check-counter:
    counters:
      mds:
        - "mds.dir_split"
tasks:
  - workunit:
      clients:
        all:
          - suites/blogbench.sh
152
12.909091
29
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/suites/dbench.yaml
tasks:
  - workunit:
      clients:
        all:
          - suites/dbench.sh
70
10.833333
26
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/suites/ffsb.yaml
overrides:
  ceph:
    log-ignorelist:
      - SLOW_OPS
      - slow request
    conf:
      osd:
        filestore flush min: 0
        osd heartbeat grace: 60
  check-counter:
    counters:
      mds:
        - "mds.dir_split"
tasks:
  - workunit:
      clients:
        all:
          - suites/ffsb.sh
293
14.473684
31
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/suites/fsstress.yaml
tasks:
  - workunit:
      timeout: 6h
      clients:
        all:
          - suites/fsstress.sh
88
11.714286
28
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/suites/fsx.yaml
overrides:
  check-counter:
    counters:
      mds:
        - "mds.dir_split"
tasks:
  - workunit:
      clients:
        all:
          - suites/fsx.sh
146
12.363636
25
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/suites/fsync-tester.yaml
overrides:
  check-counter:
    dry_run: true
tasks:
  - workunit:
      clients:
        all:
          - suites/fsync-tester.sh
122
12.666667
32
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/suites/iogen.yaml
overrides:
  check-counter:
    dry_run: true
tasks:
  - workunit:
      clients:
        all:
          - suites/iogen.sh
115
11.888889
25
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/suites/iozone.yaml
overrides:
  check-counter:
    dry_run: true
tasks:
  - workunit:
      clients:
        all:
          - suites/iozone.sh
116
12
26
yaml
null
ceph-main/qa/suites/fs/workload/tasks/5-workunit/suites/pjd.yaml
overrides:
  ceph:
    conf:
      client:
        fuse set user groups: true
        fuse default permissions: false
tasks:
  - workunit:
      timeout: 6h
      clients:
        all:
          - suites/pjd.sh
201
14.538462
39
yaml
null
ceph-main/qa/suites/hadoop/basic/clusters/fixed-3.yaml
overrides:
  ceph:
    conf:
      client:
        client permissions: false
roles:
  - [mon.0, mds.a, osd.0, hadoop.master.0]
  - [mon.1, mgr.x, osd.1, hadoop.slave.0]
  - [mon.2, mgr.y, hadoop.slave.1, client.0]
openstack:
  - volumes: # attached to each instance
      count: 1
      size: 10 # GB
289
19.714286
42
yaml
null
ceph-main/qa/suites/hadoop/basic/distros/ubuntu_latest.yaml
.qa/distros/supported/ubuntu_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/hadoop/basic/tasks/repl.yaml
tasks:
  - ssh_keys:
  - install:
  - ceph:
  - hadoop:
  - workunit:
      clients:
        client.0: [hadoop/repl.sh]
106
10.888889
32
yaml
null
ceph-main/qa/suites/hadoop/basic/tasks/terasort.yaml
tasks:
  - ssh_keys:
  - install:
  - ceph:
  - hadoop:
  - workunit:
      clients:
        client.0: [hadoop/terasort.sh]
      env:
        NUM_RECORDS: "10000000"
150
12.727273
36
yaml
null
ceph-main/qa/suites/hadoop/basic/tasks/wordcount.yaml
tasks:
  - ssh_keys:
  - install:
  - ceph:
  - hadoop:
  - workunit:
      clients:
        client.0: [hadoop/wordcount.sh]
112
11.555556
37
yaml
null
ceph-main/qa/suites/krbd/basic/bluestore-bitmap.yaml
.qa/objectstore/bluestore-bitmap.yaml
37
37
37
yaml
null
ceph-main/qa/suites/krbd/basic/conf.yaml
overrides:
  ceph:
    conf:
      global:
        ms die on skipped message: false
      client:
        rbd default features: 37
131
15.5
40
yaml
null
ceph-main/qa/suites/krbd/basic/ceph/ceph.yaml
tasks: - install: - ceph:
26
5.75
10
yaml
null
ceph-main/qa/suites/krbd/basic/clusters/fixed-1.yaml
.qa/clusters/fixed-1.yaml
25
25
25
yaml
null
ceph-main/qa/suites/krbd/basic/ms_mode/secure.yaml
overrides: ceph: conf: client: rbd default map options: ms_mode=secure
91
14.333333
47
yaml
null
ceph-main/qa/suites/krbd/basic/ms_mode/crc$/crc-rxbounce.yaml
overrides: ceph: conf: client: rbd default map options: ms_mode=crc,rxbounce
97
15.333333
53
yaml
null
ceph-main/qa/suites/krbd/basic/ms_mode/crc$/crc.yaml
overrides: ceph: conf: client: rbd default map options: ms_mode=crc
88
13.833333
44
yaml
null
ceph-main/qa/suites/krbd/basic/ms_mode/legacy$/legacy-rxbounce.yaml
overrides: ceph: conf: client: rbd default map options: ms_mode=legacy,rxbounce
100
15.833333
56
yaml
null
ceph-main/qa/suites/krbd/basic/ms_mode/legacy$/legacy.yaml
overrides: ceph: conf: client: rbd default map options: ms_mode=legacy
91
14.333333
47
yaml
null
ceph-main/qa/suites/krbd/basic/tasks/krbd_deep_flatten.yaml
tasks: - cram: clients: client.0: - qa/rbd/krbd_deep_flatten.t
79
12.333333
34
yaml
null
ceph-main/qa/suites/krbd/basic/tasks/krbd_discard.yaml
tasks: - cram: clients: client.0: - qa/rbd/krbd_discard.t - qa/rbd/krbd_discard_512b.t - qa/rbd/krbd_discard_4M.t - qa/rbd/krbd_zeroout.t - qa/rbd/krbd_discard_granularity.t
214
20.5
41
yaml
null
ceph-main/qa/suites/krbd/basic/tasks/krbd_huge_image.yaml
tasks: - cram: clients: client.0: - qa/rbd/krbd_huge_image.t
77
12
32
yaml
null
ceph-main/qa/suites/krbd/basic/tasks/krbd_modprobe.yaml
tasks: - cram: clients: client.0: - qa/rbd/krbd_modprobe.t
75
11.666667
30
yaml
null
ceph-main/qa/suites/krbd/basic/tasks/krbd_msgr_segments.yaml
tasks: - cram: clients: client.0: - qa/rbd/krbd_msgr_segments.t
80
12.5
35
yaml
null
ceph-main/qa/suites/krbd/basic/tasks/krbd_parent_overlap.yaml
tasks: - cram: clients: client.0: - qa/rbd/krbd_parent_overlap.t
81
12.666667
36
yaml
null
ceph-main/qa/suites/krbd/basic/tasks/krbd_read_only.yaml
tasks: - cram: clients: client.0: - qa/rbd/krbd_blkroset.t - qa/rbd/krbd_get_features.t
110
14.857143
34
yaml
null
ceph-main/qa/suites/krbd/basic/tasks/krbd_whole_object_zeroout.yaml
tasks: - cram: clients: client.0: - qa/rbd/krbd_whole_object_zeroout.t
87
13.666667
42
yaml
null
ceph-main/qa/suites/krbd/fsx/conf.yaml
overrides: ceph: conf: global: ms die on skipped message: false
84
13.166667
40
yaml
null
ceph-main/qa/suites/krbd/fsx/ceph/ceph.yaml
tasks: - install: - ceph:
26
5.75
10
yaml
null
ceph-main/qa/suites/krbd/fsx/clusters/3-node.yaml
# fixed-3.yaml, but with two additional clients on the same target
roles:
  - [mon.a, mon.c, mgr.x, osd.0, osd.1, osd.2, osd.3]
  - [mon.b, mgr.y, osd.4, osd.5, osd.6, osd.7]
  - [client.0, client.1, client.2]
openstack:
  - volumes: # attached to each instance
      count: 4
      size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
365
23.4
66
yaml
null
ceph-main/qa/suites/krbd/fsx/features/no-object-map.yaml
overrides:
  ceph:
    conf:
      client:
        # layering, deep-flatten
        rbd default features: 33
109
14.714286
32
yaml
null
ceph-main/qa/suites/krbd/fsx/features/object-map.yaml
overrides:
  ceph:
    conf:
      client:
        # layering, exclusive-lock, object-map, fast-diff, deep-flatten
        rbd default features: 61
148
20.285714
71
yaml
null
ceph-main/qa/suites/krbd/fsx/ms_mode$/crc-rxbounce.yaml
overrides: ceph: conf: client: rbd default map options: ms_mode=crc,rxbounce,read_from_replica=balance
123
19.666667
79
yaml
null
ceph-main/qa/suites/krbd/fsx/ms_mode$/crc.yaml
overrides: ceph: conf: client: rbd default map options: ms_mode=crc,read_from_replica=balance
114
18.166667
70
yaml
null
ceph-main/qa/suites/krbd/fsx/ms_mode$/legacy-rxbounce.yaml
overrides: ceph: conf: client: rbd default map options: ms_mode=legacy,rxbounce,read_from_replica=balance
126
20.166667
82
yaml
null
ceph-main/qa/suites/krbd/fsx/ms_mode$/legacy.yaml
overrides: ceph: conf: client: rbd default map options: ms_mode=legacy,read_from_replica=balance
117
18.666667
73
yaml
null
ceph-main/qa/suites/krbd/fsx/ms_mode$/secure.yaml
overrides: ceph: conf: client: rbd default map options: ms_mode=secure,read_from_replica=balance
117
18.666667
73
yaml
null
ceph-main/qa/suites/krbd/fsx/objectstore/bluestore-bitmap.yaml
.qa/objectstore/bluestore-bitmap.yaml
37
37
37
yaml
null
ceph-main/qa/suites/krbd/fsx/striping/default/randomized-striping-off.yaml
overrides: rbd_fsx: randomized_striping: false
53
12.5
30
yaml
null
ceph-main/qa/suites/krbd/fsx/striping/default/msgr-failures/few.yaml
overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 5000
        mon client directed command retry: 5
    log-ignorelist:
      - \(OSD_SLOW_PING_TIME
177
18.777778
44
yaml