Dataset columns:
- repo: string (length 2 to 152)
- file: string (length 15 to 239)
- code: string (length 0 to 58.4M)
- file_length: int64 (0 to 58.4M)
- avg_line_length: float64 (0 to 1.81M)
- max_line_length: int64 (0 to 12.7M)
- extension_type: string (364 distinct values)

Each row below is rendered one cell per line, in this order: the file path, the file's code with its newlines collapsed by the viewer, then file_length, avg_line_length, max_line_length, and extension_type. The repo value does not appear separately in this extract, and a bare "null" line separates consecutive rows.
null
ceph-main/qa/suites/rados/standalone/workloads/misc.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - workunit: basedir: qa/standalone clients: all: - misc
253
12.368421
40
yaml
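Re-indented for readability, the flattened code cell above most plausibly corresponds to the following teuthology suite fragment; the indentation is reconstructed rather than taken from the source, so treat it as a best-effort rendering:

```yaml
roles:
- - mon.a
  - mgr.x
  - osd.0
  - osd.1
  - osd.2
  - client.0
openstack:
  - volumes: # attached to each instance
      count: 3
      size: 10 # GB
tasks:
- install:
- workunit:
    basedir: qa/standalone
    clients:
      all:
        - misc
```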
null
ceph-main/qa/suites/rados/standalone/workloads/mon-stretch.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - workunit: basedir: qa/standalone clients: all: - mon-stretch
259
13.444444
40
yaml
null
ceph-main/qa/suites/rados/standalone/workloads/mon.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - workunit: basedir: qa/standalone clients: all: - mon
252
12.315789
40
yaml
null
ceph-main/qa/suites/rados/standalone/workloads/osd-backfill.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - workunit: basedir: qa/standalone clients: all: - osd-backfill
261
12.789474
40
yaml
null
ceph-main/qa/suites/rados/standalone/workloads/osd.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - workunit: basedir: qa/standalone clients: all: - osd
252
12.315789
40
yaml
null
ceph-main/qa/suites/rados/standalone/workloads/scrub.yaml
roles: - - mon.a - mgr.x - osd.0 - osd.1 - osd.2 - client.0 openstack: - volumes: # attached to each instance count: 3 size: 10 # GB tasks: - install: - workunit: basedir: qa/standalone clients: all: - scrub
254
12.421053
40
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/ceph.yaml
../thrash/ceph.yaml
19
19
19
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/rados.yaml
.qa/config/rados.yaml
21
21
21
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml
.qa/tasks/thrashosds-health.yaml
32
32
32
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml
roles: - [osd.0, osd.1, osd.2, osd.3, client.0, mon.a] - [osd.4, osd.5, osd.6, osd.7, mon.b, mgr.x] - [osd.8, osd.9, osd.10, osd.11, mon.c]
140
27.2
47
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml
openstack: - volumes: # attached to each instance count: 4 size: 10 # GB
87
16.6
40
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost - slow request conf: osd: osd debug reject backfill probability: .3 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 6 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 1 chance_pgnum_shrink: 1 chance_pgpnum_fix: 1 min_in: 8 aggressive_pg_num_changes: false
468
21.333333
49
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost - slow request conf: osd: osd debug reject backfill probability: .1 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 6 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 1 chance_pgnum_shrink: 1 chance_pgpnum_fix: 1 min_in: 8 chance_bluestore_reshard: 1 bluestore_new_sharding: random
498
21.681818
49
yaml
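The default thrasher cell above, expanded from its single-line form; the nesting is inferred from typical teuthology YAML layout rather than preserved from the original file:

```yaml
overrides:
  ceph:
    log-ignorelist:
      - but it is still running
      - objects unfound and apparently lost
      - slow request
    conf:
      osd:
        osd debug reject backfill probability: .1
        osd scrub min interval: 60
        osd scrub max interval: 120
        osd max backfills: 6
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgnum_shrink: 1
    chance_pgpnum_fix: 1
    min_in: 8
    chance_bluestore_reshard: 1
    bluestore_new_sharding: random
```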
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost conf: mon: osd pool default ec fast read: true osd: osd debug reject backfill probability: .1 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 2 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 1 chance_pgnum_shrink: 1 chance_pgpnum_fix: 1 min_in: 4
467
21.285714
49
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost - osd_map_cache_size conf: mon: mon min osdmap epochs: 2 osd: osd map cache size: 1 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 6 tasks: - thrashosds: timeout: 1800 chance_pgnum_grow: 1 chance_pgnum_shrink: 1 chance_pgpnum_fix: 1 chance_test_map_discontinuity: 0.5 min_in: 8
500
20.782609
41
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml
overrides: ceph: conf: osd: osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 9 log-ignorelist: - but it is still running - objects unfound and apparently lost tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 3 chance_pgpnum_fix: 1 min_in: 8
335
18.764706
41
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost conf: osd: osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 6 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 2 chance_pgpnum_fix: 1 min_in: 8
335
18.764706
41
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=clay-k=4-m=2.yaml
.qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml
50
50
50
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml
.qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml
54
54
54
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
.qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
53
53
53
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml
../thrash/ceph.yaml
19
19
19
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-isa/rados.yaml
.qa/config/rados.yaml
21
21
21
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml
.qa/tasks/thrashosds-health.yaml
32
32
32
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml
arch: x86_64
13
6
12
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml
.qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml
49
49
49
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-overwrites/bluestore-bitmap.yaml
../thrash-erasure-code/objectstore/bluestore-bitmap.yaml
56
56
56
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml
../thrash/ceph.yaml
19
19
19
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml
../thrash-erasure-code/rados.yaml
33
33
33
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml
.qa/tasks/thrashosds-health.yaml
32
32
32
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml
overrides: ceph: conf: global: enable experimental unrecoverable data corrupting features: '*' thrashosds: disable_objectstore_tool_tests: true tasks: - rados: clients: [client.0] ops: 4000 objects: 50 pool_snaps: true ec_pool: true erasure_code_use_overwrites: true op_weights: read: 100 write: 100 delete: 50 snap_create: 50 snap_remove: 50 rollback: 50 copy_from: 50
466
18.458333
71
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml
overrides: ceph: conf: global: enable experimental unrecoverable data corrupting features: '*' thrashosds: disable_objectstore_tool_tests: true tasks: - rados: clients: [client.0] ops: 400000 max_seconds: 600 max_in_flight: 64 objects: 1024 size: 16384 ec_pool: true erasure_code_use_overwrites: true fast_read: true op_weights: read: 100 write: 100 append: 100 delete: 50 snap_create: 50 snap_remove: 50 rollback: 50 copy_from: 50 setattr: 25 rmattr: 25
581
18.4
71
yaml
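Expanded form of the overwrite workload cell above; the split between the overrides section and the rados task follows the key order in the cell, with the indentation assumed:

```yaml
overrides:
  ceph:
    conf:
      global:
        enable experimental unrecoverable data corrupting features: '*'
  thrashosds:
    disable_objectstore_tool_tests: true
tasks:
- rados:
    clients: [client.0]
    ops: 400000
    max_seconds: 600
    max_in_flight: 64
    objects: 1024
    size: 16384
    ec_pool: true
    erasure_code_use_overwrites: true
    fast_read: true
    op_weights:
      read: 100
      write: 100
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25
```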
null
ceph-main/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml
overrides: ceph: conf: global: enable experimental unrecoverable data corrupting features: '*' thrashosds: disable_objectstore_tool_tests: true tasks: - rados: clients: [client.0] ops: 400000 max_seconds: 600 max_in_flight: 64 objects: 1024 size: 16384 ec_pool: true erasure_code_use_overwrites: true op_weights: read: 100 write: 100 append: 100 delete: 50 snap_create: 50 snap_remove: 50 rollback: 50 copy_from: 50 setattr: 25 rmattr: 25
561
18.37931
71
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml
overrides: ceph: conf: global: enable experimental unrecoverable data corrupting features: '*' thrashosds: disable_objectstore_tool_tests: true tasks: - rados: clients: [client.0] ops: 4000 objects: 50 ec_pool: true erasure_code_use_overwrites: true op_weights: read: 100 write: 100 delete: 50 snap_create: 50 snap_remove: 50 rollback: 50 copy_from: 50
445
18.391304
71
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml
../thrash/ceph.yaml
19
19
19
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-shec/rados.yaml
.qa/config/rados.yaml
21
21
21
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml
.qa/tasks/thrashosds-health.yaml
32
32
32
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml
.qa/clusters/fixed-4.yaml
25
25
25
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml
openstack: - volumes: # attached to each instance count: 4 size: 10 # GB
87
16.6
40
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost - slow request conf: osd: osd debug reject backfill probability: .3 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 3 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 1 chance_pgnum_shrink: 1 chance_pgpnum_fix: 1 min_in: 8 aggressive_pg_num_changes: false
468
21.333333
49
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost - slow request conf: osd: osd debug reject backfill probability: .1 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 3 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 1 chance_pgnum_shrink: 1 chance_pgpnum_fix: 1 min_in: 8 chance_bluestore_reshard: 1 bluestore_new_sharding: random
498
21.681818
49
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
.qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
54
54
54
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/ceph.yaml
tasks: - install: - ceph:
26
5.75
10
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/rados.yaml
.qa/config/rados.yaml
21
21
21
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml
.qa/tasks/thrashosds-health.yaml
32
32
32
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/fast/fast.yaml
overrides: ceph: conf: global: osd pool default ec fast read: true
87
13.666667
43
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/fast/normal.yaml
0
0
0
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost conf: osd: osd debug reject backfill probability: .3 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 2 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 1 chance_pgnum_shrink: 1 chance_pgpnum_fix: 1 min_in: 4 aggressive_pg_num_changes: false
449
21.5
49
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost conf: osd: osd debug reject backfill probability: .1 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 2 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 1 chance_pgnum_shrink: 1 chance_pgpnum_fix: 1 min_in: 4 chance_bluestore_reshard: 1 bluestore_new_sharding: random
479
21.857143
49
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost conf: mon: osd pool default ec fast read: true osd: osd debug reject backfill probability: .1 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 3 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 1 chance_pgnum_shrink: 1 chance_pgpnum_fix: 1 min_in: 4
467
21.285714
49
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/thrashers/minsize_recovery.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost create_rbd_pool: False pre-mgr-commands: - sudo ceph config set mgr mgr_pool false --force conf: osd: osd debug reject backfill probability: .3 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 2 tasks: - thrashosds: timeout: 1200 chance_test_min_size: 3
455
23
55
yaml
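A possible re-indentation of the minsize_recovery thrasher cell above; only the nesting is inferred, the keys and values are taken verbatim from the cell:

```yaml
overrides:
  ceph:
    log-ignorelist:
      - but it is still running
      - objects unfound and apparently lost
    create_rbd_pool: False
    pre-mgr-commands:
      - sudo ceph config set mgr mgr_pool false --force
    conf:
      osd:
        osd debug reject backfill probability: .3
        osd scrub min interval: 60
        osd scrub max interval: 120
        osd max backfills: 2
tasks:
- thrashosds:
    timeout: 1200
    chance_test_min_size: 3
```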
null
ceph-main/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml
overrides: ceph: conf: osd: osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 9 log-ignorelist: - but it is still running - objects unfound and apparently lost tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 3 chance_pgpnum_fix: 1 min_in: 4
335
18.764706
41
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost conf: osd: osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 4 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 2 chance_pgpnum_fix: 1 min_in: 4
335
18.764706
41
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=clay-k=4-m=2.yaml
.qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml
50
50
50
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml
.qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml
54
54
54
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml
.qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml
54
54
54
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml
tasks: - full_sequential: - radosbench: clients: [client.0] time: 150 unique_pool: true ec_pool: true - radosbench: clients: [client.0] time: 150 unique_pool: true ec_pool: true - radosbench: clients: [client.0] time: 150 unique_pool: true ec_pool: true - radosbench: clients: [client.0] time: 150 unique_pool: true ec_pool: true - radosbench: clients: [client.0] time: 150 unique_pool: true ec_pool: true
536
18.178571
25
yaml
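The ec-radosbench cell above repeats the same radosbench entry five times inside a full_sequential task; a sketch of the expanded YAML, with the repetition abbreviated in a comment and the indentation assumed:

```yaml
tasks:
- full_sequential:
  - radosbench:
      clients: [client.0]
      time: 150
      unique_pool: true
      ec_pool: true
  - radosbench:
      clients: [client.0]
      time: 150
      unique_pool: true
      ec_pool: true
  # ...the same radosbench block appears five times in total in the cell above
```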
null
ceph-main/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-balanced.yaml
tasks: - rados: clients: [client.0] ops: 400000 max_seconds: 600 max_in_flight: 64 objects: 1024 size: 16384 ec_pool: true balanced_reads: true op_weights: read: 100 write: 0 append: 100 delete: 50 snap_create: 50 snap_remove: 50 rollback: 50 copy_from: 50 setattr: 25 rmattr: 25
376
16.136364
24
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml
tasks: - rados: clients: [client.0] ops: 400000 max_seconds: 600 max_in_flight: 64 objects: 1024 size: 16384 ec_pool: true fast_read: true op_weights: read: 100 write: 0 append: 100 delete: 50 snap_create: 50 snap_remove: 50 rollback: 50 copy_from: 50 setattr: 25 rmattr: 25
371
15.909091
23
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-many-deletes.yaml
tasks: - rados: clients: [client.0] ops: 400000 max_seconds: 600 max_in_flight: 8 objects: 20 size: 16384 ec_pool: true op_weights: write: 0 read: 0 append: 10 delete: 20
227
14.2
23
yaml
null
ceph-main/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml
tasks: - rados: clients: [client.0] ops: 400000 max_seconds: 600 max_in_flight: 64 objects: 1024 size: 16384 ec_pool: true op_weights: read: 100 write: 0 append: 100 delete: 50 snap_create: 50 snap_remove: 50 rollback: 50 copy_from: 50 setattr: 25 rmattr: 25
351
15.761905
23
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/ceph.yaml
# Don't verify os + flavor + sha1 verify_ceph_hash: false tasks: - cephadm: conf: mon: auth allow insecure global id reclaim: true
149
17.75
51
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/rados.yaml
.qa/config/rados.yaml
21
21
21
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/thrashosds-health.yaml
.qa/tasks/thrashosds-health.yaml
32
32
32
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/0-distro$/centos_8.stream_container_tools.yaml
.qa/distros/podman/centos_8.stream_container_tools.yaml
55
55
55
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/2-size-2-min-size.yaml
.qa/overrides/2-size-2-min-size.yaml
36
36
36
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/3-size-2-min-size.yaml
.qa/overrides/3-size-2-min-size.yaml
36
36
36
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/1-install/nautilus-v1only.yaml
overrides: ceph: mon_bind_msgr2: false log-ignorelist: - \(MON_DOWN\) conf: global: ms type: async ms bind msgr2: false tasks: - install: branch: nautilus exclude_packages: - cephadm - ceph-mgr-cephadm - ceph-immutable-object-cache - python3-rados - python3-rgw - python3-rbd - python3-cephfs - ceph-volume extra_packages: - python-rados - python-rgw - python-rbd - python-cephfs
506
17.777778
35
yaml
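The nautilus-v1only install cell above, re-indented; the structure is inferred from the key order in the cell:

```yaml
overrides:
  ceph:
    mon_bind_msgr2: false
    log-ignorelist:
      - \(MON_DOWN\)
    conf:
      global:
        ms type: async
        ms bind msgr2: false
tasks:
- install:
    branch: nautilus
    exclude_packages:
      - cephadm
      - ceph-mgr-cephadm
      - ceph-immutable-object-cache
      - python3-rados
      - python3-rgw
      - python3-rbd
      - python3-cephfs
      - ceph-volume
    extra_packages:
      - python-rados
      - python-rgw
      - python-rbd
      - python-cephfs
```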
null
ceph-main/qa/suites/rados/thrash-old-clients/1-install/nautilus-v2only.yaml
overrides: ceph: log-ignorelist: - \(MON_DOWN\) conf: global: ms type: async ms bind msgr2: true ms bind msgr1: false tasks: - install: branch: nautilus exclude_packages: - cephadm - ceph-mgr-cephadm - ceph-immutable-object-cache - python3-rados - python3-rgw - python3-rbd - python3-cephfs - ceph-volume extra_packages: - python-rados - python-rgw - python-rbd - python-cephfs
508
17.851852
35
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/1-install/nautilus.yaml
overrides: ceph: log-ignorelist: - \(MON_DOWN\) tasks: - install: branch: nautilus exclude_packages: - cephadm - ceph-mgr-cephadm - ceph-immutable-object-cache - python3-rados - python3-rgw - python3-rbd - python3-cephfs - ceph-volume extra_packages: - python-rados - python-rgw - python-rbd - python-cephfs
404
17.409091
35
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/1-install/octopus.yaml
overrides: ceph: log-ignorelist: - \(MON_DOWN\) tasks: - install: branch: octopus exclude_packages: - ceph-mgr-dashboard - ceph-mgr-diskprediction-local - ceph-mgr-rook - ceph-mgr-cephadm - ceph-base-debuginfo - ceph-common-debuginfo - ceph-immutable-object-cache-debuginfo - ceph-radosgw-debuginfo - ceph-test-debuginfo - ceph-base-debuginfo - ceph-mgr-debuginfo - ceph-mds-debuginfo - ceph-mon-debuginfo - ceph-osd-debuginfo - ceph-fuse-debuginfo - librados-devel-debuginfo - libcephfs2-debuginfo - librados2-debuginfo - librbd1-debuginfo - python3-cephfs-debuginfo - python3-rados-debuginfo - python3-rbd-debuginfo - python3-rgw-debuginfo - rbd-fuse-debuginfo - rbd-mirror-debuginfo - rbd-nbd-debuginfo - ceph-volume
902
24.083333
45
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/1-install/pacific.yaml
overrides: ceph: log-ignorelist: - \(MON_DOWN\) tasks: - install: branch: pacific exclude_packages: - ceph-mgr-dashboard - ceph-mgr-diskprediction-local - ceph-mgr-rook - ceph-mgr-cephadm - ceph-base-debuginfo - ceph-common-debuginfo - ceph-immutable-object-cache-debuginfo - ceph-radosgw-debuginfo - ceph-test-debuginfo - ceph-base-debuginfo - ceph-mgr-debuginfo - ceph-mds-debuginfo - ceph-mon-debuginfo - ceph-osd-debuginfo - ceph-fuse-debuginfo - librados-devel-debuginfo - libcephfs2-debuginfo - librados2-debuginfo - librbd1-debuginfo - python3-cephfs-debuginfo - python3-rados-debuginfo - python3-rbd-debuginfo - python3-rgw-debuginfo - rbd-fuse-debuginfo - rbd-mirror-debuginfo - rbd-nbd-debuginfo - ceph-volume
902
24.083333
45
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/1-install/quincy.yaml
overrides: ceph: log-ignorelist: - \(MON_DOWN\) tasks: - install: branch: quincy exclude_packages: - ceph-mgr-dashboard - ceph-mgr-diskprediction-local - ceph-mgr-rook - ceph-mgr-cephadm - ceph-base-debuginfo - ceph-common-debuginfo - ceph-immutable-object-cache-debuginfo - ceph-radosgw-debuginfo - ceph-test-debuginfo - ceph-base-debuginfo - ceph-mgr-debuginfo - ceph-mds-debuginfo - ceph-mon-debuginfo - ceph-osd-debuginfo - ceph-fuse-debuginfo - librados-devel-debuginfo - libcephfs2-debuginfo - librados2-debuginfo - librbd1-debuginfo - python3-cephfs-debuginfo - python3-rados-debuginfo - python3-rbd-debuginfo - python3-rgw-debuginfo - rbd-fuse-debuginfo - rbd-mirror-debuginfo - rbd-nbd-debuginfo - ceph-volume
901
24.055556
45
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/backoff/normal.yaml
0
0
0
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/backoff/peering.yaml
overrides: ceph: conf: osd: osd backoff on peering: true
77
12
36
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/backoff/peering_and_degraded.yaml
overrides: ceph: conf: osd: osd backoff on peering: true osd backoff on degraded: true
115
15.571429
37
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/clusters/openstack.yaml
openstack: - volumes: # attached to each instance count: 4 size: 30 # GB
87
16.6
40
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/clusters/three-plus-one.yaml
roles: - [mon.a, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0] - [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1] - [mon.c, osd.8, osd.9, osd.10, osd.11, client.2] openstack: - volumes: # attached to each instance count: 4 size: 10 # GB overrides: ceph: conf: osd: osd shutdown pgref assert: true
328
22.5
54
yaml
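The three-plus-one cluster cell above uses flow-style role lists, so its expanded form is fairly unambiguous; the indentation of the openstack and overrides blocks is still assumed:

```yaml
roles:
- [mon.a, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0]
- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1]
- [mon.c, osd.8, osd.9, osd.10, osd.11, client.2]
openstack:
  - volumes: # attached to each instance
      count: 4
      size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
```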
null
ceph-main/qa/suites/rados/thrash-old-clients/d-balancer/crush-compat.yaml
tasks: - exec: mon.a: - while ! ceph balancer status ; do sleep 1 ; done - ceph balancer mode crush-compat - ceph balancer on
148
20.285714
56
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/d-balancer/on.yaml
0
0
0
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml
overrides: ceph: conf: global: ms inject socket failures: 2500 ms tcp read timeout: 5 mon client directed command retry: 5 log-ignorelist: - \(OSD_SLOW_PING_TIME
208
19.9
44
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml
overrides: ceph: conf: global: ms inject socket failures: 5000 mon client directed command retry: 5 osd: osd heartbeat use min delay socket: true log-ignorelist: - \(OSD_SLOW_PING_TIME
237
20.636364
48
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml
overrides: ceph: conf: global: ms inject socket failures: 2500 ms inject delay type: osd ms inject delay probability: .005 ms inject delay max: 1 ms inject internal delays: .002 mon client directed command retry: 5 log-ignorelist: - \(OSD_SLOW_PING_TIME
324
24
44
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost conf: osd: osd debug reject backfill probability: .3 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 3 osd snap trim sleep: 2 mon: mon min osdmap epochs: 50 paxos service trim min: 10 # prune full osdmaps regularly mon osdmap full prune min: 15 mon osdmap full prune interval: 2 mon osdmap full prune txsize: 2 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1 aggressive_pg_num_changes: false
678
25.115385
49
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/thrashers/default.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost conf: osd: osd debug reject backfill probability: .3 osd scrub min interval: 60 osd scrub max interval: 120 osd max backfills: 3 osd snap trim sleep: 2 mon: mon min osdmap epochs: 50 paxos service trim min: 10 # prune full osdmaps regularly mon osdmap full prune min: 15 mon osdmap full prune interval: 2 mon osdmap full prune txsize: 2 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 1 chance_pgpnum_fix: 1
641
24.68
49
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost - osd_map_cache_size conf: mon: mon min osdmap epochs: 50 paxos service trim min: 10 # prune full osdmaps regularly mon osdmap full prune min: 15 mon osdmap full prune interval: 2 mon osdmap full prune txsize: 2 osd: osd map cache size: 1 osd scrub min interval: 60 osd scrub max interval: 120 osd scrub during recovery: false osd max backfills: 6 tasks: - thrashosds: timeout: 1800 chance_pgnum_grow: 0.25 chance_pgpnum_fix: 0.25 chance_test_map_discontinuity: 2
699
24.925926
41
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml
overrides: ceph: conf: osd: osd scrub min interval: 60 osd scrub max interval: 120 journal throttle high multiple: 2 journal throttle max multiple: 10 filestore queue throttle high multiple: 2 filestore queue throttle max multiple: 10 osd max backfills: 9 log-ignorelist: - but it is still running - objects unfound and apparently lost tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 3 chance_pgpnum_fix: 1 openstack: - volumes: size: 50
540
22.521739
49
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/thrashers/none.yaml
0
0
0
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml
overrides: ceph: log-ignorelist: - but it is still running - objects unfound and apparently lost conf: osd: osd scrub min interval: 60 osd scrub max interval: 120 filestore odsync write: true osd max backfills: 2 osd snap trim sleep: .5 mon: mon min osdmap epochs: 50 paxos service trim min: 10 # prune full osdmaps regularly mon osdmap full prune min: 15 mon osdmap full prune interval: 2 mon osdmap full prune txsize: 2 tasks: - thrashosds: timeout: 1200 chance_pgnum_grow: 2 chance_pgpnum_fix: 1
629
24.2
41
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml
overrides: ceph: log-ignorelist: - must scrub before tier agent can activate tasks: - exec: client.0: - sudo ceph osd pool create base 4 - sudo ceph osd pool application enable base rados - sudo ceph osd pool create cache 4 - sudo ceph osd tier add base cache - sudo ceph osd tier cache-mode cache writeback - sudo ceph osd tier set-overlay base cache - sudo ceph osd pool set cache hit_set_type bloom - sudo ceph osd pool set cache hit_set_count 8 - sudo ceph osd pool set cache hit_set_period 3600 - sudo ceph osd pool set cache target_max_objects 250 - sudo ceph osd pool set cache min_read_recency_for_promote 2 - rados: clients: [client.2] pools: [base] ops: 4000 objects: 500 op_weights: read: 100 write: 100 delete: 50 copy_from: 50 cache_flush: 50 cache_try_flush: 50 cache_evict: 50 snap_create: 50 snap_remove: 50 rollback: 50
999
27.571429
67
yaml
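Expanded sketch of the cache-snaps workload cell above: an exec task that builds a cache tier over a base pool, followed by a rados task that drives cache operations against it. Indentation is reconstructed; commands and weights are copied from the cell:

```yaml
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create base 4
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 3600
      - sudo ceph osd pool set cache target_max_objects 250
      - sudo ceph osd pool set cache min_read_recency_for_promote 2
- rados:
    clients: [client.2]
    pools: [base]
    ops: 4000
    objects: 500
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50
      cache_flush: 50
      cache_try_flush: 50
      cache_evict: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
```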
null
ceph-main/qa/suites/rados/thrash-old-clients/workloads/radosbench.yaml
overrides: ceph: conf: client.2: debug ms: 1 debug objecter: 20 debug rados: 20 tasks: - full_sequential: - radosbench: clients: [client.2] time: 90 - radosbench: clients: [client.2] time: 90 - radosbench: clients: [client.2] time: 90 - radosbench: clients: [client.2] time: 90 - radosbench: clients: [client.2] time: 90 - radosbench: clients: [client.2] time: 90 - radosbench: clients: [client.2] time: 90 - radosbench: clients: [client.2] time: 90
598
16.617647
26
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/workloads/rbd_cls.yaml
meta: - desc: | rbd object class functional tests tasks: - exec: client.2: - ceph_test_cls_rbd --gtest_filter=-TestClsRbd.get_features:TestClsRbd.parents:TestClsRbd.mirror
185
22.25
102
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/workloads/snaps-few-objects.yaml
tasks: - rados: clients: [client.2] ops: 4000 objects: 50 op_weights: read: 100 write: 100 delete: 50 snap_create: 50 snap_remove: 50 rollback: 50 copy_from: 50
219
14.714286
23
yaml
null
ceph-main/qa/suites/rados/thrash-old-clients/workloads/test_rbd_api.yaml
meta: - desc: | librbd C and C++ api tests workload: - workunit: clients: client.2: - rbd/test_librbd.sh
126
13.111111
29
yaml
null
ceph-main/qa/suites/rados/thrash/ceph.yaml
tasks: - install: - ceph: conf: osd: debug monc: 20
70
9.142857
22
yaml
null
ceph-main/qa/suites/rados/thrash/rados.yaml
.qa/config/rados.yaml
21
21
21
yaml
null
ceph-main/qa/suites/rados/thrash/thrashosds-health.yaml
.qa/tasks/thrashosds-health.yaml
32
32
32
yaml
null
ceph-main/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml
.qa/overrides/2-size-2-min-size.yaml
36
36
36
yaml
null
ceph-main/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml
.qa/overrides/3-size-2-min-size.yaml
36
36
36
yaml
null
ceph-main/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml
0
0
0
yaml
null
ceph-main/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml
.qa/overrides/short_pg_log.yaml
31
31
31
yaml