Dataset schema (column · dtype · observed range):

  repo             stringlengths   2 – 152
  file             stringlengths   15 – 239
  code             stringlengths   0 – 58.4M
  file_length      int64           0 – 58.4M
  avg_line_length  float64         0 – 1.81M
  max_line_length  int64           0 – 12.7M
  extension_type   stringclasses   364 values

Each row below lists its fields in the column order above: repo (null for every row in this excerpt), file, code (the file body), file_length, avg_line_length, max_line_length, and extension_type.
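The three numeric columns can be recomputed from the code field alone: file_length counts every character of the file including newlines, while avg_line_length and max_line_length are taken over the newline-stripped lines. This reading is consistent with the rows below (single-line reference files report the same value for all three stats, and the first row's 142 characters split into eight lines whose newline-free lengths average 16.875), but it is an inference from the data, not documented behavior. A minimal sketch in Python under that assumption:

def line_stats(code: str) -> tuple[int, float, int]:
    # Line lengths with the terminating newlines excluded.
    lengths = [len(line) for line in code.split("\n")]
    file_length = len(code)                        # all characters, newlines included
    avg_line_length = sum(lengths) / len(lengths)  # split("\n") never returns []
    max_line_length = max(lengths)
    return file_length, avg_line_length, max_line_length

For example, a 40-character one-line entry such as ".qa/distros/supported/centos_latest.yaml" yields (40, 40.0, 40), matching the stats stored for that row.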
null
ceph-main/qa/suites/ceph-deploy/config/ceph_volume_bluestore.yaml
overrides:
  ceph-deploy:
    use-ceph-volume: True
    bluestore: True
    conf:
      osd:
        bluestore fsck on mount: true
142
16.875
39
yaml
null
ceph-main/qa/suites/ceph-deploy/config/ceph_volume_bluestore_dmcrypt.yaml
overrides:
  ceph-deploy:
    use-ceph-volume: True
    bluestore: True
    dmcrypt: True
    conf:
      osd:
        bluestore fsck on mount: true
162
17.111111
39
yaml
null
ceph-main/qa/suites/ceph-deploy/config/ceph_volume_dmcrypt_off.yaml
overrides:
  ceph-deploy:
    use-ceph-volume: True
55
13
27
yaml
null
ceph-main/qa/suites/ceph-deploy/distros/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/ceph-deploy/distros/ubuntu_latest.yaml
.qa/distros/supported/ubuntu_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/ceph-deploy/python_versions/python_2.yaml
overrides:
  ceph-deploy:
    python_version: "2"
50
11.75
23
yaml
null
ceph-main/qa/suites/ceph-deploy/python_versions/python_3.yaml
overrides:
  ceph-deploy:
    python_version: "3"
50
11.75
23
yaml
null
ceph-main/qa/suites/ceph-deploy/tasks/ceph-admin-commands.yaml
meta: - desc: "test basic ceph admin commands" tasks: - ssh_keys: - print: "**** done ssh_keys" - ceph-deploy: - print: "**** done ceph-deploy" - workunit: clients: client.0: - ceph-tests/ceph-admin-commands.sh - print: "**** done ceph-tests/ceph-admin-commands.sh"
290
21.384615
54
yaml
null
ceph-main/qa/suites/ceph-deploy/tasks/rbd_import_export.yaml
meta: - desc: "Setup cluster using ceph-deploy, Run the rbd import/export tests" tasks: - ssh-keys: - ceph-deploy: - workunit: clients: client.0: - rbd/import_export.sh
187
17.8
74
yaml
null
ceph-main/qa/suites/cephmetrics/0-clusters/3-node.yaml
meta: - desc: "4-node cluster" roles: - [mon.a, mds.a, osd.0, osd.1, osd.2] - [mon.b, mgr.x, osd.3, osd.4, osd.5] - [mon.c, mgr.y, osd.6, osd.7, osd.8, client.0] - [cephmetrics.0] openstack: - volumes: # attached to each instance count: 3 size: 10 # GB
261
20.833333
47
yaml
null
ceph-main/qa/suites/cephmetrics/1-distros/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/cephmetrics/1-distros/ubuntu_latest.yaml
.qa/distros/supported/ubuntu_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/cephmetrics/2-ceph/ceph_ansible.yaml
meta: - desc: "Build the ceph cluster using ceph-ansible" overrides: ceph_ansible: vars: ceph_conf_overrides: global: osd default pool size: 2 mon pg warn min per osd: 2 osd pool default pg num: 64 osd pool default pgp num: 64 mon_max_pg_per_osd: 1024 ceph_test: true ceph_stable_release: luminous osd_scenario: collocated journal_size: 1024 osd_auto_discovery: false ceph_origin: repository ceph_repository: dev ceph_mgr_modules: - status - restful cephfs_pools: - name: "cephfs_data" pg_num: "64" - name: "cephfs_metadata" pg_num: "64" tasks: - ssh-keys: - ceph_ansible: - install.ship_utilities:
822
23.939394
51
yaml
null
ceph-main/qa/suites/cephmetrics/3-ceph-config/bluestore_with_dmcrypt.yaml
meta: - desc: "use bluestore + dmcrypt" overrides: ceph_ansible: vars: osd_objectstore: bluestore dmcrypt: True
137
14.333333
34
yaml
null
ceph-main/qa/suites/cephmetrics/3-ceph-config/bluestore_without_dmcrypt.yaml
meta: - desc: "use bluestore without dmcrypt" overrides: ceph_ansible: vars: osd_objectstore: bluestore dmcrypt: False
144
15.111111
39
yaml
null
ceph-main/qa/suites/cephmetrics/3-ceph-config/dmcrypt_off.yaml
meta: - desc: "without dmcrypt" overrides: ceph_ansible: vars: dmcrypt: False
95
11
25
yaml
null
ceph-main/qa/suites/cephmetrics/3-ceph-config/dmcrypt_on.yaml
meta: - desc: "with dmcrypt" overrides: ceph_ansible: vars: dmcrypt: True
91
10.5
22
yaml
null
ceph-main/qa/suites/cephmetrics/4-epel/no_epel.yaml
meta: - desc: "Without EPEL" overrides: cephmetrics: group_vars: all: use_epel: false
108
12.625
24
yaml
null
ceph-main/qa/suites/cephmetrics/4-epel/use_epel.yaml
meta: - desc: "Using EPEL" overrides: cephmetrics: group_vars: all: use_epel: true
105
12.25
22
yaml
null
ceph-main/qa/suites/cephmetrics/5-containers/containerized.yaml
meta: - desc: "Containerized prometheus and grafana" overrides: cephmetrics: group_vars: all: prometheus: containerized: true grafana: containerized: true
205
17.727273
48
yaml
null
ceph-main/qa/suites/cephmetrics/5-containers/no_containers.yaml
meta: - desc: "Packaged prometheus and grafana" overrides: cephmetrics: group_vars: all: prometheus: containerized: false grafana: containerized: false
202
17.454545
43
yaml
null
ceph-main/qa/suites/cephmetrics/6-tasks/cephmetrics.yaml
meta: - desc: "Deploy cephmetrics and run integration tests" tasks: - cephmetrics:
83
15.8
54
yaml
null
ceph-main/qa/suites/crimson-rados-experimental/seastore/basic/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/crimson-rados-experimental/seastore/basic/crimson_qa_overrides.yaml
.qa/config/crimson_qa_overrides.yaml
36
36
36
yaml
null
ceph-main/qa/suites/crimson-rados-experimental/seastore/basic/clusters/fixed-1.yaml
overrides:
  ceph-deploy:
    conf:
      global:
        osd pool default size: 2
        osd crush chooseleaf type: 0
        osd pool default pg num: 128
        osd pool default pgp num: 128
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
321
20.466667
47
yaml
null
ceph-main/qa/suites/crimson-rados-experimental/seastore/basic/clusters/fixed-2.yaml
roles:
- [mon.a, osd.0, osd.1, client.0, node-exporter.a]
- [mgr.x, osd.2, osd.3, client.1, prometheus.a, node-exporter.b]
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
      global:
        ms cluster mode: crc
        ms service mode: crc
        ms client mode: crc
        ms mon service mode: crc
        ms mon cluster mode: crc
        ms mon client mode: crc
401
24.125
64
yaml
null
ceph-main/qa/suites/crimson-rados-experimental/seastore/basic/deploy/ceph.yaml
overrides:
  install:
    ceph:
      flavor: crimson
tasks:
- install:
- ceph:
    conf:
      osd:
        debug monc: 20
      mon:
        mon min osdmap epochs: 50
        paxos service trim min: 10
        # prune full osdmaps regularly
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
    flavor: crimson
383
19.210526
41
yaml
null
ceph-main/qa/suites/crimson-rados-experimental/seastore/basic/objectstore/seastore.yaml
overrides:
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: seastore
86
11.428571
33
yaml
null
ceph-main/qa/suites/crimson-rados-experimental/seastore/basic/tasks/rados_api_tests.yaml
overrides:
  ceph:
    log-ignorelist:
    - reached quota
    - but it is still running
    - overall HEALTH_
    - \(POOL_FULL\)
    - \(SMALLER_PGP_NUM\)
    - \(CACHE_POOL_NO_HIT_SET\)
    - \(CACHE_POOL_NEAR_FULL\)
    - \(POOL_APP_NOT_ENABLED\)
    - \(PG_AVAILABILITY\)
    - \(PG_DEGRADED\)
    conf:
      client:
        debug ms: 1
      mon:
        mon warn on pool no app: false
      osd:
        osd class load list: "*"
        osd class default list: "*"
        osd blocked scrub grace period: 3600
tasks:
- workunit:
    clients:
      client.0:
      - rados/test.sh
      - rados/test_pool_quota.sh
625
20.586207
44
yaml
null
ceph-main/qa/suites/crimson-rados-experimental/seastore/basic/tasks/readwrite.yaml
overrides:
  ceph:
    crush_tunables: optimal
    conf:
      mon:
        mon osd initial require min compat client: luminous
      osd:
        osd_discard_disconnected_ops: false
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    op_weights:
      read: 45
      write: 45
      delete: 10
318
16.722222
59
yaml
null
ceph-main/qa/suites/crimson-rados/basic/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/crimson-rados/basic/crimson_qa_overrides.yaml
.qa/config/crimson_qa_overrides.yaml
36
36
36
yaml
null
ceph-main/qa/suites/crimson-rados/basic/clusters/fixed-2.yaml
roles:
- [mon.a, osd.0, osd.1, client.0, node-exporter.a]
- [mgr.x, osd.2, osd.3, client.1, prometheus.a, node-exporter.b]
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
      global:
        ms cluster mode: crc
        ms service mode: crc
        ms client mode: crc
        ms mon service mode: crc
        ms mon cluster mode: crc
        ms mon client mode: crc
401
24.125
64
yaml
null
ceph-main/qa/suites/crimson-rados/basic/deploy/ceph.yaml
overrides:
  install:
    ceph:
      flavor: crimson
tasks:
- install:
- ceph:
    conf:
      osd:
        debug monc: 20
      mon:
        mon min osdmap epochs: 50
        paxos service trim min: 10
        # prune full osdmaps regularly
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
    flavor: crimson
383
19.210526
41
yaml
null
ceph-main/qa/suites/crimson-rados/basic/tasks/rados_api_tests.yaml
overrides:
  ceph:
    log-ignorelist:
    - reached quota
    - but it is still running
    - overall HEALTH_
    - \(POOL_FULL\)
    - \(SMALLER_PGP_NUM\)
    - \(CACHE_POOL_NO_HIT_SET\)
    - \(CACHE_POOL_NEAR_FULL\)
    - \(POOL_APP_NOT_ENABLED\)
    - \(PG_AVAILABILITY\)
    - \(PG_DEGRADED\)
    conf:
      client:
        debug ms: 1
      mon:
        mon warn on pool no app: false
      osd:
        osd class load list: "*"
        osd class default list: "*"
        osd blocked scrub grace period: 3600
tasks:
- workunit:
    clients:
      client.0:
      - rados/test.sh
      - rados/test_pool_quota.sh
625
20.586207
44
yaml
null
ceph-main/qa/suites/crimson-rados/basic/tasks/rados_python.yaml
overrides:
  ceph:
    log-ignorelist:
    - but it is still running
    - overall HEALTH_
    - \(OSDMAP_FLAGS\)
    - \(PG_
    - \(OSD_
    - \(OBJECT_
    - \(POOL_APP_NOT_ENABLED\)
tasks:
- workunit:
    timeout: 1h
    clients:
      client.0:
      - rados/test_python.sh --eval-attr 'not (wait or tier or ec or bench or stats)'
338
18.941176
87
yaml
null
ceph-main/qa/suites/crimson-rados/basic/tasks/readwrite.yaml
overrides:
  ceph:
    crush_tunables: optimal
    conf:
      mon:
        mon osd initial require min compat client: luminous
      osd:
        osd_discard_disconnected_ops: false
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    op_weights:
      read: 45
      write: 45
      delete: 10
318
16.722222
59
yaml
null
ceph-main/qa/suites/crimson-rados/perf/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/crimson-rados/perf/crimson_qa_overrides.yaml
.qa/config/crimson_qa_overrides.yaml
36
36
36
yaml
null
ceph-main/qa/suites/crimson-rados/perf/clusters/fixed-2.yaml
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
      global:
        ms cluster mode: crc
        ms service mode: crc
        ms client mode: crc
        ms mon service mode: crc
        ms mon cluster mode: crc
        ms mon client mode: crc
333
21.266667
47
yaml
null
ceph-main/qa/suites/crimson-rados/perf/deploy/ceph.yaml
overrides:
  install:
    ceph:
      flavor: crimson
tasks:
- install:
- ceph:
    wait-for-scrub: false
    conf:
      osd:
        debug monc: 20
    flavor: crimson
170
12.153846
25
yaml
null
ceph-main/qa/suites/crimson-rados/perf/objectstore/bluestore.yaml
overrides:
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        bluestore compression mode: aggressive
        bluestore fsck on mount: true
        bluestore compression algorithm: snappy
        # lower the full ratios since we can fill up a 100gb osd so quickly
        mon osd full ratio: .9
        mon osd backfillfull_ratio: .85
        mon osd nearfull ratio: .8
        osd failsafe full ratio: .95
        bluestore rocksdb cf: false
        log to stderr: true
        err to stderr: true
        log flush on exit: true
        log to file: false
711
28.666667
75
yaml
null
ceph-main/qa/suites/crimson-rados/perf/settings/optimized.yaml
overrides:
  ceph:
    conf:
      mon:
        debug mon: "0/0"
        debug ms: "0/0"
        debug paxos: "0/0"
      osd:
        debug filestore: "0/0"
        debug journal: "0/0"
        debug ms: "0/0"
        debug osd: "0/0"
      global:
        auth client required: none
        auth cluster required: none
        auth service required: none
        auth supported: none
        debug lockdep: "0/0"
        debug context: "0/0"
        debug crush: "0/0"
        debug mds: "0/0"
        debug mds balancer: "0/0"
        debug mds locker: "0/0"
        debug mds log: "0/0"
        debug mds log expire: "0/0"
        debug mds migrator: "0/0"
        debug buffer: "0/0"
        debug timer: "0/0"
        debug filer: "0/0"
        debug striper: "0/0"
        debug objecter: "0/0"
        debug rados: "0/0"
        debug rbd: "0/0"
        debug rbd mirror: "0/0"
        debug rbd replay: "0/0"
        debug journaler: "0/0"
        debug objectcacher: "0/0"
        debug client: "0/0"
        debug osd: "0/0"
        debug optracker: "0/0"
        debug objclass: "0/0"
        debug filestore: "0/0"
        debug journal: "0/0"
        debug ms: "0/0"
        debug mon: "0/0"
        debug monc: "0/0"
        debug paxos: "0/0"
        debug tp: "0/0"
        debug auth: "0/0"
        debug crypto: "0/0"
        debug finisher: "0/0"
        debug heartbeatmap: "0/0"
        debug perfcounter: "0/0"
        debug rgw: "0/0"
        debug rgw sync: "0/0"
        debug civetweb: "0/0"
        debug javaclient: "0/0"
        debug asok: "0/0"
        debug throttle: "0/0"
        debug refs: "0/0"
        debug compressor: "0/0"
        debug bluestore: "0/0"
        debug bluefs: "0/0"
        debug bdev: "0/0"
        debug kstore: "0/0"
        debug rocksdb: "0/0"
        debug leveldb: "0/0"
        debug memdb: "0/0"
        debug fuse: "0/0"
        debug mgr: "0/0"
        debug mgrc: "0/0"
        debug dpdk: "0/0"
        debug eventtrace: "0/0"
1,999
25.666667
35
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/fio_4K_rand_read.yaml
tasks:
- cbt:
    benchmarks:
      librbdfio:
        op_size: [4096]
        time: 60
        mode: ['randread']
        norandommap: True
        vol_size: 4096
        procs_per_volume: [1]
        volumes_per_client: [2]
        iodepth: [32]
        osd_ra: [4096]
        pool_profile: 'rbd'
        log_avg_msec: 100
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        rbd:
          pg_size: 128
          pgp_size: 128
          replication: 3
508
19.36
31
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/fio_4K_rand_rw.yaml
tasks:
- cbt:
    benchmarks:
      librbdfio:
        op_size: [4096]
        time: 60
        mode: ['randrw']
        norandommap: True
        vol_size: 4096
        procs_per_volume: [1]
        volumes_per_client: [2]
        iodepth: [32]
        osd_ra: [4096]
        pool_profile: 'rbd'
        log_avg_msec: 100
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        rbd:
          pg_size: 128
          pgp_size: 128
          replication: 3
506
19.28
31
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/fio_4M_rand_read.yaml
tasks:
- cbt:
    benchmarks:
      librbdfio:
        op_size: [4194304]
        time: 60
        mode: ['randread']
        norandommap: True
        vol_size: 4096
        procs_per_volume: [1]
        volumes_per_client: [2]
        iodepth: [32]
        osd_ra: [4096]
        pool_profile: 'rbd'
        log_avg_msec: 100
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        rbd:
          pg_size: 128
          pgp_size: 128
          replication: 3
511
19.48
31
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/fio_4M_rand_rw.yaml
tasks:
- cbt:
    benchmarks:
      librbdfio:
        op_size: [4194304]
        time: 60
        mode: ['randrw']
        norandommap: True
        vol_size: 4096
        procs_per_volume: [1]
        volumes_per_client: [2]
        iodepth: [32]
        osd_ra: [4096]
        pool_profile: 'rbd'
        log_avg_msec: 100
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        rbd:
          pg_size: 128
          pgp_size: 128
          replication: 3
509
19.4
31
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/fio_4M_rand_write.yaml
tasks:
- cbt:
    benchmarks:
      librbdfio:
        op_size: [4194304]
        time: 60
        mode: ['randwrite']
        norandommap: True
        vol_size: 4096
        procs_per_volume: [1]
        volumes_per_client: [2]
        iodepth: [32]
        osd_ra: [4096]
        pool_profile: 'rbd'
        log_avg_msec: 100
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        rbd:
          pg_size: 128
          pgp_size: 128
          replication: 3
512
19.52
31
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/radosbench_4K_rand_read.yaml
tasks:
- cbt:
    benchmarks:
      radosbench:
        concurrent_ops: 4
        concurrent_procs: 2
        op_size: [4096]
        pool_monitoring_list:
        - collectl
        pool_profile: 'replicated'
        run_monitoring_list:
        - collectl
        time: 60
        write_only: false
        readmode: 'rand'
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        replicated:
          pg_size: 256
          pgp_size: 256
          replication: 'replicated'
527
20.12
35
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/radosbench_4K_seq_read.yaml
tasks:
- cbt:
    benchmarks:
      radosbench:
        concurrent_ops: 4
        concurrent_procs: 2
        op_size: [4096]
        pool_monitoring_list:
        - collectl
        pool_profile: 'replicated'
        run_monitoring_list:
        - collectl
        time: 60
        write_only: false
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        replicated:
          pg_size: 256
          pgp_size: 256
          replication: 'replicated'
502
19.958333
35
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/radosbench_4M_rand_read.yaml
tasks:
- cbt:
    benchmarks:
      radosbench:
        concurrent_ops: 4
        concurrent_procs: 2
        op_size: [4194304]
        pool_monitoring_list:
        - collectl
        pool_profile: 'replicated'
        run_monitoring_list:
        - collectl
        time: 60
        write_only: false
        readmode: 'rand'
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        replicated:
          pg_size: 256
          pgp_size: 256
          replication: 'replicated'
530
20.24
35
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/radosbench_4M_seq_read.yaml
tasks:
- cbt:
    benchmarks:
      radosbench:
        concurrent_ops: 4
        concurrent_procs: 2
        op_size: [4194304]
        pool_monitoring_list:
        - collectl
        pool_profile: 'replicated'
        run_monitoring_list:
        - collectl
        time: 60
        write_only: false
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        replicated:
          pg_size: 256
          pgp_size: 256
          replication: 'replicated'
505
20.083333
35
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/radosbench_4M_write.yaml
tasks:
- cbt:
    benchmarks:
      radosbench:
        concurrent_ops: 4
        concurrent_procs: 2
        op_size: [4194304]
        pool_monitoring_list:
        - collectl
        pool_profile: 'replicated'
        run_monitoring_list:
        - collectl
        time: 60
        write_only: true
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        replicated:
          pg_size: 256
          pgp_size: 256
          replication: 'replicated'
504
20.041667
35
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/radosbench_omap_write.yaml
tasks:
- radosbench:
    clients: [client.0]
    write-omap: True
    objectsize: 4096
    size: 4096
    time: 300
116
13.625
23
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/sample_fio.yaml
tasks:
- cbt:
    benchmarks:
      librbdfio:
        op_size: [4096]
        time: 60
        mode: ['randwrite']
        norandommap: True
        vol_size: 4096
        procs_per_volume: [1]
        volumes_per_client: [2]
        iodepth: [32]
        osd_ra: [4096]
        pool_profile: 'rbd'
        log_avg_msec: 100
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        rbd:
          pg_size: 128
          pgp_size: 128
          replication: 3
509
19.4
31
yaml
null
ceph-main/qa/suites/crimson-rados/perf/workloads/sample_radosbench.yaml
tasks:
- cbt:
    benchmarks:
      radosbench:
        concurrent_ops: 4
        concurrent_procs: 2
        op_size: [4096]
        pool_monitoring_list:
        - collectl
        pool_profile: 'replicated'
        run_monitoring_list:
        - collectl
        time: 60
        write_only: true
    cluster:
      user: 'ubuntu'
      osds_per_node: 3
      iterations: 1
      pool_profiles:
        replicated:
          pg_size: 256
          pgp_size: 256
          replication: 'replicated'
501
19.916667
35
yaml
null
ceph-main/qa/suites/crimson-rados/rbd/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/crimson-rados/rbd/crimson_qa_overrides.yaml
.qa/config/crimson_qa_overrides.yaml
36
36
36
yaml
null
ceph-main/qa/suites/crimson-rados/rbd/clusters/fixed-1.yaml
overrides:
  ceph-deploy:
    conf:
      global:
        osd pool default size: 2
        osd crush chooseleaf type: 0
        osd pool default pg num: 128
        osd pool default pgp num: 128
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
      global:
        ms cluster mode: crc
        ms service mode: crc
        ms client mode: crc
        ms mon service mode: crc
        ms mon cluster mode: crc
        ms mon client mode: crc
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
519
22.636364
47
yaml
null
ceph-main/qa/suites/crimson-rados/rbd/deploy/ceph.yaml
overrides:
  install:
    ceph:
      flavor: crimson
tasks:
- install:
- ceph:
    conf:
      osd:
        debug monc: 20
      mon:
        mon min osdmap epochs: 50
        paxos service trim min: 10
        # prune full osdmaps regularly
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
    flavor: crimson
383
19.210526
41
yaml
null
ceph-main/qa/suites/crimson-rados/rbd/tasks/rbd_api_tests.yaml
overrides:
  ceph:
    log-ignorelist:
    - overall HEALTH_
    - \(CACHE_POOL_NO_HIT_SET\)
    - \(POOL_APP_NOT_ENABLED\)
    - is full \(reached quota
    - \(OSDMAP_FLAGS\)
    - \(OSD_
    - \(PG_
    - \(POOL_
    - \(CACHE_POOL_
    - \(POOL_FULL\)
tasks:
- workunit:
    clients:
      client.0:
      - rbd/crimson/test_crimson_librbd.sh
    env:
      RBD_FEATURES: "61"
404
17.409091
44
yaml
null
ceph-main/qa/suites/crimson-rados/rbd/tasks/rbd_api_tests_old_format.yaml
overrides:
  ceph:
    log-ignorelist:
    - overall HEALTH_
    - \(CACHE_POOL_NO_HIT_SET\)
    - \(POOL_APP_NOT_ENABLED\)
    - is full \(reached quota
    - \(POOL_FULL\)
tasks:
- workunit:
    clients:
      client.0:
      - rbd/crimson/test_crimson_librbd.sh
277
18.857143
44
yaml
null
ceph-main/qa/suites/crimson-rados/rbd/tasks/rbd_cls_tests.yaml
tasks:
- workunit:
    clients:
      client.0:
      - cls/test_cls_rbd.sh
      - cls/test_cls_lock.sh
      - cls/test_cls_journal.sh
143
17
33
yaml
null
ceph-main/qa/suites/crimson-rados/rbd/tasks/rbd_lock_and_fence.yaml
tasks:
- workunit:
    clients:
      client.0:
      - rbd/test_lock_fence.sh
81
12.666667
32
yaml
null
ceph-main/qa/suites/crimson-rados/rbd/tasks/rbd_python_api_tests.yaml
overrides:
  ceph:
    log-ignorelist:
    - \(SLOW_OPS\)
    - slow request
tasks:
- workunit:
    clients:
      client.0:
      - rbd/test_librbd_python.sh --eval-attr 'not (SKIP_IF_CRIMSON)'
    env:
      RBD_FEATURES: "61"
235
17.153846
71
yaml
null
ceph-main/qa/suites/crimson-rados/rbd/tasks/rbd_python_api_tests_old_format.yaml
overrides:
  ceph:
    log-ignorelist:
    - \(SLOW_OPS\)
    - slow request
tasks:
- workunit:
    clients:
      client.0:
      - rbd/test_librbd_python.sh --eval-attr 'not (SKIP_IF_CRIMSON)'
201
17.363636
71
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/crimson_qa_overrides.yaml
.qa/config/crimson_qa_overrides.yaml
36
36
36
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/thrashosds-health.yaml
.qa/tasks/thrashosds-health.yaml
32
32
32
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml
.qa/overrides/3-size-2-min-size.yaml
36
36
36
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/1-pg-log-overrides/normal_pg_log.yaml
0
0
0
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/2-recovery-overrides/default.yaml
0
0
0
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/clusters/fixed-2.yaml
roles:
- [mon.a, osd.0, osd.1, client.0, node-exporter.a]
- [mgr.x, osd.2, osd.3, client.1, prometheus.a, node-exporter.b]
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
      global:
        ms cluster mode: crc
        ms service mode: crc
        ms client mode: crc
        ms mon service mode: crc
        ms mon cluster mode: crc
        ms mon client mode: crc
401
24.125
64
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/deploy/ceph.yaml
overrides:
  install:
    ceph:
      flavor: crimson
tasks:
- install:
- ceph:
    conf:
      osd:
        debug monc: 20
    flavor: crimson
144
11.083333
22
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/objectstore/bluestore.yaml
overrides:
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
        debug bluestore: 20
        debug bluefs: 20
        debug rocksdb: 10
        bluestore compression mode: aggressive
        bluestore fsck on mount: true
        bluestore compression algorithm: snappy
        # lower the full ratios since we can fill up a 100gb osd so quickly
        mon osd full ratio: .9
        mon osd backfillfull_ratio: .85
        mon osd nearfull ratio: .8
        osd failsafe full ratio: .95
        bluestore rocksdb cf: false
        log to stderr: true
        err to stderr: true
        log flush on exit: true
        log to file: false
711
28.666667
75
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/thrashers/default.yaml
overrides:
  ceph:
    wait-for-scrub: false
    log-ignorelist:
    - but it is still running
    - objects unfound and apparently lost
    conf:
      osd:
        osd debug reject backfill probability: .3
        osd scrub min interval: 60
        osd scrub max interval: 120
        osd max backfills: 3
        osd snap trim sleep: 2
        osd delete sleep: 1
      mon:
        mon min osdmap epochs: 50
        paxos service trim min: 10
        # prune full osdmaps regularly
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
tasks:
- thrashosds:
    timeout: 2400
    dump_ops_enable: false
    sighup_delay: 0
    min_in: 3
    noscrub_toggle_delay: 0
    chance_down: 0
    chance_thrash_pg_upmap: 0
    reweight_osd: 0
    thrash_primary_affinity: false
    ceph_objectstore_tool: false
    chance_inject_pause_short: 0
    chance_thrash_cluster_full: 0
938
25.083333
49
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/workloads/admin_socket_objecter_requests.yaml
overrides:
  ceph:
    conf:
      client.0:
        admin socket: /var/run/ceph/ceph-$name.asok
tasks:
- radosbench:
    clients: [client.0]
    time: 150
- admin_socket:
    client.0:
      objecter_requests:
        test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
324
22.214286
112
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/workloads/pool-snaps-few-objects.yaml
overrides:
  conf:
    osd:
      osd deep scrub update digest min age: 0
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    pool_snaps: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 0
      # TODO: CEPH_OSD_OP_COPY_FROM
      copy_from: 0
349
15.666667
45
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/workloads/radosbench-high-concurrency.yaml
overrides:
  ceph:
    conf:
      client.0:
        debug ms: 1
        debug objecter: 20
        debug rados: 20
tasks:
- full_sequential:
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
918
17.38
26
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/workloads/radosbench.yaml
overrides:
  ceph:
    conf:
      client.0:
        debug ms: 1
        debug objecter: 20
        debug rados: 20
tasks:
- full_sequential:
  - radosbench:
      clients: [client.0]
      time: 90
  - radosbench:
      clients: [client.0]
      time: 90
  - radosbench:
      clients: [client.0]
      time: 90
  - radosbench:
      clients: [client.0]
      time: 90
  - radosbench:
      clients: [client.0]
      time: 90
427
16.12
26
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/workloads/small-objects-balanced.yaml
overrides:
  ceph:
    crush_tunables: jewel
tasks:
- rados:
    clients: [client.0]
    ops: 400000
    max_seconds: 600
    max_in_flight: 64
    objects: 1024
    size: 16384
    balance_reads: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 0
      # TODO: CEPH_OSD_OP_COPY_FROM
      copy_from: 0
      setattr: 25
      rmattr: 25
420
16.541667
35
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/workloads/small-objects-localized.yaml
overrides:
  ceph:
    crush_tunables: jewel
tasks:
- rados:
    clients: [client.0]
    ops: 400000
    max_seconds: 600
    max_in_flight: 64
    objects: 1024
    size: 16384
    localize_reads: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 0
      # TODO: CEPH_OSD_OP_COPY_FROM
      copy_from: 0
      setattr: 25
      rmattr: 25
421
16.583333
35
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/workloads/small-objects.yaml
overrides:
  ceph:
    crush_tunables: jewel
tasks:
- rados:
    clients: [client.0]
    ops: 400000
    max_seconds: 600
    max_in_flight: 64
    objects: 1024
    size: 16384
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 0
      # TODO: CEPH_OSD_OP_COPY_FROM
      copy_from: 0
      setattr: 25
      rmattr: 25
396
16.26087
35
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/workloads/snaps-few-objects-balanced.yaml
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    balance_reads: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 0
      # TODO: CEPH_OSD_OP_COPY_FROM
      copy_from: 0
277
16.375
35
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/workloads/snaps-few-objects-localized.yaml
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    localize_reads: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 0
      # TODO: CEPH_OSD_OP_COPY_FROM
      copy_from: 0
278
16.4375
35
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/workloads/snaps-few-objects.yaml
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 0
      # TODO: CEPH_OSD_OP_COPY_FROM
      copy_from: 0
253
15.933333
35
yaml
null
ceph-main/qa/suites/crimson-rados/thrash/workloads/write_fadvise_dontneed.yaml
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    write_fadvise_dontneed: true
    op_weights:
      write: 100
137
14.333333
32
yaml
null
ceph-main/qa/suites/dummy/all/nop.yaml
roles:
- [mon.a, mgr.x, mds.a, osd.0, osd.1, client.0]
tasks:
- nop:
79
10.428571
51
yaml
null
ceph-main/qa/suites/experimental/multimds/clusters/7-multimds.yaml
roles:
- [mon.a, mgr.x, mds.a, mds.d]
- [mon.b, mgr.y, mds.b, mds.e]
- [mon.c, mgr.z, mds.c, mds.f]
- [osd.0]
- [osd.1]
- [osd.2]
- [client.0]
143
15
30
yaml
null
ceph-main/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml
tasks:
- install:
- ceph:
    conf:
      mds:
        mds thrash exports: 1
        mds debug subtrees: 1
        mds debug scatterstat: 1
        mds verify scatter: 1
- ceph-fuse:
- workunit:
    clients:
      client.0:
      - suites/fsstress.sh
254
14.9375
32
yaml
null
ceph-main/qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml
.qa/cephfs/clusters/fixed-2-ucephfs.yaml
40
40
40
yaml
null
ceph-main/qa/suites/fs/32bits/mount/fuse.yaml
.qa/cephfs/mount/fuse.yaml
26
26
26
yaml
null
ceph-main/qa/suites/fs/32bits/overrides/faked-ino.yaml
overrides:
  ceph:
    conf:
      client:
        client use faked inos: true
79
12.333333
35
yaml
null
ceph-main/qa/suites/fs/32bits/overrides/ignorelist_health.yaml
.qa/cephfs/overrides/ignorelist_health.yaml
43
43
43
yaml
null
ceph-main/qa/suites/fs/32bits/overrides/ignorelist_wrongly_marked_down.yaml
.qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
56
56
56
yaml
null
ceph-main/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml
.qa/cephfs/tasks/cfuse_workunit_suites_fsstress.yaml
52
52
52
yaml
null
ceph-main/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml
overrides:
  ceph:
    conf:
      client:
        fuse set user groups: true
tasks:
- workunit:
    timeout: 6h
    clients:
      all:
      - suites/pjd.sh
161
12.5
34
yaml
null
ceph-main/qa/suites/fs/bugs/client_trim_caps/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/fs/bugs/client_trim_caps/clusters/small-cluster.yaml
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, mds.a, mds.b, client.0]
openstack:
- volumes: # attached to each instance
    count: 2
    size: 10 # GB
- machine:
    disk: 100 # GB
log-rotate:
  ceph-mds: 10G
  ceph-osd: 10G
224
17.75
61
yaml
null
ceph-main/qa/suites/fs/bugs/client_trim_caps/objectstore/bluestore-bitmap.yaml
.qa/objectstore/bluestore-bitmap.yaml
37
37
37
yaml