repo (string, 2-152 chars, may be null) | file (string, 15-239 chars) | code (string, 0-58.4M chars) | file_length (int64, 0-58.4M) | avg_line_length (float64, 0-1.81M) | max_line_length (int64, 0-12.7M) | extension_type (string, 364 classes)
---|---|---|---|---|---|---|
null | ceph-main/qa/suites/rgw/upgrade/cluster.yaml |
.qa/clusters/fixed-2.yaml
| 25 | 25 | 25 | yaml |
null | ceph-main/qa/suites/rgw/upgrade/overrides.yaml |
overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(MON_DOWN\)
- \(MGR_DOWN\)
- \(OSD_DOWN\)
- \(PG_AVAILABILITY\)
- \(PG_DEGRADED\)
- slow request
- failed to encode map
conf:
mon:
mon warn on osd down out interval zero: false
osd:
osd min pg log entries: 1
osd max pg log entries: 2
ragweed:
rgw_server: client.0
| 418 | 19.95 | 53 | yaml |
null | ceph-main/qa/suites/rgw/upgrade/1-install/pacific/install.yaml |
tasks:
- install:
branch: pacific
exclude_packages:
- ceph-volume
| 80 | 12.5 | 21 | yaml |
null | ceph-main/qa/suites/rgw/upgrade/1-install/pacific/overrides.yaml |
overrides:
ragweed:
default-branch: ceph-master # ceph-pacific doesn't have tox, but tests are the same
| 110 | 26.75 | 87 | yaml |
null | ceph-main/qa/suites/rgw/upgrade/1-install/pacific/distro$/centos_latest.yaml |
.qa/distros/supported/centos_latest.yaml
| 40 | 40 | 40 | yaml |
null | ceph-main/qa/suites/rgw/upgrade/1-install/pacific/distro$/ubuntu_20.04.yaml |
.qa/distros/supported/ubuntu_20.04.yaml
| 39 | 39 | 39 | yaml |
null | ceph-main/qa/suites/rgw/upgrade/1-install/quincy/install.yaml |
tasks:
- install:
branch: quincy
| 37 | 8.5 | 18 | yaml |
null | ceph-main/qa/suites/rgw/upgrade/1-install/quincy/overrides.yaml |
overrides:
ragweed:
default-branch: ceph-master # ceph-quincy doesn't have tox, but tests are the same
| 109 | 26.5 | 86 | yaml |
null | ceph-main/qa/suites/rgw/upgrade/1-install/quincy/distro$/centos_latest.yaml |
.qa/distros/supported/centos_latest.yaml
| 40 | 40 | 40 | yaml |
null | ceph-main/qa/suites/rgw/upgrade/1-install/quincy/distro$/ubuntu_20.04.yaml |
.qa/distros/supported/ubuntu_20.04.yaml
| 39 | 39 | 39 | yaml |
null | ceph-main/qa/suites/rgw/upgrade/3-upgrade-sequence/osds-then-rgws.yaml |
tasks:
- print: "ragweed prepare before upgrade"
- ragweed:
client.0:
stages: prepare
- print: "restarting upgraded osds"
- ceph.restart:
daemons: [osd.0, osd.2]
- ceph.restart:
daemons: [osd.1, osd.3]
- ceph.restart:
daemons: [osd.4, osd.6]
- ceph.restart:
daemons: [osd.5, osd.7]
- print: "ragweed check/prepare after osd upgrade"
- ragweed:
client.0:
stages: check
client.1:
stages: prepare
- print: "restarting upgraded rgw"
- ceph.restart:
daemons: [rgw.*]
- print: "ragweed check after rgw upgrade"
- ragweed:
client.1:
stages: check
| 600 | 20.464286 | 50 | yaml |
null | ceph-main/qa/suites/rgw/upgrade/3-upgrade-sequence/rgws-then-osds.yaml |
tasks:
- print: "ragweed prepare before upgrade"
- ragweed:
client.0:
stages: prepare
- print: "restarting upgraded rgws"
- ceph.restart:
daemons: [rgw.*]
- print: "ragweed check/prepare after rgw upgrade"
- ragweed:
client.0:
stages: check
client.1:
stages: prepare
- print: "restarting upgraded osds"
- ceph.restart:
daemons: [osd.0, osd.2]
- ceph.restart:
daemons: [osd.1, osd.3]
- ceph.restart:
daemons: [osd.4, osd.6]
- ceph.restart:
daemons: [osd.5, osd.7]
- print: "ragweed check after osd upgrade"
- ragweed:
client.1:
stages: check
| 601 | 20.5 | 50 | yaml |
null | ceph-main/qa/suites/rgw/verify/0-install.yaml |
tasks:
- install:
# extra packages added for the rgw-datacache task
extra_system_packages:
deb: ['s3cmd']
rpm: ['s3cmd']
- ceph:
- openssl_keys:
- rgw:
client.0:
- tox: [client.0]
overrides:
ceph:
conf:
global:
osd_min_pg_log_entries: 10
osd_max_pg_log_entries: 10
client:
rgw lc debug interval: 10
| 374 | 16.857143 | 55 | yaml |
null | ceph-main/qa/suites/rgw/verify/ignore-pg-availability.yaml |
.qa/rgw/ignore-pg-availability.yaml
| 35 | 35 | 35 | yaml |
null | ceph-main/qa/suites/rgw/verify/overrides.yaml |
overrides:
ceph:
conf:
client:
setuser: ceph
setgroup: ceph
debug rgw: 20
rgw crypt s3 kms backend: testing
rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
rgw crypt require ssl: false
rgw torrent flag: true
rgw:
compression type: random
storage classes: LUKEWARM, FROZEN
| 446 | 28.8 | 151 | yaml |
null | ceph-main/qa/suites/rgw/verify/s3tests-branch.yaml |
.qa/rgw/s3tests-branch.yaml
| 27 | 27 | 27 | yaml |
null | ceph-main/qa/suites/rgw/verify/clusters/fixed-2.yaml |
.qa/clusters/fixed-2.yaml
| 25 | 25 | 25 | yaml |
null | ceph-main/qa/suites/rgw/verify/datacache/no_datacache.yaml | | 0 | 0 | 0 | yaml |
null | ceph-main/qa/suites/rgw/verify/datacache/rgw-datacache.yaml |
overrides:
ceph:
conf:
client:
rgw d3n l1 local datacache enabled: true
rgw enable ops log: true
rgw d3n l1 datacache persistent path: /tmp/rgw_datacache/
rgw d3n l1 datacache size: 10737418240
rgw:
datacache: true
datacache_path: /tmp/rgw_datacache
tasks:
- workunit:
clients:
client.0:
- rgw/run-datacache.sh
env:
RGW_DATACACHE_PATH: /tmp/rgw_datacache
| 435 | 21.947368 | 65 | yaml |
null | ceph-main/qa/suites/rgw/verify/inline-data$/off.yaml |
overrides:
rgw:
inline data: false
| 41 | 9.5 | 22 | yaml |
null | ceph-main/qa/suites/rgw/verify/inline-data$/on.yaml | | 0 | 0 | 0 | yaml |
null | ceph-main/qa/suites/rgw/verify/msgr-failures/few.yaml |
overrides:
ceph:
conf:
global:
ms inject socket failures: 5000
mon client directed command retry: 5
log-ignorelist:
- \(OSD_SLOW_PING_TIME
| 177 | 18.777778 | 44 | yaml |
null | ceph-main/qa/suites/rgw/verify/proto/http.yaml | | 0 | 0 | 0 | yaml |
null | ceph-main/qa/suites/rgw/verify/proto/https.yaml |
overrides:
openssl_keys:
root:
client: client.0
key-type: rsa:4096
cn: teuthology
install: [client.0]
rgw.client.0:
client: client.0
ca: root
embed-key: true
rgw:
client.0:
ssl certificate: rgw.client.0
| 267 | 16.866667 | 35 | yaml |
null | ceph-main/qa/suites/rgw/verify/striping$/stripe-equals-chunk.yaml |
overrides:
ceph:
conf:
client:
# use default values where chunk-size=stripe-size
#rgw max chunk size: 4194304
#rgw obj stripe size: 4194304
| 176 | 21.125 | 57 | yaml |
null | ceph-main/qa/suites/rgw/verify/striping$/stripe-greater-than-chunk.yaml |
overrides:
ceph:
conf:
client:
rgw max chunk size: 4194304
# stripe size greater than (and not a multiple of) chunk size
rgw obj stripe size: 6291456
| 186 | 22.375 | 69 | yaml |
null | ceph-main/qa/suites/rgw/verify/tasks/cls.yaml |
tasks:
- workunit:
clients:
client.0:
- cls/test_cls_lock.sh
- cls/test_cls_log.sh
- cls/test_cls_refcount.sh
- cls/test_cls_rgw.sh
- cls/test_cls_rgw_gc.sh
- cls/test_cls_rgw_stats.sh
- cls/test_cls_cmpomap.sh
- cls/test_cls_2pc_queue.sh
- rgw/test_rgw_gc_log.sh
- rgw/test_rgw_obj.sh
- rgw/test_librgw_file.sh
| 410 | 24.6875 | 35 | yaml |
null | ceph-main/qa/suites/rgw/verify/tasks/mp_reupload.yaml |
tasks:
- workunit:
clients:
client.0:
- rgw/test_rgw_s3_mp_reupload.sh
| 89 | 14 | 40 | yaml |
null | ceph-main/qa/suites/rgw/verify/tasks/ragweed.yaml |
tasks:
- ragweed:
client.0:
default-branch: ceph-master
rgw_server: client.0
stages: prepare,check
| 121 | 16.428571 | 33 | yaml |
null | ceph-main/qa/suites/rgw/verify/tasks/reshard.yaml |
tasks:
- workunit:
clients:
client.0:
- rgw/run-reshard.sh
| 77 | 12 | 28 | yaml |
null | ceph-main/qa/suites/rgw/verify/tasks/s3tests-java.yaml |
tasks:
- s3tests-java:
client.0:
force-branch: ceph-master
force-repo: https://github.com/ceph/java_s3tests.git
| 134 | 18.285714 | 61 | yaml |
null | ceph-main/qa/suites/rgw/verify/tasks/s3tests.yaml |
tasks:
- s3tests:
client.0:
rgw_server: client.0
| 59 | 11 | 26 | yaml |
null | ceph-main/qa/suites/rgw/verify/validater/lockdep.yaml |
overrides:
ceph:
conf:
osd:
lockdep: true
mon:
lockdep: true
| 95 | 11 | 21 | yaml |
null | ceph-main/qa/suites/rgw/verify/validater/valgrind.yaml |
overrides:
install:
ceph:
#debuginfo: true
rgw:
client.0:
valgrind: [--tool=memcheck, --max-threads=1024] # http://tracker.ceph.com/issues/25214
ceph:
conf:
global:
osd heartbeat grace: 40
mon:
mon osd crush smoke test: false
osd:
osd fast shutdown: false
# valgrind:
# mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
# osd: [--tool=memcheck]
# mds: [--tool=memcheck]
## https://tracker.ceph.com/issues/38621
## mgr: [--tool=memcheck]
| 546 | 23.863636 | 92 | yaml |
null | ceph-main/qa/suites/rgw/website/http.yaml |
# https tests would need to generate wildcard certificates; only test http for now
| 83 | 41 | 82 | yaml |
null | ceph-main/qa/suites/rgw/website/overrides.yaml |
overrides:
install:
ceph:
conf:
global:
osd_min_pg_log_entries: 10
osd_max_pg_log_entries: 10
client:
setuser: ceph
setgroup: ceph
debug rgw: 20
rgw crypt s3 kms backend: testing
rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
rgw crypt require ssl: false
rgw enable static website: True
client.0:
rgw lc debug interval: 10
client.1:
rgw enable apis: s3website
rgw:
client.0:
valgrind: [--tool=memcheck, --max-threads=1024] # http://tracker.ceph.com/issues/25214
client.1:
valgrind: [--tool=memcheck, --max-threads=1024] # http://tracker.ceph.com/issues/25214
s3tests:
calling-format: subdomain
| 849 | 30.481481 | 151 | yaml |
null | ceph-main/qa/suites/rgw/website/s3tests-branch.yaml |
.qa/rgw/s3tests-branch.yaml
| 27 | 27 | 27 | yaml |
null | ceph-main/qa/suites/rgw/website/ubuntu_latest.yaml |
.qa/distros/supported/ubuntu_latest.yaml
| 40 | 40 | 40 | yaml |
null | ceph-main/qa/suites/rgw/website/clusters/fixed-2.yaml |
.qa/clusters/fixed-2.yaml
| 25 | 25 | 25 | yaml |
null | ceph-main/qa/suites/rgw/website/tasks/s3tests-website.yaml |
tasks:
- install:
- ceph:
- dnsmasq:
client.0:
s3.: client.0
s3-website.: client.1
- rgw:
client.0:
dns-name: s3.
client.1:
dns-s3website-name: s3-website.
- tox: [client.0]
- s3tests:
client.0:
rgw_server: client.0
rgw_website_server: client.1
| 297 | 15.555556 | 37 | yaml |
null | ceph-main/qa/suites/samba/clusters/samba-basic.yaml |
roles:
- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1]
- [samba.0, client.0, client.1]
openstack:
- volumes: # attached to each instance
count: 2
size: 10 # GB
| 172 | 20.625 | 51 | yaml |
null | ceph-main/qa/suites/samba/install/install.yaml |
# we currently can't install Samba on RHEL; need a gitbuilder and code updates
os_type: ubuntu
tasks:
- install:
- install:
project: samba
extra_packages: ['samba']
- ceph:
| 182 | 17.3 | 78 | yaml |
null | ceph-main/qa/suites/samba/mount/fuse.yaml |
tasks:
- ceph-fuse: [client.0]
- samba:
samba.0:
ceph: "{testdir}/mnt.0"
| 84 | 11.142857 | 29 | yaml |
null | ceph-main/qa/suites/samba/mount/kclient.yaml |
overrides:
ceph:
conf:
global:
ms die on skipped message: false
kernel:
client:
branch: testing
tasks:
- kclient: [client.0]
- samba:
samba.0:
ceph: "{testdir}/mnt.0"
| 204 | 12.666667 | 40 | yaml |
null | ceph-main/qa/suites/samba/mount/native.yaml |
tasks:
- samba:
| 16 | 4.666667 | 8 | yaml |
null | ceph-main/qa/suites/samba/mount/noceph.yaml |
tasks:
- localdir: [client.0]
- samba:
samba.0:
ceph: "{testdir}/mnt.0"
| 82 | 12.833333 | 29 | yaml |
null | ceph-main/qa/suites/samba/workload/cifs-dbench.yaml |
tasks:
- cifs-mount:
client.1:
share: ceph
- workunit:
clients:
client.1:
- suites/dbench.sh
| 121 | 12.555556 | 26 | yaml |
null | ceph-main/qa/suites/samba/workload/cifs-fsstress.yaml |
tasks:
- cifs-mount:
client.1:
share: ceph
- workunit:
clients:
client.1:
- suites/fsstress.sh
| 123 | 12.777778 | 28 | yaml |
null | ceph-main/qa/suites/samba/workload/smbtorture.yaml |
tasks:
- pexec:
client.1:
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.chkpath
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb
# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon
# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl
# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid
| 3,221 | 79.55 | 96 | yaml |
null | ceph-main/qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml |
.qa/clusters/fixed-3-cephfs.yaml
| 32 | 32 | 32 | yaml |
null | ceph-main/qa/suites/smoke/basic/clusters/openstack.yaml |
openstack:
- machine:
disk: 40 # GB
ram: 8000 # MB
cpus: 1
volumes: # attached to each instance
count: 4
size: 10 # GB
| 155 | 16.333333 | 40 | yaml |
null | ceph-main/qa/suites/smoke/basic/objectstore/bluestore-bitmap.yaml |
.qa/objectstore_debug/bluestore-bitmap.yaml
| 43 | 43 | 43 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/0-install.yaml |
tasks:
- install:
cleanup: true
| 36 | 8.25 | 17 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_blogbench.yaml |
tasks:
- ceph:
fs: xfs
- ceph-fuse:
- workunit:
clients:
all:
- suites/blogbench.sh
| 106 | 10.888889 | 29 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_fsstress.yaml |
tasks:
- ceph:
- ceph-fuse:
- workunit:
clients:
all:
- suites/fsstress.sh
| 93 | 10.75 | 28 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_iozone.yaml |
tasks:
- ceph:
- ceph-fuse: [client.0]
- workunit:
clients:
all:
- suites/iozone.sh
| 102 | 11.875 | 26 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/cfuse_workunit_suites_pjd.yaml |
tasks:
- ceph:
fs: xfs
conf:
mds:
debug mds: 20
debug ms: 1
client:
debug client: 20
debug ms: 1
fuse set user groups: true
- ceph-fuse:
- workunit:
clients:
all:
- suites/pjd.sh
| 257 | 14.176471 | 34 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/kclient_workunit_direct_io.yaml |
overrides:
ceph:
conf:
global:
ms die on skipped message: false
tasks:
- ceph:
- kclient:
- workunit:
clients:
all:
- direct_io
| 166 | 11.846154 | 40 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_dbench.yaml |
overrides:
ceph:
conf:
global:
ms die on skipped message: false
tasks:
- ceph:
fs: xfs
- kclient:
- workunit:
clients:
all:
- suites/dbench.sh
| 185 | 12.285714 | 40 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_fsstress.yaml |
overrides:
ceph:
conf:
global:
ms die on skipped message: false
tasks:
- ceph:
fs: xfs
- kclient:
- workunit:
clients:
all:
- suites/fsstress.sh
| 187 | 12.428571 | 40 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/kclient_workunit_suites_pjd.yaml |
overrides:
ceph:
conf:
global:
ms die on skipped message: false
tasks:
- ceph:
fs: xfs
- kclient:
- workunit:
clients:
all:
- suites/pjd.sh
| 182 | 12.071429 | 40 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/libcephfs_interface_tests.yaml |
overrides:
ceph:
conf:
client:
debug ms: 1
debug client: 20
mds:
debug ms: 1
debug mds: 20
tasks:
- ceph:
- ceph-fuse:
- workunit:
clients:
client.0:
- libcephfs/test.sh
| 238 | 13.058824 | 27 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/mon_thrash.yaml |
overrides:
ceph:
log-ignorelist:
- reached quota
- mons down
- overall HEALTH_
- \(OSDMAP_FLAGS\)
- \(OSD_
- \(PG_
- \(POOL_
- \(CACHE_POOL_
- \(SMALLER_PGP_NUM\)
- \(OBJECT_
- \(SLOW_OPS\)
- \(TOO_FEW_PGS\)
- \(OSD_SLOW_PING_TIME
- slow request
conf:
global:
ms inject delay max: 1
ms inject delay probability: 0.005
ms inject delay type: mon
ms inject internal delays: 0.002
ms inject socket failures: 2500
mon client directed command retry: 5
osd:
osd class load list: "*"
osd class default list: "*"
tasks:
- ceph:
fs: xfs
- mon_thrash:
revive_delay: 90
thrash_delay: 1
thrash_many: true
- workunit:
clients:
client.0:
- rados/test.sh
| 841 | 20.05 | 44 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rados_api_tests.yaml |
tasks:
- ceph:
fs: ext4
log-ignorelist:
- overall HEALTH_
- \(OSDMAP_FLAGS\)
- \(OSD_
- \(PG_
- \(POOL_
- \(CACHE_POOL_
- \(SMALLER_PGP_NUM\)
- \(OBJECT_
- \(SLOW_OPS\)
- \(TOO_FEW_PGS\)
- reached quota
- but it is still running
- slow request
conf:
mon:
mon warn on pool no app: false
osd:
osd class load list: "*"
osd class default list: "*"
- thrashosds:
chance_pgnum_grow: 2
chance_pgnum_shrink: 2
chance_pgpnum_fix: 1
timeout: 1200
- workunit:
clients:
client.0:
- rados/test.sh
| 642 | 18.484848 | 38 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rados_bench.yaml |
overrides:
ceph:
conf:
global:
ms inject delay max: 1
ms inject delay probability: 0.005
ms inject delay type: osd
ms inject internal delays: 0.002
ms inject socket failures: 2500
mon client directed command retry: 5
tasks:
- ceph:
fs: xfs
log-ignorelist:
- overall HEALTH_
- \(OSDMAP_FLAGS\)
- \(OSD_
- \(PG_
- \(POOL_
- \(CACHE_POOL_
- \(SMALLER_PGP_NUM\)
- \(OBJECT_
- \(SLOW_OPS\)
- \(TOO_FEW_PGS\)
- \(OSD_SLOW_PING_TIME
- slow request
- thrashosds:
chance_pgnum_grow: 2
chance_pgnum_shrink: 2
chance_pgpnum_fix: 1
timeout: 1200
- full_sequential:
- radosbench:
clients: [client.0]
time: 150
- radosbench:
clients: [client.0]
time: 150
- radosbench:
clients: [client.0]
time: 150
- radosbench:
clients: [client.0]
time: 150
- radosbench:
clients: [client.0]
time: 150
| 999 | 19.833333 | 44 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rados_cache_snaps.yaml |
tasks:
- ceph:
log-ignorelist:
- overall HEALTH_
- \(OSDMAP_FLAGS\)
- \(OSD_
- \(PG_
- \(POOL_
- \(CACHE_POOL_
- \(SMALLER_PGP_NUM\)
- \(OBJECT_
- \(SLOW_OPS\)
- \(TOO_FEW_PGS\)
- slow request
- thrashosds:
chance_pgnum_grow: 2
chance_pgnum_shrink: 2
chance_pgpnum_fix: 1
timeout: 1200
- exec:
client.0:
- sudo ceph osd pool create base 4
- sudo ceph osd pool application enable base rados
- sudo ceph osd pool create cache 4
- sudo ceph osd tier add base cache
- sudo ceph osd tier cache-mode cache writeback
- sudo ceph osd tier set-overlay base cache
- sudo ceph osd pool set cache hit_set_type bloom
- sudo ceph osd pool set cache hit_set_count 8
- sudo ceph osd pool set cache hit_set_period 3600
- sudo ceph osd pool set cache target_max_objects 250
- rados:
clients:
- client.0
objects: 500
op_weights:
copy_from: 50
delete: 50
cache_evict: 50
cache_flush: 50
read: 100
rollback: 50
snap_create: 50
snap_remove: 50
cache_try_flush: 50
write: 100
ops: 4000
pool_snaps: true
pools:
- base
| 1,216 | 22.862745 | 57 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rados_cls_all.yaml |
overrides:
ceph:
conf:
osd:
osd_class_load_list: "*"
osd_class_default_list: "*"
tasks:
- ceph:
fs: xfs
- workunit:
clients:
client.0:
- cls
| 191 | 12.714286 | 35 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rados_ec_snaps.yaml |
tasks:
- ceph:
fs: xfs
log-ignorelist:
- overall HEALTH_
- \(OSDMAP_FLAGS\)
- \(OSD_
- \(PG_
- \(POOL_
- \(CACHE_POOL_
- \(SMALLER_PGP_NUM\)
- \(OBJECT_
- \(SLOW_OPS\)
- \(TOO_FEW_PGS\)
- slow request
- thrashosds:
chance_pgnum_grow: 3
chance_pgnum_shrink: 2
chance_pgpnum_fix: 1
timeout: 1200
- rados:
clients:
- client.0
ec_pool: true
max_in_flight: 64
max_seconds: 600
objects: 1024
op_weights:
append: 100
copy_from: 50
delete: 50
read: 100
rmattr: 25
rollback: 50
setattr: 25
snap_create: 50
snap_remove: 50
write: 0
ops: 400000
size: 16384
| 732 | 16.878049 | 27 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rados_python.yaml |
tasks:
- ceph:
log-ignorelist:
- but it is still running
- overall HEALTH_
- \(OSDMAP_FLAGS\)
- \(PG_
- \(OSD_
- \(OBJECT_
- \(POOL_APP_NOT_ENABLED\)
- ceph-fuse:
- workunit:
timeout: 1h
clients:
client.0:
- rados/test_python.sh
| 283 | 15.705882 | 30 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rados_workunit_loadgen_mix.yaml |
tasks:
- ceph:
fs: ext4
log-ignorelist:
- but it is still running
- overall HEALTH_
- \(POOL_APP_NOT_ENABLED\)
- ceph-fuse:
- workunit:
clients:
all:
- rados/load-gen-mix.sh
| 212 | 15.384615 | 31 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rbd_api_tests.yaml |
tasks:
- ceph:
log-ignorelist:
- overall HEALTH_
- \(OSDMAP_FLAGS\)
- \(OSD_
- \(PG_
- \(POOL_
- \(CACHE_POOL_
- is full \(reached quota
fs: xfs
- ceph-fuse:
- workunit:
clients:
client.0:
- rbd/test_librbd.sh
env:
RBD_FEATURES: "1"
| 311 | 15.421053 | 31 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rbd_cli_import_export.yaml |
tasks:
- ceph:
fs: xfs
- ceph-fuse:
- workunit:
clients:
client.0:
- rbd/import_export.sh
env:
RBD_CREATE_ARGS: --new-format
| 157 | 13.363636 | 35 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rbd_fsx.yaml |
overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(OSDMAP_FLAGS\)
- \(OSD_
- \(PG_
- \(POOL_
- \(CACHE_POOL_
- \(SMALLER_PGP_NUM\)
- \(OBJECT_
- \(SLOW_OPS\)
- \(TOO_FEW_PGS\)
- \(OSD_SLOW_PING_TIME
- slow request
conf:
client:
rbd cache: true
global:
ms inject socket failures: 5000
mon client directed command retry: 5
tasks:
- ceph:
fs: xfs
- thrashosds:
timeout: 1200
- rbd_fsx:
clients:
- client.0
ops: 2000
| 555 | 16.935484 | 44 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rbd_python_api_tests.yaml |
tasks:
- ceph:
- ceph-fuse:
- workunit:
clients:
client.0:
- rbd/test_librbd_python.sh
env:
RBD_FEATURES: "1"
| 138 | 12.9 | 35 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rbd_workunit_suites_iozone.yaml |
overrides:
ceph:
conf:
global:
ms die on skipped message: false
client:
rbd default features: 5
tasks:
- ceph:
- rbd:
all:
image_size: 20480
- workunit:
clients:
all:
- suites/iozone.sh
| 248 | 13.647059 | 40 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rgw_ec_s3tests.yaml |
overrides:
rgw:
ec-data-pool: true
cache-pools: true
tasks:
- ceph:
- rgw: [client.0]
- tox: [client.0]
- s3tests:
client.0:
force-branch: ceph-master
rgw_server: client.0
overrides:
ceph:
conf:
client:
rgw lc debug interval: 10
rgw crypt s3 kms backend: testing
rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
rgw crypt require ssl: false
| 507 | 23.190476 | 151 | yaml |
null | ceph-main/qa/suites/smoke/basic/tasks/test/rgw_s3tests.yaml |
tasks:
- ceph:
fs: xfs
- rgw: [client.0]
- tox: [client.0]
- s3tests:
client.0:
force-branch: ceph-master
rgw_server: client.0
overrides:
ceph:
conf:
client:
rgw lc debug interval: 10
rgw crypt s3 kms backend: testing
rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
rgw crypt require ssl: false
| 455 | 24.333333 | 151 | yaml |
null | ceph-main/qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml |
.qa/clusters/fixed-3-cephfs.yaml
| 32 | 32 | 32 | yaml |
null | ceph-main/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml |
tasks:
- install:
- ceph:
- ceph-fuse:
- workunit:
clients:
all:
- snaps
| 91 | 9.222222 | 15 | yaml |
null | ceph-main/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml |
tasks:
- install:
extra_system_packages:
deb:
- libaio-dev
- libtool-bin
- uuid-dev
- xfslibs-dev
rpm:
- libaio-devel
- libtool
- libuuid-devel
- xfsprogs-devel
- ceph:
- kclient:
- workunit:
clients:
all:
- suites/fsx.sh
| 304 | 14.25 | 26 | yaml |
null | ceph-main/qa/suites/stress/thrash/clusters/16-osd.yaml |
roles:
- [mon.a, mds.a, osd.0]
- [mon.b, mgr.x, osd.1]
- [mon.c, mgr.y, osd.2]
- [osd.3]
- [osd.4]
- [osd.5]
- [osd.6]
- [osd.7]
- [osd.8]
- [osd.9]
- [osd.10]
- [osd.11]
- [osd.12]
- [osd.13]
- [osd.14]
- [osd.15]
- [client.0]
| 228 | 11.052632 | 23 | yaml |
null | ceph-main/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml |
roles:
- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2]
- [mon.b, mon.c, client.0]
| 79 | 19 | 44 | yaml |
null | ceph-main/qa/suites/stress/thrash/clusters/8-osd.yaml |
roles:
- [mon.a, mds.a, osd.0]
- [mon.b, mgr.x, osd.1]
- [mon.c, osd.2]
- [osd.3]
- [osd.4]
- [osd.5]
- [osd.6]
- [osd.7]
- [client.0]
| 135 | 11.363636 | 23 | yaml |
null | ceph-main/qa/suites/stress/thrash/thrashers/default.yaml |
tasks:
- install:
- ceph:
log-ignorelist:
- but it is still running
- objects unfound and apparently lost
- thrashosds:
| 132 | 15.625 | 41 | yaml |
null | ceph-main/qa/suites/stress/thrash/thrashers/fast.yaml |
tasks:
- install:
- ceph:
log-ignorelist:
- but it is still running
- objects unfound and apparently lost
- thrashosds:
op_delay: 1
chance_down: 10
| 168 | 15.9 | 41 | yaml |
null | ceph-main/qa/suites/stress/thrash/thrashers/more-down.yaml |
tasks:
- install:
- ceph:
log-ignorelist:
- but it is still running
- objects unfound and apparently lost
- thrashosds:
chance_down: 50
| 152 | 16 | 41 | yaml |
null | ceph-main/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml |
tasks:
- ceph-fuse:
- workunit:
clients:
all:
- suites/bonnie.sh
| 83 | 11 | 26 | yaml |
null | ceph-main/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml |
tasks:
- ceph-fuse:
- workunit:
clients:
all:
- suites/iozone.sh
| 83 | 11 | 26 | yaml |
null | ceph-main/qa/suites/stress/thrash/workloads/radosbench.yaml |
tasks:
- radosbench:
clients: [client.0]
time: 1800
| 60 | 11.2 | 23 | yaml |
null | ceph-main/qa/suites/stress/thrash/workloads/readwrite.yaml |
tasks:
- rados:
clients: [client.0]
ops: 4000
objects: 500
op_weights:
read: 45
write: 45
delete: 10
| 135 | 12.6 | 23 | yaml |
null | ceph-main/qa/suites/teuthology/integration.yaml |
tasks:
- teuthology_integration:
| 33 | 10.333333 | 25 | yaml |
null | ceph-main/qa/suites/teuthology/buildpackages/tasks/default.yaml |
roles:
- [client.0]
tasks:
- install:
tag: v0.94.1
- exec:
client.0:
- ceph --version | grep 'version 0.94.1'
- install.upgrade:
client.0:
tag: v0.94.3
- exec:
client.0:
- ceph --version | grep 'version 0.94.3'
| 293 | 18.6 | 50 | yaml |
null | ceph-main/qa/suites/teuthology/buildpackages/tasks/tag.yaml |
roles:
- [mon.a, mgr.x, client.0]
tasks:
- install:
# tag has precedence over branch and sha1
tag: v0.94.1
branch: firefly
sha1: e5b6eea91cc37434f78a987d2dd1d3edd4a23f3f # dumpling
- exec:
client.0:
- ceph --version | grep 'version 0.94.1'
| 302 | 24.25 | 65 | yaml |
null | ceph-main/qa/suites/teuthology/ceph/clusters/single.yaml |
roles:
- [mon.a, mgr.x, client.0]
| 38 | 12 | 30 | yaml |
null | ceph-main/qa/suites/teuthology/ceph/tasks/teuthology.yaml |
tasks:
- install:
- tests:
| 35 | 8 | 14 | yaml |
null | ceph-main/qa/suites/teuthology/multi-cluster/all/ceph.yaml |
roles:
- - ceph.mon.a
- ceph.mon.b
- ceph.mgr.x
- backup.osd.0
- backup.osd.1
- backup.osd.2
- backup.client.0
- - backup.mon.a
- backup.mgr.x
- ceph.osd.0
- ceph.osd.1
- ceph.osd.2
- ceph.client.0
- client.1
- osd.3
tasks:
- install:
- ceph:
cluster: backup
- ceph:
- workunit:
clients:
ceph.client.0: [true.sh]
backup.client.0: [true.sh]
| 386 | 13.884615 | 32 | yaml |
null | ceph-main/qa/suites/teuthology/multi-cluster/all/thrashosds.yaml |
roles:
- - backup.mon.a
- backup.mon.b
- backup.mgr.x
- backup.osd.0
- backup.osd.1
- backup.osd.2
- - backup.mon.c
- backup.osd.3
- backup.osd.4
- backup.osd.5
- backup.client.0
tasks:
- install:
- ceph:
cluster: backup
- thrashosds:
cluster: backup
- workunit:
clients:
all: [true.sh]
| 323 | 13.727273 | 20 | yaml |
null | ceph-main/qa/suites/teuthology/multi-cluster/all/upgrade.yaml |
overrides:
ceph:
log-ignorelist:
- failed to encode map
conf:
mon:
mon warn on legacy crush tunables: false
roles:
- - ceph.mon.a
- ceph.mon.b
- ceph.mgr.x
- backup.osd.0
- backup.osd.1
- backup.osd.2
- backup.client.0
- - backup.mon.a
- backup.mgr.x
- ceph.osd.0
- ceph.osd.1
- ceph.osd.2
- ceph.client.0
- client.1
- osd.3
tasks:
- install:
branch: infernalis
- ceph:
cluster: backup
- ceph:
- workunit:
clients:
backup.client.0: [true.sh]
ceph.client.0: [true.sh]
- install.upgrade:
ceph.mon.a:
branch: jewel
backup.mon.a:
branch: jewel
- ceph.restart: [ceph.mon.a, ceph.mon.b, ceph.osd.0, ceph.osd.1, ceph.osd.2, osd.3]
- exec:
ceph.client.0:
- ceph --version | grep -F 'version 10.'
client.1:
- ceph --cluster backup --version | grep -F 'version 10.'
backup.client.0:
# cli upgraded
- ceph --cluster backup --id 0 --version | grep -F 'version 10.'
- ceph --version | grep -F 'version 10.'
# backup cluster mon not upgraded
- ceph --cluster backup --id 0 tell mon.a version | grep -F 'version 9.2.'
- ceph tell mon.a version | grep -F 'version 10.'
| 1,189 | 21.884615 | 83 | yaml |
null | ceph-main/qa/suites/teuthology/multi-cluster/all/workunit.yaml |
roles:
- - backup.mon.a
- backup.mgr.x
- osd.0
- osd.1
- osd.2
- client.0
- backup.client.0
- - mon.a
- mgr.x
- backup.osd.0
- backup.osd.1
- backup.osd.2
- client.1
- backup.client.1
tasks:
- install:
- workunit:
clients:
all: [true.sh]
- workunit:
clients:
backup.client.1: [true.sh]
| 330 | 12.791667 | 32 | yaml |
null | ceph-main/qa/suites/teuthology/no-ceph/clusters/single.yaml |
roles:
- [mon.a, mgr.x, client.0]
| 38 | 12 | 30 | yaml |