Dataset columns:
- repo: string (length 2 to 152)
- file: string (length 15 to 239)
- code: string (length 0 to 58.4M)
- file_length: int64 (0 to 58.4M)
- avg_line_length: float64 (0 to 1.81M)
- max_line_length: int64 (0 to 12.7M)
- extension_type: categorical string (364 distinct values)
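
The three length columns look like simple per-file statistics over the code text. Below is a minimal sketch of how such values could be derived; the exact conventions (character count including newlines for file_length, per-line averages for avg_line_length, longest line for max_line_length) are inferred from the sample rows, not documented, so treat the helper as hypothetical.

# Hypothetical helper (not part of the dataset): recompute the per-file
# stats columns from a row's `code` field. Assumption: file_length counts
# every character including newlines, avg_line_length averages the lengths
# of the individual lines, and max_line_length is the longest single line.
def file_stats(code: str) -> dict:
    lines = code.splitlines() or [""]
    return {
        "file_length": len(code),
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
    }

# A one-line entry such as ".qa/clusters/fixed-3.yaml" (25 characters) gives
# 25 for all three columns, matching the first record below.
print(file_stats(".qa/clusters/fixed-3.yaml"))

Single-line ".qa/..." entries like that one appear to be symlink targets from the original qa/suites tree rather than full YAML files, which is why their stats are so small.
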
null
ceph-main/qa/suites/krbd/wac/wac/clusters/fixed-3.yaml
.qa/clusters/fixed-3.yaml
25
25
25
yaml
null
ceph-main/qa/suites/krbd/wac/wac/tasks/wac.yaml
tasks:
- exec:
    client.0:
    - "dmesg -C"
- workunit:
    clients:
      all:
        - rbd/krbd_wac.sh
108
11.111111
25
yaml
null
ceph-main/qa/suites/krbd/wac/wac/verify/many-resets.yaml
overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 500
        mon client directed command retry: 5
    log-ignorelist:
      - \(OSD_SLOW_PING_TIME
tasks:
- exec:
    client.0:
      - "dmesg | grep -q 'libceph: osd.* socket closed'"
      - "dmesg | grep -q 'libceph: osd.* socket error on write'"
323
22.142857
62
yaml
null
ceph-main/qa/suites/krbd/wac/wac/verify/no-resets.yaml
tasks:
- exec:
    client.0:
      - "! dmesg | grep -q 'libceph: osd.* socket closed'"
      - "! dmesg | grep -q 'libceph: osd.* socket error on write'"
151
24.333333
64
yaml
null
ceph-main/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml
roles:
- [mon.a, mgr.x, mds.a, osd.0, osd.1]
- [mon.b, mon.c, osd.2, osd.3, client.0]
- [client.1]
99
19
40
yaml
null
ceph-main/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml
overrides:
  ceph:
    conf:
      global:
        ms die on skipped message: false
tasks:
- install:
    branch: dumpling
- ceph:
- parallel:
  - user-workload
  - kclient-workload
user-workload:
  sequential:
  - ceph-fuse: [client.0]
  - workunit:
      clients:
        client.0:
          - suites/iozone.sh
kclient-workload:
  sequential:
  - kclient: [client.1]
  - workunit:
      clients:
        client.1:
          - suites/dbench.sh
452
15.777778
40
yaml
null
ceph-main/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml
overrides:
  ceph:
    conf:
      global:
        ms die on skipped message: false
tasks:
- install:
    branch: dumpling
- ceph:
- parallel:
  - user-workload
  - kclient-workload
user-workload:
  sequential:
  - ceph-fuse: [client.0]
  - workunit:
      clients:
        client.0:
          - suites/blogbench.sh
kclient-workload:
  sequential:
  - kclient: [client.1]
  - workunit:
      clients:
        client.1:
          - kernel_untar_build.sh
459
16.037037
40
yaml
null
ceph-main/qa/suites/netsplit/ceph.yaml
overrides:
  ceph:
    conf:
      global:
        mon election default strategy: 3
      mon:
        mon min osdmap epochs: 25
        paxos service trim min: 5
        # prune full osdmaps regularly
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
    # thrashing monitors may make mgr have trouble w/ its keepalive
    log-whitelist:
      - overall HEALTH_
      - \(MGR_DOWN\)
      - \(MON_DOWN\)
      # slow mons -> slow peering -> PG_AVAILABILITY
      - \(PG_AVAILABILITY\)
      - \(SLOW_OPS\)
tasks:
- install:
- ceph:
593
23.75
63
yaml
null
ceph-main/qa/suites/netsplit/cluster.yaml
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3]
- [mon.b, mgr.y, osd.4, osd.5, osd.6, osd.7, client.0]
- [mon.c]
openstack:
- volumes: # attached to each instance
    count: 4
    size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
278
18.928571
54
yaml
null
ceph-main/qa/suites/netsplit/msgr.yaml
../../msgr/async.yaml
21
21
21
yaml
null
ceph-main/qa/suites/netsplit/rados.yaml
.qa/config/rados.yaml
21
21
21
yaml
null
ceph-main/qa/suites/netsplit/tests/mon_pool_ops.yaml
overrides:
  ceph:
    conf:
      global:
        mon election default strategy: 3
tasks:
- workunit:
    clients:
      client.0:
        - mon/pool_ops.sh
- netsplit.disconnect: [mon.a, mon.c]
- workunit:
    clients:
      client.0:
        - mon/pool_ops.sh
- netsplit.reconnect: [mon.a, mon.c]
- netsplit.disconnect: [mon.b, mon.c]
- workunit:
    clients:
      client.0:
        - mon/pool_ops.sh
404
18.285714
40
yaml
null
ceph-main/qa/suites/orch/cephadm/mgr-nfs-upgrade/0-centos_8.stream_container_tools.yaml
.qa/distros/podman/centos_8.stream_container_tools.yaml
55
55
55
yaml
null
ceph-main/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-start.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph orch status
      - ceph orch ps
      - ceph orch ls
      - ceph orch host ls
      - ceph orch device ls
roles:
- - host.a
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
- - host.b
  - osd.4
  - osd.5
  - osd.6
  - osd.7
openstack:
- volumes: # attached to each instance
    count: 4
    size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
440
13.7
39
yaml
null
ceph-main/qa/suites/orch/cephadm/mgr-nfs-upgrade/2-nfs.yaml
tasks: # stop kernel nfs server, if running - vip.exec: all-hosts: - systemctl stop nfs-server - cephadm.shell: host.a: - ceph fs volume create foofs - cephadm.wait_for_service: service: mds.foofs - cephadm.shell: host.a: - ceph nfs cluster create foo --placement=2 || ceph nfs cluster create cephfs foo --placement=2 - ceph nfs export create cephfs --fsname foofs --clusterid foo --binding /fake || ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake # we can't do wait_for_service here because with octopus it's nfs.ganesha-foo not nfs.foo - while ! ceph orch ls | grep nfs | grep 2/2 ; do sleep 1 ; done - vip.exec: host.a: - mkdir /mnt/foo - while ! mount -t nfs $(hostname):/fake /mnt/foo -o sync ; do sleep 5 ; done - echo test > /mnt/foo/testfile - sync
879
28.333333
170
yaml
null
ceph-main/qa/suites/orch/cephadm/mgr-nfs-upgrade/3-upgrade-with-workload.yaml
tasks: - parallel: - upgrade-tasks - workload-tasks upgrade-tasks: sequential: - cephadm.shell: env: [sha1] host.a: - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force - ceph config set global log_to_journald false --force - ceph mgr module enable nfs --force - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 - cephadm.shell: env: [sha1] host.a: - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done - ceph orch ps - ceph versions - echo "wait for servicemap items w/ changing names to refresh" - sleep 60 - ceph orch ps - ceph orch upgrade status - ceph health detail - ceph versions - ceph versions | jq -e '.overall | length == 1' - ceph versions | jq -e '.overall | keys' | grep $sha1 # this should be a no-op, but confirms nfs.ganesha-foo was remapped to nfs.foo - cephadm.wait_for_service: service: nfs.foo workload-tasks: sequential: - exec: host.a: - cd /mnt/foo && dbench 5 -t 600 || true # might fail with ESTALE # make sure mount works - umount /mnt/foo - while ! mount -t nfs $(hostname):/fake /mnt/foo ; do sleep 5 ; done - cd /mnt/foo && dbench 5 -t 5
1,576
34.840909
231
yaml
null
ceph-main/qa/suites/orch/cephadm/mgr-nfs-upgrade/4-final.yaml
tasks:
- vip.exec:
    host.a:
      - umount /mnt/foo
- cephadm.shell:
    host.a:
      - ceph nfs cluster ls | grep foo
      - ceph nfs export ls foo --detailed
      - rados -p .nfs --all ls -
      - ceph config get mgr mgr/cephadm/migration_current | grep 6
265
23.181818
66
yaml
null
ceph-main/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/16.2.0.yaml
tasks:
- cephadm:
    roleless: true
    image: quay.io/ceph/ceph:v16.2.0
    cephadm_branch: v16.2.0
    cephadm_git_url: https://github.com/ceph/ceph
    # needed for v16.2.0 due to --skip-admin-label
    avoid_pacific_features: true
236
25.333333
50
yaml
null
ceph-main/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/16.2.4.yaml
tasks:
- cephadm:
    roleless: true
    image: quay.io/ceph/ceph:v16.2.4
    cephadm_branch: v16.2.4
    cephadm_git_url: https://github.com/ceph/ceph
    # needed for v16.2.4 due to --skip-admin-label
    avoid_pacific_features: true
236
25.333333
50
yaml
null
ceph-main/qa/suites/orch/cephadm/mgr-nfs-upgrade/1-bootstrap/16.2.5.yaml
tasks:
- cephadm:
    roleless: true
    image: quay.io/ceph/ceph:v16.2.5
    cephadm_branch: v16.2.5
    cephadm_git_url: https://github.com/ceph/ceph
152
20.857143
49
yaml
null
ceph-main/qa/suites/orch/cephadm/orchestrator_cli/2-node-mgr.yaml
.qa/clusters/2-node-mgr.yaml
28
28
28
yaml
null
ceph-main/qa/suites/orch/cephadm/orchestrator_cli/orchestrator_cli.yaml
tasks:
- install:
- ceph:
    # tests may leave mgrs broken, so don't try and call into them
    # to invoke e.g. pg dump during teardown.
    wait-for-scrub: false
    log-ignorelist:
      - overall HEALTH_
      - \(MGR_DOWN\)
      - \(DEVICE_IDENT_ON\)
      - \(DEVICE_FAULT_ON\)
      - \(PG_
      - replacing it with standby
      - No standby daemons available
- cephfs_test_runner:
    modules:
      - tasks.mgr.test_orchestrator_cli
478
25.611111
68
yaml
null
ceph-main/qa/suites/orch/cephadm/osds/0-nvme-loop.yaml
.qa/overrides/nvme_loop.yaml
28
28
28
yaml
null
ceph-main/qa/suites/orch/cephadm/osds/1-start.yaml
tasks:
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
      - ceph orch status
      - ceph orch ps
      - ceph orch ls
      - ceph orch host ls
      - ceph orch device ls
      - ceph orch ls | grep '^osd.all-available-devices '
roles:
- - host.a
  - client.0
- - host.b
  - client.1
openstack:
- volumes: # attached to each instance
    count: 4
    size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
461
16.769231
57
yaml
null
ceph-main/qa/suites/orch/cephadm/osds/2-ops/repave-all.yaml
tasks:
- cephadm.shell:
    host.a:
      - |
        set -e
        set -x
        ceph orch ps
        ceph orch device ls
        ceph osd tree
        for osd in `ceph osd ls` ; do
          ceph orch osd rm $osd --force --zap --replace
        done
        while ceph orch osd rm ls | wc | grep ^1 ; do sleep 10 ; done
326
22.357143
69
yaml
null
ceph-main/qa/suites/orch/cephadm/osds/2-ops/rm-zap-add.yaml
tasks:
- cephadm.shell:
    host.a:
      - |
        set -e
        set -x
        ceph orch ps
        ceph orch device ls
        DEVID=$(ceph device ls | grep osd.1 | awk '{print $1}')
        HOST=$(ceph orch device ls | grep $DEVID | awk '{print $1}')
        DEV=$(ceph orch device ls | grep $DEVID | awk '{print $2}')
        echo "host $HOST, dev $DEV, devid $DEVID"
        ceph orch osd rm 1
        while ceph orch osd rm status | grep ^1 ; do sleep 5 ; done
        ceph orch device zap $HOST $DEV --force
        ceph orch daemon add osd $HOST:$DEV
        while ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done
636
34.388889
72
yaml
null
ceph-main/qa/suites/orch/cephadm/osds/2-ops/rm-zap-flag.yaml
tasks:
- cephadm.shell:
    host.a:
      - |
        set -e
        set -x
        ceph orch ps
        ceph orch device ls
        DEVID=$(ceph device ls | grep osd.1 | awk '{print $1}')
        HOST=$(ceph orch device ls | grep "$DEVID" | awk '{print $1}')
        DEV=$(ceph orch device ls | grep "$DEVID" | awk '{print $2}')
        echo "host $HOST, dev $DEV, devid $DEVID"
        ceph orch osd rm --zap --replace 1
        while ceph orch osd rm status | grep ^1 ; do sleep 5 ; done
        while ! ceph osd dump | grep osd.1 | grep "up\s*in" ; do sleep 5 ; done
571
34.75
79
yaml
null
ceph-main/qa/suites/orch/cephadm/osds/2-ops/rm-zap-wait.yaml
tasks:
- cephadm.shell:
    host.a:
      - |
        set -e
        set -x
        ceph orch ps
        ceph orch device ls
        DEVID=$(ceph device ls | grep osd.1 | awk '{print $1}')
        HOST=$(ceph orch device ls | grep $DEVID | awk '{print $1}')
        DEV=$(ceph orch device ls | grep $DEVID | awk '{print $2}')
        echo "host $HOST, dev $DEV, devid $DEVID"
        ceph orch osd rm 1
        while ceph orch osd rm status | grep ^1 ; do sleep 5 ; done
        ceph orch device zap $HOST $DEV --force
        while ! ceph osd dump | grep osd.1 | grep up ; do sleep 5 ; done
592
33.882353
72
yaml
null
ceph-main/qa/suites/orch/cephadm/osds/2-ops/rmdir-reactivate.yaml
tasks:
- cephadm.shell:
    host.a:
      - |
        set -e
        set -x
        ceph orch ps
        HOST=$(hostname -s)
        OSD=$(ceph orch ps $HOST | grep osd | head -n 1 | awk '{print $1}')
        echo "host $HOST, osd $OSD"
        ceph orch daemon stop $OSD
        while ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
        ceph auth export $OSD > k
        ceph orch daemon rm $OSD --force
        ceph orch ps --refresh
        while ceph orch ps | grep $OSD ; do sleep 5 ; done
        ceph auth add $OSD -i k
        ceph cephadm osd activate $HOST
        while ! ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
- cephadm.healthy:
678
31.333333
75
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/0-nvme-loop.yaml
.qa/overrides/nvme_loop.yaml
28
28
28
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/1-start.yaml
tasks:
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
      - ceph orch status
      - ceph orch ps
      - ceph orch ls
      - ceph orch host ls
      - ceph orch device ls
roles:
- - host.a
  - client.0
- - host.b
  - client.1
openstack:
- volumes: # attached to each instance
    count: 4
    size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
403
15.16
39
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/3-final.yaml
tasks:
- cephadm.shell:
    host.a:
      - stat -c '%u %g' /var/log/ceph | grep '167 167'
      - ceph orch status
      - ceph orch ps
      - ceph orch ls
      - ceph orch host ls
      - ceph orch device ls
      - ceph orch ls | grep '^osd.all-available-devices '
270
23.636364
57
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/basic.yaml
0
0
0
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/client-keyring.yaml
tasks: - cephadm.shell: host.a: - ceph orch host label add `hostname` foo - ceph auth get-or-create client.foo mon 'allow r' - ceph orch client-keyring set client.foo label:foo --mode 770 --owner 11111:22222 - exec: host.a: - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx--- - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111 - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222 - test -e /etc/ceph/ceph.conf - exec: host.b: - test ! -e /etc/ceph/ceph.client.foo.keyring - cephadm.shell: host.b: - ceph orch host label add `hostname` foo - exec: host.b: - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx--- - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111 - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222 - cephadm.shell: host.b: - ceph orch host label rm `hostname` foo - exec: host.b: - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done - exec: host.a: - test -e /etc/ceph/ceph.client.foo.keyring - cephadm.shell: host.a: - ceph orch client-keyring rm client.foo - exec: host.a: - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
1,405
33.292683
88
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/iscsi.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph osd pool create foo
      - rbd pool init foo
      - ceph orch apply iscsi foo u p
- cephadm.wait_for_service:
    service: iscsi.foo
184
19.555556
37
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/jaeger.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph orch apply jaeger
- cephadm.wait_for_service:
    service: elasticsearch
- cephadm.wait_for_service:
    service: jaeger-collector
- cephadm.wait_for_service:
    service: jaeger-query
- cephadm.wait_for_service:
    service: jaeger-agent
287
23
30
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/mirror.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph orch apply rbd-mirror "--placement=*"
      - ceph orch apply cephfs-mirror "--placement=*"
- cephadm.wait_for_service:
    service: rbd-mirror
- cephadm.wait_for_service:
    service: cephfs-mirror
248
23.9
53
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-haproxy-proto.yaml
tasks: - vip: # make sure cephadm notices the new IP - cephadm.shell: host.a: - ceph orch device ls --refresh # stop kernel nfs server, if running - vip.exec: all-hosts: - systemctl stop nfs-server # use nfs module to create cluster and export - cephadm.shell: host.a: - ceph fs volume create fs1 - ceph nfs cluster create happy --ingress --virtual-ip={{VIP0}} --ingress-mode=haproxy-protocol - ceph nfs export create cephfs --fsname fs1 --cluster-id happy --pseudo-path /d1 # wait for services to start - cephadm.wait_for_service: service: nfs.happy - cephadm.wait_for_service: service: ingress.nfs.happy # make sure mount can be reached over VIP, ensuring both that # keepalived is maintaining the VIP and that the nfs has bound to it - vip.exec: host.a: - mkdir /mnt/happy - sleep 1 - mount -t nfs {{VIP0}}:/d1 /mnt/happy - echo test > /mnt/happy/testfile - sync
955
25.555556
101
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml
tasks: - vip: # make sure cephadm notices the new IP - cephadm.shell: host.a: - ceph orch device ls --refresh # stop kernel nfs server, if running - vip.exec: all-hosts: - systemctl stop nfs-server - cephadm.shell: host.a: - ceph orch apply rgw foorgw --port 8800 - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} - vip.exec: host.a: - dnf install -y python3-boto3 || apt install -y python3-boto3 - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json - python: host.a: | import boto3 import json with open('/tmp/user.json', 'rt') as f: info = json.loads(f.read()) s3 = boto3.resource( 's3', aws_access_key_id=info['keys'][0]['access_key'], aws_secret_access_key=info['keys'][0]['secret_key'], endpoint_url='http://localhost:8800', ) bucket = s3.Bucket('foobucket') bucket.create() bucket.put_object(Key='myobject', Body='thebody') - cephadm.shell: host.a: - ceph nfs export create rgw --bucket foobucket --cluster-id foo --pseudo-path /foobucket - cephadm.wait_for_service: service: nfs.foo - cephadm.wait_for_service: service: ingress.nfs.foo ## export and mount - vip.exec: host.a: - mkdir /mnt/foo - sleep 5 - mount -t nfs {{VIP0}}:/foobucket /mnt/foo - find /mnt/foo -ls - grep thebody /mnt/foo/myobject - echo test > /mnt/foo/newobject - sync - python: host.a: | import boto3 import json from io import BytesIO with open('/tmp/user.json', 'rt') as f: info = json.loads(f.read()) s3 = boto3.resource( 's3', aws_access_key_id=info['keys'][0]['access_key'], aws_secret_access_key=info['keys'][0]['secret_key'], endpoint_url='http://localhost:8800', ) bucket = s3.Bucket('foobucket') data = BytesIO() bucket.download_fileobj(Fileobj=data, Key='newobject') print(data.getvalue()) assert data.getvalue().decode() == 'test\n' - vip.exec: host.a: - umount /mnt/foo - cephadm.shell: host.a: - ceph nfs export rm foo /foobucket - ceph nfs cluster rm foo
2,317
24.755556
119
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml
tasks: - vip: # make sure cephadm notices the new IP - cephadm.shell: host.a: - ceph orch device ls --refresh # stop kernel nfs server, if running - vip.exec: all-hosts: - systemctl stop nfs-server - cephadm.shell: host.a: - ceph orch apply rgw foorgw --port 8800 - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} - vip.exec: host.a: - dnf install -y python3-boto3 || apt install -y python3-boto3 - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json - python: host.a: | import boto3 import json with open('/tmp/user.json', 'rt') as f: info = json.loads(f.read()) s3 = boto3.resource( 's3', aws_access_key_id=info['keys'][0]['access_key'], aws_secret_access_key=info['keys'][0]['secret_key'], endpoint_url='http://localhost:8800', ) bucket = s3.Bucket('foobucket') bucket.create() bucket.put_object(Key='myobject', Body='thebody') - cephadm.shell: host.a: - ceph nfs export create rgw --cluster-id foo --pseudo-path /foouser --user-id foouser - cephadm.wait_for_service: service: nfs.foo - cephadm.wait_for_service: service: ingress.nfs.foo ## export and mount - vip.exec: host.a: - mkdir /mnt/foo - sleep 5 - mount -t nfs {{VIP0}}:/foouser /mnt/foo - test -d /mnt/foo/foobucket - find /mnt/foo -ls - grep thebody /mnt/foo/foobucket/myobject - echo test > /mnt/foo/foobucket/newobject - sync - python: host.a: | import boto3 import json from io import BytesIO with open('/tmp/user.json', 'rt') as f: info = json.loads(f.read()) s3 = boto3.resource( 's3', aws_access_key_id=info['keys'][0]['access_key'], aws_secret_access_key=info['keys'][0]['secret_key'], endpoint_url='http://localhost:8800', ) bucket = s3.Bucket('foobucket') data = BytesIO() bucket.download_fileobj(Fileobj=data, Key='newobject') print(data.getvalue()) assert data.getvalue().decode() == 'test\n' - vip.exec: host.a: - umount /mnt/foo - cephadm.shell: host.a: - ceph nfs export rm foo /foouser - ceph nfs cluster rm foo
2,364
24.989011
119
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml
tasks: - vip: # make sure cephadm notices the new IP - cephadm.shell: host.a: - ceph orch device ls --refresh # stop kernel nfs server, if running - vip.exec: all-hosts: - systemctl stop nfs-server - cephadm.shell: host.a: - ceph fs volume create foofs # deploy nfs + ingress - cephadm.apply: specs: - service_type: nfs service_id: foo placement: count: 2 spec: port: 12049 - service_type: ingress service_id: nfs.foo spec: backend_service: nfs.foo frontend_port: 2049 monitor_port: 9002 virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}" - cephadm.wait_for_service: service: nfs.foo - cephadm.wait_for_service: service: ingress.nfs.foo ## export and mount - cephadm.shell: host.a: - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake - vip.exec: host.a: - mkdir /mnt/foo - sleep 5 - mount -t nfs {{VIP0}}:/fake /mnt/foo - echo test > /mnt/foo/testfile - sync # take each gateway down in turn and ensure things still work - cephadm.shell: volumes: - /mnt/foo:/mnt/foo host.a: - | echo "Check with each haproxy down in turn..." for haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '{print $1}'`; do ceph orch daemon stop $haproxy while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done cat /mnt/foo/testfile echo $haproxy > /mnt/foo/testfile sync ceph orch daemon start $haproxy while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done done
1,716
23.884058
89
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml
tasks: - vip: # make sure cephadm notices the new IP - cephadm.shell: host.a: - ceph orch device ls --refresh # stop kernel nfs server, if running - vip.exec: all-hosts: - systemctl stop nfs-server - cephadm.shell: host.a: - ceph fs volume create foofs - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} --port 2999 - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake - cephadm.wait_for_service: service: nfs.foo - cephadm.wait_for_service: service: ingress.nfs.foo ## export and mount - vip.exec: host.a: - mkdir /mnt/foo - sleep 5 - mount -t nfs {{VIP0}}:/fake /mnt/foo -o port=2999 - echo test > /mnt/foo/testfile - sync # take each gateway down in turn and ensure things still work - cephadm.shell: volumes: - /mnt/foo:/mnt/foo host.a: - | echo "Check with each haproxy down in turn..." for haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '{print $1}'`; do ceph orch daemon stop $haproxy while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done cat /mnt/foo/testfile echo $haproxy > /mnt/foo/testfile sync ceph orch daemon start $haproxy while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done done # take each ganesha down in turn. # simulate "failure" by deleting the container - vip.exec: all-hosts: - | echo "Check with $(hostname) ganesha(s) down..." for c in `systemctl | grep ceph- | grep @nfs | awk '{print $1}'`; do cid=`echo $c | sed 's/@/-/'` id=`echo $c | cut -d @ -f 2 | sed 's/.service$//'` fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-` echo "Removing daemon $id fsid $fsid..." sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id echo "Waking up cephadm..." sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh while ! timeout 1 cat /mnt/foo/testfile ; do true ; done echo "Mount is back!" done
2,167
29.535211
96
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-keepalive-only.yaml
tasks: - vip: # make sure cephadm notices the new IP - cephadm.shell: host.a: - ceph orch device ls --refresh # stop kernel nfs server, if running - vip.exec: all-hosts: - systemctl stop nfs-server - cephadm.shell: host.a: - ceph fs volume create foofs # deploy nfs + keepalive-only ingress service - cephadm.apply: specs: - service_type: nfs service_id: foo placement: count: 1 spec: port: 2049 virtual_ip: "{{VIP0}}" - service_type: ingress service_id: nfs.foo placement: count: 1 spec: backend_service: nfs.foo monitor_port: 9002 virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}" keepalive_only: true - cephadm.wait_for_service: service: nfs.foo - cephadm.wait_for_service: service: ingress.nfs.foo # export and mount - cephadm.shell: host.a: - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake # make sure mount can be reached over VIP, ensuring both that # keepalived is maintaining the VIP and that the nfs has bound to it - vip.exec: host.a: - mkdir /mnt/foo - sleep 5 - mount -t nfs {{VIP0}}:/fake /mnt/foo - echo test > /mnt/foo/testfile - sync
1,308
22.375
89
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs.yaml
tasks:
# stop kernel nfs server, if running
- vip.exec:
    all-hosts:
      - systemctl stop nfs-server
- cephadm.apply:
    specs:
      - service_type: nfs
        service_id: foo
- cephadm.wait_for_service:
    service: nfs.foo
234
15.785714
36
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs2.yaml
tasks:
# stop kernel nfs server, if running
- vip.exec:
    all-hosts:
      - systemctl stop nfs-server
- cephadm.shell:
    host.a:
      - ceph nfs cluster create foo
- cephadm.wait_for_service:
    service: nfs.foo
221
16.076923
36
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw-ingress.yaml
tasks: - vip: # make sure cephadm notices the new IP - cephadm.shell: host.a: - ceph orch device ls --refresh # deploy rgw + ingress - cephadm.apply: specs: - service_type: rgw service_id: foo placement: count: 4 host_pattern: "*" spec: rgw_frontend_port: 8000 - service_type: ingress service_id: rgw.foo placement: count: 2 spec: backend_service: rgw.foo frontend_port: 9000 monitor_port: 9001 virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}" - cephadm.wait_for_service: service: rgw.foo - cephadm.wait_for_service: service: ingress.rgw.foo # take each component down in turn and ensure things still work - cephadm.shell: host.a: - | echo "Check while healthy..." curl http://{{VIP0}}:9000/ # stop each rgw in turn echo "Check with each rgw stopped in turn..." for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do ceph orch daemon stop $rgw while ! ceph orch ps | grep $rgw | grep stopped; do sleep 1 ; done while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done ceph orch daemon start $rgw while ! ceph orch ps | grep $rgw | grep running; do sleep 1 ; done done # stop each haproxy in turn echo "Check with each haproxy down in turn..." for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do ceph orch daemon stop $haproxy while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done ceph orch daemon start $haproxy while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done done while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
1,907
30.278689
85
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw.yaml
tasks:
- cephadm.apply:
    specs:
      - service_type: rgw
        service_id: foo
        placement:
          count_per_host: 4
          host_pattern: "*"
        spec:
          rgw_frontend_port: 8000
- cephadm.wait_for_service:
    service: rgw.foo
257
18.846154
33
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-singlehost/1-start.yaml
tasks:
- cephadm:
    roleless: true
    single_host_defaults: true
- cephadm.shell:
    host.a:
      - ceph orch status
      - ceph orch ps
      - ceph orch ls
      - ceph orch host ls
      - ceph orch device ls
roles:
- - host.a
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
openstack:
- volumes: # attached to each instance
    count: 4
    size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
450
15.107143
39
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-singlehost/3-final.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph orch status
      - ceph orch ps
      - ceph orch ls
      - ceph orch host ls
      - ceph orch device ls
157
16.555556
27
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-singlehost/2-services/basic.yaml
0
0
0
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-singlehost/2-services/rgw.yaml
tasks:
- cephadm.apply:
    specs:
      - service_type: rgw
        service_id: foo
        placement:
          count_per_host: 4
          host_pattern: "*"
        spec:
          rgw_frontend_port: 8000
- cephadm.wait_for_service:
    service: rgw.foo
257
18.846154
33
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-small/0-nvme-loop.yaml
.qa/overrides/nvme_loop.yaml
28
28
28
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-small/fixed-2.yaml
roles:
- - mon.a
  - mgr.y
  - osd.0
  - client.0
  - ceph.rgw.foo.a
  - node-exporter.a
  - alertmanager.a
- - mon.b
  - mgr.x
  - osd.1
  - client.1
  - prometheus.a
  - grafana.a
  - node-exporter.b
- - mon.c
  - mgr.z
  - osd.2
  - client.2
  - node-exporter.c
openstack:
- volumes: # attached to each instance
    count: 1
    size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
426
13.233333
39
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-small/start.yaml
tasks:
- cephadm:
    conf:
      mgr:
        debug ms: 1
        debug mgr: 20
- cephadm.shell:
    mon.a:
      - stat -c '%u %g' /var/log/ceph | grep '167 167'
      - ceph orch status
      - ceph orch ps
      - ceph orch ls
      - ceph orch host ls
      - ceph orch device ls
      - ceph orch ls --format yaml
      - ceph orch ls | grep '^osd '
356
20
54
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-small/0-distro/centos_8.stream_container_tools_crun.yaml
../.qa/distros/container-hosts/centos_8.stream_container_tools_crun.yaml
72
72
72
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-small/agent/off.yaml
overrides:
  ceph:
    conf:
      mgr:
        mgr/cephadm/use_agent: false
77
12
36
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke-small/agent/on.yaml
overrides:
  ceph:
    conf:
      mgr:
        mgr/cephadm/use_agent: true
76
11.833333
35
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke/0-nvme-loop.yaml
.qa/overrides/nvme_loop.yaml
28
28
28
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke/fixed-2.yaml
roles:
- - mon.a
  - mon.c
  - mgr.y
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
  - ceph.rgw.foo.a
  - node-exporter.a
  - alertmanager.a
- - mon.b
  - mgr.x
  - osd.4
  - osd.5
  - osd.6
  - osd.7
  - client.1
  - prometheus.a
  - grafana.a
  - node-exporter.b
  - ceph.iscsi.iscsi.a
openstack:
- volumes: # attached to each instance
    count: 4
    size: 10 # GB
overrides:
  ceph:
    conf:
      osd:
        osd shutdown pgref assert: true
456
12.848485
39
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke/start.yaml
tasks:
- cephadm:
    conf:
      mgr:
        debug ms: 1
        debug mgr: 20
- cephadm.shell:
    mon.a:
      - stat -c '%u %g' /var/log/ceph | grep '167 167'
      - ceph orch status
      - ceph orch ps
      - ceph orch ls
      - ceph orch host ls
      - ceph orch device ls
      - ceph orch ls --format yaml
      - ceph orch ls | grep '^osd '
356
20
54
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke/agent/off.yaml
overrides:
  ceph:
    conf:
      mgr:
        mgr/cephadm/use_agent: false
77
12
36
yaml
null
ceph-main/qa/suites/orch/cephadm/smoke/agent/on.yaml
overrides:
  ceph:
    conf:
      mgr:
        mgr/cephadm/use_agent: true
76
11.833333
35
yaml
null
ceph-main/qa/suites/orch/cephadm/thrash/1-start.yaml
tasks:
- install:
- cephadm:
    conf:
      mgr:
        debug ms: 1
        debug mgr: 20
92
10.625
21
yaml
null
ceph-main/qa/suites/orch/cephadm/thrash/2-thrash.yaml
overrides:
  ceph:
    log-ignorelist:
      - but it is still running
      - objects unfound and apparently lost
    conf:
      osd:
        osd debug reject backfill probability: .3
        osd scrub min interval: 60
        osd scrub max interval: 120
        osd max backfills: 3
        osd snap trim sleep: 2
        osd delete sleep: 1
      mon:
        mon min osdmap epochs: 50
        paxos service trim min: 10
        # prune full osdmaps regularly
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgnum_shrink: 1
    chance_pgpnum_fix: 1
696
24.814815
49
yaml
null
ceph-main/qa/suites/orch/cephadm/thrash/fixed-2.yaml
../smoke/fixed-2.yaml
21
21
21
yaml
null
ceph-main/qa/suites/orch/cephadm/thrash/root.yaml
overrides:
  cephadm:
    cephadm_mode: root
45
10.5
22
yaml
null
ceph-main/qa/suites/orch/cephadm/thrash/3-tasks/rados_api_tests.yaml
.qa/suites/rados/thrash/workloads/rados_api_tests.yaml
54
54
54
yaml
null
ceph-main/qa/suites/orch/cephadm/thrash/3-tasks/radosbench.yaml
.qa/suites/rados/thrash/workloads/radosbench.yaml
49
49
49
yaml
null
ceph-main/qa/suites/orch/cephadm/thrash/3-tasks/small-objects.yaml
.qa/suites/rados/thrash/workloads/small-objects.yaml
52
52
52
yaml
null
ceph-main/qa/suites/orch/cephadm/thrash/3-tasks/snaps-few-objects.yaml
.qa/suites/rados/thrash/workloads/snaps-few-objects.yaml
56
56
56
yaml
null
ceph-main/qa/suites/orch/cephadm/upgrade/4-wait.yaml
tasks:
- cephadm.shell:
    env: [sha1]
    mon.a:
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
      - ceph orch ps
      - ceph versions
      - echo "wait for servicemap items w/ changing names to refresh"
      - sleep 60
      - ceph orch ps
      - ceph versions
      - ceph orch upgrade status
      - ceph health detail
      - ceph versions | jq -e '.overall | length == 1'
      - ceph versions | jq -e '.overall | keys' | grep $sha1
      - ceph orch ls | grep '^osd '
666
38.235294
229
yaml
null
ceph-main/qa/suites/orch/cephadm/upgrade/5-upgrade-ls.yaml
tasks:
- cephadm.shell:
    mon.a:
      - ceph orch upgrade ls
      - ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0
      - ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2
230
32
88
yaml
null
ceph-main/qa/suites/orch/cephadm/upgrade/1-start-distro/1-start-centos_8.stream_container-tools.yaml
os_type: centos os_version: "8.stream" tasks: - pexec: all: - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.backup - sudo dnf -y module reset container-tools - sudo dnf -y module install container-tools - sudo cp /etc/containers/registries.conf.backup /etc/containers/registries.conf - cephadm: image: quay.io/ceph/ceph:v16.2.0 cephadm_branch: v16.2.0 cephadm_git_url: https://github.com/ceph/ceph # avoid --cap-add=PTRACE + --privileged for older cephadm versions allow_ptrace: false avoid_pacific_features: true roles: - - mon.a - mon.c - mgr.y - osd.0 - osd.1 - osd.2 - osd.3 - client.0 - node-exporter.a - alertmanager.a - - mon.b - mgr.x - osd.4 - osd.5 - osd.6 - osd.7 - client.1 - prometheus.a - grafana.a - node-exporter.b
841
20.05
84
yaml
null
ceph-main/qa/suites/orch/cephadm/upgrade/1-start-distro/1-start-ubuntu_20.04.yaml
os_type: ubuntu
os_version: "20.04"
tasks:
- cephadm:
    image: quay.io/ceph/ceph:v16.2.0
    cephadm_branch: v16.2.0
    cephadm_git_url: https://github.com/ceph/ceph
    # avoid --cap-add=PTRACE + --privileged for older cephadm versions
    allow_ptrace: false
    avoid_pacific_features: true
roles:
- - mon.a
  - mon.c
  - mgr.y
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
  - node-exporter.a
  - alertmanager.a
- - mon.b
  - mgr.x
  - osd.4
  - osd.5
  - osd.6
  - osd.7
  - client.1
  - prometheus.a
  - grafana.a
  - node-exporter.b
552
15.264706
70
yaml
null
ceph-main/qa/suites/orch/cephadm/upgrade/2-repo_digest/defaut.yaml
0
0
0
yaml
null
ceph-main/qa/suites/orch/cephadm/upgrade/2-repo_digest/repo_digest.yaml
tasks:
- cephadm.shell:
    mon.a:
      - ceph config set mgr mgr/cephadm/use_repo_digest false --force
105
20.2
69
yaml
null
ceph-main/qa/suites/orch/cephadm/upgrade/3-upgrade/simple.yaml
tasks: - cephadm.shell: env: [sha1] mon.a: # setup rgw - radosgw-admin realm create --rgw-realm=r --default - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default - radosgw-admin period update --rgw-realm=r --commit - ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000 # simple rgw spec (will have no "spec" field) to make sure that works with rgw spec migration - ceph orch apply rgw smpl # setup iscsi - ceph osd pool create foo - rbd pool init foo - ceph orch apply iscsi foo u p - sleep 120 - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force - ceph config set global log_to_journald false --force - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
1,007
44.818182
99
yaml
null
ceph-main/qa/suites/orch/cephadm/upgrade/3-upgrade/staggered.yaml
tasks: - cephadm.shell: env: [sha1] mon.a: # setup rgw - radosgw-admin realm create --rgw-realm=r --default - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default - radosgw-admin period update --rgw-realm=r --commit - ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000 # setup iscsi - ceph osd pool create foo - rbd pool init foo - ceph orch apply iscsi foo u p - sleep 180 - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force - ceph config set global log_to_journald false --force # get some good info on the state of things pre-upgrade. Useful for debugging - ceph orch ps - ceph versions - ceph -s - ceph orch ls # doing staggered upgrade requires mgr daemons being on a version that contains the staggered upgrade code # until there is a stable version that contains it, we can test by manually upgrading a mgr daemon - ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1 - ceph orch ps --refresh - sleep 180 # gather more possible debugging info - ceph orch ps - ceph versions - ceph -s - ceph health detail # check that there are two different versions found for mgr daemon (which implies we upgraded one) - ceph versions | jq -e '.mgr | length == 2' - ceph mgr fail - sleep 180 # now try upgrading the other mgr - ceph orch daemon redeploy "mgr.$(ceph mgr dump -f json | jq .standbys | jq .[] | jq -r .name)" --image quay.ceph.io/ceph-ci/ceph:$sha1 - ceph orch ps --refresh - sleep 180 # gather more possible debugging info - ceph orch ps - ceph versions - ceph health detail - ceph -s - ceph mgr fail - sleep 180 # gather more debugging info - ceph orch ps - ceph versions - ceph -s - ceph health detail # now that both mgrs should have been redeployed with the new version, we should be back on only 1 version for the mgrs - ceph versions | jq -e '.mgr | length == 1' - ceph mgr fail - sleep 180 # debugging info - ceph orch ps - ceph orch ls - ceph versions # to make sure mgr daemons upgrade is fully completed, including being deployed by a mgr on a new version # also serves as an early failure if manually upgrading the mgrs failed as --daemon-types won't be recognized - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done # verify only one version found for mgrs and that their version hash matches what we are upgrading to - ceph versions | jq -e '.mgr | length == 1' - ceph versions | jq -e '.mgr | keys' | grep $sha1 # verify overall we still see two versions, basically to make sure --daemon-types wasn't ignored and all daemons upgraded - ceph versions | jq -e '.overall | length == 2' # check that exactly two daemons have been upgraded to the new image (our 2 mgr daemons) - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 2' - ceph orch upgrade status - ceph health detail # upgrade only the mons on one of the two hosts - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.x | awk '{print $2}') - while ceph orch upgrade status | jq '.in_progress' | grep true && ! 
ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done - ceph orch ps # verify two different version seen for mons - ceph versions | jq -e '.mon | length == 2' - ceph orch upgrade status - ceph health detail # upgrade mons on the other hosts - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --hosts $(ceph orch ps | grep mgr.y | awk '{print $2}') - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done - ceph orch ps # verify all mons now on same version and version hash matches what we are upgrading to - ceph versions | jq -e '.mon | length == 1' - ceph versions | jq -e '.mon | keys' | grep $sha1 # verify exactly 5 daemons are now upgraded (2 mgrs, 3 mons) - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 5' - ceph orch upgrade status - ceph health detail # upgrade exactly 2 osd daemons - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 2 - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done - ceph orch ps # verify two different versions now seen for osds - ceph versions | jq -e '.osd | length == 2' # verify exactly 7 daemons have been upgraded (2 mgrs, 3 mons, 2 osds) - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 7' - ceph orch upgrade status - ceph health detail # upgrade one more osd - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd --limit 1 - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done - ceph orch ps - ceph versions | jq -e '.osd | length == 2' # verify now 8 daemons have been upgraded - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 8' # upgrade the rest of the osds - ceph orch upgrade status - ceph health detail - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types crash,osd - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done - ceph orch ps # verify all osds are now on same version and version hash matches what we are upgrading to - ceph versions | jq -e '.osd | length == 1' - ceph versions | jq -e '.osd | keys' | grep $sha1 - ceph orch upgrade status - ceph health detail # upgrade the rgw daemons using --services - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --services rgw.foo - while ceph orch upgrade status | jq '.in_progress' | grep true && ! 
ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done - ceph orch ps # verify all rgw daemons on same version and version hash matches what we are upgrading to - ceph versions | jq -e '.rgw | length == 1' - ceph versions | jq -e '.rgw | keys' | grep $sha1 - ceph orch upgrade status - ceph health detail # run upgrade one more time with no filter parameters to make sure anything left gets upgraded - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
7,926
58.601504
208
yaml
null
ceph-main/qa/suites/orch/cephadm/with-work/fixed-2.yaml
../smoke/fixed-2.yaml
21
21
21
yaml
null
ceph-main/qa/suites/orch/cephadm/with-work/start.yaml
tasks:
- install:
- cephadm:
    conf:
      mgr:
        debug ms: 1
        debug mgr: 20
92
10.625
21
yaml
null
ceph-main/qa/suites/orch/cephadm/with-work/mode/packaged.yaml
overrides:
  cephadm:
    cephadm_mode: cephadm-package
  install:
    extra_packages: [cephadm]
97
15.333333
33
yaml
null
ceph-main/qa/suites/orch/cephadm/with-work/mode/root.yaml
overrides:
  cephadm:
    cephadm_mode: root
45
10.5
22
yaml
null
ceph-main/qa/suites/orch/cephadm/with-work/tasks/rados_api_tests.yaml
.qa/suites/rados/basic/tasks/rados_api_tests.yaml
49
49
49
yaml
null
ceph-main/qa/suites/orch/cephadm/with-work/tasks/rados_python.yaml
.qa/suites/rados/basic/tasks/rados_python.yaml
46
46
46
yaml
null
ceph-main/qa/suites/orch/cephadm/with-work/tasks/rotate-keys.yaml
tasks:
- cephadm.shell:
    mon.a:
      - |
        set -ex
        for f in osd.0 osd.1 osd.2 osd.3 osd.4 osd.5 osd.6 osd.7 mgr.y mgr.x
        do
          echo "rotating key for $f"
          K=$(ceph auth get-key $f)
          NK="$K"
          ceph orch daemon rotate-key $f
          while [ "$K" == "$NK" ]; do
            sleep 5
            NK=$(ceph auth get-key $f)
          done
        done
400
22.588235
74
yaml
null
ceph-main/qa/suites/orch/cephadm/workunits/task/test_adoption.yaml
roles:
- [mon.a, mgr.x, osd.0, client.0]
tasks:
- install:
- exec:
    mon.a:
      - yum install -y python3 || apt install -y python3
- workunit:
    clients:
      client.0:
        - cephadm/test_adoption.sh
211
16.666667
56
yaml
null
ceph-main/qa/suites/orch/cephadm/workunits/task/test_cephadm.yaml
roles:
- [mon.a, mgr.x, osd.0, client.0]
tasks:
- install:
- exec:
    mon.a:
      - yum install -y python3 || apt install -y python3
- workunit:
    clients:
      client.0:
        - cephadm/test_cephadm.sh
210
16.583333
56
yaml
null
ceph-main/qa/suites/orch/cephadm/workunits/task/test_cephadm_repos.yaml
roles:
- [mon.a, mgr.x, osd.0, client.0]
tasks:
- workunit:
    no_coverage_and_limits: true
    clients:
      client.0:
        - cephadm/test_repos.sh
154
16.222222
33
yaml
null
ceph-main/qa/suites/orch/cephadm/workunits/task/test_extra_daemon_features.yaml
roles: - - host.a - mon.a - mgr.a - osd.0 - - host.b - mon.b - mgr.b - osd.1 tasks: - install: - cephadm: - exec: all-hosts: - mkdir /etc/cephadm_testing - cephadm.apply: specs: - service_type: mon placement: host_pattern: '*' extra_container_args: - "--cpus=2" extra_entrypoint_args: - "--debug_ms 10" - service_type: container service_id: foo placement: host_pattern: '*' spec: image: "quay.io/fedora/fedora:latest" entrypoint: "bash" extra_container_args: - "-v" - "/etc/cephadm_testing:/root/cephadm_testing" extra_entrypoint_args: - "/root/write_thing_to_file.sh" - "-c" - "testing_custom_containers" - "-o" - "/root/cephadm_testing/testing.txt" custom_configs: - mount_path: "/root/write_thing_to_file.sh" content: | while getopts "o:c:" opt; do case ${opt} in o ) OUT_FILE=${OPTARG} ;; c ) CONTENT=${OPTARG} esac done echo $CONTENT > $OUT_FILE sleep infinity - cephadm.wait_for_service: service: mon - cephadm.wait_for_service: service: container.foo - exec: host.a: - | set -ex FSID=$(/home/ubuntu/cephtest/cephadm shell -- ceph fsid) sleep 60 # check extra container and entrypoint args written to mon unit run file grep "\-\-cpus=2" /var/lib/ceph/$FSID/mon.*/unit.run grep "\-\-debug_ms 10" /var/lib/ceph/$FSID/mon.*/unit.run # check that custom container properly wrote content to file. # This requires the custom config, extra container args, and # entrypoint args to all be working in order for this to have # been written. The container entrypoint was set up with custom_configs, # the content and where to write to with the entrypoint args, and the mounting # of the /etc/cephadm_testing dir with extra container args grep "testing_custom_containers" /etc/cephadm_testing/testing.txt
2,257
29.106667
86
yaml
null
ceph-main/qa/suites/orch/cephadm/workunits/task/test_nfs.yaml
roles:
- - host.a
  - osd.0
  - osd.1
  - osd.2
  - mon.a
  - mgr.a
  - client.0
tasks:
- install:
- cephadm:
- cephadm.shell:
    host.a:
      - ceph orch apply mds a
- cephfs_test_runner:
    modules:
      - tasks.cephfs.test_nfs
234
12.055556
29
yaml
null
ceph-main/qa/suites/orch/cephadm/workunits/task/test_orch_cli.yaml
roles:
- - host.a
  - osd.0
  - osd.1
  - osd.2
  - mon.a
  - mgr.a
  - client.0
tasks:
- install:
- cephadm:
- cephadm.shell:
    host.a:
      - ceph orch apply mds a
- cephfs_test_runner:
    modules:
      - tasks.cephadm_cases.test_cli
241
12.444444
36
yaml
null
ceph-main/qa/suites/orch/cephadm/workunits/task/test_orch_cli_mon.yaml
roles:
- - host.a
  - osd.0
  - osd.1
  - osd.2
  - mon.a
  - mgr.a
  - client.0
- - host.b
  - osd.3
  - osd.4
  - osd.5
  - mon.b
  - mgr.b
  - client.1
- - host.c
  - osd.6
  - osd.7
  - osd.8
  - mon.c
  - mgr.c
  - client.2
- - host.d
  - osd.9
  - osd.10
  - osd.11
  - mon.d
  - mgr.d
  - client.3
- - host.e
  - osd.12
  - osd.13
  - osd.14
  - mon.e
  - mgr.e
  - client.4
tasks:
- install:
- cephadm:
- cephadm.shell:
    host.a:
      - ceph orch apply mds a
- cephfs_test_runner:
    modules:
      - tasks.cephadm_cases.test_cli_mon
546
10.891304
40
yaml
null
ceph-main/qa/suites/orch/cephadm/workunits/task/test_rgw_multisite.yaml
roles: - - host.a - mon.a - mgr.a - osd.0 - - host.b - mon.b - mgr.b - osd.1 - - host.c - mon.c - osd.2 tasks: - install: - cephadm: - cephadm.shell: host.a: - ceph mgr module enable rgw - rgw_module.apply: specs: - rgw_realm: myrealm1 rgw_zonegroup: myzonegroup1 rgw_zone: myzone1 spec: rgw_frontend_port: 5500 - cephadm.shell: host.a: - | set -e set -x while true; do TOKEN=$(ceph rgw realm tokens | jq -r '.[0].token'); echo $TOKEN; if [ "$TOKEN" != "master zone has no endpoint" ]; then break; fi; sleep 5; done TOKENS=$(ceph rgw realm tokens) echo $TOKENS | jq --exit-status '.[0].realm == "myrealm1"' echo $TOKENS | jq --exit-status '.[0].token' TOKEN_JSON=$(ceph rgw realm tokens | jq -r '.[0].token' | base64 --decode) echo $TOKEN_JSON | jq --exit-status '.realm_name == "myrealm1"' echo $TOKEN_JSON | jq --exit-status '.endpoint | test("http://.+:\\d+")' echo $TOKEN_JSON | jq --exit-status '.realm_id | test("^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$")' echo $TOKEN_JSON | jq --exit-status '.access_key' echo $TOKEN_JSON | jq --exit-status '.secret'
1,257
29.682927
168
yaml
null
ceph-main/qa/suites/orch/cephadm/workunits/task/test_set_mon_crush_locations.yaml
roles: - - host.a - osd.0 - mon.a - mgr.a - - host.b - osd.1 - mon.b - mgr.b - - host.c - osd.2 - mon.c tasks: - install: - cephadm: - cephadm.apply: specs: - service_type: mon service_id: foo placement: count: 3 spec: crush_locations: host.a: - datacenter=a host.b: - datacenter=b - rack=2 host.c: - datacenter=a - rack=3 - cephadm.shell: host.a: - | set -ex # since we don't know the real hostnames before the test, the next # bit is in order to replace the fake hostnames "host.a/b/c" with # the actual names cephadm knows the host by within the mon spec ceph orch host ls --format json | jq -r '.[] | .hostname' > realnames echo $'host.a\nhost.b\nhost.c' > fakenames echo $'a\nb\nc' > mon_ids echo $'{datacenter=a}\n{datacenter=b,rack=2}\n{datacenter=a,rack=3}' > crush_locs ceph orch ls --service-name mon --export > mon.yaml MONSPEC=`cat mon.yaml` echo "$MONSPEC" while read realname <&3 && read fakename <&4; do MONSPEC="${MONSPEC//$fakename/$realname}" done 3<realnames 4<fakenames echo "$MONSPEC" > mon.yaml cat mon.yaml # now the spec should have the real hostnames, so let's re-apply ceph orch apply -i mon.yaml sleep 90 ceph orch ps --refresh ceph orch ls --service-name mon --export > mon.yaml; ceph orch apply -i mon.yaml sleep 90 ceph mon dump ceph mon dump --format json # verify all the crush locations got set from "ceph mon dump" output while read monid <&3 && read crushloc <&4; do ceph mon dump --format json | jq --arg monid "$monid" --arg crushloc "$crushloc" -e '.mons | .[] | select(.name == $monid) | .crush_location == $crushloc' done 3<mon_ids 4<crush_locs
1,999
30.746032
164
yaml
null
ceph-main/qa/suites/orch/cephadm/workunits/task/test_iscsi_container/centos_8.stream_container_tools.yaml
.qa/distros/podman/centos_8.stream_container_tools.yaml
55
55
55
yaml
null
ceph-main/qa/suites/orch/cephadm/workunits/task/test_iscsi_container/test_iscsi_container.yaml
roles:
- - host.a
  - osd.0
  - osd.1
  - osd.2
  - mon.a
  - mgr.a
  - client.0
tasks:
- install:
- cephadm:
- cephadm.shell:
    host.a:
      - ceph osd pool create foo
      - rbd pool init foo
      - ceph orch apply iscsi foo u p
- workunit:
    clients:
      client.0:
        - cephadm/test_iscsi_pids_limit.sh
        - cephadm/test_iscsi_etc_hosts.sh
362
15.5
42
yaml
null
ceph-main/qa/suites/orch/rook/smoke/0-kubeadm.yaml
tasks:
- kubeadm:
18
5.333333
10
yaml
null
ceph-main/qa/suites/orch/rook/smoke/0-nvme-loop.yaml
.qa/overrides/nvme_loop.yaml
28
28
28
yaml
null
ceph-main/qa/suites/orch/rook/smoke/1-rook.yaml
tasks:
- rook:
15
4.333333
7
yaml
null
ceph-main/qa/suites/orch/rook/smoke/0-distro/ubuntu_20.04.yaml
.qa/distros/container-hosts/ubuntu_20.04.yaml
45
45
45
yaml