| repo (string, length 2-152, nullable) | file (string, length 15-239) | code (string, length 0-58.4M) | file_length (int64, 0-58.4M) | avg_line_length (float64, 0-1.81M) | max_line_length (int64, 0-12.7M) | extension_type (string, 364 classes) |
---|---|---|---|---|---|---|
null |
ceph-main/qa/suites/teuthology/no-ceph/tasks/teuthology.yaml
|
tasks:
- tests:
| 20 | 6 | 12 |
yaml
|
null |
ceph-main/qa/suites/teuthology/nop/clusters/empty.yaml
| 0 | 0 | 0 |
yaml
|
|
null |
ceph-main/qa/suites/teuthology/nop/clusters/single.yaml
|
roles:
- [mon.a, mgr.x, client.0]
| 38 | 12 | 30 |
yaml
|
null |
ceph-main/qa/suites/teuthology/nop/tasks/nop.yaml
|
tasks:
- nop:
| 19 | 4 | 10 |
yaml
|
null |
ceph-main/qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml
|
# this runs s3tests against rgw, using mod_fastcgi
roles:
- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.1]
tasks:
- install:
branch: master
- ceph:
- rgw: [client.0]
- s3tests:
client.0:
rgw_server: client.0
force-branch: ceph-master
overrides:
ceph:
fs: xfs
conf:
client:
debug rgw: 20
rgw lc debug interval: 10
rgw:
ec-data-pool: false
frontend: apache
| 465 | 17.64 | 50 |
yaml
|
null |
ceph-main/qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml
|
# this runs s3tests against rgw, using mod_proxy_fcgi
# the choice between uds or tcp with mod_proxy_fcgi depends on the distro
roles:
- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.1]
tasks:
- install:
branch: master
- ceph:
- rgw: [client.0]
- s3tests:
client.0:
rgw_server: client.0
force-branch: ceph-master
overrides:
ceph:
fs: xfs
conf:
client:
debug rgw: 20
rgw lc debug interval: 10
rgw:
ec-data-pool: false
frontend: apache
use_fcgi: true
| 561 | 19.814815 | 73 |
yaml
|
null |
ceph-main/qa/suites/teuthology/rgw/tasks/s3tests.yaml
|
# this runs s3tests against rgw
roles:
- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.1]
tasks:
- install:
branch: master
- ceph:
- rgw: [client.0]
- s3tests:
client.0:
rgw_server: client.0
force-branch: ceph-master
overrides:
ceph:
fs: xfs
conf:
client:
debug rgw: 20
rgw lc debug interval: 10
rgw:
ec-data-pool: false
| 425 | 16.75 | 47 |
yaml
|
null |
ceph-main/qa/suites/teuthology/workunits/yes.yaml
|
roles:
- [client.0]
tasks:
- install:
- workunit:
clients:
all:
- true.sh
| 96 | 9.777778 | 17 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/clusters/fixed-3.yaml
|
roles:
- [mon.a, mon.c, osd.0, osd.1, osd.2]
- [mon.b, mgr.x, mds.a, osd.3, osd.4, osd.5]
- [client.0]
| 103 | 19.8 | 44 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/msgr-failures/few.yaml
|
overrides:
ceph:
conf:
global:
ms inject socket failures: 5000
mon client directed command retry: 5
log-ignorelist:
- \(OSD_SLOW_PING_TIME
| 177 | 18.777778 | 44 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/msgr-failures/many.yaml
|
overrides:
ceph:
conf:
global:
ms inject socket failures: 500
mon client directed command retry: 5
log-ignorelist:
- \(OSD_SLOW_PING_TIME
| 176 | 18.666667 | 44 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/blogbench.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/blogbench.sh
| 114 | 10.5 | 33 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/bonnie.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/bonnie.sh
| 111 | 10.2 | 30 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/dbench-short.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/dbench-short.sh
| 117 | 10.8 | 36 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/dbench.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/dbench.sh
| 111 | 10.2 | 30 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/ffsb.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/ffsb.sh
| 109 | 10 | 28 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/fio.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/fio.sh
| 108 | 9.9 | 27 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/fsstress.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/fsstress.sh
| 113 | 10.4 | 32 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/fsx.yaml
|
tasks:
- install:
extra_system_packages:
deb:
- libaio-dev
- libtool-bin
- uuid-dev
- xfslibs-dev
rpm:
- libaio-devel
- libtool
- libuuid-devel
- xfsprogs-devel
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/fsx.sh
| 315 | 14.047619 | 27 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/fsync-tester.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/fsync-tester.sh
| 117 | 10.8 | 36 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/iogen.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/iogen.sh
| 110 | 10.1 | 29 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/iozone-sync.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/iozone-sync.sh
| 116 | 10.7 | 35 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/iozone.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/iozone.sh
| 111 | 10.2 | 30 |
yaml
|
null |
ceph-main/qa/suites/tgt/basic/tasks/pjd.yaml
|
tasks:
- install:
- ceph:
- tgt:
- iscsi:
- workunit:
clients:
all:
- suites/pjd.sh
| 108 | 9.9 | 27 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/parallel/0-start.yaml
|
roles:
- - mon.a
- mon.c
- mgr.y
- mds.a
- osd.0
- osd.1
- osd.2
- osd.3
- client.0
- node-exporter.a
- alertmanager.a
- - mon.b
- mds.b
- mgr.x
- osd.4
- osd.5
- osd.6
- osd.7
- client.1
- prometheus.a
- grafana.a
- node-exporter.b
openstack:
- volumes: # attached to each instance
count: 4
size: 10 # GB
overrides:
ceph:
create_rbd_pool: true
conf:
osd:
osd shutdown pgref assert: true
| 460 | 12.558824 | 39 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/parallel/1-tasks.yaml
|
tasks:
- install:
branch: pacific
exclude_packages:
- ceph-volume
- print: "**** done install task..."
- print: "**** done start installing pacific cephadm ..."
- cephadm:
image: quay.ceph.io/ceph-ci/ceph:pacific
cephadm_branch: pacific
cephadm_git_url: https://github.com/ceph/ceph
conf:
osd:
#set config option for which cls modules are allowed to be loaded / used
osd_class_load_list: "*"
osd_class_default_list: "*"
- print: "**** done end installing pacific cephadm ..."
- print: "**** done start cephadm.shell ceph config set mgr..."
- cephadm.shell:
mon.a:
- ceph config set mgr mgr/cephadm/use_repo_digest true --force
- print: "**** done cephadm.shell ceph config set mgr..."
- print: "**** done start telemetry pacific..."
- workunit:
clients:
client.0:
- test_telemetry_pacific.sh
- print: "**** done end telemetry pacific..."
- print: "**** done start parallel"
- parallel:
- workload
- upgrade-sequence
- print: "**** done end parallel"
- print: "**** done start telemetry x..."
- workunit:
clients:
client.0:
- test_telemetry_pacific_x.sh
- print: "**** done end telemetry x..."
| 1,211 | 26.545455 | 80 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/parallel/upgrade-sequence.yaml
|
# renamed tasks: to upgrade-sequence:
upgrade-sequence:
sequential:
- print: "**** done start upgrade, wait"
- cephadm.shell:
env: [sha1]
mon.a:
- ceph config set global log_to_journald false --force
- ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
- ceph orch ps
- ceph versions
- ceph versions | jq -e '.overall | length == 1'
- ceph versions | jq -e '.overall | keys' | grep $sha1
- print: "**** done end upgrade, wait..."
| 650 | 37.294118 | 125 |
yaml
|
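A note on the version checks in upgrade-sequence.yaml above: `ceph versions` prints a JSON document with one section per daemon type plus an `overall` section mapping each version string to a daemon count, so `jq -e '.overall | length == 1'` succeeds only once every daemon reports the same build, and the `keys | grep $sha1` step confirms that build is the target one. A minimal Python sketch of the same convergence test, using a hypothetical sample of that output (the version strings and counts are made up):

```python
import json

# Hypothetical sample of `ceph versions` output once the upgrade has converged;
# real output has more daemon sections, but the shape is the same.
versions_json = '''
{
  "mon": {"ceph version 18.0.0-1234-gabcdef (abcdef) reef (dev)": 3},
  "osd": {"ceph version 18.0.0-1234-gabcdef (abcdef) reef (dev)": 8},
  "overall": {"ceph version 18.0.0-1234-gabcdef (abcdef) reef (dev)": 11}
}
'''

versions = json.loads(versions_json)

# Equivalent of `jq -e '.overall | length == 1'`:
# exactly one distinct version string remains cluster-wide.
assert len(versions["overall"]) == 1

# Equivalent of `ceph versions | jq -e '.overall | keys' | grep $sha1`:
# the surviving version string must name the target build.
sha1 = "abcdef"
assert any(sha1 in key for key in versions["overall"])
print("upgrade converged on a single build containing", sha1)
```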
null |
ceph-main/qa/suites/upgrade/pacific-x/parallel/workload/ec-rados-default.yaml
|
meta:
- desc: |
run randomized correctness test for rados operations
on an erasure-coded pool
workload:
full_sequential:
- print: "**** done start ec-rados-default.yaml"
- rados:
clients: [client.0]
ops: 4000
objects: 50
ec_pool: true
write_append_excl: false
op_weights:
read: 100
write: 0
append: 100
delete: 50
snap_create: 50
snap_remove: 50
rollback: 50
copy_from: 50
setattr: 25
rmattr: 25
- print: "**** done end ec-rados-default.yaml"
| 577 | 21.230769 | 59 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/parallel/workload/rados_api.yaml
|
meta:
- desc: |
object class functional tests
workload:
full_sequential:
- print: "**** done start rados_api.yaml"
- workunit:
branch: pacific
clients:
client.0:
- cls
- print: "**** done end rados_api.yaml"
| 263 | 19.307692 | 45 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/parallel/workload/rados_loadgenbig.yaml
|
meta:
- desc: |
generate read/write load with rados objects ranging from 1MB to 25MB
workload:
full_sequential:
- print: "**** done start rados_loadgenbig.yaml"
- workunit:
branch: pacific
clients:
client.0:
- rados/load-gen-big.sh
- print: "**** done end rados_loadgenbig.yaml"
| 334 | 24.769231 | 71 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/parallel/workload/rbd_import_export.yaml
|
meta:
- desc: |
run basic import/export cli tests for rbd
workload:
full_sequential:
- print: "**** done start rbd_import_export.yaml"
- workunit:
branch: pacific
clients:
client.1:
- rbd/import_export.sh
env:
RBD_CREATE_ARGS: --new-format
- print: "**** done end rbd_import_export.yaml"
| 363 | 23.266667 | 53 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/parallel/workload/test_rbd_api.yaml
|
meta:
- desc: |
librbd C and C++ api tests
workload:
full_sequential:
- print: "**** done start test_rbd_api.yaml"
- workunit:
branch: pacific
clients:
client.0:
- rbd/test_librbd.sh
- print: "**** done end test_rbd_api.yaml"
| 283 | 20.846154 | 48 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/parallel/workload/test_rbd_python.yaml
|
meta:
- desc: |
librbd python api tests
workload:
full_sequential:
- print: "**** done start test_rbd_python.yaml"
- workunit:
branch: pacific
clients:
client.0:
- rbd/test_librbd_python.sh
- print: "**** done end test_rbd_python.yaml"
| 292 | 19.928571 | 51 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/0-roles.yaml
|
roles:
- - mon.a
- mon.c
- mgr.y
- osd.0
- osd.1
- osd.2
- osd.3
- client.0
- node-exporter.a
- alertmanager.a
- - mon.b
- mgr.x
- osd.4
- osd.5
- osd.6
- osd.7
- client.1
- prometheus.a
- grafana.a
- node-exporter.b
openstack:
- volumes: # attached to each instance
count: 4
size: 10 # GB
overrides:
ceph:
create_rbd_pool: true
conf:
osd:
osd shutdown pgref assert: true
| 440 | 12.78125 | 39 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/1-start.yaml
|
tasks:
- install:
branch: pacific
exclude_packages:
- ceph-volume
- cephadm:
image: quay.ceph.io/ceph-ci/ceph:pacific
cephadm_branch: pacific
cephadm_git_url: https://github.com/ceph/ceph
conf:
osd:
#set config option for which cls modules are allowed to be loaded / used
osd_class_load_list: "*"
osd_class_default_list: "*"
- cephadm.shell:
mon.a:
- ceph fs volume create foo
- ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
- ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
- ceph.healthy:
- print: "**** upgrading first half of cluster, with stress ****"
- parallel:
- first-half-tasks
- first-half-sequence
- print: "**** done upgrading first half of cluster ****"
- ceph.healthy:
- print: "**** applying stress + thrashing to mixed-version cluster ****"
- parallel:
- stress-tasks
- ceph.healthy:
- print: "**** finishing upgrade ****"
- parallel:
- second-half-tasks
- second-half-sequence
- ceph.healthy:
#################
first-half-sequence:
- cephadm.shell:
env: [sha1]
mon.a:
- ceph config set mgr mgr/cephadm/daemon_cache_timeout 60
- ceph config set global log_to_journald false --force
- ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- ceph orch ps
- echo wait for minority of mons to upgrade
- while ! ceph mon versions | grep $sha1 ; do sleep 2 ; done
- ceph orch ps
- ceph orch upgrade pause
- sleep 60
- ceph orch upgrade resume
- echo wait for majority of mons to upgrade
- "while ! ceph mon versions | grep $sha1 | egrep ': [23]' ; do sleep 2 ; done"
- ceph orch ps
- ceph orch upgrade pause
- sleep 60
- ceph orch upgrade resume
- echo wait for all mons to upgrade
- "while ! ceph mon versions | grep $sha1 | grep ': 3' ; do sleep 2 ; done"
- ceph orch ps
- ceph orch upgrade pause
- sleep 60
- ceph orch upgrade resume
- echo wait for half of osds to upgrade
- "while ! ceph osd versions | grep $sha1 | egrep ': [45678]'; do sleep 2 ; done"
- ceph orch upgrade pause
- ceph orch ps
- ceph orch ps
- ceph versions
#################
stress-tasks:
- thrashosds:
timeout: 1200
chance_pgnum_grow: 1
chance_pgpnum_fix: 1
chance_thrash_cluster_full: 0
chance_thrash_pg_upmap: 0
chance_thrash_pg_upmap_items: 0
disable_objectstore_tool_tests: true
chance_force_recovery: 0
aggressive_pg_num_changes: false
#################
second-half-sequence:
sequential:
- cephadm.shell:
env: [sha1]
mon.a:
- ceph orch upgrade resume
- sleep 60
- echo wait for upgrade to complete
- while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
- echo upgrade complete
- ceph orch ps
- ceph versions
- ceph versions | jq -e '.overall | length == 1'
- ceph versions | jq -e '.overall | keys' | grep $sha1
| 3,188 | 24.926829 | 126 |
yaml
|
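The `first-half-sequence` above gates each pause/resume step on how many mons (and later osds) already report the new build: `ceph mon versions` prints a map from version string to daemon count, so `grep $sha1 | egrep ': [23]'` matches once 2 or 3 of the 3 mons run the target sha1. A rough Python equivalent of that majority check, with made-up sample output:

```python
import re

# Hypothetical `ceph mon versions` output midway through the upgrade:
# two of the three mons already run the new build, one is still on the old one.
mon_versions = {
    "ceph version 16.2.13 (...) pacific (stable)": 1,
    "ceph version 18.0.0-1234-gabcdef (abcdef) reef (dev)": 2,
}
sha1 = "abcdef"

# Rough equivalent of
#   while ! ceph mon versions | grep $sha1 | egrep ': [23]' ; do sleep 2 ; done
# which keeps polling until the entry for the new build reports 2 or 3 mons.
def majority_on_target(versions, sha1):
    for version, count in versions.items():
        line = '"%s": %d' % (version, count)   # roughly what grep sees per entry
        if sha1 in line and re.search(r": [23]", line):
            return True
    return False

print(majority_on_target(mon_versions, sha1))   # True once 2 of 3 mons upgraded
```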
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/2-first-half-tasks/radosbench.yaml
|
meta:
- desc: |
run randomized correctness test for rados operations
generate write load with rados bench
first-half-tasks:
- full_sequential:
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- print: "**** done end radosbench.yaml"
| 418 | 19.95 | 55 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/2-first-half-tasks/rbd-cls.yaml
|
meta:
- desc: |
run basic cls tests for rbd
first-half-tasks:
- workunit:
branch: pacific
clients:
client.0:
- cls/test_cls_rbd.sh
- print: "**** done cls/test_cls_rbd.sh 5-workload"
| 208 | 18 | 51 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/2-first-half-tasks/rbd-import-export.yaml
|
meta:
- desc: |
run basic import/export cli tests for rbd
first-half-tasks:
- workunit:
branch: pacific
clients:
client.0:
- rbd/import_export.sh
env:
RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 5-workload"
| 269 | 19.769231 | 52 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/2-first-half-tasks/rbd_api.yaml
|
meta:
- desc: |
librbd C and C++ api tests
first-half-tasks:
- workunit:
branch: pacific
clients:
client.0:
- rbd/test_librbd.sh
- print: "**** done rbd/test_librbd.sh 7-workload"
| 212 | 18.363636 | 50 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/2-first-half-tasks/readwrite.yaml
|
meta:
- desc: |
randomized correctness test for rados operations on a replicated pool,
using only reads, writes, and deletes
first-half-tasks:
- full_sequential:
- rados:
clients: [client.0]
ops: 4000
objects: 500
write_append_excl: false
op_weights:
read: 45
write: 45
delete: 10
- print: "**** done rados/readwrite 5-workload"
| 391 | 22.058824 | 73 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/2-first-half-tasks/snaps-few-objects.yaml
|
meta:
- desc: |
randomized correctness test for rados operations on a replicated pool with snapshot operations
first-half-tasks:
- full_sequential:
- rados:
clients: [client.0]
ops: 4000
objects: 50
write_append_excl: false
op_weights:
read: 100
write: 100
delete: 50
snap_create: 50
snap_remove: 50
rollback: 50
- print: "**** done rados/snaps-few-objects 5-workload"
| 452 | 22.842105 | 97 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/3-stress-tasks/radosbench.yaml
|
meta:
- desc: |
run randomized correctness test for rados operations
generate write load with rados bench
stress-tasks:
- full_sequential:
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- print: "**** done end radosbench.yaml"
| 528 | 19.346154 | 55 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/3-stress-tasks/rbd-cls.yaml
|
meta:
- desc: |
run basic cls tests for rbd
stress-tasks:
- workunit:
branch: pacific
clients:
client.0:
- cls/test_cls_rbd.sh
- print: "**** done cls/test_cls_rbd.sh 5-workload"
| 204 | 17.636364 | 51 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/3-stress-tasks/rbd-import-export.yaml
|
meta:
- desc: |
run basic import/export cli tests for rbd
stress-tasks:
- workunit:
branch: pacific
clients:
client.0:
- rbd/import_export.sh
env:
RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 5-workload"
| 265 | 19.461538 | 52 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/3-stress-tasks/rbd_api.yaml
|
meta:
- desc: |
librbd C and C++ api tests
stress-tasks:
- workunit:
branch: pacific
clients:
client.0:
- rbd/test_librbd.sh
- print: "**** done rbd/test_librbd.sh 7-workload"
| 208 | 18 | 50 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/3-stress-tasks/readwrite.yaml
|
meta:
- desc: |
randomized correctness test for rados operations on a replicated pool,
using only reads, writes, and deletes
stress-tasks:
- full_sequential:
- rados:
clients: [client.0]
ops: 4000
objects: 500
write_append_excl: false
op_weights:
read: 45
write: 45
delete: 10
- print: "**** done rados/readwrite 5-workload"
| 387 | 21.823529 | 73 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/3-stress-tasks/snaps-few-objects.yaml
|
meta:
- desc: |
randomized correctness test for rados operations on a replicated pool with snapshot operations
stress-tasks:
- full_sequential:
- rados:
clients: [client.0]
ops: 4000
objects: 50
write_append_excl: false
op_weights:
read: 100
write: 100
delete: 50
snap_create: 50
snap_remove: 50
rollback: 50
- print: "**** done rados/snaps-few-objects 5-workload"
| 448 | 22.631579 | 97 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/4-second-half-tasks/radosbench.yaml
|
meta:
- desc: |
run randomized correctness test for rados operations
generate write load with rados bench
second-half-tasks:
- full_sequential:
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- print: "**** done end radosbench.yaml"
| 362 | 20.352941 | 55 |
yaml
|
null |
ceph-main/qa/suites/upgrade/pacific-x/stress-split/4-second-half-tasks/rbd-import-export.yaml
|
meta:
- desc: |
run basic import/export cli tests for rbd
second-half-tasks:
- workunit:
branch: pacific
clients:
client.0:
- rbd/import_export.sh
env:
RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 5-workload"
| 270 | 19.846154 | 52 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/filestore-remove-check/2 - upgrade.yaml
|
meta:
- desc: |
install upgrade ceph/-x on cluster
restart : mons, osd.*
tasks:
- install.upgrade:
mon.a:
- exec:
osd.0:
- ceph osd require-osd-release quincy
- print: "**** done install.upgrade of nodes"
- ceph.restart:
daemons: [mon.a,mgr.x,osd.0,osd.1,osd.2]
mon-health-to-clog: false
wait-for-healthy: false
wait-for-osds-up: false
wait-for-scrub: false
skip_stop_pg_num_changes: true
expected-failure: "FileStore has been deprecated and is no longer supported"
- print: "**** done ceph.restart of all mons and osds"
| 569 | 26.142857 | 80 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/filestore-remove-check/ubuntu_20.04.yaml
|
os_type: ubuntu
os_version: "20.04"
# the normal ubuntu 20.04 kernel (5.4.0-88-generic currently) has a bug that prevents the nvme_loop
# from behaving. I think it is this:
# https://lkml.org/lkml/2020/9/21/1456
# (at least, that is the symptom: nvme nvme1: Connect command failed, error wo/DNR bit: 880)
overrides:
kernel:
hwe: true
| 343 | 37.222222 | 100 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/filestore-remove-check/0-cluster/openstack.yaml
|
openstack:
- machine:
disk: 100 # GB
- volumes: # attached to each instance
count: 4
size: 30 # GB
| 121 | 16.428571 | 40 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/filestore-remove-check/0-cluster/start.yaml
|
meta:
- desc: |
Run ceph on one node,
Use xfs beneath the osds. Upgrade to reef
should fail to start the osds with filestore
overrides:
ceph:
mon_bind_msgr2: false
mon_bind_addrvec: false
mon-health-to-clog: false
wait-for-healthy: false
wait-for-osds-up: false
wait-for-scrub: false
skip_stop_pg_num_changes: true
fs: xfs
log-ignorelist:
- overall HEALTH_
- \(MON_DOWN\)
- \(MGR_DOWN\)
- slow request
- \(MON_MSGR2_NOT_ENABLED\)
conf:
global:
enable experimental unrecoverable data corrupting features: "*"
mon warn on msgr2 not enabled: false
mon:
mon warn on osd down out interval zero: false
roles:
- - mon.a
- mgr.x
- osd.0
- osd.1
- osd.2
| 770 | 22.363636 | 71 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/filestore-remove-check/1-ceph-install/quincy.yaml
|
meta:
- desc: install ceph/quincy latest
tasks:
- install:
exclude_packages:
- ceph-mgr-cephadm
- cephadm
- libcephfs-dev
branch: quincy
- print: "**** done install quincy"
- ceph:
create_rbd_pool: false
conf:
global:
bluestore_warn_on_legacy_statfs: false
bluestore warn on no per pool omap: false
mon pg warn min per osd: 0
mon:
mon_warn_on_insecure_global_id_reclaim: false
mon_warn_on_insecure_global_id_reclaim_allowed: false
log-ignorelist:
- Not found or unloadable
- evicting unresponsive client
- exec:
osd.0:
- ceph osd require-osd-release quincy
- print: "**** done ceph"
overrides:
ceph:
conf:
mon:
mon warn on osd down out interval zero: false
| 789 | 22.939394 | 61 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/filestore-remove-check/objectstore/filestore-xfs.yaml
|
overrides:
ceph:
fs: xfs
conf:
osd:
osd objectstore: filestore
osd sloppy crc: true
ceph-deploy:
fs: xfs
filestore: True
conf:
osd:
osd objectstore: filestore
osd sloppy crc: true
| 247 | 16.714286 | 34 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/parallel/0-start.yaml
|
roles:
- - mon.a
- mon.c
- mgr.y
- mds.a
- osd.0
- osd.1
- osd.2
- osd.3
- client.0
- node-exporter.a
- alertmanager.a
- - mon.b
- mds.b
- mgr.x
- osd.4
- osd.5
- osd.6
- osd.7
- client.1
- prometheus.a
- grafana.a
- node-exporter.b
openstack:
- volumes: # attached to each instance
count: 4
size: 10 # GB
overrides:
ceph:
create_rbd_pool: true
conf:
osd:
osd shutdown pgref assert: true
| 460 | 12.558824 | 39 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/parallel/1-tasks.yaml
|
tasks:
- install:
branch: quincy
exclude_packages:
- ceph-volume
- print: "**** done install task..."
- print: "**** done start installing quincy cephadm ..."
- cephadm:
image: quay.ceph.io/ceph-ci/ceph:quincy
cephadm_branch: quincy
cephadm_git_url: https://github.com/ceph/ceph
conf:
osd:
#set config option for which cls modules are allowed to be loaded / used
osd_class_load_list: "*"
osd_class_default_list: "*"
- print: "**** done end installing quincy cephadm ..."
- print: "**** done start cephadm.shell ceph config set mgr..."
- cephadm.shell:
mon.a:
- ceph config set mgr mgr/cephadm/use_repo_digest true --force
- print: "**** done cephadm.shell ceph config set mgr..."
- print: "**** done start telemetry quincy..."
- workunit:
clients:
client.0:
- test_telemetry_quincy.sh
- print: "**** done end telemetry quincy..."
- print: "**** done start parallel"
- parallel:
- workload
- upgrade-sequence
- print: "**** done end parallel"
- print: "**** done start telemetry x..."
- workunit:
clients:
client.0:
- test_telemetry_quincy_x.sh
- print: "**** done end telemetry x..."
| 1,202 | 26.340909 | 80 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/parallel/upgrade-sequence.yaml
|
# renamed tasks: to upgrade-sequence:
upgrade-sequence:
sequential:
- print: "**** done start upgrade, wait"
- cephadm.shell:
env: [sha1]
mon.a:
- ceph config set global log_to_journald false --force
- ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
- ceph orch ps
- ceph versions
- ceph versions | jq -e '.overall | length == 1'
- ceph versions | jq -e '.overall | keys' | grep $sha1
- print: "**** done end upgrade, wait..."
| 650 | 37.294118 | 125 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/parallel/workload/ec-rados-default.yaml
|
meta:
- desc: |
run randomized correctness test for rados operations
on an erasure-coded pool
workload:
full_sequential:
- print: "**** done start ec-rados-default.yaml"
- rados:
clients: [client.0]
ops: 4000
objects: 50
ec_pool: true
write_append_excl: false
op_weights:
read: 100
write: 0
append: 100
delete: 50
snap_create: 50
snap_remove: 50
rollback: 50
copy_from: 50
setattr: 25
rmattr: 25
- print: "**** done end ec-rados-default.yaml"
| 577 | 21.230769 | 59 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/parallel/workload/rados_api.yaml
|
meta:
- desc: |
object class functional tests
workload:
full_sequential:
- print: "**** done start rados_api.yaml"
- workunit:
branch: quincy
clients:
client.0:
- cls
- print: "**** done end rados_api.yaml"
| 262 | 19.230769 | 45 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/parallel/workload/rados_loadgenbig.yaml
|
meta:
- desc: |
generate read/write load with rados objects ranging from 1MB to 25MB
workload:
full_sequential:
- print: "**** done start rados_loadgenbig.yaml"
- workunit:
branch: quincy
clients:
client.0:
- rados/load-gen-big.sh
- print: "**** done end rados_loadgenbig.yaml"
| 333 | 24.692308 | 71 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/parallel/workload/rbd_import_export.yaml
|
meta:
- desc: |
run basic import/export cli tests for rbd
workload:
full_sequential:
- print: "**** done start rbd_import_export.yaml"
- workunit:
branch: quincy
clients:
client.1:
- rbd/import_export.sh
env:
RBD_CREATE_ARGS: --new-format
- print: "**** done end rbd_import_export.yaml"
| 362 | 23.2 | 53 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/parallel/workload/test_rbd_api.yaml
|
meta:
- desc: |
librbd C and C++ api tests
workload:
full_sequential:
- print: "**** done start test_rbd_api.yaml"
- workunit:
branch: quincy
clients:
client.0:
- rbd/test_librbd.sh
- print: "**** done end test_rbd_api.yaml"
| 282 | 20.769231 | 48 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/parallel/workload/test_rbd_python.yaml
|
meta:
- desc: |
librbd python api tests
workload:
full_sequential:
- print: "**** done start test_rbd_python.yaml"
- workunit:
branch: quincy
clients:
client.0:
- rbd/test_librbd_python.sh
- print: "**** done end test_rbd_python.yaml"
| 291 | 19.857143 | 51 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/0-roles.yaml
|
roles:
- - mon.a
- mon.c
- mgr.y
- osd.0
- osd.1
- osd.2
- osd.3
- client.0
- node-exporter.a
- alertmanager.a
- - mon.b
- mgr.x
- osd.4
- osd.5
- osd.6
- osd.7
- client.1
- prometheus.a
- grafana.a
- node-exporter.b
openstack:
- volumes: # attached to each instance
count: 4
size: 10 # GB
overrides:
ceph:
create_rbd_pool: true
conf:
osd:
osd shutdown pgref assert: true
| 440 | 12.78125 | 39 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/1-start.yaml
|
tasks:
- install:
branch: quincy
exclude_packages:
- ceph-volume
- cephadm:
image: quay.ceph.io/ceph-ci/ceph:quincy
cephadm_branch: quincy
cephadm_git_url: https://github.com/ceph/ceph
conf:
osd:
#set config option for which cls modules are allowed to be loaded / used
osd_class_load_list: "*"
osd_class_default_list: "*"
- cephadm.shell:
mon.a:
- ceph fs volume create foo
- ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
- ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
- ceph.healthy:
- print: "**** upgrading first half of cluster, with stress ****"
- parallel:
- first-half-tasks
- first-half-sequence
- print: "**** done upgrading first half of cluster ****"
- ceph.healthy:
- print: "**** applying stress + thrashing to mixed-version cluster ****"
- parallel:
- stress-tasks
- ceph.healthy:
- print: "**** finishing upgrade ****"
- parallel:
- second-half-tasks
- second-half-sequence
- ceph.healthy:
#################
first-half-sequence:
- cephadm.shell:
env: [sha1]
mon.a:
- ceph config set mgr mgr/cephadm/daemon_cache_timeout 60
- ceph config set global log_to_journald false --force
- ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- ceph orch ps
- echo wait for minority of mons to upgrade
- while ! ceph mon versions | grep $sha1 ; do sleep 2 ; done
- ceph orch ps
- ceph orch upgrade pause
- sleep 60
- ceph orch upgrade resume
- echo wait for majority of mons to upgrade
- "while ! ceph mon versions | grep $sha1 | egrep ': [23]' ; do sleep 2 ; done"
- ceph orch ps
- ceph orch upgrade pause
- sleep 60
- ceph orch upgrade resume
- echo wait for all mons to upgrade
- "while ! ceph mon versions | grep $sha1 | grep ': 3' ; do sleep 2 ; done"
- ceph orch ps
- ceph orch upgrade pause
- sleep 60
- ceph orch upgrade resume
- echo wait for half of osds to upgrade
- "while ! ceph osd versions | grep $sha1 | egrep ': [45678]'; do sleep 2 ; done"
- ceph orch upgrade pause
- ceph orch ps
- ceph orch ps
- ceph versions
#################
stress-tasks:
- thrashosds:
timeout: 1200
chance_pgnum_grow: 1
chance_pgpnum_fix: 1
chance_thrash_cluster_full: 0
chance_thrash_pg_upmap: 0
chance_thrash_pg_upmap_items: 0
disable_objectstore_tool_tests: true
chance_force_recovery: 0
aggressive_pg_num_changes: false
#################
second-half-sequence:
sequential:
- cephadm.shell:
env: [sha1]
mon.a:
- ceph orch upgrade resume
- sleep 60
- echo wait for upgrade to complete
- while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
- echo upgrade complete
- ceph orch ps
- ceph versions
- ceph versions | jq -e '.overall | length == 1'
- ceph versions | jq -e '.overall | keys' | grep $sha1
| 3,185 | 24.902439 | 126 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/2-first-half-tasks/radosbench.yaml
|
meta:
- desc: |
run randomized correctness test for rados operations
generate write load with rados bench
first-half-tasks:
- full_sequential:
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- print: "**** done end radosbench.yaml"
| 418 | 19.95 | 55 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/2-first-half-tasks/rbd-cls.yaml
|
meta:
- desc: |
run basic cls tests for rbd
first-half-tasks:
- workunit:
branch: quincy
clients:
client.0:
- cls/test_cls_rbd.sh
- print: "**** done cls/test_cls_rbd.sh 5-workload"
| 207 | 17.909091 | 51 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/2-first-half-tasks/rbd-import-export.yaml
|
meta:
- desc: |
run basic import/export cli tests for rbd
first-half-tasks:
- workunit:
branch: quincy
clients:
client.0:
- rbd/import_export.sh
env:
RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 5-workload"
| 268 | 19.692308 | 52 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/2-first-half-tasks/rbd_api.yaml
|
meta:
- desc: |
librbd C and C++ api tests
first-half-tasks:
- workunit:
branch: quincy
clients:
client.0:
- rbd/test_librbd.sh
- print: "**** done rbd/test_librbd.sh 7-workload"
| 211 | 18.272727 | 50 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/2-first-half-tasks/readwrite.yaml
|
meta:
- desc: |
randomized correctness test for rados operations on a replicated pool,
using only reads, writes, and deletes
first-half-tasks:
- full_sequential:
- rados:
clients: [client.0]
ops: 4000
objects: 500
write_append_excl: false
op_weights:
read: 45
write: 45
delete: 10
- print: "**** done rados/readwrite 5-workload"
| 391 | 22.058824 | 73 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/2-first-half-tasks/snaps-few-objects.yaml
|
meta:
- desc: |
randomized correctness test for rados operations on a replicated pool with snapshot operations
first-half-tasks:
- full_sequential:
- rados:
clients: [client.0]
ops: 4000
objects: 50
write_append_excl: false
op_weights:
read: 100
write: 100
delete: 50
snap_create: 50
snap_remove: 50
rollback: 50
- print: "**** done rados/snaps-few-objects 5-workload"
| 452 | 22.842105 | 97 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/3-stress-tasks/radosbench.yaml
|
meta:
- desc: |
run randomized correctness test for rados operations
generate write load with rados bench
stress-tasks:
- full_sequential:
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- print: "**** done end radosbench.yaml"
| 528 | 19.346154 | 55 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/3-stress-tasks/rbd-cls.yaml
|
meta:
- desc: |
run basic cls tests for rbd
stress-tasks:
- workunit:
branch: quincy
clients:
client.0:
- cls/test_cls_rbd.sh
- print: "**** done cls/test_cls_rbd.sh 5-workload"
| 203 | 17.545455 | 51 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/3-stress-tasks/rbd-import-export.yaml
|
meta:
- desc: |
run basic import/export cli tests for rbd
stress-tasks:
- workunit:
branch: quincy
clients:
client.0:
- rbd/import_export.sh
env:
RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 5-workload"
| 264 | 19.384615 | 52 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/3-stress-tasks/rbd_api.yaml
|
meta:
- desc: |
librbd C and C++ api tests
stress-tasks:
- workunit:
branch: quincy
clients:
client.0:
- rbd/test_librbd.sh
- print: "**** done rbd/test_librbd.sh 7-workload"
| 207 | 17.909091 | 50 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/3-stress-tasks/readwrite.yaml
|
meta:
- desc: |
randomized correctness test for rados operations on a replicated pool,
using only reads, writes, and deletes
stress-tasks:
- full_sequential:
- rados:
clients: [client.0]
ops: 4000
objects: 500
write_append_excl: false
op_weights:
read: 45
write: 45
delete: 10
- print: "**** done rados/readwrite 5-workload"
| 387 | 21.823529 | 73 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/3-stress-tasks/snaps-few-objects.yaml
|
meta:
- desc: |
randomized correctness test for rados operations on a replicated pool with snapshot operations
stress-tasks:
- full_sequential:
- rados:
clients: [client.0]
ops: 4000
objects: 50
write_append_excl: false
op_weights:
read: 100
write: 100
delete: 50
snap_create: 50
snap_remove: 50
rollback: 50
- print: "**** done rados/snaps-few-objects 5-workload"
| 448 | 22.631579 | 97 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/4-second-half-tasks/radosbench.yaml
|
meta:
- desc: |
run randomized correctness test for rados operations
generate write load with rados bench
second-half-tasks:
- full_sequential:
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- radosbench:
clients: [client.0]
time: 90
- print: "**** done end radosbench.yaml"
| 362 | 20.352941 | 55 |
yaml
|
null |
ceph-main/qa/suites/upgrade/quincy-x/stress-split/4-second-half-tasks/rbd-import-export.yaml
|
meta:
- desc: |
run basic import/export cli tests for rbd
second-half-tasks:
- workunit:
branch: quincy
clients:
client.0:
- rbd/import_export.sh
env:
RBD_CREATE_ARGS: --new-format
- print: "**** done rbd/import_export.sh 5-workload"
| 269 | 19.769231 | 52 |
yaml
|
null |
ceph-main/qa/suites/upgrade/telemetry-upgrade/pacific-x/0-start.yaml
|
roles:
- - mon.a
- mon.c
- mgr.y
- mds.a
- osd.0
- osd.1
- osd.2
- osd.3
- client.0
- node-exporter.a
- alertmanager.a
- - mon.b
- mds.b
- mgr.x
- osd.4
- osd.5
- osd.6
- osd.7
- client.1
- prometheus.a
- grafana.a
- node-exporter.b
openstack:
- volumes: # attached to each instance
count: 4
size: 10 # GB
overrides:
ceph:
create_rbd_pool: true
conf:
osd:
osd shutdown pgref assert: true
| 460 | 12.558824 | 39 |
yaml
|
null |
ceph-main/qa/suites/upgrade/telemetry-upgrade/pacific-x/1-tasks.yaml
|
tasks:
- install:
branch: pacific
exclude_packages:
- ceph-volume
- print: "**** done install task..."
- print: "**** done start installing pacific cephadm ..."
- cephadm:
image: quay.io/ceph/daemon-base:latest-pacific
cephadm_branch: pacific
cephadm_git_url: https://github.com/ceph/ceph
conf:
osd:
#set config option for which cls modules are allowed to be loaded / used
osd_class_load_list: "*"
osd_class_default_list: "*"
- print: "**** done end installing pacific cephadm ..."
- print: "**** done start cephadm.shell ceph config set mgr..."
- cephadm.shell:
mon.a:
- ceph config set mgr mgr/cephadm/use_repo_digest true --force
- print: "**** done cephadm.shell ceph config set mgr..."
- print: "**** done start telemetry pacific..."
- workunit:
clients:
client.0:
- test_telemetry_pacific.sh
- print: "**** done end telemetry pacific..."
- print: "**** done start upgrade sequence..."
- sequential:
- print: "**** done start upgrade..."
- cephadm.shell:
env: [sha1]
mon.a:
- ceph config set global log_to_journald false --force
- ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
- ceph orch ps
- ceph versions
- ceph versions | jq -e '.overall | length == 1'
- ceph versions | jq -e '.overall | keys' | grep $sha1
- print: "**** done end upgrade..."
- print: "**** done start telemetry x..."
- workunit:
clients:
client.0:
- test_telemetry_pacific_x.sh
- print: "**** done end telemetry x..."
- print: "**** done end upgrade sequence..."
| 1,823 | 32.163636 | 128 |
yaml
|
null |
ceph-main/qa/suites/upgrade/telemetry-upgrade/quincy-x/0-start.yaml
|
roles:
- - mon.a
- mon.c
- mgr.y
- mds.a
- osd.0
- osd.1
- osd.2
- osd.3
- client.0
- node-exporter.a
- alertmanager.a
- - mon.b
- mds.b
- mgr.x
- osd.4
- osd.5
- osd.6
- osd.7
- client.1
- prometheus.a
- grafana.a
- node-exporter.b
openstack:
- volumes: # attached to each instance
count: 4
size: 10 # GB
overrides:
ceph:
create_rbd_pool: true
conf:
osd:
osd shutdown pgref assert: true
| 460 | 12.558824 | 39 |
yaml
|
null |
ceph-main/qa/suites/upgrade/telemetry-upgrade/quincy-x/1-tasks.yaml
|
tasks:
- install:
branch: quincy
exclude_packages:
- ceph-volume
- print: "**** done install task..."
- print: "**** done start installing quincy cephadm ..."
- cephadm:
image: quay.io/ceph/daemon-base:latest-quincy
cephadm_branch: quincy
cephadm_git_url: https://github.com/ceph/ceph
conf:
osd:
#set config option for which cls modules are allowed to be loaded / used
osd_class_load_list: "*"
osd_class_default_list: "*"
- print: "**** done end installing quincy cephadm ..."
- print: "**** done start cephadm.shell ceph config set mgr..."
- cephadm.shell:
mon.a:
- ceph config set mgr mgr/cephadm/use_repo_digest true --force
- print: "**** done cephadm.shell ceph config set mgr..."
- print: "**** done start telemetry quincy..."
- workunit:
clients:
client.0:
- test_telemetry_quincy.sh
- print: "**** done end telemetry quincy..."
- print: "**** done start upgrade sequence..."
- sequential:
- print: "**** done start upgrade..."
- cephadm.shell:
env: [sha1]
mon.a:
- ceph config set global log_to_journald false --force
- ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
- ceph orch ps
- ceph versions
- ceph versions | jq -e '.overall | length == 1'
- ceph versions | jq -e '.overall | keys' | grep $sha1
- print: "**** done end upgrade..."
- print: "**** done start telemetry x..."
- workunit:
clients:
client.0:
- test_telemetry_quincy_x.sh
- print: "**** done end telemetry x..."
- print: "**** done end upgrade sequence..."
| 1,813 | 32.592593 | 128 |
yaml
|
null |
ceph-main/qa/suites/windows/basic/ubuntu_latest.yaml
|
.qa/distros/supported/ubuntu_latest.yaml
| 40 | 40 | 40 |
yaml
|
null |
ceph-main/qa/suites/windows/basic/clusters/fixed-1.yaml
|
.qa/clusters/fixed-1.yaml
| 25 | 25 | 25 |
yaml
|
null |
ceph-main/qa/suites/windows/basic/install/install.yaml
|
tasks:
- install:
- ceph:
| 26 | 5.75 | 10 |
yaml
|
null |
ceph-main/qa/suites/windows/basic/tasks/windows_tests.yaml
|
tasks:
- workunit:
clients:
client.0:
- windows/libvirt_vm/setup.sh
- windows/run-tests.sh
| 117 | 15.857143 | 37 |
yaml
|
null |
ceph-main/qa/tasks/__init__.py
|
import logging
# Inherit teuthology's log level
teuthology_log = logging.getLogger('teuthology')
log = logging.getLogger(__name__)
log.setLevel(teuthology_log.level)
| 167 | 23 | 48 |
py
|
null |
ceph-main/qa/tasks/admin_socket.py
|
"""
Admin Socket task -- used in rados, powercycle, and smoke testing
"""
import json
import logging
import os
import time
from teuthology.exceptions import CommandFailedError
from teuthology.orchestra import run
from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.config import config as teuth_config
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Run an admin socket command, make sure the output is json, and run
a test program on it. The test program should read json from
stdin. This task succeeds if the test program exits with status 0.
To run the same test on all clients::
tasks:
- ceph:
- rados:
- admin_socket:
all:
dump_requests:
test: http://example.com/script
To restrict it to certain clients::
tasks:
- ceph:
- rados: [client.1]
- admin_socket:
client.1:
dump_requests:
test: http://example.com/script
If an admin socket command has arguments, they can be specified as
a list::
tasks:
- ceph:
- rados: [client.0]
- admin_socket:
client.0:
dump_requests:
test: http://example.com/script
help:
test: http://example.com/test_help_version
args: [version]
Note that there must be a ceph client with an admin socket running
before this task is run. The tests are parallelized at the client
level. Tests for a single client are run serially.
:param ctx: Context
:param config: Configuration
"""
assert isinstance(config, dict), \
'admin_socket task requires a dict for configuration'
teuthology.replace_all_with_clients(ctx.cluster, config)
with parallel() as ptask:
for client, tests in config.items():
ptask.spawn(_run_tests, ctx, client, tests)
def _socket_command(ctx, remote, socket_path, command, args):
"""
Run an admin socket command and return the result as a string.
:param ctx: Context
:param remote: Remote site
:param socket_path: path to socket
:param command: command to be run remotely
:param args: command arguments
:returns: output of command in json format
"""
testdir = teuthology.get_testdir(ctx)
max_tries = 120
sub_commands = [c.strip() for c in command.split('||')]
ex = None
for _ in range(max_tries):
for sub_command in sub_commands:
try:
out = remote.sh([
'sudo',
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'ceph',
'--admin-daemon', socket_path,
] + sub_command.split(' ') + args)
except CommandFailedError as e:
ex = e
log.info('ceph cli "%s" returned an error %s, '
'command not registered yet?', sub_command, e)
else:
log.debug('admin socket command %s returned %s',
sub_command, out)
return json.loads(out)
else:
# exhausted all commands
log.info('sleeping and retrying ...')
time.sleep(1)
else:
# i tried max_tries times..
assert ex is not None
raise ex
def _run_tests(ctx, client, tests):
"""
Create a temp directory and wait for a client socket to be created.
For each test, copy the executable locally and run the test.
Remove temp directory when finished.
:param ctx: Context
:param client: client machine to run the test
:param tests: list of tests to run
"""
testdir = teuthology.get_testdir(ctx)
log.debug('Running admin socket tests on %s', client)
(remote,) = ctx.cluster.only(client).remotes.keys()
socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client)
overrides = ctx.config.get('overrides', {}).get('admin_socket', {})
try:
tmp_dir = os.path.join(
testdir,
'admin_socket_{client}'.format(client=client),
)
remote.run(
args=[
'mkdir',
'--',
tmp_dir,
run.Raw('&&'),
# wait for client process to create the socket
'while', 'test', '!', '-e', socket_path, run.Raw(';'),
'do', 'sleep', '1', run.Raw(';'), 'done',
],
)
for command, config in tests.items():
if config is None:
config = {}
teuthology.deep_merge(config, overrides)
log.debug('Testing %s with config %s', command, str(config))
test_path = None
if 'test' in config:
# hack: the git_url is always ceph-ci or ceph
git_url = teuth_config.get_ceph_git_url()
repo_name = 'ceph.git'
if git_url.count('ceph-ci'):
repo_name = 'ceph-ci.git'
url = config['test'].format(
branch=config.get('branch', 'master'),
repo=repo_name,
)
test_path = os.path.join(tmp_dir, command)
remote.run(
args=[
'wget',
'-q',
'-O',
test_path,
'--',
url,
run.Raw('&&'),
'chmod',
'u=rx',
'--',
test_path,
],
)
args = config.get('args', [])
assert isinstance(args, list), \
'admin socket command args must be a list'
sock_out = _socket_command(ctx, remote, socket_path, command, args)
if test_path is not None:
remote.run(
args=[
test_path,
],
stdin=json.dumps(sock_out),
)
finally:
remote.run(
args=[
'rm', '-rf', '--', tmp_dir,
],
)
| 6,476 | 30.595122 | 79 |
py
|
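As described in the docstring of admin_socket.py above, each `test:` URL is downloaded, made executable, and fed the admin-socket command's JSON output on stdin; exit status 0 means the test passed. A minimal sketch of such a test script (hypothetical, not a script from the ceph tree):

```python
#!/usr/bin/env python3
# Hypothetical admin_socket test script: the task above downloads it, makes it
# executable, and pipes the admin-socket command's JSON output to it on stdin;
# exiting 0 marks the test as passed.
import json
import sys

try:
    data = json.load(sys.stdin)
except ValueError as e:
    print("admin socket output was not valid JSON: %s" % e, file=sys.stderr)
    sys.exit(1)

# A real test would assert on the structure of `data`; this sketch only
# requires that the daemon produced some JSON document at all.
sys.exit(0 if data is not None else 1)
```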
null |
ceph-main/qa/tasks/autotest.py
|
"""
Run an autotest test on the ceph cluster.
"""
import json
import logging
import os
from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.orchestra import run
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Run an autotest test on the ceph cluster.
Only autotest client tests are supported.
The config is a mapping from role name to list of tests to run on
that client.
For example::
tasks:
- ceph:
- ceph-fuse: [client.0, client.1]
- autotest:
client.0: [dbench]
client.1: [bonnie]
You can also specify a list of tests to run on all clients::
tasks:
- ceph:
- ceph-fuse:
- autotest:
all: [dbench]
"""
assert isinstance(config, dict)
config = teuthology.replace_all_with_clients(ctx.cluster, config)
log.info('Setting up autotest...')
testdir = teuthology.get_testdir(ctx)
with parallel() as p:
for role in config.keys():
(remote,) = ctx.cluster.only(role).remotes.keys()
p.spawn(_download, testdir, remote)
log.info('Making a separate scratch dir for every client...')
for role in config.keys():
assert isinstance(role, str)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
(remote,) = ctx.cluster.only(role).remotes.keys()
mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
remote.run(
args=[
'sudo',
'install',
'-d',
'-m', '0755',
'--owner={user}'.format(user='ubuntu'), #TODO
'--',
scratch,
],
)
with parallel() as p:
for role, tests in config.items():
(remote,) = ctx.cluster.only(role).remotes.keys()
p.spawn(_run_tests, testdir, remote, role, tests)
def _download(testdir, remote):
"""
Download. Does not explicitly support multiple tasks in a single run.
"""
remote.run(
args=[
# explicitly does not support multiple autotest tasks
# in a single run; the result archival would conflict
'mkdir', '{tdir}/archive/autotest'.format(tdir=testdir),
run.Raw('&&'),
'mkdir', '{tdir}/autotest'.format(tdir=testdir),
run.Raw('&&'),
'wget',
'-nv',
'--no-check-certificate',
'https://github.com/ceph/autotest/tarball/ceph',
'-O-',
run.Raw('|'),
'tar',
'-C', '{tdir}/autotest'.format(tdir=testdir),
'-x',
'-z',
'-f-',
'--strip-components=1',
],
)
def _run_tests(testdir, remote, role, tests):
"""
Spawned to run test on remote site
"""
assert isinstance(role, str)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
assert isinstance(tests, list)
for idx, testname in enumerate(tests):
log.info('Running autotest client test #%d: %s...', idx, testname)
tag = 'client.{id}.num{idx}.{testname}'.format(
idx=idx,
testname=testname,
id=id_,
)
control = '{tdir}/control.{tag}'.format(tdir=testdir, tag=tag)
remote.write_file(
path=control,
data='import json; data=json.loads({data!r}); job.run_test(**data)'.format(
data=json.dumps(dict(
url=testname,
dir=scratch,
# TODO perhaps tag
# results will be in {testdir}/autotest/client/results/dbench
# or {testdir}/autotest/client/results/dbench.{tag}
)),
),
)
remote.run(
args=[
'{tdir}/autotest/client/bin/autotest'.format(tdir=testdir),
'--verbose',
'--harness=simple',
'--tag={tag}'.format(tag=tag),
control,
run.Raw('3>&1'),
],
)
remote.run(
args=[
'rm', '-rf', '--', control,
],
)
remote.run(
args=[
'mv',
'--',
'{tdir}/autotest/client/results/{tag}'.format(tdir=testdir, tag=tag),
'{tdir}/archive/autotest/{tag}'.format(tdir=testdir, tag=tag),
],
)
remote.run(
args=[
'rm', '-rf', '--', '{tdir}/autotest'.format(tdir=testdir),
],
)
| 4,983 | 29.024096 | 87 |
py
|
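autotest.py above drives each test through a generated one-line control file. The sketch below simply renders that same template locally for a hypothetical `dbench` run on client.0 (the scratch path is made up) to show what the remote control file ends up containing:

```python
import json

# Hypothetical inputs matching what _run_tests() above derives for test #0.
testname = "dbench"
scratch = "/tmp/teuthology/mnt.0/client.0"   # illustrative scratch path

# Same template string the task writes to the remote control file.
control_data = (
    "import json; data=json.loads({data!r}); job.run_test(**data)".format(
        data=json.dumps(dict(url=testname, dir=scratch)),
    )
)

print(control_data)
# import json; data=json.loads('{"url": "dbench", "dir": "/tmp/teuthology/mnt.0/client.0"}'); job.run_test(**data)
```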
null |
ceph-main/qa/tasks/aver.py
|
"""
Aver wrapper task
"""
import contextlib
import logging
from subprocess import check_call, Popen, PIPE
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Execute an aver assertion
Parameters:
input: file containing data referred to by the assertions. File name is
relative to the job's archive path
validations: list of validations in the Aver language
Example:
- aver:
input: bench_output.csv
validations:
- expect performance(alg='ceph') > performance(alg='raw')
- for size > 3 expect avg_throughput > 2000
"""
log.info('Beginning aver...')
assert isinstance(config, dict), 'expecting dictionary for configuration'
if 'input' not in config:
raise Exception("Expecting 'input' option")
if len(config.get('validations', [])) < 1:
raise Exception("Expecting at least one entry in 'validations'")
url = ('https://github.com/ivotron/aver/releases/download/'
'v0.3.0/aver-linux-amd64.tar.bz2')
aver_path = ctx.archive + '/aver'
# download binary
check_call(['wget', '-O', aver_path + '.tbz', url])
check_call(['tar', 'xfj', aver_path + '.tbz', '-C', ctx.archive])
# print version
process = Popen([aver_path, '-v'], stdout=PIPE)
log.info(process.communicate()[0])
# validate
for validation in config['validations']:
cmd = (aver_path + ' -s -i ' + (ctx.archive + '/' + config['input']) +
' "' + validation + '"')
log.info("executing: " + cmd)
# universal_newlines so stdout/stderr come back as str, not bytes,
# for the string operations below
process = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True, universal_newlines=True)
(stdout, stderr) = process.communicate()
if stderr:
log.info('aver stderr: ' + stderr)
log.info('aver result: ' + stdout)
if stdout.strip(' \t\n\r') != 'true':
raise Exception('Failed validation: ' + validation)
try:
yield
finally:
log.info('Removing aver binary...')
check_call(['rm', aver_path, aver_path + '.tbz'])
| 2,064 | 29.367647 | 79 |
py
|
null |
ceph-main/qa/tasks/backfill_toofull.py
|
"""
Backfill_toofull
"""
import logging
import time
from tasks import ceph_manager
from tasks.util.rados import rados
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def wait_for_pg_state(manager, pgid, state, to_osd):
log.debug("waiting for pg %s state is %s" % (pgid, state))
for i in range(300):
time.sleep(5)
manager.flush_pg_stats([0, 1, 2, 3])
pgs = manager.get_pg_stats()
pg = next((pg for pg in pgs if pg['pgid'] == pgid), None)
log.info('pg=%s' % pg)
assert pg
status = pg['state'].split('+')
if 'active' not in status:
log.debug('not active')
continue
if state not in status:
log.debug('not %s' % state)
continue
assert to_osd in pg['up']
return
assert False, '%s not in %s' % (pgid, state)
def task(ctx, config):
"""
Test backfill reservation calculates "toofull" condition correctly.
A pretty rigid cluster is brought up and tested by this task
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'backfill_toofull task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
profile = config.get('erasure_code_profile', {
'k': '2',
'm': '1',
'crush-failure-domain': 'osd'
})
profile_name = profile.get('name', 'backfill_toofull')
manager.create_erasure_code_profile(profile_name, profile)
pool = manager.create_pool_with_unique_name(
pg_num=1,
erasure_code_profile_name=profile_name,
min_size=2)
manager.raw_cluster_cmd('osd', 'pool', 'set', pool,
'pg_autoscale_mode', 'off')
manager.flush_pg_stats([0, 1, 2, 3])
manager.wait_for_clean()
pool_id = manager.get_pool_num(pool)
pgid = '%d.0' % pool_id
pgs = manager.get_pg_stats()
acting = next((pg['acting'] for pg in pgs if pg['pgid'] == pgid), None)
log.debug("acting=%s" % acting)
assert acting
primary = acting[0]
target = acting[1]
log.debug("write some data")
rados(ctx, mon, ['-p', pool, 'bench', '120', 'write', '--no-cleanup'])
df = manager.get_osd_df(target)
log.debug("target osd df: %s" % df)
total_kb = df['kb']
used_kb = df['kb_used']
log.debug("pause recovery")
manager.raw_cluster_cmd('osd', 'set', 'noout')
manager.raw_cluster_cmd('osd', 'set', 'nobackfill')
manager.raw_cluster_cmd('osd', 'set', 'norecover')
log.debug("stop tartget osd %s" % target)
manager.kill_osd(target)
manager.wait_till_active()
pgs = manager.get_pg_stats()
pg = next((pg for pg in pgs if pg['pgid'] == pgid), None)
log.debug('pg=%s' % pg)
assert pg
log.debug("re-write data")
rados(ctx, mon, ['-p', pool, 'cleanup'])
time.sleep(10)
rados(ctx, mon, ['-p', pool, 'bench', '60', 'write', '--no-cleanup'])
df = manager.get_osd_df(primary)
log.debug("primary osd df: %s" % df)
primary_used_kb = df['kb_used']
log.info("test backfill reservation rejected with toofull")
# We set backfillfull ratio less than new data size and expect the pg
# entering backfill_toofull state.
#
# We also need to update nearfull ratio to prevent "full ratio(s) out of order".
backfillfull = 0.9 * primary_used_kb / total_kb
nearfull = backfillfull * 0.9
log.debug("update nearfull ratio to %s and backfillfull ratio to %s" %
(nearfull, backfillfull))
manager.raw_cluster_cmd('osd', 'set-nearfull-ratio',
'{:.3f}'.format(nearfull + 0.001))
manager.raw_cluster_cmd('osd', 'set-backfillfull-ratio',
'{:.3f}'.format(backfillfull + 0.001))
log.debug("start tartget osd %s" % target)
manager.revive_osd(target)
manager.wait_for_active()
manager.wait_till_osd_is_up(target)
wait_for_pg_state(manager, pgid, 'backfill_toofull', target)
log.info("test pg not enter backfill_toofull after restarting backfill")
# We want to set backfillfull ratio to be big enough for the target to
# successfully backfill new data but smaller than the sum of old and new
# data, so if the osd backfill reservation incorrectly calculates "toofull"
# the test will detect this (fail).
#
# Note, we need to operate with "uncompressed" bytes because currently
# osd backfill reservation does not take compression into account.
#
# We also need to update nearfull ratio to prevent "full ratio(s) out of order".
pdf = manager.get_pool_df(pool)
log.debug("pool %s df: %s" % (pool, pdf))
assert pdf
compress_ratio = 1.0 * pdf['compress_under_bytes'] / pdf['compress_bytes_used'] \
if pdf['compress_bytes_used'] > 0 else 1.0
log.debug("compress_ratio: %s" % compress_ratio)
backfillfull = (used_kb + primary_used_kb) * compress_ratio / total_kb
assert backfillfull < 0.9
nearfull_min = max(used_kb, primary_used_kb) * compress_ratio / total_kb
assert nearfull_min < backfillfull
delta = backfillfull - nearfull_min
nearfull = nearfull_min + delta * 0.1
backfillfull = nearfull_min + delta * 0.2
log.debug("update nearfull ratio to %s and backfillfull ratio to %s" %
(nearfull, backfillfull))
manager.raw_cluster_cmd('osd', 'set-nearfull-ratio',
'{:.3f}'.format(nearfull + 0.001))
manager.raw_cluster_cmd('osd', 'set-backfillfull-ratio',
'{:.3f}'.format(backfillfull + 0.001))
wait_for_pg_state(manager, pgid, 'backfilling', target)
pgs = manager.get_pg_stats()
pg = next((pg for pg in pgs if pg['pgid'] == pgid), None)
log.debug('pg=%s' % pg)
assert pg
log.debug("interrupt %s backfill" % target)
manager.mark_down_osd(target)
# after marking the target osd down it will automatically be
# up soon again
log.debug("resume recovery")
manager.raw_cluster_cmd('osd', 'unset', 'noout')
manager.raw_cluster_cmd('osd', 'unset', 'nobackfill')
manager.raw_cluster_cmd('osd', 'unset', 'norecover')
# wait for everything to peer, backfill and recover
manager.wait_for_clean()
pgs = manager.get_pg_stats()
pg = next((pg for pg in pgs if pg['pgid'] == pgid), None)
log.info('pg=%s' % pg)
assert pg
assert 'clean' in pg['state'].split('+')
| 6,634 | 33.201031 | 85 |
py
|
null |
ceph-main/qa/tasks/barbican.py
|
"""
Deploy and configure Barbican for Teuthology
"""
import argparse
import contextlib
import logging
import http
import json
import time
import math
from urllib.parse import urlparse
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
from teuthology.exceptions import ConfigError
log = logging.getLogger(__name__)
@contextlib.contextmanager
def download(ctx, config):
"""
    Download Barbican from github.
Remove downloaded file upon exit.
The context passed in should be identical to the context
passed in to the main task.
"""
assert isinstance(config, dict)
log.info('Downloading barbican...')
testdir = teuthology.get_testdir(ctx)
for (client, cconf) in config.items():
branch = cconf.get('force-branch', 'master')
log.info("Using branch '%s' for barbican", branch)
sha1 = cconf.get('sha1')
log.info('sha1=%s', sha1)
ctx.cluster.only(client).run(
args=[
'bash', '-l'
],
)
ctx.cluster.only(client).run(
args=[
'git', 'clone',
'-b', branch,
'https://github.com/openstack/barbican.git',
'{tdir}/barbican'.format(tdir=testdir),
],
)
if sha1 is not None:
ctx.cluster.only(client).run(
args=[
'cd', '{tdir}/barbican'.format(tdir=testdir),
run.Raw('&&'),
'git', 'reset', '--hard', sha1,
],
)
try:
yield
finally:
log.info('Removing barbican...')
testdir = teuthology.get_testdir(ctx)
for client in config:
ctx.cluster.only(client).run(
args=[
'rm',
'-rf',
'{tdir}/barbican'.format(tdir=testdir),
],
)
def get_barbican_dir(ctx):
return '{tdir}/barbican'.format(tdir=teuthology.get_testdir(ctx))
def run_in_barbican_dir(ctx, client, args):
ctx.cluster.only(client).run(
args=['cd', get_barbican_dir(ctx), run.Raw('&&'), ] + args,
)
def run_in_barbican_venv(ctx, client, args):
run_in_barbican_dir(ctx, client,
['.',
'.barbicanenv/bin/activate',
run.Raw('&&')
] + args)
@contextlib.contextmanager
def setup_venv(ctx, config):
"""
Setup the virtualenv for Barbican using pip.
"""
assert isinstance(config, dict)
log.info('Setting up virtualenv for barbican...')
for (client, _) in config.items():
run_in_barbican_dir(ctx, client,
['python3', '-m', 'venv', '.barbicanenv'])
run_in_barbican_venv(ctx, client,
['pip', 'install', '--upgrade', 'pip'])
run_in_barbican_venv(ctx, client,
['pip', 'install', 'pytz',
'-e', get_barbican_dir(ctx)])
yield
def assign_ports(ctx, config, initial_port):
"""
Assign port numbers starting from @initial_port
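
    For example (illustrative hosts), with initial_port=9311 and two matching
    client roles on separate remotes this returns
    {'client.0': ('host1', 9311), 'client.1': ('host2', 9312)}.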
"""
port = initial_port
role_endpoints = {}
for remote, roles_for_host in ctx.cluster.remotes.items():
for role in roles_for_host:
if role in config:
role_endpoints[role] = (remote.name.split('@')[1], port)
port += 1
return role_endpoints
def set_authtoken_params(ctx, cclient, cconfig):
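    # The sed expressions below locate the [filter:authtoken] section header
    # in barbican-api-paste.ini and insert a "name = value" option line
    # directly after it.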
section_config_list = cconfig['keystone_authtoken'].items()
for config in section_config_list:
(name, val) = config
run_in_barbican_dir(ctx, cclient,
['sed', '-i',
'/[[]filter:authtoken]/{p;s##'+'{} = {}'.format(name, val)+'#;}',
'etc/barbican/barbican-api-paste.ini'])
keystone_role = cconfig.get('use-keystone-role', None)
public_host, public_port = ctx.keystone.public_endpoints[keystone_role]
url = 'http://{host}:{port}/v3'.format(host=public_host,
port=public_port)
run_in_barbican_dir(ctx, cclient,
['sed', '-i',
'/[[]filter:authtoken]/{p;s##'+'auth_uri = {}'.format(url)+'#;}',
'etc/barbican/barbican-api-paste.ini'])
admin_host, admin_port = ctx.keystone.admin_endpoints[keystone_role]
admin_url = 'http://{host}:{port}/v3'.format(host=admin_host,
port=admin_port)
run_in_barbican_dir(ctx, cclient,
['sed', '-i',
'/[[]filter:authtoken]/{p;s##'+'auth_url = {}'.format(admin_url)+'#;}',
'etc/barbican/barbican-api-paste.ini'])
def fix_barbican_api_paste(ctx, cclient):
run_in_barbican_dir(ctx, cclient,
['sed', '-i', '-n',
'/\\[pipeline:barbican_api]/ {p;n; /^pipeline =/ '+
'{ s/.*/pipeline = unauthenticated-context apiapp/;p;d } } ; p',
'./etc/barbican/barbican-api-paste.ini'])
def fix_barbican_api(ctx, cclient):
run_in_barbican_dir(ctx, cclient,
['sed', '-i',
'/prop_dir =/ s#etc/barbican#{}/etc/barbican#'.format(get_barbican_dir(ctx)),
'bin/barbican-api'])
def create_barbican_conf(ctx, cclient):
barbican_host, barbican_port = ctx.barbican.endpoints[cclient]
barbican_url = 'http://{host}:{port}'.format(host=barbican_host,
port=barbican_port)
log.info("barbican url=%s", barbican_url)
run_in_barbican_dir(ctx, cclient,
['bash', '-c',
'echo -n -e "[DEFAULT]\nhost_href=' + barbican_url + '\n" ' + \
'>barbican.conf'])
log.info("run barbican db upgrade")
config_path = get_barbican_dir(ctx) + '/barbican.conf'
run_in_barbican_venv(ctx, cclient, ['barbican-manage', '--config-file', config_path,
'db', 'upgrade'])
log.info("run barbican db sync_secret_stores")
run_in_barbican_venv(ctx, cclient, ['barbican-manage', '--config-file', config_path,
'db', 'sync_secret_stores'])
@contextlib.contextmanager
def configure_barbican(ctx, config):
"""
Configure barbican paste-api and barbican-api.
"""
assert isinstance(config, dict)
(cclient, cconfig) = next(iter(config.items()))
keystone_role = cconfig.get('use-keystone-role', None)
if keystone_role is None:
raise ConfigError('use-keystone-role not defined in barbican task')
set_authtoken_params(ctx, cclient, cconfig)
fix_barbican_api(ctx, cclient)
fix_barbican_api_paste(ctx, cclient)
create_barbican_conf(ctx, cclient)
try:
yield
finally:
pass
@contextlib.contextmanager
def run_barbican(ctx, config):
assert isinstance(config, dict)
log.info('Running barbican...')
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
cluster_name, _, client_id = teuthology.split_role(client)
# start the public endpoint
client_public_with_id = 'barbican.public' + '.' + client_id
run_cmd = ['cd', get_barbican_dir(ctx), run.Raw('&&'),
'.', '.barbicanenv/bin/activate', run.Raw('&&'),
'HOME={}'.format(get_barbican_dir(ctx)), run.Raw('&&'),
'bin/barbican-api',
run.Raw('& { read; kill %1; }')]
#run.Raw('1>/dev/null')
run_cmd = 'cd ' + get_barbican_dir(ctx) + ' && ' + \
'. .barbicanenv/bin/activate && ' + \
'HOME={}'.format(get_barbican_dir(ctx)) + ' && ' + \
'exec bin/barbican-api & { read; kill %1; }'
ctx.daemons.add_daemon(
remote, 'barbican', client_public_with_id,
cluster=cluster_name,
args=['bash', '-c', run_cmd],
logger=log.getChild(client),
stdin=run.PIPE,
cwd=get_barbican_dir(ctx),
wait=False,
check_status=False,
)
# sleep driven synchronization
run_in_barbican_venv(ctx, client, ['sleep', '15'])
try:
yield
finally:
log.info('Stopping Barbican instance')
ctx.daemons.get_daemon('barbican', client_public_with_id,
cluster_name).stop()
@contextlib.contextmanager
def create_secrets(ctx, config):
"""
    Create barbican secrets for radosgw to use and grant the rgw user read
    access to them.
"""
assert isinstance(config, dict)
(cclient, cconfig) = next(iter(config.items()))
rgw_user = cconfig['rgw_user']
keystone_role = cconfig.get('use-keystone-role', None)
keystone_host, keystone_port = ctx.keystone.public_endpoints[keystone_role]
barbican_host, barbican_port = ctx.barbican.endpoints[cclient]
barbican_url = 'http://{host}:{port}'.format(host=barbican_host,
port=barbican_port)
log.info("barbican_url=%s", barbican_url)
    # fetch the user_id of the user that will read secrets for radosgw
token_req = http.client.HTTPConnection(keystone_host, keystone_port, timeout=30)
token_req.request(
'POST',
'/v3/auth/tokens',
headers={'Content-Type':'application/json'},
body=json.dumps({
"auth": {
"identity": {
"methods": ["password"],
"password": {
"user": {
"domain": {"id": "default"},
"name": rgw_user["username"],
"password": rgw_user["password"]
}
}
},
"scope": {
"project": {
"domain": {"id": "default"},
"name": rgw_user["tenantName"]
}
}
}
}))
rgw_access_user_resp = token_req.getresponse()
if not (rgw_access_user_resp.status >= 200 and
rgw_access_user_resp.status < 300):
raise Exception("Cannot authenticate user "+rgw_user["username"]+" for secret creation")
# baru_resp = json.loads(baru_req.data)
rgw_access_user_data = json.loads(rgw_access_user_resp.read().decode())
rgw_user_id = rgw_access_user_data['token']['user']['id']
if 'secrets' in cconfig:
for secret in cconfig['secrets']:
if 'name' not in secret:
raise ConfigError('barbican.secrets must have "name" field')
if 'base64' not in secret:
raise ConfigError('barbican.secrets must have "base64" field')
if 'tenantName' not in secret:
raise ConfigError('barbican.secrets must have "tenantName" field')
if 'username' not in secret:
raise ConfigError('barbican.secrets must have "username" field')
if 'password' not in secret:
raise ConfigError('barbican.secrets must have "password" field')
token_req = http.client.HTTPConnection(keystone_host, keystone_port, timeout=30)
token_req.request(
'POST',
'/v3/auth/tokens',
headers={'Content-Type':'application/json'},
body=json.dumps({
"auth": {
"identity": {
"methods": ["password"],
"password": {
"user": {
"domain": {"id": "default"},
"name": secret["username"],
"password": secret["password"]
}
}
},
"scope": {
"project": {
"domain": {"id": "default"},
"name": secret["tenantName"]
}
}
}
}))
token_resp = token_req.getresponse()
if not (token_resp.status >= 200 and
token_resp.status < 300):
raise Exception("Cannot authenticate user "+secret["username"]+" for secret creation")
expire = time.time() + 5400 # now + 90m
(expire_fract,dummy) = math.modf(expire)
expire_format = "%%FT%%T.%06d" % (round(expire_fract*1000000))
expiration = time.strftime(expire_format, time.gmtime(expire))
token_id = token_resp.getheader('x-subject-token')
key1_json = json.dumps(
{
"name": secret['name'],
"expiration": expiration,
"algorithm": "aes",
"bit_length": 256,
"mode": "cbc",
"payload": secret['base64'],
"payload_content_type": "application/octet-stream",
"payload_content_encoding": "base64"
})
sec_req = http.client.HTTPConnection(barbican_host, barbican_port, timeout=30)
try:
sec_req.request(
'POST',
'/v1/secrets',
headers={'Content-Type': 'application/json',
'Accept': '*/*',
'X-Auth-Token': token_id},
body=key1_json
)
except:
log.info("catched exception!")
run_in_barbican_venv(ctx, cclient, ['sleep', '900'])
barbican_sec_resp = sec_req.getresponse()
if not (barbican_sec_resp.status >= 200 and
barbican_sec_resp.status < 300):
raise Exception("Cannot create secret")
barbican_data = json.loads(barbican_sec_resp.read().decode())
if 'secret_ref' not in barbican_data:
raise ValueError("Malformed secret creation response")
secret_ref = barbican_data["secret_ref"]
log.info("secret_ref=%s", secret_ref)
secret_url_parsed = urlparse(secret_ref)
acl_json = json.dumps(
{
"read": {
"users": [rgw_user_id],
"project-access": True
}
})
acl_req = http.client.HTTPConnection(secret_url_parsed.netloc, timeout=30)
acl_req.request(
'PUT',
secret_url_parsed.path+'/acl',
headers={'Content-Type': 'application/json',
'Accept': '*/*',
'X-Auth-Token': token_id},
body=acl_json
)
barbican_acl_resp = acl_req.getresponse()
if not (barbican_acl_resp.status >= 200 and
barbican_acl_resp.status < 300):
raise Exception("Cannot set ACL for secret")
key = {'id': secret_ref.split('secrets/')[1], 'payload': secret['base64']}
ctx.barbican.keys[secret['name']] = key
run_in_barbican_venv(ctx, cclient, ['sleep', '3'])
try:
yield
finally:
pass
@contextlib.contextmanager
def task(ctx, config):
"""
    Deploy and configure Barbican
Example of configuration:
tasks:
- local_cluster:
cluster_path: /home/adam/ceph-1/build
- local_rgw:
- tox: [ client.0 ]
- keystone:
client.0:
sha1: 17.0.0.0rc2
force-branch: master
projects:
- name: rgwcrypt
description: Encryption Tenant
- name: barbican
description: Barbican
- name: s3
description: S3 project
users:
- name: rgwcrypt-user
password: rgwcrypt-pass
project: rgwcrypt
- name: barbican-user
password: barbican-pass
project: barbican
- name: s3-user
password: s3-pass
project: s3
roles: [ name: Member, name: creator ]
role-mappings:
- name: Member
user: rgwcrypt-user
project: rgwcrypt
- name: admin
user: barbican-user
project: barbican
- name: creator
user: s3-user
project: s3
services:
- name: keystone
type: identity
description: Keystone Identity Service
- barbican:
client.0:
force-branch: master
use-keystone-role: client.0
keystone_authtoken:
auth_plugin: password
username: barbican-user
password: barbican-pass
user_domain_name: Default
rgw_user:
tenantName: rgwcrypt
username: rgwcrypt-user
password: rgwcrypt-pass
secrets:
- name: my-key-1
base64: a2V5MS5GcWVxKzhzTGNLaGtzQkg5NGVpb1FKcFpGb2c=
tenantName: s3
username: s3-user
password: s3-pass
- name: my-key-2
base64: a2V5Mi5yNUNNMGFzMVdIUVZxcCt5NGVmVGlQQ1k4YWg=
tenantName: s3
username: s3-user
password: s3-pass
- s3tests:
client.0:
force-branch: master
kms_key: my-key-1
- rgw:
client.0:
use-keystone-role: client.0
use-barbican-role: client.0
"""
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task keystone only supports a list or dictionary for configuration"
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
for client in config.keys():
if not config[client]:
config[client] = {}
teuthology.deep_merge(config[client], overrides.get('barbican', {}))
log.debug('Barbican config is %s', config)
if not hasattr(ctx, 'keystone'):
raise ConfigError('barbican must run after the keystone task')
ctx.barbican = argparse.Namespace()
ctx.barbican.endpoints = assign_ports(ctx, config, 9311)
ctx.barbican.keys = {}
with contextutil.nested(
lambda: download(ctx=ctx, config=config),
lambda: setup_venv(ctx=ctx, config=config),
lambda: configure_barbican(ctx=ctx, config=config),
lambda: run_barbican(ctx=ctx, config=config),
lambda: create_secrets(ctx=ctx, config=config),
):
yield
| 19,518 | 36.108365 | 102 |
py
|
null |
ceph-main/qa/tasks/blktrace.py
|
"""
Run blktrace program through teuthology
"""
import contextlib
import logging
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
log = logging.getLogger(__name__)
blktrace = '/usr/sbin/blktrace'
daemon_signal = 'term'
@contextlib.contextmanager
def setup(ctx, config):
"""
Setup all the remotes
"""
osds = ctx.cluster.only(teuthology.is_type('osd', config['cluster']))
log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx))
for remote, roles_for_host in osds.remotes.items():
log.info('Creating %s on %s' % (log_dir, remote.name))
remote.run(
args=['mkdir', '-p', '-m0755', '--', log_dir],
wait=False,
)
yield
@contextlib.contextmanager
def execute(ctx, config):
"""
Run the blktrace program on remote machines.
"""
procs = []
testdir = teuthology.get_testdir(ctx)
log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir)
osds = ctx.cluster.only(teuthology.is_type('osd'))
for remote, roles_for_host in osds.remotes.items():
roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote]
for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd',
config['cluster']):
if roles_to_devs.get(role):
dev = roles_to_devs[role]
log.info("running blktrace on %s: %s" % (remote.name, dev))
proc = remote.run(
args=[
'cd',
log_dir,
run.Raw(';'),
'daemon-helper',
daemon_signal,
'sudo',
blktrace,
'-o',
dev.rsplit("/", 1)[1],
'-d',
dev,
],
wait=False,
stdin=run.PIPE,
)
procs.append(proc)
try:
yield
finally:
osds = ctx.cluster.only(teuthology.is_type('osd'))
        log.info('stopping blktrace processes')
for proc in procs:
proc.stdin.close()
@contextlib.contextmanager
def task(ctx, config):
"""
Usage:
blktrace:
or:
blktrace:
cluster: backup
Runs blktrace on all osds in the specified cluster (the 'ceph' cluster by
default).
"""
if config is None:
config = {}
config['cluster'] = config.get('cluster', 'ceph')
with contextutil.nested(
lambda: setup(ctx=ctx, config=config),
lambda: execute(ctx=ctx, config=config),
):
yield
| 2,821 | 28.092784 | 92 |
py
|
null |
ceph-main/qa/tasks/cbt.py
|
import logging
import os
import yaml
from teuthology import misc
from teuthology.orchestra import run
from teuthology.task import Task
log = logging.getLogger(__name__)
class CBT(Task):
"""
    Run the Ceph Benchmarking Tool (CBT) against the deployed cluster,
    building its configuration from the teuthology cluster layout and this
    task's yaml fragment.
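
    A minimal task fragment sketch (the benchmark name and settings below
    are illustrative placeholders, not a tested configuration):

        tasks:
        - cbt:
            branch: master
            benchmarks:
              radosbench:
                time: 60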
"""
def __init__(self, ctx, config):
super(CBT, self).__init__(ctx, config)
self.log = log
def hosts_of_type(self, type_):
return [r.name for r in self.ctx.cluster.only(misc.is_type(type_)).remotes.keys()]
def generate_cbt_config(self):
mon_hosts = self.hosts_of_type('mon')
osd_hosts = self.hosts_of_type('osd')
client_hosts = self.hosts_of_type('client')
rgw_client = {}
rgw_client[client_hosts[0]] = None
rgw_hosts = self.config.get('cluster', {}).get('rgws', rgw_client)
cluster_config = dict(
user=self.config.get('cluster', {}).get('user', 'ubuntu'),
head=mon_hosts[0],
osds=osd_hosts,
mons=mon_hosts,
clients=client_hosts,
rgws=rgw_hosts,
osds_per_node=self.config.get('cluster', {}).get('osds_per_node', 1),
rebuild_every_test=False,
use_existing=True,
is_teuthology=self.config.get('cluster', {}).get('is_teuthology', True),
iterations=self.config.get('cluster', {}).get('iterations', 1),
tmp_dir='/tmp/cbt',
pool_profiles=self.config.get('cluster', {}).get('pool_profiles'),
)
benchmark_config = self.config.get('benchmarks')
benchmark_type = next(iter(benchmark_config.keys()))
if benchmark_type in ['librbdfio', 'fio']:
testdir = misc.get_testdir(self.ctx)
benchmark_config[benchmark_type]['cmd_path'] = os.path.join(testdir, 'fio/fio')
if benchmark_type == 'cosbench':
# create cosbench_dir and cosbench_xml_dir
testdir = misc.get_testdir(self.ctx)
benchmark_config['cosbench']['cosbench_dir'] = os.path.join(testdir, 'cos')
benchmark_config['cosbench']['cosbench_xml_dir'] = os.path.join(testdir, 'xml')
self.ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', benchmark_config['cosbench']['cosbench_xml_dir']])
benchmark_config['cosbench']['controller'] = osd_hosts[0]
# set auth details
remotes_and_roles = self.ctx.cluster.remotes.items()
ips = [host for (host, port) in
(remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)]
benchmark_config['cosbench']['auth'] = "username=cosbench:operator;password=intel2012;url=http://%s:80/auth/v1.0;retry=9" %(ips[0])
client_endpoints_config = self.config.get('client_endpoints', None)
return dict(
cluster=cluster_config,
benchmarks=benchmark_config,
client_endpoints = client_endpoints_config,
)
def install_dependencies(self):
system_type = misc.get_system_type(self.first_mon)
if system_type == 'rpm':
install_cmd = ['sudo', 'yum', '-y', 'install']
cbt_depends = ['python3-yaml', 'python3-lxml', 'librbd-devel', 'pdsh', 'pdsh-rcmd-ssh']
self.log.info('Installing collectl')
collectl_location = "https://sourceforge.net/projects/collectl/files/collectl/collectl-4.3.1/collectl-4.3.1.src.tar.gz/download"
self.first_mon.run(
args=[
'sudo', 'mkdir', 'collectl', run.Raw('&&'),
'cd', 'collectl', run.Raw('&&'),
'sudo', 'wget', collectl_location, '-O', 'collectl.tar.gz', run.Raw('&&'),
'sudo', 'tar', '-xvf', 'collectl.tar.gz' , run.Raw('&&'),
'cd', 'collectl-4.3.1', run.Raw('&&'),
'sudo', './INSTALL'
]
)
else:
install_cmd = ['sudo', 'apt-get', '-y', '--force-yes', 'install']
cbt_depends = ['python3-yaml', 'python3-lxml', 'librbd-dev', 'collectl']
self.first_mon.run(args=install_cmd + cbt_depends)
benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys()))
self.log.info('benchmark: %s', benchmark_type)
if benchmark_type in ['librbdfio', 'fio']:
# install fio
testdir = misc.get_testdir(self.ctx)
self.first_mon.run(
args=[
'git', 'clone', '-b', 'master',
'https://github.com/axboe/fio.git',
'{tdir}/fio'.format(tdir=testdir)
]
)
self.first_mon.run(
args=[
'cd', os.path.join(testdir, 'fio'), run.Raw('&&'),
'./configure', run.Raw('&&'),
'make'
]
)
if benchmark_type == 'cosbench':
# install cosbench
self.log.info('install dependencies for cosbench')
if system_type == 'rpm':
cosbench_depends = ['wget', 'unzip', 'java-1.7.0-openjdk', 'curl']
else:
cosbench_depends = ['wget', 'unzip', 'openjdk-8-jre', 'curl']
self.first_mon.run(args=install_cmd + cosbench_depends)
testdir = misc.get_testdir(self.ctx)
cosbench_version = '0.4.2.c3'
cosbench_location = 'https://github.com/intel-cloud/cosbench/releases/download/v0.4.2.c3/0.4.2.c3.zip'
os_version = misc.get_system_type(self.first_mon, False, True)
# additional requirements for bionic
if os_version == '18.04':
self.first_mon.run(
args=['sudo', 'apt-get', '-y', 'purge', 'openjdk-11*'])
# use our own version of cosbench
cosbench_version = 'cosbench-0.4.2.c3.1'
# contains additional parameter "-N" to nc
cosbench_location = 'http://drop.ceph.com/qa/cosbench-0.4.2.c3.1.zip'
cosbench_dir = os.path.join(testdir, cosbench_version)
self.ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', cosbench_dir])
self.first_mon.run(
args=[
'cd', testdir, run.Raw('&&'),
'wget',
cosbench_location, run.Raw('&&'),
'unzip', '{name}.zip'.format(name=cosbench_version), '-d', cosbench_version
]
)
else:
self.first_mon.run(
args=[
'cd', testdir, run.Raw('&&'),
'wget',
cosbench_location, run.Raw('&&'),
'unzip', '{name}.zip'.format(name=cosbench_version)
]
)
self.first_mon.run(
args=[
'cd', testdir, run.Raw('&&'),
'ln', '-s', cosbench_version, 'cos',
]
)
self.first_mon.run(
args=[
'cd', os.path.join(testdir, 'cos'), run.Raw('&&'),
'chmod', '+x', run.Raw('*.sh'),
]
)
# start cosbench and check info
self.log.info('start cosbench')
self.first_mon.run(
args=[
'cd', testdir, run.Raw('&&'),
'cd', 'cos', run.Raw('&&'),
'sh', 'start-all.sh'
]
)
self.log.info('check cosbench info')
self.first_mon.run(
args=[
'cd', testdir, run.Raw('&&'),
'cd', 'cos', run.Raw('&&'),
'sh', 'cli.sh', 'info'
]
)
def checkout_cbt(self):
testdir = misc.get_testdir(self.ctx)
repo = self.config.get('repo', 'https://github.com/ceph/cbt.git')
branch = self.config.get('branch', 'master')
branch = self.config.get('force-branch', branch)
sha1 = self.config.get('sha1')
if sha1 is None:
self.first_mon.run(
args=[
'git', 'clone', '--depth', '1', '-b', branch, repo,
'{tdir}/cbt'.format(tdir=testdir)
]
)
else:
self.first_mon.run(
args=[
'git', 'clone', '-b', branch, repo,
'{tdir}/cbt'.format(tdir=testdir)
]
)
self.first_mon.run(
args=[
'cd', os.path.join(testdir, 'cbt'), run.Raw('&&'),
'git', 'reset', '--hard', sha1,
]
)
def setup(self):
super(CBT, self).setup()
self.first_mon = next(iter(self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys()))
self.cbt_config = self.generate_cbt_config()
self.log.info('cbt configuration is %s', self.cbt_config)
self.cbt_dir = os.path.join(misc.get_archive_dir(self.ctx), 'cbt')
self.ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', self.cbt_dir])
self.first_mon.write_file(
os.path.join(self.cbt_dir, 'cbt_config.yaml'),
yaml.safe_dump(self.cbt_config, default_flow_style=False))
self.checkout_cbt()
self.install_dependencies()
def begin(self):
super(CBT, self).begin()
testdir = misc.get_testdir(self.ctx)
self.first_mon.run(
args=[
'{tdir}/cbt/cbt.py'.format(tdir=testdir),
'-a', self.cbt_dir,
'{cbtdir}/cbt_config.yaml'.format(cbtdir=self.cbt_dir),
],
)
preserve_file = os.path.join(self.ctx.archive, '.preserve')
open(preserve_file, 'a').close()
def end(self):
super(CBT, self).end()
testdir = misc.get_testdir(self.ctx)
self.first_mon.run(
args=[
'rm', '--one-file-system', '-rf', '--',
'{tdir}/cbt'.format(tdir=testdir),
]
)
benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys()))
if benchmark_type in ['librbdfio', 'fio']:
self.first_mon.run(
args=[
'rm', '--one-file-system', '-rf', '--',
'{tdir}/fio'.format(tdir=testdir),
]
)
if benchmark_type == 'cosbench':
os_version = misc.get_system_type(self.first_mon, False, True)
if os_version == '18.04':
cosbench_version = 'cosbench-0.4.2.c3.1'
else:
cosbench_version = '0.4.2.c3'
# note: stop-all requires 'nc'
self.first_mon.run(
args=[
'cd', testdir, run.Raw('&&'),
'cd', 'cos', run.Raw('&&'),
'sh', 'stop-all.sh',
run.Raw('||'), 'true'
]
)
self.first_mon.run(
args=[
'sudo', 'killall', '-9', 'java',
run.Raw('||'), 'true'
]
)
self.first_mon.run(
args=[
'rm', '--one-file-system', '-rf', '--',
'{tdir}/cos'.format(tdir=testdir),
]
)
self.first_mon.run(
args=[
'rm', '--one-file-system', '-rf', '--',
'{tdir}/{version}'.format(tdir=testdir, version=cosbench_version),
]
)
self.first_mon.run(
args=[
'rm', '--one-file-system', '-rf', '--',
'{tdir}/{version}.zip'.format(tdir=testdir, version=cosbench_version),
]
)
self.first_mon.run(
args=[
'rm', '--one-file-system', '-rf', '--',
'{tdir}/xml'.format(tdir=testdir),
]
)
task = CBT
| 12,249 | 39.03268 | 143 |
py
|
null |
ceph-main/qa/tasks/ceph.py
|
"""
Ceph cluster task.
Handle the setup, starting, and clean-up of a Ceph cluster.
"""
from copy import deepcopy
from io import BytesIO
from io import StringIO
import argparse
import configobj
import contextlib
import errno
import logging
import os
import json
import time
import gevent
import re
import socket
import yaml
from paramiko import SSHException
from tasks.ceph_manager import CephManager, write_conf, get_valgrind_args
from tarfile import ReadError
from tasks.cephfs.filesystem import MDSCluster, Filesystem
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology import exceptions
from teuthology.orchestra import run
from tasks import ceph_client as cclient
from teuthology.orchestra.daemon import DaemonGroup
from tasks.daemonwatchdog import DaemonWatchdog
CEPH_ROLE_TYPES = ['mon', 'mgr', 'osd', 'mds', 'rgw']
DATA_PATH = '/var/lib/ceph/{type_}/{cluster}-{id_}'
log = logging.getLogger(__name__)
def generate_caps(type_):
"""
Each call will return the next capability for each system type
    (essentially a subset of possible role values). Valid types are osd,
    mgr, mds and client.
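
    For example, list(generate_caps('mgr')) yields
    ['--cap', 'mon', 'allow profile mgr',
     '--cap', 'osd', 'allow *',
     '--cap', 'mds', 'allow *'].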
"""
defaults = dict(
osd=dict(
mon='allow profile osd',
mgr='allow profile osd',
osd='allow *',
),
mgr=dict(
mon='allow profile mgr',
osd='allow *',
mds='allow *',
),
mds=dict(
mon='allow *',
mgr='allow *',
osd='allow *',
mds='allow',
),
client=dict(
mon='allow rw',
mgr='allow r',
osd='allow rwx',
mds='allow',
),
)
for subsystem, capability in defaults[type_].items():
yield '--cap'
yield subsystem
yield capability
def update_archive_setting(ctx, key, value):
"""
    Record an archive directory (e.g. the log or crash dump path) in the
    job's info.yaml file.
"""
if ctx.archive is None:
return
with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
info_yaml = yaml.safe_load(info_file)
info_file.seek(0)
if 'archive' in info_yaml:
info_yaml['archive'][key] = value
else:
info_yaml['archive'] = {key: value}
yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
@contextlib.contextmanager
def ceph_crash(ctx, config):
"""
Gather crash dumps from /var/lib/ceph/crash
"""
# Add crash directory to job's archive
update_archive_setting(ctx, 'crash', '/var/lib/ceph/crash')
try:
yield
finally:
if ctx.archive is not None:
log.info('Archiving crash dumps...')
path = os.path.join(ctx.archive, 'remote')
try:
os.makedirs(path)
except OSError:
pass
for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.shortname)
try:
os.makedirs(sub)
except OSError:
pass
try:
teuthology.pull_directory(remote, '/var/lib/ceph/crash',
os.path.join(sub, 'crash'))
except ReadError:
pass
@contextlib.contextmanager
def ceph_log(ctx, config):
"""
Create /var/log/ceph log directory that is open to everyone.
Add valgrind and profiling-logger directories.
:param ctx: Context
:param config: Configuration
"""
log.info('Making ceph log dir writeable by non-root...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'chmod',
'777',
'/var/log/ceph',
],
wait=False,
)
)
log.info('Disabling ceph logrotate...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm', '-f', '--',
'/etc/logrotate.d/ceph',
],
wait=False,
)
)
log.info('Creating extra log directories...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'install', '-d', '-m0777', '--',
'/var/log/ceph/valgrind',
'/var/log/ceph/profiling-logger',
],
wait=False,
)
)
# Add logs directory to job's info log file
update_archive_setting(ctx, 'log', '/var/log/ceph')
class Rotater(object):
stop_event = gevent.event.Event()
def invoke_logrotate(self):
# 1) install ceph-test.conf in /etc/logrotate.d
# 2) continuously loop over logrotate invocation with ceph-test.conf
while not self.stop_event.is_set():
self.stop_event.wait(timeout=30)
try:
procs = ctx.cluster.run(
args=['sudo', 'logrotate', '/etc/logrotate.d/ceph-test.conf'],
wait=False,
stderr=StringIO()
)
run.wait(procs)
except exceptions.ConnectionLostError as e:
# Some tests may power off nodes during test, in which
# case we will see connection errors that we should ignore.
log.debug("Missed logrotate, node '{0}' is offline".format(
e.node))
except EOFError:
# Paramiko sometimes raises this when it fails to
# connect to a node during open_session. As with
# ConnectionLostError, we ignore this because nodes
# are allowed to get power cycled during tests.
log.debug("Missed logrotate, EOFError")
except SSHException:
log.debug("Missed logrotate, SSHException")
except run.CommandFailedError as e:
for p in procs:
if p.finished and p.exitstatus != 0:
err = p.stderr.getvalue()
if 'error: error renaming temp state file' in err:
log.info('ignoring transient state error: %s', e)
else:
raise
except socket.error as e:
if e.errno in (errno.EHOSTUNREACH, errno.ECONNRESET):
log.debug("Missed logrotate, host unreachable")
else:
raise
def begin(self):
self.thread = gevent.spawn(self.invoke_logrotate)
def end(self):
self.stop_event.set()
self.thread.get()
def write_rotate_conf(ctx, daemons):
testdir = teuthology.get_testdir(ctx)
remote_logrotate_conf = '%s/logrotate.ceph-test.conf' % testdir
rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf')
with open(rotate_conf_path) as f:
conf = ""
for daemon, size in daemons.items():
log.info('writing logrotate stanza for {}'.format(daemon))
conf += f.read().format(daemon_type=daemon,
max_size=size)
f.seek(0, 0)
for remote in ctx.cluster.remotes.keys():
remote.write_file(remote_logrotate_conf, BytesIO(conf.encode()))
remote.sh(
f'sudo mv {remote_logrotate_conf} /etc/logrotate.d/ceph-test.conf && '
'sudo chmod 0644 /etc/logrotate.d/ceph-test.conf && '
'sudo chown root.root /etc/logrotate.d/ceph-test.conf')
remote.chcon('/etc/logrotate.d/ceph-test.conf',
'system_u:object_r:etc_t:s0')
if ctx.config.get('log-rotate'):
daemons = ctx.config.get('log-rotate')
log.info('Setting up log rotation with ' + str(daemons))
write_rotate_conf(ctx, daemons)
logrotater = Rotater()
logrotater.begin()
try:
yield
finally:
if ctx.config.get('log-rotate'):
log.info('Shutting down logrotate')
logrotater.end()
ctx.cluster.sh('sudo rm /etc/logrotate.d/ceph-test.conf')
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and ctx.summary['success']):
# and logs
log.info('Compressing logs...')
run.wait(
ctx.cluster.run(
args=[
'time',
'sudo',
'find',
'/var/log/ceph',
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'--max-args=1',
'--max-procs=0',
'--verbose',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'-5',
'--verbose',
'--',
],
wait=False,
),
)
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
try:
os.makedirs(path)
except OSError:
pass
for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.shortname)
try:
os.makedirs(sub)
except OSError:
pass
teuthology.pull_directory(remote, '/var/log/ceph',
os.path.join(sub, 'log'))
def assign_devs(roles, devs):
"""
Create a dictionary of devs indexed by roles
:param roles: List of roles
:param devs: Corresponding list of devices.
:returns: Dictionary of devs indexed by roles.
"""
return dict(zip(roles, devs))
@contextlib.contextmanager
def valgrind_post(ctx, config):
"""
After the tests run, look through all the valgrind logs. Exceptions are raised
if textual errors occurred in the logs, or if valgrind exceptions were detected in
the logs.
:param ctx: Context
:param config: Configuration
"""
try:
yield
finally:
lookup_procs = list()
log.info('Checking for errors in any valgrind logs...')
for remote in ctx.cluster.remotes.keys():
# look at valgrind logs for each node
proc = remote.run(
args="sudo zgrep '<kind>' /var/log/ceph/valgrind/* "
# include a second file so that we always get
# a filename prefix on the output
"/dev/null | sort | uniq",
wait=False,
check_status=False,
stdout=StringIO(),
)
lookup_procs.append((proc, remote))
valgrind_exception = None
for (proc, remote) in lookup_procs:
proc.wait()
out = proc.stdout.getvalue()
for line in out.split('\n'):
if line == '':
continue
try:
(file, kind) = line.split(':')
except Exception:
log.error('failed to split line %s', line)
raise
log.debug('file %s kind %s', file, kind)
if (file.find('mds') >= 0) and kind.find('Lost') > 0:
continue
log.error('saw valgrind issue %s in %s', kind, file)
valgrind_exception = Exception('saw valgrind issues')
if config.get('expect_valgrind_errors'):
if not valgrind_exception:
raise Exception('expected valgrind issues and found none')
else:
if valgrind_exception:
raise valgrind_exception
@contextlib.contextmanager
def crush_setup(ctx, config):
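    """
    Set the cluster's CRUSH tunables to the profile given by the
    'crush_tunables' config option (default: 'default').
    """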
cluster_name = config['cluster']
first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
(mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
profile = config.get('crush_tunables', 'default')
log.info('Setting crush tunables to %s', profile)
mon_remote.run(
args=['sudo', 'ceph', '--cluster', cluster_name,
'osd', 'crush', 'tunables', profile])
yield
@contextlib.contextmanager
def check_enable_crimson(ctx, config):
# enable crimson-osds if crimson
log.info("check_enable_crimson: {}".format(is_crimson(config)))
if is_crimson(config):
cluster_name = config['cluster']
first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
(mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
log.info('check_enable_crimson: setting set-allow-crimson')
mon_remote.run(
args=[
'sudo', 'ceph', '--cluster', cluster_name,
'osd', 'set-allow-crimson', '--yes-i-really-mean-it'
]
)
yield
@contextlib.contextmanager
def setup_manager(ctx, config):
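    """
    Create a CephManager for this cluster and store it in ctx.managers.
    """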
first_mon = teuthology.get_first_mon(ctx, config, config['cluster'])
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
if not hasattr(ctx, 'managers'):
ctx.managers = {}
ctx.managers[config['cluster']] = CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager.' + config['cluster']),
cluster=config['cluster'],
)
yield
@contextlib.contextmanager
def create_rbd_pool(ctx, config):
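    """
    Wait for OSDs to come up and, unless 'create_rbd_pool' is set to False,
    create the default 'rbd' pool and enable the rbd application on it.
    """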
cluster_name = config['cluster']
first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
(mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
log.info('Waiting for OSDs to come up')
teuthology.wait_until_osds_up(
ctx,
cluster=ctx.cluster,
remote=mon_remote,
ceph_cluster=cluster_name,
)
if config.get('create_rbd_pool', True):
log.info('Creating RBD pool')
mon_remote.run(
args=['sudo', 'ceph', '--cluster', cluster_name,
'osd', 'pool', 'create', 'rbd', '8'])
mon_remote.run(
args=[
'sudo', 'ceph', '--cluster', cluster_name,
'osd', 'pool', 'application', 'enable',
'rbd', 'rbd', '--yes-i-really-mean-it'
],
check_status=False)
yield
@contextlib.contextmanager
def cephfs_setup(ctx, config):
cluster_name = config['cluster']
first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
(mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
# If there are any MDSs, then create a filesystem for them to use
    # Do this last because it requires the mon cluster to be up and running
if mdss.remotes:
log.info('Setting up CephFS filesystem(s)...')
cephfs_config = config.get('cephfs', {})
fs_configs = cephfs_config.pop('fs', [{'name': 'cephfs'}])
# wait for standbys to become available (slow due to valgrind, perhaps)
mdsc = MDSCluster(ctx)
mds_count = len(list(teuthology.all_roles_of_type(ctx.cluster, 'mds')))
with contextutil.safe_while(sleep=2,tries=150) as proceed:
while proceed():
if len(mdsc.get_standby_daemons()) >= mds_count:
break
fss = []
for fs_config in fs_configs:
assert isinstance(fs_config, dict)
name = fs_config.pop('name')
temp = deepcopy(cephfs_config)
teuthology.deep_merge(temp, fs_config)
subvols = config.get('subvols', None)
if subvols:
teuthology.deep_merge(temp, {'subvols': subvols})
fs = Filesystem(ctx, fs_config=temp, name=name, create=True)
fss.append(fs)
yield
for fs in fss:
fs.destroy()
else:
yield
@contextlib.contextmanager
def watchdog_setup(ctx, config):
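    """
    Start a DaemonWatchdog for this cluster and expose it, along with the
    thrasher list it monitors, on ctx.ceph[cluster].
    """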
ctx.ceph[config['cluster']].thrashers = []
ctx.ceph[config['cluster']].watchdog = DaemonWatchdog(ctx, config, ctx.ceph[config['cluster']].thrashers)
ctx.ceph[config['cluster']].watchdog.start()
yield
def get_mons(roles, ips, cluster_name,
mon_bind_msgr2=False,
mon_bind_addrvec=False):
"""
Get monitors and their associated addresses
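
    The returned dict maps mon roles to address strings, e.g. (illustrative
    IPs) {'mon.a': '192.168.0.1:6789'} with the default flags; with
    mon_bind_msgr2 the first mon on a host gets the bare IP (default ports)
    and additional mons on that host get an addrvec such as
    '[v2:192.168.0.1:3301,v1:192.168.0.1:6790]'.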
"""
mons = {}
v1_ports = {}
v2_ports = {}
is_mon = teuthology.is_type('mon', cluster_name)
for idx, roles in enumerate(roles):
for role in roles:
if not is_mon(role):
continue
if ips[idx] not in v1_ports:
v1_ports[ips[idx]] = 6789
else:
v1_ports[ips[idx]] += 1
if mon_bind_msgr2:
if ips[idx] not in v2_ports:
v2_ports[ips[idx]] = 3300
addr = '{ip}'.format(ip=ips[idx])
else:
assert mon_bind_addrvec
v2_ports[ips[idx]] += 1
addr = '[v2:{ip}:{port2},v1:{ip}:{port1}]'.format(
ip=ips[idx],
port2=v2_ports[ips[idx]],
port1=v1_ports[ips[idx]],
)
elif mon_bind_addrvec:
addr = '[v1:{ip}:{port}]'.format(
ip=ips[idx],
port=v1_ports[ips[idx]],
)
else:
addr = '{ip}:{port}'.format(
ip=ips[idx],
port=v1_ports[ips[idx]],
)
mons[role] = addr
assert mons
return mons
def skeleton_config(ctx, roles, ips, mons, cluster='ceph'):
"""
Returns a ConfigObj that is prefilled with a skeleton config.
Use conf[section][key]=value or conf.merge to change it.
Use conf.write to write it out, override .filename first if you want.
"""
path = os.path.join(os.path.dirname(__file__), 'ceph.conf.template')
conf = configobj.ConfigObj(path, file_error=True)
mon_hosts = []
for role, addr in mons.items():
mon_cluster, _, _ = teuthology.split_role(role)
if mon_cluster != cluster:
continue
name = teuthology.ceph_role(role)
conf.setdefault(name, {})
mon_hosts.append(addr)
conf.setdefault('global', {})
conf['global']['mon host'] = ','.join(mon_hosts)
# set up standby mds's
is_mds = teuthology.is_type('mds', cluster)
for roles_subset in roles:
for role in roles_subset:
if is_mds(role):
name = teuthology.ceph_role(role)
conf.setdefault(name, {})
return conf
def create_simple_monmap(ctx, remote, conf, mons,
path=None,
mon_bind_addrvec=False):
"""
Writes a simple monmap based on current ceph.conf into path, or
<testdir>/monmap by default.
Assumes ceph_conf is up to date.
Assumes mon sections are named "mon.*", with the dot.
:return the FSID (as a string) of the newly created monmap
"""
addresses = list(mons.items())
assert addresses, "There are no monitors in config!"
log.debug('Ceph mon addresses: %s', addresses)
try:
log.debug('writing out conf {c}'.format(c=conf))
except:
log.debug('my conf logging attempt failed')
testdir = teuthology.get_testdir(ctx)
tmp_conf_path = '{tdir}/ceph.tmp.conf'.format(tdir=testdir)
conf_fp = BytesIO()
conf.write(conf_fp)
conf_fp.seek(0)
teuthology.write_file(remote, tmp_conf_path, conf_fp)
args = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'monmaptool',
'-c',
'{conf}'.format(conf=tmp_conf_path),
'--create',
'--clobber',
]
if mon_bind_addrvec:
args.extend(['--enable-all-features'])
for (role, addr) in addresses:
_, _, n = teuthology.split_role(role)
if mon_bind_addrvec and (',' in addr or 'v' in addr or ':' in addr):
args.extend(('--addv', n, addr))
else:
args.extend(('--add', n, addr))
if not path:
path = '{tdir}/monmap'.format(tdir=testdir)
args.extend([
'--print',
path
])
monmap_output = remote.sh(args)
fsid = re.search("generated fsid (.+)$",
monmap_output, re.MULTILINE).group(1)
teuthology.delete_file(remote, tmp_conf_path)
return fsid
def is_crimson(config):
return config.get('flavor', 'default') == 'crimson'
def maybe_redirect_stderr(config, type_, args, log_path):
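    """
    Return a (create_log_cmd, args) pair.  For crimson osds the returned args
    append stderr to log_path and create_log_cmd pre-creates that log file;
    otherwise create_log_cmd is None and args are returned unchanged.
    """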
if type_ == 'osd' and is_crimson(config):
# teuthworker uses ubuntu:ubuntu to access the test nodes
create_log_cmd = \
f'sudo install -b -o ubuntu -g ubuntu /dev/null {log_path}'
return create_log_cmd, args + [run.Raw('2>>'), log_path]
else:
return None, args
@contextlib.contextmanager
def cluster(ctx, config):
"""
Handle the creation and removal of a ceph cluster.
On startup:
Create directories needed for the cluster.
Create remote journals for all osds.
Create and set keyring.
Copy the monmap to the test systems.
Setup mon nodes.
Setup mds nodes.
Mkfs osd nodes.
Add keyring information to monmaps
Mkfs mon nodes.
On exit:
If errors occurred, extract a failure message and store in ctx.summary.
Unmount all test files and temporary journaling files.
Save the monitor information and archive all ceph logs.
Cleanup the keyring setup, and remove all monitor map and data files left over.
:param ctx: Context
:param config: Configuration
"""
if ctx.config.get('use_existing_cluster', False) is True:
log.info("'use_existing_cluster' is true; skipping cluster creation")
yield
testdir = teuthology.get_testdir(ctx)
cluster_name = config['cluster']
data_dir = '{tdir}/{cluster}.data'.format(tdir=testdir, cluster=cluster_name)
log.info('Creating ceph cluster %s...', cluster_name)
log.info('config %s', config)
log.info('ctx.config %s', ctx.config)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
data_dir,
],
wait=False,
)
)
run.wait(
ctx.cluster.run(
args=[
'sudo',
'install', '-d', '-m0777', '--', '/var/run/ceph',
],
wait=False,
)
)
devs_to_clean = {}
remote_to_roles_to_devs = {}
osds = ctx.cluster.only(teuthology.is_type('osd', cluster_name))
for remote, roles_for_host in osds.remotes.items():
devs = teuthology.get_scratch_devices(remote)
roles_to_devs = assign_devs(
teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name), devs
)
devs_to_clean[remote] = []
log.info('osd dev map: {}'.format(roles_to_devs))
assert roles_to_devs, \
"remote {} has osd roles, but no osd devices were specified!".format(remote.hostname)
remote_to_roles_to_devs[remote] = roles_to_devs
log.info("remote_to_roles_to_devs: {}".format(remote_to_roles_to_devs))
for osd_role, dev_name in remote_to_roles_to_devs.items():
assert dev_name, "{} has no associated device!".format(osd_role)
log.info('Generating config...')
remotes_and_roles = ctx.cluster.remotes.items()
roles = [role_list for (remote, role_list) in remotes_and_roles]
ips = [host for (host, port) in
(remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)]
mons = get_mons(
roles, ips, cluster_name,
mon_bind_msgr2=config.get('mon_bind_msgr2'),
mon_bind_addrvec=config.get('mon_bind_addrvec'),
)
conf = skeleton_config(
ctx, roles=roles, ips=ips, mons=mons, cluster=cluster_name,
)
for section, keys in config['conf'].items():
for key, value in keys.items():
log.info("[%s] %s = %s" % (section, key, value))
if section not in conf:
conf[section] = {}
conf[section][key] = value
if not hasattr(ctx, 'ceph'):
ctx.ceph = {}
ctx.ceph[cluster_name] = argparse.Namespace()
ctx.ceph[cluster_name].conf = conf
ctx.ceph[cluster_name].mons = mons
default_keyring = '/etc/ceph/{cluster}.keyring'.format(cluster=cluster_name)
keyring_path = config.get('keyring_path', default_keyring)
coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
log.info('Setting up %s...' % firstmon)
ctx.cluster.only(firstmon).run(
args=[
'sudo',
'adjust-ulimits',
'ceph-coverage',
coverage_dir,
'ceph-authtool',
'--create-keyring',
keyring_path,
],
)
ctx.cluster.only(firstmon).run(
args=[
'sudo',
'adjust-ulimits',
'ceph-coverage',
coverage_dir,
'ceph-authtool',
'--gen-key',
'--name=mon.',
keyring_path,
],
)
ctx.cluster.only(firstmon).run(
args=[
'sudo',
'chmod',
'0644',
keyring_path,
],
)
(mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
monmap_path = '{tdir}/{cluster}.monmap'.format(tdir=testdir,
cluster=cluster_name)
fsid = create_simple_monmap(
ctx,
remote=mon0_remote,
conf=conf,
mons=mons,
path=monmap_path,
mon_bind_addrvec=config.get('mon_bind_addrvec'),
)
ctx.ceph[cluster_name].fsid = fsid
if not 'global' in conf:
conf['global'] = {}
conf['global']['fsid'] = fsid
default_conf_path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster_name)
conf_path = config.get('conf_path', default_conf_path)
log.info('Writing %s for FSID %s...' % (conf_path, fsid))
write_conf(ctx, conf_path, cluster_name)
log.info('Creating admin key on %s...' % firstmon)
ctx.cluster.only(firstmon).run(
args=[
'sudo',
'adjust-ulimits',
'ceph-coverage',
coverage_dir,
'ceph-authtool',
'--gen-key',
'--name=client.admin',
'--cap', 'mon', 'allow *',
'--cap', 'osd', 'allow *',
'--cap', 'mds', 'allow *',
'--cap', 'mgr', 'allow *',
keyring_path,
],
)
log.info('Copying monmap to all nodes...')
keyring = mon0_remote.read_file(keyring_path)
monmap = mon0_remote.read_file(monmap_path)
for rem in ctx.cluster.remotes.keys():
# copy mon key and initial monmap
log.info('Sending monmap to node {remote}'.format(remote=rem))
rem.write_file(keyring_path, keyring, mode='0644', sudo=True)
rem.write_file(monmap_path, monmap)
log.info('Setting up mon nodes...')
mons = ctx.cluster.only(teuthology.is_type('mon', cluster_name))
if not config.get('skip_mgr_daemons', False):
log.info('Setting up mgr nodes...')
mgrs = ctx.cluster.only(teuthology.is_type('mgr', cluster_name))
for remote, roles_for_host in mgrs.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'mgr',
cluster_name):
_, _, id_ = teuthology.split_role(role)
mgr_dir = DATA_PATH.format(
type_='mgr', cluster=cluster_name, id_=id_)
remote.run(
args=[
'sudo',
'mkdir',
'-p',
mgr_dir,
run.Raw('&&'),
'sudo',
'adjust-ulimits',
'ceph-coverage',
coverage_dir,
'ceph-authtool',
'--create-keyring',
'--gen-key',
'--name=mgr.{id}'.format(id=id_),
mgr_dir + '/keyring',
],
)
log.info('Setting up mds nodes...')
mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
for remote, roles_for_host in mdss.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'mds',
cluster_name):
_, _, id_ = teuthology.split_role(role)
mds_dir = DATA_PATH.format(
type_='mds', cluster=cluster_name, id_=id_)
remote.run(
args=[
'sudo',
'mkdir',
'-p',
mds_dir,
run.Raw('&&'),
'sudo',
'adjust-ulimits',
'ceph-coverage',
coverage_dir,
'ceph-authtool',
'--create-keyring',
'--gen-key',
'--name=mds.{id}'.format(id=id_),
mds_dir + '/keyring',
],
)
remote.run(args=[
'sudo', 'chown', '-R', 'ceph:ceph', mds_dir
])
cclient.create_keyring(ctx, cluster_name)
log.info('Running mkfs on osd nodes...')
if not hasattr(ctx, 'disk_config'):
ctx.disk_config = argparse.Namespace()
if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev'):
ctx.disk_config.remote_to_roles_to_dev = {}
if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev_mount_options'):
ctx.disk_config.remote_to_roles_to_dev_mount_options = {}
if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev_fstype'):
ctx.disk_config.remote_to_roles_to_dev_fstype = {}
teuthology.deep_merge(ctx.disk_config.remote_to_roles_to_dev, remote_to_roles_to_devs)
log.info("ctx.disk_config.remote_to_roles_to_dev: {r}".format(r=str(ctx.disk_config.remote_to_roles_to_dev)))
for remote, roles_for_host in osds.remotes.items():
roles_to_devs = remote_to_roles_to_devs[remote]
for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name):
_, _, id_ = teuthology.split_role(role)
mnt_point = DATA_PATH.format(
type_='osd', cluster=cluster_name, id_=id_)
remote.run(
args=[
'sudo',
'mkdir',
'-p',
mnt_point,
])
log.info('roles_to_devs: {}'.format(roles_to_devs))
log.info('role: {}'.format(role))
if roles_to_devs.get(role):
dev = roles_to_devs[role]
fs = config.get('fs')
package = None
mkfs_options = config.get('mkfs_options')
mount_options = config.get('mount_options')
if fs == 'btrfs':
# package = 'btrfs-tools'
if mount_options is None:
mount_options = ['noatime', 'user_subvol_rm_allowed']
if mkfs_options is None:
mkfs_options = ['-m', 'single',
'-l', '32768',
'-n', '32768']
if fs == 'xfs':
# package = 'xfsprogs'
if mount_options is None:
mount_options = ['noatime']
if mkfs_options is None:
mkfs_options = ['-f', '-i', 'size=2048']
if fs == 'ext4' or fs == 'ext3':
if mount_options is None:
mount_options = ['noatime', 'user_xattr']
if mount_options is None:
mount_options = []
if mkfs_options is None:
mkfs_options = []
mkfs = ['mkfs.%s' % fs] + mkfs_options
log.info('%s on %s on %s' % (mkfs, dev, remote))
if package is not None:
remote.sh('sudo apt-get install -y %s' % package)
try:
remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
except run.CommandFailedError:
                    # Newer btrfs-tools doesn't prompt for overwrite, use -f
                    if '-f' not in mkfs_options:
mkfs_options.append('-f')
mkfs = ['mkfs.%s' % fs] + mkfs_options
log.info('%s on %s on %s' % (mkfs, dev, remote))
remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
log.info('mount %s on %s -o %s' % (dev, remote,
','.join(mount_options)))
remote.run(
args=[
'sudo',
'mount',
'-t', fs,
'-o', ','.join(mount_options),
dev,
mnt_point,
]
)
remote.run(
args=[
'sudo', '/sbin/restorecon', mnt_point,
],
check_status=False,
)
if not remote in ctx.disk_config.remote_to_roles_to_dev_mount_options:
ctx.disk_config.remote_to_roles_to_dev_mount_options[remote] = {}
ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][role] = mount_options
if not remote in ctx.disk_config.remote_to_roles_to_dev_fstype:
ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {}
ctx.disk_config.remote_to_roles_to_dev_fstype[remote][role] = fs
devs_to_clean[remote].append(mnt_point)
for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name):
_, _, id_ = teuthology.split_role(role)
try:
args = ['sudo',
'MALLOC_CHECK_=3',
'adjust-ulimits',
'ceph-coverage', coverage_dir,
'ceph-osd',
'--no-mon-config',
'--cluster', cluster_name,
'--mkfs',
'--mkkey',
'-i', id_,
'--monmap', monmap_path]
log_path = f'/var/log/ceph/{cluster_name}-osd.{id_}.log'
create_log_cmd, args = \
maybe_redirect_stderr(config, 'osd', args, log_path)
if create_log_cmd:
remote.sh(create_log_cmd)
remote.run(args=args)
except run.CommandFailedError:
# try without --no-mon-config.. this may be an upgrade test
remote.run(
args=[
'sudo',
'MALLOC_CHECK_=3',
'adjust-ulimits',
'ceph-coverage',
coverage_dir,
'ceph-osd',
'--cluster',
cluster_name,
'--mkfs',
'--mkkey',
'-i', id_,
'--monmap', monmap_path,
],
)
mnt_point = DATA_PATH.format(
type_='osd', cluster=cluster_name, id_=id_)
remote.run(args=[
'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
])
log.info('Reading keys from all nodes...')
keys_fp = BytesIO()
keys = []
for remote, roles_for_host in ctx.cluster.remotes.items():
for type_ in ['mgr', 'mds', 'osd']:
if type_ == 'mgr' and config.get('skip_mgr_daemons', False):
continue
for role in teuthology.cluster_roles_of_type(roles_for_host, type_, cluster_name):
_, _, id_ = teuthology.split_role(role)
data = remote.read_file(
os.path.join(
DATA_PATH.format(
type_=type_, id_=id_, cluster=cluster_name),
'keyring',
),
sudo=True,
)
keys.append((type_, id_, data))
keys_fp.write(data)
for remote, roles_for_host in ctx.cluster.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'client', cluster_name):
_, _, id_ = teuthology.split_role(role)
data = remote.read_file(
'/etc/ceph/{cluster}.client.{id}.keyring'.format(id=id_, cluster=cluster_name)
)
keys.append(('client', id_, data))
keys_fp.write(data)
log.info('Adding keys to all mons...')
writes = mons.run(
args=[
'sudo', 'tee', '-a',
keyring_path,
],
stdin=run.PIPE,
wait=False,
stdout=BytesIO(),
)
keys_fp.seek(0)
teuthology.feed_many_stdins_and_close(keys_fp, writes)
run.wait(writes)
for type_, id_, data in keys:
run.wait(
mons.run(
args=[
'sudo',
'adjust-ulimits',
'ceph-coverage',
coverage_dir,
'ceph-authtool',
keyring_path,
'--name={type}.{id}'.format(
type=type_,
id=id_,
),
] + list(generate_caps(type_)),
wait=False,
),
)
log.info('Running mkfs on mon nodes...')
for remote, roles_for_host in mons.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'mon', cluster_name):
_, _, id_ = teuthology.split_role(role)
mnt_point = DATA_PATH.format(
type_='mon', id_=id_, cluster=cluster_name)
remote.run(
args=[
'sudo',
'mkdir',
'-p',
mnt_point,
],
)
remote.run(
args=[
'sudo',
'adjust-ulimits',
'ceph-coverage',
coverage_dir,
'ceph-mon',
'--cluster', cluster_name,
'--mkfs',
'-i', id_,
'--monmap', monmap_path,
'--keyring', keyring_path,
],
)
remote.run(args=[
'sudo', 'chown', '-R', 'ceph:ceph', mnt_point
])
run.wait(
mons.run(
args=[
'rm',
'--',
monmap_path,
],
wait=False,
),
)
try:
yield
except Exception:
# we need to know this below
ctx.summary['success'] = False
raise
finally:
(mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
log.info('Checking cluster log for badness...')
def first_in_ceph_log(pattern, excludes):
"""
            Find the first occurrence of the pattern specified in the Ceph log.
            Returns None if no match is found.
:param pattern: Pattern scanned for.
:param excludes: Patterns to ignore.
:return: First line of text (or None if not found)
"""
args = [
'sudo',
'egrep', pattern,
'/var/log/ceph/{cluster}.log'.format(cluster=cluster_name),
]
for exclude in excludes:
args.extend([run.Raw('|'), 'egrep', '-v', exclude])
args.extend([
run.Raw('|'), 'head', '-n', '1',
])
stdout = mon0_remote.sh(args)
return stdout or None
if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]',
config['log_ignorelist']) is not None:
log.warning('Found errors (ERR|WRN|SEC) in cluster log')
ctx.summary['success'] = False
# use the most severe problem as the failure reason
if 'failure_reason' not in ctx.summary:
for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']:
match = first_in_ceph_log(pattern, config['log_ignorelist'])
if match is not None:
ctx.summary['failure_reason'] = \
'"{match}" in cluster log'.format(
match=match.rstrip('\n'),
)
break
for remote, dirs in devs_to_clean.items():
for dir_ in dirs:
log.info('Unmounting %s on %s' % (dir_, remote))
try:
remote.run(
args=[
'sync',
run.Raw('&&'),
'sudo',
'umount',
'-f',
dir_
]
)
except Exception as e:
remote.run(args=[
'sudo',
run.Raw('PATH=/usr/sbin:$PATH'),
'lsof',
run.Raw(';'),
'ps', 'auxf',
])
raise e
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and ctx.summary['success']):
# archive mon data, too
log.info('Archiving mon data...')
path = os.path.join(ctx.archive, 'data')
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
for remote, roles in mons.remotes.items():
for role in roles:
is_mon = teuthology.is_type('mon', cluster_name)
if is_mon(role):
_, _, id_ = teuthology.split_role(role)
mon_dir = DATA_PATH.format(
type_='mon', id_=id_, cluster=cluster_name)
teuthology.pull_directory_tarball(
remote,
mon_dir,
path + '/' + role + '.tgz')
log.info('Cleaning ceph cluster...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm',
'-rf',
'--',
conf_path,
keyring_path,
data_dir,
monmap_path,
run.Raw('{tdir}/../*.pid'.format(tdir=testdir)),
],
wait=False,
),
)
def osd_scrub_pgs(ctx, config):
"""
Scrub pgs when we exit.
First make sure all pgs are active and clean.
Next scrub all osds.
Then periodically check until all pgs have scrub time stamps that
indicate the last scrub completed. Time out if no progress is made
here after two minutes.
"""
retries = 40
delays = 20
cluster_name = config['cluster']
manager = ctx.managers[cluster_name]
for _ in range(retries):
stats = manager.get_pg_stats()
unclean = [stat['pgid'] for stat in stats if 'active+clean' not in stat['state']]
split_merge = []
osd_dump = manager.get_osd_dump_json()
try:
split_merge = [i['pool_name'] for i in osd_dump['pools'] if i['pg_num'] != i['pg_num_target']]
except KeyError:
# we don't support pg_num_target before nautilus
pass
if not unclean and not split_merge:
break
waiting_on = []
if unclean:
waiting_on.append(f'{unclean} to go clean')
if split_merge:
waiting_on.append(f'{split_merge} to split/merge')
waiting_on = ' and '.join(waiting_on)
log.info('Waiting for all PGs to be active+clean and split+merged, waiting on %s', waiting_on)
time.sleep(delays)
else:
raise RuntimeError("Scrubbing terminated -- not all pgs were active and clean.")
check_time_now = time.localtime()
time.sleep(1)
all_roles = teuthology.all_roles(ctx.cluster)
for role in teuthology.cluster_roles_of_type(all_roles, 'osd', cluster_name):
log.info("Scrubbing {osd}".format(osd=role))
_, _, id_ = teuthology.split_role(role)
# allow this to fail; in certain cases the OSD might not be up
# at this point. we will catch all pgs below.
try:
manager.raw_cluster_cmd('tell', 'osd.' + id_, 'config', 'set',
'osd_debug_deep_scrub_sleep', '0');
manager.raw_cluster_cmd('osd', 'deep-scrub', id_)
except run.CommandFailedError:
pass
prev_good = 0
gap_cnt = 0
loop = True
while loop:
stats = manager.get_pg_stats()
timez = [(stat['pgid'],stat['last_scrub_stamp']) for stat in stats]
loop = False
thiscnt = 0
re_scrub = []
for (pgid, tmval) in timez:
t = tmval[0:tmval.find('.')].replace(' ', 'T')
pgtm = time.strptime(t, '%Y-%m-%dT%H:%M:%S')
if pgtm > check_time_now:
thiscnt += 1
else:
log.info('pgid %s last_scrub_stamp %s %s <= %s', pgid, tmval, pgtm, check_time_now)
loop = True
re_scrub.append(pgid)
if thiscnt > prev_good:
prev_good = thiscnt
gap_cnt = 0
else:
gap_cnt += 1
if gap_cnt % 6 == 0:
for pgid in re_scrub:
# re-request scrub every so often in case the earlier
# request was missed. do not do it every time because
# the scrub may be in progress or not reported yet and
# we will starve progress.
manager.raw_cluster_cmd('pg', 'deep-scrub', pgid)
if gap_cnt > retries:
raise RuntimeError('Exiting scrub checking -- not all pgs scrubbed.')
if loop:
log.info('Still waiting for all pgs to be scrubbed.')
time.sleep(delays)
@contextlib.contextmanager
def run_daemon(ctx, config, type_):
"""
Run daemons for a role type. Handle the startup and termination of a daemon.
On startup -- set coverage, cpu_profile, and valgrind values for all remotes,
and a max_mds value for one mds.
On cleanup -- Stop all existing daemons of this type.
:param ctx: Context
:param config: Configuration
:param type_: Role type
"""
cluster_name = config['cluster']
log.info('Starting %s daemons in cluster %s...', type_, cluster_name)
testdir = teuthology.get_testdir(ctx)
daemons = ctx.cluster.only(teuthology.is_type(type_, cluster_name))
# check whether any daemons of this type are configured
if daemons is None:
return
coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
daemon_signal = 'kill'
if config.get('coverage') or config.get('valgrind') is not None:
daemon_signal = 'term'
# create osds in order. (this only matters for pre-luminous, which might
# be jewel/hammer, which doesn't take an id_ argument to legacy 'osd create').
osd_uuids = {}
for remote, roles_for_host in daemons.remotes.items():
is_type_ = teuthology.is_type(type_, cluster_name)
for role in roles_for_host:
if not is_type_(role):
continue
_, _, id_ = teuthology.split_role(role)
if type_ == 'osd':
datadir='/var/lib/ceph/osd/{cluster}-{id}'.format(
cluster=cluster_name, id=id_)
osd_uuid = remote.read_file(
datadir + '/fsid', sudo=True).decode().strip()
osd_uuids[id_] = osd_uuid
for osd_id in range(len(osd_uuids)):
id_ = str(osd_id)
osd_uuid = osd_uuids.get(id_)
try:
remote.run(
args=[
'sudo', 'ceph', '--cluster', cluster_name,
'osd', 'new', osd_uuid, id_,
]
)
except:
# fallback to pre-luminous (jewel)
remote.run(
args=[
'sudo', 'ceph', '--cluster', cluster_name,
'osd', 'create', osd_uuid,
]
)
if config.get('add_osds_to_crush'):
remote.run(
args=[
'sudo', 'ceph', '--cluster', cluster_name,
'osd', 'crush', 'create-or-move', 'osd.' + id_,
'1.0', 'host=localhost', 'root=default',
]
)
for remote, roles_for_host in daemons.remotes.items():
is_type_ = teuthology.is_type(type_, cluster_name)
for role in roles_for_host:
if not is_type_(role):
continue
_, _, id_ = teuthology.split_role(role)
run_cmd = [
'sudo',
'adjust-ulimits',
'ceph-coverage',
coverage_dir,
'daemon-helper',
daemon_signal,
]
run_cmd_tail = [
'ceph-%s' % (type_),
'-f',
'--cluster', cluster_name,
'-i', id_]
if type_ in config.get('cpu_profile', []):
profile_path = '/var/log/ceph/profiling-logger/%s.prof' % (role)
run_cmd.extend(['env', 'CPUPROFILE=%s' % profile_path])
vc = config.get('valgrind')
if vc is not None:
valgrind_args = None
if type_ in vc:
valgrind_args = vc[type_]
if role in vc:
valgrind_args = vc[role]
exit_on_first_error = vc.get('exit_on_first_error', True)
run_cmd = get_valgrind_args(testdir, role, run_cmd, valgrind_args,
exit_on_first_error=exit_on_first_error)
run_cmd.extend(run_cmd_tail)
log_path = f'/var/log/ceph/{cluster_name}-{type_}.{id_}.log'
create_log_cmd, run_cmd = \
maybe_redirect_stderr(config, type_, run_cmd, log_path)
if create_log_cmd:
remote.sh(create_log_cmd)
# always register mgr; don't necessarily start
ctx.daemons.register_daemon(
remote, type_, id_,
cluster=cluster_name,
args=run_cmd,
logger=log.getChild(role),
stdin=run.PIPE,
wait=False
)
if type_ != 'mgr' or not config.get('skip_mgr_daemons', False):
role = cluster_name + '.' + type_
ctx.daemons.get_daemon(type_, id_, cluster_name).restart()
# kludge: run any pre-manager commands
if type_ == 'mon':
for cmd in config.get('pre-mgr-commands', []):
firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
(remote,) = ctx.cluster.only(firstmon).remotes.keys()
remote.run(args=cmd.split(' '))
try:
yield
finally:
teuthology.stop_daemons_of_type(ctx, type_, cluster_name)
def healthy(ctx, config):
"""
Wait for all OSDs to be up, and for ceph health to report HEALTH_OK.
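For example (an illustrative fragment; both keys are optional and default
to 'ceph' and true respectively)::
tasks:
- ceph.healthy:
    cluster: ceph
    wait-for-healthy: true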
:param ctx: Context
:param config: Configuration
"""
config = config if isinstance(config, dict) else dict()
cluster_name = config.get('cluster', 'ceph')
log.info('Waiting until %s daemons up and pgs clean...', cluster_name)
manager = ctx.managers[cluster_name]
try:
manager.wait_for_mgr_available(timeout=30)
except (run.CommandFailedError, AssertionError) as e:
log.info('ignoring mgr wait error, probably testing upgrade: %s', e)
manager.wait_for_all_osds_up(timeout=300)
try:
manager.flush_all_pg_stats()
except (run.CommandFailedError, Exception) as e:
log.info('ignoring flush pg stats error, probably testing upgrade: %s', e)
manager.wait_for_clean()
if config.get('wait-for-healthy', True):
log.info('Waiting until ceph cluster %s is healthy...', cluster_name)
manager.wait_until_healthy(timeout=300)
if ctx.cluster.only(teuthology.is_type('mds', cluster_name)).remotes:
# Some MDSs exist, wait for them to be healthy
for fs in Filesystem.get_all_fs(ctx):
fs.wait_for_daemons(timeout=300)
def wait_for_mon_quorum(ctx, config):
"""
Check remote ceph status until all monitors are up.
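For example (the monitor ids below are only illustrative)::
tasks:
- ceph.wait_for_mon_quorum: [a, b, c]
or, with an explicit cluster name::
tasks:
- ceph.wait_for_mon_quorum:
    daemons: [a, b, c]
    cluster: ceph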
:param ctx: Context
:param config: Configuration
"""
if isinstance(config, dict):
mons = config['daemons']
cluster_name = config.get('cluster', 'ceph')
else:
assert isinstance(config, list)
mons = config
cluster_name = 'ceph'
firstmon = teuthology.get_first_mon(ctx, config, cluster_name)
(remote,) = ctx.cluster.only(firstmon).remotes.keys()
with contextutil.safe_while(sleep=10, tries=60,
action='wait for monitor quorum') as proceed:
while proceed():
quorum_status = remote.sh('sudo ceph quorum_status',
logger=log.getChild('quorum_status'))
j = json.loads(quorum_status)
q = j.get('quorum_names', [])
log.debug('Quorum: %s', q)
if sorted(q) == sorted(mons):
break
def created_pool(ctx, config):
"""
Add new pools to the dictionary of pools that the ceph-manager
knows about.
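For example (an illustrative fragment; the pool names are placeholders and
the pools must already have been created by an earlier task)::
tasks:
- ceph.created_pool: [mypool1, mypool2]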
"""
for new_pool in config:
if new_pool not in ctx.managers['ceph'].pools:
ctx.managers['ceph'].pools[new_pool] = ctx.managers['ceph'].get_pool_int_property(
new_pool, 'pg_num')
@contextlib.contextmanager
def suppress_mon_health_to_clog(ctx, config):
"""
Set the option, then restore it to its original value.
Note: due to the way tasks are executed/nested, it is not suggested to use
this method as a standalone task; otherwise it will likely restore the
tweaked option only at the /end/ of the 'tasks' block.
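For example (an illustrative fragment; ceph.restart passes its config
through to this helper)::
tasks:
- ceph.restart:
    daemons: [osd.0]
    mon-health-to-clog: false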
"""
if config.get('mon-health-to-clog', 'true') == 'false':
cluster = config.get('cluster', 'ceph')
manager = ctx.managers[cluster]
manager.raw_cluster_command(
'config', 'set', 'mon', 'mon_health_to_clog', 'false'
)
yield
manager.raw_cluster_command(
'config', 'rm', 'mon', 'mon_health_to_clog'
)
else:
yield
@contextlib.contextmanager
def restart(ctx, config):
"""
restart ceph daemons
For example::
tasks:
- ceph.restart: [all]
For example::
tasks:
- ceph.restart: [osd.0, mon.1, mds.*]
or::
tasks:
- ceph.restart:
daemons: [osd.0, mon.1]
wait-for-healthy: false
wait-for-osds-up: true
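To additionally grep each restarted osd's log for an expected message, use
(the pattern below is only illustrative)::
tasks:
- ceph.restart:
    daemons: [osd.0]
    expected-failure: 'some expected log line'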
:param ctx: Context
:param config: Configuration
"""
if config is None:
config = {}
elif isinstance(config, list):
config = {'daemons': config}
daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True)
clusters = set()
with suppress_mon_health_to_clog(ctx, config):
for role in daemons:
cluster, type_, id_ = teuthology.split_role(role)
ctx.daemons.get_daemon(type_, id_, cluster).stop()
if type_ == 'osd':
ctx.managers[cluster].mark_down_osd(id_)
ctx.daemons.get_daemon(type_, id_, cluster).restart()
clusters.add(cluster)
if config.get('wait-for-healthy', True):
for cluster in clusters:
healthy(ctx=ctx, config=dict(cluster=cluster))
if config.get('wait-for-osds-up', False):
for cluster in clusters:
ctx.managers[cluster].wait_for_all_osds_up()
if config.get('expected-failure') is not None:
log.info('Checking for expected-failure in osds logs after restart...')
expected_fail = config.get('expected-failure')
is_osd = teuthology.is_type('osd')
for role in daemons:
if not is_osd(role):
continue
(remote,) = ctx.cluster.only(role).remotes.keys()
cluster, type_, id_ = teuthology.split_role(role)
remote.run(
args = ['sudo',
'egrep', expected_fail,
'/var/log/ceph/{cluster}-{type_}.{id_}.log'.format(cluster=cluster, type_=type_, id_=id_),
])
yield
@contextlib.contextmanager
def stop(ctx, config):
"""
Stop ceph daemons
For example::
tasks:
- ceph.stop: [mds.*]
tasks:
- ceph.stop: [osd.0, osd.2]
tasks:
- ceph.stop:
daemons: [osd.0, osd.2]
"""
if config is None:
config = {}
elif isinstance(config, list):
config = {'daemons': config}
daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True)
clusters = set()
for role in daemons:
cluster, type_, id_ = teuthology.split_role(role)
ctx.daemons.get_daemon(type_, id_, cluster).stop()
clusters.add(cluster)
for cluster in clusters:
ctx.ceph[cluster].watchdog.stop()
ctx.ceph[cluster].watchdog.join()
yield
@contextlib.contextmanager
def wait_for_failure(ctx, config):
"""
Wait for a failure of a ceph daemon
For example::
tasks:
- ceph.wait_for_failure: [mds.*]
tasks:
- ceph.wait_for_failure: [osd.0, osd.2]
tasks:
- ceph.wait_for_failure:
daemons: [osd.0, osd.2]
"""
if config is None:
config = {}
elif isinstance(config, list):
config = {'daemons': config}
daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True)
for role in daemons:
cluster, type_, id_ = teuthology.split_role(role)
try:
ctx.daemons.get_daemon(type_, id_, cluster).wait()
except:
log.info('Saw expected daemon failure. Continuing.')
pass
else:
raise RuntimeError('daemon %s did not fail' % role)
yield
def validate_config(ctx, config):
"""
Perform some simple validation on task configuration.
Raises exceptions.ConfigError if an error is found.
"""
# check for osds from multiple clusters on the same host
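# For example (hypothetical roles): a host carrying both 'osd.0' (cluster
# 'ceph') and 'backup.osd.1' (cluster 'backup') fails validation, because
# osds from different clusters must not be colocated on one host.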
for remote, roles_for_host in ctx.cluster.remotes.items():
last_cluster = None
last_role = None
for role in roles_for_host:
role_cluster, role_type, _ = teuthology.split_role(role)
if role_type != 'osd':
continue
if last_cluster and last_cluster != role_cluster:
msg = "Host should not have osds (%s and %s) from multiple clusters" % (
last_role, role)
raise exceptions.ConfigError(msg)
last_cluster = role_cluster
last_role = role
@contextlib.contextmanager
def task(ctx, config):
"""
Set up and tear down a Ceph cluster.
For example::
tasks:
- ceph:
- interactive:
You can also specify what branch to run::
tasks:
- ceph:
branch: foo
Or a tag::
tasks:
- ceph:
tag: v0.42.13
Or a sha1::
tasks:
- ceph:
sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed
Or a local source dir::
tasks:
- ceph:
path: /home/sage/ceph
To capture code coverage data, use::
tasks:
- ceph:
coverage: true
To use btrfs, ext4, or xfs on the target's scratch disks, use::
tasks:
- ceph:
fs: xfs
mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1]
mount_options: [nobarrier, inode64]
To change the cephfs's default max_mds (1), use::
tasks:
- ceph:
cephfs:
max_mds: 2
To change the max_mds of a specific filesystem, use::
tasks:
- ceph:
cephfs:
max_mds: 2
fs:
- name: a
max_mds: 3
- name: b
In the above example, filesystem 'a' will have 'max_mds' 3,
and filesystem 'b' will have 'max_mds' 2.
To change the mdsmap's default session_timeout (60 seconds), use::
tasks:
- ceph:
cephfs:
session_timeout: 300
Note, this will cause the task to check the /scratch_devs file on each node
for available devices. If no such file is found, /dev/sdb will be used.
To run some daemons under valgrind, include their names
and the tool/args to use in a valgrind section::
tasks:
- ceph:
valgrind:
mds.1: --tool=memcheck
osd.1: [--tool=memcheck, --leak-check=no]
Those nodes which are using memcheck or valgrind will get
checked for bad results.
To adjust or modify config options, use::
tasks:
- ceph:
conf:
section:
key: value
For example::
tasks:
- ceph:
conf:
mds.0:
some option: value
other key: other value
client.0:
debug client: 10
debug ms: 1
By default, the cluster log is checked for errors and warnings,
and the run marked failed if any appear. You can ignore log
entries by giving a list of egrep compatible regexes, i.e.:
tasks:
- ceph:
log-ignorelist: ['foo.*bar', 'bad message']
To run multiple ceph clusters, use multiple ceph tasks, and roles
with a cluster name prefix, e.g. cluster1.client.0. Roles with no
cluster use the default cluster name, 'ceph'. OSDs from separate
clusters must be on separate hosts. Clients and non-osd daemons
from multiple clusters may be colocated. For each cluster, add an
instance of the ceph task with the cluster name specified, e.g.::
roles:
- [mon.a, osd.0, osd.1]
- [backup.mon.a, backup.osd.0, backup.osd.1]
- [client.0, backup.client.0]
tasks:
- ceph:
cluster: ceph
- ceph:
cluster: backup
:param ctx: Context
:param config: Configuration
"""
if config is None:
config = {}
assert isinstance(config, dict), \
"task ceph only supports a dictionary for configuration"
overrides = ctx.config.get('overrides', {})
teuthology.deep_merge(config, overrides.get('ceph', {}))
first_ceph_cluster = False
if not hasattr(ctx, 'daemons'):
first_ceph_cluster = True
ctx.daemons = DaemonGroup()
testdir = teuthology.get_testdir(ctx)
if config.get('coverage'):
coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
log.info('Creating coverage directory...')
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
coverage_dir,
],
wait=False,
)
)
if 'cluster' not in config:
config['cluster'] = 'ceph'
validate_config(ctx, config)
subtasks = []
if first_ceph_cluster:
# these tasks handle general log setup and parsing on all hosts,
# so they should only be run once
subtasks = [
lambda: ceph_log(ctx=ctx, config=None),
lambda: ceph_crash(ctx=ctx, config=None),
lambda: valgrind_post(ctx=ctx, config=config),
]
subtasks += [
lambda: cluster(ctx=ctx, config=dict(
conf=config.get('conf', {}),
fs=config.get('fs', 'xfs'),
mkfs_options=config.get('mkfs_options', None),
mount_options=config.get('mount_options', None),
skip_mgr_daemons=config.get('skip_mgr_daemons', False),
log_ignorelist=config.get('log-ignorelist', []),
cpu_profile=set(config.get('cpu_profile', []),),
cluster=config['cluster'],
mon_bind_msgr2=config.get('mon_bind_msgr2', True),
mon_bind_addrvec=config.get('mon_bind_addrvec', True),
)),
lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
lambda: run_daemon(ctx=ctx, config=config, type_='mgr'),
lambda: crush_setup(ctx=ctx, config=config),
lambda: check_enable_crimson(ctx=ctx, config=config),
lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
lambda: setup_manager(ctx=ctx, config=config),
lambda: create_rbd_pool(ctx=ctx, config=config),
lambda: run_daemon(ctx=ctx, config=config, type_='mds'),
lambda: cephfs_setup(ctx=ctx, config=config),
lambda: watchdog_setup(ctx=ctx, config=config),
]
with contextutil.nested(*subtasks):
try:
if config.get('wait-for-healthy', True):
healthy(ctx=ctx, config=dict(cluster=config['cluster']))
yield
finally:
# set pg_num_targets back to actual pg_num, so we don't have to
# wait for pending merges (which can take a while!)
if not config.get('skip_stop_pg_num_changes', True):
ctx.managers[config['cluster']].stop_pg_num_changes()
if config.get('wait-for-scrub', True):
# wait for pgs to become active+clean in case any
# recoveries were triggered since the last health check
ctx.managers[config['cluster']].wait_for_clean()
osd_scrub_pgs(ctx, config)
# stop logging health to clog during shutdown, or else we generate
# a bunch of scary messages unrelated to our actual run.
firstmon = teuthology.get_first_mon(ctx, config, config['cluster'])
(mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
mon0_remote.run(
args=[
'sudo',
'ceph',
'--cluster', config['cluster'],
'config', 'set', 'global',
'mon_health_to_clog', 'false',
],
check_status=False,
)
| 68,221 | 33.683274 | 113 |
py
null
ceph-main/qa/tasks/ceph_client.py
"""
Set up client keyring
"""
import logging
from teuthology import misc as teuthology
from teuthology.orchestra import run
log = logging.getLogger(__name__)
def create_keyring(ctx, cluster_name):
"""
Set up key ring on remote sites
"""
log.info('Setting up client nodes...')
clients = ctx.cluster.only(teuthology.is_type('client', cluster_name))
testdir = teuthology.get_testdir(ctx)
coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
for remote, roles_for_host in clients.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'client',
cluster_name):
name = teuthology.ceph_role(role)
client_keyring = '/etc/ceph/{0}.{1}.keyring'.format(cluster_name, name)
remote.run(
args=[
'sudo',
'adjust-ulimits',
'ceph-coverage',
coverage_dir,
'ceph-authtool',
'--create-keyring',
'--gen-key',
# TODO this --name= is not really obeyed, all unknown "types" are munged to "client"
'--name={name}'.format(name=name),
client_keyring,
run.Raw('&&'),
'sudo',
'chmod',
'0644',
client_keyring,
],
)
| 1,501 | 33.930233 | 104 |
py
null
ceph-main/qa/tasks/ceph_deploy.py
"""
Execute ceph-deploy as a task
"""
import contextlib
import os
import time
import logging
import traceback
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.config import config as teuth_config
from teuthology.task import install as install_fn
from teuthology.orchestra import run
from tasks.cephfs.filesystem import Filesystem
from teuthology.misc import wait_until_healthy
log = logging.getLogger(__name__)
@contextlib.contextmanager
def download_ceph_deploy(ctx, config):
"""
Downloads ceph-deploy from the ceph.com git mirror and (by default)
switches to the master branch. If the `ceph-deploy-branch` is specified, it
will use that instead. The `bootstrap` script is run, with the argument
obtained from `python_version`, if specified.
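For example (an illustrative fragment; both keys are optional)::
tasks:
- ceph-deploy:
    ceph-deploy-branch: my-ceph-deploy-branch
    python_version: '3'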
"""
# use mon.a for ceph_admin
(ceph_admin,) = ctx.cluster.only('mon.a').remotes.keys()
try:
py_ver = str(config['python_version'])
except KeyError:
pass
else:
supported_versions = ['2', '3']
if py_ver not in supported_versions:
raise ValueError("python_version must be: {}, not {}".format(
' or '.join(supported_versions), py_ver
))
log.info("Installing Python")
system_type = teuthology.get_system_type(ceph_admin)
if system_type == 'rpm':
package = 'python36' if py_ver == '3' else 'python'
ctx.cluster.run(args=[
'sudo', 'yum', '-y', 'install',
package, 'python-virtualenv'
])
else:
package = 'python3' if py_ver == '3' else 'python'
ctx.cluster.run(args=[
'sudo', 'apt-get', '-y', '--force-yes', 'install',
package, 'python-virtualenv'
])
log.info('Downloading ceph-deploy...')
testdir = teuthology.get_testdir(ctx)
ceph_deploy_branch = config.get('ceph-deploy-branch', 'master')
ceph_admin.run(
args=[
'git', 'clone', '-b', ceph_deploy_branch,
teuth_config.ceph_git_base_url + 'ceph-deploy.git',
'{tdir}/ceph-deploy'.format(tdir=testdir),
],
)
args = [
'cd',
'{tdir}/ceph-deploy'.format(tdir=testdir),
run.Raw('&&'),
'./bootstrap',
]
try:
args.append(str(config['python_version']))
except KeyError:
pass
ceph_admin.run(args=args)
try:
yield
finally:
log.info('Removing ceph-deploy ...')
ceph_admin.run(
args=[
'rm',
'-rf',
'{tdir}/ceph-deploy'.format(tdir=testdir),
],
)
def is_healthy(ctx, config):
"""Wait until a Ceph cluster is healthy."""
testdir = teuthology.get_testdir(ctx)
ceph_admin = teuthology.get_first_mon(ctx, config)
(remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
max_tries = 90 # 90 tries * 10 secs --> 15 minutes
tries = 0
while True:
tries += 1
if tries >= max_tries:
msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes"
remote.run(
args=[
'cd',
'{tdir}'.format(tdir=testdir),
run.Raw('&&'),
'sudo', 'ceph',
'report',
],
)
raise RuntimeError(msg)
out = remote.sh(
[
'cd',
'{tdir}'.format(tdir=testdir),
run.Raw('&&'),
'sudo', 'ceph',
'health',
],
logger=log.getChild('health'),
)
log.info('Ceph health: %s', out.rstrip('\n'))
if out.split(None, 1)[0] == 'HEALTH_OK':
break
time.sleep(10)
def get_nodes_using_role(ctx, target_role):
"""
Extract the names of nodes that match a given role from a cluster, and modify the
cluster's service IDs to match the resulting node-based naming scheme that ceph-deploy
uses, such that if "mon.a" is on host "foo23", it'll be renamed to "mon.foo23".
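For mon roles the host fqdn is returned, while other roles return the short
hostname; the old->new mapping (e.g. {'mon.a': 'mon.foo23'}) is also
recorded in ctx.cluster.mapped_role for later use by tasks.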
"""
# Nodes containing a service of the specified role
nodes_of_interest = []
# Prepare a modified version of cluster.remotes with ceph-deploy-ized names
modified_remotes = {}
ceph_deploy_mapped = dict()
for _remote, roles_for_host in ctx.cluster.remotes.items():
modified_remotes[_remote] = []
for svc_id in roles_for_host:
if svc_id.startswith("{0}.".format(target_role)):
fqdn = str(_remote).split('@')[-1]
nodename = str(str(_remote).split('.')[0]).split('@')[1]
if target_role == 'mon':
nodes_of_interest.append(fqdn)
else:
nodes_of_interest.append(nodename)
mapped_role = "{0}.{1}".format(target_role, nodename)
modified_remotes[_remote].append(mapped_role)
# keep dict of mapped role for later use by tasks
# eg. mon.a => mon.node1
ceph_deploy_mapped[svc_id] = mapped_role
else:
modified_remotes[_remote].append(svc_id)
ctx.cluster.remotes = modified_remotes
# since the function is called multiple times for target roles
# append new mapped roles
if not hasattr(ctx.cluster, 'mapped_role'):
ctx.cluster.mapped_role = ceph_deploy_mapped
else:
ctx.cluster.mapped_role.update(ceph_deploy_mapped)
log.info("New mapped_role={mr}".format(mr=ctx.cluster.mapped_role))
return nodes_of_interest
def get_dev_for_osd(ctx, config):
"""Get a list of all osd device names."""
osd_devs = []
for remote, roles_for_host in ctx.cluster.remotes.items():
host = remote.name.split('@')[-1]
shortname = host.split('.')[0]
devs = teuthology.get_scratch_devices(remote)
num_osd_per_host = list(
teuthology.roles_of_type(
roles_for_host, 'osd'))
num_osds = len(num_osd_per_host)
if config.get('separate_journal_disk') is not None:
num_devs_reqd = 2 * num_osds
assert num_devs_reqd <= len(
devs), 'fewer data and journal disks than required ' + shortname
for dindex in range(0, num_devs_reqd, 2):
jd_index = dindex + 1
dev_short = devs[dindex].split('/')[-1]
jdev_short = devs[jd_index].split('/')[-1]
osd_devs.append((shortname, dev_short, jdev_short))
else:
assert num_osds <= len(devs), 'fewer disks than osds ' + shortname
for dev in devs[:num_osds]:
dev_short = dev.split('/')[-1]
osd_devs.append((shortname, dev_short))
return osd_devs
def get_all_nodes(ctx, config):
"""Return a string of node names separated by blanks"""
nodelist = []
for t, k in ctx.config['targets'].items():
host = t.split('@')[-1]
simple_host = host.split('.')[0]
nodelist.append(simple_host)
nodelist = " ".join(nodelist)
return nodelist
@contextlib.contextmanager
def build_ceph_cluster(ctx, config):
"""Build a ceph cluster"""
# Expect to find ceph_admin on the first mon by ID, same place that the download task
# puts it. Remember this here, because subsequently IDs will change from those in
# the test config to those that ceph-deploy invents.
(ceph_admin,) = ctx.cluster.only('mon.a').remotes.keys()
def execute_ceph_deploy(cmd):
"""Remotely execute a ceph_deploy command"""
return ceph_admin.run(
args=[
'cd',
'{tdir}/ceph-deploy'.format(tdir=testdir),
run.Raw('&&'),
run.Raw(cmd),
],
check_status=False,
).exitstatus
def ceph_disk_osd_create(ctx, config):
node_dev_list = get_dev_for_osd(ctx, config)
no_of_osds = 0
for d in node_dev_list:
node = d[0]
for disk in d[1:]:
zap = './ceph-deploy disk zap ' + node + ' ' + disk
estatus = execute_ceph_deploy(zap)
if estatus != 0:
raise RuntimeError("ceph-deploy: Failed to zap osds")
osd_create_cmd = './ceph-deploy osd create '
# first check for filestore, default is bluestore with ceph-deploy
if config.get('filestore') is not None:
osd_create_cmd += '--filestore '
elif config.get('bluestore') is not None:
osd_create_cmd += '--bluestore '
if config.get('dmcrypt') is not None:
osd_create_cmd += '--dmcrypt '
osd_create_cmd += ":".join(d)
estatus_osd = execute_ceph_deploy(osd_create_cmd)
if estatus_osd == 0:
log.info('successfully created osd')
no_of_osds += 1
else:
raise RuntimeError("ceph-deploy: Failed to create osds")
return no_of_osds
def ceph_volume_osd_create(ctx, config):
osds = ctx.cluster.only(teuthology.is_type('osd'))
no_of_osds = 0
for remote in osds.remotes.keys():
# all devs should be lvm
osd_create_cmd = './ceph-deploy osd create --debug ' + remote.shortname + ' '
# default is bluestore so we just need config item for filestore
roles = ctx.cluster.remotes[remote]
dev_needed = len([role for role in roles
if role.startswith('osd')])
all_devs = teuthology.get_scratch_devices(remote)
log.info("node={n}, need_devs={d}, available={a}".format(
n=remote.shortname,
d=dev_needed,
a=all_devs,
))
devs = all_devs[0:dev_needed]
# rest of the devices can be used for journal if required
jdevs = dev_needed
for device in devs:
device_split = device.split('/')
lv_device = device_split[-2] + '/' + device_split[-1]
if config.get('filestore') is not None:
osd_create_cmd += '--filestore --data ' + lv_device + ' '
# filestore with ceph-volume also needs journal disk
try:
jdevice = all_devs.pop(jdevs)
except IndexError:
raise RuntimeError("No device available for \
journal configuration")
jdevice_split = jdevice.split('/')
j_lv = jdevice_split[-2] + '/' + jdevice_split[-1]
osd_create_cmd += '--journal ' + j_lv
else:
osd_create_cmd += ' --data ' + lv_device
estatus_osd = execute_ceph_deploy(osd_create_cmd)
if estatus_osd == 0:
log.info('successfully created osd')
no_of_osds += 1
else:
raise RuntimeError("ceph-deploy: Failed to create osds")
return no_of_osds
try:
log.info('Building ceph cluster using ceph-deploy...')
testdir = teuthology.get_testdir(ctx)
ceph_branch = None
if config.get('branch') is not None:
cbranch = config.get('branch')
for var, val in cbranch.items():
ceph_branch = '--{var}={val}'.format(var=var, val=val)
all_nodes = get_all_nodes(ctx, config)
mds_nodes = get_nodes_using_role(ctx, 'mds')
mds_nodes = " ".join(mds_nodes)
mon_node = get_nodes_using_role(ctx, 'mon')
mon_nodes = " ".join(mon_node)
# skip mgr based on config item
# this is needed when test uses latest code to install old ceph
# versions
skip_mgr = config.get('skip-mgr', False)
if not skip_mgr:
mgr_nodes = get_nodes_using_role(ctx, 'mgr')
mgr_nodes = " ".join(mgr_nodes)
new_mon = './ceph-deploy new' + " " + mon_nodes
if not skip_mgr:
mgr_create = './ceph-deploy mgr create' + " " + mgr_nodes
mon_hostname = mon_nodes.split(' ')[0]
mon_hostname = str(mon_hostname)
gather_keys = './ceph-deploy gatherkeys' + " " + mon_hostname
deploy_mds = './ceph-deploy mds create' + " " + mds_nodes
if mon_nodes is None:
raise RuntimeError("no monitor nodes in the config file")
estatus_new = execute_ceph_deploy(new_mon)
if estatus_new != 0:
raise RuntimeError("ceph-deploy: new command failed")
log.info('adding config inputs...')
testdir = teuthology.get_testdir(ctx)
conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
if config.get('conf') is not None:
confp = config.get('conf')
for section, keys in confp.items():
lines = '[{section}]\n'.format(section=section)
ceph_admin.sudo_write_file(conf_path, lines, append=True)
for key, value in keys.items():
log.info("[%s] %s = %s" % (section, key, value))
lines = '{key} = {value}\n'.format(key=key, value=value)
ceph_admin.sudo_write_file(conf_path, lines, append=True)
# install ceph
dev_branch = ctx.config['branch']
branch = '--dev={branch}'.format(branch=dev_branch)
if ceph_branch:
option = ceph_branch
else:
option = branch
install_nodes = './ceph-deploy install ' + option + " " + all_nodes
estatus_install = execute_ceph_deploy(install_nodes)
if estatus_install != 0:
raise RuntimeError("ceph-deploy: Failed to install ceph")
# install ceph-test package too
install_nodes2 = './ceph-deploy install --tests ' + option + \
" " + all_nodes
estatus_install = execute_ceph_deploy(install_nodes2)
if estatus_install != 0:
raise RuntimeError("ceph-deploy: Failed to install ceph-test")
mon_create_nodes = './ceph-deploy mon create-initial'
# If the following fails, it is OK, it might just be that the monitors
# are taking way more than a minute/monitor to form quorum, so let's
# try the next block which will wait up to 15 minutes to gatherkeys.
execute_ceph_deploy(mon_create_nodes)
estatus_gather = execute_ceph_deploy(gather_keys)
if estatus_gather != 0:
raise RuntimeError("ceph-deploy: Failed during gather keys")
# install admin key on mons (ceph-create-keys doesn't do this any more)
mons = ctx.cluster.only(teuthology.is_type('mon'))
for remote in mons.remotes.keys():
execute_ceph_deploy('./ceph-deploy admin ' + remote.shortname)
# create osd's
if config.get('use-ceph-volume', False):
no_of_osds = ceph_volume_osd_create(ctx, config)
else:
# this method will only work with ceph-deploy v1.5.39 or older
no_of_osds = ceph_disk_osd_create(ctx, config)
if not skip_mgr:
execute_ceph_deploy(mgr_create)
if mds_nodes:
estatus_mds = execute_ceph_deploy(deploy_mds)
if estatus_mds != 0:
raise RuntimeError("ceph-deploy: Failed to deploy mds")
if config.get('test_mon_destroy') is not None:
for d in range(1, len(mon_node)):
mon_destroy_nodes = './ceph-deploy mon destroy' + \
" " + mon_node[d]
estatus_mon_d = execute_ceph_deploy(mon_destroy_nodes)
if estatus_mon_d != 0:
raise RuntimeError("ceph-deploy: Failed to delete monitor")
if config.get('wait-for-healthy', True) and no_of_osds >= 2:
is_healthy(ctx=ctx, config=None)
log.info('Setting up client nodes...')
conf_path = '/etc/ceph/ceph.conf'
admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
first_mon = teuthology.get_first_mon(ctx, config)
(mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
conf_data = mon0_remote.read_file(conf_path, sudo=True)
admin_keyring = mon0_remote.read_file(admin_keyring_path, sudo=True)
clients = ctx.cluster.only(teuthology.is_type('client'))
for remote, roles_for_host in clients.remotes.items():
for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
client_keyring = \
'/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
mon0_remote.run(
args=[
'cd',
'{tdir}'.format(tdir=testdir),
run.Raw('&&'),
'sudo', 'bash', '-c',
run.Raw('"'), 'ceph',
'auth',
'get-or-create',
'client.{id}'.format(id=id_),
'mds', 'allow',
'mon', 'allow *',
'osd', 'allow *',
run.Raw('>'),
client_keyring,
run.Raw('"'),
],
)
key_data = mon0_remote.read_file(
path=client_keyring,
sudo=True,
)
remote.sudo_write_file(
path=client_keyring,
data=key_data,
mode='0644'
)
remote.sudo_write_file(
path=admin_keyring_path,
data=admin_keyring,
mode='0644'
)
remote.sudo_write_file(
path=conf_path,
data=conf_data,
mode='0644'
)
if mds_nodes:
log.info('Configuring CephFS...')
Filesystem(ctx, create=True)
elif not config.get('only_mon'):
raise RuntimeError(
"The cluster is NOT operational due to insufficient OSDs")
# create rbd pool
ceph_admin.run(
args=[
'sudo', 'ceph', '--cluster', 'ceph',
'osd', 'pool', 'create', 'rbd', '128', '128'],
check_status=False)
ceph_admin.run(
args=[
'sudo', 'ceph', '--cluster', 'ceph',
'osd', 'pool', 'application', 'enable',
'rbd', 'rbd', '--yes-i-really-mean-it'
],
check_status=False)
yield
except Exception:
log.info(
"Error encountered, logging exception before tearing down ceph-deploy")
log.info(traceback.format_exc())
raise
finally:
if config.get('keep_running'):
return
log.info('Stopping ceph...')
ctx.cluster.run(args=['sudo', 'systemctl', 'stop', 'ceph.target'],
check_status=False)
time.sleep(4)
# and now just check for the processes themselves, as if upstart/sysvinit
# is lying to us. Ignore errors if the grep fails
ctx.cluster.run(args=['sudo', 'ps', 'aux', run.Raw('|'),
'grep', '-v', 'grep', run.Raw('|'),
'grep', 'ceph'], check_status=False)
ctx.cluster.run(args=['sudo', 'systemctl', run.Raw('|'),
'grep', 'ceph'], check_status=False)
if ctx.archive is not None:
# archive mon data, too
log.info('Archiving mon data...')
path = os.path.join(ctx.archive, 'data')
os.makedirs(path)
mons = ctx.cluster.only(teuthology.is_type('mon'))
for remote, roles in mons.remotes.items():
for role in roles:
if role.startswith('mon.'):
teuthology.pull_directory_tarball(
remote,
'/var/lib/ceph/mon',
path + '/' + role + '.tgz')
log.info('Compressing logs...')
run.wait(
ctx.cluster.run(
args=[
'time',
'sudo',
'find',
'/var/log/ceph',
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'--max-args=1',
'--max-procs=0',
'--verbose',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'-5',
'--verbose',
'--',
],
wait=False,
),
)
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
os.makedirs(path)
for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.shortname)
os.makedirs(sub)
teuthology.pull_directory(remote, '/var/log/ceph',
os.path.join(sub, 'log'))
# Prevent these from being undefined if the try block fails
all_nodes = get_all_nodes(ctx, config)
purge_nodes = './ceph-deploy purge' + " " + all_nodes
purgedata_nodes = './ceph-deploy purgedata' + " " + all_nodes
log.info('Purging package...')
execute_ceph_deploy(purge_nodes)
log.info('Purging data...')
execute_ceph_deploy(purgedata_nodes)
@contextlib.contextmanager
def cli_test(ctx, config):
"""
Exercise the most commonly used ceph-deploy cli commands, ensure they all
work, and also start up the init system.
"""
log.info('Ceph-deploy Test')
if config is None:
config = {}
test_branch = ''
conf_dir = teuthology.get_testdir(ctx) + "/cdtest"
def execute_cdeploy(admin, cmd, path):
"""Execute ceph-deploy commands """
"""Either use git path or repo path """
args = ['cd', conf_dir, run.Raw(';')]
if path:
args.append('{path}/ceph-deploy/ceph-deploy'.format(path=path))
else:
args.append('ceph-deploy')
args.append(run.Raw(cmd))
ec = admin.run(args=args, check_status=False).exitstatus
if ec != 0:
raise RuntimeError(
"failed during ceph-deploy cmd: {cmd} , ec={ec}".format(cmd=cmd, ec=ec))
if config.get('rhbuild'):
path = None
else:
path = teuthology.get_testdir(ctx)
# test on a branch from the config, e.g. wip-*, master or next
# packages for all distros should exist for wip* branches
if ctx.config.get('branch'):
branch = ctx.config.get('branch')
test_branch = ' --dev={branch} '.format(branch=branch)
mons = ctx.cluster.only(teuthology.is_type('mon'))
for node, role in mons.remotes.items():
admin = node
admin.run(args=['mkdir', conf_dir], check_status=False)
nodename = admin.shortname
system_type = teuthology.get_system_type(admin)
if config.get('rhbuild'):
admin.run(args=['sudo', 'yum', 'install', 'ceph-deploy', '-y'])
log.info('system type is %s', system_type)
osds = ctx.cluster.only(teuthology.is_type('osd'))
for remote, roles in osds.remotes.items():
devs = teuthology.get_scratch_devices(remote)
log.info("roles %s", roles)
if (len(devs) < 3):
log.error(
'Test needs minimum of 3 devices, only found %s',
str(devs))
raise RuntimeError("Needs minimum of 3 devices ")
conf_path = '{conf_dir}/ceph.conf'.format(conf_dir=conf_dir)
new_cmd = 'new ' + nodename
execute_cdeploy(admin, new_cmd, path)
if config.get('conf') is not None:
confp = config.get('conf')
for section, keys in confp.items():
lines = '[{section}]\n'.format(section=section)
admin.sudo_write_file(conf_path, lines, append=True)
for key, value in keys.items():
log.info("[%s] %s = %s" % (section, key, value))
lines = '{key} = {value}\n'.format(key=key, value=value)
admin.sudo_write_file(conf_path, lines, append=True)
new_mon_install = 'install {branch} --mon '.format(
branch=test_branch) + nodename
new_mgr_install = 'install {branch} --mgr '.format(
branch=test_branch) + nodename
new_osd_install = 'install {branch} --osd '.format(
branch=test_branch) + nodename
new_admin = 'install {branch} --cli '.format(branch=test_branch) + nodename
create_initial = 'mon create-initial '
mgr_create = 'mgr create ' + nodename
# either use create-keys or push command
push_keys = 'admin ' + nodename
execute_cdeploy(admin, new_mon_install, path)
execute_cdeploy(admin, new_mgr_install, path)
execute_cdeploy(admin, new_osd_install, path)
execute_cdeploy(admin, new_admin, path)
execute_cdeploy(admin, create_initial, path)
execute_cdeploy(admin, mgr_create, path)
execute_cdeploy(admin, push_keys, path)
for i in range(3):
zap_disk = 'disk zap ' + "{n}:{d}".format(n=nodename, d=devs[i])
prepare = 'osd prepare ' + "{n}:{d}".format(n=nodename, d=devs[i])
execute_cdeploy(admin, zap_disk, path)
execute_cdeploy(admin, prepare, path)
log.info("list files for debugging purpose to check file permissions")
admin.run(args=['ls', run.Raw('-lt'), conf_dir])
remote.run(args=['sudo', 'ceph', '-s'], check_status=False)
out = remote.sh('sudo ceph health')
log.info('Ceph health: %s', out.rstrip('\n'))
log.info("Waiting for cluster to become healthy")
with contextutil.safe_while(sleep=10, tries=6,
action='check health') as proceed:
while proceed():
out = remote.sh('sudo ceph health')
if (out.split(None, 1)[0] == 'HEALTH_OK'):
break
rgw_install = 'install {branch} --rgw {node}'.format(
branch=test_branch,
node=nodename,
)
rgw_create = 'rgw create ' + nodename
execute_cdeploy(admin, rgw_install, path)
execute_cdeploy(admin, rgw_create, path)
log.info('All ceph-deploy cli tests passed')
try:
yield
finally:
log.info("cleaning up")
ctx.cluster.run(args=['sudo', 'systemctl', 'stop', 'ceph.target'],
check_status=False)
time.sleep(4)
for i in range(3):
umount_dev = "{d}1".format(d=devs[i])
remote.run(args=['sudo', 'umount', run.Raw(umount_dev)])
cmd = 'purge ' + nodename
execute_cdeploy(admin, cmd, path)
cmd = 'purgedata ' + nodename
execute_cdeploy(admin, cmd, path)
log.info("Removing temporary dir")
admin.run(
args=[
'rm',
run.Raw('-rf'),
run.Raw(conf_dir)],
check_status=False)
if config.get('rhbuild'):
admin.run(args=['sudo', 'yum', 'remove', 'ceph-deploy', '-y'])
@contextlib.contextmanager
def single_node_test(ctx, config):
"""
Run the ceph-deploy cli test on a single node. For example::
- ceph-deploy.single_node_test: null
# rhbuild testing
- ceph-deploy.single_node_test:
rhbuild: 1.2.3
"""
log.info("Testing ceph-deploy on single node")
if config is None:
config = {}
overrides = ctx.config.get('overrides', {})
teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
if config.get('rhbuild'):
log.info("RH Build, Skip Download")
with contextutil.nested(
lambda: cli_test(ctx=ctx, config=config),
):
yield
else:
with contextutil.nested(
lambda: install_fn.ship_utilities(ctx=ctx, config=None),
lambda: download_ceph_deploy(ctx=ctx, config=config),
lambda: cli_test(ctx=ctx, config=config),
):
yield
@contextlib.contextmanager
def upgrade(ctx, config):
"""
Upgrade using ceph-deploy
eg:
ceph-deploy.upgrade:
# to upgrade to specific branch, use
branch:
stable: jewel
# to setup mgr node, use
setup-mgr-node: True
# to wait for cluster to be healthy after all upgrade, use
wait-for-healthy: True
roles: (upgrades the below roles serially)
mon.a
mon.b
osd.0
"""
roles = config.get('roles')
# get the roles that are mapped as per ceph-deploy
# roles are mapped for mon/mds eg: mon.a => mon.host_short_name
mapped_role = ctx.cluster.mapped_role
log.info("roles={r}, mapped_roles={mr}".format(r=roles, mr=mapped_role))
if config.get('branch'):
branch = config.get('branch')
(var, val) = list(branch.items())[0]
ceph_branch = '--{var}={val}'.format(var=var, val=val)
else:
# default to wip-branch under test
dev_branch = ctx.config['branch']
ceph_branch = '--dev={branch}'.format(branch=dev_branch)
# get the node used for initial deployment which is mon.a
mon_a = mapped_role.get('mon.a')
(ceph_admin,) = ctx.cluster.only(mon_a).remotes.keys()
testdir = teuthology.get_testdir(ctx)
cmd = './ceph-deploy install ' + ceph_branch
for role in roles:
# check if this role is mapped (mon or mds)
if mapped_role.get(role):
role = mapped_role.get(role)
remotes_and_roles = ctx.cluster.only(role).remotes
for remote, roles in remotes_and_roles.items():
nodename = remote.shortname
cmd = cmd + ' ' + nodename
log.info("Upgrading ceph on %s", nodename)
ceph_admin.run(
args=[
'cd',
'{tdir}/ceph-deploy'.format(tdir=testdir),
run.Raw('&&'),
run.Raw(cmd),
],
)
# restart all ceph services; ideally the upgrade would do this, but it does not
remote.run(
args=[
'sudo', 'systemctl', 'restart', 'ceph.target'
]
)
ceph_admin.run(args=['sudo', 'ceph', '-s'])
# workaround for http://tracker.ceph.com/issues/20950
# write the correct mgr key to disk
if config.get('setup-mgr-node', None):
mons = ctx.cluster.only(teuthology.is_type('mon'))
for remote, roles in mons.remotes.items():
remote.run(
args=[
run.Raw('sudo ceph auth get client.bootstrap-mgr'),
run.Raw('|'),
run.Raw('sudo tee'),
run.Raw('/var/lib/ceph/bootstrap-mgr/ceph.keyring')
]
)
if config.get('setup-mgr-node', None):
mgr_nodes = get_nodes_using_role(ctx, 'mgr')
mgr_nodes = " ".join(mgr_nodes)
mgr_install = './ceph-deploy install --mgr ' + ceph_branch + " " + mgr_nodes
mgr_create = './ceph-deploy mgr create' + " " + mgr_nodes
# install mgr
ceph_admin.run(
args=[
'cd',
'{tdir}/ceph-deploy'.format(tdir=testdir),
run.Raw('&&'),
run.Raw(mgr_install),
],
)
# create mgr
ceph_admin.run(
args=[
'cd',
'{tdir}/ceph-deploy'.format(tdir=testdir),
run.Raw('&&'),
run.Raw(mgr_create),
],
)
ceph_admin.run(args=['sudo', 'ceph', '-s'])
if config.get('wait-for-healthy', None):
wait_until_healthy(ctx, ceph_admin, use_sudo=True)
yield
@contextlib.contextmanager
def task(ctx, config):
"""
Set up and tear down a Ceph cluster.
For example::
tasks:
- install:
extras: yes
- ssh_keys:
- ceph-deploy:
branch:
stable: bobtail
mon_initial_members: 1
ceph-deploy-branch: my-ceph-deploy-branch
only_mon: true
keep_running: true
# either choose bluestore or filestore, default is bluestore
bluestore: True
# or
filestore: True
# skip install of mgr for old release using below flag
skip-mgr: True ( default is False )
# to use ceph-volume instead of ceph-disk
# ceph-disk can only be used with old ceph-deploy release from pypi
use-ceph-volume: true
tasks:
- install:
extras: yes
- ssh_keys:
- ceph-deploy:
branch:
dev: master
conf:
mon:
debug mon = 20
tasks:
- install:
extras: yes
- ssh_keys:
- ceph-deploy:
branch:
testing:
dmcrypt: yes
separate_journal_disk: yes
"""
if config is None:
config = {}
assert isinstance(config, dict), \
"task ceph-deploy only supports a dictionary for configuration"
overrides = ctx.config.get('overrides', {})
teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
if config.get('branch') is not None:
assert isinstance(
config['branch'], dict), 'branch must be a dictionary'
log.info('task ceph-deploy with config ' + str(config))
# we need to use 1.5.39-stable for testing jewel or master branch with
# ceph-disk
if config.get('use-ceph-volume', False) is False:
# check we are not testing specific branch
if config.get('ceph-deploy-branch', False) is False:
config['ceph-deploy-branch'] = '1.5.39-stable'
with contextutil.nested(
lambda: install_fn.ship_utilities(ctx=ctx, config=None),
lambda: download_ceph_deploy(ctx=ctx, config=config),
lambda: build_ceph_cluster(ctx=ctx, config=config),
):
yield
| 34,996 | 36.916576 | 90 |
py
null
ceph-main/qa/tasks/ceph_fuse.py
"""
Ceph FUSE client task
"""
import contextlib
import logging
from teuthology import misc
from tasks.cephfs.fuse_mount import FuseMount
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Mount/unmount a ``ceph-fuse`` client.
The config is optional and defaults to mounting on all clients. If
a config is given, it is expected to be a list of clients to do
this operation on. This lets you e.g. set up one client with
``ceph-fuse`` and another with ``kclient``.
``brxnet`` should be a private IPv4 address range; the default range is
[192.168.0.0/16]
Example that mounts all clients::
tasks:
- ceph:
- ceph-fuse:
- interactive:
- brxnet: [192.168.0.0/16]
Example that uses both ``kclient`` and ``ceph-fuse``::
tasks:
- ceph:
- ceph-fuse: [client.0]
- kclient: [client.1]
- interactive:
Example that enables valgrind:
tasks:
- ceph:
- ceph-fuse:
client.0:
valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
- interactive:
Example that stops an already-mounted client:
::
tasks:
- ceph:
- ceph-fuse: [client.0]
- ... do something that requires the FS mounted ...
- ceph-fuse:
client.0:
mounted: false
- ... do something that requires the FS unmounted ...
Example that adds more generous wait time for mount (for virtual machines):
tasks:
- ceph:
- ceph-fuse:
client.0:
mount_wait: 60 # default is 0, do not wait before checking /sys/
mount_timeout: 120 # default is 30, give up if /sys/ is not populated
- interactive:
Example that creates and mounts a subvol:
overrides:
ceph:
subvols:
create: 2
subvol_options: "--namespace-isolated --size 25000000000"
ceph-fuse:
client.0:
mount_subvol_num: 0
kclient:
client.1:
mount_subvol_num: 1
:param ctx: Context
:param config: Configuration
"""
log.info('Running ceph_fuse task...')
if config is None:
ids = misc.all_roles_of_type(ctx.cluster, 'client')
client_roles = [f'client.{id_}' for id_ in ids]
config = dict([r, dict()] for r in client_roles)
elif isinstance(config, list):
client_roles = config
config = dict([r, dict()] for r in client_roles)
elif isinstance(config, dict):
client_roles = filter(lambda x: 'client.' in x, config.keys())
else:
raise ValueError(f"Invalid config object: {config} ({config.__class__})")
log.info(f"config is {config}")
clients = list(misc.get_clients(ctx=ctx, roles=client_roles))
testdir = misc.get_testdir(ctx)
all_mounts = getattr(ctx, 'mounts', {})
mounted_by_me = {}
skipped = {}
remotes = set()
brxnet = config.get("brxnet", None)
# Construct any new FuseMount instances
overrides = ctx.config.get('overrides', {}).get('ceph-fuse', {})
top_overrides = dict(filter(lambda x: 'client.' not in x[0], overrides.items()))
for id_, remote in clients:
entity = f"client.{id_}"
client_config = config.get(entity)
if client_config is None:
client_config = {}
# top level overrides
misc.deep_merge(client_config, top_overrides)
# mount specific overrides
client_config_overrides = overrides.get(entity)
misc.deep_merge(client_config, client_config_overrides)
log.info(f"{entity} config is {client_config}")
remotes.add(remote)
auth_id = client_config.get("auth_id", id_)
cephfs_name = client_config.get("cephfs_name")
skip = client_config.get("skip", False)
if skip:
skipped[id_] = skip
continue
if id_ not in all_mounts:
fuse_mount = FuseMount(ctx=ctx, client_config=client_config,
test_dir=testdir, client_id=auth_id,
client_remote=remote, brxnet=brxnet,
cephfs_name=cephfs_name)
all_mounts[id_] = fuse_mount
else:
# Catch bad configs where someone has e.g. tried to use ceph-fuse and kcephfs for the same client
assert isinstance(all_mounts[id_], FuseMount)
if not config.get("disabled", False) and client_config.get('mounted', True):
mounted_by_me[id_] = {"config": client_config, "mount": all_mounts[id_]}
ctx.mounts = all_mounts
# Umount any pre-existing clients that we have not been asked to mount
for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()) - set(skipped.keys()):
mount = all_mounts[client_id]
if mount.is_mounted():
mount.umount_wait()
for remote in remotes:
FuseMount.cleanup_stale_netnses_and_bridge(remote)
# Mount any clients we have been asked to (default to mount all)
log.info('Mounting ceph-fuse clients...')
for info in mounted_by_me.values():
config = info["config"]
mount_x = info['mount']
mount_x.mount(mntopts=config.get('mntopts', []), mntargs=config.get('mntargs', []))
for info in mounted_by_me.values():
info["mount"].wait_until_mounted()
try:
yield all_mounts
finally:
log.info('Unmounting ceph-fuse clients...')
for info in mounted_by_me.values():
# Conditional because an inner context might have umounted it
mount = info["mount"]
if mount.is_mounted():
mount.umount_wait()
for remote in remotes:
FuseMount.cleanup_stale_netnses_and_bridge(remote)
| 5,946 | 30.973118 | 109 |
py