Dataset columns:
- repo: string (lengths 2 to 152)
- file: string (lengths 15 to 239)
- code: string (lengths 0 to 58.4M)
- file_length: int64 (0 to 58.4M)
- avg_line_length: float64 (0 to 1.81M)
- max_line_length: int64 (0 to 12.7M)
- extension_type: string (364 classes)
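For orientation, the sketch below shows one way rows with this schema could be loaded and filtered client-side. It is an assumed usage pattern: the dataset identifier "org/code-dataset" is a placeholder, not the real name of this dataset, and streaming access is an assumption about how the data is hosted.

from datasets import load_dataset

# Stream rows rather than downloading the full dataset (assumed hosting setup).
ds = load_dataset("org/code-dataset", split="train", streaming=True)

# Keep only YAML files from the ceph-main QA tree, mirroring the rows below.
for row in ds:
    if row["extension_type"] == "yaml" and row["file"].startswith("ceph-main/qa/suites/fs/"):
        print(row["file"], row["file_length"], row["max_line_length"])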
null
ceph-main/qa/suites/fs/upgrade/featureful_client/old_client/overrides/ignorelist_health.yaml
.qa/cephfs/overrides/ignorelist_health.yaml
43
43
43
yaml
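The three statistics columns appear to be derived from the "code" cell. The sketch below is an inferred reconstruction (nothing on this page defines them) that reproduces the values of the rows spot-checked here, including this one, whose content is a single 43-character symlink target.

def file_stats(code: str) -> dict:
    # A trailing newline yields an empty final part, which matches the averages shown.
    parts = code.split("\n")
    lengths = [len(p) for p in parts]
    return {
        "file_length": len(code),                        # total characters, newlines included
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
    }

print(file_stats(".qa/cephfs/overrides/ignorelist_health.yaml"))
# {'file_length': 43, 'avg_line_length': 43.0, 'max_line_length': 43}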
null
ceph-main/qa/suites/fs/upgrade/featureful_client/old_client/overrides/ignorelist_wrongly_marked_down.yaml
.qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
56
56
56
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/old_client/overrides/pg-warn.yaml
overrides:
  ceph:
    conf:
      global:
        mon pg warn min per osd: 0
78
12.166667
34
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/old_client/overrides/multimds/no.yaml
overrides:
  ceph:
    cephfs:
      max_mds: 1
48
8.8
16
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/old_client/overrides/multimds/yes.yaml
overrides:
  ceph:
    cephfs:
      max_mds: 2
48
8.8
16
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/old_client/tasks/0-octopus.yaml
meta:
- desc: |
    install ceph/octopus latest
tasks:
- install:
    branch: octopus
    exclude_packages:
      - librados3
      - ceph-mgr-dashboard
      - ceph-mgr-diskprediction-local
      - ceph-mgr-rook
      - ceph-mgr-cephadm
      - cephadm
      - ceph-volume
    extra_packages: ['librados2']
- print: "**** done installing octopus"
- ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(FS_
      - \(MDS_
      - \(OSD_
      - \(MON_DOWN\)
      - \(CACHE_POOL_
      - \(POOL_
      - \(MGR_DOWN\)
      - \(PG_
      - \(SMALLER_PGP_NUM\)
      - Monitor daemon marked osd
      - Behind on trimming
      - Manager daemon
    conf:
      global:
        mon warn on pool no app: false
        ms bind msgr2: false
- exec:
    osd.0:
    - ceph osd set-require-min-compat-client octopus
- print: "**** done ceph"
841
20.05
54
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/old_client/tasks/1-client.yaml
tasks:
- ceph-fuse:
- print: "**** done octopus client"
- workunit:
    clients:
      all:
        - suites/fsstress.sh
- print: "**** done fsstress"
149
15.666667
35
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/old_client/tasks/2-upgrade.yaml
overrides:
  ceph:
    log-ignorelist:
    - scrub mismatch
    - ScrubResult
    - wrongly marked
    - \(POOL_APP_NOT_ENABLED\)
    - \(SLOW_OPS\)
    - overall HEALTH_
    - \(MON_MSGR2_NOT_ENABLED\)
    - slow request
    conf:
      global:
        bluestore warn on legacy statfs: false
        bluestore warn on no per pool omap: false
      mon:
        mon warn on osd down out interval zero: false
tasks:
- mds_pre_upgrade:
- print: "**** done mds pre-upgrade sequence"
- install.upgrade:
    # upgrade the single cluster node, which is running all the mon/mds/osd/mgr daemons
    mon.a:
      branch: quincy
- print: "**** done install.upgrade the host"
- ceph.restart:
    daemons: [mon.*, mgr.*]
    mon-health-to-clog: false
    wait-for-healthy: false
- ceph.healthy:
- ceph.restart:
    daemons: [osd.*]
    wait-for-healthy: false
    wait-for-osds-up: true
- ceph.stop: [mds.*]
- ceph.restart:
    daemons: [mds.*]
    wait-for-healthy: false
    wait-for-osds-up: true
- exec:
    mon.a:
    - ceph osd dump -f json-pretty
    - ceph versions
    - ceph osd require-osd-release quincy
    - for f in `ceph osd pool ls` ; do ceph osd pool set $f pg_autoscale_mode off ; done
- ceph.healthy:
- print: "**** done ceph.restart"
1,244
24.408163
88
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/no.yaml
tasks:
- workunit:
    clients:
      all:
        - suites/fsstress.sh
- print: "**** done fsstress"
100
13.428571
29
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/quincy.yaml
overrides:
  ceph:
    log-ignorelist:
      - missing required features
tasks:
- exec:
    mon.a:
    - ceph fs dump --format=json-pretty
    - ceph fs required_client_features cephfs add metric_collect
- sleep:
    duration: 5
- fs.clients_evicted:
255
18.692308
66
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/bluestore-bitmap.yaml
../../../../../cephfs/objectstore-ec/bluestore-bitmap.yaml
58
58
58
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/clusters/1-mds-2-client-micro.yaml
.qa/cephfs/clusters/1-mds-2-client-micro.yaml
45
45
45
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/ignorelist_health.yaml
.qa/cephfs/overrides/ignorelist_health.yaml
43
43
43
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/ignorelist_wrongly_marked_down.yaml
.qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
56
56
56
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/pg-warn.yaml
overrides:
  ceph:
    conf:
      global:
        mon pg warn min per osd: 0
78
12.166667
34
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/multimds/no.yaml
overrides:
  ceph:
    cephfs:
      max_mds: 1
48
8.8
16
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/multimds/yes.yaml
overrides:
  ceph:
    cephfs:
      max_mds: 2
48
8.8
16
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/0-octopus.yaml
meta:
- desc: |
    install ceph/octopus latest
tasks:
- install:
    branch: octopus
    exclude_packages:
      - librados3
      - ceph-mgr-dashboard
      - ceph-mgr-diskprediction-local
      - ceph-mgr-rook
      - ceph-mgr-cephadm
      - cephadm
      - ceph-volume
    extra_packages: ['librados2']
- print: "**** done installing octopus"
- ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(FS_
      - \(MDS_
      - \(OSD_
      - \(MON_DOWN\)
      - \(CACHE_POOL_
      - \(POOL_
      - \(MGR_DOWN\)
      - \(PG_
      - \(SMALLER_PGP_NUM\)
      - Monitor daemon marked osd
      - Behind on trimming
      - Manager daemon
    conf:
      global:
        mon warn on pool no app: false
        ms bind msgr2: false
- exec:
    osd.0:
    - ceph osd set-require-min-compat-client octopus
- print: "**** done ceph"
841
20.05
54
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/1-client.yaml
nuke-on-error: false
overrides:
  nuke-on-error: false
tasks:
- ceph-fuse:
- print: "**** done octopus client"
#- workunit:
#    clients:
#      all:
#        - suites/fsstress.sh
- print: "**** done fsstress"
208
16.416667
35
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/2-upgrade.yaml
overrides:
  ceph:
    log-ignorelist:
    - scrub mismatch
    - ScrubResult
    - wrongly marked
    - \(POOL_APP_NOT_ENABLED\)
    - \(SLOW_OPS\)
    - overall HEALTH_
    - \(MON_MSGR2_NOT_ENABLED\)
    - slow request
    conf:
      global:
        bluestore warn on legacy statfs: false
        bluestore warn on no per pool omap: false
      mon:
        mon warn on osd down out interval zero: false
tasks:
- mds_pre_upgrade:
- print: "**** done mds pre-upgrade sequence"
- install.upgrade:
    # upgrade the single cluster node, which is running all the mon/mds/osd/mgr daemons
    mon.a:
      branch: quincy
- print: "**** done install.upgrade the host"
- ceph.restart:
    daemons: [mon.*, mgr.*]
    mon-health-to-clog: false
    wait-for-healthy: false
- ceph.healthy:
- ceph.restart:
    daemons: [osd.*]
    wait-for-healthy: false
    wait-for-osds-up: true
- ceph.stop: [mds.*]
- ceph.restart:
    daemons: [mds.*]
    wait-for-healthy: false
    wait-for-osds-up: true
- exec:
    mon.a:
    - ceph versions
    - ceph osd dump -f json-pretty
    - ceph osd require-osd-release quincy
    - for f in `ceph osd pool ls` ; do ceph osd pool set $f pg_autoscale_mode off ; done
- ceph.healthy:
- print: "**** done ceph.restart"
1,244
24.408163
88
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/3-client-upgrade.yaml
tasks:
- install.upgrade:
    client.0:
      branch: quincy
- print: "**** done install.upgrade on client.0"
- ceph-fuse:
    client.0:
      mounted: false
    client.1:
      skip: true
- ceph-fuse:
    client.0:
    client.1:
      skip: true
- print: "**** done remount client"
283
16.75
48
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml
overrides:
  ceph:
    log-ignorelist:
      - missing required features
tasks:
- exec:
    mon.a:
    - ceph fs dump --format=json-pretty
    - ceph fs required_client_features cephfs add metric_collect
- sleep:
    duration: 5
# client.0 is upgraded and client.1 is evicted by the MDS due to missing
# feature compat set
- fs.clients_evicted:
    clients:
      client.0: False
      client.1: True
405
21.555556
72
yaml
null
ceph-main/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/5-client-sanity.yaml
tasks:
- workunit:
    clients:
      client.0:
        - suites/fsstress.sh
- print: "**** done fsstress"
105
14.142857
29
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/bluestore-bitmap.yaml
.qa/cephfs/objectstore-ec/bluestore-bitmap.yaml
47
47
47
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/centos_8.stream_container_tools.yaml
.qa/distros/podman/centos_8.stream_container_tools.yaml
55
55
55
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/roles.yaml
roles:
- - host.a
  - client.0
  - osd.0
  - osd.1
  - osd.2
- - host.b
  - client.1
  - osd.3
  - osd.4
  - osd.5
115
8.666667
12
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/fail_fs/no.yaml
teuthology:
  variables:
    fail_fs: false
44
10.25
18
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/fail_fs/yes.yaml
teuthology:
  variables:
    fail_fs: true
43
10
17
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/ignorelist_health.yaml
.qa/cephfs/overrides/ignorelist_health.yaml
43
43
43
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/ignorelist_wrongly_marked_down.yaml
.qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
56
56
56
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/pg-warn.yaml
overrides:
  ceph:
    conf:
      global:
        mon pg warn min per osd: 0
78
12.166667
34
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/syntax.yaml
overrides:
  kclient:
    syntax: 'v1'
41
9.5
18
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client.yaml
tasks:
- kclient:
- print: "**** done client"
46
10.75
27
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/3-upgrade-mgr-staggered.yaml
teuthology:
  premerge: |
    if not yaml.teuthology.variables.fail_fs then reject() end
upgrade-tasks:
  sequential:
  - cephadm.shell:
      env: [sha1]
      host.a:
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
      - ceph config set global log_to_journald false --force
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
      - ceph versions | jq -e '.mgr | length == 1'
      - ceph versions | jq -e '.mgr | keys' | grep $sha1
      - ceph versions | jq -e '.overall | length == 2'
      - ceph orch upgrade check quay.ceph.io/ceph-ci/ceph:$sha1 | jq -e '.up_to_date | length == 2'
      - ceph orch ps
1,038
53.684211
212
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/5-upgrade-with-workload.yaml
tasks:
- parallel:
  - upgrade-tasks
  - workload-tasks
upgrade-tasks:
  sequential:
  - cephadm.shell:
      env: [sha1]
      host.a:
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
      - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
      - ceph config set global log_to_journald false --force
      - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
  - cephadm.shell:
      env: [sha1]
      host.a:
      - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph fs dump; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
      - ceph orch ps
      - ceph orch upgrade status
      - ceph health detail
      - ceph versions
      - echo "wait for servicemap items w/ changing names to refresh"
      - sleep 60
      - ceph orch ps
      - ceph versions
      - ceph versions | jq -e '.overall | length == 1'
      - ceph versions | jq -e '.overall | keys' | grep $sha1
workload-tasks:
  sequential:
  - workunit:
      clients:
        all:
          - suites/fsstress.sh
1,268
34.25
247
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/6-verify.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph fs dump
- fs.post_upgrade_checks:
83
13
25
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/pacific.yaml
meta:
- desc: |
    setup ceph/pacific
tasks:
- install:
    branch: pacific
    exclude_packages:
      - ceph-volume
- print: "**** done install task..."
- cephadm:
    image: quay.ceph.io/ceph-ci/ceph:pacific
    roleless: true
    cephadm_branch: pacific
    cephadm_git_url: https://github.com/ceph/ceph
    conf:
      osd:
        #set config option for which cls modules are allowed to be loaded / used
        osd_class_load_list: "*"
        osd_class_default_list: "*"
- print: "**** done end installing pacific cephadm ..."
- cephadm.shell:
    host.a:
      - ceph config set mgr mgr/cephadm/use_repo_digest true --force
- print: "**** done cephadm.shell ceph config set mgr..."
- cephadm.shell:
    host.a:
      - ceph orch status
      - ceph orch ps
      - ceph orch ls
      - ceph orch host ls
      - ceph orch device ls
842
24.545455
80
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/v16.2.4.yaml
teuthology:
  postmerge:
    - if yaml.teuthology.variables.fail_fs then reject() end
meta:
- desc: |
    setup ceph/pacific v16.2.4
tasks:
# Disable metrics sending by kclient as it may crash (assert) a v16.2.4 MDS
- pexec:
    clients:
      - sudo modprobe -r ceph
      - sudo modprobe ceph disable_send_metrics=on
- install:
    tag: v16.2.4
    exclude_packages:
      - ceph-volume
- print: "**** done install task..."
- cephadm:
    roleless: true
    image: quay.io/ceph/ceph:v16.2.4
    cephadm_branch: v16.2.4
    cephadm_git_url: https://github.com/ceph/ceph
    # needed for v16.2.4 due to --skip-admin-label
    avoid_pacific_features: true
- print: "**** done starting v16.2.4"
- cephadm.shell:
    host.a:
      - ceph orch status
      - ceph orch ps
      - ceph orch ls
      - ceph orch host ls
      - ceph orch device ls
844
23.142857
75
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/0-create.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph fs volume create cephfs --placement=4
      - ceph fs dump
108
17.166667
50
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/4-verify.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph fs dump
      - ceph --format=json fs dump | jq -e ".filesystems | length == 1"
      - while ! ceph --format=json mds versions | jq -e ". | add == 4"; do sleep 1; done
- fs.pre_upgrade_save:
241
29.25
88
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/1.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph fs set cephfs max_mds 1
73
13.8
36
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/1-ranks/2.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph fs set cephfs max_mds 2
73
13.8
36
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/no.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph fs set cephfs allow_standby_replay false
90
17.2
53
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/2-allow_standby_replay/yes.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph fs set cephfs allow_standby_replay true
89
17
52
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/3-inline/no.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph fs set cephfs inline_data false
81
15.4
44
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/1-volume/3-inline/yes.yaml
tasks:
- cephadm.shell:
    host.a:
      - ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
110
21.2
73
yaml
null
ceph-main/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/4-config-upgrade/fail_fs.yaml
teuthology:
  premerge: |
    local set = yaml.teuthology.variables.fail_fs
    local cmd = "ceph config set mgr mgr/orchestrator/fail_fs "..tostring(set)
    local cmds = yaml_fragment['upgrade-tasks'].sequential[0]['cephadm.shell']['host.a']
    if set then
      py_attrgetter(cmds).append "ceph config set mgr mgr/orchestrator/fail_fs true"
    else
      py_attrgetter(cmds).append "ceph config set mgr mgr/orchestrator/fail_fs false || true"
    end
upgrade-tasks:
  sequential:
  - cephadm.shell:
      env: [sha1]
      host.a: []
609
37.125
101
yaml
null
ceph-main/qa/suites/fs/upgrade/nofs/bluestore-bitmap.yaml
.qa/cephfs/objectstore-ec/bluestore-bitmap.yaml
47
47
47
yaml
null
ceph-main/qa/suites/fs/upgrade/nofs/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/fs/upgrade/nofs/no-mds-cluster.yaml
roles:
- [mon.a, mon.b, mon.c, mgr.x, mgr.y, osd.0, osd.1, osd.2, osd.3]
openstack:
- volumes: # attached to each instance
    count: 4
    size: 10 # GB
154
21.142857
65
yaml
null
ceph-main/qa/suites/fs/upgrade/nofs/overrides/ignorelist_health.yaml
.qa/cephfs/overrides/ignorelist_health.yaml
43
43
43
yaml
null
ceph-main/qa/suites/fs/upgrade/nofs/overrides/ignorelist_wrongly_marked_down.yaml
.qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
56
56
56
yaml
null
ceph-main/qa/suites/fs/upgrade/nofs/overrides/pg-warn.yaml
overrides:
  ceph:
    conf:
      global:
        mon pg warn min per osd: 0
78
12.166667
34
yaml
null
ceph-main/qa/suites/fs/upgrade/nofs/tasks/0-pacific.yaml
meta:
- desc: |
    install ceph/pacific latest
tasks:
- install:
    branch: pacific
    exclude_packages:
      - librados3
      - ceph-mgr-dashboard
      - ceph-mgr-diskprediction-local
      - ceph-mgr-rook
      - ceph-mgr-cephadm
      - cephadm
      - ceph-volume
    extra_packages: ['librados2']
- print: "**** done installing pacific"
- ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(FS_
      - \(MDS_
      - \(OSD_
      - \(MON_DOWN\)
      - \(CACHE_POOL_
      - \(POOL_
      - \(MGR_DOWN\)
      - \(PG_
      - \(SMALLER_PGP_NUM\)
      - Monitor daemon marked osd
      - Behind on trimming
      - Manager daemon
    conf:
      global:
        mon warn on pool no app: false
        ms bind msgr2: false
- exec:
    osd.0:
    - ceph osd set-require-min-compat-client pacific
- print: "**** done ceph"
841
20.05
54
yaml
null
ceph-main/qa/suites/fs/upgrade/nofs/tasks/1-upgrade.yaml
overrides:
  ceph:
    log-ignorelist:
    - scrub mismatch
    - ScrubResult
    - wrongly marked
    - \(POOL_APP_NOT_ENABLED\)
    - \(SLOW_OPS\)
    - overall HEALTH_
    - \(MON_MSGR2_NOT_ENABLED\)
    - slow request
    conf:
      global:
        bluestore warn on legacy statfs: false
        bluestore warn on no per pool omap: false
      mon:
        mon warn on osd down out interval zero: false
tasks:
- print: "*** upgrading, no cephfs present"
- exec:
    mon.a:
    - ceph fs dump
- install.upgrade:
    mon.a:
- print: "**** done install.upgrade"
- ceph.restart:
    daemons: [mon.*, mgr.*]
    mon-health-to-clog: false
    wait-for-healthy: false
- ceph.healthy:
- ceph.restart:
    daemons: [osd.*]
    wait-for-healthy: false
    wait-for-osds-up: true
- exec:
    mon.a:
    - ceph versions
    - ceph osd dump -f json-pretty
    - ceph fs dump
    - ceph osd require-osd-release quincy
    - for f in `ceph osd pool ls` ; do ceph osd pool set $f pg_autoscale_mode off ; done
- ceph.healthy:
- print: "**** done ceph.restart"
1,049
22.333333
88
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/bluestore-bitmap.yaml
../../../../cephfs/objectstore-ec/bluestore-bitmap.yaml
55
55
55
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/clusters/1-mds-1-client-micro.yaml
.qa/cephfs/clusters/1-mds-1-client-micro.yaml
45
45
45
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/overrides/ignorelist_health.yaml
.qa/cephfs/overrides/ignorelist_health.yaml
43
43
43
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/overrides/ignorelist_wrongly_marked_down.yaml
.qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
56
56
56
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/overrides/pg-warn.yaml
overrides:
  ceph:
    conf:
      global:
        mon pg warn min per osd: 0
78
12.166667
34
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/tasks/0-from/nautilus.yaml
meta:
- desc: |
    install ceph/nautilus latest
tasks:
- install:
    branch: nautilus
    exclude_packages:
      - cephadm
      - ceph-mgr-cephadm
      - ceph-immutable-object-cache
      - python3-rados
      - python3-rgw
      - python3-rbd
      - python3-cephfs
      - ceph-volume
    extra_packages:
      - python-rados
      - python-rgw
      - python-rbd
      - python-cephfs
    # For kernel_untar_build workunit
    extra_system_packages:
      - bison
      - flex
      - elfutils-libelf-devel
      - openssl-devel
      - NetworkManager
      - iproute
      - util-linux
- print: "**** done installing nautilus"
- ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(FS_
      - \(MDS_
      - \(OSD_
      - \(MON_DOWN\)
      - \(CACHE_POOL_
      - \(POOL_
      - \(MGR_DOWN\)
      - \(PG_
      - \(SMALLER_PGP_NUM\)
      - Monitor daemon marked osd
      - Behind on trimming
      - Manager daemon
    conf:
      global:
        mon warn on pool no app: false
        ms bind msgr2: false
- exec:
    osd.0:
    - ceph osd set-require-min-compat-client nautilus
- print: "**** done ceph"
1,130
19.944444
55
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/tasks/0-from/pacific.yaml
meta:
- desc: |
    install ceph/pacific latest
tasks:
- install:
    branch: pacific
    exclude_packages:
      - cephadm
      - ceph-mgr-cephadm
      - ceph-immutable-object-cache
      - python3-rados
      - python3-rgw
      - python3-rbd
      - python3-cephfs
      - ceph-volume
    extra_packages:
      - python-rados
      - python-rgw
      - python-rbd
      - python-cephfs
    # For kernel_untar_build workunit
    extra_system_packages:
      - bison
      - flex
      - elfutils-libelf-devel
      - openssl-devel
      - NetworkManager
      - iproute
      - util-linux
- print: "**** done installing pacific"
- ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(FS_
      - \(MDS_
      - \(OSD_
      - \(MON_DOWN\)
      - \(CACHE_POOL_
      - \(POOL_
      - \(MGR_DOWN\)
      - \(PG_
      - \(SMALLER_PGP_NUM\)
      - Monitor daemon marked osd
      - Behind on trimming
      - Manager daemon
    conf:
      global:
        mon warn on pool no app: false
        ms bind msgr2: false
- exec:
    osd.0:
    - ceph osd set-require-min-compat-client pacific
- print: "**** done ceph"
1,126
19.87037
54
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/tasks/2-workload/new_ops/1-client-sanity.yaml
tasks:
- cephfs_test_runner:
    modules:
      - tasks.cephfs.test_newops
81
15.4
34
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/tasks/2-workload/new_ops/0-clients/fuse-upgrade.yaml
teuthology:
  postmerge:
    - if not is_fuse() then reject() end
tasks:
- ceph-fuse:
    client.0:
      mounted: false
- print: "**** done unmount client.0"
- install.upgrade:
    client.0:
- print: "**** done install.upgrade on client.0"
- ceph.healthy:
- ceph-fuse:
    client.0:
- print: "**** done remount client"
320
19.0625
48
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/tasks/2-workload/new_ops/0-clients/kclient.yaml
teuthology:
  postmerge:
    # Once we can be sure the distro kernels include the newops fixes, the
    # is_kupstream() restriction can be removed. Nautilus only supports the
    # 'v1' mount syntax, so leave the mount-syntax restriction alone.
    - if not is_kupstream() or syntax_version() == 'v2' then reject() end
tasks:
359
39
78
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/tasks/2-workload/stress_tests/0-client-upgrade.yaml
teuthology:
  postmerge:
    - if not is_fuse() then reject() end
tasks:
- ceph-fuse:
    client.0:
      mounted: false
- print: "**** done unmount client.0"
- install.upgrade:
    client.0:
- print: "**** done install.upgrade on client.0"
- ceph-fuse:
    client.0:
- print: "**** done remount client.0"
306
19.466667
48
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/tasks/2-workload/stress_tests/1-tests/blogbench.yaml
.qa/suites/fs/workload/tasks/5-workunit/suites/blogbench.yaml
61
61
61
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/tasks/2-workload/stress_tests/1-tests/dbench.yaml
.qa/suites/fs/workload/tasks/5-workunit/suites/dbench.yaml
58
58
58
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/tasks/2-workload/stress_tests/1-tests/fsstress.yaml
.qa/suites/fs/workload/tasks/5-workunit/suites/fsstress.yaml
60
60
60
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/tasks/2-workload/stress_tests/1-tests/iozone.yaml
.qa/suites/fs/workload/tasks/5-workunit/suites/iozone.yaml
58
58
58
yaml
null
ceph-main/qa/suites/fs/upgrade/upgraded_client/tasks/2-workload/stress_tests/1-tests/kernel_untar_build.yaml
.qa/suites/fs/workload/tasks/5-workunit/kernel_untar_build.yaml
63
63
63
yaml
null
ceph-main/qa/suites/fs/valgrind/centos_latest.yaml
.qa/distros/supported/centos_latest.yaml
40
40
40
yaml
null
ceph-main/qa/suites/fs/valgrind/debug.yaml
overrides:
  install:
    ceph:
      debuginfo: true
54
10
21
yaml
null
ceph-main/qa/suites/fs/valgrind/mirror/cephfs-mirror/one-per-cluster.yaml
meta:
- desc: run one cephfs-mirror daemon on primary cluster
tasks:
- cephfs-mirror:
    client: client.mirror
    valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
186
22.375
72
yaml
null
ceph-main/qa/suites/fs/valgrind/mirror/clients/mirror.yaml
meta:
- desc: configure the permissions for client.mirror
overrides:
  ceph:
    conf:
      client:
        debug cephfs_mirror: 20
        log to stderr: false
      # make these predictable
      client.mirror:
        admin socket: /var/run/ceph/cephfs-mirror.asok
        pid file: /var/run/ceph/cephfs-mirror.pid
tasks:
- exec:
    client.mirror:
      - "sudo ceph auth caps client.mirror mon 'profile cephfs-mirror' mds 'allow r' osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' mgr 'allow r'"
    client.mirror_remote:
      - "sudo ceph auth caps client.mirror_remote mon 'allow r' mds 'allow rwps' osd 'allow rw tag cephfs *=*' mgr 'allow r'"
669
34.263158
163
yaml
null
ceph-main/qa/suites/fs/valgrind/mirror/cluster/1-node.yaml
meta:
- desc: 1 ceph cluster with 1 mon, 1 mgr, 3 osds, 5 mdss
roles:
- - mon.a
  - mgr.x
  - mds.a
  - mds.b
  - mds.c
  - mds.d
  - mds.e
  - osd.0
  - osd.1
  - osd.2
  - client.0
  - client.1
  - client.mirror
  - client.mirror_remote
239
12.333333
56
yaml
null
ceph-main/qa/suites/fs/valgrind/mirror/mount/fuse.yaml
tasks:
  - ceph-fuse: [client.0, client.1]
43
13.666667
35
yaml
null
ceph-main/qa/suites/fs/valgrind/mirror/overrides/whitelist_health.yaml
overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(FS_DEGRADED\)
      - \(MDS_FAILED\)
      - \(MDS_DEGRADED\)
      - \(FS_WITH_FAILED_MDS\)
      - \(MDS_DAMAGE\)
      - \(MDS_ALL_DOWN\)
      - \(MDS_UP_LESS_THAN_MAX\)
      - \(FS_INLINE_DATA_DEPRECATED\)
      - Reduced data availability
      - Degraded data redundancy
352
22.533333
37
yaml
null
ceph-main/qa/suites/fs/valgrind/mirror/tasks/mirror.yaml
overrides:
  ceph:
    conf:
      mgr:
        debug client: 10
tasks:
- cephfs_test_runner:
    modules:
      - tasks.cephfs.test_mirroring.TestMirroring
164
14
51
yaml
null
ceph-main/qa/suites/fs/verify/clusters/1a5s-mds-1c-client.yaml
.qa/cephfs/clusters/1a5s-mds-1c-client.yaml
43
43
43
yaml
null
ceph-main/qa/suites/fs/verify/distro/centos_8.yaml
.qa/distros/all/centos_8.yaml
29
29
29
yaml
null
ceph-main/qa/suites/fs/verify/distro/rhel_8.yaml
.qa/distros/all/rhel_8.yaml
27
27
27
yaml
null
ceph-main/qa/suites/fs/verify/distro/ubuntu/latest.yaml
.qa/distros/all/ubuntu_20.04.yaml
33
33
33
yaml
null
ceph-main/qa/suites/fs/verify/distro/ubuntu/overrides.yaml
overrides:
  ceph:
    valgrind:
      exit_on_first_error: false
66
12.4
32
yaml
null
ceph-main/qa/suites/fs/verify/mount/fuse.yaml
.qa/cephfs/mount/fuse.yaml
26
26
26
yaml
null
ceph-main/qa/suites/fs/verify/mount/kclient/k-testing.yaml
.qa/cephfs/mount/kclient/overrides/distro/testing/k-testing.yaml
64
64
64
yaml
null
ceph-main/qa/suites/fs/verify/mount/kclient/mount.yaml
.qa/cephfs/mount/kclient/mount.yaml
35
35
35
yaml
null
ceph-main/qa/suites/fs/verify/mount/kclient/ms-die-on-skipped.yaml
.qa/cephfs/mount/kclient/overrides/ms-die-on-skipped.yaml
57
57
57
yaml
null
ceph-main/qa/suites/fs/verify/overrides/ignorelist_health.yaml
.qa/cephfs/overrides/ignorelist_health.yaml
43
43
43
yaml
null
ceph-main/qa/suites/fs/verify/overrides/ignorelist_wrongly_marked_down.yaml
.qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
56
56
56
yaml
null
ceph-main/qa/suites/fs/verify/overrides/mon-debug.yaml
overrides:
  ceph:
    conf:
      mon:
        debug ms: 1
        debug mon: 20
82
10.857143
21
yaml
null
ceph-main/qa/suites/fs/verify/overrides/session_timeout.yaml
.qa/cephfs/overrides/session_timeout.yaml
41
41
41
yaml
null
ceph-main/qa/suites/fs/verify/ranks/1.yaml
0
0
0
yaml
null
ceph-main/qa/suites/fs/verify/ranks/3.yaml
overrides:
  ceph:
    cephfs:
      max_mds: 3
  check-counter:
    counters:
      mds:
        - mds.exported
        - mds.imported
136
12.7
22
yaml
null
ceph-main/qa/suites/fs/verify/ranks/5.yaml
overrides:
  ceph:
    cephfs:
      max_mds: 5
  check-counter:
    counters:
      mds:
        - mds.exported
        - mds.imported
136
12.7
22
yaml
null
ceph-main/qa/suites/fs/verify/tasks/dbench.yaml
.qa/cephfs/tasks/cfuse_workunit_suites_dbench.yaml
50
50
50
yaml
null
ceph-main/qa/suites/fs/verify/tasks/fsstress.yaml
.qa/cephfs/tasks/cfuse_workunit_suites_fsstress.yaml
52
52
52
yaml
null
ceph-main/qa/suites/fs/verify/validater/lockdep.yaml
overrides:
  ceph:
    conf:
      global:
        lockdep: true
65
10
21
yaml