Dataset columns:
  repo             string, length 2 to 152
  file             string, length 15 to 239
  code             string, length 0 to 58.4M
  file_length      int64, 0 to 58.4M
  avg_line_length  float64, 0 to 1.81M
  max_line_length  int64, 0 to 12.7M
  extension_type   string, 364 classes
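The columns above are all that is needed to slice this table programmatically. Below is a minimal sketch, assuming the table is published as a Hugging Face dataset that can be loaded with the `datasets` library; the dataset ID and the "train" split name are hypothetical placeholders, not taken from this page.

from datasets import load_dataset

# Hypothetical dataset ID and split -- substitute the real ones for this dataset.
ds = load_dataset("example-org/ceph-code-files", split="train")

# Column names follow the schema above: repo, file, code, file_length,
# avg_line_length, max_line_length, extension_type.
ceph_qa_yaml = ds.filter(
    lambda row: row["extension_type"] == "yaml"
    and row["file"].startswith("ceph-main/qa/")
)

for row in ceph_qa_yaml.select(range(3)):
    print(row["file"], row["file_length"], "chars, max line", row["max_line_length"])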
null
ceph-main/qa/distros/supported-random-distro$/ubuntu_latest.yaml
../all/ubuntu_latest.yaml
25
25
25
yaml
null
ceph-main/qa/distros/supported/centos_latest.yaml
../all/centos_8.yaml
20
20
20
yaml
null
ceph-main/qa/distros/supported/rhel_latest.yaml
../all/rhel_8.yaml
18
18
18
yaml
null
ceph-main/qa/distros/supported/ubuntu_20.04.yaml
../all/ubuntu_20.04.yaml
24
24
24
yaml
null
ceph-main/qa/distros/supported/ubuntu_latest.yaml
../all/ubuntu_latest.yaml
25
25
25
yaml
null
ceph-main/qa/erasure-code/ec-feature-plugins-v2.yaml
# # Test the expected behavior of the # # CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2 # # feature. # roles: - - mon.a - mon.b - osd.0 - osd.1 - - osd.2 - mon.c - mgr.x tasks: # # Install firefly # - install: branch: firefly - ceph: fs: xfs # # We don't need mon.c for now: it will be used later to make sure an old # mon cannot join the quorum once the feature has been activated # - ceph.stop: daemons: [mon.c] - exec: mon.a: - |- ceph osd erasure-code-profile set WRONG plugin=WRONG ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG" # # Partial upgrade, osd.2 is not upgraded # - install.upgrade: osd.0: # # a is the leader # - ceph.restart: daemons: [mon.a] wait-for-healthy: false - exec: mon.a: - |- ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by: the monitor cluster" - ceph.restart: daemons: [mon.b, osd.1, osd.0] wait-for-healthy: false wait-for-osds-up: true # # The lrc plugin cannot be used because osd.2 is not upgraded yet # and would crash. # - exec: mon.a: - |- ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by: osd.2" # # Taking osd.2 out, the rest of the cluster is upgraded # - ceph.stop: daemons: [osd.2] - sleep: duration: 60 # # Creating an erasure code profile using the lrc plugin now works # - exec: mon.a: - "ceph osd erasure-code-profile set profile-lrc plugin=lrc" # # osd.2 won't be able to join the because is does not support the feature # - ceph.restart: daemons: [osd.2] wait-for-healthy: false - sleep: duration: 60 - exec: osd.2: - |- grep "protocol feature.*missing 100000000000" /var/log/ceph/ceph-osd.2.log # # mon.c won't be able to join the because it does not support the feature # - ceph.restart: daemons: [mon.c] wait-for-healthy: false - sleep: duration: 60 - exec: mon.c: - |- grep "missing.*feature" /var/log/ceph/ceph-mon.c.log
2,094
20.161616
114
yaml
null
ceph-main/qa/erasure-code/ec-rados-default.yaml
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    ec_pool: true
    write_append_excl: false
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25
- print: "**** done rados ec task"
388
18.45
36
yaml
null
ceph-main/qa/erasure-code/ec-rados-parallel.yaml
workload:
  parallel:
    - rados:
        clients: [client.0]
        ops: 4000
        objects: 50
        ec_pool: true
        write_append_excl: false
        op_weights:
          read: 100
          write: 0
          append: 100
          delete: 50
          snap_create: 50
          snap_remove: 50
          rollback: 50
          copy_from: 50
          setattr: 25
          rmattr: 25
    - print: "**** done rados ec parallel"
427
19.380952
42
yaml
null
ceph-main/qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    ec_pool: true
    write_append_excl: false
    erasure_code_profile:
      name: clay42profile
      plugin: clay
      k: 4
      m: 2
      technique: reed_sol_van
      crush-failure-domain: osd
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25
472
17.192308
31
yaml
null
ceph-main/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    ec_pool: true
    min_size: 2
    write_append_excl: false
    erasure_code_profile:
      name: isaprofile
      plugin: isa
      k: 2
      m: 1
      technique: reed_sol_van
      crush-failure-domain: osd
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25
484
16.962963
31
yaml
null
ceph-main/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    ec_pool: true
    write_append_excl: false
    erasure_code_profile:
      name: jerasure21profile
      plugin: jerasure
      k: 2
      m: 1
      technique: reed_sol_van
      crush-failure-domain: osd
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25
480
17.5
31
yaml
null
ceph-main/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml
#
# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
# the default value of 4096. It is also not a multiple of 1024*1024 and
# creates situations where rounding rules during recovery become
# necessary.
#
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    ec_pool: true
    write_append_excl: false
    erasure_code_profile:
      name: jerasure31profile
      plugin: jerasure
      k: 3
      m: 1
      technique: reed_sol_van
      crush-failure-domain: osd
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25
706
21.09375
70
yaml
null
ceph-main/qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    ec_pool: true
    write_append_excl: false
    erasure_code_profile:
      name: jerasure21profile
      plugin: jerasure
      k: 4
      m: 2
      technique: reed_sol_van
      crush-failure-domain: osd
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25
480
17.5
31
yaml
null
ceph-main/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
tasks:
- rados:
    clients: [client.0]
    ops: 400
    objects: 50
    ec_pool: true
    write_append_excl: false
    erasure_code_profile:
      name: lrcprofile
      plugin: lrc
      k: 4
      m: 2
      l: 3
      crush-failure-domain: osd
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25
448
16.269231
31
yaml
null
ceph-main/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
tasks:
- rados:
    clients: [client.0]
    ops: 400
    objects: 50
    ec_pool: true
    write_append_excl: false
    erasure_code_profile:
      name: shecprofile
      plugin: shec
      k: 4
      m: 3
      c: 2
      crush-failure-domain: osd
    op_weights:
      read: 100
      write: 0
      append: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25
450
16.346154
31
yaml
null
ceph-main/qa/erasure-code/ec-rados-sequential.yaml
workload:
  sequential:
    - rados:
        clients: [client.0]
        ops: 4000
        objects: 50
        ec_pool: true
        write_append_excl: false
        op_weights:
          read: 100
          write: 0
          append: 100
          delete: 50
          snap_create: 50
          snap_remove: 50
          rollback: 50
          copy_from: 50
          setattr: 25
          rmattr: 25
    - print: "**** done rados ec sequential"
431
19.571429
44
yaml
null
ceph-main/qa/libceph/trivial_libceph.c
#define _FILE_OFFSET_BITS 64 #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/statvfs.h> #include "../../src/include/cephfs/libcephfs.h" #define MB64 (1<<26) int main(int argc, const char **argv) { struct ceph_mount_info *cmount; int ret, fd, len; char buf[1024]; if (argc < 3) { fprintf(stderr, "usage: ./%s <conf> <file>\n", argv[0]); exit(1); } ret = ceph_create(&cmount, NULL); if (ret) { fprintf(stderr, "ceph_create=%d\n", ret); exit(1); } ret = ceph_conf_read_file(cmount, argv[1]); if (ret) { fprintf(stderr, "ceph_conf_read_file=%d\n", ret); exit(1); } ret = ceph_conf_parse_argv(cmount, argc, argv); if (ret) { fprintf(stderr, "ceph_conf_parse_argv=%d\n", ret); exit(1); } ret = ceph_mount(cmount, NULL); if (ret) { fprintf(stderr, "ceph_mount=%d\n", ret); exit(1); } ret = ceph_chdir(cmount, "/"); if (ret) { fprintf(stderr, "ceph_chdir=%d\n", ret); exit(1); } fd = ceph_open(cmount, argv[2], O_CREAT|O_TRUNC|O_RDWR, 0777); if (fd < 0) { fprintf(stderr, "ceph_open=%d\n", fd); exit(1); } memset(buf, 'a', sizeof(buf)); len = ceph_write(cmount, fd, buf, sizeof(buf), 0); fprintf(stdout, "wrote %d bytes\n", len); ceph_shutdown(cmount); return 0; }
1,709
23.428571
72
c
null
ceph-main/qa/machine_types/schedule_rados_ovh.sh
#!/usr/bin/env bash # $1 - part # $2 - branch name # $3 - machine name # $4 - email address # $5 - filter out (this arg is to be at the end of the command line for now) ## example #1 ## (date +%U) week number ## % 2 - mod 2 (e.g. 0,1,0,1 ...) ## * 7 - multiplied by 7 (e.g. 0,7,0,7...) ## $1 day of the week (0-6) ## /14 for 2 weeks ## example #2 ## (date +%U) week number ## % 4 - mod 4 (e.g. 0,1,2,3,0,1,2,3 ...) ## * 7 - multiplied by 7 (e.g. 0,7,14,21,0,7,14,21...) ## $1 day of the week (0-6) ## /28 for 4 weeks echo "Scheduling " $2 " branch" if [ $2 = "master" ] ; then # run master branch with --newest option looking for good sha1 7 builds back teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/28 --newest 7 -e $4 ~/vps.yaml $5 elif [ $2 = "jewel" ] ; then # run jewel branch with /40 jobs teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/40 -e $4 ~/vps.yaml $5 else # run NON master branches without --newest teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/28 -e $4 ~/vps.yaml $5 fi
1,217
33.8
145
sh
null
ceph-main/qa/machine_types/schedule_subset.sh
#!/bin/bash -e #command line => CEPH_BRANCH=<branch>; MACHINE_NAME=<machine_type>; SUITE_NAME=<suite>; ../schedule_subset.sh <day_of_week> $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL <$FILTER> partitions="$1" shift branch="$1" shift machine="$1" shift suite="$1" shift email="$1" shift kernel="$1" shift # rest of arguments passed directly to teuthology-suite echo "Scheduling $branch branch" teuthology-suite -v -c "$branch" -m "$machine" -k "$kernel" -s "$suite" --ceph-repo https://git.ceph.com/ceph.git --suite-repo https://git.ceph.com/ceph.git --subset "$((RANDOM % partitions))/$partitions" --newest 100 -e "$email" "$@"
649
29.952381
234
sh
null
ceph-main/qa/machine_types/vps.yaml
overrides:
  ceph:
    conf:
      global:
        osd heartbeat grace: 100
        # this line to address issue #1017
        mon lease: 15
        mon lease ack timeout: 25
  s3tests:
    idle_timeout: 1200
  ceph-fuse:
    client.0:
      mount_wait: 60
      mount_timeout: 120
285
18.066667
43
yaml
null
ceph-main/qa/mds/test_anchortable.sh
#!/usr/bin/env bash set -x mkdir links for f in `seq 1 8` do mkdir $f for g in `seq 1 20` do touch $f/$g ln $f/$g links/$f.$g done done for f in `seq 1 8` do echo testing failure point $f bash -c "pushd . ; cd $bindir ; sleep 10; ./ceph -c $conf mds tell \* injectargs \"--mds_kill_mdstable_at $f\" ; popd" & bash -c "pushd . ; cd $bindir ; sleep 11 ; ./init-ceph -c $conf start mds ; popd" & for g in `seq 1 20` do rm $f/$g rm links/$f.$g sleep 1 done done
506
17.107143
124
sh
null
ceph-main/qa/mds/test_mdstable_failures.sh
#!/usr/bin/env bash set -x for f in `seq 1 8` do echo testing failure point $f pushd . ; cd $bindir ; ./ceph -c $conf mds tell \* injectargs "--mds_kill_mdstable_at $f" ; popd sleep 1 # wait for mds command to go thru bash -c "pushd . ; cd $bindir ; sleep 10 ; ./init-ceph -c $conf start mds ; popd" & touch $f ln $f $f.link sleep 10 done
370
23.733333
100
sh
null
ceph-main/qa/mgr_ttl_cache/disable.yaml
overrides:
  ceph:
    conf:
      mgr:
        mgr ttl cache expire seconds: 0
80
12.5
39
yaml
null
ceph-main/qa/mgr_ttl_cache/enable.yaml
overrides:
  ceph:
    conf:
      mgr:
        mgr ttl cache expire seconds: 5
80
12.5
39
yaml
null
ceph-main/qa/mon/bootstrap/host.sh
#!/bin/sh -ex cwd=`pwd` cat > conf <<EOF [global] mon host = 127.0.0.1:6789 [mon] admin socket = log file = $cwd/\$name.log debug mon = 20 debug ms = 1 EOF rm -f mm fsid=`uuidgen` rm -f keyring ceph-authtool --create-keyring keyring --gen-key -n client.admin ceph-authtool keyring --gen-key -n mon. ceph-mon -c conf -i a --mkfs --fsid $fsid --mon-data mon.a -k keyring ceph-mon -c conf -i a --mon-data $cwd/mon.a ceph -c conf -k keyring health killall ceph-mon echo OK
477
15.482759
69
sh
null
ceph-main/qa/mon/bootstrap/initial_members.sh
#!/bin/sh -ex cwd=`pwd` cat > conf <<EOF [mon] admin socket = log file = $cwd/\$name.log debug mon = 20 debug ms = 1 mon initial members = a,b,d EOF rm -f mm monmaptool --create mm \ --add a 127.0.0.1:6789 \ --add b 127.0.0.1:6790 \ --add c 127.0.0.1:6791 rm -f keyring ceph-authtool --create-keyring keyring --gen-key -n client.admin ceph-authtool keyring --gen-key -n mon. ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring ceph-mon -c conf -i b --mkfs --monmap mm --mon-data $cwd/mon.b -k keyring ceph-mon -c conf -i c --mkfs --monmap mm --mon-data $cwd/mon.c -k keyring ceph-mon -c conf -i a --mon-data $cwd/mon.a ceph-mon -c conf -i c --mon-data $cwd/mon.b ceph-mon -c conf -i b --mon-data $cwd/mon.c ceph -c conf -k keyring --monmap mm health ceph -c conf -k keyring --monmap mm health if ceph -c conf -k keyring --monmap mm mon stat | grep a= | grep b= | grep c= ; then break fi killall ceph-mon echo OK
959
23
84
sh
null
ceph-main/qa/mon/bootstrap/initial_members_asok.sh
#!/bin/sh -ex cwd=`pwd` cat > conf <<EOF [mon] log file = $cwd/\$name.log debug mon = 20 debug ms = 1 debug asok = 20 mon initial members = a,b,d admin socket = $cwd/\$name.asok EOF rm -f mm fsid=`uuidgen` rm -f keyring ceph-authtool --create-keyring keyring --gen-key -n client.admin ceph-authtool keyring --gen-key -n mon. ceph-mon -c conf -i a --mkfs --fsid $fsid --mon-data $cwd/mon.a -k keyring ceph-mon -c conf -i b --mkfs --fsid $fsid --mon-data $cwd/mon.b -k keyring ceph-mon -c conf -i c --mkfs --fsid $fsid --mon-data $cwd/mon.c -k keyring ceph-mon -c conf -i a --mon-data $cwd/mon.a --public-addr 127.0.0.1:6789 ceph-mon -c conf -i b --mon-data $cwd/mon.c --public-addr 127.0.0.1:6790 ceph-mon -c conf -i c --mon-data $cwd/mon.b --public-addr 127.0.0.1:6791 sleep 1 if timeout 5 ceph -c conf -k keyring -m localhost mon stat | grep "a,b,c" ; then echo WTF exit 1 fi ceph --admin-daemon mon.a.asok add_bootstrap_peer_hint 127.0.0.1:6790 while true; do if ceph -c conf -k keyring -m 127.0.0.1 mon stat | grep 'a,b'; then break fi sleep 1 done ceph --admin-daemon mon.c.asok add_bootstrap_peer_hint 127.0.0.1:6790 while true; do if ceph -c conf -k keyring -m 127.0.0.1 mon stat | grep 'a,b,c'; then break fi sleep 1 done ceph-mon -c conf -i d --mkfs --fsid $fsid --mon-data $cwd/mon.d -k keyring ceph-mon -c conf -i d --mon-data $cwd/mon.d --public-addr 127.0.0.1:6792 ceph --admin-daemon mon.d.asok add_bootstrap_peer_hint 127.0.0.1:6790 while true; do if ceph -c conf -k keyring -m 127.0.0.1 mon stat | grep 'a,b,c,d'; then break fi sleep 1 done killall ceph-mon echo OK
1,642
23.522388
80
sh
null
ceph-main/qa/mon/bootstrap/simple.sh
#!/bin/sh -e cwd=`pwd` cat > conf <<EOF [mon] admin socket = EOF rm -f mm monmaptool --create mm \ --add a 127.0.0.1:6789 \ --add b 127.0.0.1:6790 \ --add c 127.0.0.1:6791 rm -f keyring ceph-authtool --create-keyring keyring --gen-key -n client.admin ceph-authtool keyring --gen-key -n mon. ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring ceph-mon -c conf -i b --mkfs --monmap mm --mon-data $cwd/mon.b -k keyring ceph-mon -c conf -i c --mkfs --monmap mm --mon-data $cwd/mon.c -k keyring ceph-mon -c conf -i a --mon-data $cwd/mon.a ceph-mon -c conf -i c --mon-data $cwd/mon.b ceph-mon -c conf -i b --mon-data $cwd/mon.c while true; do ceph -c conf -k keyring --monmap mm health if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1,2'; then break fi sleep 1 done killall ceph-mon echo OK
863
22.351351
79
sh
null
ceph-main/qa/mon/bootstrap/simple_expand.sh
#!/bin/sh -ex cwd=`pwd` cat > conf <<EOF [mon] admin socket = log file = $cwd/\$name.log debug mon = 20 debug ms = 1 EOF rm -f mm monmaptool --create mm \ --add a 127.0.0.1:6789 \ --add b 127.0.0.1:6790 \ --add c 127.0.0.1:6791 rm -f keyring ceph-authtool --create-keyring keyring --gen-key -n client.admin ceph-authtool keyring --gen-key -n mon. ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring ceph-mon -c conf -i b --mkfs --monmap mm --mon-data $cwd/mon.b -k keyring ceph-mon -c conf -i c --mkfs --monmap mm --mon-data $cwd/mon.c -k keyring ceph-mon -c conf -i a --mon-data $cwd/mon.a ceph-mon -c conf -i c --mon-data $cwd/mon.b ceph-mon -c conf -i b --mon-data $cwd/mon.c ceph -c conf -k keyring --monmap mm health ## expand via a kludged monmap monmaptool mm --add d 127.0.0.1:6792 ceph-mon -c conf -i d --mkfs --monmap mm --mon-data $cwd/mon.d -k keyring ceph-mon -c conf -i d --mon-data $cwd/mon.d while true; do ceph -c conf -k keyring --monmap mm health if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1,2,3'; then break fi sleep 1 done # again monmaptool mm --add e 127.0.0.1:6793 ceph-mon -c conf -i e --mkfs --monmap mm --mon-data $cwd/mon.e -k keyring ceph-mon -c conf -i e --mon-data $cwd/mon.e while true; do ceph -c conf -k keyring --monmap mm health if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1,2,3,4'; then break fi sleep 1 done killall ceph-mon echo OK
1,495
23.52459
83
sh
null
ceph-main/qa/mon/bootstrap/simple_expand_monmap.sh
#!/bin/sh -ex cwd=`pwd` cat > conf <<EOF [mon] admin socket = EOF rm -f mm monmaptool --create mm \ --add a 127.0.0.1:6789 \ --add b 127.0.0.1:6790 \ --add c 127.0.0.1:6791 rm -f keyring ceph-authtool --create-keyring keyring --gen-key -n client.admin ceph-authtool keyring --gen-key -n mon. ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring ceph-mon -c conf -i b --mkfs --monmap mm --mon-data $cwd/mon.b -k keyring ceph-mon -c conf -i c --mkfs --monmap mm --mon-data $cwd/mon.c -k keyring ceph-mon -c conf -i a --mon-data $cwd/mon.a ceph-mon -c conf -i c --mon-data $cwd/mon.b ceph-mon -c conf -i b --mon-data $cwd/mon.c ceph -c conf -k keyring --monmap mm health ## expand via a kludged monmap monmaptool mm --add d 127.0.0.1:6792 ceph-mon -c conf -i d --mkfs --monmap mm --mon-data $cwd/mon.d -k keyring ceph-mon -c conf -i d --mon-data $cwd/mon.d while true; do ceph -c conf -k keyring --monmap mm health if ceph -c conf -k keyring --monmap mm mon stat | grep d=; then break fi sleep 1 done killall ceph-mon echo OK
1,084
23.111111
73
sh
null
ceph-main/qa/mon/bootstrap/simple_single_expand.sh
#!/bin/sh -ex cwd=`pwd` cat > conf <<EOF [mon] admin socket = log file = $cwd/\$name.log debug mon = 20 debug ms = 1 EOF rm -f mm monmaptool --create mm \ --add a 127.0.0.1:6789 rm -f keyring ceph-authtool --create-keyring keyring --gen-key -n client.admin ceph-authtool keyring --gen-key -n mon. ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring ceph-mon -c conf -i a --mon-data $cwd/mon.a ceph -c conf -k keyring --monmap mm health ## expand via a kludged monmap monmaptool mm --add d 127.0.0.1:6702 ceph-mon -c conf -i d --mkfs --monmap mm --mon-data $cwd/mon.d -k keyring ceph-mon -c conf -i d --mon-data $cwd/mon.d while true; do ceph -c conf -k keyring --monmap mm health if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1'; then break fi sleep 1 done # again monmaptool mm --add e 127.0.0.1:6793 ceph-mon -c conf -i e --mkfs --monmap mm --mon-data $cwd/mon.e -k keyring ceph-mon -c conf -i e --mon-data $cwd/mon.e while true; do ceph -c conf -k keyring --monmap mm health if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1,2'; then break fi sleep 1 done killall ceph-mon echo OK
1,193
20.709091
79
sh
null
ceph-main/qa/mon/bootstrap/simple_single_expand2.sh
#!/bin/sh -ex cwd=`pwd` cat > conf <<EOF [mon] admin socket = log file = $cwd/\$name.log debug mon = 20 debug ms = 1 EOF rm -f mm ip=`host \`hostname\` | awk '{print $4}'` monmaptool --create mm \ --add a $ip:6779 rm -f keyring ceph-authtool --create-keyring keyring --gen-key -n client.admin ceph-authtool keyring --gen-key -n mon. ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring ceph-mon -c conf -i a --mon-data $cwd/mon.a ceph -c conf -k keyring --monmap mm health ## expand via a local_network ceph-mon -c conf -i d --mkfs --monmap mm --mon-data $cwd/mon.d -k keyring ceph-mon -c conf -i d --mon-data $cwd/mon.d --public-network 127.0.0.1/32 while true; do ceph -c conf -k keyring --monmap mm health if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1'; then break fi sleep 1 done killall ceph-mon echo OK
882
20.536585
77
sh
null
ceph-main/qa/mon/bootstrap/single_host.sh
#!/bin/sh -ex cwd=`pwd` cat > conf <<EOF [global] mon host = 127.0.0.1:6789 [mon] admin socket = log file = $cwd/\$name.log debug mon = 20 debug ms = 1 EOF rm -f mm fsid=`uuidgen` rm -f keyring ceph-authtool --create-keyring keyring --gen-key -n client.admin ceph-authtool keyring --gen-key -n mon. ceph-mon -c conf -i a --mkfs --fsid $fsid --mon-data $cwd/mon.a -k keyring ceph-mon -c conf -i a --mon-data $cwd/mon.a ceph -c conf -k keyring health killall ceph-mon echo OK
482
15.655172
74
sh
null
ceph-main/qa/mon/bootstrap/single_host_multi.sh
#!/bin/sh -ex cwd=`pwd` cat > conf <<EOF [global] [mon] admin socket = log file = $cwd/\$name.log debug mon = 20 debug ms = 1 mon host = 127.0.0.1:6789 127.0.0.1:6790 127.0.0.1:6791 EOF rm -f mm fsid=`uuidgen` rm -f keyring ceph-authtool --create-keyring keyring --gen-key -n client.admin ceph-authtool keyring --gen-key -n mon. ceph-mon -c conf -i a --mkfs --fsid $fsid --mon-data $cwd/mon.a -k keyring --public-addr 127.0.0.1:6789 ceph-mon -c conf -i b --mkfs --fsid $fsid --mon-data $cwd/mon.b -k keyring --public-addr 127.0.0.1:6790 ceph-mon -c conf -i c --mkfs --fsid $fsid --mon-data $cwd/mon.c -k keyring --public-addr 127.0.0.1:6791 ceph-mon -c conf -i a --mon-data $cwd/mon.a ceph-mon -c conf -i b --mon-data $cwd/mon.b ceph-mon -c conf -i c --mon-data $cwd/mon.c ceph -c conf -k keyring health -m 127.0.0.1 while true; do if ceph -c conf -k keyring -m 127.0.0.1 mon stat | grep 'a,b,c'; then break fi sleep 1 done killall ceph-mon echo OK
970
23.897436
103
sh
null
ceph-main/qa/mon_election/classic.yaml
overrides:
  ceph:
    conf:
      global:
        mon election default strategy: 1
83
15.8
40
yaml
null
ceph-main/qa/mon_election/connectivity.yaml
overrides:
  ceph:
    conf:
      global:
        mon election default strategy: 3
83
15.8
40
yaml
null
ceph-main/qa/msgr/async-v1only.yaml
overrides:
  ceph:
    mon_bind_msgr2: false
    conf:
      global:
        ms type: async
        ms bind msgr2: false
121
14.25
28
yaml
null
ceph-main/qa/msgr/async-v2only.yaml
overrides:
  ceph:
    conf:
      global:
        ms type: async
        ms bind msgr2: true
        ms bind msgr1: false
123
14.5
28
yaml
null
ceph-main/qa/msgr/async.yaml
overrides:
  ceph:
    conf:
      global:
        ms type: async
66
10.166667
22
yaml
null
ceph-main/qa/objectstore/bluestore-bitmap.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 1/20 debug bluefs: 1/20 debug rocksdb: 4/10 bluestore fsck on mount: true bluestore allocator: bitmap # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true bdev enable discard: true bdev async discard: true ceph-deploy: fs: xfs bluestore: yes conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 1/20 debug bluefs: 1/20 debug rocksdb: 4/10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 bdev enable discard: true bdev async discard: true
1,339
29.454545
90
yaml
null
ceph-main/qa/objectstore/bluestore-comp-lz4.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 1/20 debug bluefs: 1/20 debug rocksdb: 4/10 bluestore compression mode: aggressive bluestore fsck on mount: true bluestore compression algorithm: lz4 # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true
775
30.04
90
yaml
null
ceph-main/qa/objectstore/bluestore-comp-snappy.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 1/20 debug bluefs: 1/20 debug rocksdb: 4/10 bluestore compression mode: aggressive bluestore fsck on mount: true bluestore compression algorithm: snappy # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true
778
30.16
90
yaml
null
ceph-main/qa/objectstore/bluestore-comp-zlib.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 1/20 debug bluefs: 1/20 debug rocksdb: 4/10 bluestore compression mode: aggressive bluestore fsck on mount: true bluestore compression algorithm: zlib # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true
776
30.08
90
yaml
null
ceph-main/qa/objectstore/bluestore-comp-zstd.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 1/20 debug bluefs: 1/20 debug rocksdb: 4/10 bluestore compression mode: aggressive bluestore fsck on mount: true bluestore compression algorithm: zstd # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true
776
30.08
90
yaml
null
ceph-main/qa/objectstore/bluestore-hybrid.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 1/20 debug bluefs: 1/20 debug rocksdb: 4/10 bluestore fsck on mount: true bluestore allocator: hybrid bluefs allocator: hybrid # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true ceph-deploy: fs: xfs bluestore: yes conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 1/20 debug bluefs: 1/20 debug rocksdb: 4/10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95
1,238
29.219512
90
yaml
null
ceph-main/qa/objectstore/bluestore-low-osd-mem-target.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore osd memory target: 1610612736 # reduced to 1.5_G bluestore block size: 96636764160 debug bluestore: 1/20 debug bluefs: 1/20 debug rocksdb: 4/10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true bdev enable discard: true bdev async discard: true
807
30.076923
90
yaml
null
ceph-main/qa/objectstore/bluestore-stupid.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 1/20 debug bluefs: 1/20 debug rocksdb: 4/10 bluestore fsck on mount: true bluestore allocator: stupid # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true bdev enable discard: true bdev async discard: true ceph-deploy: fs: xfs bluestore: yes conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 1/20 debug bluefs: 1/20 debug rocksdb: 4/10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 bdev enable discard: true bdev async discard: true
1,339
29.454545
90
yaml
null
ceph-main/qa/objectstore_cephfs/bluestore-bitmap.yaml
../objectstore/bluestore-bitmap.yaml
36
36
36
yaml
null
ceph-main/qa/objectstore_debug/bluestore-bitmap.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true bluestore allocator: bitmap # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true bdev enable discard: true bdev async discard: true ceph-deploy: fs: xfs bluestore: yes conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 bdev enable discard: true bdev async discard: true
1,327
29.181818
90
yaml
null
ceph-main/qa/objectstore_debug/bluestore-comp-lz4.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore compression mode: aggressive bluestore fsck on mount: true bluestore compression algorithm: lz4 # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true
769
29.8
90
yaml
null
ceph-main/qa/objectstore_debug/bluestore-comp-snappy.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore compression mode: aggressive bluestore fsck on mount: true bluestore compression algorithm: snappy # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true
772
29.92
90
yaml
null
ceph-main/qa/objectstore_debug/bluestore-comp-zlib.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore compression mode: aggressive bluestore fsck on mount: true bluestore compression algorithm: zlib # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true
770
29.84
90
yaml
null
ceph-main/qa/objectstore_debug/bluestore-comp-zstd.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore compression mode: aggressive bluestore fsck on mount: true bluestore compression algorithm: zstd # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true
770
29.84
90
yaml
null
ceph-main/qa/objectstore_debug/bluestore-hybrid.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true bluestore allocator: hybrid bluefs allocator: hybrid # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true ceph-deploy: fs: xfs bluestore: yes conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95
1,226
28.926829
90
yaml
null
ceph-main/qa/objectstore_debug/bluestore-low-osd-mem-target.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore osd memory target: 1610612736 # reduced to 1.5_G bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true bdev enable discard: true bdev async discard: true
801
29.846154
90
yaml
null
ceph-main/qa/objectstore_debug/bluestore-stupid.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true bluestore allocator: stupid # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 # this doesn't work with failures bc the log writes are not atomic across the two backends # bluestore bluefs env mirror: true bdev enable discard: true bdev async discard: true ceph-deploy: fs: xfs bluestore: yes conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore fsck on mount: true # lower the full ratios since we can fill up a 100gb osd so quickly mon osd full ratio: .9 mon osd backfillfull_ratio: .85 mon osd nearfull ratio: .8 osd failsafe full ratio: .95 bdev enable discard: true bdev async discard: true
1,327
29.181818
90
yaml
null
ceph-main/qa/overrides/2-size-1-min-size.yaml
overrides:
  ceph:
    conf:
      global:
        osd_pool_default_size: 2
        osd_pool_default_min_size: 1
113
15.285714
36
yaml
null
ceph-main/qa/overrides/2-size-2-min-size.yaml
overrides:
  ceph:
    conf:
      global:
        osd_pool_default_size: 2
        osd_pool_default_min_size: 2
    log-ignorelist:
    - \(REQUEST_STUCK\)
159
16.777778
36
yaml
null
ceph-main/qa/overrides/3-size-2-min-size.yaml
overrides:
  thrashosds:
    min_in: 4
  ceph:
    conf:
      global:
        osd_pool_default_size: 3
        osd_pool_default_min_size: 2
141
14.777778
36
yaml
null
ceph-main/qa/overrides/ignorelist_wrongly_marked_down.yaml
overrides:
  ceph:
    log-ignorelist:
    - but it is still running
    conf:
      mds:
        debug mds: 20
        debug ms: 1
      client:
        debug client: 10
170
16.1
29
yaml
null
ceph-main/qa/overrides/more-active-recovery.yaml
overrides:
  ceph:
    conf:
      global:
        osd_recovery_max_active: 10
        osd_recovery_max_single_start: 10
121
16.428571
41
yaml
null
ceph-main/qa/overrides/no_client_pidfile.yaml
overrides:
  ceph:
    conf:
      client:
        pid file: ""
64
9.833333
20
yaml
null
ceph-main/qa/overrides/nvme_loop.yaml
tasks:
- nvme_loop:
20
6
12
yaml
null
ceph-main/qa/overrides/short_pg_log.yaml
overrides:
  ceph:
    conf:
      global:
        osd_min_pg_log_entries: 1
        osd_max_pg_log_entries: 2
        osd_pg_log_trim_min: 0
142
16.875
33
yaml
null
ceph-main/qa/packages/packages.yaml
--- ceph: deb: - ceph - cephadm - ceph-mds - ceph-mgr - ceph-common - ceph-fuse - ceph-test - ceph-volume - radosgw - python3-rados - python3-rgw - python3-cephfs - python3-rbd - libcephfs2 - libcephfs-dev - librados2 - librbd1 - rbd-fuse - ceph-common-dbg - ceph-fuse-dbg - ceph-mds-dbg - ceph-mgr-dbg - ceph-mon-dbg - ceph-osd-dbg - ceph-test-dbg - libcephfs2-dbg - librados2-dbg - libradosstriper1-dbg - librbd1-dbg - librgw2-dbg - radosgw-dbg - rbd-fuse-dbg - rbd-mirror-dbg - rbd-nbd-dbg rpm: - ceph-radosgw - ceph-test - ceph - ceph-base - cephadm - ceph-immutable-object-cache - ceph-mgr - ceph-mgr-dashboard - ceph-mgr-diskprediction-local - ceph-mgr-rook - ceph-mgr-cephadm - ceph-fuse - ceph-volume - librados-devel - libcephfs2 - libcephfs-devel - librados2 - librbd1 - python3-rados - python3-rgw - python3-cephfs - python3-rbd - rbd-fuse - rbd-mirror - rbd-nbd - ceph-base-debuginfo - ceph-common-debuginfo - ceph-immutable-object-cache-debuginfo - ceph-radosgw-debuginfo - ceph-test-debuginfo - ceph-base-debuginfo - ceph-mgr-debuginfo - ceph-mds-debuginfo - ceph-mon-debuginfo - ceph-osd-debuginfo - ceph-fuse-debuginfo - librados-devel-debuginfo - libcephfs2-debuginfo - librados2-debuginfo - librbd1-debuginfo - python3-cephfs-debuginfo - python3-rados-debuginfo - python3-rbd-debuginfo - python3-rgw-debuginfo - rbd-fuse-debuginfo - rbd-mirror-debuginfo - rbd-nbd-debuginfo
1,558
17.127907
41
yaml
null
ceph-main/qa/qa_scripts/cephscrub.sh
# remove the ceph directories sudo rm -rf /var/log/ceph sudo rm -rf /var/lib/ceph sudo rm -rf /etc/ceph sudo rm -rf /var/run/ceph # remove the ceph packages sudo apt-get -y purge ceph sudo apt-get -y purge ceph-dbg sudo apt-get -y purge ceph-mds sudo apt-get -y purge ceph-mds-dbg sudo apt-get -y purge ceph-fuse sudo apt-get -y purge ceph-fuse-dbg sudo apt-get -y purge ceph-common sudo apt-get -y purge ceph-common-dbg sudo apt-get -y purge ceph-resource-agents sudo apt-get -y purge librados2 sudo apt-get -y purge librados2-dbg sudo apt-get -y purge librados-dev sudo apt-get -y purge librbd1 sudo apt-get -y purge librbd1-dbg sudo apt-get -y purge librbd-dev sudo apt-get -y purge libcephfs2 sudo apt-get -y purge libcephfs2-dbg sudo apt-get -y purge libcephfs-dev sudo apt-get -y purge radosgw sudo apt-get -y purge radosgw-dbg sudo apt-get -y purge obsync sudo apt-get -y purge python-rados sudo apt-get -y purge python-rbd sudo apt-get -y purge python-cephfs
991
31
43
sh
null
ceph-main/qa/qa_scripts/openstack/ceph_install.sh
#!/usr/bin/env bash # # Install a simple ceph cluster upon which openstack images will be stored. # set -fv ceph_node=${1} source copy_func.sh copy_file files/$OS_CEPH_ISO $ceph_node . copy_file execs/ceph_cluster.sh $ceph_node . 0777 copy_file execs/ceph-pool-create.sh $ceph_node . 0777 ssh $ceph_node ./ceph_cluster.sh $*
326
26.25
75
sh
null
ceph-main/qa/qa_scripts/openstack/connectceph.sh
#!/usr/bin/env bash # # Connect openstack node just installed to a ceph cluster. # # Essentially implements: # # http://docs.ceph.com/en/latest/rbd/rbd-openstack/ # # The directory named files contains templates for the /etc/glance/glance-api.conf, # /etc/cinder/cinder.conf, /etc/nova/nova.conf Openstack files # set -fv source ./copy_func.sh source ./fix_conf_file.sh openstack_node=${1} ceph_node=${2} scp $ceph_node:/etc/ceph/ceph.conf ./ceph.conf ssh $openstack_node sudo mkdir /etc/ceph copy_file ceph.conf $openstack_node /etc/ceph 0644 rm -f ceph.conf ssh $openstack_node sudo yum -y install python-rbd ssh $openstack_node sudo yum -y install ceph-common ssh $ceph_node "sudo ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'" ssh $ceph_node "sudo ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'" ssh $ceph_node "sudo ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'" ssh $ceph_node sudo ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' ssh $ceph_node sudo ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups' ssh $ceph_node sudo ceph auth get-or-create client.glance | ssh $openstack_node sudo tee /etc/ceph/ceph.client.glance.keyring ssh $openstack_node sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring ssh $ceph_node sudo ceph auth get-or-create client.cinder | ssh $openstack_node sudo tee /etc/ceph/ceph.client.cinder.keyring ssh $openstack_node sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring ssh $ceph_node sudo ceph auth get-or-create client.cinder-backup | ssh $openstack_node sudo tee /etc/ceph/ceph.client.cinder-backup.keyring ssh $openstack_node sudo chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring ssh $ceph_node sudo ceph auth get-key client.cinder | ssh $openstack_node tee client.cinder.key copy_file execs/libvirt-secret.sh $openstack_node . secret_msg=`ssh $openstack_node sudo ./libvirt-secret.sh $openstack_node` secret_virt=`echo $secret_msg | sed 's/.* set //'` echo $secret_virt fix_conf_file $openstack_node glance-api /etc/glance fix_conf_file $openstack_node cinder /etc/cinder $secret_virt fix_conf_file $openstack_node nova /etc/nova $secret_virt copy_file execs/start_openstack.sh $openstack_node . 0755 ssh $openstack_node ./start_openstack.sh
2,662
58.177778
189
sh
null
ceph-main/qa/qa_scripts/openstack/copy_func.sh
# # copy_file(<filename>, <node>, <directory>, [<permissions>], [<owner>] # # copy a file -- this is needed because passwordless ssh does not # work when sudo'ing. # <file> -- name of local file to be copied # <node> -- node where we want the file # <directory> -- location where we want the file on <node> # <permissions> -- (optional) permissions on the copied file # <owner> -- (optional) owner of the copied file # function copy_file() { fname=`basename ${1}` scp ${1} ${2}:/tmp/${fname} ssh ${2} sudo cp /tmp/${fname} ${3} if [ $# -gt 3 ]; then ssh ${2} sudo chmod ${4} ${3}/${fname} fi if [ $# -gt 4 ]; then ssh ${2} sudo chown ${5} ${3}/${fname} fi }
731
30.826087
71
sh
null
ceph-main/qa/qa_scripts/openstack/fix_conf_file.sh
source ./copy_func.sh # # Take a templated file, modify a local copy, and write it to the # remote site. # # Usage: fix_conf_file <remote-site> <file-name> <remote-location> [<rbd-secret>] # <remote-site> -- site where we want this modified file stored. # <file-name> -- name of the remote file. # <remote-location> -- directory where the file will be stored # <rbd-secret> -- (optional) rbd_secret used by libvirt # function fix_conf_file() { if [[ $# < 3 ]]; then echo 'fix_conf_file: Too few parameters' exit 1 fi openstack_node_local=${1} cp files/${2}.template.conf ${2}.conf hostname=`ssh $openstack_node_local hostname` inet4addr=`ssh $openstack_node_local hostname -i` sed -i s/VARHOSTNAME/$hostname/g ${2}.conf sed -i s/VARINET4ADDR/$inet4addr/g ${2}.conf if [[ $# == 4 ]]; then sed -i s/RBDSECRET/${4}/g ${2}.conf fi copy_file ${2}.conf $openstack_node_local ${3} 0644 "root:root" rm ${2}.conf }
999
33.482759
81
sh
null
ceph-main/qa/qa_scripts/openstack/image_create.sh
#!/usr/bin/env bash # # Set up a vm on packstack. Use the iso in RHEL_ISO (defaults to home dir) # set -fv source ./copy_func.sh source ./fix_conf_file.sh openstack_node=${1} ceph_node=${2} RHEL_ISO=${RHEL_ISO:-~/rhel-server-7.2-x86_64-boot.iso} copy_file ${RHEL_ISO} $openstack_node . copy_file execs/run_openstack.sh $openstack_node . 0755 filler=`date +%s` ssh $openstack_node ./run_openstack.sh "${openstack_node}X${filler}" rhel-server-7.2-x86_64-boot.iso ssh $ceph_node sudo ceph df
491
27.941176
100
sh
null
ceph-main/qa/qa_scripts/openstack/openstack.sh
#!/usr/bin/env bash # # Install Openstack. # Usage: openstack <openstack-site> <ceph-monitor> # # This script installs Openstack on one node, and connects it to a ceph # cluster on another set of nodes. It is intended to run from a third # node. # # Assumes a single node Openstack cluster and a single monitor ceph # cluster. # # The execs directory contains scripts to be run on remote sites. # The files directory contains files to be copied to remote sites. # set -fv source ./copy_func.sh source ./fix_conf_file.sh openstack_node=${1} ceph_node=${2} ./packstack.sh $openstack_node $ceph_node echo 'done running packstack' sleep 60 ./connectceph.sh $openstack_node $ceph_node echo 'done connecting' sleep 60 ./image_create.sh $openstack_node $ceph_node
763
25.344828
71
sh
null
ceph-main/qa/qa_scripts/openstack/packstack.sh
#!/usr/bin/env bash # # Install openstack by running packstack. # # Implements the operations in: # https://docs.google.com/document/d/1us18KR3LuLyINgGk2rmI-SVj9UksCE7y4C2D_68Aa8o/edit?ts=56a78fcb # # The directory named files contains a template for the kilo.conf file used by packstack. # set -fv source ./copy_func.sh source ./fix_conf_file.sh openstack_node=${1} ceph_node=${2} copy_file execs/openstack-preinstall.sh $openstack_node . 0777 fix_conf_file $openstack_node kilo . ssh $openstack_node sudo ./openstack-preinstall.sh sleep 240 ssh $openstack_node sudo packstack --answer-file kilo.conf
604
27.809524
98
sh
null
ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/ceph_install.sh
#! /usr/bin/env bash if [ $# -ne 5 ]; then echo 'Usage: ceph_install.sh <admin-node> <mon-node> <osd-node> <osd-node> <osd-node>' exit -1 fi allnodes=$* adminnode=$1 shift cephnodes=$* monnode=$1 shift osdnodes=$* ./multi_action.sh cdn_setup.sh $allnodes ./talknice.sh $allnodes for mac in $allnodes; do ssh $mac sudo yum -y install yum-utils done source ./repolocs.sh ssh $adminnode sudo yum-config-manager --add ${CEPH_REPO_TOOLS} ssh $monnode sudo yum-config-manager --add ${CEPH_REPO_MON} for mac in $osdnodes; do ssh $mac sudo yum-config-manager --add ${CEPH_REPO_OSD} done ssh $adminnode sudo yum-config-manager --add ${INSTALLER_REPO_LOC} for mac in $allnodes; do ssh $mac sudo sed -i 's/gpgcheck=1/gpgcheck=0/' /etc/yum.conf done source copy_func.sh copy_file execs/ceph_ansible.sh $adminnode . 0777 ubuntu:ubuntu copy_file execs/edit_ansible_hosts.sh $adminnode . 0777 ubuntu:ubuntu copy_file execs/edit_groupvars_osds.sh $adminnode . 0777 ubuntu:ubuntu copy_file ../execs/ceph-pool-create.sh $monnode . 0777 ubuntu:ubuntu if [ -e ~/ip_info ]; then copy_file ~/ip_info $adminnode . 0777 ubuntu:ubuntu fi ssh $adminnode ./ceph_ansible.sh $cephnodes
1,184
28.625
90
sh
null
ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/copy_func.sh
../copy_func.sh
15
15
15
sh
null
ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/multi_action.sh
#! /usr/bin/env bash source copy_func.sh allparms=$* cmdv=$1 shift sites=$* for mac in $sites; do echo $cmdv $mac if [ -f ~/secrets ]; then copy_file ~/secrets $mac . 0777 ubuntu:ubuntu fi copy_file execs/${cmdv} $mac . 0777 ubuntu:ubuntu ssh $mac ./${cmdv} & done ./staller.sh $allparms for mac in $sites; do ssh $mac sudo rm -rf secrets done echo "DONE"
388
18.45
53
sh
null
ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/repolocs.sh
#! /usr/bin/env bash SPECIFIC_VERSION=latest-Ceph-2-RHEL-7 #SPECIFIC_VERSION=Ceph-2-RHEL-7-20160630.t.0 #SPECIFIC_VERSION=Ceph-2.0-RHEL-7-20160718.t.0 export CEPH_REPO_TOOLS=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/ceph-2-rhel-7-compose/${SPECIFIC_VERSION}/compose/Tools/x86_64/os/ export CEPH_REPO_MON=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/ceph-2-rhel-7-compose/${SPECIFIC_VERSION}/compose/MON/x86_64/os/ export CEPH_REPO_OSD=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/ceph-2-rhel-7-compose/${SPECIFIC_VERSION}/compose/OSD/x86_64/os/ export INSTALLER_REPO_LOC=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/rhscon-2-rhel-7-compose/latest-RHSCON-2-RHEL-7/compose/Installer/x86_64/os/
760
83.555556
162
sh
null
ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/staller.sh
#! /usr/bin/env bash cmd_wait=$1 shift sites=$* donebit=0 while [ $donebit -ne 1 ]; do sleep 10 donebit=1 for rem in $sites; do rval=`ssh $rem ps aux | grep $cmd_wait | wc -l` if [ $rval -gt 0 ]; then donebit=0 fi done done
277
16.375
56
sh
null
ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/talknice.sh
#!/usr/bin/env bash declare -A rsapub for fulln in $*; do sname=`echo $fulln | sed 's/\..*//'` nhead=`echo $sname | sed 's/[0-9]*//g'` x=`ssh $fulln "ls .ssh/id_rsa"` if [ -z $x ]; then ssh $fulln "ssh-keygen -N '' -f .ssh/id_rsa"; fi xx=`ssh $fulln "ls .ssh/config"` if [ -z $xx ]; then scp config $fulln:/home/ubuntu/.ssh/config fi ssh $fulln "chown ubuntu:ubuntu .ssh/config" ssh $fulln "chmod 0600 .ssh/config" rsapub[$fulln]=`ssh $fulln "cat .ssh/id_rsa.pub"` done for ii in $*; do ssh $ii sudo iptables -F for jj in $*; do pval=${rsapub[$jj]} if [ "$ii" != "$jj" ]; then xxxx=`ssh $ii "grep $jj .ssh/authorized_keys"` if [ -z "$xxxx" ]; then ssh $ii "echo '$pval' | sudo tee -a /home/ubuntu/.ssh/authorized_keys" fi fi done; done
884
28.5
86
sh
null
ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/cdn_setup.sh
#! /usr/bin/env bash if [ -f ~/secrets ]; then source ~/secrets fi subm=`which subscription-manager` if [ ${#subm} -eq 0 ]; then sudo yum -y update exit fi subst=`sudo subscription-manager status | grep "^Overall" | awk '{print $NF}'` if [ $subst == 'Unknown' ]; then mynameis=${subscrname:-'inigomontoya'} mypassis=${subscrpassword:-'youkeelmyfatherpreparetodie'} sudo subscription-manager register --username=$mynameis --password=$mypassis --force sudo subscription-manager refresh if [ $? -eq 1 ]; then exit 1; fi sudo subscription-manager attach --pool=8a85f9823e3d5e43013e3ddd4e2a0977 fi sudo subscription-manager repos --enable=rhel-7-server-rpms sudo yum -y update
708
32.761905
88
sh
null
ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/ceph_ansible.sh
#! /usr/bin/env bash cephnodes=$* monnode=$1 sudo yum -y install ceph-ansible cd sudo ./edit_ansible_hosts.sh $cephnodes mkdir ceph-ansible-keys cd /usr/share/ceph-ansible/group_vars/ if [ -f ~/ip_info ]; then source ~/ip_info fi mon_intf=${mon_intf:-'eno1'} pub_netw=${pub_netw:-'10.8.128.0\/21'} sudo cp all.sample all sudo sed -i 's/#ceph_origin:.*/ceph_origin: distro/' all sudo sed -i 's/#fetch_directory:.*/fetch_directory: ~\/ceph-ansible-keys/' all sudo sed -i 's/#ceph_stable:.*/ceph_stable: true/' all sudo sed -i 's/#ceph_stable_rh_storage:.*/ceph_stable_rh_storage: false/' all sudo sed -i 's/#ceph_stable_rh_storage_cdn_install:.*/ceph_stable_rh_storage_cdn_install: true/' all sudo sed -i 's/#cephx:.*/cephx: true/' all sudo sed -i "s/#monitor_interface:.*/monitor_interface: ${mon_intf}/" all sudo sed -i 's/#journal_size:.*/journal_size: 1024/' all sudo sed -i "s/#public_network:.*/public_network: ${pub_netw}/" all sudo cp osds.sample osds sudo sed -i 's/#fetch_directory:.*/fetch_directory: ~\/ceph-ansible-keys/' osds sudo sed -i 's/#crush_location:/crush_location:/' osds sudo sed -i 's/#osd_crush_location:/osd_crush_location:/' osds sudo sed -i 's/#cephx:/cephx:/' osds sudo sed -i 's/#devices:/devices:/' osds sudo sed -i 's/#journal_collocation:.*/journal_collocation: true/' osds cd sudo ./edit_groupvars_osds.sh cd /usr/share/ceph-ansible sudo cp site.yml.sample site.yml ansible-playbook site.yml ssh $monnode ~/ceph-pool-create.sh
1,464
38.594595
100
sh
null
ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_ansible_hosts.sh
#! /usr/bin/env bash ed /etc/ansible/hosts << EOF $ a [mons] ${1} [osds] ${2} ${3} ${4} . w q EOF
101
4.666667
28
sh
null
ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_groupvars_osds.sh
#! /usr/bin/env bash ed /usr/share/ceph-ansible/group_vars/osds << EOF $ /^devices: .+1 i - /dev/sdb - /dev/sdc - /dev/sdd . w q EOF
142
9.214286
49
sh
null
ceph-main/qa/qa_scripts/openstack/execs/ceph-pool-create.sh
#!/usr/bin/env bash set -f # # On the ceph site, make the pools required for Openstack # # # Make a pool, if it does not already exist. # function make_pool { if [[ -z `sudo ceph osd lspools | grep " $1,"` ]]; then echo "making $1" sudo ceph osd pool create $1 128 fi } # # Make sure the pg_num and pgp_num values are good. # count=`sudo ceph osd pool get rbd pg_num | sed 's/pg_num: //'` while [ $count -lt 128 ]; do sudo ceph osd pool set rbd pg_num $count count=`expr $count + 32` sleep 30 done sudo ceph osd pool set rbd pg_num 128 sleep 30 sudo ceph osd pool set rbd pgp_num 128 sleep 30 make_pool volumes make_pool images make_pool backups make_pool vms
699
19
62
sh
null
ceph-main/qa/qa_scripts/openstack/execs/ceph_cluster.sh
#!/usr/bin/env bash set -f echo $OS_CEPH_ISO if [[ $# -ne 4 ]]; then echo "Usage: ceph_cluster mon.0 osd.0 osd.1 osd.2" exit -1 fi allsites=$* mon=$1 shift osds=$* ISOVAL=${OS_CEPH_ISO-rhceph-1.3.1-rhel-7-x86_64-dvd.iso} sudo mount -o loop ${ISOVAL} /mnt fqdn=`hostname -f` lsetup=`ls /mnt/Installer | grep "^ice_setup"` sudo yum -y install /mnt/Installer/${lsetup} sudo ice_setup -d /mnt << EOF yes /mnt $fqdn http EOF ceph-deploy new ${mon} ceph-deploy install --repo --release=ceph-mon ${mon} ceph-deploy install --repo --release=ceph-osd ${allsites} ceph-deploy install --mon ${mon} ceph-deploy install --osd ${allsites} ceph-deploy mon create-initial sudo service ceph -a start osd for d in b c d; do for m in $osds; do ceph-deploy disk zap ${m}:sd${d} done for m in $osds; do ceph-deploy osd prepare ${m}:sd${d} done for m in $osds; do ceph-deploy osd activate ${m}:sd${d}1:sd${d}2 done done sudo ./ceph-pool-create.sh hchk=`sudo ceph health` while [[ $hchk != 'HEALTH_OK' ]]; do sleep 30 hchk=`sudo ceph health` done
1,092
20.431373
57
sh
null
ceph-main/qa/qa_scripts/openstack/execs/libvirt-secret.sh
#!/usr/bin/env bash set -f # # Generate a libvirt secret on the Openstack node. # openstack_node=${1} uuid=`uuidgen` cat > secret.xml <<EOF <secret ephemeral='no' private='no'> <uuid>${uuid}</uuid> <usage type='ceph'> <name>client.cinder secret</name> </usage> </secret> EOF sudo virsh secret-define --file secret.xml sudo virsh secret-set-value --secret ${uuid} --base64 $(cat client.cinder.key) echo ${uuid}
422
20.15
78
sh
null
ceph-main/qa/qa_scripts/openstack/execs/openstack-preinstall.sh
#!/usr/bin/env bash set -f # # Remotely setup the stuff needed to run packstack. This should do items 1-4 in # https://docs.google.com/document/d/1us18KR3LuLyINgGk2rmI-SVj9UksCE7y4C2D_68Aa8o/edit?ts=56a78fcb # yum remove -y rhos-release rpm -ivh http://rhos-release.virt.bos.redhat.com/repos/rhos-release/rhos-release-latest.noarch.rpm rm -rf /etc/yum.repos.d/* rm -rf /var/cache/yum/* rhos-release 8 yum update -y yum install -y nc puppet vim screen setroubleshoot crudini bpython openstack-packstack systemctl disable ntpd systemctl stop ntpd reboot
554
29.833333
98
sh
null
ceph-main/qa/qa_scripts/openstack/execs/run_openstack.sh
#!/usr/bin/env bash set -fv # # Create a glance image, a corresponding cinder volume, a nova instance, attach, the cinder volume to the # nova instance, and create a backup. # image_name=${1}X file_name=${2-rhel-server-7.2-x86_64-boot.iso} source ./keystonerc_admin glance image-create --name $image_name --disk-format iso --container-format bare --file $file_name glance_id=`glance image-list | grep ${image_name} | sed 's/^| //' | sed 's/ |.*//'` cinder create --image-id ${glance_id} --display-name ${image_name}-volume 8 nova boot --image ${image_name} --flavor 1 ${image_name}-inst cinder_id=`cinder list | grep ${image_name} | sed 's/^| //' | sed 's/ |.*//'` chkr=`cinder list | grep ${image_name}-volume | grep available` while [ -z "$chkr" ]; do sleep 30 chkr=`cinder list | grep ${image_name}-volume | grep available` done nova volume-attach ${image_name}-inst ${cinder_id} auto sleep 30 cinder backup-create --name ${image_name}-backup ${image_name}-volume --force
986
40.125
105
sh
null
ceph-main/qa/qa_scripts/openstack/execs/start_openstack.sh
#!/usr/bin/env bash set -fv # # start the Openstack services # sudo cp /root/keystonerc_admin ./keystonerc_admin sudo chmod 0644 ./keystonerc_admin source ./keystonerc_admin sudo service httpd stop sudo service openstack-keystone restart sudo service openstack-glance-api restart sudo service openstack-nova-compute restart sudo service openstack-cinder-volume restart sudo service openstack-cinder-backup restart
415
25
49
sh
null
ceph-main/qa/rbd/common.sh
#!/usr/bin/env bash die() { echo "$*" exit 1 } cleanup() { rm -rf $TDIR TDIR="" } set_variables() { # defaults [ -z "$bindir" ] && bindir=$PWD # location of init-ceph if [ -z "$conf" ]; then conf="$basedir/ceph.conf" [ -e $conf ] || conf="/etc/ceph/ceph.conf" fi [ -e $conf ] || die "conf file not found" CCONF="ceph-conf -c $conf" [ -z "$mnt" ] && mnt="/c" if [ -z "$monhost" ]; then $CCONF -t mon -i 0 'mon addr' > $TDIR/cconf_mon if [ $? -ne 0 ]; then $CCONF -t mon.a -i 0 'mon addr' > $TDIR/cconf_mon [ $? -ne 0 ] && die "can't figure out \$monhost" fi read monhost < $TDIR/cconf_mon fi [ -z "$imgsize" ] && imgsize=1024 [ -z "$user" ] && user=admin [ -z "$keyring" ] && keyring="`$CCONF keyring`" [ -z "$secret" ] && secret="`ceph-authtool $keyring -n client.$user -p`" monip="`echo $monhost | sed 's/:/ /g' | awk '{print $1}'`" monport="`echo $monhost | sed 's/:/ /g' | awk '{print $2}'`" [ -z "$monip" ] && die "bad mon address" [ -z "$monport" ] && monport=6789 set -e mydir=`hostname`_`echo $0 | sed 's/\//_/g'` img_name=test.`hostname`.$$ } rbd_load() { modprobe rbd } rbd_create_image() { id=$1 rbd create $img_name.$id --size=$imgsize } rbd_add() { id=$1 echo "$monip:$monport name=$user,secret=$secret rbd $img_name.$id" \ > /sys/bus/rbd/add pushd /sys/bus/rbd/devices &> /dev/null [ $? -eq 0 ] || die "failed to cd" devid="" rm -f "$TDIR/rbd_devs" for f in *; do echo $f >> "$TDIR/rbd_devs"; done sort -nr "$TDIR/rbd_devs" > "$TDIR/rev_rbd_devs" while read f < "$TDIR/rev_rbd_devs"; do read d_img_name < "$f/name" if [ "x$d_img_name" == "x$img_name.$id" ]; then devid=$f break fi done popd &> /dev/null [ "x$devid" == "x" ] && die "failed to find $img_name.$id" export rbd$id=$devid while [ ! -e /dev/rbd$devid ]; do sleep 1; done } rbd_test_init() { rbd_load } rbd_remove() { echo $1 > /sys/bus/rbd/remove } rbd_rm_image() { id=$1 rbd rm $imgname.$id } TDIR=`mktemp -d` trap cleanup INT TERM EXIT set_variables
2,161
19.788462
76
sh
null
ceph-main/qa/rbd/rbd.sh
#!/usr/bin/env bash set -x basedir=`echo $0 | sed 's/[^/]*$//g'`. . $basedir/common.sh rbd_test_init create_multiple() { for i in `seq 1 10`; do rbd_create_image $i done for i in `seq 1 10`; do rbd_add $i done for i in `seq 1 10`; do devname=/dev/rbd`eval echo \\$rbd$i` echo $devname done for i in `seq 1 10`; do devid=`eval echo \\$rbd$i` rbd_remove $devid done for i in `seq 1 10`; do rbd_rm_image $i done } test_dbench() { rbd_create_image 0 rbd_add 0 devname=/dev/rbd$rbd0 mkfs -t ext3 $devname mount -t ext3 $devname $mnt dbench -D $mnt -t 30 5 sync umount $mnt rbd_remove $rbd0 rbd_rm_image 0 } create_multiple test_dbench
676
12.27451
38
sh
null
ceph-main/qa/releases/infernalis.yaml
tasks:
- exec:
    osd.0:
      - ceph osd set sortbitwise
      - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done
148
23.833333
88
yaml
null
ceph-main/qa/releases/jewel.yaml
tasks:
- exec:
    osd.0:
      - ceph osd set sortbitwise
      - ceph osd set require_jewel_osds
      - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done
188
26
88
yaml
null
ceph-main/qa/releases/kraken.yaml
tasks:
- exec:
    osd.0:
      - ceph osd set require_kraken_osds
67
12.6
40
yaml
null
ceph-main/qa/releases/luminous-with-mgr.yaml
tasks:
- exec:
    osd.0:
      - ceph osd require-osd-release luminous
      - ceph osd set-require-min-compat-client luminous
- ceph.healthy:
overrides:
  ceph:
    conf:
      mon:
        mon warn on osd down out interval zero: false
238
18.916667
55
yaml
null
ceph-main/qa/releases/luminous.yaml
tasks:
- exec:
    mgr.x:
      - mkdir -p /var/lib/ceph/mgr/ceph-x
      - ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
      - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
- ceph.restart:
    daemons: [mgr.x]
    wait-for-healthy: false
- exec:
    osd.0:
      - ceph osd require-osd-release luminous
      - ceph osd set-require-min-compat-client luminous
- ceph.healthy:
overrides:
  ceph:
    conf:
      mon:
        mon warn on osd down out interval zero: false
    log-ignorelist:
    - no active mgr
538
23.5
65
yaml
null
ceph-main/qa/releases/mimic.yaml
tasks:
- exec:
    osd.0:
      - ceph osd require-osd-release mimic
      - ceph osd set-require-min-compat-client mimic
- ceph.healthy:
138
18.857143
52
yaml
null
ceph-main/qa/releases/nautilus.yaml
tasks:
- exec:
    osd.0:
      - ceph osd require-osd-release nautilus
      - ceph osd set-require-min-compat-client nautilus
      - for p in `ceph osd pool ls`; do ceph osd pool set $p pg_autoscale_mode off; done
- ceph.healthy:
233
28.25
88
yaml
null
ceph-main/qa/releases/octopus.yaml
tasks:
- exec:
    osd.0:
      - ceph osd require-osd-release octopus
      - ceph osd set-require-min-compat-client octopus
      - for f in `ceph osd pool ls` ; do ceph osd pool set $f pg_autoscale_mode off ; done
- ceph.healthy:
233
28.25
90
yaml
null
ceph-main/qa/releases/pacific-from-o.yaml
tasks:
- exec:
    osd.0:
      - ceph osd require-osd-release pacific
      - ceph osd set-require-min-compat-client pacific
- ceph.healthy:
142
19.428571
54
yaml