ceph-main/qa/workunits/fs/misc/chmod.sh
#!/bin/sh -x
set -e
check_perms() {
file=$1
r=$(ls -la ${file})
if test $? != 0; then
echo "ERROR: File listing/stat failed"
exit 1
fi
perms=$2
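# ls may append '.' (SELinux context) or '+' (ACLs) to the mode field, so accept all three forms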
if test "${perms}" != $(echo ${r} | awk '{print $1}') && \
test "${perms}." != $(echo ${r} | awk '{print $1}') && \
test "${perms}+" != $(echo ${r} | awk '{print $1}'); then
echo "ERROR: Permissions should be ${perms}"
exit 1
fi
}
file=test_chmod.$$
echo "foo" > ${file}
if test $? != 0; then
echo "ERROR: Failed to create file ${file}"
exit 1
fi
chmod 400 ${file}
if test $? != 0; then
echo "ERROR: Failed to change mode of ${file}"
exit 1
fi
check_perms ${file} "-r--------"
set +e
echo "bar" >> ${file}
if test $? = 0; then
echo "ERROR: Write to read-only file should Fail"
exit 1
fi
set -e
chmod 600 ${file}
echo "bar" >> ${file}
if test $? != 0; then
echo "ERROR: Write to writeable file failed"
exit 1
fi
check_perms ${file} "-rw-------"
echo "foo" >> ${file}
if test $? != 0; then
echo "ERROR: Failed to write to file"
exit 1
fi

ceph-main/qa/workunits/fs/misc/dac_override.sh
#!/bin/sh -x
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
set -e
mkdir -p testdir
file=test_chmod.$$
echo "foo" > testdir/${file}
sudo chmod 600 testdir
# only root can read
expect_failure cat testdir/${file}
# directory read/write DAC override for root should allow read
sudo cat testdir/${file}

ceph-main/qa/workunits/fs/misc/dirfrag.sh
#!/usr/bin/env bash
set -e
DEPTH=5
COUNT=10000
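# create $COUNT files in each of $DEPTH nested directories in parallel, then delete them again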
kill_jobs() {
jobs -p | xargs kill
}
trap kill_jobs INT
create_files() {
for i in `seq 1 $COUNT`
do
touch file$i
done
}
delete_files() {
for i in `ls -f`
do
if [[ ${i}a = file*a ]]
then
rm -f $i
fi
done
}
rm -rf testdir
mkdir testdir
cd testdir
echo "creating folder hierarchy"
for i in `seq 1 $DEPTH`; do
mkdir dir$i
cd dir$i
create_files &
done
wait
echo "created hierarchy, now cleaning up"
for i in `seq 1 $DEPTH`; do
delete_files &
cd ..
done
wait
echo "cleaned up hierarchy"
cd ..
rm -rf testdir

ceph-main/qa/workunits/fs/misc/i_complete_vs_rename.sh
#!/bin/sh
set -e
mkdir x
cd x
touch a
touch b
touch c
touch d
ls
chmod 777 .
stat e || true
touch f
touch g
# over existing file
echo attempting rename over existing file...
touch ../xx
mv ../xx f
ls | grep f || false
echo rename over existing file is okay
# over negative dentry
echo attempting rename over negative dentry...
touch ../xx
mv ../xx e
ls | grep e || false
echo rename over negative dentry is ok
echo OK

ceph-main/qa/workunits/fs/misc/layout_vxattrs.sh
#!/usr/bin/env bash
set -ex
# detect data pool
datapool=
dir=.
while true ; do
echo $dir
datapool=$(getfattr -n ceph.dir.layout.pool $dir --only-values) && break
dir=$dir/..
done
# file
rm -f file file2
touch file file2
getfattr -n ceph.file.layout file
getfattr -n ceph.file.layout file | grep -q object_size=
getfattr -n ceph.file.layout file | grep -q stripe_count=
getfattr -n ceph.file.layout file | grep -q stripe_unit=
getfattr -n ceph.file.layout file | grep -q pool=
getfattr -n ceph.file.layout.pool file
getfattr -n ceph.file.layout.pool_namespace file
getfattr -n ceph.file.layout.stripe_unit file
getfattr -n ceph.file.layout.stripe_count file
getfattr -n ceph.file.layout.object_size file
getfattr -n ceph.file.layout.bogus file 2>&1 | grep -q 'No such attribute'
getfattr -n ceph.dir.layout file 2>&1 | grep -q 'No such attribute'
setfattr -n ceph.file.layout.stripe_unit -v 1048576 file2
setfattr -n ceph.file.layout.stripe_count -v 8 file2
setfattr -n ceph.file.layout.object_size -v 10485760 file2
setfattr -n ceph.file.layout.pool -v $datapool file2
getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
setfattr -n ceph.file.layout.pool_namespace -v foons file2
getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
setfattr -x ceph.file.layout.pool_namespace file2
getfattr -n ceph.file.layout.pool_namespace file2 | grep -q -v foons
getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 1048576
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 8
getfattr -n ceph.file.layout.object_size file2 | grep -q 10485760
setfattr -n ceph.file.layout -v "stripe_unit=4194304 stripe_count=16 object_size=41943040 pool=$datapool pool_namespace=foons" file2
getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 4194304
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
getfattr -n ceph.file.layout.object_size file2 | grep -q 41943040
getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
setfattr -n ceph.file.layout -v "stripe_unit=1048576" file2
getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 1048576
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
getfattr -n ceph.file.layout.object_size file2 | grep -q 41943040
getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
setfattr -n ceph.file.layout -v "stripe_unit=2097152 stripe_count=4 object_size=2097152 pool=$datapool pool_namespace=barns" file2
getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 2097152
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 4
getfattr -n ceph.file.layout.object_size file2 | grep -q 2097152
getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
getfattr -n ceph.file.layout.pool_namespace file2 | grep -q barns
# dir
rm -f dir/file || true
rmdir dir || true
mkdir -p dir
getfattr -d -m - dir | grep -q ceph.dir.layout && exit 1 || true
getfattr -d -m - dir | grep -q ceph.file.layout && exit 1 || true
getfattr -n ceph.dir.layout dir && exit 1 || true
setfattr -n ceph.dir.layout.stripe_unit -v 1048576 dir
setfattr -n ceph.dir.layout.stripe_count -v 8 dir
setfattr -n ceph.dir.layout.object_size -v 10485760 dir
setfattr -n ceph.dir.layout.pool -v $datapool dir
setfattr -n ceph.dir.layout.pool_namespace -v dirns dir
getfattr -n ceph.dir.layout dir
getfattr -n ceph.dir.layout dir | grep -q object_size=10485760
getfattr -n ceph.dir.layout dir | grep -q stripe_count=8
getfattr -n ceph.dir.layout dir | grep -q stripe_unit=1048576
getfattr -n ceph.dir.layout dir | grep -q pool=$datapool
getfattr -n ceph.dir.layout dir | grep -q pool_namespace=dirns
getfattr -n ceph.dir.layout.pool dir | grep -q $datapool
getfattr -n ceph.dir.layout.stripe_unit dir | grep -q 1048576
getfattr -n ceph.dir.layout.stripe_count dir | grep -q 8
getfattr -n ceph.dir.layout.object_size dir | grep -q 10485760
getfattr -n ceph.dir.layout.pool_namespace dir | grep -q dirns
setfattr -n ceph.file.layout -v "stripe_count=16" file2
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
setfattr -n ceph.file.layout -v "object_size=10485760 stripe_count=8 stripe_unit=1048576 pool=$datapool pool_namespace=dirns" file2
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 8
touch dir/file
getfattr -n ceph.file.layout.pool dir/file | grep -q $datapool
getfattr -n ceph.file.layout.stripe_unit dir/file | grep -q 1048576
getfattr -n ceph.file.layout.stripe_count dir/file | grep -q 8
getfattr -n ceph.file.layout.object_size dir/file | grep -q 10485760
getfattr -n ceph.file.layout.pool_namespace dir/file | grep -q dirns
setfattr -x ceph.dir.layout.pool_namespace dir
getfattr -n ceph.dir.layout dir | grep -q -v pool_namespace=dirns
setfattr -x ceph.dir.layout dir
getfattr -n ceph.dir.layout dir 2>&1 | grep -q 'No such attribute'
echo OK

ceph-main/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh
#!/usr/bin/env bash
set -e
touch foo.$$
ceph osd pool create foo.$$ 8
ceph fs add_data_pool cephfs foo.$$
setfattr -n ceph.file.layout.pool -v foo.$$ foo.$$
# cleanup
rm foo.$$
ceph fs rm_data_pool cephfs foo.$$
ceph osd pool rm foo.$$ foo.$$ --yes-i-really-really-mean-it
echo OK

ceph-main/qa/workunits/fs/misc/multiple_rsync.sh
#!/bin/sh -ex
# Populate with some arbitrary files from the local system. Take
# a copy to protect against false fails from system updates during test.
export PAYLOAD=/tmp/multiple_rsync_payload.$$
sudo cp -r /usr/lib/ $PAYLOAD
set -e
sudo rsync -av $PAYLOAD payload.1
sudo rsync -av $PAYLOAD payload.2
# this shouldn't transfer any additional files
echo we should get 4 here if no additional files are transferred
sudo rsync -auv $PAYLOAD payload.1 | tee /tmp/$$
hexdump -C /tmp/$$
wc -l /tmp/$$ | grep 4
sudo rsync -auv $PAYLOAD payload.2 | tee /tmp/$$
hexdump -C /tmp/$$
wc -l /tmp/$$ | grep 4
echo OK
rm /tmp/$$
sudo rm -rf $PAYLOAD

ceph-main/qa/workunits/fs/misc/rstats.sh
#!/usr/bin/env bash
set -x
timeout=30
old_value=""
new_value=""
wait_until_changed() {
name=$1
wait=0
while [ $wait -lt $timeout ]; do
new_value=`getfattr --only-value -n ceph.dir.$name .`
[ $new_value == $old_value ] || return 0
sleep 1
wait=$(($wait + 1))
done
return 1
}
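# verify that rctime moved forward: compare seconds first, then nanoseconds on a tie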
check_rctime() {
old_sec=$(echo $old_value | cut -d. -f1)
old_nsec=$(echo $old_value | cut -d. -f2)
new_sec=$(echo $new_value | cut -d. -f1)
new_nsec=$(echo $new_value | cut -d. -f2)
[ "$old_sec" -lt "$new_sec" ] && return 0
[ "$old_sec" -gt "$new_sec" ] && return 1
[ "$old_nsec" -lt "$new_nsec" ] && return 0
return 1
}
# sync(3) does not make ceph-fuse flush dirty caps, because the fuse kernel module
# does not notify ceph-fuse about it. Use fsync(3) instead.
fsync_path() {
cmd="import os; fd=os.open(\"$1\", os.O_RDONLY); os.fsync(fd); os.close(fd)"
python3 -c "$cmd"
}
set -e
mkdir -p rstats_testdir/d1/d2
cd rstats_testdir
# rfiles
old_value=`getfattr --only-value -n ceph.dir.rfiles .`
[ $old_value == 0 ] || false
touch d1/d2/f1
wait_until_changed rfiles
[ $new_value == $(($old_value + 1)) ] || false
# rsubdirs
old_value=`getfattr --only-value -n ceph.dir.rsubdirs .`
[ $old_value == 3 ] || false
mkdir d1/d2/d3
wait_until_changed rsubdirs
[ $new_value == $(($old_value + 1)) ] || false
# rbytes
old_value=`getfattr --only-value -n ceph.dir.rbytes .`
[ $old_value == 0 ] || false
echo hello > d1/d2/f2
fsync_path d1/d2/f2
wait_until_changed rbytes
[ $new_value == $(($old_value + 6)) ] || false
#rctime
old_value=`getfattr --only-value -n ceph.dir.rctime .`
touch d1/d2/d3 # touch existing file
fsync_path d1/d2/d3
wait_until_changed rctime
check_rctime
old_value=`getfattr --only-value -n ceph.dir.rctime .`
touch d1/d2/f3 # create new file
wait_until_changed rctime
check_rctime
cd ..
rm -rf rstats_testdir
echo OK

ceph-main/qa/workunits/fs/misc/trivial_sync.sh
#!/usr/bin/env bash
set -e
mkdir foo
echo foo > bar
sync

ceph-main/qa/workunits/fs/misc/xattrs.sh
#!/bin/sh -x
set -e
touch file
setfattr -n user.foo -v foo file
setfattr -n user.bar -v bar file
setfattr -n user.empty file
getfattr -d file | grep foo
getfattr -d file | grep bar
getfattr -d file | grep empty
echo OK.

ceph-main/qa/workunits/fs/norstats/kernel_untar_tar.sh
#!/usr/bin/env bash
# check that no file changes while it is being archived
set -ex
KERNEL=linux-4.0.5
wget -q http://download.ceph.com/qa/$KERNEL.tar.xz
mkdir untar_tar
cd untar_tar
tar Jxvf ../$KERNEL.tar.xz $KERNEL/Documentation/
tar cf doc.tar $KERNEL
tar xf doc.tar
sync
tar c $KERNEL >/dev/null
rm -rf $KERNEL
tar xf doc.tar
sync
tar c $KERNEL >/dev/null
echo Ok

ceph-main/qa/workunits/fs/quota/quota.sh
#!/usr/bin/env bash
set -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
function write_file()
{
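# append up to $2 MiB to $1 in 1 MiB chunks; return non-zero on the first write that fails (e.g. when a quota is hit)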
set +x
for ((i=1;i<=$2;i++))
do
dd if=/dev/zero of=$1 bs=1M count=1 conv=notrunc oflag=append 2>/dev/null >/dev/null
if [ $? != 0 ]; then
echo Try to write $(($i * 1048576))
set -x
return 1
fi
sleep 0.05
done
set -x
return 0
}
mkdir quota-test
cd quota-test
# bytes
setfattr . -n ceph.quota.max_bytes -v 100M
expect_false write_file big 1000 # 1g
expect_false write_file second 10
setfattr . -n ceph.quota.max_bytes -v 0
dd if=/dev/zero of=third bs=1M count=10
dd if=/dev/zero of=big2 bs=1M count=100
rm -rf *
# files
setfattr . -n ceph.quota.max_files -v 5
mkdir ok
touch ok/1
touch ok/2
touch 3
expect_false touch shouldbefail # 5 files will include the "."
expect_false touch ok/shouldbefail # 5 files will include the "."
setfattr . -n ceph.quota.max_files -v 0
touch shouldbecreated
touch shouldbecreated2
rm -rf *
# mix
mkdir bytes bytes/files
setfattr bytes -n ceph.quota.max_bytes -v 10M
setfattr bytes/files -n ceph.quota.max_files -v 5
dd if=/dev/zero of=bytes/files/1 bs=1M count=4
dd if=/dev/zero of=bytes/files/2 bs=1M count=4
expect_false write_file bytes/files/3 1000
expect_false write_file bytes/files/4 1000
expect_false write_file bytes/files/5 1000
stat --printf="%n %s\n" bytes/files/1 #4M
stat --printf="%n %s\n" bytes/files/2 #4M
stat --printf="%n %s\n" bytes/files/3 #bigger than 2M
stat --printf="%n %s\n" bytes/files/4 #should be zero
expect_false stat bytes/files/5 # should not exist
rm -rf *
#mv
mkdir files limit
truncate files/file -s 10G
setfattr limit -n ceph.quota.max_bytes -v 1M
expect_false mv files limit/
rm -rf *
#limit by ancestor
mkdir -p ancestor/p1/p2/parent/p3
setfattr ancestor -n ceph.quota.max_bytes -v 1M
setfattr ancestor/p1/p2/parent -n ceph.quota.max_bytes -v 1G
expect_false write_file ancestor/p1/p2/parent/p3/file1 900 #900m
stat --printf="%n %s\n" ancestor/p1/p2/parent/p3/file1
#get/set attribute
setfattr -n ceph.quota.max_bytes -v 0 .
setfattr -n ceph.quota.max_bytes -v 1 .
setfattr -n ceph.quota.max_bytes -v 9223372036854775807 .
expect_false setfattr -n ceph.quota.max_bytes -v 9223372036854775808 .
expect_false setfattr -n ceph.quota.max_bytes -v -1 .
expect_false setfattr -n ceph.quota.max_bytes -v -9223372036854775808 .
expect_false setfattr -n ceph.quota.max_bytes -v -9223372036854775809 .
setfattr -n ceph.quota.max_bytes -v 0 .
setfattr -n ceph.quota.max_bytes -v 1Ti .
setfattr -n ceph.quota.max_bytes -v 8388607Ti .
expect_false setfattr -n ceph.quota.max_bytes -v 8388608Ti .
expect_false setfattr -n ceph.quota.max_bytes -v -1Ti .
expect_false setfattr -n ceph.quota.max_bytes -v -8388609Ti .
expect_false setfattr -n ceph.quota.max_bytes -v -8388610Ti .
setfattr -n ceph.quota.max_files -v 0 .
setfattr -n ceph.quota.max_files -v 1 .
setfattr -n ceph.quota.max_files -v 9223372036854775807 .
expect_false setfattr -n ceph.quota.max_files -v 9223372036854775808 .
expect_false setfattr -n ceph.quota.max_files -v -1 .
expect_false setfattr -n ceph.quota.max_files -v -9223372036854775808 .
expect_false setfattr -n ceph.quota.max_files -v -9223372036854775809 .
setfattr -n ceph.quota -v "max_bytes=0 max_files=0" .
setfattr -n ceph.quota -v "max_bytes=1 max_files=0" .
setfattr -n ceph.quota -v "max_bytes=0 max_files=1" .
setfattr -n ceph.quota -v "max_bytes=1 max_files=1" .
expect_false setfattr -n ceph.quota -v "max_bytes=-1 max_files=0" .
expect_false setfattr -n ceph.quota -v "max_bytes=0 max_files=-1" .
expect_false setfattr -n ceph.quota -v "max_bytes=-1 max_files=-1" .
#addme
cd ..
rm -rf quota-test
echo OK

ceph-main/qa/workunits/fs/snaps/snap-rm-diff.sh
#!/bin/sh -ex
wget -q http://download.ceph.com/qa/linux-2.6.33.tar.bz2
mkdir foo
cp linux* foo
mkdir foo/.snap/barsnap
rm foo/linux*
diff -q foo/.snap/barsnap/linux* linux* && echo "passed: files are identical"
rmdir foo/.snap/barsnap
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-1.sh
#!/usr/bin/env bash
set -ex
echo 1 > file1
echo 2 > file2
echo 3 > file3
[ -e file4 ] && rm file4
mkdir .snap/snap1
echo 4 > file4
now=`ls`
then=`ls .snap/snap1`
rmdir .snap/snap1
if [ "$now" = "$then" ]; then
echo live and snap contents are identical?
false
fi
# do it again
echo 1 > file1
echo 2 > file2
echo 3 > file3
mkdir .snap/snap1
echo 4 > file4
rmdir .snap/snap1
rm file?
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-2.sh
#!/usr/bin/env bash
echo "Create dir 100 to 199 ..."
for i in $(seq 100 199); do
echo " create dir $i"
mkdir "$i"
for y in $(seq 10 20); do
echo "This is a test file before any snapshot was taken." >"$i/$y"
done
done
echo "Take first snapshot .snap/test1"
mkdir .snap/test1
echo "Create dir 200 to 299 ..."
for i in $(seq 200 299); do
echo " create dir $i"
mkdir $i
for y in $(seq 20 29); do
echo "This is a test file. Created after .snap/test1" >"$i/$y"
done
done
echo "Create a snapshot in every first level dir ..."
for dir in $(ls); do
echo " create $dir/.snap/snap-subdir-test"
mkdir "$dir/.snap/snap-subdir-test"
for y in $(seq 30 39); do
echo " create $dir/$y file after the snapshot"
echo "This is a test file. Created after $dir/.snap/snap-subdir-test" >"$dir/$y"
done
done
echo "Take second snapshot .snap/test2"
mkdir .snap/test2
echo "Copy content of .snap/test1 to copyofsnap1 ..."
mkdir copyofsnap1
cp -Rv .snap/test1 copyofsnap1/
echo "Take third snapshot .snap/test3"
mkdir .snap/test3
echo "Delete the snapshots..."
find ./ -type d -print | \
xargs -I% -n1 find %/.snap -mindepth 1 -maxdepth 1 \
\( ! -name "_*" \) -print 2>/dev/null
find ./ -type d -print | \
xargs -I% -n1 find %/.snap -mindepth 1 -maxdepth 1 \
\( ! -name "_*" \) -print 2>/dev/null | \
xargs -n1 rmdir
echo "Delete all the files and directories ..."
rm -Rfv ./*
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-authwb.sh
#!/bin/sh -x
set -e
touch foo
chmod +x foo
mkdir .snap/s
find .snap/s/foo -executable | grep foo
rmdir .snap/s
rm foo
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-capwb.sh
#!/bin/sh -x
set -e
mkdir foo
# make sure mds handles it when the client does not send flushsnap
echo x > foo/x
sync
mkdir foo/.snap/ss
ln foo/x foo/xx
cat foo/.snap/ss/x
rmdir foo/.snap/ss
#
echo a > foo/a
echo b > foo/b
mkdir foo/.snap/s
r=`cat foo/.snap/s/a`
[ -z "$r" ] && echo "a appears empty in snapshot" && false
ln foo/b foo/b2
cat foo/.snap/s/b
echo "this used to hang:"
echo more >> foo/b2
echo "oh, it didn't hang! good job."
cat foo/b
rmdir foo/.snap/s
rm -r foo
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-dir-rename.sh
#!/bin/sh -x
set -e
#
# make sure we keep an existing dn's seq
#
mkdir a
mkdir .snap/bar
mkdir a/.snap/foo
rmdir a/.snap/foo
rmdir a
stat .snap/bar/a
rmdir .snap/bar
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-double-null.sh
#!/bin/sh -x
set -e
# multiple intervening snapshots with no modifications, and thus no
# snapflush client_caps messages. make sure the mds can handle this.
for f in `seq 1 20` ; do
mkdir a
cat > a/foo &
mkdir a/.snap/one
mkdir a/.snap/two
chmod 777 a/foo
sync # this might crash the mds
ps
rmdir a/.snap/*
rm a/foo
rmdir a
done
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-estale.sh
#!/bin/sh -x
mkdir .snap/foo
echo "We want ENOENT, not ESTALE, here."
for f in `seq 1 100`
do
stat .snap/foo/$f 2>&1 | grep 'No such file'
done
rmdir .snap/foo
echo "OK"

ceph-main/qa/workunits/fs/snaps/snaptest-git-ceph.sh
#!/bin/sh -x
set -e
# retry if the clone is slow; the second attempt doubles the timeout
retried=false
trap -- 'retry' EXIT
retry() {
rm -rf ceph
# double the timeout value
timeout 3600 git clone https://git.ceph.com/ceph.git
}
rm -rf ceph
timeout 1800 git clone https://git.ceph.com/ceph.git
trap - EXIT
cd ceph
versions=`seq 1 90`
for v in $versions
do
if [ $v -eq 48 ]; then
continue
fi
ver="v0.$v"
echo $ver
git reset --hard $ver
mkdir .snap/$ver
done
for v in $versions
do
if [ $v -eq 48 ]; then
continue
fi
ver="v0.$v"
echo checking $ver
cd .snap/$ver
git diff --exit-code
cd ../..
done
for v in $versions
do
if [ $v -eq 48 ]; then
continue
fi
ver="v0.$v"
rmdir .snap/$ver
done
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-hardlink.sh
#!/bin/sh -x
set -e
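# data reached through hard links must stay readable via 2's snapshot even after the links are modified or removed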
mkdir 1 2
echo asdf >1/file1
echo asdf >1/file2
ln 1/file1 2/file1
ln 1/file2 2/file2
mkdir 2/.snap/s1
echo qwer >1/file1
grep asdf 2/.snap/s1/file1
rm -f 1/file2
grep asdf 2/.snap/s1/file2
rm -f 2/file2
grep asdf 2/.snap/s1/file2
rmdir 2/.snap/s1
rm -rf 1 2
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-intodir.sh
#!/bin/sh -ex
# this tests fix for #1399
mkdir foo
mkdir foo/.snap/one
touch bar
mv bar foo
sync
# should not crash :)
mkdir baz
mkdir baz/.snap/two
mv baz foo
sync
# should not crash :)
# clean up.
rmdir foo/baz/.snap/two
rmdir foo/.snap/one
rm -r foo
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh
#!/bin/sh -x
set -e
echo asdf > a
mkdir .snap/1
chmod 777 a
mkdir .snap/2
echo qwer > a
mkdir .snap/3
chmod 666 a
mkdir .snap/4
echo zxcv > a
mkdir .snap/5
ls -al .snap/?/a
grep asdf .snap/1/a
stat .snap/1/a | grep 'Size: 5'
grep asdf .snap/2/a
stat .snap/2/a | grep 'Size: 5'
stat .snap/2/a | grep -- '-rwxrwxrwx'
grep qwer .snap/3/a
stat .snap/3/a | grep 'Size: 5'
stat .snap/3/a | grep -- '-rwxrwxrwx'
grep qwer .snap/4/a
stat .snap/4/a | grep 'Size: 5'
stat .snap/4/a | grep -- '-rw-rw-rw-'
grep zxcv .snap/5/a
stat .snap/5/a | grep 'Size: 5'
stat .snap/5/a | grep -- '-rw-rw-rw-'
rmdir .snap/[12345]
echo "OK"

ceph-main/qa/workunits/fs/snaps/snaptest-name-limits.sh
#!/bin/bash
#
# This tests snapshot names limits: names have to be < 240 chars
#
function cleanup ()
{
rmdir d1/.snap/*
rm -rf d1
}
function fail ()
{
echo $@
cleanup
exit 1
}
mkdir d1
longname=$(printf "%.241d" 2)
mkdir d1/.snap/$longname 2> /dev/null
[ -d d1/.snap/$longname ] && fail "Invalid snapshot exists: $longname"
cleanup
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-parents.sh
#!/bin/sh
set -ex
echo "making directory tree and files"
mkdir -p 1/a/b/c/
echo "i'm file1" > 1/a/file1
echo "i'm file2" > 1/a/b/file2
echo "i'm file3" > 1/a/b/c/file3
echo "snapshotting"
mkdir 1/.snap/foosnap1
mkdir 2
echo "moving tree"
mv 1/a 2
echo "checking snapshot contains tree..."
dir1=`find 1/.snap/foosnap1 | wc -w`
dir2=`find 2/ | wc -w`
#diff $dir1 $dir2 && echo "Success!"
test $dir1==$dir2 && echo "Success!"
echo "adding folder and file to tree..."
mkdir 2/a/b/c/d
echo "i'm file 4!" > 2/a/b/c/d/file4
echo "snapshotting tree 2"
mkdir 2/.snap/barsnap2
echo "comparing snapshots"
dir1=`find 1/.snap/foosnap1/ -maxdepth 2 | wc -w`
dir2=`find 2/.snap/barsnap2/ -maxdepth 2 | wc -w`
#diff $dir1 $dir2 && echo "Success!"
test $dir1==$dir2 && echo "Success!"
echo "moving subtree to first folder"
mv 2/a/b/c 1
echo "comparing snapshots and new tree"
dir1=`find 1/ | wc -w`
dir2=`find 2/.snap/barsnap2/a/b/c | wc -w`
#diff $dir1 $dir2 && echo "Success!"
test $dir1==$dir2 && echo "Success!"
rmdir 1/.snap/*
rmdir 2/.snap/*
echo "OK"

ceph-main/qa/workunits/fs/snaps/snaptest-realm-split.sh
#!/bin/sh -x
set -e
mkdir -p 1/a
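# open fd 3 read/write on 1/a/file1 and keep it open across the snapshots and the rename below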
exec 3<> 1/a/file1
echo -n a >&3
mkdir 1/.snap/s1
echo -n b >&3
mkdir 2
# create new snaprealm at dir a, file1's cap should be attached to the new snaprealm
mv 1/a 2
mkdir 2/.snap/s2
echo -n c >&3
exec 3>&-
grep '^a$' 1/.snap/s1/a/file1
grep '^ab$' 2/.snap/s2/a/file1
grep '^abc$' 2/a/file1
rmdir 1/.snap/s1
rmdir 2/.snap/s2
rm -rf 1 2
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-snap-rename.sh
#!/bin/sh -x
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
set -e
mkdir -p d1/d2
mkdir -p d1/d3
mkdir d1/.snap/foo
mkdir d1/d2/.snap/foo
mkdir d1/d3/.snap/foo
mkdir d1/d3/.snap/bar
mv d1/d2/.snap/foo d1/d2/.snap/bar
# snapshot name can't start with _
expect_failure mv d1/d2/.snap/bar d1/d2/.snap/_bar
# can't rename parent snapshot
expect_failure mv d1/d2/.snap/_foo_* d1/d2/.snap/foo
expect_failure mv d1/d2/.snap/_foo_* d1/d2/.snap/_foo_1
# can't rename snapshot to a different directory
expect_failure mv d1/d2/.snap/bar d1/.snap/
# can't overwrite existing snapshot
expect_failure python3 -c "import os; os.rename('d1/d3/.snap/foo', 'd1/d3/.snap/bar')"
# can't move snapshot out of snapdir
expect_failure python3 -c "import os; os.rename('d1/.snap/foo', 'd1/foo')"
rmdir d1/.snap/foo
rmdir d1/d2/.snap/bar
rmdir d1/d3/.snap/foo
rmdir d1/d3/.snap/bar
rm -rf d1
echo OK

ceph-main/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh
#!/bin/sh -x
set -e
file=linux-2.6.33.tar.bz2
wget -q http://download.ceph.com/qa/$file
real=`md5sum $file | awk '{print $1}'`
for f in `seq 1 20`
do
echo $f
cp $file a
mkdir .snap/s
rm a
cp .snap/s/a /tmp/a
cur=`md5sum /tmp/a | awk '{print $1}'`
if [ "$cur" != "$real" ]; then
echo "FAIL: bad match, /tmp/a $cur != real $real"
false
fi
rmdir .snap/s
done
rm $file

ceph-main/qa/workunits/fs/snaps/snaptest-upchildrealms.sh
#!/bin/sh -x
set -e
#
# verify that a snap update on a parent realm will induce
# snap cap writeback for inodes child realms
#
mkdir a
mkdir a/b
mkdir a/.snap/a1
mkdir a/b/.snap/b1
echo asdf > a/b/foo
mkdir a/.snap/a2
# client _should_ have just queued a capsnap for writeback
ln a/b/foo a/b/bar # make the server cow the inode
echo "this should not hang..."
cat a/b/.snap/_a2_*/foo
echo "good, it did not hang."
rmdir a/b/.snap/b1
rmdir a/.snap/a1
rmdir a/.snap/a2
rm -r a
echo "OK"

ceph-main/qa/workunits/fs/snaps/snaptest-xattrwb.sh
#!/bin/sh -x
set -e
echo "testing simple xattr wb"
touch x
setfattr -n user.foo x
mkdir .snap/s1
getfattr -n user.foo .snap/s1/x | grep user.foo
rm x
rmdir .snap/s1
echo "testing wb with pre-wb server cow"
mkdir a
mkdir a/b
mkdir a/b/c
# b now has As but not Ax
setfattr -n user.foo a/b
mkdir a/.snap/s
mkdir a/b/cc
# b now has been cowed on the server, but we still have dirty xattr caps
getfattr -n user.foo a/b # there they are...
getfattr -n user.foo a/.snap/s/b | grep user.foo # should be there, too!
# ok, clean up
rmdir a/.snap/s
rm -r a
echo OK

ceph-main/qa/workunits/fs/snaps/untar_snap_rm.sh
#!/bin/sh
set -ex
do_tarball() {
wget http://download.ceph.com/qa/$1
tar xvf$2 $1
mkdir .snap/k
sync
rm -rv $3
cp -av .snap/k .
rmdir .snap/k
rm -rv k
rm $1
}
do_tarball coreutils_8.5.orig.tar.gz z coreutils-8.5
do_tarball linux-2.6.33.tar.bz2 j linux-2.6.33

ceph-main/qa/workunits/hadoop/repl.sh
#!/usr/bin/env bash
set -e
set -x
# bail if $TESTDIR is not set as this test will fail in that scenario
[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
# if HADOOP_PREFIX is not set, use default
[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; }
# create pools with different replication factors
for repl in 2 3 7 8 9; do
name=hadoop.$repl
ceph osd pool create $name 8 8
ceph osd pool set $name size $repl
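# look up the numeric id of the new pool so it can be added as a cephfs data pool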
id=`ceph osd dump | sed -n "s/^pool \([0-9]*\) '$name'.*/\1/p"`
ceph fs add_data_pool cephfs $id
done
# create a file in each of the pools
for repl in 2 3 7 8 9; do
name=hadoop.$repl
$HADOOP_PREFIX/bin/hadoop fs -rm -f /$name.dat
dd if=/dev/zero bs=1048576 count=1 | \
$HADOOP_PREFIX/bin/hadoop fs -Dceph.data.pools="$name" \
-put - /$name.dat
done
# check that hadoop reports replication matching
# that of the pool the file was written into
for repl in 2 3 7 8 9; do
name=hadoop.$repl
repl2=$($HADOOP_PREFIX/bin/hadoop fs -ls /$name.dat | awk '{print $2}')
if [ $repl -ne $repl2 ]; then
echo "replication factors didn't match!"
exit 1
fi
done
exit 0

ceph-main/qa/workunits/hadoop/terasort.sh
#!/usr/bin/env bash
set -e
set -x
INPUT=/terasort-input
OUTPUT=/terasort-output
REPORT=/terasort-report
num_records=100000
[ ! -z $NUM_RECORDS ] && num_records=$NUM_RECORDS
# bail if $TESTDIR is not set as this test will fail in that scenario
[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
# if HADOOP_PREFIX is not set, use default
[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; }
# Nuke hadoop directories
$HADOOP_PREFIX/bin/hadoop fs -rm -r $INPUT $OUTPUT $REPORT || true
# Generate terasort data
#
#-Ddfs.blocksize=512M \
#-Dio.file.buffer.size=131072 \
#-Dmapreduce.map.java.opts=-Xmx1536m \
#-Dmapreduce.map.memory.mb=2048 \
#-Dmapreduce.task.io.sort.mb=256 \
#-Dyarn.app.mapreduce.am.resource.mb=1024 \
#-Dmapred.map.tasks=64 \
$HADOOP_PREFIX/bin/hadoop jar \
$HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
teragen \
-Dmapred.map.tasks=9 \
$num_records \
$INPUT
# Run the sort job
#
#-Ddfs.blocksize=512M \
#-Dio.file.buffer.size=131072 \
#-Dmapreduce.map.java.opts=-Xmx1536m \
#-Dmapreduce.map.memory.mb=2048 \
#-Dmapreduce.map.output.compress=true \
#-Dmapreduce.map.output.compress.codec=org.apache.hadoop.io.compress.Lz4Codec \
#-Dmapreduce.reduce.java.opts=-Xmx1536m \
#-Dmapreduce.reduce.memory.mb=2048 \
#-Dmapreduce.task.io.sort.factor=100 \
#-Dmapreduce.task.io.sort.mb=768 \
#-Dyarn.app.mapreduce.am.resource.mb=1024 \
#-Dmapred.reduce.tasks=100 \
#-Dmapreduce.terasort.output.replication=1 \
$HADOOP_PREFIX/bin/hadoop jar \
$HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
terasort \
-Dmapred.reduce.tasks=10 \
$INPUT $OUTPUT
# Validate the sorted data
#
#-Ddfs.blocksize=512M \
#-Dio.file.buffer.size=131072 \
#-Dmapreduce.map.java.opts=-Xmx1536m \
#-Dmapreduce.map.memory.mb=2048 \
#-Dmapreduce.reduce.java.opts=-Xmx1536m \
#-Dmapreduce.reduce.memory.mb=2048 \
#-Dmapreduce.task.io.sort.mb=256 \
#-Dyarn.app.mapreduce.am.resource.mb=1024 \
#-Dmapred.reduce.tasks=1 \
$HADOOP_PREFIX/bin/hadoop jar \
$HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
teravalidate \
-Dmapred.reduce.tasks=1 \
$OUTPUT $REPORT
exit 0

ceph-main/qa/workunits/hadoop/wordcount.sh
#!/usr/bin/env bash
set -ex
WC_INPUT=/wc_input
WC_OUTPUT=/wc_output
DATA_INPUT=$(mktemp -d)
echo "starting hadoop-wordcount test"
# bail if $TESTDIR is not set as this test will fail in that scenario
[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
# if HADOOP_PREFIX is not set, use default
[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; }
# Nuke hadoop directories
$HADOOP_PREFIX/bin/hadoop fs -rm -r $WC_INPUT $WC_OUTPUT || true
# Fetch and import testing data set
curl http://download.ceph.com/qa/hadoop_input_files.tar | tar xf - -C $DATA_INPUT
$HADOOP_PREFIX/bin/hadoop fs -copyFromLocal $DATA_INPUT $WC_INPUT
rm -rf $DATA_INPUT
# Run the job
$HADOOP_PREFIX/bin/hadoop jar \
$HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
wordcount $WC_INPUT $WC_OUTPUT
# Cleanup
$HADOOP_PREFIX/bin/hadoop fs -rm -r $WC_INPUT $WC_OUTPUT || true
echo "completed hadoop-wordcount test"
exit 0

ceph-main/qa/workunits/libcephfs/test.sh
#!/bin/sh -e
ceph_test_libcephfs
ceph_test_libcephfs_access
ceph_test_libcephfs_reclaim
ceph_test_libcephfs_lazyio
ceph_test_libcephfs_newops
ceph_test_libcephfs_suidsgid
exit 0

ceph-main/qa/workunits/mgr/test_localpool.sh
#!/bin/sh -ex
ceph config set mgr mgr/localpool/subtree host
ceph config set mgr mgr/localpool/failure_domain osd
ceph mgr module enable localpool
while ! ceph osd pool ls | grep '^by-host-'
do
sleep 5
done
ceph mgr module disable localpool
for p in `ceph osd pool ls | grep '^by-host-'`
do
ceph osd pool rm $p $p --yes-i-really-really-mean-it
done
ceph config rm mgr mgr/localpool/subtree
ceph config rm mgr mgr/localpool/failure_domain
echo OK

ceph-main/qa/workunits/mgr/test_per_module_finisher.sh
#!/usr/bin/env bash
set -ex
# This testcase tests the per module finisher stats for enabled modules
# using check counter (qa/tasks/check_counter.py).
# 'balancer' commands
ceph balancer pool ls
# 'crash' commands
ceph crash ls
ceph crash ls-new
# 'device' commands
ceph device query-daemon-health-metrics mon.a
# 'iostat' command
ceph iostat &
pid=$!
sleep 3
kill -SIGTERM $pid
# 'pg_autoscaler' command
ceph osd pool autoscale-status
# 'progress' command
ceph progress
ceph progress json
# 'status' commands
ceph fs status
ceph osd status
# 'telemetry' commands
ceph telemetry status
ceph telemetry diff
echo OK

ceph-main/qa/workunits/mon/auth_caps.sh
#!/usr/bin/env bash
set -e
set -x
declare -A keymap
combinations="r w x rw rx wx rwx"
for i in ${combinations}; do
k="foo_$i"
k=`ceph auth get-or-create-key client.$i mon "allow $i"` || exit 1
keymap["$i"]=$k
done
# add special caps
keymap["all"]=`ceph auth get-or-create-key client.all mon 'allow *'` || exit 1
tmp=`mktemp`
ceph auth export > $tmp
trap "rm $tmp" INT ERR EXIT QUIT 0
expect() {
set +e
local expected_ret=$1
local ret
shift
cmd=$@
eval $cmd
ret=$?
set -e
if [[ $ret -ne $expected_ret ]]; then
echo "ERROR: running \'$cmd\': expected $expected_ret got $ret"
return 1
fi
return 0
}
read_ops() {
local caps=$1
local has_read=1 has_exec=1
local ret
local args
( echo $caps | grep 'r' ) || has_read=0
( echo $caps | grep 'x' ) || has_exec=0
if [[ "$caps" == "all" ]]; then
has_read=1
has_exec=1
fi
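# read-only mon commands are expected to fail with EACCES (13) unless the caps include both r and x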
ret=13
if [[ $has_read -gt 0 && $has_exec -gt 0 ]]; then
ret=0
fi
args="--id $caps --key ${keymap[$caps]}"
expect $ret ceph auth get client.admin $args
expect $ret ceph auth get-key client.admin $args
expect $ret ceph auth export $args
expect $ret ceph auth export client.admin $args
expect $ret ceph auth ls $args
expect $ret ceph auth print-key client.admin $args
expect $ret ceph auth print_key client.admin $args
}
write_ops() {
local caps=$1
local has_read=1 has_write=1 has_exec=1
local ret
local args
( echo $caps | grep 'r' ) || has_read=0
( echo $caps | grep 'w' ) || has_write=0
( echo $caps | grep 'x' ) || has_exec=0
if [[ "$caps" == "all" ]]; then
has_read=1
has_write=1
has_exec=1
fi
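# mutating mon commands need r, w and x caps; otherwise expect EACCES (13)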
ret=13
if [[ $has_read -gt 0 && $has_write -gt 0 && $has_exec -gt 0 ]]; then
ret=0
fi
args="--id $caps --key ${keymap[$caps]}"
expect $ret ceph auth add client.foo $args
expect $ret "ceph auth caps client.foo mon 'allow *' $args"
expect $ret ceph auth get-or-create client.admin $args
expect $ret ceph auth get-or-create-key client.admin $args
expect $ret ceph auth get-or-create-key client.baz $args
expect $ret ceph auth del client.foo $args
expect $ret ceph auth del client.baz $args
expect $ret ceph auth import -i $tmp $args
}
echo "running combinations: ${!keymap[@]}"
subcmd=$1
for i in ${!keymap[@]}; do
echo "caps: $i"
if [[ -z "$subcmd" || "$subcmd" == "read" || "$subcmd" == "all" ]]; then
read_ops $i
fi
if [[ -z "$subcmd" || "$subcmd" == "write" || "$subcmd" == "all" ]]; then
write_ops $i
fi
done
# cleanup
for i in ${combinations} all; do
ceph auth del client.$i || exit 1
done
echo "OK"

ceph-main/qa/workunits/mon/auth_key_rotation.sh
#!/usr/bin/bash -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
ceph auth export
ceph auth rm client.rot
ceph auth get-or-create client.rot mon 'allow rwx'
ceph auth export client.rot | grep key
ceph auth export client.rot | expect_false grep pending.key
ceph auth get-or-create-pending client.rot
ceph auth export client.rot | grep key
ceph auth export client.rot | grep pending.key
ceph auth clear-pending client.rot
ceph auth export client.rot | expect_false grep pending.key
ceph auth get-or-create-pending client.rot
ceph auth export client.rot | grep key
ceph auth export client.rot | grep pending.key
K=$(ceph auth export client.rot | grep 'key = ' | head -n 1 | awk '{print $3}')
PK=$(ceph auth export client.rot | grep pending.key | awk '{print $4}')
echo "K is $K"
echo "PK is $PK"
ceph -n client.rot --key $K -s
ceph auth commit-pending client.rot
ceph auth export client.rot | expect_false grep pending.key
ceph auth export client.rot | grep key | grep $PK
ceph auth get-or-create-pending client.rot
ceph auth export client.rot | grep key
ceph auth export client.rot | grep pending.key
K=$(ceph auth export client.rot | grep 'key = ' | head -n 1 | awk '{print $3}')
PK=$(ceph auth export client.rot | grep pending.key | awk '{print $4}')
echo "2, K is $K"
echo "2, PK is $PK"
ceph auth export client.rot
while ceph -n client.rot --key $K -s ; do
ceph auth export client.rot
ceph -n client.rot --key $PK -s
sleep 1
done
ceph auth export client.rot | expect_false grep pending.key
ceph auth export client.rot | grep key | grep $PK
ceph -n client.rot --key $PK -s
echo ok

ceph-main/qa/workunits/mon/caps.sh
#!/usr/bin/env bash
set -x
tmp=/tmp/cephtest-mon-caps-madness
exit_on_error=1
[[ ! -z $TEST_EXIT_ON_ERROR ]] && exit_on_error=$TEST_EXIT_ON_ERROR
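# ETIMEDOUT differs by platform: 60 on FreeBSD, 110 on Linux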
if [ `uname` = FreeBSD ]; then
ETIMEDOUT=60
else
ETIMEDOUT=110
fi
expect()
{
cmd=$1
expected_ret=$2
echo $cmd
eval $cmd >&/dev/null
ret=$?
if [[ $ret -ne $expected_ret ]]; then
echo "Error: Expected return $expected_ret, got $ret"
[[ $exit_on_error -eq 1 ]] && exit 1
return 1
fi
return 0
}
expect "ceph auth get-or-create client.bazar > $tmp.bazar.keyring" 0
expect "ceph -k $tmp.bazar.keyring --user bazar quorum_status" 13
ceph auth del client.bazar
c="'allow command \"auth ls\", allow command quorum_status'"
expect "ceph auth get-or-create client.foo mon $c > $tmp.foo.keyring" 0
expect "ceph -k $tmp.foo.keyring --user foo quorum_status" 0
expect "ceph -k $tmp.foo.keyring --user foo auth ls" 0
expect "ceph -k $tmp.foo.keyring --user foo auth export" 13
expect "ceph -k $tmp.foo.keyring --user foo auth del client.bazar" 13
expect "ceph -k $tmp.foo.keyring --user foo osd dump" 13
# the monitor drops the subscribe message from a client that lacks the caps to
# read from the mon. in that case the client waits for the mgrmap in vain when
# it is asked to send a command to the mgr. "pg dump" is served by the mgr, so
# we need to set a timeout for testing this scenario.
#
# leave plenty of time here because the mons might be thrashing.
export CEPH_ARGS='--rados-mon-op-timeout=300'
expect "ceph -k $tmp.foo.keyring --user foo pg dump" $ETIMEDOUT
export CEPH_ARGS=''
ceph auth del client.foo
expect "ceph -k $tmp.foo.keyring --user foo quorum_status" 13
c="'allow command service with prefix=list, allow command quorum_status'"
expect "ceph auth get-or-create client.bar mon $c > $tmp.bar.keyring" 0
expect "ceph -k $tmp.bar.keyring --user bar quorum_status" 0
expect "ceph -k $tmp.bar.keyring --user bar auth ls" 13
expect "ceph -k $tmp.bar.keyring --user bar auth export" 13
expect "ceph -k $tmp.bar.keyring --user bar auth del client.foo" 13
expect "ceph -k $tmp.bar.keyring --user bar osd dump" 13
# again, we'll need to timeout.
export CEPH_ARGS='--rados-mon-op-timeout=300'
expect "ceph -k $tmp.bar.keyring --user bar pg dump" $ETIMEDOUT
export CEPH_ARGS=''
ceph auth del client.bar
expect "ceph -k $tmp.bar.keyring --user bar quorum_status" 13
rm $tmp.bazar.keyring $tmp.foo.keyring $tmp.bar.keyring
# invalid caps health warning
cat <<EOF | ceph auth import -i -
[client.bad]
caps mon = this is wrong
caps osd = does not parse
caps mds = also does not parse
EOF
ceph health | grep AUTH_BAD_CAP
ceph health detail | grep client.bad
ceph auth rm client.bad
expect "ceph auth health | grep AUTH_BAD_CAP" 1
echo OK

ceph-main/qa/workunits/mon/config.sh
#!/bin/bash -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
ceph config dump
# value validation
ceph config set mon.a debug_asok 22
ceph config set mon.a debug_asok 22/33
ceph config get mon.a debug_asok | grep 22
ceph config set mon.a debug_asok 1/2
expect_false ceph config set mon.a debug_asok foo
expect_false ceph config set mon.a debug_asok -10
ceph config rm mon.a debug_asok
ceph config set global log_graylog_port 123
expect_false ceph config set global log_graylog_port asdf
ceph config rm global log_graylog_port
ceph config set mon mon_cluster_log_to_stderr true
ceph config get mon.a mon_cluster_log_to_stderr | grep true
ceph config set mon mon_cluster_log_to_stderr 2
ceph config get mon.a mon_cluster_log_to_stderr | grep true
ceph config set mon mon_cluster_log_to_stderr 1
ceph config get mon.a mon_cluster_log_to_stderr | grep true
ceph config set mon mon_cluster_log_to_stderr false
ceph config get mon.a mon_cluster_log_to_stderr | grep false
ceph config set mon mon_cluster_log_to_stderr 0
ceph config get mon.a mon_cluster_log_to_stderr | grep false
expect_false ceph config set mon mon_cluster_log_to_stderr fiddle
expect_false ceph config set mon mon_cluster_log_to_stderr ''
ceph config rm mon mon_cluster_log_to_stderr
expect_false ceph config set mon.a osd_pool_default_type foo
ceph config set mon.a osd_pool_default_type replicated
ceph config rm mon.a osd_pool_default_type
# scoping
ceph config set global debug_asok 33
ceph config get mon.a debug_asok | grep 33
ceph config set mon debug_asok 11
ceph config get mon.a debug_asok | grep 11
ceph config set mon.a debug_asok 22
ceph config get mon.a debug_asok | grep 22
ceph config rm mon.a debug_asok
ceph config get mon.a debug_asok | grep 11
ceph config rm mon debug_asok
ceph config get mon.a debug_asok | grep 33
# nested .-prefix scoping
ceph config set client.foo debug_asok 44
ceph config get client.foo.bar debug_asok | grep 44
ceph config get client.foo.bar.baz debug_asok | grep 44
ceph config set client.foo.bar debug_asok 55
ceph config get client.foo.bar.baz debug_asok | grep 55
ceph config rm client.foo debug_asok
ceph config get client.foo.bar.baz debug_asok | grep 55
ceph config rm client.foo.bar debug_asok
ceph config get client.foo.bar.baz debug_asok | grep 33
ceph config rm global debug_asok
# whitespace keys
ceph config set client.foo 'debug asok' 44
ceph config get client.foo 'debug asok' | grep 44
ceph config set client.foo debug_asok 55
ceph config get client.foo 'debug asok' | grep 55
ceph config set client.foo 'debug asok' 66
ceph config get client.foo debug_asok | grep 66
ceph config rm client.foo debug_asok
ceph config set client.foo debug_asok 66
ceph config rm client.foo 'debug asok'
# help
ceph config help debug_asok | grep debug_asok
# show
ceph config set osd.0 debug_asok 33
while ! ceph config show osd.0 | grep debug_asok | grep 33 | grep mon
do
sleep 1
done
ceph config set osd.0 debug_asok 22
while ! ceph config show osd.0 | grep debug_asok | grep 22 | grep mon
do
sleep 1
done
ceph tell osd.0 config set debug_asok 99
while ! ceph config show osd.0 | grep debug_asok | grep 99
do
sleep 1
done
ceph config show osd.0 | grep debug_asok | grep 'override mon'
ceph tell osd.0 config unset debug_asok
ceph tell osd.0 config unset debug_asok
ceph config rm osd.0 debug_asok
while ceph config show osd.0 | grep debug_asok | grep mon
do
sleep 1
done
ceph config show osd.0 | grep -c debug_asok | grep 0
ceph config set osd.0 osd_scrub_cost 123
while ! ceph config show osd.0 | grep osd_scrub_cost | grep mon
do
sleep 1
done
ceph config rm osd.0 osd_scrub_cost
# show-with-defaults
ceph config show-with-defaults osd.0 | grep debug_asok
# assimilate
t1=`mktemp`
t2=`mktemp`
cat <<EOF > $t1
[osd.0]
keyring = foo
debug_asok = 66
EOF
ceph config assimilate-conf -i $t1 | tee $t2
grep keyring $t2
expect_false grep debug_asok $t2
rm -f $t1 $t2
expect_false ceph config reset
expect_false ceph config reset -1
# we are at end of testing, so it's okay to revert everything
ceph config reset 0
echo OK

ceph-main/qa/workunits/mon/crush_ops.sh
#!/usr/bin/env bash
set -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
ceph osd crush dump
# rules
ceph osd crush rule dump
ceph osd crush rule ls
ceph osd crush rule list
ceph osd crush rule create-simple foo default host
ceph osd crush rule create-simple foo default host
ceph osd crush rule create-simple bar default host
ceph osd crush rm-device-class all
ceph osd crush set-device-class ssd osd.0
ceph osd crush set-device-class hdd osd.1
ceph osd crush rule create-replicated foo-ssd default host ssd
ceph osd crush rule create-replicated foo-hdd default host hdd
ceph osd crush rule ls-by-class ssd | grep 'foo-ssd'
ceph osd crush rule ls-by-class ssd | expect_false grep 'foo-hdd'
ceph osd crush rule ls-by-class hdd | grep 'foo-hdd'
ceph osd crush rule ls-by-class hdd | expect_false grep 'foo-ssd'
ceph osd erasure-code-profile set ec-foo-ssd crush-device-class=ssd m=2 k=2
ceph osd pool create ec-foo 2 erasure ec-foo-ssd
ceph osd pool rm ec-foo ec-foo --yes-i-really-really-mean-it
ceph osd crush rule ls | grep foo
ceph osd crush rule rename foo foo-asdf
ceph osd crush rule rename foo foo-asdf # idempotent
ceph osd crush rule rename bar bar-asdf
ceph osd crush rule ls | grep 'foo-asdf'
ceph osd crush rule ls | grep 'bar-asdf'
ceph osd crush rule rm foo 2>&1 | grep 'does not exist'
ceph osd crush rule rm bar 2>&1 | grep 'does not exist'
ceph osd crush rule rename foo-asdf foo
ceph osd crush rule rename foo-asdf foo # idempotent
ceph osd crush rule rename bar-asdf bar
ceph osd crush rule ls | expect_false grep 'foo-asdf'
ceph osd crush rule ls | expect_false grep 'bar-asdf'
ceph osd crush rule rm foo
ceph osd crush rule rm foo # idempotent
ceph osd crush rule rm bar
# can't delete in-use rules, tho:
ceph osd pool create pinning_pool 1
expect_false ceph osd crush rule rm replicated_rule
ceph osd pool rm pinning_pool pinning_pool --yes-i-really-really-mean-it
# build a simple map
expect_false ceph osd crush add-bucket foo osd
ceph osd crush add-bucket foo root
o1=`ceph osd create`
o2=`ceph osd create`
ceph osd crush add $o1 1 host=host1 root=foo
ceph osd crush add $o1 1 host=host1 root=foo # idempotent
ceph osd crush add $o2 1 host=host2 root=foo
ceph osd crush add $o2 1 host=host2 root=foo # idempotent
ceph osd crush add-bucket bar root
ceph osd crush add-bucket bar root # idempotent
ceph osd crush link host1 root=bar
ceph osd crush link host1 root=bar # idempotent
ceph osd crush link host2 root=bar
ceph osd crush link host2 root=bar # idempotent
ceph osd tree | grep -c osd.$o1 | grep -q 2
ceph osd tree | grep -c host1 | grep -q 2
ceph osd tree | grep -c osd.$o2 | grep -q 2
ceph osd tree | grep -c host2 | grep -q 2
expect_false ceph osd crush rm host1 foo # not empty
ceph osd crush unlink host1 foo
ceph osd crush unlink host1 foo
ceph osd tree | grep -c host1 | grep -q 1
expect_false ceph osd crush rm foo # not empty
expect_false ceph osd crush rm bar # not empty
ceph osd crush unlink host1 bar
ceph osd tree | grep -c host1 | grep -q 1 # now an orphan
ceph osd crush rm osd.$o1 host1
ceph osd crush rm host1
ceph osd tree | grep -c host1 | grep -q 0
expect_false ceph osd tree-from host1
ceph osd tree-from host2
expect_false ceph osd tree-from osd.$o2
expect_false ceph osd crush rm bar # not empty
ceph osd crush unlink host2
ceph osd crush add-bucket host-for-test host root=root-for-test rack=rack-for-test
ceph osd tree | grep host-for-test
ceph osd tree | grep rack-for-test
ceph osd tree | grep root-for-test
ceph osd crush rm host-for-test
ceph osd crush rm rack-for-test
ceph osd crush rm root-for-test
# reference foo and bar with a rule
ceph osd crush rule create-simple foo-rule foo host firstn
expect_false ceph osd crush rm foo
ceph osd crush rule rm foo-rule
ceph osd crush rm bar
ceph osd crush rm foo
ceph osd crush rm osd.$o2 host2
ceph osd crush rm host2
ceph osd crush add-bucket foo host
ceph osd crush move foo root=default rack=localrack
ceph osd crush create-or-move osd.$o1 1.0 root=default
ceph osd crush move osd.$o1 host=foo
ceph osd find osd.$o1 | grep host | grep foo
ceph osd crush rm osd.$o1
ceph osd crush rm osd.$o2
ceph osd crush rm foo
# test reweight
o3=`ceph osd create`
ceph osd crush add $o3 123 root=default
ceph osd tree | grep osd.$o3 | grep 123
ceph osd crush reweight osd.$o3 113
expect_false ceph osd crush reweight osd.$o3 123456
ceph osd tree | grep osd.$o3 | grep 113
ceph osd crush rm osd.$o3
ceph osd rm osd.$o3
# test reweight-subtree
o4=`ceph osd create`
o5=`ceph osd create`
ceph osd crush add $o4 123 root=default host=foobaz
ceph osd crush add $o5 123 root=default host=foobaz
ceph osd tree | grep osd.$o4 | grep 123
ceph osd tree | grep osd.$o5 | grep 123
ceph osd crush reweight-subtree foobaz 155
expect_false ceph osd crush reweight-subtree foobaz 123456
ceph osd tree | grep osd.$o4 | grep 155
ceph osd tree | grep osd.$o5 | grep 155
ceph osd crush rm osd.$o4
ceph osd crush rm osd.$o5
ceph osd rm osd.$o4
ceph osd rm osd.$o5
# weight sets
# make sure we require luminous before testing weight-sets
ceph osd set-require-min-compat-client luminous
ceph osd crush weight-set dump
ceph osd crush weight-set ls
expect_false ceph osd crush weight-set reweight fooset osd.0 .9
ceph osd pool create fooset 8
ceph osd pool create barset 8
ceph osd pool set barset size 3
expect_false ceph osd crush weight-set reweight fooset osd.0 .9
ceph osd crush weight-set create fooset flat
ceph osd crush weight-set create barset positional
ceph osd crush weight-set ls | grep fooset
ceph osd crush weight-set ls | grep barset
ceph osd crush weight-set dump
ceph osd crush weight-set reweight fooset osd.0 .9
expect_false ceph osd crush weight-set reweight fooset osd.0 .9 .9
expect_false ceph osd crush weight-set reweight barset osd.0 .9
ceph osd crush weight-set reweight barset osd.0 .9 .9 .9
ceph osd crush weight-set ls | grep -c fooset | grep -q 1
ceph osd crush weight-set rm fooset
ceph osd crush weight-set ls | grep -c fooset | grep -q 0
ceph osd crush weight-set ls | grep barset
ceph osd crush weight-set rm barset
ceph osd crush weight-set ls | grep -c barset | grep -q 0
ceph osd crush weight-set create-compat
ceph osd crush weight-set ls | grep '(compat)'
ceph osd crush weight-set rm-compat
# weight set vs device classes
ceph osd pool create cool 2
ceph osd pool create cold 2
ceph osd pool set cold size 2
ceph osd crush weight-set create-compat
ceph osd crush weight-set create cool flat
ceph osd crush weight-set create cold positional
ceph osd crush rm-device-class osd.0
ceph osd crush weight-set reweight-compat osd.0 10.5
ceph osd crush weight-set reweight cool osd.0 11.5
ceph osd crush weight-set reweight cold osd.0 12.5 12.4
ceph osd crush set-device-class fish osd.0
ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 10\\.
ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 11\\.
ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 12\\.
ceph osd crush rm-device-class osd.0
ceph osd crush set-device-class globster osd.0
ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 10\\.
ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 11\\.
ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 12\\.
ceph osd crush weight-set reweight-compat osd.0 7.5
ceph osd crush weight-set reweight cool osd.0 8.5
ceph osd crush weight-set reweight cold osd.0 6.5 6.6
ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 7\\.
ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 8\\.
ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 6\\.
ceph osd crush rm-device-class osd.0
ceph osd pool rm cool cool --yes-i-really-really-mean-it
ceph osd pool rm cold cold --yes-i-really-really-mean-it
ceph osd crush weight-set rm-compat
# weight set vs device classes vs move
ceph osd crush weight-set create-compat
ceph osd crush add-bucket fooo host
ceph osd crush move fooo root=default
ceph osd crush add-bucket barr rack
ceph osd crush move barr root=default
ceph osd crush move fooo rack=barr
ceph osd crush rm fooo
ceph osd crush rm barr
ceph osd crush weight-set rm-compat
# this sequence would crash at one point
ceph osd crush weight-set create-compat
ceph osd crush add-bucket r1 rack root=default
for f in `seq 1 32`; do
ceph osd crush add-bucket h$f host rack=r1
done
for f in `seq 1 32`; do
ceph osd crush rm h$f
done
ceph osd crush rm r1
ceph osd crush weight-set rm-compat
echo OK

ceph-main/qa/workunits/mon/osd.sh
#!/bin/sh -x
set -e
ua=`uuidgen`
ub=`uuidgen`
# should get same id with same uuid
na=`ceph osd create $ua`
test $na -eq `ceph osd create $ua`
nb=`ceph osd create $ub`
test $nb -eq `ceph osd create $ub`
test $nb -ne $na
ceph osd rm $na
ceph osd rm $na
ceph osd rm $nb
ceph osd rm 1000
na2=`ceph osd create $ua`
echo OK

ceph-main/qa/workunits/mon/pg_autoscaler.sh
#!/bin/bash -ex
NUM_OSDS=$(ceph osd ls | wc -l)
if [ $NUM_OSDS -lt 6 ]; then
echo "test requires at least 6 OSDs"
exit 1
fi
NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ $NUM_POOLS -gt 0 ]; then
echo "test requires no preexisting pools"
exit 1
fi
function wait_for() {
local sec=$1
local cmd=$2
while true ; do
if bash -c "$cmd" ; then
break
fi
sec=$(( $sec - 1 ))
if [ $sec -eq 0 ]; then
echo failed
return 1
fi
sleep 1
done
return 0
}
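# round the argument to the nearest power of two (log2 via bc, then round and exponentiate)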
function power2() { echo "x=l($1)/l(2); scale=0; 2^((x+0.5)/1)" | bc -l;}
function eval_actual_expected_val() {
local actual_value=$1
local expected_value=$2
if [[ $actual_value = $expected_value ]]
then
echo "Success: " $actual_value "=" $expected_value
else
echo "Error: " $actual_value "!=" $expected_value
exit 1
fi
}
# enable
ceph config set mgr mgr/pg_autoscaler/sleep_interval 60
ceph mgr module enable pg_autoscaler
# ceph config set global osd_pool_default_pg_autoscale_mode on
# pg_num_min
ceph osd pool create meta0 16
ceph osd pool create bulk0 16 --bulk
ceph osd pool create bulk1 16 --bulk
ceph osd pool create bulk2 16 --bulk
ceph osd pool set meta0 pg_autoscale_mode on
ceph osd pool set bulk0 pg_autoscale_mode on
ceph osd pool set bulk1 pg_autoscale_mode on
ceph osd pool set bulk2 pg_autoscale_mode on
# set pool size
ceph osd pool set meta0 size 2
ceph osd pool set bulk0 size 2
ceph osd pool set bulk1 size 2
ceph osd pool set bulk2 size 2
# get num pools again since we created more pools
NUM_POOLS=$(ceph osd pool ls | wc -l)
# get bulk flag of each pool through the command ceph osd pool autoscale-status
BULK_FLAG_1=$(ceph osd pool autoscale-status | grep 'meta0' | grep -o -m 1 'True\|False' || true)
BULK_FLAG_2=$(ceph osd pool autoscale-status | grep 'bulk0' | grep -o -m 1 'True\|False' || true)
BULK_FLAG_3=$(ceph osd pool autoscale-status | grep 'bulk1' | grep -o -m 1 'True\|False' || true)
BULK_FLAG_4=$(ceph osd pool autoscale-status | grep 'bulk2' | grep -o -m 1 'True\|False' || true)
# evaluate the accuracy of ceph osd pool autoscale-status specifically the `BULK` column
eval_actual_expected_val $BULK_FLAG_1 'False'
eval_actual_expected_val $BULK_FLAG_2 'True'
eval_actual_expected_val $BULK_FLAG_3 'True'
eval_actual_expected_val $BULK_FLAG_4 'True'
# This part of this code will now evaluate the accuracy of the autoscaler
# get pool size
POOL_SIZE_1=$(ceph osd pool get meta0 size| grep -Eo '[0-9]{1,4}')
POOL_SIZE_2=$(ceph osd pool get bulk0 size| grep -Eo '[0-9]{1,4}')
POOL_SIZE_3=$(ceph osd pool get bulk1 size| grep -Eo '[0-9]{1,4}')
POOL_SIZE_4=$(ceph osd pool get bulk2 size| grep -Eo '[0-9]{1,4}')
# Calculate the target pg_num of each pool.
# The first pool (meta0) is non-bulk, so handle it first.
# Since its capacity ratio is 0, the meta pool keeps its current pg_num.
TARGET_PG_1=$(ceph osd pool get meta0 pg_num| grep -Eo '[0-9]{1,4}')
PG_LEFT=$NUM_OSDS*100
NUM_POOLS_LEFT=$NUM_POOLS-1
# The remaining pools are bulk pools of equal size, so the
# calculation is straightforward.
TARGET_PG_2=$(power2 $((($PG_LEFT)/($NUM_POOLS_LEFT)/($POOL_SIZE_2))))
TARGET_PG_3=$(power2 $((($PG_LEFT)/($NUM_POOLS_LEFT)/($POOL_SIZE_3))))
TARGET_PG_4=$(power2 $((($PG_LEFT)/($NUM_POOLS_LEFT)/($POOL_SIZE_4))))
# evaluate target_pg against pg num of each pools
wait_for 300 "ceph osd pool get meta0 pg_num | grep $TARGET_PG_1"
wait_for 300 "ceph osd pool get bulk0 pg_num | grep $TARGET_PG_2"
wait_for 300 "ceph osd pool get bulk1 pg_num | grep $TARGET_PG_3"
wait_for 300 "ceph osd pool get bulk2 pg_num | grep $TARGET_PG_4"
# target ratio
ceph osd pool set meta0 target_size_ratio 5
ceph osd pool set bulk0 target_size_ratio 1
sleep 60
APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
test $APGS -gt 100
test $BPGS -gt 10
# small ratio change does not change pg_num
ceph osd pool set meta0 target_size_ratio 7
ceph osd pool set bulk0 target_size_ratio 2
sleep 60
APGS2=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target')
BPGS2=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
test $APGS -eq $APGS2
test $BPGS -eq $BPGS2
# target_size
ceph osd pool set meta0 target_size_bytes 1000000000000000
ceph osd pool set bulk0 target_size_bytes 1000000000000000
ceph osd pool set meta0 target_size_ratio 0
ceph osd pool set bulk0 target_size_ratio 0
wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED"
ceph osd pool set meta0 target_size_bytes 1000
ceph osd pool set bulk0 target_size_bytes 1000
ceph osd pool set meta0 target_size_ratio 1
wait_for 60 "ceph health detail | grep POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO"
# test autoscale warn
ceph osd pool create warn0 1 --autoscale-mode=warn
wait_for 120 "ceph health detail | grep POOL_TOO_FEW_PGS"
ceph osd pool create warn1 256 --autoscale-mode=warn
wait_for 120 "ceph health detail | grep POOL_TOO_MANY_PGS"
ceph osd pool rm meta0 meta0 --yes-i-really-really-mean-it
ceph osd pool rm bulk0 bulk0 --yes-i-really-really-mean-it
ceph osd pool rm bulk1 bulk1 --yes-i-really-really-mean-it
ceph osd pool rm bulk2 bulk2 --yes-i-really-really-mean-it
ceph osd pool rm warn0 warn0 --yes-i-really-really-mean-it
ceph osd pool rm warn1 warn1 --yes-i-really-really-mean-it
echo OK
| 5,383 | 33.292994 | 97 | sh |
null | ceph-main/qa/workunits/mon/pool_ops.sh | #!/usr/bin/env bash
set -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
function get_config_value_or_die()
{
local pool_name config_opt raw val
pool_name=$1
config_opt=$2
raw="`$SUDO ceph osd pool get $pool_name $config_opt 2>/dev/null`"
if [[ $? -ne 0 ]]; then
echo "error obtaining config opt '$config_opt' from '$pool_name': $raw"
exit 1
fi
raw=`echo $raw | sed -e 's/[{} "]//g'`
val=`echo $raw | cut -f2 -d:`
echo "$val"
return 0
}
function expect_config_value()
{
local pool_name config_opt expected_val val
pool_name=$1
config_opt=$2
expected_val=$3
val=$(get_config_value_or_die $pool_name $config_opt)
if [[ "$val" != "$expected_val" ]]; then
echo "expected '$expected_val', got '$val'"
exit 1
fi
}
# pg_num min/max
TEST_POOL=testpool1234
ceph osd pool create testpool1234 8 --autoscale-mode off
ceph osd pool set $TEST_POOL pg_num_min 2
ceph osd pool get $TEST_POOL pg_num_min | grep 2
ceph osd pool set $TEST_POOL pg_num_max 33
ceph osd pool get $TEST_POOL pg_num_max | grep 33
expect_false ceph osd pool set $TEST_POOL pg_num_min 9
expect_false ceph osd pool set $TEST_POOL pg_num_max 7
expect_false ceph osd pool set $TEST_POOL pg_num 1
expect_false ceph osd pool set $TEST_POOL pg_num 44
ceph osd pool set $TEST_POOL pg_num_min 0
expect_false ceph osd pool get $TEST_POOL pg_num_min
ceph osd pool set $TEST_POOL pg_num_max 0
expect_false ceph osd pool get $TEST_POOL pg_num_max
ceph osd pool delete $TEST_POOL $TEST_POOL --yes-i-really-really-mean-it
# note: we need to pass the other args or ceph_argparse.py will take
# 'invalid' that is not replicated|erasure and assume it is the next
# argument, which is a string.
expect_false ceph osd pool create foo 123 123 invalid foo-profile foo-rule
ceph osd pool create foo 123 123 replicated
ceph osd pool create fooo 123 123 erasure default
ceph osd pool create foooo 123
ceph osd pool create foo 123 # idempotent
ceph osd pool set foo size 1 --yes-i-really-mean-it
expect_config_value "foo" "min_size" 1
ceph osd pool set foo size 4
expect_config_value "foo" "min_size" 2
ceph osd pool set foo size 10
expect_config_value "foo" "min_size" 5
expect_false ceph osd pool set foo size 0
expect_false ceph osd pool set foo size 20
# should fail due to safety interlock
expect_false ceph osd pool delete foo
expect_false ceph osd pool delete foo foo
expect_false ceph osd pool delete foo foo --force
expect_false ceph osd pool delete foo fooo --yes-i-really-mean-it
expect_false ceph osd pool delete foo --yes-i-really-mean-it foo
ceph osd pool delete foooo foooo --yes-i-really-really-mean-it
ceph osd pool delete fooo fooo --yes-i-really-really-mean-it
ceph osd pool delete foo foo --yes-i-really-really-mean-it
# idempotent
ceph osd pool delete foo foo --yes-i-really-really-mean-it
ceph osd pool delete fooo fooo --yes-i-really-really-mean-it
ceph osd pool delete fooo fooo --yes-i-really-really-mean-it
# non-existent pool
ceph osd pool delete fuggg fuggg --yes-i-really-really-mean-it
echo OK
| 3,062 | 28.171429 | 75 | sh |
null | ceph-main/qa/workunits/mon/rbd_snaps_ops.sh | #!/usr/bin/env bash
# attempt to trigger #6047
cmd_no=0
expect()
{
cmd_no=$(($cmd_no+1))
cmd="$1"
expected=$2
echo "[$cmd_no] $cmd"
eval $cmd
ret=$?
if [[ $ret -ne $expected ]]; then
echo "[$cmd_no] unexpected return '$ret', expected '$expected'"
exit 1
fi
}
ceph osd pool delete test test --yes-i-really-really-mean-it || true
expect 'ceph osd pool create test 8 8' 0
expect 'ceph osd pool application enable test rbd' 0
expect 'ceph osd pool mksnap test snapshot' 0
expect 'ceph osd pool rmsnap test snapshot' 0
expect 'rbd --pool=test --rbd_validate_pool=false create --size=102400 image' 0
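# creating an RBD (self-managed) snapshot must fail with EINVAL (22): the pool already has a pool-level snapshot, and the two snapshot modes cannot be mixed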
expect 'rbd --pool=test snap create image@snapshot' 22
expect 'ceph osd pool delete test test --yes-i-really-really-mean-it' 0
expect 'ceph osd pool create test 8 8' 0
expect 'rbd --pool=test pool init' 0
expect 'rbd --pool=test create --size=102400 image' 0
expect 'rbd --pool=test snap create image@snapshot' 0
expect 'rbd --pool=test snap ls image' 0
expect 'rbd --pool=test snap rm image@snapshot' 0
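# conversely, pool-level mksnap must now fail with EINVAL (22): the RBD snapshot above switched the pool to self-managed snapshots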
expect 'ceph osd pool mksnap test snapshot' 22
expect 'ceph osd pool delete test test --yes-i-really-really-mean-it' 0
# reproduce 7210 and expect it to be fixed
# basically create such a scenario where we end up deleting what used to
# be an unmanaged snapshot from a not-unmanaged pool
ceph osd pool delete test-foo test-foo --yes-i-really-really-mean-it || true
expect 'ceph osd pool create test-foo 8' 0
expect 'ceph osd pool application enable test-foo rbd' 0
expect 'rbd --pool test-foo create --size 1024 image' 0
expect 'rbd --pool test-foo snap create image@snapshot' 0
ceph osd pool delete test-bar test-bar --yes-i-really-really-mean-it || true
expect 'ceph osd pool create test-bar 8' 0
expect 'ceph osd pool application enable test-bar rbd' 0
expect 'rados cppool test-foo test-bar --yes-i-really-mean-it' 0
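# pool snapshot state is not carried over by cppool, so removing the copied image's snapshot is expected to fail with EOPNOTSUPP (95) rather than delete what is now an unmanaged snapshot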
expect 'rbd --pool test-bar snap rm image@snapshot' 95
expect 'ceph osd pool delete test-foo test-foo --yes-i-really-really-mean-it' 0
expect 'ceph osd pool delete test-bar test-bar --yes-i-really-really-mean-it' 0
echo OK
| 2,074 | 32.467742 | 79 | sh |
null | ceph-main/qa/workunits/mon/test_config_key_caps.sh | #!/usr/bin/env bash
set -x
set -e
tmp=$(mktemp -d -p /tmp test_mon_config_key_caps.XXXXX)
entities=()
function cleanup()
{
set +e
set +x
if [[ -e $tmp/keyring ]] && [[ -e $tmp/keyring.orig ]]; then
grep '\[.*\..*\]' $tmp/keyring.orig > $tmp/entities.orig
for e in $(grep '\[.*\..*\]' $tmp/keyring | \
diff $tmp/entities.orig - | \
sed -n 's/^.*\[\(.*\..*\)\]/\1/p');
do
ceph auth rm $e 2>&1 >& /dev/null
done
fi
#rm -fr $tmp
}
trap cleanup 0 # cleanup on exit
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
# for cleanup purposes
ceph auth export -o $tmp/keyring.orig
k=$tmp/keyring
# setup a few keys
ceph config-key ls
ceph config-key set daemon-private/osd.123/test-foo
ceph config-key set mgr/test-foo
ceph config-key set device/test-foo
ceph config-key set test/foo
allow_aa=client.allow_aa
allow_bb=client.allow_bb
allow_cc=client.allow_cc
mgr_a=mgr.a
mgr_b=mgr.b
osd_a=osd.100
osd_b=osd.200
prefix_aa=client.prefix_aa
prefix_bb=client.prefix_bb
prefix_cc=client.prefix_cc
match_aa=client.match_aa
match_bb=client.match_bb
fail_aa=client.fail_aa
fail_bb=client.fail_bb
fail_cc=client.fail_cc
fail_dd=client.fail_dd
fail_ee=client.fail_ee
fail_ff=client.fail_ff
fail_gg=client.fail_gg
fail_writes=client.fail_writes
ceph auth get-or-create $allow_aa mon 'allow *'
ceph auth get-or-create $allow_bb mon 'allow service config-key rwx'
ceph auth get-or-create $allow_cc mon 'allow command "config-key get"'
ceph auth get-or-create $mgr_a mon 'allow profile mgr'
ceph auth get-or-create $mgr_b mon 'allow profile mgr'
ceph auth get-or-create $osd_a mon 'allow profile osd'
ceph auth get-or-create $osd_b mon 'allow profile osd'
ceph auth get-or-create $prefix_aa mon \
"allow command \"config-key get\" with key prefix client/$prefix_aa"
cap="allow command \"config-key set\" with key prefix client/"
cap="$cap,allow command \"config-key get\" with key prefix client/$prefix_bb"
ceph auth get-or-create $prefix_bb mon "$cap"
cap="allow command \"config-key get\" with key prefix client/"
cap="$cap, allow command \"config-key set\" with key prefix client/"
cap="$cap, allow command \"config-key ls\""
ceph auth get-or-create $prefix_cc mon "$cap"
cap="allow command \"config-key get\" with key=client/$match_aa/foo"
ceph auth get-or-create $match_aa mon "$cap"
cap="allow command \"config-key get\" with key=client/$match_bb/foo"
cap="$cap,allow command \"config-key set\" with key=client/$match_bb/foo"
ceph auth get-or-create $match_bb mon "$cap"
ceph auth get-or-create $fail_aa mon 'allow rx'
ceph auth get-or-create $fail_bb mon 'allow r,allow w'
ceph auth get-or-create $fail_cc mon 'allow rw'
ceph auth get-or-create $fail_dd mon 'allow rwx'
ceph auth get-or-create $fail_ee mon 'allow profile bootstrap-rgw'
ceph auth get-or-create $fail_ff mon 'allow profile bootstrap-rbd'
# write commands will require rw; wx is not enough
ceph auth get-or-create $fail_gg mon 'allow service config-key wx'
# read commands will only require 'r'; 'rx' should be enough.
ceph auth get-or-create $fail_writes mon 'allow service config-key rx'
# grab keyring
ceph auth export -o $k
# keys will all the caps can do whatever
for c in $allow_aa $allow_bb $allow_cc $mgr_a $mgr_b; do
ceph -k $k --name $c config-key get daemon-private/osd.123/test-foo
ceph -k $k --name $c config-key get mgr/test-foo
ceph -k $k --name $c config-key get device/test-foo
ceph -k $k --name $c config-key get test/foo
done
for c in $osd_a $osd_b; do
ceph -k $k --name $c config-key put daemon-private/$c/test-foo
ceph -k $k --name $c config-key get daemon-private/$c/test-foo
expect_false ceph -k $k --name $c config-key ls
expect_false ceph -k $k --name $c config-key get mgr/test-foo
expect_false ceph -k $k --name $c config-key get device/test-foo
expect_false ceph -k $k --name $c config-key get test/foo
done
expect_false ceph -k $k --name $osd_a config-key get daemon-private/$osd_b/test-foo
expect_false ceph -k $k --name $osd_b config-key get daemon-private/$osd_a/test-foo
expect_false ceph -k $k --name $prefix_aa \
config-key ls
expect_false ceph -k $k --name $prefix_aa \
config-key get daemon-private/osd.123/test-foo
expect_false ceph -k $k --name $prefix_aa \
config-key set test/bar
expect_false ceph -k $k --name $prefix_aa \
config-key set client/$prefix_aa/foo
# write something so we can read, use a custom entity
ceph -k $k --name $allow_bb config-key set client/$prefix_aa/foo
ceph -k $k --name $prefix_aa config-key get client/$prefix_aa/foo
# check that one entity can write to the other's prefix and that the other is able to read it
ceph -k $k --name $prefix_bb config-key set client/$prefix_aa/bar
ceph -k $k --name $prefix_aa config-key get client/$prefix_aa/bar
ceph -k $k --name $prefix_bb config-key set client/$prefix_bb/foo
ceph -k $k --name $prefix_bb config-key get client/$prefix_bb/foo
expect_false ceph -k $k --name $prefix_bb config-key get client/$prefix_aa/bar
expect_false ceph -k $k --name $prefix_bb config-key ls
expect_false ceph -k $k --name $prefix_bb \
config-key get daemon-private/osd.123/test-foo
expect_false ceph -k $k --name $prefix_bb config-key get mgr/test-foo
expect_false ceph -k $k --name $prefix_bb config-key get device/test-foo
expect_false ceph -k $k --name $prefix_bb config-key get test/bar
expect_false ceph -k $k --name $prefix_bb config-key set test/bar
ceph -k $k --name $prefix_cc config-key set client/$match_aa/foo
ceph -k $k --name $prefix_cc config-key set client/$match_bb/foo
ceph -k $k --name $prefix_cc config-key get client/$match_aa/foo
ceph -k $k --name $prefix_cc config-key get client/$match_bb/foo
expect_false ceph -k $k --name $prefix_cc config-key set other/prefix
expect_false ceph -k $k --name $prefix_cc config-key get mgr/test-foo
ceph -k $k --name $prefix_cc config-key ls >& /dev/null
ceph -k $k --name $match_aa config-key get client/$match_aa/foo
expect_false ceph -k $k --name $match_aa config-key get client/$match_bb/foo
expect_false ceph -k $k --name $match_aa config-key set client/$match_aa/foo
ceph -k $k --name $match_bb config-key get client/$match_bb/foo
ceph -k $k --name $match_bb config-key set client/$match_bb/foo
expect_false ceph -k $k --name $match_bb config-key get client/$match_aa/foo
expect_false ceph -k $k --name $match_bb config-key set client/$match_aa/foo
keys=(daemon-private/osd.123/test-foo
mgr/test-foo
device/test-foo
test/foo
client/$prefix_aa/foo
client/$prefix_bb/foo
client/$match_aa/foo
client/$match_bb/foo
)
# expect these all to fail accessing config-key
for c in $fail_aa $fail_bb $fail_cc \
$fail_dd $fail_ee $fail_ff \
$fail_gg; do
for m in get set; do
for key in ${keys[*]} client/$prefix_aa/foo client/$prefix_bb/foo; do
expect_false ceph -k $k --name $c config-key $m $key
done
done
done
# fail writes but succeed on reads
expect_false ceph -k $k --name $fail_writes config-key set client/$match_aa/foo
expect_false ceph -k $k --name $fail_writes config-key set test/foo
ceph -k $k --name $fail_writes config-key ls
ceph -k $k --name $fail_writes config-key get client/$match_aa/foo
ceph -k $k --name $fail_writes config-key get daemon-private/osd.123/test-foo
echo "OK"
| 7,172 | 34.509901 | 79 | sh |
null | ceph-main/qa/workunits/mon/test_mon_osdmap_prune.sh | #!/bin/bash
. $(dirname $0)/../../standalone/ceph-helpers.sh
set -x
function wait_for_osdmap_manifest() {
local what=${1:-"true"}
local -a delays=($(get_timeout_delays $TIMEOUT .1))
local -i loop=0
for ((i=0; i < ${#delays[*]}; ++i)); do
has_manifest=$(ceph report | jq 'has("osdmap_manifest")')
if [[ "$has_manifest" == "$what" ]]; then
return 0
fi
sleep ${delays[$i]}
done
echo "osdmap_manifest never outputted on report"
ceph report
return 1
}
function wait_for_trim() {
local -i epoch=$1
local -a delays=($(get_timeout_delays $TIMEOUT .1))
local -i loop=0
for ((i=0; i < ${#delays[*]}; ++i)); do
fc=$(ceph report | jq '.osdmap_first_committed')
if [[ $fc -eq $epoch ]]; then
return 0
fi
sleep ${delays[$i]}
done
echo "never trimmed up to epoch $epoch"
ceph report
return 1
}
function test_osdmap() {
local epoch=$1
local ret=0
tmp_map=$(mktemp)
ceph osd getmap $epoch -o $tmp_map || return 1
if ! osdmaptool --print $tmp_map | grep "epoch $epoch" ; then
echo "ERROR: failed processing osdmap epoch $epoch"
ret=1
fi
rm $tmp_map
return $ret
}
function generate_osdmaps() {
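# each noup set/unset below commits a new osdmap epoch, so this generates one new epoch per iteration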
local -i num=$1
cmds=( set unset )
for ((i=0; i < num; ++i)); do
ceph osd ${cmds[$((i%2))]} noup || return 1
done
return 0
}
function test_mon_osdmap_prune() {
create_pool foo 32
wait_for_clean || return 1
ceph config set mon mon_debug_block_osdmap_trim true || return 1
generate_osdmaps 500 || return 1
report="$(ceph report)"
fc=$(jq '.osdmap_first_committed' <<< $report)
lc=$(jq '.osdmap_last_committed' <<< $report)
[[ $((lc-fc)) -ge 500 ]] || return 1
wait_for_osdmap_manifest || return 1
manifest="$(ceph report | jq '.osdmap_manifest')"
first_pinned=$(jq '.first_pinned' <<< $manifest)
last_pinned=$(jq '.last_pinned' <<< $manifest)
pinned_maps=( $(jq '.pinned_maps[]' <<< $manifest) )
# validate pinned maps list
[[ $first_pinned -eq ${pinned_maps[0]} ]] || return 1
[[ $last_pinned -eq ${pinned_maps[-1]} ]] || return 1
# validate pinned maps range
[[ $first_pinned -lt $last_pinned ]] || return 1
[[ $last_pinned -lt $lc ]] || return 1
[[ $first_pinned -eq $fc ]] || return 1
# ensure all the maps are available, and work as expected
# this can take a while...
for ((i=$first_pinned; i <= $last_pinned; ++i)); do
test_osdmap $i || return 1
done
# update pinned maps state:
# the monitor may have pruned & pinned additional maps since we last
# assessed state, given it's an iterative process.
#
manifest="$(ceph report | jq '.osdmap_manifest')"
first_pinned=$(jq '.first_pinned' <<< $manifest)
last_pinned=$(jq '.last_pinned' <<< $manifest)
pinned_maps=( $(jq '.pinned_maps[]' <<< $manifest) )
# test trimming maps
#
# we're going to perform the following tests:
#
# 1. force trim to a pinned map
# 2. force trim to a pinned map's previous epoch
# 3. trim all maps except the last 200 or so.
#
# 1. force trim to a pinned map
#
[[ ${#pinned_maps[@]} -gt 10 ]] || return 1
trim_to=${pinned_maps[1]}
ceph config set mon mon_osd_force_trim_to $trim_to
ceph config set mon mon_min_osdmap_epochs 100
ceph config set mon paxos_service_trim_min 1
ceph config set mon mon_debug_block_osdmap_trim false
# generate an epoch so we get to trim maps
ceph osd set noup
ceph osd unset noup
wait_for_trim $trim_to || return 1
report="$(ceph report)"
fc=$(jq '.osdmap_first_committed' <<< $report)
[[ $fc -eq $trim_to ]] || return 1
old_first_pinned=$first_pinned
old_last_pinned=$last_pinned
first_pinned=$(jq '.osdmap_manifest.first_pinned' <<< $report)
last_pinned=$(jq '.osdmap_manifest.last_pinned' <<< $report)
[[ $first_pinned -eq $trim_to ]] || return 1
[[ $first_pinned -gt $old_first_pinned ]] || return 1
[[ $last_pinned -gt $old_first_pinned ]] || return 1
test_osdmap $trim_to || return 1
test_osdmap $(( trim_to+1 )) || return 1
pinned_maps=( $(jq '.osdmap_manifest.pinned_maps[]' <<< $report) )
# 2. force trim to a pinned map's previous epoch
#
[[ ${#pinned_maps[@]} -gt 2 ]] || return 1
trim_to=$(( ${pinned_maps[1]} - 1))
ceph config set mon mon_osd_force_trim_to $trim_to
# generate an epoch so we get to trim maps
ceph osd set noup
ceph osd unset noup
wait_for_trim $trim_to || return 1
report="$(ceph report)"
fc=$(jq '.osdmap_first_committed' <<< $report)
[[ $fc -eq $trim_to ]] || return 1
old_first_pinned=$first_pinned
old_last_pinned=$last_pinned
first_pinned=$(jq '.osdmap_manifest.first_pinned' <<< $report)
last_pinned=$(jq '.osdmap_manifest.last_pinned' <<< $report)
pinned_maps=( $(jq '.osdmap_manifest.pinned_maps[]' <<< $report) )
[[ $first_pinned -eq $trim_to ]] || return 1
[[ ${pinned_maps[1]} -eq $(( trim_to+1)) ]] || return 1
test_osdmap $first_pinned || return 1
test_osdmap $(( first_pinned + 1 )) || return 1
# 3. trim everything
#
ceph config set mon mon_osd_force_trim_to 0
# generate an epoch so we get to trim maps
ceph osd set noup
ceph osd unset noup
wait_for_osdmap_manifest "false" || return 1
return 0
}
test_mon_osdmap_prune || exit 1
echo "OK"
| 5,230 | 24.393204 | 71 | sh |
null | ceph-main/qa/workunits/mon/test_noautoscale_flag.sh | #!/bin/bash -ex
unset CEPH_CLI_TEST_DUP_COMMAND
NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ "$NUM_POOLS" -gt 0 ]; then
echo "test requires no preexisting pools"
exit 1
fi
ceph osd pool set noautoscale
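# the cluster-wide noautoscale flag forces autoscale_mode off for every pool, including pools created while it is set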
ceph osd pool create pool_a
echo 'pool_a autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_a | grep -o -m 1 'on\|off')
NUM_POOLS=$((NUM_POOLS+1))
sleep 2
# Count the number of Pools with AUTOSCALE `off`
RESULT1=$(ceph osd pool autoscale-status | grep -oe 'off' | wc -l)
# the number of pools with AUTOSCALE `off` should equal the total pool count ($NUM_POOLS)
test "$RESULT1" -eq "$NUM_POOLS"
ceph osd pool unset noautoscale
echo $(ceph osd pool get noautoscale)
ceph osd pool create pool_b
echo 'pool_a autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_a | grep -o -m 1 'on\|off')
echo 'pool_b autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_b | grep -o -m 1 'on\|off')
NUM_POOLS=$((NUM_POOLS+1))
sleep 2
# Count the number of Pools with AUTOSCALE `on`
RESULT2=$(ceph osd pool autoscale-status | grep -oe 'on' | wc -l)
# the number of pools with AUTOSCALE `on` should equal the total pool count ($NUM_POOLS)
test "$RESULT2" -eq "$NUM_POOLS"
ceph osd pool set noautoscale
ceph osd pool create pool_c
echo 'pool_a autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_a | grep -o -m 1 'on\|off')
echo 'pool_b autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_b | grep -o -m 1 'on\|off')
echo 'pool_c autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_c | grep -o -m 1 'on\|off')
NUM_POOLS=$((NUM_POOLS+1))
sleep 2
# Count the number of Pools with AUTOSCALE `off`
RESULT3=$(ceph osd pool autoscale-status | grep -oe 'off' | wc -l)
# the number of pools with AUTOSCALE `off` should equal the total pool count ($NUM_POOLS)
test "$RESULT3" -eq "$NUM_POOLS"
ceph osd pool rm pool_a pool_a --yes-i-really-really-mean-it
ceph osd pool rm pool_b pool_b --yes-i-really-really-mean-it
ceph osd pool rm pool_c pool_c --yes-i-really-really-mean-it
echo OK
| 1,959 | 22.333333 | 102 | sh |
null | ceph-main/qa/workunits/objectstore/test_fuse.sh | #!/bin/sh -ex
if ! id -u | grep -q '^0$'; then
echo "not root, re-running self via sudo"
sudo PATH=$PATH TYPE=$TYPE $0
exit 0
fi
expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
COT=ceph-objectstore-tool
DATA=store_test_fuse_dir
[ -z "$TYPE" ] && TYPE=bluestore
MNT=store_test_fuse_mnt
rm -rf $DATA
mkdir -p $DATA
test -d $MNT && fusermount -u $MNT || true
rmdir $MNT || true
mkdir $MNT
export CEPH_ARGS=--enable_experimental_unrecoverable_data_corrupting_features=bluestore
$COT --no-mon-config --op mkfs --data-path $DATA --type $TYPE
$COT --no-mon-config --op fuse --data-path $DATA --mountpoint $MNT &
while ! test -e $MNT/type ; do
echo waiting for $MNT/type to appear
sleep 1
done
umask 0
grep $TYPE $MNT/type
# create collection
mkdir $MNT/meta
test -e $MNT/meta/bitwise_hash_start
test -d $MNT/meta/all
test -d $MNT/meta/by_bitwise_hash
# create object
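# object directories are named after the ghobject, roughly #<pool>:<bitwise hash>:<namespace>:<key>:<name>:<snap>#; pool -1 is the OSD metadata collection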
mkdir $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#
test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
test -d $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr
test -d $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap
test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/bitwise_hash
test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header
# omap header
echo omap header > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header
grep -q omap $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header
# omap
echo value a > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
echo value b > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
ls $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap | grep -c key | grep -q 2
grep 'value a' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
grep 'value b' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya
rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb
# attr
echo value a > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
echo value b > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
ls $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr | grep -c key | grep -q 2
grep 'value a' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
grep 'value b' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya
rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb
# data
test ! -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
echo asdfasdfasdf > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
test -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
grep -q asdfasdfasdf $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
truncate --size 4 $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
stat --format=%s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data | grep -q ^4$
expect_false grep -q asdfasdfasdf $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
test ! -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data
# create pg collection
mkdir --mode 0003 $MNT/0.0_head
grep -q 00000000 $MNT/0.0_head/bitwise_hash_start
if [ "$TYPE" = "bluestore" ]; then
cat $MNT/0.0_head/bitwise_hash_bits
grep -q 3 $MNT/0.0_head/bitwise_hash_bits
grep -q 1fffffff $MNT/0.0_head/bitwise_hash_end
fi
test -d $MNT/0.0_head/all
mkdir --mode 0003 $MNT/0.1_head
grep -q 80000000 $MNT/0.1_head/bitwise_hash_start
if [ "$TYPE" = "bluestore" ]; then
grep -q 3 $MNT/0.1_head/bitwise_hash_bits
grep -q 9fffffff $MNT/0.1_head/bitwise_hash_end
fi
# create pg object
mkdir $MNT/0.0_head/all/#0:00000000::::head#/
mkdir $MNT/0.0_head/all/#0:10000000:::foo:head#/
# verify pg bounds check
if [ "$TYPE" = "bluestore" ]; then
expect_false mkdir $MNT/0.0_head/all/#0:20000000:::bar:head#/
fi
# remove a collection
expect_false rmdir $MNT/0.0_head
rmdir $MNT/0.0_head/all/#0:10000000:::foo:head#/
rmdir $MNT/0.0_head/all/#0:00000000::::head#/
rmdir $MNT/0.0_head
rmdir $MNT/0.1_head
fusermount -u $MNT
wait
echo OK
| 4,352 | 32.484615 | 87 | sh |
null | ceph-main/qa/workunits/osdc/stress_objectcacher.sh | #!/bin/sh -ex
for i in $(seq 1 10)
do
for DELAY in 0 1000
do
for OPS in 1000 10000
do
for OBJECTS in 10 50 100
do
for READS in 0.90 0.50 0.10
do
for OP_SIZE in 4096 131072 1048576
do
for MAX_DIRTY in 0 25165824
do
ceph_test_objectcacher_stress --ops $OPS --percent-read $READS --delay-ns $DELAY --objects $OBJECTS --max-op-size $OP_SIZE --client-oc-max-dirty $MAX_DIRTY --stress-test > /dev/null 2>&1
done
done
done
done
done
done
done
ceph_test_objectcacher_stress --correctness-test > /dev/null 2>&1
echo OK
| 793 | 26.37931 | 214 | sh |
null | ceph-main/qa/workunits/rados/clone.sh | #!/bin/sh -x
set -e
rados -p data rm foo || true
rados -p data put foo.tmp /etc/passwd --object-locator foo
rados -p data clonedata foo.tmp foo --object-locator foo
rados -p data get foo /tmp/foo
cmp /tmp/foo /etc/passwd
rados -p data rm foo.tmp --object-locator foo
rados -p data rm foo
echo OK | 298 | 22 | 58 | sh |
null | ceph-main/qa/workunits/rados/load-gen-big.sh | #!/bin/sh
rados -p rbd load-gen \
--num-objects 10240 \
--min-object-size 1048576 \
--max-object-size 25600000 \
--max-ops 1024 \
--max-backlog 1024 \
--read-percent 50 \
--run-length 1200
| 218 | 18.909091 | 32 | sh |
null | ceph-main/qa/workunits/rados/load-gen-mix-small-long.sh | #!/bin/sh
rados -p rbd load-gen \
--num-objects 1024 \
--min-object-size 1 \
--max-object-size 1048576 \
--max-ops 128 \
--max-backlog 128 \
--read-percent 50 \
--run-length 1800
| 208 | 18 | 31 | sh |
null | ceph-main/qa/workunits/rados/load-gen-mix-small.sh | #!/bin/sh
rados -p rbd load-gen \
--num-objects 1024 \
--min-object-size 1 \
--max-object-size 1048576 \
--max-ops 128 \
--max-backlog 128 \
--read-percent 50 \
--run-length 600
| 207 | 17.909091 | 31 | sh |
null | ceph-main/qa/workunits/rados/load-gen-mix.sh | #!/bin/sh
rados -p rbd load-gen \
--num-objects 10240 \
--min-object-size 1 \
--max-object-size 1048576 \
--max-ops 128 \
--max-backlog 128 \
--read-percent 50 \
--run-length 600
| 208 | 18 | 31 | sh |
null | ceph-main/qa/workunits/rados/load-gen-mostlyread.sh | #!/bin/sh
rados -p rbd load-gen \
--num-objects 51200 \
--min-object-size 1 \
--max-object-size 1048576 \
--max-ops 128 \
--max-backlog 128 \
--read-percent 90 \
--run-length 600
| 208 | 18 | 31 | sh |
null | ceph-main/qa/workunits/rados/stress_watch.sh | #!/bin/sh -e
ceph_test_stress_watch
ceph_multi_stress_watch rep reppool repobj
ceph_multi_stress_watch ec ecpool ecobj
exit 0
| 128 | 15.125 | 42 | sh |
null | ceph-main/qa/workunits/rados/test.sh | #!/usr/bin/env bash
set -ex
parallel=1
[ "$1" = "--serial" ] && parallel=0
color=""
[ -t 1 ] && color="--gtest_color=yes"
function cleanup() {
pkill -P $$ || true
}
trap cleanup EXIT ERR HUP INT QUIT
declare -A pids
for f in \
api_aio api_aio_pp \
api_io api_io_pp \
api_asio api_list \
api_lock api_lock_pp \
api_misc api_misc_pp \
api_tier_pp \
api_pool \
api_snapshots api_snapshots_pp \
api_stat api_stat_pp \
api_watch_notify api_watch_notify_pp \
api_cmd api_cmd_pp \
api_service api_service_pp \
api_c_write_operations \
api_c_read_operations \
api_cls_remote_reads \
list_parallel \
open_pools_parallel \
delete_pools_parallel
do
if [ $parallel -eq 1 ]; then
r=`printf '%25s' $f`
ff=`echo $f | awk '{print $1}'`
bash -o pipefail -exc "ceph_test_rados_$f $color 2>&1 | tee ceph_test_rados_$ff.log | sed \"s/^/$r: /\"" &
pid=$!
echo "test $f on pid $pid"
pids[$f]=$pid
else
ceph_test_rados_$f
fi
done
ret=0
if [ $parallel -eq 1 ]; then
for t in "${!pids[@]}"
do
pid=${pids[$t]}
if ! wait $pid
then
echo "error in $t ($pid)"
ret=1
fi
done
fi
exit $ret
| 1,175 | 17.666667 | 107 | sh |
null | ceph-main/qa/workunits/rados/test_alloc_hint.sh | #!/usr/bin/env bash
set -ex
shopt -s nullglob # fns glob expansion in expect_alloc_hint_eq()
#
# Helpers
#
function get_xml_val() {
local xml="$1"
local tag="$2"
local regex=".*<${tag}>(.*)</${tag}>.*"
if [[ ! "${xml}" =~ ${regex} ]]; then
echo "'${xml}' xml doesn't match '${tag}' tag regex" >&2
return 2
fi
echo "${BASH_REMATCH[1]}"
}
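# e.g. get_xml_val "<pgid>1.7f</pgid>" "pgid" prints "1.7f" (illustrative values)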
function get_conf_val() {
set -e
local entity="$1"
local option="$2"
local val
val="$(sudo ceph daemon "${entity}" config get --format=xml "${option}")"
val="$(get_xml_val "${val}" "${option}")"
echo "${val}"
}
function setup_osd_data() {
for (( i = 0 ; i < "${NUM_OSDS}" ; i++ )); do
OSD_DATA[i]="$(get_conf_val "osd.$i" "osd_data")"
done
}
function setup_pgid() {
local poolname="$1"
local objname="$2"
local pgid
pgid="$(ceph osd map "${poolname}" "${objname}" --format=xml)"
pgid="$(get_xml_val "${pgid}" "pgid")"
PGID="${pgid}"
}
function expect_alloc_hint_eq() {
export CEPH_ARGS="--osd-objectstore=filestore"
local expected_extsize="$1"
for (( i = 0 ; i < "${NUM_OSDS}" ; i++ )); do
# Make sure that stuff is flushed from the journal to the store
# by the time we get to it, as we prod the actual files and not
# the journal.
sudo ceph daemon "osd.${i}" "flush_journal"
# e.g., .../25.6_head/foo__head_7FC1F406__19
# .../26.bs1_head/bar__head_EFE6384B__1a_ffffffffffffffff_1
# collect the matching files into an array so the count check below is meaningful
local fns=($(sudo sh -c "ls ${OSD_DATA[i]}/current/${PGID}*_head/${OBJ}_*"))
local count="${#fns[@]}"
if [ "${count}" -ne 1 ]; then
echo "bad fns count: ${count}" >&2
return 2
fi
local extsize
extsize="$(sudo xfs_io -c extsize "${fns[0]}")"
local extsize_regex="^\[(.*)\] ${fns[0]}$"
if [[ ! "${extsize}" =~ ${extsize_regex} ]]; then
echo "extsize doesn't match extsize_regex: ${extsize}" >&2
return 2
fi
extsize="${BASH_REMATCH[1]}"
if [ "${extsize}" -ne "${expected_extsize}" ]; then
echo "FAIL: alloc_hint: actual ${extsize}, expected ${expected_extsize}" >&2
return 1
fi
done
}
#
# Global setup
#
EC_K="2"
EC_M="1"
NUM_OSDS="$((EC_K + EC_M))"
NUM_PG="12"
NUM_PGP="${NUM_PG}"
LOW_CAP="$(get_conf_val "osd.0" "filestore_max_alloc_hint_size")"
HIGH_CAP="$((LOW_CAP * 10))" # 10M, assuming 1M default cap
SMALL_HINT="$((LOW_CAP / 4))" # 256K, assuming 1M default cap
BIG_HINT="$((LOW_CAP * 6))" # 6M, assuming 1M default cap
setup_osd_data
#
# ReplicatedBackend tests
#
POOL="alloc_hint-rep"
ceph osd pool create "${POOL}" "${NUM_PG}"
ceph osd pool set "${POOL}" size "${NUM_OSDS}" --yes-i-really-mean-it
ceph osd pool application enable "${POOL}" rados
OBJ="foo"
setup_pgid "${POOL}" "${OBJ}"
rados -p "${POOL}" create "${OBJ}"
# Empty object, SMALL_HINT - expect SMALL_HINT
rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
expect_alloc_hint_eq "${SMALL_HINT}"
# Try changing to BIG_HINT (1) - expect LOW_CAP (BIG_HINT > LOW_CAP)
rados -p "${POOL}" set-alloc-hint "${OBJ}" "${BIG_HINT}" "${BIG_HINT}"
expect_alloc_hint_eq "${LOW_CAP}"
# Bump the cap to HIGH_CAP
ceph tell 'osd.*' injectargs "--filestore_max_alloc_hint_size ${HIGH_CAP}"
# Try changing to BIG_HINT (2) - expect BIG_HINT (BIG_HINT < HIGH_CAP)
rados -p "${POOL}" set-alloc-hint "${OBJ}" "${BIG_HINT}" "${BIG_HINT}"
expect_alloc_hint_eq "${BIG_HINT}"
ceph tell 'osd.*' injectargs "--filestore_max_alloc_hint_size ${LOW_CAP}"
# Populate object with some data
rados -p "${POOL}" put "${OBJ}" /etc/passwd
# Try changing back to SMALL_HINT - expect BIG_HINT (non-empty object)
rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
expect_alloc_hint_eq "${BIG_HINT}"
OBJ="bar"
setup_pgid "${POOL}" "${OBJ}"
# Non-existent object, SMALL_HINT - expect SMALL_HINT (object creation)
rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
expect_alloc_hint_eq "${SMALL_HINT}"
ceph osd pool delete "${POOL}" "${POOL}" --yes-i-really-really-mean-it
#
# ECBackend tests
#
PROFILE="alloc_hint-ecprofile"
POOL="alloc_hint-ec"
ceph osd erasure-code-profile set "${PROFILE}" k=2 m=1 crush-failure-domain=osd
ceph osd erasure-code-profile get "${PROFILE}" # just so it's logged
ceph osd pool create "${POOL}" "${NUM_PG}" "${NUM_PGP}" erasure "${PROFILE}"
ceph osd pool application enable "${POOL}" rados
OBJ="baz"
setup_pgid "${POOL}" "${OBJ}"
rados -p "${POOL}" create "${OBJ}"
# Empty object, SMALL_HINT - expect scaled-down SMALL_HINT
rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
expect_alloc_hint_eq "$((SMALL_HINT / EC_K))"
ceph osd pool delete "${POOL}" "${POOL}" --yes-i-really-really-mean-it
#
# Global teardown
#
echo "OK"
| 4,881 | 26.426966 | 88 | sh |
null | ceph-main/qa/workunits/rados/test_cache_pool.sh | #!/usr/bin/env bash
set -ex
expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
# create pools, set up tier relationship
ceph osd pool create base_pool 2
ceph osd pool application enable base_pool rados
ceph osd pool create partial_wrong 2
ceph osd pool create wrong_cache 2
ceph osd tier add base_pool partial_wrong
ceph osd tier add base_pool wrong_cache
# populate base_pool with some data
echo "foo" > foo.txt
echo "bar" > bar.txt
echo "baz" > baz.txt
rados -p base_pool put fooobj foo.txt
rados -p base_pool put barobj bar.txt
# fill in wrong_cache backwards so we can tell we read from it
rados -p wrong_cache put fooobj bar.txt
rados -p wrong_cache put barobj foo.txt
# partial_wrong gets barobj backwards so we can check promote and non-promote
rados -p partial_wrong put barobj foo.txt
# get the objects back before setting a caching pool
rados -p base_pool get fooobj tmp.txt
diff -q tmp.txt foo.txt
rados -p base_pool get barobj tmp.txt
diff -q tmp.txt bar.txt
# set up redirect and make sure we get backwards results
ceph osd tier set-overlay base_pool wrong_cache
ceph osd tier cache-mode wrong_cache writeback
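# reads through base_pool are now served by the writeback overlay, so the deliberately swapped contents come back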
rados -p base_pool get fooobj tmp.txt
diff -q tmp.txt bar.txt
rados -p base_pool get barobj tmp.txt
diff -q tmp.txt foo.txt
# switch cache pools and make sure we're doing promote
ceph osd tier remove-overlay base_pool
ceph osd tier set-overlay base_pool partial_wrong
ceph osd tier cache-mode partial_wrong writeback
rados -p base_pool get fooobj tmp.txt
diff -q tmp.txt foo.txt # hurray, it promoted!
rados -p base_pool get barobj tmp.txt
diff -q tmp.txt foo.txt # yep, we read partial_wrong's local object!
# try a nonexistent object and make sure we get an error
expect_false rados -p base_pool get bazobj tmp.txt
# drop the cache entirely and make sure contents are still the same
ceph osd tier remove-overlay base_pool
rados -p base_pool get fooobj tmp.txt
diff -q tmp.txt foo.txt
rados -p base_pool get barobj tmp.txt
diff -q tmp.txt bar.txt
# create an empty cache pool and make sure it has objects after reading
ceph osd pool create empty_cache 2
touch empty.txt
rados -p empty_cache ls > tmp.txt
diff -q tmp.txt empty.txt
ceph osd tier add base_pool empty_cache
ceph osd tier set-overlay base_pool empty_cache
ceph osd tier cache-mode empty_cache writeback
rados -p base_pool get fooobj tmp.txt
rados -p base_pool get barobj tmp.txt
expect_false rados -p base_pool get bazobj tmp.txt
rados -p empty_cache ls > tmp.txt
expect_false diff -q tmp.txt empty.txt
# cleanup
ceph osd tier remove-overlay base_pool
ceph osd tier remove base_pool wrong_cache
ceph osd tier remove base_pool partial_wrong
ceph osd tier remove base_pool empty_cache
ceph osd pool delete base_pool base_pool --yes-i-really-really-mean-it
ceph osd pool delete empty_cache empty_cache --yes-i-really-really-mean-it
ceph osd pool delete wrong_cache wrong_cache --yes-i-really-really-mean-it
ceph osd pool delete partial_wrong partial_wrong --yes-i-really-really-mean-it
## set of base, cache
ceph osd pool create base 8
ceph osd pool application enable base rados
ceph osd pool create cache 8
ceph osd tier add base cache
ceph osd tier cache-mode cache writeback
ceph osd tier set-overlay base cache
# cache-flush, cache-evict
rados -p base put foo /etc/passwd
expect_false rados -p base cache-evict foo
expect_false rados -p base cache-flush foo
expect_false rados -p cache cache-evict foo
rados -p cache cache-flush foo
rados -p cache cache-evict foo
rados -p cache ls - | wc -l | grep 0
# cache-try-flush, cache-evict
rados -p base put foo /etc/passwd
expect_false rados -p base cache-evict foo
expect_false rados -p base cache-flush foo
expect_false rados -p cache cache-evict foo
rados -p cache cache-try-flush foo
rados -p cache cache-evict foo
rados -p cache ls - | wc -l | grep 0
# cache-flush-evict-all
rados -p base put bar /etc/passwd
rados -p cache ls - | wc -l | grep 1
expect_false rados -p base cache-flush-evict-all
rados -p cache cache-flush-evict-all
rados -p cache ls - | wc -l | grep 0
# cache-try-flush-evict-all
rados -p base put bar /etc/passwd
rados -p cache ls - | wc -l | grep 1
expect_false rados -p base cache-flush-evict-all
rados -p cache cache-try-flush-evict-all
rados -p cache ls - | wc -l | grep 0
# cache flush/evict when clone objects exist
rados -p base put testclone /etc/passwd
rados -p cache ls - | wc -l | grep 1
ceph osd pool mksnap base snap
rados -p base put testclone /etc/hosts
rados -p cache cache-flush-evict-all
rados -p cache ls - | wc -l | grep 0
ceph osd tier cache-mode cache proxy --yes-i-really-mean-it
rados -p base -s snap get testclone testclone.txt
diff -q testclone.txt /etc/passwd
rados -p base get testclone testclone.txt
diff -q testclone.txt /etc/hosts
# test --with-clones option
ceph osd tier cache-mode cache writeback
rados -p base put testclone2 /etc/passwd
rados -p cache ls - | wc -l | grep 1
ceph osd pool mksnap base snap1
rados -p base put testclone2 /etc/hosts
expect_false rados -p cache cache-flush testclone2
rados -p cache cache-flush testclone2 --with-clones
expect_false rados -p cache cache-evict testclone2
rados -p cache cache-evict testclone2 --with-clones
rados -p cache ls - | wc -l | grep 0
rados -p base -s snap1 get testclone2 testclone2.txt
diff -q testclone2.txt /etc/passwd
rados -p base get testclone2 testclone2.txt
diff -q testclone2.txt /etc/hosts
# cleanup
ceph osd tier remove-overlay base
ceph osd tier remove base cache
ceph osd pool delete cache cache --yes-i-really-really-mean-it
ceph osd pool delete base base --yes-i-really-really-mean-it
echo OK
| 5,597 | 31.736842 | 78 | sh |
null | ceph-main/qa/workunits/rados/test_crash.sh | #!/bin/sh
set -x
# run on a single-node three-OSD cluster
sudo killall -ABRT ceph-osd
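# SIGABRT makes each OSD write a crash report under /var/lib/ceph/crash (and a coredump) before exiting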
sleep 5
# kill caused coredumps; find them and delete them, carefully, so as
# not to disturb other coredumps, or else teuthology will see them
# and assume test failure. sudos are because the core files are
# root/600
for f in $(find $TESTDIR/archive/coredump -type f); do
gdb_output=$(echo "quit" | sudo gdb /usr/bin/ceph-osd $f)
if expr match "$gdb_output" ".*generated.*ceph-osd.*" && \
( \
expr match "$gdb_output" ".*terminated.*signal 6.*" || \
expr match "$gdb_output" ".*terminated.*signal SIGABRT.*" \
)
then
sudo rm $f
fi
done
# ceph-crash runs as the unprivileged "ceph" user, but when under test
# the ceph osd daemons are running as root, so their crash files aren't
# readable. let's chown them so they behave as they would in real life.
sudo chown -R ceph:ceph /var/lib/ceph/crash
# let daemon find crashdumps on startup
sudo systemctl restart ceph-crash
sleep 30
# must be 3 crashdumps registered and moved to crash/posted
[ $(ceph crash ls | wc -l) = 4 ] || exit 1 # 4 here bc of the table header
[ $(sudo find /var/lib/ceph/crash/posted/ -name meta | wc -l) = 3 ] || exit 1
# there should be a health warning
ceph health detail | grep RECENT_CRASH || exit 1
ceph crash archive-all
sleep 30
ceph health detail | grep -c RECENT_CRASH | grep 0 # should be gone!
| 1,406 | 30.266667 | 77 | sh |
null | ceph-main/qa/workunits/rados/test_crushdiff.sh | #!/usr/bin/env bash
set -ex
REP_POOL=
EC_POOL=
TEMPDIR=
OSD_NUM=$(ceph osd ls | wc -l)
test ${OSD_NUM} -gt 0
setup() {
local pool
TEMPDIR=`mktemp -d`
pool=test-crushdiff-rep-$$
ceph osd pool create ${pool} 32
REP_POOL=${pool}
rados -p ${REP_POOL} bench 5 write --no-cleanup
if [ ${OSD_NUM} -gt 3 ]; then
pool=test-crushdiff-ec-$$
ceph osd pool create ${pool} 32 32 erasure
EC_POOL=${pool}
rados -p ${EC_POOL} bench 5 write --no-cleanup
fi
}
cleanup() {
set +e
test -n "${EC_POOL}" &&
ceph osd pool delete "${EC_POOL}" "${EC_POOL}" \
--yes-i-really-really-mean-it
EC_POOL=
test -n "${REP_POOL}" &&
ceph osd pool delete "${REP_POOL}" "${REP_POOL}" \
--yes-i-really-really-mean-it
REP_POOL=
test -n "${TEMPDIR}" && rm -Rf ${TEMPDIR}
TEMPDIR=
}
trap "cleanup" INT TERM EXIT
setup
# test without crushmap modification
crushdiff export ${TEMPDIR}/cm.txt --verbose
crushdiff compare ${TEMPDIR}/cm.txt --verbose
crushdiff import ${TEMPDIR}/cm.txt --verbose
# test using a compiled crushmap
crushdiff export ${TEMPDIR}/cm --compiled --verbose
crushdiff compare ${TEMPDIR}/cm --compiled --verbose
crushdiff import ${TEMPDIR}/cm --compiled --verbose
# test using "offline" osdmap and pg-dump
ceph osd getmap -o ${TEMPDIR}/osdmap
ceph pg dump --format json > ${TEMPDIR}/pg-dump
crushdiff export ${TEMPDIR}/cm.txt --osdmap ${TEMPDIR}/osdmap --verbose
crushdiff compare ${TEMPDIR}/cm.txt --osdmap ${TEMPDIR}/osdmap \
--pg-dump ${TEMPDIR}/pg-dump --verbose | tee ${TEMPDIR}/compare.txt
# test the diff is zero when the crushmap is not modified
grep '^0/[0-9]* (0\.00%) pgs affected' ${TEMPDIR}/compare.txt
grep '^0/[0-9]* (0\.00%) objects affected' ${TEMPDIR}/compare.txt
grep '^0/[0-9]* (0\.00%) pg shards to move' ${TEMPDIR}/compare.txt
grep '^0/[0-9]* (0\.00%) pg object shards to move' ${TEMPDIR}/compare.txt
grep '^0\.00/.* (0\.00%) bytes to move' ${TEMPDIR}/compare.txt
crushdiff import ${TEMPDIR}/cm.txt --osdmap ${TEMPDIR}/osdmap --verbose
if [ ${OSD_NUM} -gt 3 ]; then
# test the diff is non-zero when the crushmap is modified
cat ${TEMPDIR}/cm.txt >&2
weight=$(awk '/item osd\.0 weight ([0-9.]+)/ {print $4 * 3}' \
${TEMPDIR}/cm.txt)
test -n "${weight}"
sed -i -Ee 's/^(.*item osd\.0 weight )[0-9.]+/\1'${weight}'/' \
${TEMPDIR}/cm.txt
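# e.g. 'item osd.0 weight 1.000' becomes 'item osd.0 weight 3.000' (tripled above, illustrative weights), enough to make the reported diff non-zero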
crushdiff compare ${TEMPDIR}/cm.txt --osdmap ${TEMPDIR}/osdmap \
--pg-dump ${TEMPDIR}/pg-dump --verbose | tee ${TEMPDIR}/compare.txt
grep '^[1-9][0-9]*/[0-9]* (.*%) pgs affected' ${TEMPDIR}/compare.txt
grep '^[1-9][0-9]*/[0-9]* (.*%) objects affected' ${TEMPDIR}/compare.txt
grep '^[1-9][0-9]*/[0-9]* (.*%) pg shards to move' ${TEMPDIR}/compare.txt
grep '^[1-9][0-9]*/[0-9]* (.*%) pg object shards to move' \
${TEMPDIR}/compare.txt
grep '^.*/.* (.*%) bytes to move' ${TEMPDIR}/compare.txt
crushdiff import ${TEMPDIR}/cm.txt --osdmap ${TEMPDIR}/osdmap --verbose
fi
echo OK
| 3,064 | 28.471154 | 77 | sh |
null | ceph-main/qa/workunits/rados/test_dedup_tool.sh | #!/usr/bin/env bash
set -x
die() {
echo "$@"
exit 1
}
do_run() {
if [ "$1" == "--tee" ]; then
shift
tee_out="$1"
shift
"$@" | tee $tee_out
else
"$@"
fi
}
run_expect_succ() {
echo "RUN_EXPECT_SUCC: " "$@"
do_run "$@"
[ $? -ne 0 ] && die "expected success, but got failure! cmd: $@"
}
run() {
echo "RUN: " $@
do_run "$@"
}
if [ -n "$CEPH_BIN" ] ; then
# CMake env
RADOS_TOOL="$CEPH_BIN/rados"
CEPH_TOOL="$CEPH_BIN/ceph"
DEDUP_TOOL="$CEPH_BIN/ceph-dedup-tool"
else
# executables should be installed by the QA env
RADOS_TOOL=$(which rados)
CEPH_TOOL=$(which ceph)
DEDUP_TOOL=$(which ceph-dedup-tool)
fi
POOL=dedup_pool
OBJ=test_rados_obj
[ -x "$RADOS_TOOL" ] || die "couldn't find $RADOS_TOOL binary to test"
[ -x "$CEPH_TOOL" ] || die "couldn't find $CEPH_TOOL binary to test"
run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
sleep 5
function test_dedup_ratio_fixed()
{
# case 1
dd if=/dev/urandom of=dedup_object_1k bs=1K count=1
for num in `seq 1 50`
do
dd if=dedup_object_1k of=dedup_object_100k bs=1K oflag=append conv=notrunc
done
for num in `seq 1 50`
do
dd if=/dev/zero of=dedup_object_100k bs=1K count=1 oflag=append conv=notrunc
done
$RADOS_TOOL -p $POOL put $OBJ ./dedup_object_100k
RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 1024 --chunk-algorithm fixed --fingerprint-algorithm sha1 | grep chunk_size_average | awk '{print$2}' | sed "s/\,//g")
# expected chunk_size_average: the 100K object is 50 identical random 1K blocks plus 50 zeroed 1K blocks, so fixed 1K chunking sees only 2 distinct chunks: 102400 / 2 = 51200
if [ 51200 -ne $RESULT ];
then
die "Estimate failed expecting 51200 result $RESULT"
fi
# case 2
dd if=/dev/zero of=dedup_object_10m bs=10M count=1
$RADOS_TOOL -p $POOL put $OBJ ./dedup_object_10m
RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 4096 --chunk-algorithm fixed --fingerprint-algorithm sha1 | grep examined_bytes | awk '{print$2}')
# 10485760
if [ 10485760 -ne $RESULT ];
then
die "Estimate failed expecting 10485760 result $RESULT"
fi
# case 3 max_thread
for num in `seq 0 20`
do
dd if=/dev/zero of=dedup_object_$num bs=4M count=1
$RADOS_TOOL -p $POOL put dedup_object_$num ./dedup_object_$num
done
RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 4096 --chunk-algorithm fixed --fingerprint-algorithm sha1 --max-thread 4 | grep chunk_size_average | awk '{print$2}' | sed "s/\,//g")
if [ 98566144 -ne $RESULT ];
then
die "Estimate failed expecting 98566144 result $RESULT"
fi
rm -rf ./dedup_object_1k ./dedup_object_100k ./dedup_object_10m
for num in `seq 0 20`
do
rm -rf ./dedup_object_$num
done
$RADOS_TOOL -p $POOL rm $OBJ
for num in `seq 0 20`
do
$RADOS_TOOL -p $POOL rm dedup_object_$num
done
}
function test_dedup_chunk_scrub()
{
CHUNK_POOL=dedup_chunk_pool
run_expect_succ "$CEPH_TOOL" osd pool create "$CHUNK_POOL" 8
echo "hi there" > foo
echo "hi there" > bar
echo "there" > foo-chunk
echo "CHUNK" > bar-chunk
$CEPH_TOOL osd pool set $POOL fingerprint_algorithm sha1 --yes-i-really-mean-it
$CEPH_TOOL osd pool set $POOL dedup_chunk_algorithm fastcdc --yes-i-really-mean-it
$CEPH_TOOL osd pool set $POOL dedup_cdc_chunk_size 4096 --yes-i-really-mean-it
$CEPH_TOOL osd pool set $POOL dedup_tier $CHUNK_POOL --yes-i-really-mean-it
$RADOS_TOOL -p $POOL put foo ./foo
$RADOS_TOOL -p $POOL put bar ./bar
$RADOS_TOOL -p $CHUNK_POOL put bar-chunk ./bar-chunk
$RADOS_TOOL -p $CHUNK_POOL put foo-chunk ./foo-chunk
$RADOS_TOOL -p $POOL set-chunk bar 0 8 --target-pool $CHUNK_POOL bar-chunk 0 --with-reference
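# set-chunk maps bytes [0, 8) of 'bar' onto the chunk object 'bar-chunk' in the chunk pool; --with-reference also records the back-reference on the chunk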
echo -n "There hi" > test_obj
# dirty
$RADOS_TOOL -p $POOL put foo ./test_obj
$RADOS_TOOL -p $POOL set-chunk foo 0 8 --target-pool $CHUNK_POOL foo-chunk 0 --with-reference
# flush
$RADOS_TOOL -p $POOL tier-flush foo
sleep 2
$RADOS_TOOL ls -p $CHUNK_POOL
CHUNK_OID=$(echo -n "There hi" | sha1sum | awk '{print $1}')
POOL_ID=$($CEPH_TOOL osd pool ls detail | grep $POOL | awk '{print$2}')
$DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref bar --target-ref-pool-id $POOL_ID
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID)
RESULT=$($DEDUP_TOOL --op chunk-scrub --chunk-pool $CHUNK_POOL | grep "Damaged object" | awk '{print$4}')
if [ $RESULT -ne "1" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Chunk-scrub failed expecting damaged objects is not 1"
fi
$DEDUP_TOOL --op chunk-put-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref bar --target-ref-pool-id $POOL_ID
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep bar)
if [ -n "$RESULT" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Scrub failed expecting bar is removed"
fi
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
rm -rf ./foo ./bar ./foo-chunk ./bar-chunk ./test_obj
$RADOS_TOOL -p $POOL rm foo
$RADOS_TOOL -p $POOL rm bar
}
function test_dedup_chunk_repair()
{
CHUNK_POOL=dedup_chunk_pool
run_expect_succ "$CEPH_TOOL" osd pool create "$CHUNK_POOL" 8
echo -n "hi there" > foo
echo -n "hi there" > bar
echo -n "there" > foo-chunk
echo -n "CHUNK" > bar-chunk
$CEPH_TOOL osd pool set $POOL fingerprint_algorithm sha1 --yes-i-really-mean-it
$CEPH_TOOL osd pool set $POOL dedup_chunk_algorithm fastcdc --yes-i-really-mean-it
$CEPH_TOOL osd pool set $POOL dedup_cdc_chunk_size 4096 --yes-i-really-mean-it
$CEPH_TOOL osd pool set $POOL dedup_tier $CHUNK_POOL --yes-i-really-mean-it
$RADOS_TOOL -p $POOL put foo ./foo
$RADOS_TOOL -p $POOL put bar ./bar
$RADOS_TOOL -p $CHUNK_POOL put bar-chunk ./bar-chunk
$RADOS_TOOL -p $CHUNK_POOL put foo-chunk ./foo-chunk
$RADOS_TOOL ls -p $CHUNK_POOL
CHUNK_OID=$(echo -n "hi there" | sha1sum | awk '{print $1}')
POOL_ID=$($CEPH_TOOL osd pool ls detail | grep $POOL | awk '{print$2}')
$RADOS_TOOL -p $CHUNK_POOL put $CHUNK_OID ./foo
# artificially inflate the ref counts on the two chunk objects, resulting in a mismatch
$DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref foo --target-ref-pool-id $POOL_ID
$DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref foo --target-ref-pool-id $POOL_ID
$DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref foo --target-ref-pool-id $POOL_ID
$DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref foo --target-ref-pool-id $POOL_ID
$DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object bar-chunk --target-ref bar --target-ref-pool-id $POOL_ID
$DEDUP_TOOL --op chunk-get-ref --chunk-pool $CHUNK_POOL --object bar-chunk --target-ref bar --target-ref-pool-id $POOL_ID
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID)
RESULT=$($DEDUP_TOOL --op chunk-scrub --chunk-pool $CHUNK_POOL | grep "Damaged object" | awk '{print$4}')
if [ $RESULT -ne "2" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Chunk-scrub failed expecting damaged objects is not 1"
fi
$DEDUP_TOOL --op chunk-repair --chunk-pool $CHUNK_POOL --object $CHUNK_OID --target-ref foo --target-ref-pool-id $POOL_ID
$DEDUP_TOOL --op chunk-repair --chunk-pool $CHUNK_POOL --object bar-chunk --target-ref bar --target-ref-pool-id $POOL_ID
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep foo | wc -l)
if [ 0 -ne "$RESULT" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Scrub failed expecting bar is removed"
fi
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object bar-chunk | grep bar | wc -l)
if [ 0 -ne "$RESULT" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Scrub failed expecting bar is removed"
fi
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
rm -rf ./foo ./bar ./foo-chunk ./bar-chunk ./test_obj
$RADOS_TOOL -p $POOL rm foo
$RADOS_TOOL -p $POOL rm bar
}
function test_dedup_object()
{
CHUNK_POOL=dedup_chunk_pool
run_expect_succ "$CEPH_TOOL" osd pool create "$CHUNK_POOL" 8
echo "There hiHI" > foo
$CEPH_TOOL osd pool set $POOL dedup_tier $CHUNK_POOL --yes-i-really-mean-it
$RADOS_TOOL -p $POOL put foo ./foo
sleep 2
$RADOS_TOOL ls -p $CHUNK_POOL
RESULT=$($DEDUP_TOOL --pool $POOL --op chunk-dedup --object foo --chunk-pool $CHUNK_POOL --source-off 0 --source-length 10 --fingerprint-algorithm sha1 )
POOL_ID=$($CEPH_TOOL osd pool ls detail | grep $POOL | awk '{print$2}')
CHUNK_OID=$(echo -n "There hiHI" | sha1sum | awk '{print $1}')
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep foo)
if [ -z "$RESULT" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Scrub failed expecting bar is removed"
fi
$RADOS_TOOL -p $CHUNK_POOL get $CHUNK_OID ./chunk
VERIFY=$(cat ./chunk | sha1sum | awk '{print $1}')
if [ "$CHUNK_OID" != "$VERIFY" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Comparing failed expecting chunk mismatch"
fi
echo -n "There hihiHI" > bar
$RADOS_TOOL -p $POOL put bar ./bar
RESULT=$($DEDUP_TOOL --pool $POOL --op object-dedup --object bar --chunk-pool $CHUNK_POOL --fingerprint-algorithm sha1 --dedup-cdc-chunk-size 4096)
CHUNK_OID=$(echo -n "There hihiHI" | sha1sum | awk '{print $1}')
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep bar)
if [ -z "$RESULT" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Scrub failed expecting bar is removed"
fi
$RADOS_TOOL -p $CHUNK_POOL get $CHUNK_OID ./chunk
VERIFY=$(cat ./chunk | sha1sum | awk '{print $1}')
if [ "$CHUNK_OID" != "$VERIFY" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Comparing failed expecting chunk mismatch"
fi
echo -n "THERE HIHIHI" > bar
$RADOS_TOOL -p $POOL put bar ./bar
$RADOS_TOOL -p $POOL mksnap mysnap
echo -n "There HIHIHI" > bar
$RADOS_TOOL -p $POOL put bar ./bar
RESULT=$($DEDUP_TOOL --pool $POOL --op object-dedup --object bar --chunk-pool $CHUNK_POOL --fingerprint-algorithm sha1 --dedup-cdc-chunk-size 4096 --snap)
CHUNK_OID=$(echo -n "THERE HIHIHI" | sha1sum | awk '{print $1}')
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep bar)
if [ -z "$RESULT" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Scrub failed expecting bar is removed"
fi
CHUNK_OID=$(echo -n "There HIHIHI" | sha1sum | awk '{print $1}')
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep bar)
if [ -z "$RESULT" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Scrub failed expecting bar is removed"
fi
# rerun tier-flush
RESULT=$($DEDUP_TOOL --pool $POOL --op object-dedup --object bar --chunk-pool $CHUNK_POOL --fingerprint-algorithm sha1 --dedup-cdc-chunk-size 4096)
CHUNK_OID=$(echo -n "There HIHIHI" | sha1sum | awk '{print $1}')
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID | grep bar)
if [ -z "$RESULT" ] ; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Scrub failed expecting bar is removed"
fi
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
rm -rf ./foo ./bar ./chunk
$RADOS_TOOL -p $POOL rm foo
$RADOS_TOOL -p $POOL rm bar
}
function test_sample_dedup()
{
CHUNK_POOL=dedup_chunk_pool
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
sleep 2
run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
run_expect_succ "$CEPH_TOOL" osd pool create "$CHUNK_POOL" 8
run_expect_succ "$CEPH_TOOL" osd pool set "$POOL" dedup_tier "$CHUNK_POOL"
run_expect_succ "$CEPH_TOOL" osd pool set "$POOL" dedup_chunk_algorithm fastcdc
run_expect_succ "$CEPH_TOOL" osd pool set "$POOL" dedup_cdc_chunk_size 8192
run_expect_succ "$CEPH_TOOL" osd pool set "$POOL" fingerprint_algorithm sha1
# 8 Dedupable objects
CONTENT_1="There hiHI"
echo $CONTENT_1 > foo
for num in `seq 1 8`
do
$RADOS_TOOL -p $POOL put foo_$num ./foo
done
# 1 Unique object
CONTENT_3="There hiHI3"
echo $CONTENT_3 > foo3
$RADOS_TOOL -p $POOL put foo3_1 ./foo3
sleep 2
# Execute dedup crawler
RESULT=$($DEDUP_TOOL --pool $POOL --chunk-pool $CHUNK_POOL --op sample-dedup --chunk-algorithm fastcdc --fingerprint-algorithm sha1 --chunk-dedup-threshold 3 --sampling-ratio 50)
CHUNK_OID_1=$(echo $CONTENT_1 | sha1sum | awk '{print $1}')
CHUNK_OID_3=$(echo $CONTENT_3 | sha1sum | awk '{print $1}')
# Verify that the chunk object holds references to the 8 dedupable meta objects
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID_1)
DEDUP_COUNT=0
for num in `seq 1 8`
do
GREP_RESULT=$(echo $RESULT | grep foo_$num)
if [ -n "$GREP_RESULT" ]; then
DEDUP_COUNT=$(($DEDUP_COUNT + 1))
fi
done
if [ $DEDUP_COUNT -lt 2 ]; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Chunk object has no reference of first meta object"
fi
# 7 Duplicated objects but less than chunk dedup threshold
CONTENT_2="There hiHI2"
echo $CONTENT_2 > foo2
for num in `seq 1 7`
do
$RADOS_TOOL -p $POOL put foo2_$num ./foo2
done
CHUNK_OID_2=$(echo $CONTENT_2 | sha1sum | awk '{print $1}')
RESULT=$($DEDUP_TOOL --pool $POOL --chunk-pool $CHUNK_POOL --op sample-dedup --chunk-algorithm fastcdc --fingerprint-algorithm sha1 --sampling-ratio 100 --chunk-dedup-threshold 2)
  # Objects duplicated fewer times than the chunk dedup threshold should still be deduplicated because they satisfy the object-dedup threshold
  # The one object crawled at the very first pass should not be deduplicated because it was not yet a duplicate at that time
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID_2)
DEDUP_COUNT=0
for num in `seq 1 7`
do
GREP_RESULT=$(echo $RESULT | grep foo2_$num)
if [ -n "$GREP_RESULT" ]; then
DEDUP_COUNT=$(($DEDUP_COUNT + 1))
fi
done
if [ $DEDUP_COUNT -ne 6 ]; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Chunk object has no reference of first meta object"
fi
# Unique object should not be deduplicated
RESULT=$($DEDUP_TOOL --op dump-chunk-refs --chunk-pool $CHUNK_POOL --object $CHUNK_OID_3)
  GREP_RESULT=$(echo $RESULT | grep $CHUNK_OID_3)
if [ -n "$GREP_RESULT" ]; then
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
die "Chunk object has no reference of second meta object"
fi
rm -rf ./foo ./foo2 ./foo3
for num in `seq 1 8`
do
$RADOS_TOOL -p $POOL rm foo_$num
done
  for num in `seq 1 7`
do
$RADOS_TOOL -p $POOL rm foo2_$num
done
$RADOS_TOOL -p $POOL rm foo3_1
$CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
}
test_dedup_ratio_fixed
test_dedup_chunk_scrub
test_dedup_chunk_repair
test_dedup_object
test_sample_dedup
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
echo "SUCCESS!"
exit 0
| 16,953 | 35.936819 | 197 | sh |
null | ceph-main/qa/workunits/rados/test_envlibrados_for_rocksdb.sh | #!/usr/bin/env bash
set -ex
############################################
# Helper functions
############################################
source $(dirname $0)/../ceph-helpers-root.sh
############################################
# Install required tools
############################################
echo "Install required tools"
CURRENT_PATH=`pwd`
############################################
# Compile&Start RocksDB
############################################
# install prerequisites
# for rocksdb
case $(distro_id) in
ubuntu|debian|devuan|softiron)
install git g++ libsnappy-dev zlib1g-dev libbz2-dev libradospp-dev cmake
;;
centos|fedora|rhel)
case $(distro_id) in
rhel)
# RHEL needs CRB repo for snappy-devel
sudo subscription-manager repos --enable "codeready-builder-for-rhel-8-x86_64-rpms"
;;
esac
install git gcc-c++.x86_64 snappy-devel zlib zlib-devel bzip2 bzip2-devel libradospp-devel.x86_64 cmake libarchive-3.3.3
;;
opensuse*|suse|sles)
install git gcc-c++ snappy-devel zlib-devel libbz2-devel libradospp-devel
;;
*)
echo "$(distro_id) is unknown, $@ will have to be installed manually."
;;
esac
# # gflags
# sudo yum install gflags-devel
#
# wget https://github.com/schuhschuh/gflags/archive/master.zip
# unzip master.zip
# cd gflags-master
# mkdir build && cd build
# export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1
# make && make install
# # snappy-devel
echo "Compile rocksdb"
if [ -e rocksdb ]; then
rm -fr rocksdb
fi
pushd $(dirname $0)/../../../
git submodule update --init src/rocksdb
popd
git clone $(dirname $0)/../../../src/rocksdb rocksdb
# compile code
cd rocksdb
if type cmake3 > /dev/null 2>&1 ; then
CMAKE=cmake3
else
CMAKE=cmake
fi
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
mkdir ${BUILD_DIR} && cd ${BUILD_DIR} && ${CMAKE} -DCMAKE_BUILD_TYPE=Debug -DWITH_TESTS=ON -DWITH_LIBRADOS=ON -DWITH_SNAPPY=ON -DWITH_GFLAGS=OFF -DFAIL_ON_WARNINGS=OFF ..
make rocksdb_env_librados_test -j8
echo "Copy ceph.conf"
# prepare ceph.conf
mkdir -p ../ceph/src/
if [ -f "/etc/ceph/ceph.conf" ]; then
cp /etc/ceph/ceph.conf ../ceph/src/
elif [ -f "/etc/ceph/ceph/ceph.conf" ]; then
cp /etc/ceph/ceph/ceph.conf ../ceph/src/
else
echo "/etc/ceph/ceph/ceph.conf doesn't exist"
fi
echo "Run EnvLibrados test"
# run test
if [ -f "../ceph/src/ceph.conf" ]
then
cp env_librados_test ~/cephtest/archive
./env_librados_test
else
echo "../ceph/src/ceph.conf doesn't exist"
fi
cd ${CURRENT_PATH}
| 2,681 | 26.367347 | 170 | sh |
null | ceph-main/qa/workunits/rados/test_hang.sh | #!/bin/sh -ex
# Hang forever for manual testing using the thrasher
while(true)
do
sleep 300
done
exit 0
| 108 | 11.111111 | 52 | sh |
null | ceph-main/qa/workunits/rados/test_health_warnings.sh | #!/usr/bin/env bash
set -uex
# number of osds = 10
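# crushtool --build lays the OSDs out as 2 per host, 2 hosts per rack, 2 racks
# per row under a single root, so the "osd down" combinations below map onto
# the host/rack/row health warnings being checked.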
crushtool -o crushmap --build --num_osds 10 host straw 2 rack straw 2 row straw 2 root straw 0
ceph osd setcrushmap -i crushmap
ceph osd tree
ceph tell osd.* injectargs --osd_max_markdown_count 1024 --osd_max_markdown_period 1
ceph osd set noout
wait_for_healthy() {
while ceph health | grep down
do
sleep 1
done
}
test_mark_two_osds_same_host_down() {
ceph osd set noup
ceph osd down osd.0 osd.1
ceph health detail
ceph health | grep "1 host"
ceph health | grep "2 osds"
ceph health detail | grep "osd.0"
ceph health detail | grep "osd.1"
ceph osd unset noup
wait_for_healthy
}
test_mark_two_osds_same_rack_down() {
ceph osd set noup
ceph osd down osd.8 osd.9
ceph health detail
ceph health | grep "1 host"
ceph health | grep "1 rack"
ceph health | grep "1 row"
ceph health | grep "2 osds"
ceph health detail | grep "osd.8"
ceph health detail | grep "osd.9"
ceph osd unset noup
wait_for_healthy
}
test_mark_all_but_last_osds_down() {
ceph osd set noup
ceph osd down $(ceph osd ls | sed \$d)
ceph health detail
ceph health | grep "1 row"
ceph health | grep "2 racks"
ceph health | grep "4 hosts"
ceph health | grep "9 osds"
ceph osd unset noup
wait_for_healthy
}
test_mark_two_osds_same_host_down_with_classes() {
ceph osd set noup
ceph osd crush set-device-class ssd osd.0 osd.2 osd.4 osd.6 osd.8
ceph osd crush set-device-class hdd osd.1 osd.3 osd.5 osd.7 osd.9
ceph osd down osd.0 osd.1
ceph health detail
ceph health | grep "1 host"
ceph health | grep "2 osds"
ceph health detail | grep "osd.0"
ceph health detail | grep "osd.1"
ceph osd unset noup
wait_for_healthy
}
test_mark_two_osds_same_host_down
test_mark_two_osds_same_rack_down
test_mark_all_but_last_osds_down
test_mark_two_osds_same_host_down_with_classes
exit 0
| 1,899 | 23.675325 | 94 | sh |
null | ceph-main/qa/workunits/rados/test_libcephsqlite.sh | #!/bin/bash -ex
# The main point of these tests beyond ceph_test_libcephsqlite is to:
#
# - Ensure you can load the Ceph VFS via the dynamic load extension mechanism
# in SQLite.
# - Check the behavior of a dead application, that it does not hold locks
# indefinitely.
pool="$1"
ns="$(basename $0)"
function sqlite {
background="$1"
if [ "$background" = b ]; then
shift
fi
a=$(cat)
printf "%s" "$a" >&2
# We're doing job control gymnastics here to make sure that sqlite3 is the
# main process (i.e. the process group leader) in the background, not a bash
# function or job pipeline.
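  # The -cmd chain loads the ceph VFS (libcephsqlite.so) and opens the database
  # at file:///$pool:$ns/baz.db with vfs=ceph; the SQL itself is fed in on stdin
  # from the caller's heredoc.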
sqlite3 -cmd '.output /dev/null' -cmd '.load libcephsqlite.so' -cmd 'pragma journal_mode = PERSIST' -cmd ".open file:///$pool:$ns/baz.db?vfs=ceph" -cmd '.output stdout' <<<"$a" &
if [ "$background" != b ]; then
wait
fi
}
function striper {
rados --pool=$pool --namespace="$ns" --striper "$@"
}
function repeat {
n=$1
shift
for ((i = 0; i < "$n"; ++i)); do
echo "$*"
done
}
striper rm baz.db || true
time sqlite <<EOF
create table if not exists foo (a INT);
insert into foo (a) values (RANDOM());
drop table foo;
EOF
striper stat baz.db
striper rm baz.db
time sqlite <<EOF
CREATE TABLE IF NOT EXISTS rand(text BLOB NOT NULL);
$(repeat 10 'INSERT INTO rand (text) VALUES (RANDOMBLOB(4096));')
SELECT LENGTH(text) FROM rand;
DROP TABLE rand;
EOF
time sqlite <<EOF
BEGIN TRANSACTION;
CREATE TABLE IF NOT EXISTS rand(text BLOB NOT NULL);
$(repeat 100 'INSERT INTO rand (text) VALUES (RANDOMBLOB(4096));')
COMMIT;
SELECT LENGTH(text) FROM rand;
DROP TABLE rand;
EOF
# Connection death drops the lock:
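# Start a background writer whose recursive CTE tries to insert 2^20 one-MiB
# blobs (it can never finish), SIGKILL it mid-write, then check that a fresh
# connection can still take the lock and sees only the initial row.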
striper rm baz.db
date
sqlite b <<EOF
CREATE TABLE foo (a BLOB);
INSERT INTO foo VALUES ("start");
WITH RECURSIVE c(x) AS
(
VALUES(1)
UNION ALL
SELECT x+1
FROM c
)
INSERT INTO foo (a)
SELECT RANDOMBLOB(1<<20)
FROM c
LIMIT (1<<20);
EOF
# Let it chew on that INSERT for a while so it writes data; it will not finish, as it's trying to write 2^40 bytes...
sleep 10
echo done
jobs -l
kill -KILL -- $(jobs -p)
date
wait
date
n=$(sqlite <<<"SELECT COUNT(*) FROM foo;")
[ "$n" -eq 1 ]
# Connection "hang" loses the lock and cannot reacquire it:
striper rm baz.db
date
sqlite b <<EOF
CREATE TABLE foo (a BLOB);
INSERT INTO foo VALUES ("start");
WITH RECURSIVE c(x) AS
(
VALUES(1)
UNION ALL
SELECT x+1
FROM c
)
INSERT INTO foo (a)
SELECT RANDOMBLOB(1<<20)
FROM c
LIMIT (1<<20);
EOF
# Same thing, let it chew on the INSERT for a while...
sleep 20
jobs -l
kill -STOP -- $(jobs -p)
# cephsqlite_lock_renewal_timeout is 30s
sleep 45
date
kill -CONT -- $(jobs -p)
sleep 10
date
# it should exit with an error as it lost the lock
wait
date
n=$(sqlite <<<"SELECT COUNT(*) FROM foo;")
[ "$n" -eq 1 ]
| 2,769 | 19.218978 | 180 | sh |
null | ceph-main/qa/workunits/rados/test_librados_build.sh | #!/bin/bash -ex
#
# Compile and run a librados application outside of the ceph build system, so
# that we can be sure librados.h[pp] is still usable and hasn't accidentally
# started depending on internal headers.
#
# The script assumes all dependencies - e.g. curl, make, gcc, librados headers,
# libradosstriper headers, boost headers, etc. - are already installed.
#
source $(dirname $0)/../ceph-helpers-root.sh
trap cleanup EXIT
SOURCES="hello_radosstriper.cc
hello_world_c.c
hello_world.cc
Makefile
"
BINARIES_TO_RUN="hello_world_c
hello_world_cpp
"
BINARIES="${BINARIES_TO_RUN}hello_radosstriper_cpp
"
# parse output like "octopus (dev)"
case $(librados-config --release | grep -Po ' \(\K[^\)]+') in
dev)
BRANCH=main;;
rc|stable)
BRANCH=$(librados-config --release | cut -d' ' -f1);;
*)
echo "unknown release '$(librados-config --release)'" >&2
        exit 1;;
esac
DL_PREFIX="http://git.ceph.com/?p=ceph.git;a=blob_plain;hb=${BRANCH};f=examples/librados/"
#DL_PREFIX="https://raw.githubusercontent.com/ceph/ceph/master/examples/librados/"
DESTDIR=$(pwd)
function cleanup () {
for f in $BINARIES$SOURCES ; do
rm -f "${DESTDIR}/$f"
done
}
function get_sources () {
for s in $SOURCES ; do
curl --progress-bar --output $s -L ${DL_PREFIX}$s
done
}
function check_sources () {
for s in $SOURCES ; do
test -f $s
done
}
function check_binaries () {
for b in $BINARIES ; do
file $b
test -f $b
done
}
function run_binaries () {
for b in $BINARIES_TO_RUN ; do
./$b -c /etc/ceph/ceph.conf
done
}
pushd $DESTDIR
case $(distro_id) in
centos|fedora|rhel|opensuse*|suse|sles)
install gcc-c++ make libradospp-devel librados-devel;;
ubuntu)
install gcc-11 g++-11 make libradospp-dev librados-dev
export CXX_FLAGS="-std=c++20";;
debian|devuan|softiron)
install g++ make libradospp-dev librados-dev;;
*)
echo "$(distro_id) is unknown, $@ will have to be installed manually."
esac
get_sources
check_sources
make all-system
check_binaries
run_binaries
popd
| 2,134 | 23.261364 | 90 | sh |
null | ceph-main/qa/workunits/rados/test_pool_access.sh | #!/usr/bin/env bash
set -ex
KEYRING=$(mktemp)
trap cleanup EXIT ERR HUP INT QUIT
cleanup() {
(ceph auth del client.mon_read || true) >/dev/null 2>&1
(ceph auth del client.mon_write || true) >/dev/null 2>&1
rm -f $KEYRING
}
expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
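# The *_op helpers below run small librados python snippets as a given client
# id; they are used to verify which pool and snapshot operations each mon cap
# ('allow r' vs 'allow *') permits.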
create_pool_op() {
ID=$1
POOL=$2
cat << EOF | CEPH_ARGS="-k $KEYRING" python3
import rados
cluster = rados.Rados(conffile="", rados_id="${ID}")
cluster.connect()
cluster.create_pool("${POOL}")
EOF
}
delete_pool_op() {
ID=$1
POOL=$2
cat << EOF | CEPH_ARGS="-k $KEYRING" python3
import rados
cluster = rados.Rados(conffile="", rados_id="${ID}")
cluster.connect()
cluster.delete_pool("${POOL}")
EOF
}
create_pool_snap_op() {
ID=$1
POOL=$2
SNAP=$3
cat << EOF | CEPH_ARGS="-k $KEYRING" python3
import rados
cluster = rados.Rados(conffile="", rados_id="${ID}")
cluster.connect()
ioctx = cluster.open_ioctx("${POOL}")
ioctx.create_snap("${SNAP}")
EOF
}
remove_pool_snap_op() {
ID=$1
POOL=$2
SNAP=$3
cat << EOF | CEPH_ARGS="-k $KEYRING" python3
import rados
cluster = rados.Rados(conffile="", rados_id="${ID}")
cluster.connect()
ioctx = cluster.open_ioctx("${POOL}")
ioctx.remove_snap("${SNAP}")
EOF
}
test_pool_op()
{
ceph auth get-or-create client.mon_read mon 'allow r' >> $KEYRING
ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
expect_false create_pool_op mon_read pool1
create_pool_op mon_write pool1
expect_false create_pool_snap_op mon_read pool1 snap1
create_pool_snap_op mon_write pool1 snap1
expect_false remove_pool_snap_op mon_read pool1 snap1
remove_pool_snap_op mon_write pool1 snap1
expect_false delete_pool_op mon_read pool1
delete_pool_op mon_write pool1
}
key=`ceph auth get-or-create-key client.poolaccess1 mon 'allow r' osd 'allow *'`
rados --id poolaccess1 --key $key -p rbd ls
key=`ceph auth get-or-create-key client.poolaccess2 mon 'allow r' osd 'allow * pool=nopool'`
expect_false rados --id poolaccess2 --key $key -p rbd ls
key=`ceph auth get-or-create-key client.poolaccess3 mon 'allow r' osd 'allow rw pool=nopool'`
expect_false rados --id poolaccess3 --key $key -p rbd ls
test_pool_op
echo OK
| 2,235 | 19.513761 | 93 | sh |
null | ceph-main/qa/workunits/rados/test_pool_quota.sh | #!/bin/sh -ex
p=`uuidgen`
# objects
ceph osd pool create $p 12
ceph osd pool set-quota $p max_objects 10
ceph osd pool application enable $p rados
for f in `seq 1 10` ; do
rados -p $p put obj$f /etc/passwd
done
sleep 30
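# the pool is now at its max_objects quota, so this put should block until the
# quota is raised below; the backgrounded write is then expected to succeed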
rados -p $p put onemore /etc/passwd &
pid=$!
ceph osd pool set-quota $p max_objects 100
wait $pid
[ $? -ne 0 ] && exit 1 || true
rados -p $p put twomore /etc/passwd
# bytes
ceph osd pool set-quota $p max_bytes 100
sleep 30
rados -p $p put two /etc/passwd &
pid=$!
ceph osd pool set-quota $p max_bytes 0
ceph osd pool set-quota $p max_objects 0
wait $pid
[ $? -ne 0 ] && exit 1 || true
rados -p $p put three /etc/passwd
#one pool being full does not block a different pool
pp=`uuidgen`
ceph osd pool create $pp 12
ceph osd pool application enable $pp rados
# set objects quota
ceph osd pool set-quota $pp max_objects 10
sleep 30
for f in `seq 1 10` ; do
rados -p $pp put obj$f /etc/passwd
done
sleep 30
rados -p $p put threemore /etc/passwd
ceph osd pool set-quota $p max_bytes 0
ceph osd pool set-quota $p max_objects 0
sleep 30
# done
ceph osd pool delete $p $p --yes-i-really-really-mean-it
ceph osd pool delete $pp $pp --yes-i-really-really-mean-it
echo OK
| 1,210 | 16.550725 | 58 | sh |
null | ceph-main/qa/workunits/rados/test_python.sh | #!/bin/sh -ex
ceph osd pool create rbd
${PYTHON:-python3} -m nose -v $(dirname $0)/../../../src/test/pybind/test_rados.py "$@"
exit 0
| 135 | 21.666667 | 87 | sh |
null | ceph-main/qa/workunits/rados/test_rados_timeouts.sh | #!/usr/bin/env bash
set -x
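# delay_mon/delay_osd inject a large delay for one message type while setting a
# 1-second op timeout, so each wrapped command is expected to fail with a timeout.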
delay_mon() {
MSGTYPE=$1
shift
$@ --rados-mon-op-timeout 1 --ms-inject-delay-type mon --ms-inject-delay-max 10000000 --ms-inject-delay-probability 1 --ms-inject-delay-msg-type $MSGTYPE
if [ $? -eq 0 ]; then
exit 1
fi
}
delay_osd() {
MSGTYPE=$1
shift
$@ --rados-osd-op-timeout 1 --ms-inject-delay-type osd --ms-inject-delay-max 10000000 --ms-inject-delay-probability 1 --ms-inject-delay-msg-type $MSGTYPE
if [ $? -eq 0 ]; then
exit 2
fi
}
# pool ops
delay_mon omap rados lspools
delay_mon poolopreply ceph osd pool create test 8
delay_mon poolopreply rados mksnap -p test snap
delay_mon poolopreply ceph osd pool rm test test --yes-i-really-really-mean-it
# other mon ops
delay_mon getpoolstats rados df
delay_mon mon_command ceph df
delay_mon omap ceph osd dump
delay_mon omap ceph -s
# osd ops
delay_osd osd_op_reply rados -p data put ls /bin/ls
delay_osd osd_op_reply rados -p data get ls - >/dev/null
delay_osd osd_op_reply rados -p data ls
delay_osd command_reply ceph tell osd.0 bench 1 1
# rbd commands, using more kinds of osd ops
rbd create -s 1 test
delay_osd osd_op_reply rbd watch test
delay_osd osd_op_reply rbd info test
delay_osd osd_op_reply rbd snap create test@snap
delay_osd osd_op_reply rbd import /bin/ls ls
rbd rm test
echo OK
| 1,338 | 26.326531 | 157 | sh |
null | ceph-main/qa/workunits/rados/test_rados_tool.sh | #!/usr/bin/env bash
set -x
die() {
echo "$@"
exit 1
}
usage() {
cat <<EOF
test_rados_tool.sh: tests rados_tool
-c: RADOS configuration file to use [optional]
-k: keep temp files
-h: this help message
-p: set temporary pool to use [optional]
EOF
}
do_run() {
if [ "$1" == "--tee" ]; then
shift
tee_out="$1"
shift
"$@" | tee $tee_out
else
"$@"
fi
}
run_expect_fail() {
echo "RUN_EXPECT_FAIL: " "$@"
do_run "$@"
[ $? -eq 0 ] && die "expected failure, but got success! cmd: $@"
}
run_expect_succ() {
echo "RUN_EXPECT_SUCC: " "$@"
do_run "$@"
[ $? -ne 0 ] && die "expected success, but got failure! cmd: $@"
}
run_expect_nosignal() {
echo "RUN_EXPECT_NOSIGNAL: " "$@"
do_run "$@"
[ $? -ge 128 ] && die "expected success or fail, but got signal! cmd: $@"
}
run() {
echo "RUN: " $@
do_run "$@"
}
if [ -n "$CEPH_BIN" ] ; then
# CMake env
RADOS_TOOL="$CEPH_BIN/rados"
CEPH_TOOL="$CEPH_BIN/ceph"
else
# executables should be installed by the QA env
RADOS_TOOL=$(which rados)
CEPH_TOOL=$(which ceph)
fi
KEEP_TEMP_FILES=0
POOL=trs_pool
POOL_CP_TARGET=trs_pool.2
POOL_EC=trs_pool_ec
[ -x "$RADOS_TOOL" ] || die "couldn't find $RADOS_TOOL binary to test"
[ -x "$CEPH_TOOL" ] || die "couldn't find $CEPH_TOOL binary to test"
while getopts "c:hkp:" flag; do
case $flag in
c) RADOS_TOOL="$RADOS_TOOL -c $OPTARG";;
k) KEEP_TEMP_FILES=1;;
h) usage; exit 0;;
p) POOL=$OPTARG;;
*) echo; usage; exit 1;;
esac
done
TDIR=`mktemp -d -t test_rados_tool.XXXXXXXXXX` || die "mktemp failed"
[ $KEEP_TEMP_FILES -eq 0 ] && trap "rm -rf ${TDIR}; exit" INT TERM EXIT
# ensure rados doesn't segfault without --pool
run_expect_nosignal "$RADOS_TOOL" --snap "asdf" ls
run_expect_nosignal "$RADOS_TOOL" --snapid "0" ls
run_expect_nosignal "$RADOS_TOOL" --object-locator "asdf" ls
run_expect_nosignal "$RADOS_TOOL" --namespace "asdf" ls
run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
run_expect_succ "$CEPH_TOOL" osd erasure-code-profile set myprofile k=2 m=1 stripe_unit=2K crush-failure-domain=osd --force
run_expect_succ "$CEPH_TOOL" osd pool create "$POOL_EC" 100 100 erasure myprofile
# expb happens to be the empty export for legacy reasons
run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expb"
# expa has objects foo, foo2 and bar
run_expect_succ "$RADOS_TOOL" -p "$POOL" put foo /etc/fstab
run_expect_succ "$RADOS_TOOL" -p "$POOL" put foo2 /etc/fstab
run_expect_succ "$RADOS_TOOL" -p "$POOL" put bar /etc/fstab
run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expa"
# expc has foo and foo2 with some attributes and omaps set
run_expect_succ "$RADOS_TOOL" -p "$POOL" rm bar
run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothbrush" "toothbrush"
run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothpaste" "crest"
run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapval foo "rados.floss" "myfloss"
run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo2 "rados.toothbrush" "green"
run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapheader foo2 "foo2.header"
run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expc"
# make sure that --create works
run "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
run_expect_succ "$RADOS_TOOL" -p "$POOL" --create import "$TDIR/expa"
# make sure that lack of --create fails
run_expect_succ "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
run_expect_fail "$RADOS_TOOL" -p "$POOL" import "$TDIR/expa"
run_expect_succ "$RADOS_TOOL" -p "$POOL" --create import "$TDIR/expa"
# inaccessible import src should fail
run_expect_fail "$RADOS_TOOL" -p "$POOL" import "$TDIR/dir_nonexistent"
# export an empty pool to test purge
run_expect_succ "$RADOS_TOOL" purge "$POOL" --yes-i-really-really-mean-it
run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/empty"
cmp -s "$TDIR/expb" "$TDIR/empty" \
|| die "failed to export the same stuff we imported!"
rm -f "$TDIR/empty"
# import some stuff with extended attributes on it
run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
[ ${VAL} = "toothbrush" ] || die "Invalid attribute after import"
# the second time, the xattrs should match, so there should be nothing to do.
run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
[ "${VAL}" = "toothbrush" ] || die "Invalid attribute after second import"
# Now try with --no-overwrite option after changing an attribute
run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothbrush" "dentist"
run_expect_succ "$RADOS_TOOL" -p "$POOL" import --no-overwrite "$TDIR/expc"
VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
[ "${VAL}" = "dentist" ] || die "Invalid attribute after second import"
# now force it to copy everything
run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
[ "${VAL}" = "toothbrush" ] || die "Invalid attribute after second import"
# test copy pool
run "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
run "$CEPH_TOOL" osd pool rm "$POOL_CP_TARGET" "$POOL_CP_TARGET" --yes-i-really-really-mean-it
run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
run_expect_succ "$CEPH_TOOL" osd pool create "$POOL_CP_TARGET" 8
# create src files
mkdir -p "$TDIR/dir_cp_src"
for i in `seq 1 5`; do
fname="$TDIR/dir_cp_src/f.$i"
objname="f.$i"
dd if=/dev/urandom of="$fname" bs=$((1024*1024)) count=$i
run_expect_succ "$RADOS_TOOL" -p "$POOL" put $objname "$fname"
# a few random attrs
for j in `seq 1 4`; do
rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr $objname attr.$j "$rand_str"
run_expect_succ --tee "$fname.attr.$j" "$RADOS_TOOL" -p "$POOL" getxattr $objname attr.$j
done
rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapheader $objname "$rand_str"
run_expect_succ --tee "$fname.omap.header" "$RADOS_TOOL" -p "$POOL" getomapheader $objname
# a few random omap keys
for j in `seq 1 4`; do
rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapval $objname key.$j "$rand_str"
done
run_expect_succ --tee "$fname.omap.vals" "$RADOS_TOOL" -p "$POOL" listomapvals $objname
done
run_expect_succ "$RADOS_TOOL" cppool "$POOL" "$POOL_CP_TARGET"
mkdir -p "$TDIR/dir_cp_dst"
for i in `seq 1 5`; do
fname="$TDIR/dir_cp_dst/f.$i"
objname="f.$i"
run_expect_succ "$RADOS_TOOL" -p "$POOL_CP_TARGET" get $objname "$fname"
# a few random attrs
for j in `seq 1 4`; do
run_expect_succ --tee "$fname.attr.$j" "$RADOS_TOOL" -p "$POOL_CP_TARGET" getxattr $objname attr.$j
done
run_expect_succ --tee "$fname.omap.header" "$RADOS_TOOL" -p "$POOL_CP_TARGET" getomapheader $objname
run_expect_succ --tee "$fname.omap.vals" "$RADOS_TOOL" -p "$POOL_CP_TARGET" listomapvals $objname
done
diff -q -r "$TDIR/dir_cp_src" "$TDIR/dir_cp_dst" \
|| die "copy pool validation failed!"
for opt in \
block-size \
concurrent-ios \
min-object-size \
max-object-size \
min-op-len \
max-op-len \
max-ops \
max-backlog \
target-throughput \
read-percent \
num-objects \
run-length \
; do
run_expect_succ "$RADOS_TOOL" --$opt 4 df
run_expect_fail "$RADOS_TOOL" --$opt 4k df
done
run_expect_succ "$RADOS_TOOL" lock list f.1 --lock-duration 4 --pool "$POOL"
echo # previous command doesn't output an end of line: issue #9735
run_expect_fail "$RADOS_TOOL" lock list f.1 --lock-duration 4k --pool "$POOL"
run_expect_succ "$RADOS_TOOL" mksnap snap1 --pool "$POOL"
snapid=$("$RADOS_TOOL" lssnap --pool "$POOL" | grep snap1 | cut -f1)
[ $? -ne 0 ] && die "expected success, but got failure! cmd: \"$RADOS_TOOL\" lssnap --pool \"$POOL\" | grep snap1 | cut -f1"
run_expect_succ "$RADOS_TOOL" ls --pool "$POOL" --snapid="$snapid"
run_expect_fail "$RADOS_TOOL" ls --pool "$POOL" --snapid="$snapid"k
run_expect_succ "$RADOS_TOOL" truncate f.1 0 --pool "$POOL"
run_expect_fail "$RADOS_TOOL" truncate f.1 0k --pool "$POOL"
run "$CEPH_TOOL" osd pool rm delete_me_mkpool_test delete_me_mkpool_test --yes-i-really-really-mean-it
run_expect_succ "$CEPH_TOOL" osd pool create delete_me_mkpool_test 1
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 write
run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 1k write
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 write --format json --output "$TDIR/bench.json"
run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 1 write --output "$TDIR/bench.json"
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --format json --no-cleanup
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 rand --format json
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 rand -f json
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 seq --format json
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 seq -f json
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-omap
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-object
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-object
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-omap
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-omap --write-object
run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-omap --write-object
run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-omap
run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-object
run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr
run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-object
run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-omap
run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-omap --write-object
run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-omap --write-object
for i in $("$RADOS_TOOL" --pool "$POOL" ls | grep "benchmark_data"); do
"$RADOS_TOOL" --pool "$POOL" truncate $i 0
done
run_expect_nosignal "$RADOS_TOOL" --pool "$POOL" bench 1 rand
run_expect_nosignal "$RADOS_TOOL" --pool "$POOL" bench 1 seq
set -e
OBJ=test_rados_obj
expect_false()
{
if "$@"; then return 1; else return 0; fi
}
cleanup() {
$RADOS_TOOL -p $POOL rm $OBJ > /dev/null 2>&1 || true
$RADOS_TOOL -p $POOL_EC rm $OBJ > /dev/null 2>&1 || true
}
test_omap() {
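    # Write 10 omap keys (alternating command-line and stdin values), check them
    # with getomapval/listomapvals, remove half, then clear the rest; repeat with
    # random binary keys supplied via --omap-key-file.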
cleanup
for i in $(seq 1 1 10)
do
if [ $(($i % 2)) -eq 0 ]; then
$RADOS_TOOL -p $POOL setomapval $OBJ $i $i
else
echo -n "$i" | $RADOS_TOOL -p $POOL setomapval $OBJ $i
fi
$RADOS_TOOL -p $POOL getomapval $OBJ $i | grep -q "|$i|\$"
done
$RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 10
for i in $(seq 1 1 5)
do
$RADOS_TOOL -p $POOL rmomapkey $OBJ $i
done
$RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 5
$RADOS_TOOL -p $POOL clearomap $OBJ
$RADOS_TOOL -p $POOL listomapvals $OBJ | wc -l | grep 0
cleanup
for i in $(seq 1 1 10)
do
dd if=/dev/urandom bs=128 count=1 > $TDIR/omap_key
if [ $(($i % 2)) -eq 0 ]; then
$RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ $i
else
echo -n "$i" | $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ
fi
$RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key getomapval $OBJ | grep -q "|$i|\$"
$RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key rmomapkey $OBJ
$RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 0
done
cleanup
}
test_xattr() {
cleanup
$RADOS_TOOL -p $POOL put $OBJ /etc/passwd
V1=`mktemp fooattrXXXXXXX`
V2=`mktemp fooattrXXXXXXX`
echo -n fooval > $V1
expect_false $RADOS_TOOL -p $POOL setxattr $OBJ 2>/dev/null
expect_false $RADOS_TOOL -p $POOL setxattr $OBJ foo fooval extraarg 2>/dev/null
$RADOS_TOOL -p $POOL setxattr $OBJ foo fooval
$RADOS_TOOL -p $POOL getxattr $OBJ foo > $V2
cmp $V1 $V2
cat $V1 | $RADOS_TOOL -p $POOL setxattr $OBJ bar
$RADOS_TOOL -p $POOL getxattr $OBJ bar > $V2
cmp $V1 $V2
$RADOS_TOOL -p $POOL listxattr $OBJ > $V1
grep -q foo $V1
grep -q bar $V1
[ `cat $V1 | wc -l` -eq 2 ]
rm $V1 $V2
cleanup
}
test_rmobj() {
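    # Fill a pool that has a 1-object quota, wait for the osdmap to flag it as
    # full, then verify that "rm --force-full" can still delete the object.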
p=`uuidgen`
$CEPH_TOOL osd pool create $p 1
$CEPH_TOOL osd pool set-quota $p max_objects 1
V1=`mktemp fooattrXXXXXXX`
$RADOS_TOOL put $OBJ $V1 -p $p
while ! $CEPH_TOOL osd dump | grep 'full_quota max_objects'
do
sleep 2
done
$RADOS_TOOL -p $p rm $OBJ --force-full
$CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
rm $V1
}
test_ls() {
echo "Testing rados ls command"
p=`uuidgen`
$CEPH_TOOL osd pool create $p 1
NS=10
OBJS=20
# Include default namespace (0) in the total
TOTAL=$(expr $OBJS \* $(expr $NS + 1))
for nsnum in `seq 0 $NS`
do
for onum in `seq 1 $OBJS`
do
if [ "$nsnum" = "0" ];
then
"$RADOS_TOOL" -p $p put obj${onum} /etc/fstab 2> /dev/null
else
"$RADOS_TOOL" -p $p -N "NS${nsnum}" put obj${onum} /etc/fstab 2> /dev/null
fi
done
done
CHECK=$("$RADOS_TOOL" -p $p ls 2> /dev/null | wc -l)
if [ "$OBJS" -ne "$CHECK" ];
then
die "Created $OBJS objects in default namespace but saw $CHECK"
fi
TESTNS=NS${NS}
CHECK=$("$RADOS_TOOL" -p $p -N $TESTNS ls 2> /dev/null | wc -l)
if [ "$OBJS" -ne "$CHECK" ];
then
die "Created $OBJS objects in $TESTNS namespace but saw $CHECK"
fi
CHECK=$("$RADOS_TOOL" -p $p --all ls 2> /dev/null | wc -l)
if [ "$TOTAL" -ne "$CHECK" ];
then
die "Created $TOTAL objects but saw $CHECK"
fi
$CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
}
test_cleanup() {
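    # Populate several namespaces with regular objects, run rados bench
    # --no-cleanup in a few of them (including a killed run in NS4 that leaves
    # no benchmark metadata), then check that "cleanup" removes only benchmark
    # objects and leaves the original puts untouched.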
echo "Testing rados cleanup command"
p=`uuidgen`
$CEPH_TOOL osd pool create $p 1
NS=5
OBJS=4
# Include default namespace (0) in the total
TOTAL=$(expr $OBJS \* $(expr $NS + 1))
for nsnum in `seq 0 $NS`
do
for onum in `seq 1 $OBJS`
do
if [ "$nsnum" = "0" ];
then
"$RADOS_TOOL" -p $p put obj${onum} /etc/fstab 2> /dev/null
else
"$RADOS_TOOL" -p $p -N "NS${nsnum}" put obj${onum} /etc/fstab 2> /dev/null
fi
done
done
$RADOS_TOOL -p $p --all ls > $TDIR/before.ls.out 2> /dev/null
$RADOS_TOOL -p $p bench 3 write --no-cleanup 2> /dev/null
$RADOS_TOOL -p $p -N NS1 bench 3 write --no-cleanup 2> /dev/null
$RADOS_TOOL -p $p -N NS2 bench 3 write --no-cleanup 2> /dev/null
$RADOS_TOOL -p $p -N NS3 bench 3 write --no-cleanup 2> /dev/null
# Leave dangling objects without a benchmark_last_metadata in NS4
expect_false timeout 3 $RADOS_TOOL -p $p -N NS4 bench 30 write --no-cleanup 2> /dev/null
$RADOS_TOOL -p $p -N NS5 bench 3 write --no-cleanup 2> /dev/null
$RADOS_TOOL -p $p -N NS3 cleanup 2> /dev/null
#echo "Check NS3 after specific cleanup"
CHECK=$($RADOS_TOOL -p $p -N NS3 ls | wc -l)
if [ "$OBJS" -ne "$CHECK" ] ;
then
die "Expected $OBJS objects in NS3 but saw $CHECK"
fi
#echo "Try to cleanup all"
$RADOS_TOOL -p $p --all cleanup
#echo "Check all namespaces"
$RADOS_TOOL -p $p --all ls > $TDIR/after.ls.out 2> /dev/null
CHECK=$(cat $TDIR/after.ls.out | wc -l)
if [ "$TOTAL" -ne "$CHECK" ];
then
die "Expected $TOTAL objects but saw $CHECK"
fi
if ! diff $TDIR/before.ls.out $TDIR/after.ls.out
then
die "Different objects found after cleanup"
fi
set +e
run_expect_fail $RADOS_TOOL -p $p cleanup --prefix illegal_prefix
run_expect_succ $RADOS_TOOL -p $p cleanup --prefix benchmark_data_otherhost
set -e
$CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
}
function test_append()
{
cleanup
# create object
touch ./rados_append_null
$RADOS_TOOL -p $POOL append $OBJ ./rados_append_null
$RADOS_TOOL -p $POOL get $OBJ ./rados_append_0_out
cmp ./rados_append_null ./rados_append_0_out
# append 4k, total size 4k
dd if=/dev/zero of=./rados_append_4k bs=4k count=1
$RADOS_TOOL -p $POOL append $OBJ ./rados_append_4k
$RADOS_TOOL -p $POOL get $OBJ ./rados_append_4k_out
cmp ./rados_append_4k ./rados_append_4k_out
# append 4k, total size 8k
$RADOS_TOOL -p $POOL append $OBJ ./rados_append_4k
$RADOS_TOOL -p $POOL get $OBJ ./rados_append_4k_out
read_size=`ls -l ./rados_append_4k_out | awk -F ' ' '{print $5}'`
if [ 8192 -ne $read_size ];
then
die "Append failed expecting 8192 read $read_size"
fi
# append 10M, total size 10493952
dd if=/dev/zero of=./rados_append_10m bs=10M count=1
$RADOS_TOOL -p $POOL append $OBJ ./rados_append_10m
$RADOS_TOOL -p $POOL get $OBJ ./rados_append_10m_out
read_size=`ls -l ./rados_append_10m_out | awk -F ' ' '{print $5}'`
if [ 10493952 -ne $read_size ];
then
die "Append failed expecting 10493952 read $read_size"
fi
# cleanup
cleanup
# create object
$RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_null
$RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_0_out
cmp rados_append_null rados_append_0_out
# append 4k, total size 4k
$RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_4k
$RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_4k_out
cmp rados_append_4k rados_append_4k_out
# append 4k, total size 8k
$RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_4k
$RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_4k_out
read_size=`ls -l ./rados_append_4k_out | awk -F ' ' '{print $5}'`
if [ 8192 -ne $read_size ];
then
die "Append failed expecting 8192 read $read_size"
fi
# append 10M, total size 10493952
$RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_10m
$RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_10m_out
read_size=`ls -l ./rados_append_10m_out | awk -F ' ' '{print $5}'`
if [ 10493952 -ne $read_size ];
then
die "Append failed expecting 10493952 read $read_size"
fi
cleanup
rm -rf ./rados_append_null ./rados_append_0_out
rm -rf ./rados_append_4k ./rados_append_4k_out ./rados_append_10m ./rados_append_10m_out
}
function test_put()
{
# rados put test:
cleanup
# create file in local fs
dd if=/dev/urandom of=rados_object_10k bs=1K count=10
# test put command
$RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k
$RADOS_TOOL -p $POOL get $OBJ ./rados_object_10k_out
cmp ./rados_object_10k ./rados_object_10k_out
cleanup
# test put command with offset 0
$RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k --offset 0
$RADOS_TOOL -p $POOL get $OBJ ./rados_object_offset_0_out
cmp ./rados_object_10k ./rados_object_offset_0_out
cleanup
# test put command with offset 1000
$RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k --offset 1000
$RADOS_TOOL -p $POOL get $OBJ ./rados_object_offset_1000_out
cmp ./rados_object_10k ./rados_object_offset_1000_out 0 1000
cleanup
rm -rf ./rados_object_10k ./rados_object_10k_out ./rados_object_offset_0_out ./rados_object_offset_1000_out
}
function test_stat()
{
bluestore=$("$CEPH_TOOL" osd metadata | grep '"osd_objectstore": "bluestore"' | cut -f1)
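    # The expected STORED figures below depend on whether the OSDs run bluestore
    # (detected above); the two backends account for sparse/partially-written
    # objects differently, hence the two sets of values.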
# create file in local fs
dd if=/dev/urandom of=rados_object_128k bs=64K count=2
# rados df test (replicated_pool):
$RADOS_TOOL purge $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool rm $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool create $POOL 8
$CEPH_TOOL osd pool set $POOL size 3
# put object with 1 MB gap in front
$RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=1048576
MATCH_CNT=0
if [ "" == "$bluestore" ];
then
STORED=1.1
STORED_UNIT="MiB"
else
STORED=384
STORED_UNIT="KiB"
fi
for i in {1..60}
do
IN=$($RADOS_TOOL -p $POOL df | grep $POOL ; [[ ! -z $? ]] && echo "")
[[ -z $IN ]] && sleep 1 && continue
IFS=' ' read -ra VALS <<< "$IN"
        # verification is a bit tricky due to the eventually-consistent stats reporting
        # VALS[1] - STORED
        # VALS[2] - STORED units
        # VALS[3] - OBJECTS
        # VALS[5] - COPIES
        # VALS[12] - WR_OPS
        # VALS[13] - WR
        # VALS[14] - WR units
# implies replication factor 3
if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "1" ] && [ ${VALS[13]} == 128 ] && [ ${VALS[14]} == "KiB" ]
then
# enforce multiple match to make sure stats aren't changing any more
MATCH_CNT=$((MATCH_CNT+1))
[[ $MATCH_CNT == 3 ]] && break
sleep 1
continue
fi
MATCH_CNT=0
sleep 1
continue
done
[[ -z $IN ]] && die "Failed to retrieve any pool stats within 60 seconds"
if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "1" ] || [ ${VALS[13]} != 128 ] || [ ${VALS[14]} != "KiB" ]
then
die "Failed to retrieve proper pool stats within 60 seconds"
fi
# overwrite data at 1MB offset
$RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=1048576
MATCH_CNT=0
if [ "" == "$bluestore" ];
then
STORED=1.1
STORED_UNIT="MiB"
else
STORED=384
STORED_UNIT="KiB"
fi
for i in {1..60}
do
IN=$($RADOS_TOOL -p $POOL df | grep $POOL ; [[ ! -z $? ]] && echo "")
IFS=' ' read -ra VALS <<< "$IN"
        # verification is a bit tricky due to the eventually-consistent stats reporting
        # VALS[1] - STORED
        # VALS[2] - STORED units
        # VALS[3] - OBJECTS
        # VALS[5] - COPIES
        # VALS[12] - WR_OPS
        # VALS[13] - WR
        # VALS[14] - WR units
# implies replication factor 3
if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "2" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ]
then
# enforce multiple match to make sure stats aren't changing any more
MATCH_CNT=$((MATCH_CNT+1))
[[ $MATCH_CNT == 3 ]] && break
sleep 1
continue
fi
MATCH_CNT=0
sleep 1
continue
done
if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "2" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ]
then
die "Failed to retrieve proper pool stats within 60 seconds"
fi
# write data at 64K offset
$RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=65536
MATCH_CNT=0
if [ "" == "$bluestore" ];
then
STORED=1.1
STORED_UNIT="MiB"
else
STORED=768
STORED_UNIT="KiB"
fi
for i in {1..60}
do
IN=$($RADOS_TOOL -p $POOL df | grep $POOL ; [[ ! -z $? ]] && echo "")
IFS=' ' read -ra VALS <<< "$IN"
        # verification is a bit tricky due to the eventually-consistent stats reporting
        # VALS[1] - STORED
        # VALS[2] - STORED units
        # VALS[3] - OBJECTS
        # VALS[5] - COPIES
        # VALS[12] - WR_OPS
        # VALS[13] - WR
        # VALS[14] - WR units
# implies replication factor 3
if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "3" ] && [ ${VALS[13]} == 384 ] && [ ${VALS[14]} == "KiB" ]
then
# enforce multiple match to make sure stats aren't changing any more
MATCH_CNT=$((MATCH_CNT+1))
[[ $MATCH_CNT == 3 ]] && break
sleep 1
continue
fi
MATCH_CNT=0
sleep 1
continue
done
if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "3" ] || [ ${VALS[13]} != 384 ] || [ ${VALS[14]} != "KiB" ]
then
die "Failed to retrieve proper pool stats within 60 seconds"
fi
# overwrite object totally
$RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k
MATCH_CNT=0
if [ "" == "$bluestore" ];
then
STORED=128
STORED_UNIT="KiB"
else
STORED=384
STORED_UNIT="KiB"
fi
for i in {1..60}
do
IN=$($RADOS_TOOL -p $POOL df | grep $POOL ; [[ ! -z $? ]] && echo "")
IFS=' ' read -ra VALS <<< "$IN"
        # verification is a bit tricky due to the eventually-consistent stats reporting
        # VALS[1] - STORED
        # VALS[2] - STORED units
        # VALS[3] - OBJECTS
        # VALS[5] - COPIES
        # VALS[12] - WR_OPS
        # VALS[13] - WR
        # VALS[14] - WR units
# implies replication factor 3
if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "4" ] && [ ${VALS[13]} == 512 ] && [ ${VALS[14]} == "KiB" ]
then
# enforce multiple match to make sure stats aren't changing any more
MATCH_CNT=$((MATCH_CNT+1))
[[ $MATCH_CNT == 3 ]] && break
sleep 1
continue
fi
MATCH_CNT=0
sleep 1
continue
done
if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "4" ] || [ ${VALS[13]} != 512 ] || [ ${VALS[14]} != "KiB" ]
then
die "Failed to retrieve proper pool stats within 60 seconds"
fi
cleanup
# after cleanup?
MATCH_CNT=0
for i in {1..60}
do
IN=$($RADOS_TOOL -p $POOL df | grep $POOL ; [[ ! -z $? ]] && echo "")
IFS=' ' read -ra VALS <<< "$IN"
        # verification is a bit tricky due to the eventually-consistent stats reporting
        # VALS[1] - STORED
        # VALS[2] - STORED units
        # VALS[3] - OBJECTS
        # VALS[5] - COPIES
        # VALS[12] - WR_OPS
        # VALS[13] - WR
        # VALS[14] - WR units
# implies replication factor 3
if [ ${VALS[1]} == 0 ] && [ ${VALS[2]} == "B" ] && [ ${VALS[3]} == "0" ] && [ ${VALS[5]} == "0" ] && [ ${VALS[12]} == "5" ] && [ ${VALS[13]} == 512 ] && [ ${VALS[14]} == "KiB" ]
then
# enforce multiple match to make sure stats aren't changing any more
MATCH_CNT=$((MATCH_CNT+1))
[[ $MATCH_CNT == 3 ]] && break
sleep 1
continue
fi
MATCH_CNT=0
sleep 1
continue
done
if [ ${VALS[1]} != 0 ] || [ ${VALS[2]} != "B" ] || [ ${VALS[3]} != "0" ] || [ ${VALS[5]} != "0" ] || [ ${VALS[12]} != "5" ] || [ ${VALS[13]} != 512 ] || [ ${VALS[14]} != "KiB" ]
then
die "Failed to retrieve proper pool stats within 60 seconds"
fi
############ rados df test (EC pool): ##############
$RADOS_TOOL purge $POOL_EC --yes-i-really-really-mean-it
$CEPH_TOOL osd pool rm $POOL_EC $POOL_EC --yes-i-really-really-mean-it
$CEPH_TOOL osd erasure-code-profile set myprofile k=2 m=1 stripe_unit=2K crush-failure-domain=osd --force
$CEPH_TOOL osd pool create $POOL_EC 8 8 erasure
# put object
$RADOS_TOOL -p $POOL_EC put $OBJ ./rados_object_128k
MATCH_CNT=0
if [ "" == "$bluestore" ];
then
STORED=128
STORED_UNIT="KiB"
else
STORED=192
STORED_UNIT="KiB"
fi
for i in {1..60}
do
IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC ; [[ ! -z $? ]] && echo "")
[[ -z $IN ]] && sleep 1 && continue
IFS=' ' read -ra VALS <<< "$IN"
        # verification is a bit tricky due to the eventually-consistent stats reporting
        # VALS[1] - STORED
        # VALS[2] - STORED units
        # VALS[3] - OBJECTS
        # VALS[5] - COPIES
        # VALS[12] - WR_OPS
        # VALS[13] - WR
        # VALS[14] - WR units
# implies replication factor 2+1
if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "1" ] && [ ${VALS[13]} == 128 ] && [ ${VALS[14]} == "KiB" ]
then
# enforce multiple match to make sure stats aren't changing any more
MATCH_CNT=$((MATCH_CNT+1))
[[ $MATCH_CNT == 3 ]] && break
sleep 1
continue
fi
MATCH_CNT=0
sleep 1
continue
done
[[ -z $IN ]] && die "Failed to retrieve any pool stats within 60 seconds"
if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "1" ] || [ ${VALS[13]} != 128 ] || [ ${VALS[14]} != "KiB" ]
then
die "Failed to retrieve proper pool stats within 60 seconds"
fi
# overwrite object
$RADOS_TOOL -p $POOL_EC put $OBJ ./rados_object_128k
MATCH_CNT=0
if [ "" == "$bluestore" ];
then
STORED=128
STORED_UNIT="KiB"
else
STORED=192
STORED_UNIT="KiB"
fi
for i in {1..60}
do
IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC ; [[ ! -z $? ]] && echo "")
IFS=' ' read -ra VALS <<< "$IN"
        # verification is a bit tricky due to the eventually-consistent stats reporting
        # VALS[1] - STORED
        # VALS[2] - STORED units
        # VALS[3] - OBJECTS
        # VALS[5] - COPIES
        # VALS[12] - WR_OPS
        # VALS[13] - WR
        # VALS[14] - WR units
# implies replication factor 2+1
if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "2" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ]
then
# enforce multiple match to make sure stats aren't changing any more
MATCH_CNT=$((MATCH_CNT+1))
[[ $MATCH_CNT == 3 ]] && break
sleep 1
continue
fi
MATCH_CNT=0
sleep 1
continue
done
if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "2" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ]
then
die "Failed to retrieve proper pool stats within 60 seconds"
fi
cleanup
# after cleanup?
MATCH_CNT=0
for i in {1..60}
do
IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC ; [[ ! -z $? ]] && echo "")
IFS=' ' read -ra VALS <<< "$IN"
        # verification is a bit tricky due to the eventually-consistent stats reporting
        # VALS[1] - STORED
        # VALS[2] - STORED units
        # VALS[3] - OBJECTS
        # VALS[5] - COPIES
        # VALS[12] - WR_OPS
        # VALS[13] - WR
        # VALS[14] - WR units
# implies replication factor 2+1
if [ ${VALS[1]} == 0 ] && [ ${VALS[2]} == "B" ] && [ ${VALS[3]} == "0" ] && [ ${VALS[5]} == "0" ] && [ ${VALS[12]} == "3" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ]
then
# enforce multiple match to make sure stats aren't changing any more
MATCH_CNT=$((MATCH_CNT+1))
[[ $MATCH_CNT == 3 ]] && break
sleep 1
continue
fi
MATCH_CNT=0
sleep 1
continue
done
if [ ${VALS[1]} != 0 ] || [ ${VALS[2]} != "B" ] || [ ${VALS[3]} != "0" ] || [ ${VALS[5]} != "0" ] || [ ${VALS[12]} != "3" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ]
then
die "Failed to retrieve proper pool stats within 60 seconds"
fi
rm -rf ./rados_object_128k
}
test_xattr
test_omap
test_rmobj
test_ls
test_cleanup
test_append
test_put
test_stat
# clean up environment, delete pool
$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $POOL_EC $POOL_EC --yes-i-really-really-mean-it
$CEPH_TOOL osd pool delete $POOL_CP_TARGET $POOL_CP_TARGET --yes-i-really-really-mean-it
echo "SUCCESS!"
exit 0
| 31,345 | 32.887568 | 196 | sh |
null | ceph-main/qa/workunits/rados/version_number_sanity.sh | #!/bin/bash -ex
#
# test that ceph RPM/DEB package version matches "ceph --version"
# (for a loose definition of "matches")
#
source /etc/os-release
case $ID in
debian|ubuntu)
RPMDEB='DEB'
dpkg-query --show ceph-common
PKG_NAME_AND_VERSION=$(dpkg-query --show ceph-common)
;;
centos|fedora|rhel|opensuse*|suse|sles)
RPMDEB='RPM'
rpm -q ceph
PKG_NAME_AND_VERSION=$(rpm -q ceph)
;;
*)
echo "Unsupported distro ->$ID<-! Bailing out."
exit 1
esac
PKG_CEPH_VERSION=$(perl -e '"'"$PKG_NAME_AND_VERSION"'" =~ m/(\d+(\.\d+)+)/; print "$1\n";')
echo "According to $RPMDEB package, the ceph version under test is ->$PKG_CEPH_VERSION<-"
test -n "$PKG_CEPH_VERSION"
ceph --version
BUFFER=$(ceph --version)
CEPH_CEPH_VERSION=$(perl -e '"'"$BUFFER"'" =~ m/ceph version (\d+(\.\d+)+)/; print "$1\n";')
echo "According to \"ceph --version\", the ceph version under test is ->$CEPH_CEPH_VERSION<-"
test -n "$CEPH_CEPH_VERSION"
test "$PKG_CEPH_VERSION" = "$CEPH_CEPH_VERSION"
| 998 | 31.225806 | 93 | sh |
null | ceph-main/qa/workunits/rbd/cli_generic.sh | #!/usr/bin/env bash
set -ex
. $(dirname $0)/../../standalone/ceph-helpers.sh
export RBD_FORCE_ALLOW_V1=1
# make sure rbd pool is EMPTY.. this is a test script!!
rbd ls | wc -l | grep -v '^0$' && echo "nonempty rbd pool, aborting! run this script on an empty test cluster only." && exit 1
IMGS="testimg1 testimg2 testimg3 testimg4 testimg5 testimg6 testimg-diff1 testimg-diff2 testimg-diff3 foo foo2 bar bar2 test1 test2 test3 test4 clone2"
expect_fail() {
"$@" && return 1 || return 0
}
tiered=0
if ceph osd dump | grep ^pool | grep "'rbd'" | grep tier; then
tiered=1
fi
remove_images() {
for img in $IMGS
do
(rbd snap purge $img || true) >/dev/null 2>&1
(rbd rm $img || true) >/dev/null 2>&1
done
}
test_others() {
echo "testing import, export, resize, and snapshots..."
TMP_FILES="/tmp/img1 /tmp/img1.new /tmp/img2 /tmp/img2.new /tmp/img3 /tmp/img3.new /tmp/img-diff1.new /tmp/img-diff2.new /tmp/img-diff3.new /tmp/img1.snap1 /tmp/img1.snap1 /tmp/img-diff1.snap1"
remove_images
rm -f $TMP_FILES
# create an image
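    # scatter a few small binaries at increasing offsets to build a sparse
    # source image (roughly 100 MB) to import from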
dd if=/bin/sh of=/tmp/img1 bs=1k count=1 seek=10
dd if=/bin/dd of=/tmp/img1 bs=1k count=10 seek=100
dd if=/bin/rm of=/tmp/img1 bs=1k count=100 seek=1000
dd if=/bin/ls of=/tmp/img1 bs=1k seek=10000
dd if=/bin/ln of=/tmp/img1 bs=1k seek=100000
# import, snapshot
rbd import $RBD_CREATE_ARGS /tmp/img1 testimg1
rbd resize testimg1 --size=256 --allow-shrink
rbd export testimg1 /tmp/img2
rbd snap create testimg1 --snap=snap1
rbd resize testimg1 --size=128 && exit 1 || true # shrink should fail
rbd resize testimg1 --size=128 --allow-shrink
rbd export testimg1 /tmp/img3
# info
rbd info testimg1 | grep 'size 128 MiB'
rbd info --snap=snap1 testimg1 | grep 'size 256 MiB'
# export-diff
rm -rf /tmp/diff-testimg1-1 /tmp/diff-testimg1-2
rbd export-diff testimg1 --snap=snap1 /tmp/diff-testimg1-1
rbd export-diff testimg1 --from-snap=snap1 /tmp/diff-testimg1-2
# import-diff
rbd create $RBD_CREATE_ARGS --size=1 testimg-diff1
rbd import-diff --sparse-size 8K /tmp/diff-testimg1-1 testimg-diff1
rbd import-diff --sparse-size 8K /tmp/diff-testimg1-2 testimg-diff1
# info
rbd info testimg1 | grep 'size 128 MiB'
rbd info --snap=snap1 testimg1 | grep 'size 256 MiB'
rbd info testimg-diff1 | grep 'size 128 MiB'
rbd info --snap=snap1 testimg-diff1 | grep 'size 256 MiB'
# make copies
rbd copy testimg1 --snap=snap1 testimg2
rbd copy testimg1 testimg3
rbd copy testimg-diff1 --sparse-size 768K --snap=snap1 testimg-diff2
rbd copy testimg-diff1 --sparse-size 768K testimg-diff3
# verify the result
rbd info testimg2 | grep 'size 256 MiB'
rbd info testimg3 | grep 'size 128 MiB'
rbd info testimg-diff2 | grep 'size 256 MiB'
rbd info testimg-diff3 | grep 'size 128 MiB'
# deep copies
rbd deep copy testimg1 testimg4
rbd deep copy testimg1 --snap=snap1 testimg5
rbd info testimg4 | grep 'size 128 MiB'
rbd info testimg5 | grep 'size 256 MiB'
rbd snap ls testimg4 | grep -v 'SNAPID' | wc -l | grep 1
rbd snap ls testimg4 | grep '.*snap1.*'
rbd export testimg1 /tmp/img1.new
rbd export testimg2 /tmp/img2.new
rbd export testimg3 /tmp/img3.new
rbd export testimg-diff1 /tmp/img-diff1.new
rbd export testimg-diff2 /tmp/img-diff2.new
rbd export testimg-diff3 /tmp/img-diff3.new
cmp /tmp/img2 /tmp/img2.new
cmp /tmp/img3 /tmp/img3.new
cmp /tmp/img2 /tmp/img-diff2.new
cmp /tmp/img3 /tmp/img-diff3.new
# rollback
rbd snap rollback --snap=snap1 testimg1
rbd snap rollback --snap=snap1 testimg-diff1
rbd info testimg1 | grep 'size 256 MiB'
rbd info testimg-diff1 | grep 'size 256 MiB'
rbd export testimg1 /tmp/img1.snap1
rbd export testimg-diff1 /tmp/img-diff1.snap1
cmp /tmp/img2 /tmp/img1.snap1
cmp /tmp/img2 /tmp/img-diff1.snap1
# test create, copy of zero-length images
rbd rm testimg2
rbd rm testimg3
rbd create testimg2 -s 0
rbd cp testimg2 testimg3
rbd deep cp testimg2 testimg6
# remove snapshots
rbd snap rm --snap=snap1 testimg1
rbd snap rm --snap=snap1 testimg-diff1
rbd info --snap=snap1 testimg1 2>&1 | grep 'error setting snapshot context: (2) No such file or directory'
rbd info --snap=snap1 testimg-diff1 2>&1 | grep 'error setting snapshot context: (2) No such file or directory'
# sparsify
rbd sparsify testimg1
remove_images
rm -f $TMP_FILES
}
test_rename() {
echo "testing rename..."
remove_images
rbd create --image-format 1 -s 1 foo
rbd create --image-format 2 -s 1 bar
rbd rename foo foo2
rbd rename foo2 bar 2>&1 | grep exists
rbd rename bar bar2
rbd rename bar2 foo2 2>&1 | grep exists
ceph osd pool create rbd2 8
rbd pool init rbd2
rbd create -p rbd2 -s 1 foo
rbd rename rbd2/foo rbd2/bar
rbd -p rbd2 ls | grep bar
rbd rename rbd2/bar foo
rbd rename --pool rbd2 foo bar
! rbd rename rbd2/bar --dest-pool rbd foo
rbd rename --pool rbd2 bar --dest-pool rbd2 foo
rbd -p rbd2 ls | grep foo
ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
remove_images
}
test_ls() {
echo "testing ls..."
remove_images
rbd create --image-format 1 -s 1 test1
rbd create --image-format 1 -s 1 test2
rbd ls | grep test1
rbd ls | grep test2
rbd ls | wc -l | grep 2
# look for fields in output of ls -l without worrying about space
rbd ls -l | grep 'test1.*1 MiB.*1'
rbd ls -l | grep 'test2.*1 MiB.*1'
rbd rm test1
rbd rm test2
rbd create --image-format 2 -s 1 test1
rbd create --image-format 2 -s 1 test2
rbd ls | grep test1
rbd ls | grep test2
rbd ls | wc -l | grep 2
rbd ls -l | grep 'test1.*1 MiB.*2'
rbd ls -l | grep 'test2.*1 MiB.*2'
rbd rm test1
rbd rm test2
rbd create --image-format 2 -s 1 test1
rbd create --image-format 1 -s 1 test2
rbd ls | grep test1
rbd ls | grep test2
rbd ls | wc -l | grep 2
rbd ls -l | grep 'test1.*1 MiB.*2'
rbd ls -l | grep 'test2.*1 MiB.*1'
remove_images
# test that many images can be shown by ls
for i in $(seq -w 00 99); do
rbd create image.$i -s 1
done
rbd ls | wc -l | grep 100
rbd ls -l | grep image | wc -l | grep 100
for i in $(seq -w 00 99); do
rbd rm image.$i
done
for i in $(seq -w 00 99); do
rbd create image.$i --image-format 2 -s 1
done
rbd ls | wc -l | grep 100
rbd ls -l | grep image | wc -l | grep 100
for i in $(seq -w 00 99); do
rbd rm image.$i
done
}
test_remove() {
echo "testing remove..."
remove_images
rbd remove "NOT_EXIST" && exit 1 || true # remove should fail
rbd create --image-format 1 -s 1 test1
rbd rm test1
rbd ls | wc -l | grep "^0$"
rbd create --image-format 2 -s 1 test2
rbd rm test2
rbd ls | wc -l | grep "^0$"
# check that remove succeeds even if it's
# interrupted partway through. simulate this
# by removing some objects manually.
# remove with header missing (old format)
rbd create --image-format 1 -s 1 test1
rados rm -p rbd test1.rbd
rbd rm test1
rbd ls | wc -l | grep "^0$"
if [ $tiered -eq 0 ]; then
# remove with header missing
rbd create --image-format 2 -s 1 test2
HEADER=$(rados -p rbd ls | grep '^rbd_header')
rados -p rbd rm $HEADER
rbd rm test2
rbd ls | wc -l | grep "^0$"
# remove with id missing
rbd create --image-format 2 -s 1 test2
rados -p rbd rm rbd_id.test2
rbd rm test2
rbd ls | wc -l | grep "^0$"
# remove with header and id missing
rbd create --image-format 2 -s 1 test2
HEADER=$(rados -p rbd ls | grep '^rbd_header')
rados -p rbd rm $HEADER
rados -p rbd rm rbd_id.test2
rbd rm test2
rbd ls | wc -l | grep "^0$"
fi
# remove with rbd_children object missing (and, by extension,
# with child not mentioned in rbd_children)
rbd create --image-format 2 -s 1 test2
rbd snap create test2@snap
rbd snap protect test2@snap
rbd clone test2@snap clone --rbd-default-clone-format 1
rados -p rbd rm rbd_children
rbd rm clone
rbd ls | grep clone | wc -l | grep '^0$'
rbd snap unprotect test2@snap
rbd snap rm test2@snap
rbd rm test2
}
test_locking() {
echo "testing locking..."
remove_images
rbd create $RBD_CREATE_ARGS -s 1 test1
rbd lock list test1 | wc -l | grep '^0$'
rbd lock add test1 id
rbd lock list test1 | grep ' 1 '
LOCKER=$(rbd lock list test1 | tail -n 1 | awk '{print $1;}')
rbd lock remove test1 id $LOCKER
rbd lock list test1 | wc -l | grep '^0$'
rbd lock add test1 id --shared tag
rbd lock list test1 | grep ' 1 '
rbd lock add test1 id --shared tag
rbd lock list test1 | grep ' 2 '
rbd lock add test1 id2 --shared tag
rbd lock list test1 | grep ' 3 '
rbd lock list test1 | tail -n 1 | awk '{print $2, $1;}' | xargs rbd lock remove test1
if rbd info test1 | grep -qE "features:.*exclusive"
then
# new locking functionality requires all locks to be released
while [ -n "$(rbd lock list test1)" ]
do
rbd lock list test1 | tail -n 1 | awk '{print $2, $1;}' | xargs rbd lock remove test1
done
fi
rbd rm test1
}
test_pool_image_args() {
echo "testing pool and image args..."
remove_images
ceph osd pool delete test test --yes-i-really-really-mean-it || true
ceph osd pool create test 32
rbd pool init test
truncate -s 1 /tmp/empty /tmp/empty@snap
rbd ls | wc -l | grep 0
rbd create -s 1 test1
rbd ls | grep -q test1
rbd import --image test2 /tmp/empty
rbd ls | grep -q test2
rbd --dest test3 import /tmp/empty
rbd ls | grep -q test3
rbd import /tmp/empty foo
rbd ls | grep -q foo
# should fail due to "destination snapname specified"
rbd import --dest test/empty@snap /tmp/empty && exit 1 || true
rbd import /tmp/empty test/empty@snap && exit 1 || true
rbd import --image test/empty@snap /tmp/empty && exit 1 || true
rbd import /tmp/empty@snap && exit 1 || true
rbd ls test | wc -l | grep 0
rbd import /tmp/empty test/test1
rbd ls test | grep -q test1
rbd -p test import /tmp/empty test2
rbd ls test | grep -q test2
rbd --image test3 -p test import /tmp/empty
rbd ls test | grep -q test3
rbd --image test4 -p test import /tmp/empty
rbd ls test | grep -q test4
rbd --dest test5 -p test import /tmp/empty
rbd ls test | grep -q test5
rbd --dest test6 --dest-pool test import /tmp/empty
rbd ls test | grep -q test6
rbd --image test7 --dest-pool test import /tmp/empty
rbd ls test | grep -q test7
rbd --image test/test8 import /tmp/empty
rbd ls test | grep -q test8
rbd --dest test/test9 import /tmp/empty
rbd ls test | grep -q test9
rbd import --pool test /tmp/empty
rbd ls test | grep -q empty
# copy with no explicit pool goes to pool rbd
rbd copy test/test9 test10
rbd ls test | grep -qv test10
rbd ls | grep -q test10
rbd copy test/test9 test/test10
rbd ls test | grep -q test10
rbd copy --pool test test10 --dest-pool test test11
rbd ls test | grep -q test11
rbd copy --dest-pool rbd --pool test test11 test12
rbd ls | grep test12
rbd ls test | grep -qv test12
rm -f /tmp/empty /tmp/empty@snap
ceph osd pool delete test test --yes-i-really-really-mean-it
for f in foo test1 test10 test12 test2 test3 ; do
rbd rm $f
done
}
test_clone() {
echo "testing clone..."
remove_images
rbd create test1 $RBD_CREATE_ARGS -s 1
rbd snap create test1@s1
rbd snap protect test1@s1
ceph osd pool create rbd2 8
rbd pool init rbd2
rbd clone test1@s1 rbd2/clone
rbd -p rbd2 ls | grep clone
rbd -p rbd2 ls -l | grep clone | grep test1@s1
rbd ls | grep -v clone
rbd flatten rbd2/clone
rbd snap create rbd2/clone@s1
rbd snap protect rbd2/clone@s1
rbd clone rbd2/clone@s1 clone2
rbd ls | grep clone2
rbd ls -l | grep clone2 | grep rbd2/clone@s1
rbd -p rbd2 ls | grep -v clone2
rbd rm clone2
rbd snap unprotect rbd2/clone@s1
rbd snap rm rbd2/clone@s1
rbd rm rbd2/clone
rbd snap unprotect test1@s1
rbd snap rm test1@s1
rbd rm test1
ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
}
test_trash() {
echo "testing trash..."
remove_images
rbd create $RBD_CREATE_ARGS -s 1 test1
rbd create $RBD_CREATE_ARGS -s 1 test2
rbd ls | grep test1
rbd ls | grep test2
rbd ls | wc -l | grep 2
rbd ls -l | grep 'test1.*2.*'
rbd ls -l | grep 'test2.*2.*'
rbd trash mv test1
rbd ls | grep test2
rbd ls | wc -l | grep 1
rbd ls -l | grep 'test2.*2.*'
rbd trash ls | grep test1
rbd trash ls | wc -l | grep 1
rbd trash ls -l | grep 'test1.*USER.*'
rbd trash ls -l | grep -v 'protected until'
ID=`rbd trash ls | cut -d ' ' -f 1`
rbd trash rm $ID
rbd trash mv test2
ID=`rbd trash ls | cut -d ' ' -f 1`
rbd info --image-id $ID | grep "rbd image 'test2'"
rbd trash restore $ID
rbd ls | grep test2
rbd ls | wc -l | grep 1
rbd ls -l | grep 'test2.*2.*'
rbd trash mv test2 --expires-at "3600 sec"
rbd trash ls | grep test2
rbd trash ls | wc -l | grep 1
rbd trash ls -l | grep 'test2.*USER.*protected until'
rbd trash rm $ID 2>&1 | grep 'Deferment time has not expired'
rbd trash rm --image-id $ID --force
rbd create $RBD_CREATE_ARGS -s 1 test1
rbd snap create test1@snap1
rbd snap protect test1@snap1
rbd trash mv test1
rbd trash ls | grep test1
rbd trash ls | wc -l | grep 1
rbd trash ls -l | grep 'test1.*USER.*'
rbd trash ls -l | grep -v 'protected until'
ID=`rbd trash ls | cut -d ' ' -f 1`
rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 1
rbd snap ls --image-id $ID | grep '.*snap1.*'
rbd snap unprotect --image-id $ID --snap snap1
rbd snap rm --image-id $ID --snap snap1
rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 0
rbd trash restore $ID
rbd snap create test1@snap1
rbd snap create test1@snap2
rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 2
rbd snap purge --image-id $ID
rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 0
rbd rm --rbd_move_to_trash_on_remove=true --rbd_move_to_trash_on_remove_expire_seconds=3600 test1
rbd trash ls | grep test1
rbd trash ls | wc -l | grep 1
rbd trash ls -l | grep 'test1.*USER.*protected until'
rbd trash rm $ID 2>&1 | grep 'Deferment time has not expired'
rbd trash rm --image-id $ID --force
remove_images
}
test_purge() {
echo "testing trash purge..."
remove_images
rbd trash ls | wc -l | grep 0
rbd trash purge
rbd create $RBD_CREATE_ARGS --size 256 testimg1
rbd create $RBD_CREATE_ARGS --size 256 testimg2
rbd trash mv testimg1
rbd trash mv testimg2
rbd trash ls | wc -l | grep 2
rbd trash purge
rbd trash ls | wc -l | grep 0
rbd create $RBD_CREATE_ARGS --size 256 testimg1
rbd create $RBD_CREATE_ARGS --size 256 testimg2
rbd trash mv testimg1 --expires-at "1 hour"
rbd trash mv testimg2 --expires-at "3 hours"
rbd trash ls | wc -l | grep 2
rbd trash purge
rbd trash ls | wc -l | grep 2
rbd trash purge --expired-before "now + 2 hours"
rbd trash ls | wc -l | grep 1
rbd trash ls | grep testimg2
rbd trash purge --expired-before "now + 4 hours"
rbd trash ls | wc -l | grep 0
rbd create $RBD_CREATE_ARGS --size 256 testimg1
rbd snap create testimg1@snap # pin testimg1
rbd create $RBD_CREATE_ARGS --size 256 testimg2
rbd create $RBD_CREATE_ARGS --size 256 testimg3
rbd trash mv testimg1
rbd trash mv testimg2
rbd trash mv testimg3
rbd trash ls | wc -l | grep 3
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 1
rbd trash ls | grep testimg1
ID=$(rbd trash ls | awk '{ print $1 }')
rbd snap purge --image-id $ID
rbd trash purge
rbd trash ls | wc -l | grep 0
rbd create $RBD_CREATE_ARGS --size 256 testimg1
rbd create $RBD_CREATE_ARGS --size 256 testimg2
rbd snap create testimg2@snap # pin testimg2
rbd create $RBD_CREATE_ARGS --size 256 testimg3
rbd trash mv testimg1
rbd trash mv testimg2
rbd trash mv testimg3
rbd trash ls | wc -l | grep 3
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 1
rbd trash ls | grep testimg2
ID=$(rbd trash ls | awk '{ print $1 }')
rbd snap purge --image-id $ID
rbd trash purge
rbd trash ls | wc -l | grep 0
rbd create $RBD_CREATE_ARGS --size 256 testimg1
rbd create $RBD_CREATE_ARGS --size 256 testimg2
rbd create $RBD_CREATE_ARGS --size 256 testimg3
rbd snap create testimg3@snap # pin testimg3
rbd trash mv testimg1
rbd trash mv testimg2
rbd trash mv testimg3
rbd trash ls | wc -l | grep 3
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 1
rbd trash ls | grep testimg3
ID=$(rbd trash ls | awk '{ print $1 }')
rbd snap purge --image-id $ID
rbd trash purge
rbd trash ls | wc -l | grep 0
# test purging a clone with a chain of parents
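# Clone chain built below (all v2 clones; the parent snapshots are
# deleted, so they live on in the trash snapshot namespace):
#   testimg1 -> testimg2 -> testimg4 -> testimg6
#                        `-> testimg5
# (testimg3 is a standalone image.)  A trashed parent cannot be purged
# until all of its descendants are gone, which is what the greps for
# 'some expired images could not be removed' exercise.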
rbd create $RBD_CREATE_ARGS --size 256 testimg1
rbd snap create testimg1@snap
rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
rbd snap rm testimg1@snap
rbd create $RBD_CREATE_ARGS --size 256 testimg3
rbd snap create testimg2@snap
rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
rbd snap rm testimg2@snap
rbd snap create testimg4@snap
rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
rbd snap rm testimg4@snap
rbd trash mv testimg1
rbd trash mv testimg2
rbd trash mv testimg3
rbd trash mv testimg4
rbd trash ls | wc -l | grep 4
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 3
rbd trash ls | grep testimg1
rbd trash ls | grep testimg2
rbd trash ls | grep testimg4
rbd trash mv testimg6
rbd trash ls | wc -l | grep 4
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 2
rbd trash ls | grep testimg1
rbd trash ls | grep testimg2
rbd trash mv testimg5
rbd trash ls | wc -l | grep 3
rbd trash purge
rbd trash ls | wc -l | grep 0
rbd create $RBD_CREATE_ARGS --size 256 testimg1
rbd snap create testimg1@snap
rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
rbd snap rm testimg1@snap
rbd create $RBD_CREATE_ARGS --size 256 testimg3
rbd snap create testimg3@snap # pin testimg3
rbd snap create testimg2@snap
rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
rbd snap rm testimg2@snap
rbd snap create testimg4@snap
rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
rbd snap rm testimg4@snap
rbd trash mv testimg1
rbd trash mv testimg2
rbd trash mv testimg3
rbd trash mv testimg4
rbd trash ls | wc -l | grep 4
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 4
rbd trash mv testimg6
rbd trash ls | wc -l | grep 5
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 3
rbd trash ls | grep testimg1
rbd trash ls | grep testimg2
rbd trash ls | grep testimg3
rbd trash mv testimg5
rbd trash ls | wc -l | grep 4
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 1
rbd trash ls | grep testimg3
ID=$(rbd trash ls | awk '{ print $1 }')
rbd snap purge --image-id $ID
rbd trash purge
rbd trash ls | wc -l | grep 0
# test purging a clone with a chain of auto-delete parents
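# Same clone chain as above, but the parents are moved to the trash by
# 'rbd rm --rbd_move_parent_to_trash_on_remove=true' instead of an
# explicit 'rbd trash mv'.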
rbd create $RBD_CREATE_ARGS --size 256 testimg1
rbd snap create testimg1@snap
rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
rbd snap rm testimg1@snap
rbd create $RBD_CREATE_ARGS --size 256 testimg3
rbd snap create testimg2@snap
rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
rbd snap rm testimg2@snap
rbd snap create testimg4@snap
rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
rbd snap rm testimg4@snap
rbd rm --rbd_move_parent_to_trash_on_remove=true testimg1
rbd rm --rbd_move_parent_to_trash_on_remove=true testimg2
rbd trash mv testimg3
rbd rm --rbd_move_parent_to_trash_on_remove=true testimg4
rbd trash ls | wc -l | grep 4
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 3
rbd trash ls | grep testimg1
rbd trash ls | grep testimg2
rbd trash ls | grep testimg4
rbd trash mv testimg6
rbd trash ls | wc -l | grep 4
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 2
rbd trash ls | grep testimg1
rbd trash ls | grep testimg2
rbd trash mv testimg5
rbd trash ls | wc -l | grep 3
rbd trash purge
rbd trash ls | wc -l | grep 0
rbd create $RBD_CREATE_ARGS --size 256 testimg1
rbd snap create testimg1@snap
rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
rbd snap rm testimg1@snap
rbd create $RBD_CREATE_ARGS --size 256 testimg3
rbd snap create testimg3@snap # pin testimg3
rbd snap create testimg2@snap
rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
rbd snap rm testimg2@snap
rbd snap create testimg4@snap
rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
rbd snap rm testimg4@snap
rbd rm --rbd_move_parent_to_trash_on_remove=true testimg1
rbd rm --rbd_move_parent_to_trash_on_remove=true testimg2
rbd trash mv testimg3
rbd rm --rbd_move_parent_to_trash_on_remove=true testimg4
rbd trash ls | wc -l | grep 4
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 4
rbd trash mv testimg6
rbd trash ls | wc -l | grep 5
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 3
rbd trash ls | grep testimg1
rbd trash ls | grep testimg2
rbd trash ls | grep testimg3
rbd trash mv testimg5
rbd trash ls | wc -l | grep 4
rbd trash purge 2>&1 | grep 'some expired images could not be removed'
rbd trash ls | wc -l | grep 1
rbd trash ls | grep testimg3
ID=$(rbd trash ls | awk '{ print $1 }')
rbd snap purge --image-id $ID
rbd trash purge
rbd trash ls | wc -l | grep 0
}
test_deep_copy_clone() {
echo "testing deep copy clone..."
remove_images
rbd create testimg1 $RBD_CREATE_ARGS --size 256
rbd snap create testimg1 --snap=snap1
rbd snap protect testimg1@snap1
rbd clone testimg1@snap1 testimg2
rbd snap create testimg2@snap2
rbd deep copy testimg2 testimg3
rbd info testimg3 | grep 'size 256 MiB'
rbd info testimg3 | grep 'parent: rbd/testimg1@snap1'
rbd snap ls testimg3 | grep -v 'SNAPID' | wc -l | grep 1
rbd snap ls testimg3 | grep '.*snap2.*'
rbd info testimg2 | grep 'features:.*deep-flatten' || rbd snap rm testimg2@snap2
rbd info testimg3 | grep 'features:.*deep-flatten' || rbd snap rm testimg3@snap2
rbd flatten testimg2
rbd flatten testimg3
rbd snap unprotect testimg1@snap1
rbd snap purge testimg2
rbd snap purge testimg3
rbd rm testimg2
rbd rm testimg3
rbd snap protect testimg1@snap1
rbd clone testimg1@snap1 testimg2
rbd snap create testimg2@snap2
rbd deep copy --flatten testimg2 testimg3
rbd info testimg3 | grep 'size 256 MiB'
rbd info testimg3 | grep -v 'parent:'
rbd snap ls testimg3 | grep -v 'SNAPID' | wc -l | grep 1
rbd snap ls testimg3 | grep '.*snap2.*'
rbd info testimg2 | grep 'features:.*deep-flatten' || rbd snap rm testimg2@snap2
rbd flatten testimg2
rbd snap unprotect testimg1@snap1
remove_images
}
test_clone_v2() {
echo "testing clone v2..."
remove_images
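# v1 clones require the parent snapshot to be protected; v2 clones do
# not.  When the parent snapshot of a v2 clone is removed it is moved
# to the trash snapshot namespace (shown by 'snap list --all') until
# the last clone is flattened or removed.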
rbd create $RBD_CREATE_ARGS -s 1 test1
rbd snap create test1@1
rbd clone --rbd-default-clone-format=1 test1@1 test2 && exit 1 || true
rbd clone --rbd-default-clone-format=2 test1@1 test2
rbd clone --rbd-default-clone-format=2 test1@1 test3
rbd snap protect test1@1
rbd clone --rbd-default-clone-format=1 test1@1 test4
rbd children test1@1 | sort | tr '\n' ' ' | grep -E "test2.*test3.*test4"
rbd children --descendants test1 | sort | tr '\n' ' ' | grep -E "test2.*test3.*test4"
rbd remove test4
rbd snap unprotect test1@1
rbd snap remove test1@1
rbd snap list --all test1 | grep -E "trash \(1\) *$"
rbd snap create test1@2
rbd rm test1 2>&1 | grep 'image has snapshots'
rbd snap rm test1@2
rbd rm test1 2>&1 | grep 'linked clones'
rbd rm test3
rbd rm test1 2>&1 | grep 'linked clones'
rbd flatten test2
rbd snap list --all test1 | wc -l | grep '^0$'
rbd rm test1
rbd rm test2
rbd create $RBD_CREATE_ARGS -s 1 test1
rbd snap create test1@1
rbd snap create test1@2
rbd clone test1@1 test2 --rbd-default-clone-format 2
rbd clone test1@2 test3 --rbd-default-clone-format 2
rbd snap rm test1@1
rbd snap rm test1@2
expect_fail rbd rm test1
rbd rm test1 --rbd-move-parent-to-trash-on-remove=true
rbd trash ls -a | grep test1
rbd rm test2
rbd trash ls -a | grep test1
rbd rm test3
rbd trash ls -a | expect_fail grep test1
}
test_thick_provision() {
echo "testing thick provision..."
remove_images
# Create small and large thick-provisioned images (64M and 4G) and
# check that their actual allocated size matches.
# Small thick-provisioned image test
rbd create $RBD_CREATE_ARGS --thick-provision -s 64M test1
count=0
ret=""
while [ $count -lt 10 ]
do
rbd du | grep test1 | tr -s " " | cut -d " " -f 4-5 | grep '^64 MiB' && ret=$?
if [ "$ret" = "0" ]
then
break;
fi
count=`expr $count + 1`
sleep 2
done
rbd du
if [ "$ret" != "0" ]
then
exit 1
fi
rbd rm test1
rbd ls | grep test1 | wc -l | grep '^0$'
# Large thick-provisioned image test
rbd create $RBD_CREATE_ARGS --thick-provision -s 4G test1
count=0
ret=""
while [ $count -lt 10 ]
do
rbd du | grep test1 | tr -s " " | cut -d " " -f 4-5 | grep '^4 GiB' && ret=$?
if [ "$ret" = "0" ]
then
break;
fi
count=`expr $count + 1`
sleep 2
done
rbd du
if [ "$ret" != "0" ]
then
exit 1
fi
rbd rm test1
rbd ls | grep test1 | wc -l | grep '^0$'
}
test_namespace() {
echo "testing namespace..."
remove_images
rbd namespace ls | wc -l | grep '^0$'
rbd namespace create rbd/test1
rbd namespace create --pool rbd --namespace test2
rbd namespace create --namespace test3
expect_fail rbd namespace create rbd/test3
rbd namespace list | grep 'test' | wc -l | grep '^3$'
expect_fail rbd namespace remove --pool rbd missing
rbd create $RBD_CREATE_ARGS --size 1G rbd/test1/image1
# clone across namespaces: test1 ns to test2 ns
rbd bench --io-type write --io-pattern rand --io-total 32M --io-size 4K rbd/test1/image1
rbd snap create rbd/test1/image1@1
rbd clone --rbd-default-clone-format 2 rbd/test1/image1@1 rbd/test2/image1
rbd snap rm rbd/test1/image1@1
cmp <(rbd export rbd/test1/image1 -) <(rbd export rbd/test2/image1 -)
rbd rm rbd/test2/image1
# clone from the default namespace to the test2 namespace
rbd create $RBD_CREATE_ARGS --size 1G rbd/image2
rbd bench --io-type write --io-pattern rand --io-total 32M --io-size 4K rbd/image2
rbd snap create rbd/image2@1
rbd clone --rbd-default-clone-format 2 rbd/image2@1 rbd/test2/image2
rbd snap rm rbd/image2@1
cmp <(rbd export rbd/image2 -) <(rbd export rbd/test2/image2 -)
expect_fail rbd rm rbd/image2
rbd rm rbd/test2/image2
rbd rm rbd/image2
# v1 clones are supported within the same namespace
rbd create $RBD_CREATE_ARGS --size 1G rbd/test1/image3
rbd snap create rbd/test1/image3@1
rbd snap protect rbd/test1/image3@1
rbd clone --rbd-default-clone-format 1 rbd/test1/image3@1 rbd/test1/image4
rbd rm rbd/test1/image4
rbd snap unprotect rbd/test1/image3@1
rbd snap rm rbd/test1/image3@1
rbd rm rbd/test1/image3
rbd create $RBD_CREATE_ARGS --size 1G --namespace test1 image2
expect_fail rbd namespace remove rbd/test1
rbd group create rbd/test1/group1
rbd group image add rbd/test1/group1 rbd/test1/image1
rbd group rm rbd/test1/group1
rbd trash move rbd/test1/image1
ID=`rbd trash --namespace test1 ls | cut -d ' ' -f 1`
rbd trash rm rbd/test1/${ID}
rbd remove rbd/test1/image2
rbd namespace remove --pool rbd --namespace test1
rbd namespace remove --namespace test3
rbd namespace list | grep 'test' | wc -l | grep '^1$'
rbd namespace remove rbd/test2
}
get_migration_state() {
local image=$1
rbd --format xml status $image |
$XMLSTARLET sel -t -v '//status/migration/state'
}
test_migration() {
echo "testing migration..."
remove_images
ceph osd pool create rbd2 8
rbd pool init rbd2
# Convert to new format
rbd create --image-format 1 -s 128M test1
rbd info test1 | grep 'format: 1'
rbd migration prepare test1 --image-format 2
test "$(get_migration_state test1)" = prepared
rbd info test1 | grep 'format: 2'
rbd rm test1 && exit 1 || true
rbd migration execute test1
test "$(get_migration_state test1)" = executed
rbd migration commit test1
get_migration_state test1 && exit 1 || true
# Enable layering (and some other features)
rbd info test1 | grep 'features: .*layering' && exit 1 || true
rbd migration prepare test1 --image-feature \
layering,exclusive-lock,object-map,fast-diff,deep-flatten
rbd info test1 | grep 'features: .*layering'
rbd migration execute test1
rbd migration commit test1
# Migration to other pool
rbd migration prepare test1 rbd2/test1
test "$(get_migration_state rbd2/test1)" = prepared
rbd ls | wc -l | grep '^0$'
rbd -p rbd2 ls | grep test1
rbd migration execute test1
test "$(get_migration_state rbd2/test1)" = executed
rbd rm rbd2/test1 && exit 1 || true
rbd migration commit test1
# Migration to other namespace
rbd namespace create rbd2/ns1
rbd namespace create rbd2/ns2
rbd migration prepare rbd2/test1 rbd2/ns1/test1
test "$(get_migration_state rbd2/ns1/test1)" = prepared
rbd migration execute rbd2/test1
test "$(get_migration_state rbd2/ns1/test1)" = executed
rbd migration commit rbd2/test1
rbd migration prepare rbd2/ns1/test1 rbd2/ns2/test1
rbd migration execute rbd2/ns2/test1
rbd migration commit rbd2/ns2/test1
# Enable data pool
rbd create -s 128M test1
rbd migration prepare test1 --data-pool rbd2
rbd info test1 | grep 'data_pool: rbd2'
rbd migration execute test1
rbd migration commit test1
# testing trash
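# 'rbd migration prepare' moves the source image into the trash for the
# duration of the migration: it cannot be trashed again, and its trash
# entry cannot be removed or restored until the migration is aborted
# (or committed).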
rbd migration prepare test1
expect_fail rbd trash mv test1
ID=`rbd trash ls -a | cut -d ' ' -f 1`
expect_fail rbd trash rm $ID
expect_fail rbd trash restore $ID
rbd migration abort test1
# Migrate parent
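# Clones must keep working while their parent is migrated: the parent
# spec reported by 'rbd info' and the 'rbd children' listings are
# expected to follow the parent to its new pool (and back again).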
rbd remove test1
dd if=/dev/urandom bs=1M count=1 | rbd --image-format 2 import - test1
md5sum=$(rbd export test1 - | md5sum)
rbd snap create test1@snap1
rbd snap protect test1@snap1
rbd snap create test1@snap2
rbd clone test1@snap1 clone_v1 --rbd_default_clone_format=1
rbd clone test1@snap2 clone_v2 --rbd_default_clone_format=2
rbd info clone_v1 | fgrep 'parent: rbd/test1@snap1'
rbd info clone_v2 | fgrep 'parent: rbd/test1@snap2'
rbd info clone_v2 | grep 'op_features: clone-child'
test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}"
test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}"
test "$(rbd children test1@snap1)" = "rbd/clone_v1"
test "$(rbd children test1@snap2)" = "rbd/clone_v2"
rbd migration prepare test1 rbd2/test2
rbd info clone_v1 | fgrep 'parent: rbd2/test2@snap1'
rbd info clone_v2 | fgrep 'parent: rbd2/test2@snap2'
rbd info clone_v2 | fgrep 'op_features: clone-child'
test "$(rbd children rbd2/test2@snap1)" = "rbd/clone_v1"
test "$(rbd children rbd2/test2@snap2)" = "rbd/clone_v2"
rbd migration execute test1
expect_fail rbd migration commit test1
rbd migration commit test1 --force
test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}"
test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}"
rbd migration prepare rbd2/test2 test1
rbd info clone_v1 | fgrep 'parent: rbd/test1@snap1'
rbd info clone_v2 | fgrep 'parent: rbd/test1@snap2'
rbd info clone_v2 | fgrep 'op_features: clone-child'
test "$(rbd children test1@snap1)" = "rbd/clone_v1"
test "$(rbd children test1@snap2)" = "rbd/clone_v2"
rbd migration execute test1
expect_fail rbd migration commit test1
rbd migration commit test1 --force
test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}"
test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}"
rbd remove clone_v1
rbd remove clone_v2
rbd snap unprotect test1@snap1
rbd snap purge test1
rbd rm test1
for format in 1 2; do
# Abort migration after successful prepare
rbd create -s 128M --image-format ${format} test2
rbd migration prepare test2 --data-pool rbd2
rbd bench --io-type write --io-size 1024 --io-total 1024 test2
rbd migration abort test2
rbd bench --io-type write --io-size 1024 --io-total 1024 test2
rbd rm test2
# Abort migration after successful execute
rbd create -s 128M --image-format ${format} test2
rbd migration prepare test2 --data-pool rbd2
rbd bench --io-type write --io-size 1024 --io-total 1024 test2
rbd migration execute test2
rbd migration abort test2
rbd bench --io-type write --io-size 1024 --io-total 1024 test2
rbd rm test2
# Migration is automatically aborted if prepare failed
rbd create -s 128M --image-format ${format} test2
rbd migration prepare test2 --data-pool INVALID_DATA_POOL && exit 1 || true
rbd bench --io-type write --io-size 1024 --io-total 1024 test2
rbd rm test2
# Abort migration to other pool
rbd create -s 128M --image-format ${format} test2
rbd migration prepare test2 rbd2/test2
rbd bench --io-type write --io-size 1024 --io-total 1024 rbd2/test2
rbd migration abort test2
rbd bench --io-type write --io-size 1024 --io-total 1024 test2
rbd rm test2
# The same but abort using destination image
rbd create -s 128M --image-format ${format} test2
rbd migration prepare test2 rbd2/test2
rbd migration abort rbd2/test2
rbd bench --io-type write --io-size 1024 --io-total 1024 test2
rbd rm test2
test $format = 1 && continue
# Abort migration to other namespace
rbd create -s 128M --image-format ${format} test2
rbd migration prepare test2 rbd2/ns1/test3
rbd bench --io-type write --io-size 1024 --io-total 1024 rbd2/ns1/test3
rbd migration abort test2
rbd bench --io-type write --io-size 1024 --io-total 1024 test2
rbd rm test2
done
remove_images
ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
}
test_config() {
echo "testing config..."
remove_images
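# Config overrides are layered global -> pool -> image; the last column
# of 'rbd config ... list' reports where the effective value comes from
# (config, global, pool or image).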
expect_fail rbd config global set osd rbd_cache true
expect_fail rbd config global set global debug_ms 10
expect_fail rbd config global set global rbd_UNKNOWN false
expect_fail rbd config global set global rbd_cache INVALID
rbd config global set global rbd_cache false
rbd config global set client rbd_cache true
rbd config global set client.123 rbd_cache false
rbd config global get global rbd_cache | grep '^false$'
rbd config global get client rbd_cache | grep '^true$'
rbd config global get client.123 rbd_cache | grep '^false$'
expect_fail rbd config global get client.UNKNOWN rbd_cache
rbd config global list global | grep '^rbd_cache * false * global *$'
rbd config global list client | grep '^rbd_cache * true * client *$'
rbd config global list client.123 | grep '^rbd_cache * false * client.123 *$'
rbd config global list client.UNKNOWN | grep '^rbd_cache * true * client *$'
rbd config global rm client rbd_cache
expect_fail rbd config global get client rbd_cache
rbd config global list client | grep '^rbd_cache * false * global *$'
rbd config global rm client.123 rbd_cache
rbd config global rm global rbd_cache
rbd config pool set rbd rbd_cache true
rbd config pool list rbd | grep '^rbd_cache * true * pool *$'
rbd config pool get rbd rbd_cache | grep '^true$'
rbd create $RBD_CREATE_ARGS -s 1 test1
rbd config image list rbd/test1 | grep '^rbd_cache * true * pool *$'
rbd config image set rbd/test1 rbd_cache false
rbd config image list rbd/test1 | grep '^rbd_cache * false * image *$'
rbd config image get rbd/test1 rbd_cache | grep '^false$'
rbd config image remove rbd/test1 rbd_cache
expect_fail rbd config image get rbd/test1 rbd_cache
rbd config image list rbd/test1 | grep '^rbd_cache * true * pool *$'
rbd config pool remove rbd rbd_cache
expect_fail rbd config pool get rbd rbd_cache
rbd config pool list rbd | grep '^rbd_cache * true * config *$'
rbd rm test1
}
test_trash_purge_schedule() {
echo "testing trash purge schedule..."
remove_images
ceph osd pool create rbd2 8
rbd pool init rbd2
rbd namespace create rbd2/ns1
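# Purge schedules can be added at the global level (no -p), per pool
# (-p rbd) and per namespace (-p rbd2/ns1); 'ls -R' lists schedules
# recursively, while a plain 'ls' fails if there is no schedule at
# exactly that level.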
test "$(ceph rbd trash purge schedule list)" = "{}"
ceph rbd trash purge schedule status | fgrep '"scheduled": []'
expect_fail rbd trash purge schedule ls
test "$(rbd trash purge schedule ls -R --format json)" = "[]"
rbd trash purge schedule add -p rbd 1d 01:30
rbd trash purge schedule ls -p rbd | grep 'every 1d starting at 01:30'
expect_fail rbd trash purge schedule ls
rbd trash purge schedule ls -R | grep 'every 1d starting at 01:30'
rbd trash purge schedule ls -R -p rbd | grep 'every 1d starting at 01:30'
expect_fail rbd trash purge schedule ls -p rbd2
test "$(rbd trash purge schedule ls -p rbd2 -R --format json)" = "[]"
rbd trash purge schedule add -p rbd2/ns1 2d
test "$(rbd trash purge schedule ls -p rbd2 -R --format json)" != "[]"
rbd trash purge schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *every 2d'
rbd trash purge schedule rm -p rbd2/ns1
test "$(rbd trash purge schedule ls -p rbd2 -R --format json)" = "[]"
for i in `seq 12`; do
test "$(rbd trash purge schedule status --format xml |
$XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd' && break
sleep 10
done
rbd trash purge schedule status
test "$(rbd trash purge schedule status --format xml |
$XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd'
test "$(rbd trash purge schedule status -p rbd --format xml |
$XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd'
rbd trash purge schedule add 2d 00:17
rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
rbd trash purge schedule ls -R | grep 'every 2d starting at 00:17'
expect_fail rbd trash purge schedule ls -p rbd2
rbd trash purge schedule ls -p rbd2 -R | grep 'every 2d starting at 00:17'
rbd trash purge schedule ls -p rbd2/ns1 -R | grep 'every 2d starting at 00:17'
test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
$XMLSTARLET sel -t -v '//schedules/schedule/pool')" = "-"
test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
$XMLSTARLET sel -t -v '//schedules/schedule/namespace')" = "-"
test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
$XMLSTARLET sel -t -v '//schedules/schedule/items/item/start_time')" = "00:17:00"
for i in `seq 12`; do
rbd trash purge schedule status --format xml |
$XMLSTARLET sel -t -v '//scheduled/item/pool' | grep 'rbd2' && break
sleep 10
done
rbd trash purge schedule status
rbd trash purge schedule status --format xml |
$XMLSTARLET sel -t -v '//scheduled/item/pool' | grep 'rbd2'
echo $(rbd trash purge schedule status --format xml |
$XMLSTARLET sel -t -v '//scheduled/item/pool') | grep 'rbd rbd2 rbd2'
test "$(rbd trash purge schedule status -p rbd --format xml |
$XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd'
test "$(echo $(rbd trash purge schedule status -p rbd2 --format xml |
$XMLSTARLET sel -t -v '//scheduled/item/pool'))" = 'rbd2 rbd2'
test "$(echo $(rbd trash purge schedule ls -R --format xml |
$XMLSTARLET sel -t -v '//schedules/schedule/items'))" = "2d00:17:00 1d01:30:00"
rbd trash purge schedule add 1d
rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
rbd trash purge schedule ls | grep 'every 1d'
rbd trash purge schedule ls -R --format xml |
$XMLSTARLET sel -t -v '//schedules/schedule/items' | grep '2d00:17'
rbd trash purge schedule rm 1d
rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
rbd trash purge schedule rm 2d 00:17
expect_fail rbd trash purge schedule ls
for p in rbd2 rbd2/ns1; do
rbd create $RBD_CREATE_ARGS -s 1 rbd2/ns1/test1
rbd trash mv rbd2/ns1/test1
rbd trash ls rbd2/ns1 | wc -l | grep '^1$'
rbd trash purge schedule add -p $p 1m
rbd trash purge schedule list -p rbd2 -R | grep 'every 1m'
rbd trash purge schedule list -p rbd2/ns1 -R | grep 'every 1m'
for i in `seq 12`; do
rbd trash ls rbd2/ns1 | wc -l | grep '^1$' || break
sleep 10
done
rbd trash ls rbd2/ns1 | wc -l | grep '^0$'
# repeat after the schedule has kicked in, see https://tracker.ceph.com/issues/53915
rbd trash purge schedule list -p rbd2 -R | grep 'every 1m'
rbd trash purge schedule list -p rbd2/ns1 -R | grep 'every 1m'
rbd trash purge schedule status | grep 'rbd2 *ns1'
rbd trash purge schedule status -p rbd2 | grep 'rbd2 *ns1'
rbd trash purge schedule status -p rbd2/ns1 | grep 'rbd2 *ns1'
rbd trash purge schedule rm -p $p 1m
done
# Negative tests
rbd trash purge schedule add 2m
expect_fail rbd trash purge schedule add -p rbd dummy
expect_fail rbd trash purge schedule add dummy
expect_fail rbd trash purge schedule remove -p rbd dummy
expect_fail rbd trash purge schedule remove dummy
rbd trash purge schedule ls -p rbd | grep 'every 1d starting at 01:30'
rbd trash purge schedule ls | grep 'every 2m'
rbd trash purge schedule remove -p rbd 1d 01:30
rbd trash purge schedule remove 2m
test "$(rbd trash purge schedule ls -R --format json)" = "[]"
remove_images
ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
}
test_trash_purge_schedule_recovery() {
echo "testing recovery of trash_purge_schedule handler after module's RADOS client is blocklisted..."
remove_images
ceph osd pool create rbd3 8
rbd pool init rbd3
rbd namespace create rbd3/ns1
rbd trash purge schedule add -p rbd3/ns1 2d
rbd trash purge schedule ls -p rbd3 -R | grep 'rbd3 *ns1 *every 2d'
# Fetch and blocklist the rbd_support module's RADOS client
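# Blocklisting the module's RADOS client simulates losing its
# connection to the cluster; the rbd_support module is expected to
# detect this and recreate its client, so requests fail at first and
# succeed again once the handler has recovered.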
CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
jq 'select(.name == "rbd_support")' |
jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
ceph osd blocklist add $CLIENT_ADDR
ceph osd blocklist ls | grep $CLIENT_ADDR
# Check that you can add a trash purge schedule after a few retries
expect_fail rbd trash purge schedule add -p rbd3 10m
sleep 10
for i in `seq 24`; do
rbd trash purge schedule add -p rbd3 10m && break
sleep 10
done
rbd trash purge schedule ls -p rbd3 -R | grep 'every 10m'
# Verify that the schedule present before client blocklisting is preserved
rbd trash purge schedule ls -p rbd3 -R | grep 'rbd3 *ns1 *every 2d'
rbd trash purge schedule remove -p rbd3 10m
rbd trash purge schedule remove -p rbd3/ns1 2d
rbd trash purge schedule ls -p rbd3 -R | expect_fail grep 'every 10m'
rbd trash purge schedule ls -p rbd3 -R | expect_fail grep 'rbd3 *ns1 *every 2d'
ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
}
test_mirror_snapshot_schedule() {
echo "testing mirror snapshot schedule..."
remove_images
ceph osd pool create rbd2 8
rbd pool init rbd2
rbd namespace create rbd2/ns1
rbd mirror pool enable rbd2 image
rbd mirror pool enable rbd2/ns1 image
rbd mirror pool peer add rbd2 cluster1
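# With the pool in 'image' mirroring mode, snapshot-based mirroring has
# to be enabled per image; the scheduler only creates mirror snapshots
# for primary images, which the demote/promote steps below verify.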
test "$(ceph rbd mirror snapshot schedule list)" = "{}"
ceph rbd mirror snapshot schedule status | fgrep '"scheduled_images": []'
expect_fail rbd mirror snapshot schedule ls
test "$(rbd mirror snapshot schedule ls -R --format json)" = "[]"
rbd create $RBD_CREATE_ARGS -s 1 rbd2/ns1/test1
test "$(rbd mirror image status rbd2/ns1/test1 |
grep -c mirror.primary)" = '0'
rbd mirror image enable rbd2/ns1/test1 snapshot
test "$(rbd mirror image status rbd2/ns1/test1 |
grep -c mirror.primary)" = '1'
rbd mirror snapshot schedule add -p rbd2/ns1 --image test1 1m
expect_fail rbd mirror snapshot schedule ls
rbd mirror snapshot schedule ls -R | grep 'rbd2 *ns1 *test1 *every 1m'
expect_fail rbd mirror snapshot schedule ls -p rbd2
rbd mirror snapshot schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *test1 *every 1m'
expect_fail rbd mirror snapshot schedule ls -p rbd2/ns1
rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'rbd2 *ns1 *test1 *every 1m'
test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
for i in `seq 12`; do
test "$(rbd mirror image status rbd2/ns1/test1 |
grep -c mirror.primary)" -gt '1' && break
sleep 10
done
test "$(rbd mirror image status rbd2/ns1/test1 |
grep -c mirror.primary)" -gt '1'
# repeat after the schedule has kicked in, see https://tracker.ceph.com/issues/53915
expect_fail rbd mirror snapshot schedule ls
rbd mirror snapshot schedule ls -R | grep 'rbd2 *ns1 *test1 *every 1m'
expect_fail rbd mirror snapshot schedule ls -p rbd2
rbd mirror snapshot schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *test1 *every 1m'
expect_fail rbd mirror snapshot schedule ls -p rbd2/ns1
rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'rbd2 *ns1 *test1 *every 1m'
test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
rbd mirror snapshot schedule status
test "$(rbd mirror snapshot schedule status --format xml |
$XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
test "$(rbd mirror snapshot schedule status -p rbd2 --format xml |
$XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
test "$(rbd mirror snapshot schedule status -p rbd2/ns1 --format xml |
$XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
test "$(rbd mirror snapshot schedule status -p rbd2/ns1 --image test1 --format xml |
$XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
rbd mirror image demote rbd2/ns1/test1
for i in `seq 12`; do
rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' || break
sleep 10
done
rbd mirror snapshot schedule status | expect_fail grep 'rbd2/ns1/test1'
rbd mirror image promote rbd2/ns1/test1
for i in `seq 12`; do
rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' && break
sleep 10
done
rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1'
rbd mirror snapshot schedule add 1h 00:15
test "$(rbd mirror snapshot schedule ls)" = 'every 1h starting at 00:15:00'
rbd mirror snapshot schedule ls -R | grep 'every 1h starting at 00:15:00'
rbd mirror snapshot schedule ls -R | grep 'rbd2 *ns1 *test1 *every 1m'
expect_fail rbd mirror snapshot schedule ls -p rbd2
rbd mirror snapshot schedule ls -p rbd2 -R | grep 'every 1h starting at 00:15:00'
rbd mirror snapshot schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *test1 *every 1m'
expect_fail rbd mirror snapshot schedule ls -p rbd2/ns1
rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'every 1h starting at 00:15:00'
rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'rbd2 *ns1 *test1 *every 1m'
test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
# Negative tests
expect_fail rbd mirror snapshot schedule add dummy
expect_fail rbd mirror snapshot schedule add -p rbd2/ns1 --image test1 dummy
expect_fail rbd mirror snapshot schedule remove dummy
expect_fail rbd mirror snapshot schedule remove -p rbd2/ns1 --image test1 dummy
test "$(rbd mirror snapshot schedule ls)" = 'every 1h starting at 00:15:00'
test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
rbd rm rbd2/ns1/test1
for i in `seq 12`; do
rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' || break
sleep 10
done
rbd mirror snapshot schedule status | expect_fail grep 'rbd2/ns1/test1'
rbd mirror snapshot schedule remove
test "$(rbd mirror snapshot schedule ls -R --format json)" = "[]"
remove_images
ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
}
test_mirror_snapshot_schedule_recovery() {
echo "testing recovery of mirror snapshot scheduler after module's RADOS client is blocklisted..."
remove_images
ceph osd pool create rbd3 8
rbd pool init rbd3
rbd namespace create rbd3/ns1
rbd mirror pool enable rbd3 image
rbd mirror pool enable rbd3/ns1 image
rbd mirror pool peer add rbd3 cluster1
rbd create $RBD_CREATE_ARGS -s 1 rbd3/ns1/test1
rbd mirror image enable rbd3/ns1/test1 snapshot
test "$(rbd mirror image status rbd3/ns1/test1 |
grep -c mirror.primary)" = '1'
rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 1m
test "$(rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1)" = 'every 1m'
# Fetch and blocklist rbd_support module's RADOS client
CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
jq 'select(.name == "rbd_support")' |
jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
ceph osd blocklist add $CLIENT_ADDR
ceph osd blocklist ls | grep $CLIENT_ADDR
# Check that you can add a mirror snapshot schedule after a few retries
expect_fail rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 2m
sleep 10
for i in `seq 24`; do
rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 2m && break
sleep 10
done
rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | grep 'every 2m'
# Verify that the schedule present before client blocklisting is preserved
rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | grep 'every 1m'
rbd mirror snapshot schedule rm -p rbd3/ns1 --image test1 2m
rbd mirror snapshot schedule rm -p rbd3/ns1 --image test1 1m
rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | expect_fail grep 'every 2m'
rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | expect_fail grep 'every 1m'
rbd snap purge rbd3/ns1/test1
rbd rm rbd3/ns1/test1
ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
}
test_perf_image_iostat() {
echo "testing perf image iostat..."
remove_images
ceph osd pool create rbd1 8
rbd pool init rbd1
rbd namespace create rbd1/ns
ceph osd pool create rbd2 8
rbd pool init rbd2
rbd namespace create rbd2/ns
IMAGE_SPECS=("test1" "rbd1/test2" "rbd1/ns/test3" "rbd2/test4" "rbd2/ns/test5")
for spec in "${IMAGE_SPECS[@]}"; do
# ensure all images are created without a separate data pool
# as we filter iostat by specific pool specs below
rbd create $RBD_CREATE_ARGS --size 10G --rbd-default-data-pool '' $spec
done
BENCH_PIDS=()
for spec in "${IMAGE_SPECS[@]}"; do
rbd bench --io-type write --io-pattern rand --io-total 10G --io-threads 1 \
--rbd-cache false $spec >/dev/null 2>&1 &
BENCH_PIDS+=($!)
done
# test specifying pool spec via spec syntax
test "$(rbd perf image iostat --format json rbd1 |
jq -r 'map(.image) | sort | join(" ")')" = 'test2'
test "$(rbd perf image iostat --format json rbd1/ns |
jq -r 'map(.image) | sort | join(" ")')" = 'test3'
test "$(rbd perf image iostat --format json --rbd-default-pool rbd1 /ns |
jq -r 'map(.image) | sort | join(" ")')" = 'test3'
# test specifying pool spec via options
test "$(rbd perf image iostat --format json --pool rbd2 |
jq -r 'map(.image) | sort | join(" ")')" = 'test4'
test "$(rbd perf image iostat --format json --pool rbd2 --namespace ns |
jq -r 'map(.image) | sort | join(" ")')" = 'test5'
test "$(rbd perf image iostat --format json --rbd-default-pool rbd2 --namespace ns |
jq -r 'map(.image) | sort | join(" ")')" = 'test5'
# test omitting pool spec (-> GLOBAL_POOL_KEY)
test "$(rbd perf image iostat --format json |
jq -r 'map(.image) | sort | join(" ")')" = 'test1 test2 test3 test4 test5'
for pid in "${BENCH_PIDS[@]}"; do
kill $pid
done
wait
remove_images
ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
ceph osd pool rm rbd1 rbd1 --yes-i-really-really-mean-it
}
test_perf_image_iostat_recovery() {
echo "testing recovery of perf handler after module's RADOS client is blocklisted..."
remove_images
ceph osd pool create rbd3 8
rbd pool init rbd3
rbd namespace create rbd3/ns
IMAGE_SPECS=("rbd3/test1" "rbd3/ns/test2")
for spec in "${IMAGE_SPECS[@]}"; do
# ensure all images are created without a separate data pool
# as we filter iostat by specific pool specs below
rbd create $RBD_CREATE_ARGS --size 10G --rbd-default-data-pool '' $spec
done
BENCH_PIDS=()
for spec in "${IMAGE_SPECS[@]}"; do
rbd bench --io-type write --io-pattern rand --io-total 10G --io-threads 1 \
--rbd-cache false $spec >/dev/null 2>&1 &
BENCH_PIDS+=($!)
done
test "$(rbd perf image iostat --format json rbd3 |
jq -r 'map(.image) | sort | join(" ")')" = 'test1'
# Fetch and blocklist the rbd_support module's RADOS client
CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
jq 'select(.name == "rbd_support")' |
jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
ceph osd blocklist add $CLIENT_ADDR
ceph osd blocklist ls | grep $CLIENT_ADDR
expect_fail rbd perf image iostat --format json rbd3/ns
sleep 10
for i in `seq 24`; do
test "$(rbd perf image iostat --format json rbd3/ns |
jq -r 'map(.image) | sort | join(" ")')" = 'test2' && break
sleep 10
done
for pid in "${BENCH_PIDS[@]}"; do
kill $pid
done
wait
remove_images
ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
}
test_mirror_pool_peer_bootstrap_create() {
echo "testing mirror pool peer bootstrap create..."
remove_images
ceph osd pool create rbd1 8
rbd pool init rbd1
rbd mirror pool enable rbd1 image
ceph osd pool create rbd2 8
rbd pool init rbd2
rbd mirror pool enable rbd2 pool
readarray -t MON_ADDRS < <(ceph mon dump |
sed -n 's/^[0-9]: \(.*\) mon\.[a-z]$/\1/p')
# check that all monitors make it to the token even if only one
# valid monitor is specified
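# The bootstrap token is base64-encoded JSON carrying the fsid, the
# bootstrap client id and key, and the cluster's mon_host list.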
BAD_MON_ADDR="1.2.3.4:6789"
MON_HOST="${MON_ADDRS[0]},$BAD_MON_ADDR"
TOKEN="$(rbd mirror pool peer bootstrap create \
--mon-host "$MON_HOST" rbd1 | base64 -d)"
TOKEN_FSID="$(jq -r '.fsid' <<< "$TOKEN")"
TOKEN_CLIENT_ID="$(jq -r '.client_id' <<< "$TOKEN")"
TOKEN_KEY="$(jq -r '.key' <<< "$TOKEN")"
TOKEN_MON_HOST="$(jq -r '.mon_host' <<< "$TOKEN")"
test "$TOKEN_FSID" = "$(ceph fsid)"
test "$TOKEN_KEY" = "$(ceph auth get-key client.$TOKEN_CLIENT_ID)"
for addr in "${MON_ADDRS[@]}"; do
fgrep "$addr" <<< "$TOKEN_MON_HOST"
done
expect_fail fgrep "$BAD_MON_ADDR" <<< "$TOKEN_MON_HOST"
# check that the token does not change, including across pools
test "$(rbd mirror pool peer bootstrap create \
--mon-host "$MON_HOST" rbd1 | base64 -d)" = "$TOKEN"
test "$(rbd mirror pool peer bootstrap create \
rbd1 | base64 -d)" = "$TOKEN"
test "$(rbd mirror pool peer bootstrap create \
--mon-host "$MON_HOST" rbd2 | base64 -d)" = "$TOKEN"
test "$(rbd mirror pool peer bootstrap create \
rbd2 | base64 -d)" = "$TOKEN"
ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
ceph osd pool rm rbd1 rbd1 --yes-i-really-really-mean-it
}
test_tasks_removed_pool() {
echo "testing removing pool under running tasks..."
remove_images
ceph osd pool create rbd2 8
rbd pool init rbd2
rbd create $RBD_CREATE_ARGS --size 1G foo
rbd snap create foo@snap
rbd snap protect foo@snap
rbd clone foo@snap bar
rbd create $RBD_CREATE_ARGS --size 1G rbd2/dummy
rbd bench --io-type write --io-pattern seq --io-size 1M --io-total 1G rbd2/dummy
rbd snap create rbd2/dummy@snap
rbd snap protect rbd2/dummy@snap
for i in {1..5}; do
rbd clone rbd2/dummy@snap rbd2/dummy$i
done
# queue flatten tasks on a few dummy images, then remove the pool they live in
test "$(ceph rbd task list)" = "[]"
for i in {1..5}; do
ceph rbd task add flatten rbd2/dummy$i
done
ceph osd pool delete rbd2 rbd2 --yes-i-really-really-mean-it
test "$(ceph rbd task list)" != "[]"
# queue flatten on another image and check that it completes
rbd info bar | grep 'parent: '
expect_fail rbd snap unprotect foo@snap
ceph rbd task add flatten bar
for i in {1..12}; do
rbd info bar | grep 'parent: ' || break
sleep 10
done
rbd info bar | expect_fail grep 'parent: '
rbd snap unprotect foo@snap
# check that flattens disrupted by pool removal are cleaned up
for i in {1..12}; do
test "$(ceph rbd task list)" = "[]" && break
sleep 10
done
test "$(ceph rbd task list)" = "[]"
remove_images
}
test_tasks_recovery() {
echo "testing task handler recovery after module's RADOS client is blocklisted..."
remove_images
ceph osd pool create rbd2 8
rbd pool init rbd2
rbd create $RBD_CREATE_ARGS --size 1G rbd2/img1
rbd bench --io-type write --io-pattern seq --io-size 1M --io-total 1G rbd2/img1
rbd snap create rbd2/img1@snap
rbd snap protect rbd2/img1@snap
rbd clone rbd2/img1@snap rbd2/clone1
# Fetch and blocklist rbd_support module's RADOS client
CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
jq 'select(.name == "rbd_support")' |
jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
ceph osd blocklist add $CLIENT_ADDR
ceph osd blocklist ls | grep $CLIENT_ADDR
expect_fail ceph rbd task add flatten rbd2/clone1
sleep 10
for i in `seq 24`; do
ceph rbd task add flatten rbd2/clone1 && break
sleep 10
done
test "$(ceph rbd task list)" != "[]"
for i in {1..12}; do
rbd info rbd2/clone1 | grep 'parent: ' || break
sleep 10
done
rbd info rbd2/clone1 | expect_fail grep 'parent: '
rbd snap unprotect rbd2/img1@snap
test "$(ceph rbd task list)" = "[]"
ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
}
test_pool_image_args
test_rename
test_ls
test_remove
test_migration
test_config
RBD_CREATE_ARGS=""
test_others
test_locking
test_thick_provision
RBD_CREATE_ARGS="--image-format 2"
test_others
test_locking
test_clone
test_trash
test_purge
test_deep_copy_clone
test_clone_v2
test_thick_provision
test_namespace
test_trash_purge_schedule
test_trash_purge_schedule_recovery
test_mirror_snapshot_schedule
test_mirror_snapshot_schedule_recovery
test_perf_image_iostat
test_perf_image_iostat_recovery
test_mirror_pool_peer_bootstrap_create
test_tasks_removed_pool
test_tasks_recovery
echo OK
| 61,261 | 34.700466 | 197 | sh |
null | ceph-main/qa/workunits/rbd/cli_migration.sh | #!/usr/bin/env bash
set -ex
. $(dirname $0)/../../standalone/ceph-helpers.sh
TEMPDIR=
IMAGE1=image1
IMAGE2=image2
IMAGE3=image3
IMAGES="${IMAGE1} ${IMAGE2} ${IMAGE3}"
cleanup() {
cleanup_tempdir
remove_images
}
setup_tempdir() {
TEMPDIR=`mktemp -d`
}
cleanup_tempdir() {
rm -rf ${TEMPDIR}
}
create_base_image() {
local image=$1
rbd create --size 1G ${image}
rbd bench --io-type write --io-pattern rand --io-size=4K --io-total 256M ${image}
rbd snap create ${image}@1
rbd bench --io-type write --io-pattern rand --io-size=4K --io-total 64M ${image}
rbd snap create ${image}@2
rbd bench --io-type write --io-pattern rand --io-size=4K --io-total 128M ${image}
}
export_raw_image() {
local image=$1
rm -rf "${TEMPDIR}/${image}"
rbd export ${image} "${TEMPDIR}/${image}"
}
export_base_image() {
local image=$1
export_raw_image "${image}"
export_raw_image "${image}@1"
export_raw_image "${image}@2"
}
remove_image() {
local image=$1
(rbd migration abort $image || true) >/dev/null 2>&1
(rbd snap purge $image || true) >/dev/null 2>&1
(rbd rm $image || true) >/dev/null 2>&1
}
remove_images() {
for image in ${IMAGES}
do
remove_image ${image}
done
}
show_diff()
{
local file1=$1
local file2=$2
xxd "${file1}" > "${file1}.xxd"
xxd "${file2}" > "${file2}.xxd"
sdiff -s "${file1}.xxd" "${file2}.xxd" | head -n 64
rm -f "${file1}.xxd" "${file2}.xxd"
}
compare_images() {
local src_image=$1
local dst_image=$2
local ret=0
export_raw_image ${dst_image}
if ! cmp "${TEMPDIR}/${src_image}" "${TEMPDIR}/${dst_image}"
then
show_diff "${TEMPDIR}/${src_image}" "${TEMPDIR}/${dst_image}"
ret=1
fi
return ${ret}
}
test_import_native_format() {
local base_image=$1
local dest_image=$2
rbd migration prepare --import-only "rbd/${base_image}@2" ${dest_image}
rbd migration abort ${dest_image}
local pool_id=$(ceph osd pool ls detail --format xml | xmlstarlet sel -t -v "//pools/pool[pool_name='rbd']/pool_id")
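# An import-only migration takes a JSON source spec; the 'native' type
# identifies an existing image in this cluster by pool (id or name),
# optional namespace, image name and snapshot.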
cat > ${TEMPDIR}/spec.json <<EOF
{
"type": "native",
"pool_id": ${pool_id},
"pool_namespace": "",
"image_name": "${base_image}",
"snap_name": "2"
}
EOF
cat ${TEMPDIR}/spec.json
rbd migration prepare --import-only \
--source-spec-path ${TEMPDIR}/spec.json ${dest_image}
compare_images "${base_image}@1" "${dest_image}@1"
compare_images "${base_image}@2" "${dest_image}@2"
rbd migration abort ${dest_image}
rbd migration prepare --import-only \
--source-spec-path ${TEMPDIR}/spec.json ${dest_image}
rbd migration execute ${dest_image}
compare_images "${base_image}@1" "${dest_image}@1"
compare_images "${base_image}@2" "${dest_image}@2"
rbd migration abort ${dest_image}
rbd migration prepare --import-only \
--source-spec "{\"type\": \"native\", \"pool_id\": "${pool_id}", \"image_name\": \"${base_image}\", \"snap_name\": \"2\"}" \
${dest_image}
rbd migration abort ${dest_image}
rbd migration prepare --import-only \
--source-spec "{\"type\": \"native\", \"pool_name\": \"rbd\", \"image_name\": \"${base_image}\", \"snap_name\": \"2\"}" \
${dest_image}
rbd migration execute ${dest_image}
rbd migration commit ${dest_image}
compare_images "${base_image}@1" "${dest_image}@1"
compare_images "${base_image}@2" "${dest_image}@2"
remove_image "${dest_image}"
}
test_import_qcow_format() {
local base_image=$1
local dest_image=$2
if ! qemu-img convert -f raw -O qcow rbd:rbd/${base_image} ${TEMPDIR}/${base_image}.qcow; then
echo "skipping QCOW test"
return 0
fi
qemu-img info -f qcow ${TEMPDIR}/${base_image}.qcow
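# The 'qcow' source spec type reads a QCOW image through a stream;
# here the stream is a plain local file.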
cat > ${TEMPDIR}/spec.json <<EOF
{
"type": "qcow",
"stream": {
"type": "file",
"file_path": "${TEMPDIR}/${base_image}.qcow"
}
}
EOF
cat ${TEMPDIR}/spec.json
set +e
rbd migration prepare --import-only \
--source-spec-path ${TEMPDIR}/spec.json ${dest_image}
local error_code=$?
set -e
if [ $error_code -eq 95 ]; then
echo "skipping QCOW test (librbd support disabled)"
return 0
fi
test $error_code -eq 0
compare_images "${base_image}" "${dest_image}"
rbd migration abort ${dest_image}
rbd migration prepare --import-only \
--source-spec-path ${TEMPDIR}/spec.json ${dest_image}
compare_images "${base_image}" "${dest_image}"
rbd migration execute ${dest_image}
compare_images "${base_image}" "${dest_image}"
rbd migration commit ${dest_image}
compare_images "${base_image}" "${dest_image}"
remove_image "${dest_image}"
}
test_import_qcow2_format() {
local base_image=$1
local dest_image=$2
# create new image via qemu-img and its bench tool since we cannot
# import snapshot deltas into QCOW2
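# Three distinct fill patterns (0x41/0x42/0x43) are written, with qcow2
# snapshots taken after the first and second rounds, and each state is
# also converted to a raw file so the imported snapshots and head can
# be compared byte for byte.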
qemu-img create -f qcow2 ${TEMPDIR}/${base_image}.qcow2 1G
qemu-img bench -f qcow2 -w -c 65536 -d 16 --pattern 65 -s 4096 \
-S $((($RANDOM % 262144) * 4096)) ${TEMPDIR}/${base_image}.qcow2
qemu-img convert -f qcow2 -O raw ${TEMPDIR}/${base_image}.qcow2 \
"${TEMPDIR}/${base_image}@snap1"
qemu-img snapshot -c "snap1" ${TEMPDIR}/${base_image}.qcow2
qemu-img bench -f qcow2 -w -c 16384 -d 16 --pattern 66 -s 4096 \
-S $((($RANDOM % 262144) * 4096)) ${TEMPDIR}/${base_image}.qcow2
qemu-img convert -f qcow2 -O raw ${TEMPDIR}/${base_image}.qcow2 \
"${TEMPDIR}/${base_image}@snap2"
qemu-img snapshot -c "snap2" ${TEMPDIR}/${base_image}.qcow2
qemu-img bench -f qcow2 -w -c 32768 -d 16 --pattern 67 -s 4096 \
-S $((($RANDOM % 262144) * 4096)) ${TEMPDIR}/${base_image}.qcow2
qemu-img convert -f qcow2 -O raw ${TEMPDIR}/${base_image}.qcow2 \
${TEMPDIR}/${base_image}
qemu-img info -f qcow2 ${TEMPDIR}/${base_image}.qcow2
cat > ${TEMPDIR}/spec.json <<EOF
{
"type": "qcow",
"stream": {
"type": "file",
"file_path": "${TEMPDIR}/${base_image}.qcow2"
}
}
EOF
cat ${TEMPDIR}/spec.json
rbd migration prepare --import-only \
--source-spec-path ${TEMPDIR}/spec.json ${dest_image}
compare_images "${base_image}@snap1" "${dest_image}@snap1"
compare_images "${base_image}@snap2" "${dest_image}@snap2"
compare_images "${base_image}" "${dest_image}"
rbd migration abort ${dest_image}
rbd migration prepare --import-only \
--source-spec-path ${TEMPDIR}/spec.json ${dest_image}
compare_images "${base_image}@snap1" "${dest_image}@snap1"
compare_images "${base_image}@snap2" "${dest_image}@snap2"
compare_images "${base_image}" "${dest_image}"
rbd migration execute ${dest_image}
compare_images "${base_image}@snap1" "${dest_image}@snap1"
compare_images "${base_image}@snap2" "${dest_image}@snap2"
compare_images "${base_image}" "${dest_image}"
rbd migration commit ${dest_image}
compare_images "${base_image}@snap1" "${dest_image}@snap1"
compare_images "${base_image}@snap2" "${dest_image}@snap2"
compare_images "${base_image}" "${dest_image}"
remove_image "${dest_image}"
}
test_import_raw_format() {
local base_image=$1
local dest_image=$2
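# A 'raw' source spec streams a flat image file; the second spec below
# adds point-in-time snapshots via the 'snapshots' array, each backed
# by its own raw file.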
cat > ${TEMPDIR}/spec.json <<EOF
{
"type": "raw",
"stream": {
"type": "file",
"file_path": "${TEMPDIR}/${base_image}"
}
}
EOF
cat ${TEMPDIR}/spec.json
cat ${TEMPDIR}/spec.json | rbd migration prepare --import-only \
--source-spec-path - ${dest_image}
compare_images ${base_image} ${dest_image}
rbd migration abort ${dest_image}
rbd migration prepare --import-only \
--source-spec-path ${TEMPDIR}/spec.json ${dest_image}
rbd migration execute ${dest_image}
rbd migration commit ${dest_image}
compare_images ${base_image} ${dest_image}
remove_image "${dest_image}"
cat > ${TEMPDIR}/spec.json <<EOF
{
"type": "raw",
"stream": {
"type": "file",
"file_path": "${TEMPDIR}/${base_image}"
},
"snapshots": [{
"type": "raw",
"name": "snap1",
"stream": {
"type": "file",
"file_path": "${TEMPDIR}/${base_image}@1"
}
}, {
"type": "raw",
"name": "snap2",
"stream": {
"type": "file",
"file_path": "${TEMPDIR}/${base_image}@2"
}
}]
}
EOF
cat ${TEMPDIR}/spec.json
rbd migration prepare --import-only \
--source-spec-path ${TEMPDIR}/spec.json ${dest_image}
rbd snap create ${dest_image}@head
rbd bench --io-type write --io-pattern rand --io-size=32K --io-total=32M ${dest_image}
compare_images "${base_image}" "${dest_image}@head"
compare_images "${base_image}@1" "${dest_image}@snap1"
compare_images "${base_image}@2" "${dest_image}@snap2"
compare_images "${base_image}" "${dest_image}@head"
rbd migration execute ${dest_image}
compare_images "${base_image}@1" "${dest_image}@snap1"
compare_images "${base_image}@2" "${dest_image}@snap2"
compare_images "${base_image}" "${dest_image}@head"
rbd migration commit ${dest_image}
remove_image "${dest_image}"
}
# make sure rbd pool is EMPTY.. this is a test script!!
rbd ls 2>&1 | wc -l | grep -v '^0$' && echo "nonempty rbd pool, aborting! run this script on an empty test cluster only." && exit 1
setup_tempdir
trap 'cleanup $?' INT TERM EXIT
create_base_image ${IMAGE1}
export_base_image ${IMAGE1}
test_import_native_format ${IMAGE1} ${IMAGE2}
test_import_qcow_format ${IMAGE1} ${IMAGE2}
test_import_qcow2_format ${IMAGE2} ${IMAGE3}
test_import_raw_format ${IMAGE1} ${IMAGE2}
echo OK
| 9,606 | 25.835196 | 132 | sh |
null | ceph-main/qa/workunits/rbd/concurrent.sh | #!/usr/bin/env bash
# Copyright (C) 2013 Inktank Storage, Inc.
#
# This is free software; see the source for copying conditions.
# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as
# published by the Free Software Foundation version 2.
# Alex Elder <[email protected]>
# January 29, 2013
################################################################
# The purpose of this test is to exercise paths through the rbd
# code, making sure no bad pointer references or invalid reference
# count operations occur in the face of concurrent activity.
#
# Each pass of the test creates an rbd image, maps it, and writes
# some data into the image. It also reads some data from all of the
# other images that exist at the time the pass executes. Finally,
# the image is unmapped and removed. The image removal completes in
# the background.
#
# An iteration of the test consists of performing some number of
# passes, initiating each pass as a background job, and finally
# sleeping for a variable delay. The delay is initially a specified
# value, but each iteration shortens that proportionally, such that
# the last iteration will not delay at all.
#
# The result exercises concurrent creates and deletes of rbd images,
# writes to new images, reads from both written and unwritten image
# data (including reads concurrent with writes), and attempts to
# unmap images being read.
# Usage: concurrent [-i <iter>] [-c <count>] [-d <delay>]
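#         e.g. concurrent.sh -i 10 -c 3 -d 5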
#
# Exit status:
# 0: success
# 1: usage error
# 2: other runtime error
# 99: argument count error (programming error)
# 100: getopt error (internal error)
################################################################
set -ex
# Default flag values; RBD_CONCURRENT_ITER names are intended
# to be used in yaml scripts to pass in alternate values, e.g.:
# env:
# RBD_CONCURRENT_ITER: 20
# RBD_CONCURRENT_COUNT: 5
# RBD_CONCURRENT_DELAY: 3
ITER_DEFAULT=${RBD_CONCURRENT_ITER:-100}
COUNT_DEFAULT=${RBD_CONCURRENT_COUNT:-5}
DELAY_DEFAULT=${RBD_CONCURRENT_DELAY:-5} # seconds
CEPH_SECRET_FILE=${CEPH_SECRET_FILE:-}
CEPH_ID=${CEPH_ID:-admin}
SECRET_ARGS=""
if [ "${CEPH_SECRET_FILE}" ]; then
SECRET_ARGS="--secret $CEPH_SECRET_FILE"
fi
################################################################
function setup() {
ID_MAX_DIR=$(mktemp -d /tmp/image_max_id.XXXXX)
ID_COUNT_DIR=$(mktemp -d /tmp/image_ids.XXXXXX)
NAMES_DIR=$(mktemp -d /tmp/image_names.XXXXXX)
SOURCE_DATA=$(mktemp /tmp/source_data.XXXXXX)
# Use urandom to generate SOURCE_DATA
dd if=/dev/urandom of=${SOURCE_DATA} bs=2048 count=66 \
>/dev/null 2>&1
# List of rbd ids *not* created by this script
export INITIAL_RBD_IDS=$(ls /sys/bus/rbd/devices)
# Set up some environment for normal teuthology test setup.
# This really should not be necessary but I found it was.
export CEPH_ARGS=" --name client.0"
}
function cleanup() {
[ ! "${ID_MAX_DIR}" ] && return
local id
local image
# Unmap mapped devices
for id in $(rbd_ids); do
image=$(cat "/sys/bus/rbd/devices/${id}/name")
rbd_unmap_image "${id}"
rbd_destroy_image "${image}"
done
# Get any leftover images
for image in $(rbd ls 2>/dev/null); do
rbd_destroy_image "${image}"
done
wait
sync
rm -f "${SOURCE_DATA}"
[ -d "${NAMES_DIR}" ] && rmdir "${NAMES_DIR}"
echo "Max concurrent rbd image count was $(get_max "${ID_COUNT_DIR}")"
rm -rf "${ID_COUNT_DIR}"
echo "Max rbd image id was $(get_max "${ID_MAX_DIR}")"
rm -rf "${ID_MAX_DIR}"
}
function get_max() {
[ $# -eq 1 ] || exit 99
local dir="$1"
ls -U "${dir}" | sort -n | tail -1
}
trap cleanup HUP INT QUIT
# print a usage message and quit
#
# if a message is supplied, print that first, and then exit
# with non-zero status
function usage() {
if [ $# -gt 0 ]; then
echo "" >&2
echo "$@" >&2
fi
echo "" >&2
echo "Usage: ${PROGNAME} <options> <tests>" >&2
echo "" >&2
echo " options:" >&2
echo " -h or --help" >&2
echo " show this message" >&2
echo " -i or --iterations" >&2
echo " iteration count (1 or more)" >&2
echo " -c or --count" >&2
echo " images created per iteration (1 or more)" >&2
echo " -d or --delay" >&2
echo " maximum delay between iterations" >&2
echo "" >&2
echo " defaults:" >&2
echo " iterations: ${ITER_DEFAULT}"
echo " count: ${COUNT_DEFAULT}"
echo " delay: ${DELAY_DEFAULT} (seconds)"
echo "" >&2
[ $# -gt 0 ] && exit 1
exit 0 # This is used for a --help
}
# parse command line arguments
function parseargs() {
ITER="${ITER_DEFAULT}"
COUNT="${COUNT_DEFAULT}"
DELAY="${DELAY_DEFAULT}"
# Short option flags
SHORT_OPTS=""
SHORT_OPTS="${SHORT_OPTS},h"
SHORT_OPTS="${SHORT_OPTS},i:"
SHORT_OPTS="${SHORT_OPTS},c:"
SHORT_OPTS="${SHORT_OPTS},d:"
# Long option flags
LONG_OPTS=""
LONG_OPTS="${LONG_OPTS},help"
LONG_OPTS="${LONG_OPTS},iterations:"
LONG_OPTS="${LONG_OPTS},count:"
LONG_OPTS="${LONG_OPTS},delay:"
TEMP=$(getopt --name "${PROGNAME}" \
--options "${SHORT_OPTS}" \
--longoptions "${LONG_OPTS}" \
-- "$@")
eval set -- "$TEMP"
while [ "$1" != "--" ]; do
case "$1" in
-h|--help)
usage
;;
-i|--iterations)
ITER="$2"
[ "${ITER}" -lt 1 ] &&
usage "bad iterations value"
shift
;;
-c|--count)
COUNT="$2"
[ "${COUNT}" -lt 1 ] &&
usage "bad count value"
shift
;;
-d|--delay)
DELAY="$2"
shift
;;
*)
exit 100 # Internal error
;;
esac
shift
done
shift
}
function rbd_ids() {
[ $# -eq 0 ] || exit 99
local ids
local i
[ -d /sys/bus/rbd ] || return
ids=" $(echo $(ls /sys/bus/rbd/devices)) "
for i in ${INITIAL_RBD_IDS}; do
ids=${ids/ ${i} / }
done
echo ${ids}
}
function update_maxes() {
local ids="$@"
local last_id
# These aren't 100% safe against concurrent updates but it
# should be pretty close
count=$(echo ${ids} | wc -w)
touch "${ID_COUNT_DIR}/${count}"
last_id=${ids% }
last_id=${last_id##* }
touch "${ID_MAX_DIR}/${last_id}"
}
function rbd_create_image() {
[ $# -eq 0 ] || exit 99
local image=$(basename $(mktemp "${NAMES_DIR}/image.XXXXXX"))
rbd create "${image}" --size=1024
echo "${image}"
}
function rbd_image_id() {
[ $# -eq 1 ] || exit 99
local image="$1"
grep -l "${image}" /sys/bus/rbd/devices/*/name 2>/dev/null |
cut -d / -f 6
}
function rbd_map_image() {
[ $# -eq 1 ] || exit 99
local image="$1"
local id
sudo rbd map "${image}" --user "${CEPH_ID}" ${SECRET_ARGS} \
> /dev/null 2>&1
id=$(rbd_image_id "${image}")
echo "${id}"
}
function rbd_write_image() {
[ $# -eq 1 ] || exit 99
local id="$1"
# Offset and size here are meant to ensure beginning and end
# cross both (4K or 64K) page and (4MB) rbd object boundaries.
# It assumes the SOURCE_DATA file has size 66 * 2048 bytes
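# (Worked out: the write covers bytes 2015*2048 = 4126720 through
# 2081*2048 - 1 = 4261887, so it starts and ends part-way through a
# 4 KB/64 KB page and crosses the 4 MiB object boundary at 4194304.)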
dd if="${SOURCE_DATA}" of="/dev/rbd${id}" bs=2048 seek=2015 \
> /dev/null 2>&1
}
# All starting and ending offsets here are selected so they are not
# aligned on a (4 KB or 64 KB) page boundary
function rbd_read_image() {
[ $# -eq 1 ] || exit 99
local id="$1"
# First read starts and ends at offsets before any written
# data. The osd zero-fills reads from an existing rbd object
# at offsets that precede any previously-written data.
dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=3 \
> /dev/null 2>&1
# Next read starting at an offset before any written data,
# but ending at an offset that includes data that's been
# written. The osd zero-fills unwritten data at the
# beginning of a read.
dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=1983 \
> /dev/null 2>&1
# Read the data at offset 2015 * 2048 bytes (where it was
# written) and make sure it matches the original data.
cmp --quiet "${SOURCE_DATA}" "/dev/rbd${id}" 0 4126720 ||
echo "MISMATCH!!!"
# Now read starting within the pre-written data, but ending
# beyond it. The rbd client zero-fills the unwritten
# portion at the end of a read.
dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=2079 \
> /dev/null 2>&1
# Now read starting from an unwritten range within a written
# rbd object. The rbd client zero-fills this.
dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=2115 \
> /dev/null 2>&1
# Finally read from an unwritten region which would reside
# in a different (non-existent) osd object. The osd client
# zero-fills unwritten data when the target object doesn't
# exist.
dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=4098 \
> /dev/null 2>&1
}
function rbd_unmap_image() {
[ $# -eq 1 ] || exit 99
local id="$1"
sudo rbd unmap "/dev/rbd${id}"
}
function rbd_destroy_image() {
[ $# -eq 1 ] || exit 99
local image="$1"
# Don't wait for it to complete, to increase concurrency
rbd rm "${image}" >/dev/null 2>&1 &
rm -f "${NAMES_DIR}/${image}"
}
function one_pass() {
[ $# -eq 0 ] || exit 99
local image
local id
local ids
local i
image=$(rbd_create_image)
id=$(rbd_map_image "${image}")
ids=$(rbd_ids)
update_maxes "${ids}"
for i in ${ids}; do
if [ "${i}" -eq "${id}" ]; then
rbd_write_image "${i}"
else
rbd_read_image "${i}"
fi
done
rbd_unmap_image "${id}"
rbd_destroy_image "${image}"
}
################################################################
parseargs "$@"
setup
for iter in $(seq 1 "${ITER}"); do
for count in $(seq 1 "${COUNT}"); do
one_pass &
done
# Sleep longer at first, overlap iterations more later.
# Use awk to get sub-second granularity (see sleep(1)).
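# The delay works out to DELAY * (1 - iter/ITER); e.g. with the
# defaults (DELAY=5, ITER=100) iteration 1 sleeps 4.95s, iteration 50
# sleeps 2.50s and the final iteration sleeps 0.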
sleep $(echo "${DELAY}" "${iter}" "${ITER}" |
awk '{ printf("%.2f\n", $1 - $1 * $2 / $3);}')
done
wait
cleanup
exit 0
| 9,823 | 25.12766 | 71 | sh |
null | ceph-main/qa/workunits/rbd/diff.sh | #!/usr/bin/env bash
set -ex
function cleanup() {
rbd snap purge foo || :
rbd rm foo || :
rbd snap purge foo.copy || :
rbd rm foo.copy || :
rbd snap purge foo.copy2 || :
rbd rm foo.copy2 || :
rm -f foo.diff foo.out
}
cleanup
rbd create foo --size 1000
rbd bench --io-type write foo --io-size 4096 --io-threads 5 --io-total 4096000 --io-pattern rand
#rbd cp foo foo.copy
rbd create foo.copy --size 1000
rbd export-diff foo - | rbd import-diff - foo.copy
rbd snap create foo --snap=two
rbd bench --io-type write foo --io-size 4096 --io-threads 5 --io-total 4096000 --io-pattern rand
rbd snap create foo --snap=three
rbd snap create foo.copy --snap=two
rbd export-diff foo@two --from-snap three foo.diff && exit 1 || true # wrong snap order
rm -f foo.diff
rbd export-diff foo@three --from-snap two foo.diff
rbd import-diff foo.diff foo.copy
rbd import-diff foo.diff foo.copy && exit 1 || true # this should fail with EEXIST on the end snap
rbd snap ls foo.copy | grep three
rbd create foo.copy2 --size 1000
rbd import-diff foo.diff foo.copy2 && exit 1 || true # this should fail bc the start snap dne
rbd export foo foo.out
orig=`md5sum foo.out | awk '{print $1}'`
rm foo.out
rbd export foo.copy foo.out
copy=`md5sum foo.out | awk '{print $1}'`
if [ "$orig" != "$copy" ]; then
echo does not match
exit 1
fi
cleanup
echo OK
| 1,373 | 24.444444 | 100 | sh |
null | ceph-main/qa/workunits/rbd/diff_continuous.sh | #!/usr/bin/env bash
set -ex
set -o pipefail
function untar_workload() {
local i
for ((i = 0; i < 10; i++)); do
pv -L 10M linux-5.4.tar.gz > "${MOUNT}/linux-5.4.tar.gz"
tar -C "${MOUNT}" -xzf "${MOUNT}/linux-5.4.tar.gz"
sync "${MOUNT}"
rm -rf "${MOUNT}"/linux-5.4*
done
}
function check_object_map() {
local spec="$1"
rbd object-map check "${spec}"
local flags
flags="$(rbd info "${spec}" | grep 'flags: ')"
if [[ "${flags}" =~ object\ map\ invalid ]]; then
echo "Object map invalid at ${spec}"
exit 1
fi
if [[ "${flags}" =~ fast\ diff\ invalid ]]; then
echo "Fast diff invalid at ${spec}"
exit 1
fi
}
# RBD_DEVICE_TYPE is intended to be set from yaml, default to krbd
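# e.g. RBD_DEVICE_TYPE=nbd exercises rbd-nbd instead of the kernel
# client (assuming rbd-nbd is installed)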
readonly DEVICE_TYPE="${RBD_DEVICE_TYPE:-krbd}"
BASE_UUID="$(uuidgen)"
readonly BASE_UUID
readonly SIZE="2G"
readonly SRC="${BASE_UUID}-src"
readonly DST="${BASE_UUID}-dst"
readonly MOUNT="${BASE_UUID}-mnt"
rbd create -s "${SIZE}" --stripe-unit 64K --stripe-count 8 \
--image-feature exclusive-lock,object-map,fast-diff "${SRC}"
rbd create -s "${SIZE}" --object-size 512K "${DST}"
dev="$(sudo rbd device map -t "${DEVICE_TYPE}" "${SRC}")"
sudo mkfs.ext4 "${dev}"
mkdir "${MOUNT}"
sudo mount "${dev}" "${MOUNT}"
sudo chown "$(whoami)" "${MOUNT}"
# start untar in the background
wget https://download.ceph.com/qa/linux-5.4.tar.gz
untar_workload &
untar_pid=$!
# export initial incremental
snap_num=1
rbd snap create "${SRC}@snap${snap_num}"
rbd export-diff "${SRC}@snap${snap_num}" "${BASE_UUID}@snap${snap_num}.diff"
# keep exporting successive incrementals while untar is running
while kill -0 "${untar_pid}"; do
snap_num=$((snap_num + 1))
rbd snap create "${SRC}@snap${snap_num}"
sleep $((RANDOM % 4 + 1))
rbd export-diff --whole-object --from-snap "snap$((snap_num - 1))" \
"${SRC}@snap${snap_num}" "${BASE_UUID}@snap${snap_num}.diff"
done
sudo umount "${MOUNT}"
sudo rbd device unmap -t "${DEVICE_TYPE}" "${dev}"
if ! wait "${untar_pid}"; then
echo "untar_workload failed"
exit 1
fi
echo "Exported ${snap_num} incrementals"
if ((snap_num < 30)); then
echo "Too few incrementals"
exit 1
fi
# validate
for ((i = 1; i <= snap_num; i++)); do
rbd import-diff "${BASE_UUID}@snap${i}.diff" "${DST}"
src_sum="$(rbd export "${SRC}@snap${i}" - | md5sum | awk '{print $1}')"
dst_sum="$(rbd export "${DST}@snap${i}" - | md5sum | awk '{print $1}')"
if [[ "${src_sum}" != "${dst_sum}" ]]; then
echo "Mismatch at snap${i}: ${src_sum} != ${dst_sum}"
exit 1
fi
check_object_map "${SRC}@snap${i}"
# FIXME: this reproduces http://tracker.ceph.com/issues/37876
# there is no fstrim involved but "rbd import-diff" can produce
# write-zeroes requests which turn into discards under the hood
# actual: EXISTS, expected: EXISTS_CLEAN inconsistency is harmless
# from a data integrity POV and data is validated above regardless,
# so just waive it for now
#check_object_map "${DST}@snap${i}"
done
echo OK
| 3,089 | 27.878505 | 76 | sh |
null | ceph-main/qa/workunits/rbd/huge-tickets.sh | #!/usr/bin/env bash
# This is a test for http://tracker.ceph.com/issues/8979 and the fallout
# from triaging it. #8979 itself was random crashes on corrupted memory
# due to a buffer overflow (for tickets larger than 256 bytes); further
# inspection showed that vmalloced tickets weren't handled correctly
# either.
#
# What we are doing here is generating three huge keyrings and feeding
# them to libceph (through 'rbd map' on a scratch image). Bad kernels
# will crash reliably either on corrupted memory somewhere or a bad page
# fault in scatterwalk_pagedone().
set -ex
function generate_keyring() {
local user=$1
local n=$2
ceph-authtool -C -n client.$user --cap mon 'allow *' --gen-key /tmp/keyring-$user
set +x # don't pollute trace with echos
echo -en "\tcaps osd = \"allow rwx pool=rbd" >>/tmp/keyring-$user
for i in $(seq 1 $n); do
echo -n ", allow rwx pool=pool$i" >>/tmp/keyring-$user
done
echo "\"" >>/tmp/keyring-$user
set -x
}
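# The generated keyring looks roughly like this (key value illustrative):
# [client.foo]
# key = AQD...==
# caps mon = "allow *"
# caps osd = "allow rwx pool=rbd, allow rwx pool=pool1, ..., allow rwx pool=poolN"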
generate_keyring foo 1000 # ~25K, kmalloc
generate_keyring bar 20000 # ~500K, vmalloc
generate_keyring baz 300000 # ~8M, vmalloc + sg chaining
rbd create --size 1 test
for user in {foo,bar,baz}; do
ceph auth import -i /tmp/keyring-$user
DEV=$(sudo rbd map -n client.$user --keyring /tmp/keyring-$user test)
sudo rbd unmap $DEV
done
| 1,342 | 30.97619 | 85 | sh |
null | ceph-main/qa/workunits/rbd/image_read.sh | #!/usr/bin/env bash
# Copyright (C) 2013 Inktank Storage, Inc.
#
# This is free software; see the source for copying conditions.
# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as
# published by the Free Software Foundation version 2.
# Alex Elder <[email protected]>
# April 10, 2013
################################################################
# The purpose of this test is to validate that data read from a
# mapped rbd image is what it's expected to be.
#
# By default it creates an image and fills it with some data. It
# then reads back the data at a series of offsets known to cover
# various situations (such as reading the beginning, end, or the
# entirety of an object, or doing a read that spans multiple
# objects), and stashes the results in a set of local files.
#
# It also creates and maps a snapshot of the original image after
# it's been filled, and reads back the same ranges of data from the
# snapshot. It then compares the data read back with what was read
# back from the original image, verifying they match.
#
# Clone functionality is tested as well, in which case a clone is
# made of the snapshot, and the same ranges of data are again read
# and compared with the original. In addition, a snapshot of that
# clone is created, and a clone of *that* snapshot is put through
# the same set of tests. (Clone testing can be optionally skipped.)
################################################################
# Default parameter values. Environment variables, if set, will
# supersede these defaults. Such variables have names that begin
# with "IMAGE_READ_", e.g. use IMAGE_READ_PAGE_SIZE=65536
# to use 65536 as the page size.
set -e
DEFAULT_VERBOSE=true
DEFAULT_TEST_CLONES=true
DEFAULT_LOCAL_FILES=false
DEFAULT_FORMAT=2
DEFAULT_DOUBLE_ORDER=true
DEFAULT_HALF_ORDER=false
DEFAULT_PAGE_SIZE=4096
DEFAULT_OBJECT_ORDER=22
MIN_OBJECT_ORDER=12 # technically 9, but the rbd CLI enforces 12
MAX_OBJECT_ORDER=32
RBD_FORCE_ALLOW_V1=1
PROGNAME=$(basename $0)
ORIGINAL=original-$$
SNAP1=snap1-$$
CLONE1=clone1-$$
SNAP2=snap2-$$
CLONE2=clone2-$$
function err() {
if [ $# -gt 0 ]; then
echo "${PROGNAME}: $@" >&2
fi
exit 2
}
function usage() {
if [ $# -gt 0 ]; then
echo "" >&2
echo "${PROGNAME}: $@" >&2
fi
echo "" >&2
echo "Usage: ${PROGNAME} [<options>]" >&2
echo "" >&2
echo "options are:" >&2
echo " -o object_order" >&2
echo " must be ${MIN_OBJECT_ORDER}..${MAX_OBJECT_ORDER}" >&2
echo " -p page_size (in bytes)" >&2
echo " note: there must be at least 4 pages per object" >&2
echo " -1" >&2
echo " test using format 1 rbd images (default)" >&2
echo " -2" >&2
echo " test using format 2 rbd images" >&2
echo " -c" >&2
echo " also test rbd clone images (implies format 2)" >&2
echo " -d" >&2
echo " clone object order double its parent's (format 2)" >&2
echo " -h" >&2
echo " clone object order half of its parent's (format 2)" >&2
echo " -l" >&2
echo " use local files rather than rbd images" >&2
echo " -v" >&2
echo " disable reporting of what's going on" >&2
echo "" >&2
exit 1
}
function verbose() {
[ "${VERBOSE}" = true ] && echo "$@"
true # Don't let the verbose test spoil our return value
}
function quiet() {
"$@" 2> /dev/null
}
function boolean_toggle() {
[ $# -eq 1 ] || exit 99
test "$1" = "true" && echo false || echo true
}
function parseargs() {
local opts="o:p:12cdhlv"
local lopts="order:,page_size:,local,clone,double,half,verbose"
local parsed
local clone_order_msg
# use values from environment if available
VERBOSE="${IMAGE_READ_VERBOSE:-${DEFAULT_VERBOSE}}"
TEST_CLONES="${IMAGE_READ_TEST_CLONES:-${DEFAULT_TEST_CLONES}}"
LOCAL_FILES="${IMAGE_READ_LOCAL_FILES:-${DEFAULT_LOCAL_FILES}}"
DOUBLE_ORDER="${IMAGE_READ_DOUBLE_ORDER:-${DEFAULT_DOUBLE_ORDER}}"
HALF_ORDER="${IMAGE_READ_HALF_ORDER:-${DEFAULT_HALF_ORDER}}"
FORMAT="${IMAGE_READ_FORMAT:-${DEFAULT_FORMAT}}"
PAGE_SIZE="${IMAGE_READ_PAGE_SIZE:-${DEFAULT_PAGE_SIZE}}"
OBJECT_ORDER="${IMAGE_READ_OBJECT_ORDER:-${DEFAULT_OBJECT_ORDER}}"
parsed=$(getopt -o "${opts}" -l "${lopts}" -n "${PROGNAME}" -- "$@") ||
usage
eval set -- "${parsed}"
while true; do
case "$1" in
-v|--verbose)
VERBOSE=$(boolean_toggle "${VERBOSE}");;
-c|--clone)
TEST_CLONES=$(boolean_toggle "${TEST_CLONES}");;
-d|--double)
DOUBLE_ORDER=$(boolean_toggle "${DOUBLE_ORDER}");;
-h|--half)
HALF_ORDER=$(boolean_toggle "${HALF_ORDER}");;
-l|--local)
LOCAL_FILES=$(boolean_toggle "${LOCAL_FILES}");;
-1|-2)
FORMAT="${1:1}";;
-p|--page_size)
PAGE_SIZE="$2"; shift;;
-o|--order)
OBJECT_ORDER="$2"; shift;;
--)
shift; break;;
*)
err "getopt internal error"
esac
shift
done
[ $# -gt 0 ] && usage "excess arguments ($*)"
if [ "${TEST_CLONES}" = true ]; then
# If we're using different object orders for clones,
# make sure the limits are updated accordingly. If
# both "half" and "double" are specified, just
# ignore them both.
if [ "${DOUBLE_ORDER}" = true ]; then
if [ "${HALF_ORDER}" = true ]; then
DOUBLE_ORDER=false
HALF_ORDER=false
else
((MAX_OBJECT_ORDER -= 2))
fi
elif [ "${HALF_ORDER}" = true ]; then
((MIN_OBJECT_ORDER += 2))
fi
fi
[ "${OBJECT_ORDER}" -lt "${MIN_OBJECT_ORDER}" ] &&
usage "object order (${OBJECT_ORDER}) must be" \
"at least ${MIN_OBJECT_ORDER}"
[ "${OBJECT_ORDER}" -gt "${MAX_OBJECT_ORDER}" ] &&
usage "object order (${OBJECT_ORDER}) must be" \
"at most ${MAX_OBJECT_ORDER}"
if [ "${TEST_CLONES}" = true ]; then
if [ "${DOUBLE_ORDER}" = true ]; then
((CLONE1_ORDER = OBJECT_ORDER + 1))
((CLONE2_ORDER = OBJECT_ORDER + 2))
clone_order_msg="double"
elif [ "${HALF_ORDER}" = true ]; then
((CLONE1_ORDER = OBJECT_ORDER - 1))
((CLONE2_ORDER = OBJECT_ORDER - 2))
clone_order_msg="half of"
else
CLONE1_ORDER="${OBJECT_ORDER}"
CLONE2_ORDER="${OBJECT_ORDER}"
clone_order_msg="the same as"
fi
fi
[ "${TEST_CLONES}" != true ] || FORMAT=2
OBJECT_SIZE=$(echo "2 ^ ${OBJECT_ORDER}" | bc)
OBJECT_PAGES=$(echo "${OBJECT_SIZE} / ${PAGE_SIZE}" | bc)
IMAGE_SIZE=$((2 * 16 * OBJECT_SIZE / (1024 * 1024)))
[ "${IMAGE_SIZE}" -lt 1 ] && IMAGE_SIZE=1
IMAGE_OBJECTS=$((IMAGE_SIZE * (1024 * 1024) / OBJECT_SIZE))
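# (With the defaults, order 22 and 4096-byte pages, this gives 4 MiB
# objects, 1024 pages per object, a 128 MB image and 32 objects.)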
[ "${OBJECT_PAGES}" -lt 4 ] &&
usage "object size (${OBJECT_SIZE}) must be" \
"at least 4 * page size (${PAGE_SIZE})"
echo "parameters for this run:"
echo " format ${FORMAT} images will be tested"
echo " object order is ${OBJECT_ORDER}, so" \
"objects are ${OBJECT_SIZE} bytes"
echo " page size is ${PAGE_SIZE} bytes, so" \
"there are ${OBJECT_PAGES} pages in an object"
echo " derived image size is ${IMAGE_SIZE} MB, so" \
"there are ${IMAGE_OBJECTS} objects in an image"
if [ "${TEST_CLONES}" = true ]; then
echo " clone functionality will be tested"
echo " object size for a clone will be ${clone_order_msg}"
echo " the object size of its parent image"
fi
true # Don't let the clones test spoil our return value
}
function image_dev_path() {
[ $# -eq 1 ] || exit 99
local image_name="$1"
if [ "${LOCAL_FILES}" = true ]; then
echo "${TEMP}/${image_name}"
return
fi
echo "/dev/rbd/rbd/${image_name}"
}
function out_data_dir() {
[ $# -lt 2 ] || exit 99
local out_data="${TEMP}/data"
local image_name
if [ $# -eq 1 ]; then
image_name="$1"
echo "${out_data}/${image_name}"
else
echo "${out_data}"
fi
}
function setup() {
verbose "===== setting up ====="
TEMP=$(mktemp -d /tmp/rbd_image_read.XXXXX)
mkdir -p $(out_data_dir)
# create and fill the original image with some data
create_image "${ORIGINAL}"
map_image "${ORIGINAL}"
fill_original
# create a snapshot of the original
create_image_snap "${ORIGINAL}" "${SNAP1}"
map_image_snap "${ORIGINAL}" "${SNAP1}"
if [ "${TEST_CLONES}" = true ]; then
# create a clone of the original snapshot
create_snap_clone "${ORIGINAL}" "${SNAP1}" \
"${CLONE1}" "${CLONE1_ORDER}"
map_image "${CLONE1}"
# create a snapshot of that clone
create_image_snap "${CLONE1}" "${SNAP2}"
map_image_snap "${CLONE1}" "${SNAP2}"
# create a clone of that clone's snapshot
create_snap_clone "${CLONE1}" "${SNAP2}" \
"${CLONE2}" "${CLONE2_ORDER}"
map_image "${CLONE2}"
fi
}
function teardown() {
verbose "===== cleaning up ====="
if [ "${TEST_CLONES}" = true ]; then
unmap_image "${CLONE2}" || true
destroy_snap_clone "${CLONE1}" "${SNAP2}" "${CLONE2}" || true
unmap_image_snap "${CLONE1}" "${SNAP2}" || true
destroy_image_snap "${CLONE1}" "${SNAP2}" || true
unmap_image "${CLONE1}" || true
destroy_snap_clone "${ORIGINAL}" "${SNAP1}" "${CLONE1}" || true
fi
unmap_image_snap "${ORIGINAL}" "${SNAP1}" || true
destroy_image_snap "${ORIGINAL}" "${SNAP1}" || true
unmap_image "${ORIGINAL}" || true
destroy_image "${ORIGINAL}" || true
rm -rf $(out_data_dir)
rmdir "${TEMP}"
}
function create_image() {
[ $# -eq 1 ] || exit 99
local image_name="$1"
local image_path
local bytes
verbose "creating image \"${image_name}\""
if [ "${LOCAL_FILES}" = true ]; then
image_path=$(image_dev_path "${image_name}")
bytes=$(echo "${IMAGE_SIZE} * 1024 * 1024 - 1" | bc)
quiet dd if=/dev/zero bs=1 count=1 seek="${bytes}" \
of="${image_path}"
return
fi
rbd create "${image_name}" --image-format "${FORMAT}" \
--size "${IMAGE_SIZE}" --order "${OBJECT_ORDER}" \
--image-shared
}
function destroy_image() {
[ $# -eq 1 ] || exit 99
local image_name="$1"
local image_path
verbose "destroying image \"${image_name}\""
if [ "${LOCAL_FILES}" = true ]; then
image_path=$(image_dev_path "${image_name}")
rm -f "${image_path}"
return
fi
rbd rm "${image_name}"
}
function map_image() {
[ $# -eq 1 ] || exit 99
local image_name="$1" # can be image@snap too
if [ "${LOCAL_FILES}" = true ]; then
return
fi
sudo rbd map "${image_name}"
}
function unmap_image() {
[ $# -eq 1 ] || exit 99
local image_name="$1" # can be image@snap too
local image_path
if [ "${LOCAL_FILES}" = true ]; then
return
fi
image_path=$(image_dev_path "${image_name}")
if [ -e "${image_path}" ]; then
sudo rbd unmap "${image_path}"
fi
}
function map_image_snap() {
[ $# -eq 2 ] || exit 99
local image_name="$1"
local snap_name="$2"
local image_snap
if [ "${LOCAL_FILES}" = true ]; then
return
fi
image_snap="${image_name}@${snap_name}"
map_image "${image_snap}"
}
function unmap_image_snap() {
[ $# -eq 2 ] || exit 99
local image_name="$1"
local snap_name="$2"
local image_snap
if [ "${LOCAL_FILES}" = true ]; then
return
fi
image_snap="${image_name}@${snap_name}"
unmap_image "${image_snap}"
}
function create_image_snap() {
[ $# -eq 2 ] || exit 99
local image_name="$1"
local snap_name="$2"
local image_snap="${image_name}@${snap_name}"
local image_path
local snap_path
verbose "creating snapshot \"${snap_name}\"" \
"of image \"${image_name}\""
if [ "${LOCAL_FILES}" = true ]; then
image_path=$(image_dev_path "${image_name}")
snap_path=$(image_dev_path "${image_snap}")
cp "${image_path}" "${snap_path}"
return
fi
rbd snap create "${image_snap}"
}
function destroy_image_snap() {
[ $# -eq 2 ] || exit 99
local image_name="$1"
local snap_name="$2"
local image_snap="${image_name}@${snap_name}"
local snap_path
verbose "destroying snapshot \"${snap_name}\"" \
"of image \"${image_name}\""
if [ "${LOCAL_FILES}" = true ]; then
snap_path=$(image_dev_path "${image_snap}")
rm -rf "${snap_path}"
return
fi
rbd snap rm "${image_snap}"
}
function create_snap_clone() {
[ $# -eq 4 ] || exit 99
local image_name="$1"
local snap_name="$2"
local clone_name="$3"
local clone_order="$4"
local image_snap="${image_name}@${snap_name}"
local snap_path
local clone_path
verbose "creating clone image \"${clone_name}\"" \
"of image snapshot \"${image_name}@${snap_name}\""
if [ "${LOCAL_FILES}" = true ]; then
snap_path=$(image_dev_path "${image_name}@${snap_name}")
clone_path=$(image_dev_path "${clone_name}")
cp "${snap_path}" "${clone_path}"
return
fi
rbd snap protect "${image_snap}"
rbd clone --order "${clone_order}" --image-shared \
"${image_snap}" "${clone_name}"
}
function destroy_snap_clone() {
[ $# -eq 3 ] || exit 99
local image_name="$1"
local snap_name="$2"
local clone_name="$3"
local image_snap="${image_name}@${snap_name}"
local clone_path
verbose "destroying clone image \"${clone_name}\""
if [ "${LOCAL_FILES}" = true ]; then
clone_path=$(image_dev_path "${clone_name}")
rm -rf "${clone_path}"
return
fi
rbd rm "${clone_name}"
rbd snap unprotect "${image_snap}"
}
# function that produces "random" data with which to fill the image
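# (it repeatedly dd's the bash binary; the loop ends once the consumer
# of the pipe exits and dd's write fails)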
function source_data() {
while quiet dd if=/bin/bash skip=$(($$ % 199)) bs="${PAGE_SIZE}"; do
: # Just do the dd
done
}
function fill_original() {
local image_path=$(image_dev_path "${ORIGINAL}")
verbose "filling original image"
# Fill 16 objects worth of "random" data
source_data |
quiet dd bs="${PAGE_SIZE}" count=$((16 * OBJECT_PAGES)) \
of="${image_path}"
}
function do_read() {
[ $# -eq 3 -o $# -eq 4 ] || exit 99
local image_name="$1"
local offset="$2"
local length="$3"
[ "${length}" -gt 0 ] || err "do_read: length must be non-zero"
local image_path=$(image_dev_path "${image_name}")
local out_data=$(out_data_dir "${image_name}")
local range=$(printf "%06u~%04u" "${offset}" "${length}")
local out_file
[ $# -eq 4 ] && offset=$((offset + 16 * OBJECT_PAGES))
verbose "reading \"${image_name}\" pages ${range}"
out_file="${out_data}/pages_${range}"
quiet dd bs="${PAGE_SIZE}" skip="${offset}" count="${length}" \
if="${image_path}" of="${out_file}"
}
function one_pass() {
[ $# -eq 1 -o $# -eq 2 ] || exit 99
local image_name="$1"
local extended
[ $# -eq 2 ] && extended="true"
local offset
local length
offset=0
# +-----------+-----------+---
# |X:X:X...X:X| : : ... : | :
# +-----------+-----------+---
length="${OBJECT_PAGES}"
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
# ---+-----------+---
# : |X: : ... : | :
# ---+-----------+---
length=1
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
# ---+-----------+---
# : | :X: ... : | :
# ---+-----------+---
length=1
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
# ---+-----------+---
# : | : :X...X: | :
# ---+-----------+---
length=$((OBJECT_PAGES - 3))
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
# ---+-----------+---
# : | : : ... :X| :
# ---+-----------+---
length=1
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
# ---+-----------+---
# : |X:X:X...X:X| :
# ---+-----------+---
length="${OBJECT_PAGES}"
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
offset=$((offset + 1)) # skip 1
# ---+-----------+---
# : | :X:X...X:X| :
# ---+-----------+---
length=$((OBJECT_PAGES - 1))
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
# ---+-----------+-----------+---
# : |X:X:X...X:X|X: : ... : | :
# ---+-----------+-----------+---
length=$((OBJECT_PAGES + 1))
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
# ---+-----------+-----------+---
# : | :X:X...X:X|X: : ... : | :
# ---+-----------+-----------+---
length="${OBJECT_PAGES}"
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
# ---+-----------+-----------+---
# : | :X:X...X:X|X:X: ... : | :
# ---+-----------+-----------+---
length=$((OBJECT_PAGES + 1))
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
# ---+-----------+-----------+---
# : | : :X...X:X|X:X:X...X:X| :
# ---+-----------+-----------+---
length=$((2 * OBJECT_PAGES + 2))
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
offset=$((offset + 1)) # skip 1
# ---+-----------+-----------+-----
# : | :X:X...X:X|X:X:X...X:X|X: :
# ---+-----------+-----------+-----
length=$((2 * OBJECT_PAGES))
do_read "${image_name}" "${offset}" "${length}" ${extended}
offset=$((offset + length))
# --+-----------+-----------+--------
# : | :X:X...X:X|X:X:X...X:X|X:X: :
# --+-----------+-----------+--------
length=$((2 * OBJECT_PAGES + 1)) # 2049 with the default parameters
do_read "${image_name}" "${offset}" "${length}" ${extended}
# offset=$((offset + length))
}
function run_using() {
[ $# -eq 1 ] || exit 99
local image_name="$1"
local out_data=$(out_data_dir "${image_name}")
verbose "===== running using \"${image_name}\" ====="
mkdir -p "${out_data}"
one_pass "${image_name}"
one_pass "${image_name}" extended
}
function compare() {
[ $# -eq 1 ] || exit 99
local image_name="$1"
local out_data=$(out_data_dir "${image_name}")
local original=$(out_data_dir "${ORIGINAL}")
verbose "===== comparing \"${image_name}\" ====="
for i in $(ls "${original}"); do
verbose compare "\"${image_name}\" \"${i}\""
cmp "${original}/${i}" "${out_data}/${i}"
done
[ "${image_name}" = "${ORIGINAL}" ] || rm -rf "${out_data}"
}
function doit() {
[ $# -eq 1 ] || exit 99
local image_name="$1"
run_using "${image_name}"
compare "${image_name}"
}
########## Start
parseargs "$@"
trap teardown EXIT HUP INT
setup
run_using "${ORIGINAL}"
doit "${ORIGINAL}@${SNAP1}"
if [ "${TEST_CLONES}" = true ]; then
doit "${CLONE1}"
doit "${CLONE1}@${SNAP2}"
doit "${CLONE2}"
fi
rm -rf $(out_data_dir "${ORIGINAL}")
echo "Success!"
exit 0
| 17,897 | 25.281938 | 72 | sh |
null | ceph-main/qa/workunits/rbd/import_export.sh | #!/bin/sh -ex
# V1 image unsupported but required for testing purposes
export RBD_FORCE_ALLOW_V1=1
# returns data pool for a given image
get_image_data_pool () {
image=$1
data_pool=$(rbd info $image | grep "data_pool: " | awk -F':' '{ print $NF }')
if [ -z $data_pool ]; then
data_pool='rbd'
fi
echo $data_pool
}
# return list of object numbers populated in image
objects () {
image=$1
prefix=$(rbd info $image | grep block_name_prefix | awk '{print $NF;}')
# strip off the prefix and leading zeros from object names, then sort;
# a numeric sort isn't strictly meaningful for hex values, but it makes
# the list repeatable and comparable
objects=$(rados ls -p $(get_image_data_pool $image) | grep $prefix | \
sed -e 's/'$prefix'\.//' -e 's/^0*\([0-9a-f]\)/\1/' | sort -u)
echo $objects
}
# return false if the file contents differ or if their on-disk
# (allocated) sizes differ by more than a small tolerance
compare_files_and_ondisk_sizes () {
cmp -l $1 $2 || return 1
origsize=$(stat $1 --format %b)
exportsize=$(stat $2 --format %b)
difference=$(($exportsize - $origsize))
difference=${difference#-} # absolute value
test $difference -ge 0 -a $difference -lt 4096
}
TMPDIR=/tmp/rbd_import_export_$$
rm -rf $TMPDIR
mkdir $TMPDIR
trap "rm -rf $TMPDIR" INT TERM EXIT
# cannot import a dir
mkdir foo.$$
rbd import foo.$$ foo.dir && exit 1 || true # should fail
rmdir foo.$$
# create a sparse file
dd if=/bin/sh of=${TMPDIR}/img bs=1k count=1 seek=10
dd if=/bin/dd of=${TMPDIR}/img bs=1k count=10 seek=100
dd if=/bin/rm of=${TMPDIR}/img bs=1k count=100 seek=1000
dd if=/bin/ls of=${TMPDIR}/img bs=1k seek=10000
dd if=/bin/ln of=${TMPDIR}/img bs=1k seek=100000
dd if=/bin/grep of=${TMPDIR}/img bs=1k seek=1000000
rbd rm testimg || true
rbd import $RBD_CREATE_ARGS ${TMPDIR}/img testimg
rbd export testimg ${TMPDIR}/img2
rbd export testimg - > ${TMPDIR}/img3
rbd rm testimg
cmp ${TMPDIR}/img ${TMPDIR}/img2
cmp ${TMPDIR}/img ${TMPDIR}/img3
rm ${TMPDIR}/img2 ${TMPDIR}/img3
# try again, importing from stdin
rbd import $RBD_CREATE_ARGS - testimg < ${TMPDIR}/img
rbd export testimg ${TMPDIR}/img2
rbd export testimg - > ${TMPDIR}/img3
rbd rm testimg
cmp ${TMPDIR}/img ${TMPDIR}/img2
cmp ${TMPDIR}/img ${TMPDIR}/img3
rm ${TMPDIR}/img ${TMPDIR}/img2 ${TMPDIR}/img3
if rbd help export | grep -q export-format; then
# try with --export-format for snapshots
dd if=/bin/dd of=${TMPDIR}/img bs=1k count=10 seek=100
rbd import $RBD_CREATE_ARGS ${TMPDIR}/img testimg
rbd snap create testimg@snap
rbd image-meta set testimg key1 value1
IMAGEMETA_BEFORE=`rbd image-meta list testimg`
rbd export --export-format 2 testimg ${TMPDIR}/img_v2
rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
rbd info testimg_import
rbd info testimg_import@snap
IMAGEMETA_AFTER=`rbd image-meta list testimg_import`
[ "$IMAGEMETA_BEFORE" = "$IMAGEMETA_AFTER" ]
# compare the contents between testimg and testimg_import
rbd export testimg_import ${TMPDIR}/img_import
compare_files_and_ondisk_sizes ${TMPDIR}/img ${TMPDIR}/img_import
rbd export testimg@snap ${TMPDIR}/img_snap
rbd export testimg_import@snap ${TMPDIR}/img_snap_import
compare_files_and_ondisk_sizes ${TMPDIR}/img_snap ${TMPDIR}/img_snap_import
rm ${TMPDIR}/img_v2
rm ${TMPDIR}/img_import
rm ${TMPDIR}/img_snap
rm ${TMPDIR}/img_snap_import
rbd snap rm testimg_import@snap
rbd remove testimg_import
rbd snap rm testimg@snap
rbd rm testimg
# order
rbd import --order 20 ${TMPDIR}/img testimg
rbd export --export-format 2 testimg ${TMPDIR}/img_v2
rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
rbd info testimg_import|grep order|awk '{print $2}'|grep 20
rm ${TMPDIR}/img_v2
rbd remove testimg_import
rbd remove testimg
# features
rbd import --image-feature layering ${TMPDIR}/img testimg
FEATURES_BEFORE=`rbd info testimg|grep features`
rbd export --export-format 2 testimg ${TMPDIR}/img_v2
rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
FEATURES_AFTER=`rbd info testimg_import|grep features`
if [ "$FEATURES_BEFORE" != "$FEATURES_AFTER" ]; then
false
fi
rm ${TMPDIR}/img_v2
rbd remove testimg_import
rbd remove testimg
# stripe
rbd import --stripe-count 1000 --stripe-unit 4096 ${TMPDIR}/img testimg
rbd export --export-format 2 testimg ${TMPDIR}/img_v2
rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
rbd info testimg_import|grep "stripe unit"|grep -Ei '(4 KiB|4096)'
rbd info testimg_import|grep "stripe count"|awk '{print $3}'|grep 1000
rm ${TMPDIR}/img_v2
rbd remove testimg_import
rbd remove testimg
# snap protect
rbd import --image-format=2 ${TMPDIR}/img testimg
rbd snap create testimg@snap1
rbd snap create testimg@snap2
rbd snap protect testimg@snap2
rbd export --export-format 2 testimg ${TMPDIR}/snap_protect
rbd import --export-format 2 ${TMPDIR}/snap_protect testimg_import
rbd info testimg_import@snap1 | grep 'protected: False'
rbd info testimg_import@snap2 | grep 'protected: True'
rm ${TMPDIR}/snap_protect
rbd snap unprotect testimg@snap2
rbd snap unprotect testimg_import@snap2
rbd snap purge testimg
rbd snap purge testimg_import
rbd remove testimg
rbd remove testimg_import
fi
tiered=0
if ceph osd dump | grep ^pool | grep "'rbd'" | grep tier; then
tiered=1
fi
# create specifically sparse files
# 1 1M block of sparse, 1 1M block of random
dd if=/dev/urandom bs=1M seek=1 count=1 of=${TMPDIR}/sparse1
# 1 1M block of random, 1 1M block of sparse
dd if=/dev/urandom bs=1M count=1 of=${TMPDIR}/sparse2; truncate ${TMPDIR}/sparse2 -s 2M
# 1M-block images; validate resulting blocks
# 1M sparse, 1M data
rbd rm sparse1 || true
rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse1
rbd ls -l | grep sparse1 | grep -Ei '(2 MiB|2048k)'
[ $tiered -eq 1 -o "$(objects sparse1)" = '1' ]
# export, compare contents and on-disk size
rbd export sparse1 ${TMPDIR}/sparse1.out
compare_files_and_ondisk_sizes ${TMPDIR}/sparse1 ${TMPDIR}/sparse1.out
rm ${TMPDIR}/sparse1.out
rbd rm sparse1
# 1M data, 1M sparse
rbd rm sparse2 || true
rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse2
rbd ls -l | grep sparse2 | grep -Ei '(2 MiB|2048k)'
[ $tiered -eq 1 -o "$(objects sparse2)" = '0' ]
rbd export sparse2 ${TMPDIR}/sparse2.out
compare_files_and_ondisk_sizes ${TMPDIR}/sparse2 ${TMPDIR}/sparse2.out
rm ${TMPDIR}/sparse2.out
rbd rm sparse2
# extend sparse1 to 10 1M blocks, sparse at the end
truncate ${TMPDIR}/sparse1 -s 10M
# import from stdin just for fun, verify still sparse
rbd import $RBD_CREATE_ARGS --order 20 - sparse1 < ${TMPDIR}/sparse1
rbd ls -l | grep sparse1 | grep -Ei '(10 MiB|10240k)'
[ $tiered -eq 1 -o "$(objects sparse1)" = '1' ]
rbd export sparse1 ${TMPDIR}/sparse1.out
compare_files_and_ondisk_sizes ${TMPDIR}/sparse1 ${TMPDIR}/sparse1.out
rm ${TMPDIR}/sparse1.out
rbd rm sparse1
# extend sparse2 to 4M total with two more nonsparse megs
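# (with --order 20 the image uses 1M objects: object 0 holds the original
# data, object 1 is the sparse megabyte and stays unallocated, and the
# appended data fills objects 2 and 3, hence the expected list '0 2 3')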
dd if=/dev/urandom bs=2M count=1 of=${TMPDIR}/sparse2 oflag=append conv=notrunc
# again from stdin
rbd import $RBD_CREATE_ARGS --order 20 - sparse2 < ${TMPDIR}/sparse2
rbd ls -l | grep sparse2 | grep -Ei '(4 MiB|4096k)'
[ $tiered -eq 1 -o "$(objects sparse2)" = '0 2 3' ]
rbd export sparse2 ${TMPDIR}/sparse2.out
compare_files_and_ondisk_sizes ${TMPDIR}/sparse2 ${TMPDIR}/sparse2.out
rm ${TMPDIR}/sparse2.out
rbd rm sparse2
# zeros import to a sparse image. Note: an all-zeros file currently
# doesn't import sparsely due to the way we handle 'empty' fiemaps;
# the image ends up zero-filled.
echo "partially-sparse file imports to partially-sparse image"
rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse1 sparse
[ $tiered -eq 1 -o "$(objects sparse)" = '1' ]
rbd rm sparse
echo "zeros import through stdin to sparse image"
# stdin
dd if=/dev/zero bs=1M count=4 | rbd import $RBD_CREATE_ARGS - sparse
[ $tiered -eq 1 -o "$(objects sparse)" = '' ]
rbd rm sparse
echo "zeros export to sparse file"
# We have to create the image "by hand"; import won't create an all-zero image
rbd create $RBD_CREATE_ARGS sparse --size 4
prefix=$(rbd info sparse | grep block_name_prefix | awk '{print $NF;}')
# drop in 0 object directly
dd if=/dev/zero bs=4M count=1 | rados -p $(get_image_data_pool sparse) \
put ${prefix}.000000000000 -
[ $tiered -eq 1 -o "$(objects sparse)" = '0' ]
# 1 object full of zeros; export should still create 0-disk-usage file
rm ${TMPDIR}/sparse || true
rbd export sparse ${TMPDIR}/sparse
[ $(stat ${TMPDIR}/sparse --format=%b) = '0' ]
rbd rm sparse
rm ${TMPDIR}/sparse ${TMPDIR}/sparse1 ${TMPDIR}/sparse2 ${TMPDIR}/sparse3 || true
echo OK
| 8,814 | 32.903846 | 87 | sh |
null | ceph-main/qa/workunits/rbd/issue-20295.sh | #!/bin/sh -ex
TEST_POOL=ecpool
TEST_IMAGE=test1
PGS=12
ceph osd pool create $TEST_POOL $PGS $PGS erasure
ceph osd pool application enable $TEST_POOL rbd
ceph osd pool set $TEST_POOL allow_ec_overwrites true
rbd --data-pool $TEST_POOL create --size 1024G $TEST_IMAGE
rbd bench \
--io-type write \
--io-size 4096 \
--io-pattern=rand \
--io-total 100M \
$TEST_IMAGE
echo "OK"
| 396 | 19.894737 | 58 | sh |
null | ceph-main/qa/workunits/rbd/journal.sh | #!/usr/bin/env bash
set -e
. $(dirname $0)/../../standalone/ceph-helpers.sh
function list_tests()
{
echo "AVAILABLE TESTS"
for i in $TESTS; do
echo " $i"
done
}
function usage()
{
echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...] [--no-cleanup]]"
}
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
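# The commit position is kept in the 'client_' omap key of the journal
# header object (journal.<id>); saving it before a workload and restoring
# it afterwards makes the journaled entries appear uncommitted again so
# they can be inspected and replayed.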
function save_commit_position()
{
local journal=$1
rados -p rbd getomapval journal.${journal} client_ \
$TMPDIR/${journal}.client_.omap
}
function restore_commit_position()
{
local journal=$1
rados -p rbd setomapval journal.${journal} client_ \
< $TMPDIR/${journal}.client_.omap
}
test_rbd_journal()
{
local image=testrbdjournal$$
rbd create --image-feature exclusive-lock --image-feature journaling \
--size 128 ${image}
local journal=$(rbd info ${image} --format=xml 2>/dev/null |
$XMLSTARLET sel -t -v "//image/journal")
test -n "${journal}"
rbd journal info ${journal}
rbd journal info --journal ${journal}
rbd journal info --image ${image}
rbd feature disable ${image} journaling
rbd info ${image} --format=xml 2>/dev/null |
expect_false $XMLSTARLET sel -t -v "//image/journal"
expect_false rbd journal info ${journal}
expect_false rbd journal info --image ${image}
rbd feature enable ${image} journaling
local journal1=$(rbd info ${image} --format=xml 2>/dev/null |
$XMLSTARLET sel -t -v "//image/journal")
test "${journal}" = "${journal1}"
rbd journal info ${journal}
rbd journal status ${journal}
local count=10
save_commit_position ${journal}
rbd bench --io-type write ${image} --io-size 4096 --io-threads 1 \
--io-total $((4096 * count)) --io-pattern seq
rbd journal status --image ${image} | fgrep "tid=$((count - 1))"
restore_commit_position ${journal}
rbd journal status --image ${image} | fgrep "positions=[]"
local count1=$(rbd journal inspect --verbose ${journal} |
grep -c 'event_type.*AioWrite')
test "${count}" -eq "${count1}"
rbd journal export ${journal} $TMPDIR/journal.export
local size=$(stat -c "%s" $TMPDIR/journal.export)
test "${size}" -gt 0
rbd export ${image} $TMPDIR/${image}.export
local image1=${image}1
rbd create --image-feature exclusive-lock --image-feature journaling \
--size 128 ${image1}
journal1=$(rbd info ${image1} --format=xml 2>/dev/null |
$XMLSTARLET sel -t -v "//image/journal")
save_commit_position ${journal1}
rbd journal import --dest ${image1} $TMPDIR/journal.export
rbd snap create ${image1}@test
restore_commit_position ${journal1}
# check that commit position is properly updated: the journal should contain
# 14 entries (2 AioFlush + 10 AioWrite + 1 SnapCreate + 1 OpFinish) and
# commit position set to tid=14
rbd journal inspect --image ${image1} --verbose | awk '
/AioFlush/ {a++} # match: "event_type": "AioFlush",
/AioWrite/ {w++} # match: "event_type": "AioWrite",
/SnapCreate/ {s++} # match: "event_type": "SnapCreate",
/OpFinish/ {f++} # match: "event_type": "OpFinish",
/entries inspected/ {t=$1; e=$4} # match: 14 entries inspected, 0 errors
{print} # for diagnostic
END {
if (a != 2 || w != 10 || s != 1 || f != 1 || t != 14 || e != 0) exit(1)
}
'
rbd export ${image1}@test $TMPDIR/${image1}.export
cmp $TMPDIR/${image}.export $TMPDIR/${image1}.export
rbd journal reset ${journal}
rbd journal inspect --verbose ${journal} | expect_false grep 'event_type'
rbd snap purge ${image1}
rbd remove ${image1}
rbd remove ${image}
}
rbd_assert_eq() {
local image=$1
local cmd=$2
local param=$3
local expected_val=$4
local val=$(rbd --format xml ${cmd} --image ${image} |
$XMLSTARLET sel -t -v "${param}")
test "${val}" = "${expected_val}"
}
test_rbd_create()
{
local image=testrbdcreate$$
rbd create --image-feature exclusive-lock --image-feature journaling \
--journal-pool rbd \
--journal-object-size 20M \
--journal-splay-width 6 \
--size 256 ${image}
rbd_assert_eq ${image} 'journal info' '//journal/order' 25
rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
rbd remove ${image}
}
test_rbd_copy()
{
local src=testrbdcopys$$
rbd create --size 256 ${src}
local image=testrbdcopy$$
rbd copy --image-feature exclusive-lock --image-feature journaling \
--journal-pool rbd \
--journal-object-size 20M \
--journal-splay-width 6 \
${src} ${image}
rbd remove ${src}
rbd_assert_eq ${image} 'journal info' '//journal/order' 25
rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
rbd remove ${image}
}
test_rbd_deep_copy()
{
local src=testrbdcopys$$
rbd create --size 256 ${src}
rbd snap create ${src}@snap1
local dest=testrbdcopy$$
rbd deep copy --image-feature exclusive-lock --image-feature journaling \
--journal-pool rbd \
--journal-object-size 20M \
--journal-splay-width 6 \
${src} ${dest}
rbd snap purge ${src}
rbd remove ${src}
rbd_assert_eq ${dest} 'journal info' '//journal/order' 25
rbd_assert_eq ${dest} 'journal info' '//journal/splay_width' 6
rbd_assert_eq ${dest} 'journal info' '//journal/object_pool' rbd
rbd snap purge ${dest}
rbd remove ${dest}
}
test_rbd_clone()
{
local parent=testrbdclonep$$
rbd create --image-feature layering --size 256 ${parent}
rbd snap create ${parent}@snap
rbd snap protect ${parent}@snap
local image=testrbdclone$$
rbd clone --image-feature layering --image-feature exclusive-lock --image-feature journaling \
--journal-pool rbd \
--journal-object-size 20M \
--journal-splay-width 6 \
${parent}@snap ${image}
rbd_assert_eq ${image} 'journal info' '//journal/order' 25
rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
rbd remove ${image}
rbd snap unprotect ${parent}@snap
rbd snap purge ${parent}
rbd remove ${parent}
}
test_rbd_import()
{
local src=testrbdimports$$
rbd create --size 256 ${src}
rbd export ${src} $TMPDIR/${src}.export
rbd remove ${src}
local image=testrbdimport$$
rbd import --image-feature exclusive-lock --image-feature journaling \
--journal-pool rbd \
--journal-object-size 20M \
--journal-splay-width 6 \
$TMPDIR/${src}.export ${image}
rbd_assert_eq ${image} 'journal info' '//journal/order' 25
rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
rbd remove ${image}
}
test_rbd_feature()
{
local image=testrbdfeature$$
rbd create --image-feature exclusive-lock --size 256 ${image}
rbd feature enable ${image} journaling \
--journal-pool rbd \
--journal-object-size 20M \
--journal-splay-width 6
rbd_assert_eq ${image} 'journal info' '//journal/order' 25
rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
rbd remove ${image}
}
TESTS+=" rbd_journal"
TESTS+=" rbd_create"
TESTS+=" rbd_copy"
TESTS+=" rbd_clone"
TESTS+=" rbd_import"
TESTS+=" rbd_feature"
#
# "main" follows
#
tests_to_run=()
cleanup=true
while [[ $# -gt 0 ]]; do
opt=$1
case "$opt" in
"-l" )
do_list=1
;;
"--no-cleanup" )
cleanup=false
;;
"-t" )
shift
if [[ -z "$1" ]]; then
echo "missing argument to '-t'"
usage ;
exit 1
fi
tests_to_run+=" $1"
;;
"-h" )
usage ;
exit 0
;;
esac
shift
done
if [[ $do_list -eq 1 ]]; then
list_tests ;
exit 0
fi
TMPDIR=/tmp/rbd_journal$$
mkdir $TMPDIR
if $cleanup; then
trap "rm -fr $TMPDIR" 0
fi
if test -z "$tests_to_run" ; then
tests_to_run="$TESTS"
fi
for i in $tests_to_run; do
set -x
test_${i}
set +x
done
echo OK
| 8,280 | 24.324159 | 98 | sh |
null | ceph-main/qa/workunits/rbd/kernel.sh | #!/usr/bin/env bash
set -ex
CEPH_SECRET_FILE=${CEPH_SECRET_FILE:-}
CEPH_ID=${CEPH_ID:-admin}
SECRET_ARGS=''
if [ ! -z $CEPH_SECRET_FILE ]; then
SECRET_ARGS="--secret $CEPH_SECRET_FILE"
fi
TMP_FILES="/tmp/img1 /tmp/img1.small /tmp/img1.snap1 /tmp/img1.export /tmp/img1.trunc"
function expect_false() {
if "$@"; then return 1; else return 0; fi
}
function get_device_dir {
local POOL=$1
local IMAGE=$2
local SNAP=$3
rbd device list | tail -n +2 | egrep "\s+$POOL\s+$IMAGE\s+$SNAP\s+" |
awk '{print $1;}'
}
function clean_up {
[ -e /dev/rbd/rbd/testimg1@snap1 ] &&
sudo rbd device unmap /dev/rbd/rbd/testimg1@snap1
if [ -e /dev/rbd/rbd/testimg1 ]; then
sudo rbd device unmap /dev/rbd/rbd/testimg1
rbd snap purge testimg1 || true
fi
rbd ls | grep testimg1 > /dev/null && rbd rm testimg1 || true
sudo rm -f $TMP_FILES
}
clean_up
trap clean_up INT TERM EXIT
# create an image
dd if=/bin/sh of=/tmp/img1 bs=1k count=1 seek=10
dd if=/bin/dd of=/tmp/img1 bs=1k count=10 seek=100
dd if=/bin/rm of=/tmp/img1 bs=1k count=100 seek=1000
dd if=/bin/ls of=/tmp/img1 bs=1k seek=10000
dd if=/bin/ln of=/tmp/img1 bs=1k seek=100000
dd if=/dev/zero of=/tmp/img1 count=0 seek=150000
# import
rbd import /tmp/img1 testimg1
sudo rbd device map testimg1 --user $CEPH_ID $SECRET_ARGS
DEV_ID1=$(get_device_dir rbd testimg1 -)
echo "dev_id1 = $DEV_ID1"
cat /sys/bus/rbd/devices/$DEV_ID1/size
cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 76800000
sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.export
cmp /tmp/img1 /tmp/img1.export
# snapshot
rbd snap create testimg1 --snap=snap1
sudo rbd device map --snap=snap1 testimg1 --user $CEPH_ID $SECRET_ARGS
DEV_ID2=$(get_device_dir rbd testimg1 snap1)
cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
sudo dd if=/dev/rbd/rbd/testimg1@snap1 of=/tmp/img1.snap1
cmp /tmp/img1 /tmp/img1.snap1
# resize
rbd resize testimg1 --size=40 --allow-shrink
cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 41943040
cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.small
cp /tmp/img1 /tmp/img1.trunc
truncate -s 41943040 /tmp/img1.trunc
cmp /tmp/img1.trunc /tmp/img1.small
# rollback expects an unlocked image
# (acquire and) release the lock as a side effect
rbd bench --io-type read --io-size 1 --io-threads 1 --io-total 1 testimg1
# rollback and check data again
rbd snap rollback --snap=snap1 testimg1
cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 76800000
cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
sudo rm -f /tmp/img1.snap1 /tmp/img1.export
sudo dd if=/dev/rbd/rbd/testimg1@snap1 of=/tmp/img1.snap1
cmp /tmp/img1 /tmp/img1.snap1
sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.export
cmp /tmp/img1 /tmp/img1.export
# zeros are returned if an image or a snapshot is removed
expect_false cmp -n 76800000 /dev/rbd/rbd/testimg1@snap1 /dev/zero
rbd snap rm --snap=snap1 testimg1
cmp -n 76800000 /dev/rbd/rbd/testimg1@snap1 /dev/zero
echo OK
| 2,952 | 28.237624 | 86 | sh |
null | ceph-main/qa/workunits/rbd/krbd_data_pool.sh | #!/usr/bin/env bash
set -ex
export RBD_FORCE_ALLOW_V1=1
function fill_image() {
local spec=$1
local dev
dev=$(sudo rbd map $spec)
xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 -W 0 $IMAGE_SIZE" $dev
sudo rbd unmap $dev
}
function create_clones() {
local spec=$1
rbd snap create $spec@snap
rbd snap protect $spec@snap
local pool=${spec%/*} # pool/image is assumed
local image=${spec#*/}
local child_pool
for child_pool in $pool clonesonly; do
rbd clone $spec@snap $child_pool/$pool-$image-clone1
rbd clone $spec@snap --data-pool repdata $child_pool/$pool-$image-clone2
rbd clone $spec@snap --data-pool ecdata $child_pool/$pool-$image-clone3
done
}
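# Write to the second half of every object in a clone. Each partial
# overwrite forces a copyup from the parent, so every clone object ends
# up as parent data (0x78) in the first half and new data (0x59) in the
# second half, matching OBJECT_XY below.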
function trigger_copyup() {
local spec=$1
local dev
dev=$(sudo rbd map $spec)
local i
{
for ((i = 0; i < $NUM_OBJECTS; i++)); do
echo pwrite -b $OBJECT_SIZE -S 0x59 $((i * OBJECT_SIZE + OBJECT_SIZE / 2)) $((OBJECT_SIZE / 2))
done
echo fsync
echo quit
} | xfs_io $dev
sudo rbd unmap $dev
}
function compare() {
local spec=$1
local object=$2
local dev
dev=$(sudo rbd map $spec)
local i
for ((i = 0; i < $NUM_OBJECTS; i++)); do
dd if=$dev bs=$OBJECT_SIZE count=1 skip=$i | cmp $object -
done
sudo rbd unmap $dev
}
function mkfs_and_mount() {
local spec=$1
local dev
dev=$(sudo rbd map $spec)
blkdiscard $dev
mkfs.ext4 -q -E nodiscard $dev
sudo mount $dev /mnt
sudo umount /mnt
sudo rbd unmap $dev
}
function list_HEADs() {
local pool=$1
rados -p $pool ls | while read obj; do
if rados -p $pool stat $obj >/dev/null 2>&1; then
echo $obj
fi
done
}
function count_data_objects() {
local spec=$1
local pool
pool=$(rbd info $spec | grep 'data_pool: ' | awk '{ print $NF }')
if [[ -z $pool ]]; then
pool=${spec%/*} # pool/image is assumed
fi
local prefix
prefix=$(rbd info $spec | grep 'block_name_prefix: ' | awk '{ print $NF }')
rados -p $pool ls | grep -c $prefix
}
function get_num_clones() {
local pool=$1
rados -p $pool --format=json df |
python3 -c 'import sys, json; print(json.load(sys.stdin)["pools"][0]["num_object_clones"])'
}
ceph osd pool create repdata 24 24
rbd pool init repdata
ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
ceph osd pool create ecdata 24 24 erasure teuthologyprofile
rbd pool init ecdata
ceph osd pool set ecdata allow_ec_overwrites true
ceph osd pool create rbdnonzero 24 24
rbd pool init rbdnonzero
ceph osd pool create clonesonly 24 24
rbd pool init clonesonly
for pool in rbd rbdnonzero; do
rbd create --size 200 --image-format 1 $pool/img0
rbd create --size 200 $pool/img1
rbd create --size 200 --data-pool repdata $pool/img2
rbd create --size 200 --data-pool ecdata $pool/img3
done
IMAGE_SIZE=$(rbd info --format=json img1 | python3 -c 'import sys, json; print(json.load(sys.stdin)["size"])')
OBJECT_SIZE=$(rbd info --format=json img1 | python3 -c 'import sys, json; print(json.load(sys.stdin)["object_size"])')
NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE))
[[ $((IMAGE_SIZE % OBJECT_SIZE)) -eq 0 ]]
OBJECT_X=$(mktemp) # xxxx
xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 0 $OBJECT_SIZE" $OBJECT_X
OBJECT_XY=$(mktemp) # xxYY
xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 0 $((OBJECT_SIZE / 2))" \
-c "pwrite -b $OBJECT_SIZE -S 0x59 $((OBJECT_SIZE / 2)) $((OBJECT_SIZE / 2))" \
$OBJECT_XY
for pool in rbd rbdnonzero; do
for i in {0..3}; do
fill_image $pool/img$i
if [[ $i -ne 0 ]]; then
create_clones $pool/img$i
for child_pool in $pool clonesonly; do
for j in {1..3}; do
trigger_copyup $child_pool/$pool-img$i-clone$j
done
done
fi
done
done
# rbd_directory, rbd_children, rbd_info + img0 header + ...
NUM_META_RBDS=$((3 + 1 + 3 * (1*2 + 3*2)))
# rbd_directory, rbd_children, rbd_info + ...
NUM_META_CLONESONLY=$((3 + 2 * 3 * (3*2)))
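# Roughly: 3 pool-level objects (rbd_directory, rbd_children, rbd_info),
# one header for the v1 image img0, and 2 metadata objects (id + header)
# for each v2 image and for each clone kept in the pool; clonesonly holds
# 3 clones for each of the 3 v2 images from both source pools.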
[[ $(rados -p rbd ls | wc -l) -eq $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
[[ $(rados -p repdata ls | wc -l) -eq $((1 + 14 * NUM_OBJECTS)) ]]
[[ $(rados -p ecdata ls | wc -l) -eq $((1 + 14 * NUM_OBJECTS)) ]]
[[ $(rados -p rbdnonzero ls | wc -l) -eq $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
[[ $(rados -p clonesonly ls | wc -l) -eq $((NUM_META_CLONESONLY + 6 * NUM_OBJECTS)) ]]
for pool in rbd rbdnonzero; do
for i in {0..3}; do
[[ $(count_data_objects $pool/img$i) -eq $NUM_OBJECTS ]]
if [[ $i -ne 0 ]]; then
for child_pool in $pool clonesonly; do
for j in {1..3}; do
[[ $(count_data_objects $child_pool/$pool-img$i-clone$j) -eq $NUM_OBJECTS ]]
done
done
fi
done
done
[[ $(get_num_clones rbd) -eq 0 ]]
[[ $(get_num_clones repdata) -eq 0 ]]
[[ $(get_num_clones ecdata) -eq 0 ]]
[[ $(get_num_clones rbdnonzero) -eq 0 ]]
[[ $(get_num_clones clonesonly) -eq 0 ]]
for pool in rbd rbdnonzero; do
for i in {0..3}; do
compare $pool/img$i $OBJECT_X
mkfs_and_mount $pool/img$i
if [[ $i -ne 0 ]]; then
for child_pool in $pool clonesonly; do
for j in {1..3}; do
compare $child_pool/$pool-img$i-clone$j $OBJECT_XY
done
done
fi
done
done
# mkfs_and_mount should discard some objects everywhere but in clonesonly
[[ $(list_HEADs rbd | wc -l) -lt $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
[[ $(list_HEADs repdata | wc -l) -lt $((1 + 14 * NUM_OBJECTS)) ]]
[[ $(list_HEADs ecdata | wc -l) -lt $((1 + 14 * NUM_OBJECTS)) ]]
[[ $(list_HEADs rbdnonzero | wc -l) -lt $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
[[ $(list_HEADs clonesonly | wc -l) -eq $((NUM_META_CLONESONLY + 6 * NUM_OBJECTS)) ]]
[[ $(get_num_clones rbd) -eq $NUM_OBJECTS ]]
[[ $(get_num_clones repdata) -eq $((2 * NUM_OBJECTS)) ]]
[[ $(get_num_clones ecdata) -eq $((2 * NUM_OBJECTS)) ]]
[[ $(get_num_clones rbdnonzero) -eq $NUM_OBJECTS ]]
[[ $(get_num_clones clonesonly) -eq 0 ]]
echo OK
| 6,189 | 28.903382 | 118 | sh |
null | ceph-main/qa/workunits/rbd/krbd_exclusive_option.sh | #!/usr/bin/env bash
set -ex
function expect_false() {
if "$@"; then return 1; else return 0; fi
}
function assert_locked() {
local dev_id="${1#/dev/rbd}"
local client_addr
client_addr="$(< $SYSFS_DIR/$dev_id/client_addr)"
local client_id
client_id="$(< $SYSFS_DIR/$dev_id/client_id)"
# client4324 -> client.4324
client_id="client.${client_id#client}"
local watch_cookie
watch_cookie="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID |
grep $client_id | cut -d ' ' -f 3 | cut -d '=' -f 2)"
[[ $(echo -n "$watch_cookie" | grep -c '^') -eq 1 ]]
local actual
actual="$(rados -p rbd --format=json lock info rbd_header.$IMAGE_ID rbd_lock |
python3 -m json.tool --sort-keys)"
local expected
expected="$(cat <<EOF | python3 -m json.tool --sort-keys
{
"lockers": [
{
"addr": "$client_addr",
"cookie": "auto $watch_cookie",
"description": "",
"expiration": "0.000000",
"name": "$client_id"
}
],
"name": "rbd_lock",
"tag": "internal",
"type": "exclusive"
}
EOF
)"
[ "$actual" = "$expected" ]
}
function assert_unlocked() {
rados -p rbd --format=json lock info rbd_header.$IMAGE_ID rbd_lock |
grep '"lockers":\[\]'
}
function blocklist_add() {
local dev_id="${1#/dev/rbd}"
local client_addr
client_addr="$(< $SYSFS_DIR/$dev_id/client_addr)"
ceph osd blocklist add $client_addr
}
SYSFS_DIR="/sys/bus/rbd/devices"
IMAGE_NAME="exclusive-option-test"
rbd create --size 1 --image-feature '' $IMAGE_NAME
IMAGE_ID="$(rbd info --format=json $IMAGE_NAME |
python3 -c "import sys, json; print(json.load(sys.stdin)['block_name_prefix'].split('.')[1])")"
DEV=$(sudo rbd map $IMAGE_NAME)
assert_unlocked
sudo rbd unmap $DEV
assert_unlocked
expect_false sudo rbd map -o exclusive $IMAGE_NAME
assert_unlocked
expect_false sudo rbd map -o lock_on_read $IMAGE_NAME
assert_unlocked
rbd feature enable $IMAGE_NAME exclusive-lock
rbd snap create $IMAGE_NAME@snap
DEV=$(sudo rbd map $IMAGE_NAME)
assert_locked $DEV
[[ $(blockdev --getro $DEV) -eq 0 ]]
sudo rbd unmap $DEV
assert_unlocked
DEV=$(sudo rbd map $IMAGE_NAME@snap)
assert_unlocked
[[ $(blockdev --getro $DEV) -eq 1 ]]
sudo rbd unmap $DEV
assert_unlocked
DEV=$(sudo rbd map -o ro $IMAGE_NAME)
assert_unlocked
[[ $(blockdev --getro $DEV) -eq 1 ]]
sudo rbd unmap $DEV
assert_unlocked
DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
assert_locked $DEV
[[ $(blockdev --getro $DEV) -eq 0 ]]
sudo rbd unmap $DEV
assert_unlocked
DEV=$(sudo rbd map -o exclusive $IMAGE_NAME@snap)
assert_unlocked
[[ $(blockdev --getro $DEV) -eq 1 ]]
sudo rbd unmap $DEV
assert_unlocked
DEV=$(sudo rbd map -o exclusive,ro $IMAGE_NAME)
assert_unlocked
[[ $(blockdev --getro $DEV) -eq 1 ]]
sudo rbd unmap $DEV
assert_unlocked
# alternate syntax
DEV=$(sudo rbd map --exclusive --read-only $IMAGE_NAME)
assert_unlocked
[[ $(blockdev --getro $DEV) -eq 1 ]]
sudo rbd unmap $DEV
assert_unlocked
DEV=$(sudo rbd map $IMAGE_NAME)
assert_locked $DEV
OTHER_DEV=$(sudo rbd map -o noshare $IMAGE_NAME)
assert_locked $OTHER_DEV
dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
assert_locked $DEV
dd if=/dev/urandom of=$OTHER_DEV bs=4k count=10 oflag=direct
assert_locked $OTHER_DEV
sudo rbd unmap $DEV
sudo rbd unmap $OTHER_DEV
assert_unlocked
DEV=$(sudo rbd map $IMAGE_NAME)
assert_locked $DEV
OTHER_DEV=$(sudo rbd map -o noshare,exclusive $IMAGE_NAME)
assert_locked $OTHER_DEV
dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
expect_false dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
assert_locked $OTHER_DEV
sudo rbd unmap $OTHER_DEV
assert_unlocked
dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
assert_unlocked
dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
assert_locked $DEV
sudo rbd unmap $DEV
assert_unlocked
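# with -o lock_on_read even reads need the lock, so both reads and writes
# fail while the exclusive peer holds it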
DEV=$(sudo rbd map -o lock_on_read $IMAGE_NAME)
assert_locked $DEV
OTHER_DEV=$(sudo rbd map -o noshare,exclusive $IMAGE_NAME)
assert_locked $OTHER_DEV
expect_false dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
expect_false dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
sudo udevadm settle
assert_locked $OTHER_DEV
sudo rbd unmap $OTHER_DEV
assert_unlocked
dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
assert_locked $DEV
dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
assert_locked $DEV
sudo rbd unmap $DEV
assert_unlocked
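# while an exclusive mapping holds the lock, additional mappings are refused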
DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
assert_locked $DEV
expect_false sudo rbd map -o noshare $IMAGE_NAME
assert_locked $DEV
sudo rbd unmap $DEV
assert_unlocked
DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
assert_locked $DEV
expect_false sudo rbd map -o noshare,exclusive $IMAGE_NAME
assert_locked $DEV
sudo rbd unmap $DEV
assert_unlocked
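# resize needs the lock: a plain mapping releases it on request, an
# exclusive mapping doesn't, so the resize fails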
DEV=$(sudo rbd map $IMAGE_NAME)
assert_locked $DEV
rbd resize --size 1G $IMAGE_NAME
assert_unlocked
sudo rbd unmap $DEV
assert_unlocked
DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
assert_locked $DEV
expect_false rbd resize --size 2G $IMAGE_NAME
assert_locked $DEV
sudo rbd unmap $DEV
assert_unlocked
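# blocklisting the mapped client makes its in-flight I/O fail; a new client
# can then map the image and break the stale lock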
DEV=$(sudo rbd map $IMAGE_NAME)
assert_locked $DEV
dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
{ sleep 10; blocklist_add $DEV; } &
PID=$!
expect_false dd if=/dev/urandom of=$DEV bs=4k count=200000 oflag=direct
wait $PID
# break lock
OTHER_DEV=$(sudo rbd map -o noshare $IMAGE_NAME)
assert_locked $OTHER_DEV
sudo rbd unmap $DEV
assert_locked $OTHER_DEV
sudo rbd unmap $OTHER_DEV
assert_unlocked
# induce a watch error after 30 seconds
DEV=$(sudo rbd map -o exclusive,osdkeepalive=60 $IMAGE_NAME)
assert_locked $DEV
OLD_WATCHER="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID)"
sleep 40
assert_locked $DEV
NEW_WATCHER="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID)"
# same client_id, old cookie < new cookie
[ "$(echo "$OLD_WATCHER" | cut -d ' ' -f 2)" = \
"$(echo "$NEW_WATCHER" | cut -d ' ' -f 2)" ]
[[ $(echo "$OLD_WATCHER" | cut -d ' ' -f 3 | cut -d '=' -f 2) -lt \
$(echo "$NEW_WATCHER" | cut -d ' ' -f 3 | cut -d '=' -f 2) ]]
sudo rbd unmap $DEV
assert_unlocked
echo OK
| 6,079 | 24.982906 | 99 | sh |
null | ceph-main/qa/workunits/rbd/krbd_fallocate.sh | #!/usr/bin/env bash
# - fallocate -z deallocates because BLKDEV_ZERO_NOUNMAP hint is ignored by
# krbd
#
# - big unaligned blkdiscard and fallocate -z/-p leave the objects in place
set -ex
# no blkdiscard(8) in trusty
function py_blkdiscard() {
local offset=$1
python3 <<EOF
import fcntl, struct
BLKDISCARD = 0x1277
with open('$DEV', 'w') as dev:
fcntl.ioctl(dev, BLKDISCARD, struct.pack('QQ', $offset, $IMAGE_SIZE - $offset))
EOF
}
# fallocate(1) in trusty doesn't support -z/-p
function py_fallocate() {
local mode=$1
local offset=$2
python3 <<EOF
import os, ctypes, ctypes.util
FALLOC_FL_KEEP_SIZE = 0x01
FALLOC_FL_PUNCH_HOLE = 0x02
FALLOC_FL_ZERO_RANGE = 0x10
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
with open('$DEV', 'w') as dev:
if libc.fallocate(dev.fileno(), ctypes.c_int($mode), ctypes.c_long($offset), ctypes.c_long($IMAGE_SIZE - $offset)):
err = ctypes.get_errno()
raise OSError(err, os.strerror(err))
EOF
}
function allocate() {
xfs_io -c "pwrite -b $OBJECT_SIZE -W 0 $IMAGE_SIZE" $DEV
assert_allocated
}
function assert_allocated() {
cmp <(od -xAx $DEV) - <<EOF
000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd
*
$(printf %x $IMAGE_SIZE)
EOF
[[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq $NUM_OBJECTS ]]
}
function assert_zeroes() {
local num_objects_expected=$1
cmp <(od -xAx $DEV) - <<EOF
000000 0000 0000 0000 0000 0000 0000 0000 0000
*
$(printf %x $IMAGE_SIZE)
EOF
[[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq $num_objects_expected ]]
}
function assert_zeroes_unaligned() {
local num_objects_expected=$1
cmp <(od -xAx $DEV) - <<EOF
000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd
*
$(printf %x $((OBJECT_SIZE / 2))) 0000 0000 0000 0000 0000 0000 0000 0000
*
$(printf %x $IMAGE_SIZE)
EOF
[[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq $num_objects_expected ]]
for ((i = 0; i < $num_objects_expected; i++)); do
rados -p rbd stat rbd_data.$IMAGE_ID.$(printf %016x $i) | egrep "(size $((OBJECT_SIZE / 2)))|(size 0)"
done
}
IMAGE_NAME="fallocate-test"
rbd create --size 200 $IMAGE_NAME
IMAGE_SIZE=$(rbd info --format=json $IMAGE_NAME | python3 -c 'import sys, json; print(json.load(sys.stdin)["size"])')
OBJECT_SIZE=$(rbd info --format=json $IMAGE_NAME | python3 -c 'import sys, json; print(json.load(sys.stdin)["object_size"])')
NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE))
[[ $((IMAGE_SIZE % OBJECT_SIZE)) -eq 0 ]]
IMAGE_ID="$(rbd info --format=json $IMAGE_NAME |
python3 -c "import sys, json; print(json.load(sys.stdin)['block_name_prefix'].split('.')[1])")"
DEV=$(sudo rbd map $IMAGE_NAME)
# make sure -ENOENT is hidden
assert_zeroes 0
py_blkdiscard 0
assert_zeroes 0
# blkdev_issue_discard
allocate
py_blkdiscard 0
assert_zeroes 0
# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOUNMAP
allocate
py_fallocate FALLOC_FL_ZERO_RANGE\|FALLOC_FL_KEEP_SIZE 0
assert_zeroes 0
# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOFALLBACK
allocate
py_fallocate FALLOC_FL_PUNCH_HOLE\|FALLOC_FL_KEEP_SIZE 0
assert_zeroes 0
# unaligned blkdev_issue_discard
allocate
py_blkdiscard $((OBJECT_SIZE / 2))
assert_zeroes_unaligned $NUM_OBJECTS
# unaligned blkdev_issue_zeroout w/ BLKDEV_ZERO_NOUNMAP
allocate
py_fallocate FALLOC_FL_ZERO_RANGE\|FALLOC_FL_KEEP_SIZE $((OBJECT_SIZE / 2))
assert_zeroes_unaligned $NUM_OBJECTS
# unaligned blkdev_issue_zeroout w/ BLKDEV_ZERO_NOFALLBACK
allocate
py_fallocate FALLOC_FL_PUNCH_HOLE\|FALLOC_FL_KEEP_SIZE $((OBJECT_SIZE / 2))
assert_zeroes_unaligned $NUM_OBJECTS
sudo rbd unmap $DEV
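# with -o notrim, discard and zero-with-unmap-fallback are rejected with
# EOPNOTSUPP; NOUNMAP zeroing still works but writes out zeroes instead of
# deallocating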
DEV=$(sudo rbd map -o notrim $IMAGE_NAME)
# blkdev_issue_discard
allocate
py_blkdiscard 0 |& grep 'Operation not supported'
assert_allocated
# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOUNMAP
allocate
py_fallocate FALLOC_FL_ZERO_RANGE\|FALLOC_FL_KEEP_SIZE 0
assert_zeroes $NUM_OBJECTS
# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOFALLBACK
allocate
py_fallocate FALLOC_FL_PUNCH_HOLE\|FALLOC_FL_KEEP_SIZE 0 |& grep 'Operation not supported'
assert_allocated
sudo rbd unmap $DEV
echo OK
| 4,056 | 25.690789 | 125 | sh |
null | ceph-main/qa/workunits/rbd/krbd_huge_osdmap.sh | #!/usr/bin/env bash
# This is a test for https://tracker.ceph.com/issues/40481.
#
# An osdmap with 60000 slots encodes to ~16M, of which the ignored portion
# is ~13M. However, the in-memory osdmap is larger than that ~3M remainder:
# the in-memory osd_addr array for 60000 OSDs alone is ~8M because of
# sockaddr_storage.
#
# Set mon_max_osd = 60000 in ceph.conf.
set -ex
function expect_false() {
if "$@"; then return 1; else return 0; fi
}
function run_test() {
local dev
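# mapping nonexistent "wait_for/latest_osdmap" is expected to fail, but it
# forces the kernel client to fetch and apply the latest osdmap before the
# write is issued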
# initially tiny, grow via incrementals
dev=$(sudo rbd map img)
for max in 8 60 600 6000 60000; do
ceph osd setmaxosd $max
expect_false sudo rbd map wait_for/latest_osdmap
xfs_io -c 'pwrite -w 0 12M' $dev
done
ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
expect_false sudo rbd map wait_for/latest_osdmap
xfs_io -c 'pwrite -w 0 12M' $dev
sudo rbd unmap $dev
# initially huge, shrink via incrementals
dev=$(sudo rbd map img)
for max in 60000 6000 600 60 8; do
ceph osd setmaxosd $max
expect_false sudo rbd map wait_for/latest_osdmap
xfs_io -c 'pwrite -w 0 12M' $dev
done
ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
expect_false sudo rbd map wait_for/latest_osdmap
xfs_io -c 'pwrite -w 0 12M' $dev
sudo rbd unmap $dev
}
rbd create --size 12M img
run_test
# repeat with primary affinity (adds an extra array)
ceph osd primary-affinity osd.0 0.5
run_test
echo OK
| 1,487 | 27.615385 | 76 | sh |
null | ceph-main/qa/workunits/rbd/krbd_latest_osdmap_on_map.sh | #!/bin/bash
set -ex
function run_test() {
ceph osd pool create foo 12
rbd pool init foo
rbd create --size 1 foo/img
local dev
dev=$(sudo rbd map foo/img)
sudo rbd unmap $dev
ceph osd pool delete foo foo --yes-i-really-really-mean-it
}
NUM_ITER=20
for ((i = 0; i < $NUM_ITER; i++)); do
run_test
done
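# repeat with an image mapped so the kernel client is reused across
# iterations and each map starts with a potentially stale osdmap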
rbd create --size 1 img
DEV=$(sudo rbd map img)
for ((i = 0; i < $NUM_ITER; i++)); do
run_test
done
sudo rbd unmap $DEV
echo OK
| 471 | 14.225806 | 62 | sh |
null | ceph-main/qa/workunits/rbd/krbd_namespaces.sh | #!/usr/bin/env bash
set -ex
function get_block_name_prefix() {
rbd info --format=json $1 | python3 -c "import sys, json; print(json.load(sys.stdin)['block_name_prefix'])"
}
function do_pwrite() {
local spec=$1
local old_byte=$2
local new_byte=$3
local dev
dev=$(sudo rbd map $spec)
cmp <(dd if=/dev/zero bs=1M count=10 | tr \\000 \\$old_byte) $dev
xfs_io -c "pwrite -b 1M -S $new_byte 0 10M" $dev
sudo rbd unmap $dev
}
function do_cmp() {
local spec=$1
local byte=$2
local dev
dev=$(sudo rbd map $spec)
cmp <(dd if=/dev/zero bs=1M count=10 | tr \\000 \\$byte) $dev
sudo rbd unmap $dev
}
function gen_child_specs() {
local i=$1
local child_specs="foo/img$i-clone1 foo/img$i-clone2 foo/ns1/img$i-clone1 foo/ns1/img$i-clone2"
if [[ $i -ge 3 ]]; then
child_specs="$child_specs foo/ns2/img$i-clone1 foo/ns2/img$i-clone2"
fi
echo $child_specs
}
ceph osd pool create foo 12
rbd pool init foo
ceph osd pool create bar 12
rbd pool init bar
ceph osd set-require-min-compat-client nautilus
rbd namespace create foo/ns1
rbd namespace create foo/ns2
SPECS=(foo/img1 foo/img2 foo/ns1/img3 foo/ns1/img4)
COUNT=1
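# img1/img3 use the pool's default data pool, img2/img4 use "bar" as
# a separate data pool; each image is filled with a distinct byte value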
for spec in "${SPECS[@]}"; do
if [[ $spec =~ img1|img3 ]]; then
rbd create --size 10 $spec
else
rbd create --size 10 --data-pool bar $spec
fi
do_pwrite $spec 000 $(printf %03d $COUNT)
rbd snap create $spec@snap
COUNT=$((COUNT + 1))
done
for i in {1..4}; do
for child_spec in $(gen_child_specs $i); do
if [[ $child_spec =~ clone1 ]]; then
rbd clone ${SPECS[i - 1]}@snap $child_spec
else
rbd clone --data-pool bar ${SPECS[i - 1]}@snap $child_spec
fi
do_pwrite $child_spec $(printf %03d $i) $(printf %03d $COUNT)
COUNT=$((COUNT + 1))
done
done
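# 10M written per image at the default 4M object size touches exactly
# 3 objects; verify they land in the expected pool and namespace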
[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img1)) -eq 3 ]]
[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img2)) -eq 3 ]]
[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img3)) -eq 3 ]]
[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img4)) -eq 3 ]]
[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img1-clone1)) -eq 3 ]]
[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img1-clone2)) -eq 3 ]]
[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img1-clone1)) -eq 3 ]]
[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img1-clone2)) -eq 3 ]]
[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img2-clone1)) -eq 3 ]]
[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img2-clone2)) -eq 3 ]]
[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img2-clone1)) -eq 3 ]]
[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img2-clone2)) -eq 3 ]]
[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img3-clone1)) -eq 3 ]]
[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img3-clone2)) -eq 3 ]]
[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img3-clone1)) -eq 3 ]]
[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img3-clone2)) -eq 3 ]]
[[ $(rados -p foo -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img3-clone1)) -eq 3 ]]
[[ $(rados -p bar -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img3-clone2)) -eq 3 ]]
[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img4-clone1)) -eq 3 ]]
[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img4-clone2)) -eq 3 ]]
[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img4-clone1)) -eq 3 ]]
[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img4-clone2)) -eq 3 ]]
[[ $(rados -p foo -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img4-clone1)) -eq 3 ]]
[[ $(rados -p bar -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img4-clone2)) -eq 3 ]]
COUNT=1
for spec in "${SPECS[@]}"; do
do_cmp $spec $(printf %03d $COUNT)
COUNT=$((COUNT + 1))
done
for i in {1..4}; do
for child_spec in $(gen_child_specs $i); do
do_cmp $child_spec $(printf %03d $COUNT)
COUNT=$((COUNT + 1))
done
done
echo OK
| 4,226 | 35.128205 | 111 | sh |
null | ceph-main/qa/workunits/rbd/krbd_rxbounce.sh | #!/usr/bin/env bash
set -ex
rbd create --size 256 img
IMAGE_SIZE=$(rbd info --format=json img | python3 -c 'import sys, json; print(json.load(sys.stdin)["size"])')
OBJECT_SIZE=$(rbd info --format=json img | python3 -c 'import sys, json; print(json.load(sys.stdin)["object_size"])')
NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE))
[[ $((IMAGE_SIZE % OBJECT_SIZE)) -eq 0 ]]
OP_SIZE=16384
DEV=$(sudo rbd map img)
{
for ((i = 0; i < $NUM_OBJECTS; i++)); do
echo pwrite -b $OP_SIZE -S $i $((i * OBJECT_SIZE)) $OP_SIZE
done
echo fsync
echo quit
} | xfs_io $DEV
sudo rbd unmap $DEV
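# build a reader that issues concurrent O_DIRECT reads, one thread per
# object, all into the same buffer, so the destination memory keeps
# changing while replies are still being received into it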
g++ -xc++ -o racereads - -lpthread <<EOF
#include <assert.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <thread>
#include <vector>
const int object_size = $OBJECT_SIZE;
const int num_objects = $NUM_OBJECTS;
const int read_len = $OP_SIZE;
const int num_reads = 1024;
int main() {
int fd = open("$DEV", O_DIRECT | O_RDONLY);
assert(fd >= 0);
void *buf;
int r = posix_memalign(&buf, 512, read_len);
assert(r == 0);
std::vector<std::thread> threads;
for (int i = 0; i < num_objects; i++) {
threads.emplace_back(
[fd, buf, read_off = static_cast<off_t>(i) * object_size]() {
for (int i = 0; i < num_reads; i++) {
auto len = pread(fd, buf, read_len, read_off);
assert(len == read_len);
}
});
}
for (auto &t : threads) {
t.join();
}
}
EOF
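# without rxbounce the racing reads are expected to trip checksum errors in
# legacy and crc modes; with rxbounce (or in secure mode) there should be none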
DEV=$(sudo rbd map -o ms_mode=legacy img)
sudo dmesg -C
./racereads
[[ $(dmesg | grep -c 'libceph: osd.* bad crc/signature') -gt 100 ]]
sudo rbd unmap $DEV
DEV=$(sudo rbd map -o ms_mode=legacy,rxbounce img)
sudo dmesg -C
./racereads
[[ $(dmesg | grep -c 'libceph: osd.* bad crc/signature') -eq 0 ]]
sudo rbd unmap $DEV
DEV=$(sudo rbd map -o ms_mode=crc img)
sudo dmesg -C
./racereads
[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -gt 100 ]]
sudo rbd unmap $DEV
DEV=$(sudo rbd map -o ms_mode=crc,rxbounce img)
sudo dmesg -C
./racereads
[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -eq 0 ]]
sudo rbd unmap $DEV
# rxbounce is a no-op for secure mode
DEV=$(sudo rbd map -o ms_mode=secure img)
sudo dmesg -C
./racereads
[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -eq 0 ]]
sudo rbd unmap $DEV
DEV=$(sudo rbd map -o ms_mode=secure,rxbounce img)
sudo dmesg -C
./racereads
[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -eq 0 ]]
sudo rbd unmap $DEV
rbd rm img
echo OK
| 2,500 | 23.048077 | 117 | sh |
null | ceph-main/qa/workunits/rbd/krbd_stable_writes.sh | #!/usr/bin/env bash
set -ex
function assert_dm() {
local name=$1
local val=$2
local devno
devno=$(sudo dmsetup info -c --noheadings -o Major,Minor $name)
grep -q $val /sys/dev/block/$devno/queue/stable_writes
}
function dmsetup_reload() {
local name=$1
local table
table=$(</dev/stdin)
sudo dmsetup suspend $name
echo "$table" | sudo dmsetup reload $name
sudo dmsetup resume $name
}
IMAGE_NAME="stable-writes-test"
rbd create --size 1 $IMAGE_NAME
DEV=$(sudo rbd map $IMAGE_NAME)
fallocate -l 1M loopfile
LOOP_DEV=$(sudo losetup -f --show loopfile)
[[ $(blockdev --getsize64 $DEV) -eq 1048576 ]]
grep -q 1 /sys/block/${DEV#/dev/}/queue/stable_writes
rbd resize --size 2 $IMAGE_NAME
[[ $(blockdev --getsize64 $DEV) -eq 2097152 ]]
grep -q 1 /sys/block/${DEV#/dev/}/queue/stable_writes
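# a dm device should require stable writes iff its table includes the rbd
# device; the loop device alone shouldn't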
cat <<EOF | sudo dmsetup create tbl
0 1024 linear $LOOP_DEV 0
EOF
assert_dm tbl 0
sudo dmsetup remove tbl
cat <<EOF | sudo dmsetup create tbl
0 1024 linear $DEV 0
EOF
assert_dm tbl 1
sudo dmsetup remove tbl
cat <<EOF | sudo dmsetup create tbl
0 1024 linear $LOOP_DEV 0
1024 2048 error
EOF
assert_dm tbl 0
sudo dmsetup remove tbl
cat <<EOF | sudo dmsetup create tbl
0 1024 linear $DEV 0
1024 2048 error
EOF
assert_dm tbl 1
sudo dmsetup remove tbl
cat <<EOF | sudo dmsetup create tbl
0 1024 linear $LOOP_DEV 0
1024 2048 linear $DEV 0
EOF
assert_dm tbl 1
sudo dmsetup remove tbl
cat <<EOF | sudo dmsetup create tbl
0 1024 linear $DEV 0
1024 2048 linear $LOOP_DEV 0
EOF
assert_dm tbl 1
sudo dmsetup remove tbl
cat <<EOF | sudo dmsetup create tbl
0 1024 linear $LOOP_DEV 0
EOF
assert_dm tbl 0
cat <<EOF | dmsetup_reload tbl
0 1024 linear $LOOP_DEV 0
1024 2048 linear $DEV 0
EOF
assert_dm tbl 1
cat <<EOF | dmsetup_reload tbl
0 1024 linear $LOOP_DEV 0
EOF
assert_dm tbl 0
sudo dmsetup remove tbl
cat <<EOF | sudo dmsetup create tbl
0 1024 linear $DEV 0
EOF
assert_dm tbl 1
cat <<EOF | dmsetup_reload tbl
0 1024 linear $DEV 0
1024 2048 linear $LOOP_DEV 0
EOF
assert_dm tbl 1
cat <<EOF | dmsetup_reload tbl
0 1024 linear $DEV 0
EOF
assert_dm tbl 1
sudo dmsetup remove tbl
cat <<EOF | sudo dmsetup create tbl
0 1024 linear $DEV 0
EOF
assert_dm tbl 1
cat <<EOF | dmsetup_reload tbl
0 1024 linear $DEV 0
1024 2048 linear $LOOP_DEV 0
EOF
assert_dm tbl 1
cat <<EOF | dmsetup_reload tbl
0 1024 error
1024 2048 linear $LOOP_DEV 0
EOF
assert_dm tbl 0
cat <<EOF | dmsetup_reload tbl
0 1024 linear $DEV 0
1024 2048 linear $LOOP_DEV 0
EOF
assert_dm tbl 1
cat <<EOF | dmsetup_reload tbl
0 1024 linear $DEV 0
EOF
assert_dm tbl 1
sudo dmsetup remove tbl
sudo losetup -d $LOOP_DEV
rm loopfile
sudo rbd unmap $DEV
rbd rm $IMAGE_NAME
echo OK
| 2,666 | 17.78169 | 67 | sh |
null | ceph-main/qa/workunits/rbd/krbd_udev_enumerate.sh | #!/usr/bin/env bash
# This is a test for https://tracker.ceph.com/issues/41036, but it also
# triggers https://tracker.ceph.com/issues/41404 in some environments.
set -ex
function assert_exit_codes() {
declare -a pids=($@)
for pid in ${pids[@]}; do
wait $pid
done
}
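# map 300 images in parallel; rbd showmapped prints a header line, hence 301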
function run_map() {
declare -a pids
for i in {1..300}; do
sudo rbd map img$i &
pids+=($!)
done
assert_exit_codes ${pids[@]}
[[ $(rbd showmapped | wc -l) -eq 301 ]]
}
function run_unmap_by_dev() {
declare -a pids
run_map
for i in {0..299}; do
sudo rbd unmap /dev/rbd$i &
pids+=($!)
done
assert_exit_codes ${pids[@]}
[[ $(rbd showmapped | wc -l) -eq 0 ]]
}
function run_unmap_by_spec() {
declare -a pids
run_map
for i in {1..300}; do
sudo rbd unmap img$i &
pids+=($!)
done
assert_exit_codes ${pids[@]}
[[ $(rbd showmapped | wc -l) -eq 0 ]]
}
# Can't test with exclusive-lock, don't bother enabling deep-flatten.
# See https://tracker.ceph.com/issues/42492.
for i in {1..300}; do
rbd create --size 1 --image-feature '' img$i
done
for i in {1..30}; do
echo Iteration $i
run_unmap_by_dev
run_unmap_by_spec
done
echo OK
| 1,248 | 17.641791 | 71 | sh |
null | ceph-main/qa/workunits/rbd/krbd_udev_netlink_enobufs.sh | #!/usr/bin/env bash
# This is a test for https://tracker.ceph.com/issues/41404, verifying that udev
# events are properly reaped while the image is being (un)mapped in the kernel.
# UDEV_BUF_SIZE is 1M (giving us a 2M socket receive buffer), but modprobe +
# modprobe -r generate ~28M worth of "block" events.
set -ex
rbd create --size 1 img
ceph osd pause
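# with OSDs paused, the map request blocks in the kernel while scsi_debug
# floods udev with block uevents that must still be reaped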
sudo rbd map img &
PID=$!
sudo modprobe scsi_debug max_luns=16 add_host=16 num_parts=1 num_tgts=16
sudo udevadm settle
sudo modprobe -r scsi_debug
[[ $(rbd showmapped | wc -l) -eq 0 ]]
ceph osd unpause
wait $PID
[[ $(rbd showmapped | wc -l) -eq 2 ]]
sudo rbd unmap img
echo OK
| 639 | 24.6 | 79 | sh |
null | ceph-main/qa/workunits/rbd/krbd_udev_netns.sh | #!/usr/bin/env bash
set -ex
sudo ip netns add ns1
sudo ip link add veth1-ext type veth peer name veth1-int
sudo ip link set veth1-int netns ns1
sudo ip netns exec ns1 ip link set dev lo up
sudo ip netns exec ns1 ip addr add 192.168.1.2/24 dev veth1-int
sudo ip netns exec ns1 ip link set veth1-int up
sudo ip netns exec ns1 ip route add default via 192.168.1.1
sudo ip addr add 192.168.1.1/24 dev veth1-ext
sudo ip link set veth1-ext up
# Enable forwarding between the namespace and the default route
# interface and set up NAT. In case of multiple default routes,
# just pick the first one.
if [[ $(sysctl -n net.ipv4.ip_forward) -eq 0 ]]; then
sudo iptables -P FORWARD DROP
sudo sysctl -w net.ipv4.ip_forward=1
fi
IFACE="$(ip route list 0.0.0.0/0 | head -n 1 | cut -d ' ' -f 5)"
sudo iptables -A FORWARD -i veth1-ext -o "$IFACE" -j ACCEPT
sudo iptables -A FORWARD -i "$IFACE" -o veth1-ext -j ACCEPT
sudo iptables -t nat -A POSTROUTING -s 192.168.1.2 -o "$IFACE" -j MASQUERADE
rbd create --size 300 img
DEV="$(sudo rbd map img)"
mkfs.ext4 "$DEV"
sudo mount "$DEV" /mnt
sudo umount /mnt
sudo rbd unmap "$DEV"
sudo ip netns exec ns1 bash <<'EOF'
set -ex
DEV="/dev/rbd/rbd/img"
[[ ! -e "$DEV" ]]
# In a network namespace, "rbd map" maps the device and hangs waiting
# for udev add uevents. udev runs as usual (in particular creating the
# symlink which is used here because the device node is never printed),
# but the uevents it sends out never come because they don't cross
# network namespace boundaries.
set +e
timeout 30s rbd map img
RET=$?
set -e
[[ $RET -eq 124 ]]
[[ -L "$DEV" ]]
mkfs.ext4 -F "$DEV"
mount "$DEV" /mnt
umount /mnt
# In a network namespace, "rbd unmap" unmaps the device and hangs
# waiting for udev remove uevents. udev runs as usual (removing the
# symlink), but the uevents it sends out never come because they don't
# cross network namespace boundaries.
set +e
timeout 30s rbd unmap "$DEV"
RET=$?
set -e
[[ $RET -eq 124 ]]
[[ ! -e "$DEV" ]]
# Skip waiting for udev uevents with "-o noudev".
DEV="$(rbd map -o noudev img)"
mkfs.ext4 -F "$DEV"
mount "$DEV" /mnt
umount /mnt
rbd unmap -o noudev "$DEV"
EOF
rbd rm img
sudo iptables -t nat -D POSTROUTING -s 192.168.1.2 -o "$IFACE" -j MASQUERADE
sudo iptables -D FORWARD -i "$IFACE" -o veth1-ext -j ACCEPT
sudo iptables -D FORWARD -i veth1-ext -o "$IFACE" -j ACCEPT
sudo ip netns delete ns1
echo OK
| 2,397 | 26.563218 | 76 | sh |
null | ceph-main/qa/workunits/rbd/krbd_udev_symlinks.sh | #!/usr/bin/env bash
set -ex
SPECS=(
rbd/img1
rbd/img2
rbd/img2@snap1
rbd/img3
rbd/img3@snap1
rbd/img3@snap2
rbd/ns1/img1
rbd/ns1/img2
rbd/ns1/img2@snap1
rbd/ns1/img3
rbd/ns1/img3@snap1
rbd/ns1/img3@snap2
rbd/ns2/img1
rbd/ns2/img2
rbd/ns2/img2@snap1
rbd/ns2/img3
rbd/ns2/img3@snap1
rbd/ns2/img3@snap2
custom/img1
custom/img1@snap1
custom/img2
custom/img2@snap1
custom/img2@snap2
custom/img3
custom/ns1/img1
custom/ns1/img1@snap1
custom/ns1/img2
custom/ns1/img2@snap1
custom/ns1/img2@snap2
custom/ns1/img3
custom/ns2/img1
custom/ns2/img1@snap1
custom/ns2/img2
custom/ns2/img2@snap1
custom/ns2/img2@snap2
custom/ns2/img3
)
ceph osd pool create custom 8
rbd pool init custom
ceph osd set-require-min-compat-client nautilus
rbd namespace create rbd/ns1
rbd namespace create rbd/ns2
rbd namespace create custom/ns1
rbd namespace create custom/ns2
# create in order, images before snapshots
for spec in "${SPECS[@]}"; do
if [[ "$spec" =~ snap ]]; then
rbd snap create "$spec"
else
rbd create --size 10 "$spec"
DEV="$(sudo rbd map "$spec")"
sudo sfdisk "$DEV" <<EOF
unit: sectors
${DEV}p1 : start= 2048, size= 2, type=83
${DEV}p2 : start= 4096, size= 2, type=83
EOF
sudo rbd unmap "$DEV"
fi
done
[[ ! -e /dev/rbd ]]
# map in random order
COUNT=${#SPECS[@]}
read -r -a INDEXES < <(python3 <<EOF
import random
l = list(range($COUNT))
random.shuffle(l)
print(*l)
EOF
)
DEVS=()
for idx in "${INDEXES[@]}"; do
DEVS+=("$(sudo rbd map "${SPECS[idx]}")")
done
[[ $(rbd showmapped | wc -l) -eq $((COUNT + 1)) ]]
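# each mapped spec should get a /dev/rbd/<spec> symlink plus -part1/-part2
# symlinks for the two partitions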
for ((i = 0; i < COUNT; i++)); do
[[ "$(readlink -e "/dev/rbd/${SPECS[INDEXES[i]]}")" == "${DEVS[i]}" ]]
[[ "$(readlink -e "/dev/rbd/${SPECS[INDEXES[i]]}-part1")" == "${DEVS[i]}p1" ]]
[[ "$(readlink -e "/dev/rbd/${SPECS[INDEXES[i]]}-part2")" == "${DEVS[i]}p2" ]]
done
for idx in "${INDEXES[@]}"; do
sudo rbd unmap "/dev/rbd/${SPECS[idx]}"
done
[[ ! -e /dev/rbd ]]
# remove in reverse order, snapshots before images
for ((i = COUNT - 1; i >= 0; i--)); do
if [[ "${SPECS[i]}" =~ snap ]]; then
rbd snap rm "${SPECS[i]}"
else
rbd rm "${SPECS[i]}"
fi
done
rbd namespace rm custom/ns2
rbd namespace rm custom/ns1
rbd namespace rm rbd/ns2
rbd namespace rm rbd/ns1
ceph osd pool delete custom custom --yes-i-really-really-mean-it
echo OK
| 2,375 | 19.307692 | 82 | sh |
null | ceph-main/qa/workunits/rbd/krbd_wac.sh | #!/usr/bin/env bash
set -ex
wget http://download.ceph.com/qa/wac.c
gcc -o wac wac.c
rbd create --size 300 img
DEV=$(sudo rbd map img)
sudo mkfs.ext4 $DEV
sudo mount $DEV /mnt
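# run wac under a 5 minute timeout; exit status 124 means timeout(1) stopped
# it, i.e. wac was still running and hadn't failed on its own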
set +e
sudo timeout 5m ./wac -l 65536 -n 64 -r /mnt/wac-test
RET=$?
set -e
[[ $RET -eq 124 ]]
sudo killall -w wac || true # wac forks
sudo umount /mnt
sudo wipefs -a $DEV
sudo vgcreate vg_img $DEV
sudo lvcreate -L 256M -n lv_img vg_img
udevadm settle
sudo mkfs.ext4 /dev/mapper/vg_img-lv_img
sudo mount /dev/mapper/vg_img-lv_img /mnt
set +e
sudo timeout 5m ./wac -l 65536 -n 64 -r /mnt/wac-test
RET=$?
set -e
[[ $RET -eq 124 ]]
sudo killall -w wac || true # wac forks
sudo umount /mnt
sudo vgremove -f vg_img
sudo pvremove $DEV
sudo rbd unmap $DEV
rbd rm img
echo OK
| 751 | 17.341463 | 53 | sh |