ceph-main/qa/workunits/rbd/luks-encryption.sh
#!/usr/bin/env bash
set -ex
CEPH_ID=${CEPH_ID:-admin}
TMP_FILES="/tmp/passphrase /tmp/passphrase2 /tmp/testdata1 /tmp/testdata2 /tmp/cmpdata"
_sudo()
{
local cmd
if [ `id -u` -eq 0 ]
then
"$@"
return $?
fi
# Look for the command in the user's PATH. If that fails, run it as is,
# assuming it is in sudo's path.
cmd=`which $1 2>/dev/null` || cmd=$1
shift
sudo -nE "${cmd}" "$@"
}
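# Usage sketch (illustrative): for a non-root user with /usr/bin/rbd in PATH,
# `_sudo rbd map testimg` runs `sudo -nE /usr/bin/rbd map testimg`; when
# already root it simply executes `rbd map testimg` directly.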
function drop_caches {
sudo sync
echo 3 | sudo tee /proc/sys/vm/drop_caches
}
function expect_false() {
if "$@"; then return 1; else return 0; fi
}
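# Usage sketch: `expect_false rbd resize --size 32M testimg` inverts the exit
# status — under set -e the script aborts only if the command unexpectedly
# succeeds.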
function test_encryption_format() {
local format=$1
clean_up_cryptsetup
# format
rbd encryption format testimg $format /tmp/passphrase
drop_caches
# open encryption with cryptsetup
sudo cryptsetup open $RAW_DEV --type luks cryptsetupdev -d /tmp/passphrase
sudo chmod 666 /dev/mapper/cryptsetupdev
# open encryption with librbd
LIBRBD_DEV=$(_sudo rbd -p rbd map testimg -t nbd -o encryption-passphrase-file=/tmp/passphrase)
sudo chmod 666 $LIBRBD_DEV
# write via librbd && compare
dd if=/tmp/testdata1 of=$LIBRBD_DEV oflag=direct bs=1M
dd if=/dev/mapper/cryptsetupdev of=/tmp/cmpdata iflag=direct bs=4M count=4
cmp -n 16MB /tmp/cmpdata /tmp/testdata1
# write via cryptsetup && compare
dd if=/tmp/testdata2 of=/dev/mapper/cryptsetupdev oflag=direct bs=1M
dd if=$LIBRBD_DEV of=/tmp/cmpdata iflag=direct bs=4M count=4
cmp -n 16MB /tmp/cmpdata /tmp/testdata2
# FIXME: encryption-aware flatten/resize misbehave if proxied to the
# RAW_DEV mapping (i.e. if the RAW_DEV mapping owns the lock)
# (acquire and) release the lock as a side effect
rbd bench --io-type read --io-size 1 --io-threads 1 --io-total 1 testimg
# check that encryption-aware resize compensates for the LUKS header overhead
(( $(sudo blockdev --getsize64 $LIBRBD_DEV) < (32 << 20) ))
expect_false rbd resize --size 32M testimg
rbd resize --size 32M --encryption-passphrase-file /tmp/passphrase testimg
(( $(sudo blockdev --getsize64 $LIBRBD_DEV) == (32 << 20) ))
_sudo rbd device unmap -t nbd $LIBRBD_DEV
}
function test_clone_encryption() {
clean_up_cryptsetup
# write 1MB plaintext
dd if=/tmp/testdata1 of=$RAW_DEV oflag=direct bs=1M count=1
# clone (luks1)
rbd snap create testimg@snap
rbd snap protect testimg@snap
rbd clone testimg@snap testimg1
rbd encryption format testimg1 luks1 /tmp/passphrase
# open encryption with librbd, write one more MB, close
LIBRBD_DEV=$(_sudo rbd -p rbd map testimg1 -t nbd -o encryption-format=luks1,encryption-passphrase-file=/tmp/passphrase)
sudo chmod 666 $LIBRBD_DEV
dd if=$LIBRBD_DEV of=/tmp/cmpdata iflag=direct bs=1M count=1
cmp -n 1MB /tmp/cmpdata /tmp/testdata1
dd if=/tmp/testdata1 of=$LIBRBD_DEV seek=1 skip=1 oflag=direct bs=1M count=1
_sudo rbd device unmap -t nbd $LIBRBD_DEV
# second clone (luks2)
rbd snap create testimg1@snap
rbd snap protect testimg1@snap
rbd clone testimg1@snap testimg2
rbd encryption format testimg2 luks2 /tmp/passphrase2
# open encryption with librbd, write one more MB, close
LIBRBD_DEV=$(_sudo rbd -p rbd map testimg2 -t nbd -o encryption-format=luks2,encryption-passphrase-file=/tmp/passphrase2,encryption-format=luks1,encryption-passphrase-file=/tmp/passphrase)
sudo chmod 666 $LIBRBD_DEV
dd if=$LIBRBD_DEV of=/tmp/cmpdata iflag=direct bs=1M count=2
cmp -n 2MB /tmp/cmpdata /tmp/testdata1
dd if=/tmp/testdata1 of=$LIBRBD_DEV seek=2 skip=2 oflag=direct bs=1M count=1
_sudo rbd device unmap -t nbd $LIBRBD_DEV
# flatten
expect_false rbd flatten testimg2 --encryption-format luks1 --encryption-format luks2 --encryption-passphrase-file /tmp/passphrase2 --encryption-passphrase-file /tmp/passphrase
rbd flatten testimg2 --encryption-format luks2 --encryption-format luks1 --encryption-passphrase-file /tmp/passphrase2 --encryption-passphrase-file /tmp/passphrase
# verify with cryptsetup
RAW_FLAT_DEV=$(_sudo rbd -p rbd map testimg2 -t nbd)
sudo cryptsetup open $RAW_FLAT_DEV --type luks cryptsetupdev -d /tmp/passphrase2
sudo chmod 666 /dev/mapper/cryptsetupdev
dd if=/dev/mapper/cryptsetupdev of=/tmp/cmpdata iflag=direct bs=1M count=3
cmp -n 3MB /tmp/cmpdata /tmp/testdata1
_sudo rbd device unmap -t nbd $RAW_FLAT_DEV
}
function test_clone_and_load_with_a_single_passphrase {
local expectedfail=$1
# clone and format
rbd snap create testimg@snap
rbd snap protect testimg@snap
rbd clone testimg@snap testimg1
rbd encryption format testimg1 luks2 /tmp/passphrase2
if [ "$expectedfail" = "true" ]
then
expect_false rbd flatten testimg1 --encryption-passphrase-file /tmp/passphrase2
rbd flatten testimg1 --encryption-passphrase-file /tmp/passphrase2 --encryption-passphrase-file /tmp/passphrase
else
rbd flatten testimg1 --encryption-passphrase-file /tmp/passphrase2
fi
rbd remove testimg1
rbd snap unprotect testimg@snap
rbd snap remove testimg@snap
}
function test_plaintext_detection {
# 16k LUKS header
sudo cryptsetup -q luksFormat --type luks2 --luks2-metadata-size 16k $RAW_DEV /tmp/passphrase
test_clone_and_load_with_a_single_passphrase true
# 4m LUKS header
sudo cryptsetup -q luksFormat --type luks2 --luks2-metadata-size 4m $RAW_DEV /tmp/passphrase
test_clone_and_load_with_a_single_passphrase true
# no luks header
dd if=/dev/zero of=$RAW_DEV oflag=direct bs=4M count=8
test_clone_and_load_with_a_single_passphrase false
}
function get_nbd_device_paths {
rbd device list -t nbd | tail -n +2 | egrep "\s+rbd\s+testimg" | awk '{print $5;}'
}
function clean_up_cryptsetup() {
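# close the cryptsetup device only if it exists; the trailing `|| true`
# prevents set -e from aborting when the device is already gone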
ls /dev/mapper/cryptsetupdev && sudo cryptsetup close cryptsetupdev || true
}
function clean_up {
sudo rm -f $TMP_FILES
clean_up_cryptsetup
for device in $(get_nbd_device_paths); do
_sudo rbd device unmap -t nbd $device
done
rbd remove testimg2 || true
rbd snap unprotect testimg1@snap || true
rbd snap remove testimg1@snap || true
rbd remove testimg1 || true
rbd snap unprotect testimg@snap || true
rbd snap remove testimg@snap || true
rbd remove testimg || true
}
if [[ $(uname) != "Linux" ]]; then
echo "LUKS encryption tests only supported on Linux"
exit 0
fi
if [[ $(($(ceph-conf --name client.${CEPH_ID} --show-config-value rbd_default_features) & 64)) != 0 ]]; then
echo "LUKS encryption tests not supported alongside image journaling feature"
exit 0
fi
clean_up
trap clean_up INT TERM EXIT
# generate test data
dd if=/dev/urandom of=/tmp/testdata1 bs=4M count=4
dd if=/dev/urandom of=/tmp/testdata2 bs=4M count=4
# create passphrase files
printf "pass\0word\n" > /tmp/passphrase
printf "\t password2 " > /tmp/passphrase2
# create an image
rbd create testimg --size=32M
# map raw data to nbd device
RAW_DEV=$(_sudo rbd -p rbd map testimg -t nbd)
sudo chmod 666 $RAW_DEV
test_plaintext_detection
test_encryption_format luks1
test_encryption_format luks2
test_clone_encryption
echo OK

ceph-main/qa/workunits/rbd/map-snapshot-io.sh
#!/bin/sh
# http://tracker.ceph.com/issues/3964
set -ex
rbd create image -s 100
DEV=$(sudo rbd map image)
dd if=/dev/zero of=$DEV oflag=direct count=10
rbd snap create image@s1
dd if=/dev/zero of=$DEV oflag=direct count=10 # used to fail
rbd snap rm image@s1
dd if=/dev/zero of=$DEV oflag=direct count=10
sudo rbd unmap $DEV
rbd rm image
echo OK

ceph-main/qa/workunits/rbd/map-unmap.sh
#!/usr/bin/env bash
set -ex
RUN_TIME=300 # approximate duration of run (seconds)
[ $# -eq 1 ] && RUN_TIME="$1"
IMAGE_NAME="image-$$"
IMAGE_SIZE="1024" # MB
function get_time() {
date '+%s'
}
function times_up() {
local end_time="$1"
test $(get_time) -ge "${end_time}"
}
function map_unmap() {
[ $# -eq 1 ] || exit 99
local image_name="$1"
local dev
dev="$(sudo rbd map "${image_name}")"
sudo rbd unmap "${dev}"
}
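# Usage sketch: `map_unmap "${IMAGE_NAME}"` maps the image to a free rbd
# device and immediately unmaps it; the loop below repeats this cycle until
# roughly RUN_TIME seconds have elapsed.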
#### Start
rbd create "${IMAGE_NAME}" --size="${IMAGE_SIZE}"
COUNT=0
START_TIME=$(get_time)
END_TIME=$(expr $(get_time) + ${RUN_TIME})
while ! times_up "${END_TIME}"; do
map_unmap "${IMAGE_NAME}"
COUNT=$(expr $COUNT + 1)
done
ELAPSED=$(expr "$(get_time)" - "${START_TIME}")
rbd rm "${IMAGE_NAME}"
echo "${COUNT} iterations completed in ${ELAPSED} seconds"

ceph-main/qa/workunits/rbd/merge_diff.sh
#!/usr/bin/env bash
set -ex
export RBD_FORCE_ALLOW_V1=1
pool=rbd
gen=$pool/gen
out=$pool/out
testno=1
mkdir -p merge_diff_test
pushd merge_diff_test
function expect_false()
{
if "$@"; then return 1; else return 0; fi
}
function clear_all()
{
fusermount -u mnt || true
rbd snap purge --no-progress $gen || true
rbd rm --no-progress $gen || true
rbd snap purge --no-progress $out || true
rbd rm --no-progress $out || true
rm -rf diffs || true
}
function rebuild()
{
clear_all
echo Starting test $testno
((testno++))
if [[ "$2" -lt "$1" ]] && [[ "$3" -gt "1" ]]; then
rbd create $gen --size 100 --object-size $1 --stripe-unit $2 --stripe-count $3 --image-format $4
else
rbd create $gen --size 100 --object-size $1 --image-format $4
fi
rbd create $out --size 1 --object-size 524288
mkdir -p mnt diffs
# lttng has atexit handlers that need to be fork/clone aware
LD_PRELOAD=liblttng-ust-fork.so.0 rbd-fuse -p $pool mnt
}
function write()
{
dd if=/dev/urandom of=mnt/gen bs=1M conv=notrunc seek=$1 count=$2
}
function snap()
{
rbd snap create $gen@$1
}
function resize()
{
rbd resize --no-progress $gen --size $1 --allow-shrink
}
function export_diff()
{
if [ $2 == "head" ]; then
target="$gen"
else
target="$gen@$2"
fi
if [ $1 == "null" ]; then
rbd export-diff --no-progress $target diffs/$1.$2
else
rbd export-diff --no-progress $target --from-snap $1 diffs/$1.$2
fi
}
function merge_diff()
{
rbd merge-diff diffs/$1.$2 diffs/$2.$3 diffs/$1.$3
}
function check()
{
rbd import-diff --no-progress diffs/$1.$2 $out || return -1
if [ "$2" == "head" ]; then
sum1=`rbd export $gen - | md5sum`
else
sum1=`rbd export $gen@$2 - | md5sum`
fi
sum2=`rbd export $out - | md5sum`
if [ "$sum1" != "$sum2" ]; then
exit -1
fi
if [ "$2" != "head" ]; then
rbd snap ls $out | awk '{print $2}' | grep "^$2\$" || return -1
fi
}
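# Illustrative flow of the helpers above: `export_diff null a` and
# `export_diff a b` write diffs/null.a and diffs/a.b; `merge_diff null a b`
# combines them into diffs/null.b; `check null b` then imports diffs/null.b
# into $out and verifies its md5 matches a direct export of $gen@b.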
# test from/to header
rebuild 4194304 4194304 1 2
write 0 1
snap a
write 1 1
export_diff null a
export_diff a head
merge_diff null a head
check null head
rebuild 4194304 4194304 1 2
write 0 1
snap a
write 1 1
snap b
write 2 1
export_diff null a
export_diff a b
export_diff b head
merge_diff null a b
check null b
rebuild 4194304 4194304 1 2
write 0 1
snap a
write 1 1
snap b
write 2 1
export_diff null a
export_diff a b
export_diff b head
merge_diff a b head
check null a
check a head
rebuild 4194304 4194304 1 2
write 0 1
snap a
write 1 1
snap b
write 2 1
export_diff null a
export_diff a b
export_diff b head
rbd merge-diff diffs/null.a diffs/a.b - | rbd merge-diff - diffs/b.head - > diffs/null.head
check null head
#data test
rebuild 4194304 4194304 1 2
write 4 2
snap s101
write 0 3
write 8 2
snap s102
export_diff null s101
export_diff s101 s102
merge_diff null s101 s102
check null s102
rebuild 4194304 4194304 1 2
write 0 3
write 2 5
write 8 2
snap s201
write 0 2
write 6 3
snap s202
export_diff null s201
export_diff s201 s202
merge_diff null s201 s202
check null s202
rebuild 4194304 4194304 1 2
write 0 4
write 12 6
snap s301
write 0 6
write 10 5
write 16 4
snap s302
export_diff null s301
export_diff s301 s302
merge_diff null s301 s302
check null s302
rebuild 4194304 4194304 1 2
write 0 12
write 14 2
write 18 2
snap s401
write 1 2
write 5 6
write 13 3
write 18 2
snap s402
export_diff null s401
export_diff s401 s402
merge_diff null s401 s402
check null s402
rebuild 4194304 4194304 1 2
write 2 4
write 10 12
write 27 6
write 36 4
snap s501
write 0 24
write 28 4
write 36 4
snap s502
export_diff null s501
export_diff s501 s502
merge_diff null s501 s502
check null s502
rebuild 4194304 4194304 1 2
write 0 8
resize 5
snap r1
resize 20
write 12 8
snap r2
resize 8
write 4 4
snap r3
export_diff null r1
export_diff r1 r2
export_diff r2 r3
merge_diff null r1 r2
merge_diff null r2 r3
check null r3
rebuild 4194304 4194304 1 2
write 0 8
resize 5
snap r1
resize 20
write 12 8
snap r2
resize 8
write 4 4
snap r3
resize 10
snap r4
export_diff null r1
export_diff r1 r2
export_diff r2 r3
export_diff r3 r4
merge_diff null r1 r2
merge_diff null r2 r3
merge_diff null r3 r4
check null r4
# merge diff doesn't yet support fancy striping
# rebuild 4194304 65536 8 2
# write 0 32
# snap r1
# write 16 32
# snap r2
# export_diff null r1
# export_diff r1 r2
# expect_false merge_diff null r1 r2
rebuild 4194304 4194304 1 2
write 0 1
write 2 1
write 4 1
write 6 1
snap s1
write 1 1
write 3 1
write 5 1
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 1 1
write 3 1
write 5 1
snap s1
write 0 1
write 2 1
write 4 1
write 6 1
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 0 3
write 6 3
write 12 3
snap s1
write 1 1
write 7 1
write 13 1
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 0 3
write 6 3
write 12 3
snap s1
write 0 1
write 6 1
write 12 1
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 0 3
write 6 3
write 12 3
snap s1
write 2 1
write 8 1
write 14 1
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 1 1
write 7 1
write 13 1
snap s1
write 0 3
write 6 3
write 12 3
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 0 1
write 6 1
write 12 1
snap s1
write 0 3
write 6 3
write 12 3
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 2 1
write 8 1
write 14 1
snap s1
write 0 3
write 6 3
write 12 3
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 0 3
write 6 3
write 12 3
snap s1
write 0 3
write 6 3
write 12 3
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 2 4
write 8 4
write 14 4
snap s1
write 0 3
write 6 3
write 12 3
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 0 4
write 6 4
write 12 4
snap s1
write 0 3
write 6 3
write 12 3
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 0 6
write 6 6
write 12 6
snap s1
write 0 3
write 6 3
write 12 3
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 3 6
write 9 6
write 15 6
snap s1
write 0 3
write 6 3
write 12 3
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 0 8
snap s1
resize 2
resize 100
snap s2
export_diff null s1
export_diff s1 s2
merge_diff null s1 s2
check null s2
rebuild 4194304 4194304 1 2
write 0 8
snap s1
resize 2
resize 100
snap s2
write 20 2
snap s3
export_diff null s1
export_diff s1 s2
export_diff s2 s3
merge_diff s1 s2 s3
check null s1
check s1 s3
#addme
clear_all
popd
rm -rf merge_diff_test
echo OK

ceph-main/qa/workunits/rbd/notify_master.sh
#!/bin/sh -ex
relpath=$(dirname $0)/../../../src/test/librbd
python3 $relpath/test_notify.py master
exit 0

ceph-main/qa/workunits/rbd/notify_slave.sh
#!/bin/sh -ex
relpath=$(dirname $0)/../../../src/test/librbd
python3 $relpath/test_notify.py slave
exit 0

ceph-main/qa/workunits/rbd/permissions.sh
#!/usr/bin/env bash
set -ex
IMAGE_FEATURES="layering,exclusive-lock,object-map,fast-diff"
clone_v2_enabled() {
image_spec=$1
rbd info $image_spec | grep "clone-parent"
}
create_pools() {
ceph osd pool create images 32
rbd pool init images
ceph osd pool create volumes 32
rbd pool init volumes
}
delete_pools() {
(ceph osd pool delete images images --yes-i-really-really-mean-it || true) >/dev/null 2>&1
(ceph osd pool delete volumes volumes --yes-i-really-really-mean-it || true) >/dev/null 2>&1
}
recreate_pools() {
delete_pools
create_pools
}
delete_users() {
(ceph auth del client.volumes || true) >/dev/null 2>&1
(ceph auth del client.images || true) >/dev/null 2>&1
(ceph auth del client.snap_none || true) >/dev/null 2>&1
(ceph auth del client.snap_all || true) >/dev/null 2>&1
(ceph auth del client.snap_pool || true) >/dev/null 2>&1
(ceph auth del client.snap_profile_all || true) >/dev/null 2>&1
(ceph auth del client.snap_profile_pool || true) >/dev/null 2>&1
(ceph auth del client.mon_write || true) >/dev/null 2>&1
}
create_users() {
ceph auth get-or-create client.volumes \
mon 'profile rbd' \
osd 'profile rbd pool=volumes, profile rbd-read-only pool=images' \
mgr 'profile rbd pool=volumes, profile rbd-read-only pool=images' >> $KEYRING
ceph auth get-or-create client.images mon 'profile rbd' osd 'profile rbd pool=images' >> $KEYRING
ceph auth get-or-create client.snap_none mon 'allow r' >> $KEYRING
ceph auth get-or-create client.snap_all mon 'allow r' osd 'allow w' >> $KEYRING
ceph auth get-or-create client.snap_pool mon 'allow r' osd 'allow w pool=images' >> $KEYRING
ceph auth get-or-create client.snap_profile_all mon 'allow r' osd 'profile rbd' >> $KEYRING
ceph auth get-or-create client.snap_profile_pool mon 'allow r' osd 'profile rbd pool=images' >> $KEYRING
ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
}
expect() {
set +e
local expected_ret=$1
local ret
shift
cmd=$@
eval $cmd
ret=$?
set -e
if [[ $ret -ne $expected_ret ]]; then
echo "ERROR: running \'$cmd\': expected $expected_ret got $ret"
return 1
fi
return 0
}
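# Usage sketch: the expected exit codes used below mirror errno values
# surfaced by rbd, e.g. 16 = EBUSY (snapshot still protected or in use),
# 39 = ENOTEMPTY (image still has snapshots), 13 = EACCES, 2 = ENOENT.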
test_images_access() {
rbd -k $KEYRING --id images create --image-format 2 --image-feature $IMAGE_FEATURES -s 1 images/foo
rbd -k $KEYRING --id images snap create images/foo@snap
rbd -k $KEYRING --id images snap protect images/foo@snap
rbd -k $KEYRING --id images snap unprotect images/foo@snap
rbd -k $KEYRING --id images snap protect images/foo@snap
rbd -k $KEYRING --id images export images/foo@snap - >/dev/null
expect 16 rbd -k $KEYRING --id images snap rm images/foo@snap
rbd -k $KEYRING --id volumes clone --image-feature $IMAGE_FEATURES images/foo@snap volumes/child
if ! clone_v2_enabled images/foo; then
expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
fi
expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
expect 1 rbd -k $KEYRING --id images flatten volumes/child
rbd -k $KEYRING --id volumes flatten volumes/child
expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
rbd -k $KEYRING --id images snap unprotect images/foo@snap
expect 39 rbd -k $KEYRING --id images rm images/foo
rbd -k $KEYRING --id images snap rm images/foo@snap
rbd -k $KEYRING --id images rm images/foo
rbd -k $KEYRING --id volumes rm volumes/child
}
test_volumes_access() {
rbd -k $KEYRING --id images create --image-format 2 --image-feature $IMAGE_FEATURES -s 1 images/foo
rbd -k $KEYRING --id images snap create images/foo@snap
rbd -k $KEYRING --id images snap protect images/foo@snap
# commands that work with read-only access
rbd -k $KEYRING --id volumes info images/foo@snap
rbd -k $KEYRING --id volumes snap ls images/foo
rbd -k $KEYRING --id volumes export images/foo - >/dev/null
rbd -k $KEYRING --id volumes cp images/foo volumes/foo_copy
rbd -k $KEYRING --id volumes rm volumes/foo_copy
rbd -k $KEYRING --id volumes children images/foo@snap
rbd -k $KEYRING --id volumes lock list images/foo
# commands that fail with read-only access
expect 1 rbd -k $KEYRING --id volumes resize -s 2 images/foo --allow-shrink
expect 1 rbd -k $KEYRING --id volumes snap create images/foo@2
expect 1 rbd -k $KEYRING --id volumes snap rollback images/foo@snap
expect 1 rbd -k $KEYRING --id volumes snap remove images/foo@snap
expect 1 rbd -k $KEYRING --id volumes snap purge images/foo
expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
expect 1 rbd -k $KEYRING --id volumes flatten images/foo
expect 1 rbd -k $KEYRING --id volumes lock add images/foo test
expect 1 rbd -k $KEYRING --id volumes lock remove images/foo test locker
expect 1 rbd -k $KEYRING --id volumes ls rbd
# create clone and snapshot
rbd -k $KEYRING --id volumes clone --image-feature $IMAGE_FEATURES images/foo@snap volumes/child
rbd -k $KEYRING --id volumes snap create volumes/child@snap1
rbd -k $KEYRING --id volumes snap protect volumes/child@snap1
rbd -k $KEYRING --id volumes snap create volumes/child@snap2
# make sure original snapshot stays protected
if clone_v2_enabled images/foo; then
rbd -k $KEYRING --id volumes flatten volumes/child
rbd -k $KEYRING --id volumes snap rm volumes/child@snap2
rbd -k $KEYRING --id volumes snap unprotect volumes/child@snap1
else
expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
rbd -k $KEYRING --id volumes flatten volumes/child
expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
rbd -k $KEYRING --id volumes snap rm volumes/child@snap2
expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
expect 2 rbd -k $KEYRING --id volumes snap rm volumes/child@snap2
rbd -k $KEYRING --id volumes snap unprotect volumes/child@snap1
expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
fi
# clean up
rbd -k $KEYRING --id volumes snap rm volumes/child@snap1
rbd -k $KEYRING --id images snap unprotect images/foo@snap
rbd -k $KEYRING --id images snap rm images/foo@snap
rbd -k $KEYRING --id images rm images/foo
rbd -k $KEYRING --id volumes rm volumes/child
}
create_self_managed_snapshot() {
ID=$1
POOL=$2
cat << EOF | CEPH_ARGS="-k $KEYRING" python3
import rados
with rados.Rados(conffile="", rados_id="${ID}") as cluster:
ioctx = cluster.open_ioctx("${POOL}")
snap_id = ioctx.create_self_managed_snap()
print ("Created snap id {}".format(snap_id))
EOF
}
remove_self_managed_snapshot() {
ID=$1
POOL=$2
cat << EOF | CEPH_ARGS="-k $KEYRING" python3
import rados
with rados.Rados(conffile="", rados_id="mon_write") as cluster1, \
rados.Rados(conffile="", rados_id="${ID}") as cluster2:
ioctx1 = cluster1.open_ioctx("${POOL}")
snap_id = ioctx1.create_self_managed_snap()
print ("Created snap id {}".format(snap_id))
ioctx2 = cluster2.open_ioctx("${POOL}")
ioctx2.remove_self_managed_snap(snap_id)
print ("Removed snap id {}".format(snap_id))
EOF
}
test_remove_self_managed_snapshots() {
# Ensure users cannot create self-managed snapshots w/o permissions
expect 1 create_self_managed_snapshot snap_none images
expect 1 create_self_managed_snapshot snap_none volumes
create_self_managed_snapshot snap_all images
create_self_managed_snapshot snap_all volumes
create_self_managed_snapshot snap_pool images
expect 1 create_self_managed_snapshot snap_pool volumes
create_self_managed_snapshot snap_profile_all images
create_self_managed_snapshot snap_profile_all volumes
create_self_managed_snapshot snap_profile_pool images
expect 1 create_self_managed_snapshot snap_profile_pool volumes
# Ensure users cannot delete self-managed snapshots w/o permissions
expect 1 remove_self_managed_snapshot snap_none images
expect 1 remove_self_managed_snapshot snap_none volumes
remove_self_managed_snapshot snap_all images
remove_self_managed_snapshot snap_all volumes
remove_self_managed_snapshot snap_pool images
expect 1 remove_self_managed_snapshot snap_pool volumes
remove_self_managed_snapshot snap_profile_all images
remove_self_managed_snapshot snap_profile_all volumes
remove_self_managed_snapshot snap_profile_pool images
expect 1 remove_self_managed_snapshot snap_profile_pool volumes
}
test_rbd_support() {
# read-only commands should work on both pools
ceph -k $KEYRING --id volumes rbd perf image stats volumes
ceph -k $KEYRING --id volumes rbd perf image stats images
# read/write commands should only work on 'volumes'
rbd -k $KEYRING --id volumes create --image-format 2 --image-feature $IMAGE_FEATURES -s 1 volumes/foo
ceph -k $KEYRING --id volumes rbd task add remove volumes/foo
expect 13 ceph -k $KEYRING --id volumes rbd task add remove images/foo
}
cleanup() {
rm -f $KEYRING
}
KEYRING=$(mktemp)
trap cleanup EXIT ERR HUP INT QUIT
delete_users
create_users
recreate_pools
test_images_access
recreate_pools
test_volumes_access
test_remove_self_managed_snapshots
test_rbd_support
delete_pools
delete_users
echo OK
exit 0

ceph-main/qa/workunits/rbd/qemu-iotests.sh
#!/bin/sh -ex
# Run qemu-iotests against rbd. These are block-level tests that go
# through qemu but do not involve running a full vm. Note that these
# require the admin ceph user, as there's no way to pass the ceph user
# to qemu-iotests currently.
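# (For reference: the harness addresses images as rbd:<pool>/<image>, which is
# why TEST_DIR below is set to the pool name rather than a filesystem path.)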
testlist='001 002 003 004 005 008 009 010 011 021 025 032 033'
git clone https://github.com/qemu/qemu.git
cd qemu
if grep -iqE '(bionic|focal|jammy)' /etc/os-release; then
# Bionic requires a matching test harness
git checkout v2.11.0
elif grep -iqE '(xenial|platform:el8)' /etc/os-release; then
# Xenial requires a recent test harness
git checkout v2.3.0
else
# use v2.2.0-rc3 (last released version that handles all the tests)
git checkout 2528043f1f299e0e88cb026f1ca7c40bbb4e1f80
fi
cd tests/qemu-iotests
mkdir bin
# qemu-iotests expects a binary called just 'qemu' to be available
if [ -x '/usr/bin/qemu-system-x86_64' ]
then
QEMU='/usr/bin/qemu-system-x86_64'
# Bionic (v2.11.0) tests expect all tools in current directory
ln -s $QEMU qemu
ln -s /usr/bin/qemu-img
ln -s /usr/bin/qemu-io
ln -s /usr/bin/qemu-nbd
else
QEMU='/usr/libexec/qemu-kvm'
fi
ln -s $QEMU bin/qemu
# this is normally generated by configure, but has nothing but a python
# binary definition, which we don't care about. for some reason it is
# not present on trusty.
touch common.env
# TEST_DIR is the pool for rbd
TEST_DIR=rbd PATH="$PATH:$PWD/bin" ./check -rbd $testlist
cd ../../..
rm -rf qemu

ceph-main/qa/workunits/rbd/qemu_dynamic_features.sh
#!/usr/bin/env bash
set -x
if [[ -z "${IMAGE_NAME}" ]]; then
echo image name must be provided
exit 1
fi
is_qemu_running() {
rbd status ${IMAGE_NAME} | grep -v "Watchers: none"
}
wait_for_qemu() {
while ! is_qemu_running ; do
echo "*** Waiting for QEMU"
sleep 30
done
}
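# The loop below toggles exclusive-lock, object-map and journaling while QEMU
# keeps the image open, exercising dynamic feature updates against a live
# client; any rbd failure breaks out of the loop and is reported at the end.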
wait_for_qemu
rbd feature disable ${IMAGE_NAME} journaling
rbd feature disable ${IMAGE_NAME} object-map
rbd feature disable ${IMAGE_NAME} exclusive-lock
while is_qemu_running ; do
echo "*** Enabling all features"
rbd feature enable ${IMAGE_NAME} exclusive-lock || break
rbd feature enable ${IMAGE_NAME} journaling || break
rbd feature enable ${IMAGE_NAME} object-map || break
if is_qemu_running ; then
sleep 60
fi
echo "*** Disabling all features"
rbd feature disable ${IMAGE_NAME} journaling || break
rbd feature disable ${IMAGE_NAME} object-map || break
rbd feature disable ${IMAGE_NAME} exclusive-lock || break
if is_qemu_running ; then
sleep 60
fi
done
if is_qemu_running ; then
echo "RBD command failed on alive QEMU"
exit 1
fi

ceph-main/qa/workunits/rbd/qemu_rebuild_object_map.sh
#!/usr/bin/env bash
set -ex
if [[ -z "${IMAGE_NAME}" ]]; then
echo image name must be provided
exit 1
fi
is_qemu_running() {
rbd status ${IMAGE_NAME} | grep -v "Watchers: none"
}
wait_for_qemu() {
while ! is_qemu_running ; do
echo "*** Waiting for QEMU"
sleep 30
done
}
wait_for_qemu
rbd feature disable ${IMAGE_NAME} journaling || true
rbd feature disable ${IMAGE_NAME} fast-diff || true
rbd feature disable ${IMAGE_NAME} object-map || true
rbd feature disable ${IMAGE_NAME} exclusive-lock || true
rbd feature enable ${IMAGE_NAME} exclusive-lock
rbd feature enable ${IMAGE_NAME} object-map
while is_qemu_running ; do
echo "*** Rebuilding object map"
rbd object-map rebuild ${IMAGE_NAME}
if is_qemu_running ; then
sleep 60
fi
done

ceph-main/qa/workunits/rbd/qos.sh
#!/bin/sh -ex
POOL=rbd
IMAGE=test$$
IMAGE_SIZE=1G
TOLERANCE_PRCNT=10
rbd_bench() {
local image=$1
local type=$2
local total=$3
local qos_type=$4
local qos_limit=$5
local iops_var_name=$6
local bps_var_name=$7
local timeout=$8
local timeout_cmd=""
if [ -n "${timeout}" ]; then
timeout_cmd="timeout --preserve-status ${timeout}"
fi
# parse `rbd bench` output for string like this:
# elapsed: 25 ops: 2560 ops/sec: 100.08 bytes/sec: 409.13 MiB
iops_bps=$(${timeout_cmd} rbd bench "${image}" \
--io-type ${type} --io-size 4K \
--io-total ${total} --rbd-cache=false \
--rbd_qos_${qos_type}_limit ${qos_limit} |
awk '/elapsed:.* GiB/ {print int($6) ":" int($8) * 1024 * 1024 * 1024}
/elapsed:.* MiB/ {print int($6) ":" int($8) * 1024 * 1024}
/elapsed:.* KiB/ {print int($6) ":" int($8) * 1024}
/elapsed:.* B/ {print int($6) ":" int($8)}')
eval ${iops_var_name}=${iops_bps%:*}
eval ${bps_var_name}=${iops_bps#*:}
}
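# Usage sketch: `rbd_bench "${POOL}/${IMAGE}" write ${io_total} iops 1000 iops bps`
# runs a 4K write benchmark with rbd_qos_iops_limit=1000 and stores the
# measured ops/sec and bytes/sec in the shell variables `iops` and `bps`.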
rbd create "${POOL}/${IMAGE}" -s ${IMAGE_SIZE}
rbd bench "${POOL}/${IMAGE}" --io-type write --io-size 4M --io-total ${IMAGE_SIZE}
rbd_bench "${POOL}/${IMAGE}" write ${IMAGE_SIZE} iops 0 iops bps 60
iops_unlimited=$iops
bps_unlimited=$bps
test "${iops_unlimited}" -ge 20 || exit 0
io_total=$((bps_unlimited * 30))
rbd_bench "${POOL}/${IMAGE}" write ${io_total} iops $((iops_unlimited / 2)) iops bps
test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
rbd_bench "${POOL}/${IMAGE}" write ${io_total} write_iops $((iops_unlimited / 2)) iops bps
test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
rbd_bench "${POOL}/${IMAGE}" write ${io_total} bps $((bps_unlimited / 2)) iops bps
test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
rbd_bench "${POOL}/${IMAGE}" write ${io_total} write_bps $((bps_unlimited / 2)) iops bps
test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops 0 iops bps
iops_unlimited=$iops
bps_unlimited=$bps
test "${iops_unlimited}" -ge 20 || exit 0
io_total=$((bps_unlimited * 30))
rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops $((iops_unlimited / 2)) iops bps
test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
rbd_bench "${POOL}/${IMAGE}" read ${io_total} read_iops $((iops_unlimited / 2)) iops bps
test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
rbd_bench "${POOL}/${IMAGE}" read ${io_total} bps $((bps_unlimited / 2)) iops bps
test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
rbd_bench "${POOL}/${IMAGE}" read ${io_total} read_bps $((bps_unlimited / 2)) iops bps
test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
# test a config override is applied
rbd config image set "${POOL}/${IMAGE}" rbd_qos_iops_limit $((iops_unlimited / 4))
rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops $((iops_unlimited / 2)) iops bps
test "${iops}" -le $((iops_unlimited / 4 * (100 + TOLERANCE_PRCNT) / 100))
rbd config image remove "${POOL}/${IMAGE}" rbd_qos_iops_limit
rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops $((iops_unlimited / 2)) iops bps
test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
rbd rm "${POOL}/${IMAGE}"
echo OK

ceph-main/qa/workunits/rbd/rbd-ggate.sh
#!/bin/sh -ex
POOL=testrbdggate$$
NS=ns
IMAGE=test
SIZE=64
DATA=
DEV=
if which xmlstarlet > /dev/null 2>&1; then
XMLSTARLET=xmlstarlet
elif which xml > /dev/null 2>&1; then
XMLSTARLET=xml
else
echo "Missing xmlstarlet binary!"
exit 1
fi
if [ `uname -K` -ge 1200078 ] ; then
RBD_GGATE_RESIZE_SUPPORTED=1
fi
_sudo()
{
local cmd
if [ `id -u` -eq 0 ]
then
"$@"
return $?
fi
# Look for the command in the user's PATH. If that fails, run it as is,
# assuming it is in sudo's path.
cmd=`which $1 2>/dev/null` || cmd=$1
shift
sudo -nE "${cmd}" "$@"
}
check_geom_gate()
{
# See if geom_gate is loaded, or can be loaded.
# Otherwise the tests cannot run.
if ! kldstat -q -n geom_gate ; then
# See if we can load it
if ! _sudo kldload geom_gate ; then
echo Not able to load geom_gate
echo check /var/log/messages as to why
exit 1
fi
fi
}
setup()
{
local ns x
if [ -e CMakeCache.txt ]; then
# running under cmake build dir
CEPH_SRC=$(readlink -f $(dirname $0)/../../../src)
CEPH_ROOT=${PWD}
CEPH_BIN=${CEPH_ROOT}/bin
export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind:${CEPH_ROOT}/lib/cython_modules/lib.3
PATH=${CEPH_BIN}:${PATH}
fi
_sudo echo test sudo
check_geom_gate
trap cleanup INT TERM EXIT
TEMPDIR=`mktemp -d`
DATA=${TEMPDIR}/data
dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
ceph osd pool create ${POOL} 32
rbd namespace create ${POOL}/${NS}
for ns in '' ${NS}; do
rbd --dest-pool ${POOL} --dest-namespace "${ns}" --no-progress import \
${DATA} ${IMAGE}
done
}
cleanup()
{
local ns s
set +e
rm -Rf ${TEMPDIR}
if [ -n "${DEV}" ]
then
_sudo rbd-ggate unmap ${DEV}
fi
ceph osd pool delete ${POOL} ${POOL} --yes-i-really-really-mean-it
}
expect_false()
{
if "$@"; then return 1; else return 0; fi
}
#
# main
#
setup
echo exit status test
expect_false rbd-ggate
expect_false rbd-ggate INVALIDCMD
if [ `id -u` -ne 0 ]
then
expect_false rbd-ggate map ${IMAGE}
fi
expect_false _sudo rbd-ggate map INVALIDIMAGE
echo map test using the first unused device
DEV=`_sudo rbd-ggate map ${POOL}/${IMAGE}`
rbd-ggate list | grep " ${DEV} *$"
echo map test specifying the device
expect_false _sudo rbd-ggate --device ${DEV} map ${POOL}/${IMAGE}
dev1=${DEV}
_sudo rbd-ggate unmap ${DEV}
rbd-ggate list | expect_false grep " ${DEV} *$"
DEV=
# XXX: race possible when the device is reused by another process
DEV=`_sudo rbd-ggate --device ${dev1} map ${POOL}/${IMAGE}`
[ "${DEV}" = "${dev1}" ]
rbd-ggate list | grep " ${DEV} *$"
echo list format test
expect_false _sudo rbd-ggate --format INVALID list
rbd-ggate --format json --pretty-format list
rbd-ggate --format xml list
echo read test
[ "`dd if=${DATA} bs=1M | md5`" = "`_sudo dd if=${DEV} bs=1M | md5`" ]
echo write test
dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
_sudo dd if=${DATA} of=${DEV} bs=1M
_sudo sync
[ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
echo trim test
provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
used=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
[ "${used}" -eq "${provisioned}" ]
_sudo newfs -E ${DEV}
_sudo sync
provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
used=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
[ "${used}" -lt "${provisioned}" ]
echo resize test
devname=$(basename ${DEV})
size=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
test -n "${size}"
rbd resize ${POOL}/${IMAGE} --size $((SIZE * 2))M
rbd info ${POOL}/${IMAGE}
if [ -z "$RBD_GGATE_RESIZE_SUPPORTED" ]; then
# when resizing is not supported:
# resizing the underlying image for a GEOM ggate will stop the
# ggate process servicing the device. So we can resize and test
# the disappearance of the device
rbd-ggate list | expect_false grep " ${DEV} *$"
else
rbd-ggate list | grep " ${DEV} *$"
size2=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
test -n "${size2}"
test ${size2} -eq $((size * 2))
dd if=/dev/urandom of=${DATA} bs=1M count=$((SIZE * 2))
_sudo dd if=${DATA} of=${DEV} bs=1M
_sudo sync
[ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
rbd resize ${POOL}/${IMAGE} --allow-shrink --size ${SIZE}M
rbd info ${POOL}/${IMAGE}
size2=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
test -n "${size2}"
test ${size2} -eq ${size}
truncate -s ${SIZE}M ${DATA}
[ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
_sudo rbd-ggate unmap ${DEV}
fi
DEV=
echo read-only option test
DEV=`_sudo rbd-ggate map --read-only ${POOL}/${IMAGE}`
devname=$(basename ${DEV})
rbd-ggate list | grep " ${DEV} *$"
access=$(geom gate list ${devname} | awk '$1 == "access:" {print $2}')
test "${access}" = "read-only"
_sudo dd if=${DEV} of=/dev/null bs=1M
expect_false _sudo dd if=${DATA} of=${DEV} bs=1M
_sudo rbd-ggate unmap ${DEV}
echo exclusive option test
DEV=`_sudo rbd-ggate map --exclusive ${POOL}/${IMAGE}`
rbd-ggate list | grep " ${DEV} *$"
_sudo dd if=${DATA} of=${DEV} bs=1M
_sudo sync
expect_false timeout 10 \
rbd -p ${POOL} bench ${IMAGE} --io-type=write --io-size=1024 --io-total=1024
_sudo rbd-ggate unmap ${DEV}
DEV=
rbd bench -p ${POOL} ${IMAGE} --io-type=write --io-size=1024 --io-total=1024
echo unmap by image name test
DEV=`_sudo rbd-ggate map ${POOL}/${IMAGE}`
rbd-ggate list | grep " ${DEV} *$"
_sudo rbd-ggate unmap "${POOL}/${IMAGE}"
rbd-ggate list | expect_false grep " ${DEV} *$"
DEV=
echo map/unmap snap test
rbd snap create ${POOL}/${IMAGE}@snap
DEV=`_sudo rbd-ggate map ${POOL}/${IMAGE}@snap`
rbd-ggate list | grep " ${DEV} *$"
_sudo rbd-ggate unmap "${POOL}/${IMAGE}@snap"
rbd-ggate list | expect_false grep " ${DEV} *$"
DEV=
echo map/unmap namespace test
rbd snap create ${POOL}/${NS}/${IMAGE}@snap
DEV=`_sudo rbd-ggate map ${POOL}/${NS}/${IMAGE}@snap`
rbd-ggate list | grep " ${DEV} *$"
_sudo rbd-ggate unmap "${POOL}/${NS}/${IMAGE}@snap"
rbd-ggate list | expect_false grep "${DEV} $"
DEV=
echo OK

ceph-main/qa/workunits/rbd/rbd-nbd.sh
#!/usr/bin/env bash
set -ex
. $(dirname $0)/../../standalone/ceph-helpers.sh
POOL=rbd
ANOTHER_POOL=new_default_pool$$
NS=ns
IMAGE=testrbdnbd$$
SIZE=64
DATA=
DEV=
_sudo()
{
local cmd
if [ `id -u` -eq 0 ]
then
"$@"
return $?
fi
# Look for the command in the user's PATH. If that fails, run it as is,
# assuming it is in sudo's path.
cmd=`which $1 2>/dev/null` || cmd=$1
shift
sudo -nE "${cmd}" "$@"
}
setup()
{
local ns x
if [ -e CMakeCache.txt ]; then
# running under cmake build dir
CEPH_SRC=$(readlink -f $(dirname $0)/../../../src)
CEPH_ROOT=${PWD}
CEPH_BIN=${CEPH_ROOT}/bin
export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind:${CEPH_ROOT}/lib/cython_modules/lib.3
PATH=${CEPH_BIN}:${PATH}
fi
_sudo echo test sudo
trap cleanup INT TERM EXIT
TEMPDIR=`mktemp -d`
DATA=${TEMPDIR}/data
dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
rbd namespace create ${POOL}/${NS}
for ns in '' ${NS}; do
rbd --dest-pool ${POOL} --dest-namespace "${ns}" --no-progress import \
${DATA} ${IMAGE}
done
# create another pool
ceph osd pool create ${ANOTHER_POOL} 8
rbd pool init ${ANOTHER_POOL}
}
function cleanup()
{
local ns s
set +e
mount | fgrep ${TEMPDIR}/mnt && _sudo umount -f ${TEMPDIR}/mnt
rm -Rf ${TEMPDIR}
if [ -n "${DEV}" ]
then
_sudo rbd device --device-type nbd unmap ${DEV}
fi
for ns in '' ${NS}; do
if rbd -p ${POOL} --namespace "${ns}" status ${IMAGE} 2>/dev/null; then
for s in 0.5 1 2 4 8 16 32; do
sleep $s
rbd -p ${POOL} --namespace "${ns}" status ${IMAGE} |
grep 'Watchers: none' && break
done
rbd -p ${POOL} --namespace "${ns}" snap purge ${IMAGE}
rbd -p ${POOL} --namespace "${ns}" remove ${IMAGE}
fi
done
rbd namespace remove ${POOL}/${NS}
# cleanup/reset default pool
rbd config global rm global rbd_default_pool
ceph osd pool delete ${ANOTHER_POOL} ${ANOTHER_POOL} --yes-i-really-really-mean-it
}
function expect_false()
{
if "$@"; then return 1; else return 0; fi
}
function get_pid()
{
local pool=$1
local ns=$2
PID=$(rbd device --device-type nbd --format xml list | $XMLSTARLET sel -t -v \
"//devices/device[pool='${pool}'][namespace='${ns}'][image='${IMAGE}'][device='${DEV}']/id")
test -n "${PID}" || return 1
ps -p ${PID} -C rbd-nbd
}
unmap_device()
{
local args=$1
local pid=$2
_sudo rbd device --device-type nbd unmap ${args}
rbd device --device-type nbd list | expect_false grep "^${pid}\\b" || return 1
ps -C rbd-nbd | expect_false grep "^ *${pid}\\b" || return 1
# work around a possible race between unmap and the following map
sleep 0.5
}
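# Usage sketch: `unmap_device ${DEV} ${PID}` (an image spec works in place of
# the device path) unmaps the nbd device and verifies that rbd-nbd process
# ${PID} has disappeared from both the device list and the process table.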
#
# main
#
setup
# exit status test
expect_false rbd-nbd
expect_false rbd-nbd INVALIDCMD
if [ `id -u` -ne 0 ]
then
expect_false rbd device --device-type nbd map ${IMAGE}
fi
expect_false _sudo rbd device --device-type nbd map INVALIDIMAGE
expect_false _sudo rbd-nbd --device INVALIDDEV map ${IMAGE}
# list format test
expect_false rbd device --device-type nbd --format INVALID list
rbd device --device-type nbd --format json --pretty-format list
rbd device --device-type nbd --format xml list
# map test using the first unused device
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
# map test specifying the device
expect_false _sudo rbd-nbd --device ${DEV} map ${POOL}/${IMAGE}
dev1=${DEV}
unmap_device ${DEV} ${PID}
DEV=
# XXX: race possible when the device is reused by another process
DEV=`_sudo rbd-nbd --device ${dev1} map ${POOL}/${IMAGE}`
[ "${DEV}" = "${dev1}" ]
rbd device --device-type nbd list | grep "${IMAGE}"
get_pid ${POOL}
# read test
[ "`dd if=${DATA} bs=1M | md5sum`" = "`_sudo dd if=${DEV} bs=1M | md5sum`" ]
# write test
dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
_sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
[ "`dd if=${DATA} bs=1M | md5sum`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5sum`" ]
unmap_device ${DEV} ${PID}
# notrim test
DEV=`_sudo rbd device --device-type nbd --options notrim map ${POOL}/${IMAGE}`
get_pid ${POOL}
provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
used=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
[ "${used}" -eq "${provisioned}" ]
# discard should fail because the device was mapped with the notrim option
expect_false _sudo blkdiscard ${DEV}
sync
provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
used=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
[ "${used}" -eq "${provisioned}" ]
unmap_device ${DEV} ${PID}
# trim test
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
used=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
[ "${used}" -eq "${provisioned}" ]
# discard should be honored because trim is enabled by default at map time
_sudo blkdiscard ${DEV}
sync
provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
used=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
[ "${used}" -lt "${provisioned}" ]
# resize test
devname=$(basename ${DEV})
blocks=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
test -n "${blocks}"
rbd resize ${POOL}/${IMAGE} --size $((SIZE * 2))M
rbd info ${POOL}/${IMAGE}
blocks2=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
test -n "${blocks2}"
test ${blocks2} -eq $((blocks * 2))
rbd resize ${POOL}/${IMAGE} --allow-shrink --size ${SIZE}M
blocks2=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
test -n "${blocks2}"
test ${blocks2} -eq ${blocks}
# read-only option test
unmap_device ${DEV} ${PID}
DEV=`_sudo rbd --device-type nbd map --read-only ${POOL}/${IMAGE}`
PID=$(rbd device --device-type nbd list | awk -v pool=${POOL} -v img=${IMAGE} -v dev=${DEV} \
'$2 == pool && $3 == img && $5 == dev {print $1}')
test -n "${PID}"
ps -p ${PID} -C rbd-nbd
_sudo dd if=${DEV} of=/dev/null bs=1M
expect_false _sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
unmap_device ${DEV} ${PID}
# exclusive option test
DEV=`_sudo rbd --device-type nbd map --exclusive ${POOL}/${IMAGE}`
get_pid ${POOL}
_sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
expect_false timeout 10 \
rbd bench ${IMAGE} --io-type write --io-size=1024 --io-total=1024
unmap_device ${DEV} ${PID}
DEV=
rbd bench ${IMAGE} --io-type write --io-size=1024 --io-total=1024
# unmap by image name test
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
unmap_device ${IMAGE} ${PID}
DEV=
# map/unmap snap test
rbd snap create ${POOL}/${IMAGE}@snap
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}@snap`
get_pid ${POOL}
unmap_device "${IMAGE}@snap" ${PID}
DEV=
# map/unmap snap test with --snap-id
SNAPID=`rbd snap ls ${POOL}/${IMAGE} | awk '$2 == "snap" {print $1}'`
DEV=`_sudo rbd device --device-type nbd map --snap-id ${SNAPID} ${POOL}/${IMAGE}`
get_pid ${POOL}
unmap_device "--snap-id ${SNAPID} ${IMAGE}" ${PID}
DEV=
# map/unmap namespace test
rbd snap create ${POOL}/${NS}/${IMAGE}@snap
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${NS}/${IMAGE}@snap`
get_pid ${POOL} ${NS}
unmap_device "${POOL}/${NS}/${IMAGE}@snap" ${PID}
DEV=
# map/unmap namespace test with --snap-id
SNAPID=`rbd snap ls ${POOL}/${NS}/${IMAGE} | awk '$2 == "snap" {print $1}'`
DEV=`_sudo rbd device --device-type nbd map --snap-id ${SNAPID} ${POOL}/${NS}/${IMAGE}`
get_pid ${POOL} ${NS}
unmap_device "--snap-id ${SNAPID} ${POOL}/${NS}/${IMAGE}" ${PID}
DEV=
# map/unmap namespace using options test
DEV=`_sudo rbd device --device-type nbd map --pool ${POOL} --namespace ${NS} --image ${IMAGE}`
get_pid ${POOL} ${NS}
unmap_device "--pool ${POOL} --namespace ${NS} --image ${IMAGE}" ${PID}
DEV=`_sudo rbd device --device-type nbd map --pool ${POOL} --namespace ${NS} --image ${IMAGE} --snap snap`
get_pid ${POOL} ${NS}
unmap_device "--pool ${POOL} --namespace ${NS} --image ${IMAGE} --snap snap" ${PID}
DEV=
# unmap by image name test 2
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
pid=$PID
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${NS}/${IMAGE}`
get_pid ${POOL} ${NS}
unmap_device ${POOL}/${NS}/${IMAGE} ${PID}
DEV=
unmap_device ${POOL}/${IMAGE} ${pid}
# map/unmap test with just image name and expect image to come from default pool
if [ "${POOL}" = "rbd" ];then
DEV=`_sudo rbd device --device-type nbd map ${IMAGE}`
get_pid ${POOL}
unmap_device ${IMAGE} ${PID}
DEV=
fi
# map/unmap test with just image name after changing default pool
rbd config global set global rbd_default_pool ${ANOTHER_POOL}
rbd create --size 10M ${IMAGE}
DEV=`_sudo rbd device --device-type nbd map ${IMAGE}`
get_pid ${ANOTHER_POOL}
unmap_device ${IMAGE} ${PID}
DEV=
# reset
rbd config global rm global rbd_default_pool
# auto unmap test
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
_sudo kill ${PID}
for i in `seq 10`; do
rbd device --device-type nbd list | expect_false grep "^${PID} *${POOL} *${IMAGE}" && break
sleep 1
done
rbd device --device-type nbd list | expect_false grep "^${PID} *${POOL} *${IMAGE}"
# quiesce test
QUIESCE_HOOK=${TEMPDIR}/quiesce.sh
DEV=`_sudo rbd device --device-type nbd map --quiesce --quiesce-hook ${QUIESCE_HOOK} ${POOL}/${IMAGE}`
get_pid ${POOL}
# test that snap create fails if the quiesce hook does not exist
test ! -e ${QUIESCE_HOOK}
expect_false rbd snap create ${POOL}/${IMAGE}@quiesce1
_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
# test the hook is executed
touch ${QUIESCE_HOOK}
chmod +x ${QUIESCE_HOOK}
cat > ${QUIESCE_HOOK} <<EOF
#!/bin/sh
echo "test the hook is executed" >&2
echo \$1 > ${TEMPDIR}/\$2
EOF
rbd snap create ${POOL}/${IMAGE}@quiesce1
_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
test "$(cat ${TEMPDIR}/quiesce)" = ${DEV}
test "$(cat ${TEMPDIR}/unquiesce)" = ${DEV}
# test snap create fails if the hook fails
touch ${QUIESCE_HOOK}
chmod +x ${QUIESCE_HOOK}
cat > ${QUIESCE_HOOK} <<EOF
#!/bin/sh
echo "test snap create fails if the hook fails" >&2
exit 22
EOF
expect_false rbd snap create ${POOL}/${IMAGE}@quiesce2
_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
# test the hook is slow
cat > ${QUIESCE_HOOK} <<EOF
#!/bin/sh
echo "test the hook is slow" >&2
sleep 7
EOF
rbd snap create ${POOL}/${IMAGE}@quiesce2
_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
# test rbd-nbd_quiesce hook that comes with distribution
unmap_device ${DEV} ${PID}
LOG_FILE=${TEMPDIR}/rbd-nbd.log
if [ -n "${CEPH_SRC}" ]; then
QUIESCE_HOOK=${CEPH_SRC}/tools/rbd_nbd/rbd-nbd_quiesce
DEV=`_sudo rbd device --device-type nbd map --quiesce --quiesce-hook ${QUIESCE_HOOK} \
${POOL}/${IMAGE} --log-file=${LOG_FILE}`
else
DEV=`_sudo rbd device --device-type nbd map --quiesce ${POOL}/${IMAGE} --log-file=${LOG_FILE}`
fi
get_pid ${POOL}
_sudo mkfs ${DEV}
mkdir ${TEMPDIR}/mnt
_sudo mount ${DEV} ${TEMPDIR}/mnt
rbd snap create ${POOL}/${IMAGE}@quiesce3
_sudo dd if=${DATA} of=${TEMPDIR}/mnt/test bs=1M count=1 oflag=direct
_sudo umount ${TEMPDIR}/mnt
unmap_device ${DEV} ${PID}
DEV=
cat ${LOG_FILE}
expect_false grep 'quiesce failed' ${LOG_FILE}
# test detach/attach
OUT=`_sudo rbd device --device-type nbd --options try-netlink,show-cookie map ${POOL}/${IMAGE}`
read DEV COOKIE <<< "${OUT}"
get_pid ${POOL}
_sudo mount ${DEV} ${TEMPDIR}/mnt
_sudo rbd device detach ${POOL}/${IMAGE} --device-type nbd
expect_false get_pid ${POOL}
expect_false _sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd
if [ -n "${COOKIE}" ]; then
_sudo rbd device attach --device ${DEV} --cookie ${COOKIE} ${POOL}/${IMAGE} --device-type nbd
else
_sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd --force
fi
get_pid ${POOL}
_sudo rbd device detach ${DEV} --device-type nbd
expect_false get_pid ${POOL}
if [ -n "${COOKIE}" ]; then
_sudo rbd device attach --device ${DEV} --cookie ${COOKIE} ${POOL}/${IMAGE} --device-type nbd
else
_sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd --force
fi
get_pid ${POOL}
ls ${TEMPDIR}/mnt/
dd if=${TEMPDIR}/mnt/test of=/dev/null bs=1M count=1
_sudo dd if=${DATA} of=${TEMPDIR}/mnt/test1 bs=1M count=1 oflag=direct
_sudo umount ${TEMPDIR}/mnt
unmap_device ${DEV} ${PID}
# if kernel supports cookies
if [ -n "${COOKIE}" ]; then
OUT=`_sudo rbd device --device-type nbd --show-cookie --cookie "abc de" --options try-netlink map ${POOL}/${IMAGE}`
read DEV ANOTHER_COOKIE <<< "${OUT}"
get_pid ${POOL}
test "${ANOTHER_COOKIE}" = "abc de"
unmap_device ${DEV} ${PID}
fi
DEV=
# test detach/attach with --snap-id
SNAPID=`rbd snap ls ${POOL}/${IMAGE} | awk '$2 == "snap" {print $1}'`
OUT=`_sudo rbd device --device-type nbd --options try-netlink,show-cookie map --snap-id ${SNAPID} ${POOL}/${IMAGE}`
read DEV COOKIE <<< "${OUT}"
get_pid ${POOL}
_sudo rbd device detach ${POOL}/${IMAGE} --snap-id ${SNAPID} --device-type nbd
expect_false get_pid ${POOL}
expect_false _sudo rbd device attach --device ${DEV} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd
if [ -n "${COOKIE}" ]; then
_sudo rbd device attach --device ${DEV} --cookie ${COOKIE} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd
else
_sudo rbd device attach --device ${DEV} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd --force
fi
get_pid ${POOL}
_sudo rbd device detach ${DEV} --device-type nbd
expect_false get_pid ${POOL}
DEV=
# test discard granularity with journaling
rbd config image set ${POOL}/${IMAGE} rbd_discard_granularity_bytes 4096
rbd feature enable ${POOL}/${IMAGE} journaling
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
# since a discard will now be pruned to only whole blocks (0..4095, 4096..8191)
# let us test all the cases around those alignments. 512 is the smallest
# possible block blkdiscard allows us to use. Thus the test checks
# 512 before, on the alignment, 512 after.
_sudo blkdiscard --offset 0 --length $((4096-512)) ${DEV}
_sudo blkdiscard --offset 0 --length 4096 ${DEV}
_sudo blkdiscard --offset 0 --length $((4096+512)) ${DEV}
_sudo blkdiscard --offset 512 --length $((8192-1024)) ${DEV}
_sudo blkdiscard --offset 512 --length $((8192-512)) ${DEV}
_sudo blkdiscard --offset 512 --length 8192 ${DEV}
# wait for commit log to be empty, 10 seconds should be well enough
tries=0
queue_length=`rbd journal inspect --pool ${POOL} --image ${IMAGE} | awk '/entries inspected/ {print $1}'`
while [ ${tries} -lt 10 ] && [ ${queue_length} -gt 0 ]; do
rbd journal inspect --pool ${POOL} --image ${IMAGE} --verbose
sleep 1
queue_length=`rbd journal inspect --pool ${POOL} --image ${IMAGE} | awk '/entries inspected/ {print $1}'`
tries=$((tries+1))
done
[ ${queue_length} -eq 0 ]
unmap_device ${DEV} ${PID}
DEV=
rbd feature disable ${POOL}/${IMAGE} journaling
rbd config image rm ${POOL}/${IMAGE} rbd_discard_granularity_bytes
# test that rbd_op_threads setting takes effect
EXPECTED=`ceph-conf --show-config-value librados_thread_count`
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
ACTUAL=`ps -p ${PID} -T | grep -c io_context_pool`
[ ${ACTUAL} -eq ${EXPECTED} ]
unmap_device ${DEV} ${PID}
EXPECTED=$((EXPECTED * 3 + 1))
DEV=`_sudo rbd device --device-type nbd --rbd-op-threads ${EXPECTED} map ${POOL}/${IMAGE}`
get_pid ${POOL}
ACTUAL=`ps -p ${PID} -T | grep -c io_context_pool`
[ ${ACTUAL} -eq ${EXPECTED} ]
unmap_device ${DEV} ${PID}
DEV=
echo OK

ceph-main/qa/workunits/rbd/rbd_groups.sh
#!/usr/bin/env bash
set -ex
#
# rbd_consistency_groups.sh - test consistency groups cli commands
#
#
# Functions
#
create_group()
{
local group_name=$1
rbd group create $group_name
}
list_groups()
{
rbd group list
}
check_group_exists()
{
local group_name=$1
list_groups | grep $group_name
}
remove_group()
{
local group_name=$1
rbd group remove $group_name
}
rename_group()
{
local src_name=$1
local dest_name=$2
rbd group rename $src_name $dest_name
}
check_group_does_not_exist()
{
local group_name=$1
for v in $(list_groups); do
if [ "$v" == "$group_name" ]; then
return 1
fi
done
return 0
}
create_image()
{
local image_name=$1
rbd create --size 10M $image_name
}
remove_image()
{
local image_name=$1
rbd remove $image_name
}
add_image_to_group()
{
local image_name=$1
local group_name=$2
rbd group image add $group_name $image_name
}
remove_image_from_group()
{
local image_name=$1
local group_name=$2
rbd group image remove $group_name $image_name
}
check_image_in_group()
{
local image_name=$1
local group_name=$2
for v in $(rbd group image list $group_name); do
local vtrimmed=${v#*/}
if [ "$vtrimmed" = "$image_name" ]; then
return 0
fi
done
return 1
}
check_image_not_in_group()
{
local image_name=$1
local group_name=$2
for v in $(rbd group image list $group_name); do
local vtrimmed=${v#*/}
if [ "$vtrimmed" = "$image_name" ]; then
return 1
fi
done
return 0
}
create_snapshot()
{
local group_name=$1
local snap_name=$2
rbd group snap create $group_name@$snap_name
}
create_snapshots()
{
local group_name=$1
local snap_name=$2
local snap_count=$3
for i in `seq 1 $snap_count`; do
rbd group snap create $group_name@$snap_name$i
done
}
remove_snapshot()
{
local group_name=$1
local snap_name=$2
rbd group snap remove $group_name@$snap_name
}
remove_snapshots()
{
local group_name=$1
local snap_name=$2
local snap_count=$3
for i in `seq 1 $snap_count`; do
rbd group snap remove $group_name@$snap_name$i
done
}
rename_snapshot()
{
local group_name=$1
local snap_name=$2
local new_snap_name=$3
rbd group snap rename $group_name@$snap_name $new_snap_name
}
list_snapshots()
{
local group_name=$1
rbd group snap list $group_name
}
rollback_snapshot()
{
local group_name=$1
local snap_name=$2
rbd group snap rollback $group_name@$snap_name
}
check_snapshot_in_group()
{
local group_name=$1
local snap_name=$2
list_snapshots $group_name | grep $snap_name
}
check_snapshots_count_in_group()
{
local group_name=$1
local snap_name=$2
local expected_count=$3
local actual_count
actual_count=$(list_snapshots $group_name | grep -c $snap_name)
(( actual_count == expected_count ))
}
check_snapshot_not_in_group()
{
local group_name=$1
local snap_name=$2
for v in $(list_snapshots $group_name | awk '{print $1}'); do
if [ "$v" = "$snap_name" ]; then
return 1
fi
done
return 0
}
echo "TEST: create remove consistency group"
group="test_consistency_group"
new_group="test_new_consistency_group"
create_group $group
check_group_exists $group
rename_group $group $new_group
check_group_exists $new_group
remove_group $new_group
check_group_does_not_exist $new_group
echo "PASSED"
echo "TEST: add remove images to consistency group"
image="test_image"
group="test_consistency_group"
create_image $image
create_group $group
add_image_to_group $image $group
check_image_in_group $image $group
remove_image_from_group $image $group
check_image_not_in_group $image $group
remove_group $group
remove_image $image
echo "PASSED"
echo "TEST: create remove snapshots of consistency group"
image="test_image"
group="test_consistency_group"
snap="group_snap"
new_snap="new_group_snap"
sec_snap="group_snap2"
create_image $image
create_group $group
add_image_to_group $image $group
create_snapshot $group $snap
check_snapshot_in_group $group $snap
rename_snapshot $group $snap $new_snap
check_snapshot_not_in_group $group $snap
create_snapshot $group $sec_snap
check_snapshot_in_group $group $sec_snap
rollback_snapshot $group $new_snap
remove_snapshot $group $new_snap
check_snapshot_not_in_group $group $new_snap
remove_snapshot $group $sec_snap
check_snapshot_not_in_group $group $sec_snap
remove_group $group
remove_image $image
echo "PASSED"
echo "TEST: list snapshots of consistency group"
image="test_image"
group="test_consistency_group"
snap="group_snap"
create_image $image
create_group $group
add_image_to_group $image $group
create_snapshots $group $snap 10
check_snapshots_count_in_group $group $snap 10
remove_snapshots $group $snap 10
create_snapshots $group $snap 100
check_snapshots_count_in_group $group $snap 100
remove_snapshots $group $snap 100
remove_group $group
remove_image $image
echo "PASSED"
echo "OK"
# ceph-main/qa/workunits/rbd/rbd_mirror_bootstrap.sh
#!/bin/sh -ex
#
# rbd_mirror_bootstrap.sh - test peer bootstrap create/import
#
RBD_MIRROR_MANUAL_PEERS=1
RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-1}
. $(dirname $0)/rbd_mirror_helpers.sh
setup
testlog "TEST: bootstrap cluster2 from cluster1"
# create token on cluster1 and import to cluster2
TOKEN=${TEMPDIR}/peer-token
TOKEN_2=${TEMPDIR}/peer-token-2
CEPH_ARGS='' rbd --cluster ${CLUSTER1} mirror pool peer bootstrap create ${POOL} > ${TOKEN}
CEPH_ARGS='' rbd --cluster ${CLUSTER1} mirror pool peer bootstrap create ${PARENT_POOL} > ${TOKEN_2}
cmp ${TOKEN} ${TOKEN_2}
CEPH_ARGS='' rbd --cluster ${CLUSTER2} --pool ${POOL} mirror pool peer bootstrap import ${TOKEN} --direction rx-only
CEPH_ARGS='' rbd --cluster ${CLUSTER2} --pool ${PARENT_POOL} mirror pool peer bootstrap import ${TOKEN} --direction rx-tx
start_mirrors ${CLUSTER1}
start_mirrors ${CLUSTER2}
testlog "TEST: verify rx-only direction"
# rx-only peer is added immediately by "rbd mirror pool peer bootstrap import"
rbd --cluster ${CLUSTER2} --pool ${POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-only"'
# tx-only peer is added asynchronously by mirror_peer_ping class method
while ! rbd --cluster ${CLUSTER1} --pool ${POOL} mirror pool info --format json | jq -e '.peers | length > 0'; do
sleep 1
done
rbd --cluster ${CLUSTER1} --pool ${POOL} mirror pool info --format json | jq -e '.peers[0].direction == "tx-only"'
create_image_and_enable_mirror ${CLUSTER1} ${POOL} image1
wait_for_image_replay_started ${CLUSTER2} ${POOL} image1
write_image ${CLUSTER1} ${POOL} image1 100
wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} image1
testlog "TEST: verify rx-tx direction"
# both rx-tx peers are added immediately by "rbd mirror pool peer bootstrap import"
rbd --cluster ${CLUSTER1} --pool ${PARENT_POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-tx"'
rbd --cluster ${CLUSTER2} --pool ${PARENT_POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-tx"'
create_image ${CLUSTER1} ${PARENT_POOL} image1
create_image ${CLUSTER2} ${PARENT_POOL} image2
enable_mirror ${CLUSTER1} ${PARENT_POOL} image1
enable_mirror ${CLUSTER2} ${PARENT_POOL} image2
wait_for_image_replay_started ${CLUSTER2} ${PARENT_POOL} image1
write_image ${CLUSTER1} ${PARENT_POOL} image1 100
wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${PARENT_POOL} image1
wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} image2
write_image ${CLUSTER2} ${PARENT_POOL} image2 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} image2
| 2,576 | 42.677966 | 121 | sh |
null | ceph-main/qa/workunits/rbd/rbd_mirror_fsx_compare.sh | #!/bin/sh -ex
#
# rbd_mirror_fsx_compare.sh - test rbd-mirror daemon under FSX workload
#
# The script is used to compare FSX-generated images between two clusters.
#
. $(dirname $0)/rbd_mirror_helpers.sh
trap 'cleanup $?' INT TERM EXIT
setup_tempdir
testlog "TEST: wait for all images"
image_count=$(rbd --cluster ${CLUSTER1} --pool ${POOL} ls | wc -l)
retrying_seconds=0
sleep_seconds=10
while [ ${retrying_seconds} -le 7200 ]; do
[ $(rbd --cluster ${CLUSTER2} --pool ${POOL} ls | wc -l) -ge ${image_count} ] && break
sleep ${sleep_seconds}
retrying_seconds=$(($retrying_seconds+${sleep_seconds}))
done
testlog "TEST: snapshot all pool images"
snap_id=`uuidgen`
for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
create_snapshot ${CLUSTER1} ${POOL} ${image} ${snap_id}
done
testlog "TEST: wait for snapshots"
for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
wait_for_snap_present ${CLUSTER2} ${POOL} ${image} ${snap_id}
done
testlog "TEST: compare image snapshots"
for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
compare_image_snapshots ${POOL} ${image}
done
# ceph-main/qa/workunits/rbd/rbd_mirror_fsx_prepare.sh
#!/bin/sh -ex
#
# rbd_mirror_fsx_prepare.sh - test rbd-mirror daemon under FSX workload
#
# The script is used to compare FSX-generated images between two clusters.
#
. $(dirname $0)/rbd_mirror_helpers.sh
setup
# ceph-main/qa/workunits/rbd/rbd_mirror_ha.sh
#!/bin/sh -ex
#
# rbd_mirror_ha.sh - test rbd-mirror daemons in HA mode
#
RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-7}
. $(dirname $0)/rbd_mirror_helpers.sh
setup
is_leader()
{
local instance=$1
local pool=$2
test -n "${pool}" || pool=${POOL}
admin_daemon "${CLUSTER1}:${instance}" \
rbd mirror status ${pool} ${CLUSTER2}${PEER_CLUSTER_SUFFIX} |
grep '"leader": true'
}
wait_for_leader()
{
local s instance
for s in 1 1 2 4 4 4 4 4 8 8 8 8 16 16 32 64; do
sleep $s
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
is_leader ${instance} || continue
LEADER=${instance}
return 0
done
done
LEADER=
return 1
}
release_leader()
{
local pool=$1
local cmd="rbd mirror leader release"
test -n "${pool}" && cmd="${cmd} ${pool} ${CLUSTER2}"
admin_daemon "${CLUSTER1}:${LEADER}" ${cmd}
}
wait_for_leader_released()
{
local i
test -n "${LEADER}"
for i in `seq 10`; do
is_leader ${LEADER} || return 0
sleep 1
done
return 1
}
test_replay()
{
local image
for image; do
wait_for_image_replay_started ${CLUSTER1}:${LEADER} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1}:${LEADER} ${CLUSTER2} ${POOL} \
${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' \
'primary_position' \
"${MIRROR_USER_ID_PREFIX}${LEADER} on $(hostname -s)"
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} \
'down+unknown'
fi
compare_images ${POOL} ${image}
done
}
testlog "TEST: start first daemon instance and test replay"
start_mirror ${CLUSTER1}:0
image1=test1
create_image ${CLUSTER2} ${POOL} ${image1}
LEADER=0
test_replay ${image1}
testlog "TEST: release leader and wait it is reacquired"
is_leader 0 ${POOL}
is_leader 0 ${PARENT_POOL}
release_leader ${POOL}
wait_for_leader_released
is_leader 0 ${PARENT_POOL}
wait_for_leader
release_leader
wait_for_leader_released
expect_failure "" is_leader 0 ${PARENT_POOL}
wait_for_leader
testlog "TEST: start second daemon instance and test replay"
start_mirror ${CLUSTER1}:1
image2=test2
create_image ${CLUSTER2} ${POOL} ${image2}
test_replay ${image1} ${image2}
testlog "TEST: release leader and test it is acquired by secondary"
is_leader 0 ${POOL}
is_leader 0 ${PARENT_POOL}
release_leader ${POOL}
wait_for_leader_released
wait_for_leader
test_replay ${image1} ${image2}
release_leader
wait_for_leader_released
wait_for_leader
test "${LEADER}" = 0
testlog "TEST: stop first daemon instance and test replay"
stop_mirror ${CLUSTER1}:0
image3=test3
create_image ${CLUSTER2} ${POOL} ${image3}
LEADER=1
test_replay ${image1} ${image2} ${image3}
testlog "TEST: start first daemon instance and test replay"
start_mirror ${CLUSTER1}:0
image4=test4
create_image ${CLUSTER2} ${POOL} ${image4}
test_replay ${image3} ${image4}
testlog "TEST: crash leader and test replay"
stop_mirror ${CLUSTER1}:1 -KILL
image5=test5
create_image ${CLUSTER2} ${POOL} ${image5}
LEADER=0
test_replay ${image1} ${image4} ${image5}
testlog "TEST: start crashed leader and test replay"
start_mirror ${CLUSTER1}:1
image6=test6
create_image ${CLUSTER2} ${POOL} ${image6}
test_replay ${image1} ${image6}
testlog "TEST: start yet another daemon instance and test replay"
start_mirror ${CLUSTER1}:2
image7=test7
create_image ${CLUSTER2} ${POOL} ${image7}
test_replay ${image1} ${image7}
testlog "TEST: release leader and test it is acquired by secondary"
is_leader 0
release_leader
wait_for_leader_released
wait_for_leader
test_replay ${image1} ${image2}
testlog "TEST: stop leader and test replay"
stop_mirror ${CLUSTER1}:${LEADER}
image8=test8
create_image ${CLUSTER2} ${POOL} ${image8}
prev_leader=${LEADER}
wait_for_leader
test_replay ${image1} ${image8}
testlog "TEST: start previous leader and test replay"
start_mirror ${CLUSTER1}:${prev_leader}
image9=test9
create_image ${CLUSTER2} ${POOL} ${image9}
test_replay ${image1} ${image9}
testlog "TEST: crash leader and test replay"
stop_mirror ${CLUSTER1}:${LEADER} -KILL
image10=test10
create_image ${CLUSTER2} ${POOL} ${image10}
prev_leader=${LEADER}
wait_for_leader
test_replay ${image1} ${image10}
testlog "TEST: start previous leader and test replay"
start_mirror ${CLUSTER1}:${prev_leader}
image11=test11
create_image ${CLUSTER2} ${POOL} ${image11}
test_replay ${image1} ${image11}
testlog "TEST: start some more daemon instances and test replay"
start_mirror ${CLUSTER1}:3
start_mirror ${CLUSTER1}:4
start_mirror ${CLUSTER1}:5
start_mirror ${CLUSTER1}:6
image13=test13
create_image ${CLUSTER2} ${POOL} ${image13}
test_replay ${image1} ${image13}
testlog "TEST: release leader and test it is acquired by secondary"
release_leader
wait_for_leader_released
wait_for_leader
test_replay ${image1} ${image2}
testlog "TEST: in loop: stop leader and test replay"
for i in 0 1 2 3 4 5; do
stop_mirror ${CLUSTER1}:${LEADER}
wait_for_leader
test_replay ${image1}
done
stop_mirror ${CLUSTER1}:${LEADER}
# ceph-main/qa/workunits/rbd/rbd_mirror_helpers.sh
#!/bin/sh
#
# rbd_mirror_helpers.sh - shared rbd-mirror daemon helper functions
#
# The script starts two ("local" and "remote") clusters using the mstart.sh script,
# creates a temporary directory used for cluster configs, daemon logs, admin
# sockets, and temporary files, and launches rbd-mirror daemons.
#
# There are several env variables useful when troubleshooting a test failure:
#
# RBD_MIRROR_NOCLEANUP - if not empty, don't run the cleanup (stop processes,
# destroy the clusters and remove the temp directory)
# on exit, so it is possible to check the test state
# after failure.
# RBD_MIRROR_TEMDIR - use this path when creating the temporary directory
# (should not exist) instead of running mktemp(1).
# RBD_MIRROR_ARGS - use this to pass additional arguments to started
# rbd-mirror daemons.
# RBD_MIRROR_VARGS - use this to pass additional arguments to vstart.sh
# when starting clusters.
# RBD_MIRROR_INSTANCES - number of daemons to start per cluster
# RBD_MIRROR_CONFIG_KEY - if not empty, use config-key for remote cluster
# secrets
# The cleanup can be done as a separate step, by running the script with the
# `cleanup' argument (and RBD_MIRROR_TEMDIR set, see the example below).
#
# Note that, like other workunits tests, rbd_mirror_journal.sh expects to find
# ceph binaries in PATH.
#
# Thus a typical troubleshooting session:
#
# From Ceph src dir (CEPH_SRC_PATH), start the test in NOCLEANUP mode and with
# TEMPDIR pointing to a known location:
#
# cd $CEPH_SRC_PATH
# PATH=$CEPH_SRC_PATH:$PATH
# RBD_MIRROR_NOCLEANUP=1 RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
# ../qa/workunits/rbd/rbd_mirror_journal.sh
#
# After the test failure cd to TEMPDIR and check the current state:
#
# cd /tmp/tmp.rbd_mirror
# ls
# less rbd-mirror.cluster1_daemon.$pid.log
# ceph --cluster cluster1 -s
# ceph --cluster cluster2 -s
# rbd --cluster cluster2 -p mirror ls
# rbd --cluster cluster2 -p mirror journal status --image test
# ceph --admin-daemon rbd-mirror.cluster1_daemon.cluster1.$pid.asok help
# ...
#
# Also you can execute commands (functions) from the script:
#
# cd $CEPH_SRC_PATH
# export RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror
# ../qa/workunits/rbd/rbd_mirror_journal.sh status
# ../qa/workunits/rbd/rbd_mirror_journal.sh stop_mirror cluster1
# ../qa/workunits/rbd/rbd_mirror_journal.sh start_mirror cluster2
# ../qa/workunits/rbd/rbd_mirror_journal.sh flush cluster2
# ...
#
# Eventually, run the cleanup:
#
# cd $CEPH_SRC_PATH
# RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
# ../qa/workunits/rbd/rbd_mirror_journal.sh cleanup
#
if type xmlstarlet > /dev/null 2>&1; then
XMLSTARLET=xmlstarlet
elif type xml > /dev/null 2>&1; then
XMLSTARLET=xml
else
echo "Missing xmlstarlet binary!"
exit 1
fi
RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-2}
CLUSTER1=cluster1
CLUSTER2=cluster2
PEER_CLUSTER_SUFFIX=
POOL=mirror
PARENT_POOL=mirror_parent
NS1=ns1
NS2=ns2
TEMPDIR=
CEPH_ID=${CEPH_ID:-mirror}
RBD_IMAGE_FEATURES=${RBD_IMAGE_FEATURES:-layering,exclusive-lock,journaling}
MIRROR_USER_ID_PREFIX=${MIRROR_USER_ID_PREFIX:-${CEPH_ID}.}
MIRROR_POOL_MODE=${MIRROR_POOL_MODE:-pool}
MIRROR_IMAGE_MODE=${MIRROR_IMAGE_MODE:-journal}
export CEPH_ARGS="--id ${CEPH_ID}"
LAST_MIRROR_INSTANCE=$((${RBD_MIRROR_INSTANCES} - 1))
CEPH_ROOT=$(readlink -f $(dirname $0)/../../../src)
CEPH_BIN=.
CEPH_SRC=.
if [ -e CMakeCache.txt ]; then
CEPH_SRC=${CEPH_ROOT}
CEPH_ROOT=${PWD}
CEPH_BIN=./bin
# needed for ceph CLI under cmake
export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind:${CEPH_ROOT}/lib/cython_modules/lib.3
fi
# These vars facilitate running this script in an environment with
# ceph installed from packages, like teuthology. These are not defined
# by default.
#
# RBD_MIRROR_USE_EXISTING_CLUSTER - if set, do not start and stop ceph clusters
# RBD_MIRROR_USE_RBD_MIRROR - if set, use an existing instance of rbd-mirror
# running as ceph client $CEPH_ID. If empty,
# this script will start and stop rbd-mirror
#
# Functions
#
# Parse a value in format cluster[:instance] and set cluster and instance vars.
set_cluster_instance()
{
local val=$1
local cluster_var_name=$2
local instance_var_name=$3
cluster=${val%:*}
instance=${val##*:}
if [ "${instance}" = "${val}" ]; then
# instance was not specified, use default
instance=0
fi
eval ${cluster_var_name}=${cluster}
eval ${instance_var_name}=${instance}
}
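# Illustrative parses (not executed; the cluster:instance values are
# hypothetical):
#
#   set_cluster_instance "cluster1:2" c i   # c=cluster1, i=2
#   set_cluster_instance "cluster1" c i     # c=cluster1, i=0 (default)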
daemon_asok_file()
{
local local_cluster=$1
local cluster=$2
local instance
set_cluster_instance "${local_cluster}" local_cluster instance
echo $(ceph-conf --cluster $local_cluster --name "client.${MIRROR_USER_ID_PREFIX}${instance}" 'admin socket')
}
daemon_pid_file()
{
local cluster=$1
local instance
set_cluster_instance "${cluster}" cluster instance
echo $(ceph-conf --cluster $cluster --name "client.${MIRROR_USER_ID_PREFIX}${instance}" 'pid file')
}
testlog()
{
echo $(date '+%F %T') $@ | tee -a "${TEMPDIR}/rbd-mirror.test.log" >&2
}
expect_failure()
{
local expected="$1" ; shift
local out=${TEMPDIR}/expect_failure.out
if "$@" > ${out} 2>&1 ; then
cat ${out} >&2
return 1
fi
if [ -z "${expected}" ]; then
return 0
fi
if ! grep -q "${expected}" ${out} ; then
cat ${out} >&2
return 1
fi
return 0
}
mkfname()
{
echo "$@" | sed -e 's|[/ ]|_|g'
}
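# Illustrative example (not executed): '/' and ' ' both become '_', so names
# built from namespaced pools stay usable as file names:
#
#   mkfname "cluster1-mirror/ns1-test.status"   # -> cluster1-mirror_ns1-test.status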
create_users()
{
local cluster=$1
CEPH_ARGS='' ceph --cluster "${cluster}" \
auth get-or-create client.${CEPH_ID} \
mon 'profile rbd' osd 'profile rbd' mgr 'profile rbd' >> \
${CEPH_ROOT}/run/${cluster}/keyring
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
CEPH_ARGS='' ceph --cluster "${cluster}" \
auth get-or-create client.${MIRROR_USER_ID_PREFIX}${instance} \
mon 'profile rbd-mirror' osd 'profile rbd' mgr 'profile rbd' >> \
${CEPH_ROOT}/run/${cluster}/keyring
done
}
setup_cluster()
{
local cluster=$1
CEPH_ARGS='' ${CEPH_SRC}/mstart.sh ${cluster} -n ${RBD_MIRROR_VARGS}
cd ${CEPH_ROOT}
rm -f ${TEMPDIR}/${cluster}.conf
ln -s $(readlink -f run/${cluster}/ceph.conf) \
${TEMPDIR}/${cluster}.conf
cd ${TEMPDIR}
create_users "${cluster}"
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
cat<<EOF >> ${TEMPDIR}/${cluster}.conf
[client.${MIRROR_USER_ID_PREFIX}${instance}]
admin socket = ${TEMPDIR}/rbd-mirror.\$cluster-\$name.asok
pid file = ${TEMPDIR}/rbd-mirror.\$cluster-\$name.pid
log file = ${TEMPDIR}/rbd-mirror.${cluster}_daemon.${instance}.log
EOF
done
}
peer_add()
{
local cluster=$1 ; shift
local pool=$1 ; shift
local client_cluster=$1 ; shift
local remote_cluster="${client_cluster##*@}"
local uuid_var_name
if [ -n "$1" ]; then
uuid_var_name=$1 ; shift
fi
local error_code
local peer_uuid
for s in 1 2 4 8 16 32; do
set +e
peer_uuid=$(rbd --cluster ${cluster} mirror pool peer add \
${pool} ${client_cluster} $@)
error_code=$?
set -e
if [ $error_code -eq 17 ]; then
# raced with a remote heartbeat ping -- remove and retry
sleep $s
peer_uuid=$(rbd mirror pool info --cluster ${cluster} --pool ${pool} --format xml | \
                $XMLSTARLET sel -t -v "//peers/peer[site_name='${remote_cluster}']/uuid")
CEPH_ARGS='' rbd --cluster ${cluster} --pool ${pool} mirror pool peer remove ${peer_uuid}
else
test $error_code -eq 0
if [ -n "$uuid_var_name" ]; then
eval ${uuid_var_name}=${peer_uuid}
fi
return 0
fi
done
return 1
}
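# Illustrative invocations, mirroring the setup_pools callers below (the mon
# host and key file values are hypothetical):
#
#   peer_add cluster1 mirror cluster2
#   peer_add cluster1 mirror_parent client.mirror@cluster2-DNE uuid \
#       --remote-mon-host 127.0.0.1:40168 --remote-key-file /tmp/remote.key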
setup_pools()
{
local cluster=$1
local remote_cluster=$2
local mon_map_file
local mon_addr
local admin_key_file
local uuid
CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${POOL} 64 64
CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${PARENT_POOL} 64 64
CEPH_ARGS='' rbd --cluster ${cluster} pool init ${POOL}
CEPH_ARGS='' rbd --cluster ${cluster} pool init ${PARENT_POOL}
if [ -n "${RBD_MIRROR_CONFIG_KEY}" ]; then
PEER_CLUSTER_SUFFIX=-DNE
fi
CEPH_ARGS='' rbd --cluster ${cluster} mirror pool enable \
--site-name ${cluster}${PEER_CLUSTER_SUFFIX} ${POOL} ${MIRROR_POOL_MODE}
rbd --cluster ${cluster} mirror pool enable ${PARENT_POOL} image
rbd --cluster ${cluster} namespace create ${POOL}/${NS1}
rbd --cluster ${cluster} namespace create ${POOL}/${NS2}
rbd --cluster ${cluster} mirror pool enable ${POOL}/${NS1} ${MIRROR_POOL_MODE}
rbd --cluster ${cluster} mirror pool enable ${POOL}/${NS2} image
if [ -z ${RBD_MIRROR_MANUAL_PEERS} ]; then
if [ -z ${RBD_MIRROR_CONFIG_KEY} ]; then
peer_add ${cluster} ${POOL} ${remote_cluster}
peer_add ${cluster} ${PARENT_POOL} ${remote_cluster}
else
mon_map_file=${TEMPDIR}/${remote_cluster}.monmap
CEPH_ARGS='' ceph --cluster ${remote_cluster} mon getmap > ${mon_map_file}
mon_addr=$(monmaptool --print ${mon_map_file} | grep -E 'mon\.' |
head -n 1 | sed -E 's/^[0-9]+: ([^ ]+).+$/\1/' | sed -E 's/\/[0-9]+//g')
admin_key_file=${TEMPDIR}/${remote_cluster}.client.${CEPH_ID}.key
CEPH_ARGS='' ceph --cluster ${remote_cluster} auth get-key client.${CEPH_ID} > ${admin_key_file}
CEPH_ARGS='' peer_add ${cluster} ${POOL} \
client.${CEPH_ID}@${remote_cluster}${PEER_CLUSTER_SUFFIX} '' \
--remote-mon-host "${mon_addr}" --remote-key-file ${admin_key_file}
peer_add ${cluster} ${PARENT_POOL} client.${CEPH_ID}@${remote_cluster}${PEER_CLUSTER_SUFFIX} uuid
CEPH_ARGS='' rbd --cluster ${cluster} mirror pool peer set ${PARENT_POOL} ${uuid} mon-host ${mon_addr}
CEPH_ARGS='' rbd --cluster ${cluster} mirror pool peer set ${PARENT_POOL} ${uuid} key-file ${admin_key_file}
fi
fi
}
setup_tempdir()
{
if [ -n "${RBD_MIRROR_TEMDIR}" ]; then
test -d "${RBD_MIRROR_TEMDIR}" ||
mkdir "${RBD_MIRROR_TEMDIR}"
TEMPDIR="${RBD_MIRROR_TEMDIR}"
cd ${TEMPDIR}
else
TEMPDIR=`mktemp -d`
fi
}
setup()
{
local c
trap 'cleanup $?' INT TERM EXIT
setup_tempdir
if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
setup_cluster "${CLUSTER1}"
setup_cluster "${CLUSTER2}"
fi
setup_pools "${CLUSTER1}" "${CLUSTER2}"
setup_pools "${CLUSTER2}" "${CLUSTER1}"
if [ -n "${RBD_MIRROR_MIN_COMPAT_CLIENT}" ]; then
CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd \
set-require-min-compat-client ${RBD_MIRROR_MIN_COMPAT_CLIENT}
CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd \
set-require-min-compat-client ${RBD_MIRROR_MIN_COMPAT_CLIENT}
fi
}
cleanup()
{
local error_code=$1
set +e
if [ "${error_code}" -ne 0 ]; then
status
fi
if [ -z "${RBD_MIRROR_NOCLEANUP}" ]; then
local cluster instance
CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
for cluster in "${CLUSTER1}" "${CLUSTER2}"; do
stop_mirrors "${cluster}"
done
if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
cd ${CEPH_ROOT}
CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER1}
CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER2}
fi
test "${RBD_MIRROR_TEMDIR}" = "${TEMPDIR}" || rm -Rf ${TEMPDIR}
fi
if [ "${error_code}" -eq 0 ]; then
echo "OK"
else
echo "FAIL"
fi
exit ${error_code}
}
start_mirror()
{
local cluster=$1
local instance
set_cluster_instance "${cluster}" cluster instance
test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return
rbd-mirror \
--cluster ${cluster} \
--id ${MIRROR_USER_ID_PREFIX}${instance} \
--rbd-mirror-delete-retry-interval=5 \
--rbd-mirror-image-state-check-interval=5 \
--rbd-mirror-journal-poll-age=1 \
--rbd-mirror-pool-replayers-refresh-interval=5 \
--debug-rbd=30 --debug-journaler=30 \
--debug-rbd_mirror=30 \
--daemonize=true \
${RBD_MIRROR_ARGS}
}
start_mirrors()
{
local cluster=$1
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
start_mirror "${cluster}:${instance}"
done
}
stop_mirror()
{
local cluster=$1
local sig=$2
test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return
local pid
pid=$(cat $(daemon_pid_file "${cluster}") 2>/dev/null) || :
if [ -n "${pid}" ]
then
kill ${sig} ${pid}
for s in 1 2 4 8 16 32; do
sleep $s
ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}' && break
done
ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}'
fi
rm -f $(daemon_asok_file "${cluster}" "${CLUSTER1}")
rm -f $(daemon_asok_file "${cluster}" "${CLUSTER2}")
rm -f $(daemon_pid_file "${cluster}")
}
stop_mirrors()
{
local cluster=$1
local sig=$2
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
stop_mirror "${cluster}:${instance}" "${sig}"
done
}
admin_daemon()
{
local cluster=$1 ; shift
local instance
set_cluster_instance "${cluster}" cluster instance
local asok_file=$(daemon_asok_file "${cluster}:${instance}" "${cluster}")
test -S "${asok_file}"
ceph --admin-daemon ${asok_file} $@
}
admin_daemons()
{
local cluster_instance=$1 ; shift
local cluster="${cluster_instance%:*}"
local instance="${cluster_instance##*:}"
local loop_instance
for s in 0 1 2 4 8 8 8 8 8 8 8 8 16 16; do
sleep ${s}
if [ "${instance}" != "${cluster_instance}" ]; then
admin_daemon "${cluster}:${instance}" $@ && return 0
else
for loop_instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
admin_daemon "${cluster}:${loop_instance}" $@ && return 0
done
fi
done
return 1
}
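# Illustrative usage (not executed): without an explicit instance the command
# is retried against every local rbd-mirror instance until one accepts it:
#
#   admin_daemons cluster1 rbd mirror status mirror/test     # any instance
#   admin_daemons cluster1:0 rbd mirror status mirror/test   # instance 0 only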
all_admin_daemons()
{
local cluster=$1 ; shift
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
admin_daemon "${cluster}:${instance}" $@
done
}
status()
{
local cluster daemon image_pool image_ns image
for cluster in ${CLUSTER1} ${CLUSTER2}
do
echo "${cluster} status"
CEPH_ARGS='' ceph --cluster ${cluster} -s
CEPH_ARGS='' ceph --cluster ${cluster} service dump
CEPH_ARGS='' ceph --cluster ${cluster} service status
echo
for image_pool in ${POOL} ${PARENT_POOL}
do
for image_ns in "" "${NS1}" "${NS2}"
do
echo "${cluster} ${image_pool} ${image_ns} images"
rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" ls -l
echo
echo "${cluster} ${image_pool}${image_ns} mirror pool info"
rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" mirror pool info
echo
echo "${cluster} ${image_pool}${image_ns} mirror pool status"
CEPH_ARGS='' rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" mirror pool status --verbose
echo
for image in `rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" ls 2>/dev/null`
do
echo "image ${image} info"
rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" info ${image}
echo
echo "image ${image} journal status"
rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" journal status --image ${image}
echo
echo "image ${image} snapshots"
rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" snap ls --all ${image}
echo
done
echo "${cluster} ${image_pool} ${image_ns} rbd_mirroring omap vals"
rados --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" listomapvals rbd_mirroring
echo "${cluster} ${image_pool} ${image_ns} rbd_mirror_leader omap vals"
rados --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" listomapvals rbd_mirror_leader
echo
done
done
done
    local ret=0
for cluster in "${CLUSTER1}" "${CLUSTER2}"
do
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
local pid_file=$(daemon_pid_file ${cluster}:${instance})
if [ ! -e ${pid_file} ]
then
echo "${cluster} rbd-mirror not running or unknown" \
"(${pid_file} not exist)"
continue
fi
local pid
pid=$(cat ${pid_file} 2>/dev/null) || :
if [ -z "${pid}" ]
then
echo "${cluster} rbd-mirror not running or unknown" \
"(can't find pid using ${pid_file})"
ret=1
continue
fi
echo "${daemon} rbd-mirror process in ps output:"
if ps auxww |
awk -v pid=${pid} 'NR == 1 {print} $2 == pid {print; exit 1}'
then
echo
echo "${cluster} rbd-mirror not running" \
"(can't find pid $pid in ps output)"
ret=1
continue
fi
echo
local asok_file=$(daemon_asok_file ${cluster}:${instance} ${cluster})
if [ ! -S "${asok_file}" ]
then
echo "${cluster} rbd-mirror asok is unknown (${asok_file} not exits)"
ret=1
continue
fi
echo "${cluster} rbd-mirror status"
ceph --admin-daemon ${asok_file} rbd mirror status
echo
done
done
return ${ret}
}
flush()
{
local cluster=$1
local pool=$2
local image=$3
local cmd="rbd mirror flush"
if [ -n "${image}" ]
then
cmd="${cmd} ${pool}/${image}"
fi
admin_daemons "${cluster}" ${cmd}
}
test_image_replay_state()
{
local cluster=$1
local pool=$2
local image=$3
local test_state=$4
local status_result
local current_state=stopped
status_result=$(admin_daemons "${cluster}" rbd mirror status ${pool}/${image} | grep -i 'state') || return 1
echo "${status_result}" | grep -i 'Replaying' && current_state=started
test "${test_state}" = "${current_state}"
}
wait_for_image_replay_state()
{
local cluster=$1
local pool=$2
local image=$3
local state=$4
local s
# TODO: add a way to force rbd-mirror to update replayers
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
sleep ${s}
test_image_replay_state "${cluster}" "${pool}" "${image}" "${state}" && return 0
done
return 1
}
wait_for_image_replay_started()
{
local cluster=$1
local pool=$2
local image=$3
wait_for_image_replay_state "${cluster}" "${pool}" "${image}" started
}
wait_for_image_replay_stopped()
{
local cluster=$1
local pool=$2
local image=$3
wait_for_image_replay_state "${cluster}" "${pool}" "${image}" stopped
}
get_journal_position()
{
local cluster=$1
local pool=$2
local image=$3
local id_regexp=$4
    # Parse a line like the one below, looking for the first position
# [id=, commit_position=[positions=[[object_number=1, tag_tid=3, entry_tid=9], [object_number=0, tag_tid=3, entry_tid=8], [object_number=3, tag_tid=3, entry_tid=7], [object_number=2, tag_tid=3, entry_tid=6]]]]
local status_log=${TEMPDIR}/$(mkfname ${CLUSTER2}-${pool}-${image}.status)
rbd --cluster ${cluster} journal status --image ${pool}/${image} |
tee ${status_log} >&2
sed -nEe 's/^.*\[id='"${id_regexp}"',.*positions=\[\[([^]]*)\],.*state=connected.*$/\1/p' \
${status_log}
}
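# For the sample status line above (and assuming the client is in connected
# state), the extracted first position would be:
#
#   object_number=1, tag_tid=3, entry_tid=9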
get_master_journal_position()
{
local cluster=$1
local pool=$2
local image=$3
get_journal_position "${cluster}" "${pool}" "${image}" ''
}
get_mirror_journal_position()
{
local cluster=$1
local pool=$2
local image=$3
get_journal_position "${cluster}" "${pool}" "${image}" '..*'
}
wait_for_journal_replay_complete()
{
local local_cluster=$1
local cluster=$2
local pool=$3
local image=$4
local s master_pos mirror_pos last_mirror_pos
local master_tag master_entry mirror_tag mirror_entry
while true; do
for s in 0.2 0.4 0.8 1.6 2 2 4 4 8 8 16 16 32 32; do
sleep ${s}
flush "${local_cluster}" "${pool}" "${image}"
master_pos=$(get_master_journal_position "${cluster}" "${pool}" "${image}")
mirror_pos=$(get_mirror_journal_position "${cluster}" "${pool}" "${image}")
test -n "${master_pos}" -a "${master_pos}" = "${mirror_pos}" && return 0
test "${mirror_pos}" != "${last_mirror_pos}" && break
done
test "${mirror_pos}" = "${last_mirror_pos}" && return 1
last_mirror_pos="${mirror_pos}"
# handle the case where the mirror is ahead of the master
master_tag=$(echo "${master_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
mirror_tag=$(echo "${mirror_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
master_entry=$(echo "${master_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
mirror_entry=$(echo "${mirror_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
test "${master_tag}" = "${mirror_tag}" -a ${master_entry} -le ${mirror_entry} && return 0
done
return 1
}
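# Illustrative outcome of the ahead-of-master check above (hypothetical
# positions): with the master at tag_tid=3, entry_tid=9 and the mirror at
# tag_tid=3, entry_tid=10, the tags match and master_entry <= mirror_entry,
# so replay is considered complete.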
mirror_image_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster "${cluster}" mirror image snapshot "${pool}/${image}"
}
get_newest_mirror_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local log=$4
rbd --cluster "${cluster}" snap list --all "${pool}/${image}" --format xml | \
        $XMLSTARLET sel -t -c "//snapshots/snapshot[namespace/complete='true' and position()=last()]" > \
${log} || true
}
wait_for_snapshot_sync_complete()
{
local local_cluster=$1
local cluster=$2
local pool=$3
local image=$4
local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}-${image}.status)
local local_status_log=${TEMPDIR}/$(mkfname ${local_cluster}-${pool}-${image}.status)
mirror_image_snapshot "${cluster}" "${pool}" "${image}"
get_newest_mirror_snapshot "${cluster}" "${pool}" "${image}" "${status_log}"
    local snapshot_id=$($XMLSTARLET sel -t -v "//snapshot/id" < ${status_log})
    for s in 0.2 0.4 0.8 1.6 2 2 4 4 8 8 16 16 32 32; do
        sleep ${s}
        get_newest_mirror_snapshot "${local_cluster}" "${pool}" "${image}" "${local_status_log}"
        local primary_snapshot_id=$($XMLSTARLET sel -t -v "//snapshot/namespace/primary_snap_id" < ${local_status_log})
        test "${snapshot_id}" = "${primary_snapshot_id}" && return 0
    done
    return 1
}
wait_for_replay_complete()
{
local local_cluster=$1
local cluster=$2
local pool=$3
local image=$4
if [ "${MIRROR_IMAGE_MODE}" = "journal" ]; then
wait_for_journal_replay_complete ${local_cluster} ${cluster} ${pool} ${image}
elif [ "${MIRROR_IMAGE_MODE}" = "snapshot" ]; then
wait_for_snapshot_sync_complete ${local_cluster} ${cluster} ${pool} ${image}
else
return 1
fi
}
test_status_in_pool_dir()
{
local cluster=$1
local pool=$2
local image=$3
local state_pattern="$4"
local description_pattern="$5"
local service_pattern="$6"
local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}-${image}.mirror_status)
CEPH_ARGS='' rbd --cluster ${cluster} mirror image status ${pool}/${image} |
tee ${status_log} >&2
grep "^ state: .*${state_pattern}" ${status_log} || return 1
grep "^ description: .*${description_pattern}" ${status_log} || return 1
if [ -n "${service_pattern}" ]; then
grep "service: *${service_pattern}" ${status_log} || return 1
elif echo ${state_pattern} | grep '^up+'; then
grep "service: *${MIRROR_USER_ID_PREFIX}.* on " ${status_log} || return 1
else
grep "service: " ${status_log} && return 1
fi
# recheck using `mirror pool status` command to stress test it.
local last_update="$(sed -nEe 's/^ last_update: *(.*) *$/\1/p' ${status_log})"
test_mirror_pool_status_verbose \
${cluster} ${pool} ${image} "${state_pattern}" "${last_update}" &&
return 0
echo "'mirror pool status' test failed" >&2
exit 1
}
test_mirror_pool_status_verbose()
{
local cluster=$1
local pool=$2
local image=$3
local state_pattern="$4"
local prev_last_update="$5"
local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}.mirror_status)
rbd --cluster ${cluster} mirror pool status ${pool} --verbose --format xml \
> ${status_log}
local last_update state
last_update=$($XMLSTARLET sel -t -v \
"//images/image[name='${image}']/last_update" < ${status_log})
state=$($XMLSTARLET sel -t -v \
"//images/image[name='${image}']/state" < ${status_log})
echo "${state}" | grep "${state_pattern}" ||
test "${last_update}" '>' "${prev_last_update}"
}
wait_for_status_in_pool_dir()
{
local cluster=$1
local pool=$2
local image=$3
local state_pattern="$4"
local description_pattern="$5"
local service_pattern="$6"
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
sleep ${s}
test_status_in_pool_dir ${cluster} ${pool} ${image} "${state_pattern}" \
"${description_pattern}" "${service_pattern}" &&
return 0
done
return 1
}
create_image()
{
local cluster=$1 ; shift
local pool=$1 ; shift
local image=$1 ; shift
local size=128
if [ -n "$1" ]; then
size=$1
shift
fi
rbd --cluster ${cluster} create --size ${size} \
--image-feature "${RBD_IMAGE_FEATURES}" $@ ${pool}/${image}
}
create_image_and_enable_mirror()
{
local cluster=$1 ; shift
local pool=$1 ; shift
local image=$1 ; shift
local mode=${1:-${MIRROR_IMAGE_MODE}}
if [ -n "$1" ]; then
shift
fi
create_image ${cluster} ${pool} ${image} $@
if [ "${MIRROR_POOL_MODE}" = "image" ] || [ "$pool" = "${PARENT_POOL}" ]; then
enable_mirror ${cluster} ${pool} ${image} ${mode}
fi
}
enable_journaling()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster ${cluster} feature enable ${pool}/${image} journaling
}
set_image_meta()
{
local cluster=$1
local pool=$2
local image=$3
local key=$4
local val=$5
rbd --cluster ${cluster} image-meta set ${pool}/${image} $key $val
}
compare_image_meta()
{
local cluster=$1
local pool=$2
local image=$3
local key=$4
local value=$5
test `rbd --cluster ${cluster} image-meta get ${pool}/${image} ${key}` = "${value}"
}
rename_image()
{
local cluster=$1
local pool=$2
local image=$3
local new_name=$4
rbd --cluster=${cluster} rename ${pool}/${image} ${pool}/${new_name}
}
remove_image()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster=${cluster} snap purge ${pool}/${image}
rbd --cluster=${cluster} rm ${pool}/${image}
}
remove_image_retry()
{
local cluster=$1
local pool=$2
local image=$3
for s in 0 1 2 4 8 16 32; do
sleep ${s}
remove_image ${cluster} ${pool} ${image} && return 0
done
return 1
}
trash_move() {
local cluster=$1
local pool=$2
local image=$3
rbd --cluster=${cluster} trash move ${pool}/${image}
}
trash_restore() {
local cluster=$1
local pool=$2
local image_id=$3
rbd --cluster=${cluster} trash restore ${pool}/${image_id}
}
clone_image()
{
local cluster=$1
local parent_pool=$2
local parent_image=$3
local parent_snap=$4
local clone_pool=$5
local clone_image=$6
shift 6
rbd --cluster ${cluster} clone \
${parent_pool}/${parent_image}@${parent_snap} \
${clone_pool}/${clone_image} --image-feature "${RBD_IMAGE_FEATURES}" $@
}
clone_image_and_enable_mirror()
{
local cluster=$1
local parent_pool=$2
local parent_image=$3
local parent_snap=$4
local clone_pool=$5
local clone_image=$6
shift 6
local mode=${1:-${MIRROR_IMAGE_MODE}}
if [ -n "$1" ]; then
shift
fi
clone_image ${cluster} ${parent_pool} ${parent_image} ${parent_snap} ${clone_pool} ${clone_image} $@
enable_mirror ${cluster} ${clone_pool} ${clone_image} ${mode}
}
disconnect_image()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster ${cluster} journal client disconnect \
--image ${pool}/${image}
}
create_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
rbd --cluster ${cluster} snap create ${pool}/${image}@${snap}
}
remove_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
rbd --cluster ${cluster} snap rm ${pool}/${image}@${snap}
}
rename_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
local new_snap=$5
rbd --cluster ${cluster} snap rename ${pool}/${image}@${snap} \
${pool}/${image}@${new_snap}
}
purge_snapshots()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster ${cluster} snap purge ${pool}/${image}
}
protect_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
rbd --cluster ${cluster} snap protect ${pool}/${image}@${snap}
}
unprotect_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
rbd --cluster ${cluster} snap unprotect ${pool}/${image}@${snap}
}
unprotect_snapshot_retry()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
for s in 0 1 2 4 8 16 32; do
sleep ${s}
unprotect_snapshot ${cluster} ${pool} ${image} ${snap} && return 0
done
return 1
}
wait_for_snap_present()
{
local cluster=$1
local pool=$2
local image=$3
local snap_name=$4
local s
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
sleep ${s}
rbd --cluster ${cluster} info ${pool}/${image}@${snap_name} || continue
return 0
done
return 1
}
test_snap_moved_to_trash()
{
local cluster=$1
local pool=$2
local image=$3
local snap_name=$4
rbd --cluster ${cluster} snap ls ${pool}/${image} --all |
grep -F " trash (${snap_name})"
}
wait_for_snap_moved_to_trash()
{
local s
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
sleep ${s}
test_snap_moved_to_trash $@ || continue
return 0
done
return 1
}
test_snap_removed_from_trash()
{
test_snap_moved_to_trash $@ && return 1
return 0
}
wait_for_snap_removed_from_trash()
{
local s
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
sleep ${s}
test_snap_removed_from_trash $@ || continue
return 0
done
return 1
}
write_image()
{
local cluster=$1
local pool=$2
local image=$3
local count=$4
local size=$5
test -n "${size}" || size=4096
rbd --cluster ${cluster} bench ${pool}/${image} --io-type write \
--io-size ${size} --io-threads 1 --io-total $((size * count)) \
--io-pattern rand
}
stress_write_image()
{
local cluster=$1
local pool=$2
local image=$3
local duration=$(awk 'BEGIN {srand(); print int(10 * rand()) + 5}')
set +e
timeout ${duration}s ceph_test_rbd_mirror_random_write \
--cluster ${cluster} ${pool} ${image} \
--debug-rbd=20 --debug-journaler=20 \
2> ${TEMPDIR}/rbd-mirror-random-write.log
error_code=$?
set -e
if [ $error_code -eq 124 ]; then
return 0
fi
return 1
}
show_diff()
{
local file1=$1
local file2=$2
xxd ${file1} > ${file1}.xxd
xxd ${file2} > ${file2}.xxd
sdiff -s ${file1}.xxd ${file2}.xxd | head -n 64
rm -f ${file1}.xxd ${file2}.xxd
}
compare_images()
{
local pool=$1
local image=$2
local ret=0
local rmt_export=${TEMPDIR}/$(mkfname ${CLUSTER2}-${pool}-${image}.export)
local loc_export=${TEMPDIR}/$(mkfname ${CLUSTER1}-${pool}-${image}.export)
rm -f ${rmt_export} ${loc_export}
rbd --cluster ${CLUSTER2} export ${pool}/${image} ${rmt_export}
rbd --cluster ${CLUSTER1} export ${pool}/${image} ${loc_export}
if ! cmp ${rmt_export} ${loc_export}
then
show_diff ${rmt_export} ${loc_export}
ret=1
fi
rm -f ${rmt_export} ${loc_export}
return ${ret}
}
compare_image_snapshots()
{
local pool=$1
local image=$2
local ret=0
local rmt_export=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.export
local loc_export=${TEMPDIR}/${CLUSTER1}-${pool}-${image}.export
for snap_name in $(rbd --cluster ${CLUSTER1} --format xml \
snap list ${pool}/${image} | \
$XMLSTARLET sel -t -v "//snapshot/name" | \
grep -E -v "^\.rbd-mirror\."); do
rm -f ${rmt_export} ${loc_export}
rbd --cluster ${CLUSTER2} export ${pool}/${image}@${snap_name} ${rmt_export}
rbd --cluster ${CLUSTER1} export ${pool}/${image}@${snap_name} ${loc_export}
if ! cmp ${rmt_export} ${loc_export}
then
show_diff ${rmt_export} ${loc_export}
ret=1
fi
done
rm -f ${rmt_export} ${loc_export}
return ${ret}
}
demote_image()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster=${cluster} mirror image demote ${pool}/${image}
}
promote_image()
{
local cluster=$1
local pool=$2
local image=$3
local force=$4
rbd --cluster=${cluster} mirror image promote ${pool}/${image} ${force}
}
set_pool_mirror_mode()
{
local cluster=$1
local pool=$2
local mode=${3:-${MIRROR_POOL_MODE}}
rbd --cluster=${cluster} mirror pool enable ${pool} ${mode}
}
disable_mirror()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster=${cluster} mirror image disable ${pool}/${image}
}
enable_mirror()
{
local cluster=$1
local pool=$2
local image=$3
local mode=${4:-${MIRROR_IMAGE_MODE}}
rbd --cluster=${cluster} mirror image enable ${pool}/${image} ${mode}
# Display image info including the global image id for debugging purpose
rbd --cluster=${cluster} info ${pool}/${image}
}
test_image_present()
{
local cluster=$1
local pool=$2
local image=$3
local test_state=$4
local image_id=$5
local current_state=deleted
local current_image_id
current_image_id=$(get_image_id ${cluster} ${pool} ${image})
test -n "${current_image_id}" &&
test -z "${image_id}" -o "${image_id}" = "${current_image_id}" &&
current_state=present
test "${test_state}" = "${current_state}"
}
wait_for_image_present()
{
local cluster=$1
local pool=$2
local image=$3
local state=$4
local image_id=$5
local s
test -n "${image_id}" ||
image_id=$(get_image_id ${cluster} ${pool} ${image})
# TODO: add a way to force rbd-mirror to update replayers
for s in 0.1 1 2 4 8 8 8 8 8 8 8 8 16 16 32 32; do
sleep ${s}
test_image_present \
"${cluster}" "${pool}" "${image}" "${state}" "${image_id}" &&
return 0
done
return 1
}
get_image_id()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster=${cluster} info ${pool}/${image} |
sed -ne 's/^.*block_name_prefix: rbd_data\.//p'
}
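# Illustrative example (not executed; the id is hypothetical): for an image
# whose "rbd info" output contains
#
#   block_name_prefix: rbd_data.10ab6b8b4567
#
# this prints "10ab6b8b4567", i.e. the internal image id.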
request_resync_image()
{
local cluster=$1
local pool=$2
local image=$3
local image_id_var_name=$4
eval "${image_id_var_name}='$(get_image_id ${cluster} ${pool} ${image})'"
eval 'test -n "$'${image_id_var_name}'"'
rbd --cluster=${cluster} mirror image resync ${pool}/${image}
}
get_image_data_pool()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster ${cluster} info ${pool}/${image} |
awk '$1 == "data_pool:" {print $2}'
}
get_clone_format()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster ${cluster} info ${pool}/${image} |
awk 'BEGIN {
format = 1
}
$1 == "parent:" {
parent = $2
}
/op_features: .*clone-child/ {
format = 2
}
END {
if (!parent) exit 1
print format
}'
}
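# Illustrative usage (as in the clone tests below): v2 clones carry the
# clone-child op_feature while v1 clones do not, so the printed format can be
# asserted directly:
#
#   test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v2) = 2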
list_omap_keys()
{
local cluster=$1
local pool=$2
local obj_name=$3
rados --cluster ${cluster} -p ${pool} listomapkeys ${obj_name}
}
count_omap_keys_with_filter()
{
local cluster=$1
local pool=$2
local obj_name=$3
local filter=$4
list_omap_keys ${cluster} ${pool} ${obj_name} | grep -c ${filter}
}
wait_for_omap_keys()
{
local cluster=$1
local pool=$2
local obj_name=$3
local filter=$4
for s in 0 1 2 2 4 4 8 8 8 16 16 32; do
sleep $s
set +e
test "$(count_omap_keys_with_filter ${cluster} ${pool} ${obj_name} ${filter})" = 0
error_code=$?
set -e
if [ $error_code -eq 0 ]; then
return 0
fi
done
return 1
}
wait_for_image_in_omap()
{
local cluster=$1
local pool=$2
wait_for_omap_keys ${cluster} ${pool} rbd_mirroring status_global
wait_for_omap_keys ${cluster} ${pool} rbd_mirroring image_
wait_for_omap_keys ${cluster} ${pool} rbd_mirror_leader image_map
}
#
# Main
#
if [ "$#" -gt 0 ]
then
if [ -z "${RBD_MIRROR_TEMDIR}" ]
then
echo "RBD_MIRROR_TEMDIR is not set" >&2
exit 1
fi
TEMPDIR="${RBD_MIRROR_TEMDIR}"
cd ${TEMPDIR}
$@
exit $?
fi
# ceph-main/qa/workunits/rbd/rbd_mirror_journal.sh
#!/bin/sh -ex
#
# rbd_mirror_journal.sh - test rbd-mirror daemon in journal-based mirroring mode
#
# The script starts two ("local" and "remote") clusters using the mstart.sh script,
# creates a temporary directory used for cluster configs, daemon logs, admin
# sockets, and temporary files, and launches rbd-mirror daemons.
#
. $(dirname $0)/rbd_mirror_helpers.sh
setup
testlog "TEST: add image and test replay"
start_mirrors ${CLUSTER1}
image=test
create_image ${CLUSTER2} ${POOL} ${image}
set_image_meta ${CLUSTER2} ${POOL} ${image} "key1" "value1"
set_image_meta ${CLUSTER2} ${POOL} ${image} "key2" "value2"
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'down+unknown'
fi
compare_images ${POOL} ${image}
compare_image_meta ${CLUSTER1} ${POOL} ${image} "key1" "value1"
compare_image_meta ${CLUSTER1} ${POOL} ${image} "key2" "value2"
testlog "TEST: stop mirror, add image, start mirror and test replay"
stop_mirrors ${CLUSTER1}
image1=test1
create_image ${CLUSTER2} ${POOL} ${image1}
write_image ${CLUSTER2} ${POOL} ${image1} 100
start_mirrors ${CLUSTER1}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying' 'primary_position'
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image1} 'down+unknown'
fi
compare_images ${POOL} ${image1}
testlog "TEST: test the first image is replaying after restart"
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
testlog "TEST: stop/start/restart mirror via admin socket"
all_admin_daemons ${CLUSTER1} rbd mirror stop
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror start
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror restart
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror stop
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror restart
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
flush ${CLUSTER1}
all_admin_daemons ${CLUSTER1} rbd mirror status
fi
remove_image_retry ${CLUSTER2} ${POOL} ${image1}
testlog "TEST: test image rename"
new_name="${image}_RENAMED"
rename_image ${CLUSTER2} ${POOL} ${image} ${new_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
admin_daemons ${CLUSTER1} rbd mirror status ${POOL}/${new_name}
admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${new_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
rename_image ${CLUSTER2} ${POOL} ${new_name} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: test trash move restore"
image_id=$(get_image_id ${CLUSTER2} ${POOL} ${image})
trash_move ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
trash_restore ${CLUSTER2} ${POOL} ${image_id}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: check if removed images' OMAP are removed (with rbd-mirror on one cluster)"
remove_image_retry ${CLUSTER2} ${POOL} ${image}
wait_for_image_in_omap ${CLUSTER1} ${POOL}
wait_for_image_in_omap ${CLUSTER2} ${POOL}
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
testlog "TEST: failover and failback"
start_mirrors ${CLUSTER2}
# demote and promote same cluster
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
# failover (unmodified)
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
# failback (unmodified)
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
compare_images ${POOL} ${image}
# failover
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
write_image ${CLUSTER1} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
# failback
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
compare_images ${POOL} ${image}
# force promote
force_promote_image=test_force_promote
create_image ${CLUSTER2} ${POOL} ${force_promote_image}
write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${force_promote_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+replaying' 'primary_position'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
promote_image ${CLUSTER1} ${POOL} ${force_promote_image} '--force'
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${force_promote_image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
write_image ${CLUSTER1} ${POOL} ${force_promote_image} 100
write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
remove_image_retry ${CLUSTER1} ${POOL} ${force_promote_image}
remove_image_retry ${CLUSTER2} ${POOL} ${force_promote_image}
testlog "TEST: cloned images"
testlog " - default"
parent_image=test_parent
parent_snap=snap
create_image ${CLUSTER2} ${PARENT_POOL} ${parent_image}
write_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} 100
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
protect_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
clone_image=test_clone
clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}
write_image ${CLUSTER2} ${POOL} ${clone_image} 100
enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} journal
wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} ${parent_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} ${parent_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${PARENT_POOL} ${parent_image} 'up+replaying' 'primary_position'
compare_images ${PARENT_POOL} ${parent_image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${clone_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${clone_image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${clone_image}
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}
testlog " - clone v1"
clone_image ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}1
clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
${clone_image}_v1 --rbd-default-clone-format 1
test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v1) = 1
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v1
test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v1) = 1
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v1
remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}1
unprotect_snapshot_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
testlog " - clone v2"
parent_snap=snap_v2
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
${clone_image}_v2 --rbd-default-clone-format 2
test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v2) = 2
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v2
test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v2) = 2
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
test_snap_moved_to_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v2
wait_for_image_present ${CLUSTER1} ${POOL} ${clone_image}_v2 'deleted'
test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
testlog " - clone v2 non-primary"
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_present ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
clone_image ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
${clone_image}_v2 --rbd-default-clone-format 2
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}_v2
wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image}
testlog "TEST: data pool"
dp_image=test_data_pool
create_image ${CLUSTER2} ${POOL} ${dp_image} 128 --data-pool ${PARENT_POOL}
data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${dp_image}
data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap1'
write_image ${CLUSTER2} ${POOL} ${dp_image} 100
create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap2'
write_image ${CLUSTER2} ${POOL} ${dp_image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${dp_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${dp_image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${dp_image}@snap1
compare_images ${POOL} ${dp_image}@snap2
compare_images ${POOL} ${dp_image}
remove_image_retry ${CLUSTER2} ${POOL} ${dp_image}
testlog "TEST: disable mirroring / delete non-primary image"
image2=test2
image3=test3
image4=test4
image5=test5
for i in ${image2} ${image3} ${image4} ${image5}; do
create_image ${CLUSTER2} ${POOL} ${i}
write_image ${CLUSTER2} ${POOL} ${i} 100
create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
if [ "${i}" = "${image4}" ] || [ "${i}" = "${image5}" ]; then
protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
fi
write_image ${CLUSTER2} ${POOL} ${i} 100
wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
done
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
for i in ${image2} ${image4}; do
disable_mirror ${CLUSTER2} ${POOL} ${i}
done
unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap1'
unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap2'
for i in ${image3} ${image5}; do
remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
remove_image_retry ${CLUSTER2} ${POOL} ${i}
done
for i in ${image2} ${image3} ${image4} ${image5}; do
wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'deleted'
done
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'pool'
for i in ${image2} ${image4}; do
enable_journaling ${CLUSTER2} ${POOL} ${i}
wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${i}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${i}
compare_images ${POOL} ${i}
done
testlog "TEST: remove mirroring pool"
pool=pool_to_remove
for cluster in ${CLUSTER1} ${CLUSTER2}; do
CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${pool} 16 16
CEPH_ARGS='' rbd --cluster ${cluster} pool init ${pool}
rbd --cluster ${cluster} mirror pool enable ${pool} pool
done
peer_add ${CLUSTER1} ${pool} ${CLUSTER2}
peer_add ${CLUSTER2} ${pool} ${CLUSTER1}
rdp_image=test_remove_data_pool
create_image ${CLUSTER2} ${pool} ${image} 128
create_image ${CLUSTER2} ${POOL} ${rdp_image} 128 --data-pool ${pool}
write_image ${CLUSTER2} ${pool} ${image} 100
write_image ${CLUSTER2} ${POOL} ${rdp_image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${pool} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${pool} ${image} 'up+replaying' 'primary_position'
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${rdp_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${rdp_image} 'up+replaying' 'primary_position'
for cluster in ${CLUSTER1} ${CLUSTER2}; do
CEPH_ARGS='' ceph --cluster ${cluster} osd pool rm ${pool} ${pool} --yes-i-really-really-mean-it
done
remove_image_retry ${CLUSTER2} ${POOL} ${rdp_image}
wait_for_image_present ${CLUSTER1} ${POOL} ${rdp_image} 'deleted'
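# with the pool gone, the daemon should eventually drop its status for the image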
for i in 0 1 2 4 8 8 8 8 16 16; do
sleep $i
admin_daemons "${CLUSTER2}" rbd mirror status ${pool}/${image} || break
done
admin_daemons "${CLUSTER2}" rbd mirror status ${pool}/${image} && false
testlog "TEST: snapshot rename"
snap_name='snap_rename'
create_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_0"
for i in `seq 1 20`; do
rename_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_$(expr ${i} - 1)" "${snap_name}_${i}"
done
wait_for_snap_present ${CLUSTER1} ${POOL} ${image2} "${snap_name}_${i}"
unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap1'
unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap2'
for i in ${image2} ${image4}; do
remove_image_retry ${CLUSTER2} ${POOL} ${i}
done
testlog "TEST: disable mirror while daemon is stopped"
stop_mirrors ${CLUSTER1}
stop_mirrors ${CLUSTER2}
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
disable_mirror ${CLUSTER2} ${POOL} ${image}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
test_image_present ${CLUSTER1} ${POOL} ${image} 'present'
fi
start_mirrors ${CLUSTER1}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'pool'
enable_journaling ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: non-default namespace image mirroring"
testlog " - replay"
create_image ${CLUSTER2} ${POOL}/${NS1} ${image}
create_image ${CLUSTER2} ${POOL}/${NS2} ${image}
enable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image} journal
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS2} ${image}
write_image ${CLUSTER2} ${POOL}/${NS1} ${image} 100
write_image ${CLUSTER2} ${POOL}/${NS2} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS2} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${image} 'up+replaying' 'primary_position'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS2} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL}/${NS1} ${image}
compare_images ${POOL}/${NS2} ${image}
testlog " - disable mirroring / delete image"
remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${image}
disable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
wait_for_image_present ${CLUSTER1} ${POOL}/${NS1} ${image} 'deleted'
wait_for_image_present ${CLUSTER1} ${POOL}/${NS2} ${image} 'deleted'
remove_image_retry ${CLUSTER2} ${POOL}/${NS2} ${image}
testlog " - data pool"
dp_image=test_data_pool
create_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 128 --data-pool ${PARENT_POOL}
data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL}/${NS1} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${dp_image}
data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL}/${NS1} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
write_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${dp_image} 'up+replaying' 'primary_position'
compare_images ${POOL}/${NS1} ${dp_image}
remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
testlog "TEST: simple image resync"
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
testlog "TEST: image resync while replayer is stopped"
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
fi
testlog "TEST: request image resync while daemon is offline"
stop_mirrors ${CLUSTER1}
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
start_mirrors ${CLUSTER1}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
remove_image_retry ${CLUSTER2} ${POOL} ${image}
testlog "TEST: client disconnect"
image=laggy
create_image ${CLUSTER2} ${POOL} ${image} 128 --journal-object-size 64K
write_image ${CLUSTER2} ${POOL} ${image} 10
testlog " - replay stopped after disconnect"
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
disconnect_image ${CLUSTER2} ${POOL} ${image}
test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
testlog " - replay started after resync requested"
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
compare_images ${POOL} ${image}
testlog " - disconnected after max_concurrent_object_sets reached"
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
set_image_meta ${CLUSTER2} ${POOL} ${image} \
conf_rbd_journal_max_concurrent_object_sets 1
write_image ${CLUSTER2} ${POOL} ${image} 20 16384
write_image ${CLUSTER2} ${POOL} ${image} 20 16384
test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
set_image_meta ${CLUSTER2} ${POOL} ${image} \
conf_rbd_journal_max_concurrent_object_sets 0
testlog " - replay is still stopped (disconnected) after restart"
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
fi
testlog " - replay started after resync requested"
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
compare_images ${POOL} ${image}
testlog " - rbd_mirroring_resync_after_disconnect config option"
set_image_meta ${CLUSTER2} ${POOL} ${image} \
conf_rbd_mirroring_resync_after_disconnect true
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
image_id=$(get_image_id ${CLUSTER1} ${POOL} ${image})
disconnect_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
compare_images ${POOL} ${image}
set_image_meta ${CLUSTER2} ${POOL} ${image} \
conf_rbd_mirroring_resync_after_disconnect false
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
disconnect_image ${CLUSTER2} ${POOL} ${image}
test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
remove_image_retry ${CLUSTER2} ${POOL} ${image}
testlog "TEST: split-brain"
image=split-brain
create_image ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
promote_image ${CLUSTER1} ${POOL} ${image} --force
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
write_image ${CLUSTER1} ${POOL} ${image} 10
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain'
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
remove_image_retry ${CLUSTER2} ${POOL} ${image}
testlog "TEST: check if removed images' OMAP are removed"
start_mirrors ${CLUSTER2}
wait_for_image_in_omap ${CLUSTER1} ${POOL}
wait_for_image_in_omap ${CLUSTER2} ${POOL}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
# teuthology will trash the daemon
testlog "TEST: no blocklists"
CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
fi
| 29,142 | 48.145025 | 104 | sh |
null | ceph-main/qa/workunits/rbd/rbd_mirror_snapshot.sh | #!/bin/sh -ex
#
# rbd_mirror_snapshot.sh - test rbd-mirror daemon in snapshot-based mirroring mode
#
# The script starts two ("local" and "remote") clusters using the mstart.sh script,
# creates a temporary directory, used for cluster configs, daemon logs, admin
# socket, temporary files, and launches rbd-mirror daemon.
#
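# A typical invocation, assuming a local build tree set up for mstart.sh
# (paths illustrative):
#
#   cd $CEPH_BUILD_DIR
#   ../qa/workunits/rbd/rbd_mirror_snapshot.sh
#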
MIRROR_POOL_MODE=image
MIRROR_IMAGE_MODE=snapshot
. $(dirname $0)/rbd_mirror_helpers.sh
setup
testlog "TEST: add image and test replay"
start_mirrors ${CLUSTER1}
image=test
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
set_image_meta ${CLUSTER2} ${POOL} ${image} "key1" "value1"
set_image_meta ${CLUSTER2} ${POOL} ${image} "key2" "value2"
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'down+unknown'
fi
compare_images ${POOL} ${image}
compare_image_meta ${CLUSTER1} ${POOL} ${image} "key1" "value1"
compare_image_meta ${CLUSTER1} ${POOL} ${image} "key2" "value2"
testlog "TEST: stop mirror, add image, start mirror and test replay"
stop_mirrors ${CLUSTER1}
image1=test1
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image1}
write_image ${CLUSTER2} ${POOL} ${image1} 100
start_mirrors ${CLUSTER1}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image1} 'down+unknown'
fi
compare_images ${POOL} ${image1}
testlog "TEST: test the first image is replaying after restart"
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
testlog "TEST: stop/start/restart mirror via admin socket"
all_admin_daemons ${CLUSTER1} rbd mirror stop
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror start
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror restart
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror stop
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror restart
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
flush ${CLUSTER1}
all_admin_daemons ${CLUSTER1} rbd mirror status
fi
remove_image_retry ${CLUSTER2} ${POOL} ${image1}
testlog "TEST: test image rename"
new_name="${image}_RENAMED"
rename_image ${CLUSTER2} ${POOL} ${image} ${new_name}
mirror_image_snapshot ${CLUSTER2} ${POOL} ${new_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
admin_daemons ${CLUSTER1} rbd mirror status ${POOL}/${new_name}
admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${new_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
rename_image ${CLUSTER2} ${POOL} ${new_name} ${image}
mirror_image_snapshot ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: test trash move restore"
image_id=$(get_image_id ${CLUSTER2} ${POOL} ${image})
trash_move ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
trash_restore ${CLUSTER2} ${POOL} ${image_id}
enable_mirror ${CLUSTER2} ${POOL} ${image} snapshot
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: check if removed images' OMAP are removed (with rbd-mirror on one cluster)"
remove_image_retry ${CLUSTER2} ${POOL} ${image}
wait_for_image_in_omap ${CLUSTER1} ${POOL}
wait_for_image_in_omap ${CLUSTER2} ${POOL}
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
testlog "TEST: failover and failback"
start_mirrors ${CLUSTER2}
# demote and promote same cluster
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
# failover (unmodified)
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
# failback (unmodified)
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
compare_images ${POOL} ${image}
# failover
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
write_image ${CLUSTER1} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
# failback
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
compare_images ${POOL} ${image}
# force promote
force_promote_image=test_force_promote
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${force_promote_image}
write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${force_promote_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
promote_image ${CLUSTER1} ${POOL} ${force_promote_image} '--force'
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${force_promote_image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
write_image ${CLUSTER1} ${POOL} ${force_promote_image} 100
write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
remove_image_retry ${CLUSTER1} ${POOL} ${force_promote_image}
remove_image_retry ${CLUSTER2} ${POOL} ${force_promote_image}
testlog "TEST: cloned images"
testlog " - default"
parent_image=test_parent
parent_snap=snap
create_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image}
write_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} 100
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
protect_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
clone_image=test_clone
clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}
write_image ${CLUSTER2} ${POOL} ${clone_image} 100
enable_mirror ${CLUSTER2} ${POOL} ${clone_image} snapshot
wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} ${parent_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} ${parent_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${PARENT_POOL} ${parent_image} 'up+replaying'
compare_images ${PARENT_POOL} ${parent_image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${clone_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${clone_image} 'up+replaying'
compare_images ${POOL} ${clone_image}
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}
testlog " - clone v1"
clone_image_and_enable_mirror ${CLUSTER1} ${PARENT_POOL} ${parent_image} \
${parent_snap} ${POOL} ${clone_image}1
clone_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} \
${parent_snap} ${POOL} ${clone_image}_v1 snapshot --rbd-default-clone-format 1
test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v1) = 1
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v1
test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v1) = 1
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v1
remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}1
unprotect_snapshot_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
testlog " - clone v2"
parent_snap=snap_v2
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
clone_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} \
${parent_snap} ${POOL} ${clone_image}_v2 snapshot --rbd-default-clone-format 2
test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v2) = 2
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v2
test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v2) = 2
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
test_snap_moved_to_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v2
wait_for_image_present ${CLUSTER1} ${POOL} ${clone_image}_v2 'deleted'
test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
testlog " - clone v2 non-primary"
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
wait_for_snap_present ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
clone_image_and_enable_mirror ${CLUSTER1} ${PARENT_POOL} ${parent_image} \
${parent_snap} ${POOL} ${clone_image}_v2 snapshot --rbd-default-clone-format 2
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}_v2
wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image}
testlog "TEST: data pool"
dp_image=test_data_pool
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${dp_image} snapshot 128 --data-pool ${PARENT_POOL}
data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${dp_image}
data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap1'
write_image ${CLUSTER2} ${POOL} ${dp_image} 100
create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap2'
write_image ${CLUSTER2} ${POOL} ${dp_image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${dp_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${dp_image} 'up+replaying'
compare_images ${POOL} ${dp_image}@snap1
compare_images ${POOL} ${dp_image}@snap2
compare_images ${POOL} ${dp_image}
remove_image_retry ${CLUSTER2} ${POOL} ${dp_image}
testlog "TEST: disable mirroring / delete non-primary image"
image2=test2
image3=test3
image4=test4
image5=test5
for i in ${image2} ${image3} ${image4} ${image5}; do
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${i}
write_image ${CLUSTER2} ${POOL} ${i} 100
create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
if [ "${i}" = "${image4}" ] || [ "${i}" = "${image5}" ]; then
protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
fi
write_image ${CLUSTER2} ${POOL} ${i} 100
mirror_image_snapshot ${CLUSTER2} ${POOL} ${i}
wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
done
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
for i in ${image2} ${image4}; do
disable_mirror ${CLUSTER2} ${POOL} ${i}
done
unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap1'
unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap2'
for i in ${image3} ${image5}; do
remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
remove_image_retry ${CLUSTER2} ${POOL} ${i}
done
for i in ${image2} ${image3} ${image4} ${image5}; do
wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'deleted'
done
testlog "TEST: snapshot rename"
snap_name='snap_rename'
enable_mirror ${CLUSTER2} ${POOL} ${image2}
create_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_0"
for i in `seq 1 20`; do
rename_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_$(expr ${i} - 1)" "${snap_name}_${i}"
done
mirror_image_snapshot ${CLUSTER2} ${POOL} ${image2}
wait_for_snap_present ${CLUSTER1} ${POOL} ${image2} "${snap_name}_${i}"
unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap1'
unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap2'
for i in ${image2} ${image4}; do
remove_image_retry ${CLUSTER2} ${POOL} ${i}
done
testlog "TEST: disable mirror while daemon is stopped"
stop_mirrors ${CLUSTER1}
stop_mirrors ${CLUSTER2}
disable_mirror ${CLUSTER2} ${POOL} ${image}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
test_image_present ${CLUSTER1} ${POOL} ${image} 'present'
fi
start_mirrors ${CLUSTER1}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
enable_mirror ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: non-default namespace image mirroring"
testlog " - replay"
create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS1} ${image}
create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS2} ${image}
write_image ${CLUSTER2} ${POOL}/${NS1} ${image} 100
write_image ${CLUSTER2} ${POOL}/${NS2} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS2} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS2} ${image} 'up+replaying'
compare_images ${POOL}/${NS1} ${image}
compare_images ${POOL}/${NS2} ${image}
testlog " - disable mirroring / delete image"
remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${image}
disable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
wait_for_image_present ${CLUSTER1} ${POOL}/${NS1} ${image} 'deleted'
wait_for_image_present ${CLUSTER1} ${POOL}/${NS2} ${image} 'deleted'
remove_image_retry ${CLUSTER2} ${POOL}/${NS2} ${image}
testlog " - data pool"
dp_image=test_data_pool
create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS1} ${dp_image} snapshot 128 --data-pool ${PARENT_POOL}
data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL}/${NS1} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${dp_image}
data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL}/${NS1} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
write_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${dp_image} 'up+replaying'
compare_images ${POOL}/${NS1} ${dp_image}
remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
testlog "TEST: simple image resync"
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
testlog "TEST: image resync while replayer is stopped"
admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
fi
testlog "TEST: request image resync while daemon is offline"
stop_mirrors ${CLUSTER1}
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
start_mirrors ${CLUSTER1}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
remove_image_retry ${CLUSTER2} ${POOL} ${image}
testlog "TEST: split-brain"
image=split-brain
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
promote_image ${CLUSTER1} ${POOL} ${image} --force
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
write_image ${CLUSTER1} ${POOL} ${image} 10
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain'
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
remove_image_retry ${CLUSTER2} ${POOL} ${image}
testlog "TEST: check if removed images' OMAP are removed"
start_mirrors ${CLUSTER2}
wait_for_image_in_omap ${CLUSTER1} ${POOL}
wait_for_image_in_omap ${CLUSTER2} ${POOL}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
# teuthology will trash the daemon
testlog "TEST: no blocklists"
CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
fi
| 23,941 | 47.563895 | 109 | sh |
null | ceph-main/qa/workunits/rbd/rbd_mirror_stress.sh | #!/bin/sh -ex
#
# rbd_mirror_stress.sh - stress test rbd-mirror daemon
#
# The following additional environment variables affect the test:
#
# RBD_MIRROR_REDUCE_WRITES - if not empty, don't run the stress bench write
# tool during the many image test
#
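# For example, to run with the reduced write load (invocation illustrative):
#
#   RBD_MIRROR_REDUCE_WRITES=1 ../qa/workunits/rbd/rbd_mirror_stress.sh
#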
IMAGE_COUNT=50
export LOCKDEP=0
. $(dirname $0)/rbd_mirror_helpers.sh
setup
create_snap()
{
local cluster=$1
local pool=$2
local image=$3
local snap_name=$4
rbd --cluster ${cluster} -p ${pool} snap create ${image}@${snap_name} \
--debug-rbd=20 --debug-journaler=20 2> ${TEMPDIR}/rbd-snap-create.log
}
compare_image_snaps()
{
local pool=$1
local image=$2
local snap_name=$3
local ret=0
local rmt_export=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.export
local loc_export=${TEMPDIR}/${CLUSTER1}-${pool}-${image}.export
rm -f ${rmt_export} ${loc_export}
rbd --cluster ${CLUSTER2} -p ${pool} export ${image}@${snap_name} ${rmt_export}
rbd --cluster ${CLUSTER1} -p ${pool} export ${image}@${snap_name} ${loc_export}
if ! cmp ${rmt_export} ${loc_export}
then
show_diff ${rmt_export} ${loc_export}
ret=1
fi
rm -f ${rmt_export} ${loc_export}
return ${ret}
}
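# poll the pool mirror status until it reports the expected image count;
# the timeout is reset while the count keeps making forward progress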
wait_for_pool_images()
{
local cluster=$1
local pool=$2
local image_count=$3
local s
local count
local last_count=0
while true; do
for s in `seq 1 40`; do
test $s -ne 1 && sleep 30
count=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'images: ' | cut -d' ' -f 2)
test "${count}" = "${image_count}" && return 0
# reset timeout if making forward progress
test $count -ne $last_count && break
done
test $count -eq $last_count && break
last_count=$count
done
rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
return 1
}
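# poll the pool mirror status until the aggregate image health is OK,
# bailing out early if it reports ERROR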
wait_for_pool_healthy()
{
local cluster=$1
local pool=$2
local s
local state
for s in `seq 1 40`; do
test $s -ne 1 && sleep 30
state=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'image health:' | cut -d' ' -f 3)
test "${state}" = "ERROR" && break
test "${state}" = "OK" && return 0
done
rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
return 1
}
start_mirrors ${CLUSTER1}
start_mirrors ${CLUSTER2}
testlog "TEST: add image and test replay after client crashes"
image=test
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} ${MIRROR_IMAGE_MODE} '512M'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
clean_snap_name=
for i in `seq 1 10`
do
stress_write_image ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
snap_name="snap${i}"
create_snap ${CLUSTER2} ${POOL} ${image} ${snap_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${snap_name}
if [ -n "${clean_snap_name}" ]; then
compare_image_snaps ${POOL} ${image} ${clean_snap_name}
fi
compare_image_snaps ${POOL} ${image} ${snap_name}
clean_snap_name="snap${i}-clean"
create_snap ${CLUSTER2} ${POOL} ${image} ${clean_snap_name}
done
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${clean_snap_name}
for i in `seq 1 10`
do
snap_name="snap${i}"
compare_image_snaps ${POOL} ${image} ${snap_name}
snap_name="snap${i}-clean"
compare_image_snaps ${POOL} ${image} ${snap_name}
done
for i in `seq 1 10`
do
snap_name="snap${i}"
remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
snap_name="snap${i}-clean"
remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
done
remove_image_retry ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
testlog "TEST: create many images"
snap_name="snap"
for i in `seq 1 ${IMAGE_COUNT}`
do
image="image_${i}"
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} ${MIRROR_IMAGE_MODE} '128M'
if [ -n "${RBD_MIRROR_REDUCE_WRITES}" ]; then
write_image ${CLUSTER2} ${POOL} ${image} 100
else
stress_write_image ${CLUSTER2} ${POOL} ${image}
fi
done
wait_for_pool_images ${CLUSTER2} ${POOL} ${IMAGE_COUNT}
wait_for_pool_healthy ${CLUSTER2} ${POOL}
wait_for_pool_images ${CLUSTER1} ${POOL} ${IMAGE_COUNT}
wait_for_pool_healthy ${CLUSTER1} ${POOL}
testlog "TEST: compare many images"
for i in `seq 1 ${IMAGE_COUNT}`
do
image="image_${i}"
create_snap ${CLUSTER2} ${POOL} ${image} ${snap_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${snap_name}
compare_image_snaps ${POOL} ${image} ${snap_name}
done
testlog "TEST: delete many images"
for i in `seq 1 ${IMAGE_COUNT}`
do
image="image_${i}"
remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
remove_image_retry ${CLUSTER2} ${POOL} ${image}
done
testlog "TEST: image deletions should propagate"
wait_for_pool_images ${CLUSTER1} ${POOL} 0
wait_for_pool_healthy ${CLUSTER1} ${POOL}
for i in `seq 1 ${IMAGE_COUNT}`
do
image="image_${i}"
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
done
testlog "TEST: delete images during bootstrap"
set_pool_mirror_mode ${CLUSTER1} ${POOL} 'image'
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
start_mirrors ${CLUSTER1}
image=test
for i in `seq 1 10`
do
image="image_${i}"
create_image ${CLUSTER2} ${POOL} ${image} '512M'
enable_mirror ${CLUSTER2} ${POOL} ${image}
stress_write_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
disable_mirror ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
purge_snapshots ${CLUSTER2} ${POOL} ${image}
remove_image_retry ${CLUSTER2} ${POOL} ${image}
done
testlog "TEST: check if removed images' OMAP are removed"
wait_for_image_in_omap ${CLUSTER1} ${POOL}
wait_for_image_in_omap ${CLUSTER2} ${POOL}
| 6,273 | 27.261261 | 111 | sh |
null | ceph-main/qa/workunits/rbd/read-flags.sh | #!/usr/bin/env bash
set -ex
# create a snapshot, then export it and check that setting read flags works
# by looking at --debug-ms output
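# (balance_reads and localize_reads are the librados read-from-replica op
# flags; rbd applies them only to snapshot reads, since snapshots are
# immutable)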
function clean_up {
rm -f test.log || true
rbd snap remove test@snap || true
rbd rm test || true
}
function test_read_flags {
local IMAGE=$1
local SET_BALANCED=$2
local SET_LOCALIZED=$3
local EXPECT_BALANCED=$4
local EXPECT_LOCALIZED=$5
local EXTRA_ARGS="--log-file test.log --debug-ms 1 --no-log-to-stderr"
if [ "$SET_BALANCED" = 'y' ]; then
EXTRA_ARGS="$EXTRA_ARGS --rbd-balance-snap-reads"
elif [ "$SET_LOCALIZED" = 'y' ]; then
EXTRA_ARGS="$EXTRA_ARGS --rbd-localize-snap-reads"
fi
rbd export $IMAGE - $EXTRA_ARGS > /dev/null
if [ "$EXPECT_BALANCED" = 'y' ]; then
grep -q balance_reads test.log
else
grep -L balance_reads test.log | grep -q test.log
fi
if [ "$EXPECT_LOCALIZED" = 'y' ]; then
grep -q localize_reads test.log
else
grep -L localize_reads test.log | grep -q test.log
fi
rm -f test.log
}
clean_up
trap clean_up INT TERM EXIT
rbd create --image-feature layering -s 10 test
rbd snap create test@snap
# export from non snapshot with or without settings should not have flags
test_read_flags test n n n n
test_read_flags test y y n n
# export from snapshot should have read flags in log if they are set
test_read_flags test@snap n n n n
test_read_flags test@snap y n y n
test_read_flags test@snap n y n y
# balanced_reads happens to take priority over localize_reads
test_read_flags test@snap y y y n
echo OK
| 1,557 | 24.129032 | 75 | sh |
null | ceph-main/qa/workunits/rbd/simple_big.sh | #!/bin/sh -ex
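# create a ~100 GB image, map it, write it fully and read it back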
mb=100000
rbd create foo --size $mb
DEV=$(sudo rbd map foo)
dd if=/dev/zero of=$DEV bs=1M count=$mb
dd if=$DEV of=/dev/null bs=1M count=$mb
sudo rbd unmap $DEV
rbd rm foo
echo OK
| 196 | 14.153846 | 39 | sh |
null | ceph-main/qa/workunits/rbd/test_admin_socket.sh | #!/usr/bin/env bash
set -ex
TMPDIR=/tmp/rbd_test_admin_socket$$
mkdir $TMPDIR
trap "rm -fr $TMPDIR" 0
. $(dirname $0)/../../standalone/ceph-helpers.sh
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
function rbd_watch_out_file()
{
echo ${TMPDIR}/rbd_watch_$1.out
}
function rbd_watch_pid_file()
{
echo ${TMPDIR}/rbd_watch_$1.pid
}
function rbd_watch_fifo()
{
echo ${TMPDIR}/rbd_watch_$1.fifo
}
function rbd_watch_asok()
{
echo ${TMPDIR}/rbd_watch_$1.asok
}
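# resolve the counter's full schema path for this image via 'perf schema',
# then extract its current value from the watcher's admin socket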
function rbd_get_perfcounter()
{
local image=$1
local counter=$2
local name
name=$(ceph --format xml --admin-daemon $(rbd_watch_asok ${image}) \
perf schema | $XMLSTARLET el -d3 |
grep "/librbd-.*-${image}/${counter}\$")
test -n "${name}" || return 1
ceph --format xml --admin-daemon $(rbd_watch_asok ${image}) perf dump |
$XMLSTARLET sel -t -m "${name}" -v .
}
function rbd_check_perfcounter()
{
local image=$1
local counter=$2
local expected_val=$3
local val=
val=$(rbd_get_perfcounter ${image} ${counter})
test "${val}" -eq "${expected_val}"
}
function rbd_watch_start()
{
local image=$1
local asok=$(rbd_watch_asok ${image})
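# feed the watcher's stdin through a fifo so it can later be terminated by
# writing a newline to it (see rbd_watch_end)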
mkfifo $(rbd_watch_fifo ${image})
(cat $(rbd_watch_fifo ${image}) |
rbd --admin-socket ${asok} watch ${image} \
> $(rbd_watch_out_file ${image}) 2>&1)&
# find pid of the started rbd watch process
local pid
for i in `seq 10`; do
pid=$(ps auxww | awk "/[r]bd --admin.* watch ${image}/ {print \$2}")
test -n "${pid}" && break
sleep 0.1
done
test -n "${pid}"
echo ${pid} > $(rbd_watch_pid_file ${image})
# find watcher admin socket
test -n "${asok}"
for i in `seq 10`; do
test -S "${asok}" && break
sleep 0.1
done
test -S "${asok}"
# configure debug level
ceph --admin-daemon "${asok}" config set debug_rbd 20
# check that watcher is registered
rbd status ${image} | expect_false grep "Watchers: none"
}
function rbd_watch_end()
{
local image=$1
local regexp=$2
# send 'enter' to watch to exit
echo > $(rbd_watch_fifo ${image})
# just in case it is not terminated
kill $(cat $(rbd_watch_pid_file ${image})) || :
# output rbd watch out file for easier troubleshooting
cat $(rbd_watch_out_file ${image})
# cleanup
rm -f $(rbd_watch_fifo ${image}) $(rbd_watch_pid_file ${image}) \
$(rbd_watch_out_file ${image}) $(rbd_watch_asok ${image})
}
pool="rbd"
image=testimg$$
ceph_admin="ceph --admin-daemon $(rbd_watch_asok ${image})"
rbd create --size 128 ${pool}/${image}
# check rbd cache commands are present in help output
rbd_cache_flush="rbd cache flush ${pool}/${image}"
rbd_cache_invalidate="rbd cache invalidate ${pool}/${image}"
rbd_watch_start ${image}
${ceph_admin} help | fgrep "${rbd_cache_flush}"
${ceph_admin} help | fgrep "${rbd_cache_invalidate}"
rbd_watch_end ${image}
# test rbd cache commands with disabled and enabled cache
for conf_rbd_cache in false true; do
rbd image-meta set ${image} conf_rbd_cache ${conf_rbd_cache}
rbd_watch_start ${image}
rbd_check_perfcounter ${image} flush 0
${ceph_admin} ${rbd_cache_flush}
# the 'flush' counter should increase regardless of whether the cache is enabled
rbd_check_perfcounter ${image} flush 1
rbd_check_perfcounter ${image} invalidate_cache 0
${ceph_admin} ${rbd_cache_invalidate}
# the 'invalidate_cache' counter should increase regardless of whether the cache is enabled
rbd_check_perfcounter ${image} invalidate_cache 1
rbd_watch_end ${image}
done
rbd rm ${image}
| 3,611 | 22.763158 | 79 | sh |
null | ceph-main/qa/workunits/rbd/test_librbd.sh | #!/bin/sh -e
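# run the librbd API tests; set VALGRIND to run them under valgrind with the
# given options (e.g. VALGRIND="--tool=memcheck" -- illustrative)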
if [ -n "${VALGRIND}" ]; then
valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
--error-exitcode=1 ceph_test_librbd
else
ceph_test_librbd
fi
exit 0
| 183 | 17.4 | 64 | sh |
null | ceph-main/qa/workunits/rbd/test_librbd_python.sh | #!/bin/sh -ex
relpath=$(dirname $0)/../../../src/test/pybind
if [ -n "${VALGRIND}" ]; then
valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
--errors-for-leak-kinds=definite --error-exitcode=1 \
python3 -m nose -v $relpath/test_rbd.py "$@"
else
python3 -m nose -v $relpath/test_rbd.py "$@"
fi
exit 0
| 329 | 24.384615 | 64 | sh |
null | ceph-main/qa/workunits/rbd/test_lock_fence.sh | #!/usr/bin/env bash
# can't use -e because of background process
set -x
IMAGE=rbdrw-image
LOCKID=rbdrw
RELPATH=$(dirname $0)/../../../src/test/librbd
RBDRW=$RELPATH/rbdrw.py
rbd create $IMAGE --size 10 --image-format 2 --image-shared || exit 1
# rbdrw loops doing I/O to $IMAGE after locking with lockid $LOCKID
python3 $RBDRW $IMAGE $LOCKID &
iochild=$!
# give client time to lock and start reading/writing
LOCKS='[]'
while [ "$LOCKS" == '[]' ]
do
LOCKS=$(rbd lock list $IMAGE --format json)
sleep 1
done
clientaddr=$(rbd lock list $IMAGE | tail -1 | awk '{print $NF;}')
clientid=$(rbd lock list $IMAGE | tail -1 | awk '{print $1;}')
echo "clientaddr: $clientaddr"
echo "clientid: $clientid"
ceph osd blocklist add $clientaddr || exit 1
wait $iochild
rbdrw_exitcode=$?
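# rbdrw is expected to stop with ESHUTDOWN (108) once the blocklist fences its session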
if [ $rbdrw_exitcode != 108 ]
then
echo "wrong exitcode from rbdrw: $rbdrw_exitcode"
exit 1
else
echo "rbdrw stopped with ESHUTDOWN"
fi
set -e
ceph osd blocklist rm $clientaddr
rbd lock remove $IMAGE $LOCKID "$clientid"
# rbdrw will have exited with an existing watch, so, until #3527 is fixed,
# hang out until the watch expires
sleep 30
rbd rm $IMAGE
echo OK
| 1,150 | 22.489796 | 74 | sh |
null | ceph-main/qa/workunits/rbd/test_rbd_mirror.sh | #!/bin/sh -e
if [ -n "${VALGRIND}" ]; then
valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
--error-exitcode=1 ceph_test_rbd_mirror
else
ceph_test_rbd_mirror
fi
exit 0
| 191 | 18.2 | 64 | sh |
null | ceph-main/qa/workunits/rbd/test_rbd_tasks.sh | #!/usr/bin/env bash
set -ex
POOL=rbd_tasks
POOL_NS=ns1
setup() {
trap 'cleanup' INT TERM EXIT
ceph osd pool create ${POOL} 128
rbd pool init ${POOL}
rbd namespace create ${POOL}/${POOL_NS}
TEMPDIR=`mktemp -d`
}
cleanup() {
ceph osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
rm -rf ${TEMPDIR}
}
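# retry the given check with increasing backoff (roughly 100 seconds in total)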
wait_for() {
local TEST_FN=$1
shift 1
local TEST_FN_ARGS=("$@")
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
sleep ${s}
${TEST_FN} "${TEST_FN_ARGS[@]}" || continue
return 0
done
return 1
}
task_exists() {
local TASK_ID=$1
[[ -z "${TASK_ID}" ]] && exit 1
ceph rbd task list ${TASK_ID} || return 1
return 0
}
task_dne() {
local TASK_ID=$1
[[ -z "${TASK_ID}" ]] && exit 1
ceph rbd task list ${TASK_ID} || return 0
return 1
}
task_in_progress() {
local TASK_ID=$1
[[ -z "${TASK_ID}" ]] && exit 1
[[ $(ceph rbd task list ${TASK_ID} | jq '.in_progress') == 'true' ]]
}
test_remove() {
echo "test_remove"
local IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${IMAGE}
# the MGR might require some time to discover the OSD map with the new pool
wait_for ceph rbd task add remove ${POOL}/${IMAGE}
}
test_flatten() {
echo "test_flatten"
local PARENT_IMAGE=`uuidgen`
local CHILD_IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${PARENT_IMAGE}
rbd snap create ${POOL}/${PARENT_IMAGE}@snap
rbd clone ${POOL}/${PARENT_IMAGE}@snap ${POOL}/${POOL_NS}/${CHILD_IMAGE} --rbd-default-clone-format=2
[[ "$(rbd info --format json ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq 'has("parent")')" == "true" ]]
local TASK_ID=`ceph rbd task add flatten ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
[[ "$(rbd info --format json ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq 'has("parent")')" == "false" ]]
}
test_trash_remove() {
echo "test_trash_remove"
local IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${IMAGE}
local IMAGE_ID=`rbd info --format json ${POOL}/${IMAGE} | jq --raw-output ".id"`
rbd trash mv ${POOL}/${IMAGE}
[[ -n "$(rbd trash list ${POOL})" ]] || exit 1
local TASK_ID=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
[[ -z "$(rbd trash list ${POOL})" ]] || exit 1
}
test_migration_execute() {
echo "test_migration_execute"
local SOURCE_IMAGE=`uuidgen`
local TARGET_IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
[[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
[[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "executed" ]]
}
test_migration_commit() {
echo "test_migration_commit"
local SOURCE_IMAGE=`uuidgen`
local TARGET_IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
[[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
TASK_ID=`ceph rbd task add migration commit ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
[[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq 'has("migration")')" == "false" ]]
(rbd info ${POOL}/${SOURCE_IMAGE} && return 1) || true
rbd info ${POOL}/${TARGET_IMAGE}
}
test_migration_abort() {
echo "test_migration_abort"
local SOURCE_IMAGE=`uuidgen`
local TARGET_IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
[[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
TASK_ID=`ceph rbd task add migration abort ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
[[ "$(rbd status --format json ${POOL}/${SOURCE_IMAGE} | jq 'has("migration")')" == "false" ]]
rbd info ${POOL}/${SOURCE_IMAGE}
(rbd info ${POOL}/${TARGET_IMAGE} && return 1) || true
}
test_list() {
echo "test_list"
local IMAGE_1=`uuidgen`
local IMAGE_2=`uuidgen`
rbd create --size 1T --image-shared ${POOL}/${IMAGE_1}
rbd create --size 1T --image-shared ${POOL}/${IMAGE_2}
local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE_1} | jq --raw-output ".id"`
local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE_2} | jq --raw-output ".id"`
local LIST_FILE="${TEMPDIR}/list_file"
ceph rbd task list > ${LIST_FILE}
cat ${LIST_FILE}
[[ $(jq "[.[] | .id] | contains([\"${TASK_ID_1}\", \"${TASK_ID_2}\"])" ${LIST_FILE}) == "true" ]]
ceph rbd task cancel ${TASK_ID_1}
ceph rbd task cancel ${TASK_ID_2}
}
test_cancel() {
echo "test_cancel"
local IMAGE=`uuidgen`
rbd create --size 1T --image-shared ${POOL}/${IMAGE}
local TASK_ID=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
wait_for task_exists ${TASK_ID}
ceph rbd task cancel ${TASK_ID}
wait_for task_dne ${TASK_ID}
}
test_duplicate_task() {
echo "test_duplicate_task"
local IMAGE=`uuidgen`
rbd create --size 1T --image-shared ${POOL}/${IMAGE}
local IMAGE_ID=`rbd info --format json ${POOL}/${IMAGE} | jq --raw-output ".id"`
rbd trash mv ${POOL}/${IMAGE}
local TASK_ID_1=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
local TASK_ID_2=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
[[ "${TASK_ID_1}" == "${TASK_ID_2}" ]]
ceph rbd task cancel ${TASK_ID_1}
}
test_duplicate_name() {
echo "test_duplicate_name"
local IMAGE=`uuidgen`
rbd create --size 1G --image-shared ${POOL}/${IMAGE}
local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID_1}
rbd create --size 1G --image-shared ${POOL}/${IMAGE}
local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
[[ "${TASK_ID_1}" != "${TASK_ID_2}" ]]
wait_for task_dne ${TASK_ID_2}
local TASK_ID_3=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
[[ "${TASK_ID_2}" == "${TASK_ID_3}" ]]
}
test_progress() {
echo "test_progress"
local IMAGE_1=`uuidgen`
local IMAGE_2=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${IMAGE_1}
local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE_1} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID_1}
local PROGRESS_FILE="${TEMPDIR}/progress_file"
ceph progress json > ${PROGRESS_FILE}
cat ${PROGRESS_FILE}
[[ $(jq "[.completed | .[].id] | contains([\"${TASK_ID_1}\"])" ${PROGRESS_FILE}) == "true" ]]
rbd create --size 1T --image-shared ${POOL}/${IMAGE_2}
local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE_2} | jq --raw-output ".id"`
wait_for task_in_progress ${TASK_ID_2}
ceph progress json > ${PROGRESS_FILE}
cat ${PROGRESS_FILE}
[[ $(jq "[.events | .[].id] | contains([\"${TASK_ID_2}\"])" ${PROGRESS_FILE}) == "true" ]]
ceph rbd task cancel ${TASK_ID_2}
wait_for task_dne ${TASK_ID_2}
ceph progress json > ${PROGRESS_FILE}
cat ${PROGRESS_FILE}
[[ $(jq "[.completed | map(select(.failed)) | .[].id] | contains([\"${TASK_ID_2}\"])" ${PROGRESS_FILE}) == "true" ]]
}
setup
test_remove
test_flatten
test_trash_remove
test_migration_execute
test_migration_commit
test_migration_abort
test_list
test_cancel
test_duplicate_task
test_duplicate_name
test_progress
echo OK
| 7,928 | 27.624549 | 118 | sh |
null | ceph-main/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh | #!/bin/sh
#
# Regression test for http://tracker.ceph.com/issues/14984
#
# When the bug is present, starting the rbdmap service causes
# a bogus log message to be emitted to the log because the RBDMAPFILE
# environment variable is not set.
#
# When the bug is not present, starting the rbdmap service will emit
# no log messages, because /etc/ceph/rbdmap does not contain any lines
# that require processing.
#
set -ex
echo "TEST: save timestamp for use later with journalctl --since"
TIMESTAMP=$(date +%Y-%m-%d\ %H:%M:%S)
echo "TEST: assert that rbdmap has not logged anything since boot"
journalctl -b 0 -t rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
journalctl -b 0 -t init-rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
echo "TEST: restart the rbdmap.service"
sudo systemctl restart rbdmap.service
echo "TEST: ensure that /usr/bin/rbdmap runs to completion"
until sudo systemctl status rbdmap.service | grep 'active (exited)' ; do
sleep 0.5
done
echo "TEST: assert that rbdmap has not logged anything since TIMESTAMP"
journalctl --since "$TIMESTAMP" -t rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
journalctl --since "$TIMESTAMP" -t init-rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
exit 0
| 1,208 | 33.542857 | 85 | sh |
null | ceph-main/qa/workunits/rbd/verify_pool.sh | #!/bin/sh -ex
POOL_NAME=rbd_test_validate_pool
PG_NUM=32
tear_down () {
ceph osd pool delete $POOL_NAME $POOL_NAME --yes-i-really-really-mean-it || true
}
set_up () {
tear_down
ceph osd pool create $POOL_NAME $PG_NUM
ceph osd pool mksnap $POOL_NAME snap
rbd pool init $POOL_NAME
}
trap tear_down EXIT HUP INT
set_up
# creating an image in a pool-managed snapshot pool should fail
rbd create --pool $POOL_NAME --size 1 foo && exit 1 || true
# should succeed if the pool is already marked as validated
printf "overwrite validated" | rados --pool $POOL_NAME put rbd_info -
rbd create --pool $POOL_NAME --size 1 foo
echo OK
| 634 | 21.678571 | 82 | sh |
null | ceph-main/qa/workunits/rbd/crimson/test_crimson_librbd.sh | #!/bin/sh -e
if [ -n "${VALGRIND}" ]; then
valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
--error-exitcode=1 ceph_test_librbd
else
    # Run test cases individually to allow finer-grained selection
    # during ongoing Crimson development.
# Disabled test groups are tracked here:
# https://tracker.ceph.com/issues/58791
ceph_test_librbd --gtest_filter='TestLibRBD.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/0.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/1.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/2.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/3.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/4.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/5.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/6.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/7.*'
# ceph_test_librbd --gtest_filter='DiffIterateTest/0.*'
# ceph_test_librbd --gtest_filter='DiffIterateTest/1.*'
ceph_test_librbd --gtest_filter='TestImageWatcher.*'
ceph_test_librbd --gtest_filter='TestInternal.*'
ceph_test_librbd --gtest_filter='TestMirroring.*'
# ceph_test_librbd --gtest_filter='TestDeepCopy.*'
ceph_test_librbd --gtest_filter='TestGroup.*'
# ceph_test_librbd --gtest_filter='TestMigration.*'
ceph_test_librbd --gtest_filter='TestMirroringWatcher.*'
ceph_test_librbd --gtest_filter='TestObjectMap.*'
ceph_test_librbd --gtest_filter='TestOperations.*'
ceph_test_librbd --gtest_filter='TestTrash.*'
ceph_test_librbd --gtest_filter='TestJournalEntries.*'
ceph_test_librbd --gtest_filter='TestJournalReplay.*'
fi
exit 0
| 1,632 | 44.361111 | 64 | sh |
null | ceph-main/qa/workunits/rename/all.sh | #!/usr/bin/env bash
set -ex
dir=`dirname $0`
CEPH_TOOL='./ceph'
$CEPH_TOOL || CEPH_TOOL='ceph'
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/prepare.sh
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_nul.sh
rm ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_nul.sh
rm ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_pri.sh
rm ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_pri.sh
rm ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_rem.sh
rm ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_nul.sh
rm -r ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_pri.sh
rm -r ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/dir_pri_pri.sh
rm -r ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/dir_pri_nul.sh
rm -r ./?/* || true
| 861 | 21.684211 | 61 | sh |
null | ceph-main/qa/workunits/rename/dir_pri_nul.sh | #!/bin/sh -ex
# dir: srcdn=destdn
mkdir ./a/dir1
mv ./a/dir1 ./a/dir1.renamed
# dir: diff
mkdir ./a/dir2
mv ./a/dir2 ./b/dir2
# dir: diff, child subtree on target
mkdir -p ./a/dir3/child/foo
$CEPH_TOOL mds tell 0 export_dir /a/dir3/child 1
sleep 5
mv ./a/dir3 ./b/dir3
# dir: diff, child subtree on other
mkdir -p ./a/dir4/child/foo
$CEPH_TOOL mds tell 0 export_dir /a/dir4/child 2
sleep 5
mv ./a/dir4 ./b/dir4
# dir: witness subtree adjustment
mkdir -p ./a/dir5/1/2/3/4
$CEPH_TOOL mds tell 0 export_dir /a/dir5/1/2/3 2
sleep 5
mv ./a/dir5 ./b
| 550 | 18 | 48 | sh |
null | ceph-main/qa/workunits/rename/dir_pri_pri.sh | #!/bin/sh -ex
# dir, srcdn=destdn
mkdir ./a/dir1
mkdir ./a/dir2
mv -T ./a/dir1 ./a/dir2
# dir, different
mkdir ./a/dir3
mkdir ./b/dir4
mv -T ./a/dir3 ./b/dir4
| 161 | 12.5 | 23 | sh |
null | ceph-main/qa/workunits/rename/prepare.sh | #!/bin/sh -ex
$CEPH_TOOL mds tell 0 injectargs '--mds-bal-interval 0'
$CEPH_TOOL mds tell 1 injectargs '--mds-bal-interval 0'
$CEPH_TOOL mds tell 2 injectargs '--mds-bal-interval 0'
$CEPH_TOOL mds tell 3 injectargs '--mds-bal-interval 0'
#$CEPH_TOOL mds tell 4 injectargs '--mds-bal-interval 0'
mkdir -p ./a/a
mkdir -p ./b/b
mkdir -p ./c/c
mkdir -p ./d/d
mount_dir=`df . | grep -o " /.*" | grep -o "/.*"`
cur_dir=`pwd`
ceph_dir=${cur_dir##$mount_dir}
$CEPH_TOOL mds tell 0 export_dir $ceph_dir/b 1
$CEPH_TOOL mds tell 0 export_dir $ceph_dir/c 2
$CEPH_TOOL mds tell 0 export_dir $ceph_dir/d 3
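# pin /b, /c and /d to mds ranks 1-3 (balancing was disabled above), so that
# the rename tests that follow exercise cross-MDS renames; the sleep below
# gives the subtree migrations time to settle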
sleep 5
| 604 | 26.5 | 56 | sh |
null | ceph-main/qa/workunits/rename/pri_nul.sh | #!/bin/sh -ex
# srcdn=destdn
touch ./a/file1
mv ./a/file1 ./a/file1.renamed
# different
touch ./a/file2
mv ./a/file2 ./b
| 125 | 9.5 | 30 | sh |
null | ceph-main/qa/workunits/rename/pri_pri.sh | #!/bin/sh -ex
# srcdn=destdn
touch ./a/file1
touch ./a/file2
mv ./a/file1 ./a/file2
# different (srcdn != destdn)
touch ./a/file3
touch ./b/file4
mv ./a/file3 ./b/file4
| 172 | 12.307692 | 29 | sh |
null | ceph-main/qa/workunits/rename/pri_rem.sh | #!/bin/sh -ex
dotest() {
src=$1
desti=$2
destdn=$3
n=$4
touch ./$src/src$n
touch ./$desti/desti$n
ln ./$desti/desti$n ./$destdn/destdn$n
mv ./$src/src$n ./$destdn/destdn$n
}
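# naming convention used by these dotest helpers (our reading of the tests,
# not authoritative): src = dir holding the source file, desti = dir holding
# the destination's primary inode, destdn = dir holding the destination's
# remote dentry (the extra hard link that the rename replaces)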
# srcdn=destdn=desti
dotest 'a' 'a' 'a' 1
# destdn=desti
dotest 'b' 'a' 'a' 2
# srcdn=destdn
dotest 'a' 'b' 'a' 3
# srcdn=desti
dotest 'a' 'a' 'b' 4
# all different
dotest 'a' 'b' 'c' 5
| 402 | 11.59375 | 42 | sh |
null | ceph-main/qa/workunits/rename/rem_nul.sh | #!/bin/sh -ex
dotest() {
srci=$1
srcdn=$2
dest=$3
n=$4
touch ./$srci/srci$n
ln ./$srci/srci$n ./$srcdn/srcdn$n
mv ./$srcdn/srcdn$n ./$dest/dest$n
}
# srci=srcdn=destdn
dotest 'a' 'a' 'a' 1
# srcdn=destdn
dotest 'b' 'a' 'a' 2
# srci=destdn
dotest 'a' 'b' 'a' 3
# srci=srcdn
dotest 'a' 'a' 'b' 4
# all different
dotest 'a' 'b' 'c' 5
| 368 | 11.3 | 38 | sh |
null | ceph-main/qa/workunits/rename/rem_pri.sh | #!/bin/sh -ex
dotest() {
srci=$1
srcdn=$2
dest=$3
n=$4
touch ./$srci/srci$n
ln ./$srci/srci$n ./$srcdn/srcdn$n
touch ./$dest/dest$n
mv ./$srcdn/srcdn$n ./$dest/dest$n
}
# srci=srcdn=destdn
dotest 'a' 'a' 'a' 1
# srcdn=destdn
dotest 'b' 'a' 'a' 2
# srci=destdn
dotest 'a' 'b' 'a' 3
# srci=srcdn
dotest 'a' 'a' 'b' 4
# all different
dotest 'a' 'b' 'c' 5
| 392 | 12.1 | 38 | sh |
null | ceph-main/qa/workunits/rename/rem_rem.sh | #!/bin/sh -ex
dotest() {
srci=$1
srcdn=$2
desti=$3
destdn=$4
n=$5
touch ./$srci/srci$n
ln ./$srci/srci$n ./$srcdn/srcdn$n
touch ./$desti/desti$n
ln ./$desti/desti$n ./$destdn/destdn$n
mv ./$srcdn/srcdn$n ./$destdn/destdn$n
}
# srci=srcdn=destdn=desti
dotest 'a' 'a' 'a' 'a' 1
# srcdn=destdn=desti
dotest 'b' 'a' 'a' 'a' 2
# srci=destdn=desti
dotest 'a' 'b' 'a' 'a' 3
# srci=srcdn=destdn
dotest 'a' 'a' 'b' 'a' 4
# srci=srcdn=desti
dotest 'a' 'a' 'a' 'b' 5
# srci=srcdn destdn=desti
dotest 'a' 'a' 'b' 'b' 6
# srci=destdn srcdn=desti
dotest 'a' 'b' 'b' 'a' 7
# srci=desti srcdn=destdn
dotest 'a' 'b' 'a' 'b' 8
# srci=srcdn
dotest 'a' 'a' 'b' 'c' 9
# srci=desti
dotest 'a' 'b' 'a' 'c' 10
# srci=destdn
dotest 'a' 'b' 'c' 'a' 11
# srcdn=desti
dotest 'a' 'b' 'b' 'c' 12
# srcdn=destdn
dotest 'a' 'b' 'c' 'b' 13
# destdn=desti
dotest 'a' 'b' 'c' 'c' 14
# all different
dotest 'a' 'b' 'c' 'd' 15
| 963 | 14.548387 | 42 | sh |
null | ceph-main/qa/workunits/rest/test-restful.sh | #!/bin/sh -ex
mydir=`dirname $0`
secret=`ceph config-key get mgr/restful/keys/admin`
url=$(ceph mgr dump|jq -r .services.restful|sed -e 's/\/$//')
echo "url $url secret $secret"
$mydir/test_mgr_rest_api.py $url $secret
echo $0 OK
| 233 | 20.272727 | 61 | sh |
null | ceph-main/qa/workunits/rgw/keystone-service-token.sh | #!/usr/bin/env bash
#
# Copyright (C) 2022 Binero
#
# Author: Tobias Urdin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
trap cleanup EXIT
function cleanup() {
kill $KEYSTONE_FAKE_SERVER_PID
wait
}
function run() {
    $CEPH_ROOT/qa/workunits/rgw/keystone-fake-server.py &
KEYSTONE_FAKE_SERVER_PID=$!
# Give fake Keystone server some seconds to startup
sleep 5
$CEPH_ROOT/qa/workunits/rgw/test-keystone-service-token.py
}
main keystone-service-token "$@"
| 978 | 26.971429 | 70 | sh |
null | ceph-main/qa/workunits/rgw/run-d4n.sh | #!/usr/bin/env bash
set -ex
mydir=`dirname $0`
python3 -m venv $mydir
source $mydir/bin/activate
pip install pip --upgrade
pip install redis
pip install configobj
pip install boto3
# run test
$mydir/bin/python3 $mydir/test_rgw_d4n.py
deactivate
echo OK.
| 257 | 14.176471 | 41 | sh |
null | ceph-main/qa/workunits/rgw/run-datacache.sh | #!/usr/bin/env bash
set -ex
#assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
# localhost::443 for ssl
mydir=`dirname $0`
python3 -m venv $mydir
source $mydir/bin/activate
pip install pip --upgrade
pip install configobj
## run test
$mydir/bin/python3 $mydir/test_rgw_datacache.py
deactivate
echo OK.
| 337 | 15.9 | 80 | sh |
null | ceph-main/qa/workunits/rgw/run-reshard.sh | #!/usr/bin/env bash
set -ex
# this test uses fault injection to abort during 'radosgw-admin bucket reshard'
# disable coredumps so teuthology won't mark a failure
ulimit -c 0
#assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
# localhost::443 for ssl
mydir=`dirname $0`
python3 -m venv $mydir
source $mydir/bin/activate
pip install pip --upgrade
pip install boto3
## run test
$mydir/bin/python3 $mydir/test_rgw_reshard.py
deactivate
echo OK.
| 479 | 19 | 80 | sh |
null | ceph-main/qa/workunits/rgw/run-s3tests.sh | #!/usr/bin/env bash
set -ex
# run s3-tests from current directory. assume working
# ceph environment (radosgw-admin in path) and rgw on localhost:8000
# (the vstart default).
branch=$1
[ -z "$1" ] && branch=master
port=$2
[ -z "$2" ] && port=8000 # this is vstart's default
##
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
if [ -e CMakeCache.txt ]; then
BIN_PATH=$PWD/bin
elif [ -e $root_path/../${BUILD_DIR}/CMakeCache.txt ]; then
cd $root_path/../${BUILD_DIR}
BIN_PATH=$PWD/bin
fi
PATH=$PATH:$BIN_PATH
dir=tmp.s3-tests.$$
# clone and bootstrap
mkdir $dir
cd $dir
git clone https://github.com/ceph/s3-tests
cd s3-tests
git checkout ceph-$branch
S3TEST_CONF=s3tests.conf.SAMPLE tox -- -m "not fails_on_rgw and not sse_s3 and not lifecycle_expiration and not test_of_sts and not webidentity_test" -v
cd ../..
rm -rf $dir
echo OK.
| 846 | 20.175 | 152 | sh |
null | ceph-main/qa/workunits/rgw/test_librgw_file.sh | #!/bin/sh -e
if [ -z ${AWS_ACCESS_KEY_ID} ]
then
export AWS_ACCESS_KEY_ID=`openssl rand -base64 20`
export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 40`
radosgw-admin user create --uid ceph-test-librgw-file \
--access-key $AWS_ACCESS_KEY_ID \
--secret $AWS_SECRET_ACCESS_KEY \
--display-name "librgw test user" \
--email [email protected] || echo "librgw user exists"
# keyring override for teuthology env
KEYRING="/etc/ceph/ceph.keyring"
K="-k ${KEYRING}"
fi
# nfsns is the main suite
# create hierarchy, and then list it
echo "phase 1.1"
ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --create --rename --verbose
# the older librgw_file can consume the namespace
echo "phase 1.2"
ceph_test_librgw_file_nfsns ${K} --getattr --verbose
# and delete the hierarchy
echo "phase 1.3"
ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --delete --verbose
# bulk create/delete buckets
echo "phase 2.1"
ceph_test_librgw_file_cd ${K} --create --multi --verbose
echo "phase 2.2"
ceph_test_librgw_file_cd ${K} --delete --multi --verbose
# write continuation test
echo "phase 3.1"
ceph_test_librgw_file_aw ${K} --create --large --verify
echo "phase 3.2"
ceph_test_librgw_file_aw ${K} --delete --large
# continued readdir
echo "phase 4.1"
ceph_test_librgw_file_marker ${K} --create --marker1 --marker2 --nobjs=100 --verbose
echo "phase 4.2"
ceph_test_librgw_file_marker ${K} --delete --verbose
# advanced i/o--but skip readv/writev for now--split delete from
# create and stat ops to avoid fault in sysobject cache
echo "phase 5.1"
ceph_test_librgw_file_gp ${K} --get --stat --put --create
echo "phase 5.2"
ceph_test_librgw_file_gp ${K} --delete
exit 0
| 1,707 | 27.466667 | 84 | sh |
null | ceph-main/qa/workunits/rgw/test_rgw_gc_log.sh | #!/bin/sh -e
ceph_test_rgw_gc_log
exit 0
| 43 | 6.333333 | 20 | sh |
null | ceph-main/qa/workunits/rgw/test_rgw_obj.sh | #!/bin/sh -e
ceph_test_rgw_obj
exit 0
| 40 | 5.833333 | 17 | sh |
null | ceph-main/qa/workunits/rgw/test_rgw_orphan_list.sh | #!/usr/bin/env bash
# set -x
set -e
# if defined, debug messages will be displayed and prepended with the string
# debug="DEBUG"
huge_size=5100 # in megabytes
big_size=7 # in megabytes
huge_obj=/tmp/huge_obj.temp.$$
big_obj=/tmp/big_obj.temp.$$
empty_obj=/tmp/empty_obj.temp.$$
fifo=/tmp/orphan-fifo.$$
awscli_dir=${HOME}/awscli_temp
export PATH=${PATH}:${awscli_dir}
rgw_host=$(hostname --fqdn)
if echo "$rgw_host" | grep -q '\.' ; then
:
else
host_domain=".front.sepia.ceph.com"
echo "WARNING: rgw hostname -- $rgw_host -- does not appear to be fully qualified; PUNTING and appending $host_domain"
rgw_host="${rgw_host}${host_domain}"
fi
rgw_port=80
echo "Fully Qualified Domain Name: $rgw_host"
success() {
echo OK.
exit 0
}
########################################################################
# INSTALL AND CONFIGURE TOOLING
install_awscli() {
    # NB: this does NOT verify authenticity and integrity of the downloaded
    # file; for that, see
    # https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html
here="$(pwd)"
cd "$HOME"
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
mkdir -p $awscli_dir
./aws/install -i $awscli_dir
cd "$here"
}
uninstall_awscli() {
here="$(pwd)"
cd "$HOME"
rm -rf $awscli_dir ./aws awscliv2.zip
cd "$here"
}
sudo yum -y install s3cmd
sudo yum -y install python3-setuptools
sudo yum -y install python3-pip
sudo pip3 install --upgrade setuptools
sudo pip3 install python-swiftclient
# get ready for transition from s3cmd to awscli
if false ;then
install_awscli
aws --version
uninstall_awscli
fi
s3config=/tmp/s3config.$$
# do not include the port when it is 80; the host base is used in the
# v4 signature and it needs to follow this convention for signatures
# to match
if [ "$rgw_port" -ne 80 ] ;then
s3_host_base="${rgw_host}:${rgw_port}"
else
s3_host_base="$rgw_host"
fi
cat >${s3config} <<EOF
[default]
host_base = $s3_host_base
access_key = 0555b35654ad1656d804
secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
bucket_location = us-east-1
check_ssl_certificate = True
check_ssl_hostname = True
default_mime_type = binary/octet-stream
delete_removed = False
dry_run = False
enable_multipart = True
encoding = UTF-8
encrypt = False
follow_symlinks = False
force = False
guess_mime_type = True
host_bucket = anything.with.three.dots
multipart_chunk_size_mb = 15
multipart_max_chunks = 10000
recursive = False
recv_chunk = 65536
send_chunk = 65536
signature_v2 = False
socket_timeout = 300
use_https = False
use_mime_magic = True
verbosity = WARNING
EOF
# set up swift authentication
export ST_AUTH=http://${rgw_host}:${rgw_port}/auth/v1.0
export ST_USER=test:tester
export ST_KEY=testing
create_users() {
# Create S3 user
local akey='0555b35654ad1656d804'
local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
radosgw-admin user create --uid testid \
--access-key $akey --secret $skey \
--display-name 'M. Tester' --email [email protected]
# Create Swift user
radosgw-admin user create --subuser=test:tester \
--display-name=Tester-Subuser --key-type=swift \
--secret=testing --access=full
}
myswift() {
if [ -n "$debug" ] ;then
echo "${debug}: swift --verbose --debug $@"
fi
swift --verbose --debug "$@"
local code=$?
if [ $code -ne 0 ] ;then
echo "ERROR: code = $code ; command = s3cmd --config=${s3config} --verbose --debug "$@""
exit $code
fi
}
mys3cmd() {
if [ -n "$debug" ] ;then
echo "${debug}: s3cmd --config=${s3config} --verbose --debug $@"
fi
s3cmd --config=${s3config} --verbose --debug "$@"
local code=$?
if [ $code -ne 0 ] ;then
echo "ERROR: code = $code ; command = s3cmd --config=${s3config} --verbose --debug "$@""
exit $code
fi
}
mys3uploadkill() {
if [ $# -ne 5 ] ;then
echo "$0: error expecting 5 arguments"
exit 1
fi
local_file="$1"
remote_bkt="$2"
remote_obj="$3"
fifo="$4"
stop_part="$5"
mkfifo $fifo
s3cmd --config=${s3config} put $local_file \
s3://${remote_bkt}/${remote_obj} \
--progress \
--multipart-chunk-size-mb=5 >$fifo &
set +e # don't allow errors to stop script
while read line ;do
echo "$line" | grep --quiet "part $stop_part "
if [ ${PIPESTATUS[1]} -eq 0 ] ;then
kill -9 $(jobs -p)
break
fi
done <$fifo
set -e
rm -f $fifo
}
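# Example (hypothetical names): leave an incomplete multipart upload of
# $huge_obj in bucket "demo-bkt" by killing s3cmd once part 7 of the
# 5 MB-chunked upload has started:
#
#   mys3uploadkill $huge_obj demo-bkt demo-obj /tmp/demo-fifo.$$ 7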
mys3upload() {
obj=$1
bucket=$2
dest_obj=$3
mys3cmd put -q $obj s3://${bucket}/$dest_obj
}
########################################################################
# PREP
create_users
dd if=/dev/urandom of=$big_obj bs=1M count=${big_size}
dd if=/dev/urandom of=$huge_obj bs=1M count=${huge_size}
touch $empty_obj
quick_tests() {
echo TRY A SWIFT COMMAND
myswift upload swift-plain-ctr $big_obj --object-name swift-obj-test
myswift list
myswift list swift-plain-ctr
echo TRY A RADOSGW-ADMIN COMMAND
radosgw-admin bucket list # make sure rgw is up and running
}
########################################################################
# S3 TESTS
####################################
# regular multipart test
mys3cmd mb s3://multipart-bkt
mys3upload $huge_obj multipart-bkt multipart-obj
mys3cmd ls
mys3cmd ls s3://multipart-bkt
####################################
# multipart test with incomplete uploads
bkt="incomplete-mp-bkt-1"
mys3cmd mb s3://$bkt
mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c $fifo 20
# generate an incomplete multipart with more than 1,000 parts
mys3uploadkill $huge_obj $bkt incomplete-mp-obj-b $fifo 1005
# generate more than 1,000 incomplete multipart uploads
for c in $(seq 1005) ;do
mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c-$c $fifo 3
done
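# (each upload above is killed at part 3, so only a couple of 5 MB parts are
# left behind per object; going past 1,000 uploads presumably exercises the
# paging of incomplete-multipart listings, which is capped at 1,000 entries
# per page)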
####################################
# resharded bucket
bkt=resharded-bkt-1
mys3cmd mb s3://$bkt
for f in $(seq 8) ; do
dest_obj="reshard-obj-${f}"
mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
done
radosgw-admin bucket reshard --num-shards 3 --bucket=$bkt --yes-i-really-mean-it
radosgw-admin bucket reshard --num-shards 5 --bucket=$bkt --yes-i-really-mean-it
####################################
# versioned bucket
if true ;then
echo "WARNING: versioned bucket test currently turned off"
else
bkt=versioned-bkt-1
mys3cmd mb s3://$bkt
# bucket-enable-versioning $bkt
for f in $(seq 3) ;do
for g in $(seq 10) ;do
dest_obj="versioned-obj-${g}"
mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
done
done
for g in $(seq 1 2 10) ;do
dest_obj="versioned-obj-${g}"
mys3cmd rm s3://${bkt}/$dest_obj
done
fi
############################################################
# copy small objects
o_bkt="orig-bkt-1"
d_bkt="copy-bkt-1"
mys3cmd mb s3://$o_bkt
for f in $(seq 4) ;do
dest_obj="orig-obj-$f"
mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
done
mys3cmd mb s3://$d_bkt
mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
for f in $(seq 5 6) ;do
dest_obj="orig-obj-$f"
mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
done
############################################################
# copy small objects and delete original
o_bkt="orig-bkt-2"
d_bkt="copy-bkt-2"
mys3cmd mb s3://$o_bkt
for f in $(seq 4) ;do
dest_obj="orig-obj-$f"
mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
done
mys3cmd mb s3://$d_bkt
mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
for f in $(seq 5 6) ;do
dest_obj="orig-obj-$f"
mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
done
mys3cmd rb --recursive s3://${o_bkt}
############################################################
# copy multipart objects
o_bkt="orig-mp-bkt-3"
d_bkt="copy-mp-bkt-3"
mys3cmd mb s3://$o_bkt
for f in $(seq 2) ;do
dest_obj="orig-multipart-obj-$f"
mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
done
mys3cmd mb s3://$d_bkt
mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
s3://${d_bkt}/copied-multipart-obj-1
for f in $(seq 5 5) ;do
dest_obj="orig-multipart-obj-$f"
mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
done
############################################################
# copy multipart objects and delete original
o_bkt="orig-mp-bkt-4"
d_bkt="copy-mp-bkt-4"
mys3cmd mb s3://$o_bkt
for f in $(seq 2) ;do
dest_obj="orig-multipart-obj-$f"
mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
done
mys3cmd mb s3://$d_bkt
mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
s3://${d_bkt}/copied-multipart-obj-1
for f in $(seq 5 5) ;do
dest_obj="orig-multipart-obj-$f"
mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
done
mys3cmd rb --recursive s3://$o_bkt
########################################################################
# SWIFT TESTS
# 600 MiB segment size (600 * 1024 * 1024 bytes)
segment_size=629145600
############################################################
# plain test
for f in $(seq 4) ;do
myswift upload swift-plain-ctr $big_obj --object-name swift-obj-$f
done
############################################################
# zero-len test
myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/
myswift upload swift-zerolen-ctr $big_obj --object-name subdir/abc1
myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/empty1
myswift upload swift-zerolen-ctr $big_obj --object-name subdir/xyz1
############################################################
# dlo test
# upload in 600 MiB segments
myswift upload swift-dlo-ctr $huge_obj --object-name dlo-obj-1 \
-S $segment_size
############################################################
# slo test
# upload in 600 MiB segments
myswift upload swift-slo-ctr $huge_obj --object-name slo-obj-1 \
-S $segment_size --use-slo
############################################################
# large object copy test
# upload in 600 MiB segments
o_ctr=swift-orig-ctr
o_obj=slo-orig-obj-1
d_ctr=swift-copy-ctr
d_obj=slo-copy-obj-1
myswift upload $o_ctr $big_obj --object-name $o_obj
myswift copy --destination /${d_ctr}/${d_obj} \
$o_ctr $o_obj
myswift delete $o_ctr $o_obj
############################################################
# huge dlo object copy test
o_ctr=swift-orig-dlo-ctr-1
o_obj=dlo-orig-dlo-obj-1
d_ctr=swift-copy-dlo-ctr-1
d_obj=dlo-copy-dlo-obj-1
myswift upload $o_ctr $huge_obj --object-name $o_obj \
-S $segment_size
myswift copy --destination /${d_ctr}/${d_obj} \
$o_ctr $o_obj
############################################################
# huge dlo object copy and orig delete
o_ctr=swift-orig-dlo-ctr-2
o_obj=dlo-orig-dlo-obj-2
d_ctr=swift-copy-dlo-ctr-2
d_obj=dlo-copy-dlo-obj-2
myswift upload $o_ctr $huge_obj --object-name $o_obj \
-S $segment_size
myswift copy --destination /${d_ctr}/${d_obj} \
$o_ctr $o_obj
myswift delete $o_ctr $o_obj
############################################################
# huge slo object copy test
o_ctr=swift-orig-slo-ctr-1
o_obj=slo-orig-slo-obj-1
d_ctr=swift-copy-slo-ctr-1
d_obj=slo-copy-slo-obj-1
myswift upload $o_ctr $huge_obj --object-name $o_obj \
-S $segment_size --use-slo
myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
############################################################
# huge slo object copy test and orig delete
o_ctr=swift-orig-slo-ctr-2
o_obj=slo-orig-slo-obj-2
d_ctr=swift-copy-slo-ctr-2
d_obj=slo-copy-slo-obj-2
myswift upload $o_ctr $huge_obj --object-name $o_obj \
-S $segment_size --use-slo
myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
myswift delete $o_ctr $o_obj
########################################################################
# FORCE GARBAGE COLLECTION
sleep 6 # for testing, the minimum age at which gc can process an object is 5 secs
radosgw-admin gc process --include-all
########################################
# DO ORPHAN LIST
pool="default.rgw.buckets.data"
rgw-orphan-list $pool
# we only expect there to be one output file, but loop just in case
ol_error=""
for f in orphan-list-*.out ; do
if [ -s "$f" ] ;then # if file non-empty
ol_error="${ol_error}:$f"
echo "One ore more orphans found in $f:"
cat "$f"
fi
done
if [ -n "$ol_error" ] ;then
echo "ERROR: orphans found when none expected"
exit 1
fi
########################################################################
# CLEAN UP
rm -f $empty_obj $big_obj $huge_obj $s3config
success
| 12,507 | 23.053846 | 122 | sh |
null | ceph-main/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh | #!/usr/bin/env bash
# INITIALIZATION
mydir=$(dirname $0)
data_pool=default.rgw.buckets.data
orphan_list_out=/tmp/orphan_list.$$
radoslist_out=/tmp/radoslist.$$
rados_ls_out=/tmp/rados_ls.$$
diff_out=/tmp/diff.$$
rgw_host="$(hostname --fqdn)"
echo "INFO: fully qualified domain name: $rgw_host"
export RGW_ACCESS_KEY="0555b35654ad1656d804"
export RGW_SECRET_KEY="h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
export RGW_HOST="${RGW_HOST:-$rgw_host}"
# random argument determines if multipart is aborted or completed 50/50
outcome=$((RANDOM % 2))
if [ $outcome -eq 0 ] ;then
echo "== TESTING *ABORTING* MULTIPART UPLOAD WITH RE-UPLOADS =="
else
echo "== TESTING *COMPLETING* MULTIPART UPLOAD WITH RE-UPLOADS =="
fi
# random argument determines if bucket is versioned or not 50/50
versioning=$((RANDOM % 2))
if [ $versioning -eq 0 ] ;then
echo "== TESTING NON-VERSIONED BUCKET =="
else
echo "== TESTING VERSIONED BUCKET =="
fi
# create a randomized bucket name
bucket="reupload-bkt-$((RANDOM % 899999 + 100000))"
# SET UP PYTHON VIRTUAL ENVIRONMENT
# install boto3
python3 -m venv $mydir
source $mydir/bin/activate
pip install pip --upgrade
pip install boto3
# CREATE RGW USER IF NECESSARY
if radosgw-admin user info --access-key $RGW_ACCESS_KEY 2>/dev/null ;then
echo INFO: user already exists
else
echo INFO: creating user
radosgw-admin user create --uid testid \
--access-key $RGW_ACCESS_KEY \
--secret $RGW_SECRET_KEY \
--display-name 'M. Tester' \
--email [email protected] 2>/dev/null
fi
# RUN REUPLOAD TEST
$mydir/bin/python3 ${mydir}/test_rgw_s3_mp_reupload.py $bucket $outcome $versioning
# ANALYZE FOR ERRORS
# (NOTE: for now we're choosing not to use the rgw-orphan-list tool)
# force garbage collection to remove extra parts
radosgw-admin gc process --include-all 2>/dev/null
marker=$(radosgw-admin metadata get bucket:$bucket 2>/dev/null | grep bucket_id | sed 's/.*: "\(.*\)".*/\1/')
# determine expected rados objects
radosgw-admin bucket radoslist --bucket=$bucket 2>/dev/null | sort >$radoslist_out
echo "radosgw-admin bucket radoslist:"
cat $radoslist_out
# determine found rados objects
rados ls -p $data_pool 2>/dev/null | grep "^$marker" | sort >$rados_ls_out
echo "rados ls:"
cat $rados_ls_out
# compare expected and found
diff $radoslist_out $rados_ls_out >$diff_out
if [ $(cat $diff_out | wc -l) -ne 0 ] ;then
error=1
echo "ERROR: Found differences between expected and actual rados objects for test bucket."
echo " note: indicators: '>' found but not expected; '<' expected but not found."
cat $diff_out
fi
# CLEAN UP
deactivate
rm -f $orphan_list_out $radoslist_out $rados_ls_out $diff_out
# PRODUCE FINAL RESULTS
if [ -n "$error" ] ;then
echo "== FAILED =="
exit 1
fi
echo "== PASSED =="
exit 0
| 2,838 | 24.576577 | 109 | sh |
null | ceph-main/qa/workunits/suites/blogbench.sh | #!/usr/bin/env bash
set -ex
echo "getting blogbench"
wget http://download.ceph.com/qa/blogbench-1.0.tar.bz2
#cp /home/gregf/src/blogbench-1.0.tar.bz2 .
tar -xvf blogbench-1.0.tar.bz2
cd blogbench-1.0/
echo "making blogbench"
./configure
make
cd src
mkdir blogtest_in
echo "running blogbench"
./blogbench -d blogtest_in
| 320 | 19.0625 | 54 | sh |
null | ceph-main/qa/workunits/suites/bonnie.sh | #!/usr/bin/env bash
set -ex
bonnie_bin=`which bonnie++`
[ $? -eq 1 ] && bonnie_bin=/usr/sbin/bonnie++
uid_flags=""
[ "`id -u`" == "0" ] && uid_flags="-u root"
$bonnie_bin $uid_flags -n 100
| 193 | 15.166667 | 45 | sh |
null | ceph-main/qa/workunits/suites/cephfs_journal_tool_smoke.sh | #!/usr/bin/env bash
set -ex
export BIN="${BIN:-cephfs-journal-tool --rank=cephfs:0}"
export JOURNAL_FILE=/tmp/journal.bin
export JSON_OUTPUT=/tmp/json.tmp
export BINARY_OUTPUT=/tmp/binary.tmp
if [ -d $BINARY_OUTPUT ] ; then
rm -rf $BINARY_OUTPUT
fi
# Check that the import/export stuff really works as expected
# first because it's used as the reset method between
# following checks.
echo "Testing that export/import cycle preserves state"
HEADER_STATE=`$BIN header get`
EVENT_LIST=`$BIN event get list`
$BIN journal export $JOURNAL_FILE
$BIN journal import $JOURNAL_FILE
NEW_HEADER_STATE=`$BIN header get`
NEW_EVENT_LIST=`$BIN event get list`
if [ ! "$HEADER_STATE" = "$NEW_HEADER_STATE" ] ; then
echo "Import failed to preserve header state"
echo $HEADER_STATE
echo $NEW_HEADER_STATE
exit -1
fi
if [ ! "$EVENT_LIST" = "$NEW_EVENT_LIST" ] ; then
echo "Import failed to preserve event state"
echo $EVENT_LIST
echo $NEW_EVENT_LIST
exit -1
fi
echo "Testing 'journal' commands..."
# Simplest thing: print the vital statistics of the journal
$BIN journal inspect
$BIN header get
# Make a copy of the journal in its original state
$BIN journal export $JOURNAL_FILE
if [ ! -s $JOURNAL_FILE ] ; then
echo "Export to $JOURNAL_FILE failed"
exit -1
fi
# Can we execute a journal reset?
$BIN journal reset
$BIN journal inspect
$BIN header get
echo "Rolling back journal to original state..."
$BIN journal import $JOURNAL_FILE
echo "Testing 'header' commands..."
$BIN header get
$BIN header set write_pos 123
$BIN header set expire_pos 123
$BIN header set trimmed_pos 123
echo "Rolling back journal to original state..."
$BIN journal import $JOURNAL_FILE
echo "Testing 'event' commands..."
$BIN event get summary
$BIN event get --type=UPDATE --path=/ --inode=0 --frag=0x100 summary
$BIN event get json --path $JSON_OUTPUT
if [ ! -s $JSON_OUTPUT ] ; then
echo "Export to $JSON_OUTPUT failed"
exit -1
fi
$BIN event get binary --path $BINARY_OUTPUT
if [ ! -s $BINARY_OUTPUT ] ; then
echo "Export to $BINARY_OUTPUT failed"
exit -1
fi
$BIN event recover_dentries summary
$BIN event splice summary
# Tests finish.
# Metadata objects have been modified by the 'event recover_dentries' command.
# Journal is no longer consistent with respect to metadata objects (especially inotable).
# To ensure mds successfully replays its journal, we need to do journal reset.
$BIN journal reset
cephfs-table-tool all reset session
| 2,474 | 25.902174 | 87 | sh |
null | ceph-main/qa/workunits/suites/dbench-short.sh | #!/usr/bin/env bash
set -ex
dbench 1
| 39 | 5.666667 | 19 | sh |
null | ceph-main/qa/workunits/suites/dbench.sh | #!/usr/bin/env bash
set -ex
dbench 1
dbench 10
| 49 | 6.142857 | 19 | sh |
null | ceph-main/qa/workunits/suites/ffsb.sh | #!/usr/bin/env bash
set -ex
mydir=`dirname $0`
# if the first clone attempt times out, retry once with double the timeout
trap -- 'retry' EXIT
retry() {
rm -rf ffsb
# double the timeout value
timeout 3600 git clone https://git.ceph.com/ffsb.git --depth 1
}
rm -rf ffsb
timeout 1800 git clone https://git.ceph.com/ffsb.git --depth 1
trap - EXIT
cd ffsb
./configure
make
cd ..
mkdir tmp
cd tmp
for f in $mydir/*.ffsb
do
../ffsb/ffsb $f
done
cd ..
rm -r tmp ffsb*
| 464 | 13.53125 | 66 | sh |
null | ceph-main/qa/workunits/suites/fio.sh | #!/usr/bin/env bash
set -x
gen_fio_file() {
iter=$1
f=$2
cat > randio-$$-${iter}.fio <<EOF
[randio]
blocksize_range=32m:128m
blocksize_unaligned=1
filesize=10G:20G
readwrite=randrw
runtime=300
size=20G
filename=${f}
EOF
}
sudo apt-get -y install fio
for i in $(seq 1 20); do
fcount=$(ls donetestfile* 2>/dev/null | wc -l)
donef="foo"
fiof="bar"
if test ${fcount} -gt 0; then
# choose random file
        r=$(( RANDOM % fcount ))
testfiles=( $(ls donetestfile*) )
donef=${testfiles[${r}]}
fiof=$(echo ${donef} | sed -e "s|done|fio|")
gen_fio_file $i ${fiof}
else
fiof=fiotestfile.$$.$i
donef=donetestfile.$$.$i
gen_fio_file $i ${fiof}
fi
sudo rm -f ${donef}
sudo fio randio-$$-$i.fio
sudo ln ${fiof} ${donef}
ls -la
done
| 791 | 17.418605 | 49 | sh |
null | ceph-main/qa/workunits/suites/fsstress.sh | #!/bin/bash
set -ex
mkdir -p fsstress
pushd fsstress
wget -q -O ltp-full.tgz http://download.ceph.com/qa/ltp-full-20091231.tgz
tar xzf ltp-full.tgz
pushd ltp-full-20091231/testcases/kernel/fs/fsstress
make
BIN=$(readlink -f fsstress)
popd
popd
T=$(mktemp -d -p .)
"$BIN" -d "$T" -l 1 -n 1000 -p 10 -v
rm -rf -- "$T"
| 319 | 16.777778 | 73 | sh |
null | ceph-main/qa/workunits/suites/fsx.sh | #!/bin/sh -x
set -e
git clone https://git.ceph.com/xfstests-dev.git
cd xfstests-dev
git checkout 12973fc04fd10d4af086901e10ffa8e48866b735
make -j4
cd ..
cp xfstests-dev/ltp/fsx .
OPTIONS="-z" # don't use zero range calls; not supported by cephfs
./fsx $OPTIONS 1MB -N 50000 -p 10000 -l 1048576
./fsx $OPTIONS 10MB -N 50000 -p 10000 -l 10485760
./fsx $OPTIONS 100MB -N 50000 -p 10000 -l 104857600
| 403 | 22.764706 | 67 | sh |
null | ceph-main/qa/workunits/suites/fsync-tester.sh | #!/bin/sh
set -ex
# To skirt around GPL compatibility issues:
wget http://download.ceph.com/qa/fsync-tester.c
gcc -D_GNU_SOURCE fsync-tester.c -o fsync-tester
./fsync-tester
echo $PATH
whereis lsof
lsof
| 207 | 13.857143 | 48 | sh |
null | ceph-main/qa/workunits/suites/iogen.sh | #!/usr/bin/env bash
set -ex
echo "getting iogen"
wget http://download.ceph.com/qa/iogen_3.1p0.tar
tar -xvzf iogen_3.1p0.tar
cd iogen_3.1p0
echo "making iogen"
make
echo "running iogen"
./iogen -n 5 -s 2g
echo "sleep for 10 min"
sleep 600
echo "stopping iogen"
./iogen -k
echo "OK"
| 283 | 14.777778 | 48 | sh |
null | ceph-main/qa/workunits/suites/iozone-sync.sh | #!/usr/bin/env bash
set -ex
# basic tests of O_SYNC, O_DSYNC, O_RSYNC
# test O_SYNC
iozone -c -e -s 512M -r 1M -t 1 -F osync1 -i 0 -i 1 -o
# test O_DSYNC
iozone -c -e -s 512M -r 1M -t 1 -F odsync1 -i 0 -i 1 -+D
# test O_RSYNC
iozone -c -e -s 512M -r 1M -t 1 -F orsync1 -i 0 -i 1 -+r
# test same file with O_SYNC in one process, buffered in the other
# the sync test starts first, so the buffered test should blow
# past it and finish first
iozone -c -e -s 512M -r 1M -t 1 -F osync2 -i 0 -i 1 -o &
sleep 1
iozone -c -e -s 512M -r 256K -t 1 -F osync2 -i 0
wait $!
# test same file with O_SYNC from different threads
iozone -c -e -s 512M -r 1M -t 2 -F osync3 -i 2 -o
| 656 | 27.565217 | 66 | sh |
null | ceph-main/qa/workunits/suites/iozone.sh | #!/usr/bin/env bash
set -ex
iozone -c -e -s 1024M -r 16K -t 1 -F f1 -i 0 -i 1
iozone -c -e -s 1024M -r 1M -t 1 -F f2 -i 0 -i 1
iozone -c -e -s 10240M -r 1M -t 1 -F f3 -i 0 -i 1
| 179 | 21.5 | 49 | sh |
null | ceph-main/qa/workunits/suites/pjd.sh | #!/usr/bin/env bash
set -ex
wget http://download.ceph.com/qa/pjd-fstest-20090130-RC-aclfixes.tgz
tar zxvf pjd*.tgz
cd pjd-fstest-20090130-RC
make clean
make
cd ..
mkdir tmp
cd tmp
# must be root!
sudo prove -r -v --exec 'bash -x' ../pjd*/tests
cd ..
rm -rf tmp pjd*
| 269 | 14 | 68 | sh |
null | ceph-main/qa/workunits/windows/run-tests.sh | #!/usr/bin/env bash
set -ex
DIR="$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)"
source ${DIR}/libvirt_vm/build_utils.sh
source ${DIR}/libvirt_vm/connection_info.sh
# Run the Windows tests
scp_upload ${DIR} /windows-workunits
SSH_TIMEOUT=30m ssh_exec powershell.exe -File /windows-workunits/run-tests.ps1
| 305 | 24.5 | 78 | sh |
null | ceph-main/qa/workunits/windows/libvirt_vm/setup.sh | #!/usr/bin/env bash
set -ex
WINDOWS_SERVER_2019_ISO_URL=${WINDOWS_SERVER_2019_ISO_URL:-"https://software-download.microsoft.com/download/pr/17763.737.190906-2324.rs5_release_svc_refresh_SERVER_EVAL_x64FRE_en-us_1.iso"}
VIRTIO_WIN_ISO_URL=${VIRTIO_WIN_ISO_URL:-"https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/stable-virtio/virtio-win.iso"}
DIR="$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)"
# Use build_utils.sh from ceph-build
curl --retry-max-time 30 --retry 10 -L -o ${DIR}/build_utils.sh https://raw.githubusercontent.com/ceph/ceph-build/main/scripts/build_utils.sh
source ${DIR}/build_utils.sh
# Helper function to restart the Windows VM
function restart_windows_vm() {
echo "Restarting Windows VM"
ssh_exec "cmd.exe /c 'shutdown.exe /r /t 0 & sc.exe stop sshd'"
SECONDS=0
TIMEOUT=${1:-600}
while true; do
if [[ $SECONDS -gt $TIMEOUT ]]; then
echo "Timeout waiting for the VM to start"
exit 1
fi
ssh_exec hostname || {
echo "Cannot execute SSH commands yet"
sleep 10
continue
}
break
done
echo "Windows VM restarted"
}
# Install libvirt with KVM
retrycmd_if_failure 5 0 5m sudo apt-get update
retrycmd_if_failure 5 0 10m sudo apt-get install -y qemu-kvm libvirt-daemon-system libvirt-clients virtinst
# Download ISO images
echo "Downloading virtio-win ISO"
retrycmd_if_failure 5 0 30m curl -C - -L $VIRTIO_WIN_ISO_URL -o ${DIR}/virtio-win.iso
echo "Downloading Windows Server 2019 ISO"
retrycmd_if_failure 5 0 60m curl -C - -L $WINDOWS_SERVER_2019_ISO_URL -o ${DIR}/windows-server-2019.iso
# Create virtual floppy image with the unattended instructions to install Windows Server 2019
echo "Creating floppy image"
qemu-img create -f raw ${DIR}/floppy.img 1440k
mkfs.msdos -s 1 ${DIR}/floppy.img
mkdir ${DIR}/floppy
sudo mount ${DIR}/floppy.img ${DIR}/floppy
ssh-keygen -b 2048 -t rsa -f ${DIR}/id_rsa -q -N ""
sudo cp \
${DIR}/autounattend.xml \
${DIR}/first-logon.ps1 \
${DIR}/id_rsa.pub \
${DIR}/utils.ps1 \
${DIR}/setup.ps1 \
${DIR}/floppy/
sudo umount ${DIR}/floppy
rmdir ${DIR}/floppy
echo "Starting libvirt VM"
qemu-img create -f qcow2 ${DIR}/ceph-win-ltsc2019.qcow2 50G
VM_NAME="ceph-win-ltsc2019"
sudo virt-install \
--name $VM_NAME \
--os-variant win2k19 \
--boot hd,cdrom \
--virt-type kvm \
--graphics spice \
--cpu host \
--vcpus 4 \
--memory 4096 \
--disk ${DIR}/floppy.img,device=floppy \
--disk ${DIR}/ceph-win-ltsc2019.qcow2,bus=virtio \
--disk ${DIR}/windows-server-2019.iso,device=cdrom \
--disk ${DIR}/virtio-win.iso,device=cdrom \
--network network=default,model=virtio \
--controller type=virtio-serial \
--channel unix,target_type=virtio,name=org.qemu.guest_agent.0 \
--noautoconsole
export SSH_USER="administrator"
export SSH_KNOWN_HOSTS_FILE="${DIR}/known_hosts"
export SSH_KEY="${DIR}/id_rsa"
SECONDS=0
TIMEOUT=1800
SLEEP_SECS=30
while true; do
if [[ $SECONDS -gt $TIMEOUT ]]; then
echo "Timeout waiting for the VM to start"
exit 1
fi
VM_IP=$(sudo virsh domifaddr --source agent --interface Ethernet --full $VM_NAME | grep ipv4 | awk '{print $4}' | cut -d '/' -f1) || {
echo "Retrying in $SLEEP_SECS seconds"
sleep $SLEEP_SECS
continue
}
ssh-keyscan -H $VM_IP &> $SSH_KNOWN_HOSTS_FILE || {
echo "SSH is not reachable yet"
sleep $SLEEP_SECS
continue
}
SSH_ADDRESS=$VM_IP ssh_exec hostname || {
echo "Cannot execute SSH commands yet"
sleep $SLEEP_SECS
continue
}
break
done
export SSH_ADDRESS=$VM_IP
scp_upload ${DIR}/utils.ps1 /utils.ps1
scp_upload ${DIR}/setup.ps1 /setup.ps1
SSH_TIMEOUT=1h ssh_exec /setup.ps1
cd $DIR
# Get the helper script to download Chacra builds
retrycmd_if_failure 10 5 1m curl -L -o ./get-chacra-bin.py https://raw.githubusercontent.com/ceph/ceph-win32-tests/main/get-bin.py
chmod +x ./get-chacra-bin.py
# Download latest WNBD build from Chacra
retrycmd_if_failure 10 0 10m ./get-chacra-bin.py --project wnbd --filename wnbd.zip
scp_upload wnbd.zip /wnbd.zip
ssh_exec tar.exe xzvf /wnbd.zip -C /
# Install WNBD driver
ssh_exec Import-Certificate -FilePath /wnbd/driver/wnbd.cer -Cert Cert:\\LocalMachine\\Root
ssh_exec Import-Certificate -FilePath /wnbd/driver/wnbd.cer -Cert Cert:\\LocalMachine\\TrustedPublisher
ssh_exec /wnbd/binaries/wnbd-client.exe install-driver /wnbd/driver/wnbd.inf
restart_windows_vm
ssh_exec wnbd-client.exe -v
# Download Ceph Windows build from Chacra
CEPH_REPO_FILE="/etc/apt/sources.list.d/ceph.list"
PROJECT=$(cat $CEPH_REPO_FILE | cut -d ' ' -f3 | tr '\/', ' ' | awk '{print $4}')
BRANCH=$(cat $CEPH_REPO_FILE | cut -d ' ' -f3 | tr '\/', ' ' | awk '{print $5}')
SHA1=$(cat $CEPH_REPO_FILE | cut -d ' ' -f3 | tr '\/', ' ' | awk '{print $6}')
retrycmd_if_failure 10 0 10m ./get-chacra-bin.py --project $PROJECT --branchname $BRANCH --sha1 $SHA1 --filename ceph.zip
# Install Ceph on Windows
SSH_TIMEOUT=5m scp_upload ./ceph.zip /ceph.zip
SSH_TIMEOUT=10m ssh_exec tar.exe xzvf /ceph.zip -C /
ssh_exec "New-Service -Name ceph-rbd -BinaryPathName 'c:\ceph\rbd-wnbd.exe service'"
ssh_exec Start-Service -Name ceph-rbd
ssh_exec rbd.exe -v
# Setup Ceph configs and directories
ssh_exec mkdir -force /etc/ceph, /var/run/ceph, /var/log/ceph
for i in $(ls /etc/ceph); do
scp_upload /etc/ceph/$i /etc/ceph/$i
done
cat << EOF > ${DIR}/connection_info.sh
export SSH_USER="${SSH_USER}"
export SSH_KNOWN_HOSTS_FILE="${SSH_KNOWN_HOSTS_FILE}"
export SSH_KEY="${SSH_KEY}"
export SSH_ADDRESS="${SSH_ADDRESS}"
EOF
echo "Windows Server 2019 libvirt testing VM is ready"
| 5,714 | 34.06135 | 190 | sh |
null | ceph-main/src/SimpleRADOSStriper.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License version 2.1, as published by
* the Free Software Foundation. See file COPYING.
*
*/
#include <boost/smart_ptr/intrusive_ptr.hpp>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <iomanip>
#include <iostream>
#include <regex>
#include <sstream>
#include <string_view>
#include <limits.h>
#include <string.h>
#include "include/ceph_assert.h"
#include "include/rados/librados.hpp"
#include "cls/lock/cls_lock_client.h"
#include "common/ceph_argparse.h"
#include "common/ceph_mutex.h"
#include "common/common_init.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/version.h"
#include "SimpleRADOSStriper.h"
using ceph::bufferlist;
#define dout_subsys ceph_subsys_cephsqlite
#undef dout_prefix
#define dout_prefix *_dout << "client." << ioctx.get_instance_id() << ": SimpleRADOSStriper: " << __func__ << ": " << oid << ": "
#define d(lvl) ldout((CephContext*)ioctx.cct(), (lvl))
enum {
P_FIRST = 0xe0000,
P_UPDATE_METADATA,
P_UPDATE_ALLOCATED,
P_UPDATE_SIZE,
P_UPDATE_VERSION,
P_SHRINK,
P_SHRINK_BYTES,
P_LOCK,
P_UNLOCK,
P_LAST,
};
int SimpleRADOSStriper::config_logger(CephContext* cct, std::string_view name, std::shared_ptr<PerfCounters>* l)
{
PerfCountersBuilder plb(cct, name.data(), P_FIRST, P_LAST);
plb.add_u64_counter(P_UPDATE_METADATA, "update_metadata", "Number of metadata updates");
plb.add_u64_counter(P_UPDATE_ALLOCATED, "update_allocated", "Number of allocated updates");
plb.add_u64_counter(P_UPDATE_SIZE, "update_size", "Number of size updates");
plb.add_u64_counter(P_UPDATE_VERSION, "update_version", "Number of version updates");
plb.add_u64_counter(P_SHRINK, "shrink", "Number of allocation shrinks");
plb.add_u64_counter(P_SHRINK_BYTES, "shrink_bytes", "Bytes shrunk");
plb.add_u64_counter(P_LOCK, "lock", "Number of locks");
plb.add_u64_counter(P_UNLOCK, "unlock", "Number of unlocks");
l->reset(plb.create_perf_counters());
return 0;
}
SimpleRADOSStriper::~SimpleRADOSStriper()
{
if (lock_keeper.joinable()) {
shutdown = true;
lock_keeper_cvar.notify_all();
lock_keeper.join();
}
if (ioctx.is_valid()) {
d(5) << dendl;
if (is_locked()) {
unlock();
}
}
}
SimpleRADOSStriper::extent SimpleRADOSStriper::get_next_extent(uint64_t off, size_t len) const
{
extent e;
{
uint64_t stripe = (off>>object_size);
CachedStackStringStream css;
*css << oid;
*css << ".";
*css << std::setw(16) << std::setfill('0') << std::hex << stripe;
e.soid = css->str();
}
e.off = off & ((1<<object_size)-1);
e.len = std::min<size_t>(len, (1<<object_size)-e.off);
return e;
}
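/* Worked example (stripe size assumed for illustration): with
 * object_size = 22 (4 MiB stripes) and oid = "mydb", a request at
 * off = (2 << 22) + 100 with len = 8 MiB yields
 *   soid = "mydb.0000000000000002", e.off = 100, e.len = 4 MiB - 100;
 * the extent is clamped at the stripe boundary and callers loop over
 * get_next_extent() for the remainder.
 */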
int SimpleRADOSStriper::remove()
{
d(5) << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
if (int rc = wait_for_aios(true); rc < 0) {
aios_failure = 0;
return rc;
}
if (int rc = set_metadata(0, true); rc < 0) {
return rc;
}
auto ext = get_first_extent();
if (int rc = ioctx.remove(ext.soid); rc < 0) {
d(1) << " remove failed: " << cpp_strerror(rc) << dendl;
return rc;
}
locked = false;
return 0;
}
int SimpleRADOSStriper::truncate(uint64_t size)
{
d(5) << size << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
/* TODO: (not currently used by SQLite) handle growth + sparse */
if (int rc = set_metadata(size, true); rc < 0) {
return rc;
}
return 0;
}
int SimpleRADOSStriper::wait_for_aios(bool block)
{
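  /* Drain the completion queue: wait for every queued aio when block is true,
   * otherwise only reap the completions that have already finished. The first
   * failure seen is remembered in aios_failure and returned to the caller. */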
while (!aios.empty()) {
auto& aiocp = aios.front();
int rc;
if (block) {
rc = aiocp->wait_for_complete();
} else {
if (aiocp->is_complete()) {
rc = aiocp->get_return_value();
} else {
return 0;
}
}
if (rc) {
d(1) << " aio failed: " << cpp_strerror(rc) << dendl;
if (aios_failure == 0) {
aios_failure = rc;
}
}
aios.pop();
}
return aios_failure;
}
int SimpleRADOSStriper::flush()
{
d(5) << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
if (size_dirty) {
if (int rc = set_metadata(size, true); rc < 0) {
return rc;
}
}
if (int rc = wait_for_aios(true); rc < 0) {
aios_failure = 0;
return rc;
}
return 0;
}
int SimpleRADOSStriper::stat(uint64_t* s)
{
d(5) << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
*s = size;
return 0;
}
int SimpleRADOSStriper::create()
{
d(5) << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
auto ext = get_first_extent();
auto op = librados::ObjectWriteOperation();
/* exclusive create ensures we do none of these setxattrs happen if it fails */
op.create(1);
op.setxattr(XATTR_VERSION, uint2bl(0));
op.setxattr(XATTR_EXCL, bufferlist());
op.setxattr(XATTR_SIZE, uint2bl(0));
op.setxattr(XATTR_ALLOCATED, uint2bl(0));
op.setxattr(XATTR_LAYOUT_STRIPE_UNIT, uint2bl(1));
op.setxattr(XATTR_LAYOUT_STRIPE_COUNT, uint2bl(1));
op.setxattr(XATTR_LAYOUT_OBJECT_SIZE, uint2bl(1<<object_size));
if (int rc = ioctx.operate(ext.soid, &op); rc < 0) {
return rc; /* including EEXIST */
}
return 0;
}
int SimpleRADOSStriper::open()
{
d(5) << oid << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
auto ext = get_first_extent();
auto op = librados::ObjectReadOperation();
bufferlist bl_excl, bl_size, bl_alloc, bl_version, pbl;
int prval_excl, prval_size, prval_alloc, prval_version;
op.getxattr(XATTR_EXCL, &bl_excl, &prval_excl);
op.getxattr(XATTR_SIZE, &bl_size, &prval_size);
op.getxattr(XATTR_ALLOCATED, &bl_alloc, &prval_alloc);
op.getxattr(XATTR_VERSION, &bl_version, &prval_version);
if (int rc = ioctx.operate(ext.soid, &op, &pbl); rc < 0) {
d(1) << " getxattr failed: " << cpp_strerror(rc) << dendl;
return rc;
}
exclusive_holder = bl_excl.to_str();
{
auto sstr = bl_size.to_str();
std::string err;
size = strict_strtoll(sstr.c_str(), 10, &err);
ceph_assert(err.empty());
}
{
auto sstr = bl_alloc.to_str();
std::string err;
allocated = strict_strtoll(sstr.c_str(), 10, &err);
ceph_assert(err.empty());
}
{
auto sstr = bl_version.to_str();
std::string err;
version = strict_strtoll(sstr.c_str(), 10, &err);
ceph_assert(err.empty());
}
d(15) << " size: " << size << " allocated: " << allocated << " version: " << version << dendl;
return 0;
}
int SimpleRADOSStriper::shrink_alloc(uint64_t a)
{
d(5) << dendl;
std::vector<aiocompletionptr> removes;
ceph_assert(a <= allocated);
uint64_t prune = std::max<uint64_t>(a, (1u << object_size)); /* never delete first extent here */
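  /* the first extent also carries the striper's xattr metadata (size,
   * version, allocated), so it must always survive */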
uint64_t len = allocated - prune;
const uint64_t bytes_removed = len;
uint64_t offset = prune;
while (len > 0) {
auto ext = get_next_extent(offset, len);
auto aiocp = aiocompletionptr(librados::Rados::aio_create_completion());
if (int rc = ioctx.aio_remove(ext.soid, aiocp.get()); rc < 0) {
d(1) << " aio_remove failed: " << cpp_strerror(rc) << dendl;
return rc;
}
removes.emplace_back(std::move(aiocp));
len -= ext.len;
offset += ext.len;
}
for (auto& aiocp : removes) {
if (int rc = aiocp->wait_for_complete(); rc < 0 && rc != -ENOENT) {
d(1) << " aio_remove failed: " << cpp_strerror(rc) << dendl;
return rc;
}
}
auto ext = get_first_extent();
auto op = librados::ObjectWriteOperation();
auto aiocp = aiocompletionptr(librados::Rados::aio_create_completion());
op.setxattr(XATTR_ALLOCATED, uint2bl(a));
d(15) << " updating allocated to " << a << dendl;
op.setxattr(XATTR_VERSION, uint2bl(version+1));
d(15) << " updating version to " << (version+1) << dendl;
if (int rc = ioctx.aio_operate(ext.soid, aiocp.get(), &op); rc < 0) {
d(1) << " update failed: " << cpp_strerror(rc) << dendl;
return rc;
}
/* we need to wait so we don't have dangling extents */
d(10) << " waiting for allocated update" << dendl;
if (int rc = aiocp->wait_for_complete(); rc < 0) {
d(1) << " update failure: " << cpp_strerror(rc) << dendl;
return rc;
}
if (logger) {
logger->inc(P_UPDATE_METADATA);
logger->inc(P_UPDATE_ALLOCATED);
logger->inc(P_UPDATE_VERSION);
logger->inc(P_SHRINK);
logger->inc(P_SHRINK_BYTES, bytes_removed);
}
version += 1;
allocated = a;
return 0;
}
int SimpleRADOSStriper::maybe_shrink_alloc()
{
d(15) << dendl;
if (size == 0) {
if (allocated > 0) {
d(10) << "allocation shrink to 0" << dendl;
return shrink_alloc(0);
} else {
return 0;
}
}
uint64_t mask = (1<<object_size)-1;
uint64_t new_allocated = min_growth + ((size + mask) & ~mask); /* round up base 2 */
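  /* illustrative example: with object_size = 22, size = 5 MiB rounds up to
   * (size + mask) & ~mask == 8 MiB, so new_allocated = min_growth + 8 MiB */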
if (allocated > new_allocated && ((allocated-new_allocated) > min_growth)) {
d(10) << "allocation shrink to " << new_allocated << dendl;
return shrink_alloc(new_allocated);
}
return 0;
}
bufferlist SimpleRADOSStriper::str2bl(std::string_view sv)
{
bufferlist bl;
bl.append(sv);
return bl;
}
bufferlist SimpleRADOSStriper::uint2bl(uint64_t v)
{
CachedStackStringStream css;
*css << std::dec << std::setw(16) << std::setfill('0') << v;
bufferlist bl;
bl.append(css->strv());
return bl;
}
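/* e.g. uint2bl(42) produces the 16-character string "0000000000000042";
 * the fixed-width, zero-padded decimal keeps the xattr values lexically
 * comparable as well as human readable. */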
int SimpleRADOSStriper::set_metadata(uint64_t new_size, bool update_size)
{
d(10) << " new_size: " << new_size
<< " update_size: " << update_size
<< " allocated: " << allocated
<< " size: " << size
<< " version: " << version
<< dendl;
bool do_op = false;
auto new_allocated = allocated;
auto ext = get_first_extent();
auto op = librados::ObjectWriteOperation();
if (new_size > allocated) {
uint64_t mask = (1<<object_size)-1;
new_allocated = min_growth + ((size + mask) & ~mask); /* round up base 2 */
op.setxattr(XATTR_ALLOCATED, uint2bl(new_allocated));
do_op = true;
if (logger) logger->inc(P_UPDATE_ALLOCATED);
d(15) << " updating allocated to " << new_allocated << dendl;
}
if (update_size) {
op.setxattr(XATTR_SIZE, uint2bl(new_size));
do_op = true;
if (logger) logger->inc(P_UPDATE_SIZE);
d(15) << " updating size to " << new_size << dendl;
}
if (do_op) {
if (logger) logger->inc(P_UPDATE_METADATA);
if (logger) logger->inc(P_UPDATE_VERSION);
op.setxattr(XATTR_VERSION, uint2bl(version+1));
d(15) << " updating version to " << (version+1) << dendl;
auto aiocp = aiocompletionptr(librados::Rados::aio_create_completion());
if (int rc = ioctx.aio_operate(ext.soid, aiocp.get(), &op); rc < 0) {
d(1) << " update failure: " << cpp_strerror(rc) << dendl;
return rc;
}
version += 1;
if (allocated != new_allocated) {
/* we need to wait so we don't have dangling extents */
d(10) << "waiting for allocated update" << dendl;
if (int rc = aiocp->wait_for_complete(); rc < 0) {
d(1) << " update failure: " << cpp_strerror(rc) << dendl;
return rc;
}
aiocp.reset();
allocated = new_allocated;
}
if (aiocp) {
aios.emplace(std::move(aiocp));
}
if (update_size) {
size = new_size;
size_dirty = false;
return maybe_shrink_alloc();
}
}
return 0;
}
ssize_t SimpleRADOSStriper::write(const void* data, size_t len, uint64_t off)
{
d(5) << off << "~" << len << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
if (allocated < (len+off)) {
if (int rc = set_metadata(len+off, false); rc < 0) {
return rc;
}
}
size_t w = 0;
while ((len-w) > 0) {
auto ext = get_next_extent(off+w, len-w);
auto aiocp = aiocompletionptr(librados::Rados::aio_create_completion());
bufferlist bl;
bl.append((const char*)data+w, ext.len);
if (int rc = ioctx.aio_write(ext.soid, aiocp.get(), bl, ext.len, ext.off); rc < 0) {
break;
}
aios.emplace(std::move(aiocp));
w += ext.len;
}
wait_for_aios(false); // clean up finished completions
if (size < (len+off)) {
size = len+off;
size_dirty = true;
d(10) << " dirty size: " << size << dendl;
}
return (ssize_t)w;
}
ssize_t SimpleRADOSStriper::read(void* data, size_t len, uint64_t off)
{
d(5) << off << "~" << len << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
size_t r = 0;
  // Don't use std::vector to store the bufferlists (e.g. for parallelizing
  // aio_reads): elements are moved whenever the vector resizes, which would
  // invalidate the references handed out to aio_read().
std::deque<std::pair<bufferlist, aiocompletionptr>> reads;
while ((len-r) > 0) {
auto ext = get_next_extent(off+r, len-r);
auto& [bl, aiocp] = reads.emplace_back();
aiocp = aiocompletionptr(librados::Rados::aio_create_completion());
if (int rc = ioctx.aio_read(ext.soid, aiocp.get(), &bl, ext.len, ext.off); rc < 0) {
d(1) << " read failure: " << cpp_strerror(rc) << dendl;
return rc;
}
r += ext.len;
}
r = 0;
for (auto& [bl, aiocp] : reads) {
if (int rc = aiocp->wait_for_complete(); rc < 0) {
d(1) << " read failure: " << cpp_strerror(rc) << dendl;
return rc;
}
bl.begin().copy(bl.length(), ((char*)data)+r);
r += bl.length();
}
ceph_assert(r <= len);
return r;
}
int SimpleRADOSStriper::print_lockers(std::ostream& out)
{
int exclusive;
std::string tag;
std::list<librados::locker_t> lockers;
auto ext = get_first_extent();
if (int rc = ioctx.list_lockers(ext.soid, biglock, &exclusive, &tag, &lockers); rc < 0) {
d(1) << " list_lockers failure: " << cpp_strerror(rc) << dendl;
return rc;
}
if (lockers.empty()) {
out << " lockers none";
} else {
out << " lockers exclusive=" << exclusive << " tag=" << tag << " lockers=[";
bool first = true;
for (const auto& l : lockers) {
      if (!first) out << ",";
      first = false;
out << l.client << ":" << l.cookie << ":" << l.address;
}
out << "]";
}
return 0;
}
/* Do lock renewal in a separate thread: while it's unlikely sqlite chews on
* something for multiple seconds without calling into the VFS (where we could
* initiate a lock renewal), it's not impossible with complex queries. Also, we
* want to allow "PRAGMA locking_mode = exclusive" where the application may
* not use the sqlite3 database connection for an indeterminate amount of time.
*/
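/* (Illustrative timing, using the defaults declared in the header:
 * lock_keeper_interval = 2000 ms and lock_keeper_timeout = 30000 ms, i.e. the
 * keeper wakes roughly every 2 s to renew a 30 s lock, so several missed
 * renewals are tolerated before the lock could expire.) */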
void SimpleRADOSStriper::lock_keeper_main(void)
{
d(20) << dendl;
const auto ext = get_first_extent();
while (!shutdown) {
d(20) << "tick" << dendl;
std::unique_lock lock(lock_keeper_mutex);
auto now = clock::now();
auto since = now-last_renewal;
if (since >= lock_keeper_interval && locked) {
d(10) << "renewing lock" << dendl;
auto tv = ceph::to_timeval(lock_keeper_timeout);
int rc = ioctx.lock_exclusive(ext.soid, biglock, cookie.to_string(), lockdesc, &tv, LIBRADOS_LOCK_FLAG_MUST_RENEW);
if (rc) {
/* If lock renewal fails, we cannot continue the application. Return
* -EBLOCKLISTED for all calls into the striper for this instance, even
* if we're not actually blocklisted.
*/
d(-1) << "lock renewal failed: " << cpp_strerror(rc) << dendl;
blocklisted = true;
break;
}
last_renewal = clock::now();
}
lock_keeper_cvar.wait_for(lock, lock_keeper_interval);
}
}
int SimpleRADOSStriper::recover_lock()
{
d(5) << "attempting to recover lock" << dendl;
std::string addrs;
const auto ext = get_first_extent();
{
auto tv = ceph::to_timeval(lock_keeper_timeout);
if (int rc = ioctx.lock_exclusive(ext.soid, biglock, cookie.to_string(), lockdesc, &tv, 0); rc < 0) {
return rc;
}
locked = true;
last_renewal = clock::now();
}
d(5) << "acquired lock, fetching last owner" << dendl;
{
bufferlist bl_excl;
if (int rc = ioctx.getxattr(ext.soid, XATTR_EXCL, bl_excl); rc < 0) {
if (rc == -ENOENT) {
/* someone removed it? ok... */
goto setowner;
} else {
d(-1) << "could not recover exclusive locker" << dendl;
locked = false; /* it will drop eventually */
return -EIO;
}
}
addrs = bl_excl.to_str();
}
if (addrs.empty()) {
d(5) << "someone else cleaned up" << dendl;
goto setowner;
} else {
d(5) << "exclusive lock holder was " << addrs << dendl;
}
if (blocklist_the_dead) {
entity_addrvec_t addrv;
addrv.parse(addrs.c_str());
auto R = librados::Rados(ioctx);
std::string_view b = "blocklist";
retry:
for (auto& a : addrv.v) {
CachedStackStringStream css;
*css << "{\"prefix\":\"osd " << b << "\", \"" << b << "op\":\"add\",";
*css << "\"addr\":\"";
*css << a;
*css << "\"}";
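      /* Illustrative result for a hypothetical address v2:10.0.0.1:6800/42:
       *   {"prefix":"osd blocklist", "blocklistop":"add","addr":"v2:10.0.0.1:6800/42"}
       */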
std::vector<std::string> cmd = {css->str()};
d(5) << "sending blocklist command: " << cmd << dendl;
std::string out;
if (int rc = R.mon_command(css->str(), bufferlist(), nullptr, &out); rc < 0) {
if (rc == -EINVAL && b == "blocklist") {
b = "blacklist";
goto retry;
}
d(-1) << "Cannot proceed with recovery because I have failed to blocklist the old client: " << cpp_strerror(rc) << ", out = " << out << dendl;
locked = false; /* it will drop eventually */
return -EIO;
}
}
/* Ensure our osd_op requests have the latest epoch. */
R.wait_for_latest_osdmap();
}
setowner:
d(5) << "setting new owner to myself, " << myaddrs << dendl;
{
auto myaddrbl = str2bl(myaddrs);
if (int rc = ioctx.setxattr(ext.soid, XATTR_EXCL, myaddrbl); rc < 0) {
d(-1) << "could not set lock owner" << dendl;
locked = false; /* it will drop eventually */
return -EIO;
}
}
return 0;
}
int SimpleRADOSStriper::lock(uint64_t timeoutms)
{
/* XXX: timeoutms is unused */
d(5) << "timeout=" << timeoutms << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
std::scoped_lock lock(lock_keeper_mutex);
ceph_assert(!is_locked());
  /* We're going to be very lazy here in implementation: only exclusive locks
   * are allowed. That guarantees at most a single reader or writer at a time.
   */
uint64_t slept = 0;
auto ext = get_first_extent();
while (true) {
/* The general fast path in one compound operation: obtain the lock,
* confirm the past locker cleaned up after themselves (set XATTR_EXCL to
* ""), then finally set XATTR_EXCL to our address vector as the new
* exclusive locker.
*/
auto op = librados::ObjectWriteOperation();
auto tv = ceph::to_timeval(lock_keeper_timeout);
utime_t duration;
duration.set_from_timeval(&tv);
rados::cls::lock::lock(&op, biglock, ClsLockType::EXCLUSIVE, cookie.to_string(), "", lockdesc, duration, 0);
op.cmpxattr(XATTR_EXCL, LIBRADOS_CMPXATTR_OP_EQ, bufferlist());
op.setxattr(XATTR_EXCL, str2bl(myaddrs));
int rc = ioctx.operate(ext.soid, &op);
if (rc == 0) {
locked = true;
last_renewal = clock::now();
break;
} else if (rc == -EBUSY) {
if ((slept % 500000) == 0) {
d(-1) << "waiting for locks: ";
print_lockers(*_dout);
*_dout << dendl;
}
usleep(5000);
slept += 5000;
continue;
} else if (rc == -ECANCELED) {
/* CMPXATTR failed, a locker didn't cleanup. Try to recover! */
if (rc = recover_lock(); rc < 0) {
if (rc == -EBUSY) {
continue; /* try again */
}
return rc;
}
break;
} else {
d(-1) << " lock failed: " << cpp_strerror(rc) << dendl;
return rc;
}
}
if (!lock_keeper.joinable()) {
lock_keeper = std::thread(&SimpleRADOSStriper::lock_keeper_main, this);
}
if (int rc = open(); rc < 0) {
d(1) << " open failed: " << cpp_strerror(rc) << dendl;
return rc;
}
d(5) << " = 0" << dendl;
if (logger) {
logger->inc(P_LOCK);
}
return 0;
}
int SimpleRADOSStriper::unlock()
{
d(5) << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
std::scoped_lock lock(lock_keeper_mutex);
ceph_assert(is_locked());
/* wait for flush of metadata */
if (int rc = flush(); rc < 0) {
return rc;
}
const auto ext = get_first_extent();
auto op = librados::ObjectWriteOperation();
op.cmpxattr(XATTR_EXCL, LIBRADOS_CMPXATTR_OP_EQ, str2bl(myaddrs));
op.setxattr(XATTR_EXCL, bufferlist());
rados::cls::lock::unlock(&op, biglock, cookie.to_string());
if (int rc = ioctx.operate(ext.soid, &op); rc < 0) {
d(-1) << " unlock failed: " << cpp_strerror(rc) << dendl;
return rc;
}
locked = false;
d(5) << " = 0" << dendl;
if (logger) {
logger->inc(P_UNLOCK);
}
return 0;
}
| 20,980 | 26.037371 | 150 | cc |
null | ceph-main/src/SimpleRADOSStriper.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License version 2.1, as published by
* the Free Software Foundation. See file COPYING.
*
*/
#ifndef _SIMPLERADOSSTRIPER_H
#define _SIMPLERADOSSTRIPER_H
#include <queue>
#include <string_view>
#include <thread>
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "include/uuid.h"
#include "include/types.h"
#include "common/ceph_time.h"
#include "common/perf_counters.h"
class [[gnu::visibility("default")]] SimpleRADOSStriper
{
public:
using aiocompletionptr = std::unique_ptr<librados::AioCompletion>;
using clock = ceph::coarse_mono_clock;
using time = ceph::coarse_mono_time;
static inline const uint64_t object_size = 22; /* power of 2 */
static inline const uint64_t min_growth = (1<<27); /* 128 MB */
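  /* Illustrative: with these values, stripe objects are 1<<22 = 4 MiB and the
   * allocation xattr grows in steps of at least 128 MB (see set_metadata). */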
static int config_logger(CephContext* cct, std::string_view name, std::shared_ptr<PerfCounters>* l);
SimpleRADOSStriper() = default;
SimpleRADOSStriper(librados::IoCtx _ioctx, std::string _oid)
: ioctx(std::move(_ioctx))
, oid(std::move(_oid))
{
cookie.generate_random();
auto r = librados::Rados(ioctx);
myaddrs = r.get_addrs();
}
SimpleRADOSStriper(const SimpleRADOSStriper&) = delete;
SimpleRADOSStriper& operator=(const SimpleRADOSStriper&) = delete;
SimpleRADOSStriper& operator=(SimpleRADOSStriper&&) = delete;
SimpleRADOSStriper(SimpleRADOSStriper&&) = delete;
~SimpleRADOSStriper();
int create();
int open();
int remove();
int stat(uint64_t* size);
ssize_t write(const void* data, size_t len, uint64_t off);
ssize_t read(void* data, size_t len, uint64_t off);
int truncate(size_t size);
int flush();
int lock(uint64_t timeoutms);
int unlock();
int is_locked() const {
return locked;
}
int print_lockers(std::ostream& out);
void set_logger(std::shared_ptr<PerfCounters> l) {
logger = std::move(l);
}
void set_lock_interval(std::chrono::milliseconds t) {
lock_keeper_interval = t;
}
void set_lock_timeout(std::chrono::milliseconds t) {
lock_keeper_timeout = t;
}
void set_blocklist_the_dead(bool b) {
blocklist_the_dead = b;
}
protected:
struct extent {
std::string soid;
size_t len;
size_t off;
};
ceph::bufferlist str2bl(std::string_view sv);
ceph::bufferlist uint2bl(uint64_t v);
int set_metadata(uint64_t new_size, bool update_size);
int shrink_alloc(uint64_t a);
int maybe_shrink_alloc();
int wait_for_aios(bool block);
int recover_lock();
extent get_next_extent(uint64_t off, size_t len) const;
extent get_first_extent() const {
return get_next_extent(0, 0);
}
private:
static inline const char XATTR_EXCL[] = "striper.excl";
static inline const char XATTR_SIZE[] = "striper.size";
static inline const char XATTR_ALLOCATED[] = "striper.allocated";
static inline const char XATTR_VERSION[] = "striper.version";
static inline const char XATTR_LAYOUT_STRIPE_UNIT[] = "striper.layout.stripe_unit";
static inline const char XATTR_LAYOUT_STRIPE_COUNT[] = "striper.layout.stripe_count";
static inline const char XATTR_LAYOUT_OBJECT_SIZE[] = "striper.layout.object_size";
static inline const std::string biglock = "striper.lock";
static inline const std::string lockdesc = "SimpleRADOSStriper";
void lock_keeper_main();
librados::IoCtx ioctx;
std::shared_ptr<PerfCounters> logger;
std::string oid;
std::thread lock_keeper;
std::condition_variable lock_keeper_cvar;
std::mutex lock_keeper_mutex;
time last_renewal = time::min();
std::chrono::milliseconds lock_keeper_interval{2000};
std::chrono::milliseconds lock_keeper_timeout{30000};
std::atomic<bool> blocklisted = false;
bool shutdown = false;
version_t version = 0;
std::string exclusive_holder;
uint64_t size = 0;
uint64_t allocated = 0;
uuid_d cookie{};
bool locked = false;
bool size_dirty = false;
bool blocklist_the_dead = true;
std::queue<aiocompletionptr> aios;
int aios_failure = 0;
std::string myaddrs;
};
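/* Minimal usage sketch (illustrative only; error handling is elided and the
 * pool connection and object name "myfile" are hypothetical):
 *
 *   librados::IoCtx ioctx;                  // assume already opened on a pool
 *   SimpleRADOSStriper srs(ioctx, "myfile");
 *   if (srs.lock(0) == 0) {                 // timeoutms is currently unused
 *     const char buf[] = "hello";
 *     srs.write(buf, sizeof(buf), 0);
 *     srs.flush();
 *     srs.unlock();
 *   }
 */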
#endif /* _SIMPLERADOSSTRIPER_H */
| 4,279 | 29.571429 | 102 | h |
null | ceph-main/src/ceph-osd-prestart.sh | #!/bin/sh
if [ `uname` = FreeBSD ]; then
GETOPT=/usr/local/bin/getopt
else
GETOPT=getopt
fi
eval set -- "$(${GETOPT} -o i: --long id:,cluster: -- $@)"
while true ; do
case "$1" in
-i|--id) id=$2; shift 2 ;;
--cluster) cluster=$2; shift 2 ;;
--) shift ; break ;;
esac
done
if [ -z "$id" ]; then
echo "Usage: $0 [OPTIONS]"
echo "--id/-i ID set ID portion of my name"
echo "--cluster NAME set cluster name (default: ceph)"
exit 1;
fi
data="/var/lib/ceph/osd/${cluster:-ceph}-$id"
# assert data directory exists - see http://tracker.ceph.com/issues/17091
if [ ! -d "$data" ]; then
echo "OSD data directory $data does not exist; bailing out." 1>&2
exit 1
fi
journal="$data/journal"
if [ -L "$journal" -a ! -e "$journal" ]; then
udevadm settle --timeout=5 || :
if [ -L "$journal" -a ! -e "$journal" ]; then
echo "ceph-osd(${cluster:-ceph}-$id): journal not present, not starting yet." 1>&2
exit 0
fi
fi
# ensure ownership is correct
owner=`stat -c %U $data/.`
if [ $owner != 'ceph' -a $owner != 'root' ]; then
echo "ceph-osd data dir $data is not owned by 'ceph' or 'root'"
echo "you must 'chown -R ceph:ceph ...' or similar to fix ownership"
exit 1
fi
exit 0
| 1,251 | 22.622642 | 90 | sh |
null | ceph-main/src/ceph_common.sh | #!/bin/sh
CCONF="$BINDIR/ceph-conf"
default_conf=$ETCDIR"/ceph.conf"
conf=$default_conf
hostname=`hostname -s`
verify_conf() {
# fetch conf?
if [ -x "$ETCDIR/fetch_config" ] && [ "$conf" = "$default_conf" ]; then
conf="/tmp/fetched.ceph.conf.$$"
echo "[$ETCDIR/fetch_config $conf]"
if $ETCDIR/fetch_config $conf && [ -e $conf ]; then true ; else
echo "$0: failed to fetch config with '$ETCDIR/fetch_config $conf'"
exit 1
fi
# yay!
else
# make sure ceph.conf exists
if [ ! -e $conf ]; then
if [ "$conf" = "$default_conf" ]; then
echo "$0: ceph conf $conf not found; system is not configured."
exit 0
fi
echo "$0: ceph conf $conf not found!"
usage_exit
fi
fi
}
check_host() {
# what host is this daemon assigned to?
host=`$CCONF -c $conf -n $type.$id host`
if [ "$host" = "localhost" ]; then
echo "$0: use a proper short hostname (hostname -s), not 'localhost', in $conf section $type.$id; skipping entry"
return 1
fi
if expr match "$host" '.*\.' > /dev/null 2>&1; then
echo "$0: $conf section $type.$id"
echo "contains host=$host, which contains dots; this is probably wrong"
echo "It must match the result of hostname -s"
fi
ssh=""
rootssh=""
sshdir=$PWD
get_conf user "" "user"
#echo host for $name is $host, i am $hostname
cluster=$1
if [ -e "/var/lib/ceph/$type/$cluster-$id/upstart" ]; then
return 1
fi
# sysvinit managed instance in standard location?
if [ -e "/var/lib/ceph/$type/$cluster-$id/sysvinit" ]; then
host="$hostname"
echo "=== $type.$id === "
return 0
fi
# ignore all sections without 'host' defined
if [ -z "$host" ]; then
return 1
fi
if [ "$host" != "$hostname" ]; then
# skip, unless we're starting remote daemons too
if [ $allhosts -eq 0 ]; then
return 1
fi
# we'll need to ssh into that host
if [ -z "$user" ]; then
ssh="ssh $host"
else
ssh="ssh $user@$host"
fi
rootssh="ssh root@$host"
get_conf sshdir "$sshdir" "ssh path"
fi
echo "=== $type.$id === "
return 0
}
do_cmd() {
if [ -z "$ssh" ]; then
[ $verbose -eq 1 ] && echo "--- $host# $1"
ulimit -c unlimited
whoami=`whoami`
if [ "$whoami" = "$user" ] || [ -z "$user" ]; then
bash -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && exit 1; }
else
sudo su $user -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && exit 1; }
fi
else
[ $verbose -eq 1 ] && echo "--- $ssh $2 \"if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1\""
$ssh $2 "if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1" || { [ -z "$3" ] && echo "failed: '$ssh $1'" && exit 1; }
fi
}
do_cmd_okfail() {
ERR=0
if [ -z "$ssh" ]; then
[ $verbose -eq 1 ] && echo "--- $host# $1"
ulimit -c unlimited
whoami=`whoami`
if [ "$whoami" = "$user" ] || [ -z "$user" ]; then
bash -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && ERR=1 && return 1; }
else
sudo su $user -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && ERR=1 && return 1; }
fi
else
[ $verbose -eq 1 ] && echo "--- $ssh $2 \"if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1\""
$ssh $2 "if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1" || { [ -z "$3" ] && echo "failed: '$ssh $1'" && ERR=1 && return 1; }
fi
return 0
}
do_root_cmd() {
if [ -z "$ssh" ]; then
[ $verbose -eq 1 ] && echo "--- $host# $1"
ulimit -c unlimited
whoami=`whoami`
if [ "$whoami" = "root" ]; then
bash -c "$1" || { echo "failed: '$1'" ; exit 1; }
else
sudo bash -c "$1" || { echo "failed: '$1'" ; exit 1; }
fi
else
[ $verbose -eq 1 ] && echo "--- $rootssh $2 \"if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi ; cd $sshdir ; ulimit -c unlimited ; $1\""
$rootssh $2 "if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi ; cd $sshdir; ulimit -c unlimited ; $1" || { echo "failed: '$rootssh $1'" ; exit 1; }
fi
}
do_root_cmd_okfail() {
ERR=0
if [ -z "$ssh" ]; then
[ $verbose -eq 1 ] && echo "--- $host# $1"
ulimit -c unlimited
whoami=`whoami`
if [ "$whoami" = "root" ]; then
bash -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && ERR=1 && return 1; }
else
sudo bash -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && ERR=1 && return 1; }
fi
else
[ $verbose -eq 1 ] && echo "--- $rootssh $2 \"if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1\""
$rootssh $2 "if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1" || { [ -z "$3" ] && echo "failed: '$rootssh $1'" && ERR=1 && return 1; }
fi
return 0
}
get_local_daemon_list() {
type=$1
if [ -d "/var/lib/ceph/$type" ]; then
for p in `find -L /var/lib/ceph/$type -mindepth 1 -maxdepth 1 -type d`; do
i=`basename $p`
if [ -e "/var/lib/ceph/$type/$i/sysvinit" ]; then
id=`echo $i | sed 's/[^-]*-//'`
local="$local $type.$id"
fi
done
fi
}
get_local_name_list() {
# enumerate local directories
local=""
get_local_daemon_list "mon"
get_local_daemon_list "osd"
get_local_daemon_list "mds"
get_local_daemon_list "mgr"
}
get_name_list() {
orig="$*"
    # extract list of monitors, mdss, osds, mgrs defined in the conf file
allconf=$(for entity in \
$local \
`$CCONF -c $conf -l mon | egrep -v '^mon$' || true` \
`$CCONF -c $conf -l mds | egrep -v '^mds$' || true` \
`$CCONF -c $conf -l mgr | egrep -v '^mgr$' || true` \
`$CCONF -c $conf -l osd | egrep -v '^osd$' || true`; do
echo $entity
done | sort -u)
if [ -z "$orig" ]; then
what="$allconf"
return
fi
what=""
for f in $orig; do
    type=`echo $f | cut -c 1-3` # e.g. 'mon', if $f is 'mon1'
id=`echo $f | cut -c 4- | sed 's/\\.//'`
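    # e.g. f='mon1' -> type='mon' id='1'; f='osd.2' -> type='osd' id='2'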
case $f in
mon | osd | mds | mgr)
for d in $allconf; do
if echo $d | grep -q ^$type; then
what="$what $d"
fi
done
;;
*)
if ! echo " " $allconf $local " " | egrep -q "( $type$id | $type.$id )"; then
echo "$0: $type.$id not found ($conf defines" $allconf", /var/lib/ceph defines" $local")"
exit 1
fi
what="$what $f"
;;
esac
done
}
get_conf() {
var=$1
def=$2
key=$3
shift; shift; shift
if [ -z "$1" ]; then
[ "$verbose" -eq 1 ] && echo "$CCONF -c $conf -n $type.$id \"$key\""
eval "$var=\"`$CCONF -c $conf -n $type.$id \"$key\" || printf \"$def\"`\""
else
[ "$verbose" -eq 1 ] && echo "$CCONF -c $conf -s $1 \"$key\""
eval "$var=\"`$CCONF -c $conf -s $1 \"$key\" || eval printf \"$def\"`\""
fi
}
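# Example usage (illustrative; the option name is hypothetical):
#   get_conf run_dir "/var/run/ceph" "run dir"
# afterwards $run_dir holds the "run dir" value for section $type.$id,
# or the given default if the option is unset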
get_conf_bool() {
get_conf "$@"
eval "val=$"$1
[ "$val" = "0" ] && export $1=0
[ "$val" = "false" ] && export $1=0
[ "$val" = "1" ] && export $1=1
[ "$val" = "true" ] && export $1=1
}
| 6,846 | 26.720648 | 173 | sh |
null | ceph-main/src/ceph_fuse.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/stat.h>
#include <sys/utsname.h>
#include <iostream>
#include <string>
#include <optional>
#include "common/async/context_pool.h"
#include "common/config.h"
#include "common/errno.h"
#include "client/Client.h"
#include "client/fuse_ll.h"
#include "msg/Messenger.h"
#include "mon/MonClient.h"
#include "common/Timer.h"
#include "common/ceph_argparse.h"
#if defined(__linux__)
#include "common/linux_version.h"
#endif
#include "global/global_init.h"
#include "global/signal_handler.h"
#include "common/Preforker.h"
#include "common/safe_io.h"
#include <sys/types.h>
#include <fcntl.h>
#include "include/ceph_fuse.h"
#include <fuse_lowlevel.h>
#define dout_context g_ceph_context
using namespace std;
ceph::async::io_context_pool icp;
static void fuse_usage()
{
const char* argv[] = {
"ceph-fuse",
"-h",
};
struct fuse_args args = FUSE_ARGS_INIT(2, (char**)argv);
#if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0)
struct fuse_cmdline_opts opts = {};
if (fuse_parse_cmdline(&args, &opts) != -1) {
if (opts.show_help) {
cout << "usage: " << argv[0] << " [options] <mountpoint>\n\n";
cout << "FUSE options:\n";
fuse_cmdline_help();
fuse_lowlevel_help();
cout << "\n";
}
} else {
#else
if (fuse_parse_cmdline(&args, nullptr, nullptr, nullptr) == -1) {
#endif
derr << "fuse_parse_cmdline failed." << dendl;
}
ceph_assert(args.allocated);
fuse_opt_free_args(&args);
}
void usage()
{
cout <<
"usage: ceph-fuse [-n client.username] [-m mon-ip-addr:mon-port] <mount point> [OPTIONS]\n"
" --client_mountpoint/-r <sub_directory>\n"
" use sub_directory as the mounted root, rather than the full Ceph tree.\n"
"\n";
fuse_usage();
generic_client_usage();
}
int main(int argc, const char **argv, const char *envp[]) {
int filer_flags = 0;
//cerr << "ceph-fuse starting " << myrank << "/" << world << std::endl;
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
std::map<std::string,std::string> defaults = {
{ "pid_file", "" },
{ "chdir", "/" } // FUSE will chdir("/"); be ready.
};
auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_DAEMON,
CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS);
for (auto i = args.begin(); i != args.end();) {
if (ceph_argparse_double_dash(args, i)) {
break;
} else if (ceph_argparse_flag(args, i, "--localize-reads", (char*)nullptr)) {
cerr << "setting CEPH_OSD_FLAG_LOCALIZE_READS" << std::endl;
filer_flags |= CEPH_OSD_FLAG_LOCALIZE_READS;
} else if (ceph_argparse_flag(args, i, "-V", (char*)nullptr)) {
const char* tmpargv[] = {
"ceph-fuse",
"-V"
};
struct fuse_args fargs = FUSE_ARGS_INIT(2, (char**)tmpargv);
#if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0)
struct fuse_cmdline_opts opts = {};
if (fuse_parse_cmdline(&fargs, &opts) == -1) {
#else
if (fuse_parse_cmdline(&fargs, nullptr, nullptr, nullptr) == -1) {
#endif
derr << "fuse_parse_cmdline failed." << dendl;
}
ceph_assert(fargs.allocated);
fuse_opt_free_args(&fargs);
exit(0);
} else {
++i;
}
}
// args for fuse
const char **newargv;
int newargc;
vec_to_argv(argv[0], args, &newargc, &newargv);
// check for 32-bit arch
#ifndef __LP64__
cerr << std::endl;
cerr << "WARNING: Ceph inode numbers are 64 bits wide, and FUSE on 32-bit kernels does" << std::endl;
cerr << " not cope well with that situation. Expect to crash shortly." << std::endl;
cerr << std::endl;
#endif
Preforker forker;
auto daemonize = g_conf().get_val<bool>("daemonize");
if (daemonize) {
global_init_prefork(g_ceph_context);
int r;
string err;
r = forker.prefork(err);
if (r < 0 || forker.is_parent()) {
// Start log if current process is about to exit. Otherwise, we hit an assert
// in the Ceph context destructor.
g_ceph_context->_log->start();
}
if (r < 0) {
cerr << "ceph-fuse " << err << std::endl;
return r;
}
if (forker.is_parent()) {
r = forker.parent_wait(err);
if (r < 0) {
cerr << "ceph-fuse " << err << std::endl;
}
return r;
}
global_init_postfork_start(cct.get());
}
{
common_init_finish(g_ceph_context);
init_async_signal_handler();
register_async_signal_handler(SIGHUP, sighup_handler);
//cout << "child, mounting" << std::endl;
class RemountTest : public Thread {
public:
CephFuse *cfuse;
Client *client;
RemountTest() : cfuse(nullptr), client(nullptr) {}
void init(CephFuse *cf, Client *cl) {
cfuse = cf;
client = cl;
}
~RemountTest() override {}
void *entry() override {
#if defined(__linux__)
bool can_invalidate_dentries = g_conf().get_val<bool>(
"client_try_dentry_invalidate");
uint64_t max_retries = g_conf().get_val<uint64_t>(
"client_max_retries_on_remount_failure");
std::pair<int, bool> test_result;
uint64_t i = 0;
int tr = 0;
do {
test_result = client->test_dentry_handling(can_invalidate_dentries);
tr = test_result.first;
if (tr) {
sleep(1);
}
} while (++i < max_retries && tr);
bool abort_on_failure = test_result.second;
bool client_die_on_failed_dentry_invalidate = g_conf().get_val<bool>(
"client_die_on_failed_dentry_invalidate");
if (tr != 0 && client_die_on_failed_dentry_invalidate) {
cerr << "ceph-fuse[" << getpid()
<< "]: fuse failed dentry invalidate/remount test with error "
<< cpp_strerror(tr) << ", stopping" << std::endl;
char buf[5050];
string mountpoint = cfuse->get_mount_point();
snprintf(buf, sizeof(buf), "fusermount -u -z %s", mountpoint.c_str());
int umount_r = system(buf);
if (umount_r) {
if (umount_r != -1) {
if (WIFEXITED(umount_r)) {
umount_r = WEXITSTATUS(umount_r);
cerr << "got error " << umount_r
<< " when unmounting Ceph on failed remount test!" << std::endl;
} else {
cerr << "attempt to umount on failed remount test failed (on a signal?)" << std::endl;
}
} else {
cerr << "system() invocation failed during remount test" << std::endl;
}
}
}
if(abort_on_failure) {
ceph_abort();
}
return reinterpret_cast<void*>(tr);
#else
return reinterpret_cast<void*>(0);
#endif
}
} tester;
// get monmap
Messenger *messenger = nullptr;
StandaloneClient *client;
CephFuse *cfuse;
UserPerm perms;
int tester_r = 0;
void *tester_rp = nullptr;
icp.start(cct->_conf.get_val<std::uint64_t>("client_asio_thread_count"));
MonClient *mc = new MonClient(g_ceph_context, icp);
int r = mc->build_initial_monmap();
if (r == -EINVAL) {
cerr << "failed to generate initial mon list" << std::endl;
exit(1);
}
if (r < 0)
goto out_mc_start_failed;
// start up network
messenger = Messenger::create_client_messenger(g_ceph_context, "client");
messenger->set_default_policy(Messenger::Policy::lossy_client(0));
messenger->set_policy(entity_name_t::TYPE_MDS,
Messenger::Policy::lossless_client(0));
client = new StandaloneClient(messenger, mc, icp);
if (filer_flags) {
client->set_filer_flags(filer_flags);
}
cfuse = new CephFuse(client, forker.get_signal_fd());
r = cfuse->init(newargc, newargv);
if (r != 0) {
cerr << "ceph-fuse[" << getpid() << "]: fuse failed to initialize" << std::endl;
goto out_messenger_start_failed;
}
cerr << "ceph-fuse[" << getpid() << "]: starting ceph client" << std::endl;
r = messenger->start();
if (r < 0) {
cerr << "ceph-fuse[" << getpid() << "]: ceph messenger failed with " << cpp_strerror(-r) << std::endl;
goto out_messenger_start_failed;
}
// start client
r = client->init();
if (r < 0) {
cerr << "ceph-fuse[" << getpid() << "]: ceph client failed with " << cpp_strerror(-r) << std::endl;
goto out_init_failed;
}
client->update_metadata("mount_point", cfuse->get_mount_point());
perms = client->pick_my_perms();
{
// start up fuse
// use my argc, argv (make sure you pass a mount point!)
auto client_mountpoint = g_conf().get_val<std::string>(
"client_mountpoint");
auto mountpoint = client_mountpoint.c_str();
auto fuse_require_active_mds = g_conf().get_val<bool>(
"fuse_require_active_mds");
r = client->mount(mountpoint, perms, fuse_require_active_mds);
if (r < 0) {
if (r == CEPH_FUSE_NO_MDS_UP) {
cerr << "ceph-fuse[" << getpid() << "]: probably no MDS server is up?" << std::endl;
}
cerr << "ceph-fuse[" << getpid() << "]: ceph mount failed with " << cpp_strerror(-r) << std::endl;
r = EXIT_FAILURE;
goto out_shutdown;
}
}
r = cfuse->start();
if (r != 0) {
cerr << "ceph-fuse[" << getpid() << "]: fuse failed to start" << std::endl;
goto out_client_unmount;
}
cerr << "ceph-fuse[" << getpid() << "]: starting fuse" << std::endl;
tester.init(cfuse, client);
tester.create("tester");
r = cfuse->loop();
tester.join(&tester_rp);
tester_r = static_cast<int>(reinterpret_cast<uint64_t>(tester_rp));
cerr << "ceph-fuse[" << getpid() << "]: fuse finished with error " << r
<< " and tester_r " << tester_r <<std::endl;
out_client_unmount:
client->unmount();
cfuse->finalize();
out_shutdown:
icp.stop();
client->shutdown();
out_init_failed:
unregister_async_signal_handler(SIGHUP, sighup_handler);
shutdown_async_signal_handler();
// wait for messenger to finish
messenger->shutdown();
messenger->wait();
out_messenger_start_failed:
delete cfuse;
cfuse = nullptr;
delete client;
client = nullptr;
delete messenger;
messenger = nullptr;
out_mc_start_failed:
free(newargv);
delete mc;
mc = nullptr;
//cout << "child done" << std::endl;
return forker.signal_exit(r);
}
}
| 10,720 | 28.133152 | 108 | cc |
null | ceph-main/src/ceph_mds.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <pthread.h>
#include <iostream>
#include <string>
#include "common/async/context_pool.h"
#include "include/ceph_features.h"
#include "include/compat.h"
#include "include/random.h"
#include "common/config.h"
#include "common/strtol.h"
#include "common/numa.h"
#include "mon/MonMap.h"
#include "mds/MDSDaemon.h"
#include "msg/Messenger.h"
#include "common/Timer.h"
#include "common/ceph_argparse.h"
#include "common/pick_address.h"
#include "common/Preforker.h"
#include "global/global_init.h"
#include "global/signal_handler.h"
#include "global/pidfile.h"
#include "mon/MonClient.h"
#include "auth/KeyRing.h"
#include "perfglue/heap_profiler.h"
#include "include/ceph_assert.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
using std::cerr;
using std::cout;
using std::vector;
static void usage()
{
cout << "usage: ceph-mds -i <ID> [flags]\n"
<< " -m monitorip:port\n"
<< " connect to monitor at given address\n"
<< " --debug_mds n\n"
<< " debug MDS level (e.g. 10)\n"
<< std::endl;
generic_server_usage();
}
MDSDaemon *mds = NULL;
static void handle_mds_signal(int signum)
{
if (mds)
mds->handle_signal(signum);
}
int main(int argc, const char **argv)
{
ceph_pthread_setname(pthread_self(), "ceph-mds");
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
auto cct = global_init(NULL, args,
CEPH_ENTITY_TYPE_MDS, CODE_ENVIRONMENT_DAEMON, 0);
ceph_heap_profiler_init();
int numa_node = g_conf().get_val<int64_t>("mds_numa_node");
size_t numa_cpu_set_size = 0;
cpu_set_t numa_cpu_set;
if (numa_node >= 0) {
int r = get_numa_node_cpu_set(numa_node, &numa_cpu_set_size, &numa_cpu_set);
if (r < 0) {
dout(1) << __func__ << " unable to determine mds numa node " << numa_node
<< " CPUs" << dendl;
numa_node = -1;
} else {
r = set_cpu_affinity_all_threads(numa_cpu_set_size, &numa_cpu_set);
if (r < 0) {
derr << __func__ << " failed to set numa affinity: " << cpp_strerror(r)
<< dendl;
}
}
} else {
dout(1) << __func__ << " not setting numa affinity" << dendl;
}
std::string val, action;
for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ) {
if (ceph_argparse_double_dash(args, i)) {
break;
}
else if (ceph_argparse_witharg(args, i, &val, "--hot-standby", (char*)NULL)) {
dout(0) << "--hot-standby is obsolete and has no effect" << dendl;
}
else {
derr << "Error: can't understand argument: " << *i << "\n" << dendl;
exit(1);
}
}
Preforker forker;
entity_addrvec_t addrs;
pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC, &addrs);
// Normal startup
if (g_conf()->name.has_default_id()) {
derr << "must specify '-i name' with the ceph-mds instance name" << dendl;
exit(1);
}
if (g_conf()->name.get_id().empty() ||
(g_conf()->name.get_id()[0] >= '0' && g_conf()->name.get_id()[0] <= '9')) {
derr << "MDS id '" << g_conf()->name << "' is invalid. "
"MDS names may not start with a numeric digit." << dendl;
exit(1);
}
if (global_init_prefork(g_ceph_context) >= 0) {
std::string err;
int r = forker.prefork(err);
if (r < 0) {
cerr << err << std::endl;
return r;
}
if (forker.is_parent()) {
if (forker.parent_wait(err) != 0) {
return -ENXIO;
}
return 0;
}
global_init_postfork_start(g_ceph_context);
}
common_init_finish(g_ceph_context);
global_init_chdir(g_ceph_context);
std::string public_msgr_type = g_conf()->ms_public_type.empty() ? g_conf().get_val<std::string>("ms_type") : g_conf()->ms_public_type;
Messenger *msgr = Messenger::create(g_ceph_context, public_msgr_type,
entity_name_t::MDS(-1), "mds",
Messenger::get_random_nonce());
if (!msgr)
forker.exit(1);
msgr->set_cluster_protocol(CEPH_MDS_PROTOCOL);
cout << "starting " << g_conf()->name << " at " << msgr->get_myaddrs()
<< std::endl;
uint64_t required =
CEPH_FEATURE_OSDREPLYMUX;
msgr->set_default_policy(Messenger::Policy::lossy_client(required));
msgr->set_policy(entity_name_t::TYPE_MON,
Messenger::Policy::lossy_client(CEPH_FEATURE_UID |
CEPH_FEATURE_PGID64));
msgr->set_policy(entity_name_t::TYPE_MDS,
Messenger::Policy::lossless_peer(CEPH_FEATURE_UID));
msgr->set_policy(entity_name_t::TYPE_CLIENT,
Messenger::Policy::stateful_server(0));
int r = msgr->bindv(addrs);
if (r < 0)
forker.exit(1);
// set up signal handlers, now that we've daemonized/forked.
init_async_signal_handler();
register_async_signal_handler(SIGHUP, sighup_handler);
// get monmap
ceph::async::io_context_pool ctxpool(2);
MonClient mc(g_ceph_context, ctxpool);
if (mc.build_initial_monmap() < 0)
forker.exit(1);
global_init_chdir(g_ceph_context);
msgr->start();
// start mds
mds = new MDSDaemon(g_conf()->name.get_id().c_str(), msgr, &mc, ctxpool);
// in case we have to respawn...
mds->orig_argc = argc;
mds->orig_argv = argv;
if (g_conf()->daemonize) {
global_init_postfork_finish(g_ceph_context);
forker.daemonize();
}
r = mds->init();
if (r < 0) {
msgr->wait();
goto shutdown;
}
register_async_signal_handler_oneshot(SIGINT, handle_mds_signal);
register_async_signal_handler_oneshot(SIGTERM, handle_mds_signal);
if (g_conf()->inject_early_sigterm)
kill(getpid(), SIGTERM);
msgr->wait();
unregister_async_signal_handler(SIGHUP, sighup_handler);
unregister_async_signal_handler(SIGINT, handle_mds_signal);
unregister_async_signal_handler(SIGTERM, handle_mds_signal);
shutdown_async_signal_handler();
shutdown:
ctxpool.stop();
// yuck: grab the mds lock, so we can be sure that whoever in *mds
// called shutdown finishes what they were doing.
mds->mds_lock.lock();
mds->mds_lock.unlock();
pidfile_remove();
// only delete if it was a clean shutdown (to aid memory leak
// detection, etc.). don't bother if it was a suicide.
if (mds->is_clean_shutdown()) {
delete mds;
delete msgr;
}
// cd on exit, so that gmon.out (if any) goes into a separate directory for each node.
char s[20];
snprintf(s, sizeof(s), "gmon/%d", getpid());
if ((mkdir(s, 0755) == 0) && (chdir(s) == 0)) {
cerr << "ceph-mds: gmon.out should be in " << s << std::endl;
}
return 0;
}
| 7,183 | 26.212121 | 136 | cc |
null | ceph-main/src/ceph_mgr.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat Inc
*
* Author: John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <Python.h>
#include <pthread.h>
#include "include/types.h"
#include "include/compat.h"
#include "common/config.h"
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "common/pick_address.h"
#include "global/global_init.h"
#include "mgr/MgrStandby.h"
static void usage()
{
std::cout << "usage: ceph-mgr -i <ID> [flags]\n"
<< std::endl;
generic_server_usage();
}
/**
* A short main() which just instantiates a MgrStandby and
* hands over control to that.
*/
int main(int argc, const char **argv)
{
ceph_pthread_setname(pthread_self(), "ceph-mgr");
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
std::cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
std::map<std::string,std::string> defaults = {
{ "keyring", "$mgr_data/keyring" }
};
auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_MGR,
CODE_ENVIRONMENT_DAEMON, 0);
pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC);
global_init_daemonize(g_ceph_context);
global_init_chdir(g_ceph_context);
common_init_finish(g_ceph_context);
MgrStandby mgr(argc, argv);
int rc = mgr.init();
if (rc != 0) {
std::cerr << "Error in initialization: " << cpp_strerror(rc) << std::endl;
return rc;
}
return mgr.main(args);
}
| 1,832 | 22.5 | 80 | cc |
null | ceph-main/src/ceph_mon.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <iostream>
#include <string>
#include "common/config.h"
#include "include/ceph_features.h"
#include "mon/MonMap.h"
#include "mon/Monitor.h"
#include "mon/MonitorDBStore.h"
#include "mon/MonClient.h"
#include "msg/Messenger.h"
#include "include/CompatSet.h"
#include "common/ceph_argparse.h"
#include "common/pick_address.h"
#include "common/Throttle.h"
#include "common/Timer.h"
#include "common/errno.h"
#include "common/Preforker.h"
#include "global/global_init.h"
#include "global/signal_handler.h"
#include "perfglue/heap_profiler.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_mon
using std::cerr;
using std::cout;
using std::list;
using std::map;
using std::ostringstream;
using std::string;
using std::vector;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
using ceph::JSONFormatter;
Monitor *mon = NULL;
void handle_mon_signal(int signum)
{
if (mon)
mon->handle_signal(signum);
}
int obtain_monmap(MonitorDBStore &store, bufferlist &bl)
{
dout(10) << __func__ << dendl;
/*
   * the monmap may be in one of four places:
* 'mon_sync:temp_newer_monmap' - stashed newer map for bootstrap
* 'monmap:<latest_version_no>' - the monmap we'd really like to have
* 'mon_sync:latest_monmap' - last monmap backed up for the last sync
* 'mkfs:monmap' - a monmap resulting from mkfs
*/
if (store.exists("monmap", "last_committed")) {
version_t latest_ver = store.get("monmap", "last_committed");
if (store.exists("monmap", latest_ver)) {
int err = store.get("monmap", latest_ver, bl);
ceph_assert(err == 0);
ceph_assert(bl.length() > 0);
dout(10) << __func__ << " read last committed monmap ver "
<< latest_ver << dendl;
// see if there is stashed newer map (see bootstrap())
if (store.exists("mon_sync", "temp_newer_monmap")) {
bufferlist bl2;
int err = store.get("mon_sync", "temp_newer_monmap", bl2);
ceph_assert(err == 0);
ceph_assert(bl2.length() > 0);
MonMap b;
b.decode(bl2);
if (b.get_epoch() > latest_ver) {
dout(10) << __func__ << " using stashed monmap " << b.get_epoch()
<< " instead" << dendl;
bl = std::move(bl2);
} else {
dout(10) << __func__ << " ignoring stashed monmap " << b.get_epoch()
<< dendl;
}
}
return 0;
}
}
if (store.exists("mon_sync", "in_sync")
|| store.exists("mon_sync", "force_sync")) {
dout(10) << __func__ << " detected aborted sync" << dendl;
if (store.exists("mon_sync", "latest_monmap")) {
int err = store.get("mon_sync", "latest_monmap", bl);
ceph_assert(err == 0);
ceph_assert(bl.length() > 0);
dout(10) << __func__ << " read backup monmap" << dendl;
return 0;
}
}
if (store.exists("mon_sync", "temp_newer_monmap")) {
dout(10) << __func__ << " found temp_newer_monmap" << dendl;
int err = store.get("mon_sync", "temp_newer_monmap", bl);
ceph_assert(err == 0);
ceph_assert(bl.length() > 0);
return 0;
}
if (store.exists("mkfs", "monmap")) {
dout(10) << __func__ << " found mkfs monmap" << dendl;
int err = store.get("mkfs", "monmap", bl);
ceph_assert(err == 0);
ceph_assert(bl.length() > 0);
return 0;
}
derr << __func__ << " unable to find a monmap" << dendl;
return -ENOENT;
}
int check_mon_data_exists()
{
string mon_data = g_conf()->mon_data;
struct stat buf;
if (::stat(mon_data.c_str(), &buf)) {
if (errno != ENOENT) {
derr << "stat(" << mon_data << ") " << cpp_strerror(errno) << dendl;
}
return -errno;
}
return 0;
}
/** Check whether **mon data** is empty.
*
* Being empty means mkfs has not been run and there's no monitor setup
* at **g_conf()->mon_data**.
*
* If the directory g_conf()->mon_data is not empty we will return -ENOTEMPTY.
* Otherwise we will return 0. Any other negative returns will represent
* a failure to be handled by the caller.
*
* @return **0** on success, -ENOTEMPTY if not empty or **-errno** otherwise.
*/
int check_mon_data_empty()
{
string mon_data = g_conf()->mon_data;
DIR *dir = ::opendir(mon_data.c_str());
if (!dir) {
derr << "opendir(" << mon_data << ") " << cpp_strerror(errno) << dendl;
return -errno;
}
int code = 0;
struct dirent *de = nullptr;
errno = 0;
while ((de = ::readdir(dir))) {
if (string(".") != de->d_name &&
string("..") != de->d_name &&
string("kv_backend") != de->d_name) {
code = -ENOTEMPTY;
break;
}
}
if (!de && errno) {
derr << "readdir(" << mon_data << ") " << cpp_strerror(errno) << dendl;
code = -errno;
}
::closedir(dir);
return code;
}
static void usage()
{
cout << "usage: ceph-mon -i <ID> [flags]\n"
<< " --debug_mon n\n"
<< " debug monitor level (e.g. 10)\n"
<< " --mkfs\n"
<< " build fresh monitor fs\n"
<< " --force-sync\n"
<< " force a sync from another mon by wiping local data (BE CAREFUL)\n"
<< " --yes-i-really-mean-it\n"
<< " mandatory safeguard for --force-sync\n"
<< " --compact\n"
<< " compact the monitor store\n"
<< " --osdmap <filename>\n"
<< " only used when --mkfs is provided: load the osdmap from <filename>\n"
<< " --inject-monmap <filename>\n"
<< " write the <filename> monmap to the local monitor store and exit\n"
<< " --extract-monmap <filename>\n"
<< " extract the monmap from the local monitor store and exit\n"
<< " --mon-data <directory>\n"
<< " where the mon store and keyring are located\n"
<< " --set-crush-location <bucket>=<foo>"
<< " sets monitor's crush bucket location (only for stretch mode)"
<< std::endl;
generic_server_usage();
}
entity_addrvec_t make_mon_addrs(entity_addr_t a)
{
entity_addrvec_t addrs;
if (a.get_port() == 0) {
a.set_type(entity_addr_t::TYPE_MSGR2);
a.set_port(CEPH_MON_PORT_IANA);
addrs.v.push_back(a);
a.set_type(entity_addr_t::TYPE_LEGACY);
a.set_port(CEPH_MON_PORT_LEGACY);
addrs.v.push_back(a);
} else if (a.get_port() == CEPH_MON_PORT_LEGACY) {
a.set_type(entity_addr_t::TYPE_LEGACY);
addrs.v.push_back(a);
} else if (a.get_type() == entity_addr_t::TYPE_ANY) {
a.set_type(entity_addr_t::TYPE_MSGR2);
addrs.v.push_back(a);
} else {
addrs.v.push_back(a);
}
return addrs;
}
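/* Illustrative mappings: a port-less 10.0.0.1:0 expands to
 * [v2:10.0.0.1:3300, v1:10.0.0.1:6789]; an address already carrying the
 * legacy port 6789 stays a single v1 entry. */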
int main(int argc, const char **argv)
{
// reset our process name, in case we did a respawn, so that it's not
// left as "exe".
ceph_pthread_setname(pthread_self(), "ceph-mon");
int err;
bool mkfs = false;
bool compact = false;
bool force_sync = false;
bool yes_really = false;
std::string osdmapfn, inject_monmap, extract_monmap, crush_loc;
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
// We need to specify some default values that may be overridden by the
// user, that are specific to the monitor. The options we are overriding
// are also used on the OSD, so changing the global defaults is not an option.
// This is not the prettiest way of doing this, especially since it has us
// having a different place defining default values, but it's not horribly
// wrong enough to prevent us from doing it :)
//
// NOTE: user-defined options will take precedence over ours.
map<string,string> defaults = {
{ "keyring", "$mon_data/keyring" },
};
int flags = 0;
{
vector<const char*> args_copy = args;
std::string val;
for (std::vector<const char*>::iterator i = args_copy.begin();
i != args_copy.end(); ) {
if (ceph_argparse_double_dash(args_copy, i)) {
break;
} else if (ceph_argparse_flag(args_copy, i, "--mkfs", (char*)NULL)) {
flags |= CINIT_FLAG_NO_DAEMON_ACTIONS;
} else if (ceph_argparse_witharg(args_copy, i, &val, "--inject_monmap", (char*)NULL)) {
flags |= CINIT_FLAG_NO_DAEMON_ACTIONS;
} else if (ceph_argparse_witharg(args_copy, i, &val, "--extract-monmap", (char*)NULL)) {
flags |= CINIT_FLAG_NO_DAEMON_ACTIONS;
} else {
++i;
}
}
}
// don't try to get config from mon cluster during startup
flags |= CINIT_FLAG_NO_MON_CONFIG;
auto cct = global_init(&defaults, args,
CEPH_ENTITY_TYPE_MON, CODE_ENVIRONMENT_DAEMON,
flags);
ceph_heap_profiler_init();
std::string val;
for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ) {
if (ceph_argparse_double_dash(args, i)) {
break;
} else if (ceph_argparse_flag(args, i, "--mkfs", (char*)NULL)) {
mkfs = true;
} else if (ceph_argparse_flag(args, i, "--compact", (char*)NULL)) {
compact = true;
} else if (ceph_argparse_flag(args, i, "--force-sync", (char*)NULL)) {
force_sync = true;
} else if (ceph_argparse_flag(args, i, "--yes-i-really-mean-it", (char*)NULL)) {
yes_really = true;
} else if (ceph_argparse_witharg(args, i, &val, "--osdmap", (char*)NULL)) {
osdmapfn = val;
} else if (ceph_argparse_witharg(args, i, &val, "--inject_monmap", (char*)NULL)) {
inject_monmap = val;
} else if (ceph_argparse_witharg(args, i, &val, "--extract-monmap", (char*)NULL)) {
extract_monmap = val;
} else if (ceph_argparse_witharg(args, i, &val, "--set-crush-location", (char*)NULL)) {
crush_loc = val;
} else {
++i;
}
}
if (!args.empty()) {
cerr << "too many arguments: " << args << std::endl;
exit(1);
}
if (force_sync && !yes_really) {
cerr << "are you SURE you want to force a sync? this will erase local data and may\n"
<< "break your mon cluster. pass --yes-i-really-mean-it if you do." << std::endl;
exit(1);
}
if (g_conf()->mon_data.empty()) {
cerr << "must specify '--mon-data=foo' data path" << std::endl;
exit(1);
}
if (g_conf()->name.get_id().empty()) {
cerr << "must specify id (--id <id> or --name mon.<id>)" << std::endl;
exit(1);
}
// -- mkfs --
if (mkfs) {
int err = check_mon_data_exists();
if (err == -ENOENT) {
if (::mkdir(g_conf()->mon_data.c_str(), 0755)) {
derr << "mkdir(" << g_conf()->mon_data << ") : "
<< cpp_strerror(errno) << dendl;
exit(1);
}
} else if (err < 0) {
derr << "error opening '" << g_conf()->mon_data << "': "
<< cpp_strerror(-err) << dendl;
exit(-err);
}
err = check_mon_data_empty();
if (err == -ENOTEMPTY) {
// Mon may exist. Let the user know and exit gracefully.
derr << "'" << g_conf()->mon_data << "' already exists and is not empty"
<< ": monitor may already exist" << dendl;
exit(0);
} else if (err < 0) {
derr << "error checking if '" << g_conf()->mon_data << "' is empty: "
<< cpp_strerror(-err) << dendl;
exit(-err);
}
// resolve public_network -> public_addr
pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC);
dout(10) << "public_network " << g_conf()->public_network << dendl;
dout(10) << "public_addr " << g_conf()->public_addr << dendl;
dout(10) << "public_addrv " << g_conf()->public_addrv << dendl;
common_init_finish(g_ceph_context);
bufferlist monmapbl, osdmapbl;
std::string error;
MonMap monmap;
// load or generate monmap
const auto monmap_fn = g_conf().get_val<string>("monmap");
if (monmap_fn.length()) {
int err = monmapbl.read_file(monmap_fn.c_str(), &error);
if (err < 0) {
derr << argv[0] << ": error reading " << monmap_fn << ": " << error << dendl;
exit(1);
}
try {
monmap.decode(monmapbl);
// always mark seed/mkfs monmap as epoch 0
monmap.set_epoch(0);
} catch (const ceph::buffer::error& e) {
derr << argv[0] << ": error decoding monmap " << monmap_fn << ": " << e.what() << dendl;
exit(1);
}
dout(1) << "imported monmap:\n";
monmap.print(*_dout);
*_dout << dendl;
} else {
ostringstream oss;
int err = monmap.build_initial(g_ceph_context, true, oss);
if (oss.tellp())
derr << oss.str() << dendl;
if (err < 0) {
derr << argv[0] << ": warning: no initial monitors; must use admin socket to feed hints" << dendl;
}
dout(1) << "initial generated monmap:\n";
monmap.print(*_dout);
*_dout << dendl;
// am i part of the initial quorum?
if (monmap.contains(g_conf()->name.get_id())) {
// hmm, make sure the ip listed exists on the current host?
// maybe later.
} else if (!g_conf()->public_addrv.empty()) {
entity_addrvec_t av = g_conf()->public_addrv;
string name;
if (monmap.contains(av, &name)) {
monmap.rename(name, g_conf()->name.get_id());
dout(0) << argv[0] << ": renaming mon." << name << " " << av
<< " to mon." << g_conf()->name.get_id() << dendl;
}
} else if (!g_conf()->public_addr.is_blank_ip()) {
entity_addrvec_t av = make_mon_addrs(g_conf()->public_addr);
string name;
if (monmap.contains(av, &name)) {
monmap.rename(name, g_conf()->name.get_id());
dout(0) << argv[0] << ": renaming mon." << name << " " << av
<< " to mon." << g_conf()->name.get_id() << dendl;
}
} else {
// is a local address listed without a name? if so, name myself.
list<entity_addr_t> ls;
monmap.list_addrs(ls);
dout(0) << " monmap addrs are " << ls << ", checking if any are local"
<< dendl;
entity_addr_t local;
if (have_local_addr(g_ceph_context, ls, &local)) {
dout(0) << " have local addr " << local << dendl;
string name;
local.set_type(entity_addr_t::TYPE_MSGR2);
if (!monmap.get_addr_name(local, name)) {
local.set_type(entity_addr_t::TYPE_LEGACY);
if (!monmap.get_addr_name(local, name)) {
dout(0) << "no local addresses appear in bootstrap monmap"
<< dendl;
}
}
if (name.compare(0, 7, "noname-") == 0) {
dout(0) << argv[0] << ": mon." << name << " " << local
<< " is local, renaming to mon." << g_conf()->name.get_id()
<< dendl;
monmap.rename(name, g_conf()->name.get_id());
} else if (name.size()) {
dout(0) << argv[0] << ": mon." << name << " " << local
<< " is local, but not 'noname-' + something; "
<< "not assuming it's me" << dendl;
}
} else {
dout(0) << " no local addrs match monmap" << dendl;
}
}
}
const auto fsid = g_conf().get_val<uuid_d>("fsid");
if (!fsid.is_zero()) {
monmap.fsid = fsid;
dout(0) << argv[0] << ": set fsid to " << fsid << dendl;
}
if (monmap.fsid.is_zero()) {
derr << argv[0] << ": generated monmap has no fsid; use '--fsid <uuid>'" << dendl;
exit(10);
}
//monmap.print(cout);
// osdmap
if (osdmapfn.length()) {
err = osdmapbl.read_file(osdmapfn.c_str(), &error);
if (err < 0) {
derr << argv[0] << ": error reading " << osdmapfn << ": "
<< error << dendl;
exit(1);
}
}
// go
MonitorDBStore store(g_conf()->mon_data);
ostringstream oss;
int r = store.create_and_open(oss);
if (oss.tellp())
derr << oss.str() << dendl;
if (r < 0) {
derr << argv[0] << ": error opening mon data directory at '"
<< g_conf()->mon_data << "': " << cpp_strerror(r) << dendl;
exit(1);
}
ceph_assert(r == 0);
Monitor mon(g_ceph_context, g_conf()->name.get_id(), &store, 0, 0, &monmap);
r = mon.mkfs(osdmapbl);
if (r < 0) {
derr << argv[0] << ": error creating monfs: " << cpp_strerror(r) << dendl;
exit(1);
}
store.close();
dout(0) << argv[0] << ": created monfs at " << g_conf()->mon_data
<< " for " << g_conf()->name << dendl;
return 0;
}
err = check_mon_data_exists();
  if (err == -ENOENT) {
derr << "monitor data directory at '" << g_conf()->mon_data << "'"
<< " does not exist: have you run 'mkfs'?" << dendl;
exit(1);
} else if (err < 0) {
derr << "error accessing monitor data directory at '"
<< g_conf()->mon_data << "': " << cpp_strerror(-err) << dendl;
exit(1);
}
err = check_mon_data_empty();
if (err == 0) {
derr << "monitor data directory at '" << g_conf()->mon_data
<< "' is empty: have you run 'mkfs'?" << dendl;
exit(1);
} else if (err < 0 && err != -ENOTEMPTY) {
// we don't want an empty data dir by now
derr << "error accessing '" << g_conf()->mon_data << "': "
<< cpp_strerror(-err) << dendl;
exit(1);
}
{
// check fs stats. don't start if it's critically close to full.
ceph_data_stats_t stats;
int err = get_fs_stats(stats, g_conf()->mon_data.c_str());
if (err < 0) {
derr << "error checking monitor data's fs stats: " << cpp_strerror(err)
<< dendl;
exit(-err);
}
if (stats.avail_percent <= g_conf()->mon_data_avail_crit) {
derr << "error: monitor data filesystem reached concerning levels of"
<< " available storage space (available: "
<< stats.avail_percent << "% " << byte_u_t(stats.byte_avail)
<< ")\nyou may adjust 'mon data avail crit' to a lower value"
<< " to make this go away (default: " << g_conf()->mon_data_avail_crit
<< "%)\n" << dendl;
exit(ENOSPC);
}
}
Preforker prefork;
if (!(flags & CINIT_FLAG_NO_DAEMON_ACTIONS)) {
if (global_init_prefork(g_ceph_context) >= 0) {
string err_msg;
err = prefork.prefork(err_msg);
if (err < 0) {
derr << err_msg << dendl;
prefork.exit(err);
}
if (prefork.is_parent()) {
err = prefork.parent_wait(err_msg);
if (err < 0)
derr << err_msg << dendl;
prefork.exit(err);
}
setsid();
global_init_postfork_start(g_ceph_context);
}
common_init_finish(g_ceph_context);
global_init_chdir(g_ceph_context);
if (global_init_preload_erasure_code(g_ceph_context) < 0)
prefork.exit(1);
}
// set up signal handlers, now that we've daemonized/forked.
init_async_signal_handler();
MonitorDBStore *store = new MonitorDBStore(g_conf()->mon_data);
// make sure we aren't upgrading too fast
{
string val;
int r = store->read_meta("min_mon_release", &val);
if (r >= 0 && val.size()) {
ceph_release_t from_release = ceph_release_from_name(val);
ostringstream err;
if (!can_upgrade_from(from_release, "min_mon_release", err)) {
derr << err.str() << dendl;
prefork.exit(1);
}
}
}
{
ostringstream oss;
err = store->open(oss);
if (oss.tellp())
derr << oss.str() << dendl;
if (err < 0) {
derr << "error opening mon data directory at '"
<< g_conf()->mon_data << "': " << cpp_strerror(err) << dendl;
prefork.exit(1);
}
}
bufferlist magicbl;
err = store->get(Monitor::MONITOR_NAME, "magic", magicbl);
if (err || !magicbl.length()) {
derr << "unable to read magic from mon data" << dendl;
prefork.exit(1);
}
string magic(magicbl.c_str(), magicbl.length()-1); // ignore trailing \n
if (strcmp(magic.c_str(), CEPH_MON_ONDISK_MAGIC)) {
derr << "mon fs magic '" << magic << "' != current '" << CEPH_MON_ONDISK_MAGIC << "'" << dendl;
prefork.exit(1);
}
err = Monitor::check_features(store);
if (err < 0) {
derr << "error checking features: " << cpp_strerror(err) << dendl;
prefork.exit(1);
}
// inject new monmap?
if (!inject_monmap.empty()) {
bufferlist bl;
std::string error;
int r = bl.read_file(inject_monmap.c_str(), &error);
if (r) {
derr << "unable to read monmap from " << inject_monmap << ": "
<< error << dendl;
prefork.exit(1);
}
// get next version
version_t v = store->get("monmap", "last_committed");
dout(0) << "last committed monmap epoch is " << v << ", injected map will be " << (v+1)
<< dendl;
v++;
// set the version
MonMap tmp;
tmp.decode(bl);
if (tmp.get_epoch() != v) {
dout(0) << "changing monmap epoch from " << tmp.get_epoch()
<< " to " << v << dendl;
tmp.set_epoch(v);
}
bufferlist mapbl;
tmp.encode(mapbl, CEPH_FEATURES_ALL);
bufferlist final;
encode(v, final);
encode(mapbl, final);
auto t(std::make_shared<MonitorDBStore::Transaction>());
// save it
t->put("monmap", v, mapbl);
t->put("monmap", "latest", final);
t->put("monmap", "last_committed", v);
store->apply_transaction(t);
dout(0) << "done." << dendl;
prefork.exit(0);
}
// monmap?
MonMap monmap;
{
// note that even if we don't find a viable monmap, we should go ahead
// and try to build it up in the next if-else block.
bufferlist mapbl;
int err = obtain_monmap(*store, mapbl);
if (err >= 0) {
try {
monmap.decode(mapbl);
} catch (const ceph::buffer::error& e) {
derr << "can't decode monmap: " << e.what() << dendl;
}
} else {
derr << "unable to obtain a monmap: " << cpp_strerror(err) << dendl;
}
dout(10) << __func__ << " monmap:\n";
JSONFormatter jf(true);
jf.dump_object("monmap", monmap);
jf.flush(*_dout);
*_dout << dendl;
if (!extract_monmap.empty()) {
int r = mapbl.write_file(extract_monmap.c_str());
if (r < 0) {
r = -errno;
derr << "error writing monmap to " << extract_monmap << ": " << cpp_strerror(r) << dendl;
prefork.exit(1);
}
derr << "wrote monmap to " << extract_monmap << dendl;
prefork.exit(0);
}
}
// this is what i will bind to
entity_addrvec_t ipaddrs;
if (monmap.contains(g_conf()->name.get_id())) {
ipaddrs = monmap.get_addrs(g_conf()->name.get_id());
// print helpful warning if the conf file doesn't match
std::vector<std::string> my_sections = g_conf().get_my_sections();
std::string mon_addr_str;
if (g_conf().get_val_from_conf_file(my_sections, "mon addr",
mon_addr_str, true) == 0) {
entity_addr_t conf_addr;
if (conf_addr.parse(mon_addr_str)) {
entity_addrvec_t conf_addrs = make_mon_addrs(conf_addr);
if (ipaddrs != conf_addrs) {
derr << "WARNING: 'mon addr' config option " << conf_addrs
<< " does not match monmap file" << std::endl
<< " continuing with monmap configuration" << dendl;
}
} else
derr << "WARNING: invalid 'mon addr' config option" << std::endl
<< " continuing with monmap configuration" << dendl;
}
} else {
dout(0) << g_conf()->name << " does not exist in monmap, will attempt to join an existing cluster" << dendl;
pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC);
if (!g_conf()->public_addrv.empty()) {
ipaddrs = g_conf()->public_addrv;
dout(0) << "using public_addrv " << ipaddrs << dendl;
} else if (!g_conf()->public_addr.is_blank_ip()) {
ipaddrs = make_mon_addrs(g_conf()->public_addr);
dout(0) << "using public_addr " << g_conf()->public_addr << " -> "
<< ipaddrs << dendl;
} else {
MonMap tmpmap;
ostringstream oss;
int err = tmpmap.build_initial(g_ceph_context, true, oss);
if (oss.tellp())
derr << oss.str() << dendl;
if (err < 0) {
derr << argv[0] << ": error generating initial monmap: "
<< cpp_strerror(err) << dendl;
prefork.exit(1);
}
if (tmpmap.contains(g_conf()->name.get_id())) {
ipaddrs = tmpmap.get_addrs(g_conf()->name.get_id());
} else {
derr << "no public_addr or public_network specified, and "
<< g_conf()->name << " not present in monmap or ceph.conf" << dendl;
prefork.exit(1);
}
}
}
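  // The fallbacks above correspond to ceph.conf entries along the lines of
  // (placement and values illustrative):
  //   [mon.a]
  //   public addr = 1.2.3.4   # expanded via make_mon_addrs()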
// bind
int rank = monmap.get_rank(g_conf()->name.get_id());
  std::string public_msgr_type = g_conf()->ms_public_type.empty() ?
    g_conf().get_val<std::string>("ms_type") : g_conf()->ms_public_type;
Messenger *msgr = Messenger::create(g_ceph_context, public_msgr_type,
entity_name_t::MON(rank), "mon", 0);
if (!msgr)
exit(1);
msgr->set_cluster_protocol(CEPH_MON_PROTOCOL);
msgr->set_default_send_priority(CEPH_MSG_PRIO_HIGH);
msgr->set_default_policy(Messenger::Policy::stateless_server(0));
msgr->set_policy(entity_name_t::TYPE_MON,
Messenger::Policy::lossless_peer_reuse(
CEPH_FEATURE_SERVER_LUMINOUS));
msgr->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::stateless_server(
CEPH_FEATURE_SERVER_LUMINOUS));
msgr->set_policy(entity_name_t::TYPE_CLIENT,
Messenger::Policy::stateless_server(0));
msgr->set_policy(entity_name_t::TYPE_MDS,
Messenger::Policy::stateless_server(0));
// throttle client traffic
Throttle *client_throttler = new Throttle(g_ceph_context, "mon_client_bytes",
g_conf()->mon_client_bytes);
msgr->set_policy_throttlers(entity_name_t::TYPE_CLIENT,
client_throttler, NULL);
// throttle daemon traffic
// NOTE: actual usage on the leader may multiply by the number of
// monitors if they forward large update messages from daemons.
Throttle *daemon_throttler = new Throttle(g_ceph_context, "mon_daemon_bytes",
g_conf()->mon_daemon_bytes);
msgr->set_policy_throttlers(entity_name_t::TYPE_OSD, daemon_throttler,
NULL);
msgr->set_policy_throttlers(entity_name_t::TYPE_MDS, daemon_throttler,
NULL);
entity_addrvec_t bind_addrs = ipaddrs;
entity_addrvec_t public_addrs = ipaddrs;
// check if the public_bind_addr option is set
if (!g_conf()->public_bind_addr.is_blank_ip()) {
bind_addrs = make_mon_addrs(g_conf()->public_bind_addr);
}
dout(0) << "starting " << g_conf()->name << " rank " << rank
<< " at public addrs " << public_addrs
<< " at bind addrs " << bind_addrs
<< " mon_data " << g_conf()->mon_data
<< " fsid " << monmap.get_fsid()
<< dendl;
Messenger *mgr_msgr = Messenger::create(g_ceph_context, public_msgr_type,
entity_name_t::MON(rank), "mon-mgrc",
Messenger::get_random_nonce());
if (!mgr_msgr) {
derr << "unable to create mgr_msgr" << dendl;
prefork.exit(1);
}
mon = new Monitor(g_ceph_context, g_conf()->name.get_id(), store,
msgr, mgr_msgr, &monmap);
mon->orig_argc = argc;
mon->orig_argv = argv;
if (force_sync) {
derr << "flagging a forced sync ..." << dendl;
ostringstream oss;
JSONFormatter jf(true);
mon->sync_force(&jf);
derr << "out:\n";
jf.flush(*_dout);
*_dout << dendl;
}
err = mon->preinit();
if (err < 0) {
derr << "failed to initialize" << dendl;
prefork.exit(1);
}
if (compact || g_conf()->mon_compact_on_start) {
derr << "compacting monitor store ..." << dendl;
mon->store->compact();
derr << "done compacting" << dendl;
}
// bind
err = msgr->bindv(bind_addrs, public_addrs);
if (err < 0) {
derr << "unable to bind monitor to " << bind_addrs << dendl;
prefork.exit(1);
}
if (g_conf()->daemonize) {
global_init_postfork_finish(g_ceph_context);
prefork.daemonize();
}
msgr->start();
mgr_msgr->start();
mon->set_mon_crush_location(crush_loc);
mon->init();
register_async_signal_handler_oneshot(SIGINT, handle_mon_signal);
register_async_signal_handler_oneshot(SIGTERM, handle_mon_signal);
register_async_signal_handler(SIGHUP, handle_mon_signal);
if (g_conf()->inject_early_sigterm)
kill(getpid(), SIGTERM);
msgr->wait();
mgr_msgr->wait();
store->close();
unregister_async_signal_handler(SIGHUP, handle_mon_signal);
unregister_async_signal_handler(SIGINT, handle_mon_signal);
unregister_async_signal_handler(SIGTERM, handle_mon_signal);
shutdown_async_signal_handler();
delete mon;
delete store;
delete msgr;
delete mgr_msgr;
delete client_throttler;
delete daemon_throttler;
// cd on exit, so that gmon.out (if any) goes into a separate directory for each node.
char s[20];
snprintf(s, sizeof(s), "gmon/%d", getpid());
if ((mkdir(s, 0755) == 0) && (chdir(s) == 0)) {
dout(0) << "ceph-mon: gmon.out should be in " << s << dendl;
}
prefork.signal_exit(0);
return 0;
}
| 28,712 | 29.97411 | 136 | cc |
null | ceph-main/src/ceph_osd.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <boost/scoped_ptr.hpp>
#include <iostream>
#include <string>
#include "auth/KeyRing.h"
#include "osd/OSD.h"
#include "os/ObjectStore.h"
#include "mon/MonClient.h"
#include "include/ceph_features.h"
#include "common/config.h"
#include "extblkdev/ExtBlkDevPlugin.h"
#include "mon/MonMap.h"
#include "msg/Messenger.h"
#include "common/Throttle.h"
#include "common/Timer.h"
#include "common/TracepointProvider.h"
#include "common/ceph_argparse.h"
#include "common/numa.h"
#include "global/global_init.h"
#include "global/signal_handler.h"
#include "include/color.h"
#include "common/errno.h"
#include "common/pick_address.h"
#include "perfglue/heap_profiler.h"
#include "include/ceph_assert.h"
#include "common/Preforker.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
using std::cerr;
using std::cout;
using std::map;
using std::ostringstream;
using std::string;
using std::vector;
using ceph::bufferlist;
namespace {
TracepointProvider::Traits osd_tracepoint_traits("libosd_tp.so",
"osd_tracing");
TracepointProvider::Traits os_tracepoint_traits("libos_tp.so",
"osd_objectstore_tracing");
TracepointProvider::Traits bluestore_tracepoint_traits("libbluestore_tp.so",
"bluestore_tracing");
#ifdef WITH_OSD_INSTRUMENT_FUNCTIONS
TracepointProvider::Traits cyg_profile_traits("libcyg_profile_tp.so",
"osd_function_tracing");
#endif
} // anonymous namespace
OSD *osdptr = nullptr;
void handle_osd_signal(int signum)
{
if (osdptr)
osdptr->handle_signal(signum);
}
static void usage()
{
cout << "usage: ceph-osd -i <ID> [flags]\n"
<< " --osd-data PATH data directory\n"
<< " --osd-journal PATH\n"
<< " journal file or block device\n"
<< " --mkfs create a [new] data directory\n"
<< " --mkkey generate a new secret key. This is normally used in combination with --mkfs\n"
<< " --monmap specify the path to the monitor map. This is normally used in combination with --mkfs\n"
<< " --osd-uuid specify the OSD's fsid. This is normally used in combination with --mkfs\n"
<< " --keyring specify a path to the osd keyring. This is normally used in combination with --mkfs\n"
<< " --convert-filestore\n"
<< " run any pending upgrade operations\n"
<< " --flush-journal flush all data out of journal\n"
<< " --osdspec-affinity\n"
<< " set affinity to an osdspec\n"
<< " --dump-journal dump all data of journal\n"
<< " --mkjournal initialize a new journal\n"
<< " --check-wants-journal\n"
<< " check whether a journal is desired\n"
<< " --check-allows-journal\n"
<< " check whether a journal is allowed\n"
<< " --check-needs-journal\n"
<< " check whether a journal is required\n"
<< " --debug_osd <N> set debug level (e.g. 10)\n"
<< " --get-device-fsid PATH\n"
<< " get OSD fsid for the given block device\n"
<< std::endl;
generic_server_usage();
}
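/*
 * Illustrative first-time setup combining the flags above (ids and paths
 * are examples; --mkfs additionally requires a non-zero cluster fsid):
 *
 *   ceph-osd -i 0 --mkfs --mkkey --fsid $(uuidgen) \
 *       --osd-data /var/lib/ceph/osd/ceph-0 \
 *       --keyring /var/lib/ceph/osd/ceph-0/keyring
 */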
int main(int argc, const char **argv)
{
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
auto cct = global_init(
nullptr,
args, CEPH_ENTITY_TYPE_OSD,
CODE_ENVIRONMENT_DAEMON, 0);
ceph_heap_profiler_init();
Preforker forker;
// osd specific args
bool mkfs = false;
bool mkjournal = false;
bool check_wants_journal = false;
bool check_allows_journal = false;
bool check_needs_journal = false;
bool mkkey = false;
bool flushjournal = false;
bool dump_journal = false;
bool convertfilestore = false;
bool get_osd_fsid = false;
bool get_cluster_fsid = false;
bool get_journal_fsid = false;
bool get_device_fsid = false;
string device_path;
std::string dump_pg_log;
std::string osdspec_affinity;
std::string val;
for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ) {
if (ceph_argparse_double_dash(args, i)) {
break;
} else if (ceph_argparse_flag(args, i, "--mkfs", (char*)NULL)) {
mkfs = true;
} else if (ceph_argparse_witharg(args, i, &val, "--osdspec-affinity", (char*)NULL)) {
osdspec_affinity = val;
} else if (ceph_argparse_flag(args, i, "--mkjournal", (char*)NULL)) {
mkjournal = true;
} else if (ceph_argparse_flag(args, i, "--check-allows-journal", (char*)NULL)) {
check_allows_journal = true;
} else if (ceph_argparse_flag(args, i, "--check-wants-journal", (char*)NULL)) {
check_wants_journal = true;
} else if (ceph_argparse_flag(args, i, "--check-needs-journal", (char*)NULL)) {
check_needs_journal = true;
} else if (ceph_argparse_flag(args, i, "--mkkey", (char*)NULL)) {
mkkey = true;
} else if (ceph_argparse_flag(args, i, "--flush-journal", (char*)NULL)) {
flushjournal = true;
} else if (ceph_argparse_flag(args, i, "--convert-filestore", (char*)NULL)) {
convertfilestore = true;
} else if (ceph_argparse_witharg(args, i, &val, "--dump-pg-log", (char*)NULL)) {
dump_pg_log = val;
} else if (ceph_argparse_flag(args, i, "--dump-journal", (char*)NULL)) {
dump_journal = true;
} else if (ceph_argparse_flag(args, i, "--get-cluster-fsid", (char*)NULL)) {
get_cluster_fsid = true;
} else if (ceph_argparse_flag(args, i, "--get-osd-fsid", "--get-osd-uuid", (char*)NULL)) {
get_osd_fsid = true;
} else if (ceph_argparse_flag(args, i, "--get-journal-fsid", "--get-journal-uuid", (char*)NULL)) {
get_journal_fsid = true;
} else if (ceph_argparse_witharg(args, i, &device_path,
"--get-device-fsid", (char*)NULL)) {
get_device_fsid = true;
} else {
++i;
}
}
if (!args.empty()) {
cerr << "unrecognized arg " << args[0] << std::endl;
exit(1);
}
if (global_init_prefork(g_ceph_context) >= 0) {
std::string err;
int r = forker.prefork(err);
if (r < 0) {
cerr << err << std::endl;
return r;
}
if (forker.is_parent()) {
g_ceph_context->_log->start();
if (forker.parent_wait(err) != 0) {
return -ENXIO;
}
return 0;
}
setsid();
global_init_postfork_start(g_ceph_context);
}
common_init_finish(g_ceph_context);
global_init_chdir(g_ceph_context);
if (get_journal_fsid) {
device_path = g_conf().get_val<std::string>("osd_journal");
get_device_fsid = true;
}
if (get_device_fsid) {
uuid_d uuid;
int r = ObjectStore::probe_block_device_fsid(g_ceph_context, device_path,
&uuid);
if (r < 0) {
cerr << "failed to get device fsid for " << device_path
<< ": " << cpp_strerror(r) << std::endl;
forker.exit(1);
}
cout << uuid << std::endl;
forker.exit(0);
}
if (!dump_pg_log.empty()) {
common_init_finish(g_ceph_context);
bufferlist bl;
std::string error;
if (bl.read_file(dump_pg_log.c_str(), &error) >= 0) {
pg_log_entry_t e;
auto p = bl.cbegin();
while (!p.end()) {
uint64_t pos = p.get_off();
try {
decode(e, p);
}
catch (const ceph::buffer::error &e) {
derr << "failed to decode LogEntry at offset " << pos << dendl;
forker.exit(1);
}
derr << pos << ":\t" << e << dendl;
}
} else {
derr << "unable to open " << dump_pg_log << ": " << error << dendl;
}
forker.exit(0);
}
// whoami
char *end;
const char *id = g_conf()->name.get_id().c_str();
int whoami = strtol(id, &end, 10);
std::string data_path = g_conf().get_val<std::string>("osd_data");
if (*end || end == id || whoami < 0) {
derr << "must specify '-i #' where # is the osd number" << dendl;
forker.exit(1);
}
if (data_path.empty()) {
derr << "must specify '--osd-data=foo' data path" << dendl;
forker.exit(1);
}
// the store
std::string store_type;
{
char fn[PATH_MAX];
snprintf(fn, sizeof(fn), "%s/type", data_path.c_str());
int fd = ::open(fn, O_RDONLY|O_CLOEXEC);
if (fd >= 0) {
bufferlist bl;
bl.read_fd(fd, 64);
if (bl.length()) {
store_type = string(bl.c_str(), bl.length() - 1); // drop \n
dout(5) << "object store type is " << store_type << dendl;
}
::close(fd);
} else if (mkfs) {
store_type = g_conf().get_val<std::string>("osd_objectstore");
} else {
// hrm, infer the type
snprintf(fn, sizeof(fn), "%s/current", data_path.c_str());
struct stat st;
if (::stat(fn, &st) == 0 &&
S_ISDIR(st.st_mode)) {
derr << "missing 'type' file, inferring filestore from current/ dir"
<< dendl;
store_type = "filestore";
} else {
snprintf(fn, sizeof(fn), "%s/block", data_path.c_str());
if (::stat(fn, &st) == 0 &&
S_ISLNK(st.st_mode)) {
derr << "missing 'type' file, inferring bluestore from block symlink"
<< dendl;
store_type = "bluestore";
} else {
derr << "missing 'type' file and unable to infer osd type" << dendl;
forker.exit(1);
}
}
}
}
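  // The "type" file consulted above holds a single newline-terminated word
  // naming the objectstore, e.g.:
  //   $ cat /var/lib/ceph/osd/ceph-0/type
  //   bluestore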
std::string journal_path = g_conf().get_val<std::string>("osd_journal");
uint32_t flags = g_conf().get_val<uint64_t>("osd_os_flags");
std::unique_ptr<ObjectStore> store = ObjectStore::create(g_ceph_context,
store_type,
data_path,
journal_path,
flags);
if (!store) {
derr << "unable to create object store" << dendl;
forker.exit(-ENODEV);
}
if (mkkey) {
common_init_finish(g_ceph_context);
KeyRing keyring;
EntityName ename{g_conf()->name};
EntityAuth eauth;
std::string keyring_path = g_conf().get_val<std::string>("keyring");
int ret = keyring.load(g_ceph_context, keyring_path);
if (ret == 0 &&
keyring.get_auth(ename, eauth)) {
derr << "already have key in keyring " << keyring_path << dendl;
} else {
eauth.key.create(g_ceph_context, CEPH_CRYPTO_AES);
keyring.add(ename, eauth);
bufferlist bl;
keyring.encode_plaintext(bl);
int r = bl.write_file(keyring_path.c_str(), 0600);
if (r)
derr << TEXT_RED << " ** ERROR: writing new keyring to "
<< keyring_path << ": " << cpp_strerror(r) << TEXT_NORMAL
<< dendl;
else
derr << "created new key in keyring " << keyring_path << dendl;
}
}
if (mkfs) {
common_init_finish(g_ceph_context);
if (g_conf().get_val<uuid_d>("fsid").is_zero()) {
derr << "must specify cluster fsid" << dendl;
forker.exit(-EINVAL);
}
int err = OSD::mkfs(g_ceph_context, std::move(store), g_conf().get_val<uuid_d>("fsid"),
whoami, osdspec_affinity);
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error creating empty object store in "
<< data_path << ": " << cpp_strerror(-err) << TEXT_NORMAL << dendl;
forker.exit(1);
}
dout(0) << "created object store " << data_path
<< " for osd." << whoami
<< " fsid " << g_conf().get_val<uuid_d>("fsid")
<< dendl;
}
if (mkfs || mkkey) {
forker.exit(0);
}
if (mkjournal) {
common_init_finish(g_ceph_context);
int err = store->mkjournal();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error creating fresh journal "
<< journal_path << " for object store " << data_path << ": "
<< cpp_strerror(-err) << TEXT_NORMAL << dendl;
forker.exit(1);
}
derr << "created new journal " << journal_path
<< " for object store " << data_path << dendl;
forker.exit(0);
}
if (check_wants_journal) {
if (store->wants_journal()) {
cout << "wants journal: yes" << std::endl;
forker.exit(0);
} else {
cout << "wants journal: no" << std::endl;
forker.exit(1);
}
}
if (check_allows_journal) {
if (store->allows_journal()) {
cout << "allows journal: yes" << std::endl;
forker.exit(0);
} else {
cout << "allows journal: no" << std::endl;
forker.exit(1);
}
}
if (check_needs_journal) {
if (store->needs_journal()) {
cout << "needs journal: yes" << std::endl;
forker.exit(0);
} else {
cout << "needs journal: no" << std::endl;
forker.exit(1);
}
}
if (flushjournal) {
common_init_finish(g_ceph_context);
int err = store->mount();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error flushing journal " << journal_path
<< " for object store " << data_path
<< ": " << cpp_strerror(-err) << TEXT_NORMAL << dendl;
goto flushjournal_out;
}
store->umount();
derr << "flushed journal " << journal_path
<< " for object store " << data_path
<< dendl;
flushjournal_out:
store.reset();
forker.exit(err < 0 ? 1 : 0);
}
if (dump_journal) {
common_init_finish(g_ceph_context);
int err = store->dump_journal(cout);
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error dumping journal " << journal_path
<< " for object store " << data_path
<< ": " << cpp_strerror(-err) << TEXT_NORMAL << dendl;
forker.exit(1);
}
derr << "dumped journal " << journal_path
<< " for object store " << data_path
<< dendl;
forker.exit(0);
}
if (convertfilestore) {
int err = store->mount();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error mounting store " << data_path
<< ": " << cpp_strerror(-err) << TEXT_NORMAL << dendl;
forker.exit(1);
}
err = store->upgrade();
store->umount();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error converting store " << data_path
<< ": " << cpp_strerror(-err) << TEXT_NORMAL << dendl;
forker.exit(1);
}
forker.exit(0);
}
{
int r = extblkdev::preload(g_ceph_context);
if (r < 0) {
derr << "Failed preloading extblkdev plugins, error code: " << r << dendl;
forker.exit(1);
}
}
string magic;
uuid_d cluster_fsid, osd_fsid;
ceph_release_t require_osd_release = ceph_release_t::unknown;
int w;
int r = OSD::peek_meta(store.get(), &magic, &cluster_fsid, &osd_fsid, &w,
&require_osd_release);
if (r < 0) {
derr << TEXT_RED << " ** ERROR: unable to open OSD superblock on "
<< data_path << ": " << cpp_strerror(-r)
<< TEXT_NORMAL << dendl;
if (r == -ENOTSUP) {
derr << TEXT_RED << " ** please verify that underlying storage "
<< "supports xattrs" << TEXT_NORMAL << dendl;
}
forker.exit(1);
}
if (w != whoami) {
derr << "OSD id " << w << " != my id " << whoami << dendl;
forker.exit(1);
}
if (strcmp(magic.c_str(), CEPH_OSD_ONDISK_MAGIC)) {
derr << "OSD magic " << magic << " != my " << CEPH_OSD_ONDISK_MAGIC
<< dendl;
forker.exit(1);
}
if (get_cluster_fsid) {
cout << cluster_fsid << std::endl;
forker.exit(0);
}
if (get_osd_fsid) {
cout << osd_fsid << std::endl;
forker.exit(0);
}
{
ostringstream err;
if (!can_upgrade_from(require_osd_release, "require_osd_release", err)) {
derr << err.str() << dendl;
forker.exit(1);
}
}
// consider objectstore numa node
int os_numa_node = -1;
r = store->get_numa_node(&os_numa_node, nullptr, nullptr);
if (r >= 0 && os_numa_node >= 0) {
dout(1) << " objectstore numa_node " << os_numa_node << dendl;
}
int iface_preferred_numa_node = -1;
if (g_conf().get_val<bool>("osd_numa_prefer_iface")) {
iface_preferred_numa_node = os_numa_node;
}
// messengers
std::string msg_type = g_conf().get_val<std::string>("ms_type");
std::string public_msg_type =
g_conf().get_val<std::string>("ms_public_type");
std::string cluster_msg_type =
g_conf().get_val<std::string>("ms_cluster_type");
public_msg_type = public_msg_type.empty() ? msg_type : public_msg_type;
cluster_msg_type = cluster_msg_type.empty() ? msg_type : cluster_msg_type;
uint64_t nonce = Messenger::get_random_nonce();
Messenger *ms_public = Messenger::create(g_ceph_context, public_msg_type,
entity_name_t::OSD(whoami), "client", nonce);
Messenger *ms_cluster = Messenger::create(g_ceph_context, cluster_msg_type,
entity_name_t::OSD(whoami), "cluster", nonce);
Messenger *ms_hb_back_client = Messenger::create(g_ceph_context, cluster_msg_type,
entity_name_t::OSD(whoami), "hb_back_client", nonce);
Messenger *ms_hb_front_client = Messenger::create(g_ceph_context, public_msg_type,
entity_name_t::OSD(whoami), "hb_front_client", nonce);
Messenger *ms_hb_back_server = Messenger::create(g_ceph_context, cluster_msg_type,
entity_name_t::OSD(whoami), "hb_back_server", nonce);
Messenger *ms_hb_front_server = Messenger::create(g_ceph_context, public_msg_type,
entity_name_t::OSD(whoami), "hb_front_server", nonce);
Messenger *ms_objecter = Messenger::create(g_ceph_context, public_msg_type,
entity_name_t::OSD(whoami), "ms_objecter", nonce);
  if (!ms_public || !ms_cluster || !ms_hb_front_client ||
      !ms_hb_back_client || !ms_hb_back_server || !ms_hb_front_server ||
      !ms_objecter)
forker.exit(1);
ms_cluster->set_cluster_protocol(CEPH_OSD_PROTOCOL);
ms_hb_front_client->set_cluster_protocol(CEPH_OSD_PROTOCOL);
ms_hb_back_client->set_cluster_protocol(CEPH_OSD_PROTOCOL);
ms_hb_back_server->set_cluster_protocol(CEPH_OSD_PROTOCOL);
ms_hb_front_server->set_cluster_protocol(CEPH_OSD_PROTOCOL);
dout(0) << "starting osd." << whoami
<< " osd_data " << data_path
<< " " << ((journal_path.empty()) ?
"(no journal)" : journal_path)
<< dendl;
uint64_t message_size =
g_conf().get_val<Option::size_t>("osd_client_message_size_cap");
boost::scoped_ptr<Throttle> client_byte_throttler(
new Throttle(g_ceph_context, "osd_client_bytes", message_size));
uint64_t message_cap = g_conf().get_val<uint64_t>("osd_client_message_cap");
boost::scoped_ptr<Throttle> client_msg_throttler(
new Throttle(g_ceph_context, "osd_client_messages", message_cap));
// All feature bits 0 - 34 should be present from dumpling v0.67 forward
uint64_t osd_required =
CEPH_FEATURE_UID |
CEPH_FEATURE_PGID64 |
CEPH_FEATURE_OSDENC;
ms_public->set_default_policy(Messenger::Policy::stateless_registered_server(0));
ms_public->set_policy_throttlers(entity_name_t::TYPE_CLIENT,
client_byte_throttler.get(),
client_msg_throttler.get());
ms_public->set_policy(entity_name_t::TYPE_MON,
Messenger::Policy::lossy_client(osd_required));
ms_public->set_policy(entity_name_t::TYPE_MGR,
Messenger::Policy::lossy_client(osd_required));
ms_cluster->set_default_policy(Messenger::Policy::stateless_server(0));
ms_cluster->set_policy(entity_name_t::TYPE_MON, Messenger::Policy::lossy_client(0));
ms_cluster->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::lossless_peer(osd_required));
ms_cluster->set_policy(entity_name_t::TYPE_CLIENT,
Messenger::Policy::stateless_server(0));
ms_hb_front_client->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::lossy_client(0));
ms_hb_back_client->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::lossy_client(0));
ms_hb_back_server->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::stateless_server(0));
ms_hb_front_server->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::stateless_server(0));
ms_objecter->set_default_policy(Messenger::Policy::lossy_client(CEPH_FEATURE_OSDREPLYMUX));
entity_addrvec_t public_addrs, public_bind_addrs, cluster_addrs;
r = pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC, &public_addrs,
iface_preferred_numa_node);
if (r < 0) {
derr << "Failed to pick public address." << dendl;
forker.exit(1);
} else {
dout(10) << "picked public_addrs " << public_addrs << dendl;
}
r = pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC_BIND,
&public_bind_addrs, iface_preferred_numa_node);
if (r == -ENOENT) {
dout(10) << "there is no public_bind_addrs, defaulting to public_addrs"
<< dendl;
public_bind_addrs = public_addrs;
} else if (r < 0) {
derr << "Failed to pick public bind address." << dendl;
forker.exit(1);
} else {
dout(10) << "picked public_bind_addrs " << public_bind_addrs << dendl;
}
r = pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_CLUSTER, &cluster_addrs,
iface_preferred_numa_node);
if (r < 0) {
derr << "Failed to pick cluster address." << dendl;
forker.exit(1);
}
if (ms_public->bindv(public_bind_addrs, public_addrs) < 0) {
derr << "Failed to bind to " << public_bind_addrs << dendl;
forker.exit(1);
}
if (ms_cluster->bindv(cluster_addrs) < 0)
forker.exit(1);
bool is_delay = g_conf().get_val<bool>("osd_heartbeat_use_min_delay_socket");
if (is_delay) {
ms_hb_front_client->set_socket_priority(SOCKET_PRIORITY_MIN_DELAY);
ms_hb_back_client->set_socket_priority(SOCKET_PRIORITY_MIN_DELAY);
ms_hb_back_server->set_socket_priority(SOCKET_PRIORITY_MIN_DELAY);
ms_hb_front_server->set_socket_priority(SOCKET_PRIORITY_MIN_DELAY);
}
entity_addrvec_t hb_front_addrs = public_bind_addrs;
for (auto& a : hb_front_addrs.v) {
a.set_port(0);
}
if (ms_hb_front_server->bindv(hb_front_addrs) < 0)
forker.exit(1);
if (ms_hb_front_client->client_bind(hb_front_addrs.front()) < 0)
forker.exit(1);
entity_addrvec_t hb_back_addrs = cluster_addrs;
for (auto& a : hb_back_addrs.v) {
a.set_port(0);
}
if (ms_hb_back_server->bindv(hb_back_addrs) < 0)
forker.exit(1);
if (ms_hb_back_client->client_bind(hb_back_addrs.front()) < 0)
forker.exit(1);
// install signal handlers
init_async_signal_handler();
register_async_signal_handler(SIGHUP, sighup_handler);
TracepointProvider::initialize<osd_tracepoint_traits>(g_ceph_context);
TracepointProvider::initialize<os_tracepoint_traits>(g_ceph_context);
TracepointProvider::initialize<bluestore_tracepoint_traits>(g_ceph_context);
#ifdef WITH_OSD_INSTRUMENT_FUNCTIONS
TracepointProvider::initialize<cyg_profile_traits>(g_ceph_context);
#endif
srand(time(NULL) + getpid());
ceph::async::io_context_pool poolctx(
cct->_conf.get_val<std::uint64_t>("osd_asio_thread_count"));
MonClient mc(g_ceph_context, poolctx);
if (mc.build_initial_monmap() < 0)
return -1;
global_init_chdir(g_ceph_context);
if (global_init_preload_erasure_code(g_ceph_context) < 0) {
forker.exit(1);
}
osdptr = new OSD(g_ceph_context,
std::move(store),
whoami,
ms_cluster,
ms_public,
ms_hb_front_client,
ms_hb_back_client,
ms_hb_front_server,
ms_hb_back_server,
ms_objecter,
&mc,
data_path,
journal_path,
poolctx);
int err = osdptr->pre_init();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: osd pre_init failed: " << cpp_strerror(-err)
<< TEXT_NORMAL << dendl;
forker.exit(1);
}
ms_public->start();
ms_hb_front_client->start();
ms_hb_back_client->start();
ms_hb_front_server->start();
ms_hb_back_server->start();
ms_cluster->start();
ms_objecter->start();
// start osd
err = osdptr->init();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: osd init failed: " << cpp_strerror(-err)
<< TEXT_NORMAL << dendl;
forker.exit(1);
}
// -- daemonize --
if (g_conf()->daemonize) {
global_init_postfork_finish(g_ceph_context);
forker.daemonize();
}
register_async_signal_handler_oneshot(SIGINT, handle_osd_signal);
register_async_signal_handler_oneshot(SIGTERM, handle_osd_signal);
osdptr->final_init();
if (g_conf().get_val<bool>("inject_early_sigterm"))
kill(getpid(), SIGTERM);
ms_public->wait();
ms_hb_front_client->wait();
ms_hb_back_client->wait();
ms_hb_front_server->wait();
ms_hb_back_server->wait();
ms_cluster->wait();
ms_objecter->wait();
unregister_async_signal_handler(SIGHUP, sighup_handler);
unregister_async_signal_handler(SIGINT, handle_osd_signal);
unregister_async_signal_handler(SIGTERM, handle_osd_signal);
shutdown_async_signal_handler();
// done
poolctx.stop();
delete osdptr;
delete ms_public;
delete ms_hb_front_client;
delete ms_hb_back_client;
delete ms_hb_front_server;
delete ms_hb_back_server;
delete ms_cluster;
delete ms_objecter;
client_byte_throttler.reset();
client_msg_throttler.reset();
// cd on exit, so that gmon.out (if any) goes into a separate directory for each node.
char s[20];
snprintf(s, sizeof(s), "gmon/%d", getpid());
if ((mkdir(s, 0755) == 0) && (chdir(s) == 0)) {
dout(0) << "ceph-osd: gmon.out should be in " << s << dendl;
}
return 0;
}
| 25,438 | 31.242079 | 138 | cc |
null | ceph-main/src/ceph_syn.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/stat.h>
#include <iostream>
#include <string>
#include "common/config.h"
#include "common/async/context_pool.h"
#include "client/SyntheticClient.h"
#include "client/Client.h"
#include "msg/Messenger.h"
#include "mon/MonClient.h"
#include "common/Timer.h"
#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "common/pick_address.h"
#include <sys/types.h>
#include <fcntl.h>
using namespace std;
extern int syn_filer_flags;
int main(int argc, const char **argv, char *envp[])
{
//cerr << "ceph-syn starting" << std::endl;
auto args = argv_to_vec(argc, argv);
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY, 0);
common_init_finish(g_ceph_context);
parse_syn_options(args); // for SyntheticClient
pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC);
// get monmap
ceph::async::io_context_pool poolctx(1);
MonClient mc(g_ceph_context, poolctx);
if (mc.build_initial_monmap() < 0)
return -1;
list<Client*> clients;
list<SyntheticClient*> synclients;
vector<Messenger*> messengers{static_cast<unsigned>(num_client), nullptr};
vector<MonClient*> mclients{static_cast<unsigned>(num_client), nullptr};
cout << "ceph-syn: starting " << num_client << " syn client(s)" << std::endl;
for (int i=0; i<num_client; i++) {
messengers[i] = Messenger::create_client_messenger(g_ceph_context,
"synclient");
mclients[i] = new MonClient(g_ceph_context, poolctx);
mclients[i]->build_initial_monmap();
auto client = new StandaloneClient(messengers[i], mclients[i], poolctx);
client->set_filer_flags(syn_filer_flags);
SyntheticClient *syn = new SyntheticClient(client);
clients.push_back(client);
synclients.push_back(syn);
messengers[i]->start();
}
for (list<SyntheticClient*>::iterator p = synclients.begin();
p != synclients.end();
++p)
(*p)->start_thread();
poolctx.stop();
//cout << "waiting for client(s) to finish" << std::endl;
while (!clients.empty()) {
Client *client = clients.front();
SyntheticClient *syn = synclients.front();
clients.pop_front();
synclients.pop_front();
syn->join_thread();
delete syn;
delete client;
}
for (int i = 0; i < num_client; ++i) {
// wait for messenger to finish
delete mclients[i];
messengers[i]->shutdown();
messengers[i]->wait();
delete messengers[i];
}
return 0;
}
| 2,891 | 26.283019 | 79 | cc |
null | ceph-main/src/ckill.sh | #!/bin/bash -e
if [ -e CMakeCache.txt ]; then
[ -z "$CEPH_BIN" ] && CEPH_BIN=bin
fi
if [ -z "$CEPHADM" ]; then
CEPHADM="${CEPH_BIN}/cephadm"
fi
# fsid
if [ -e fsid ] ; then
fsid=`cat fsid`
else
echo 'no fsid file, so no cluster?'
exit 0
fi
echo "fsid $fsid"
sudo $CEPHADM rm-cluster --force --fsid $fsid
| 329 | 14 | 45 | sh |
null | ceph-main/src/cls_acl.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <openssl/md5.h>
#include <openssl/sha.h>
#include "include/types.h"
#include "objclass/objclass.h"
CLS_VER(1,0)
CLS_NAME(acl)
int get_method(cls_method_context_t ctx, char *indata, int datalen,
char **outdata, int *outdatalen)
{
  cls_log("acl get method");
  cls_log("indata=%.*s data_len=%d", datalen, indata, datalen);
  int r = cls_getxattr(ctx, "acls", outdata, outdatalen);
  if (r < 0)
    return r;
  return 0;
}
int set_method(cls_method_context_t ctx, char *indata, int datalen,
char **outdata, int *outdatalen)
{
  cls_log("acl set method");
  cls_log("indata=%.*s data_len=%d", datalen, indata, datalen);
  int r = cls_setxattr(ctx, "acls", indata, datalen);
  if (r < 0)
    return r;
  return 0;
}
CLS_INIT(acl)
{
cls_log("Loaded acl class!");
cls_handle_t h_class;
cls_method_handle_t h_get;
cls_method_handle_t h_set;
cls_register("acl", &h_class);
cls_register_method(h_class, "get", CLS_METHOD_RD, get_method, &h_get);
cls_register_method(h_class, "set", CLS_METHOD_WR, set_method, &h_set);
return;
}
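/*
 * Client-side invocation sketch via the librados C API (pool/object names
 * are illustrative):
 *
 *   char out[4096];
 *   int r = rados_exec(ioctx, "myobject", "acl", "get",
 *                      NULL, 0, out, sizeof(out));
 *   // r is the number of bytes returned, or a negative error code
 */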
| 1,223 | 20.103448 | 74 | cc |
null | ceph-main/src/cls_crypto.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <openssl/md5.h>
#include <openssl/sha.h>
#include "include/types.h"
#include "objclass/objclass.h"
CLS_VER(1,0)
CLS_NAME(crypto)
int md5_method(cls_method_context_t ctx, char *indata, int datalen,
char **outdata, int *outdatalen)
{
MD5_CTX c;
unsigned char *md;
cls_log("md5 method");
cls_log("indata=%.*s data_len=%d", datalen, indata, datalen);
md = (unsigned char *)cls_alloc(MD5_DIGEST_LENGTH);
if (!md)
return -ENOMEM;
MD5_Init(&c);
MD5_Update(&c, indata, (unsigned long)datalen);
MD5_Final(md,&c);
*outdata = (char *)md;
*outdatalen = MD5_DIGEST_LENGTH;
return 0;
}
int sha1_method(cls_method_context_t ctx, char *indata, int datalen,
char **outdata, int *outdatalen)
{
SHA_CTX c;
unsigned char *md;
cls_log("sha1 method");
cls_log("indata=%.*s data_len=%d", datalen, indata, datalen);
md = (unsigned char *)cls_alloc(SHA_DIGEST_LENGTH);
if (!md)
return -ENOMEM;
SHA1_Init(&c);
SHA1_Update(&c, indata, (unsigned long)datalen);
SHA1_Final(md,&c);
*outdata = (char *)md;
*outdatalen = SHA_DIGEST_LENGTH;
return 0;
}
CLS_INIT(crypto)
{
cls_log("Loaded crypto class!");
cls_handle_t h_class;
cls_method_handle_t h_md5;
cls_method_handle_t h_sha1;
cls_register("crypto", &h_class);
cls_register_method(h_class, "md5", CLS_METHOD_RD, md5_method, &h_md5);
cls_register_method(h_class, "sha1", CLS_METHOD_RD, sha1_method, &h_sha1);
return;
}
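/*
 * Illustrative client call (librados C API) computing an object-side MD5
 * of the supplied buffer:
 *
 *   unsigned char digest[MD5_DIGEST_LENGTH];
 *   int r = rados_exec(ioctx, "myobject", "crypto", "md5",
 *                      data, datalen, (char *)digest, sizeof(digest));
 */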
| 1,662 | 20.320513 | 77 | cc |
null | ceph-main/src/cstart.sh | #!/bin/bash -e
if [ -e CMakeCache.txt ]; then
[ -z "$CEPH_BIN" ] && CEPH_BIN=bin
fi
if [ -z "$CEPHADM" ]; then
CEPHADM="${CEPH_BIN}/cephadm"
fi
image_base="quay.io/ceph-ci/ceph"
if which podman > /dev/null 2>&1; then
runtime="podman"
else
runtime="docker"
fi
# fsid
if [ -e fsid ] ; then
fsid=`cat fsid`
else
fsid=`uuidgen`
echo $fsid > fsid
fi
echo "fsid $fsid"
shortid=`echo $fsid | cut -c 1-8`
echo $shortid > shortid
echo "shortid $shortid"
# ip
if [ -z "$ip" ]; then
if [ -x "$(which ip 2>/dev/null)" ]; then
IP_CMD="ip addr"
else
IP_CMD="ifconfig"
fi
    # filter out IPv6 and localhost addresses
ip="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
# if nothing left, try using localhost address, it might work
if [ -z "$ip" ]; then ip="127.0.0.1"; fi
fi
echo "ip $ip"
# port
if [ -e port ] ; then
port=`cat port`
else
  while true
  do
    port="$(( RANDOM % 1000 + 40000 ))"
    ss -a -n | grep LISTEN | grep "${ip}:${port} " > /dev/null 2>&1 || break
done
echo $port > port
fi
echo "mon port $port"
# make sure we have an image
if ! sudo $runtime image inspect $image_base:$shortid 1>/dev/null 2>/dev/null; then
echo "building initial $image_base:$shortid image..."
sudo ../src/script/cpatch -t $image_base:$shortid
fi
sudo $CEPHADM rm-cluster --force --fsid $fsid
sudo $CEPHADM --image ${image_base}:${shortid} bootstrap \
--skip-pull \
--fsid $fsid \
--mon-addrv "[v2:$ip:$port]" \
--output-dir . \
--allow-overwrite \
$@
# kludge to make 'bin/ceph ...' work
sudo chmod 755 ceph.client.admin.keyring
echo 'keyring = ceph.client.admin.keyring' >> ceph.conf
# don't use repo digests; this implicitly does a pull and we don't want that
${CEPH_BIN}/ceph config set mgr mgr/cephadm/use_repo_digest false
echo
echo "sudo ../src/script/cpatch -t $image_base:$shortid"
echo
| 1,965 | 22.129412 | 102 | sh |
null | ceph-main/src/krbd.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include <fcntl.h>
#include <iostream>
#include <memory>
#include <optional>
#include <poll.h>
#include <regex>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <tuple>
#include <unistd.h>
#include <utility>
#include "auth/KeyRing.h"
#include "common/errno.h"
#include "common/Formatter.h"
#include "common/module.h"
#include "common/run_cmd.h"
#include "common/safe_io.h"
#include "common/secret.h"
#include "common/TextTable.h"
#include "common/Thread.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#include "include/krbd.h"
#include "mon/MonMap.h"
#include <blkid/blkid.h>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/tokenizer.hpp>
#include <libudev.h>
static const int UDEV_BUF_SIZE = 1 << 20; /* doubled to 2M (SO_RCVBUFFORCE) */
static const char DEVNODE_PREFIX[] = "/dev/rbd";
static const char SNAP_HEAD_NAME[] = "-";
#define DEFINE_UDEV_UPTR(what) \
struct udev_##what##_deleter { \
void operator()(udev_##what *p) { \
udev_##what##_unref(p); \
} \
}; \
using udev_##what##_uptr = \
std::unique_ptr<udev_##what, udev_##what##_deleter>;
DEFINE_UDEV_UPTR(monitor) /* udev_monitor_uptr */
DEFINE_UDEV_UPTR(enumerate) /* udev_enumerate_uptr */
DEFINE_UDEV_UPTR(device) /* udev_device_uptr */
using std::string;
struct krbd_ctx {
CephContext *cct;
struct udev *udev;
uint32_t flags; /* KRBD_CTX_F_* */
};
struct krbd_spec {
std::string pool_name;
std::string nspace_name;
std::string image_name;
std::string snap_name;
krbd_spec(const char *pool_name, const char *nspace_name,
const char *image_name, const char *snap_name)
: pool_name(pool_name),
nspace_name(nspace_name),
image_name(image_name),
snap_name(*snap_name ? snap_name : SNAP_HEAD_NAME) { }
bool operator==(const krbd_spec& rhs) const {
return pool_name == rhs.pool_name &&
nspace_name == rhs.nspace_name &&
image_name == rhs.image_name &&
snap_name == rhs.snap_name;
}
};
static std::ostream& operator<<(std::ostream& os, const krbd_spec& spec)
{
os << spec.pool_name << "/";
if (!spec.nspace_name.empty())
os << spec.nspace_name << "/";
os << spec.image_name;
if (spec.snap_name != SNAP_HEAD_NAME)
os << "@" << spec.snap_name;
return os;
}
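// e.g. "rbd/myns/myimage@snap1"; the namespace component is omitted for the
// default namespace and the "@snap" suffix is omitted for the image head.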
static std::optional<krbd_spec> spec_from_dev(udev_device *dev)
{
const char *pool_name = udev_device_get_sysattr_value(dev, "pool");
const char *nspace_name = udev_device_get_sysattr_value(dev, "pool_ns");
const char *image_name = udev_device_get_sysattr_value(dev, "name");
const char *snap_name = udev_device_get_sysattr_value(dev, "current_snap");
if (!pool_name || !image_name || !snap_name)
return std::nullopt;
return std::make_optional<krbd_spec>(
pool_name, nspace_name ?: "", image_name, snap_name);
}
static udev_device_uptr dev_from_list_entry(udev *udev, udev_list_entry *l)
{
return udev_device_uptr(
udev_device_new_from_syspath(udev, udev_list_entry_get_name(l)));
}
static std::string get_devnode(udev_device *dev)
{
std::string devnode = DEVNODE_PREFIX;
devnode += udev_device_get_sysname(dev);
return devnode;
}
static int sysfs_write_rbd(const char *which, const string& buf)
{
const string s = string("/sys/bus/rbd/") + which;
const string t = s + "_single_major";
int fd;
int r;
/*
* 'add' and 'add_single_major' interfaces are identical, but if rbd
* kernel module is new enough and is configured to use single-major
* scheme, 'add' is disabled in order to prevent old userspace from
* doing weird things at unmap time.
*
* Same goes for 'remove' vs 'remove_single_major'.
*/
fd = open(t.c_str(), O_WRONLY);
if (fd < 0) {
if (errno == ENOENT) {
fd = open(s.c_str(), O_WRONLY);
if (fd < 0)
return -errno;
} else {
return -errno;
}
}
r = safe_write(fd, buf.c_str(), buf.size());
close(fd);
return r;
}
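/*
 * Shell equivalent of the write above (illustrative; see build_map_buf()
 * below for how the buffer is composed):
 *
 *   echo "1.2.3.4:6789 name=admin,key=client.admin rbd myimage -" \
 *       > /sys/bus/rbd/add_single_major
 */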
static int sysfs_write_rbd_add(const string& buf)
{
return sysfs_write_rbd("add", buf);
}
static int sysfs_write_rbd_remove(const string& buf)
{
return sysfs_write_rbd("remove", buf);
}
static int have_minor_attr(void)
{
/*
* 'minor' attribute was added as part of single_major merge, which
* exposed the 'single_major' parameter. 'minor' is always present,
* regardless of whether single-major scheme is turned on or not.
*
* (Something like ver >= KERNEL_VERSION(3, 14, 0) is a no-go because
* this has to work with rbd.ko backported to various kernels.)
*/
return access("/sys/module/rbd/parameters/single_major", F_OK) == 0;
}
static int build_map_buf(CephContext *cct, const krbd_spec& spec,
const string& options, string *pbuf)
{
bool msgr2 = false;
std::ostringstream oss;
int r;
boost::char_separator<char> sep(",");
boost::tokenizer<boost::char_separator<char>> tok(options, sep);
for (const auto& t : tok) {
if (boost::starts_with(t, "ms_mode=")) {
/* msgr2 unless ms_mode=legacy */
msgr2 = t.compare(8, t.npos, "legacy");
}
}
MonMap monmap;
r = monmap.build_initial(cct, false, std::cerr);
if (r < 0)
return r;
/*
* If msgr2, filter TYPE_MSGR2 addresses. Otherwise, filter
* TYPE_LEGACY addresses.
*/
for (const auto& p : monmap.mon_info) {
for (const auto& a : p.second.public_addrs.v) {
if ((msgr2 && a.is_msgr2()) || (!msgr2 && a.is_legacy())) {
if (oss.tellp() > 0) {
oss << ",";
}
oss << a.get_sockaddr();
}
}
}
if (oss.tellp() == 0) {
std::cerr << "rbd: failed to get mon address (possible ms_mode mismatch)" << std::endl;
return -ENOENT;
}
oss << " name=" << cct->_conf->name.get_id();
KeyRing keyring;
auto auth_client_required =
cct->_conf.get_val<std::string>("auth_client_required");
if (auth_client_required != "none") {
r = keyring.from_ceph_context(cct);
auto keyfile = cct->_conf.get_val<std::string>("keyfile");
auto key = cct->_conf.get_val<std::string>("key");
if (r == -ENOENT && keyfile.empty() && key.empty())
r = 0;
if (r < 0) {
std::cerr << "rbd: failed to get secret" << std::endl;
return r;
}
}
CryptoKey secret;
string key_name = string("client.") + cct->_conf->name.get_id();
if (keyring.get_secret(cct->_conf->name, secret)) {
string secret_str;
secret.encode_base64(secret_str);
r = set_kernel_secret(secret_str.c_str(), key_name.c_str());
if (r >= 0) {
if (r == 0)
std::cerr << "rbd: warning: secret has length 0" << std::endl;
oss << ",key=" << key_name;
} else if (r == -ENODEV || r == -ENOSYS) {
// running against older kernel; fall back to secret= in options
oss << ",secret=" << secret_str;
} else {
std::cerr << "rbd: failed to add secret '" << key_name << "' to kernel"
<< std::endl;
return r;
}
} else if (is_kernel_secret(key_name.c_str())) {
oss << ",key=" << key_name;
}
if (!options.empty())
oss << "," << options;
if (!spec.nspace_name.empty())
oss << ",_pool_ns=" << spec.nspace_name;
oss << " " << spec.pool_name << " " << spec.image_name << " "
<< spec.snap_name;
*pbuf = oss.str();
return 0;
}
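/*
 * The buffer assembled above is a single line of the form (values
 * illustrative):
 *
 *   1.2.3.4:6789,1.2.3.5:6789 name=admin,key=client.admin rbd myimage -
 *
 * i.e. mon addresses, comma-separated options, then pool, image and snap
 * name, with "-" denoting the image head.
 */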
/*
* Return:
* <kernel error, false> - didn't map
* <0 or udev error, true> - mapped
*/
template <typename F>
static std::pair<int, bool> wait_for_mapping(int sysfs_r_fd, udev_monitor *mon,
F udev_device_handler)
{
struct pollfd fds[2];
int sysfs_r = INT_MAX, udev_r = INT_MAX;
int r;
fds[0].fd = sysfs_r_fd;
fds[0].events = POLLIN;
fds[1].fd = udev_monitor_get_fd(mon);
fds[1].events = POLLIN;
for (;;) {
if (poll(fds, 2, -1) < 0) {
ceph_abort_msgf("poll failed: %d", -errno);
}
if (fds[0].revents) {
r = safe_read_exact(sysfs_r_fd, &sysfs_r, sizeof(sysfs_r));
if (r < 0) {
ceph_abort_msgf("safe_read_exact failed: %d", r);
}
if (sysfs_r < 0) {
return std::make_pair(sysfs_r, false);
}
if (udev_r != INT_MAX) {
ceph_assert(!sysfs_r);
return std::make_pair(udev_r, true);
}
fds[0].fd = -1;
}
if (fds[1].revents) {
for (;;) {
udev_device_uptr dev(udev_monitor_receive_device(mon));
if (!dev) {
if (errno != EINTR && errno != EAGAIN) {
udev_r = -errno;
if (sysfs_r != INT_MAX) {
ceph_assert(!sysfs_r);
return std::make_pair(udev_r, true);
}
fds[1].fd = -1;
}
break;
}
if (udev_device_handler(std::move(dev))) {
udev_r = 0;
if (sysfs_r != INT_MAX) {
ceph_assert(!sysfs_r);
return std::make_pair(udev_r, true);
}
fds[1].fd = -1;
break;
}
}
}
}
}
class UdevMapHandler {
public:
UdevMapHandler(const krbd_spec *spec, std::string *pdevnode,
std::string *majnum, std::string *minnum) :
m_spec(spec), m_pdevnode(pdevnode), m_majnum(majnum), m_minnum(minnum) {}
/*
* Catch /sys/devices/rbd/<id>/ and wait for the corresponding
* block device to show up. This is necessary because rbd devices
* and block devices aren't linked together in our sysfs layout.
*
* Note that our "block" event can come before the "rbd" event, so
* all potential "block" events are gathered in m_block_devs before
* m_bus_dev is caught.
*/
bool operator()(udev_device_uptr dev) {
if (strcmp(udev_device_get_action(dev.get()), "add")) {
return false;
}
if (!strcmp(udev_device_get_subsystem(dev.get()), "rbd")) {
if (!m_bus_dev) {
auto spec = spec_from_dev(dev.get());
if (spec && *spec == *m_spec) {
m_bus_dev = std::move(dev);
m_devnode = get_devnode(m_bus_dev.get());
}
}
} else if (!strcmp(udev_device_get_subsystem(dev.get()), "block")) {
if (boost::starts_with(udev_device_get_devnode(dev.get()),
DEVNODE_PREFIX)) {
m_block_devs.push_back(std::move(dev));
}
}
if (m_bus_dev && !m_block_devs.empty()) {
for (const auto& p : m_block_devs) {
if (udev_device_get_devnode(p.get()) == m_devnode) {
*m_pdevnode = std::move(m_devnode);
*m_majnum = udev_device_get_property_value(p.get(), "MAJOR");
*m_minnum = udev_device_get_property_value(p.get(), "MINOR");
ceph_assert(*m_majnum == udev_device_get_sysattr_value(
m_bus_dev.get(), "major"));
ceph_assert(!have_minor_attr() ||
*m_minnum == udev_device_get_sysattr_value(
m_bus_dev.get(), "minor"));
return true;
}
}
m_block_devs.clear();
}
return false;
}
private:
udev_device_uptr m_bus_dev;
std::vector<udev_device_uptr> m_block_devs;
std::string m_devnode;
const krbd_spec *m_spec;
std::string *m_pdevnode;
std::string *m_majnum;
std::string *m_minnum;
};
static const char *get_event_source(const krbd_ctx *ctx)
{
if (ctx->flags & KRBD_CTX_F_NOUDEV) {
/*
* For block devices (unlike network interfaces, they don't
* carry any namespace tags), the kernel broadcasts uevents
* into all network namespaces that are owned by the initial
* user namespace. This restriction is new in 4.18: starting
* with 2.6.35 and through 4.17 the kernel broadcast uevents
* into all network namespaces, period.
*
* However, when invoked from a non-initial user namespace,
* udev_monitor_receive_device() has always ignored both kernel
* and udev uevents by virtue of requiring SCM_CREDENTIALS and
* checking that ucred->uid == 0. When UIDs and GIDs are sent to
* a process in a user namespace, they are translated according
* to that process's UID and GID mappings and, unless root in the
* user namespace is mapped to the global root, that check fails.
* Normally they show up as 65534(nobody) because the global root
* is not mapped.
*/
return "kernel";
}
/*
* Like most netlink messages, udev uevents don't cross network
* namespace boundaries and are therefore confined to the initial
* network namespace.
*/
return "udev";
}
static int do_map(krbd_ctx *ctx, const krbd_spec& spec, const string& buf,
string *pname)
{
std::string majnum, minnum;
struct stat sb;
bool mapped;
int fds[2];
int r;
udev_monitor_uptr mon(udev_monitor_new_from_netlink(ctx->udev,
get_event_source(ctx)));
if (!mon)
return -ENOMEM;
r = udev_monitor_filter_add_match_subsystem_devtype(mon.get(), "rbd",
nullptr);
if (r < 0)
return r;
r = udev_monitor_filter_add_match_subsystem_devtype(mon.get(), "block",
"disk");
if (r < 0)
return r;
r = udev_monitor_set_receive_buffer_size(mon.get(), UDEV_BUF_SIZE);
if (r < 0) {
std::cerr << "rbd: failed to set udev buffer size: " << cpp_strerror(r)
<< std::endl;
/* not fatal */
}
r = udev_monitor_enable_receiving(mon.get());
if (r < 0)
return r;
if (pipe2(fds, O_NONBLOCK) < 0)
return -errno;
auto mapper = make_named_thread("mapper", [&buf, sysfs_r_fd = fds[1]]() {
int sysfs_r = sysfs_write_rbd_add(buf);
int r = safe_write(sysfs_r_fd, &sysfs_r, sizeof(sysfs_r));
if (r < 0) {
ceph_abort_msgf("safe_write failed: %d", r);
}
});
std::tie(r, mapped) = wait_for_mapping(fds[0], mon.get(),
UdevMapHandler(&spec, pname, &majnum,
&minnum));
if (r < 0) {
if (!mapped) {
std::cerr << "rbd: sysfs write failed" << std::endl;
} else {
std::cerr << "rbd: udev wait failed" << std::endl;
/* TODO: fall back to enumeration */
}
}
mapper.join();
close(fds[0]);
close(fds[1]);
if (r < 0)
return r;
/*
* Make sure our device node is there. This is intended to help
* diagnose environments where "rbd map" is run from a container with
* a private /dev and some external mechanism (e.g. udev) is used to
* add the device to the container asynchronously, possibly seconds
* after "rbd map" successfully exits. These setups are very fragile
* and in some cases can even lead to data loss, depending on higher
* level logic and orchestration layers involved.
*/
ceph_assert(mapped);
if (stat(pname->c_str(), &sb) < 0 || !S_ISBLK(sb.st_mode)) {
std::cerr << "rbd: mapping succeeded but " << *pname
<< " is not accessible, is host /dev mounted?" << std::endl;
return -EINVAL;
}
if (stringify(major(sb.st_rdev)) != majnum ||
stringify(minor(sb.st_rdev)) != minnum) {
std::cerr << "rbd: mapping succeeded but " << *pname
<< " (" << major(sb.st_rdev) << ":" << minor(sb.st_rdev)
<< ") does not match expected " << majnum << ":" << minnum
<< std::endl;
return -EINVAL;
}
return 0;
}
static int map_image(struct krbd_ctx *ctx, const krbd_spec& spec,
const char *options, string *pname)
{
string buf;
int r;
/*
* Modprobe rbd kernel module. If it supports single-major device
* number allocation scheme, make sure it's turned on.
*
* Do this before calling build_map_buf() - it wants "ceph" key type
* registered.
*/
if (access("/sys/bus/rbd", F_OK) != 0) {
const char *module_options = NULL;
if (module_has_param("rbd", "single_major"))
module_options = "single_major=Y";
r = module_load("rbd", module_options);
if (r) {
std::cerr << "rbd: failed to load rbd kernel module (" << r << ")"
<< std::endl;
/*
* Ignore the error: modprobe failing doesn't necessarily prevent
* from working.
*/
}
}
r = build_map_buf(ctx->cct, spec, options, &buf);
if (r < 0)
return r;
return do_map(ctx, spec, buf, pname);
}
static int devno_to_krbd_id(struct udev *udev, dev_t devno, string *pid)
{
udev_enumerate_uptr enm;
struct udev_list_entry *l;
int r;
retry:
enm.reset(udev_enumerate_new(udev));
if (!enm)
return -ENOMEM;
r = udev_enumerate_add_match_subsystem(enm.get(), "rbd");
if (r < 0)
return r;
r = udev_enumerate_add_match_sysattr(enm.get(), "major",
stringify(major(devno)).c_str());
if (r < 0)
return r;
if (have_minor_attr()) {
r = udev_enumerate_add_match_sysattr(enm.get(), "minor",
stringify(minor(devno)).c_str());
if (r < 0)
return r;
}
r = udev_enumerate_scan_devices(enm.get());
if (r < 0) {
if (r == -ENOENT || r == -ENODEV) {
std::cerr << "rbd: udev enumerate failed, retrying" << std::endl;
goto retry;
}
return r;
}
l = udev_enumerate_get_list_entry(enm.get());
if (!l)
return -ENOENT;
/* make sure there is only one match */
ceph_assert(!udev_list_entry_get_next(l));
auto dev = dev_from_list_entry(udev, l);
if (!dev)
return -ENOMEM;
*pid = udev_device_get_sysname(dev.get());
return 0;
}
// wrap any of * ? [ between square brackets
static std::string escape_glob(const std::string& s)
{
std::regex glob_meta("([*?[])");
return std::regex_replace(s, glob_meta, "[$1]");
}
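// e.g. escape_glob("rbd*data?[0]") yields "rbd[*]data[?][[]0]", so the udev
// sysattr matches below treat the value literally rather than as a glob.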
static int __enumerate_devices(struct udev *udev, const krbd_spec& spec,
bool match_nspace, udev_enumerate_uptr *penm)
{
udev_enumerate_uptr enm;
int r;
retry:
enm.reset(udev_enumerate_new(udev));
if (!enm)
return -ENOMEM;
r = udev_enumerate_add_match_subsystem(enm.get(), "rbd");
if (r < 0)
return r;
r = udev_enumerate_add_match_sysattr(enm.get(), "pool",
escape_glob(spec.pool_name).c_str());
if (r < 0)
return r;
if (match_nspace) {
r = udev_enumerate_add_match_sysattr(enm.get(), "pool_ns",
escape_glob(spec.nspace_name).c_str());
} else {
/*
* Match _only_ devices that don't have pool_ns attribute.
* If the kernel supports namespaces, the result will be empty.
*/
r = udev_enumerate_add_nomatch_sysattr(enm.get(), "pool_ns", nullptr);
}
if (r < 0)
return r;
r = udev_enumerate_add_match_sysattr(enm.get(), "name",
escape_glob(spec.image_name).c_str());
if (r < 0)
return r;
r = udev_enumerate_add_match_sysattr(enm.get(), "current_snap",
escape_glob(spec.snap_name).c_str());
if (r < 0)
return r;
r = udev_enumerate_scan_devices(enm.get());
if (r < 0) {
if (r == -ENOENT || r == -ENODEV) {
std::cerr << "rbd: udev enumerate failed, retrying" << std::endl;
goto retry;
}
return r;
}
*penm = std::move(enm);
return 0;
}
static int enumerate_devices(struct udev *udev, const krbd_spec& spec,
udev_enumerate_uptr *penm)
{
udev_enumerate_uptr enm;
int r;
r = __enumerate_devices(udev, spec, true, &enm);
if (r < 0)
return r;
/*
* If no namespace is set, try again with match_nspace=false to
* handle older kernels. On a newer kernel the result will remain
* the same (i.e. empty).
*/
if (!udev_enumerate_get_list_entry(enm.get()) && spec.nspace_name.empty()) {
r = __enumerate_devices(udev, spec, false, &enm);
if (r < 0)
return r;
}
*penm = std::move(enm);
return 0;
}
static int spec_to_devno_and_krbd_id(struct udev *udev, const krbd_spec& spec,
dev_t *pdevno, string *pid)
{
udev_enumerate_uptr enm;
struct udev_list_entry *l;
unsigned int maj, min = 0;
string err;
int r;
r = enumerate_devices(udev, spec, &enm);
if (r < 0)
return r;
l = udev_enumerate_get_list_entry(enm.get());
if (!l)
return -ENOENT;
auto dev = dev_from_list_entry(udev, l);
if (!dev)
return -ENOMEM;
maj = strict_strtoll(udev_device_get_sysattr_value(dev.get(), "major"), 10,
&err);
if (!err.empty()) {
std::cerr << "rbd: couldn't parse major: " << err << std::endl;
return -EINVAL;
}
if (have_minor_attr()) {
min = strict_strtoll(udev_device_get_sysattr_value(dev.get(), "minor"), 10,
&err);
if (!err.empty()) {
std::cerr << "rbd: couldn't parse minor: " << err << std::endl;
return -EINVAL;
}
}
/*
* If an image is mapped more than once don't bother trying to unmap
* all devices - let users run unmap the same number of times they
* ran map.
*/
if (udev_list_entry_get_next(l))
std::cerr << "rbd: " << spec << ": mapped more than once, unmapping "
<< get_devnode(dev.get()) << " only" << std::endl;
*pdevno = makedev(maj, min);
*pid = udev_device_get_sysname(dev.get());
return 0;
}
static void append_unmap_options(std::string *buf, const char *options)
{
if (strcmp(options, "") != 0) {
*buf += " ";
*buf += options;
}
}
class UdevUnmapHandler {
public:
UdevUnmapHandler(dev_t devno) : m_devno(devno) {}
bool operator()(udev_device_uptr dev) {
if (strcmp(udev_device_get_action(dev.get()), "remove")) {
return false;
}
return udev_device_get_devnum(dev.get()) == m_devno;
}
private:
dev_t m_devno;
};
static int do_unmap(krbd_ctx *ctx, dev_t devno, const string& buf)
{
bool unmapped;
int fds[2];
int r;
udev_monitor_uptr mon(udev_monitor_new_from_netlink(ctx->udev,
get_event_source(ctx)));
if (!mon)
return -ENOMEM;
r = udev_monitor_filter_add_match_subsystem_devtype(mon.get(), "block",
"disk");
if (r < 0)
return r;
r = udev_monitor_set_receive_buffer_size(mon.get(), UDEV_BUF_SIZE);
if (r < 0) {
std::cerr << "rbd: failed to set udev buffer size: " << cpp_strerror(r)
<< std::endl;
/* not fatal */
}
r = udev_monitor_enable_receiving(mon.get());
if (r < 0)
return r;
if (pipe2(fds, O_NONBLOCK) < 0)
return -errno;
auto unmapper = make_named_thread(
"unmapper", [&buf, sysfs_r_fd = fds[1], flags = ctx->flags]() {
/*
* On final device close(), kernel sends a block change event, in
* response to which udev apparently runs blkid on the device. This
* makes unmap fail with EBUSY, if issued right after final close().
* Try to circumvent this with a retry before turning to udev.
*/
for (int tries = 0; ; tries++) {
int sysfs_r = sysfs_write_rbd_remove(buf);
if (sysfs_r == -EBUSY && tries < 2) {
if (!tries) {
usleep(250 * 1000);
} else if (!(flags & KRBD_CTX_F_NOUDEV)) {
/*
* libudev does not provide the "wait until the queue is empty"
* API or the sufficient amount of primitives to build it from.
*/
std::string err = run_cmd("udevadm", "settle", "--timeout", "10",
(char *)NULL);
if (!err.empty())
std::cerr << "rbd: " << err << std::endl;
}
} else {
int r = safe_write(sysfs_r_fd, &sysfs_r, sizeof(sysfs_r));
if (r < 0) {
ceph_abort_msgf("safe_write failed: %d", r);
}
break;
}
}
});
std::tie(r, unmapped) = wait_for_mapping(fds[0], mon.get(),
UdevUnmapHandler(devno));
if (r < 0) {
if (!unmapped) {
std::cerr << "rbd: sysfs write failed" << std::endl;
} else {
std::cerr << "rbd: udev wait failed: " << cpp_strerror(r) << std::endl;
r = 0;
}
}
unmapper.join();
close(fds[0]);
close(fds[1]);
return r;
}
static int unmap_image(struct krbd_ctx *ctx, const char *devnode,
const char *options)
{
struct stat sb;
dev_t wholedevno = 0;
std::string buf;
int r;
if (stat(devnode, &sb) < 0 || !S_ISBLK(sb.st_mode)) {
std::cerr << "rbd: '" << devnode << "' is not a block device" << std::endl;
return -EINVAL;
}
r = blkid_devno_to_wholedisk(sb.st_rdev, NULL, 0, &wholedevno);
if (r < 0) {
std::cerr << "rbd: couldn't compute wholedevno: " << cpp_strerror(r)
<< std::endl;
/*
* Ignore the error: we are given whole disks most of the time, and
* if it turns out this is a partition we will fail later anyway.
*/
wholedevno = sb.st_rdev;
}
for (int tries = 0; ; tries++) {
r = devno_to_krbd_id(ctx->udev, wholedevno, &buf);
if (r == -ENOENT && tries < 2) {
usleep(250 * 1000);
} else {
if (r < 0) {
if (r == -ENOENT) {
std::cerr << "rbd: '" << devnode << "' is not an rbd device"
<< std::endl;
r = -EINVAL;
}
return r;
}
if (tries) {
std::cerr << "rbd: udev enumerate missed a device, tries = " << tries
<< std::endl;
}
break;
}
}
append_unmap_options(&buf, options);
return do_unmap(ctx, wholedevno, buf);
}
static int unmap_image(struct krbd_ctx *ctx, const krbd_spec& spec,
const char *options)
{
dev_t devno = 0;
std::string buf;
int r;
for (int tries = 0; ; tries++) {
r = spec_to_devno_and_krbd_id(ctx->udev, spec, &devno, &buf);
if (r == -ENOENT && tries < 2) {
usleep(250 * 1000);
} else {
if (r < 0) {
if (r == -ENOENT) {
std::cerr << "rbd: " << spec << ": not a mapped image or snapshot"
<< std::endl;
r = -EINVAL;
}
return r;
}
if (tries) {
std::cerr << "rbd: udev enumerate missed a device, tries = " << tries
<< std::endl;
}
break;
}
}
append_unmap_options(&buf, options);
return do_unmap(ctx, devno, buf);
}
static bool dump_one_image(Formatter *f, TextTable *tbl,
struct udev_device *dev)
{
auto spec = spec_from_dev(dev);
std::string devnode = get_devnode(dev);
const char *id = devnode.c_str() + sizeof(DEVNODE_PREFIX) - 1;
if (!spec)
return false;
if (f) {
f->open_object_section("device");
f->dump_string("id", id);
f->dump_string("pool", spec->pool_name);
f->dump_string("namespace", spec->nspace_name);
f->dump_string("name", spec->image_name);
f->dump_string("snap", spec->snap_name);
f->dump_string("device", devnode);
f->close_section();
} else {
*tbl << id << spec->pool_name << spec->nspace_name << spec->image_name
<< spec->snap_name << devnode << TextTable::endrow;
}
return true;
}
static int do_dump(struct udev *udev, Formatter *f, TextTable *tbl)
{
udev_enumerate_uptr enm;
struct udev_list_entry *l = NULL;
bool have_output = false;
int r;
retry:
enm.reset(udev_enumerate_new(udev));
if (!enm)
return -ENOMEM;
r = udev_enumerate_add_match_subsystem(enm.get(), "rbd");
if (r < 0)
return r;
r = udev_enumerate_scan_devices(enm.get());
if (r < 0) {
if (r == -ENOENT || r == -ENODEV) {
std::cerr << "rbd: udev enumerate failed, retrying" << std::endl;
goto retry;
}
return r;
}
udev_list_entry_foreach(l, udev_enumerate_get_list_entry(enm.get())) {
auto dev = dev_from_list_entry(udev, l);
if (dev) {
have_output |= dump_one_image(f, tbl, dev.get());
}
}
return have_output;
}
static int dump_images(struct krbd_ctx *ctx, Formatter *f)
{
TextTable tbl;
int r;
if (f) {
f->open_array_section("devices");
} else {
tbl.define_column("id", TextTable::LEFT, TextTable::LEFT);
tbl.define_column("pool", TextTable::LEFT, TextTable::LEFT);
tbl.define_column("namespace", TextTable::LEFT, TextTable::LEFT);
tbl.define_column("image", TextTable::LEFT, TextTable::LEFT);
tbl.define_column("snap", TextTable::LEFT, TextTable::LEFT);
tbl.define_column("device", TextTable::LEFT, TextTable::LEFT);
}
r = do_dump(ctx->udev, f, &tbl);
if (f) {
f->close_section();
f->flush(std::cout);
} else {
if (r > 0)
std::cout << tbl;
}
return r;
}
static int is_mapped_image(struct udev *udev, const krbd_spec& spec,
string *pname)
{
udev_enumerate_uptr enm;
struct udev_list_entry *l;
int r;
r = enumerate_devices(udev, spec, &enm);
if (r < 0)
return r;
l = udev_enumerate_get_list_entry(enm.get());
if (l) {
auto dev = dev_from_list_entry(udev, l);
if (!dev)
return -ENOMEM;
*pname = get_devnode(dev.get());
return 1;
}
return 0; /* not mapped */
}
extern "C" int krbd_create_from_context(rados_config_t cct, uint32_t flags,
struct krbd_ctx **pctx)
{
struct krbd_ctx *ctx = new struct krbd_ctx();
ctx->cct = reinterpret_cast<CephContext *>(cct);
ctx->udev = udev_new();
if (!ctx->udev) {
delete ctx;
return -ENOMEM;
}
ctx->flags = flags;
*pctx = ctx;
return 0;
}
extern "C" void krbd_destroy(struct krbd_ctx *ctx)
{
if (!ctx)
return;
udev_unref(ctx->udev);
delete ctx;
}
extern "C" int krbd_map(struct krbd_ctx *ctx,
const char *pool_name,
const char *nspace_name,
const char *image_name,
const char *snap_name,
const char *options,
char **pdevnode)
{
krbd_spec spec(pool_name, nspace_name, image_name, snap_name);
string name;
char *devnode;
int r;
r = map_image(ctx, spec, options, &name);
if (r < 0)
return r;
devnode = strdup(name.c_str());
if (!devnode)
return -ENOMEM;
*pdevnode = devnode;
return r;
}
extern "C" int krbd_unmap(struct krbd_ctx *ctx, const char *devnode,
const char *options)
{
return unmap_image(ctx, devnode, options);
}
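/*
 * Usage sketch for the map/unmap API (illustrative only: error handling
 * is elided and the rados_config_t handle `cct` is assumed to come from
 * an existing cluster connection):
 *
 *   struct krbd_ctx *ctx;
 *   char *devnode;
 *
 *   if (krbd_create_from_context(cct, 0, &ctx) < 0)
 *     return;
 *   if (krbd_map(ctx, "rbd", "", "testimg", "", "", &devnode) == 0) {
 *     // ... use the block device at `devnode` ...
 *     krbd_unmap(ctx, devnode, "");
 *     free(devnode);  // strdup()ed by krbd_map()
 *   }
 *   krbd_destroy(ctx);
 */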
extern "C" int krbd_unmap_by_spec(struct krbd_ctx *ctx,
const char *pool_name,
const char *nspace_name,
const char *image_name,
const char *snap_name,
const char *options)
{
krbd_spec spec(pool_name, nspace_name, image_name, snap_name);
return unmap_image(ctx, spec, options);
}
int krbd_showmapped(struct krbd_ctx *ctx, Formatter *f)
{
return dump_images(ctx, f);
}
extern "C" int krbd_is_mapped(struct krbd_ctx *ctx,
const char *pool_name,
const char *nspace_name,
const char *image_name,
const char *snap_name,
char **pdevnode)
{
krbd_spec spec(pool_name, nspace_name, image_name, snap_name);
string name;
char *devnode;
int r;
r = is_mapped_image(ctx->udev, spec, &name);
if (r <= 0) /* error or not mapped */
return r;
devnode = strdup(name.c_str());
if (!devnode)
return -ENOMEM;
*pdevnode = devnode;
return r;
}
| 32,172 | 26.80726 | 91 | cc |
null | ceph-main/src/libcephfs.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2009-2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <fcntl.h>
#include <iostream>
#include <string.h>
#include <string>
#include "auth/Crypto.h"
#include "client/Client.h"
#include "client/Inode.h"
#include "librados/RadosClient.h"
#include "common/async/context_pool.h"
#include "common/ceph_argparse.h"
#include "common/common_init.h"
#include "common/config.h"
#include "common/version.h"
#include "mon/MonClient.h"
#include "include/str_list.h"
#include "include/stringify.h"
#include "include/object.h"
#include "messages/MMonMap.h"
#include "msg/Messenger.h"
#include "include/ceph_assert.h"
#include "mds/MDSMap.h"
#include "include/cephfs/libcephfs.h"
#define DEFAULT_UMASK 002
using namespace std;
static mode_t umask_cb(void *);
namespace {
// Set things up this way so that we don't start up threads until mount,
// kill them off when the last mount goes away, and stay tolerant of
// multiple mounts of overlapping duration.
std::shared_ptr<ceph::async::io_context_pool> get_icp(CephContext* cct)
{
static std::mutex m;
static std::weak_ptr<ceph::async::io_context_pool> icwp;
std::unique_lock l(m);
auto icp = icwp.lock();
if (icp)
return icp;
icp = std::make_shared<ceph::async::io_context_pool>();
icwp = icp;
icp->start(cct->_conf.get_val<std::uint64_t>("client_asio_thread_count"));
return icp;
}
}
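/*
 * Lifetime sketch of the shared pool above (illustrative):
 *
 *   mount A -> get_icp() creates the pool, A holds a shared_ptr
 *   mount B -> get_icp() finds the live weak_ptr and shares the pool
 *   A shuts down -> icp.reset(); the pool stays alive through B's ref
 *   B shuts down -> the last shared_ptr drops, the threads stop
 *   mount C -> the weak_ptr has expired, a fresh pool is created
 */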
struct ceph_mount_info
{
mode_t umask = DEFAULT_UMASK;
std::shared_ptr<ceph::async::io_context_pool> icp;
public:
explicit ceph_mount_info(CephContext *cct_)
: default_perms(),
mounted(false),
inited(false),
client(nullptr),
monclient(nullptr),
messenger(nullptr),
cct(cct_)
{
if (cct_) {
cct->get();
}
}
~ceph_mount_info()
{
try {
shutdown();
if (cct) {
cct->put();
cct = nullptr;
}
}
catch (const std::exception& e) {
// we shouldn't get here, but if we do, we want to know about it.
lderr(cct) << "ceph_mount_info::~ceph_mount_info: caught exception: "
<< e.what() << dendl;
}
catch (...) {
// ignore
}
}
int init()
{
int ret;
if (!cct->_log->is_started()) {
cct->_log->start();
}
icp = get_icp(cct);
{
MonClient mc_bootstrap(cct, icp->get_io_context());
ret = mc_bootstrap.get_monmap_and_config();
if (ret < 0)
return ret;
}
common_init_finish(cct);
    // monmap
    monclient = new MonClient(cct, icp->get_io_context());
    ret = -CEPHFS_ERROR_MON_MAP_BUILD; // defined in libcephfs.h
    if (monclient->build_initial_monmap() < 0)
      goto fail;
    // network connection
    messenger = Messenger::create_client_messenger(cct, "client");
    // at last, the client
    ret = -CEPHFS_ERROR_NEW_CLIENT; // defined in libcephfs.h
    client = new StandaloneClient(messenger, monclient, icp->get_io_context());
    if (!client)
      goto fail;
    ret = -CEPHFS_ERROR_MESSENGER_START; // defined in libcephfs.h
if (messenger->start() != 0)
goto fail;
ret = client->init();
if (ret)
goto fail;
{
ceph_client_callback_args args = {};
args.handle = this;
args.umask_cb = umask_cb;
client->ll_register_callbacks(&args);
}
default_perms = Client::pick_my_perms(cct);
inited = true;
return 0;
fail:
shutdown();
return ret;
}
int select_filesystem(const std::string &fs_name_)
{
if (mounted) {
return -CEPHFS_EISCONN;
}
fs_name = fs_name_;
return 0;
}
const std::string& get_filesystem(void)
{
return fs_name;
}
int mount(const std::string &mount_root, const UserPerm& perms)
{
int ret;
if (mounted)
return -CEPHFS_EISCONN;
if (!inited) {
ret = init();
if (ret != 0) {
return ret;
}
}
ret = client->mount(mount_root, perms, false, fs_name);
if (ret) {
shutdown();
return ret;
} else {
mounted = true;
return 0;
}
}
int unmount()
{
if (!mounted)
return -CEPHFS_ENOTCONN;
shutdown();
return 0;
}
int abort_conn()
{
if (mounted) {
client->abort_conn();
mounted = false;
}
return 0;
}
void shutdown()
{
if (mounted) {
client->unmount();
mounted = false;
}
if (inited) {
client->shutdown();
inited = false;
}
if (messenger) {
messenger->shutdown();
messenger->wait();
delete messenger;
messenger = nullptr;
}
icp.reset();
if (monclient) {
delete monclient;
monclient = nullptr;
}
if (client) {
delete client;
client = nullptr;
}
}
bool is_initialized() const
{
return inited;
}
bool is_mounted()
{
return mounted;
}
mode_t set_umask(mode_t umask)
{
this->umask = umask;
return umask;
}
std::string getaddrs()
{
CachedStackStringStream cos;
*cos << messenger->get_myaddrs();
return std::string(cos->strv());
}
int conf_read_file(const char *path_list)
{
int ret = cct->_conf.parse_config_files(path_list, nullptr, 0);
if (ret)
return ret;
cct->_conf.apply_changes(nullptr);
cct->_conf.complain_about_parse_error(cct);
return 0;
}
int conf_parse_argv(int argc, const char **argv)
{
auto args = argv_to_vec(argc, argv);
int ret = cct->_conf.parse_argv(args);
if (ret)
return ret;
cct->_conf.apply_changes(nullptr);
return 0;
}
int conf_parse_env(const char *name)
{
auto& conf = cct->_conf;
conf.parse_env(cct->get_module_type(), name);
conf.apply_changes(nullptr);
return 0;
}
int conf_set(const char *option, const char *value)
{
int ret = cct->_conf.set_val(option, value);
if (ret)
return ret;
cct->_conf.apply_changes(nullptr);
return 0;
}
int conf_get(const char *option, char *buf, size_t len)
{
char *tmp = buf;
return cct->_conf.get_val(option, &tmp, len);
}
Client *get_client()
{
return client;
}
const char *get_cwd(const UserPerm& perms)
{
client->getcwd(cwd, perms);
return cwd.c_str();
}
int chdir(const char *to, const UserPerm& perms)
{
return client->chdir(to, cwd, perms);
}
CephContext *get_ceph_context() const {
return cct;
}
UserPerm default_perms;
private:
bool mounted;
bool inited;
StandaloneClient *client;
MonClient *monclient;
Messenger *messenger;
CephContext *cct;
std::string cwd;
std::string fs_name;
};
static mode_t umask_cb(void *handle)
{
return ((struct ceph_mount_info *)handle)->umask;
}
static void do_out_buffer(bufferlist& outbl, char **outbuf, size_t *outbuflen)
{
if (outbuf) {
if (outbl.length() > 0) {
*outbuf = (char *)malloc(outbl.length());
memcpy(*outbuf, outbl.c_str(), outbl.length());
} else {
*outbuf = nullptr;
}
}
if (outbuflen)
*outbuflen = outbl.length();
}
static void do_out_buffer(string& outbl, char **outbuf, size_t *outbuflen)
{
if (outbuf) {
if (outbl.length() > 0) {
*outbuf = (char *)malloc(outbl.length());
memcpy(*outbuf, outbl.c_str(), outbl.length());
} else {
*outbuf = nullptr;
}
}
if (outbuflen)
*outbuflen = outbl.length();
}
extern "C" UserPerm *ceph_userperm_new(uid_t uid, gid_t gid, int ngids,
gid_t *gidlist)
{
return new (std::nothrow) UserPerm(uid, gid, ngids, gidlist);
}
extern "C" void ceph_userperm_destroy(UserPerm *perm)
{
delete perm;
}
extern "C" const char *ceph_version(int *pmajor, int *pminor, int *ppatch)
{
int major, minor, patch;
const char *v = ceph_version_to_str();
int n = sscanf(v, "%d.%d.%d", &major, &minor, &patch);
if (pmajor)
*pmajor = (n >= 1) ? major : 0;
if (pminor)
*pminor = (n >= 2) ? minor : 0;
if (ppatch)
*ppatch = (n >= 3) ? patch : 0;
return PROJECT_VERSION;
}
extern "C" int ceph_create_with_context(struct ceph_mount_info **cmount, CephContext *cct)
{
*cmount = new struct ceph_mount_info(cct);
return 0;
}
extern "C" int ceph_create_from_rados(struct ceph_mount_info **cmount,
rados_t cluster)
{
auto rados = (librados::RadosClient *) cluster;
auto cct = rados->cct;
return ceph_create_with_context(cmount, cct);
}
extern "C" int ceph_create(struct ceph_mount_info **cmount, const char * const id)
{
CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
if (id) {
iparams.name.set(CEPH_ENTITY_TYPE_CLIENT, id);
}
CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0);
  cct->_conf.parse_env(cct->get_module_type()); // environment variables override
cct->_conf.apply_changes(nullptr);
int ret = ceph_create_with_context(cmount, cct);
cct->put();
cct = nullptr;
return ret;
}
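/*
 * Typical lifecycle sketch (illustrative only; error handling elided;
 * passing NULL as the id selects the default "admin" client):
 *
 *   struct ceph_mount_info *cmount;
 *
 *   ceph_create(&cmount, NULL);
 *   ceph_conf_read_file(cmount, NULL);  // default config search path
 *   ceph_mount(cmount, "/");            // mount at the filesystem root
 *   // ... ceph_open()/ceph_read()/... against cmount ...
 *   ceph_unmount(cmount);
 *   ceph_release(cmount);
 */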
extern "C" int ceph_unmount(struct ceph_mount_info *cmount)
{
return cmount->unmount();
}
extern "C" int ceph_abort_conn(struct ceph_mount_info *cmount)
{
return cmount->abort_conn();
}
extern "C" int ceph_release(struct ceph_mount_info *cmount)
{
if (cmount->is_mounted())
return -CEPHFS_EISCONN;
delete cmount;
cmount = nullptr;
return 0;
}
extern "C" void ceph_shutdown(struct ceph_mount_info *cmount)
{
cmount->shutdown();
delete cmount;
cmount = nullptr;
}
extern "C" uint64_t ceph_get_instance_id(struct ceph_mount_info *cmount)
{
if (cmount->is_initialized())
return cmount->get_client()->get_nodeid().v;
return 0;
}
extern "C" int ceph_getaddrs(struct ceph_mount_info *cmount, char** addrs)
{
if (!cmount->is_initialized())
return -CEPHFS_ENOTCONN;
auto s = cmount->getaddrs();
*addrs = strdup(s.c_str());
return 0;
}
extern "C" int ceph_conf_read_file(struct ceph_mount_info *cmount, const char *path)
{
return cmount->conf_read_file(path);
}
extern "C" mode_t ceph_umask(struct ceph_mount_info *cmount, mode_t mode)
{
return cmount->set_umask(mode);
}
extern "C" int ceph_conf_parse_argv(struct ceph_mount_info *cmount, int argc,
const char **argv)
{
return cmount->conf_parse_argv(argc, argv);
}
extern "C" int ceph_conf_parse_env(struct ceph_mount_info *cmount, const char *name)
{
return cmount->conf_parse_env(name);
}
extern "C" int ceph_conf_set(struct ceph_mount_info *cmount, const char *option,
const char *value)
{
return cmount->conf_set(option, value);
}
extern "C" int ceph_conf_get(struct ceph_mount_info *cmount, const char *option,
char *buf, size_t len)
{
if (!buf) {
return -CEPHFS_EINVAL;
}
return cmount->conf_get(option, buf, len);
}
extern "C" int ceph_set_mount_timeout(struct ceph_mount_info *cmount, uint32_t timeout) {
if (cmount->is_mounted()) {
return -CEPHFS_EINVAL;
}
auto timeout_str = stringify(timeout);
return ceph_conf_set(cmount, "client_mount_timeout", timeout_str.c_str());
}
extern "C" int ceph_mds_command(struct ceph_mount_info *cmount,
const char *mds_spec,
const char **cmd,
size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outsbuf, size_t *outsbuflen)
{
bufferlist inbl;
bufferlist outbl;
std::vector<string> cmdv;
std::string outs;
if (!cmount->is_initialized()) {
return -CEPHFS_ENOTCONN;
}
// Construct inputs
for (size_t i = 0; i < cmdlen; ++i) {
cmdv.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
// Issue remote command
C_SaferCond cond;
int r = cmount->get_client()->mds_command(
mds_spec,
cmdv, inbl,
&outbl, &outs,
&cond);
if (r != 0) {
goto out;
}
// Wait for completion
r = cond.wait();
// Construct outputs
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outs, outsbuf, outsbuflen);
out:
return r;
}
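/*
 * Sketch of issuing an MDS command (illustrative; "*" addresses all
 * active MDS daemons).  *outbuf and *outsbuf are malloc()ed by
 * do_out_buffer() above, so the caller must free() them:
 *
 *   const char *cmd[] = { "{\"prefix\": \"session ls\"}" };
 *   char *outbuf = NULL, *outs = NULL;
 *   size_t outlen = 0, outslen = 0;
 *
 *   int r = ceph_mds_command(cmount, "*", cmd, 1, "", 0,
 *                            &outbuf, &outlen, &outs, &outslen);
 *   // on success outbuf holds the command output, outs the status text
 *   free(outbuf);
 *   free(outs);
 */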
extern "C" int ceph_init(struct ceph_mount_info *cmount)
{
return cmount->init();
}
extern "C" int ceph_select_filesystem(struct ceph_mount_info *cmount,
const char *fs_name)
{
if (fs_name == nullptr) {
return -CEPHFS_EINVAL;
}
return cmount->select_filesystem(fs_name);
}
extern "C" int ceph_mount(struct ceph_mount_info *cmount, const char *root)
{
std::string mount_root;
if (root)
mount_root = root;
return cmount->mount(mount_root, cmount->default_perms);
}
extern "C" int ceph_is_mounted(struct ceph_mount_info *cmount)
{
return cmount->is_mounted() ? 1 : 0;
}
extern "C" struct UserPerm *ceph_mount_perms(struct ceph_mount_info *cmount)
{
return &cmount->default_perms;
}
extern "C" int64_t ceph_get_fs_cid(struct ceph_mount_info *cmount)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->get_fs_cid();
}
extern "C" int ceph_mount_perms_set(struct ceph_mount_info *cmount,
struct UserPerm *perms)
{
if (cmount->is_mounted())
return -CEPHFS_EISCONN;
cmount->default_perms = *perms;
return 0;
}
extern "C" int ceph_statfs(struct ceph_mount_info *cmount, const char *path,
struct statvfs *stbuf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->statfs(path, stbuf, cmount->default_perms);
}
extern "C" int ceph_get_local_osd(struct ceph_mount_info *cmount)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->get_local_osd();
}
extern "C" const char* ceph_getcwd(struct ceph_mount_info *cmount)
{
return cmount->get_cwd(cmount->default_perms);
}
extern "C" int ceph_chdir (struct ceph_mount_info *cmount, const char *s)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->chdir(s, cmount->default_perms);
}
extern "C" int ceph_opendir(struct ceph_mount_info *cmount,
const char *name, struct ceph_dir_result **dirpp)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->opendir(name, (dir_result_t **)dirpp, cmount->default_perms);
}
extern "C" int ceph_fdopendir(struct ceph_mount_info *cmount, int dirfd,
struct ceph_dir_result **dirpp)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fdopendir(dirfd, (dir_result_t **)dirpp, cmount->default_perms);
}
extern "C" int ceph_closedir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->closedir(reinterpret_cast<dir_result_t*>(dirp));
}
extern "C" struct dirent * ceph_readdir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp)
{
if (!cmount->is_mounted()) {
/* Client::readdir also sets errno to signal errors. */
errno = CEPHFS_ENOTCONN;
return nullptr;
}
return cmount->get_client()->readdir(reinterpret_cast<dir_result_t*>(dirp));
}
extern "C" int ceph_readdir_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->readdir_r(reinterpret_cast<dir_result_t*>(dirp), de);
}
extern "C" int ceph_readdirplus_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp,
struct dirent *de, struct ceph_statx *stx, unsigned want,
unsigned flags, struct Inode **out)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->readdirplus_r(reinterpret_cast<dir_result_t*>(dirp), de, stx, want, flags, out);
}
extern "C" int ceph_open_snapdiff(struct ceph_mount_info* cmount,
const char* root_path,
const char* rel_path,
const char* snap1,
const char* snap2,
struct ceph_snapdiff_info* out)
{
if (!cmount->is_mounted()) {
/* we set errno to signal errors. */
errno = ENOTCONN;
return -errno;
}
if (!out || !root_path || !rel_path ||
!snap1 || !*snap1 || !snap2 || !*snap2) {
errno = EINVAL;
return -errno;
}
out->cmount = cmount;
out->dir1 = out->dir_aux = nullptr;
char full_path1[PATH_MAX];
char snapdir[PATH_MAX];
cmount->conf_get("client_snapdir", snapdir, sizeof(snapdir) - 1);
int n = snprintf(full_path1, PATH_MAX,
"%s/%s/%s/%s", root_path, snapdir, snap1, rel_path);
  if (n < 0 || n >= PATH_MAX) { // n >= PATH_MAX means the path was truncated
errno = ENAMETOOLONG;
return -errno;
}
char full_path2[PATH_MAX];
n = snprintf(full_path2, PATH_MAX,
"%s/%s/%s/%s", root_path, snapdir, snap2, rel_path);
  if (n < 0 || n >= PATH_MAX) {
errno = ENAMETOOLONG;
return -errno;
}
int r = ceph_opendir(cmount, full_path1, &(out->dir1));
if (r != 0) {
    // it's OK to have one of the snap paths absent - attempt the other one
r = ceph_opendir(cmount, full_path2, &(out->dir1));
if (r != 0) {
// both snaps are absent, giving up
errno = ENOENT;
return -errno;
}
    std::swap(snap1, snap2); // snap2 now names the snapshot whose id we still need below
} else {
    // try to open the second snapshot to learn its snapid and get the
    // entry loaded into the client cache, if any.
    r = ceph_opendir(cmount, full_path2, &(out->dir_aux));
    // paranoid: we rely on this value below
    out->dir_aux = r == 0 ? out->dir_aux : nullptr;
}
if (!out->dir_aux) {
    // now try to learn the second snapshot's id by using the snapshot's root
    n = snprintf(full_path2, PATH_MAX,
                 "%s/%s/%s", root_path, snapdir, snap2);
    ceph_assert(n > 0 && n < PATH_MAX); // we've already checked above that
                                        // the longer string fits, hence
                                        // unlikely to assert
r = ceph_opendir(cmount, full_path2, &(out->dir_aux));
if (r != 0) {
goto close_err;
}
}
return 0;
close_err:
ceph_close_snapdiff(out);
return r;
}
extern "C" int ceph_readdir_snapdiff(struct ceph_snapdiff_info* snapdiff,
struct ceph_snapdiff_entry_t* out)
{
if (!snapdiff->cmount->is_mounted()) {
/* also sets errno to signal errors. */
errno = ENOTCONN;
return -errno;
}
dir_result_t* d1 = reinterpret_cast<dir_result_t*>(snapdiff->dir1);
dir_result_t* d2 = reinterpret_cast<dir_result_t*>(snapdiff->dir_aux);
if (!d1 || !d2 || !d1->inode || !d2->inode) {
errno = EINVAL;
return -errno;
}
snapid_t snapid;
int r = snapdiff->cmount->get_client()->readdir_snapdiff(
d1,
d2->inode->snapid,
&(out->dir_entry),
&snapid);
if (r >= 0) {
// converting snapid_t to uint64_t to avoid snapid_t exposure
out->snapid = snapid;
}
return r;
}
extern "C" int ceph_close_snapdiff(struct ceph_snapdiff_info* snapdiff)
{
if (!snapdiff->cmount || !snapdiff->cmount->is_mounted()) {
/* also sets errno to signal errors. */
errno = ENOTCONN;
return -errno;
}
if (snapdiff->dir_aux) {
ceph_closedir(snapdiff->cmount, snapdiff->dir_aux);
}
if (snapdiff->dir1) {
ceph_closedir(snapdiff->cmount, snapdiff->dir1);
}
snapdiff->cmount = nullptr;
snapdiff->dir1 = snapdiff->dir_aux = nullptr;
return 0;
}
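/*
 * Snapdiff usage sketch (illustrative; assuming the convention implied
 * above that a negative return signals an error and a positive return
 * means an entry was produced):
 *
 *   struct ceph_snapdiff_info info;
 *   struct ceph_snapdiff_entry_t entry;
 *
 *   if (ceph_open_snapdiff(cmount, "/", "dir", "snap1", "snap2",
 *                          &info) == 0) {
 *     while (ceph_readdir_snapdiff(&info, &entry) > 0) {
 *       // entry.dir_entry names an entry that differs between the two
 *       // snapshots; entry.snapid says which snapshot it belongs to
 *     }
 *     ceph_close_snapdiff(&info);
 *   }
 */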
extern "C" int ceph_getdents(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp,
char *buf, int buflen)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->getdents(reinterpret_cast<dir_result_t*>(dirp), buf, buflen);
}
extern "C" int ceph_getdnames(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp,
char *buf, int buflen)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->getdnames(reinterpret_cast<dir_result_t*>(dirp), buf, buflen);
}
extern "C" void ceph_rewinddir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp)
{
if (!cmount->is_mounted())
return;
cmount->get_client()->rewinddir(reinterpret_cast<dir_result_t*>(dirp));
}
extern "C" int64_t ceph_telldir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->telldir(reinterpret_cast<dir_result_t*>(dirp));
}
extern "C" void ceph_seekdir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, int64_t offset)
{
if (!cmount->is_mounted())
return;
cmount->get_client()->seekdir(reinterpret_cast<dir_result_t*>(dirp), offset);
}
extern "C" int ceph_may_delete(struct ceph_mount_info *cmount, const char *path)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->may_delete(path, cmount->default_perms);
}
extern "C" int ceph_link (struct ceph_mount_info *cmount, const char *existing,
const char *newname)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->link(existing, newname, cmount->default_perms);
}
extern "C" int ceph_unlink(struct ceph_mount_info *cmount, const char *path)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->unlink(path, cmount->default_perms);
}
extern "C" int ceph_unlinkat(struct ceph_mount_info *cmount, int dirfd, const char *relpath, int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->unlinkat(dirfd, relpath, flags, cmount->default_perms);
}
extern "C" int ceph_rename(struct ceph_mount_info *cmount, const char *from,
const char *to)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->rename(from, to, cmount->default_perms);
}
// dirs
extern "C" int ceph_mkdir(struct ceph_mount_info *cmount, const char *path, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->mkdir(path, mode, cmount->default_perms);
}
extern "C" int ceph_mkdirat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->mkdirat(dirfd, relpath, mode, cmount->default_perms);
}
extern "C" int ceph_mksnap(struct ceph_mount_info *cmount, const char *path, const char *name,
mode_t mode, struct snap_metadata *snap_metadata, size_t nr_snap_metadata)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
size_t i = 0;
std::map<std::string, std::string> metadata;
while (i < nr_snap_metadata) {
metadata.emplace(snap_metadata[i].key, snap_metadata[i].value);
++i;
}
return cmount->get_client()->mksnap(path, name, cmount->default_perms, mode, metadata);
}
extern "C" int ceph_rmsnap(struct ceph_mount_info *cmount, const char *path, const char *name)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->rmsnap(path, name, cmount->default_perms, true);
}
extern "C" int ceph_mkdirs(struct ceph_mount_info *cmount, const char *path, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->mkdirs(path, mode, cmount->default_perms);
}
extern "C" int ceph_rmdir(struct ceph_mount_info *cmount, const char *path)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->rmdir(path, cmount->default_perms);
}
// symlinks
extern "C" int ceph_readlink(struct ceph_mount_info *cmount, const char *path,
char *buf, int64_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->readlink(path, buf, size, cmount->default_perms);
}
extern "C" int ceph_readlinkat(struct ceph_mount_info *cmount, int dirfd,
const char *relpath, char *buf, int64_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->readlinkat(dirfd, relpath, buf, size, cmount->default_perms);
}
extern "C" int ceph_symlink(struct ceph_mount_info *cmount, const char *existing,
const char *newname)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->symlink(existing, newname, cmount->default_perms);
}
extern "C" int ceph_symlinkat(struct ceph_mount_info *cmount, const char *existing, int dirfd,
const char *newname)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->symlinkat(existing, dirfd, newname, cmount->default_perms);
}
extern "C" int ceph_fstatx(struct ceph_mount_info *cmount, int fd, struct ceph_statx *stx,
unsigned int want, unsigned int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->fstatx(fd, stx, cmount->default_perms,
want, flags);
}
extern "C" int ceph_statxat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
struct ceph_statx *stx, unsigned int want, unsigned int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->statxat(dirfd, relpath, stx, cmount->default_perms,
want, flags);
}
extern "C" int ceph_statx(struct ceph_mount_info *cmount, const char *path,
struct ceph_statx *stx, unsigned int want, unsigned int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->statx(path, stx, cmount->default_perms,
want, flags);
}
extern "C" int ceph_fsetattrx(struct ceph_mount_info *cmount, int fd,
struct ceph_statx *stx, int mask)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fsetattrx(fd, stx, mask, cmount->default_perms);
}
extern "C" int ceph_setattrx(struct ceph_mount_info *cmount, const char *relpath,
struct ceph_statx *stx, int mask, int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->setattrx(relpath, stx, mask,
cmount->default_perms, flags);
}
// *xattr() calls supporting samba/vfs
extern "C" int ceph_getxattr(struct ceph_mount_info *cmount, const char *path, const char *name, void *value, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->getxattr(path, name, value, size, cmount->default_perms);
}
extern "C" int ceph_lgetxattr(struct ceph_mount_info *cmount, const char *path, const char *name, void *value, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lgetxattr(path, name, value, size, cmount->default_perms);
}
extern "C" int ceph_fgetxattr(struct ceph_mount_info *cmount, int fd, const char *name, void *value, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fgetxattr(fd, name, value, size, cmount->default_perms);
}
extern "C" int ceph_listxattr(struct ceph_mount_info *cmount, const char *path, char *list, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->listxattr(path, list, size, cmount->default_perms);
}
extern "C" int ceph_llistxattr(struct ceph_mount_info *cmount, const char *path, char *list, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->llistxattr(path, list, size, cmount->default_perms);
}
extern "C" int ceph_flistxattr(struct ceph_mount_info *cmount, int fd, char *list, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->flistxattr(fd, list, size, cmount->default_perms);
}
extern "C" int ceph_removexattr(struct ceph_mount_info *cmount, const char *path, const char *name)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->removexattr(path, name, cmount->default_perms);
}
extern "C" int ceph_lremovexattr(struct ceph_mount_info *cmount, const char *path, const char *name)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lremovexattr(path, name, cmount->default_perms);
}
extern "C" int ceph_fremovexattr(struct ceph_mount_info *cmount, int fd, const char *name)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fremovexattr(fd, name, cmount->default_perms);
}
extern "C" int ceph_setxattr(struct ceph_mount_info *cmount, const char *path, const char *name, const void *value, size_t size, int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->setxattr(path, name, value, size, flags, cmount->default_perms);
}
extern "C" int ceph_lsetxattr(struct ceph_mount_info *cmount, const char *path, const char *name, const void *value, size_t size, int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lsetxattr(path, name, value, size, flags, cmount->default_perms);
}
extern "C" int ceph_fsetxattr(struct ceph_mount_info *cmount, int fd, const char *name, const void *value, size_t size, int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fsetxattr(fd, name, value, size, flags, cmount->default_perms);
}
/* end xattr support */
extern "C" int ceph_stat(struct ceph_mount_info *cmount, const char *path, struct stat *stbuf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->stat(path, stbuf, cmount->default_perms);
}
extern "C" int ceph_fstat(struct ceph_mount_info *cmount, int fd, struct stat *stbuf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fstat(fd, stbuf, cmount->default_perms);
}
extern int ceph_lstat(struct ceph_mount_info *cmount, const char *path, struct stat *stbuf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lstat(path, stbuf, cmount->default_perms);
}
extern "C" int ceph_chmod(struct ceph_mount_info *cmount, const char *path, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->chmod(path, mode, cmount->default_perms);
}
extern "C" int ceph_lchmod(struct ceph_mount_info *cmount, const char *path, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lchmod(path, mode, cmount->default_perms);
}
extern "C" int ceph_fchmod(struct ceph_mount_info *cmount, int fd, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fchmod(fd, mode, cmount->default_perms);
}
extern "C" int ceph_chmodat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
mode_t mode, int flags) {
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->chmodat(dirfd, relpath, mode, flags, cmount->default_perms);
}
extern "C" int ceph_chown(struct ceph_mount_info *cmount, const char *path,
int uid, int gid)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->chown(path, uid, gid, cmount->default_perms);
}
extern "C" int ceph_fchown(struct ceph_mount_info *cmount, int fd,
int uid, int gid)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fchown(fd, uid, gid, cmount->default_perms);
}
extern "C" int ceph_lchown(struct ceph_mount_info *cmount, const char *path,
int uid, int gid)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lchown(path, uid, gid, cmount->default_perms);
}
extern "C" int ceph_chownat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
uid_t uid, gid_t gid, int flags) {
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->chownat(dirfd, relpath, uid, gid, flags, cmount->default_perms);
}
extern "C" int ceph_utime(struct ceph_mount_info *cmount, const char *path,
struct utimbuf *buf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->utime(path, buf, cmount->default_perms);
}
extern "C" int ceph_futime(struct ceph_mount_info *cmount, int fd,
struct utimbuf *buf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->futime(fd, buf, cmount->default_perms);
}
extern "C" int ceph_utimes(struct ceph_mount_info *cmount, const char *path,
struct timeval times[2])
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->utimes(path, times, cmount->default_perms);
}
extern "C" int ceph_lutimes(struct ceph_mount_info *cmount, const char *path,
struct timeval times[2])
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lutimes(path, times, cmount->default_perms);
}
extern "C" int ceph_futimes(struct ceph_mount_info *cmount, int fd,
struct timeval times[2])
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->futimes(fd, times, cmount->default_perms);
}
extern "C" int ceph_futimens(struct ceph_mount_info *cmount, int fd,
struct timespec times[2])
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->futimens(fd, times, cmount->default_perms);
}
extern "C" int ceph_utimensat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
struct timespec times[2], int flags) {
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->utimensat(dirfd, relpath, times, flags, cmount->default_perms);
}
extern "C" int ceph_flock(struct ceph_mount_info *cmount, int fd, int operation,
uint64_t owner)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->flock(fd, operation, owner);
}
extern "C" int ceph_truncate(struct ceph_mount_info *cmount, const char *path,
int64_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->truncate(path, size, cmount->default_perms);
}
// file ops
extern "C" int ceph_mknod(struct ceph_mount_info *cmount, const char *path,
mode_t mode, dev_t rdev)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->mknod(path, mode, cmount->default_perms, rdev);
}
extern "C" int ceph_open(struct ceph_mount_info *cmount, const char *path,
int flags, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->open(path, flags, cmount->default_perms, mode);
}
extern "C" int ceph_openat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
int flags, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->openat(dirfd, relpath, flags, cmount->default_perms, mode);
}
extern "C" int ceph_open_layout(struct ceph_mount_info *cmount, const char *path, int flags,
mode_t mode, int stripe_unit, int stripe_count, int object_size, const char *data_pool)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->open(path, flags, cmount->default_perms, mode,
stripe_unit, stripe_count,
object_size, data_pool);
}
extern "C" int ceph_close(struct ceph_mount_info *cmount, int fd)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->close(fd);
}
extern "C" int64_t ceph_lseek(struct ceph_mount_info *cmount, int fd,
int64_t offset, int whence)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lseek(fd, offset, whence);
}
extern "C" int ceph_read(struct ceph_mount_info *cmount, int fd, char *buf,
int64_t size, int64_t offset)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->read(fd, buf, size, offset);
}
extern "C" int ceph_preadv(struct ceph_mount_info *cmount, int fd,
const struct iovec *iov, int iovcnt, int64_t offset)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->preadv(fd, iov, iovcnt, offset);
}
extern "C" int ceph_write(struct ceph_mount_info *cmount, int fd, const char *buf,
int64_t size, int64_t offset)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->write(fd, buf, size, offset);
}
extern "C" int ceph_pwritev(struct ceph_mount_info *cmount, int fd,
const struct iovec *iov, int iovcnt, int64_t offset)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->pwritev(fd, iov, iovcnt, offset);
}
extern "C" int ceph_ftruncate(struct ceph_mount_info *cmount, int fd, int64_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->ftruncate(fd, size, cmount->default_perms);
}
extern "C" int ceph_fsync(struct ceph_mount_info *cmount, int fd, int syncdataonly)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fsync(fd, syncdataonly);
}
extern "C" int ceph_fallocate(struct ceph_mount_info *cmount, int fd, int mode,
int64_t offset, int64_t length)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fallocate(fd, mode, offset, length);
}
extern "C" int ceph_lazyio(class ceph_mount_info *cmount,
int fd, int enable)
{
return (cmount->get_client()->lazyio(fd, enable));
}
extern "C" int ceph_lazyio_propagate(class ceph_mount_info *cmount,
int fd, int64_t offset, size_t count)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return (cmount->get_client()->lazyio_propagate(fd, offset, count));
}
extern "C" int ceph_lazyio_synchronize(class ceph_mount_info *cmount,
int fd, int64_t offset, size_t count)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return (cmount->get_client()->lazyio_synchronize(fd, offset, count));
}
extern "C" int ceph_sync_fs(struct ceph_mount_info *cmount)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->sync_fs();
}
extern "C" int ceph_get_file_stripe_unit(struct ceph_mount_info *cmount, int fh)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
return l.stripe_unit;
}
extern "C" int ceph_get_path_stripe_unit(struct ceph_mount_info *cmount, const char *path)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
return l.stripe_unit;
}
extern "C" int ceph_get_file_stripe_count(struct ceph_mount_info *cmount, int fh)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
return l.stripe_count;
}
extern "C" int ceph_get_path_stripe_count(struct ceph_mount_info *cmount, const char *path)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
return l.stripe_count;
}
extern "C" int ceph_get_file_object_size(struct ceph_mount_info *cmount, int fh)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
return l.object_size;
}
extern "C" int ceph_get_path_object_size(struct ceph_mount_info *cmount, const char *path)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
return l.object_size;
}
extern "C" int ceph_get_file_pool(struct ceph_mount_info *cmount, int fh)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
return l.pool_id;
}
extern "C" int ceph_get_path_pool(struct ceph_mount_info *cmount, const char *path)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
return l.pool_id;
}
extern "C" int ceph_get_file_pool_name(struct ceph_mount_info *cmount, int fh, char *buf, size_t len)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
string name = cmount->get_client()->get_pool_name(l.pool_id);
if (len == 0)
return name.length();
if (name.length() > len)
return -CEPHFS_ERANGE;
strncpy(buf, name.c_str(), len);
return name.length();
}
extern "C" int ceph_get_pool_name(struct ceph_mount_info *cmount, int pool, char *buf, size_t len)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
string name = cmount->get_client()->get_pool_name(pool);
if (len == 0)
return name.length();
if (name.length() > len)
return -CEPHFS_ERANGE;
strncpy(buf, name.c_str(), len);
return name.length();
}
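/*
 * The pool-name getters share a sizing convention: a call with len == 0
 * returns the required name length, and strncpy() only NUL-terminates
 * when the buffer is strictly larger than the name.  Two-call sketch
 * (illustrative; allocation errors not handled):
 *
 *   int n = ceph_get_pool_name(cmount, pool_id, NULL, 0);
 *   if (n > 0) {
 *     char *buf = (char *)malloc(n + 1);
 *     ceph_get_pool_name(cmount, pool_id, buf, n + 1);
 *     // buf now holds the NUL-terminated pool name
 *     free(buf);
 *   }
 */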
extern "C" int ceph_get_path_pool_name(struct ceph_mount_info *cmount, const char *path, char *buf, size_t len)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
string name = cmount->get_client()->get_pool_name(l.pool_id);
if (len == 0)
return name.length();
if (name.length() > len)
return -CEPHFS_ERANGE;
strncpy(buf, name.c_str(), len);
return name.length();
}
extern "C" int ceph_get_default_data_pool_name(struct ceph_mount_info *cmount, char *buf, size_t len)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
int64_t pool_id = cmount->get_client()->get_default_pool_id();
string name = cmount->get_client()->get_pool_name(pool_id);
if (len == 0)
return name.length();
if (name.length() > len)
return -CEPHFS_ERANGE;
strncpy(buf, name.c_str(), len);
return name.length();
}
extern "C" int ceph_get_file_layout(struct ceph_mount_info *cmount, int fh, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
if (stripe_unit)
*stripe_unit = l.stripe_unit;
if (stripe_count)
*stripe_count = l.stripe_count;
if (object_size)
*object_size = l.object_size;
if (pg_pool)
*pg_pool = l.pool_id;
return 0;
}
extern "C" int ceph_get_path_layout(struct ceph_mount_info *cmount, const char *path, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
if (stripe_unit)
*stripe_unit = l.stripe_unit;
if (stripe_count)
*stripe_count = l.stripe_count;
if (object_size)
*object_size = l.object_size;
if (pg_pool)
*pg_pool = l.pool_id;
return 0;
}
extern "C" int ceph_get_file_replication(struct ceph_mount_info *cmount, int fh)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
int rep = cmount->get_client()->get_pool_replication(l.pool_id);
return rep;
}
extern "C" int ceph_get_path_replication(struct ceph_mount_info *cmount, const char *path)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
int rep = cmount->get_client()->get_pool_replication(l.pool_id);
return rep;
}
extern "C" int ceph_set_default_file_stripe_unit(struct ceph_mount_info *cmount,
int stripe)
{
// this option no longer exists
return -CEPHFS_EOPNOTSUPP;
}
extern "C" int ceph_set_default_file_stripe_count(struct ceph_mount_info *cmount,
int count)
{
// this option no longer exists
return -CEPHFS_EOPNOTSUPP;
}
extern "C" int ceph_set_default_object_size(struct ceph_mount_info *cmount, int size)
{
// this option no longer exists
return -CEPHFS_EOPNOTSUPP;
}
extern "C" int ceph_set_default_file_replication(struct ceph_mount_info *cmount,
int replication)
{
// this option no longer exists
return -CEPHFS_EOPNOTSUPP;
}
extern "C" int ceph_set_default_preferred_pg(struct ceph_mount_info *cmount, int osd)
{
// this option no longer exists
return -CEPHFS_EOPNOTSUPP;
}
extern "C" int ceph_get_file_extent_osds(struct ceph_mount_info *cmount, int fh,
int64_t offset, int64_t *length, int *osds, int nosds)
{
if (nosds < 0)
return -CEPHFS_EINVAL;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
vector<int> vosds;
int ret = cmount->get_client()->get_file_extent_osds(fh, offset, length, vosds);
if (ret < 0)
return ret;
if (!nosds)
return vosds.size();
if ((int)vosds.size() > nosds)
return -CEPHFS_ERANGE;
for (int i = 0; i < (int)vosds.size(); i++)
osds[i] = vosds[i];
return vosds.size();
}
extern "C" int ceph_get_osd_crush_location(struct ceph_mount_info *cmount,
int osd, char *path, size_t len)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (!path && len)
return -CEPHFS_EINVAL;
vector<pair<string, string> > loc;
int ret = cmount->get_client()->get_osd_crush_location(osd, loc);
if (ret)
return ret;
size_t needed = 0;
size_t cur = 0;
vector<pair<string, string> >::iterator it;
for (it = loc.begin(); it != loc.end(); ++it) {
string& type = it->first;
string& name = it->second;
needed += type.size() + name.size() + 2;
if (needed <= len) {
if (path)
strcpy(path + cur, type.c_str());
cur += type.size() + 1;
if (path)
strcpy(path + cur, name.c_str());
cur += name.size() + 1;
}
}
if (len == 0)
return needed;
if (needed > len)
return -CEPHFS_ERANGE;
return needed;
}
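/*
 * The CRUSH location comes back as a flat sequence of NUL-terminated
 * (type, name) string pairs.  Decoding sketch (illustrative):
 *
 *   char buf[256];
 *   int n = ceph_get_osd_crush_location(cmount, osd, buf, sizeof(buf));
 *   for (int off = 0; off < n; ) {
 *     const char *type = buf + off;        // e.g. "host"
 *     off += strlen(type) + 1;
 *     const char *crush_name = buf + off;  // e.g. "myhost"
 *     off += strlen(crush_name) + 1;
 *   }
 */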
extern "C" int ceph_get_osd_addr(struct ceph_mount_info *cmount, int osd,
struct sockaddr_storage *addr)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (!addr)
return -CEPHFS_EINVAL;
entity_addr_t address;
int ret = cmount->get_client()->get_osd_addr(osd, address);
if (ret < 0)
return ret;
*addr = address.get_sockaddr_storage();
return 0;
}
extern "C" int ceph_get_file_stripe_address(struct ceph_mount_info *cmount, int fh,
int64_t offset, struct sockaddr_storage *addr, int naddr)
{
vector<entity_addr_t> address;
unsigned i;
int r;
if (naddr < 0)
return -CEPHFS_EINVAL;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->get_file_stripe_address(fh, offset, address);
if (r < 0)
return r;
for (i = 0; i < (unsigned)naddr && i < address.size(); i++)
addr[i] = address[i].get_sockaddr_storage();
/* naddr == 0: drop through and return actual size */
if (naddr && (address.size() > (unsigned)naddr))
return -CEPHFS_ERANGE;
return address.size();
}
extern "C" int ceph_localize_reads(struct ceph_mount_info *cmount, int val)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (!val)
cmount->get_client()->clear_filer_flags(CEPH_OSD_FLAG_LOCALIZE_READS);
else
cmount->get_client()->set_filer_flags(CEPH_OSD_FLAG_LOCALIZE_READS);
return 0;
}
extern "C" CephContext *ceph_get_mount_context(struct ceph_mount_info *cmount)
{
return cmount->get_ceph_context();
}
extern "C" int ceph_debug_get_fd_caps(struct ceph_mount_info *cmount, int fd)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->get_caps_issued(fd);
}
extern "C" int ceph_debug_get_file_caps(struct ceph_mount_info *cmount, const char *path)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->get_caps_issued(path, cmount->default_perms);
}
extern "C" int ceph_get_stripe_unit_granularity(struct ceph_mount_info *cmount)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return CEPH_MIN_STRIPE_UNIT;
}
extern "C" int ceph_get_pool_id(struct ceph_mount_info *cmount, const char *pool_name)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (!pool_name || !pool_name[0])
return -CEPHFS_EINVAL;
/* negative range reserved for errors */
int64_t pool_id = cmount->get_client()->get_pool_id(pool_name);
if (pool_id > 0x7fffffff)
return -CEPHFS_ERANGE;
/* get_pool_id error codes fit in int */
return (int)pool_id;
}
extern "C" int ceph_get_pool_replication(struct ceph_mount_info *cmount,
int pool_id)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->get_pool_replication(pool_id);
}
/* Low-level exports */
extern "C" int ceph_ll_lookup_root(struct ceph_mount_info *cmount,
Inode **parent)
{
*parent = cmount->get_client()->get_root();
if (*parent)
return 0;
return -CEPHFS_EFAULT;
}
extern "C" struct Inode *ceph_ll_get_inode(class ceph_mount_info *cmount,
vinodeno_t vino)
{
return (cmount->get_client())->ll_get_inode(vino);
}
extern "C" int ceph_ll_lookup_vino(
struct ceph_mount_info *cmount,
vinodeno_t vino,
Inode **inode)
{
return (cmount->get_client())->ll_lookup_vino(vino, cmount->default_perms, inode);
}
/**
* Populates the client cache with the requested inode, and its
* parent dentry.
*/
extern "C" int ceph_ll_lookup_inode(
struct ceph_mount_info *cmount,
struct inodeno_t ino,
Inode **inode)
{
return (cmount->get_client())->ll_lookup_inode(ino, cmount->default_perms, inode);
}
extern "C" int ceph_ll_lookup(struct ceph_mount_info *cmount,
Inode *parent, const char *name, Inode **out,
struct ceph_statx *stx, unsigned want,
unsigned flags, const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return (cmount->get_client())->ll_lookupx(parent, name, out, stx, want,
flags, *perms);
}
extern "C" int ceph_ll_put(class ceph_mount_info *cmount, Inode *in)
{
return (cmount->get_client()->ll_put(in));
}
extern "C" int ceph_ll_forget(class ceph_mount_info *cmount, Inode *in,
int count)
{
return (cmount->get_client()->ll_forget(in, count));
}
extern "C" int ceph_ll_walk(struct ceph_mount_info *cmount, const char* name, Inode **i,
struct ceph_statx *stx, unsigned int want, unsigned int flags,
const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return(cmount->get_client()->ll_walk(name, i, stx, want, flags, *perms));
}
extern "C" int ceph_ll_getattr(class ceph_mount_info *cmount,
Inode *in, struct ceph_statx *stx,
unsigned int want, unsigned int flags,
const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return (cmount->get_client()->ll_getattrx(in, stx, want, flags, *perms));
}
extern "C" int ceph_ll_setattr(class ceph_mount_info *cmount,
Inode *in, struct ceph_statx *stx,
int mask, const UserPerm *perms)
{
return (cmount->get_client()->ll_setattrx(in, stx, mask, *perms));
}
extern "C" int ceph_ll_open(class ceph_mount_info *cmount, Inode *in,
int flags, Fh **fh, const UserPerm *perms)
{
return (cmount->get_client()->ll_open(in, flags, fh, *perms));
}
extern "C" int ceph_ll_read(class ceph_mount_info *cmount, Fh* filehandle,
int64_t off, uint64_t len, char* buf)
{
bufferlist bl;
int r = 0;
r = cmount->get_client()->ll_read(filehandle, off, len, &bl);
if (r >= 0)
{
bl.begin().copy(bl.length(), buf);
r = bl.length();
}
return r;
}
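/*
 * Low-level path sketch: resolve, open, read, release (illustrative;
 * error handling elided; `perms` can come from ceph_mount_perms()):
 *
 *   Inode *root, *in;
 *   struct ceph_statx stx;
 *   Fh *fh;
 *   char buf[4096];
 *
 *   ceph_ll_lookup_root(cmount, &root);
 *   ceph_ll_lookup(cmount, root, "file", &in, &stx, CEPH_STATX_INO, 0,
 *                  perms);
 *   ceph_ll_open(cmount, in, O_RDONLY, &fh, perms);
 *   int n = ceph_ll_read(cmount, fh, 0, sizeof(buf), buf);
 *   ceph_ll_close(cmount, fh);
 *   ceph_ll_put(cmount, in);
 *   ceph_ll_put(cmount, root);
 */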
extern "C" int ceph_ll_read_block(class ceph_mount_info *cmount,
Inode *in, uint64_t blockid,
char* buf, uint64_t offset,
uint64_t length,
struct ceph_file_layout* layout)
{
file_layout_t l;
int r = (cmount->get_client()->ll_read_block(in, blockid, buf, offset,
length, &l));
l.to_legacy(layout);
return r;
}
extern "C" int ceph_ll_write_block(class ceph_mount_info *cmount,
Inode *in, uint64_t blockid,
char *buf, uint64_t offset,
uint64_t length,
struct ceph_file_layout *layout,
uint64_t snapseq, uint32_t sync)
{
file_layout_t l;
int r = (cmount->get_client()->ll_write_block(in, blockid, buf, offset,
length, &l, snapseq, sync));
l.to_legacy(layout);
return r;
}
extern "C" int ceph_ll_commit_blocks(class ceph_mount_info *cmount,
Inode *in, uint64_t offset,
uint64_t range)
{
return (cmount->get_client()->ll_commit_blocks(in, offset, range));
}
extern "C" int ceph_ll_fsync(class ceph_mount_info *cmount,
Fh *fh, int syncdataonly)
{
return (cmount->get_client()->ll_fsync(fh, syncdataonly));
}
extern "C" int ceph_ll_sync_inode(class ceph_mount_info *cmount,
Inode *in, int syncdataonly)
{
return (cmount->get_client()->ll_sync_inode(in, syncdataonly));
}
extern "C" int ceph_ll_fallocate(class ceph_mount_info *cmount, Fh *fh,
int mode, int64_t offset, int64_t length)
{
return cmount->get_client()->ll_fallocate(fh, mode, offset, length);
}
extern "C" off_t ceph_ll_lseek(class ceph_mount_info *cmount,
Fh *fh, off_t offset, int whence)
{
return (cmount->get_client()->ll_lseek(fh, offset, whence));
}
extern "C" int ceph_ll_write(class ceph_mount_info *cmount,
Fh *fh, int64_t off, uint64_t len,
const char *data)
{
return (cmount->get_client()->ll_write(fh, off, len, data));
}
extern "C" int64_t ceph_ll_readv(class ceph_mount_info *cmount,
struct Fh *fh, const struct iovec *iov,
int iovcnt, int64_t off)
{
return (cmount->get_client()->ll_readv(fh, iov, iovcnt, off));
}
extern "C" int64_t ceph_ll_writev(class ceph_mount_info *cmount,
struct Fh *fh, const struct iovec *iov,
int iovcnt, int64_t off)
{
return (cmount->get_client()->ll_writev(fh, iov, iovcnt, off));
}
extern "C" int ceph_ll_close(class ceph_mount_info *cmount, Fh* fh)
{
return (cmount->get_client()->ll_release(fh));
}
extern "C" int ceph_ll_create(class ceph_mount_info *cmount,
Inode *parent, const char *name, mode_t mode,
int oflags, Inode **outp, Fh **fhp,
struct ceph_statx *stx, unsigned want,
unsigned lflags, const UserPerm *perms)
{
if (lflags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return (cmount->get_client())->ll_createx(parent, name, mode, oflags, outp,
fhp, stx, want, lflags, *perms);
}
extern "C" int ceph_ll_mknod(class ceph_mount_info *cmount, Inode *parent,
const char *name, mode_t mode, dev_t rdev,
Inode **out, struct ceph_statx *stx,
unsigned want, unsigned flags,
const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return (cmount->get_client())->ll_mknodx(parent, name, mode, rdev,
out, stx, want, flags, *perms);
}
extern "C" int ceph_ll_mkdir(class ceph_mount_info *cmount, Inode *parent,
const char *name, mode_t mode, Inode **out,
struct ceph_statx *stx, unsigned want,
unsigned flags, const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->ll_mkdirx(parent, name, mode, out, stx, want,
flags, *perms);
}
extern "C" int ceph_ll_link(class ceph_mount_info *cmount,
Inode *in, Inode *newparent,
const char *name, const UserPerm *perms)
{
return cmount->get_client()->ll_link(in, newparent, name, *perms);
}
extern "C" int ceph_ll_opendir(class ceph_mount_info *cmount,
Inode *in,
struct ceph_dir_result **dirpp,
const UserPerm *perms)
{
return (cmount->get_client()->ll_opendir(in, O_RDONLY, (dir_result_t**) dirpp,
*perms));
}
extern "C" int ceph_ll_releasedir(class ceph_mount_info *cmount,
ceph_dir_result *dir)
{
return cmount->get_client()->ll_releasedir(reinterpret_cast<dir_result_t*>(dir));
}
extern "C" int ceph_ll_rename(class ceph_mount_info *cmount,
Inode *parent, const char *name,
Inode *newparent, const char *newname,
const UserPerm *perms)
{
return cmount->get_client()->ll_rename(parent, name, newparent,
newname, *perms);
}
extern "C" int ceph_ll_unlink(class ceph_mount_info *cmount, Inode *in,
const char *name, const UserPerm *perms)
{
return cmount->get_client()->ll_unlink(in, name, *perms);
}
extern "C" int ceph_ll_statfs(class ceph_mount_info *cmount,
Inode *in, struct statvfs *stbuf)
{
return (cmount->get_client()->ll_statfs(in, stbuf, cmount->default_perms));
}
extern "C" int ceph_ll_readlink(class ceph_mount_info *cmount, Inode *in,
char *buf, size_t bufsiz,
const UserPerm *perms)
{
return cmount->get_client()->ll_readlink(in, buf, bufsiz, *perms);
}
extern "C" int ceph_ll_symlink(class ceph_mount_info *cmount,
Inode *in, const char *name,
const char *value, Inode **out,
struct ceph_statx *stx, unsigned want,
unsigned flags, const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return (cmount->get_client()->ll_symlinkx(in, name, value, out, stx, want,
flags, *perms));
}
extern "C" int ceph_ll_rmdir(class ceph_mount_info *cmount,
Inode *in, const char *name,
const UserPerm *perms)
{
return cmount->get_client()->ll_rmdir(in, name, *perms);
}
extern "C" int ceph_ll_getxattr(class ceph_mount_info *cmount,
Inode *in, const char *name, void *value,
size_t size, const UserPerm *perms)
{
return (cmount->get_client()->ll_getxattr(in, name, value, size, *perms));
}
extern "C" int ceph_ll_listxattr(struct ceph_mount_info *cmount,
Inode *in, char *list,
size_t buf_size, size_t *list_size,
const UserPerm *perms)
{
int res = cmount->get_client()->ll_listxattr(in, list, buf_size, *perms);
if (res >= 0) {
*list_size = (size_t)res;
return 0;
}
return res;
}
extern "C" int ceph_ll_setxattr(class ceph_mount_info *cmount,
Inode *in, const char *name,
const void *value, size_t size,
int flags, const UserPerm *perms)
{
return (cmount->get_client()->ll_setxattr(in, name, value, size, flags, *perms));
}
extern "C" int ceph_ll_removexattr(class ceph_mount_info *cmount,
Inode *in, const char *name,
const UserPerm *perms)
{
return (cmount->get_client()->ll_removexattr(in, name, *perms));
}
extern "C" int ceph_ll_getlk(struct ceph_mount_info *cmount,
Fh *fh, struct flock *fl, uint64_t owner)
{
return (cmount->get_client()->ll_getlk(fh, fl, owner));
}
extern "C" int ceph_ll_setlk(struct ceph_mount_info *cmount,
Fh *fh, struct flock *fl, uint64_t owner,
int sleep)
{
return (cmount->get_client()->ll_setlk(fh, fl, owner, sleep));
}
extern "C" int ceph_ll_lazyio(class ceph_mount_info *cmount,
Fh *fh, int enable)
{
return (cmount->get_client()->ll_lazyio(fh, enable));
}
extern "C" int ceph_ll_delegation(struct ceph_mount_info *cmount, Fh *fh,
unsigned cmd, ceph_deleg_cb_t cb, void *priv)
{
return (cmount->get_client()->ll_delegation(fh, cmd, cb, priv));
}
extern "C" uint32_t ceph_ll_stripe_unit(class ceph_mount_info *cmount,
Inode *in)
{
return (cmount->get_client()->ll_stripe_unit(in));
}
extern "C" uint32_t ceph_ll_file_layout(class ceph_mount_info *cmount,
Inode *in,
struct ceph_file_layout *layout)
{
file_layout_t l;
int r = (cmount->get_client()->ll_file_layout(in, &l));
l.to_legacy(layout);
return r;
}
uint64_t ceph_ll_snap_seq(class ceph_mount_info *cmount, Inode *in)
{
return (cmount->get_client()->ll_snap_seq(in));
}
extern "C" int ceph_ll_get_stripe_osd(class ceph_mount_info *cmount,
Inode *in, uint64_t blockno,
struct ceph_file_layout* layout)
{
file_layout_t l;
int r = (cmount->get_client()->ll_get_stripe_osd(in, blockno, &l));
l.to_legacy(layout);
return r;
}
extern "C" int ceph_ll_num_osds(class ceph_mount_info *cmount)
{
return (cmount->get_client()->ll_num_osds());
}
extern "C" int ceph_ll_osdaddr(class ceph_mount_info *cmount,
int osd, uint32_t *addr)
{
return (cmount->get_client()->ll_osdaddr(osd, addr));
}
extern "C" uint64_t ceph_ll_get_internal_offset(class ceph_mount_info *cmount,
Inode *in,
uint64_t blockno)
{
return (cmount->get_client()->ll_get_internal_offset(in, blockno));
}
extern "C" void ceph_buffer_free(char *buf)
{
if (buf) {
free(buf);
}
}
extern "C" uint32_t ceph_get_cap_return_timeout(class ceph_mount_info *cmount)
{
if (!cmount->is_mounted())
return 0;
return cmount->get_client()->mdsmap->get_session_autoclose().sec();
}
extern "C" int ceph_set_deleg_timeout(class ceph_mount_info *cmount, uint32_t timeout)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->set_deleg_timeout(timeout);
}
extern "C" void ceph_set_session_timeout(class ceph_mount_info *cmount, unsigned timeout)
{
cmount->get_client()->set_session_timeout(timeout);
}
extern "C" void ceph_set_uuid(class ceph_mount_info *cmount, const char *uuid)
{
cmount->get_client()->set_uuid(std::string(uuid));
}
extern "C" int ceph_start_reclaim(class ceph_mount_info *cmount,
const char *uuid, unsigned flags)
{
if (!cmount->is_initialized()) {
int ret = cmount->init();
if (ret != 0)
return ret;
}
return cmount->get_client()->start_reclaim(std::string(uuid), flags,
cmount->get_filesystem());
}
extern "C" void ceph_finish_reclaim(class ceph_mount_info *cmount)
{
cmount->get_client()->finish_reclaim();
}
// This is deprecated, use ceph_ll_register_callbacks2 instead.
extern "C" void ceph_ll_register_callbacks(class ceph_mount_info *cmount,
struct ceph_client_callback_args *args)
{
cmount->get_client()->ll_register_callbacks(args);
}
extern "C" int ceph_ll_register_callbacks2(class ceph_mount_info *cmount,
struct ceph_client_callback_args *args)
{
return cmount->get_client()->ll_register_callbacks2(args);
}
extern "C" int ceph_get_snap_info(struct ceph_mount_info *cmount,
const char *path, struct snap_info *snap_info) {
Client::SnapInfo info;
int r = cmount->get_client()->get_snap_info(path, cmount->default_perms, &info);
if (r < 0) {
return r;
}
size_t i = 0;
auto nr_metadata = info.metadata.size();
snap_info->id = info.id.val;
snap_info->nr_snap_metadata = nr_metadata;
if (nr_metadata) {
snap_info->snap_metadata = (struct snap_metadata *)calloc(nr_metadata, sizeof(struct snap_metadata));
if (!snap_info->snap_metadata) {
return -CEPHFS_ENOMEM;
}
// fill with key, value pairs
for (auto &[key, value] : info.metadata) {
// len(key) + '\0' + len(value) + '\0'
char *kvp = (char *)malloc(key.size() + value.size() + 2);
if (!kvp) {
break;
}
char *_key = kvp;
char *_value = kvp + key.size() + 1;
memcpy(_key, key.c_str(), key.size());
_key[key.size()] = '\0';
memcpy(_value, value.c_str(), value.size());
_value[value.size()] = '\0';
snap_info->snap_metadata[i].key = _key;
snap_info->snap_metadata[i].value = _value;
++i;
}
}
if (nr_metadata && i != nr_metadata) {
ceph_free_snap_info_buffer(snap_info);
return -CEPHFS_ENOMEM;
}
return 0;
}
extern "C" void ceph_free_snap_info_buffer(struct snap_info *snap_info) {
for (size_t i = 0; i < snap_info->nr_snap_metadata; ++i) {
free((void *)snap_info->snap_metadata[i].key); // malloc'd memory is key+value composite
}
free(snap_info->snap_metadata);
}
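/* Caller sketch for the snap_info API above (the snapshot path is
 * hypothetical; assumes a mounted cmount and that the snapshot exists):
 *
 * struct snap_info info;
 * if (ceph_get_snap_info(cmount, "/dir/.snap/snap1", &info) == 0) {
 * for (size_t i = 0; i < info.nr_snap_metadata; ++i)
 * printf("%s=%s\n", info.snap_metadata[i].key, info.snap_metadata[i].value);
 * ceph_free_snap_info_buffer(&info); // frees each key+value composite
 * }
 */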
| 64,989 | 26.596603 | 154 | cc |
null | ceph-main/src/libcephsqlite.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License version 2.1, as published by
* the Free Software Foundation. See file COPYING.
*
*/
#include <boost/smart_ptr/intrusive_ptr.hpp>
#include <fmt/format.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <regex>
#include <sstream>
#include <string_view>
#include <limits.h>
#include <string.h>
#include <sqlite3ext.h>
SQLITE_EXTENSION_INIT1
#include "include/ceph_assert.h"
#include "include/rados/librados.hpp"
#include "common/Clock.h"
#include "common/Formatter.h"
#include "common/ceph_argparse.h"
#include "common/ceph_mutex.h"
#include "common/common_init.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "common/version.h"
#include "include/libcephsqlite.h"
#include "SimpleRADOSStriper.h"
#define dout_subsys ceph_subsys_cephsqlite
#undef dout_prefix
#define dout_prefix *_dout << "cephsqlite: " << __func__ << ": "
#define d(cct,cluster,lvl) ldout((cct), (lvl)) << "(client." << cluster->get_instance_id() << ") "
#define dv(lvl) d(cct,cluster,(lvl))
#define df(lvl) d(f->io.cct,f->io.cluster,(lvl)) << f->loc << " "
enum {
P_FIRST = 0xf0000,
P_OP_OPEN,
P_OP_DELETE,
P_OP_ACCESS,
P_OP_FULLPATHNAME,
P_OP_CURRENTTIME,
P_OPF_CLOSE,
P_OPF_READ,
P_OPF_WRITE,
P_OPF_TRUNCATE,
P_OPF_SYNC,
P_OPF_FILESIZE,
P_OPF_LOCK,
P_OPF_UNLOCK,
P_OPF_CHECKRESERVEDLOCK,
P_OPF_FILECONTROL,
P_OPF_SECTORSIZE,
P_OPF_DEVICECHARACTERISTICS,
P_LAST,
};
using cctptr = boost::intrusive_ptr<CephContext>;
using rsptr = std::shared_ptr<librados::Rados>;
struct cephsqlite_appdata {
~cephsqlite_appdata() {
{
std::scoped_lock lock(cluster_mutex);
_disconnect();
}
if (logger) {
cct->get_perfcounters_collection()->remove(logger.get());
}
if (striper_logger) {
cct->get_perfcounters_collection()->remove(striper_logger.get());
}
}
int setup_perf() {
ceph_assert(cct);
PerfCountersBuilder plb(cct.get(), "libcephsqlite_vfs", P_FIRST, P_LAST);
plb.add_time_avg(P_OP_OPEN, "op_open", "Time average of Open operations");
plb.add_time_avg(P_OP_DELETE, "op_delete", "Time average of Delete operations");
plb.add_time_avg(P_OP_ACCESS, "op_access", "Time average of Access operations");
plb.add_time_avg(P_OP_FULLPATHNAME, "op_fullpathname", "Time average of FullPathname operations");
plb.add_time_avg(P_OP_CURRENTTIME, "op_currenttime", "Time average of Currenttime operations");
plb.add_time_avg(P_OPF_CLOSE, "opf_close", "Time average of Close file operations");
plb.add_time_avg(P_OPF_READ, "opf_read", "Time average of Read file operations");
plb.add_time_avg(P_OPF_WRITE, "opf_write", "Time average of Write file operations");
plb.add_time_avg(P_OPF_TRUNCATE, "opf_truncate", "Time average of Truncate file operations");
plb.add_time_avg(P_OPF_SYNC, "opf_sync", "Time average of Sync file operations");
plb.add_time_avg(P_OPF_FILESIZE, "opf_filesize", "Time average of FileSize file operations");
plb.add_time_avg(P_OPF_LOCK, "opf_lock", "Time average of Lock file operations");
plb.add_time_avg(P_OPF_UNLOCK, "opf_unlock", "Time average of Unlock file operations");
plb.add_time_avg(P_OPF_CHECKRESERVEDLOCK, "opf_checkreservedlock", "Time average of CheckReservedLock file operations");
plb.add_time_avg(P_OPF_FILECONTROL, "opf_filecontrol", "Time average of FileControl file operations");
plb.add_time_avg(P_OPF_SECTORSIZE, "opf_sectorsize", "Time average of SectorSize file operations");
plb.add_time_avg(P_OPF_DEVICECHARACTERISTICS, "opf_devicecharacteristics", "Time average of DeviceCharacteristics file operations");
logger.reset(plb.create_perf_counters());
if (int rc = SimpleRADOSStriper::config_logger(cct.get(), "libcephsqlite_striper", &striper_logger); rc < 0) {
return rc;
}
cct->get_perfcounters_collection()->add(logger.get());
cct->get_perfcounters_collection()->add(striper_logger.get());
return 0;
}
std::pair<cctptr, rsptr> get_cluster() {
std::scoped_lock lock(cluster_mutex);
if (!cct) {
if (int rc = _open(nullptr); rc < 0) {
ceph_abort("could not open connection to ceph");
}
}
return {cct, cluster};
}
int connect() {
std::scoped_lock lock(cluster_mutex);
return _connect();
}
int reconnect() {
std::scoped_lock lock(cluster_mutex);
_disconnect();
return _connect();
}
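// Reconnect only when the blocklisted handle passed by the caller is still
// the active one; if another thread has already swapped in a fresh handle
// (cluster != _cluster), skip the teardown so we don't drop the new
// connection.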
int maybe_reconnect(rsptr _cluster) {
std::scoped_lock lock(cluster_mutex);
if (!cluster || cluster == _cluster) {
ldout(cct, 10) << "reconnecting to RADOS" << dendl;
_disconnect();
return _connect();
} else {
ldout(cct, 10) << "already reconnected" << dendl;
return 0;
}
}
int open(CephContext* _cct) {
std::scoped_lock lock(cluster_mutex);
return _open(_cct);
}
std::unique_ptr<PerfCounters> logger;
std::shared_ptr<PerfCounters> striper_logger;
private:
int _open(CephContext* _cct) {
if (!_cct) {
std::vector<const char*> env_args;
env_to_vec(env_args, "CEPH_ARGS");
std::string cluster, conf_file_list; // unused
CephInitParameters iparams = ceph_argparse_early_args(env_args, CEPH_ENTITY_TYPE_CLIENT, &cluster, &conf_file_list);
cct = cctptr(common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0), false);
cct->_conf.parse_config_files(nullptr, &std::cerr, 0);
cct->_conf.parse_env(cct->get_module_type()); // environment variables override
cct->_conf.apply_changes(nullptr);
common_init_finish(cct.get());
} else {
cct = cctptr(_cct);
}
if (int rc = setup_perf(); rc < 0) {
return rc;
}
if (int rc = _connect(); rc < 0) {
return rc;
}
return 0;
}
void _disconnect() {
if (cluster) {
cluster.reset();
}
}
int _connect() {
ceph_assert(cct);
auto _cluster = rsptr(new librados::Rados());
ldout(cct, 5) << "initializing RADOS handle as " << cct->_conf->name << dendl;
if (int rc = _cluster->init_with_context(cct.get()); rc < 0) {
lderr(cct) << "cannot initialize RADOS: " << cpp_strerror(rc) << dendl;
return rc;
}
if (int rc = _cluster->connect(); rc < 0) {
lderr(cct) << "cannot connect: " << cpp_strerror(rc) << dendl;
return rc;
}
auto s = _cluster->get_addrs();
ldout(cct, 5) << "completed connection to RADOS with address " << s << dendl;
cluster = std::move(_cluster);
return 0;
}
ceph::mutex cluster_mutex = ceph::make_mutex("libcephsqlite");
cctptr cct;
rsptr cluster;
};
struct cephsqlite_fileloc {
std::string pool;
std::string radosns;
std::string name;
};
struct cephsqlite_fileio {
cctptr cct;
rsptr cluster; // anchor for ioctx
librados::IoCtx ioctx;
std::unique_ptr<SimpleRADOSStriper> rs;
};
std::ostream& operator<<(std::ostream &out, const cephsqlite_fileloc& fileloc) {
return out
<< "["
<< fileloc.pool
<< ":"
<< fileloc.radosns
<< "/"
<< fileloc.name
<< "]"
;
}
struct cephsqlite_file {
sqlite3_file base;
struct sqlite3_vfs* vfs = nullptr;
int flags = 0;
// There are 5 lock states: https://sqlite.org/c3ref/c_lock_exclusive.html
int lock = 0;
struct cephsqlite_fileloc loc{};
struct cephsqlite_fileio io{};
};
#define getdata(vfs) (*((cephsqlite_appdata*)((vfs)->pAppData)))
static int Lock(sqlite3_file *file, int ilock)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << std::hex << ilock << dendl;
auto& lock = f->lock;
ceph_assert(!f->io.rs->is_locked() || lock > SQLITE_LOCK_NONE);
ceph_assert(lock <= ilock);
if (!f->io.rs->is_locked() && ilock > SQLITE_LOCK_NONE) {
if (int rc = f->io.rs->lock(0); rc < 0) {
df(5) << "failed: " << rc << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR;
}
}
lock = ilock;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_LOCK, end-start);
return SQLITE_OK;
}
static int Unlock(sqlite3_file *file, int ilock)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << std::hex << ilock << dendl;
auto& lock = f->lock;
ceph_assert(lock == SQLITE_LOCK_NONE || (lock > SQLITE_LOCK_NONE && f->io.rs->is_locked()));
ceph_assert(lock >= ilock);
if (ilock <= SQLITE_LOCK_NONE && SQLITE_LOCK_NONE < lock) {
if (int rc = f->io.rs->unlock(); rc < 0) {
df(5) << "failed: " << rc << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR;
}
}
lock = ilock;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_UNLOCK, end-start);
return SQLITE_OK;
}
static int CheckReservedLock(sqlite3_file *file, int *result)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << dendl;
*result = 0;
auto& lock = f->lock;
if (lock > SQLITE_LOCK_SHARED) {
*result = 1;
}
df(10);
f->io.rs->print_lockers(*_dout);
*_dout << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_CHECKRESERVEDLOCK, end-start);
return SQLITE_OK;
}
static int Close(sqlite3_file *file)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << dendl;
f->~cephsqlite_file();
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_CLOSE, end-start);
return SQLITE_OK;
}
static int Read(sqlite3_file *file, void *buf, int len, sqlite_int64 off)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << buf << " " << off << "~" << len << dendl;
if (int rc = f->io.rs->read(buf, len, off); rc < 0) {
df(5) << "read failed: " << cpp_strerror(rc) << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR_READ;
} else {
df(5) << "= " << rc << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_READ, end-start);
if (rc < len) {
memset(buf, 0, len-rc);
return SQLITE_IOERR_SHORT_READ;
} else {
return SQLITE_OK;
}
}
}
static int Write(sqlite3_file *file, const void *buf, int len, sqlite_int64 off)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << off << "~" << len << dendl;
if (int rc = f->io.rs->write(buf, len, off); rc < 0) {
df(5) << "write failed: " << cpp_strerror(rc) << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR_WRITE;
} else {
df(5) << "= " << rc << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_WRITE, end-start);
return SQLITE_OK;
}
}
static int Truncate(sqlite3_file *file, sqlite_int64 size)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << size << dendl;
if (int rc = f->io.rs->truncate(size); rc < 0) {
df(5) << "truncate failed: " << cpp_strerror(rc) << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR;
}
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_TRUNCATE, end-start);
return SQLITE_OK;
}
static int Sync(sqlite3_file *file, int flags)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << flags << dendl;
if (int rc = f->io.rs->flush(); rc < 0) {
df(5) << "failed: " << cpp_strerror(rc) << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR;
}
df(5) << " = 0" << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_SYNC, end-start);
return SQLITE_OK;
}
static int FileSize(sqlite3_file *file, sqlite_int64 *osize)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << dendl;
uint64_t size = 0;
if (int rc = f->io.rs->stat(&size); rc < 0) {
df(5) << "stat failed: " << cpp_strerror(rc) << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_NOTFOUND;
}
*osize = (sqlite_int64)size;
df(5) << "= " << size << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_FILESIZE, end-start);
return SQLITE_OK;
}
static bool parsepath(std::string_view path, struct cephsqlite_fileloc* fileloc)
{
static const std::regex re1{"^/*(\\*[[:digit:]]+):([[:alnum:]\\-_.]*)/([[:alnum:]\\-._]+)$"};
static const std::regex re2{"^/*([[:alnum:]\\-_.]+):([[:alnum:]\\-_.]*)/([[:alnum:]\\-._]+)$"};
std::cmatch cm;
if (!std::regex_match(path.data(), cm, re1)) {
if (!std::regex_match(path.data(), cm, re2)) {
return false;
}
}
fileloc->pool = cm[1];
fileloc->radosns = cm[2];
fileloc->name = cm[3];
return true;
}
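/* Illustrative examples of the two path forms accepted above (values are
 * hypothetical):
 *
 * "mypool:myns/file.db" -> pool="mypool", radosns="myns", name="file.db"
 * "*2:/file.db" -> pool="*2" (pool id 2), radosns="", name="file.db"
 *
 * Any number of leading '/' characters is tolerated in both forms.
 */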
static int makestriper(sqlite3_vfs* vfs, cctptr cct, rsptr cluster, const cephsqlite_fileloc& loc, cephsqlite_fileio* io)
{
bool gotmap = false;
d(cct,cluster,10) << loc << dendl;
enoent_retry:
if (loc.pool[0] == '*') {
std::string err;
int64_t id = strict_strtoll(loc.pool.c_str()+1, 10, &err);
ceph_assert(err.empty());
if (int rc = cluster->ioctx_create2(id, io->ioctx); rc < 0) {
if (rc == -ENOENT && !gotmap) {
cluster->wait_for_latest_osdmap();
gotmap = true;
goto enoent_retry;
}
d(cct,cluster,1) << "cannot create ioctx: " << cpp_strerror(rc) << dendl;
return rc;
}
} else {
if (int rc = cluster->ioctx_create(loc.pool.c_str(), io->ioctx); rc < 0) {
if (rc == -ENOENT && !gotmap) {
cluster->wait_for_latest_osdmap();
gotmap = true;
goto enoent_retry;
}
d(cct,cluster,1) << "cannot create ioctx: " << cpp_strerror(rc) << dendl;
return rc;
}
}
if (!loc.radosns.empty())
io->ioctx.set_namespace(loc.radosns);
io->rs = std::make_unique<SimpleRADOSStriper>(io->ioctx, loc.name);
io->rs->set_logger(getdata(vfs).striper_logger);
io->rs->set_lock_timeout(cct->_conf.get_val<std::chrono::milliseconds>("cephsqlite_lock_renewal_timeout"));
io->rs->set_lock_interval(cct->_conf.get_val<std::chrono::milliseconds>("cephsqlite_lock_renewal_interval"));
io->rs->set_blocklist_the_dead(cct->_conf.get_val<bool>("cephsqlite_blocklist_dead_locker"));
io->cluster = std::move(cluster);
io->cct = cct;
return 0;
}
static int SectorSize(sqlite3_file* sf)
{
static const int size = 65536;
auto start = ceph::coarse_mono_clock::now();
auto f = (cephsqlite_file*)sf;
df(5) << " = " << size << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_SECTORSIZE, end-start);
return size;
}
static int FileControl(sqlite3_file* sf, int op, void *arg)
{
auto f = (cephsqlite_file*)sf;
auto start = ceph::coarse_mono_clock::now();
df(5) << op << ", " << arg << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_FILECONTROL, end-start);
return SQLITE_NOTFOUND;
}
static int DeviceCharacteristics(sqlite3_file* sf)
{
auto f = (cephsqlite_file*)sf;
auto start = ceph::coarse_mono_clock::now();
df(5) << dendl;
static const int c = 0
|SQLITE_IOCAP_ATOMIC
|SQLITE_IOCAP_POWERSAFE_OVERWRITE
|SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
|SQLITE_IOCAP_SAFE_APPEND
;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_DEVICECHARACTERISTICS, end-start);
return c;
}
static int Open(sqlite3_vfs *vfs, const char *name, sqlite3_file *file,
int flags, int *oflags)
{
static const sqlite3_io_methods io = {
1, /* iVersion */
Close, /* xClose */
Read, /* xRead */
Write, /* xWrite */
Truncate, /* xTruncate */
Sync, /* xSync */
FileSize, /* xFileSize */
Lock, /* xLock */
Unlock, /* xUnlock */
CheckReservedLock, /* xCheckReservedLock */
FileControl, /* xFileControl */
SectorSize, /* xSectorSize */
DeviceCharacteristics /* xDeviceCharacteristics */
};
auto start = ceph::coarse_mono_clock::now();
bool gotmap = false;
auto [cct, cluster] = getdata(vfs).get_cluster();
/* we are not going to create temporary files */
if (name == NULL) {
dv(-1) << " cannot open temporary database" << dendl;
return SQLITE_CANTOPEN;
}
auto path = std::string_view(name);
if (path == ":memory:") {
dv(-1) << " cannot open in-memory database" << dendl;
return SQLITE_IOERR;
}
dv(5) << path << " flags=" << std::hex << flags << dendl;
auto f = new (file)cephsqlite_file();
f->vfs = vfs;
if (!parsepath(path, &f->loc)) {
ceph_assert(0); /* xFullPathname validates! */
}
f->flags = flags;
enoent_retry:
if (int rc = makestriper(vfs, cct, cluster, f->loc, &f->io); rc < 0) {
f->~cephsqlite_file();
dv(-1) << "cannot open striper" << dendl;
return SQLITE_IOERR;
}
if (flags & SQLITE_OPEN_CREATE) {
dv(10) << "OPEN_CREATE" << dendl;
if (int rc = f->io.rs->create(); rc < 0 && rc != -EEXIST) {
if (rc == -ENOENT && !gotmap) {
/* we may have an out of date OSDMap which cancels the op in the
* Objecter. Try to get a new one and retry. This is mostly noticeable
* in testing when pools are getting created/deleted left and right.
*/
dv(5) << "retrying create after getting latest OSDMap" << dendl;
cluster->wait_for_latest_osdmap();
gotmap = true;
goto enoent_retry;
}
dv(5) << "file cannot be created: " << cpp_strerror(rc) << dendl;
return SQLITE_IOERR;
}
}
if (int rc = f->io.rs->open(); rc < 0) {
if (rc == -ENOENT && !gotmap) {
/* See comment above for create case. */
dv(5) << "retrying open after getting latest OSDMap" << dendl;
cluster->wait_for_latest_osdmap();
gotmap = true;
goto enoent_retry;
}
dv(10) << "cannot open striper: " << cpp_strerror(rc) << dendl;
return rc;
}
if (oflags) {
*oflags = flags;
}
f->base.pMethods = &io;
auto end = ceph::coarse_mono_clock::now();
getdata(vfs).logger->tinc(P_OP_OPEN, end-start);
return SQLITE_OK;
}
/*
** Delete the file identified by argument path. If the dsync parameter
** is non-zero, then ensure the file-system modification to delete the
** file has been synced to disk before returning.
*/
static int Delete(sqlite3_vfs* vfs, const char* path, int dsync)
{
auto start = ceph::coarse_mono_clock::now();
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(5) << "'" << path << "', " << dsync << dendl;
cephsqlite_fileloc fileloc;
if (!parsepath(path, &fileloc)) {
dv(5) << "path does not parse!" << dendl;
return SQLITE_NOTFOUND;
}
cephsqlite_fileio io;
if (int rc = makestriper(vfs, cct, cluster, fileloc, &io); rc < 0) {
dv(-1) << "cannot open striper" << dendl;
return SQLITE_IOERR;
}
if (int rc = io.rs->lock(0); rc < 0) {
return SQLITE_IOERR;
}
if (int rc = io.rs->remove(); rc < 0) {
dv(5) << "= " << rc << dendl;
return SQLITE_IOERR_DELETE;
}
/* No need to unlock */
dv(5) << "= 0" << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(vfs).logger->tinc(P_OP_DELETE, end-start);
return SQLITE_OK;
}
/*
** Query the file-system to see if the named file exists, is readable or
** is both readable and writable.
*/
static int Access(sqlite3_vfs* vfs, const char* path, int flags, int* result)
{
auto start = ceph::coarse_mono_clock::now();
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(5) << path << " " << std::hex << flags << dendl;
cephsqlite_fileloc fileloc;
if (!parsepath(path, &fileloc)) {
dv(5) << "path does not parse!" << dendl;
return SQLITE_NOTFOUND;
}
cephsqlite_fileio io;
if (int rc = makestriper(vfs, cct, cluster, fileloc, &io); rc < 0) {
dv(-1) << "cannot open striper" << dendl;
return SQLITE_IOERR;
}
if (int rc = io.rs->open(); rc < 0) {
if (rc == -ENOENT) {
*result = 0;
return SQLITE_OK;
} else {
dv(10) << "cannot open striper: " << cpp_strerror(rc) << dendl;
*result = 0;
return SQLITE_IOERR;
}
}
uint64_t size = 0;
if (int rc = io.rs->stat(&size); rc < 0) {
dv(5) << "= " << rc << " (" << cpp_strerror(rc) << ")" << dendl;
*result = 0;
} else {
dv(5) << "= 0" << dendl;
*result = 1;
}
auto end = ceph::coarse_mono_clock::now();
getdata(vfs).logger->tinc(P_OP_ACCESS, end-start);
return SQLITE_OK;
}
/* This method is only called once for each database. It provides a chance to
* reformat the path into a canonical format.
*/
static int FullPathname(sqlite3_vfs* vfs, const char* ipath, int opathlen, char* opath)
{
auto start = ceph::coarse_mono_clock::now();
auto path = std::string_view(ipath);
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(5) << "1: " << path << dendl;
cephsqlite_fileloc fileloc;
if (!parsepath(path, &fileloc)) {
dv(5) << "path does not parse!" << dendl;
return SQLITE_NOTFOUND;
}
dv(5) << " parsed " << fileloc << dendl;
auto p = fmt::format("{}:{}/{}", fileloc.pool, fileloc.radosns, fileloc.name);
if (p.size() >= (size_t)opathlen) {
dv(5) << "path too long!" << dendl;
return SQLITE_CANTOPEN;
}
strcpy(opath, p.c_str());
dv(5) << " output " << p << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(vfs).logger->tinc(P_OP_FULLPATHNAME, end-start);
return SQLITE_OK;
}
static int CurrentTime(sqlite3_vfs* vfs, sqlite3_int64* time)
{
auto start = ceph::coarse_mono_clock::now();
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(5) << time << dendl;
auto t = ceph_clock_now();
*time = t.to_msec() + 2440587.5*86400000; /* ms since the unix epoch plus the julian day of the epoch in ms = julian day number in ms */
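/* Worked example: one day after the unix epoch, t.to_msec() = 86400000 and
 * 2440587.5*86400000 = 210866760000000, so *time = 210866846400000, which
 * is julian day 2440588.5 expressed in milliseconds, the unit sqlite's
 * xCurrentTimeInt64 expects. (2440587.5 is the julian day of 1970-01-01.)
 */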
auto end = ceph::coarse_mono_clock::now();
getdata(vfs).logger->tinc(P_OP_CURRENTTIME, end-start);
return SQLITE_OK;
}
LIBCEPHSQLITE_API int cephsqlite_setcct(CephContext* _cct, char** ident)
{
ldout(_cct, 1) << "cct: " << _cct << dendl;
if (sqlite3_api == nullptr) {
lderr(_cct) << "API violation: must have sqlite3 init libcephsqlite" << dendl;
return -EINVAL;
}
auto vfs = sqlite3_vfs_find("ceph");
if (!vfs) {
lderr(_cct) << "API violation: must have sqlite3 init libcephsqlite" << dendl;
return -EINVAL;
}
auto& appd = getdata(vfs);
if (int rc = appd.open(_cct); rc < 0) {
return rc;
}
auto [cct, cluster] = appd.get_cluster();
auto s = cluster->get_addrs();
if (ident) {
*ident = strdup(s.c_str());
}
ldout(cct, 1) << "complete" << dendl;
return 0;
}
static void f_perf(sqlite3_context* ctx, int argc, sqlite3_value** argv)
{
auto vfs = (sqlite3_vfs*)sqlite3_user_data(ctx);
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(10) << dendl;
auto&& appd = getdata(vfs);
JSONFormatter f(false);
f.open_object_section("ceph_perf");
appd.logger->dump_formatted(&f, false, false);
appd.striper_logger->dump_formatted(&f, false, false);
f.close_section();
{
CachedStackStringStream css;
f.flush(*css);
auto sv = css->strv();
dv(20) << " = " << sv << dendl;
sqlite3_result_text(ctx, sv.data(), sv.size(), SQLITE_TRANSIENT);
}
}
static void f_status(sqlite3_context* ctx, int argc, sqlite3_value** argv)
{
auto vfs = (sqlite3_vfs*)sqlite3_user_data(ctx);
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(10) << dendl;
JSONFormatter f(false);
f.open_object_section("ceph_status");
f.dump_int("id", cluster->get_instance_id());
f.dump_string("addr", cluster->get_addrs());
f.close_section();
{
CachedStackStringStream css;
f.flush(*css);
auto sv = css->strv();
dv(20) << " = " << sv << dendl;
sqlite3_result_text(ctx, sv.data(), sv.size(), SQLITE_TRANSIENT);
}
}
static int autoreg(sqlite3* db, char** err, const struct sqlite3_api_routines* thunk)
{
auto vfs = sqlite3_vfs_find("ceph");
if (!vfs) {
ceph_abort("ceph vfs not found");
}
if (int rc = sqlite3_create_function(db, "ceph_perf", 0, SQLITE_UTF8, vfs, f_perf, nullptr, nullptr); rc) {
return rc;
}
if (int rc = sqlite3_create_function(db, "ceph_status", 0, SQLITE_UTF8, vfs, f_status, nullptr, nullptr); rc) {
return rc;
}
return SQLITE_OK;
}
/* You may wonder why we have an atexit handler? After all, atexit/exit creates
* a mess for multithreaded programs. Well, sqlite3 does not have an API for
* orderly removal of extensions. And, in fact, any API we might make
* unofficially (such as "sqlite3_cephsqlite_fini") would potentially race with
* other threads interacting with sqlite3 + the "ceph" VFS. There is a method
* for removing a VFS but it's not called by sqlite3 in any error scenario and
* there is no mechanism within sqlite3 to tell a VFS to unregister itself.
*
* This all would be mostly okay if /bin/sqlite3 did not call exit(3), but it
* does. (This occurs only for the sqlite3 binary, not when used as a library.)
* exit(3) calls destructors on all static-duration structures for the program.
* This breaks any outstanding threads created by the librados handle in all
* sorts of fantastic ways from C++ exceptions to memory faults. In general,
* Ceph libraries are not tolerant of exit(3) (_exit(3) is okay!). Applications
* must clean up after themselves or _exit(3).
*
* So, we have an atexit handler for libcephsqlite. This simply shuts down the
* RADOS handle. We can be assured that this occurs before any ceph library
* static-duration structures are destructed due to ordering guarantees by
* exit(3). Generally, we only see this called when the VFS is used by
* /bin/sqlite3 and only during sqlite3 error scenarios (like I/O errors
* arising from blocklisting).
*/
static void cephsqlite_atexit()
{
if (auto vfs = sqlite3_vfs_find("ceph"); vfs) {
if (vfs->pAppData) {
auto&& appd = getdata(vfs);
delete &appd;
vfs->pAppData = nullptr;
}
}
}
LIBCEPHSQLITE_API int sqlite3_cephsqlite_init(sqlite3* db, char** err, const sqlite3_api_routines* api)
{
SQLITE_EXTENSION_INIT2(api);
auto vfs = sqlite3_vfs_find("ceph");
if (!vfs) {
vfs = (sqlite3_vfs*) calloc(1, sizeof(sqlite3_vfs));
auto appd = new cephsqlite_appdata;
vfs->iVersion = 2;
vfs->szOsFile = sizeof(struct cephsqlite_file);
vfs->mxPathname = 4096;
vfs->zName = "ceph";
vfs->pAppData = appd;
vfs->xOpen = Open;
vfs->xDelete = Delete;
vfs->xAccess = Access;
vfs->xFullPathname = FullPathname;
vfs->xCurrentTimeInt64 = CurrentTime;
if (int rc = sqlite3_vfs_register(vfs, 0); rc) {
delete appd;
free(vfs);
return rc;
}
}
if (int rc = std::atexit(cephsqlite_atexit); rc) {
return SQLITE_INTERNAL;
}
if (int rc = sqlite3_auto_extension((void(*)(void))autoreg); rc) {
return rc;
}
if (int rc = autoreg(db, err, api); rc) {
return rc;
}
return SQLITE_OK_LOAD_PERMANENTLY;
}
| 27,898 | 28.87045 | 136 | cc |
null | ceph-main/src/librados-config.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <iostream>
#include <boost/program_options/cmdline.hpp>
#include <boost/program_options/option.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/program_options/variables_map.hpp>
#include "include/rados/librados.h"
#include "ceph_ver.h"
namespace po = boost::program_options;
int main(int argc, const char **argv)
{
po::options_description desc{"usage: librados-config [option]"};
desc.add_options()
("help,h", "print this help message")
("version", "library version")
("vernum", "library version code")
("release", "print release name");
po::parsed_options parsed =
po::command_line_parser(argc, argv).options(desc).run();
po::variables_map vm;
po::store(parsed, vm);
po::notify(vm);
if (vm.count("help")) {
std::cout << desc << std::endl;
} else if (vm.count("version")) {
int maj, min, ext;
rados_version(&maj, &min, &ext);
std::cout << maj << "." << min << "." << ext << std::endl;
} else if (vm.count("vernum")) {
std::cout << std::hex << LIBRADOS_VERSION_CODE << std::dec << std::endl;
} else if (vm.count("release")) {
std::cout << CEPH_RELEASE_NAME << ' '
<< '(' << CEPH_RELEASE_TYPE << ')'
<< std::endl;
} else {
std::cerr << argv[0] << ": -h or --help for usage" << std::endl;
return 1;
}
}
| 1,799 | 29 | 76 | cc |
null | ceph-main/src/loadclass.sh | #!/usr/bin/env bash
fname=$1
[ -z "$fname" ] && exit
[ -e $fname ] || { echo "file not found: $fname"; exit; }
name="`nm $fname | grep __cls_name__ | sed 's/.*__cls_name__//g' | head -1`"
[ -z "$name" ] && exit
ver="`nm $fname | grep __cls_ver__ | sed 's/.*__cls_ver__//g' | sed 's/_/\./g' | head -1`"
[ -z "$ver" ] && exit
echo loading $name v$ver
fl=`file $fname`
arch=""
[ `echo "$fl" | grep -c i386` -gt 0 ] && arch="i386"
[ `echo "$fl" | grep -c x86-64` -gt 0 ] && arch="x86-64"
[ -z "$arch" ] && { echo "lib architecture not identified"; exit; }
`dirname $0`/ceph class add $name $ver $arch --in-data=$fname
| 624 | 22.148148 | 90 | sh |
null | ceph-main/src/mrgw.sh | #!/usr/bin/env bash
set -e
rgw_frontend=${RGW_FRONTEND:-"beast"}
script_root=$(dirname "$0")
script_root=$(cd "$script_root" && pwd)
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
if [ -e CMakeCache.txt ]; then
script_root=$PWD
elif [ -e "$script_root"/../${BUILD_DIR}/CMakeCache.txt ]; then
cd "$script_root"/../${BUILD_DIR}
script_root=$PWD
fi
#ceph_bin=$script_root/bin
vstart_path=$(dirname "$0")
[ "$#" -lt 3 ] && echo "usage: $0 <name> <port> <ssl-port> [params...]" && exit 1
name=$1
port=$2
ssl_port=$3
cert_param=""
port_param="port=$port"
if [ "$ssl_port" -gt 0 ]; then
cert_param="ssl_certificate=./cert.pem"
if [ "$rgw_frontend" = "civetweb" ]; then
port_param="port=${port} port=${ssl_port}s"
else
port_param="port=${port} ssl_port=${ssl_port}"
fi
fi
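# For example (hypothetical ports), port=8000 ssl_port=8443 yields:
# civetweb: "port=8000 port=8443s" (the trailing "s" marks the TLS port)
# beast: "port=8000 ssl_port=8443"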
if [ -n "$RGW_FRONTEND_THREADS" ]; then
set_frontend_threads="num_threads=$RGW_FRONTEND_THREADS"
fi
shift 3
run_root=$script_root/run/$name
pidfile=$run_root/out/radosgw.${port}.pid
asokfile=$run_root/out/radosgw.${port}.asok
logfile=$run_root/out/radosgw.${port}.log
"$vstart_path"/mstop.sh "$name" radosgw "$port"
"$vstart_path"/mrun "$name" ceph -c "$run_root"/ceph.conf \
-k "$run_root"/keyring auth get-or-create client.rgw."$port" mon \
'allow rw' osd 'allow rwx' mgr 'allow rw' >> "$run_root"/keyring
"$vstart_path"/mrun "$name" radosgw --rgw-frontends="$rgw_frontend $port_param $set_frontend_threads $cert_param" \
-n client.rgw."$port" --pid-file="$pidfile" \
--admin-socket="$asokfile" "$@" --log-file="$logfile"
| 1,543 | 27.072727 | 115 | sh |
null | ceph-main/src/mstart.sh | #!/bin/sh
usage="usage: $0 <name> [vstart options]...\n"
usage_exit() {
printf "$usage"
exit
}
[ $# -lt 1 ] && usage_exit
instance=$1
shift
vstart_path=`dirname $0`
root_path=`dirname $0`
root_path=`(cd $root_path; pwd)`
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
if [ -e CMakeCache.txt ]; then
root_path=$PWD
elif [ -e $root_path/../${BUILD_DIR}/CMakeCache.txt ]; then
cd $root_path/../${BUILD_DIR}
root_path=$PWD
fi
RUN_ROOT_PATH=${root_path}/run
mkdir -p $RUN_ROOT_PATH
if [ -z "$CLUSTERS_LIST" ]
then
CLUSTERS_LIST=$RUN_ROOT_PATH/.clusters.list
fi
if [ ! -f $CLUSTERS_LIST ]; then
touch $CLUSTERS_LIST
fi
pos=`grep -n -w $instance $CLUSTERS_LIST`
if [ $? -ne 0 ]; then
echo $instance >> $CLUSTERS_LIST
pos=`grep -n -w $instance $CLUSTERS_LIST`
fi
pos=`echo $pos | cut -d: -f1`
base_port=$((6800+pos*20))
rgw_port=$((8000+pos*1))
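# Worked example: the second cluster in $CLUSTERS_LIST (pos=2) gets
# base_port=6800+2*20=6840 and rgw_port=8000+2=8002, reserving a block of
# 20 ports per cluster so instances do not collide.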
[ -z "$VSTART_DEST" ] && export VSTART_DEST=$RUN_ROOT_PATH/$instance
[ -z "$CEPH_PORT" ] && export CEPH_PORT=$base_port
[ -z "$CEPH_RGW_PORT" ] && export CEPH_RGW_PORT=$rgw_port
mkdir -p $VSTART_DEST
echo "Cluster dest path: $VSTART_DEST"
echo "monitors base port: $CEPH_PORT"
echo "rgw base port: $CEPH_RGW_PORT"
$vstart_path/vstart.sh "$@"
| 1,208 | 18.190476 | 68 | sh |
null | ceph-main/src/mstop.sh | #!/usr/bin/env bash
set -e
script_root=`dirname $0`
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
if [ -e CMakeCache.txt ]; then
script_root=$PWD
elif [ -e $script_root/../${BUILD_DIR}/CMakeCache.txt ]; then
script_root=`(cd $script_root/../${BUILD_DIR}; pwd)`
fi
[ "$#" -lt 1 ] && echo "usage: $0 <name> [entity [id]]" && exit 1
name=$1
entity=$2
id=$3
run_root=$script_root/run/$name
pidpath=$run_root/out
if [ "$entity" == "" ]; then
pfiles=`ls $pidpath/*.pid` || true
elif [ "$id" == "" ]; then
pfiles=`ls $pidpath/$entity.*.pid` || true
else
pfiles=`ls $pidpath/$entity.$id.pid` || true
fi
MAX_RETRIES=20
for pidfile in $pfiles; do
pid=`cat $pidfile`
fname=`echo $pidfile | sed 's/.*\///g'`
[ "$pid" == "" ] && exit
[ $pid -eq 0 ] && exit
echo pid=$pid
extra_check=""
entity=`echo $fname | sed 's/\..*//g'`
name=`echo $fname | sed 's/\.pid$//g'`
[ "$entity" == "radosgw" ] && extra_check="-e lt-radosgw"
echo entity=$entity pid=$pid name=$name
counter=0
signal=""
while ps -p $pid -o args= | grep -q -e $entity $extra_check ; do
if [[ "$counter" -gt $MAX_RETRIES ]]; then
signal="-9"
fi
cmd="kill $signal $pid"
printf "$cmd...\n"
$cmd
sleep 1
counter=$((counter+1))
continue
done
done
| 1,274 | 20.25 | 65 | sh |
null | ceph-main/src/multi-dump.sh | #!/usr/bin/env bash
#
# multi-dump.sh
#
# Dumps interesting information about the Ceph cluster at a series of epochs.
#
### Functions
usage() {
cat <<EOF
multi-dump.sh: dumps out ceph maps
-D Enable diff-mode
-e <start-epoch> What epoch to end with.
-h This help message
-s <start-epoch> What epoch to start with. Defaults to 1.
-t <map-type> What type of map to dump. Defaults to osdmap.
Valid map types are: osdmap
EOF
}
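# Example invocation (epoch numbers are hypothetical): diff consecutive
# osdmaps between epochs 5 and 8:
# ./multi-dump.sh -D -s 5 -e 8 -t osdmap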
cleanup() {
[ -n "${TEMPDIR}" ] && rm -rf "${TEMPDIR}"
}
die() {
echo $@
exit 1
}
dump_osdmap() {
for v in `seq $START_EPOCH $END_EPOCH`; do
./ceph osd getmap $v -o $TEMPDIR/$v >> $TEMPDIR/cephtool-out \
|| die "cephtool failed to dump epoch $v"
done
if [ $DIFFMODE -eq 1 ]; then
for v in `seq $START_EPOCH $END_EPOCH`; do
./osdmaptool --print $TEMPDIR/$v > $TEMPDIR/$v.out
done
cat $TEMPDIR/$START_EPOCH.out
E=$((END_EPOCH-1))
for v in `seq $START_EPOCH $E`; do
S=$((v+1))
echo "************** $S **************"
diff $TEMPDIR/$v.out $TEMPDIR/$S.out
done
else
for v in `seq $START_EPOCH $END_EPOCH`; do
echo "************** $v **************"
./osdmaptool --print $TEMPDIR/$v \
|| die "osdmaptool failed to print epoch $v"
done
fi
}
### Setup
trap cleanup INT TERM EXIT
TEMPDIR=`mktemp -d`
MYDIR=`dirname $0`
MYDIR=`readlink -f $MYDIR`
MAP_TYPE=osdmap
cd $MYDIR
### Parse arguments
DIFFMODE=0
START_EPOCH=1
END_EPOCH=0
while getopts "De:hs:t:" flag; do
case $flag in
D) DIFFMODE=1;;
e) END_EPOCH=$OPTARG;;
h) usage
exit 0
;;
s) START_EPOCH=$OPTARG;;
t) MAP_TYPE=$OPTARG;;
*) usage
exit 1;;
esac
done
[ $END_EPOCH -eq 0 ] && die "You must supply an end epoch with -e"
### Dump maps
case $MAP_TYPE in
"osdmap") dump_osdmap;;
*) die "sorry, don't know how to handle MAP_TYPE '$MAP_TYPE'"
esac
exit 0
| 2,364 | 23.132653 | 78 | sh |
null | ceph-main/src/perf_histogram.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 OVH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_PERF_HISTOGRAM_H
#define CEPH_COMMON_PERF_HISTOGRAM_H
#include "common/Formatter.h"
#include "include/int_types.h"
#include <array>
#include <atomic>
#include <memory>
#include "include/ceph_assert.h"
class PerfHistogramCommon {
public:
enum scale_type_d : uint8_t {
SCALE_LINEAR = 1,
SCALE_LOG2 = 2,
};
struct axis_config_d {
const char *m_name = nullptr;
scale_type_d m_scale_type = SCALE_LINEAR;
int64_t m_min = 0;
int64_t m_quant_size = 0;
int32_t m_buckets = 0;
axis_config_d() = default;
axis_config_d(const char* name,
scale_type_d scale_type,
int64_t min,
int64_t quant_size,
int32_t buckets)
: m_name(name),
m_scale_type(scale_type),
m_min(min),
m_quant_size(quant_size),
m_buckets(buckets)
{}
};
protected:
/// Dump configuration of one axis to a formatter
static void dump_formatted_axis(ceph::Formatter *f, const axis_config_d &ac);
/// Quantize given value and convert to bucket number on given axis
static int64_t get_bucket_for_axis(int64_t value, const axis_config_d &ac);
/// Calculate inclusive ranges of axis values for each bucket on that axis
static std::vector<std::pair<int64_t, int64_t>> get_axis_bucket_ranges(
const axis_config_d &ac);
};
/// PerfHistogram traces a histogram of input values. It's an extended
/// version of a standard histogram, which traces the characteristics of a
/// single value only. In this implementation, values can be traced in multiple
/// dimensions - i.e. we can create a histogram of input request size (first
/// dimension) and processing latency (second dimension). Creating a standard
/// histogram out of such a multidimensional one is trivial and requires
/// summing values across the dimensions we're not interested in.
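/// A usage sketch (axis parameters below are hypothetical):
///
/// PerfHistogram<2> h{
/// {"request_size", PerfHistogramCommon::SCALE_LOG2, 0, 512, 10},
/// {"latency_usec", PerfHistogramCommon::SCALE_LOG2, 0, 100, 16},
/// };
/// h.inc(4096, 250); // one request of 4096 bytes taking 250 usec
///
/// Summing read_bucket() across the latency axis then yields the ordinary
/// one-dimensional histogram of request sizes.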
template <int DIM = 2>
class PerfHistogram : public PerfHistogramCommon {
public:
/// Initialize new histogram object
PerfHistogram(std::initializer_list<axis_config_d> axes_config) {
ceph_assert(axes_config.size() == DIM &&
"Invalid number of axis configuration objects");
int i = 0;
for (const auto &ac : axes_config) {
ceph_assertf(ac.m_buckets > 0,
"Must have at least one bucket on axis");
ceph_assertf(ac.m_quant_size > 0,
"Quantization unit must be non-zero positive integer value");
m_axes_config[i++] = ac;
}
m_rawData.reset(new std::atomic<uint64_t>[get_raw_size()]);
}
/// Copy from other histogram object
PerfHistogram(const PerfHistogram &other)
: m_axes_config(other.m_axes_config) {
int64_t size = get_raw_size();
m_rawData.reset(new std::atomic<uint64_t>[size]);
for (int64_t i = 0; i < size; i++) {
m_rawData[i] = other.m_rawData[i];
}
}
/// Set all histogram values to 0
void reset() {
auto size = get_raw_size();
for (auto i = size; --i >= 0;) {
m_rawData[i] = 0;
}
}
/// Increase counter for given axis values by one
template <typename... T>
void inc(T... axis) {
auto index = get_raw_index_for_value(axis...);
m_rawData[index] += 1;
}
/// Increase counter for given axis buckets by one
template <typename... T>
void inc_bucket(T... bucket) {
auto index = get_raw_index_for_bucket(bucket...);
m_rawData[index] += 1;
}
/// Read value from given bucket
template <typename... T>
uint64_t read_bucket(T... bucket) const {
auto index = get_raw_index_for_bucket(bucket...);
return m_rawData[index];
}
/// Dump data to a Formatter object
void dump_formatted(ceph::Formatter *f) const {
// Dump axes configuration
f->open_array_section("axes");
for (auto &ac : m_axes_config) {
dump_formatted_axis(f, ac);
}
f->close_section();
// Dump histogram values
dump_formatted_values(f);
}
protected:
/// Raw data stored as linear space, internal indexes are calculated on
/// demand.
std::unique_ptr<std::atomic<uint64_t>[]> m_rawData;
/// Configuration of axes
std::array<axis_config_d, DIM> m_axes_config;
/// Dump histogram counters to a formatter
void dump_formatted_values(ceph::Formatter *f) const {
visit_values([f](int) { f->open_array_section("values"); },
[f](int64_t value) { f->dump_unsigned("value", value); },
[f](int) { f->close_section(); });
}
/// Get number of all histogram counters
int64_t get_raw_size() {
int64_t ret = 1;
for (const auto &ac : m_axes_config) {
ret *= ac.m_buckets;
}
return ret;
}
/// Calculate m_rawData index from axis values
template <typename... T>
int64_t get_raw_index_for_value(T... axes) const {
static_assert(sizeof...(T) == DIM, "Incorrect number of arguments");
return get_raw_index_internal<0>(get_bucket_for_axis, 0, axes...);
}
/// Calculate m_rawData index from axis bucket numbers
template <typename... T>
int64_t get_raw_index_for_bucket(T... buckets) const {
static_assert(sizeof...(T) == DIM, "Incorrect number of arguments");
return get_raw_index_internal<0>(
[](int64_t bucket, const axis_config_d &ac) {
ceph_assertf(bucket >= 0, "Bucket index can not be negative");
ceph_assertf(bucket < ac.m_buckets, "Bucket index too large");
return bucket;
},
0, buckets...);
}
template <int level = 0, typename F, typename... T>
int64_t get_raw_index_internal(F bucket_evaluator, int64_t startIndex,
int64_t value, T... tail) const {
static_assert(level + 1 + sizeof...(T) == DIM,
"Internal consistency check");
auto &ac = m_axes_config[level];
auto bucket = bucket_evaluator(value, ac);
return get_raw_index_internal<level + 1>(
bucket_evaluator, ac.m_buckets * startIndex + bucket, tail...);
}
template <int level, typename F>
int64_t get_raw_index_internal(F, int64_t startIndex) const {
static_assert(level == DIM, "Internal consistency check");
return startIndex;
}
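// E.g. for DIM=2 with axis sizes B0 and B1 the recursion computes the
// usual row-major index: raw = bucket0 * B1 + bucket1.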
/// Visit all histogram counters, call onDimensionEnter / onDimensionLeave
/// when starting / finishing traversal
/// on given axis, call onValue when dumping raw histogram counter value.
template <typename FDE, typename FV, typename FDL>
void visit_values(FDE onDimensionEnter, FV onValue, FDL onDimensionLeave,
int level = 0, int startIndex = 0) const {
if (level == DIM) {
onValue(m_rawData[startIndex]);
return;
}
onDimensionEnter(level);
auto &ac = m_axes_config[level];
startIndex *= ac.m_buckets;
for (int32_t i = 0; i < ac.m_buckets; ++i, ++startIndex) {
visit_values(onDimensionEnter, onValue, onDimensionLeave, level + 1,
startIndex);
}
onDimensionLeave(level);
}
};
#endif
| 7,206 | 30.334783 | 80 | h |
null | ceph-main/src/stop.sh | #!/usr/bin/env bash
# -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
# vim: softtabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2013 Inktank <[email protected]>
# Copyright (C) 2013 Cloudwatt <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
test -d dev/osd0/. && test -e dev/sudo && SUDO="sudo"
if [ -e CMakeCache.txt ]; then
[ -z "$CEPH_BIN" ] && CEPH_BIN=bin
fi
if [ -n "$VSTART_DEST" ]; then
CEPH_CONF_PATH=$VSTART_DEST
else
CEPH_CONF_PATH="$PWD"
fi
conf_fn="$CEPH_CONF_PATH/ceph.conf"
if [ -z "$CEPHADM" ]; then
CEPHADM="${CEPH_BIN}/cephadm"
fi
MYUID=$(id -u)
MYNAME=$(id -nu)
do_killall() {
local pname="ceph-run.*$1"
if [ $1 == "ganesha.nfsd" ]; then
pname=$1
fi
pg=`pgrep -u $MYUID -f $pname`
[ -n "$pg" ] && kill $pg
$SUDO killall -u $MYNAME $1
}
maybe_kill() {
local p=$1
shift
local step=$1
shift
case $step in
0)
# killing processes
pkill -SIGTERM -u $MYUID $p
return 1
;;
[1-5])
# wait for processes to stop
if pkill -0 -u $MYUID $p; then
# $p is still alive
return 1
fi
;;
8)
# kill and print if some left
if pkill -0 -u $MYUID $p; then
echo "WARNING: $p did not orderly shutdown, killing it hard!" >&2
pkill -SIGKILL -u $MYUID $p
fi
;;
esac
}
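# The "for step in 0 1 1 2 3 5 8" loop below drives maybe_kill(): step 0
# sends SIGTERM, steps 1-5 merely poll for survivors (with sleeps of 1, 1,
# 2, 3 and 5 seconds between rounds), and after a final 8 second sleep step
# 8 SIGKILLs anything still alive, roughly 20 seconds end to end.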
do_killcephadm() {
local FSID=$($CEPH_BIN/ceph -c $conf_fn fsid)
if [ -n "$FSID" ]; then
sudo $CEPHADM rm-cluster --fsid $FSID --force
fi
}
do_umountall() {
#VSTART_IP_PORTS is of the format as below
#"[v[num]:IP:PORT/0,v[num]:IP:PORT/0][v[num]:IP:PORT/0,v[num]:IP:PORT/0]..."
VSTART_IP_PORTS=$("${CEPH_BIN}"/ceph -c $conf_fn mon metadata 2>/dev/null | jq -j '.[].addrs')
#SRC_MNT_ARRAY is of the format as below
#SRC_MNT_ARRAY[0] = IP:PORT,IP:PORT,IP:PORT:/
#SRC_MNT_ARRAY[1] = MNT_POINT1
#SRC_MNT_ARRAY[2] = IP:PORT:/ #Could be mounted using single mon IP
#SRC_MNT_ARRAY[3] = MNT_POINT2
#...
SRC_MNT_ARRAY=($(findmnt -t ceph -n --raw --output=source,target))
LEN_SRC_MNT_ARRAY=${#SRC_MNT_ARRAY[@]}
for (( i=0; i<${LEN_SRC_MNT_ARRAY}; i=$((i+2)) ))
do
# The first IP:PORT among the list is checked against vstart monitor IP:PORTS
IP_PORT1=$(echo ${SRC_MNT_ARRAY[$i]} | awk -F ':/' '{print $1}' | awk -F ',' '{print $1}')
if [[ "$VSTART_IP_PORTS" == *"$IP_PORT1"* ]]
then
CEPH_MNT=${SRC_MNT_ARRAY[$((i+1))]}
[ -n "$CEPH_MNT" ] && sudo umount -f $CEPH_MNT
fi
done
#Get fuse mounts of the cluster
num_of_ceph_mdss=$(ps -e | grep \ ceph-mds$ | wc -l)
if test $num_of_ceph_mdss -ne 0; then
CEPH_FUSE_MNTS=$("${CEPH_BIN}"/ceph -c $conf_fn tell mds.* client ls 2>/dev/null | grep mount_point | tr -d '",' | awk '{print $2}')
[ -n "$CEPH_FUSE_MNTS" ] && sudo umount -f $CEPH_FUSE_MNTS
fi
}
usage="usage: $0 [all] [mon] [mds] [osd] [rgw] [nfs] [--crimson] [--cephadm]\n"
stop_all=1
stop_mon=0
stop_mds=0
stop_osd=0
stop_mgr=0
stop_rgw=0
stop_ganesha=0
ceph_osd=ceph-osd
stop_cephadm=0
while [ $# -ge 1 ]; do
case $1 in
all )
stop_all=1
;;
mon | ceph-mon )
stop_mon=1
stop_all=0
;;
mgr | ceph-mgr )
stop_mgr=1
stop_all=0
;;
mds | ceph-mds )
stop_mds=1
stop_all=0
;;
osd | ceph-osd )
stop_osd=1
stop_all=0
;;
rgw | ceph-rgw )
stop_rgw=1
stop_all=0
;;
nfs | ganesha.nfsd )
stop_ganesha=1
stop_all=0
;;
--crimson)
ceph_osd=crimson-osd
;;
--cephadm)
stop_cephadm=1
stop_all=0
;;
* )
printf "$usage"
exit
esac
shift
done
if [ $stop_all -eq 1 ]; then
if "${CEPH_BIN}"/ceph -s --connect-timeout 1 -c $conf_fn >/dev/null 2>&1; then
# Umount mounted filesystems from vstart cluster
do_umountall
fi
if "${CEPH_BIN}"/rbd device list -c $conf_fn >/dev/null 2>&1; then
"${CEPH_BIN}"/rbd device list -c $conf_fn | tail -n +2 |
while read DEV; do
# While it is currently possible to create an rbd image with
# whitespace chars in its name, krbd will refuse mapping such
# an image, so we can safely split on whitespace here. (The
# same goes for whitespace chars in names of the pools that
# contain rbd images).
DEV="$(echo "${DEV}" | tr -s '[:space:]' | awk '{ print $5 }')"
sudo "${CEPH_BIN}"/rbd device unmap "${DEV}" -c $conf_fn
done
if [ -n "$("${CEPH_BIN}"/rbd device list -c $conf_fn)" ]; then
echo "WARNING: Some rbd images are still mapped!" >&2
fi
fi
daemons="$(sudo $CEPHADM ls 2> /dev/null)"
if [ $? -eq 0 -a "$daemons" != "[]" ]; then
do_killcephadm
fi
# killing processes
to_kill="$ceph_osd ceph-mon ceph-mds ceph-mgr radosgw lt-radosgw apache2 ganesha.nfsd cephfs-top"
since_kill=0
for step in 0 1 1 2 3 5 8; do
sleep $step
since_kill=$((since_kill + step))
survivors=''
for p in $to_kill; do
if ! maybe_kill "$p" $step; then
survivors+=" $p"
fi
done
if [ -z "$survivors" ]; then
break
fi
to_kill=$survivors
if [ $since_kill -gt 0 ]; then
echo "WARNING: $to_kill still alive after $since_kill seconds" >&2
fi
done
pkill -u $MYUID -f valgrind.bin.\*ceph-mon
$SUDO pkill -u $MYUID -f valgrind.bin.\*$ceph_osd
pkill -u $MYUID -f valgrind.bin.\*ceph-mds
asok_dir=`dirname $("${CEPH_BIN}"/ceph-conf -c ${conf_fn} --show-config-value admin_socket)`
rm -rf "${asok_dir}"
else
[ $stop_mon -eq 1 ] && do_killall ceph-mon
[ $stop_mds -eq 1 ] && do_killall ceph-mds
[ $stop_osd -eq 1 ] && do_killall $ceph_osd
[ $stop_mgr -eq 1 ] && do_killall ceph-mgr
[ $stop_ganesha -eq 1 ] && do_killall ganesha.nfsd
[ $stop_rgw -eq 1 ] && do_killall radosgw lt-radosgw apache2
[ $stop_cephadm -eq 1 ] && do_killcephadm
fi
| 6,966 | 28.396624 | 140 | sh |
null | ceph-main/src/vnewosd.sh | #!/bin/bash -ex
OSD_SECRET=`bin/ceph-authtool --gen-print-key`
echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > /tmp/$$
OSD_UUID=`uuidgen`
OSD_ID=`bin/ceph osd new $OSD_UUID -i /tmp/$$`
rm /tmp/$$
rm dev/osd$OSD_ID/* || true
mkdir -p dev/osd$OSD_ID
bin/ceph-osd -i $OSD_ID --mkfs --key $OSD_SECRET --osd-uuid $OSD_UUID
echo "[osd.$OSD_ID]
key = $OSD_SECRET" > dev/osd$OSD_ID/keyring
H=`hostname`
echo "[osd.$OSD_ID]
host = $H" >> ceph.conf
| 437 | 26.375 | 69 | sh |
null | ceph-main/src/vstart.sh | #!/usr/bin/env bash
# -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
# vim: softtabstop=4 shiftwidth=4 expandtab
# abort on failure
set -e
quoted_print() {
for s in "$@"; do
if [[ "$s" =~ \ ]]; then
printf -- "'%s' " "$s"
else
printf -- "$s "
fi
done
printf '\n'
}
debug() {
"$@" >&2
}
prunb() {
debug quoted_print "$@" '&'
PATH=$CEPH_BIN:$PATH "$@" &
}
prun() {
debug quoted_print "$@"
PATH=$CEPH_BIN:$PATH "$@"
}
if [ -n "$VSTART_DEST" ]; then
SRC_PATH=`dirname $0`
SRC_PATH=`(cd $SRC_PATH; pwd)`
CEPH_DIR=$SRC_PATH
CEPH_BIN=${CEPH_BIN:-${PWD}/bin}
CEPH_LIB=${CEPH_LIB:-${PWD}/lib}
CEPH_CONF_PATH=$VSTART_DEST
CEPH_DEV_DIR=$VSTART_DEST/dev
CEPH_OUT_DIR=$VSTART_DEST/out
CEPH_ASOK_DIR=$VSTART_DEST/asok
CEPH_OUT_CLIENT_DIR=${CEPH_OUT_CLIENT_DIR:-$CEPH_OUT_DIR}
fi
get_cmake_variable() {
local variable=$1
grep "${variable}:" CMakeCache.txt | cut -d "=" -f 2
}
# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
# Out of tree build, learn source location from CMakeCache.txt
CEPH_ROOT=$(get_cmake_variable ceph_SOURCE_DIR)
CEPH_BUILD_DIR=`pwd`
[ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
fi
# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
[ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
[ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
[ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_ROOT/external/lib
[ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
[ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
# make install should install python extensions into PYTHONPATH
elif [ -n "$CEPH_ROOT" ]; then
[ -z "$CEPHFS_SHELL" ] && CEPHFS_SHELL=$CEPH_ROOT/src/tools/cephfs/shell/cephfs-shell
[ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
[ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
[ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
[ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
[ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_DIR/external/lib
[ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
[ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
[ -z "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON=$CEPH_ROOT/src/python-common
fi
if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
PATH=$(pwd):$PATH
fi
[ -z "$PYBIND" ] && PYBIND=./pybind
[ -n "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON="$CEPH_PYTHON_COMMON:"
CYTHON_PYTHONPATH="$CEPH_LIB/cython_modules/lib.3"
export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON$PYTHONPATH
export LD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$DYLD_LIBRARY_PATH
# Suppress the log message that indicates we are running a development
# version; vstart.sh is only used during testing and development.
export CEPH_DEV=1
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM="$NFS"
# if none of the CEPH_NUM_* numbers is specified, kill the existing
# cluster.
if [ -z "$CEPH_NUM_MON" -a \
-z "$CEPH_NUM_OSD" -a \
-z "$CEPH_NUM_MDS" -a \
-z "$CEPH_NUM_MGR" -a \
-z "$GANESHA_DAEMON_NUM" ]; then
kill_all=1
else
kill_all=0
fi
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM=0
[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_ASOK_DIR" ] && CEPH_ASOK_DIR="$CEPH_DIR/asok"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR
CEPH_OUT_CLIENT_DIR=${CEPH_OUT_CLIENT_DIR:-$CEPH_OUT_DIR}
if [ $CEPH_NUM_OSD -gt 3 ]; then
OSD_POOL_DEFAULT_SIZE=3
else
OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi
extra_conf=""
new=0
standby=0
debug=0
trace=0
ip=""
nodaemon=0
redirect=0
smallmds=0
short=0
crimson=0
ec=0
cephadm=0
parallel=true
restart=1
hitset=""
overwrite_conf=0
cephx=1 #turn cephx on by default
gssapi_authx=0
cache=""
if [ `uname` = FreeBSD ]; then
objectstore="memstore"
else
objectstore="bluestore"
fi
ceph_osd=ceph-osd
rgw_frontend="beast"
rgw_compression=""
lockdep=${LOCKDEP:-1}
spdk_enabled=0 # disable SPDK by default
pmem_enabled=0
zoned_enabled=0
io_uring_enabled=0
with_jaeger=0
with_mgr_dashboard=true
if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
[[ "$(get_cmake_variable WITH_RBD)" != "ON" ]]; then
debug echo "ceph-mgr dashboard not built - disabling."
with_mgr_dashboard=false
fi
with_mgr_restful=false
kstore_path=
declare -a block_devs
declare -a bluestore_db_devs
declare -a bluestore_wal_devs
declare -a secondary_block_devs
secondary_block_devs_type="SSD"
VSTART_SEC="client.vstart.sh"
MON_ADDR=""
DASH_URLS=""
RESTFUL_URLS=""
conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
monmap_fn="/tmp/ceph_monmap.$$"
inc_osd_num=0
msgr="21"
read -r -d '' usage <<EOF || true
usage: $0 [option]... \nex: MON=3 OSD=1 MDS=1 MGR=1 RGW=1 NFS=1 $0 -n -d
options:
-d, --debug
-t, --trace
-s, --standby_mds: Generate standby-replay MDS for each active
-l, --localhost: use localhost instead of hostname
-i <ip>: bind to specific ip
-n, --new
--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'
--nodaemon: use ceph-run as wrapper for mon/osd/mds
--redirect-output: only useful with nodaemon, directs output to log file
--smallmds: use a small mds cache memory limit
-m ip:port specify monitor address
-k keep old configuration files (default)
-x enable cephx (on by default)
-X disable cephx
-g --gssapi enable Kerberos/GSSApi authentication
-G disable Kerberos/GSSApi authentication
--hitset <pool> <hit_set_type>: enable hitset tracking
-e : create an erasure pool
-o config add extra config parameters to all sections
--rgw_port specify ceph rgw http listen port
--rgw_frontend specify the rgw frontend configuration
--rgw_arrow_flight start arrow flight frontend
--rgw_compression specify the rgw compression plugin
--seastore use seastore as crimson osd backend
-b, --bluestore use bluestore as the osd objectstore backend (default)
-K, --kstore use kstore as the osd objectstore backend
--cyanstore use cyanstore as the osd objectstore backend
--memstore use memstore as the osd objectstore backend
--cache <pool>: enable cache tiering on pool
--short: short object names only; necessary for ext4 dev
--nolockdep disable lockdep
--multimds <count> allow multimds with maximum active count
--without-dashboard: do not run using mgr dashboard
--bluestore-spdk: enable SPDK, given a comma-delimited list of NVMe device PCI IDs (e.g., 0000:81:00.0)
--bluestore-pmem: enable PMEM, given the path to a file mapped to PMEM
--msgr1: use msgr1 only
--msgr2: use msgr2 only
--msgr21: use msgr2 and msgr1
--crimson: use crimson-osd instead of ceph-osd
--crimson-foreground: use crimson-osd, but run it in the foreground
--osd-args: specify any extra osd specific options
--bluestore-devs: comma-separated list of blockdevs to use for bluestore
--bluestore-db-devs: comma-separated list of db-devs to use for bluestore
--bluestore-wal-devs: comma-separated list of wal-devs to use for bluestore
--bluestore-zoned: blockdevs listed by --bluestore-devs are zoned devices (HM-SMR HDD or ZNS SSD)
--bluestore-io-uring: enable io_uring backend
--inc-osd: append some more osds into existing vcluster
--cephadm: enable cephadm orchestrator with ~/.ssh/id_rsa[.pub]
--no-parallel: don't start all OSDs in parallel
--no-restart: don't restart processes when using ceph-run
--jaeger: use jaegertracing for tracing
--seastore-devs: comma-separated list of blockdevs to use for seastore
--seastore-secondary-devs: comma-separated list of secondary blockdevs to use for seastore
--seastore-secondary-devs-type: device type of all secondary blockdevs: HDD, SSD (default), ZNS or RANDOM_BLOCK_SSD
--crimson-smp: number of cores to use for crimson
\n
EOF
usage_exit() {
printf "$usage"
exit
}
parse_block_devs() {
local opt_name=$1
shift
local devs=$1
shift
local dev
IFS=',' read -r -a block_devs <<< "$devs"
for dev in "${block_devs[@]}"; do
if [ ! -b $dev ] || [ ! -w $dev ]; then
echo "All $opt_name must refer to writable block devices"
exit 1
fi
done
}
parse_bluestore_db_devs() {
local opt_name=$1
shift
local devs=$1
shift
local dev
IFS=',' read -r -a bluestore_db_devs <<< "$devs"
for dev in "${bluestore_db_devs[@]}"; do
if [ ! -b $dev ] || [ ! -w $dev ]; then
echo "All $opt_name must refer to writable block devices"
exit 1
fi
done
}
parse_bluestore_wal_devs() {
local opt_name=$1
shift
local devs=$1
shift
local dev
IFS=',' read -r -a bluestore_wal_devs <<< "$devs"
for dev in "${bluestore_wal_devs[@]}"; do
if [ ! -b $dev ] || [ ! -w $dev ]; then
echo "All $opt_name must refer to writable block devices"
exit 1
fi
done
}
parse_secondary_devs() {
local opt_name=$1
shift
local devs=$1
shift
local dev
IFS=',' read -r -a secondary_block_devs <<< "$devs"
for dev in "${secondary_block_devs[@]}"; do
if [ ! -b $dev ] || [ ! -w $dev ]; then
echo "All $opt_name must refer to writable block devices"
exit 1
fi
done
}
crimson_smp=1
while [ $# -ge 1 ]; do
case $1 in
-d | --debug)
debug=1
;;
-t | --trace)
trace=1
;;
-s | --standby_mds)
standby=1
;;
-l | --localhost)
ip="127.0.0.1"
;;
-i)
[ -z "$2" ] && usage_exit
ip="$2"
shift
;;
-e)
ec=1
;;
--new | -n)
new=1
;;
--inc-osd)
new=0
kill_all=0
inc_osd_num=$2
if [ "$inc_osd_num" == "" ]; then
inc_osd_num=1
else
shift
fi
;;
--short)
short=1
;;
--crimson)
crimson=1
ceph_osd=crimson-osd
nodaemon=1
msgr=2
;;
--crimson-foreground)
crimson=1
ceph_osd=crimson-osd
nodaemon=0
msgr=2
;;
--osd-args)
extra_osd_args="$2"
shift
;;
--msgr1)
msgr="1"
;;
--msgr2)
msgr="2"
;;
--msgr21)
msgr="21"
;;
--cephadm)
cephadm=1
;;
--no-parallel)
parallel=false
;;
--no-restart)
restart=0
;;
--valgrind)
[ -z "$2" ] && usage_exit
valgrind=$2
shift
;;
--valgrind_args)
valgrind_args="$2"
shift
;;
--valgrind_mds)
[ -z "$2" ] && usage_exit
valgrind_mds=$2
shift
;;
--valgrind_osd)
[ -z "$2" ] && usage_exit
valgrind_osd=$2
shift
;;
--valgrind_mon)
[ -z "$2" ] && usage_exit
valgrind_mon=$2
shift
;;
--valgrind_mgr)
[ -z "$2" ] && usage_exit
valgrind_mgr=$2
shift
;;
--valgrind_rgw)
[ -z "$2" ] && usage_exit
valgrind_rgw=$2
shift
;;
--nodaemon)
nodaemon=1
;;
--redirect-output)
redirect=1
;;
--smallmds)
smallmds=1
;;
--rgw_port)
CEPH_RGW_PORT=$2
shift
;;
--rgw_frontend)
rgw_frontend=$2
shift
;;
--rgw_arrow_flight)
rgw_flight_frontend="yes"
;;
--rgw_compression)
rgw_compression=$2
shift
;;
--kstore_path)
kstore_path=$2
shift
;;
-m)
[ -z "$2" ] && usage_exit
MON_ADDR=$2
shift
;;
-x)
cephx=1 # this is on by default; the flag exists for historical consistency
;;
-X)
cephx=0
;;
-g | --gssapi)
gssapi_authx=1
;;
-G)
gssapi_authx=0
;;
-k)
if [ ! -r $conf_fn ]; then
echo "cannot use old configuration: $conf_fn not readable." >&2
exit
fi
new=0
;;
--memstore)
objectstore="memstore"
;;
--cyanstore)
objectstore="cyanstore"
;;
--seastore)
objectstore="seastore"
;;
-b | --bluestore)
objectstore="bluestore"
;;
-K | --kstore)
objectstore="kstore"
;;
--hitset)
hitset="$hitset $2 $3"
shift
shift
;;
-o)
extra_conf+=$'\n'"$2"
shift
;;
--cache)
if [ -z "$cache" ]; then
cache="$2"
else
cache="$cache $2"
fi
shift
;;
--nolockdep)
lockdep=0
;;
--multimds)
CEPH_MAX_MDS="$2"
shift
;;
--without-dashboard)
with_mgr_dashboard=false
;;
--with-restful)
with_mgr_restful=true
;;
--seastore-devs)
parse_block_devs --seastore-devs "$2"
shift
;;
--seastore-secondary-devs)
parse_secondary_devs --seastore-secondary-devs "$2"
shift
;;
--seastore-secondary-devs-type)
secondary_block_devs_type="$2"
shift
;;
--crimson-smp)
crimson_smp=$2
shift
;;
--bluestore-spdk)
[ -z "$2" ] && usage_exit
IFS=',' read -r -a bluestore_spdk_dev <<< "$2"
spdk_enabled=1
shift
;;
--bluestore-pmem)
[ -z "$2" ] && usage_exit
bluestore_pmem_file="$2"
pmem_enabled=1
shift
;;
--bluestore-devs)
parse_block_devs --bluestore-devs "$2"
shift
;;
--bluestore-db-devs)
parse_bluestore_db_devs --bluestore-db-devs "$2"
shift
;;
--bluestore-wal-devs)
parse_bluestore_wal_devs --bluestore-wal-devs "$2"
shift
;;
--bluestore-zoned)
zoned_enabled=1
;;
--bluestore-io-uring)
io_uring_enabled=1
;;
--jaeger)
with_jaeger=1
echo "with_jaeger $with_jaeger"
;;
*)
usage_exit
esac
shift
done
if [ $kill_all -eq 1 ]; then
$SUDO $INIT_CEPH stop
fi
if [ "$new" -eq 0 ]; then
if [ -z "$CEPH_ASOK_DIR" ]; then
CEPH_ASOK_DIR=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
fi
mkdir -p $CEPH_ASOK_DIR
MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mon 2>/dev/null` && \
CEPH_NUM_MON="$MON"
OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_osd 2>/dev/null` && \
CEPH_NUM_OSD="$OSD"
MDS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mds 2>/dev/null` && \
CEPH_NUM_MDS="$MDS"
MGR=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mgr 2>/dev/null` && \
CEPH_NUM_MGR="$MGR"
RGW=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_rgw 2>/dev/null` && \
CEPH_NUM_RGW="$RGW"
NFS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_ganesha 2>/dev/null` && \
GANESHA_DAEMON_NUM="$NFS"
else
# only delete if -n
if [ -e "$conf_fn" ]; then
asok_dir=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
rm -- "$conf_fn"
if [ $asok_dir != /var/run/ceph ]; then
[ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir
fi
fi
if [ -z "$CEPH_ASOK_DIR" ]; then
CEPH_ASOK_DIR=`mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX"`
fi
fi
ARGS="-c $conf_fn"
run() {
type=$1
shift
num=$1
shift
eval "valg=\$valgrind_$type"
[ -z "$valg" ] && valg="$valgrind"
if [ -n "$valg" ]; then
prunb valgrind --tool="$valg" $valgrind_args "$@" -f
sleep 1
else
if [ "$nodaemon" -eq 0 ]; then
prun "$@"
else
if [ "$restart" -eq 0 ]; then
set -- '--no-restart' "$@"
fi
if [ "$redirect" -eq 0 ]; then
prunb ${CEPH_ROOT}/src/ceph-run "$@" -f
else
( prunb ${CEPH_ROOT}/src/ceph-run "$@" -f ) >$CEPH_OUT_DIR/$type.$num.stdout 2>&1
fi
fi
fi
}
wconf() {
if [ "$new" -eq 1 -o "$overwrite_conf" -eq 1 ]; then
cat >> "$conf_fn"
fi
}
do_rgw_conf() {
if [ $CEPH_NUM_RGW -eq 0 ]; then
return 0
fi
# set up each rgw on a sequential port, starting at $CEPH_RGW_PORT.
# each rgw's id will be its port.
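# (e.g., with the default CEPH_RGW_PORT=8000 and CEPH_NUM_RGW=2 this emits
# [client.rgw.8000] and [client.rgw.8001] sections)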
current_port=$CEPH_RGW_PORT
# allow only first rgw to start arrow_flight server/port
local flight_conf=$rgw_flight_frontend
for n in $(seq 1 $CEPH_NUM_RGW); do
wconf << EOF
[client.rgw.${current_port}]
rgw frontends = $rgw_frontend port=${current_port}${flight_conf:+,arrow_flight}
admin socket = ${CEPH_OUT_DIR}/radosgw.${current_port}.asok
debug rgw_flight = 20
EOF
current_port=$((current_port + 1))
unset flight_conf
done
}
format_conf() {
local opts=$1
local indent=" "
local opt
local formatted
while read -r opt; do
if [ -z "$formatted" ]; then
formatted="${opt}"
else
formatted+=$'\n'${indent}${opt}
fi
done <<< "$opts"
echo "$formatted"
}
prepare_conf() {
local DAEMONOPTS="
log file = $CEPH_OUT_DIR/\$name.log
admin socket = $CEPH_ASOK_DIR/\$name.asok
chdir = \"\"
pid file = $CEPH_OUT_DIR/\$name.pid
heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
"
local mgr_modules="iostat nfs"
if $with_mgr_dashboard; then
mgr_modules+=" dashboard"
fi
if $with_mgr_restful; then
mgr_modules+=" restful"
fi
local msgr_conf=''
if [ $msgr -eq 21 ]; then
msgr_conf="ms bind msgr2 = true
ms bind msgr1 = true"
fi
if [ $msgr -eq 2 ]; then
msgr_conf="ms bind msgr2 = true
ms bind msgr1 = false"
fi
if [ $msgr -eq 1 ]; then
msgr_conf="ms bind msgr2 = false
ms bind msgr1 = true"
fi
wconf <<EOF
; generated by vstart.sh on `date`
[$VSTART_SEC]
num mon = $CEPH_NUM_MON
num osd = $CEPH_NUM_OSD
num mds = $CEPH_NUM_MDS
num mgr = $CEPH_NUM_MGR
num rgw = $CEPH_NUM_RGW
num ganesha = $GANESHA_DAEMON_NUM
[global]
fsid = $(uuidgen)
osd failsafe full ratio = .99
mon osd full ratio = .99
mon osd nearfull ratio = .99
mon osd backfillfull ratio = .99
mon_max_pg_per_osd = ${MON_MAX_PG_PER_OSD:-1000}
erasure code dir = $EC_PATH
plugin dir = $CEPH_LIB
run dir = $CEPH_OUT_DIR
crash dir = $CEPH_OUT_DIR
enable experimental unrecoverable data corrupting features = *
osd_crush_chooseleaf_type = 0
debug asok assert abort = true
$(format_conf "${msgr_conf}")
$(format_conf "${extra_conf}")
$AUTOSCALER_OPTS
EOF
if [ "$with_jaeger" -eq 1 ] ; then
wconf <<EOF
jaeger_agent_port = 6831
EOF
fi
if [ "$lockdep" -eq 1 ] ; then
wconf <<EOF
lockdep = true
EOF
fi
if [ "$cephx" -eq 1 ] ; then
wconf <<EOF
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
EOF
elif [ "$gssapi_authx" -eq 1 ] ; then
wconf <<EOF
auth cluster required = gss
auth service required = gss
auth client required = gss
gss ktab client file = $CEPH_DEV_DIR/gss_\$name.keytab
EOF
else
wconf <<EOF
auth cluster required = none
auth service required = none
auth client required = none
ms mon client mode = crc
EOF
fi
if [ "$short" -eq 1 ]; then
COSDSHORT=" osd max object name len = 460
osd max object namespace len = 64"
fi
if [ "$objectstore" == "bluestore" ]; then
if [ "$spdk_enabled" -eq 1 ] || [ "$pmem_enabled" -eq 1 ]; then
BLUESTORE_OPTS=" bluestore_block_db_path = \"\"
bluestore_block_db_size = 0
bluestore_block_db_create = false
bluestore_block_wal_path = \"\"
bluestore_block_wal_size = 0
bluestore_block_wal_create = false
bluestore_spdk_mem = 2048"
else
BLUESTORE_OPTS=" bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
bluestore block db size = 1073741824
bluestore block db create = true
bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
bluestore block wal size = 1048576000
bluestore block wal create = true"
if [ ${#block_devs[@]} -gt 0 ] || \
[ ${#bluestore_db_devs[@]} -gt 0 ] || \
[ ${#bluestore_wal_devs[@]} -gt 0 ]; then
# when using a physical disk, do not create files for db/wal
BLUESTORE_OPTS=""
fi
fi
if [ "$zoned_enabled" -eq 1 ]; then
BLUESTORE_OPTS+="
bluestore min alloc size = 65536
bluestore prefer deferred size = 0
bluestore prefer deferred size hdd = 0
bluestore prefer deferred size ssd = 0
bluestore allocator = zoned"
fi
if [ "$io_uring_enabled" -eq 1 ]; then
BLUESTORE_OPTS+="
bdev ioring = true"
fi
fi
wconf <<EOF
[client]
$CCLIENTDEBUG
keyring = $keyring_fn
log file = $CEPH_OUT_CLIENT_DIR/\$name.\$pid.log
admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok
; needed for s3tests
rgw crypt s3 kms backend = testing
rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
rgw crypt require ssl = false
; uncomment the following to set LC days as the value in seconds;
; needed for passing lc time based s3-tests (can be verbose)
; rgw lc debug interval = 10
$(format_conf "${extra_conf}")
EOF
do_rgw_conf
wconf << EOF
[mds]
$CMDSDEBUG
$DAEMONOPTS
mds data = $CEPH_DEV_DIR/mds.\$id
mds root ino uid = `id -u`
mds root ino gid = `id -g`
$(format_conf "${extra_conf}")
[mgr]
mgr disabled modules = rook
mgr data = $CEPH_DEV_DIR/mgr.\$id
mgr module path = $MGR_PYTHON_PATH
cephadm path = $CEPH_BIN/cephadm
$DAEMONOPTS
$(format_conf "${extra_conf}")
[osd]
$DAEMONOPTS
osd_check_max_object_name_len_on_startup = false
osd data = $CEPH_DEV_DIR/osd\$id
osd journal = $CEPH_DEV_DIR/osd\$id/journal
osd journal size = 100
osd class tmp = out
osd class dir = $OBJCLASS_PATH
osd class load list = *
osd class default list = *
osd fast shutdown = false
bluestore fsck on mount = true
bluestore block create = true
$BLUESTORE_OPTS
; kstore
kstore fsck on mount = true
osd objectstore = $objectstore
$COSDSHORT
$(format_conf "${extra_conf}")
[mon]
mon_data_avail_crit = 1
mgr initial modules = $mgr_modules
$DAEMONOPTS
$CMONDEBUG
$(format_conf "${extra_conf}")
mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
auth allow insecure global id reclaim = false
EOF
if [ "$crimson" -eq 1 ]; then
wconf <<EOF
osd pool default crimson = true
EOF
fi
}
write_logrotate_conf() {
out_dir=$(pwd)"/out/*.log"
cat << EOF
$out_dir
{
rotate 5
size 1G
copytruncate
compress
notifempty
missingok
sharedscripts
postrotate
# NOTE: ensure that the absence of one of the following processes
# doesn't abort the logrotate command.
killall -u $USER -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || echo ""
endscript
}
EOF
}
init_logrotate() {
logrotate_conf_path=$(pwd)"/logrotate.conf"
logrotate_state_path=$(pwd)"/logrotate.state"
if ! test -a $logrotate_conf_path; then
if test -a $logrotate_state_path; then
rm -f $logrotate_state_path
fi
write_logrotate_conf > $logrotate_conf_path
fi
}
start_mon() {
local MONS=""
local count=0
for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
do
[ $count -eq $CEPH_NUM_MON ] && break;
count=$(($count + 1))
if [ -z "$MONS" ]; then
MONS="$f"
else
MONS="$MONS $f"
fi
done
if [ "$new" -eq 1 ]; then
if [ "$(echo "$IP" | grep '^127\.')" ]; then
echo
echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
echo " connect. either adjust /etc/hosts, or edit this script to use your"
echo " machine's real IP."
echo
fi
prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin \
--cap mon 'allow *' \
--cap osd 'allow *' \
--cap mds 'allow *' \
--cap mgr 'allow *' \
"$keyring_fn"
# build a fresh fs monmap, mon fs
local params=()
local count=0
local mon_host=""
for f in $MONS
do
if [ $msgr -eq 1 ]; then
A="v1:$IP:$(($CEPH_PORT+$count+1))"
fi
if [ $msgr -eq 2 ]; then
A="v2:$IP:$(($CEPH_PORT+$count+1))"
fi
if [ $msgr -eq 21 ]; then
A="[v2:$IP:$(($CEPH_PORT+$count)),v1:$IP:$(($CEPH_PORT+$count+1))]"
fi
params+=("--addv" "$f" "$A")
mon_host="$mon_host $A"
wconf <<EOF
[mon.$f]
host = $HOSTNAME
mon data = $CEPH_DEV_DIR/mon.$f
EOF
count=$(($count + 2))
done
wconf <<EOF
[global]
mon host = $mon_host
EOF
prun "$CEPH_BIN/monmaptool" --create --clobber "${params[@]}" --print "$monmap_fn"
for f in $MONS
do
prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
done
prun rm -- "$monmap_fn"
fi
# start monitors
for f in $MONS
do
run 'mon' $f $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
done
if [ "$crimson" -eq 1 ]; then
$CEPH_BIN/ceph osd set-allow-crimson --yes-i-really-mean-it
fi
}
start_osd() {
if [ $inc_osd_num -gt 0 ]; then
old_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
start=$old_maxosd
end=$(($start-1+$inc_osd_num))
overwrite_conf=1 # fake wconf
else
start=0
end=$(($CEPH_NUM_OSD-1))
fi
local osds_wait
for osd in `seq $start $end`
do
local extra_seastar_args
if [ "$ceph_osd" == "crimson-osd" ]; then
bottom_cpu=$(( osd * crimson_smp ))
top_cpu=$(( bottom_cpu + crimson_smp - 1 ))
# pin each OSD to its own contiguous range of CPUs
extra_seastar_args="--cpuset $bottom_cpu-$top_cpu"
if [ "$debug" -ne 0 ]; then
extra_seastar_args+=" --debug"
fi
if [ "$trace" -ne 0 ]; then
extra_seastar_args+=" --trace"
fi
fi
if [ "$new" -eq 1 -o $inc_osd_num -gt 0 ]; then
wconf <<EOF
[osd.$osd]
host = $HOSTNAME
EOF
if [ "$spdk_enabled" -eq 1 ]; then
wconf <<EOF
bluestore_block_path = spdk:${bluestore_spdk_dev[$osd]}
EOF
elif [ "$pmem_enabled" -eq 1 ]; then
wconf <<EOF
bluestore_block_path = ${bluestore_pmem_file}
EOF
fi
rm -rf $CEPH_DEV_DIR/osd$osd || true
if command -v btrfs > /dev/null; then
for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
fi
if [ -n "$kstore_path" ]; then
ln -s $kstore_path $CEPH_DEV_DIR/osd$osd
else
mkdir -p $CEPH_DEV_DIR/osd$osd
if [ -n "${block_devs[$osd]}" ]; then
dd if=/dev/zero of=${block_devs[$osd]} bs=1M count=1
ln -s ${block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block
fi
if [ -n "${bluestore_db_devs[$osd]}" ]; then
dd if=/dev/zero of=${bluestore_db_devs[$osd]} bs=1M count=1
ln -s ${bluestore_db_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block.db
fi
if [ -n "${bluestore_wal_devs[$osd]}" ]; then
dd if=/dev/zero of=${bluestore_wal_devs[$osd]} bs=1M count=1
ln -s ${bluestore_wal_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block.wal
fi
if [ -n "${secondary_block_devs[$osd]}" ]; then
dd if=/dev/zero of=${secondary_block_devs[$osd]} bs=1M count=1
mkdir -p $CEPH_DEV_DIR/osd$osd/block.${secondary_block_devs_type}.1
ln -s ${secondary_block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block.${secondary_block_devs_type}.1/block
fi
fi
if [ "$objectstore" == "bluestore" ]; then
wconf <<EOF
bluestore fsck on mount = false
EOF
fi
local uuid=`uuidgen`
echo "add osd$osd $uuid"
OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd$osd/new.json
ceph_adm osd new $uuid -i $CEPH_DEV_DIR/osd$osd/new.json
rm $CEPH_DEV_DIR/osd$osd/new.json
prun $SUDO $CEPH_BIN/$ceph_osd $extra_osd_args -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid $extra_seastar_args \
2>&1 | tee $CEPH_OUT_DIR/osd-mkfs.$osd.log
local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
cat > $key_fn<<EOF
[osd.$osd]
key = $OSD_SECRET
EOF
fi
echo start osd.$osd
local osd_pid
echo 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
$extra_seastar_args $extra_osd_args \
-i $osd $ARGS $COSD_ARGS
run 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
$extra_seastar_args $extra_osd_args \
-i $osd $ARGS $COSD_ARGS &
osd_pid=$!
if $parallel; then
osds_wait=$osd_pid
else
wait $osd_pid
fi
done
if $parallel; then
for p in $osds_wait; do
wait $p
done
debug echo OSDs started
fi
if [ $inc_osd_num -gt 0 ]; then
# update num osd
new_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
sed -i "s/num osd = .*/num osd = $new_maxosd/g" $conf_fn
fi
}
create_mgr_restful_secret() {
while ! ceph_adm -h | grep -c -q ^restful ; do
debug echo 'waiting for mgr restful module to start'
sleep 1
done
local secret_file
if ceph_adm restful create-self-signed-cert > /dev/null; then
secret_file=`mktemp`
ceph_adm restful create-key admin -o $secret_file
RESTFUL_SECRET=`cat $secret_file`
rm $secret_file
else
debug echo MGR Restful is not working, perhaps the package is not installed?
fi
}
start_mgr() {
local mgr=0
local ssl=${DASHBOARD_SSL:-1}
# avoid monitors on nearby ports (which test/*.sh use extensively)
MGR_PORT=$(($CEPH_PORT + 1000))
PROMETHEUS_PORT=9283
for name in x y z a b c d e f g h i j k l m n o p
do
[ $mgr -eq $CEPH_NUM_MGR ] && break
mgr=$(($mgr + 1))
if [ "$new" -eq 1 ]; then
mkdir -p $CEPH_DEV_DIR/mgr.$name
key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
$SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'
wconf <<EOF
[mgr.$name]
host = $HOSTNAME
EOF
if $with_mgr_dashboard ; then
local port_option="ssl_server_port"
local http_proto="https"
if [ "$ssl" == "0" ]; then
port_option="server_port"
http_proto="http"
ceph_adm config set mgr mgr/dashboard/ssl false --force
fi
ceph_adm config set mgr mgr/dashboard/$name/$port_option $MGR_PORT --force
if [ $mgr -eq 1 ]; then
DASH_URLS="$http_proto://$IP:$MGR_PORT"
else
DASH_URLS+=", $http_proto://$IP:$MGR_PORT"
fi
fi
MGR_PORT=$(($MGR_PORT + 1000))
ceph_adm config set mgr mgr/prometheus/$name/server_port $PROMETHEUS_PORT --force
PROMETHEUS_PORT=$(($PROMETHEUS_PORT + 1000))
ceph_adm config set mgr mgr/restful/$name/server_port $MGR_PORT --force
if [ $mgr -eq 1 ]; then
RESTFUL_URLS="https://$IP:$MGR_PORT"
else
RESTFUL_URLS+=", https://$IP:$MGR_PORT"
fi
MGR_PORT=$(($MGR_PORT + 1000))
fi
debug echo "Starting mgr.${name}"
run 'mgr' $name $CEPH_BIN/ceph-mgr -i $name $ARGS
done
while ! ceph_adm mgr stat | jq -e '.available'; do
debug echo 'waiting for mgr to become available'
sleep 1
done
if [ "$new" -eq 1 ]; then
# setting login credentials for dashboard
if $with_mgr_dashboard; then
while ! ceph_adm -h | grep -c -q ^dashboard ; do
debug echo 'waiting for mgr dashboard module to start'
sleep 1
done
DASHBOARD_ADMIN_SECRET_FILE="${CEPH_CONF_PATH}/dashboard-admin-secret.txt"
printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}"
ceph_adm dashboard ac-user-create admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" \
administrator --force-password
if [ "$ssl" != "0" ]; then
if ! ceph_adm dashboard create-self-signed-cert; then
debug echo dashboard module not working correctly!
fi
fi
fi
if $with_mgr_restful; then
create_mgr_restful_secret
fi
fi
if [ "$cephadm" -eq 1 ]; then
debug echo Enabling cephadm orchestrator
if [ "$new" -eq 1 ]; then
digest=$(curl -s \
https://hub.docker.com/v2/repositories/ceph/daemon-base/tags/latest-master-devel \
| jq -r '.images[0].digest')
ceph_adm config set global container_image "docker.io/ceph/daemon-base@$digest"
fi
ceph_adm config-key set mgr/cephadm/ssh_identity_key -i ~/.ssh/id_rsa
ceph_adm config-key set mgr/cephadm/ssh_identity_pub -i ~/.ssh/id_rsa.pub
ceph_adm mgr module enable cephadm
ceph_adm orch set backend cephadm
ceph_adm orch host add "$(hostname)"
ceph_adm orch apply crash '*'
ceph_adm config set mgr mgr/cephadm/allow_ptrace true
fi
}
start_mds() {
local mds=0
for name in a b c d e f g h i j k l m n o p
do
[ $mds -eq $CEPH_NUM_MDS ] && break
mds=$(($mds + 1))
if [ "$new" -eq 1 ]; then
prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
key_fn=$CEPH_DEV_DIR/mds.$name/keyring
wconf <<EOF
[mds.$name]
host = $HOSTNAME
EOF
if [ "$standby" -eq 1 ]; then
mkdir -p $CEPH_DEV_DIR/mds.${name}s
wconf <<EOF
mds standby for rank = $mds
[mds.${name}s]
mds standby replay = true
mds standby for name = ${name}
EOF
fi
prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow rw tag cephfs *=*' mds 'allow' mgr 'allow profile mds'
if [ "$standby" -eq 1 ]; then
prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
"$CEPH_DEV_DIR/mds.${name}s/keyring"
ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
fi
fi
run 'mds' $name $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
if [ "$standby" -eq 1 ]; then
run 'mds' $name $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
fi
#valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
#$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
#ceph_adm mds set max_mds 2
done
if [ $new -eq 1 ]; then
if [ "$CEPH_NUM_FS" -gt "0" ] ; then
sleep 5 # time for MDS to come up as standby to avoid health warnings on fs creation
if [ "$CEPH_NUM_FS" -gt "1" ] ; then
ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
fi
# wait for volume module to load
while ! ceph_adm fs volume ls ; do sleep 1 ; done
local fs=0
for name in a b c d e f g h i j k l m n o p
do
ceph_adm fs volume create ${name}
ceph_adm fs authorize ${name} "client.fs_${name}" / rwp >> "$keyring_fn"
fs=$(($fs + 1))
[ $fs -eq $CEPH_NUM_FS ] && break
done
fi
fi
}
# Ganesha daemons require the nfs-ganesha, nfs-ganesha-ceph, nfs-ganesha-rados-grace
# and nfs-ganesha-rados-urls (version 3.3 and above) packages to be installed. On
# Fedora>=31 these packages can be installed directly with 'dnf'. For CentOS>=8
# the packages are available at
# https://wiki.centos.org/SpecialInterestGroup/Storage
# Similarly for Ubuntu>=16.04 follow the instructions on
# https://launchpad.net/~nfs-ganesha
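# A sketch of the install step on Fedora (package names taken from the note
# above; adjust the package manager for your distro):
#   dnf install nfs-ganesha nfs-ganesha-ceph nfs-ganesha-rados-grace \
#       nfs-ganesha-rados-urls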
start_ganesha() {
cluster_id="vstart"
GANESHA_PORT=$(($CEPH_PORT + 4000))
local ganesha=0
test_user="$cluster_id"
pool_name=".nfs"
namespace=$cluster_id
url="rados://$pool_name/$namespace/conf-nfs.$test_user"
prun ceph_adm auth get-or-create client.$test_user \
mon "allow r" \
osd "allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \
mds "allow rw path=/" \
>> "$keyring_fn"
ceph_adm mgr module enable test_orchestrator
ceph_adm orch set backend test_orchestrator
ceph_adm test_orchestrator load_data -i $CEPH_ROOT/src/pybind/mgr/test_orchestrator/dummy_data.json
prun ceph_adm nfs cluster create $cluster_id
prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path "/cephfs"
for name in a b c d e f g h i j k l m n o p
do
[ $ganesha -eq $GANESHA_DAEMON_NUM ] && break
port=$(($GANESHA_PORT + ganesha))
ganesha=$(($ganesha + 1))
ganesha_dir="$CEPH_DEV_DIR/ganesha.$name"
prun rm -rf $ganesha_dir
prun mkdir -p $ganesha_dir
echo "NFS_CORE_PARAM {
Enable_NLM = false;
Enable_RQUOTA = false;
Protocols = 4;
NFS_Port = $port;
}
MDCACHE {
Dir_Chunk = 0;
}
NFSv4 {
RecoveryBackend = rados_cluster;
Minor_Versions = 1, 2;
}
RADOS_KV {
pool = '$pool_name';
namespace = $namespace;
UserId = $test_user;
nodeid = $name;
}
RADOS_URLS {
Userid = $test_user;
watch_url = '$url';
}
%url $url" > "$ganesha_dir/ganesha-$name.conf"
wconf <<EOF
[ganesha.$name]
host = $HOSTNAME
ip = $IP
port = $port
ganesha data = $ganesha_dir
pid file = $CEPH_OUT_DIR/ganesha-$name.pid
EOF
prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace add $name
prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace
prun env CEPH_CONF="${conf_fn}" ganesha.nfsd -L "$CEPH_OUT_DIR/ganesha-$name.log" -f "$ganesha_dir/ganesha-$name.conf" -p "$CEPH_OUT_DIR/ganesha-$name.pid" -N NIV_DEBUG
# Wait a few seconds for the grace period to be removed
sleep 2
prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace
echo "$test_user ganesha daemon $name started on port: $port"
done
}
if [ "$debug" -eq 0 ]; then
CMONDEBUG='
debug mon = 10
debug ms = 1'
CCLIENTDEBUG=''
CMDSDEBUG=''
else
debug echo "** going verbose **"
CMONDEBUG='
debug osd = 20
debug mon = 20
debug paxos = 20
debug auth = 20
debug mgrc = 20
debug ms = 1'
CCLIENTDEBUG='
debug client = 20'
CMDSDEBUG='
debug mds = 20'
fi
# Crimson doesn't support PG merge/split yet.
if [ "$ceph_osd" == "crimson-osd" ]; then
AUTOSCALER_OPTS='
osd_pool_default_pg_autoscale_mode = off'
fi
if [ -n "$MON_ADDR" ]; then
CMON_ARGS=" -m "$MON_ADDR
COSD_ARGS=" -m "$MON_ADDR
CMDS_ARGS=" -m "$MON_ADDR
fi
if [ -z "$CEPH_PORT" ]; then
while true
do
CEPH_PORT=$((RANDOM % 1000 + 40000))
ss -a -n | grep -E "\<LISTEN\>.+:${CEPH_PORT}\s+" 1>/dev/null 2>&1 || break
done
fi
[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
# use sudo if the dev dir needs it (flagged by $CEPH_DEV_DIR/sudo, e.g. for btrfs)
[ -d $CEPH_DEV_DIR/osd0/. ] && [ -e $CEPH_DEV_DIR/sudo ] && SUDO="sudo"
if [ $inc_osd_num -eq 0 ]; then
prun $SUDO rm -f core*
fi
[ -d $CEPH_ASOK_DIR ] || mkdir -p $CEPH_ASOK_DIR
[ -d $CEPH_OUT_DIR ] || mkdir -p $CEPH_OUT_DIR
[ -d $CEPH_DEV_DIR ] || mkdir -p $CEPH_DEV_DIR
[ -d $CEPH_OUT_CLIENT_DIR ] || mkdir -p $CEPH_OUT_CLIENT_DIR
if [ $inc_osd_num -eq 0 ]; then
$SUDO find "$CEPH_OUT_DIR" -type f -delete
fi
[ -d gmon ] && $SUDO rm -rf gmon/*
[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && [ -e $keyring_fn ] && rm $keyring_fn
# figure machine's ip
HOSTNAME=`hostname -s`
if [ -n "$ip" ]; then
IP="$ip"
else
echo hostname $HOSTNAME
if [ -x "$(which ip 2>/dev/null)" ]; then
IP_CMD="ip addr"
else
IP_CMD="ifconfig"
fi
# pick the first IPv4 address that is not localhost
IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
# if nothing left, try using localhost address, it might work
if [ -z "$IP" ]; then IP="127.0.0.1"; fi
fi
echo "ip $IP"
echo "port $CEPH_PORT"
[ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph
ceph_adm() {
if [ "$cephx" -eq 1 ]; then
prun $SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
else
prun $SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
fi
}
if [ $inc_osd_num -gt 0 ]; then
start_osd
exit
fi
if [ "$new" -eq 1 ]; then
prepare_conf
fi
if [ $CEPH_NUM_MON -gt 0 ]; then
start_mon
debug echo Populating config ...
cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[global]
osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
osd_pool_default_min_size = 1
[mon]
mon_osd_reporter_subtree_level = osd
mon_data_avail_warn = 2
mon_data_avail_crit = 1
mon_allow_pool_delete = true
mon_allow_pool_size_one = true
[osd]
osd_scrub_load_threshold = 2000
osd_debug_op_order = true
osd_debug_misdirected_ops = true
osd_copyfrom_max_chunk = 524288
[mds]
mds_debug_frag = true
mds_debug_auth_pins = true
mds_debug_subtrees = true
[mgr]
mgr/telemetry/nag = false
mgr/telemetry/enable = false
EOF
if [ "$debug" -ne 0 ]; then
debug echo Setting debug configs ...
cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[mgr]
debug_ms = 1
debug_mgr = 20
debug_monc = 20
debug_mon = 20
[osd]
debug_ms = 1
debug_osd = 25
debug_objecter = 20
debug_monc = 20
debug_mgrc = 20
debug_journal = 20
debug_bluestore = 20
debug_bluefs = 20
debug_rocksdb = 20
debug_bdev = 20
debug_reserver = 10
debug_objclass = 20
[mds]
debug_ms = 1
debug_mds = 20
debug_monc = 20
debug_mgrc = 20
mds_debug_scatterstat = true
mds_verify_scatter = true
EOF
fi
if [ "$cephadm" -gt 0 ]; then
debug echo Setting mon public_network ...
public_network=$(ip route list | grep -w "$IP" | awk '{print $1}')
ceph_adm config set mon public_network $public_network
fi
fi
if [ "$ceph_osd" == "crimson-osd" ]; then
$CEPH_BIN/ceph -c $conf_fn config set osd crimson_seastar_smp $crimson_smp
fi
if [ $CEPH_NUM_MGR -gt 0 ]; then
start_mgr
fi
# osd
if [ $CEPH_NUM_OSD -gt 0 ]; then
start_osd
fi
# mds
if [ "$smallmds" -eq 1 ]; then
wconf <<EOF
[mds]
mds log max segments = 2
# Default 'mds cache memory limit' is 1GiB, and here we set it to 100MiB.
mds cache memory limit = 100M
EOF
fi
if [ $CEPH_NUM_MDS -gt 0 ]; then
start_mds
# key with access to all FS
ceph_adm fs authorize \* "client.fs" / rwp >> "$keyring_fn"
fi
# Don't set max_mds until all the daemons are started, otherwise
# the intended standbys might end up in active roles.
if [ "$CEPH_MAX_MDS" -gt 1 ]; then
sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
fi
fs=0
for name in a b c d e f g h i j k l m n o p
do
[ $fs -eq $CEPH_NUM_FS ] && break
fs=$(($fs + 1))
if [ "$CEPH_MAX_MDS" -gt 1 ]; then
ceph_adm fs set "${name}" max_mds "$CEPH_MAX_MDS"
fi
done
# mgr
if [ "$ec" -eq 1 ]; then
ceph_adm <<EOF
osd erasure-code-profile set ec-profile m=2 k=2
osd pool create ec erasure ec-profile
EOF
fi
do_cache() {
while [ -n "$*" ]; do
p="$1"
shift
debug echo "creating cache for pool $p ..."
ceph_adm <<EOF
osd pool create ${p}-cache
osd tier add $p ${p}-cache
osd tier cache-mode ${p}-cache writeback
osd tier set-overlay $p ${p}-cache
EOF
done
}
do_cache $cache
do_hitsets() {
while [ -n "$*" ]; do
pool="$1"
type="$2"
shift
shift
debug echo "setting hit_set on pool $pool type $type ..."
ceph_adm <<EOF
osd pool set $pool hit_set_type $type
osd pool set $pool hit_set_count 8
osd pool set $pool hit_set_period 30
EOF
done
}
do_hitsets $hitset
do_rgw_create_bucket()
{
# Create RGW Bucket
local rgw_python_file='rgw-create-bucket.py'
echo "import boto
import boto.s3.connection
conn = boto.connect_s3(
aws_access_key_id = '$s3_akey',
aws_secret_access_key = '$s3_skey',
host = '$HOSTNAME',
port = 80,
is_secure=False,
calling_format = boto.s3.connection.OrdinaryCallingFormat(),
)
bucket = conn.create_bucket('nfs-bucket')
print('created new bucket')" > "$CEPH_OUT_DIR/$rgw_python_file"
prun python $CEPH_OUT_DIR/$rgw_python_file
}
do_rgw_create_users()
{
# Create S3 user
s3_akey='0555b35654ad1656d804'
s3_skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
debug echo "setting up user testid"
$CEPH_BIN/radosgw-admin user create --uid testid --access-key $s3_akey --secret $s3_skey --display-name 'M. Tester' --email [email protected] -c $conf_fn > /dev/null
# Create S3-test users
# See: https://github.com/ceph/s3-tests
debug echo "setting up s3-test users"
$CEPH_BIN/radosgw-admin user create \
--uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
--access-key ABCDEFGHIJKLMNOPQRST \
--secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
--display-name youruseridhere \
--email [email protected] --caps="user-policy=*" -c $conf_fn > /dev/null
$CEPH_BIN/radosgw-admin user create \
--uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
--access-key NOPQRSTUVWXYZABCDEFG \
--secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
--display-name john.doe \
--email [email protected] -c $conf_fn > /dev/null
$CEPH_BIN/radosgw-admin user create \
--tenant testx \
--uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
--access-key HIJKLMNOPQRSTUVWXYZA \
--secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
--display-name tenanteduser \
--email [email protected] -c $conf_fn > /dev/null
# Create Swift user
debug echo "setting up user tester"
$CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null
echo ""
echo "S3 User Info:"
echo " access key: $s3_akey"
echo " secret key: $s3_skey"
echo ""
echo "Swift User Info:"
echo " account : test"
echo " user : tester"
echo " password : testing"
echo ""
}
do_rgw()
{
if [ "$new" -eq 1 ]; then
do_rgw_create_users
if [ -n "$rgw_compression" ]; then
debug echo "setting compression type=$rgw_compression"
$CEPH_BIN/radosgw-admin zone placement modify -c $conf_fn --rgw-zone=default --placement-id=default-placement --compression=$rgw_compression > /dev/null
fi
fi
if [ -n "$rgw_flight_frontend" ] ;then
debug echo "starting arrow_flight frontend on first rgw"
fi
# Start server
if [ "$cephadm" -gt 0 ]; then
ceph_adm orch apply rgw rgwTest
return
fi
RGWDEBUG=""
if [ "$debug" -ne 0 ]; then
RGWDEBUG="--debug-rgw=20 --debug-ms=1"
fi
local CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT}"
local CEPH_RGW_HTTPS="${CEPH_RGW_PORT: -1}"
if [[ "${CEPH_RGW_HTTPS}" = "s" ]]; then
CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT::-1}"
else
CEPH_RGW_HTTPS=""
fi
RGWSUDO=
[ $CEPH_RGW_PORT_NUM -lt 1024 ] && RGWSUDO=sudo
current_port=$CEPH_RGW_PORT
# allow only first rgw to start arrow_flight server/port
local flight_conf=$rgw_flight_frontend
local i=0
for n in $(seq 1 $CEPH_NUM_RGW); do
rgw_name="client.rgw.${current_port}"
ceph_adm auth get-or-create $rgw_name \
mon 'allow rw' \
osd 'allow rwx' \
mgr 'allow rw' \
>> "$keyring_fn"
debug echo start rgw on http${CEPH_RGW_HTTPS}://localhost:${current_port}
run 'rgw' $current_port $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn \
--log-file=${CEPH_OUT_DIR}/radosgw.${current_port}.log \
--admin-socket=${CEPH_OUT_DIR}/radosgw.${current_port}.asok \
--pid-file=${CEPH_OUT_DIR}/radosgw.${current_port}.pid \
--rgw_luarocks_location=${CEPH_OUT_DIR}/luarocks \
${RGWDEBUG} \
-n ${rgw_name} \
"--rgw_frontends=${rgw_frontend} port=${current_port}${CEPH_RGW_HTTPS}${flight_conf:+,arrow_flight}"
i=$(($i + 1))
[ $i -eq $CEPH_NUM_RGW ] && break
current_port=$((current_port+1))
unset flight_conf
done
}
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
do_rgw
fi
# Ganesha Daemons
if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
pseudo_path="/cephfs"
if [ "$cephadm" -gt 0 ]; then
cluster_id="vstart"
port="2049"
prun ceph_adm nfs cluster create $cluster_id
if [ $CEPH_NUM_MDS -gt 0 ]; then
prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path $pseudo_path
echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
fi
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
pseudo_path="/rgw"
do_rgw_create_bucket
prun ceph_adm nfs export create rgw --cluster-id $cluster_id --pseudo-path $pseudo_path --bucket "nfs-bucket"
echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
fi
else
start_ganesha
echo "Mount using: mount -t nfs -o port=<ganesha-port-num> $IP:$pseudo_path mountpoint"
fi
fi
docker_service(){
local service=''
# prefer podman
if command -v podman > /dev/null; then
service="podman"
elif pgrep -f docker > /dev/null; then
service="docker"
fi
if [ -n "$service" ]; then
echo "using $service for deploying jaeger..."
# remove any exited jaeger container so it can be recreated
if [ "$($service ps -aq -f status=exited -f name=jaeger)" ]; then
$service rm jaeger
fi
if [ ! "$(podman ps -aq -f name=jaeger)" ]; then
$service "$@"
fi
else
echo "cannot find docker or podman, please restart service and rerun."
fi
}
echo ""
if [ $with_jaeger -eq 1 ]; then
debug echo "Enabling jaegertracing..."
docker_service run -d --name jaeger \
-p 5775:5775/udp \
-p 6831:6831/udp \
-p 6832:6832/udp \
-p 5778:5778 \
-p 16686:16686 \
-p 14268:14268 \
-p 14250:14250 \
quay.io/jaegertracing/all-in-one
fi
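# (the Jaeger query UI is then reachable via the 16686 port mapped above,
# e.g. http://localhost:16686)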
debug echo "vstart cluster complete. Use stop.sh to stop. See out/* (e.g. 'tail -f out/????') for debug output."
echo ""
if [ "$new" -eq 1 ]; then
if $with_mgr_dashboard; then
cat <<EOF
dashboard urls: $DASH_URLS
w/ user/pass: admin / admin
EOF
fi
if $with_mgr_restful; then
cat <<EOF
restful urls: $RESTFUL_URLS
w/ user/pass: admin / $RESTFUL_SECRET
EOF
fi
fi
echo ""
# add header to the environment file
{
echo "#"
echo "# source this file into your shell to set up the environment."
echo "# For example:"
echo "# $ . $CEPH_DIR/vstart_environment.sh"
echo "#"
} > $CEPH_DIR/vstart_environment.sh
{
echo "export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON\$PYTHONPATH"
echo "export LD_LIBRARY_PATH=$CEPH_LIB:\$LD_LIBRARY_PATH"
echo "export PATH=$CEPH_DIR/bin:\$PATH"
if [ "$CEPH_DIR" != "$PWD" ]; then
echo "export CEPH_CONF=$conf_fn"
echo "export CEPH_KEYRING=$keyring_fn"
fi
if [ -n "$CEPHFS_SHELL" ]; then
echo "alias cephfs-shell=$CEPHFS_SHELL"
fi
} | tee -a $CEPH_DIR/vstart_environment.sh
echo "CEPH_DEV=1"
# always keep this section at the very bottom of this file
STRAY_CONF_PATH="/etc/ceph/ceph.conf"
if [ -f "$STRAY_CONF_PATH" -a -n "$conf_fn" -a ! "$conf_fn" -ef "$STRAY_CONF_PATH" ]; then
echo ""
echo ""
echo "WARNING:"
echo " Please remove stray $STRAY_CONF_PATH if not needed."
echo " Your conf files $conf_fn and $STRAY_CONF_PATH may not be in sync"
echo " and may lead to undesired results."
echo ""
echo "NOTE:"
echo " Remember to restart cluster after removing $STRAY_CONF_PATH"
fi
init_logrotate
| 55,884 | 28.121939 | 176 | sh |
null | ceph-main/src/arch/arm.h | #ifndef CEPH_ARCH_ARM_H
#define CEPH_ARCH_ARM_H
#ifdef __cplusplus
extern "C" {
#endif
extern int ceph_arch_neon; /* true if we have ARM NEON or ASIMD abilities */
extern int ceph_arch_aarch64_crc32; /* true if we have AArch64 CRC32/CRC32C abilities */
extern int ceph_arch_aarch64_pmull; /* true if we have AArch64 PMULL abilities */
extern int ceph_arch_arm_probe(void);
#ifdef __cplusplus
}
#endif
#endif
| 416 | 20.947368 | 89 | h |
null | ceph-main/src/arch/intel.h | #ifndef CEPH_ARCH_INTEL_H
#define CEPH_ARCH_INTEL_H
#ifdef __cplusplus
extern "C" {
#endif
extern int ceph_arch_intel_pclmul; /* true if we have PCLMUL features */
extern int ceph_arch_intel_sse42; /* true if we have sse 4.2 features */
extern int ceph_arch_intel_sse41; /* true if we have sse 4.1 features */
extern int ceph_arch_intel_ssse3; /* true if we have ssse 3 features */
extern int ceph_arch_intel_sse3; /* true if we have sse 3 features */
extern int ceph_arch_intel_sse2; /* true if we have sse 2 features */
extern int ceph_arch_intel_aesni; /* true if we have aesni features */
extern int ceph_arch_intel_probe(void);
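/*
 * Typical usage (a sketch; ceph_arch_intel_probe() fills in the flags above):
 *
 *   ceph_arch_intel_probe();
 *   if (ceph_arch_intel_sse42) {
 *       // take an SSE 4.2-optimized code path
 *   }
 */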
#ifdef __cplusplus
}
#endif
#endif
| 681 | 28.652174 | 73 | h |
null | ceph-main/src/arch/ppc.h | /* Copyright (C) 2017 International Business Machines Corp.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef CEPH_ARCH_PPC_H
#define CEPH_ARCH_PPC_H
#ifdef __cplusplus
extern "C" {
#endif
extern int ceph_arch_ppc_crc32;
extern int ceph_arch_ppc_probe(void);
#ifdef __cplusplus
}
#endif
#endif
| 540 | 20.64 | 64 | h |