ceph-main/qa/workunits/rbd/rbd-nbd.sh
#!/usr/bin/env bash
set -ex
. $(dirname $0)/../../standalone/ceph-helpers.sh
POOL=rbd
ANOTHER_POOL=new_default_pool$$
NS=ns
IMAGE=testrbdnbd$$
SIZE=64
DATA=
DEV=
_sudo()
{
local cmd
if [ `id -u` -eq 0 ]
then
"$@"
return $?
fi
# Look up the command in the user's PATH. If that fails, run it as given,
# assuming it is resolvable in sudo's PATH.
cmd=`which $1 2>/dev/null` || cmd=$1
shift
sudo -nE "${cmd}" "$@"
}
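# Example (illustrative only): when invoked as a non-root user,
#   _sudo dd if=/dev/zero of=/dev/null bs=1M count=1
# resolves dd via `which` and re-executes it as
#   sudo -nE /usr/bin/dd if=/dev/zero of=/dev/null bs=1M count=1
# preserving the caller's environment (-E) and failing rather than
# prompting for a password (-n).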
setup()
{
local ns x
if [ -e CMakeCache.txt ]; then
# running under cmake build dir
CEPH_SRC=$(readlink -f $(dirname $0)/../../../src)
CEPH_ROOT=${PWD}
CEPH_BIN=${CEPH_ROOT}/bin
export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind:${CEPH_ROOT}/lib/cython_modules/lib.3
PATH=${CEPH_BIN}:${PATH}
fi
_sudo echo test sudo
trap cleanup INT TERM EXIT
TEMPDIR=`mktemp -d`
DATA=${TEMPDIR}/data
dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
rbd namespace create ${POOL}/${NS}
for ns in '' ${NS}; do
rbd --dest-pool ${POOL} --dest-namespace "${ns}" --no-progress import \
${DATA} ${IMAGE}
done
# create another pool
ceph osd pool create ${ANOTHER_POOL} 8
rbd pool init ${ANOTHER_POOL}
}
function cleanup()
{
local ns s
set +e
mount | fgrep ${TEMPDIR}/mnt && _sudo umount -f ${TEMPDIR}/mnt
rm -Rf ${TEMPDIR}
if [ -n "${DEV}" ]
then
_sudo rbd device --device-type nbd unmap ${DEV}
fi
for ns in '' ${NS}; do
if rbd -p ${POOL} --namespace "${ns}" status ${IMAGE} 2>/dev/null; then
for s in 0.5 1 2 4 8 16 32; do
sleep $s
rbd -p ${POOL} --namespace "${ns}" status ${IMAGE} |
grep 'Watchers: none' && break
done
rbd -p ${POOL} --namespace "${ns}" snap purge ${IMAGE}
rbd -p ${POOL} --namespace "${ns}" remove ${IMAGE}
fi
done
rbd namespace remove ${POOL}/${NS}
# cleanup/reset default pool
rbd config global rm global rbd_default_pool
ceph osd pool delete ${ANOTHER_POOL} ${ANOTHER_POOL} --yes-i-really-really-mean-it
}
function expect_false()
{
if "$@"; then return 1; else return 0; fi
}
function get_pid()
{
local pool=$1
local ns=$2
PID=$(rbd device --device-type nbd --format xml list | $XMLSTARLET sel -t -v \
"//devices/device[pool='${pool}'][namespace='${ns}'][image='${IMAGE}'][device='${DEV}']/id")
test -n "${PID}" || return 1
ps -p ${PID} -C rbd-nbd
}
unmap_device()
{
local args=$1
local pid=$2
_sudo rbd device --device-type nbd unmap ${args}
rbd device --device-type nbd list | expect_false grep "^${pid}\\b" || return 1
ps -C rbd-nbd | expect_false grep "^ *${pid}\\b" || return 1
# work around a possible race between unmap and the following map
sleep 0.5
}
#
# main
#
setup
# exit status test
expect_false rbd-nbd
expect_false rbd-nbd INVALIDCMD
if [ `id -u` -ne 0 ]
then
expect_false rbd device --device-type nbd map ${IMAGE}
fi
expect_false _sudo rbd device --device-type nbd map INVALIDIMAGE
expect_false _sudo rbd-nbd --device INVALIDDEV map ${IMAGE}
# list format test
expect_false rbd device --device-type nbd --format INVALID list
rbd device --device-type nbd --format json --pretty-format list
rbd device --device-type nbd --format xml list
# map test using the first unused device
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
# map test specifying the device
expect_false _sudo rbd-nbd --device ${DEV} map ${POOL}/${IMAGE}
dev1=${DEV}
unmap_device ${DEV} ${PID}
DEV=
# XXX: a race is possible if the device is reused by another process
DEV=`_sudo rbd-nbd --device ${dev1} map ${POOL}/${IMAGE}`
[ "${DEV}" = "${dev1}" ]
rbd device --device-type nbd list | grep "${IMAGE}"
get_pid ${POOL}
# read test
[ "`dd if=${DATA} bs=1M | md5sum`" = "`_sudo dd if=${DEV} bs=1M | md5sum`" ]
# write test
dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
_sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
[ "`dd if=${DATA} bs=1M | md5sum`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5sum`" ]
unmap_device ${DEV} ${PID}
# notrim test
DEV=`_sudo rbd device --device-type nbd --options notrim map ${POOL}/${IMAGE}`
get_pid ${POOL}
provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
used=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
[ "${used}" -eq "${provisioned}" ]
# discard should fail because the device was mapped with notrim
expect_false _sudo blkdiscard ${DEV}
sync
provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
used=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
[ "${used}" -eq "${provisioned}" ]
unmap_device ${DEV} ${PID}
# trim test
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
used=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
[ "${used}" -eq "${provisioned}" ]
# discard should be honored because trim is enabled by default at map time
_sudo blkdiscard ${DEV}
sync
provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
used=`rbd -p ${POOL} --format xml du ${IMAGE} |
$XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
[ "${used}" -lt "${provisioned}" ]
# resize test
devname=$(basename ${DEV})
blocks=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
test -n "${blocks}"
rbd resize ${POOL}/${IMAGE} --size $((SIZE * 2))M
rbd info ${POOL}/${IMAGE}
blocks2=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
test -n "${blocks2}"
test ${blocks2} -eq $((blocks * 2))
rbd resize ${POOL}/${IMAGE} --allow-shrink --size ${SIZE}M
blocks2=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
test -n "${blocks2}"
test ${blocks2} -eq ${blocks}
# read-only option test
unmap_device ${DEV} ${PID}
DEV=`_sudo rbd --device-type nbd map --read-only ${POOL}/${IMAGE}`
PID=$(rbd device --device-type nbd list | awk -v pool=${POOL} -v img=${IMAGE} -v dev=${DEV} \
'$2 == pool && $3 == img && $5 == dev {print $1}')
test -n "${PID}"
ps -p ${PID} -C rbd-nbd
_sudo dd if=${DEV} of=/dev/null bs=1M
expect_false _sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
unmap_device ${DEV} ${PID}
# exclusive option test
DEV=`_sudo rbd --device-type nbd map --exclusive ${POOL}/${IMAGE}`
get_pid ${POOL}
_sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
expect_false timeout 10 \
rbd bench ${IMAGE} --io-type write --io-size=1024 --io-total=1024
unmap_device ${DEV} ${PID}
DEV=
rbd bench ${IMAGE} --io-type write --io-size=1024 --io-total=1024
# unmap by image name test
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
unmap_device ${IMAGE} ${PID}
DEV=
# map/unmap snap test
rbd snap create ${POOL}/${IMAGE}@snap
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}@snap`
get_pid ${POOL}
unmap_device "${IMAGE}@snap" ${PID}
DEV=
# map/unmap snap test with --snap-id
SNAPID=`rbd snap ls ${POOL}/${IMAGE} | awk '$2 == "snap" {print $1}'`
DEV=`_sudo rbd device --device-type nbd map --snap-id ${SNAPID} ${POOL}/${IMAGE}`
get_pid ${POOL}
unmap_device "--snap-id ${SNAPID} ${IMAGE}" ${PID}
DEV=
# map/unmap namespace test
rbd snap create ${POOL}/${NS}/${IMAGE}@snap
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${NS}/${IMAGE}@snap`
get_pid ${POOL} ${NS}
unmap_device "${POOL}/${NS}/${IMAGE}@snap" ${PID}
DEV=
# map/unmap namespace test with --snap-id
SNAPID=`rbd snap ls ${POOL}/${NS}/${IMAGE} | awk '$2 == "snap" {print $1}'`
DEV=`_sudo rbd device --device-type nbd map --snap-id ${SNAPID} ${POOL}/${NS}/${IMAGE}`
get_pid ${POOL} ${NS}
unmap_device "--snap-id ${SNAPID} ${POOL}/${NS}/${IMAGE}" ${PID}
DEV=
# map/unmap namespace using options test
DEV=`_sudo rbd device --device-type nbd map --pool ${POOL} --namespace ${NS} --image ${IMAGE}`
get_pid ${POOL} ${NS}
unmap_device "--pool ${POOL} --namespace ${NS} --image ${IMAGE}" ${PID}
DEV=`_sudo rbd device --device-type nbd map --pool ${POOL} --namespace ${NS} --image ${IMAGE} --snap snap`
get_pid ${POOL} ${NS}
unmap_device "--pool ${POOL} --namespace ${NS} --image ${IMAGE} --snap snap" ${PID}
DEV=
# unmap by image name test 2
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
pid=$PID
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${NS}/${IMAGE}`
get_pid ${POOL} ${NS}
unmap_device ${POOL}/${NS}/${IMAGE} ${PID}
DEV=
unmap_device ${POOL}/${IMAGE} ${pid}
# map/unmap test with just image name and expect image to come from default pool
if [ "${POOL}" = "rbd" ];then
DEV=`_sudo rbd device --device-type nbd map ${IMAGE}`
get_pid ${POOL}
unmap_device ${IMAGE} ${PID}
DEV=
fi
# map/unmap test with just image name after changing default pool
rbd config global set global rbd_default_pool ${ANOTHER_POOL}
rbd create --size 10M ${IMAGE}
DEV=`_sudo rbd device --device-type nbd map ${IMAGE}`
get_pid ${ANOTHER_POOL}
unmap_device ${IMAGE} ${PID}
DEV=
# reset
rbd config global rm global rbd_default_pool
# auto unmap test
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
_sudo kill ${PID}
for i in `seq 10`; do
rbd device --device-type nbd list | expect_false grep "^${PID} *${POOL} *${IMAGE}" && break
sleep 1
done
rbd device --device-type nbd list | expect_false grep "^${PID} *${POOL} *${IMAGE}"
# quiesce test
QUIESCE_HOOK=${TEMPDIR}/quiesce.sh
DEV=`_sudo rbd device --device-type nbd map --quiesce --quiesce-hook ${QUIESCE_HOOK} ${POOL}/${IMAGE}`
get_pid ${POOL}
# test that snap create fails if the hook does not exist
test ! -e ${QUIESCE_HOOK}
expect_false rbd snap create ${POOL}/${IMAGE}@quiesce1
_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
# test that the hook is executed
touch ${QUIESCE_HOOK}
chmod +x ${QUIESCE_HOOK}
cat > ${QUIESCE_HOOK} <<EOF
#!/bin/sh
echo "test the hook is executed" >&2
echo \$1 > ${TEMPDIR}/\$2
EOF
rbd snap create ${POOL}/${IMAGE}@quiesce1
_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
test "$(cat ${TEMPDIR}/quiesce)" = ${DEV}
test "$(cat ${TEMPDIR}/unquiesce)" = ${DEV}
# test that snap create fails if the hook fails
touch ${QUIESCE_HOOK}
chmod +x ${QUIESCE_HOOK}
cat > ${QUIESCE_HOOK} <<EOF
#!/bin/sh
echo "test snap create fails if the hook fails" >&2
exit 22
EOF
expect_false rbd snap create ${POOL}/${IMAGE}@quiesce2
_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
# test that snap create tolerates a slow hook
cat > ${QUIESCE_HOOK} <<EOF
#!/bin/sh
echo "test the hook is slow" >&2
sleep 7
EOF
rbd snap create ${POOL}/${IMAGE}@quiesce2
_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
# test the rbd-nbd_quiesce hook that ships with the distribution
unmap_device ${DEV} ${PID}
LOG_FILE=${TEMPDIR}/rbd-nbd.log
if [ -n "${CEPH_SRC}" ]; then
QUIESCE_HOOK=${CEPH_SRC}/tools/rbd_nbd/rbd-nbd_quiesce
DEV=`_sudo rbd device --device-type nbd map --quiesce --quiesce-hook ${QUIESCE_HOOK} \
${POOL}/${IMAGE} --log-file=${LOG_FILE}`
else
DEV=`_sudo rbd device --device-type nbd map --quiesce ${POOL}/${IMAGE} --log-file=${LOG_FILE}`
fi
get_pid ${POOL}
_sudo mkfs ${DEV}
mkdir ${TEMPDIR}/mnt
_sudo mount ${DEV} ${TEMPDIR}/mnt
rbd snap create ${POOL}/${IMAGE}@quiesce3
_sudo dd if=${DATA} of=${TEMPDIR}/mnt/test bs=1M count=1 oflag=direct
_sudo umount ${TEMPDIR}/mnt
unmap_device ${DEV} ${PID}
DEV=
cat ${LOG_FILE}
expect_false grep 'quiesce failed' ${LOG_FILE}
# test detach/attach
OUT=`_sudo rbd device --device-type nbd --options try-netlink,show-cookie map ${POOL}/${IMAGE}`
read DEV COOKIE <<< "${OUT}"
get_pid ${POOL}
_sudo mount ${DEV} ${TEMPDIR}/mnt
_sudo rbd device detach ${POOL}/${IMAGE} --device-type nbd
expect_false get_pid ${POOL}
expect_false _sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd
if [ -n "${COOKIE}" ]; then
_sudo rbd device attach --device ${DEV} --cookie ${COOKIE} ${POOL}/${IMAGE} --device-type nbd
else
_sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd --force
fi
get_pid ${POOL}
_sudo rbd device detach ${DEV} --device-type nbd
expect_false get_pid ${POOL}
if [ -n "${COOKIE}" ]; then
_sudo rbd device attach --device ${DEV} --cookie ${COOKIE} ${POOL}/${IMAGE} --device-type nbd
else
_sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd --force
fi
get_pid ${POOL}
ls ${TEMPDIR}/mnt/
dd if=${TEMPDIR}/mnt/test of=/dev/null bs=1M count=1
_sudo dd if=${DATA} of=${TEMPDIR}/mnt/test1 bs=1M count=1 oflag=direct
_sudo umount ${TEMPDIR}/mnt
unmap_device ${DEV} ${PID}
# if kernel supports cookies
if [ -n "${COOKIE}" ]; then
OUT=`_sudo rbd device --device-type nbd --show-cookie --cookie "abc de" --options try-netlink map ${POOL}/${IMAGE}`
read DEV ANOTHER_COOKIE <<< "${OUT}"
get_pid ${POOL}
test "${ANOTHER_COOKIE}" = "abc de"
unmap_device ${DEV} ${PID}
fi
DEV=
# test detach/attach with --snap-id
SNAPID=`rbd snap ls ${POOL}/${IMAGE} | awk '$2 == "snap" {print $1}'`
OUT=`_sudo rbd device --device-type nbd --options try-netlink,show-cookie map --snap-id ${SNAPID} ${POOL}/${IMAGE}`
read DEV COOKIE <<< "${OUT}"
get_pid ${POOL}
_sudo rbd device detach ${POOL}/${IMAGE} --snap-id ${SNAPID} --device-type nbd
expect_false get_pid ${POOL}
expect_false _sudo rbd device attach --device ${DEV} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd
if [ -n "${COOKIE}" ]; then
_sudo rbd device attach --device ${DEV} --cookie ${COOKIE} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd
else
_sudo rbd device attach --device ${DEV} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd --force
fi
get_pid ${POOL}
_sudo rbd device detach ${DEV} --device-type nbd
expect_false get_pid ${POOL}
DEV=
# test discard granularity with journaling
rbd config image set ${POOL}/${IMAGE} rbd_discard_granularity_bytes 4096
rbd feature enable ${POOL}/${IMAGE} journaling
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
# Since a discard will now be pruned to whole blocks only (0..4095,
# 4096..8191), test all the cases around those alignments. 512 is the
# smallest block size blkdiscard allows us to use, so the test checks
# 512 before the boundary, exactly on it, and 512 after.
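# For example, --offset 512 --length $((8192-1024)) below covers bytes
# 512..7679, which contains no complete 4096-byte block, so the discard is
# presumably pruned away entirely; --offset 512 --length 8192 covers bytes
# 512..8703 and fully contains block 4096..8191, so exactly that block
# should be discarded.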
_sudo blkdiscard --offset 0 --length $((4096-512)) ${DEV}
_sudo blkdiscard --offset 0 --length 4096 ${DEV}
_sudo blkdiscard --offset 0 --length $((4096+512)) ${DEV}
_sudo blkdiscard --offset 512 --length $((8192-1024)) ${DEV}
_sudo blkdiscard --offset 512 --length $((8192-512)) ${DEV}
_sudo blkdiscard --offset 512 --length 8192 ${DEV}
# wait for the commit log to be empty; 10 seconds should be more than enough
tries=0
queue_length=`rbd journal inspect --pool ${POOL} --image ${IMAGE} | awk '/entries inspected/ {print $1}'`
while [ ${tries} -lt 10 ] && [ ${queue_length} -gt 0 ]; do
rbd journal inspect --pool ${POOL} --image ${IMAGE} --verbose
sleep 1
queue_length=`rbd journal inspect --pool ${POOL} --image ${IMAGE} | awk '/entries inspected/ {print $1}'`
tries=$((tries+1))
done
[ ${queue_length} -eq 0 ]
unmap_device ${DEV} ${PID}
DEV=
rbd feature disable ${POOL}/${IMAGE} journaling
rbd config image rm ${POOL}/${IMAGE} rbd_discard_granularity_bytes
# test that rbd_op_threads setting takes effect
EXPECTED=`ceph-conf --show-config-value librados_thread_count`
DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
get_pid ${POOL}
ACTUAL=`ps -p ${PID} -T | grep -c io_context_pool`
[ ${ACTUAL} -eq ${EXPECTED} ]
unmap_device ${DEV} ${PID}
EXPECTED=$((EXPECTED * 3 + 1))
DEV=`_sudo rbd device --device-type nbd --rbd-op-threads ${EXPECTED} map ${POOL}/${IMAGE}`
get_pid ${POOL}
ACTUAL=`ps -p ${PID} -T | grep -c io_context_pool`
[ ${ACTUAL} -eq ${EXPECTED} ]
unmap_device ${DEV} ${PID}
DEV=
echo OK
ceph-main/qa/workunits/rbd/rbd_groups.sh
#!/usr/bin/env bash
set -ex
#
# rbd_groups.sh - test consistency groups CLI commands
#
#
# Functions
#
create_group()
{
local group_name=$1
rbd group create $group_name
}
list_groups()
{
rbd group list
}
check_group_exists()
{
local group_name=$1
list_groups | grep $group_name
}
remove_group()
{
local group_name=$1
rbd group remove $group_name
}
rename_group()
{
local src_name=$1
local dest_name=$2
rbd group rename $src_name $dest_name
}
check_group_does_not_exist()
{
local group_name=$1
for v in $(list_groups); do
if [ "$v" == "$group_name" ]; then
return 1
fi
done
return 0
}
create_image()
{
local image_name=$1
rbd create --size 10M $image_name
}
remove_image()
{
local image_name=$1
rbd remove $image_name
}
add_image_to_group()
{
local image_name=$1
local group_name=$2
rbd group image add $group_name $image_name
}
remove_image_from_group()
{
local image_name=$1
local group_name=$2
rbd group image remove $group_name $image_name
}
check_image_in_group()
{
local image_name=$1
local group_name=$2
for v in $(rbd group image list $group_name); do
local vtrimmed=${v#*/}
if [ "$vtrimmed" = "$image_name" ]; then
return 0
fi
done
return 1
}
check_image_not_in_group()
{
local image_name=$1
local group_name=$2
for v in $(rbd group image list $group_name); do
local vtrimmed=${v#*/}
if [ "$vtrimmed" = "$image_name" ]; then
return 1
fi
done
return 0
}
create_snapshot()
{
local group_name=$1
local snap_name=$2
rbd group snap create $group_name@$snap_name
}
create_snapshots()
{
local group_name=$1
local snap_name=$2
local snap_count=$3
for i in `seq 1 $snap_count`; do
rbd group snap create $group_name@$snap_name$i
done
}
remove_snapshot()
{
local group_name=$1
local snap_name=$2
rbd group snap remove $group_name@$snap_name
}
remove_snapshots()
{
local group_name=$1
local snap_name=$2
local snap_count=$3
for i in `seq 1 $snap_count`; do
rbd group snap remove $group_name@$snap_name$i
done
}
rename_snapshot()
{
local group_name=$1
local snap_name=$2
local new_snap_name=$3
rbd group snap rename $group_name@$snap_name $new_snap_name
}
list_snapshots()
{
local group_name=$1
rbd group snap list $group_name
}
rollback_snapshot()
{
local group_name=$1
local snap_name=$2
rbd group snap rollback $group_name@$snap_name
}
check_snapshot_in_group()
{
local group_name=$1
local snap_name=$2
list_snapshots $group_name | grep $snap_name
}
check_snapshots_count_in_group()
{
local group_name=$1
local snap_name=$2
local expected_count=$3
local actual_count
actual_count=$(list_snapshots $group_name | grep -c $snap_name)
(( actual_count == expected_count ))
}
check_snapshot_not_in_group()
{
local group_name=$1
local snap_name=$2
for v in $(list_snapshots $group_name | awk '{print $1}'); do
if [ "$v" = "$snap_name" ]; then
return 1
fi
done
return 0
}
echo "TEST: create remove consistency group"
group="test_consistency_group"
new_group="test_new_consistency_group"
create_group $group
check_group_exists $group
rename_group $group $new_group
check_group_exists $new_group
remove_group $new_group
check_group_does_not_exist $new_group
echo "PASSED"
echo "TEST: add remove images to consistency group"
image="test_image"
group="test_consistency_group"
create_image $image
create_group $group
add_image_to_group $image $group
check_image_in_group $image $group
remove_image_from_group $image $group
check_image_not_in_group $image $group
remove_group $group
remove_image $image
echo "PASSED"
echo "TEST: create remove snapshots of consistency group"
image="test_image"
group="test_consistency_group"
snap="group_snap"
new_snap="new_group_snap"
sec_snap="group_snap2"
create_image $image
create_group $group
add_image_to_group $image $group
create_snapshot $group $snap
check_snapshot_in_group $group $snap
rename_snapshot $group $snap $new_snap
check_snapshot_not_in_group $group $snap
create_snapshot $group $sec_snap
check_snapshot_in_group $group $sec_snap
rollback_snapshot $group $new_snap
remove_snapshot $group $new_snap
check_snapshot_not_in_group $group $new_snap
remove_snapshot $group $sec_snap
check_snapshot_not_in_group $group $sec_snap
remove_group $group
remove_image $image
echo "PASSED"
echo "TEST: list snapshots of consistency group"
image="test_image"
group="test_consistency_group"
snap="group_snap"
create_image $image
create_group $group
add_image_to_group $image $group
create_snapshots $group $snap 10
check_snapshots_count_in_group $group $snap 10
remove_snapshots $group $snap 10
create_snapshots $group $snap 100
check_snapshots_count_in_group $group $snap 100
remove_snapshots $group $snap 100
remove_group $group
remove_image $image
echo "PASSED"
echo "OK"
ceph-main/qa/workunits/rbd/rbd_mirror_bootstrap.sh
#!/bin/sh -ex
#
# rbd_mirror_bootstrap.sh - test peer bootstrap create/import
#
RBD_MIRROR_MANUAL_PEERS=1
RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-1}
. $(dirname $0)/rbd_mirror_helpers.sh
setup
testlog "TEST: bootstrap cluster2 from cluster1"
# create token on cluster1 and import to cluster2
TOKEN=${TEMPDIR}/peer-token
TOKEN_2=${TEMPDIR}/peer-token-2
CEPH_ARGS='' rbd --cluster ${CLUSTER1} mirror pool peer bootstrap create ${POOL} > ${TOKEN}
CEPH_ARGS='' rbd --cluster ${CLUSTER1} mirror pool peer bootstrap create ${PARENT_POOL} > ${TOKEN_2}
cmp ${TOKEN} ${TOKEN_2}
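# the bootstrap token is evidently cluster-wide rather than per-pool: the
# tokens created for ${POOL} and ${PARENT_POOL} must compare equal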
CEPH_ARGS='' rbd --cluster ${CLUSTER2} --pool ${POOL} mirror pool peer bootstrap import ${TOKEN} --direction rx-only
CEPH_ARGS='' rbd --cluster ${CLUSTER2} --pool ${PARENT_POOL} mirror pool peer bootstrap import ${TOKEN} --direction rx-tx
start_mirrors ${CLUSTER1}
start_mirrors ${CLUSTER2}
testlog "TEST: verify rx-only direction"
# rx-only peer is added immediately by "rbd mirror pool peer bootstrap import"
rbd --cluster ${CLUSTER2} --pool ${POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-only"'
# tx-only peer is added asynchronously by mirror_peer_ping class method
while ! rbd --cluster ${CLUSTER1} --pool ${POOL} mirror pool info --format json | jq -e '.peers | length > 0'; do
sleep 1
done
rbd --cluster ${CLUSTER1} --pool ${POOL} mirror pool info --format json | jq -e '.peers[0].direction == "tx-only"'
create_image_and_enable_mirror ${CLUSTER1} ${POOL} image1
wait_for_image_replay_started ${CLUSTER2} ${POOL} image1
write_image ${CLUSTER1} ${POOL} image1 100
wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} image1
testlog "TEST: verify rx-tx direction"
# both rx-tx peers are added immediately by "rbd mirror pool peer bootstrap import"
rbd --cluster ${CLUSTER1} --pool ${PARENT_POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-tx"'
rbd --cluster ${CLUSTER2} --pool ${PARENT_POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-tx"'
create_image ${CLUSTER1} ${PARENT_POOL} image1
create_image ${CLUSTER2} ${PARENT_POOL} image2
enable_mirror ${CLUSTER1} ${PARENT_POOL} image1
enable_mirror ${CLUSTER2} ${PARENT_POOL} image2
wait_for_image_replay_started ${CLUSTER2} ${PARENT_POOL} image1
write_image ${CLUSTER1} ${PARENT_POOL} image1 100
wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${PARENT_POOL} image1
wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} image2
write_image ${CLUSTER2} ${PARENT_POOL} image2 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} image2
ceph-main/qa/workunits/rbd/rbd_mirror_fsx_compare.sh
#!/bin/sh -ex
#
# rbd_mirror_fsx_compare.sh - test rbd-mirror daemon under FSX workload
#
# The script is used to compare FSX-generated images between two clusters.
#
. $(dirname $0)/rbd_mirror_helpers.sh
trap 'cleanup $?' INT TERM EXIT
setup_tempdir
testlog "TEST: wait for all images"
image_count=$(rbd --cluster ${CLUSTER1} --pool ${POOL} ls | wc -l)
retrying_seconds=0
sleep_seconds=10
while [ ${retrying_seconds} -le 7200 ]; do
[ $(rbd --cluster ${CLUSTER2} --pool ${POOL} ls | wc -l) -ge ${image_count} ] && break
sleep ${sleep_seconds}
retrying_seconds=$(($retrying_seconds+${sleep_seconds}))
done
testlog "TEST: snapshot all pool images"
snap_id=`uuidgen`
for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
create_snapshot ${CLUSTER1} ${POOL} ${image} ${snap_id}
done
testlog "TEST: wait for snapshots"
for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
wait_for_snap_present ${CLUSTER2} ${POOL} ${image} ${snap_id}
done
testlog "TEST: compare image snapshots"
for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
compare_image_snapshots ${POOL} ${image}
done
ceph-main/qa/workunits/rbd/rbd_mirror_fsx_prepare.sh
#!/bin/sh -ex
#
# rbd_mirror_fsx_prepare.sh - test rbd-mirror daemon under FSX workload
#
# The script prepares the clusters for comparing FSX-generated images
# between two clusters.
#
. $(dirname $0)/rbd_mirror_helpers.sh
setup
ceph-main/qa/workunits/rbd/rbd_mirror_ha.sh
#!/bin/sh -ex
#
# rbd_mirror_ha.sh - test rbd-mirror daemons in HA mode
#
RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-7}
. $(dirname $0)/rbd_mirror_helpers.sh
setup
is_leader()
{
local instance=$1
local pool=$2
test -n "${pool}" || pool=${POOL}
admin_daemon "${CLUSTER1}:${instance}" \
rbd mirror status ${pool} ${CLUSTER2}${PEER_CLUSTER_SUFFIX} |
grep '"leader": true'
}
wait_for_leader()
{
local s instance
for s in 1 1 2 4 4 4 4 4 8 8 8 8 16 16 32 64; do
sleep $s
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
is_leader ${instance} || continue
LEADER=${instance}
return 0
done
done
LEADER=
return 1
}
release_leader()
{
local pool=$1
local cmd="rbd mirror leader release"
test -n "${pool}" && cmd="${cmd} ${pool} ${CLUSTER2}"
admin_daemon "${CLUSTER1}:${LEADER}" ${cmd}
}
wait_for_leader_released()
{
local i
test -n "${LEADER}"
for i in `seq 10`; do
is_leader ${LEADER} || return 0
sleep 1
done
return 1
}
test_replay()
{
local image
for image; do
wait_for_image_replay_started ${CLUSTER1}:${LEADER} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1}:${LEADER} ${CLUSTER2} ${POOL} \
${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' \
'primary_position' \
"${MIRROR_USER_ID_PREFIX}${LEADER} on $(hostname -s)"
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} \
'down+unknown'
fi
compare_images ${POOL} ${image}
done
}
testlog "TEST: start first daemon instance and test replay"
start_mirror ${CLUSTER1}:0
image1=test1
create_image ${CLUSTER2} ${POOL} ${image1}
LEADER=0
test_replay ${image1}
testlog "TEST: release leader and wait it is reacquired"
is_leader 0 ${POOL}
is_leader 0 ${PARENT_POOL}
release_leader ${POOL}
wait_for_leader_released
is_leader 0 ${PARENT_POOL}
wait_for_leader
release_leader
wait_for_leader_released
expect_failure "" is_leader 0 ${PARENT_POOL}
wait_for_leader
testlog "TEST: start second daemon instance and test replay"
start_mirror ${CLUSTER1}:1
image2=test2
create_image ${CLUSTER2} ${POOL} ${image2}
test_replay ${image1} ${image2}
testlog "TEST: release leader and test it is acquired by secondary"
is_leader 0 ${POOL}
is_leader 0 ${PARENT_POOL}
release_leader ${POOL}
wait_for_leader_released
wait_for_leader
test_replay ${image1} ${image2}
release_leader
wait_for_leader_released
wait_for_leader
test "${LEADER}" = 0
testlog "TEST: stop first daemon instance and test replay"
stop_mirror ${CLUSTER1}:0
image3=test3
create_image ${CLUSTER2} ${POOL} ${image3}
LEADER=1
test_replay ${image1} ${image2} ${image3}
testlog "TEST: start first daemon instance and test replay"
start_mirror ${CLUSTER1}:0
image4=test4
create_image ${CLUSTER2} ${POOL} ${image4}
test_replay ${image3} ${image4}
testlog "TEST: crash leader and test replay"
stop_mirror ${CLUSTER1}:1 -KILL
image5=test5
create_image ${CLUSTER2} ${POOL} ${image5}
LEADER=0
test_replay ${image1} ${image4} ${image5}
testlog "TEST: start crashed leader and test replay"
start_mirror ${CLUSTER1}:1
image6=test6
create_image ${CLUSTER2} ${POOL} ${image6}
test_replay ${image1} ${image6}
testlog "TEST: start yet another daemon instance and test replay"
start_mirror ${CLUSTER1}:2
image7=test7
create_image ${CLUSTER2} ${POOL} ${image7}
test_replay ${image1} ${image7}
testlog "TEST: release leader and test it is acquired by secondary"
is_leader 0
release_leader
wait_for_leader_released
wait_for_leader
test_replay ${image1} ${image2}
testlog "TEST: stop leader and test replay"
stop_mirror ${CLUSTER1}:${LEADER}
image8=test8
create_image ${CLUSTER2} ${POOL} ${image8}
prev_leader=${LEADER}
wait_for_leader
test_replay ${image1} ${image8}
testlog "TEST: start previous leader and test replay"
start_mirror ${CLUSTER1}:${prev_leader}
image9=test9
create_image ${CLUSTER2} ${POOL} ${image9}
test_replay ${image1} ${image9}
testlog "TEST: crash leader and test replay"
stop_mirror ${CLUSTER1}:${LEADER} -KILL
image10=test10
create_image ${CLUSTER2} ${POOL} ${image10}
prev_leader=${LEADER}
wait_for_leader
test_replay ${image1} ${image10}
testlog "TEST: start previous leader and test replay"
start_mirror ${CLUSTER1}:${prev_leader}
image11=test11
create_image ${CLUSTER2} ${POOL} ${image11}
test_replay ${image1} ${image11}
testlog "TEST: start some more daemon instances and test replay"
start_mirror ${CLUSTER1}:3
start_mirror ${CLUSTER1}:4
start_mirror ${CLUSTER1}:5
start_mirror ${CLUSTER1}:6
image13=test13
create_image ${CLUSTER2} ${POOL} ${image13}
test_replay ${image1} ${image13}
testlog "TEST: release leader and test it is acquired by secondary"
release_leader
wait_for_leader_released
wait_for_leader
test_replay ${image1} ${image2}
testlog "TEST: in loop: stop leader and test replay"
for i in 0 1 2 3 4 5; do
stop_mirror ${CLUSTER1}:${LEADER}
wait_for_leader
test_replay ${image1}
done
stop_mirror ${CLUSTER1}:${LEADER}
ceph-main/qa/workunits/rbd/rbd_mirror_helpers.sh
#!/bin/sh
#
# rbd_mirror_helpers.sh - shared rbd-mirror daemon helper functions
#
# The script starts two ("local" and "remote") clusters using the mstart.sh
# script, creates a temporary directory used for cluster configs, daemon logs,
# admin sockets, and temporary files, and launches the rbd-mirror daemons.
#
# There are several env variables useful when troubleshooting a test failure:
#
# RBD_MIRROR_NOCLEANUP - if not empty, don't run the cleanup (stop processes,
# destroy the clusters and remove the temp directory)
# on exit, so it is possible to check the test state
# after failure.
# RBD_MIRROR_TEMDIR - use this path when creating the temporary directory
# (should not exist) instead of running mktemp(1).
# RBD_MIRROR_ARGS - use this to pass additional arguments to started
# rbd-mirror daemons.
# RBD_MIRROR_VARGS - use this to pass additional arguments to vstart.sh
# when starting clusters.
# RBD_MIRROR_INSTANCES - number of daemons to start per cluster
# RBD_MIRROR_CONFIG_KEY - if not empty, use config-key for remote cluster
# secrets
# The cleanup can be done as a separate step, running the script with
# `cleanup ${RBD_MIRROR_TEMDIR}' arguments.
#
# Note that, like other workunit tests, rbd_mirror_journal.sh expects to find
# the ceph binaries in PATH.
#
# Thus a typical troubleshooting session:
#
# From Ceph src dir (CEPH_SRC_PATH), start the test in NOCLEANUP mode and with
# TEMPDIR pointing to a known location:
#
# cd $CEPH_SRC_PATH
# PATH=$CEPH_SRC_PATH:$PATH
# RBD_MIRROR_NOCLEANUP=1 RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
# ../qa/workunits/rbd/rbd_mirror_journal.sh
#
# After the test failure cd to TEMPDIR and check the current state:
#
# cd /tmp/tmp.rbd_mirror
# ls
# less rbd-mirror.cluster1_daemon.$pid.log
# ceph --cluster cluster1 -s
# ceph --cluster cluster2 -s
# rbd --cluster cluster2 -p mirror ls
# rbd --cluster cluster2 -p mirror journal status --image test
# ceph --admin-daemon rbd-mirror.cluster1_daemon.cluster1.$pid.asok help
# ...
#
# Also you can execute commands (functions) from the script:
#
# cd $CEPH_SRC_PATH
# export RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror
# ../qa/workunits/rbd/rbd_mirror_journal.sh status
# ../qa/workunits/rbd/rbd_mirror_journal.sh stop_mirror cluster1
# ../qa/workunits/rbd/rbd_mirror_journal.sh start_mirror cluster2
# ../qa/workunits/rbd/rbd_mirror_journal.sh flush cluster2
# ...
#
# Eventually, run the cleanup:
#
# cd $CEPH_SRC_PATH
# RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
# ../qa/workunits/rbd/rbd_mirror_journal.sh cleanup
#
if type xmlstarlet > /dev/null 2>&1; then
XMLSTARLET=xmlstarlet
elif type xml > /dev/null 2>&1; then
XMLSTARLET=xml
else
echo "Missing xmlstarlet binary!"
exit 1
fi
RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-2}
CLUSTER1=cluster1
CLUSTER2=cluster2
PEER_CLUSTER_SUFFIX=
POOL=mirror
PARENT_POOL=mirror_parent
NS1=ns1
NS2=ns2
TEMPDIR=
CEPH_ID=${CEPH_ID:-mirror}
RBD_IMAGE_FEATURES=${RBD_IMAGE_FEATURES:-layering,exclusive-lock,journaling}
MIRROR_USER_ID_PREFIX=${MIRROR_USER_ID_PREFIX:-${CEPH_ID}.}
MIRROR_POOL_MODE=${MIRROR_POOL_MODE:-pool}
MIRROR_IMAGE_MODE=${MIRROR_IMAGE_MODE:-journal}
export CEPH_ARGS="--id ${CEPH_ID}"
LAST_MIRROR_INSTANCE=$((${RBD_MIRROR_INSTANCES} - 1))
CEPH_ROOT=$(readlink -f $(dirname $0)/../../../src)
CEPH_BIN=.
CEPH_SRC=.
if [ -e CMakeCache.txt ]; then
CEPH_SRC=${CEPH_ROOT}
CEPH_ROOT=${PWD}
CEPH_BIN=./bin
# needed for ceph CLI under cmake
export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind:${CEPH_ROOT}/lib/cython_modules/lib.3
fi
# These vars facilitate running this script in an environment with
# ceph installed from packages, like teuthology. These are not defined
# by default.
#
# RBD_MIRROR_USE_EXISTING_CLUSTER - if set, do not start and stop ceph clusters
# RBD_MIRROR_USE_RBD_MIRROR - if set, use an existing instance of rbd-mirror
# running as ceph client $CEPH_ID. If empty,
# this script will start and stop rbd-mirror
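#
# For example (hypothetical invocation), to run against a pre-deployed pair
# of clusters with an rbd-mirror daemon already running as client.$CEPH_ID:
#
#  RBD_MIRROR_USE_EXISTING_CLUSTER=1 RBD_MIRROR_USE_RBD_MIRROR=1 \
#      ../qa/workunits/rbd/rbd_mirror_journal.sh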
#
# Functions
#
# Parse a value in format cluster[:instance] and set cluster and instance vars.
set_cluster_instance()
{
local val=$1
local cluster_var_name=$2
local instance_var_name=$3
cluster=${val%:*}
instance=${val##*:}
if [ "${instance}" = "${val}" ]; then
# instance was not specified, use default
instance=0
fi
eval ${cluster_var_name}=${cluster}
eval ${instance_var_name}=${instance}
}
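# Example (illustrative): `set_cluster_instance "cluster1:2" c i` sets
# c=cluster1 and i=2; with a bare "cluster1" the instance defaults to 0.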
daemon_asok_file()
{
local local_cluster=$1
local cluster=$2
local instance
set_cluster_instance "${local_cluster}" local_cluster instance
echo $(ceph-conf --cluster $local_cluster --name "client.${MIRROR_USER_ID_PREFIX}${instance}" 'admin socket')
}
daemon_pid_file()
{
local cluster=$1
local instance
set_cluster_instance "${cluster}" cluster instance
echo $(ceph-conf --cluster $cluster --name "client.${MIRROR_USER_ID_PREFIX}${instance}" 'pid file')
}
testlog()
{
echo $(date '+%F %T') $@ | tee -a "${TEMPDIR}/rbd-mirror.test.log" >&2
}
expect_failure()
{
local expected="$1" ; shift
local out=${TEMPDIR}/expect_failure.out
if "$@" > ${out} 2>&1 ; then
cat ${out} >&2
return 1
fi
if [ -z "${expected}" ]; then
return 0
fi
if ! grep -q "${expected}" ${out} ; then
cat ${out} >&2
return 1
fi
return 0
}
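# Example (illustrative): `expect_failure "No such file" cat /nonexistent`
# succeeds only if cat fails and its combined output matches the pattern.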
mkfname()
{
echo "$@" | sed -e 's|[/ ]|_|g'
}
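# Example: `mkfname "cluster1-mirror/ns1 image"` prints
# "cluster1-mirror_ns1_image", safe to use as a file name.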
create_users()
{
local cluster=$1
CEPH_ARGS='' ceph --cluster "${cluster}" \
auth get-or-create client.${CEPH_ID} \
mon 'profile rbd' osd 'profile rbd' mgr 'profile rbd' >> \
${CEPH_ROOT}/run/${cluster}/keyring
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
CEPH_ARGS='' ceph --cluster "${cluster}" \
auth get-or-create client.${MIRROR_USER_ID_PREFIX}${instance} \
mon 'profile rbd-mirror' osd 'profile rbd' mgr 'profile rbd' >> \
${CEPH_ROOT}/run/${cluster}/keyring
done
}
setup_cluster()
{
local cluster=$1
CEPH_ARGS='' ${CEPH_SRC}/mstart.sh ${cluster} -n ${RBD_MIRROR_VARGS}
cd ${CEPH_ROOT}
rm -f ${TEMPDIR}/${cluster}.conf
ln -s $(readlink -f run/${cluster}/ceph.conf) \
${TEMPDIR}/${cluster}.conf
cd ${TEMPDIR}
create_users "${cluster}"
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
cat<<EOF >> ${TEMPDIR}/${cluster}.conf
[client.${MIRROR_USER_ID_PREFIX}${instance}]
admin socket = ${TEMPDIR}/rbd-mirror.\$cluster-\$name.asok
pid file = ${TEMPDIR}/rbd-mirror.\$cluster-\$name.pid
log file = ${TEMPDIR}/rbd-mirror.${cluster}_daemon.${instance}.log
EOF
done
}
peer_add()
{
local cluster=$1 ; shift
local pool=$1 ; shift
local client_cluster=$1 ; shift
local remote_cluster="${client_cluster##*@}"
local uuid_var_name
if [ -n "$1" ]; then
uuid_var_name=$1 ; shift
fi
local error_code
local peer_uuid
for s in 1 2 4 8 16 32; do
set +e
peer_uuid=$(rbd --cluster ${cluster} mirror pool peer add \
${pool} ${client_cluster} $@)
error_code=$?
set -e
if [ $error_code -eq 17 ]; then
# error 17 (EEXIST): raced with a remote heartbeat ping -- remove and retry
sleep $s
peer_uuid=$(rbd mirror pool info --cluster ${cluster} --pool ${pool} --format xml | \
xmlstarlet sel -t -v "//peers/peer[site_name='${remote_cluster}']/uuid")
CEPH_ARGS='' rbd --cluster ${cluster} --pool ${pool} mirror pool peer remove ${peer_uuid}
else
test $error_code -eq 0
if [ -n "$uuid_var_name" ]; then
eval ${uuid_var_name}=${peer_uuid}
fi
return 0
fi
done
return 1
}
setup_pools()
{
local cluster=$1
local remote_cluster=$2
local mon_map_file
local mon_addr
local admin_key_file
local uuid
CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${POOL} 64 64
CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${PARENT_POOL} 64 64
CEPH_ARGS='' rbd --cluster ${cluster} pool init ${POOL}
CEPH_ARGS='' rbd --cluster ${cluster} pool init ${PARENT_POOL}
if [ -n "${RBD_MIRROR_CONFIG_KEY}" ]; then
PEER_CLUSTER_SUFFIX=-DNE
fi
CEPH_ARGS='' rbd --cluster ${cluster} mirror pool enable \
--site-name ${cluster}${PEER_CLUSTER_SUFFIX} ${POOL} ${MIRROR_POOL_MODE}
rbd --cluster ${cluster} mirror pool enable ${PARENT_POOL} image
rbd --cluster ${cluster} namespace create ${POOL}/${NS1}
rbd --cluster ${cluster} namespace create ${POOL}/${NS2}
rbd --cluster ${cluster} mirror pool enable ${POOL}/${NS1} ${MIRROR_POOL_MODE}
rbd --cluster ${cluster} mirror pool enable ${POOL}/${NS2} image
if [ -z ${RBD_MIRROR_MANUAL_PEERS} ]; then
if [ -z ${RBD_MIRROR_CONFIG_KEY} ]; then
peer_add ${cluster} ${POOL} ${remote_cluster}
peer_add ${cluster} ${PARENT_POOL} ${remote_cluster}
else
mon_map_file=${TEMPDIR}/${remote_cluster}.monmap
CEPH_ARGS='' ceph --cluster ${remote_cluster} mon getmap > ${mon_map_file}
mon_addr=$(monmaptool --print ${mon_map_file} | grep -E 'mon\.' |
head -n 1 | sed -E 's/^[0-9]+: ([^ ]+).+$/\1/' | sed -E 's/\/[0-9]+//g')
admin_key_file=${TEMPDIR}/${remote_cluster}.client.${CEPH_ID}.key
CEPH_ARGS='' ceph --cluster ${remote_cluster} auth get-key client.${CEPH_ID} > ${admin_key_file}
CEPH_ARGS='' peer_add ${cluster} ${POOL} \
client.${CEPH_ID}@${remote_cluster}${PEER_CLUSTER_SUFFIX} '' \
--remote-mon-host "${mon_addr}" --remote-key-file ${admin_key_file}
peer_add ${cluster} ${PARENT_POOL} client.${CEPH_ID}@${remote_cluster}${PEER_CLUSTER_SUFFIX} uuid
CEPH_ARGS='' rbd --cluster ${cluster} mirror pool peer set ${PARENT_POOL} ${uuid} mon-host ${mon_addr}
CEPH_ARGS='' rbd --cluster ${cluster} mirror pool peer set ${PARENT_POOL} ${uuid} key-file ${admin_key_file}
fi
fi
}
setup_tempdir()
{
if [ -n "${RBD_MIRROR_TEMDIR}" ]; then
test -d "${RBD_MIRROR_TEMDIR}" ||
mkdir "${RBD_MIRROR_TEMDIR}"
TEMPDIR="${RBD_MIRROR_TEMDIR}"
cd ${TEMPDIR}
else
TEMPDIR=`mktemp -d`
fi
}
setup()
{
local c
trap 'cleanup $?' INT TERM EXIT
setup_tempdir
if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
setup_cluster "${CLUSTER1}"
setup_cluster "${CLUSTER2}"
fi
setup_pools "${CLUSTER1}" "${CLUSTER2}"
setup_pools "${CLUSTER2}" "${CLUSTER1}"
if [ -n "${RBD_MIRROR_MIN_COMPAT_CLIENT}" ]; then
CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd \
set-require-min-compat-client ${RBD_MIRROR_MIN_COMPAT_CLIENT}
CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd \
set-require-min-compat-client ${RBD_MIRROR_MIN_COMPAT_CLIENT}
fi
}
cleanup()
{
local error_code=$1
set +e
if [ "${error_code}" -ne 0 ]; then
status
fi
if [ -z "${RBD_MIRROR_NOCLEANUP}" ]; then
local cluster instance
CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
for cluster in "${CLUSTER1}" "${CLUSTER2}"; do
stop_mirrors "${cluster}"
done
if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
cd ${CEPH_ROOT}
CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER1}
CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER2}
fi
test "${RBD_MIRROR_TEMDIR}" = "${TEMPDIR}" || rm -Rf ${TEMPDIR}
fi
if [ "${error_code}" -eq 0 ]; then
echo "OK"
else
echo "FAIL"
fi
exit ${error_code}
}
start_mirror()
{
local cluster=$1
local instance
set_cluster_instance "${cluster}" cluster instance
test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return
rbd-mirror \
--cluster ${cluster} \
--id ${MIRROR_USER_ID_PREFIX}${instance} \
--rbd-mirror-delete-retry-interval=5 \
--rbd-mirror-image-state-check-interval=5 \
--rbd-mirror-journal-poll-age=1 \
--rbd-mirror-pool-replayers-refresh-interval=5 \
--debug-rbd=30 --debug-journaler=30 \
--debug-rbd_mirror=30 \
--daemonize=true \
${RBD_MIRROR_ARGS}
}
start_mirrors()
{
local cluster=$1
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
start_mirror "${cluster}:${instance}"
done
}
stop_mirror()
{
local cluster=$1
local sig=$2
test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return
local pid
pid=$(cat $(daemon_pid_file "${cluster}") 2>/dev/null) || :
if [ -n "${pid}" ]
then
kill ${sig} ${pid}
for s in 1 2 4 8 16 32; do
sleep $s
ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}' && break
done
ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}'
fi
rm -f $(daemon_asok_file "${cluster}" "${CLUSTER1}")
rm -f $(daemon_asok_file "${cluster}" "${CLUSTER2}")
rm -f $(daemon_pid_file "${cluster}")
}
stop_mirrors()
{
local cluster=$1
local sig=$2
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
stop_mirror "${cluster}:${instance}" "${sig}"
done
}
admin_daemon()
{
local cluster=$1 ; shift
local instance
set_cluster_instance "${cluster}" cluster instance
local asok_file=$(daemon_asok_file "${cluster}:${instance}" "${cluster}")
test -S "${asok_file}"
ceph --admin-daemon ${asok_file} $@
}
admin_daemons()
{
local cluster_instance=$1 ; shift
local cluster="${cluster_instance%:*}"
local instance="${cluster_instance##*:}"
local loop_instance
for s in 0 1 2 4 8 8 8 8 8 8 8 8 16 16; do
sleep ${s}
if [ "${instance}" != "${cluster_instance}" ]; then
admin_daemon "${cluster}:${instance}" $@ && return 0
else
for loop_instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
admin_daemon "${cluster}:${loop_instance}" $@ && return 0
done
fi
done
return 1
}
all_admin_daemons()
{
local cluster=$1 ; shift
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
admin_daemon "${cluster}:${instance}" $@
done
}
status()
{
local cluster daemon image_pool image_ns image
for cluster in ${CLUSTER1} ${CLUSTER2}
do
echo "${cluster} status"
CEPH_ARGS='' ceph --cluster ${cluster} -s
CEPH_ARGS='' ceph --cluster ${cluster} service dump
CEPH_ARGS='' ceph --cluster ${cluster} service status
echo
for image_pool in ${POOL} ${PARENT_POOL}
do
for image_ns in "" "${NS1}" "${NS2}"
do
echo "${cluster} ${image_pool} ${image_ns} images"
rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" ls -l
echo
echo "${cluster} ${image_pool}${image_ns} mirror pool info"
rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" mirror pool info
echo
echo "${cluster} ${image_pool}${image_ns} mirror pool status"
CEPH_ARGS='' rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" mirror pool status --verbose
echo
for image in `rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" ls 2>/dev/null`
do
echo "image ${image} info"
rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" info ${image}
echo
echo "image ${image} journal status"
rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" journal status --image ${image}
echo
echo "image ${image} snapshots"
rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" snap ls --all ${image}
echo
done
echo "${cluster} ${image_pool} ${image_ns} rbd_mirroring omap vals"
rados --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" listomapvals rbd_mirroring
echo "${cluster} ${image_pool} ${image_ns} rbd_mirror_leader omap vals"
rados --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" listomapvals rbd_mirror_leader
echo
done
done
done
local ret=0
for cluster in "${CLUSTER1}" "${CLUSTER2}"
do
for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
local pid_file=$(daemon_pid_file ${cluster}:${instance})
if [ ! -e ${pid_file} ]
then
echo "${cluster} rbd-mirror not running or unknown" \
"(${pid_file} not exist)"
continue
fi
local pid
pid=$(cat ${pid_file} 2>/dev/null) || :
if [ -z "${pid}" ]
then
echo "${cluster} rbd-mirror not running or unknown" \
"(can't find pid using ${pid_file})"
ret=1
continue
fi
echo "${daemon} rbd-mirror process in ps output:"
if ps auxww |
awk -v pid=${pid} 'NR == 1 {print} $2 == pid {print; exit 1}'
then
echo
echo "${cluster} rbd-mirror not running" \
"(can't find pid $pid in ps output)"
ret=1
continue
fi
echo
local asok_file=$(daemon_asok_file ${cluster}:${instance} ${cluster})
if [ ! -S "${asok_file}" ]
then
echo "${cluster} rbd-mirror asok is unknown (${asok_file} not exits)"
ret=1
continue
fi
echo "${cluster} rbd-mirror status"
ceph --admin-daemon ${asok_file} rbd mirror status
echo
done
done
return ${ret}
}
flush()
{
local cluster=$1
local pool=$2
local image=$3
local cmd="rbd mirror flush"
if [ -n "${image}" ]
then
cmd="${cmd} ${pool}/${image}"
fi
admin_daemons "${cluster}" ${cmd}
}
test_image_replay_state()
{
local cluster=$1
local pool=$2
local image=$3
local test_state=$4
local status_result
local current_state=stopped
status_result=$(admin_daemons "${cluster}" rbd mirror status ${pool}/${image} | grep -i 'state') || return 1
echo "${status_result}" | grep -i 'Replaying' && current_state=started
test "${test_state}" = "${current_state}"
}
wait_for_image_replay_state()
{
local cluster=$1
local pool=$2
local image=$3
local state=$4
local s
# TODO: add a way to force rbd-mirror to update replayers
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
sleep ${s}
test_image_replay_state "${cluster}" "${pool}" "${image}" "${state}" && return 0
done
return 1
}
wait_for_image_replay_started()
{
local cluster=$1
local pool=$2
local image=$3
wait_for_image_replay_state "${cluster}" "${pool}" "${image}" started
}
wait_for_image_replay_stopped()
{
local cluster=$1
local pool=$2
local image=$3
wait_for_image_replay_state "${cluster}" "${pool}" "${image}" stopped
}
get_journal_position()
{
local cluster=$1
local pool=$2
local image=$3
local id_regexp=$4
# Parse a line like the one below, looking for the first position
# [id=, commit_position=[positions=[[object_number=1, tag_tid=3, entry_tid=9], [object_number=0, tag_tid=3, entry_tid=8], [object_number=3, tag_tid=3, entry_tid=7], [object_number=2, tag_tid=3, entry_tid=6]]]]
local status_log=${TEMPDIR}/$(mkfname ${CLUSTER2}-${pool}-${image}.status)
rbd --cluster ${cluster} journal status --image ${pool}/${image} |
tee ${status_log} >&2
sed -nEe 's/^.*\[id='"${id_regexp}"',.*positions=\[\[([^]]*)\],.*state=connected.*$/\1/p' \
${status_log}
}
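# For instance, applied to the sample status line quoted in the comment above
# (assuming it also carries "state=connected"), the sed extracts the first
# position tuple: "object_number=1, tag_tid=3, entry_tid=9".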
get_master_journal_position()
{
local cluster=$1
local pool=$2
local image=$3
get_journal_position "${cluster}" "${pool}" "${image}" ''
}
get_mirror_journal_position()
{
local cluster=$1
local pool=$2
local image=$3
get_journal_position "${cluster}" "${pool}" "${image}" '..*'
}
wait_for_journal_replay_complete()
{
local local_cluster=$1
local cluster=$2
local pool=$3
local image=$4
local s master_pos mirror_pos last_mirror_pos
local master_tag master_entry mirror_tag mirror_entry
while true; do
for s in 0.2 0.4 0.8 1.6 2 2 4 4 8 8 16 16 32 32; do
sleep ${s}
flush "${local_cluster}" "${pool}" "${image}"
master_pos=$(get_master_journal_position "${cluster}" "${pool}" "${image}")
mirror_pos=$(get_mirror_journal_position "${cluster}" "${pool}" "${image}")
test -n "${master_pos}" -a "${master_pos}" = "${mirror_pos}" && return 0
test "${mirror_pos}" != "${last_mirror_pos}" && break
done
test "${mirror_pos}" = "${last_mirror_pos}" && return 1
last_mirror_pos="${mirror_pos}"
# handle the case where the mirror is ahead of the master
master_tag=$(echo "${master_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
mirror_tag=$(echo "${mirror_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
master_entry=$(echo "${master_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
mirror_entry=$(echo "${mirror_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
test "${master_tag}" = "${mirror_tag}" -a ${master_entry} -le ${mirror_entry} && return 0
done
return 1
}
mirror_image_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster "${cluster}" mirror image snapshot "${pool}/${image}"
}
get_newest_mirror_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local log=$4
rbd --cluster "${cluster}" snap list --all "${pool}/${image}" --format xml | \
xmlstarlet sel -t -c "//snapshots/snapshot[namespace/complete='true' and position()=last()]" > \
${log} || true
}
wait_for_snapshot_sync_complete()
{
local local_cluster=$1
local cluster=$2
local pool=$3
local image=$4
local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}-${image}.status)
local local_status_log=${TEMPDIR}/$(mkfname ${local_cluster}-${pool}-${image}.status)
mirror_image_snapshot "${cluster}" "${pool}" "${image}"
get_newest_mirror_snapshot "${cluster}" "${pool}" "${image}" "${status_log}"
local snapshot_id=$(xmlstarlet sel -t -v "//snapshot/id" < ${status_log})
while true; do
for s in 0.2 0.4 0.8 1.6 2 2 4 4 8 8 16 16 32 32; do
sleep ${s}
get_newest_mirror_snapshot "${local_cluster}" "${pool}" "${image}" "${local_status_log}"
local primary_snapshot_id=$(xmlstarlet sel -t -v "//snapshot/namespace/primary_snap_id" < ${local_status_log})
test "${snapshot_id}" = "${primary_snapshot_id}" && return 0
done
return 1
done
return 1
}
wait_for_replay_complete()
{
local local_cluster=$1
local cluster=$2
local pool=$3
local image=$4
if [ "${MIRROR_IMAGE_MODE}" = "journal" ]; then
wait_for_journal_replay_complete ${local_cluster} ${cluster} ${pool} ${image}
elif [ "${MIRROR_IMAGE_MODE}" = "snapshot" ]; then
wait_for_snapshot_sync_complete ${local_cluster} ${cluster} ${pool} ${image}
else
return 1
fi
}
test_status_in_pool_dir()
{
local cluster=$1
local pool=$2
local image=$3
local state_pattern="$4"
local description_pattern="$5"
local service_pattern="$6"
local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}-${image}.mirror_status)
CEPH_ARGS='' rbd --cluster ${cluster} mirror image status ${pool}/${image} |
tee ${status_log} >&2
grep "^ state: .*${state_pattern}" ${status_log} || return 1
grep "^ description: .*${description_pattern}" ${status_log} || return 1
if [ -n "${service_pattern}" ]; then
grep "service: *${service_pattern}" ${status_log} || return 1
elif echo ${state_pattern} | grep '^up+'; then
grep "service: *${MIRROR_USER_ID_PREFIX}.* on " ${status_log} || return 1
else
grep "service: " ${status_log} && return 1
fi
# recheck using `mirror pool status` command to stress test it.
local last_update="$(sed -nEe 's/^ last_update: *(.*) *$/\1/p' ${status_log})"
test_mirror_pool_status_verbose \
${cluster} ${pool} ${image} "${state_pattern}" "${last_update}" &&
return 0
echo "'mirror pool status' test failed" >&2
exit 1
}
test_mirror_pool_status_verbose()
{
local cluster=$1
local pool=$2
local image=$3
local state_pattern="$4"
local prev_last_update="$5"
local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}.mirror_status)
rbd --cluster ${cluster} mirror pool status ${pool} --verbose --format xml \
> ${status_log}
local last_update state
last_update=$($XMLSTARLET sel -t -v \
"//images/image[name='${image}']/last_update" < ${status_log})
state=$($XMLSTARLET sel -t -v \
"//images/image[name='${image}']/state" < ${status_log})
echo "${state}" | grep "${state_pattern}" ||
test "${last_update}" '>' "${prev_last_update}"
}
wait_for_status_in_pool_dir()
{
local cluster=$1
local pool=$2
local image=$3
local state_pattern="$4"
local description_pattern="$5"
local service_pattern="$6"
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
sleep ${s}
test_status_in_pool_dir ${cluster} ${pool} ${image} "${state_pattern}" \
"${description_pattern}" "${service_pattern}" &&
return 0
done
return 1
}
create_image()
{
local cluster=$1 ; shift
local pool=$1 ; shift
local image=$1 ; shift
local size=128
if [ -n "$1" ]; then
size=$1
shift
fi
rbd --cluster ${cluster} create --size ${size} \
--image-feature "${RBD_IMAGE_FEATURES}" $@ ${pool}/${image}
}
create_image_and_enable_mirror()
{
local cluster=$1 ; shift
local pool=$1 ; shift
local image=$1 ; shift
local mode=${1:-${MIRROR_IMAGE_MODE}}
if [ -n "$1" ]; then
shift
fi
create_image ${cluster} ${pool} ${image} $@
if [ "${MIRROR_POOL_MODE}" = "image" ] || [ "$pool" = "${PARENT_POOL}" ]; then
enable_mirror ${cluster} ${pool} ${image} ${mode}
fi
}
enable_journaling()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster ${cluster} feature enable ${pool}/${image} journaling
}
set_image_meta()
{
local cluster=$1
local pool=$2
local image=$3
local key=$4
local val=$5
rbd --cluster ${cluster} image-meta set ${pool}/${image} $key $val
}
compare_image_meta()
{
local cluster=$1
local pool=$2
local image=$3
local key=$4
local value=$5
test `rbd --cluster ${cluster} image-meta get ${pool}/${image} ${key}` = "${value}"
}
rename_image()
{
local cluster=$1
local pool=$2
local image=$3
local new_name=$4
rbd --cluster=${cluster} rename ${pool}/${image} ${pool}/${new_name}
}
remove_image()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster=${cluster} snap purge ${pool}/${image}
rbd --cluster=${cluster} rm ${pool}/${image}
}
remove_image_retry()
{
local cluster=$1
local pool=$2
local image=$3
for s in 0 1 2 4 8 16 32; do
sleep ${s}
remove_image ${cluster} ${pool} ${image} && return 0
done
return 1
}
trash_move() {
local cluster=$1
local pool=$2
local image=$3
rbd --cluster=${cluster} trash move ${pool}/${image}
}
trash_restore() {
local cluster=$1
local pool=$2
local image_id=$3
rbd --cluster=${cluster} trash restore ${pool}/${image_id}
}
clone_image()
{
local cluster=$1
local parent_pool=$2
local parent_image=$3
local parent_snap=$4
local clone_pool=$5
local clone_image=$6
shift 6
rbd --cluster ${cluster} clone \
${parent_pool}/${parent_image}@${parent_snap} \
${clone_pool}/${clone_image} --image-feature "${RBD_IMAGE_FEATURES}" $@
}
clone_image_and_enable_mirror()
{
local cluster=$1
local parent_pool=$2
local parent_image=$3
local parent_snap=$4
local clone_pool=$5
local clone_image=$6
shift 6
local mode=${1:-${MIRROR_IMAGE_MODE}}
if [ -n "$1" ]; then
shift
fi
clone_image ${cluster} ${parent_pool} ${parent_image} ${parent_snap} ${clone_pool} ${clone_image} $@
enable_mirror ${cluster} ${clone_pool} ${clone_image} ${mode}
}
disconnect_image()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster ${cluster} journal client disconnect \
--image ${pool}/${image}
}
create_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
rbd --cluster ${cluster} snap create ${pool}/${image}@${snap}
}
remove_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
rbd --cluster ${cluster} snap rm ${pool}/${image}@${snap}
}
rename_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
local new_snap=$5
rbd --cluster ${cluster} snap rename ${pool}/${image}@${snap} \
${pool}/${image}@${new_snap}
}
purge_snapshots()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster ${cluster} snap purge ${pool}/${image}
}
protect_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
rbd --cluster ${cluster} snap protect ${pool}/${image}@${snap}
}
unprotect_snapshot()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
rbd --cluster ${cluster} snap unprotect ${pool}/${image}@${snap}
}
unprotect_snapshot_retry()
{
local cluster=$1
local pool=$2
local image=$3
local snap=$4
for s in 0 1 2 4 8 16 32; do
sleep ${s}
unprotect_snapshot ${cluster} ${pool} ${image} ${snap} && return 0
done
return 1
}
wait_for_snap_present()
{
local cluster=$1
local pool=$2
local image=$3
local snap_name=$4
local s
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
sleep ${s}
rbd --cluster ${cluster} info ${pool}/${image}@${snap_name} || continue
return 0
done
return 1
}
test_snap_moved_to_trash()
{
local cluster=$1
local pool=$2
local image=$3
local snap_name=$4
rbd --cluster ${cluster} snap ls ${pool}/${image} --all |
grep -F " trash (${snap_name})"
}
wait_for_snap_moved_to_trash()
{
local s
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
sleep ${s}
test_snap_moved_to_trash $@ || continue
return 0
done
return 1
}
test_snap_removed_from_trash()
{
test_snap_moved_to_trash $@ && return 1
return 0
}
wait_for_snap_removed_from_trash()
{
local s
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
sleep ${s}
test_snap_removed_from_trash $@ || continue
return 0
done
return 1
}
write_image()
{
local cluster=$1
local pool=$2
local image=$3
local count=$4
local size=$5
test -n "${size}" || size=4096
rbd --cluster ${cluster} bench ${pool}/${image} --io-type write \
--io-size ${size} --io-threads 1 --io-total $((size * count)) \
--io-pattern rand
}
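# Run the random-write stress tool for a random 5-14 second interval.
# 'timeout' exiting with 124 means the writer was still running when the
# interval expired, which is the success case here.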
stress_write_image()
{
local cluster=$1
local pool=$2
local image=$3
local duration=$(awk 'BEGIN {srand(); print int(10 * rand()) + 5}')
set +e
timeout ${duration}s ceph_test_rbd_mirror_random_write \
--cluster ${cluster} ${pool} ${image} \
--debug-rbd=20 --debug-journaler=20 \
2> ${TEMPDIR}/rbd-mirror-random-write.log
error_code=$?
set -e
if [ $error_code -eq 124 ]; then
return 0
fi
return 1
}
show_diff()
{
local file1=$1
local file2=$2
xxd ${file1} > ${file1}.xxd
xxd ${file2} > ${file2}.xxd
sdiff -s ${file1}.xxd ${file2}.xxd | head -n 64
rm -f ${file1}.xxd ${file2}.xxd
}
compare_images()
{
local pool=$1
local image=$2
local ret=0
local rmt_export=${TEMPDIR}/$(mkfname ${CLUSTER2}-${pool}-${image}.export)
local loc_export=${TEMPDIR}/$(mkfname ${CLUSTER1}-${pool}-${image}.export)
rm -f ${rmt_export} ${loc_export}
rbd --cluster ${CLUSTER2} export ${pool}/${image} ${rmt_export}
rbd --cluster ${CLUSTER1} export ${pool}/${image} ${loc_export}
if ! cmp ${rmt_export} ${loc_export}
then
show_diff ${rmt_export} ${loc_export}
ret=1
fi
rm -f ${rmt_export} ${loc_export}
return ${ret}
}
compare_image_snapshots()
{
local pool=$1
local image=$2
local ret=0
# use mkfname as in compare_images so namespaced pool names ("pool/ns")
# produce valid file paths
local rmt_export=${TEMPDIR}/$(mkfname ${CLUSTER2}-${pool}-${image}.export)
local loc_export=${TEMPDIR}/$(mkfname ${CLUSTER1}-${pool}-${image}.export)
for snap_name in $(rbd --cluster ${CLUSTER1} --format xml \
snap list ${pool}/${image} | \
$XMLSTARLET sel -t -v "//snapshot/name" | \
grep -E -v "^\.rbd-mirror\."); do
rm -f ${rmt_export} ${loc_export}
rbd --cluster ${CLUSTER2} export ${pool}/${image}@${snap_name} ${rmt_export}
rbd --cluster ${CLUSTER1} export ${pool}/${image}@${snap_name} ${loc_export}
if ! cmp ${rmt_export} ${loc_export}
then
show_diff ${rmt_export} ${loc_export}
ret=1
fi
done
rm -f ${rmt_export} ${loc_export}
return ${ret}
}
demote_image()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster=${cluster} mirror image demote ${pool}/${image}
}
promote_image()
{
local cluster=$1
local pool=$2
local image=$3
local force=$4
rbd --cluster=${cluster} mirror image promote ${pool}/${image} ${force}
}
set_pool_mirror_mode()
{
local cluster=$1
local pool=$2
local mode=${3:-${MIRROR_POOL_MODE}}
rbd --cluster=${cluster} mirror pool enable ${pool} ${mode}
}
disable_mirror()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster=${cluster} mirror image disable ${pool}/${image}
}
enable_mirror()
{
local cluster=$1
local pool=$2
local image=$3
local mode=${4:-${MIRROR_IMAGE_MODE}}
rbd --cluster=${cluster} mirror image enable ${pool}/${image} ${mode}
# Display image info including the global image id for debugging purpose
rbd --cluster=${cluster} info ${pool}/${image}
}
test_image_present()
{
local cluster=$1
local pool=$2
local image=$3
local test_state=$4
local image_id=$5
local current_state=deleted
local current_image_id
current_image_id=$(get_image_id ${cluster} ${pool} ${image})
test -n "${current_image_id}" &&
test -z "${image_id}" -o "${image_id}" = "${current_image_id}" &&
current_state=present
test "${test_state}" = "${current_state}"
}
wait_for_image_present()
{
local cluster=$1
local pool=$2
local image=$3
local state=$4
local image_id=$5
local s
test -n "${image_id}" ||
image_id=$(get_image_id ${cluster} ${pool} ${image})
# TODO: add a way to force rbd-mirror to update replayers
for s in 0.1 1 2 4 8 8 8 8 8 8 8 8 16 16 32 32; do
sleep ${s}
test_image_present \
"${cluster}" "${pool}" "${image}" "${state}" "${image_id}" &&
return 0
done
return 1
}
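# The image id is the suffix of block_name_prefix (rbd_data.<id>) as
# reported by 'rbd info'.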
get_image_id()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster=${cluster} info ${pool}/${image} |
sed -ne 's/^.*block_name_prefix: rbd_data\.//p'
}
request_resync_image()
{
local cluster=$1
local pool=$2
local image=$3
local image_id_var_name=$4
eval "${image_id_var_name}='$(get_image_id ${cluster} ${pool} ${image})'"
eval 'test -n "$'${image_id_var_name}'"'
rbd --cluster=${cluster} mirror image resync ${pool}/${image}
}
get_image_data_pool()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster ${cluster} info ${pool}/${image} |
awk '$1 == "data_pool:" {print $2}'
}
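# Determine the clone format: v2 clones advertise the clone-child
# op_feature, otherwise assume v1; fails (exit 1) if the image has no parent.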
get_clone_format()
{
local cluster=$1
local pool=$2
local image=$3
rbd --cluster ${cluster} info ${pool}/${image} |
awk 'BEGIN {
format = 1
}
$1 == "parent:" {
parent = $2
}
/op_features: .*clone-child/ {
format = 2
}
END {
if (!parent) exit 1
print format
}'
}
list_omap_keys()
{
local cluster=$1
local pool=$2
local obj_name=$3
rados --cluster ${cluster} -p ${pool} listomapkeys ${obj_name}
}
count_omap_keys_with_filter()
{
local cluster=$1
local pool=$2
local obj_name=$3
local filter=$4
list_omap_keys ${cluster} ${pool} ${obj_name} | grep -c ${filter}
}
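# Poll with increasing backoff until no omap keys matching the filter
# remain on the object; gives up (returns 1) after ~100 seconds.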
wait_for_omap_keys()
{
local cluster=$1
local pool=$2
local obj_name=$3
local filter=$4
for s in 0 1 2 2 4 4 8 8 8 16 16 32; do
sleep $s
set +e
test "$(count_omap_keys_with_filter ${cluster} ${pool} ${obj_name} ${filter})" = 0
error_code=$?
set -e
if [ $error_code -eq 0 ]; then
return 0
fi
done
return 1
}
wait_for_image_in_omap()
{
local cluster=$1
local pool=$2
wait_for_omap_keys ${cluster} ${pool} rbd_mirroring status_global
wait_for_omap_keys ${cluster} ${pool} rbd_mirroring image_
wait_for_omap_keys ${cluster} ${pool} rbd_mirror_leader image_map
}
#
# Main
#
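# When invoked with arguments, run a single helper function inside an
# existing test environment (pointed to by RBD_MIRROR_TEMDIR) instead of
# performing full setup. Illustrative example (arguments are hypothetical):
#   RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
#       rbd_mirror_helpers.sh wait_for_image_in_omap cluster1 mirror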
if [ "$#" -gt 0 ]
then
if [ -z "${RBD_MIRROR_TEMDIR}" ]
then
echo "RBD_MIRROR_TEMDIR is not set" >&2
exit 1
fi
TEMPDIR="${RBD_MIRROR_TEMDIR}"
cd ${TEMPDIR}
$@
exit $?
fi
ceph-main/qa/workunits/rbd/rbd_mirror_journal.sh
#!/bin/sh -ex
#
# rbd_mirror_journal.sh - test rbd-mirror daemon in journal-based mirroring mode
#
# The script starts two ("local" and "remote") clusters using the mstart.sh
# script, creates a temporary directory used for cluster configs, daemon logs,
# admin sockets, and temporary files, and launches the rbd-mirror daemon.
#
. $(dirname $0)/rbd_mirror_helpers.sh
setup
testlog "TEST: add image and test replay"
start_mirrors ${CLUSTER1}
image=test
create_image ${CLUSTER2} ${POOL} ${image}
set_image_meta ${CLUSTER2} ${POOL} ${image} "key1" "value1"
set_image_meta ${CLUSTER2} ${POOL} ${image} "key2" "value2"
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
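# 'primary_position' in the status description indicates the local
# replayer has caught up to the primary's journal commit position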
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'down+unknown'
fi
compare_images ${POOL} ${image}
compare_image_meta ${CLUSTER1} ${POOL} ${image} "key1" "value1"
compare_image_meta ${CLUSTER1} ${POOL} ${image} "key2" "value2"
testlog "TEST: stop mirror, add image, start mirror and test replay"
stop_mirrors ${CLUSTER1}
image1=test1
create_image ${CLUSTER2} ${POOL} ${image1}
write_image ${CLUSTER2} ${POOL} ${image1} 100
start_mirrors ${CLUSTER1}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying' 'primary_position'
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image1} 'down+unknown'
fi
compare_images ${POOL} ${image1}
testlog "TEST: test the first image is replaying after restart"
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
testlog "TEST: stop/start/restart mirror via admin socket"
all_admin_daemons ${CLUSTER1} rbd mirror stop
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror start
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror restart
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror stop
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror restart
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
flush ${CLUSTER1}
all_admin_daemons ${CLUSTER1} rbd mirror status
fi
remove_image_retry ${CLUSTER2} ${POOL} ${image1}
testlog "TEST: test image rename"
new_name="${image}_RENAMED"
rename_image ${CLUSTER2} ${POOL} ${image} ${new_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
admin_daemons ${CLUSTER1} rbd mirror status ${POOL}/${new_name}
admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${new_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
rename_image ${CLUSTER2} ${POOL} ${new_name} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: test trash move restore"
image_id=$(get_image_id ${CLUSTER2} ${POOL} ${image})
trash_move ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
trash_restore ${CLUSTER2} ${POOL} ${image_id}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: check if removed images' OMAP are removed (with rbd-mirror on one cluster)"
remove_image_retry ${CLUSTER2} ${POOL} ${image}
wait_for_image_in_omap ${CLUSTER1} ${POOL}
wait_for_image_in_omap ${CLUSTER2} ${POOL}
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
testlog "TEST: failover and failback"
start_mirrors ${CLUSTER2}
# demote and promote same cluster
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
# failover (unmodified)
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
# failback (unmodified)
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
compare_images ${POOL} ${image}
# failover
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
write_image ${CLUSTER1} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
# failback
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
compare_images ${POOL} ${image}
# force promote
force_promote_image=test_force_promote
create_image ${CLUSTER2} ${POOL} ${force_promote_image}
write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${force_promote_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+replaying' 'primary_position'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
promote_image ${CLUSTER1} ${POOL} ${force_promote_image} '--force'
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${force_promote_image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
write_image ${CLUSTER1} ${POOL} ${force_promote_image} 100
write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
remove_image_retry ${CLUSTER1} ${POOL} ${force_promote_image}
remove_image_retry ${CLUSTER2} ${POOL} ${force_promote_image}
testlog "TEST: cloned images"
testlog " - default"
parent_image=test_parent
parent_snap=snap
create_image ${CLUSTER2} ${PARENT_POOL} ${parent_image}
write_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} 100
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
protect_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
clone_image=test_clone
clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}
write_image ${CLUSTER2} ${POOL} ${clone_image} 100
enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} journal
wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} ${parent_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} ${parent_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${PARENT_POOL} ${parent_image} 'up+replaying' 'primary_position'
compare_images ${PARENT_POOL} ${parent_image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${clone_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${clone_image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${clone_image}
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}
testlog " - clone v1"
clone_image ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}1
clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
${clone_image}_v1 --rbd-default-clone-format 1
test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v1) = 1
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v1
test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v1) = 1
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v1
remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}1
unprotect_snapshot_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
testlog " - clone v2"
parent_snap=snap_v2
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
${clone_image}_v2 --rbd-default-clone-format 2
test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v2) = 2
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v2
test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v2) = 2
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
test_snap_moved_to_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v2
wait_for_image_present ${CLUSTER1} ${POOL} ${clone_image}_v2 'deleted'
test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
testlog " - clone v2 non-primary"
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_present ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
clone_image ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
${clone_image}_v2 --rbd-default-clone-format 2
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}_v2
wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image}
testlog "TEST: data pool"
dp_image=test_data_pool
create_image ${CLUSTER2} ${POOL} ${dp_image} 128 --data-pool ${PARENT_POOL}
data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${dp_image}
data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap1'
write_image ${CLUSTER2} ${POOL} ${dp_image} 100
create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap2'
write_image ${CLUSTER2} ${POOL} ${dp_image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${dp_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${dp_image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${dp_image}@snap1
compare_images ${POOL} ${dp_image}@snap2
compare_images ${POOL} ${dp_image}
remove_image_retry ${CLUSTER2} ${POOL} ${dp_image}
testlog "TEST: disable mirroring / delete non-primary image"
image2=test2
image3=test3
image4=test4
image5=test5
for i in ${image2} ${image3} ${image4} ${image5}; do
create_image ${CLUSTER2} ${POOL} ${i}
write_image ${CLUSTER2} ${POOL} ${i} 100
create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
if [ "${i}" = "${image4}" ] || [ "${i}" = "${image5}" ]; then
protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
fi
write_image ${CLUSTER2} ${POOL} ${i} 100
wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
done
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
for i in ${image2} ${image4}; do
disable_mirror ${CLUSTER2} ${POOL} ${i}
done
unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap1'
unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap2'
for i in ${image3} ${image5}; do
remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
remove_image_retry ${CLUSTER2} ${POOL} ${i}
done
for i in ${image2} ${image3} ${image4} ${image5}; do
wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'deleted'
done
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'pool'
for i in ${image2} ${image4}; do
enable_journaling ${CLUSTER2} ${POOL} ${i}
wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${i}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${i}
compare_images ${POOL} ${i}
done
testlog "TEST: remove mirroring pool"
pool=pool_to_remove
for cluster in ${CLUSTER1} ${CLUSTER2}; do
CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${pool} 16 16
CEPH_ARGS='' rbd --cluster ${cluster} pool init ${pool}
rbd --cluster ${cluster} mirror pool enable ${pool} pool
done
peer_add ${CLUSTER1} ${pool} ${CLUSTER2}
peer_add ${CLUSTER2} ${pool} ${CLUSTER1}
rdp_image=test_remove_data_pool
create_image ${CLUSTER2} ${pool} ${image} 128
create_image ${CLUSTER2} ${POOL} ${rdp_image} 128 --data-pool ${pool}
write_image ${CLUSTER2} ${pool} ${image} 100
write_image ${CLUSTER2} ${POOL} ${rdp_image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${pool} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${pool} ${image} 'up+replaying' 'primary_position'
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${rdp_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${rdp_image} 'up+replaying' 'primary_position'
for cluster in ${CLUSTER1} ${CLUSTER2}; do
CEPH_ARGS='' ceph --cluster ${cluster} osd pool rm ${pool} ${pool} --yes-i-really-really-mean-it
done
remove_image_retry ${CLUSTER2} ${POOL} ${rdp_image}
wait_for_image_present ${CLUSTER1} ${POOL} ${rdp_image} 'deleted'
for i in 0 1 2 4 8 8 8 8 16 16; do
sleep $i
admin_daemons "${CLUSTER2}" rbd mirror status ${pool}/${image} || break
done
admin_daemons "${CLUSTER2}" rbd mirror status ${pool}/${image} && false
testlog "TEST: snapshot rename"
snap_name='snap_rename'
create_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_0"
for i in `seq 1 20`; do
rename_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_$(expr ${i} - 1)" "${snap_name}_${i}"
done
wait_for_snap_present ${CLUSTER1} ${POOL} ${image2} "${snap_name}_${i}"
unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap1'
unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap2'
for i in ${image2} ${image4}; do
remove_image_retry ${CLUSTER2} ${POOL} ${i}
done
testlog "TEST: disable mirror while daemon is stopped"
stop_mirrors ${CLUSTER1}
stop_mirrors ${CLUSTER2}
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
disable_mirror ${CLUSTER2} ${POOL} ${image}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
test_image_present ${CLUSTER1} ${POOL} ${image} 'present'
fi
start_mirrors ${CLUSTER1}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'pool'
enable_journaling ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: non-default namespace image mirroring"
testlog " - replay"
create_image ${CLUSTER2} ${POOL}/${NS1} ${image}
create_image ${CLUSTER2} ${POOL}/${NS2} ${image}
enable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image} journal
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS2} ${image}
write_image ${CLUSTER2} ${POOL}/${NS1} ${image} 100
write_image ${CLUSTER2} ${POOL}/${NS2} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS2} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${image} 'up+replaying' 'primary_position'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS2} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL}/${NS1} ${image}
compare_images ${POOL}/${NS2} ${image}
testlog " - disable mirroring / delete image"
remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${image}
disable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
wait_for_image_present ${CLUSTER1} ${POOL}/${NS1} ${image} 'deleted'
wait_for_image_present ${CLUSTER1} ${POOL}/${NS2} ${image} 'deleted'
remove_image_retry ${CLUSTER2} ${POOL}/${NS2} ${image}
testlog " - data pool"
dp_image=test_data_pool
create_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 128 --data-pool ${PARENT_POOL}
data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL}/${NS1} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${dp_image}
data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL}/${NS1} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
write_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${dp_image} 'up+replaying' 'primary_position'
compare_images ${POOL}/${NS1} ${dp_image}
remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
testlog "TEST: simple image resync"
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
testlog "TEST: image resync while replayer is stopped"
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
fi
testlog "TEST: request image resync while daemon is offline"
stop_mirrors ${CLUSTER1}
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
start_mirrors ${CLUSTER1}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
compare_images ${POOL} ${image}
remove_image_retry ${CLUSTER2} ${POOL} ${image}
testlog "TEST: client disconnect"
image=laggy
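# a 64K journal object size makes object sets roll over quickly, so the
# max_concurrent_object_sets limit exercised below can be hit with little I/O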
create_image ${CLUSTER2} ${POOL} ${image} 128 --journal-object-size 64K
write_image ${CLUSTER2} ${POOL} ${image} 10
testlog " - replay stopped after disconnect"
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
disconnect_image ${CLUSTER2} ${POOL} ${image}
test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
testlog " - replay started after resync requested"
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
compare_images ${POOL} ${image}
testlog " - disconnected after max_concurrent_object_sets reached"
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
set_image_meta ${CLUSTER2} ${POOL} ${image} \
conf_rbd_journal_max_concurrent_object_sets 1
write_image ${CLUSTER2} ${POOL} ${image} 20 16384
write_image ${CLUSTER2} ${POOL} ${image} 20 16384
test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
set_image_meta ${CLUSTER2} ${POOL} ${image} \
conf_rbd_journal_max_concurrent_object_sets 0
testlog " - replay is still stopped (disconnected) after restart"
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
fi
testlog " - replay started after resync requested"
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
compare_images ${POOL} ${image}
testlog " - rbd_mirroring_resync_after_disconnect config option"
set_image_meta ${CLUSTER2} ${POOL} ${image} \
conf_rbd_mirroring_resync_after_disconnect true
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
image_id=$(get_image_id ${CLUSTER1} ${POOL} ${image})
disconnect_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
compare_images ${POOL} ${image}
set_image_meta ${CLUSTER2} ${POOL} ${image} \
conf_rbd_mirroring_resync_after_disconnect false
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
disconnect_image ${CLUSTER2} ${POOL} ${image}
test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
remove_image_retry ${CLUSTER2} ${POOL} ${image}
testlog "TEST: split-brain"
image=split-brain
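# force-promoting the non-primary side while the old primary diverges
# produces conflicting journal histories; the replayer must report
# 'split-brain' until an explicit resync is requested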
create_image ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
promote_image ${CLUSTER1} ${POOL} ${image} --force
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
write_image ${CLUSTER1} ${POOL} ${image} 10
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain'
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
remove_image_retry ${CLUSTER2} ${POOL} ${image}
testlog "TEST: check if removed images' OMAP are removed"
start_mirrors ${CLUSTER2}
wait_for_image_in_omap ${CLUSTER1} ${POOL}
wait_for_image_in_omap ${CLUSTER2} ${POOL}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
# teuthology will trash the daemon
testlog "TEST: no blocklists"
CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
fi
ceph-main/qa/workunits/rbd/rbd_mirror_snapshot.sh
#!/bin/sh -ex
#
# rbd_mirror_snapshot.sh - test rbd-mirror daemon in snapshot-based mirroring mode
#
# The script starts two ("local" and "remote") clusters using the mstart.sh
# script, creates a temporary directory used for cluster configs, daemon logs,
# admin sockets, and temporary files, and launches the rbd-mirror daemon.
#
MIRROR_POOL_MODE=image
MIRROR_IMAGE_MODE=snapshot
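# defaults consumed by rbd_mirror_helpers.sh: enable mirroring per image
# (not pool-wide), using snapshot-based mirroring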
. $(dirname $0)/rbd_mirror_helpers.sh
setup
testlog "TEST: add image and test replay"
start_mirrors ${CLUSTER1}
image=test
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
set_image_meta ${CLUSTER2} ${POOL} ${image} "key1" "value1"
set_image_meta ${CLUSTER2} ${POOL} ${image} "key2" "value2"
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'down+unknown'
fi
compare_images ${POOL} ${image}
compare_image_meta ${CLUSTER1} ${POOL} ${image} "key1" "value1"
compare_image_meta ${CLUSTER1} ${POOL} ${image} "key2" "value2"
testlog "TEST: stop mirror, add image, start mirror and test replay"
stop_mirrors ${CLUSTER1}
image1=test1
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image1}
write_image ${CLUSTER2} ${POOL} ${image1} 100
start_mirrors ${CLUSTER1}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image1} 'down+unknown'
fi
compare_images ${POOL} ${image1}
testlog "TEST: test the first image is replaying after restart"
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
testlog "TEST: stop/start/restart mirror via admin socket"
all_admin_daemons ${CLUSTER1} rbd mirror stop
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror start
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror restart
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror stop
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror restart
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
flush ${CLUSTER1}
all_admin_daemons ${CLUSTER1} rbd mirror status
fi
remove_image_retry ${CLUSTER2} ${POOL} ${image1}
testlog "TEST: test image rename"
new_name="${image}_RENAMED"
rename_image ${CLUSTER2} ${POOL} ${image} ${new_name}
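# snapshot-based mirroring only propagates changes at mirror snapshot
# points, so take one explicitly for the rename to reach the peer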
mirror_image_snapshot ${CLUSTER2} ${POOL} ${new_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
admin_daemons ${CLUSTER1} rbd mirror status ${POOL}/${new_name}
admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${new_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
rename_image ${CLUSTER2} ${POOL} ${new_name} ${image}
mirror_image_snapshot ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: test trash move restore"
image_id=$(get_image_id ${CLUSTER2} ${POOL} ${image})
trash_move ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
trash_restore ${CLUSTER2} ${POOL} ${image_id}
enable_mirror ${CLUSTER2} ${POOL} ${image} snapshot
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: check if removed images' OMAP are removed (with rbd-mirror on one cluster)"
remove_image_retry ${CLUSTER2} ${POOL} ${image}
wait_for_image_in_omap ${CLUSTER1} ${POOL}
wait_for_image_in_omap ${CLUSTER2} ${POOL}
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
testlog "TEST: failover and failback"
start_mirrors ${CLUSTER2}
# demote and promote same cluster
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
# failover (unmodified)
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
# failback (unmodified)
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
compare_images ${POOL} ${image}
# failover
demote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
write_image ${CLUSTER1} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
# failback
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
promote_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
write_image ${CLUSTER2} ${POOL} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
compare_images ${POOL} ${image}
# force promote
force_promote_image=test_force_promote
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${force_promote_image}
write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${force_promote_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
promote_image ${CLUSTER1} ${POOL} ${force_promote_image} '--force'
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${force_promote_image}
wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+stopped'
wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
write_image ${CLUSTER1} ${POOL} ${force_promote_image} 100
write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
remove_image_retry ${CLUSTER1} ${POOL} ${force_promote_image}
remove_image_retry ${CLUSTER2} ${POOL} ${force_promote_image}
testlog "TEST: cloned images"
testlog " - default"
parent_image=test_parent
parent_snap=snap
create_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image}
write_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} 100
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
protect_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
clone_image=test_clone
clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}
write_image ${CLUSTER2} ${POOL} ${clone_image} 100
enable_mirror ${CLUSTER2} ${POOL} ${clone_image} snapshot
wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} ${parent_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} ${parent_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${PARENT_POOL} ${parent_image} 'up+replaying'
compare_images ${PARENT_POOL} ${parent_image}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${clone_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${clone_image} 'up+replaying'
compare_images ${POOL} ${clone_image}
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}
testlog " - clone v1"
clone_image_and_enable_mirror ${CLUSTER1} ${PARENT_POOL} ${parent_image} \
${parent_snap} ${POOL} ${clone_image}1
clone_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} \
${parent_snap} ${POOL} ${clone_image}_v1 snapshot --rbd-default-clone-format 1
test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v1) = 1
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v1
test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v1) = 1
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v1
remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}1
unprotect_snapshot_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
testlog " - clone v2"
parent_snap=snap_v2
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
clone_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} \
${parent_snap} ${POOL} ${clone_image}_v2 snapshot --rbd-default-clone-format 2
test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v2) = 2
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v2
test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v2) = 2
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
test_snap_moved_to_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v2
wait_for_image_present ${CLUSTER1} ${POOL} ${clone_image}_v2 'deleted'
test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
testlog " - clone v2 non-primary"
create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
wait_for_snap_present ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
clone_image_and_enable_mirror ${CLUSTER1} ${PARENT_POOL} ${parent_image} \
${parent_snap} ${POOL} ${clone_image}_v2 snapshot --rbd-default-clone-format 2
remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}_v2
wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
remove_image_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image}
testlog "TEST: data pool"
dp_image=test_data_pool
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${dp_image} snapshot 128 --data-pool ${PARENT_POOL}
data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${dp_image}
data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap1'
write_image ${CLUSTER2} ${POOL} ${dp_image} 100
create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap2'
write_image ${CLUSTER2} ${POOL} ${dp_image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${dp_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${dp_image} 'up+replaying'
compare_images ${POOL} ${dp_image}@snap1
compare_images ${POOL} ${dp_image}@snap2
compare_images ${POOL} ${dp_image}
remove_image_retry ${CLUSTER2} ${POOL} ${dp_image}
testlog "TEST: disable mirroring / delete non-primary image"
image2=test2
image3=test3
image4=test4
image5=test5
for i in ${image2} ${image3} ${image4} ${image5}; do
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${i}
write_image ${CLUSTER2} ${POOL} ${i} 100
create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
if [ "${i}" = "${image4}" ] || [ "${i}" = "${image5}" ]; then
protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
fi
write_image ${CLUSTER2} ${POOL} ${i} 100
mirror_image_snapshot ${CLUSTER2} ${POOL} ${i}
wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
done
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
for i in ${image2} ${image4}; do
disable_mirror ${CLUSTER2} ${POOL} ${i}
done
unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap1'
unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap2'
for i in ${image3} ${image5}; do
remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
remove_image_retry ${CLUSTER2} ${POOL} ${i}
done
for i in ${image2} ${image3} ${image4} ${image5}; do
wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'deleted'
done
testlog "TEST: snapshot rename"
snap_name='snap_rename'
enable_mirror ${CLUSTER2} ${POOL} ${image2}
create_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_0"
for i in `seq 1 20`; do
rename_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_$(expr ${i} - 1)" "${snap_name}_${i}"
done
mirror_image_snapshot ${CLUSTER2} ${POOL} ${image2}
wait_for_snap_present ${CLUSTER1} ${POOL} ${image2} "${snap_name}_${i}"
unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap1'
unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap2'
for i in ${image2} ${image4}; do
remove_image_retry ${CLUSTER2} ${POOL} ${i}
done
testlog "TEST: disable mirror while daemon is stopped"
stop_mirrors ${CLUSTER1}
stop_mirrors ${CLUSTER2}
disable_mirror ${CLUSTER2} ${POOL} ${image}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
test_image_present ${CLUSTER1} ${POOL} ${image} 'present'
fi
start_mirrors ${CLUSTER1}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
enable_mirror ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
testlog "TEST: non-default namespace image mirroring"
testlog " - replay"
create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS1} ${image}
create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${image}
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS2} ${image}
write_image ${CLUSTER2} ${POOL}/${NS1} ${image} 100
write_image ${CLUSTER2} ${POOL}/${NS2} ${image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS2} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${image} 'up+replaying'
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS2} ${image} 'up+replaying'
compare_images ${POOL}/${NS1} ${image}
compare_images ${POOL}/${NS2} ${image}
testlog " - disable mirroring / delete image"
remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${image}
disable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
wait_for_image_present ${CLUSTER1} ${POOL}/${NS1} ${image} 'deleted'
wait_for_image_present ${CLUSTER1} ${POOL}/${NS2} ${image} 'deleted'
remove_image_retry ${CLUSTER2} ${POOL}/${NS2} ${image}
testlog " - data pool"
dp_image=test_data_pool
create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS1} ${dp_image} snapshot 128 --data-pool ${PARENT_POOL}
data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL}/${NS1} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${dp_image}
data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL}/${NS1} ${dp_image})
test "${data_pool}" = "${PARENT_POOL}"
write_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 100
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${dp_image} 'up+replaying'
compare_images ${POOL}/${NS1} ${dp_image}
remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
testlog "TEST: simple image resync"
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
testlog "TEST: image resync while replayer is stopped"
admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
fi
testlog "TEST: request image resync while daemon is offline"
stop_mirrors ${CLUSTER1}
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
start_mirrors ${CLUSTER1}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
compare_images ${POOL} ${image}
remove_image_retry ${CLUSTER2} ${POOL} ${image}
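# Force-promoting the non-primary image creates divergent histories on the
# two clusters; after demotion the mirror daemon reports split-brain until a
# resync is requested.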
testlog "TEST: split-brain"
image=split-brain
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
promote_image ${CLUSTER1} ${POOL} ${image} --force
wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
write_image ${CLUSTER1} ${POOL} ${image} 10
demote_image ${CLUSTER1} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain'
request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
remove_image_retry ${CLUSTER2} ${POOL} ${image}
testlog "TEST: check if removed images' OMAP are removed"
start_mirrors ${CLUSTER2}
wait_for_image_in_omap ${CLUSTER1} ${POOL}
wait_for_image_in_omap ${CLUSTER2} ${POOL}
if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
# teuthology will trash the daemon
testlog "TEST: no blocklists"
CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
fi
ceph-main/qa/workunits/rbd/rbd_mirror_stress.sh
#!/bin/sh -ex
#
# rbd_mirror_stress.sh - stress test rbd-mirror daemon
#
# The following additional environment variables affect the test:
#
# RBD_MIRROR_REDUCE_WRITES - if not empty, don't run the stress bench write
# tool during the many image test
#
IMAGE_COUNT=50
export LOCKDEP=0
. $(dirname $0)/rbd_mirror_helpers.sh
setup
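# Take a snapshot while capturing verbose rbd/journaler logs, so failed
# snapshot replays can be diagnosed from ${TEMPDIR}/rbd-snap-create.log.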
create_snap()
{
local cluster=$1
local pool=$2
local image=$3
local snap_name=$4
rbd --cluster ${cluster} -p ${pool} snap create ${image}@${snap_name} \
--debug-rbd=20 --debug-journaler=20 2> ${TEMPDIR}/rbd-snap-create.log
}
compare_image_snaps()
{
local pool=$1
local image=$2
local snap_name=$3
local ret=0
local rmt_export=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.export
local loc_export=${TEMPDIR}/${CLUSTER1}-${pool}-${image}.export
rm -f ${rmt_export} ${loc_export}
rbd --cluster ${CLUSTER2} -p ${pool} export ${image}@${snap_name} ${rmt_export}
rbd --cluster ${CLUSTER1} -p ${pool} export ${image}@${snap_name} ${loc_export}
if ! cmp ${rmt_export} ${loc_export}
then
show_diff ${rmt_export} ${loc_export}
ret=1
fi
rm -f ${rmt_export} ${loc_export}
return ${ret}
}
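# Wait until 'mirror pool status' reports the expected image count. The
# 40 x 30s timeout window restarts whenever the count makes forward progress,
# so the wait only fails once the count stalls for a full window.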
wait_for_pool_images()
{
local cluster=$1
local pool=$2
local image_count=$3
local s
local count
local last_count=0
while true; do
for s in `seq 1 40`; do
test $s -ne 1 && sleep 30
count=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'images: ' | cut -d' ' -f 2)
test "${count}" = "${image_count}" && return 0
# reset timeout if making forward progress
test $count -ne $last_count && break
done
test $count -eq $last_count && break
last_count=$count
done
rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
return 1
}
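# Poll 'mirror pool status' until the image health reaches OK, bailing out
# early if it reports ERROR.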
wait_for_pool_healthy()
{
local cluster=$1
local pool=$2
local s
local state
for s in `seq 1 40`; do
test $s -ne 1 && sleep 30
state=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'image health:' | cut -d' ' -f 3)
test "${state}" = "ERROR" && break
test "${state}" = "OK" && return 0
done
rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
return 1
}
start_mirrors ${CLUSTER1}
start_mirrors ${CLUSTER2}
testlog "TEST: add image and test replay after client crashes"
image=test
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} ${MIRROR_IMAGE_MODE} '512M'
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
clean_snap_name=
for i in `seq 1 10`
do
stress_write_image ${CLUSTER2} ${POOL} ${image}
wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
snap_name="snap${i}"
create_snap ${CLUSTER2} ${POOL} ${image} ${snap_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${snap_name}
if [ -n "${clean_snap_name}" ]; then
compare_image_snaps ${POOL} ${image} ${clean_snap_name}
fi
compare_image_snaps ${POOL} ${image} ${snap_name}
clean_snap_name="snap${i}-clean"
create_snap ${CLUSTER2} ${POOL} ${image} ${clean_snap_name}
done
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${clean_snap_name}
for i in `seq 1 10`
do
snap_name="snap${i}"
compare_image_snaps ${POOL} ${image} ${snap_name}
snap_name="snap${i}-clean"
compare_image_snaps ${POOL} ${image} ${snap_name}
done
for i in `seq 1 10`
do
snap_name="snap${i}"
remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
snap_name="snap${i}-clean"
remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
done
remove_image_retry ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
testlog "TEST: create many images"
snap_name="snap"
for i in `seq 1 ${IMAGE_COUNT}`
do
image="image_${i}"
create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} ${MIRROR_IMAGE_MODE} '128M'
if [ -n "${RBD_MIRROR_REDUCE_WRITES}" ]; then
write_image ${CLUSTER2} ${POOL} ${image} 100
else
stress_write_image ${CLUSTER2} ${POOL} ${image}
fi
done
wait_for_pool_images ${CLUSTER2} ${POOL} ${IMAGE_COUNT}
wait_for_pool_healthy ${CLUSTER2} ${POOL}
wait_for_pool_images ${CLUSTER1} ${POOL} ${IMAGE_COUNT}
wait_for_pool_healthy ${CLUSTER1} ${POOL}
testlog "TEST: compare many images"
for i in `seq 1 ${IMAGE_COUNT}`
do
image="image_${i}"
create_snap ${CLUSTER2} ${POOL} ${image} ${snap_name}
wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${snap_name}
compare_image_snaps ${POOL} ${image} ${snap_name}
done
testlog "TEST: delete many images"
for i in `seq 1 ${IMAGE_COUNT}`
do
image="image_${i}"
remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
remove_image_retry ${CLUSTER2} ${POOL} ${image}
done
testlog "TEST: image deletions should propagate"
wait_for_pool_images ${CLUSTER1} ${POOL} 0
wait_for_pool_healthy ${CLUSTER1} ${POOL}
for i in `seq 1 ${IMAGE_COUNT}`
do
image="image_${i}"
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
done
testlog "TEST: delete images during bootstrap"
set_pool_mirror_mode ${CLUSTER1} ${POOL} 'image'
set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
start_mirror ${CLUSTER1}
image=test
for i in `seq 1 10`
do
image="image_${i}"
create_image ${CLUSTER2} ${POOL} ${image} '512M'
enable_mirror ${CLUSTER2} ${POOL} ${image}
stress_write_image ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
disable_mirror ${CLUSTER2} ${POOL} ${image}
wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
purge_snapshots ${CLUSTER2} ${POOL} ${image}
remove_image_retry ${CLUSTER2} ${POOL} ${image}
done
testlog "TEST: check if removed images' OMAP are removed"
wait_for_image_in_omap ${CLUSTER1} ${POOL}
wait_for_image_in_omap ${CLUSTER2} ${POOL}
ceph-main/qa/workunits/rbd/read-flags.sh
#!/usr/bin/env bash
set -ex
# create a snapshot, then export it and check that setting read flags works
# by looking at --debug-ms output
function clean_up {
rm -f test.log || true
rbd snap remove test@snap || true
rbd rm test || true
}
function test_read_flags {
local IMAGE=$1
local SET_BALANCED=$2
local SET_LOCALIZED=$3
local EXPECT_BALANCED=$4
local EXPECT_LOCALIZED=$5
local EXTRA_ARGS="--log-file test.log --debug-ms 1 --no-log-to-stderr"
if [ "$SET_BALANCED" = 'y' ]; then
EXTRA_ARGS="$EXTRA_ARGS --rbd-balance-snap-reads"
elif [ "$SET_LOCALIZED" = 'y' ]; then
EXTRA_ARGS="$EXTRA_ARGS --rbd-localize-snap-reads"
fi
rbd export $IMAGE - $EXTRA_ARGS > /dev/null
if [ "$EXPECT_BALANCED" = 'y' ]; then
grep -q balance_reads test.log
else
grep -L balance_reads test.log | grep -q test.log
fi
if [ "$EXPECT_LOCALIZED" = 'y' ]; then
grep -q localize_reads test.log
else
grep -L localize_reads test.log | grep -q test.log
fi
rm -f test.log
}
clean_up
trap clean_up INT TERM EXIT
rbd create --image-feature layering -s 10 test
rbd snap create test@snap
# export from non snapshot with or without settings should not have flags
test_read_flags test n n n n
test_read_flags test y y n n
# export from snapshot should have read flags in log if they are set
test_read_flags test@snap n n n n
test_read_flags test@snap y n y n
test_read_flags test@snap n y n y
# balanced_reads happens to take priority over localize_reads
test_read_flags test@snap y y y n
echo OK
ceph-main/qa/workunits/rbd/simple_big.sh
#!/bin/sh -ex
mb=100000
rbd create foo --size $mb
DEV=$(sudo rbd map foo)
dd if=/dev/zero of=$DEV bs=1M count=$mb
dd if=$DEV of=/dev/null bs=1M count=$mb
sudo rbd unmap $DEV
rbd rm foo
echo OK
ceph-main/qa/workunits/rbd/test_admin_socket.sh
#!/usr/bin/env bash
set -ex
TMPDIR=/tmp/rbd_test_admin_socket$$
mkdir $TMPDIR
trap "rm -fr $TMPDIR" 0
. $(dirname $0)/../../standalone/ceph-helpers.sh
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
function rbd_watch_out_file()
{
echo ${TMPDIR}/rbd_watch_$1.out
}
function rbd_watch_pid_file()
{
echo ${TMPDIR}/rbd_watch_$1.pid
}
function rbd_watch_fifo()
{
echo ${TMPDIR}/rbd_watch_$1.fifo
}
function rbd_watch_asok()
{
echo ${TMPDIR}/rbd_watch_$1.asok
}
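# Look up the image's librbd perf counter in the watcher's perf schema, then
# extract its current value from a perf dump via xmlstarlet.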
function rbd_get_perfcounter()
{
local image=$1
local counter=$2
local name
name=$(ceph --format xml --admin-daemon $(rbd_watch_asok ${image}) \
perf schema | $XMLSTARLET el -d3 |
grep "/librbd-.*-${image}/${counter}\$")
test -n "${name}" || return 1
ceph --format xml --admin-daemon $(rbd_watch_asok ${image}) perf dump |
$XMLSTARLET sel -t -m "${name}" -v .
}
function rbd_check_perfcounter()
{
local image=$1
local counter=$2
local expected_val=$3
local val=
val=$(rbd_get_perfcounter ${image} ${counter})
test "${val}" -eq "${expected_val}"
}
function rbd_watch_start()
{
local image=$1
local asok=$(rbd_watch_asok ${image})
mkfifo $(rbd_watch_fifo ${image})
(cat $(rbd_watch_fifo ${image}) |
rbd --admin-socket ${asok} watch ${image} \
> $(rbd_watch_out_file ${image}) 2>&1)&
# find pid of the started rbd watch process
local pid
for i in `seq 10`; do
pid=$(ps auxww | awk "/[r]bd --admin.* watch ${image}/ {print \$2}")
test -n "${pid}" && break
sleep 0.1
done
test -n "${pid}"
echo ${pid} > $(rbd_watch_pid_file ${image})
# find watcher admin socket
test -n "${asok}"
for i in `seq 10`; do
test -S "${asok}" && break
sleep 0.1
done
test -S "${asok}"
# configure debug level
ceph --admin-daemon "${asok}" config set debug_rbd 20
# check that watcher is registered
rbd status ${image} | expect_false grep "Watchers: none"
}
function rbd_watch_end()
{
local image=$1
local regexp=$2
# send 'enter' to watch to exit
echo > $(rbd_watch_fifo ${image})
# just in case it is not terminated
kill $(cat $(rbd_watch_pid_file ${image})) || :
# output rbd watch out file for easier troubleshooting
cat $(rbd_watch_out_file ${image})
# cleanup
rm -f $(rbd_watch_fifo ${image}) $(rbd_watch_pid_file ${image}) \
$(rbd_watch_out_file ${image}) $(rbd_watch_asok ${image})
}
pool="rbd"
image=testimg$$
ceph_admin="ceph --admin-daemon $(rbd_watch_asok ${image})"
rbd create --size 128 ${pool}/${image}
# check rbd cache commands are present in help output
rbd_cache_flush="rbd cache flush ${pool}/${image}"
rbd_cache_invalidate="rbd cache invalidate ${pool}/${image}"
rbd_watch_start ${image}
${ceph_admin} help | fgrep "${rbd_cache_flush}"
${ceph_admin} help | fgrep "${rbd_cache_invalidate}"
rbd_watch_end ${image}
# test rbd cache commands with disabled and enabled cache
for conf_rbd_cache in false true; do
rbd image-meta set ${image} conf_rbd_cache ${conf_rbd_cache}
rbd_watch_start ${image}
rbd_check_perfcounter ${image} flush 0
${ceph_admin} ${rbd_cache_flush}
# 'flush' counter should increase regardless if cache is enabled
rbd_check_perfcounter ${image} flush 1
rbd_check_perfcounter ${image} invalidate_cache 0
${ceph_admin} ${rbd_cache_invalidate}
# 'invalidate_cache' counter should increase regardless if cache is enabled
rbd_check_perfcounter ${image} invalidate_cache 1
rbd_watch_end ${image}
done
rbd rm ${image}
ceph-main/qa/workunits/rbd/test_librbd.sh
#!/bin/sh -e
if [ -n "${VALGRIND}" ]; then
valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
--error-exitcode=1 ceph_test_librbd
else
ceph_test_librbd
fi
exit 0
ceph-main/qa/workunits/rbd/test_librbd_python.sh
#!/bin/sh -ex
relpath=$(dirname $0)/../../../src/test/pybind
if [ -n "${VALGRIND}" ]; then
valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
--errors-for-leak-kinds=definite --error-exitcode=1 \
python3 -m nose -v $relpath/test_rbd.py "$@"
else
python3 -m nose -v $relpath/test_rbd.py "$@"
fi
exit 0
ceph-main/qa/workunits/rbd/test_lock_fence.sh
#!/usr/bin/env bash
# can't use -e because of background process
set -x
IMAGE=rbdrw-image
LOCKID=rbdrw
RELPATH=$(dirname $0)/../../../src/test/librbd
RBDRW=$RELPATH/rbdrw.py
rbd create $IMAGE --size 10 --image-format 2 --image-shared || exit 1
# rbdrw loops doing I/O to $IMAGE after locking with lockid $LOCKID
python3 $RBDRW $IMAGE $LOCKID &
iochild=$!
# give client time to lock and start reading/writing
LOCKS='[]'
while [ "$LOCKS" == '[]' ]
do
LOCKS=$(rbd lock list $IMAGE --format json)
sleep 1
done
clientaddr=$(rbd lock list $IMAGE | tail -1 | awk '{print $NF;}')
clientid=$(rbd lock list $IMAGE | tail -1 | awk '{print $1;}')
echo "clientaddr: $clientaddr"
echo "clientid: $clientid"
ceph osd blocklist add $clientaddr || exit 1
wait $iochild
rbdrw_exitcode=$?
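# rbdrw exits with ESHUTDOWN (108) once the blocklisted client's I/O fails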
if [ $rbdrw_exitcode != 108 ]
then
echo "wrong exitcode from rbdrw: $rbdrw_exitcode"
exit 1
else
echo "rbdrw stopped with ESHUTDOWN"
fi
set -e
ceph osd blocklist rm $clientaddr
rbd lock remove $IMAGE $LOCKID "$clientid"
# rbdrw will have exited with an existing watch, so, until #3527 is fixed,
# hang out until the watch expires
sleep 30
rbd rm $IMAGE
echo OK
ceph-main/qa/workunits/rbd/test_rbd_mirror.sh
#!/bin/sh -e
if [ -n "${VALGRIND}" ]; then
valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
--error-exitcode=1 ceph_test_rbd_mirror
else
ceph_test_rbd_mirror
fi
exit 0
ceph-main/qa/workunits/rbd/test_rbd_tasks.sh
#!/usr/bin/env bash
set -ex
POOL=rbd_tasks
POOL_NS=ns1
setup() {
trap 'cleanup' INT TERM EXIT
ceph osd pool create ${POOL} 128
rbd pool init ${POOL}
rbd namespace create ${POOL}/${POOL_NS}
TEMPDIR=`mktemp -d`
}
cleanup() {
ceph osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
rm -rf ${TEMPDIR}
}
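# Retry TEST_FN with a bounded backoff schedule (roughly 100 seconds of sleep
# in total), returning as soon as it succeeds.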
wait_for() {
local TEST_FN=$1
shift 1
local TEST_FN_ARGS=("$@")
for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
sleep ${s}
${TEST_FN} "${TEST_FN_ARGS[@]}" || continue
return 0
done
return 1
}
task_exists() {
local TASK_ID=$1
[[ -z "${TASK_ID}" ]] && exit 1
ceph rbd task list ${TASK_ID} || return 1
return 0
}
task_dne() {
local TASK_ID=$1
[[ -z "${TASK_ID}" ]] && exit 1
ceph rbd task list ${TASK_ID} || return 0
return 1
}
task_in_progress() {
local TASK_ID=$1
[[ -z "${TASK_ID}" ]] && exit 1
[[ $(ceph rbd task list ${TASK_ID} | jq '.in_progress') == 'true' ]]
}
test_remove() {
echo "test_remove"
local IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${IMAGE}
# MGR might require some time to discover the OSD map w/ new pool
wait_for ceph rbd task add remove ${POOL}/${IMAGE}
}
test_flatten() {
echo "test_flatten"
local PARENT_IMAGE=`uuidgen`
local CHILD_IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${PARENT_IMAGE}
rbd snap create ${POOL}/${PARENT_IMAGE}@snap
rbd clone ${POOL}/${PARENT_IMAGE}@snap ${POOL}/${POOL_NS}/${CHILD_IMAGE} --rbd-default-clone-format=2
[[ "$(rbd info --format json ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq 'has("parent")')" == "true" ]]
local TASK_ID=`ceph rbd task add flatten ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
[[ "$(rbd info --format json ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq 'has("parent")')" == "false" ]]
}
test_trash_remove() {
echo "test_trash_remove"
local IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${IMAGE}
local IMAGE_ID=`rbd info --format json ${POOL}/${IMAGE} | jq --raw-output ".id"`
rbd trash mv ${POOL}/${IMAGE}
[[ -n "$(rbd trash list ${POOL})" ]] || exit 1
local TASK_ID=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
[[ -z "$(rbd trash list ${POOL})" ]] || exit 1
}
test_migration_execute() {
echo "test_migration_execute"
local SOURCE_IMAGE=`uuidgen`
local TARGET_IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
[[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
[[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "executed" ]]
}
test_migration_commit() {
echo "test_migration_commit"
local SOURCE_IMAGE=`uuidgen`
local TARGET_IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
[[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
TASK_ID=`ceph rbd task add migration commit ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
[[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq 'has("migration")')" == "false" ]]
(rbd info ${POOL}/${SOURCE_IMAGE} && return 1) || true
rbd info ${POOL}/${TARGET_IMAGE}
}
test_migration_abort() {
echo "test_migration_abort"
local SOURCE_IMAGE=`uuidgen`
local TARGET_IMAGE=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
[[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
TASK_ID=`ceph rbd task add migration abort ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID}
[[ "$(rbd status --format json ${POOL}/${SOURCE_IMAGE} | jq 'has("migration")')" == "false" ]]
rbd info ${POOL}/${SOURCE_IMAGE}
(rbd info ${POOL}/${TARGET_IMAGE} && return 1) || true
}
test_list() {
echo "test_list"
local IMAGE_1=`uuidgen`
local IMAGE_2=`uuidgen`
rbd create --size 1T --image-shared ${POOL}/${IMAGE_1}
rbd create --size 1T --image-shared ${POOL}/${IMAGE_2}
local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE_1} | jq --raw-output ".id"`
local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE_2} | jq --raw-output ".id"`
local LIST_FILE="${TEMPDIR}/list_file"
ceph rbd task list > ${LIST_FILE}
cat ${LIST_FILE}
[[ $(jq "[.[] | .id] | contains([\"${TASK_ID_1}\", \"${TASK_ID_2}\"])" ${LIST_FILE}) == "true" ]]
ceph rbd task cancel ${TASK_ID_1}
ceph rbd task cancel ${TASK_ID_2}
}
test_cancel() {
echo "test_cancel"
local IMAGE=`uuidgen`
rbd create --size 1T --image-shared ${POOL}/${IMAGE}
local TASK_ID=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
wait_for task_exists ${TASK_ID}
ceph rbd task cancel ${TASK_ID}
wait_for task_dne ${TASK_ID}
}
test_duplicate_task() {
echo "test_duplicate_task"
local IMAGE=`uuidgen`
rbd create --size 1T --image-shared ${POOL}/${IMAGE}
local IMAGE_ID=`rbd info --format json ${POOL}/${IMAGE} | jq --raw-output ".id"`
rbd trash mv ${POOL}/${IMAGE}
local TASK_ID_1=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
local TASK_ID_2=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
[[ "${TASK_ID_1}" == "${TASK_ID_2}" ]]
ceph rbd task cancel ${TASK_ID_1}
}
test_duplicate_name() {
echo "test_duplicate_name"
local IMAGE=`uuidgen`
rbd create --size 1G --image-shared ${POOL}/${IMAGE}
local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID_1}
rbd create --size 1G --image-shared ${POOL}/${IMAGE}
local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
[[ "${TASK_ID_1}" != "${TASK_ID_2}" ]]
wait_for task_dne ${TASK_ID_2}
local TASK_ID_3=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
[[ "${TASK_ID_2}" == "${TASK_ID_3}" ]]
}
test_progress() {
echo "test_progress"
local IMAGE_1=`uuidgen`
local IMAGE_2=`uuidgen`
rbd create --size 1 --image-shared ${POOL}/${IMAGE_1}
local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE_1} | jq --raw-output ".id"`
wait_for task_dne ${TASK_ID_1}
local PROGRESS_FILE="${TEMPDIR}/progress_file"
ceph progress json > ${PROGRESS_FILE}
cat ${PROGRESS_FILE}
[[ $(jq "[.completed | .[].id] | contains([\"${TASK_ID_1}\"])" ${PROGRESS_FILE}) == "true" ]]
rbd create --size 1T --image-shared ${POOL}/${IMAGE_2}
local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE_2} | jq --raw-output ".id"`
wait_for task_in_progress ${TASK_ID_2}
ceph progress json > ${PROGRESS_FILE}
cat ${PROGRESS_FILE}
[[ $(jq "[.events | .[].id] | contains([\"${TASK_ID_2}\"])" ${PROGRESS_FILE}) == "true" ]]
ceph rbd task cancel ${TASK_ID_2}
wait_for task_dne ${TASK_ID_2}
ceph progress json > ${PROGRESS_FILE}
cat ${PROGRESS_FILE}
[[ $(jq "[.completed | map(select(.failed)) | .[].id] | contains([\"${TASK_ID_2}\"])" ${PROGRESS_FILE}) == "true" ]]
}
setup
test_remove
test_flatten
test_trash_remove
test_migration_execute
test_migration_commit
test_migration_abort
test_list
test_cancel
test_duplicate_task
test_duplicate_name
test_progress
echo OK
ceph-main/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh
#!/bin/sh
#
# Regression test for http://tracker.ceph.com/issues/14984
#
# When the bug is present, starting the rbdmap service causes
# a bogus log message to be emitted to the log because the RBDMAPFILE
# environment variable is not set.
#
# When the bug is not present, starting the rbdmap service will emit
# no log messages, because /etc/ceph/rbdmap does not contain any lines
# that require processing.
#
set -ex
echo "TEST: save timestamp for use later with journalctl --since"
TIMESTAMP=$(date +%Y-%m-%d\ %H:%M:%S)
echo "TEST: assert that rbdmap has not logged anything since boot"
journalctl -b 0 -t rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
journalctl -b 0 -t init-rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
echo "TEST: restart the rbdmap.service"
sudo systemctl restart rbdmap.service
echo "TEST: ensure that /usr/bin/rbdmap runs to completion"
until sudo systemctl status rbdmap.service | grep 'active (exited)' ; do
sleep 0.5
done
echo "TEST: assert that rbdmap has not logged anything since TIMESTAMP"
journalctl --since "$TIMESTAMP" -t rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
journalctl --since "$TIMESTAMP" -t init-rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
exit 0
ceph-main/qa/workunits/rbd/verify_pool.sh
#!/bin/sh -ex
POOL_NAME=rbd_test_validate_pool
PG_NUM=32
tear_down () {
ceph osd pool delete $POOL_NAME $POOL_NAME --yes-i-really-really-mean-it || true
}
set_up () {
tear_down
ceph osd pool create $POOL_NAME $PG_NUM
ceph osd pool mksnap $POOL_NAME snap
rbd pool init $POOL_NAME
}
trap tear_down EXIT HUP INT
set_up
# creating an image in a pool-managed snapshot pool should fail
rbd create --pool $POOL_NAME --size 1 foo && exit 1 || true
# should succeed if the pool already marked as validated
printf "overwrite validated" | rados --pool $POOL_NAME put rbd_info -
rbd create --pool $POOL_NAME --size 1 foo
echo OK
ceph-main/qa/workunits/rbd/crimson/test_crimson_librbd.sh
#!/bin/sh -e
if [ -n "${VALGRIND}" ]; then
valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
--error-exitcode=1 ceph_test_librbd
else
# Run test cases indivually to allow better selection
# of ongoing Crimson development.
# Disabled test groups are tracked here:
# https://tracker.ceph.com/issues/58791
ceph_test_librbd --gtest_filter='TestLibRBD.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/0.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/1.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/2.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/3.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/4.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/5.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/6.*'
ceph_test_librbd --gtest_filter='EncryptedFlattenTest/7.*'
# ceph_test_librbd --gtest_filter='DiffIterateTest/0.*'
# ceph_test_librbd --gtest_filter='DiffIterateTest/1.*'
ceph_test_librbd --gtest_filter='TestImageWatcher.*'
ceph_test_librbd --gtest_filter='TestInternal.*'
ceph_test_librbd --gtest_filter='TestMirroring.*'
# ceph_test_librbd --gtest_filter='TestDeepCopy.*'
ceph_test_librbd --gtest_filter='TestGroup.*'
# ceph_test_librbd --gtest_filter='TestMigration.*'
ceph_test_librbd --gtest_filter='TestMirroringWatcher.*'
ceph_test_librbd --gtest_filter='TestObjectMap.*'
ceph_test_librbd --gtest_filter='TestOperations.*'
ceph_test_librbd --gtest_filter='TestTrash.*'
ceph_test_librbd --gtest_filter='TestJournalEntries.*'
ceph_test_librbd --gtest_filter='TestJournalReplay.*'
fi
exit 0
ceph-main/qa/workunits/rename/all.sh
#!/usr/bin/env bash
set -ex
dir=`dirname $0`
CEPH_TOOL='./ceph'
$CEPH_TOOL || CEPH_TOOL='ceph'
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/prepare.sh
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_nul.sh
rm ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_nul.sh
rm ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_pri.sh
rm ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_pri.sh
rm ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/rem_rem.sh
rm ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_nul.sh
rm -r ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/pri_pri.sh
rm -r ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/dir_pri_pri.sh
rm -r ./?/* || true
CEPH_ARGS=$CEPH_ARGS CEPH_TOOL=$CEPH_TOOL $dir/dir_pri_nul.sh
rm -r ./?/* || true
ceph-main/qa/workunits/rename/dir_pri_nul.sh
#!/bin/sh -ex
# dir: srcdn=destdn
mkdir ./a/dir1
mv ./a/dir1 ./a/dir1.renamed
# dir: diff
mkdir ./a/dir2
mv ./a/dir2 ./b/dir2
# dir: diff, child subtree on target
mkdir -p ./a/dir3/child/foo
$CEPH_TOOL mds tell 0 export_dir /a/dir3/child 1
sleep 5
mv ./a/dir3 ./b/dir3
# dir: diff, child subtree on other
mkdir -p ./a/dir4/child/foo
$CEPH_TOOL mds tell 0 export_dir /a/dir4/child 2
sleep 5
mv ./a/dir4 ./b/dir4
# dir: witness subtree adjustment
mkdir -p ./a/dir5/1/2/3/4
$CEPH_TOOL mds tell 0 export_dir /a/dir5/1/2/3 2
sleep 5
mv ./a/dir5 ./b
ceph-main/qa/workunits/rename/dir_pri_pri.sh
#!/bin/sh -ex
# dir, srcdn=destdn
mkdir ./a/dir1
mkdir ./a/dir2
mv -T ./a/dir1 ./a/dir2
# dir, different
mkdir ./a/dir3
mkdir ./b/dir4
mv -T ./a/dir3 ./b/dir4
ceph-main/qa/workunits/rename/prepare.sh
#!/bin/sh -ex
$CEPH_TOOL mds tell 0 injectargs '--mds-bal-interval 0'
$CEPH_TOOL mds tell 1 injectargs '--mds-bal-interval 0'
$CEPH_TOOL mds tell 2 injectargs '--mds-bal-interval 0'
$CEPH_TOOL mds tell 3 injectargs '--mds-bal-interval 0'
#$CEPH_TOOL mds tell 4 injectargs '--mds-bal-interval 0'
mkdir -p ./a/a
mkdir -p ./b/b
mkdir -p ./c/c
mkdir -p ./d/d
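# Derive the current directory's path relative to the cephfs mount point so
# export_dir receives a path rooted at the filesystem root.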
mount_dir=`df . | grep -o " /.*" | grep -o "/.*"`
cur_dir=`pwd`
ceph_dir=${cur_dir##$mount_dir}
$CEPH_TOOL mds tell 0 export_dir $ceph_dir/b 1
$CEPH_TOOL mds tell 0 export_dir $ceph_dir/c 2
$CEPH_TOOL mds tell 0 export_dir $ceph_dir/d 3
sleep 5
ceph-main/qa/workunits/rename/pri_nul.sh
#!/bin/sh -ex
# srcdn=destdn
touch ./a/file1
mv ./a/file1 ./a/file1.renamed
# different
touch ./a/file2
mv ./a/file2 ./b
ceph-main/qa/workunits/rename/pri_pri.sh
#!/bin/sh -ex
# srcdn=destdn
touch ./a/file1
touch ./a/file2
mv ./a/file1 ./a/file2
# different (srcdn != destdn)
touch ./a/file3
touch ./b/file4
mv ./a/file3 ./b/file4
ceph-main/qa/workunits/rename/pri_rem.sh
#!/bin/sh -ex
dotest() {
src=$1
desti=$2
destdn=$3
n=$4
touch ./$src/src$n
touch ./$desti/desti$n
ln ./$desti/desti$n ./$destdn/destdn$n
mv ./$src/src$n ./$destdn/destdn$n
}
# srcdn=destdn=desti
dotest 'a' 'a' 'a' 1
# destdn=desti
dotest 'b' 'a' 'a' 2
# srcdn=destdn
dotest 'a' 'b' 'a' 3
# srcdn=desti
dotest 'a' 'a' 'b' 4
# all different
dotest 'a' 'b' 'c' 5
ceph-main/qa/workunits/rename/rem_nul.sh
#!/bin/sh -ex
dotest() {
srci=$1
srcdn=$2
dest=$3
n=$4
touch ./$srci/srci$n
ln ./$srci/srci$n ./$srcdn/srcdn$n
mv ./$srcdn/srcdn$n ./$dest/dest$n
}
# srci=srcdn=destdn
dotest 'a' 'a' 'a' 1
# srcdn=destdn
dotest 'b' 'a' 'a' 2
# srci=destdn
dotest 'a' 'b' 'a' 3
# srci=srcdn
dotest 'a' 'a' 'b' 4
# all different
dotest 'a' 'b' 'c' 5
ceph-main/qa/workunits/rename/rem_pri.sh
#!/bin/sh -ex
dotest() {
srci=$1
srcdn=$2
dest=$3
n=$4
touch ./$srci/srci$n
ln ./$srci/srci$n ./$srcdn/srcdn$n
touch ./$dest/dest$n
mv ./$srcdn/srcdn$n ./$dest/dest$n
}
# srci=srcdn=destdn
dotest 'a' 'a' 'a' 1
# srcdn=destdn
dotest 'b' 'a' 'a' 2
# srci=destdn
dotest 'a' 'b' 'a' 3
# srci=srcdn
dotest 'a' 'a' 'b' 4
# all different
dotest 'a' 'b' 'c' 5
ceph-main/qa/workunits/rename/rem_rem.sh
#!/bin/sh -ex
dotest() {
srci=$1
srcdn=$2
desti=$3
destdn=$4
n=$5
touch ./$srci/srci$n
ln ./$srci/srci$n ./$srcdn/srcdn$n
touch ./$desti/desti$n
ln ./$desti/desti$n ./$destdn/destdn$n
mv ./$srcdn/srcdn$n ./$destdn/destdn$n
}
# srci=srcdn=destdn=desti
dotest 'a' 'a' 'a' 'a' 1
# srcdn=destdn=desti
dotest 'b' 'a' 'a' 'a' 2
# srci=destdn=desti
dotest 'a' 'b' 'a' 'a' 3
# srci=srcdn=destdn
dotest 'a' 'a' 'b' 'a' 4
# srci=srcdn=desti
dotest 'a' 'a' 'a' 'b' 5
# srci=srcdn destdn=desti
dotest 'a' 'a' 'b' 'b' 6
# srci=destdn srcdn=desti
dotest 'a' 'b' 'b' 'a' 7
# srci=desti srcdn=destdn
dotest 'a' 'b' 'a' 'b' 8
# srci=srcdn
dotest 'a' 'a' 'b' 'c' 9
# srci=desti
dotest 'a' 'b' 'a' 'c' 10
# srci=destdn
dotest 'a' 'b' 'c' 'a' 11
# srcdn=desti
dotest 'a' 'b' 'b' 'c' 12
# srcdn=destdn
dotest 'a' 'b' 'c' 'b' 13
# destdn=desti
dotest 'a' 'b' 'c' 'c' 14
# all different
dotest 'a' 'b' 'c' 'd' 15
ceph-main/qa/workunits/rest/test-restful.sh
#!/bin/sh -ex
mydir=`dirname $0`
secret=`ceph config-key get mgr/restful/keys/admin`
url=$(ceph mgr dump|jq -r .services.restful|sed -e 's/\/$//')
echo "url $url secret $secret"
$mydir/test_mgr_rest_api.py $url $secret
echo $0 OK
ceph-main/qa/workunits/rest/test_mgr_rest_api.py
#! /usr/bin/env python3
import requests
import time
import sys
import json
# Do not show the warning about verify=False. Ignore exceptions because
# this doesn't work on some distros.
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
except:
pass
if len(sys.argv) < 3:
print("Usage: %s <url> <admin_key>" % sys.argv[0])
sys.exit(1)
addr = sys.argv[1]
auth = ('admin', sys.argv[2])
headers = {'Content-type': 'application/json'}
request = None
# Create a pool and get its id
request = requests.post(
addr + '/pool?wait=yes',
data=json.dumps({'name': 'supertestfriends', 'pg_num': 128}),
headers=headers,
verify=False,
auth=auth)
print(request.text)
request = requests.get(addr + '/pool', verify=False, auth=auth)
assert(request.json()[-1]['pool_name'] == 'supertestfriends')
pool_id = request.json()[-1]['pool']
# get a mon name
request = requests.get(addr + '/mon', verify=False, auth=auth)
firstmon = request.json()[0]['name']
print('first mon is %s' % firstmon)
# get a server name
request = requests.get(addr + '/osd', verify=False, auth=auth)
aserver = request.json()[0]['server']
print('a server is %s' % aserver)
screenplay = [
('get', '/', {}),
('get', '/config/cluster', {}),
('get', '/crush/rule', {}),
('get', '/doc', {}),
('get', '/mon', {}),
('get', '/mon/' + firstmon, {}),
('get', '/osd', {}),
('get', '/osd/0', {}),
('get', '/osd/0/command', {}),
('get', '/pool/1', {}),
('get', '/server', {}),
('get', '/server/' + aserver, {}),
('post', '/osd/0/command', {'command': 'scrub'}),
('post', '/pool?wait=1', {'name': 'supertestfriends', 'pg_num': 128}),
('patch', '/osd/0', {'in': False}),
('patch', '/config/osd', {'pause': True}),
('get', '/config/osd', {}),
('patch', '/pool/' + str(pool_id), {'size': 2}),
('patch', '/config/osd', {'pause': False}),
('patch', '/osd/0', {'in': True}),
('get', '/pool', {}),
('delete', '/pool/' + str(pool_id) + '?wait=1', {}),
('get', '/request?page=0', {}),
('delete', '/request', {}),
('get', '/request', {}),
('patch', '/pool/1', {'pg_num': 128}),
('patch', '/pool/1', {'pgp_num': 128}),
('get', '/perf?daemon=.*', {}),
]
for method, endpoint, args in screenplay:
if method == 'sleep':
time.sleep(endpoint)
continue
url = addr + endpoint
print("URL = " + url)
request = getattr(requests, method)(
url,
data=json.dumps(args) if args else None,
headers=headers,
verify=False,
auth=auth)
assert request is not None
print(request.text)
if request.status_code != 200 or 'error' in request.json():
print('ERROR: %s request for URL "%s" failed' % (method, url))
sys.exit(1)
print('OK')
ceph-main/qa/workunits/restart/test-backtraces.py
#!/usr/bin/env python3
from __future__ import print_function
import subprocess
import json
import os
import time
import sys
import rados as rados
import cephfs as cephfs
prefix='testbt'
def get_name(b, i, j):
c = '{pre}.{pid}.{i}.{j}'.format(pre=prefix, pid=os.getpid(), i=i, j=j)
return c, b + '/' + c
def mkdir(ceph, d):
print("mkdir {d}".format(d=d), file=sys.stderr)
ceph.mkdir(d, 0o755)
return ceph.stat(d)['st_ino']
def create(ceph, f):
print("creating {f}".format(f=f), file=sys.stderr)
fd = ceph.open(f, os.O_CREAT | os.O_RDWR, 0o644)
ceph.close(fd)
return ceph.stat(f)['st_ino']
def set_mds_config_param(ceph, param):
with open('/dev/null', 'rb') as devnull:
confarg = ''
if conf != '':
confarg = '-c {c}'.format(c=conf)
r = subprocess.call("ceph {ca} mds tell a injectargs '{p}'".format(ca=confarg, p=param), shell=True, stdout=devnull)
if r != 0:
raise Exception
class _TrimIndentFile(object):
def __init__(self, fp):
self.fp = fp
def readline(self):
line = self.fp.readline()
return line.lstrip(' \t')
def _optionxform(s):
s = s.replace('_', ' ')
s = '_'.join(s.split())
return s
def conf_set_kill_mds(location, killnum):
print('setting mds kill config option for {l}.{k}'.format(l=location, k=killnum), file=sys.stderr)
print("restart mds a mds_kill_{l}_at {k}".format(l=location, k=killnum))
sys.stdout.flush()
    while True:
        l = sys.stdin.readline()
        if l.rstrip() == 'restarted':
            break
def flush(ceph, testnum):
print('flushing {t}'.format(t=testnum), file=sys.stderr)
set_mds_config_param(ceph, '--mds_log_max_segments 1')
for i in range(1, 500):
f = '{p}.{pid}.{t}.{i}'.format(p=prefix, pid=os.getpid(), t=testnum, i=i)
print('flushing with create {f}'.format(f=f), file=sys.stderr)
fd = ceph.open(f, os.O_CREAT | os.O_RDWR, 0o644)
ceph.close(fd)
ceph.unlink(f)
print('flush doing shutdown', file=sys.stderr)
ceph.shutdown()
print('flush reinitializing ceph', file=sys.stderr)
ceph = cephfs.LibCephFS(conffile=conf)
print('flush doing mount', file=sys.stderr)
ceph.mount()
return ceph
def kill_mds(ceph, location, killnum):
print('killing mds: {l}.{k}'.format(l=location, k=killnum), file=sys.stderr)
set_mds_config_param(ceph, '--mds_kill_{l}_at {k}'.format(l=location, k=killnum))
def wait_for_mds(ceph):
# wait for restart
while True:
confarg = ''
if conf != '':
confarg = '-c {c}'.format(c=conf)
r = subprocess.check_output("ceph {ca} mds stat".format(ca=confarg), shell=True).decode()
        if r.find('a=up:active') != -1:
break
time.sleep(1)
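# Decode a binary inode_backtrace_t blob by round-tripping it through
# ceph-dencoder and parsing the resulting JSON dump.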
def decode(value):
tmpfile = '/tmp/{p}.{pid}'.format(p=prefix, pid=os.getpid())
with open(tmpfile, 'w+') as f:
f.write(value)
p = subprocess.Popen(
[
'ceph-dencoder',
'import',
tmpfile,
'type',
'inode_backtrace_t',
'decode',
'dump_json',
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
(stdout, _) = p.communicate(input=value)
p.stdin.close()
if p.returncode != 0:
raise Exception
os.remove(tmpfile)
return json.loads(stdout)
class VerifyFailure(Exception):
pass
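# Fetch the 'parent' xattr backtrace from the inode's head object and check
# its ancestry and pool against the expected values, retrying for a while so
# journal segments have a chance to flush out.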
def verify(rados_ioctx, ino, values, pool):
print('getting parent attr for ino: %lx.00000000' % ino, file=sys.stderr)
savede = None
for i in range(1, 20):
try:
savede = None
binbt = rados_ioctx.get_xattr('%lx.00000000' % ino, 'parent')
except rados.ObjectNotFound as e:
# wait for a bit to let segments get flushed out
savede = e
time.sleep(10)
if savede:
raise savede
bt = decode(binbt)
ind = 0
if bt['ino'] != ino:
raise VerifyFailure('inode mismatch: {bi} != {ino}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
bi=bt['ancestors'][ind]['dname'], ino=ino, bt=bt, i=ino, v=values))
for (n, i) in values:
if bt['ancestors'][ind]['dirino'] != i:
raise VerifyFailure('ancestor dirino mismatch: {b} != {ind}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
b=bt['ancestors'][ind]['dirino'], ind=i, bt=bt, i=ino, v=values))
if bt['ancestors'][ind]['dname'] != n:
raise VerifyFailure('ancestor dname mismatch: {b} != {n}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
b=bt['ancestors'][ind]['dname'], n=n, bt=bt, i=ino, v=values))
ind += 1
if bt['pool'] != pool:
raise VerifyFailure('pool mismatch: {btp} != {p}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
btp=bt['pool'], p=pool, bt=bt, i=ino, v=values))
def make_abc(ceph, rooti, i):
expected_bt = []
c, d = get_name("/", i, 0)
expected_bt = [(c, rooti)] + expected_bt
di = mkdir(ceph, d)
c, d = get_name(d, i, 1)
expected_bt = [(c, di)] + expected_bt
di = mkdir(ceph, d)
c, f = get_name(d, i, 2)
fi = create(ceph, f)
expected_bt = [(c, di)] + expected_bt
return fi, expected_bt
test = -1
if len(sys.argv) > 1:
test = int(sys.argv[1])
conf = ''
if len(sys.argv) > 2:
conf = sys.argv[2]
radosobj = rados.Rados(conffile=conf)
radosobj.connect()
ioctx = radosobj.open_ioctx('data')
ceph = cephfs.LibCephFS(conffile=conf)
ceph.mount()
rooti = ceph.stat('/')['st_ino']
conf = '/etc/ceph/ceph.conf'
if len(sys.argv) > 2:
conf = sys.argv[2]
# create /a/b/c
# flush
# verify
i = 0
if test < 0 or test == i:
print('Running test %d: basic verify' % i, file=sys.stderr)
ino, expected_bt = make_abc(ceph, rooti, i)
ceph = flush(ceph, i)
verify(ioctx, ino, expected_bt, 0)
i += 1
# kill-mds-at-openc-1
# create /a/b/c
# restart-mds
# flush
# verify
if test < 0 or test == i:
print('Running test %d: kill openc' % i, file=sys.stderr)
print("restart mds a")
sys.stdout.flush()
kill_mds(ceph, 'openc', 1)
ino, expected_bt = make_abc(ceph, rooti, i)
ceph = flush(ceph, i)
verify(ioctx, ino, expected_bt, 0)
i += 1
# kill-mds-at-openc-1
# create /a/b/c
# restart-mds with kill-mds-at-replay-1
# restart-mds
# flush
# verify
if test < 0 or test == i:
print('Running test %d: kill openc/replay' % i, file=sys.stderr)
# these are reversed because we want to prepare the config
conf_set_kill_mds('journal_replay', 1)
kill_mds(ceph, 'openc', 1)
print("restart mds a")
sys.stdout.flush()
ino, expected_bt = make_abc(ceph, rooti, i)
ceph = flush(ceph, i)
verify(ioctx, ino, expected_bt, 0)
i += 1
ioctx.close()
radosobj.shutdown()
ceph.shutdown()
print("done")
sys.stdout.flush()
ceph-main/qa/workunits/rgw/keystone-fake-server.py
#!/usr/bin/env python3
#
# Copyright (C) 2022 Binero
#
# Author: Tobias Urdin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
from datetime import datetime, timedelta
import logging
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
DEFAULT_DOMAIN = {
'id': 'default',
'name': 'Default',
}
PROJECTS = {
'admin': {
'domain': DEFAULT_DOMAIN,
'id': 'a6944d763bf64ee6a275f1263fae0352',
'name': 'admin',
},
'deadbeef': {
'domain': DEFAULT_DOMAIN,
'id': 'b4221c214dd64ee6a464g2153fae3813',
'name': 'deadbeef',
},
}
USERS = {
'admin': {
'domain': DEFAULT_DOMAIN,
'id': '51cc68287d524c759f47c811e6463340',
'name': 'admin',
},
'deadbeef': {
'domain': DEFAULT_DOMAIN,
'id': '99gg485738df758349jf8d848g774392',
'name': 'deadbeef',
},
}
USERROLES = {
'admin': [
{
'id': '51cc68287d524c759f47c811e6463340',
'name': 'admin',
}
],
'deadbeef': [
{
'id': '98bd32184f854f393a72b932g5334124',
'name': 'Member',
}
],
}
TOKENS = {
'admin-token-1': {
'username': 'admin',
'project': 'admin',
'expired': False,
},
'user-token-1': {
'username': 'deadbeef',
'project': 'deadbeef',
'expired': False,
},
'user-token-2': {
'username': 'deadbeef',
'project': 'deadbeef',
'expired': True,
},
}
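# Build a Keystone v3 token response body. Expired tokens are backdated two
# hours so their one-hour validity window has already lapsed; live tokens
# expire after only ten seconds.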
def _generate_token_result(username, project, expired=False):
userdata = USERS[username]
projectdata = PROJECTS[project]
userroles = USERROLES[username]
if expired:
then = datetime.now() - timedelta(hours=2)
issued_at = then.strftime('%Y-%m-%dT%H:%M:%SZ')
expires_at = (then + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%M:%SZ')
else:
now = datetime.now()
issued_at = now.strftime('%Y-%m-%dT%H:%M:%SZ')
expires_at = (now + timedelta(seconds=10)).strftime('%Y-%m-%dT%H:%M:%SZ')
result = {
'token': {
'audit_ids': ['3T2dc1CGQxyJsHdDu1xkcw'],
'catalog': [],
'expires_at': expires_at,
'is_domain': False,
'issued_at': issued_at,
'methods': ['password'],
'project': projectdata,
'roles': userroles,
'user': userdata,
}
}
return result
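# Request counters exposed via the non-Keystone /stats endpoint, letting
# callers observe how many token requests actually reached the fake server.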
COUNTERS = {
'get_total': 0,
'post_total': 0,
}
class HTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
# This is not part of the Keystone API
if self.path == '/stats':
self._handle_stats()
return
if str(self.path).startswith('/v3/auth/tokens'):
self._handle_get_auth()
else:
self.send_response(403)
self.end_headers()
def do_POST(self):
if self.path == '/v3/auth/tokens':
self._handle_post_auth()
else:
self.send_response(400)
self.end_headers()
def _get_data(self):
length = int(self.headers.get('content-length'))
data = self.rfile.read(length).decode('utf8')
return json.loads(data)
def _set_data(self, data):
jdata = json.dumps(data)
self.wfile.write(jdata.encode('utf8'))
def _handle_stats(self):
self.send_response(200)
self.end_headers()
self._set_data(COUNTERS)
def _handle_get_auth(self):
logging.info('Increasing get_total counter from %d -> %d' % (COUNTERS['get_total'], COUNTERS['get_total']+1))
COUNTERS['get_total'] += 1
auth_token = self.headers.get('X-Subject-Token', None)
if auth_token and auth_token in TOKENS:
tokendata = TOKENS[auth_token]
if tokendata['expired'] and 'allow_expired=1' not in self.path:
self.send_response(404)
self.end_headers()
else:
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
result = _generate_token_result(tokendata['username'], tokendata['project'], tokendata['expired'])
self._set_data(result)
else:
self.send_response(404)
self.end_headers()
def _handle_post_auth(self):
logging.info('Increasing post_total counter from %d -> %d' % (COUNTERS['post_total'], COUNTERS['post_total']+1))
COUNTERS['post_total'] += 1
data = self._get_data()
user = data['auth']['identity']['password']['user']
if user['name'] == 'admin' and user['password'] == 'ADMIN':
self.send_response(201)
self.send_header('Content-Type', 'application/json')
self.send_header('X-Subject-Token', 'admin-token-1')
self.end_headers()
tokendata = TOKENS['admin-token-1']
result = _generate_token_result(tokendata['username'], tokendata['project'], tokendata['expired'])
self._set_data(result)
else:
self.send_response(401)
self.end_headers()
def main():
logging.basicConfig(level=logging.DEBUG)
logging.info('Starting keystone-fake-server')
server = HTTPServer(('localhost', 5000), HTTPRequestHandler)
server.serve_forever()
if __name__ == '__main__':
main()
ceph-main/qa/workunits/rgw/keystone-service-token.sh
#!/usr/bin/env bash
#
# Copyright (C) 2022 Binero
#
# Author: Tobias Urdin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
trap cleanup EXIT
function cleanup() {
kill $KEYSTONE_FAKE_SERVER_PID
wait
}
function run() {
    $CEPH_ROOT/qa/workunits/rgw/keystone-fake-server.py &
KEYSTONE_FAKE_SERVER_PID=$!
# Give fake Keystone server some seconds to startup
sleep 5
$CEPH_ROOT/qa/workunits/rgw/test-keystone-service-token.py
}
main keystone-service-token "$@"
ceph-main/qa/workunits/rgw/run-d4n.sh
#!/usr/bin/env bash
set -ex
mydir=`dirname $0`
python3 -m venv $mydir
source $mydir/bin/activate
pip install pip --upgrade
pip install redis
pip install configobj
pip install boto3
# run test
$mydir/bin/python3 $mydir/test_rgw_d4n.py
deactivate
echo OK.
ceph-main/qa/workunits/rgw/run-datacache.sh
#!/usr/bin/env bash
set -ex
#assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
# localhost::443 for ssl
mydir=`dirname $0`
python3 -m venv $mydir
source $mydir/bin/activate
pip install pip --upgrade
pip install configobj
## run test
$mydir/bin/python3 $mydir/test_rgw_datacache.py
deactivate
echo OK.
ceph-main/qa/workunits/rgw/run-reshard.sh
#!/usr/bin/env bash
set -ex
# this test uses fault injection to abort during 'radosgw-admin bucket reshard'
# disable coredumps so teuthology won't mark a failure
ulimit -c 0
#assume working ceph environment (radosgw-admin in path) and rgw on localhost:80
# localhost::443 for ssl
mydir=`dirname $0`
python3 -m venv $mydir
source $mydir/bin/activate
pip install pip --upgrade
pip install boto3
## run test
$mydir/bin/python3 $mydir/test_rgw_reshard.py
deactivate
echo OK.
ceph-main/qa/workunits/rgw/run-s3tests.sh
#!/usr/bin/env bash
set -ex
# run s3-tests from current directory. assume working
# ceph environment (radosgw-admin in path) and rgw on localhost:8000
# (the vstart default).
branch=$1
[ -z "$1" ] && branch=master
port=$2
[ -z "$2" ] && port=8000 # this is vstart's default
##
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
if [ -e CMakeCache.txt ]; then
BIN_PATH=$PWD/bin
elif [ -e $root_path/../${BUILD_DIR}/CMakeCache.txt ]; then
cd $root_path/../${BUILD_DIR}
BIN_PATH=$PWD/bin
fi
PATH=$PATH:$BIN_PATH
dir=tmp.s3-tests.$$
# clone and bootstrap
mkdir $dir
cd $dir
git clone https://github.com/ceph/s3-tests
cd s3-tests
git checkout ceph-$branch
S3TEST_CONF=s3tests.conf.SAMPLE tox -- -m "not fails_on_rgw and not sse_s3 and not lifecycle_expiration and not test_of_sts and not webidentity_test" -v
cd ../..
rm -rf $dir
echo OK.
ceph-main/qa/workunits/rgw/s3_bucket_quota.pl
#! /usr/bin/perl
=head1 NAME
s3_bucket_quota.pl - Script to test the rgw bucket quota functionality using s3 interface.
=head1 SYNOPSIS
Use:
perl s3_bucket_quota.pl [--help]
Examples:
perl s3_bucket_quota.pl
or
perl s3_bucket_quota.pl --help
=head1 DESCRIPTION
This script intends to test the rgw bucket quota functionality using the s3
interface and reports the test results.
=head1 ARGUMENTS
s3_bucket_quota.pl takes the following arguments:
--help
(optional) Displays the usage message.
=cut
use Amazon::S3;
use Data::Dumper;
#use strict;
use IO::File;
use Getopt::Long;
use Digest::MD5;
use Pod::Usage();
use FindBin;
use lib $FindBin::Bin;
use s3_utilities;
use Net::Domain qw(hostfqdn);
my $help;
Getopt::Long::GetOptions(
'help' => \$help
);
Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
#== local variables ===
our $mytestfilename;
my $mytestfilename1;
my $logmsg;
my $kruft;
my $s3;
my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
my $port = $ENV{RGW_PORT}||80;
our $hostname = "$hostdom:$port";
our $testfileloc;
my $rgw_user = "qa_user";
# Function that deletes the user $rgw_user and write to logfile.
sub delete_user
{
my $cmd = "$radosgw_admin user rm --uid=$rgw_user";
my $cmd_op = get_command_output($cmd);
if ($cmd_op !~ /aborting/){
print "user $rgw_user deleted\n";
} else {
print "user $rgw_user NOT deleted\n";
return 1;
}
return 0;
}
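# 'radosgw-admin quota set' prints nothing on success, so the (!~ /./) checks
# below treat empty command output as a successful quota update.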
sub quota_set_max_size {
my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
if ($set_quota !~ /./){
print "quota set for the bucket: $bucketname \n";
} else {
print "quota set failed for the bucket: $bucketname \n";
exit 1;
}
return 0;
}
sub quota_set_max_size_zero {
run_s3($rgw_user);
my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=0`;
if ($set_quota !~ /./){
pass ("quota set for the bucket: $bucketname with max size as zero\n");
} else {
fail ("quota set with max size 0 failed for the bucket: $bucketname \n");
}
delete_bucket();
}
sub quota_set_max_objs_zero {
run_s3($rgw_user);
my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=0`;
if ($set_quota !~ /./){
pass ("quota set for the bucket: $bucketname with max objects as zero\n");
} else {
fail ("quota set with max objects 0 failed for the bucket: $bucketname \n");
}
delete_bucket();
}
sub quota_set_neg_size {
run_s3($rgw_user);
my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-size=-1`;
if ($set_quota !~ /./){
pass ("quota set for the bucket: $bucketname with max size -1\n");
} else {
fail ("quota set failed for the bucket: $bucketname with max size -1 \n");
}
delete_bucket();
}
sub quota_set_neg_objs {
run_s3($rgw_user);
my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=-1`;
if ($set_quota !~ /./){
pass ("quota set for the bucket: $bucketname max objects -1 \n");
} else {
fail ("quota set failed for the bucket: $bucketname \n with max objects -1");
}
delete_bucket();
}
sub quota_set_user_objs {
my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
if ($set_quota1 !~ /./){
print "bucket quota max_objs set for the given user: $bucketname \n";
} else {
print "bucket quota max_objs set failed for the given user: $bucketname \n";
exit 1;
}
return 0;
}
sub quota_set_user_size {
my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=bucket`;
my $set_quota1 = `$radosgw_admin quota set --bucket=$bucketname --max-size=1048576000`;
if ($set_quota1 !~ /./){
print "bucket quota max size set for the given user: $bucketname \n";
} else {
print "bucket quota max size set failed for the user: $bucketname \n";
exit 1;
}
return 0;
}
sub quota_set_max_obj {
# set max objects
my $set_quota = `$radosgw_admin quota set --bucket=$bucketname --max-objects=1`;
if ($set_quota !~ /./){
print "quota set for the bucket: $bucketname \n";
} else {
print "quota set failed for the bucket: $bucketname \n";
exit 1;
}
return 0;
}
sub quota_enable {
my $en_quota = `$radosgw_admin quota enable --bucket=$bucketname`;
if ($en_quota !~ /./){
print "quota enabled for the bucket: $bucketname \n";
} else {
print "quota enable failed for the bucket: $bucketname \n";
exit 1;
}
return 0;
}
sub quota_disable {
my $dis_quota = `$radosgw_admin quota disable --bucket=$bucketname`;
if ($dis_quota !~ /./){
print "quota disabled for the bucket: $bucketname \n";
} else {
print "quota disable failed for the bucket: $bucketname \n";
exit 1;
}
return 0;
}
# upload a file to the bucket
sub upload_file {
print "adding file to bucket: $mytestfilename\n";
($bucket->add_key_filename( $mytestfilename, $testfileloc,
{ content_type => 'text/plain', },
) and (print "upload file successful\n" ) and return 0 ) or (return 1);
}
# delete the bucket
sub delete_bucket {
#($bucket->delete_key($mytestfilename1) and print "delete keys on bucket succeeded second time\n" ) or die $s3->err . "delete keys on bucket failed second time\n" . $s3->errstr;
($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
}
# set bucket quota with max_objects and verify
sub test_max_objects {
my $size = '10Mb';
create_file($size);
run_s3($rgw_user);
quota_set_max_obj();
quota_enable();
my $ret_value = upload_file();
if ($ret_value == 0){
pass ( "Test max objects passed" );
} else {
fail ( "Test max objects failed" );
}
delete_user();
delete_keys($mytestfilename);
delete_bucket();
}
# Set bucket quota for specific user and ensure max objects set for the user is validated
sub test_max_objects_per_user{
my $size = '10Mb';
create_file($size);
run_s3($rgw_user);
quota_set_user_objs();
quota_enable();
my $ret_value = upload_file();
if ($ret_value == 0){
pass ( "Test max objects for the given user passed" );
} else {
fail ( "Test max objects for the given user failed" );
}
delete_user();
delete_keys($mytestfilename);
delete_bucket();
}
# set bucket quota with max_objects and try to exceed the max_objects and verify
sub test_beyond_max_objs {
my $size = "10Mb";
create_file($size);
run_s3($rgw_user);
quota_set_max_obj();
quota_enable();
upload_file();
my $ret_value = readd_file();
if ($ret_value == 1){
pass ( "set max objects and test beyond max objects passed" );
} else {
fail ( "set max objects and test beyond max objects failed" );
}
delete_user();
delete_keys($mytestfilename);
delete_bucket();
}
# set bucket quota for a user with max_objects and try to exceed the max_objects and verify
sub test_beyond_max_objs_user {
my $size = "10Mb";
create_file($size);
run_s3($rgw_user);
quota_set_user_objs();
quota_enable();
upload_file();
my $ret_value = readd_file();
if ($ret_value == 1){
pass ( "set max objects for a given user and test beyond max objects passed" );
} else {
fail ( "set max objects for a given user and test beyond max objects failed" );
}
delete_user();
delete_keys($mytestfilename);
delete_bucket();
}
# set bucket quota for max size and ensure it is validated
sub test_quota_size {
my $ret_value;
my $size = "2Gb";
create_file($size);
run_s3($rgw_user);
quota_set_max_size();
quota_enable();
my $ret_value = upload_file();
if ($ret_value == 1) {
pass ( "set max size and ensure that objects upload beyond max size is not entertained" );
my $retdel = delete_keys($mytestfilename);
if ($retdel == 0) {
print "delete objects successful \n";
my $size1 = "1Gb";
create_file($size1);
my $ret_val1 = upload_file();
if ($ret_val1 == 0) {
pass ( "set max size and ensure that the max size is in effect" );
} else {
fail ( "set max size and ensure the max size takes effect" );
}
}
} else {
fail ( "set max size and ensure that objects beyond max size is not allowed" );
}
delete_user();
delete_keys($mytestfilename);
delete_bucket();
}
# set bucket quota for max size for a given user and ensure it is validated
sub test_quota_size_user {
my $ret_value;
my $size = "2Gb";
create_file($size);
run_s3($rgw_user);
quota_set_user_size();
quota_enable();
my $ret_value = upload_file();
if ($ret_value == 1) {
pass ( "set max size for a given user and ensure that objects upload beyond max size is not entertained" );
my $retdel = delete_keys($mytestfilename);
if ($retdel == 0) {
print "delete objects successful \n";
my $size1 = "1Gb";
create_file($size1);
my $ret_val1 = upload_file();
if ($ret_val1 == 0) {
pass ( "set max size for a given user and ensure that the max size is in effect" );
} else {
fail ( "set max size for a given user and ensure the max size takes effect" );
}
}
} else {
fail ( "set max size for a given user and ensure that objects beyond max size is not allowed" );
}
delete_user();
delete_keys($mytestfilename);
delete_bucket();
}
# set bucket quota size but disable quota and verify
sub test_quota_size_disabled {
my $size = "2Gb";
create_file($size);
run_s3($rgw_user);
quota_set_max_size();
quota_disable();
my $ret_value = upload_file();
if ($ret_value == 0) {
pass ( "bucket quota size doesnt take effect when quota is disabled" );
} else {
fail ( "bucket quota size doesnt take effect when quota is disabled" );
}
delete_user();
delete_keys($mytestfilename);
delete_bucket();
}
# set bucket quota size for a given user but disable quota and verify
sub test_quota_size_disabled_user {
my $size = "2Gb";
create_file($size);
run_s3($rgw_user);
quota_set_user_size();
quota_disable();
my $ret_value = upload_file();
if ($ret_value == 0) {
pass ( "bucket quota size for a given user doesnt take effect when quota is disabled" );
} else {
fail ( "bucket quota size for a given user doesnt take effect when quota is disabled" );
}
delete_user();
delete_keys($mytestfilename);
delete_bucket();
}
#== Main starts here===
ceph_os_info();
test_max_objects();
test_max_objects_per_user();
test_beyond_max_objs();
test_beyond_max_objs_user();
quota_set_max_size_zero();
quota_set_max_objs_zero();
quota_set_neg_objs();
quota_set_neg_size();
test_quota_size();
test_quota_size_user();
test_quota_size_disabled();
test_quota_size_disabled_user();
print "OK";
| 11,478 | 28.134518 | 180 |
pl
|
null |
ceph-main/qa/workunits/rgw/s3_multipart_upload.pl
|
#! /usr/bin/perl
=head1 NAME
s3_multipart_upload.pl - Script to test rgw multipart upload using s3 interface.
=head1 SYNOPSIS
Use:
perl s3_multipart_upload.pl [--help]
Examples:
perl s3_multipart_upload.pl
or
perl s3_multipart_upload.pl --help
=head1 DESCRIPTION
This script tests rgw multipart upload followed by a download, verifies the
checksum via the s3 interface, and reports the test results
=head1 ARGUMENTS
s3_multipart_upload.pl takes the following arguments:
--help
(optional) Displays the usage message.
=cut
use Amazon::S3;
use Data::Dumper;
use IO::File;
use Getopt::Long;
use Digest::MD5;
use Pod::Usage();
use FindBin;
use lib $FindBin::Bin;
use s3_utilities;
use Net::Domain qw(hostfqdn);
my $help;
Getopt::Long::GetOptions(
'help' => \$help
);
Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
#== local variables ===
my $s3;
my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
my $port = $ENV{RGW_PORT}||80;
our $hostname = "$hostdom:$port";
our $testfileloc;
our $mytestfilename;
# upload a file to the bucket
sub upload_file {
my ($fsize, $i) = @_;
create_file($fsize, $i);
print "adding file to bucket $bucketname: $mytestfilename\n";
($bucket->add_key_filename( $mytestfilename, $testfileloc,
{ content_type => 'text/plain', },
) and (print "upload file successful\n" ) and return 0 ) or (print "upload failed\n" and return 1);
}
# delete the bucket
sub delete_bucket {
($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
}
# Function to perform multipart upload of given file size to the user bucket via s3 interface
sub multipart_upload
{
my ($size, $parts) = @_;
# generate random user every time
my $user = rand();
# upload the file to the bucket in equal parts; the chunk size below is
# chosen to divide the total size evenly across the requested part count
my $fsize1;
run_s3($user);
if ($parts == 10){
$fsize1 = '100Mb';
} elsif ($parts == 100){
$fsize1 = '10Mb';
}
foreach my $i(1..$parts){
print "uploading file - part $i \n";
upload_file($fsize1, $i);
}
fetch_file_from_bucket($fsize1, $parts);
compare_cksum($fsize1, $parts);
purge_data($user);
}
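# Illustration of the naming produced above for multipart_upload('1048576000', 10),
# assuming create_file()'s "/tmp/<size>.<part>" convention from s3_utilities.pm:
#   local parts:  /tmp/100Mb.1 .. /tmp/100Mb.10
#   bucket keys:  100Mb.1 .. 100Mb.10
#   downloads:    /tmp/downloadfile.1 .. /tmp/downloadfile.10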
# Function to download the files from bucket to verify there is no data corruption
sub fetch_file_from_bucket
{
# fetch file from the bucket
my ($fsize, $parts) = @_;
foreach my $i(1..$parts){
my $src_file = "$fsize.$i";
my $dest_file = "/tmp/downloadfile.$i";
print "Downloading $src_file from bucket to $dest_file \n";
$response = $bucket->get_key_filename( $src_file, GET, $dest_file )
 or die $s3->err . ": " . $s3->errstr;
}
}
# Compare the source file with destination file and verify checksum to ensure
# the files are not corrupted
sub compare_cksum
{
my ($fsize, $parts)=@_;
my $md5 = Digest::MD5->new;
my $flag = 0;
foreach my $i (1..$parts){
my $src_file = "/tmp/"."$fsize".".$i";
my $dest_file = "/tmp/downloadfile".".$i";
open( FILE, $src_file )
or die "Error: Could not open $src_file for MD5 checksum...";
open( DLFILE, $dest_file )
or die "Error: Could not open $dest_file for MD5 checksum.";
binmode(FILE);
binmode(DLFILE);
my $md5sum = $md5->addfile(*FILE)->hexdigest;
my $md5sumdl = $md5->addfile(*DLFILE)->hexdigest;
close FILE;
close DLFILE;
# compare the checksums
if ( $md5sum eq $md5sumdl ) {
$flag++;
}
}
if ($flag == $parts){
pass("checksum verification for multipart upload passed" );
}else{
fail("checksum verification for multipart upload failed" );
}
}
#== Main starts here===
ceph_os_info();
check();
# The following test runs multi part upload of file size 1Gb in 10 parts
multipart_upload('1048576000', 10);
# The following test runs multipart upload of 1 Gb file in 100 parts
multipart_upload('1048576000', 100);
print "OK";
| 4,108 | 26.032895 | 126 |
pl
|
null |
ceph-main/qa/workunits/rgw/s3_user_quota.pl
|
#! /usr/bin/perl
=head1 NAME
s3_user_quota.pl - Script to test the rgw user quota functionality using s3 interface.
=head1 SYNOPSIS
Use:
perl s3_user_quota.pl [--help]
Examples:
perl s3_user_quota.pl
or
perl s3_user_quota.pl --help
=head1 DESCRIPTION
This script intends to test the rgw user quota functionality using s3 interface
and reports the test results
=head1 ARGUMENTS
s3_user_quota.pl takes the following arguments:
--help
(optional) Displays the usage message.
=cut
use Amazon::S3;
use Data::Dumper;
use IO::File;
use Getopt::Long;
use Digest::MD5;
use Pod::Usage();
use FindBin;
use lib $FindBin::Bin;
use s3_utilities;
use Net::Domain qw(hostfqdn);
my $help;
Getopt::Long::GetOptions(
'help' => \$help
);
Pod::Usage::pod2usage(-verbose => 1) && exit if ($help);
#== local variables ===
our $mytestfilename;
my $mytestfilename1;
my $logmsg;
my $kruft;
my $s3;
my $hostdom = $ENV{RGW_FQDN}||hostfqdn();
my $port = $ENV{RGW_PORT}||80;
our $hostname = "$hostdom:$port";
our $testfileloc;
our $cnt;
sub quota_set_max_size_per_user {
my ($maxsize, $size1,$rgw_user) = @_;
run_s3($rgw_user);
my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
if (($set_quota !~ /./)&&($maxsize == 0)){
my $ret = test_max_objs($size1, $rgw_user);
if ($ret == 1){
pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
}else {
fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
}
} elsif (($set_quota !~ /./) && ($maxsize != 0)) {
my $ret = test_max_objs($size1, $rgw_user);
if ($ret == 0){
pass("quota set for user: $rgw_user with max_size=$maxsize passed" );
}else {
fail("quota set for user: $rgw_user with max_size=$maxsize failed" );
}
}
delete_keys($mytestfilename);
purge_data($rgw_user);
return 0;
}
sub max_size_per_user {
my ($maxsize, $size1,$rgw_user) = @_;
run_s3($rgw_user);
my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-size=$maxsize`;
if (($set_quota !~ /./) && ($maxsize != 0)) {
my $ret = test_max_objs($size1, $rgw_user);
if ($ret == 0){
$cnt++;
}
}
return $cnt;
}
sub quota_set_max_obj_per_user {
# set max objects
my ($maxobjs, $size1, $rgw_user) = @_;
run_s3($rgw_user);
my $set_quota = `$radosgw_admin quota set --uid=$rgw_user --quota-scope=user --max-objects=$maxobjs`;
if (($set_quota !~ /./) && ($maxobjs == 0)){
my $ret = test_max_objs($size1, $rgw_user);
if ($ret == 1){
pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
}else {
fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
}
} elsif (($set_quota !~ /./) && ($maxobjs == 1)) {
my $ret = test_max_objs($size1, $rgw_user);
if ($ret == 0){
pass("quota set for user: $rgw_user with max_objects=$maxobjs passed" );
}else {
fail("quota set for user: $rgw_user with max_objects=$maxobjs failed" );
}
}
delete_keys($mytestfilename);
purge_data($rgw_user);
}
sub quota_enable_user {
my ($rgw_user) = @_;
my $en_quota = `$radosgw_admin quota enable --uid=$rgw_user --quota-scope=user`;
if ($en_quota !~ /./){
print "quota enabled for the user $rgw_user \n";
} else {
print "quota enable failed for the user $rgw_user \n";
exit 1;
}
return 0;
}
sub quota_disable_user {
my ($rgw_user) = @_;
my $dis_quota = `$radosgw_admin quota disable --uid=$rgw_user --quota-scope=user`;
if ($dis_quota !~ /./){
print "quota disabled for the user $rgw_user \n";
} else {
print "quota disable failed for the user $rgw_user \n";
exit 1;
}
return 0;
}
# upload a file to the bucket
sub upload_file {
print "adding file to bucket $bucketname: $mytestfilename\n";
($bucket->add_key_filename( $mytestfilename, $testfileloc,
{ content_type => 'text/plain', },
) and (print "upload file successful\n" ) and return 0 ) or (return 1);
}
# delete the bucket
sub delete_bucket {
($bucket->delete_bucket) and (print "bucket delete succeeded \n") or die $s3->err . "delete bucket failed\n" . $s3->errstr;
}
#Function to upload the given file size to bucket and verify
sub test_max_objs {
my ($size, $rgw_user) = @_;
create_file($size);
quota_enable_user($rgw_user);
my $ret_value = upload_file();
return $ret_value;
}
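# Example (illustration only): with a user-scope quota already set for 'usr1',
# upload a single 10Mb object and capture whether it was accepted (0) or
# rejected (1):
#   my $rc = test_max_objs('10Mb', 'usr1');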
# set user quota and ensure it is validated
sub test_user_quota_max_size{
my ($max_buckets,$size, $fsize) = @_;
my $usr = rand();
my $ret_value;
foreach my $i (1..$max_buckets){
$ret_value = max_size_per_user($size, $fsize, $usr );
}
if ($ret_value == $max_buckets){
fail( "user quota max size for $usr failed on $max_buckets buckets" );
} else {
pass( "user quota max size for $usr passed on $max_buckets buckets" );
}
delete_keys($mytestfilename);
purge_data($usr);
}
#== Main starts here===
ceph_os_info();
check();
quota_set_max_obj_per_user('0', '10Mb', 'usr1');
quota_set_max_obj_per_user('1', '10Mb', 'usr2');
quota_set_max_size_per_user(0, '10Mb', 'usr1');
quota_set_max_size_per_user(1048576000, '1Gb', 'usr2');
test_user_quota_max_size(3,1048576000,'100Mb');
test_user_quota_max_size(2,1048576000, '1Gb');
print "OK";
| 5,453 | 27.40625 | 126 |
pl
|
null |
ceph-main/qa/workunits/rgw/s3_utilities.pm
|
# Common subroutines shared by the s3 testing code
my $sec;
my $min;
my $hour;
my $mon;
my $year;
my $mday;
my $wday;
my $yday;
my $isdst;
my $PASS_CNT = 0;
my $FAIL_CNT = 0;
our $radosgw_admin = $ENV{RGW_ADMIN}||"sudo radosgw-admin";
# function to get the current time stamp from the test set up
sub get_timestamp {
($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
$mon += 1;       # localtime() months are 0-based
$year += 1900;   # localtime() years count from 1900
if ($mon < 10) { $mon = "0$mon"; }
if ($mday < 10) { $mday = "0$mday"; }
if ($hour < 10) { $hour = "0$hour"; }
if ($min < 10) { $min = "0$min"; }
if ($sec < 10) { $sec = "0$sec"; }
return $year . '_' . $mon . '_' . $mday . '_' . $hour . '_' . $min . '_' . $sec;
}
# Function to check if radosgw is already running
sub get_status {
my $service = "radosgw";
my $cmd = "pgrep $service";
my $status = get_cmd_op($cmd);
if ($status =~ /\d+/ ){
return 0;
}
return 1;
}
# function to execute the command and return output
sub get_cmd_op
{
my $cmd = shift;
my $excmd = `$cmd`;
return $excmd;
}
#Function that executes the CLI commands and returns the output of the command
sub get_command_output {
my $cmd_output = shift;
open( FH, ">>$test_log" );
print FH "\"$cmd_output\"\n";
my $exec_cmd = `$cmd_output 2>&1`;
print FH "$exec_cmd\n";
close(FH);
return $exec_cmd;
}
# Function to get the hostname
sub get_hostname
{
my $cmd = "hostname";
my $get_host = get_command_output($cmd);
chomp($get_host);
return($get_host);
}
sub pass {
my ($comment) = @_;
print "Comment required." unless length $comment;
chomp $comment;
print_border2();
print "Test case: $TC_CNT PASSED - $comment \n";
print_border2();
$PASS_CNT++;
}
sub fail {
my ($comment) = @_;
print "Comment required." unless length $comment;
chomp $comment;
print_border2();
print "Test case: $TC_CNT FAILED - $comment \n";
print_border2();
$FAIL_CNT++;
}
sub print_border2 {
print "~" x 90 . "\n";
}
# Function to create the user "qa_user" and extract the user access_key and secret_key of the user
sub get_user_info
{
my ($rgw_user) = @_;
my $cmd = "$radosgw_admin user create --uid=$rgw_user --display-name=$rgw_user";
my $cmd_op = get_command_output($cmd);
if ($cmd_op !~ /keys/){
return (0,0);
}
my @get_user = (split/\n/,$cmd_op);
foreach (@get_user) {
if ($_ =~ /access_key/ ){
$get_acc_key = $_;
} elsif ($_ =~ /secret_key/ ){
$get_sec_key = $_;
}
}
my $access_key = $get_acc_key;
my $acc_key = (split /:/, $access_key)[1];
$acc_key =~ s/\\//g;
$acc_key =~ s/ //g;
$acc_key =~ s/"//g;
$acc_key =~ s/,//g;
my $secret_key = $get_sec_key;
my $sec_key = (split /:/, $secret_key)[1];
$sec_key =~ s/\\//g;
$sec_key =~ s/ //g;
$sec_key =~ s/"//g;
$sec_key =~ s/,//g;
return ($acc_key, $sec_key);
}
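# Hedged alternative (not wired in): radosgw-admin prints the user record as
# JSON, so the line scraping above could be replaced with a JSON parse if the
# JSON module is available in the test environment:
#   use JSON;
#   my $info = decode_json($cmd_op);
#   my ($acc, $sec) = @{$info->{keys}[0]}{qw(access_key secret_key)};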
# Function that deletes the given user and all associated user data
sub purge_data
{
my ($rgw_user) = @_;
my $cmd = "$radosgw_admin user rm --uid=$rgw_user --purge-data";
my $cmd_op = get_command_output($cmd);
if ($cmd_op !~ /./){
print "user $rgw_user deleted\n";
} else {
print "user $rgw_user NOT deleted\n";
return 1;
}
return 0;
}
# Read PRETTY_NAME from /etc/os-release
sub os_pretty_name
{
open(FH, '<', '/etc/os-release') or die $!;
while (my $line = <FH>) {
chomp $line;
if ($line =~ /^\s*PRETTY_NAME=\"?([^"]*)\"?/) {
return $1;
}
}
close(FH);
}
# Function to get the Ceph and distro info
sub ceph_os_info
{
my $ceph_v = get_command_output ( "ceph -v" );
my @ceph_arr = split(" ",$ceph_v);
$ceph_v = "Ceph Version: $ceph_arr[2]";
my $os_distro = os_pretty_name();
$os_distro = "Linux Flavor:$os_distro";
return ($ceph_v, $os_distro);
}
# Execute the test case based on the input to the script
sub create_file {
my ($file_size, $part) = @_;
my $cnt;
$mytestfilename = "$file_size.$part";
$testfileloc = "/tmp/".$mytestfilename;
if ($file_size eq '10Mb'){
$cnt = 1;
} elsif ($file_size eq '100Mb'){
$cnt = 10;
} elsif ($file_size eq '500Mb'){
$cnt = 50;
} elsif ($file_size eq '1Gb'){
$cnt = 100;
} elsif ($file_size eq '2Gb'){
$cnt = 200;
}
my $ret = system("dd if=/dev/zero of=$testfileloc bs=10485760 count=$cnt");
if ($ret) { exit 1 };
return 0;
}
sub run_s3
{
# Run tests for the S3 functionality
# Modify access key and secret key to suit the user account
my ($user) = @_;
our ( $access_key, $secret_key ) = get_user_info($user);
if ( ($access_key) && ($secret_key) ) {
$s3 = Amazon::S3->new(
{
aws_access_key_id => $access_key,
aws_secret_access_key => $secret_key,
host => $hostname,
secure => 0,
retry => 1,
}
);
}
our $bucketname = 'buck_'.get_timestamp();
# create a new bucket (the test bucket)
our $bucket = $s3->add_bucket( { bucket => $bucketname } )
or die $s3->err. "bucket $bucketname create failed\n". $s3->errstr;
print "Bucket Created: $bucketname \n";
return 0;
}
# delete keys
sub delete_keys {
(($bucket->delete_key($_[0])) and return 0) or return 1;
}
# upload an extra 10Mb object to the bucket (used to push usage past a quota)
sub readd_file {
system("dd if=/dev/zero of=/tmp/10MBfile1 bs=10485760 count=1");
$mytestfilename1 = '10MBfile1';
print "readding file to bucket: $mytestfilename1\n";
((($bucket->add_key_filename( $mytestfilename1, $testfileloc,
{ content_type => 'text/plain', },
)) and (print "readding file success\n") and return 0) or (return 1));
}
# check if rgw service is already running
sub check
{
my $state = get_status();
if ($state) {
exit 1;
}
}
1;
| 6,010 | 24.688034 | 98 |
pm
|
null |
ceph-main/qa/workunits/rgw/test-keystone-service-token.py
|
#!/usr/bin/env python3
#
# Copyright (C) 2022 Binero
#
# Author: Tobias Urdin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
import sys
import requests
import time
# b4221c214dd64ee6a464g2153fae3813 is ID of deadbeef project
SWIFT_URL = 'http://localhost:8000/swift/v1/AUTH_b4221c214dd64ee6a464g2153fae3813'
KEYSTONE_URL = 'http://localhost:5000'
def get_stats():
stats_url = '%s/stats' % KEYSTONE_URL
return requests.get(stats_url)
def test_list_containers():
# Loop five list container requests with same token
for i in range(0, 5):
r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-1'})
if r.status_code != 204:
print('FAILED, status code is %d not 204' % r.status_code)
sys.exit(1)
# Get stats from fake Keystone server
r = get_stats()
if r.status_code != 200:
print('FAILED, status code is %d not 200' % r.status_code)
sys.exit(1)
stats = r.json()
# Verify admin token was cached
if stats['post_total'] != 1:
print('FAILED, post_total stat is %d not 1' % stats['post_total'])
sys.exit(1)
# Verify user token was cached
if stats['get_total'] != 1:
print('FAILED, get_total stat is %d not 1' % stats['get_total'])
sys.exit(1)
print('Wait for cache to be invalid')
time.sleep(11)
r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-1'})
if r.status_code != 204:
print('FAILED, status code is %d not 204' % r.status_code)
sys.exit(1)
# Get stats from fake Keystone server
r = get_stats()
if r.status_code != 200:
print('FAILED, status code is %d not 200' % r.status_code)
sys.exit(1)
stats = r.json()
if stats['post_total'] != 2:
print('FAILED, post_total stat is %d not 2' % stats['post_total'])
sys.exit(1)
if stats['get_total'] != 2:
print('FAILED, get_total stat is %d not 2' % stats['get_total'])
sys.exit(1)
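# Hedged helper (not wired into the tests above): the stats checks repeat the
# same pattern, so they could be collapsed into one call; expected_post and
# expected_get mirror the fake Keystone server's counters.
def assert_stats(expected_post, expected_get):
    r = get_stats()
    if r.status_code != 200:
        print('FAILED, status code is %d not 200' % r.status_code)
        sys.exit(1)
    stats = r.json()
    if stats['post_total'] != expected_post:
        print('FAILED, post_total stat is %d not %d' % (stats['post_total'], expected_post))
        sys.exit(1)
    if stats['get_total'] != expected_get:
        print('FAILED, get_total stat is %d not %d' % (stats['get_total'], expected_get))
        sys.exit(1)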
def test_expired_token():
# Try listing containers with an expired token
for i in range(0, 3):
r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2'})
if r.status_code != 401:
print('FAILED, status code is %d not 401' % r.status_code)
sys.exit(1)
# Get stats from fake Keystone server
r = get_stats()
if r.status_code != 200:
print('FAILED, status code is %d not 200' % r.status_code)
sys.exit(1)
stats = r.json()
# Verify admin token was cached
if stats['post_total'] != 2:
print('FAILED, post_total stat is %d not 2' % stats['post_total'])
sys.exit(1)
# Verify we got to the fake Keystone server since expired tokens are not cached
if stats['get_total'] != 5:
print('FAILED, get_total stat is %d not 5' % stats['get_total'])
sys.exit(1)
def test_expired_token_with_service_token():
# Try listing containers with an expired token but with a service token
for i in range(0, 3):
r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'admin-token-1'})
if r.status_code != 204:
print('FAILED, status code is %d not 204' % r.status_code)
sys.exit(1)
# Get stats from fake Keystone server
r = get_stats()
if r.status_code != 200:
print('FAILED, status code is %d not 200' % r.status_code)
sys.exit(1)
stats = r.json()
# Verify admin token was cached
if stats['post_total'] != 2:
print('FAILED, post_total stat is %d not 2' % stats['post_total'])
sys.exit(1)
# Verify we got to the fake Keystone server since expired tokens are not cached
if stats['get_total'] != 7:
print('FAILED, get_total stat is %d not 7' % stats['get_total'])
sys.exit(1)
print('Wait for cache to be invalid')
time.sleep(11)
r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'admin-token-1'})
if r.status_code != 204:
print('FAILED, status code is %d not 204' % r.status_code)
sys.exit(1)
# Get stats from fake Keystone server
r = get_stats()
if r.status_code != 200:
print('FAILED, status code is %d not 200' % r.status_code)
sys.exit(1)
stats = r.json()
if stats['post_total'] != 3:
print('FAILED, post_total stat is %d not 3' % stats['post_total'])
sys.exit(1)
if stats['get_total'] != 9:
print('FAILED, get_total stat is %d not 9' % stats['get_total'])
sys.exit(1)
def test_expired_token_with_invalid_service_token():
print('Wait for cache to be invalid')
time.sleep(11)
# Test with a token that doesn't have allowed role as service token
for i in range(0, 3):
r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'user-token-1'})
if r.status_code != 401:
print('FAILED, status code is %d not 401' % r.status_code)
sys.exit(1)
# Make sure we get user-token-1 cached
r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-1'})
if r.status_code != 204:
print('FAILED, status code is %d not 204' % r.status_code)
sys.exit(1)
# Test that a cached token (that is invalid as service token) cannot be used as service token
for i in range(0, 3):
r = requests.get(SWIFT_URL, headers={'X-Auth-Token': 'user-token-2', 'X-Service-Token': 'user-token-1'})
if r.status_code != 401:
print('FAILED, status code is %d not 401' % r.status_code)
sys.exit(1)
def main():
test_list_containers()
test_expired_token()
test_expired_token_with_service_token()
test_expired_token_with_invalid_service_token()
if __name__ == '__main__':
main()
| 6,300 | 32.163158 | 113 |
py
|
null |
ceph-main/qa/workunits/rgw/test_librgw_file.sh
|
#!/bin/sh -e
if [ -z ${AWS_ACCESS_KEY_ID} ]
then
export AWS_ACCESS_KEY_ID=`openssl rand -base64 20`
export AWS_SECRET_ACCESS_KEY=`openssl rand -base64 40`
radosgw-admin user create --uid ceph-test-librgw-file \
--access-key $AWS_ACCESS_KEY_ID \
--secret $AWS_SECRET_ACCESS_KEY \
--display-name "librgw test user" \
--email [email protected] || echo "librgw user exists"
# keyring override for teuthology env
KEYRING="/etc/ceph/ceph.keyring"
K="-k ${KEYRING}"
fi
# nfsns is the main suite
# create hierarchy, and then list it
echo "phase 1.1"
ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --create --rename --verbose
# the older librgw_file can consume the namespace
echo "phase 1.2"
ceph_test_librgw_file_nfsns ${K} --getattr --verbose
# and delete the hierarchy
echo "phase 1.3"
ceph_test_librgw_file_nfsns ${K} --hier1 --dirs1 --delete --verbose
# bulk create/delete buckets
echo "phase 2.1"
ceph_test_librgw_file_cd ${K} --create --multi --verbose
echo "phase 2.2"
ceph_test_librgw_file_cd ${K} --delete --multi --verbose
# write continuation test
echo "phase 3.1"
ceph_test_librgw_file_aw ${K} --create --large --verify
echo "phase 3.2"
ceph_test_librgw_file_aw ${K} --delete --large
# continued readdir
echo "phase 4.1"
ceph_test_librgw_file_marker ${K} --create --marker1 --marker2 --nobjs=100 --verbose
echo "phase 4.2"
ceph_test_librgw_file_marker ${K} --delete --verbose
# advanced i/o--but skip readv/writev for now--split delete from
# create and stat ops to avoid fault in sysobject cache
echo "phase 5.1"
ceph_test_librgw_file_gp ${K} --get --stat --put --create
echo "phase 5.2"
ceph_test_librgw_file_gp ${K} --delete
exit 0
| 1,707 | 27.466667 | 84 |
sh
|
null |
ceph-main/qa/workunits/rgw/test_rgw_d4n.py
|
#!/usr/bin/python3
import logging as log
from configobj import ConfigObj
import boto3
import redis
import subprocess
import json
log.basicConfig(level=log.DEBUG)
""" Constants """
ACCESS_KEY = 'test3'
SECRET_KEY = 'test3'
def exec_cmd(cmd):
log.debug("exec_cmd(%s)", cmd)
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode == 0:
log.info('command succeeded')
if out is not None: log.info(out)
return out
else:
raise Exception("error: %s \nreturncode: %s" % (err, proc.returncode))
except Exception as e:
log.error('command failed')
log.error(e)
return False
def get_radosgw_endpoint():
out = exec_cmd('sudo netstat -nltp | egrep "rados|valgr"') # short for radosgw/valgrind
x = out.decode('utf8').split(" ")
port = [i for i in x if ':' in i][0].split(':')[1]
log.info('radosgw port: %s' % port)
proto = "http"
hostname = '127.0.0.1'
if port == '443':
proto = "https"
endpoint = "%s://%s:%s" % (proto, hostname, port)
log.info("radosgw endpoint is: %s", endpoint)
return endpoint, proto
def create_s3cmd_config(path, proto):
"""
Creates a minimal config file for s3cmd
"""
log.info("Creating s3cmd config...")
use_https_config = "False"
log.info("proto for s3cmd config is %s", proto)
if proto == "https":
use_https_config = "True"
s3cmd_config = ConfigObj(
indent_type='',
infile={
'default':
{
'host_bucket': 'no.way.in.hell',
'use_https': use_https_config,
},
}
)
f = open(path, 'wb')
s3cmd_config.write(f)
f.close()
log.info("s3cmd config written")
def get_cmd_output(cmd_out):
out = cmd_out.decode('utf8')
out = out.strip('\n')
return out
def test_directory_methods(r, client, obj):
test_txt = b'test'
# setValue call
response_put = obj.put(Body=test_txt)
assert(response_put.get('ResponseMetadata').get('HTTPStatusCode') == 200)
data = r.hgetall('rgw-object:test.txt:directory')
assert(data.get('key') == 'rgw-object:test.txt:directory')
assert(data.get('size') == '4')
assert(data.get('bucket_name') == 'bkt')
assert(data.get('obj_name') == 'test.txt')
assert(data.get('hosts') == '127.0.0.1:6379')
# getValue call
response_get = obj.get()
assert(response_get.get('ResponseMetadata').get('HTTPStatusCode') == 200)
data = r.hgetall('rgw-object:test.txt:directory')
assert(data.get('key') == 'rgw-object:test.txt:directory')
assert(data.get('size') == '4')
assert(data.get('bucket_name') == 'bkt')
assert(data.get('obj_name') == 'test.txt')
assert(data.get('hosts') == '127.0.0.1:6379')
# delValue call
response_del = obj.delete()
assert(response_del.get('ResponseMetadata').get('HTTPStatusCode') == 204)
assert(r.exists('rgw-object:test.txt:directory') == False)
r.flushall()
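# Hedged helper (illustration only): the directory-entry assertions above are
# repeated for the put and get paths and could be factored like this; the
# bucket name 'bkt' and Redis endpoint match the constants used in this test.
def assert_directory_entry(r, obj_name, size):
    data = r.hgetall('rgw-object:%s:directory' % obj_name)
    assert(data.get('key') == 'rgw-object:%s:directory' % obj_name)
    assert(data.get('size') == size)  # size is a string, e.g. '4'
    assert(data.get('bucket_name') == 'bkt')
    assert(data.get('obj_name') == obj_name)
    assert(data.get('hosts') == '127.0.0.1:6379')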
def test_cache_methods(r, client, obj):
test_txt = b'test'
# setObject call
response_put = obj.put(Body=test_txt)
assert(response_put.get('ResponseMetadata').get('HTTPStatusCode') == 200)
data = r.hgetall('rgw-object:test.txt:cache')
output = subprocess.check_output(['radosgw-admin', 'object', 'stat', '--bucket=bkt', '--object=test.txt'])
attrs = json.loads(output.decode('latin-1'))
assert((data.get(b'user.rgw.tail_tag')) == attrs.get('attrs').get('user.rgw.tail_tag').encode("latin-1") + b'\x00')
assert((data.get(b'user.rgw.pg_ver')) == attrs.get('attrs').get('user.rgw.pg_ver').encode("latin-1") + b'\x00\x00\x00\x00\x00\x00\x00')
assert((data.get(b'user.rgw.idtag')) == attrs.get('tag').encode("latin-1") + b'\x00')
assert((data.get(b'user.rgw.etag')) == attrs.get('etag').encode("latin-1"))
assert((data.get(b'user.rgw.x-amz-content-sha256')) == attrs.get('attrs').get('user.rgw.x-amz-content-sha256').encode("latin-1") + b'\x00')
assert((data.get(b'user.rgw.source_zone')) == attrs.get('attrs').get('user.rgw.source_zone').encode("latin-1") + b'\x00\x00\x00\x00')
assert((data.get(b'user.rgw.x-amz-date')) == attrs.get('attrs').get('user.rgw.x-amz-date').encode("latin-1") + b'\x00')
tmp1 = '\x08\x06L\x01\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x06\x84\x00\x00\x00\n\nj\x00\x00\x00\x03\x00\x00\x00bkt+\x00\x00\x00'
tmp2 = '+\x00\x00\x00'
tmp3 = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\b\x00\x00\x00test.txt\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00!\x00\x00\x00'
tmp4 = '\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x01 \x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
'\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x00\x00\x00default-placement\x11\x00\x00\x00default-placement\x00\x00\x00\x00\x02\x02\x18' \
'\x00\x00\x00\x04\x00\x00\x00none\x01\x01\t\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
assert(data.get(b'user.rgw.manifest') == tmp1.encode("latin-1") + attrs.get('manifest').get('tail_placement').get('bucket').get('bucket_id').encode("utf-8")
+ tmp2.encode("latin-1") + attrs.get('manifest').get('tail_placement').get('bucket').get('bucket_id').encode("utf-8")
+ tmp3.encode("latin-1") + attrs.get('manifest').get('prefix').encode("utf-8")
+ tmp4.encode("latin-1"))
tmp5 = '\x02\x02\x81\x00\x00\x00\x03\x02\x11\x00\x00\x00\x06\x00\x00\x00s3main\x03\x00\x00\x00Foo\x04\x03d\x00\x00\x00\x01\x01\x00\x00\x00\x06\x00\x00' \
'\x00s3main\x0f\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00s3main\x05\x035\x00\x00\x00\x02\x02\x04\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00s3main' \
'\x00\x00\x00\x00\x00\x00\x00\x00\x02\x02\x04\x00\x00\x00\x0f\x00\x00\x00\x03\x00\x00\x00Foo\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
'\x00\x00\x00'
assert((data.get(b'user.rgw.acl')) == tmp5.encode("latin-1"))
# getObject call
response_get = obj.get()
assert(response_get.get('ResponseMetadata').get('HTTPStatusCode') == 200)
# Copy to new object with 'COPY' directive; metadata value should not change
obj.metadata.update({'test':'value'})
m = obj.metadata
m['test'] = 'value_replace'
# copyObject call
client.copy_object(Bucket='bkt', Key='test_copy.txt', CopySource='bkt/test.txt', Metadata = m, MetadataDirective='COPY')
assert(r.hexists('rgw-object:test_copy.txt:cache', b'user.rgw.x-amz-meta-test') == 0)
# Update object with 'REPLACE' directive; metadata value should change
client.copy_object(Bucket='bkt', Key='test.txt', CopySource='bkt/test.txt', Metadata = m, MetadataDirective='REPLACE')
data = r.hget('rgw-object:test.txt:cache', b'user.rgw.x-amz-meta-test')
assert(data == b'value_replace\x00')
# Ensure cache entry exists in cache before deletion
assert(r.exists('rgw-object:test.txt:cache') == True)
# delObject call
response_del = obj.delete()
assert(response_del.get('ResponseMetadata').get('HTTPStatusCode') == 204)
assert(r.exists('rgw-object:test.txt:cache') == False)
r.flushall()
def main():
"""
execute the d4n test
"""
# Setup for test
log.info("D4NFilterTest setup.")
out = exec_cmd('pwd')
pwd = get_cmd_output(out)
log.debug("pwd is: %s", pwd)
endpoint, proto = get_radosgw_endpoint()
client = boto3.client(service_name='s3',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
endpoint_url=endpoint,
use_ssl=False,
verify=False)
s3 = boto3.resource('s3',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
endpoint_url=endpoint,
use_ssl=False,
verify=False)
bucket = s3.Bucket('bkt')
bucket.create()
obj = s3.Object(bucket_name='bkt', key='test.txt')
# Check for Redis instance
try:
connection = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
connection.ping()
except:
log.debug("ERROR: Redis instance not running.")
raise
# Create s3cmd config
s3cmd_config_path = pwd + '/s3cfg'
create_s3cmd_config(s3cmd_config_path, proto)
r = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
test_directory_methods(r, client, obj)
# Responses should not be decoded
r = redis.Redis(host='localhost', port=6379, db=0)
test_cache_methods(r, client, obj)
log.info("D4NFilterTest successfully completed.")
main()
log.info("Completed D4N tests")
| 8,918 | 35.553279 | 162 |
py
|
null |
ceph-main/qa/workunits/rgw/test_rgw_datacache.py
|
#!/usr/bin/python3
import logging as log
from configobj import ConfigObj
import subprocess
import json
import os
"""
Runs a test against a rgw with the data cache enabled. A client must be
set in the config for this task. This client must be the same client
that is in the config for the `rgw` task.
In the `overrides` section `datacache` and `datacache_path` must be configured for
the `rgw` task and the ceph conf overrides must contain the below config
variables in the client section.
`s3cmd` must be added as an extra_package to the install task.
In the `workunit` task, `- rgw/run-datacache.sh` must be set for the client that
is in the config for the `rgw` task. The `RGW_DATACACHE_PATH` variable must be
set in the workunit's `env` and it must match the `datacache_path` given to the
`rgw` task in `overrides`.
Ex:
- install:
extra_packages:
deb: ['s3cmd']
rpm: ['s3cmd']
- overrides:
rgw:
datacache: true
datacache_path: /tmp/rgw_datacache
install:
extra_packages:
deb: ['s3cmd']
rpm: ['s3cmd']
ceph:
conf:
client:
rgw d3n l1 datacache persistent path: /tmp/rgw_datacache/
rgw d3n l1 datacache size: 10737417240
rgw d3n l1 local datacache enabled: true
rgw enable ops log: true
- rgw:
client.0:
- workunit:
clients:
client.0:
- rgw/run-datacache.sh
env:
RGW_DATACACHE_PATH: /tmp/rgw_datacache
cleanup: true
"""
log.basicConfig(level=log.DEBUG)
""" Constants """
USER = 'rgw_datacache_user'
DISPLAY_NAME = 'DatacacheUser'
ACCESS_KEY = 'NX5QOQKC6BH2IDN8HC7A'
SECRET_KEY = 'LnEsqNNqZIpkzauboDcLXLcYaWwLQ3Kop0zAnKIn'
BUCKET_NAME = 'datacachebucket'
FILE_NAME = '7M.dat'
GET_FILE_NAME = '7M-get.dat'
def exec_cmd(cmd):
log.debug("exec_cmd(%s)", cmd)
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode == 0:
log.info('command succeeded')
if out is not None: log.info(out)
return out
else:
raise Exception("error: %s \nreturncode: %s" % (err, proc.returncode))
except Exception as e:
log.error('command failed')
log.error(e)
return False
def get_radosgw_endpoint():
out = exec_cmd('sudo netstat -nltp | egrep "rados|valgr"') # short for radosgw/valgrind
x = out.decode('utf8').split(" ")
port = [i for i in x if ':' in i][0].split(':')[1]
log.info('radosgw port: %s' % port)
proto = "http"
hostname = '127.0.0.1'
if port == '443':
proto = "https"
endpoint = hostname
log.info("radosgw endpoint is: %s", endpoint)
return endpoint, proto
def create_s3cmd_config(path, proto):
"""
Creates a minimal config file for s3cmd
"""
log.info("Creating s3cmd config...")
use_https_config = "False"
log.info("proto for s3cmd config is %s", proto)
if proto == "https":
use_https_config = "True"
s3cmd_config = ConfigObj(
indent_type='',
infile={
'default':
{
'host_bucket': 'no.way.in.hell',
'use_https': use_https_config,
},
}
)
f = open(path, 'wb')
s3cmd_config.write(f)
f.close()
log.info("s3cmd config written")
def get_cmd_output(cmd_out):
out = cmd_out.decode('utf8')
out = out.strip('\n')
return out
def main():
"""
execute the datacache test
"""
# setup for test
cache_dir = os.environ['RGW_DATACACHE_PATH']
log.debug("datacache dir from config is: %s", cache_dir)
out = exec_cmd('pwd')
pwd = get_cmd_output(out)
log.debug("pwd is: %s", pwd)
endpoint, proto = get_radosgw_endpoint()
# create 7M file to put
outfile = pwd + '/' + FILE_NAME
exec_cmd('dd if=/dev/urandom of=%s bs=1M count=7' % (outfile))
# create user
exec_cmd('radosgw-admin user create --uid %s --display-name %s --access-key %s --secret %s'
% (USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY))
# create s3cmd config
s3cmd_config_path = pwd + '/s3cfg'
create_s3cmd_config(s3cmd_config_path, proto)
# create a bucket
exec_cmd('s3cmd --access_key=%s --secret_key=%s --config=%s --no-check-hostname --host=%s mb s3://%s'
% (ACCESS_KEY, SECRET_KEY, s3cmd_config_path, endpoint, BUCKET_NAME))
# put an object in the bucket
exec_cmd('s3cmd --access_key=%s --secret_key=%s --config=%s --no-check-hostname --host=%s put %s s3://%s'
% (ACCESS_KEY, SECRET_KEY, s3cmd_config_path, endpoint, outfile, BUCKET_NAME))
# get object from bucket
get_file_path = pwd + '/' + GET_FILE_NAME
exec_cmd('s3cmd --access_key=%s --secret_key=%s --config=%s --no-check-hostname --host=%s get s3://%s/%s %s --force'
% (ACCESS_KEY, SECRET_KEY, s3cmd_config_path, endpoint, BUCKET_NAME, FILE_NAME, get_file_path))
# get info of object
out = exec_cmd('radosgw-admin object stat --bucket=%s --object=%s' % (BUCKET_NAME, FILE_NAME))
json_op = json.loads(out)
cached_object_name = json_op['manifest']['prefix']
log.debug("Cached object name is: %s", cached_object_name)
    # check that the cache is populated (i.e., the cache directory is not empty)
out = exec_cmd('find %s -type f | wc -l' % (cache_dir))
chk_cache_dir = int(get_cmd_output(out))
log.debug("Check cache dir content: %s", chk_cache_dir)
if chk_cache_dir == 0:
log.info("NOTICE: datacache test object not found, inspect if datacache was bypassed or disabled during this check.")
return
# list the files in the cache dir for troubleshooting
out = exec_cmd('ls -l %s' % (cache_dir))
# get name of cached object and check if it exists in the cache
out = exec_cmd('find %s -name "*%s1"' % (cache_dir, cached_object_name))
cached_object_path = get_cmd_output(out)
log.debug("Path of file in datacache is: %s", cached_object_path)
out = exec_cmd('basename %s' % (cached_object_path))
basename_cmd_out = get_cmd_output(out)
log.debug("Name of file in datacache is: %s", basename_cmd_out)
# check to see if the cached object is in Ceph
out = exec_cmd('rados ls -p default.rgw.buckets.data')
rados_ls_out = get_cmd_output(out)
log.debug("rados ls output is: %s", rados_ls_out)
assert(basename_cmd_out in rados_ls_out)
log.debug("RGW Datacache test SUCCESS")
# remove datacache dir
#cmd = exec_cmd('rm -rf %s' % (cache_dir))
#log.debug("RGW Datacache dir deleted")
    #^ commenting for future reference - the work unit will continue running tests and if the cache_dir is removed
# all the writes to cache will fail with errno 2 ENOENT No such file or directory.
main()
log.info("Completed Datacache tests")
| 6,864 | 31.690476 | 125 |
py
|
null |
ceph-main/qa/workunits/rgw/test_rgw_gc_log.sh
|
#!/bin/sh -e
ceph_test_rgw_gc_log
exit 0
| 43 | 6.333333 | 20 |
sh
|
null |
ceph-main/qa/workunits/rgw/test_rgw_obj.sh
|
#!/bin/sh -e
ceph_test_rgw_obj
exit 0
| 40 | 5.833333 | 17 |
sh
|
null |
ceph-main/qa/workunits/rgw/test_rgw_orphan_list.sh
|
#!/usr/bin/env bash
# set -x
set -e
# if defined, debug messages will be displayed and prepended with the string
# debug="DEBUG"
huge_size=5100 # in megabytes
big_size=7 # in megabytes
huge_obj=/tmp/huge_obj.temp.$$
big_obj=/tmp/big_obj.temp.$$
empty_obj=/tmp/empty_obj.temp.$$
fifo=/tmp/orphan-fifo.$$
awscli_dir=${HOME}/awscli_temp
export PATH=${PATH}:${awscli_dir}
rgw_host=$(hostname --fqdn)
if echo "$rgw_host" | grep -q '\.' ; then
:
else
host_domain=".front.sepia.ceph.com"
echo "WARNING: rgw hostname -- $rgw_host -- does not appear to be fully qualified; PUNTING and appending $host_domain"
rgw_host="${rgw_host}${host_domain}"
fi
rgw_port=80
echo "Fully Qualified Domain Name: $rgw_host"
success() {
echo OK.
exit 0
}
########################################################################
# INSTALL AND CONFIGURE TOOLING
install_awscli() {
# NB: this does verify authenticity and integrity of downloaded
# file; see
# https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html
here="$(pwd)"
cd "$HOME"
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
mkdir -p $awscli_dir
./aws/install -i $awscli_dir
cd "$here"
}
uninstall_awscli() {
here="$(pwd)"
cd "$HOME"
rm -rf $awscli_dir ./aws awscliv2.zip
cd "$here"
}
sudo yum -y install s3cmd
sudo yum -y install python3-setuptools
sudo yum -y install python3-pip
sudo pip3 install --upgrade setuptools
sudo pip3 install python-swiftclient
# get ready for transition from s3cmd to awscli
if false ;then
install_awscli
aws --version
uninstall_awscli
fi
s3config=/tmp/s3config.$$
# do not include the port when it is 80; the host base is used in the
# v4 signature and it needs to follow this convention for signatures
# to match
if [ "$rgw_port" -ne 80 ] ;then
s3_host_base="${rgw_host}:${rgw_port}"
else
s3_host_base="$rgw_host"
fi
cat >${s3config} <<EOF
[default]
host_base = $s3_host_base
access_key = 0555b35654ad1656d804
secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
bucket_location = us-east-1
check_ssl_certificate = True
check_ssl_hostname = True
default_mime_type = binary/octet-stream
delete_removed = False
dry_run = False
enable_multipart = True
encoding = UTF-8
encrypt = False
follow_symlinks = False
force = False
guess_mime_type = True
host_bucket = anything.with.three.dots
multipart_chunk_size_mb = 15
multipart_max_chunks = 10000
recursive = False
recv_chunk = 65536
send_chunk = 65536
signature_v2 = False
socket_timeout = 300
use_https = False
use_mime_magic = True
verbosity = WARNING
EOF
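# Sanity-check the generated config if needed (illustration only; s3cmd's
# --dump-config prints the effective settings):
# s3cmd --config=${s3config} --dump-config | grep -E 'host_base|use_https'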
# set up swift authentication
export ST_AUTH=http://${rgw_host}:${rgw_port}/auth/v1.0
export ST_USER=test:tester
export ST_KEY=testing
create_users() {
# Create S3 user
local akey='0555b35654ad1656d804'
local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
radosgw-admin user create --uid testid \
--access-key $akey --secret $skey \
--display-name 'M. Tester' --email [email protected]
# Create Swift user
radosgw-admin user create --subuser=test:tester \
--display-name=Tester-Subuser --key-type=swift \
--secret=testing --access=full
}
myswift() {
if [ -n "$debug" ] ;then
echo "${debug}: swift --verbose --debug $@"
fi
swift --verbose --debug "$@"
local code=$?
if [ $code -ne 0 ] ;then
echo "ERROR: code = $code ; command = s3cmd --config=${s3config} --verbose --debug "$@""
exit $code
fi
}
mys3cmd() {
if [ -n "$debug" ] ;then
echo "${debug}: s3cmd --config=${s3config} --verbose --debug $@"
fi
s3cmd --config=${s3config} --verbose --debug "$@"
local code=$?
if [ $code -ne 0 ] ;then
echo "ERROR: code = $code ; command = s3cmd --config=${s3config} --verbose --debug "$@""
exit $code
fi
}
# upload a file as a multipart transfer and kill the s3cmd client once the
# given part number appears in its progress output, deliberately leaving an
# incomplete multipart upload behind
mys3uploadkill() {
if [ $# -ne 5 ] ;then
echo "$0: error expecting 5 arguments"
exit 1
fi
local_file="$1"
remote_bkt="$2"
remote_obj="$3"
fifo="$4"
stop_part="$5"
mkfifo $fifo
s3cmd --config=${s3config} put $local_file \
s3://${remote_bkt}/${remote_obj} \
--progress \
--multipart-chunk-size-mb=5 >$fifo &
set +e # don't allow errors to stop script
while read line ;do
echo "$line" | grep --quiet "part $stop_part "
if [ ${PIPESTATUS[1]} -eq 0 ] ;then
kill -9 $(jobs -p)
break
fi
done <$fifo
set -e
rm -f $fifo
}
mys3upload() {
obj=$1
bucket=$2
dest_obj=$3
mys3cmd put -q $obj s3://${bucket}/$dest_obj
}
########################################################################
# PREP
create_users
dd if=/dev/urandom of=$big_obj bs=1M count=${big_size}
dd if=/dev/urandom of=$huge_obj bs=1M count=${huge_size}
touch $empty_obj
quick_tests() {
echo TRY A SWIFT COMMAND
myswift upload swift-plain-ctr $big_obj --object-name swift-obj-test
myswift list
myswift list swift-plain-ctr
echo TRY A RADOSGW-ADMIN COMMAND
radosgw-admin bucket list # make sure rgw is up and running
}
########################################################################
# S3 TESTS
####################################
# regular multipart test
mys3cmd mb s3://multipart-bkt
mys3upload $huge_obj multipart-bkt multipart-obj
mys3cmd ls
mys3cmd ls s3://multipart-bkt
####################################
# multipart test with incomplete uploads
bkt="incomplete-mp-bkt-1"
mys3cmd mb s3://$bkt
mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c $fifo 20
# generate an incomplete multipart with more than 1,000 parts
mys3uploadkill $huge_obj $bkt incomplete-mp-obj-b $fifo 1005
# generate more than 1000 incomplete multiparts
for c in $(seq 1005) ;do
mys3uploadkill $huge_obj $bkt incomplete-mp-obj-c-$c $fifo 3
done
####################################
# resharded bucket
bkt=resharded-bkt-1
mys3cmd mb s3://$bkt
for f in $(seq 8) ; do
dest_obj="reshard-obj-${f}"
mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
done
radosgw-admin bucket reshard --num-shards 3 --bucket=$bkt --yes-i-really-mean-it
radosgw-admin bucket reshard --num-shards 5 --bucket=$bkt --yes-i-really-mean-it
####################################
# versioned bucket
if true ;then
echo "WARNING: versioned bucket test currently turned off"
else
bkt=versioned-bkt-1
mys3cmd mb s3://$bkt
# bucket-enable-versioning $bkt
for f in $(seq 3) ;do
for g in $(seq 10) ;do
dest_obj="versioned-obj-${g}"
mys3cmd put -q $big_obj s3://${bkt}/$dest_obj
done
done
for g in $(seq 1 2 10) ;do
dest_obj="versioned-obj-${g}"
mys3cmd rm s3://${bkt}/$dest_obj
done
fi
############################################################
# copy small objects
o_bkt="orig-bkt-1"
d_bkt="copy-bkt-1"
mys3cmd mb s3://$o_bkt
for f in $(seq 4) ;do
dest_obj="orig-obj-$f"
mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
done
mys3cmd mb s3://$d_bkt
mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
for f in $(seq 5 6) ;do
dest_obj="orig-obj-$f"
mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
done
############################################################
# copy small objects and delete original
o_bkt="orig-bkt-2"
d_bkt="copy-bkt-2"
mys3cmd mb s3://$o_bkt
for f in $(seq 4) ;do
dest_obj="orig-obj-$f"
mys3cmd put -q $big_obj s3://${o_bkt}/$dest_obj
done
mys3cmd mb s3://$d_bkt
mys3cmd cp s3://${o_bkt}/orig-obj-1 s3://${d_bkt}/copied-obj-1
mys3cmd cp s3://${o_bkt}/orig-obj-3 s3://${d_bkt}/copied-obj-3
for f in $(seq 5 6) ;do
dest_obj="orig-obj-$f"
mys3cmd put -q $big_obj s3://${d_bkt}/$dest_obj
done
mys3cmd rb --recursive s3://${o_bkt}
############################################################
# copy multipart objects
o_bkt="orig-mp-bkt-3"
d_bkt="copy-mp-bkt-3"
mys3cmd mb s3://$o_bkt
for f in $(seq 2) ;do
dest_obj="orig-multipart-obj-$f"
mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
done
mys3cmd mb s3://$d_bkt
mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
s3://${d_bkt}/copied-multipart-obj-1
for f in $(seq 5 5) ;do
dest_obj="orig-multipart-obj-$f"
mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
done
############################################################
# copy multipart objects and delete original
o_bkt="orig-mp-bkt-4"
d_bkt="copy-mp-bkt-4"
mys3cmd mb s3://$o_bkt
for f in $(seq 2) ;do
dest_obj="orig-multipart-obj-$f"
mys3cmd put -q $huge_obj s3://${o_bkt}/$dest_obj
done
mys3cmd mb s3://$d_bkt
mys3cmd cp s3://${o_bkt}/orig-multipart-obj-1 \
s3://${d_bkt}/copied-multipart-obj-1
for f in $(seq 5 5) ;do
dest_obj="orig-multipart-obj-$f"
mys3cmd put -q $huge_obj s3://${d_bkt}/$dest_obj
done
mys3cmd rb --recursive s3://$o_bkt
########################################################################
# SWIFT TESTS
# 600MB
segment_size=629145600
############################################################
# plain test
for f in $(seq 4) ;do
myswift upload swift-plain-ctr $big_obj --object-name swift-obj-$f
done
############################################################
# zero-len test
myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/
myswift upload swift-zerolen-ctr $big_obj --object-name subdir/abc1
myswift upload swift-zerolen-ctr $empty_obj --object-name subdir/empty1
myswift upload swift-zerolen-ctr $big_obj --object-name subdir/xyz1
############################################################
# dlo test
# upload in 300MB segments
myswift upload swift-dlo-ctr $huge_obj --object-name dlo-obj-1 \
-S $segment_size
############################################################
# slo test
# upload in 300MB segments
myswift upload swift-slo-ctr $huge_obj --object-name slo-obj-1 \
-S $segment_size --use-slo
############################################################
# large object copy test
# upload in 300MB segments
o_ctr=swift-orig-ctr
o_obj=slo-orig-obj-1
d_ctr=swift-copy-ctr
d_obj=slo-copy-obj-1
myswift upload $o_ctr $big_obj --object-name $o_obj
myswift copy --destination /${d_ctr}/${d_obj} \
$o_ctr $o_obj
myswift delete $o_ctr $o_obj
############################################################
# huge dlo object copy test
o_ctr=swift-orig-dlo-ctr-1
o_obj=dlo-orig-dlo-obj-1
d_ctr=swift-copy-dlo-ctr-1
d_obj=dlo-copy-dlo-obj-1
myswift upload $o_ctr $huge_obj --object-name $o_obj \
-S $segment_size
myswift copy --destination /${d_ctr}/${d_obj} \
$o_ctr $o_obj
############################################################
# huge dlo object copy and orig delete
o_ctr=swift-orig-dlo-ctr-2
o_obj=dlo-orig-dlo-obj-2
d_ctr=swift-copy-dlo-ctr-2
d_obj=dlo-copy-dlo-obj-2
myswift upload $o_ctr $huge_obj --object-name $o_obj \
-S $segment_size
myswift copy --destination /${d_ctr}/${d_obj} \
$o_ctr $o_obj
myswift delete $o_ctr $o_obj
############################################################
# huge slo object copy test
o_ctr=swift-orig-slo-ctr-1
o_obj=slo-orig-slo-obj-1
d_ctr=swift-copy-slo-ctr-1
d_obj=slo-copy-slo-obj-1
myswift upload $o_ctr $huge_obj --object-name $o_obj \
-S $segment_size --use-slo
myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
############################################################
# huge slo object copy test and orig delete
o_ctr=swift-orig-slo-ctr-2
o_obj=slo-orig-slo-obj-2
d_ctr=swift-copy-slo-ctr-2
d_obj=slo-copy-slo-obj-2
myswift upload $o_ctr $huge_obj --object-name $o_obj \
-S $segment_size --use-slo
myswift copy --destination /${d_ctr}/${d_obj} $o_ctr $o_obj
myswift delete $o_ctr $o_obj
########################################################################
# FORCE GARBAGE COLLECTION
sleep 6 # since for testing age at which gc can happen is 5 secs
radosgw-admin gc process --include-all
########################################
# DO ORPHAN LIST
pool="default.rgw.buckets.data"
rgw-orphan-list $pool
# we only expect there to be one output file, but loop just in case
ol_error=""
for f in orphan-list-*.out ; do
if [ -s "$f" ] ;then # if file non-empty
ol_error="${ol_error}:$f"
echo "One ore more orphans found in $f:"
cat "$f"
fi
done
if [ -n "$ol_error" ] ;then
echo "ERROR: orphans found when none expected"
exit 1
fi
########################################################################
# CLEAN UP
rm -f $empty_obj $big_obj $huge_obj $s3config
success
| 12,507 | 23.053846 | 122 |
sh
|
null |
ceph-main/qa/workunits/rgw/test_rgw_reshard.py
|
#!/usr/bin/python3
import errno
import logging as log
import time
import subprocess
import json
import boto3
import botocore.exceptions
import os
"""
Rgw manual and dynamic resharding testing against a running instance
"""
# The test cases in this file have been annotated for inventory.
# To extract the inventory (in csv format) use the command:
#
# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
#
#
log.basicConfig(format = '%(message)s', level=log.DEBUG)
log.getLogger('botocore').setLevel(log.CRITICAL)
log.getLogger('boto3').setLevel(log.CRITICAL)
log.getLogger('urllib3').setLevel(log.CRITICAL)
""" Constants """
USER = 'tester'
DISPLAY_NAME = 'Testing'
ACCESS_KEY = 'NX5QOQKC6BH2IDN8HC7A'
SECRET_KEY = 'LnEsqNNqZIpkzauboDcLXLcYaWwLQ3Kop0zAnKIn'
BUCKET_NAME = 'a-bucket'
VER_BUCKET_NAME = 'myver'
INDEX_POOL = 'default.rgw.buckets.index'
def exec_cmd(cmd, **kwargs):
check_retcode = kwargs.pop('check_retcode', True)
kwargs['shell'] = True
kwargs['stdout'] = subprocess.PIPE
proc = subprocess.Popen(cmd, **kwargs)
log.info(proc.args)
out, _ = proc.communicate()
if check_retcode:
assert(proc.returncode == 0)
return out
return (out, proc.returncode)
class BucketStats:
def __init__(self, bucket_name, bucket_id, num_objs=0, size_kb=0, num_shards=0):
self.bucket_name = bucket_name
self.bucket_id = bucket_id
self.num_objs = num_objs
self.size_kb = size_kb
self.num_shards = num_shards if num_shards > 0 else 1
def get_num_shards(self):
self.num_shards = get_bucket_num_shards(self.bucket_name, self.bucket_id)
def get_bucket_stats(bucket_name):
"""
function to get bucket stats
"""
cmd = exec_cmd("radosgw-admin bucket stats --bucket {}".format(bucket_name))
json_op = json.loads(cmd)
#print(json.dumps(json_op, indent = 4, sort_keys=True))
bucket_id = json_op['id']
num_shards = json_op['num_shards']
if len(json_op['usage']) > 0:
num_objects = json_op['usage']['rgw.main']['num_objects']
size_kb = json_op['usage']['rgw.main']['size_kb']
else:
num_objects = 0
size_kb = 0
log.debug(" \nBUCKET_STATS: \nbucket: {} id: {} num_objects: {} size_kb: {} num_shards: {}\n".format(bucket_name, bucket_id,
num_objects, size_kb, num_shards))
return BucketStats(bucket_name, bucket_id, num_objects, size_kb, num_shards)
def get_bucket_layout(bucket_name):
res = exec_cmd("radosgw-admin bucket layout --bucket {}".format(bucket_name))
return json.loads(res)
def get_bucket_shard0(bucket_name):
bucket_id = get_bucket_stats(bucket_name).bucket_id
index_gen = get_bucket_layout(bucket_name)['layout']['current_index']['gen']
return '.dir.%s.%d.0' % (bucket_id, index_gen)
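# e.g. with bucket id <id> and index generation 2, shard 0's index object is
# named '.dir.<id>.2.0' (this just restates the format string above)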
def get_bucket_num_shards(bucket_name, bucket_id):
"""
function to get bucket num shards
"""
metadata = 'bucket.instance:' + bucket_name + ':' + bucket_id
cmd = exec_cmd('radosgw-admin metadata get {}'.format(metadata))
json_op = json.loads(cmd)
num_shards = json_op['data']['bucket_info']['num_shards']
return num_shards
def run_bucket_reshard_cmd(bucket_name, num_shards, **kwargs):
cmd = 'radosgw-admin bucket reshard --bucket {} --num-shards {}'.format(bucket_name, num_shards)
cmd += ' --rgw-reshard-bucket-lock-duration 30' # reduce to minimum
if 'error_at' in kwargs:
cmd += ' --inject-error-at {}'.format(kwargs.pop('error_at'))
elif 'abort_at' in kwargs:
cmd += ' --inject-abort-at {}'.format(kwargs.pop('abort_at'))
if 'error_code' in kwargs:
cmd += ' --inject-error-code {}'.format(kwargs.pop('error_code'))
return exec_cmd(cmd, **kwargs)
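# Usage example (illustration only): inject EIO at the 'block_writes' stage
# and tolerate the nonzero exit status so the caller can assert on it:
#   out, ret = run_bucket_reshard_cmd('a-bucket', 3, error_at='block_writes',
#                                     check_retcode=False)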
def test_bucket_reshard(conn, name, **fault):
# create a bucket with non-default ACLs to verify that reshard preserves them
bucket = conn.create_bucket(Bucket=name, ACL='authenticated-read')
grants = bucket.Acl().grants
objs = []
try:
# create objs
for i in range(0, 20):
objs += [bucket.put_object(Key='key' + str(i), Body=b"some_data")]
old_shard_count = get_bucket_stats(name).num_shards
num_shards_expected = old_shard_count + 1
# try reshard with fault injection
_, ret = run_bucket_reshard_cmd(name, num_shards_expected, check_retcode=False, **fault)
if fault.get('error_code') == errno.ECANCELED:
assert(ret == 0) # expect ECANCELED to retry and succeed
else:
assert(ret != 0 and ret != errno.EBUSY)
# check shard count
cur_shard_count = get_bucket_stats(name).num_shards
assert(cur_shard_count == old_shard_count)
# verify that the bucket is writeable by deleting an object
objs.pop().delete()
assert grants == bucket.Acl().grants # recheck grants after cancel
# retry reshard without fault injection. if radosgw-admin aborted,
# we'll have to retry until the reshard lock expires
while True:
_, ret = run_bucket_reshard_cmd(name, num_shards_expected, check_retcode=False)
if ret == errno.EBUSY:
log.info('waiting 30 seconds for reshard lock to expire...')
time.sleep(30)
continue
assert(ret == 0)
break
# recheck shard count
final_shard_count = get_bucket_stats(name).num_shards
assert(final_shard_count == num_shards_expected)
assert grants == bucket.Acl().grants # recheck grants after commit
finally:
# cleanup on resharded bucket must succeed
bucket.delete_objects(Delete={'Objects':[{'Key':o.key} for o in objs]})
bucket.delete()
def main():
"""
execute manual and dynamic resharding commands
"""
# create user
_, ret = exec_cmd('radosgw-admin user create --uid {} --display-name {} --access-key {} --secret {}'.format(USER, DISPLAY_NAME, ACCESS_KEY, SECRET_KEY), check_retcode=False)
    assert(ret == 0 or ret == errno.EEXIST)
def boto_connect(portnum, ssl, proto):
endpoint = proto + '://localhost:' + portnum
conn = boto3.resource('s3',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
use_ssl=ssl,
endpoint_url=endpoint,
verify=False,
config=None,
)
try:
list(conn.buckets.limit(1)) # just verify we can list buckets
except botocore.exceptions.ConnectionError as e:
print(e)
raise
print('connected to', endpoint)
return conn
try:
connection = boto_connect('80', False, 'http')
except botocore.exceptions.ConnectionError:
try: # retry on non-privileged http port
connection = boto_connect('8000', False, 'http')
except botocore.exceptions.ConnectionError:
# retry with ssl
connection = boto_connect('443', True, 'https')
# create a bucket
bucket = connection.create_bucket(Bucket=BUCKET_NAME)
ver_bucket = connection.create_bucket(Bucket=VER_BUCKET_NAME)
    connection.BucketVersioning(VER_BUCKET_NAME).enable()
bucket_acl = connection.BucketAcl(BUCKET_NAME).load()
ver_bucket_acl = connection.BucketAcl(VER_BUCKET_NAME).load()
# TESTCASE 'reshard-add','reshard','add','add bucket to resharding queue','succeeds'
log.debug('TEST: reshard add\n')
num_shards_expected = get_bucket_stats(BUCKET_NAME).num_shards + 1
cmd = exec_cmd('radosgw-admin reshard add --bucket {} --num-shards {}'.format(BUCKET_NAME, num_shards_expected))
cmd = exec_cmd('radosgw-admin reshard list')
json_op = json.loads(cmd)
log.debug('bucket name {}'.format(json_op[0]['bucket_name']))
assert json_op[0]['bucket_name'] == BUCKET_NAME
assert json_op[0]['tentative_new_num_shards'] == num_shards_expected
# TESTCASE 'reshard-process','reshard','','process bucket resharding','succeeds'
log.debug('TEST: reshard process\n')
cmd = exec_cmd('radosgw-admin reshard process')
time.sleep(5)
# check bucket shards num
bucket_stats1 = get_bucket_stats(BUCKET_NAME)
if bucket_stats1.num_shards != num_shards_expected:
log.error("Resharding failed on bucket {}. Expected number of shards are not created\n".format(BUCKET_NAME))
# TESTCASE 'reshard-add','reshard','add','add non empty bucket to resharding queue','succeeds'
log.debug('TEST: reshard add non empty bucket\n')
# create objs
num_objs = 8
for i in range(0, num_objs):
connection.Object(BUCKET_NAME, ('key'+str(i))).put(Body=b"some_data")
num_shards_expected = get_bucket_stats(BUCKET_NAME).num_shards + 1
cmd = exec_cmd('radosgw-admin reshard add --bucket {} --num-shards {}'.format(BUCKET_NAME, num_shards_expected))
cmd = exec_cmd('radosgw-admin reshard list')
json_op = json.loads(cmd)
assert json_op[0]['bucket_name'] == BUCKET_NAME
assert json_op[0]['tentative_new_num_shards'] == num_shards_expected
# TESTCASE 'reshard-process','reshard','process','reshard non empty bucket','succeeds'
log.debug('TEST: reshard process non empty bucket\n')
cmd = exec_cmd('radosgw-admin reshard process')
# check bucket shards num
bucket_stats1 = get_bucket_stats(BUCKET_NAME)
if bucket_stats1.num_shards != num_shards_expected:
log.error("Resharding failed on bucket {}. Expected number of shards are not created\n".format(BUCKET_NAME))
# TESTCASE 'manual bucket resharding','inject error','fail','check bucket accessibility', 'retry reshard'
log.debug('TEST: reshard bucket with EIO injected at set_target_layout\n')
test_bucket_reshard(connection, 'error-at-set-target-layout', error_at='set_target_layout')
log.debug('TEST: reshard bucket with ECANCELED injected at set_target_layout\n')
test_bucket_reshard(connection, 'error-at-set-target-layout', error_at='set_target_layout', error_code=errno.ECANCELED)
log.debug('TEST: reshard bucket with abort at set_target_layout\n')
test_bucket_reshard(connection, 'abort-at-set-target-layout', abort_at='set_target_layout')
log.debug('TEST: reshard bucket with EIO injected at block_writes\n')
test_bucket_reshard(connection, 'error-at-block-writes', error_at='block_writes')
log.debug('TEST: reshard bucket with abort at block_writes\n')
test_bucket_reshard(connection, 'abort-at-block-writes', abort_at='block_writes')
log.debug('TEST: reshard bucket with EIO injected at commit_target_layout\n')
test_bucket_reshard(connection, 'error-at-commit-target-layout', error_at='commit_target_layout')
log.debug('TEST: reshard bucket with ECANCELED injected at commit_target_layout\n')
test_bucket_reshard(connection, 'error-at-commit-target-layout', error_at='commit_target_layout', error_code=errno.ECANCELED)
log.debug('TEST: reshard bucket with abort at commit_target_layout\n')
test_bucket_reshard(connection, 'abort-at-commit-target-layout', abort_at='commit_target_layout')
log.debug('TEST: reshard bucket with EIO injected at do_reshard\n')
test_bucket_reshard(connection, 'error-at-do-reshard', error_at='do_reshard')
log.debug('TEST: reshard bucket with abort at do_reshard\n')
test_bucket_reshard(connection, 'abort-at-do-reshard', abort_at='do_reshard')
# TESTCASE 'versioning-reshard','bucket','reshard','versioning reshard','succeeds'
log.debug(' test: reshard versioned bucket')
num_shards_expected = get_bucket_stats(VER_BUCKET_NAME).num_shards + 1
cmd = exec_cmd('radosgw-admin bucket reshard --bucket {} --num-shards {}'.format(VER_BUCKET_NAME,
num_shards_expected))
# check bucket shards num
ver_bucket_stats = get_bucket_stats(VER_BUCKET_NAME)
assert ver_bucket_stats.num_shards == num_shards_expected
# TESTCASE 'check acl'
new_bucket_acl = connection.BucketAcl(BUCKET_NAME).grants
assert new_bucket_acl == bucket_acl
new_ver_bucket_acl = connection.BucketAcl(VER_BUCKET_NAME).grants
assert new_ver_bucket_acl == ver_bucket_acl
# TESTCASE 'check reshard removes olh entries with empty name'
log.debug(' test: reshard removes olh entries with empty name')
bucket.objects.all().delete()
# get name of shard 0 object, add a bogus olh entry with empty name
bucket_shard0 = get_bucket_shard0(BUCKET_NAME)
if 'CEPH_ROOT' in os.environ:
k = '%s/qa/workunits/rgw/olh_noname_key' % os.environ['CEPH_ROOT']
v = '%s/qa/workunits/rgw/olh_noname_val' % os.environ['CEPH_ROOT']
else:
k = 'olh_noname_key'
v = 'olh_noname_val'
exec_cmd('rados -p %s setomapval %s --omap-key-file %s < %s' % (INDEX_POOL, bucket_shard0, k, v))
# check that bi list has one entry with empty name
cmd = exec_cmd('radosgw-admin bi list --bucket %s' % BUCKET_NAME)
json_op = json.loads(cmd.decode('utf-8', 'ignore')) # ignore utf-8 can't decode 0x80
assert len(json_op) == 1
assert json_op[0]['entry']['key']['name'] == ''
# reshard to prune the bogus olh
cmd = exec_cmd('radosgw-admin bucket reshard --bucket %s --num-shards %s --yes-i-really-mean-it' % (BUCKET_NAME, 1))
# check that bi list has zero entries
cmd = exec_cmd('radosgw-admin bi list --bucket %s' % BUCKET_NAME)
json_op = json.loads(cmd.decode('utf-8', 'ignore')) # ignore utf-8 can't decode 0x80
assert len(json_op) == 0
# Clean up
log.debug("Deleting bucket {}".format(BUCKET_NAME))
bucket.objects.all().delete()
bucket.delete()
log.debug("Deleting bucket {}".format(VER_BUCKET_NAME))
ver_bucket.delete()
main()
log.info("Completed resharding tests")
| 13,952 | 41.800613 | 177 |
py
|
null |
ceph-main/qa/workunits/rgw/test_rgw_s3_mp_reupload.py
|
import boto3
import botocore.exceptions
import sys
import os
import subprocess
#boto3.set_stream_logger(name='botocore')
# handles three optional system arguments:
# <bucket-name> : default is "bkt314738362229"
# <0 or 1> : 0 -> upload aborted, 1 -> completed; default is completed
# <0 or 1> : 0 -> bucket not versioned, 1 -> versioned; default is non-versioned
if len(sys.argv) >= 2:
bucket_name = sys.argv[1]
else:
bucket_name = "bkt314738362229"
print("bucket nams is %s" % bucket_name)
complete_mpu = True
if len(sys.argv) >= 3:
complete_mpu = int(sys.argv[2]) > 0
versioned_bucket = False
if len(sys.argv) >= 4:
versioned_bucket = int(sys.argv[3]) > 0
rgw_host = os.environ['RGW_HOST']
access_key = os.environ['RGW_ACCESS_KEY']
secret_key = os.environ['RGW_SECRET_KEY']
try:
endpoint='http://%s:%d' % (rgw_host, 80)
client = boto3.client('s3',
endpoint_url=endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
res = client.create_bucket(Bucket=bucket_name)
except botocore.exceptions.EndpointConnectionError:
try:
endpoint='https://%s:%d' % (rgw_host, 443)
client = boto3.client('s3',
endpoint_url=endpoint,
verify=False,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
res = client.create_bucket(Bucket=bucket_name)
except botocore.exceptions.EndpointConnectionError:
endpoint='http://%s:%d' % (rgw_host, 8000)
client = boto3.client('s3',
endpoint_url=endpoint,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
res = client.create_bucket(Bucket=bucket_name)
print("endpoint is %s" % endpoint)
if versioned_bucket:
res = client.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration={
'MFADelete': 'Disabled',
'Status': 'Enabled'}
)
key = "mpu_test4"
nparts = 2
ndups = 11
do_reupload = True
part_path = "/tmp/mp_part_5m"
subprocess.run(["dd", "if=/dev/urandom", "of=" + part_path, "bs=1M", "count=5"], check=True)
f = open(part_path, 'rb')
res = client.create_multipart_upload(Bucket=bucket_name, Key=key)
mpu_id = res["UploadId"]
print("start UploadId=%s" % (mpu_id))
parts = []
parts2 = []
for ix in range(0,nparts):
part_num = ix + 1
f.seek(0)
res = client.upload_part(Body=f, Bucket=bucket_name, Key=key,
UploadId=mpu_id, PartNumber=part_num)
# save
etag = res['ETag']
part = {'ETag': etag, 'PartNumber': part_num}
print("phase 1 uploaded part %s" % part)
parts.append(part)
if do_reupload:
# just re-upload part 1
part_num = 1
for ix in range(0,ndups):
f.seek(0)
res = client.upload_part(Body=f, Bucket=bucket_name, Key=key,
UploadId=mpu_id, PartNumber=part_num)
etag = res['ETag']
part = {'ETag': etag, 'PartNumber': part_num}
print ("phase 2 uploaded part %s" % part)
# save
etag = res['ETag']
part = {'ETag': etag, 'PartNumber': part_num}
parts2.append(part)
if complete_mpu:
print("completing multipart upload, parts=%s" % parts)
res = client.complete_multipart_upload(
Bucket=bucket_name, Key=key, UploadId=mpu_id,
MultipartUpload={'Parts': parts})
else:
print("aborting multipart upload, parts=%s" % parts)
res = client.abort_multipart_upload(
Bucket=bucket_name, Key=key, UploadId=mpu_id)
# clean up
subprocess.run(["rm", "-f", part_path], check=True)
| 3,705 | 29.377049 | 92 |
py
|
null |
ceph-main/qa/workunits/rgw/test_rgw_s3_mp_reupload.sh
|
#!/usr/bin/env bash
# INITIALIZATION
mydir=$(dirname $0)
data_pool=default.rgw.buckets.data
orphan_list_out=/tmp/orphan_list.$$
radoslist_out=/tmp/radoslist.$$
rados_ls_out=/tmp/rados_ls.$$
diff_out=/tmp/diff.$$
rgw_host="$(hostname --fqdn)"
echo "INFO: fully qualified domain name: $rgw_host"
export RGW_ACCESS_KEY="0555b35654ad1656d804"
export RGW_SECRET_KEY="h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
export RGW_HOST="${RGW_HOST:-$rgw_host}"
# random argument determines if multipart is aborted or completed 50/50
outcome=$((RANDOM % 2))
if [ $outcome -eq 0 ] ;then
echo "== TESTING *ABORTING* MULTIPART UPLOAD WITH RE-UPLOADS =="
else
echo "== TESTING *COMPLETING* MULTIPART UPLOAD WITH RE-UPLOADS =="
fi
# random argument determines if bucket is versioned 50/50
versioning=$((RANDOM % 2))
if [ $versioning -eq 0 ] ;then
echo "== TESTING NON-VERSIONED BUCKET =="
else
echo "== TESTING VERSIONED BUCKET =="
fi
# create a randomized bucket name
bucket="reupload-bkt-$((RANDOM % 899999 + 100000))"
# SET UP PYTHON VIRTUAL ENVIRONMENT
# install boto3
python3 -m venv $mydir
source $mydir/bin/activate
pip install pip --upgrade
pip install boto3
# CREATE RGW USER IF NECESSARY
if radosgw-admin user info --access-key $RGW_ACCESS_KEY 2>/dev/null ;then
echo INFO: user already exists
else
echo INFO: creating user
radosgw-admin user create --uid testid \
--access-key $RGW_ACCESS_KEY \
--secret $RGW_SECRET_KEY \
--display-name 'M. Tester' \
--email [email protected] 2>/dev/null
fi
# RUN REUPLOAD TEST
$mydir/bin/python3 ${mydir}/test_rgw_s3_mp_reupload.py $bucket $outcome $versioning
# ANALYZE FOR ERRORS
# (NOTE: for now we're choosing not to use the rgw-orphan-list tool)
# force garbage collection to remove extra parts
radosgw-admin gc process --include-all 2>/dev/null
marker=$(radosgw-admin metadata get bucket:$bucket 2>/dev/null | grep bucket_id | sed 's/.*: "\(.*\)".*/\1/')
# determine expected rados objects
radosgw-admin bucket radoslist --bucket=$bucket 2>/dev/null | sort >$radoslist_out
echo "radosgw-admin bucket radoslist:"
cat $radoslist_out
# determine found rados objects
rados ls -p $data_pool 2>/dev/null | grep "^$marker" | sort >$rados_ls_out
echo "rados ls:"
cat $rados_ls_out
# compare expected and found
diff $radoslist_out $rados_ls_out >$diff_out
if [ $(cat $diff_out | wc -l) -ne 0 ] ;then
error=1
echo "ERROR: Found differences between expected and actual rados objects for test bucket."
echo " note: indicators: '>' found but not expected; '<' expected but not found."
cat $diff_out
fi
# CLEAN UP
deactivate
rm -f $orphan_list_out $radoslist_out $rados_ls_out $diff_out
# PRODUCE FINAL RESULTS
if [ -n "$error" ] ;then
echo "== FAILED =="
exit 1
fi
echo "== PASSED =="
exit 0
| 2,838 | 24.576577 | 109 |
sh
|
null |
ceph-main/qa/workunits/suites/blogbench.sh
|
#!/usr/bin/env bash
set -ex
echo "getting blogbench"
wget http://download.ceph.com/qa/blogbench-1.0.tar.bz2
#cp /home/gregf/src/blogbench-1.0.tar.bz2 .
tar -xvf blogbench-1.0.tar.bz2
cd blogbench-1.0/
echo "making blogbench"
./configure
make
cd src
mkdir blogtest_in
echo "running blogbench"
./blogbench -d blogtest_in
| 320 | 19.0625 | 54 |
sh
|
null |
ceph-main/qa/workunits/suites/bonnie.sh
|
#!/usr/bin/env bash
set -ex
bonnie_bin=$(which bonnie++ 2>/dev/null) || bonnie_bin=/usr/sbin/bonnie++
uid_flags=""
[ "`id -u`" == "0" ] && uid_flags="-u root"
$bonnie_bin $uid_flags -n 100
| 193 | 15.166667 | 45 |
sh
|
null |
ceph-main/qa/workunits/suites/cephfs_journal_tool_smoke.sh
|
#!/usr/bin/env bash
set -ex
export BIN="${BIN:-cephfs-journal-tool --rank=cephfs:0}"
export JOURNAL_FILE=/tmp/journal.bin
export JSON_OUTPUT=/tmp/json.tmp
export BINARY_OUTPUT=/tmp/binary.tmp
if [ -d $BINARY_OUTPUT ] ; then
rm -rf $BINARY_OUTPUT
fi
# Check that the import/export stuff really works as expected
# first because it's used as the reset method between
# following checks.
echo "Testing that export/import cycle preserves state"
HEADER_STATE=`$BIN header get`
EVENT_LIST=`$BIN event get list`
$BIN journal export $JOURNAL_FILE
$BIN journal import $JOURNAL_FILE
NEW_HEADER_STATE=`$BIN header get`
NEW_EVENT_LIST=`$BIN event get list`
if [ ! "$HEADER_STATE" = "$NEW_HEADER_STATE" ] ; then
echo "Import failed to preserve header state"
echo $HEADER_STATE
echo $NEW_HEADER_STATE
exit -1
fi
if [ ! "$EVENT_LIST" = "$NEW_EVENT_LIST" ] ; then
echo "Import failed to preserve event state"
echo $EVENT_LIST
echo $NEW_EVENT_LIST
exit -1
fi
echo "Testing 'journal' commands..."
# Simplest thing: print the vital statistics of the journal
$BIN journal inspect
$BIN header get
# Make a copy of the journal in its original state
$BIN journal export $JOURNAL_FILE
if [ ! -s $JOURNAL_FILE ] ; then
echo "Export to $JOURNAL_FILE failed"
exit -1
fi
# Can we execute a journal reset?
$BIN journal reset
$BIN journal inspect
$BIN header get
echo "Rolling back journal to original state..."
$BIN journal import $JOURNAL_FILE
echo "Testing 'header' commands..."
$BIN header get
$BIN header set write_pos 123
$BIN header set expire_pos 123
$BIN header set trimmed_pos 123
echo "Rolling back journal to original state..."
$BIN journal import $JOURNAL_FILE
echo "Testing 'event' commands..."
$BIN event get summary
$BIN event get --type=UPDATE --path=/ --inode=0 --frag=0x100 summary
$BIN event get json --path $JSON_OUTPUT
if [ ! -s $JSON_OUTPUT ] ; then
echo "Export to $JSON_OUTPUT failed"
exit -1
fi
$BIN event get binary --path $BINARY_OUTPUT
if [ ! -s $BINARY_OUTPUT ] ; then
echo "Export to $BINARY_OUTPUT failed"
exit -1
fi
$BIN event recover_dentries summary
$BIN event splice summary
# Tests finish.
# Metadata objects have been modified by the 'event recover_dentries' command.
# The journal is no longer consistent with respect to metadata objects (especially the inotable).
# To ensure the MDS successfully replays its journal, we need to do a journal reset.
$BIN journal reset
cephfs-table-tool all reset session
| 2,474 | 25.902174 | 87 |
sh
|
null |
ceph-main/qa/workunits/suites/dbench-short.sh
|
#!/usr/bin/env bash
set -ex
dbench 1
| 39 | 5.666667 | 19 |
sh
|
null |
ceph-main/qa/workunits/suites/dbench.sh
|
#!/usr/bin/env bash
set -ex
dbench 1
dbench 10
| 49 | 6.142857 | 19 |
sh
|
null |
ceph-main/qa/workunits/suites/ffsb.sh
|
#!/usr/bin/env bash
set -ex
mydir=`dirname $0`
# try it again if the clone is slow; the second time, double the timeout
trap -- 'retry' EXIT
retry() {
rm -rf ffsb
# double the timeout value
timeout 3600 git clone https://git.ceph.com/ffsb.git --depth 1
}
rm -rf ffsb
timeout 1800 git clone https://git.ceph.com/ffsb.git --depth 1
trap - EXIT
cd ffsb
./configure
make
cd ..
mkdir tmp
cd tmp
for f in $mydir/*.ffsb
do
../ffsb/ffsb $f
done
cd ..
rm -r tmp ffsb*
| 464 | 13.53125 | 66 |
sh
|
null |
ceph-main/qa/workunits/suites/fio.sh
|
#!/usr/bin/env bash
set -x
gen_fio_file() {
iter=$1
f=$2
cat > randio-$$-${iter}.fio <<EOF
[randio]
blocksize_range=32m:128m
blocksize_unaligned=1
filesize=10G:20G
readwrite=randrw
runtime=300
size=20G
filename=${f}
EOF
}
sudo apt-get -y install fio
for i in $(seq 1 20); do
fcount=$(ls donetestfile* 2>/dev/null | wc -l)
donef="foo"
fiof="bar"
if test ${fcount} -gt 0; then
# choose random file
r=$(( RANDOM % fcount ))
testfiles=( $(ls donetestfile*) )
donef=${testfiles[${r}]}
fiof=$(echo ${donef} | sed -e "s|done|fio|")
gen_fio_file $i ${fiof}
else
fiof=fiotestfile.$$.$i
donef=donetestfile.$$.$i
gen_fio_file $i ${fiof}
fi
sudo rm -f ${donef}
sudo fio randio-$$-$i.fio
sudo ln ${fiof} ${donef}
ls -la
done
| 791 | 17.418605 | 49 |
sh
|
null |
ceph-main/qa/workunits/suites/fsstress.sh
|
#!/bin/bash
set -ex
mkdir -p fsstress
pushd fsstress
wget -q -O ltp-full.tgz http://download.ceph.com/qa/ltp-full-20091231.tgz
tar xzf ltp-full.tgz
pushd ltp-full-20091231/testcases/kernel/fs/fsstress
make
BIN=$(readlink -f fsstress)
popd
popd
T=$(mktemp -d -p .)
"$BIN" -d "$T" -l 1 -n 1000 -p 10 -v
rm -rf -- "$T"
| 319 | 16.777778 | 73 |
sh
|
null |
ceph-main/qa/workunits/suites/fsx.sh
|
#!/bin/sh -x
set -e
git clone https://git.ceph.com/xfstests-dev.git
cd xfstests-dev
git checkout 12973fc04fd10d4af086901e10ffa8e48866b735
make -j4
cd ..
cp xfstests-dev/ltp/fsx .
OPTIONS="-z" # don't use zero range calls; not supported by cephfs
./fsx $OPTIONS 1MB -N 50000 -p 10000 -l 1048576
./fsx $OPTIONS 10MB -N 50000 -p 10000 -l 10485760
./fsx $OPTIONS 100MB -N 50000 -p 10000 -l 104857600
| 403 | 22.764706 | 67 |
sh
|
null |
ceph-main/qa/workunits/suites/fsync-tester.sh
|
#!/bin/sh
set -ex
# To skirt around GPL compatibility issues:
wget http://download.ceph.com/qa/fsync-tester.c
gcc -D_GNU_SOURCE fsync-tester.c -o fsync-tester
./fsync-tester
echo $PATH
whereis lsof
lsof
| 207 | 13.857143 | 48 |
sh
|
null |
ceph-main/qa/workunits/suites/iogen.sh
|
#!/usr/bin/env bash
set -ex
echo "getting iogen"
wget http://download.ceph.com/qa/iogen_3.1p0.tar
tar -xvf iogen_3.1p0.tar
cd iogen_3.1p0
echo "making iogen"
make
echo "running iogen"
./iogen -n 5 -s 2g
echo "sleep for 10 min"
sleep 600
echo "stopping iogen"
./iogen -k
echo "OK"
| 283 | 14.777778 | 48 |
sh
|
null |
ceph-main/qa/workunits/suites/iozone-sync.sh
|
#!/usr/bin/env bash
set -ex
# basic tests of O_SYNC, O_DSYNC, O_RSYNC
# test O_SYNC
iozone -c -e -s 512M -r 1M -t 1 -F osync1 -i 0 -i 1 -o
# test O_DSYNC
iozone -c -e -s 512M -r 1M -t 1 -F odsync1 -i 0 -i 1 -+D
# test O_RSYNC
iozone -c -e -s 512M -r 1M -t 1 -F orsync1 -i 0 -i 1 -+r
# test same file with O_SYNC in one process, buffered in the other
# the sync test starts first, so the buffered test should blow
# past it and finish well before the sync test completes
iozone -c -e -s 512M -r 1M -t 1 -F osync2 -i 0 -i 1 -o &
sleep 1
iozone -c -e -s 512M -r 256K -t 1 -F osync2 -i 0
wait $!
# test same file with O_SYNC from different threads
iozone -c -e -s 512M -r 1M -t 2 -F osync3 -i 2 -o
| 656 | 27.565217 | 66 |
sh
|
null |
ceph-main/qa/workunits/suites/iozone.sh
|
#!/usr/bin/env bash
set -ex
iozone -c -e -s 1024M -r 16K -t 1 -F f1 -i 0 -i 1
iozone -c -e -s 1024M -r 1M -t 1 -F f2 -i 0 -i 1
iozone -c -e -s 10240M -r 1M -t 1 -F f3 -i 0 -i 1
| 179 | 21.5 | 49 |
sh
|
null |
ceph-main/qa/workunits/suites/pjd.sh
|
#!/usr/bin/env bash
set -ex
wget http://download.ceph.com/qa/pjd-fstest-20090130-RC-aclfixes.tgz
tar zxvf pjd*.tgz
cd pjd-fstest-20090130-RC
make clean
make
cd ..
mkdir tmp
cd tmp
# must be root!
sudo prove -r -v --exec 'bash -x' ../pjd*/tests
cd ..
rm -rf tmp pjd*
| 269 | 14 | 68 |
sh
|
null |
ceph-main/qa/workunits/windows/run-tests.sh
|
#!/usr/bin/env bash
set -ex
DIR="$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)"
source ${DIR}/libvirt_vm/build_utils.sh
source ${DIR}/libvirt_vm/connection_info.sh
# Run the Windows tests
scp_upload ${DIR} /windows-workunits
SSH_TIMEOUT=30m ssh_exec powershell.exe -File /windows-workunits/run-tests.ps1
| 305 | 24.5 | 78 |
sh
|
null |
ceph-main/qa/workunits/windows/test_rbd_wnbd.py
|
import argparse
import collections
import functools
import json
import logging
import math
import os
import prettytable
import random
import subprocess
import time
import threading
import typing
import uuid
from concurrent import futures
LOG = logging.getLogger()
parser = argparse.ArgumentParser(description='rbd-wnbd tests')
parser.add_argument('--test-name',
help='The test to be run.',
default="RbdFioTest")
parser.add_argument('--iterations',
help='Total number of test iterations',
default=1, type=int)
parser.add_argument('--concurrency',
help='The number of tests to run in parallel',
default=4, type=int)
parser.add_argument('--fio-iterations',
help='Total number of benchmark iterations per disk.',
default=1, type=int)
parser.add_argument('--fio-workers',
help='Total number of fio workers per disk.',
default=1, type=int)
parser.add_argument('--fio-depth',
help='The number of concurrent asynchronous operations '
'executed per disk',
default=64, type=int)
parser.add_argument('--fio-verify',
help='The mechanism used to validate the written '
'data. Examples: crc32c, md5, sha1, null, etc. '
'If set to null, the written data will not be '
'verified.',
default='crc32c')
parser.add_argument('--bs',
help='Benchmark block size.',
default="2M")
parser.add_argument('--op',
help='Benchmark operation. '
'Examples: read, randwrite, rw, etc.',
default="rw")
parser.add_argument('--image-prefix',
help='The image name prefix.',
default="cephTest-")
parser.add_argument('--image-size-mb',
help='The image size in megabytes.',
default=1024, type=int)
parser.add_argument('--map-timeout',
help='Image map timeout.',
default=60, type=int)
parser.add_argument('--skip-enabling-disk', action='store_true',
help='If set, the disk will not be turned online and the '
'read-only flag will not be removed. Useful when '
'the SAN policy is set to "onlineAll".')
parser.add_argument('--verbose', action='store_true',
help='Print info messages.')
parser.add_argument('--debug', action='store_true',
help='Print debug messages.')
parser.add_argument('--stop-on-error', action='store_true',
help='Stop testing when hitting errors.')
parser.add_argument('--skip-cleanup-on-error', action='store_true',
help='Skip cleanup when hitting errors.')
class CephTestException(Exception):
msg_fmt = "An exception has been encountered."
def __init__(self, message: str = None, **kwargs):
self.kwargs = kwargs
if not message:
message = self.msg_fmt % kwargs
self.message = message
super(CephTestException, self).__init__(message)
class CommandFailed(CephTestException):
msg_fmt = (
"Command failed: %(command)s. "
"Return code: %(returncode)s. "
"Stdout: %(stdout)s. Stderr: %(stderr)s.")
class CephTestTimeout(CephTestException):
msg_fmt = "Operation timeout."
def setup_logging(log_level: int = logging.INFO):
handler = logging.StreamHandler()
handler.setLevel(log_level)
log_fmt = '[%(asctime)s] %(levelname)s - %(message)s'
formatter = logging.Formatter(log_fmt)
handler.setFormatter(formatter)
LOG.addHandler(handler)
LOG.setLevel(logging.DEBUG)
def retry_decorator(timeout: int = 60,
retry_interval: int = 2,
silent_interval: int = 10,
additional_details: str = "",
retried_exceptions:
typing.Union[
typing.Type[Exception],
collections.abc.Iterable[
typing.Type[Exception]]] = Exception):
def wrapper(f: typing.Callable[..., typing.Any]):
@functools.wraps(f)
def inner(*args, **kwargs):
tstart: float = time.time()
elapsed: float = 0
exc = None
details = additional_details or "%s failed" % f.__qualname__
while elapsed < timeout or not timeout:
try:
return f(*args, **kwargs)
except retried_exceptions as ex:
exc = ex
elapsed = time.time() - tstart
if elapsed > silent_interval:
level = logging.WARNING
else:
level = logging.DEBUG
LOG.log(level,
"Exception: %s. Additional details: %s. "
"Time elapsed: %d. Timeout: %d",
ex, details, elapsed, timeout)
time.sleep(retry_interval)
elapsed = time.time() - tstart
msg = (
"Operation timed out. Exception: %s. Additional details: %s. "
"Time elapsed: %d. Timeout: %d.")
raise CephTestTimeout(
msg % (exc, details, elapsed, timeout))
return inner
return wrapper
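# A minimal usage sketch (the probe below is hypothetical, not part of this
# module): retry the decorated callable every `retry_interval` seconds until
# it succeeds or `timeout` elapses, then raise CephTestTimeout.
#
#     @retry_decorator(timeout=10, retry_interval=1,
#                      retried_exceptions=CephTestException)
#     def wait_for_resource():
#         ...  # raise CephTestException until the resource shows up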
def execute(*args, **kwargs):
LOG.debug("Executing: %s", args)
result = subprocess.run(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
LOG.debug("Command %s returned %d.", args, result.returncode)
if result.returncode:
exc = CommandFailed(
command=args, returncode=result.returncode,
stdout=result.stdout, stderr=result.stderr)
LOG.error(exc)
raise exc
return result
def ps_execute(*args, **kwargs):
# Disable PS progress bar, causes issues when invoked remotely.
prefix = "$global:ProgressPreference = 'SilentlyContinue' ; "
return execute(
"powershell.exe", "-NonInteractive",
"-Command", prefix, *args, **kwargs)
def array_stats(array: list):
mean = sum(array) / len(array) if len(array) else 0
variance = (sum((i - mean) ** 2 for i in array) / len(array)
if len(array) else 0)
std_dev = math.sqrt(variance)
sorted_array = sorted(array)
return {
'min': min(array) if len(array) else 0,
'max': max(array) if len(array) else 0,
'sum': sum(array) if len(array) else 0,
'mean': mean,
'median': sorted_array[len(array) // 2] if len(array) else 0,
'max_90': sorted_array[int(len(array) * 0.9)] if len(array) else 0,
'min_90': sorted_array[int(len(array) * 0.1)] if len(array) else 0,
'variance': variance,
'std_dev': std_dev,
'count': len(array)
}
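# For example (values following from the definitions above),
# array_stats([1, 2, 3, 4]) returns min=1, max=4, sum=10, mean=2.5,
# median=3 (the upper median), max_90=4, min_90=1 and count=4;
# an empty list yields zeros instead of raising.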
class Tracer:
data: collections.OrderedDict = collections.OrderedDict()
lock = threading.Lock()
@classmethod
def trace(cls, func):
def wrapper(*args, **kwargs):
tstart = time.time()
exc_str = None
# Preserve call order
with cls.lock:
if func.__qualname__ not in cls.data:
cls.data[func.__qualname__] = list()
try:
return func(*args, **kwargs)
except Exception as exc:
exc_str = str(exc)
raise
finally:
tend = time.time()
with cls.lock:
cls.data[func.__qualname__] += [{
"duration": tend - tstart,
"error": exc_str,
}]
return wrapper
@classmethod
def get_results(cls):
stats = collections.OrderedDict()
for f in cls.data.keys():
stats[f] = array_stats([i['duration'] for i in cls.data[f]])
errors = []
for i in cls.data[f]:
if i['error']:
errors.append(i['error'])
stats[f]['errors'] = errors
return stats
@classmethod
def print_results(cls):
r = cls.get_results()
table = prettytable.PrettyTable(title="Duration (s)")
table.field_names = [
"function", "min", "max", "total",
"mean", "median", "std_dev",
"max 90%", "min 90%", "count", "errors"]
table.float_format = ".4"
for f, s in r.items():
table.add_row([f, s['min'], s['max'], s['sum'],
s['mean'], s['median'], s['std_dev'],
s['max_90'], s['min_90'],
s['count'], len(s['errors'])])
print(table)
class RbdImage(object):
def __init__(self,
name: str,
size_mb: int,
is_shared: bool = True,
disk_number: int = -1,
mapped: bool = False):
self.name = name
self.size_mb = size_mb
self.is_shared = is_shared
self.disk_number = disk_number
self.mapped = mapped
self.removed = False
self.drive_letter = ""
@classmethod
@Tracer.trace
def create(cls,
name: str,
size_mb: int = 1024,
is_shared: bool = True):
LOG.info("Creating image: %s. Size: %s.", name, "%sM" % size_mb)
cmd = ["rbd", "create", name, "--size", "%sM" % size_mb]
if is_shared:
cmd += ["--image-shared"]
execute(*cmd)
return RbdImage(name, size_mb, is_shared)
@Tracer.trace
def get_disk_number(self,
timeout: int = 60,
retry_interval: int = 2):
@retry_decorator(
retried_exceptions=CephTestException,
timeout=timeout,
retry_interval=retry_interval)
def _get_disk_number():
LOG.info("Retrieving disk number: %s", self.name)
result = execute("rbd-wnbd", "show", self.name, "--format=json")
disk_info = json.loads(result.stdout)
disk_number = disk_info["disk_number"]
if disk_number > 0:
LOG.debug("Image %s disk number: %d", self.name, disk_number)
return disk_number
raise CephTestException(
f"Could not get disk number: {self.name}.")
return _get_disk_number()
@Tracer.trace
def _wait_for_disk(self,
timeout: int = 60,
retry_interval: int = 2):
@retry_decorator(
retried_exceptions=(FileNotFoundError, OSError),
additional_details="the mapped disk isn't available yet",
timeout=timeout,
retry_interval=retry_interval)
def wait_for_disk():
LOG.debug("Waiting for disk to be accessible: %s %s",
self.name, self.path)
with open(self.path, 'rb'):
pass
return wait_for_disk()
@property
def path(self):
return f"\\\\.\\PhysicalDrive{self.disk_number}"
@Tracer.trace
@retry_decorator(additional_details="couldn't clear disk read-only flag")
def set_writable(self):
ps_execute(
"Set-Disk", "-Number", str(self.disk_number),
"-IsReadOnly", "$false")
@Tracer.trace
@retry_decorator(additional_details="couldn't bring the disk online")
def set_online(self):
ps_execute(
"Set-Disk", "-Number", str(self.disk_number),
"-IsOffline", "$false")
@Tracer.trace
def map(self, timeout: int = 60):
LOG.info("Mapping image: %s", self.name)
tstart = time.time()
execute("rbd-wnbd", "map", self.name)
self.mapped = True
self.disk_number = self.get_disk_number(timeout=timeout)
elapsed = time.time() - tstart
self._wait_for_disk(timeout=timeout - elapsed)
@Tracer.trace
def unmap(self):
if self.mapped:
LOG.info("Unmapping image: %s", self.name)
execute("rbd-wnbd", "unmap", self.name)
self.mapped = False
@Tracer.trace
def remove(self):
if not self.removed:
LOG.info("Removing image: %s", self.name)
execute("rbd", "rm", self.name)
self.removed = True
def cleanup(self):
try:
self.unmap()
finally:
self.remove()
@Tracer.trace
@retry_decorator()
def _init_disk(self):
cmd = f"Get-Disk -Number {self.disk_number} | Initialize-Disk"
ps_execute(cmd)
@Tracer.trace
@retry_decorator()
def _create_partition(self):
cmd = (f"Get-Disk -Number {self.disk_number} | "
"New-Partition -AssignDriveLetter -UseMaximumSize")
ps_execute(cmd)
@Tracer.trace
@retry_decorator()
def _format_volume(self):
cmd = (
f"(Get-Partition -DiskNumber {self.disk_number}"
" | ? { $_.DriveLetter }) | Format-Volume -Force -Confirm:$false")
ps_execute(cmd)
@Tracer.trace
@retry_decorator()
def _get_drive_letter(self):
cmd = (f"(Get-Partition -DiskNumber {self.disk_number}"
" | ? { $_.DriveLetter }).DriveLetter")
result = ps_execute(cmd)
# The PowerShell command will place a null character if no drive letter
# is available. For example, we can receive "\x00\r\n".
self.drive_letter = result.stdout.decode().strip()
if not self.drive_letter.isalpha() or len(self.drive_letter) != 1:
raise CephTestException(
"Invalid drive letter received: %s" % self.drive_letter)
@Tracer.trace
def init_fs(self):
if not self.mapped:
raise CephTestException("Unable to create fs, image not mapped.")
LOG.info("Initializing fs, image: %s.", self.name)
self._init_disk()
self._create_partition()
self._format_volume()
self._get_drive_letter()
@Tracer.trace
def get_fs_capacity(self):
if not self.drive_letter:
raise CephTestException("No drive letter available")
cmd = f"(Get-Volume -DriveLetter {self.drive_letter}).Size"
result = ps_execute(cmd)
return int(result.stdout.decode().strip())
@Tracer.trace
def resize(self, new_size_mb, allow_shrink=False):
LOG.info(
"Resizing image: %s. New size: %s MB, old size: %s MB",
self.name, new_size_mb, self.size_mb)
cmd = ["rbd", "resize", self.name,
"--size", f"{new_size_mb}M", "--no-progress"]
if allow_shrink:
cmd.append("--allow-shrink")
execute(*cmd)
self.size_mb = new_size_mb
@Tracer.trace
def get_disk_size(self):
"""Retrieve the virtual disk size (bytes) reported by Windows."""
cmd = f"(Get-Disk -Number {self.disk_number}).Size"
result = ps_execute(cmd)
disk_size = result.stdout.decode().strip()
if not disk_size.isdigit():
raise CephTestException(
"Invalid disk size received: %s" % disk_size)
return int(disk_size)
@Tracer.trace
@retry_decorator(timeout=30)
def wait_for_disk_resize(self):
# After resizing the rbd image, the daemon is expected to receive
# the notification, inform the WNBD driver and then trigger a disk
# rescan (IOCTL_DISK_UPDATE_PROPERTIES). This might take a few seconds,
# so we'll need to do some polling.
disk_size = self.get_disk_size()
disk_size_mb = disk_size // (1 << 20)
if disk_size_mb != self.size_mb:
raise CephTestException(
"The disk size hasn't been updated yet. Retrieved size: "
f"{disk_size_mb}MB. Expected size: {self.size_mb}MB.")
class RbdTest(object):
image: RbdImage
requires_disk_online = False
requires_disk_write = False
def __init__(self,
image_prefix: str = "cephTest-",
image_size_mb: int = 1024,
map_timeout: int = 60,
**kwargs):
self.image_size_mb = image_size_mb
self.image_name = image_prefix + str(uuid.uuid4())
self.map_timeout = map_timeout
self.skip_enabling_disk = kwargs.get("skip_enabling_disk")
@Tracer.trace
def initialize(self):
self.image = RbdImage.create(
self.image_name,
self.image_size_mb)
self.image.map(timeout=self.map_timeout)
if not self.skip_enabling_disk:
if self.requires_disk_write:
self.image.set_writable()
if self.requires_disk_online:
self.image.set_online()
def run(self):
pass
def cleanup(self):
if self.image:
self.image.cleanup()
@classmethod
def print_results(cls,
title: str = "Test results",
description: str = None):
pass
class RbdFsTestMixin(object):
# Windows disks must be turned online before accessing partitions.
requires_disk_online = True
requires_disk_write = True
@Tracer.trace
def initialize(self):
super(RbdFsTestMixin, self).initialize()
self.image.init_fs()
def get_subpath(self, *args):
drive_path = f"{self.image.drive_letter}:\\"
return os.path.join(drive_path, *args)
class RbdFsTest(RbdFsTestMixin, RbdTest):
pass
class RbdFioTest(RbdTest):
data: typing.DefaultDict[str, typing.List[typing.Dict[str, str]]] = (
collections.defaultdict(list))
lock = threading.Lock()
def __init__(self,
*args,
fio_size_mb: int = None,
iterations: int = 1,
workers: int = 1,
bs: str = "2M",
iodepth: int = 64,
op: str = "rw",
verify: str = "crc32c",
**kwargs):
super(RbdFioTest, self).__init__(*args, **kwargs)
self.fio_size_mb = fio_size_mb or self.image_size_mb
self.iterations = iterations
self.workers = workers
self.bs = bs
self.iodepth = iodepth
self.op = op
if op not in ("read", "randread"):
self.requires_disk_write = True
self.verify = verify
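# For reference, fio's JSON output (--output-format=json) is roughly shaped
# like the sketch below; only the fields consumed here are shown and the
# exact layout may vary across fio versions:
#   {"jobs": [{"error": 0,
#              "read":  {"io_bytes": ..., "bw_bytes": ..., "runtime": ...,
#                        "total_ios": ..., "short_ios": ..., "drop_ios": ...,
#                        "clat_ns": {"min": ..., "max": ..., "mean": ...,
#                                    "stddev": ..., "percentile": {...}}},
#              "write": {...}}]}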
def process_result(self, raw_fio_output: str):
result = json.loads(raw_fio_output)
with self.lock:
for job in result["jobs"]:
# Fio doesn't support trim on Windows
for op in ['read', 'write']:
if op in job:
self.data[op].append({
'error': job['error'],
'io_bytes': job[op]['io_bytes'],
'bw_bytes': job[op]['bw_bytes'],
'runtime': job[op]['runtime'] / 1000, # seconds
'total_ios': job[op]['total_ios'],
'short_ios': job[op]['short_ios'],
'dropped_ios': job[op]['drop_ios'],
'clat_ns_min': job[op]['clat_ns']['min'],
'clat_ns_max': job[op]['clat_ns']['max'],
'clat_ns_mean': job[op]['clat_ns']['mean'],
'clat_ns_stddev': job[op]['clat_ns']['stddev'],
'clat_ns_10': job[op].get('clat_ns', {})
.get('percentile', {})
.get('10.000000', 0),
'clat_ns_90': job[op].get('clat_ns', {})
.get('percentile', {})
.get('90.000000', 0)
})
def _get_fio_path(self):
return self.image.path
@Tracer.trace
def _run_fio(self, fio_size_mb=None):
LOG.info("Starting FIO test.")
cmd = [
"fio", "--thread", "--output-format=json",
"--randrepeat=%d" % self.iterations,
"--direct=1", "--name=test",
"--bs=%s" % self.bs, "--iodepth=%s" % self.iodepth,
"--size=%sM" % (fio_size_mb or self.fio_size_mb),
"--readwrite=%s" % self.op,
"--numjobs=%s" % self.workers,
"--filename=%s" % self._get_fio_path(),
]
if self.verify:
cmd += ["--verify=%s" % self.verify]
result = execute(*cmd)
LOG.info("Completed FIO test.")
self.process_result(result.stdout)
@Tracer.trace
def run(self):
self._run_fio()
@classmethod
def print_results(cls,
title: str = "Benchmark results",
description: str = None):
if description:
title = "%s (%s)" % (title, description)
for op in cls.data.keys():
op_title = "%s op=%s" % (title, op)
table = prettytable.PrettyTable(title=op_title)
table.field_names = ["stat", "min", "max", "mean",
"median", "std_dev",
"max 90%", "min 90%", "total"]
table.float_format = ".4"
op_data = cls.data[op]
s = array_stats([float(i["bw_bytes"]) / 1000_000 for i in op_data])
table.add_row(["bandwidth (MB/s)",
s['min'], s['max'], s['mean'],
s['median'], s['std_dev'],
s['max_90'], s['min_90'], 'N/A'])
s = array_stats([float(i["runtime"]) for i in op_data])
table.add_row(["duration (s)",
s['min'], s['max'], s['mean'],
s['median'], s['std_dev'],
s['max_90'], s['min_90'], s['sum']])
s = array_stats([i["error"] for i in op_data])
table.add_row(["errors",
s['min'], s['max'], s['mean'],
s['median'], s['std_dev'],
s['max_90'], s['min_90'], s['sum']])
s = array_stats([i["short_ios"] for i in op_data])
table.add_row(["incomplete IOs",
s['min'], s['max'], s['mean'],
s['median'], s['std_dev'],
s['max_90'], s['min_90'], s['sum']])
s = array_stats([i["dropped_ios"] for i in op_data])
table.add_row(["dropped IOs",
s['min'], s['max'], s['mean'],
s['median'], s['std_dev'],
s['max_90'], s['min_90'], s['sum']])
clat_min = array_stats([i["clat_ns_min"] for i in op_data])
clat_max = array_stats([i["clat_ns_max"] for i in op_data])
clat_mean = array_stats([i["clat_ns_mean"] for i in op_data])
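# Pooled standard deviation across jobs, assuming comparable sample
# counts per job: the square root of the mean of the per-job variances.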
clat_stddev = math.sqrt(
sum([float(i["clat_ns_stddev"]) ** 2 for i in op_data]) / len(op_data)
if len(op_data) else 0)
clat_10 = array_stats([i["clat_ns_10"] for i in op_data])
clat_90 = array_stats([i["clat_ns_90"] for i in op_data])
# For convenience, we'll convert it from ns to seconds.
table.add_row(["completion latency (s)",
clat_min['min'] / 1e+9,
clat_max['max'] / 1e+9,
clat_mean['mean'] / 1e+9,
clat_mean['median'] / 1e+9,
clat_stddev / 1e+9,
clat_10['mean'] / 1e+9,
clat_90['mean'] / 1e+9,
clat_mean['sum'] / 1e+9])
print(table)
class RbdResizeFioTest(RbdFioTest):
"""Image resize test.
This test extends and then shrinks the image, performing FIO tests to
validate the resized image.
"""
@Tracer.trace
def run(self):
self.image.resize(self.image_size_mb * 2)
self.image.wait_for_disk_resize()
self._run_fio(fio_size_mb=self.image_size_mb * 2)
self.image.resize(self.image_size_mb // 2, allow_shrink=True)
self.image.wait_for_disk_resize()
self._run_fio(fio_size_mb=self.image_size_mb // 2)
# Just like rbd-nbd, rbd-wnbd is masking out-of-bounds errors.
# For this reason, we don't have a negative test that writes
# past the disk boundary.
class RbdFsFioTest(RbdFsTestMixin, RbdFioTest):
def initialize(self):
super(RbdFsFioTest, self).initialize()
if not self.fio_size_mb or self.fio_size_mb == self.image_size_mb:
# Out of caution, we'll use up to 80% of the FS by default
self.fio_size_mb = int(
self.image.get_fs_capacity() * 0.8 / (1024 * 1024))
@staticmethod
def _fio_escape_path(path):
# FIO allows specifying multiple files separated by colon.
# This means that ":" has to be escaped, so
# F:\filename becomes F\:\filename.
return path.replace(":", "\\:")
def _get_fio_path(self):
return self._fio_escape_path(self.get_subpath("test-fio"))
class RbdStampTest(RbdTest):
requires_disk_write = True
_write_open_mode = "rb+"
_read_open_mode = "rb"
_expect_path_exists = True
@staticmethod
def _rand_float(min_val: float, max_val: float):
return min_val + (random.random() * (max_val - min_val))
def _get_stamp(self):
buff = self.image_name.encode()
padding = 512 - len(buff)
buff += b'\0' * padding
return buff
def _get_stamp_path(self):
return self.image.path
@Tracer.trace
def _write_stamp(self):
with open(self._get_stamp_path(), self._write_open_mode) as disk:
stamp = self._get_stamp()
disk.write(stamp)
@Tracer.trace
def _read_stamp(self):
with open(self._get_stamp_path(), self._read_open_mode) as disk:
return disk.read(len(self._get_stamp()))
@Tracer.trace
def run(self):
if self._expect_path_exists:
# Wait up to 5 seconds and then check the disk, ensuring that
# nobody else wrote to it. This is particularly useful when
# running a high number of tests in parallel, ensuring that
# we aren't writing to the wrong disk.
time.sleep(self._rand_float(0, 5))
stamp = self._read_stamp()
assert stamp == b'\0' * len(self._get_stamp())
self._write_stamp()
stamp = self._read_stamp()
assert stamp == self._get_stamp()
class RbdFsStampTest(RbdFsTestMixin, RbdStampTest):
_write_open_mode = "wb"
_expect_path_exists = False
def _get_stamp_path(self):
return self.get_subpath("test-stamp")
class TestRunner(object):
def __init__(self,
test_cls: typing.Type[RbdTest],
test_params: dict = {},
iterations: int = 1,
workers: int = 1,
stop_on_error: bool = False,
cleanup_on_error: bool = True):
self.test_cls = test_cls
self.test_params = test_params
self.iterations = iterations
self.workers = workers
self.executor = futures.ThreadPoolExecutor(max_workers=workers)
self.lock = threading.Lock()
self.completed = 0
self.errors = 0
self.stopped = False
self.stop_on_error = stop_on_error
self.cleanup_on_error = cleanup_on_error
@Tracer.trace
def run(self):
tasks = []
for i in range(self.iterations):
task = self.executor.submit(self.run_single_test)
tasks.append(task)
LOG.info("Waiting for %d tests to complete.", self.iterations)
for task in tasks:
task.result()
def run_single_test(self):
failed = False
if self.stopped:
return
try:
test = self.test_cls(**self.test_params)
test.initialize()
test.run()
except KeyboardInterrupt:
LOG.warning("Received Ctrl-C.")
self.stopped = True
except Exception as ex:
failed = True
if self.stop_on_error:
self.stopped = True
with self.lock:
self.errors += 1
LOG.exception(
"Test exception: %s. Total exceptions: %d",
ex, self.errors)
finally:
if not failed or self.cleanup_on_error:
try:
test.cleanup()
except KeyboardInterrupt:
LOG.warning("Received Ctrl-C.")
self.stopped = True
# Retry the cleanup
test.cleanup()
except Exception:
LOG.exception("Test cleanup failed.")
with self.lock:
self.completed += 1
LOG.info("Completed tests: %d. Pending: %d",
self.completed, self.iterations - self.completed)
TESTS: typing.Dict[str, typing.Type[RbdTest]] = {
'RbdTest': RbdTest,
'RbdFioTest': RbdFioTest,
'RbdResizeFioTest': RbdResizeFioTest,
'RbdStampTest': RbdStampTest,
# FS tests
'RbdFsTest': RbdFsTest,
'RbdFsFioTest': RbdFsFioTest,
'RbdFsStampTest': RbdFsStampTest,
}
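# Example invocation (the values here are hypothetical):
#   python test_rbd_wnbd.py --test-name RbdFioTest --iterations 4 \
#       --concurrency 2 --image-size-mb 256 --verbose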
if __name__ == '__main__':
args = parser.parse_args()
log_level = logging.WARNING
if args.verbose:
log_level = logging.INFO
if args.debug:
log_level = logging.DEBUG
setup_logging(log_level)
test_params = dict(
image_size_mb=args.image_size_mb,
image_prefix=args.image_prefix,
bs=args.bs,
op=args.op,
verify=args.fio_verify,
iodepth=args.fio_depth,
map_timeout=args.map_timeout,
skip_enabling_disk=args.skip_enabling_disk,
)
try:
test_cls = TESTS[args.test_name]
except KeyError:
raise CephTestException("Unknown test: {}".format(args.test_name))
runner = TestRunner(
test_cls,
test_params=test_params,
iterations=args.iterations,
workers=args.concurrency,
stop_on_error=args.stop_on_error,
cleanup_on_error=not args.skip_cleanup_on_error)
runner.run()
Tracer.print_results()
test_cls.print_results(
description="count: %d, concurrency: %d" %
(args.iterations, args.concurrency))
assert runner.errors == 0, f"encountered {runner.errors} error(s)."
| 31,169 | 32.880435 | 86 |
py
|
null |
ceph-main/qa/workunits/windows/libvirt_vm/setup.sh
|
#!/usr/bin/env bash
set -ex
WINDOWS_SERVER_2019_ISO_URL=${WINDOWS_SERVER_2019_ISO_URL:-"https://software-download.microsoft.com/download/pr/17763.737.190906-2324.rs5_release_svc_refresh_SERVER_EVAL_x64FRE_en-us_1.iso"}
VIRTIO_WIN_ISO_URL=${VIRTIO_WIN_ISO_URL:-"https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/stable-virtio/virtio-win.iso"}
DIR="$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)"
# Use build_utils.sh from ceph-build
curl --retry-max-time 30 --retry 10 -L -o ${DIR}/build_utils.sh https://raw.githubusercontent.com/ceph/ceph-build/main/scripts/build_utils.sh
source ${DIR}/build_utils.sh
# Helper function to restart the Windows VM
function restart_windows_vm() {
echo "Restarting Windows VM"
ssh_exec "cmd.exe /c 'shutdown.exe /r /t 0 & sc.exe stop sshd'"
SECONDS=0
TIMEOUT=${1:-600}
while true; do
if [[ $SECONDS -gt $TIMEOUT ]]; then
echo "Timeout waiting for the VM to start"
exit 1
fi
ssh_exec hostname || {
echo "Cannot execute SSH commands yet"
sleep 10
continue
}
break
done
echo "Windows VM restarted"
}
# Install libvirt with KVM
retrycmd_if_failure 5 0 5m sudo apt-get update
retrycmd_if_failure 5 0 10m sudo apt-get install -y qemu-kvm libvirt-daemon-system libvirt-clients virtinst
# Download ISO images
echo "Downloading virtio-win ISO"
retrycmd_if_failure 5 0 30m curl -C - -L $VIRTIO_WIN_ISO_URL -o ${DIR}/virtio-win.iso
echo "Downloading Windows Server 2019 ISO"
retrycmd_if_failure 5 0 60m curl -C - -L $WINDOWS_SERVER_2019_ISO_URL -o ${DIR}/windows-server-2019.iso
# Create virtual floppy image with the unattended instructions to install Windows Server 2019
echo "Creating floppy image"
qemu-img create -f raw ${DIR}/floppy.img 1440k
mkfs.msdos -s 1 ${DIR}/floppy.img
mkdir ${DIR}/floppy
sudo mount ${DIR}/floppy.img ${DIR}/floppy
ssh-keygen -b 2048 -t rsa -f ${DIR}/id_rsa -q -N ""
sudo cp \
${DIR}/autounattend.xml \
${DIR}/first-logon.ps1 \
${DIR}/id_rsa.pub \
${DIR}/utils.ps1 \
${DIR}/setup.ps1 \
${DIR}/floppy/
sudo umount ${DIR}/floppy
rmdir ${DIR}/floppy
echo "Starting libvirt VM"
qemu-img create -f qcow2 ${DIR}/ceph-win-ltsc2019.qcow2 50G
VM_NAME="ceph-win-ltsc2019"
sudo virt-install \
--name $VM_NAME \
--os-variant win2k19 \
--boot hd,cdrom \
--virt-type kvm \
--graphics spice \
--cpu host \
--vcpus 4 \
--memory 4096 \
--disk ${DIR}/floppy.img,device=floppy \
--disk ${DIR}/ceph-win-ltsc2019.qcow2,bus=virtio \
--disk ${DIR}/windows-server-2019.iso,device=cdrom \
--disk ${DIR}/virtio-win.iso,device=cdrom \
--network network=default,model=virtio \
--controller type=virtio-serial \
--channel unix,target_type=virtio,name=org.qemu.guest_agent.0 \
--noautoconsole
export SSH_USER="administrator"
export SSH_KNOWN_HOSTS_FILE="${DIR}/known_hosts"
export SSH_KEY="${DIR}/id_rsa"
SECONDS=0
TIMEOUT=1800
SLEEP_SECS=30
while true; do
if [[ $SECONDS -gt $TIMEOUT ]]; then
echo "Timeout waiting for the VM to start"
exit 1
fi
VM_IP=$(sudo virsh domifaddr --source agent --interface Ethernet --full $VM_NAME | grep ipv4 | awk '{print $4}' | cut -d '/' -f1) || {
echo "Retrying in $SLEEP_SECS seconds"
sleep $SLEEP_SECS
continue
}
ssh-keyscan -H $VM_IP &> $SSH_KNOWN_HOSTS_FILE || {
echo "SSH is not reachable yet"
sleep $SLEEP_SECS
continue
}
SSH_ADDRESS=$VM_IP ssh_exec hostname || {
echo "Cannot execute SSH commands yet"
sleep $SLEEP_SECS
continue
}
break
done
export SSH_ADDRESS=$VM_IP
scp_upload ${DIR}/utils.ps1 /utils.ps1
scp_upload ${DIR}/setup.ps1 /setup.ps1
SSH_TIMEOUT=1h ssh_exec /setup.ps1
cd $DIR
# Get the helper script to download Chacra builds
retrycmd_if_failure 10 5 1m curl -L -o ./get-chacra-bin.py https://raw.githubusercontent.com/ceph/ceph-win32-tests/main/get-bin.py
chmod +x ./get-chacra-bin.py
# Download latest WNBD build from Chacra
retrycmd_if_failure 10 0 10m ./get-chacra-bin.py --project wnbd --filename wnbd.zip
scp_upload wnbd.zip /wnbd.zip
ssh_exec tar.exe xzvf /wnbd.zip -C /
# Install WNBD driver
ssh_exec Import-Certificate -FilePath /wnbd/driver/wnbd.cer -Cert Cert:\\LocalMachine\\Root
ssh_exec Import-Certificate -FilePath /wnbd/driver/wnbd.cer -Cert Cert:\\LocalMachine\\TrustedPublisher
ssh_exec /wnbd/binaries/wnbd-client.exe install-driver /wnbd/driver/wnbd.inf
restart_windows_vm
ssh_exec wnbd-client.exe -v
# Download Ceph Windows build from Chacra
CEPH_REPO_FILE="/etc/apt/sources.list.d/ceph.list"
PROJECT=$(cat $CEPH_REPO_FILE | cut -d ' ' -f3 | tr '\/', ' ' | awk '{print $4}')
BRANCH=$(cat $CEPH_REPO_FILE | cut -d ' ' -f3 | tr '\/', ' ' | awk '{print $5}')
SHA1=$(cat $CEPH_REPO_FILE | cut -d ' ' -f3 | tr '\/', ' ' | awk '{print $6}')
retrycmd_if_failure 10 0 10m ./get-chacra-bin.py --project $PROJECT --branchname $BRANCH --sha1 $SHA1 --filename ceph.zip
# Install Ceph on Windows
SSH_TIMEOUT=5m scp_upload ./ceph.zip /ceph.zip
SSH_TIMEOUT=10m ssh_exec tar.exe xzvf /ceph.zip -C /
ssh_exec "New-Service -Name ceph-rbd -BinaryPathName 'c:\ceph\rbd-wnbd.exe service'"
ssh_exec Start-Service -Name ceph-rbd
ssh_exec rbd.exe -v
# Setup Ceph configs and directories
ssh_exec mkdir -force /etc/ceph, /var/run/ceph, /var/log/ceph
for i in $(ls /etc/ceph); do
scp_upload /etc/ceph/$i /etc/ceph/$i
done
cat << EOF > ${DIR}/connection_info.sh
export SSH_USER="${SSH_USER}"
export SSH_KNOWN_HOSTS_FILE="${SSH_KNOWN_HOSTS_FILE}"
export SSH_KEY="${SSH_KEY}"
export SSH_ADDRESS="${SSH_ADDRESS}"
EOF
echo "Windows Server 2019 libvirt testing VM is ready"
| 5,714 | 34.06135 | 190 |
sh
|
null |
ceph-main/src/SimpleRADOSStriper.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License version 2.1, as published by
* the Free Software Foundation. See file COPYING.
*
*/
#include <boost/smart_ptr/intrusive_ptr.hpp>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <iomanip>
#include <iostream>
#include <regex>
#include <sstream>
#include <string_view>
#include <limits.h>
#include <string.h>
#include "include/ceph_assert.h"
#include "include/rados/librados.hpp"
#include "cls/lock/cls_lock_client.h"
#include "common/ceph_argparse.h"
#include "common/ceph_mutex.h"
#include "common/common_init.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/version.h"
#include "SimpleRADOSStriper.h"
using ceph::bufferlist;
#define dout_subsys ceph_subsys_cephsqlite
#undef dout_prefix
#define dout_prefix *_dout << "client." << ioctx.get_instance_id() << ": SimpleRADOSStriper: " << __func__ << ": " << oid << ": "
#define d(lvl) ldout((CephContext*)ioctx.cct(), (lvl))
enum {
P_FIRST = 0xe0000,
P_UPDATE_METADATA,
P_UPDATE_ALLOCATED,
P_UPDATE_SIZE,
P_UPDATE_VERSION,
P_SHRINK,
P_SHRINK_BYTES,
P_LOCK,
P_UNLOCK,
P_LAST,
};
int SimpleRADOSStriper::config_logger(CephContext* cct, std::string_view name, std::shared_ptr<PerfCounters>* l)
{
PerfCountersBuilder plb(cct, name.data(), P_FIRST, P_LAST);
plb.add_u64_counter(P_UPDATE_METADATA, "update_metadata", "Number of metadata updates");
plb.add_u64_counter(P_UPDATE_ALLOCATED, "update_allocated", "Number of allocated updates");
plb.add_u64_counter(P_UPDATE_SIZE, "update_size", "Number of size updates");
plb.add_u64_counter(P_UPDATE_VERSION, "update_version", "Number of version updates");
plb.add_u64_counter(P_SHRINK, "shrink", "Number of allocation shrinks");
plb.add_u64_counter(P_SHRINK_BYTES, "shrink_bytes", "Bytes shrunk");
plb.add_u64_counter(P_LOCK, "lock", "Number of locks");
plb.add_u64_counter(P_UNLOCK, "unlock", "Number of unlocks");
l->reset(plb.create_perf_counters());
return 0;
}
SimpleRADOSStriper::~SimpleRADOSStriper()
{
if (lock_keeper.joinable()) {
shutdown = true;
lock_keeper_cvar.notify_all();
lock_keeper.join();
}
if (ioctx.is_valid()) {
d(5) << dendl;
if (is_locked()) {
unlock();
}
}
}
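/* Maps a logical (offset, length) onto the single striped RADOS object that
 * covers the start of the range. For illustration, assuming object_size = 22
 * (4 MiB stripe objects): off = 5 MiB with len = 8 MiB yields stripe 1
 * ("<oid>.0000000000000001") with e.off = 1 MiB and e.len = 3 MiB; callers
 * loop, advancing off/len by e.len, to cover the rest of the range.
 */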
SimpleRADOSStriper::extent SimpleRADOSStriper::get_next_extent(uint64_t off, size_t len) const
{
extent e;
{
uint64_t stripe = (off>>object_size);
CachedStackStringStream css;
*css << oid;
*css << ".";
*css << std::setw(16) << std::setfill('0') << std::hex << stripe;
e.soid = css->str();
}
e.off = off & ((1<<object_size)-1);
e.len = std::min<size_t>(len, (1<<object_size)-e.off);
return e;
}
int SimpleRADOSStriper::remove()
{
d(5) << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
if (int rc = wait_for_aios(true); rc < 0) {
aios_failure = 0;
return rc;
}
if (int rc = set_metadata(0, true); rc < 0) {
return rc;
}
auto ext = get_first_extent();
if (int rc = ioctx.remove(ext.soid); rc < 0) {
d(1) << " remove failed: " << cpp_strerror(rc) << dendl;
return rc;
}
locked = false;
return 0;
}
int SimpleRADOSStriper::truncate(uint64_t size)
{
d(5) << size << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
/* TODO: (not currently used by SQLite) handle growth + sparse */
if (int rc = set_metadata(size, true); rc < 0) {
return rc;
}
return 0;
}
int SimpleRADOSStriper::wait_for_aios(bool block)
{
while (!aios.empty()) {
auto& aiocp = aios.front();
int rc;
if (block) {
rc = aiocp->wait_for_complete();
} else {
if (aiocp->is_complete()) {
rc = aiocp->get_return_value();
} else {
return 0;
}
}
if (rc) {
d(1) << " aio failed: " << cpp_strerror(rc) << dendl;
if (aios_failure == 0) {
aios_failure = rc;
}
}
aios.pop();
}
return aios_failure;
}
int SimpleRADOSStriper::flush()
{
d(5) << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
if (size_dirty) {
if (int rc = set_metadata(size, true); rc < 0) {
return rc;
}
}
if (int rc = wait_for_aios(true); rc < 0) {
aios_failure = 0;
return rc;
}
return 0;
}
int SimpleRADOSStriper::stat(uint64_t* s)
{
d(5) << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
*s = size;
return 0;
}
int SimpleRADOSStriper::create()
{
d(5) << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
auto ext = get_first_extent();
auto op = librados::ObjectWriteOperation();
/* exclusive create ensures that none of these setxattrs happen if it fails */
op.create(1);
op.setxattr(XATTR_VERSION, uint2bl(0));
op.setxattr(XATTR_EXCL, bufferlist());
op.setxattr(XATTR_SIZE, uint2bl(0));
op.setxattr(XATTR_ALLOCATED, uint2bl(0));
op.setxattr(XATTR_LAYOUT_STRIPE_UNIT, uint2bl(1));
op.setxattr(XATTR_LAYOUT_STRIPE_COUNT, uint2bl(1));
op.setxattr(XATTR_LAYOUT_OBJECT_SIZE, uint2bl(1<<object_size));
if (int rc = ioctx.operate(ext.soid, &op); rc < 0) {
return rc; /* including EEXIST */
}
return 0;
}
int SimpleRADOSStriper::open()
{
d(5) << oid << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
auto ext = get_first_extent();
auto op = librados::ObjectReadOperation();
bufferlist bl_excl, bl_size, bl_alloc, bl_version, pbl;
int prval_excl, prval_size, prval_alloc, prval_version;
op.getxattr(XATTR_EXCL, &bl_excl, &prval_excl);
op.getxattr(XATTR_SIZE, &bl_size, &prval_size);
op.getxattr(XATTR_ALLOCATED, &bl_alloc, &prval_alloc);
op.getxattr(XATTR_VERSION, &bl_version, &prval_version);
if (int rc = ioctx.operate(ext.soid, &op, &pbl); rc < 0) {
d(1) << " getxattr failed: " << cpp_strerror(rc) << dendl;
return rc;
}
exclusive_holder = bl_excl.to_str();
{
auto sstr = bl_size.to_str();
std::string err;
size = strict_strtoll(sstr.c_str(), 10, &err);
ceph_assert(err.empty());
}
{
auto sstr = bl_alloc.to_str();
std::string err;
allocated = strict_strtoll(sstr.c_str(), 10, &err);
ceph_assert(err.empty());
}
{
auto sstr = bl_version.to_str();
std::string err;
version = strict_strtoll(sstr.c_str(), 10, &err);
ceph_assert(err.empty());
}
d(15) << " size: " << size << " allocated: " << allocated << " version: " << version << dendl;
return 0;
}
int SimpleRADOSStriper::shrink_alloc(uint64_t a)
{
d(5) << dendl;
std::vector<aiocompletionptr> removes;
ceph_assert(a <= allocated);
uint64_t prune = std::max<uint64_t>(a, (1u << object_size)); /* never delete first extent here */
uint64_t len = allocated - prune;
const uint64_t bytes_removed = len;
uint64_t offset = prune;
while (len > 0) {
auto ext = get_next_extent(offset, len);
auto aiocp = aiocompletionptr(librados::Rados::aio_create_completion());
if (int rc = ioctx.aio_remove(ext.soid, aiocp.get()); rc < 0) {
d(1) << " aio_remove failed: " << cpp_strerror(rc) << dendl;
return rc;
}
removes.emplace_back(std::move(aiocp));
len -= ext.len;
offset += ext.len;
}
for (auto& aiocp : removes) {
if (int rc = aiocp->wait_for_complete(); rc < 0 && rc != -ENOENT) {
d(1) << " aio_remove failed: " << cpp_strerror(rc) << dendl;
return rc;
}
}
auto ext = get_first_extent();
auto op = librados::ObjectWriteOperation();
auto aiocp = aiocompletionptr(librados::Rados::aio_create_completion());
op.setxattr(XATTR_ALLOCATED, uint2bl(a));
d(15) << " updating allocated to " << a << dendl;
op.setxattr(XATTR_VERSION, uint2bl(version+1));
d(15) << " updating version to " << (version+1) << dendl;
if (int rc = ioctx.aio_operate(ext.soid, aiocp.get(), &op); rc < 0) {
d(1) << " update failed: " << cpp_strerror(rc) << dendl;
return rc;
}
/* we need to wait so we don't have dangling extents */
d(10) << " waiting for allocated update" << dendl;
if (int rc = aiocp->wait_for_complete(); rc < 0) {
d(1) << " update failure: " << cpp_strerror(rc) << dendl;
return rc;
}
if (logger) {
logger->inc(P_UPDATE_METADATA);
logger->inc(P_UPDATE_ALLOCATED);
logger->inc(P_UPDATE_VERSION);
logger->inc(P_SHRINK);
logger->inc(P_SHRINK_BYTES, bytes_removed);
}
version += 1;
allocated = a;
return 0;
}
int SimpleRADOSStriper::maybe_shrink_alloc()
{
d(15) << dendl;
if (size == 0) {
if (allocated > 0) {
d(10) << "allocation shrink to 0" << dendl;
return shrink_alloc(0);
} else {
return 0;
}
}
uint64_t mask = (1<<object_size)-1;
uint64_t new_allocated = min_growth + ((size + mask) & ~mask); /* round up base 2 */
if (allocated > new_allocated && ((allocated-new_allocated) > min_growth)) {
d(10) << "allocation shrink to " << new_allocated << dendl;
return shrink_alloc(new_allocated);
}
return 0;
}
bufferlist SimpleRADOSStriper::str2bl(std::string_view sv)
{
bufferlist bl;
bl.append(sv);
return bl;
}
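/* e.g. uint2bl(42) encodes the fixed-width decimal string "0000000000000042" */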
bufferlist SimpleRADOSStriper::uint2bl(uint64_t v)
{
CachedStackStringStream css;
*css << std::dec << std::setw(16) << std::setfill('0') << v;
bufferlist bl;
bl.append(css->strv());
return bl;
}
int SimpleRADOSStriper::set_metadata(uint64_t new_size, bool update_size)
{
d(10) << " new_size: " << new_size
<< " update_size: " << update_size
<< " allocated: " << allocated
<< " size: " << size
<< " version: " << version
<< dendl;
bool do_op = false;
auto new_allocated = allocated;
auto ext = get_first_extent();
auto op = librados::ObjectWriteOperation();
if (new_size > allocated) {
uint64_t mask = (1<<object_size)-1;
new_allocated = min_growth + ((size + mask) & ~mask); /* round up base 2 */
op.setxattr(XATTR_ALLOCATED, uint2bl(new_allocated));
do_op = true;
if (logger) logger->inc(P_UPDATE_ALLOCATED);
d(15) << " updating allocated to " << new_allocated << dendl;
}
if (update_size) {
op.setxattr(XATTR_SIZE, uint2bl(new_size));
do_op = true;
if (logger) logger->inc(P_UPDATE_SIZE);
d(15) << " updating size to " << new_size << dendl;
}
if (do_op) {
if (logger) logger->inc(P_UPDATE_METADATA);
if (logger) logger->inc(P_UPDATE_VERSION);
op.setxattr(XATTR_VERSION, uint2bl(version+1));
d(15) << " updating version to " << (version+1) << dendl;
auto aiocp = aiocompletionptr(librados::Rados::aio_create_completion());
if (int rc = ioctx.aio_operate(ext.soid, aiocp.get(), &op); rc < 0) {
d(1) << " update failure: " << cpp_strerror(rc) << dendl;
return rc;
}
version += 1;
if (allocated != new_allocated) {
/* we need to wait so we don't have dangling extents */
d(10) << "waiting for allocated update" << dendl;
if (int rc = aiocp->wait_for_complete(); rc < 0) {
d(1) << " update failure: " << cpp_strerror(rc) << dendl;
return rc;
}
aiocp.reset();
allocated = new_allocated;
}
if (aiocp) {
aios.emplace(std::move(aiocp));
}
if (update_size) {
size = new_size;
size_dirty = false;
return maybe_shrink_alloc();
}
}
return 0;
}
ssize_t SimpleRADOSStriper::write(const void* data, size_t len, uint64_t off)
{
d(5) << off << "~" << len << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
if (allocated < (len+off)) {
if (int rc = set_metadata(len+off, false); rc < 0) {
return rc;
}
}
size_t w = 0;
while ((len-w) > 0) {
auto ext = get_next_extent(off+w, len-w);
auto aiocp = aiocompletionptr(librados::Rados::aio_create_completion());
bufferlist bl;
bl.append((const char*)data+w, ext.len);
if (int rc = ioctx.aio_write(ext.soid, aiocp.get(), bl, ext.len, ext.off); rc < 0) {
break;
}
aios.emplace(std::move(aiocp));
w += ext.len;
}
wait_for_aios(false); // clean up finished completions
if (size < (len+off)) {
size = len+off;
size_dirty = true;
d(10) << " dirty size: " << size << dendl;
}
return (ssize_t)w;
}
ssize_t SimpleRADOSStriper::read(void* data, size_t len, uint64_t off)
{
d(5) << off << "~" << len << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
size_t r = 0;
// Don't use std::vector to store the bufferlists (e.g. for parallelizing
// aio_reads): its elements may be moved whenever the vector resizes, which
// would invalidate the buffer references held by in-flight reads.
std::deque<std::pair<bufferlist, aiocompletionptr>> reads;
while ((len-r) > 0) {
auto ext = get_next_extent(off+r, len-r);
auto& [bl, aiocp] = reads.emplace_back();
aiocp = aiocompletionptr(librados::Rados::aio_create_completion());
if (int rc = ioctx.aio_read(ext.soid, aiocp.get(), &bl, ext.len, ext.off); rc < 0) {
d(1) << " read failure: " << cpp_strerror(rc) << dendl;
return rc;
}
r += ext.len;
}
r = 0;
for (auto& [bl, aiocp] : reads) {
if (int rc = aiocp->wait_for_complete(); rc < 0) {
d(1) << " read failure: " << cpp_strerror(rc) << dendl;
return rc;
}
bl.begin().copy(bl.length(), ((char*)data)+r);
r += bl.length();
}
ceph_assert(r <= len);
return r;
}
int SimpleRADOSStriper::print_lockers(std::ostream& out)
{
int exclusive;
std::string tag;
std::list<librados::locker_t> lockers;
auto ext = get_first_extent();
if (int rc = ioctx.list_lockers(ext.soid, biglock, &exclusive, &tag, &lockers); rc < 0) {
d(1) << " list_lockers failure: " << cpp_strerror(rc) << dendl;
return rc;
}
if (lockers.empty()) {
out << " lockers none";
} else {
out << " lockers exclusive=" << exclusive << " tag=" << tag << " lockers=[";
bool first = true;
for (const auto& l : lockers) {
if (!first) out << ",";
out << l.client << ":" << l.cookie << ":" << l.address;
first = false;
}
out << "]";
}
return 0;
}
/* Do lock renewal in a separate thread: while it's unlikely sqlite chews on
* something for multiple seconds without calling into the VFS (where we could
* initiate a lock renewal), it's not impossible with complex queries. Also, we
* want to allow "PRAGMA locking_mode = exclusive" where the application may
* not use the sqlite3 database connection for an indeterminate amount of time.
*/
void SimpleRADOSStriper::lock_keeper_main(void)
{
d(20) << dendl;
const auto ext = get_first_extent();
while (!shutdown) {
d(20) << "tick" << dendl;
std::unique_lock lock(lock_keeper_mutex);
auto now = clock::now();
auto since = now-last_renewal;
if (since >= lock_keeper_interval && locked) {
d(10) << "renewing lock" << dendl;
auto tv = ceph::to_timeval(lock_keeper_timeout);
int rc = ioctx.lock_exclusive(ext.soid, biglock, cookie.to_string(), lockdesc, &tv, LIBRADOS_LOCK_FLAG_MUST_RENEW);
if (rc) {
/* If lock renewal fails, we cannot continue the application. Return
* -EBLOCKLISTED for all calls into the striper for this instance, even
* if we're not actually blocklisted.
*/
d(-1) << "lock renewal failed: " << cpp_strerror(rc) << dendl;
blocklisted = true;
break;
}
last_renewal = clock::now();
}
lock_keeper_cvar.wait_for(lock, lock_keeper_interval);
}
}
int SimpleRADOSStriper::recover_lock()
{
d(5) << "attempting to recover lock" << dendl;
std::string addrs;
const auto ext = get_first_extent();
{
auto tv = ceph::to_timeval(lock_keeper_timeout);
if (int rc = ioctx.lock_exclusive(ext.soid, biglock, cookie.to_string(), lockdesc, &tv, 0); rc < 0) {
return rc;
}
locked = true;
last_renewal = clock::now();
}
d(5) << "acquired lock, fetching last owner" << dendl;
{
bufferlist bl_excl;
if (int rc = ioctx.getxattr(ext.soid, XATTR_EXCL, bl_excl); rc < 0) {
if (rc == -ENOENT) {
/* someone removed it? ok... */
goto setowner;
} else {
d(-1) << "could not recover exclusive locker" << dendl;
locked = false; /* it will drop eventually */
return -EIO;
}
}
addrs = bl_excl.to_str();
}
if (addrs.empty()) {
d(5) << "someone else cleaned up" << dendl;
goto setowner;
} else {
d(5) << "exclusive lock holder was " << addrs << dendl;
}
if (blocklist_the_dead) {
entity_addrvec_t addrv;
addrv.parse(addrs.c_str());
auto R = librados::Rados(ioctx);
std::string_view b = "blocklist";
retry:
for (auto& a : addrv.v) {
CachedStackStringStream css;
*css << "{\"prefix\":\"osd " << b << "\", \"" << b << "op\":\"add\",";
*css << "\"addr\":\"";
*css << a;
*css << "\"}";
std::vector<std::string> cmd = {css->str()};
d(5) << "sending blocklist command: " << cmd << dendl;
std::string out;
if (int rc = R.mon_command(css->str(), bufferlist(), nullptr, &out); rc < 0) {
if (rc == -EINVAL && b == "blocklist") {
b = "blacklist";
goto retry;
}
d(-1) << "Cannot proceed with recovery because I have failed to blocklist the old client: " << cpp_strerror(rc) << ", out = " << out << dendl;
locked = false; /* it will drop eventually */
return -EIO;
}
}
/* Ensure our osd_op requests have the latest epoch. */
R.wait_for_latest_osdmap();
}
setowner:
d(5) << "setting new owner to myself, " << myaddrs << dendl;
{
auto myaddrbl = str2bl(myaddrs);
if (int rc = ioctx.setxattr(ext.soid, XATTR_EXCL, myaddrbl); rc < 0) {
d(-1) << "could not set lock owner" << dendl;
locked = false; /* it will drop eventually */
return -EIO;
}
}
return 0;
}
int SimpleRADOSStriper::lock(uint64_t timeoutms)
{
/* XXX: timeoutms is unused */
d(5) << "timeout=" << timeoutms << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
std::scoped_lock lock(lock_keeper_mutex);
ceph_assert(!is_locked());
/* We're going to be very lazy here in implementation: only exclusive locks
* are allowed. That even ensures a single reader.
*/
uint64_t slept = 0;
auto ext = get_first_extent();
while (true) {
/* The general fast path in one compound operation: obtain the lock,
* confirm the past locker cleaned up after themselves (set XATTR_EXCL to
* ""), then finally set XATTR_EXCL to our address vector as the new
* exclusive locker.
*/
auto op = librados::ObjectWriteOperation();
auto tv = ceph::to_timeval(lock_keeper_timeout);
utime_t duration;
duration.set_from_timeval(&tv);
rados::cls::lock::lock(&op, biglock, ClsLockType::EXCLUSIVE, cookie.to_string(), "", lockdesc, duration, 0);
op.cmpxattr(XATTR_EXCL, LIBRADOS_CMPXATTR_OP_EQ, bufferlist());
op.setxattr(XATTR_EXCL, str2bl(myaddrs));
int rc = ioctx.operate(ext.soid, &op);
if (rc == 0) {
locked = true;
last_renewal = clock::now();
break;
} else if (rc == -EBUSY) {
if ((slept % 500000) == 0) {
d(-1) << "waiting for locks: ";
print_lockers(*_dout);
*_dout << dendl;
}
usleep(5000);
slept += 5000;
continue;
} else if (rc == -ECANCELED) {
/* CMPXATTR failed, a locker didn't cleanup. Try to recover! */
if (rc = recover_lock(); rc < 0) {
if (rc == -EBUSY) {
continue; /* try again */
}
return rc;
}
break;
} else {
d(-1) << " lock failed: " << cpp_strerror(rc) << dendl;
return rc;
}
}
if (!lock_keeper.joinable()) {
lock_keeper = std::thread(&SimpleRADOSStriper::lock_keeper_main, this);
}
if (int rc = open(); rc < 0) {
d(1) << " open failed: " << cpp_strerror(rc) << dendl;
return rc;
}
d(5) << " = 0" << dendl;
if (logger) {
logger->inc(P_LOCK);
}
return 0;
}
int SimpleRADOSStriper::unlock()
{
d(5) << dendl;
if (blocklisted.load()) {
return -EBLOCKLISTED;
}
std::scoped_lock lock(lock_keeper_mutex);
ceph_assert(is_locked());
/* wait for flush of metadata */
if (int rc = flush(); rc < 0) {
return rc;
}
const auto ext = get_first_extent();
auto op = librados::ObjectWriteOperation();
op.cmpxattr(XATTR_EXCL, LIBRADOS_CMPXATTR_OP_EQ, str2bl(myaddrs));
op.setxattr(XATTR_EXCL, bufferlist());
rados::cls::lock::unlock(&op, biglock, cookie.to_string());
if (int rc = ioctx.operate(ext.soid, &op); rc < 0) {
d(-1) << " unlock failed: " << cpp_strerror(rc) << dendl;
return rc;
}
locked = false;
d(5) << " = 0" << dendl;
if (logger) {
logger->inc(P_UNLOCK);
}
return 0;
}
| 20,980 | 26.037371 | 150 |
cc
|
null |
ceph-main/src/SimpleRADOSStriper.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License version 2.1, as published by
* the Free Software Foundation. See file COPYING.
*
*/
#ifndef _SIMPLERADOSSTRIPER_H
#define _SIMPLERADOSSTRIPER_H
#include <queue>
#include <string_view>
#include <thread>
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "include/uuid.h"
#include "include/types.h"
#include "common/ceph_time.h"
#include "common/perf_counters.h"
class [[gnu::visibility("default")]] SimpleRADOSStriper
{
public:
using aiocompletionptr = std::unique_ptr<librados::AioCompletion>;
using clock = ceph::coarse_mono_clock;
using time = ceph::coarse_mono_time;
static inline const uint64_t object_size = 22; /* log2 of the object size: 2^22 = 4 MiB */
static inline const uint64_t min_growth = (1<<27); /* 128 MB */
static int config_logger(CephContext* cct, std::string_view name, std::shared_ptr<PerfCounters>* l);
SimpleRADOSStriper() = default;
SimpleRADOSStriper(librados::IoCtx _ioctx, std::string _oid)
: ioctx(std::move(_ioctx))
, oid(std::move(_oid))
{
cookie.generate_random();
auto r = librados::Rados(ioctx);
myaddrs = r.get_addrs();
}
SimpleRADOSStriper(const SimpleRADOSStriper&) = delete;
SimpleRADOSStriper& operator=(const SimpleRADOSStriper&) = delete;
SimpleRADOSStriper& operator=(SimpleRADOSStriper&&) = delete;
SimpleRADOSStriper(SimpleRADOSStriper&&) = delete;
~SimpleRADOSStriper();
int create();
int open();
int remove();
int stat(uint64_t* size);
ssize_t write(const void* data, size_t len, uint64_t off);
ssize_t read(void* data, size_t len, uint64_t off);
int truncate(size_t size);
int flush();
int lock(uint64_t timeoutms);
int unlock();
int is_locked() const {
return locked;
}
int print_lockers(std::ostream& out);
void set_logger(std::shared_ptr<PerfCounters> l) {
logger = std::move(l);
}
void set_lock_interval(std::chrono::milliseconds t) {
lock_keeper_interval = t;
}
void set_lock_timeout(std::chrono::milliseconds t) {
lock_keeper_timeout = t;
}
void set_blocklist_the_dead(bool b) {
blocklist_the_dead = b;
}
protected:
struct extent {
std::string soid;
size_t len;
size_t off;
};
ceph::bufferlist str2bl(std::string_view sv);
ceph::bufferlist uint2bl(uint64_t v);
int set_metadata(uint64_t new_size, bool update_size);
int shrink_alloc(uint64_t a);
int maybe_shrink_alloc();
int wait_for_aios(bool block);
int recover_lock();
extent get_next_extent(uint64_t off, size_t len) const;
extent get_first_extent() const {
return get_next_extent(0, 0);
}
private:
static inline const char XATTR_EXCL[] = "striper.excl";
static inline const char XATTR_SIZE[] = "striper.size";
static inline const char XATTR_ALLOCATED[] = "striper.allocated";
static inline const char XATTR_VERSION[] = "striper.version";
static inline const char XATTR_LAYOUT_STRIPE_UNIT[] = "striper.layout.stripe_unit";
static inline const char XATTR_LAYOUT_STRIPE_COUNT[] = "striper.layout.stripe_count";
static inline const char XATTR_LAYOUT_OBJECT_SIZE[] = "striper.layout.object_size";
static inline const std::string biglock = "striper.lock";
static inline const std::string lockdesc = "SimpleRADOSStriper";
void lock_keeper_main();
librados::IoCtx ioctx;
std::shared_ptr<PerfCounters> logger;
std::string oid;
std::thread lock_keeper;
std::condition_variable lock_keeper_cvar;
std::mutex lock_keeper_mutex;
time last_renewal = time::min();
std::chrono::milliseconds lock_keeper_interval{2000};
std::chrono::milliseconds lock_keeper_timeout{30000};
std::atomic<bool> blocklisted = false;
bool shutdown = false;
version_t version = 0;
std::string exclusive_holder;
uint64_t size = 0;
uint64_t allocated = 0;
uuid_d cookie{};
bool locked = false;
bool size_dirty = false;
bool blocklist_the_dead = true;
std::queue<aiocompletionptr> aios;
int aios_failure = 0;
std::string myaddrs;
};
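/* Illustrative usage sketch (pool/object names are hypothetical and error
 * handling is elided; see the .cc for the real call sequences):
 *
 *   librados::IoCtx ioctx;                  // assumed open on some pool
 *   SimpleRADOSStriper srs(ioctx, "a.db");  // striped "file" named a.db
 *   srs.lock(0);                            // take the exclusive lock
 *   const char buf[4] = {};
 *   srs.write(buf, sizeof(buf), 0);         // write 4 bytes at offset 0
 *   srs.flush();                            // wait for outstanding aios
 *   srs.unlock();                           // flushes metadata, drops lock
 */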
#endif /* _SIMPLERADOSSTRIPER_H */
| 4,279 | 29.571429 | 102 |
h
|
null |
ceph-main/src/btrfs_ioc_test.c
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <asm/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include "common/safe_io.h"
#include "os/btrfs_ioctl.h"
void do_open_wr(const char *fname, int *fd)
{
*fd = open(fname, O_WRONLY | O_CREAT, 0644);
if (*fd < 0) {
perror("open");
exit(1);
}
}
void do_open_rd(const char *fname, int *fd)
{
*fd = open(fname, O_RDONLY);
if (*fd < 0) {
perror("open");
exit(1);
}
}
void do_lseek(int fd, int ofs)
{
int rc = lseek(fd, ofs, SEEK_SET);
if (rc < 0) {
perror("lseek");
exit(1);
}
}
void do_write(int fd, int len)
{
char *buf = malloc(len);
int rc;
if (!buf) {
printf("not enough memory\n");
exit(1);
}
memset(buf, 0, len);
/* safe_write returns 0 on success or a negative errno; a zero return means
 * all len bytes were written, so no separate short-write check is needed. */
rc = safe_write(fd, buf, len);
if (rc < 0) {
fprintf(stderr, "safe_write failed with error %d (%s)\n",
rc, strerror(-rc));
exit(1);
}
free(buf);
}
void do_link(const char *old, const char *new)
{
int rc = link(old, new);
if (rc < 0) {
perror("link");
exit(1);
}
}
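/* Reflink len bytes from fd `from` to fd `to` via BTRFS_IOC_CLONE_RANGE;
 * note dest_offset is set equal to src_offset, so the cloned range lands at
 * the same offset in the destination file. */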
void do_clone_range(int from, int to, int off, int len)
{
struct btrfs_ioctl_clone_range_args a;
int r;
a.src_fd = from;
a.src_offset = off;
a.src_length = len;
a.dest_offset = off;
r = ioctl(to, BTRFS_IOC_CLONE_RANGE, &a);
if (r < 0) {
perror("ioctl");
exit(1);
}
}
void do_snap_async(int fd, const char *name, unsigned long long *transid)
{
struct btrfs_ioctl_async_vol_args async_args;
struct btrfs_ioctl_vol_args volargs;
int r;
strcpy(volargs.name, name);
volargs.fd = fd;
async_args.args = &volargs;
async_args.transid = transid;
r = ioctl(fd, BTRFS_IOC_SNAP_CREATE_ASYNC, &async_args);
if (r < 0) {
perror("ioctl");
exit(1);
}
}
void do_snap_destroy(int fd, const char *name)
{
struct btrfs_ioctl_vol_args volargs;
int r;
strcpy(volargs.name, name);
volargs.fd = 0;
r = ioctl(fd, BTRFS_IOC_SNAP_DESTROY, &volargs);
if (r < 0) {
perror("snap_destroy: ioctl");
exit(1);
}
}
void do_snap_wait(int fd, unsigned long long transid)
{
int r = ioctl(fd, BTRFS_IOC_WAIT_SYNC, &transid);
if (r < 0) {
perror("do_snap_wait: ioctl");
exit(1);
}
}
void usage_exit(char *arg)
{
printf("usage: %s <btrfs_base> <snap_name>\n", arg);
exit(1);
}
#define TEMP_FILENAME "temp"
#define DEST_FILENAME "dest"
#define SRC_FILENAME "src"
int main(int argc, char *argv[])
{
const char *base_dir;
const char *snap_name;
int fd;
int i;
unsigned long long transid;
if (argc < 3)
usage_exit(argv[0]);
base_dir = argv[1];
snap_name = argv[2];
for (i=0; i<10; i++) {
printf("%d\n", i);
do_open_rd(base_dir, &fd);
do_snap_async(fd, snap_name, &transid);
sleep(2);
//do_snap_wait(fd, transid);
do_snap_destroy(fd, snap_name);
close(fd);
}
return 0;
}
| 2,846 | 15.552326 | 73 |
c
|
null |
ceph-main/src/ceph-osd-prestart.sh
|
#!/bin/sh
if [ `uname` = FreeBSD ]; then
GETOPT=/usr/local/bin/getopt
else
GETOPT=getopt
fi
eval set -- "$(${GETOPT} -o i: --long id:,cluster: -- $@)"
while true ; do
case "$1" in
-i|--id) id=$2; shift 2 ;;
--cluster) cluster=$2; shift 2 ;;
--) shift ; break ;;
esac
done
if [ -z "$id" ]; then
echo "Usage: $0 [OPTIONS]"
echo "--id/-i ID set ID portion of my name"
echo "--cluster NAME set cluster name (default: ceph)"
exit 1;
fi
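# e.g. --cluster ceph --id 0 resolves to /var/lib/ceph/osd/ceph-0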
data="/var/lib/ceph/osd/${cluster:-ceph}-$id"
# assert data directory exists - see http://tracker.ceph.com/issues/17091
if [ ! -d "$data" ]; then
echo "OSD data directory $data does not exist; bailing out." 1>&2
exit 1
fi
journal="$data/journal"
if [ -L "$journal" -a ! -e "$journal" ]; then
udevadm settle --timeout=5 || :
if [ -L "$journal" -a ! -e "$journal" ]; then
echo "ceph-osd(${cluster:-ceph}-$id): journal not present, not starting yet." 1>&2
exit 0
fi
fi
# ensure ownership is correct
owner=`stat -c %U $data/.`
if [ $owner != 'ceph' -a $owner != 'root' ]; then
echo "ceph-osd data dir $data is not owned by 'ceph' or 'root'"
echo "you must 'chown -R ceph:ceph ...' or similar to fix ownership"
exit 1
fi
exit 0
| 1,251 | 22.622642 | 90 |
sh
|
null |
ceph-main/src/ceph_common.sh
|
#!/bin/sh
CCONF="$BINDIR/ceph-conf"
default_conf=$ETCDIR"/ceph.conf"
conf=$default_conf
hostname=`hostname -s`
verify_conf() {
# fetch conf?
if [ -x "$ETCDIR/fetch_config" ] && [ "$conf" = "$default_conf" ]; then
conf="/tmp/fetched.ceph.conf.$$"
echo "[$ETCDIR/fetch_config $conf]"
if $ETCDIR/fetch_config $conf && [ -e $conf ]; then true ; else
echo "$0: failed to fetch config with '$ETCDIR/fetch_config $conf'"
exit 1
fi
# yay!
else
# make sure ceph.conf exists
if [ ! -e $conf ]; then
if [ "$conf" = "$default_conf" ]; then
echo "$0: ceph conf $conf not found; system is not configured."
exit 0
fi
echo "$0: ceph conf $conf not found!"
usage_exit
fi
fi
}
check_host() {
# what host is this daemon assigned to?
host=`$CCONF -c $conf -n $type.$id host`
if [ "$host" = "localhost" ]; then
echo "$0: use a proper short hostname (hostname -s), not 'localhost', in $conf section $type.$id; skipping entry"
return 1
fi
if expr match "$host" '.*\.' > /dev/null 2>&1; then
echo "$0: $conf section $type.$id"
echo "contains host=$host, which contains dots; this is probably wrong"
echo "It must match the result of hostname -s"
fi
ssh=""
rootssh=""
sshdir=$PWD
get_conf user "" "user"
#echo host for $name is $host, i am $hostname
cluster=$1
if [ -e "/var/lib/ceph/$type/$cluster-$id/upstart" ]; then
return 1
fi
# sysvinit managed instance in standard location?
if [ -e "/var/lib/ceph/$type/$cluster-$id/sysvinit" ]; then
host="$hostname"
echo "=== $type.$id === "
return 0
fi
# ignore all sections without 'host' defined
if [ -z "$host" ]; then
return 1
fi
if [ "$host" != "$hostname" ]; then
# skip, unless we're starting remote daemons too
if [ $allhosts -eq 0 ]; then
return 1
fi
# we'll need to ssh into that host
if [ -z "$user" ]; then
ssh="ssh $host"
else
ssh="ssh $user@$host"
fi
rootssh="ssh root@$host"
get_conf sshdir "$sshdir" "ssh path"
fi
echo "=== $type.$id === "
return 0
}
do_cmd() {
if [ -z "$ssh" ]; then
[ $verbose -eq 1 ] && echo "--- $host# $1"
ulimit -c unlimited
whoami=`whoami`
if [ "$whoami" = "$user" ] || [ -z "$user" ]; then
bash -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && exit 1; }
else
sudo su $user -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && exit 1; }
fi
else
[ $verbose -eq 1 ] && echo "--- $ssh $2 \"if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1\""
$ssh $2 "if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1" || { [ -z "$3" ] && echo "failed: '$ssh $1'" && exit 1; }
fi
}
do_cmd_okfail() {
ERR=0
if [ -z "$ssh" ]; then
[ $verbose -eq 1 ] && echo "--- $host# $1"
ulimit -c unlimited
whoami=`whoami`
if [ "$whoami" = "$user" ] || [ -z "$user" ]; then
bash -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && ERR=1 && return 1; }
else
sudo su $user -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && ERR=1 && return 1; }
fi
else
[ $verbose -eq 1 ] && echo "--- $ssh $2 \"if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1\""
$ssh $2 "if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1" || { [ -z "$3" ] && echo "failed: '$ssh $1'" && ERR=1 && return 1; }
fi
return 0
}
do_root_cmd() {
if [ -z "$ssh" ]; then
[ $verbose -eq 1 ] && echo "--- $host# $1"
ulimit -c unlimited
whoami=`whoami`
if [ "$whoami" = "root" ]; then
bash -c "$1" || { echo "failed: '$1'" ; exit 1; }
else
sudo bash -c "$1" || { echo "failed: '$1'" ; exit 1; }
fi
else
[ $verbose -eq 1 ] && echo "--- $rootssh $2 \"if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi ; cd $sshdir ; ulimit -c unlimited ; $1\""
$rootssh $2 "if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi ; cd $sshdir; ulimit -c unlimited ; $1" || { echo "failed: '$rootssh $1'" ; exit 1; }
fi
}
do_root_cmd_okfail() {
ERR=0
if [ -z "$ssh" ]; then
[ $verbose -eq 1 ] && echo "--- $host# $1"
ulimit -c unlimited
whoami=`whoami`
if [ "$whoami" = "root" ]; then
bash -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && ERR=1 && return 1; }
else
sudo bash -c "$1" || { [ -z "$3" ] && echo "failed: '$1'" && ERR=1 && return 1; }
fi
else
[ $verbose -eq 1 ] && echo "--- $rootssh $2 \"if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1\""
$rootssh $2 "if [ ! -d $sshdir ]; then mkdir -p $sshdir; fi; cd $sshdir ; ulimit -c unlimited ; $1" || { [ -z "$3" ] && echo "failed: '$rootssh $1'" && ERR=1 && return 1; }
fi
return 0
}
get_local_daemon_list() {
type=$1
if [ -d "/var/lib/ceph/$type" ]; then
for p in `find -L /var/lib/ceph/$type -mindepth 1 -maxdepth 1 -type d`; do
i=`basename $p`
if [ -e "/var/lib/ceph/$type/$i/sysvinit" ]; then
id=`echo $i | sed 's/[^-]*-//'`
local="$local $type.$id"
fi
done
fi
}
get_local_name_list() {
# enumerate local directories
local=""
get_local_daemon_list "mon"
get_local_daemon_list "osd"
get_local_daemon_list "mds"
get_local_daemon_list "mgr"
}
get_name_list() {
orig="$*"
# extract list of monitors, mdss, osds, mgrs defined in the ceph config ($conf)
allconf=$(for entity in \
$local \
`$CCONF -c $conf -l mon | egrep -v '^mon$' || true` \
`$CCONF -c $conf -l mds | egrep -v '^mds$' || true` \
`$CCONF -c $conf -l mgr | egrep -v '^mgr$' || true` \
`$CCONF -c $conf -l osd | egrep -v '^osd$' || true`; do
echo $entity
done | sort -u)
if [ -z "$orig" ]; then
what="$allconf"
return
fi
what=""
for f in $orig; do
type=`echo $f | cut -c 1-3` # e.g. 'mon', if $item is 'mon1'
id=`echo $f | cut -c 4- | sed 's/\\.//'`
case $f in
mon | osd | mds | mgr)
for d in $allconf; do
if echo $d | grep -q ^$type; then
what="$what $d"
fi
done
;;
*)
if ! echo " " $allconf $local " " | egrep -q "( $type$id | $type.$id )"; then
echo "$0: $type.$id not found ($conf defines" $allconf", /var/lib/ceph defines" $local")"
exit 1
fi
what="$what $f"
;;
esac
done
}
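# get_conf <var> <default> <key> [section]
# Reads <key> for section $type.$id (or [section] when given) from $conf into
# <var>, falling back to <default>. Hypothetical example:
#   get_conf run_dir "/var/run/ceph" "run dir"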
get_conf() {
var=$1
def=$2
key=$3
shift; shift; shift
if [ -z "$1" ]; then
[ "$verbose" -eq 1 ] && echo "$CCONF -c $conf -n $type.$id \"$key\""
eval "$var=\"`$CCONF -c $conf -n $type.$id \"$key\" || printf \"$def\"`\""
else
[ "$verbose" -eq 1 ] && echo "$CCONF -c $conf -s $1 \"$key\""
eval "$var=\"`$CCONF -c $conf -s $1 \"$key\" || eval printf \"$def\"`\""
fi
}
get_conf_bool() {
get_conf "$@"
eval "val=$"$1
[ "$val" = "0" ] && export $1=0
[ "$val" = "false" ] && export $1=0
[ "$val" = "1" ] && export $1=1
[ "$val" = "true" ] && export $1=1
}
| 6,846 | 26.720648 | 173 |
sh
|
null |
ceph-main/src/ceph_fuse.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/stat.h>
#include <sys/utsname.h>
#include <iostream>
#include <string>
#include <optional>
#include "common/async/context_pool.h"
#include "common/config.h"
#include "common/errno.h"
#include "client/Client.h"
#include "client/fuse_ll.h"
#include "msg/Messenger.h"
#include "mon/MonClient.h"
#include "common/Timer.h"
#include "common/ceph_argparse.h"
#if defined(__linux__)
#include "common/linux_version.h"
#endif
#include "global/global_init.h"
#include "global/signal_handler.h"
#include "common/Preforker.h"
#include "common/safe_io.h"
#include <sys/types.h>
#include <fcntl.h>
#include "include/ceph_fuse.h"
#include <fuse_lowlevel.h>
#define dout_context g_ceph_context
using namespace std;
ceph::async::io_context_pool icp;
static void fuse_usage()
{
const char* argv[] = {
"ceph-fuse",
"-h",
};
struct fuse_args args = FUSE_ARGS_INIT(2, (char**)argv);
#if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0)
struct fuse_cmdline_opts opts = {};
if (fuse_parse_cmdline(&args, &opts) != -1) {
if (opts.show_help) {
cout << "usage: " << argv[0] << " [options] <mountpoint>\n\n";
cout << "FUSE options:\n";
fuse_cmdline_help();
fuse_lowlevel_help();
cout << "\n";
}
} else {
#else
if (fuse_parse_cmdline(&args, nullptr, nullptr, nullptr) == -1) {
#endif
derr << "fuse_parse_cmdline failed." << dendl;
}
ceph_assert(args.allocated);
fuse_opt_free_args(&args);
}
void usage()
{
cout <<
"usage: ceph-fuse [-n client.username] [-m mon-ip-addr:mon-port] <mount point> [OPTIONS]\n"
" --client_mountpoint/-r <sub_directory>\n"
" use sub_directory as the mounted root, rather than the full Ceph tree.\n"
"\n";
fuse_usage();
generic_client_usage();
}
int main(int argc, const char **argv, const char *envp[]) {
int filer_flags = 0;
//cerr << "ceph-fuse starting " << myrank << "/" << world << std::endl;
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
std::map<std::string,std::string> defaults = {
{ "pid_file", "" },
{ "chdir", "/" } // FUSE will chdir("/"); be ready.
};
auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_DAEMON,
CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS);
for (auto i = args.begin(); i != args.end();) {
if (ceph_argparse_double_dash(args, i)) {
break;
} else if (ceph_argparse_flag(args, i, "--localize-reads", (char*)nullptr)) {
cerr << "setting CEPH_OSD_FLAG_LOCALIZE_READS" << std::endl;
filer_flags |= CEPH_OSD_FLAG_LOCALIZE_READS;
} else if (ceph_argparse_flag(args, i, "-V", (char*)nullptr)) {
const char* tmpargv[] = {
"ceph-fuse",
"-V"
};
struct fuse_args fargs = FUSE_ARGS_INIT(2, (char**)tmpargv);
#if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0)
struct fuse_cmdline_opts opts = {};
if (fuse_parse_cmdline(&fargs, &opts) == -1) {
#else
if (fuse_parse_cmdline(&fargs, nullptr, nullptr, nullptr) == -1) {
#endif
derr << "fuse_parse_cmdline failed." << dendl;
}
ceph_assert(fargs.allocated);
fuse_opt_free_args(&fargs);
exit(0);
} else {
++i;
}
}
// args for fuse
const char **newargv;
int newargc;
vec_to_argv(argv[0], args, &newargc, &newargv);
// check for 32-bit arch
#ifndef __LP64__
cerr << std::endl;
cerr << "WARNING: Ceph inode numbers are 64 bits wide, and FUSE on 32-bit kernels does" << std::endl;
cerr << " not cope well with that situation. Expect to crash shortly." << std::endl;
cerr << std::endl;
#endif
Preforker forker;
auto daemonize = g_conf().get_val<bool>("daemonize");
if (daemonize) {
global_init_prefork(g_ceph_context);
int r;
string err;
r = forker.prefork(err);
if (r < 0 || forker.is_parent()) {
// Start log if current process is about to exit. Otherwise, we hit an assert
// in the Ceph context destructor.
g_ceph_context->_log->start();
}
if (r < 0) {
cerr << "ceph-fuse " << err << std::endl;
return r;
}
if (forker.is_parent()) {
r = forker.parent_wait(err);
if (r < 0) {
cerr << "ceph-fuse " << err << std::endl;
}
return r;
}
global_init_postfork_start(cct.get());
}
{
common_init_finish(g_ceph_context);
init_async_signal_handler();
register_async_signal_handler(SIGHUP, sighup_handler);
//cout << "child, mounting" << std::endl;
class RemountTest : public Thread {
public:
CephFuse *cfuse;
Client *client;
RemountTest() : cfuse(nullptr), client(nullptr) {}
void init(CephFuse *cf, Client *cl) {
cfuse = cf;
client = cl;
}
~RemountTest() override {}
void *entry() override {
#if defined(__linux__)
bool can_invalidate_dentries = g_conf().get_val<bool>(
"client_try_dentry_invalidate");
uint64_t max_retries = g_conf().get_val<uint64_t>(
"client_max_retries_on_remount_failure");
std::pair<int, bool> test_result;
uint64_t i = 0;
int tr = 0;
do {
test_result = client->test_dentry_handling(can_invalidate_dentries);
tr = test_result.first;
if (tr) {
sleep(1);
}
} while (++i < max_retries && tr);
bool abort_on_failure = test_result.second;
bool client_die_on_failed_dentry_invalidate = g_conf().get_val<bool>(
"client_die_on_failed_dentry_invalidate");
if (tr != 0 && client_die_on_failed_dentry_invalidate) {
cerr << "ceph-fuse[" << getpid()
<< "]: fuse failed dentry invalidate/remount test with error "
<< cpp_strerror(tr) << ", stopping" << std::endl;
char buf[5050];
string mountpoint = cfuse->get_mount_point();
snprintf(buf, sizeof(buf), "fusermount -u -z %s", mountpoint.c_str());
int umount_r = system(buf);
if (umount_r) {
if (umount_r != -1) {
if (WIFEXITED(umount_r)) {
umount_r = WEXITSTATUS(umount_r);
cerr << "got error " << umount_r
<< " when unmounting Ceph on failed remount test!" << std::endl;
} else {
cerr << "attempt to umount on failed remount test failed (on a signal?)" << std::endl;
}
} else {
cerr << "system() invocation failed during remount test" << std::endl;
}
}
}
if(abort_on_failure) {
ceph_abort();
}
return reinterpret_cast<void*>(tr);
#else
return reinterpret_cast<void*>(0);
#endif
}
} tester;
// get monmap
Messenger *messenger = nullptr;
StandaloneClient *client;
CephFuse *cfuse;
UserPerm perms;
int tester_r = 0;
void *tester_rp = nullptr;
icp.start(cct->_conf.get_val<std::uint64_t>("client_asio_thread_count"));
MonClient *mc = new MonClient(g_ceph_context, icp);
int r = mc->build_initial_monmap();
if (r == -EINVAL) {
cerr << "failed to generate initial mon list" << std::endl;
exit(1);
}
if (r < 0)
goto out_mc_start_failed;
// start up network
messenger = Messenger::create_client_messenger(g_ceph_context, "client");
messenger->set_default_policy(Messenger::Policy::lossy_client(0));
messenger->set_policy(entity_name_t::TYPE_MDS,
Messenger::Policy::lossless_client(0));
client = new StandaloneClient(messenger, mc, icp);
if (filer_flags) {
client->set_filer_flags(filer_flags);
}
cfuse = new CephFuse(client, forker.get_signal_fd());
r = cfuse->init(newargc, newargv);
if (r != 0) {
cerr << "ceph-fuse[" << getpid() << "]: fuse failed to initialize" << std::endl;
goto out_messenger_start_failed;
}
cerr << "ceph-fuse[" << getpid() << "]: starting ceph client" << std::endl;
r = messenger->start();
if (r < 0) {
cerr << "ceph-fuse[" << getpid() << "]: ceph messenger failed with " << cpp_strerror(-r) << std::endl;
goto out_messenger_start_failed;
}
// start client
r = client->init();
if (r < 0) {
cerr << "ceph-fuse[" << getpid() << "]: ceph client failed with " << cpp_strerror(-r) << std::endl;
goto out_init_failed;
}
client->update_metadata("mount_point", cfuse->get_mount_point());
perms = client->pick_my_perms();
{
// start up fuse
// use my argc, argv (make sure you pass a mount point!)
auto client_mountpoint = g_conf().get_val<std::string>(
"client_mountpoint");
auto mountpoint = client_mountpoint.c_str();
auto fuse_require_active_mds = g_conf().get_val<bool>(
"fuse_require_active_mds");
r = client->mount(mountpoint, perms, fuse_require_active_mds);
if (r < 0) {
if (r == CEPH_FUSE_NO_MDS_UP) {
cerr << "ceph-fuse[" << getpid() << "]: probably no MDS server is up?" << std::endl;
}
cerr << "ceph-fuse[" << getpid() << "]: ceph mount failed with " << cpp_strerror(-r) << std::endl;
r = EXIT_FAILURE;
goto out_shutdown;
}
}
r = cfuse->start();
if (r != 0) {
cerr << "ceph-fuse[" << getpid() << "]: fuse failed to start" << std::endl;
goto out_client_unmount;
}
cerr << "ceph-fuse[" << getpid() << "]: starting fuse" << std::endl;
tester.init(cfuse, client);
tester.create("tester");
r = cfuse->loop();
tester.join(&tester_rp);
tester_r = static_cast<int>(reinterpret_cast<uint64_t>(tester_rp));
cerr << "ceph-fuse[" << getpid() << "]: fuse finished with error " << r
<< " and tester_r " << tester_r <<std::endl;
out_client_unmount:
client->unmount();
cfuse->finalize();
out_shutdown:
icp.stop();
client->shutdown();
out_init_failed:
unregister_async_signal_handler(SIGHUP, sighup_handler);
shutdown_async_signal_handler();
// wait for messenger to finish
messenger->shutdown();
messenger->wait();
out_messenger_start_failed:
delete cfuse;
cfuse = nullptr;
delete client;
client = nullptr;
delete messenger;
messenger = nullptr;
out_mc_start_failed:
free(newargv);
delete mc;
mc = nullptr;
//cout << "child done" << std::endl;
return forker.signal_exit(r);
}
}
| 10,720 | 28.133152 | 108 |
cc
|
null |
ceph-main/src/ceph_mds.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <pthread.h>
#include <iostream>
#include <string>
#include "common/async/context_pool.h"
#include "include/ceph_features.h"
#include "include/compat.h"
#include "include/random.h"
#include "common/config.h"
#include "common/strtol.h"
#include "common/numa.h"
#include "mon/MonMap.h"
#include "mds/MDSDaemon.h"
#include "msg/Messenger.h"
#include "common/Timer.h"
#include "common/ceph_argparse.h"
#include "common/pick_address.h"
#include "common/Preforker.h"
#include "global/global_init.h"
#include "global/signal_handler.h"
#include "global/pidfile.h"
#include "mon/MonClient.h"
#include "auth/KeyRing.h"
#include "perfglue/heap_profiler.h"
#include "include/ceph_assert.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_mds
using std::cerr;
using std::cout;
using std::vector;
static void usage()
{
cout << "usage: ceph-mds -i <ID> [flags]\n"
<< " -m monitorip:port\n"
<< " connect to monitor at given address\n"
<< " --debug_mds n\n"
<< " debug MDS level (e.g. 10)\n"
<< std::endl;
generic_server_usage();
}
MDSDaemon *mds = NULL;
static void handle_mds_signal(int signum)
{
if (mds)
mds->handle_signal(signum);
}
int main(int argc, const char **argv)
{
ceph_pthread_setname(pthread_self(), "ceph-mds");
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
auto cct = global_init(NULL, args,
CEPH_ENTITY_TYPE_MDS, CODE_ENVIRONMENT_DAEMON, 0);
ceph_heap_profiler_init();
int numa_node = g_conf().get_val<int64_t>("mds_numa_node");
size_t numa_cpu_set_size = 0;
cpu_set_t numa_cpu_set;
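/* If mds_numa_node is configured (>= 0), pin all MDS threads to that node's
 * CPU set; on any failure we just log and run without affinity. */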
if (numa_node >= 0) {
int r = get_numa_node_cpu_set(numa_node, &numa_cpu_set_size, &numa_cpu_set);
if (r < 0) {
dout(1) << __func__ << " unable to determine mds numa node " << numa_node
<< " CPUs" << dendl;
numa_node = -1;
} else {
r = set_cpu_affinity_all_threads(numa_cpu_set_size, &numa_cpu_set);
if (r < 0) {
derr << __func__ << " failed to set numa affinity: " << cpp_strerror(r)
<< dendl;
}
}
} else {
dout(1) << __func__ << " not setting numa affinity" << dendl;
}
std::string val, action;
for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ) {
if (ceph_argparse_double_dash(args, i)) {
break;
}
else if (ceph_argparse_witharg(args, i, &val, "--hot-standby", (char*)NULL)) {
dout(0) << "--hot-standby is obsolete and has no effect" << dendl;
}
else {
derr << "Error: can't understand argument: " << *i << "\n" << dendl;
exit(1);
}
}
Preforker forker;
entity_addrvec_t addrs;
pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC, &addrs);
// Normal startup
if (g_conf()->name.has_default_id()) {
derr << "must specify '-i name' with the ceph-mds instance name" << dendl;
exit(1);
}
if (g_conf()->name.get_id().empty() ||
(g_conf()->name.get_id()[0] >= '0' && g_conf()->name.get_id()[0] <= '9')) {
derr << "MDS id '" << g_conf()->name << "' is invalid. "
"MDS names may not start with a numeric digit." << dendl;
exit(1);
}
if (global_init_prefork(g_ceph_context) >= 0) {
std::string err;
int r = forker.prefork(err);
if (r < 0) {
cerr << err << std::endl;
return r;
}
if (forker.is_parent()) {
if (forker.parent_wait(err) != 0) {
return -ENXIO;
}
return 0;
}
global_init_postfork_start(g_ceph_context);
}
common_init_finish(g_ceph_context);
global_init_chdir(g_ceph_context);
std::string public_msgr_type = g_conf()->ms_public_type.empty() ? g_conf().get_val<std::string>("ms_type") : g_conf()->ms_public_type;
Messenger *msgr = Messenger::create(g_ceph_context, public_msgr_type,
entity_name_t::MDS(-1), "mds",
Messenger::get_random_nonce());
if (!msgr)
forker.exit(1);
msgr->set_cluster_protocol(CEPH_MDS_PROTOCOL);
cout << "starting " << g_conf()->name << " at " << msgr->get_myaddrs()
<< std::endl;
uint64_t required =
CEPH_FEATURE_OSDREPLYMUX;
msgr->set_default_policy(Messenger::Policy::lossy_client(required));
msgr->set_policy(entity_name_t::TYPE_MON,
Messenger::Policy::lossy_client(CEPH_FEATURE_UID |
CEPH_FEATURE_PGID64));
msgr->set_policy(entity_name_t::TYPE_MDS,
Messenger::Policy::lossless_peer(CEPH_FEATURE_UID));
msgr->set_policy(entity_name_t::TYPE_CLIENT,
Messenger::Policy::stateful_server(0));
int r = msgr->bindv(addrs);
if (r < 0)
forker.exit(1);
// set up signal handlers, now that we've daemonized/forked.
init_async_signal_handler();
register_async_signal_handler(SIGHUP, sighup_handler);
// get monmap
ceph::async::io_context_pool ctxpool(2);
MonClient mc(g_ceph_context, ctxpool);
if (mc.build_initial_monmap() < 0)
forker.exit(1);
global_init_chdir(g_ceph_context);
msgr->start();
// start mds
mds = new MDSDaemon(g_conf()->name.get_id().c_str(), msgr, &mc, ctxpool);
// in case we have to respawn...
mds->orig_argc = argc;
mds->orig_argv = argv;
if (g_conf()->daemonize) {
global_init_postfork_finish(g_ceph_context);
forker.daemonize();
}
r = mds->init();
if (r < 0) {
msgr->wait();
goto shutdown;
}
register_async_signal_handler_oneshot(SIGINT, handle_mds_signal);
register_async_signal_handler_oneshot(SIGTERM, handle_mds_signal);
if (g_conf()->inject_early_sigterm)
kill(getpid(), SIGTERM);
msgr->wait();
unregister_async_signal_handler(SIGHUP, sighup_handler);
unregister_async_signal_handler(SIGINT, handle_mds_signal);
unregister_async_signal_handler(SIGTERM, handle_mds_signal);
shutdown_async_signal_handler();
shutdown:
ctxpool.stop();
// yuck: grab the mds lock, so we can be sure that whoever in *mds
// called shutdown finishes what they were doing.
mds->mds_lock.lock();
mds->mds_lock.unlock();
pidfile_remove();
// only delete if it was a clean shutdown (to aid memory leak
// detection, etc.). don't bother if it was a suicide.
if (mds->is_clean_shutdown()) {
delete mds;
delete msgr;
}
// cd on exit, so that gmon.out (if any) goes into a separate directory for each node.
char s[20];
snprintf(s, sizeof(s), "gmon/%d", getpid());
if ((mkdir(s, 0755) == 0) && (chdir(s) == 0)) {
cerr << "ceph-mds: gmon.out should be in " << s << std::endl;
}
return 0;
}
| 7,183 | 26.212121 | 136 |
cc
|
null |
ceph-main/src/ceph_mgr.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat Inc
*
* Author: John Spray <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <Python.h>
#include <pthread.h>
#include "include/types.h"
#include "include/compat.h"
#include "common/config.h"
#include "common/ceph_argparse.h"
#include "common/errno.h"
#include "common/pick_address.h"
#include "global/global_init.h"
#include "mgr/MgrStandby.h"
static void usage()
{
std::cout << "usage: ceph-mgr -i <ID> [flags]\n"
<< std::endl;
generic_server_usage();
}
/**
* A short main() which just instantiates a MgrStandby and
* hands over control to that.
*/
int main(int argc, const char **argv)
{
ceph_pthread_setname(pthread_self(), "ceph-mgr");
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
std::cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
std::map<std::string,std::string> defaults = {
{ "keyring", "$mgr_data/keyring" }
};
auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_MGR,
CODE_ENVIRONMENT_DAEMON, 0);
pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC);
global_init_daemonize(g_ceph_context);
global_init_chdir(g_ceph_context);
common_init_finish(g_ceph_context);
MgrStandby mgr(argc, argv);
int rc = mgr.init();
if (rc != 0) {
std::cerr << "Error in initialization: " << cpp_strerror(rc) << std::endl;
return rc;
}
return mgr.main(args);
}
| 1,832 | 22.5 | 80 |
cc
|
null |
ceph-main/src/ceph_mon.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <iostream>
#include <string>
#include "common/config.h"
#include "include/ceph_features.h"
#include "mon/MonMap.h"
#include "mon/Monitor.h"
#include "mon/MonitorDBStore.h"
#include "mon/MonClient.h"
#include "msg/Messenger.h"
#include "include/CompatSet.h"
#include "common/ceph_argparse.h"
#include "common/pick_address.h"
#include "common/Throttle.h"
#include "common/Timer.h"
#include "common/errno.h"
#include "common/Preforker.h"
#include "global/global_init.h"
#include "global/signal_handler.h"
#include "perfglue/heap_profiler.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_mon
using std::cerr;
using std::cout;
using std::list;
using std::map;
using std::ostringstream;
using std::string;
using std::vector;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
using ceph::JSONFormatter;
Monitor *mon = NULL;
void handle_mon_signal(int signum)
{
if (mon)
mon->handle_signal(signum);
}
int obtain_monmap(MonitorDBStore &store, bufferlist &bl)
{
dout(10) << __func__ << dendl;
/*
* the monmap may be in one of four places:
* 'mon_sync:temp_newer_monmap' - stashed newer map for bootstrap
* 'monmap:<latest_version_no>' - the monmap we'd really like to have
* 'mon_sync:latest_monmap' - last monmap backed up for the last sync
* 'mkfs:monmap' - a monmap resulting from mkfs
*/
if (store.exists("monmap", "last_committed")) {
version_t latest_ver = store.get("monmap", "last_committed");
if (store.exists("monmap", latest_ver)) {
int err = store.get("monmap", latest_ver, bl);
ceph_assert(err == 0);
ceph_assert(bl.length() > 0);
dout(10) << __func__ << " read last committed monmap ver "
<< latest_ver << dendl;
// see if there is stashed newer map (see bootstrap())
if (store.exists("mon_sync", "temp_newer_monmap")) {
bufferlist bl2;
int err = store.get("mon_sync", "temp_newer_monmap", bl2);
ceph_assert(err == 0);
ceph_assert(bl2.length() > 0);
MonMap b;
b.decode(bl2);
if (b.get_epoch() > latest_ver) {
dout(10) << __func__ << " using stashed monmap " << b.get_epoch()
<< " instead" << dendl;
bl = std::move(bl2);
} else {
dout(10) << __func__ << " ignoring stashed monmap " << b.get_epoch()
<< dendl;
}
}
return 0;
}
}
if (store.exists("mon_sync", "in_sync")
|| store.exists("mon_sync", "force_sync")) {
dout(10) << __func__ << " detected aborted sync" << dendl;
if (store.exists("mon_sync", "latest_monmap")) {
int err = store.get("mon_sync", "latest_monmap", bl);
ceph_assert(err == 0);
ceph_assert(bl.length() > 0);
dout(10) << __func__ << " read backup monmap" << dendl;
return 0;
}
}
if (store.exists("mon_sync", "temp_newer_monmap")) {
dout(10) << __func__ << " found temp_newer_monmap" << dendl;
int err = store.get("mon_sync", "temp_newer_monmap", bl);
ceph_assert(err == 0);
ceph_assert(bl.length() > 0);
return 0;
}
if (store.exists("mkfs", "monmap")) {
dout(10) << __func__ << " found mkfs monmap" << dendl;
int err = store.get("mkfs", "monmap", bl);
ceph_assert(err == 0);
ceph_assert(bl.length() > 0);
return 0;
}
derr << __func__ << " unable to find a monmap" << dendl;
return -ENOENT;
}
int check_mon_data_exists()
{
string mon_data = g_conf()->mon_data;
struct stat buf;
if (::stat(mon_data.c_str(), &buf)) {
if (errno != ENOENT) {
derr << "stat(" << mon_data << ") " << cpp_strerror(errno) << dendl;
}
return -errno;
}
return 0;
}
/** Check whether **mon data** is empty.
*
* Being empty means mkfs has not been run and there's no monitor setup
* at **g_conf()->mon_data**.
*
* If the directory g_conf()->mon_data is not empty we will return -ENOTEMPTY.
* Otherwise we will return 0. Any other negative returns will represent
* a failure to be handled by the caller.
*
* @return **0** on success, -ENOTEMPTY if not empty or **-errno** otherwise.
*/
int check_mon_data_empty()
{
string mon_data = g_conf()->mon_data;
DIR *dir = ::opendir(mon_data.c_str());
if (!dir) {
derr << "opendir(" << mon_data << ") " << cpp_strerror(errno) << dendl;
return -errno;
}
int code = 0;
struct dirent *de = nullptr;
errno = 0;
while ((de = ::readdir(dir))) {
if (string(".") != de->d_name &&
string("..") != de->d_name &&
string("kv_backend") != de->d_name) {
code = -ENOTEMPTY;
break;
}
}
if (!de && errno) {
derr << "readdir(" << mon_data << ") " << cpp_strerror(errno) << dendl;
code = -errno;
}
::closedir(dir);
return code;
}
static void usage()
{
cout << "usage: ceph-mon -i <ID> [flags]\n"
<< " --debug_mon n\n"
<< " debug monitor level (e.g. 10)\n"
<< " --mkfs\n"
<< " build fresh monitor fs\n"
<< " --force-sync\n"
<< " force a sync from another mon by wiping local data (BE CAREFUL)\n"
<< " --yes-i-really-mean-it\n"
<< " mandatory safeguard for --force-sync\n"
<< " --compact\n"
<< " compact the monitor store\n"
<< " --osdmap <filename>\n"
<< " only used when --mkfs is provided: load the osdmap from <filename>\n"
<< " --inject-monmap <filename>\n"
<< " write the <filename> monmap to the local monitor store and exit\n"
<< " --extract-monmap <filename>\n"
<< " extract the monmap from the local monitor store and exit\n"
<< " --mon-data <directory>\n"
<< " where the mon store and keyring are located\n"
<< " --set-crush-location <bucket>=<foo>"
<< " sets monitor's crush bucket location (only for stretch mode)"
<< std::endl;
generic_server_usage();
}
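// Expand the configured mon address into an addrvec. Illustratively: a blank
// port yields both [v2:addr:3300, v1:addr:6789]; the legacy port 6789 is
// tagged v1; an address of type ANY defaults to v2.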
entity_addrvec_t make_mon_addrs(entity_addr_t a)
{
entity_addrvec_t addrs;
if (a.get_port() == 0) {
a.set_type(entity_addr_t::TYPE_MSGR2);
a.set_port(CEPH_MON_PORT_IANA);
addrs.v.push_back(a);
a.set_type(entity_addr_t::TYPE_LEGACY);
a.set_port(CEPH_MON_PORT_LEGACY);
addrs.v.push_back(a);
} else if (a.get_port() == CEPH_MON_PORT_LEGACY) {
a.set_type(entity_addr_t::TYPE_LEGACY);
addrs.v.push_back(a);
} else if (a.get_type() == entity_addr_t::TYPE_ANY) {
a.set_type(entity_addr_t::TYPE_MSGR2);
addrs.v.push_back(a);
} else {
addrs.v.push_back(a);
}
return addrs;
}
int main(int argc, const char **argv)
{
// reset our process name, in case we did a respawn, so that it's not
// left as "exe".
ceph_pthread_setname(pthread_self(), "ceph-mon");
int err;
bool mkfs = false;
bool compact = false;
bool force_sync = false;
bool yes_really = false;
std::string osdmapfn, inject_monmap, extract_monmap, crush_loc;
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
// We need to specify some default values that may be overridden by the
// user, that are specific to the monitor. The options we are overriding
// are also used on the OSD, so changing the global defaults is not an option.
// This is not the prettiest way of doing this, especially since it has us
// having a different place defining default values, but it's not horribly
// wrong enough to prevent us from doing it :)
//
// NOTE: user-defined options will take precedence over ours.
map<string,string> defaults = {
{ "keyring", "$mon_data/keyring" },
};
int flags = 0;
{
vector<const char*> args_copy = args;
std::string val;
for (std::vector<const char*>::iterator i = args_copy.begin();
i != args_copy.end(); ) {
if (ceph_argparse_double_dash(args_copy, i)) {
break;
} else if (ceph_argparse_flag(args_copy, i, "--mkfs", (char*)NULL)) {
flags |= CINIT_FLAG_NO_DAEMON_ACTIONS;
} else if (ceph_argparse_witharg(args_copy, i, &val, "--inject_monmap", (char*)NULL)) {
flags |= CINIT_FLAG_NO_DAEMON_ACTIONS;
} else if (ceph_argparse_witharg(args_copy, i, &val, "--extract-monmap", (char*)NULL)) {
flags |= CINIT_FLAG_NO_DAEMON_ACTIONS;
} else {
++i;
}
}
}
// don't try to get config from mon cluster during startup
flags |= CINIT_FLAG_NO_MON_CONFIG;
auto cct = global_init(&defaults, args,
CEPH_ENTITY_TYPE_MON, CODE_ENVIRONMENT_DAEMON,
flags);
ceph_heap_profiler_init();
std::string val;
for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ) {
if (ceph_argparse_double_dash(args, i)) {
break;
} else if (ceph_argparse_flag(args, i, "--mkfs", (char*)NULL)) {
mkfs = true;
} else if (ceph_argparse_flag(args, i, "--compact", (char*)NULL)) {
compact = true;
} else if (ceph_argparse_flag(args, i, "--force-sync", (char*)NULL)) {
force_sync = true;
} else if (ceph_argparse_flag(args, i, "--yes-i-really-mean-it", (char*)NULL)) {
yes_really = true;
} else if (ceph_argparse_witharg(args, i, &val, "--osdmap", (char*)NULL)) {
osdmapfn = val;
} else if (ceph_argparse_witharg(args, i, &val, "--inject_monmap", (char*)NULL)) {
inject_monmap = val;
} else if (ceph_argparse_witharg(args, i, &val, "--extract-monmap", (char*)NULL)) {
extract_monmap = val;
} else if (ceph_argparse_witharg(args, i, &val, "--set-crush-location", (char*)NULL)) {
crush_loc = val;
} else {
++i;
}
}
if (!args.empty()) {
cerr << "too many arguments: " << args << std::endl;
exit(1);
}
if (force_sync && !yes_really) {
cerr << "are you SURE you want to force a sync? this will erase local data and may\n"
<< "break your mon cluster. pass --yes-i-really-mean-it if you do." << std::endl;
exit(1);
}
if (g_conf()->mon_data.empty()) {
cerr << "must specify '--mon-data=foo' data path" << std::endl;
exit(1);
}
if (g_conf()->name.get_id().empty()) {
cerr << "must specify id (--id <id> or --name mon.<id>)" << std::endl;
exit(1);
}
// -- mkfs --
if (mkfs) {
int err = check_mon_data_exists();
if (err == -ENOENT) {
if (::mkdir(g_conf()->mon_data.c_str(), 0755)) {
derr << "mkdir(" << g_conf()->mon_data << ") : "
<< cpp_strerror(errno) << dendl;
exit(1);
}
} else if (err < 0) {
derr << "error opening '" << g_conf()->mon_data << "': "
<< cpp_strerror(-err) << dendl;
exit(-err);
}
err = check_mon_data_empty();
if (err == -ENOTEMPTY) {
// Mon may exist. Let the user know and exit gracefully.
derr << "'" << g_conf()->mon_data << "' already exists and is not empty"
<< ": monitor may already exist" << dendl;
exit(0);
} else if (err < 0) {
derr << "error checking if '" << g_conf()->mon_data << "' is empty: "
<< cpp_strerror(-err) << dendl;
exit(-err);
}
// resolve public_network -> public_addr
pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC);
dout(10) << "public_network " << g_conf()->public_network << dendl;
dout(10) << "public_addr " << g_conf()->public_addr << dendl;
dout(10) << "public_addrv " << g_conf()->public_addrv << dendl;
common_init_finish(g_ceph_context);
bufferlist monmapbl, osdmapbl;
std::string error;
MonMap monmap;
// load or generate monmap
const auto monmap_fn = g_conf().get_val<string>("monmap");
if (monmap_fn.length()) {
int err = monmapbl.read_file(monmap_fn.c_str(), &error);
if (err < 0) {
derr << argv[0] << ": error reading " << monmap_fn << ": " << error << dendl;
exit(1);
}
try {
monmap.decode(monmapbl);
// always mark seed/mkfs monmap as epoch 0
monmap.set_epoch(0);
} catch (const ceph::buffer::error& e) {
derr << argv[0] << ": error decoding monmap " << monmap_fn << ": " << e.what() << dendl;
exit(1);
}
dout(1) << "imported monmap:\n";
monmap.print(*_dout);
*_dout << dendl;
} else {
ostringstream oss;
int err = monmap.build_initial(g_ceph_context, true, oss);
if (oss.tellp())
derr << oss.str() << dendl;
if (err < 0) {
derr << argv[0] << ": warning: no initial monitors; must use admin socket to feed hints" << dendl;
}
dout(1) << "initial generated monmap:\n";
monmap.print(*_dout);
*_dout << dendl;
// am i part of the initial quorum?
if (monmap.contains(g_conf()->name.get_id())) {
// hmm, make sure the ip listed exists on the current host?
// maybe later.
} else if (!g_conf()->public_addrv.empty()) {
entity_addrvec_t av = g_conf()->public_addrv;
string name;
if (monmap.contains(av, &name)) {
monmap.rename(name, g_conf()->name.get_id());
dout(0) << argv[0] << ": renaming mon." << name << " " << av
<< " to mon." << g_conf()->name.get_id() << dendl;
}
} else if (!g_conf()->public_addr.is_blank_ip()) {
entity_addrvec_t av = make_mon_addrs(g_conf()->public_addr);
string name;
if (monmap.contains(av, &name)) {
monmap.rename(name, g_conf()->name.get_id());
dout(0) << argv[0] << ": renaming mon." << name << " " << av
<< " to mon." << g_conf()->name.get_id() << dendl;
}
} else {
// is a local address listed without a name? if so, name myself.
list<entity_addr_t> ls;
monmap.list_addrs(ls);
dout(0) << " monmap addrs are " << ls << ", checking if any are local"
<< dendl;
entity_addr_t local;
if (have_local_addr(g_ceph_context, ls, &local)) {
dout(0) << " have local addr " << local << dendl;
string name;
local.set_type(entity_addr_t::TYPE_MSGR2);
if (!monmap.get_addr_name(local, name)) {
local.set_type(entity_addr_t::TYPE_LEGACY);
if (!monmap.get_addr_name(local, name)) {
dout(0) << "no local addresses appear in bootstrap monmap"
<< dendl;
}
}
if (name.compare(0, 7, "noname-") == 0) {
dout(0) << argv[0] << ": mon." << name << " " << local
<< " is local, renaming to mon." << g_conf()->name.get_id()
<< dendl;
monmap.rename(name, g_conf()->name.get_id());
} else if (name.size()) {
dout(0) << argv[0] << ": mon." << name << " " << local
<< " is local, but not 'noname-' + something; "
<< "not assuming it's me" << dendl;
}
} else {
dout(0) << " no local addrs match monmap" << dendl;
}
}
}
const auto fsid = g_conf().get_val<uuid_d>("fsid");
if (!fsid.is_zero()) {
monmap.fsid = fsid;
dout(0) << argv[0] << ": set fsid to " << fsid << dendl;
}
if (monmap.fsid.is_zero()) {
derr << argv[0] << ": generated monmap has no fsid; use '--fsid <uuid>'" << dendl;
exit(10);
}
//monmap.print(cout);
// osdmap
if (osdmapfn.length()) {
err = osdmapbl.read_file(osdmapfn.c_str(), &error);
if (err < 0) {
derr << argv[0] << ": error reading " << osdmapfn << ": "
<< error << dendl;
exit(1);
}
}
// go
MonitorDBStore store(g_conf()->mon_data);
ostringstream oss;
int r = store.create_and_open(oss);
if (oss.tellp())
derr << oss.str() << dendl;
if (r < 0) {
derr << argv[0] << ": error opening mon data directory at '"
<< g_conf()->mon_data << "': " << cpp_strerror(r) << dendl;
exit(1);
}
ceph_assert(r == 0);
Monitor mon(g_ceph_context, g_conf()->name.get_id(), &store, 0, 0, &monmap);
r = mon.mkfs(osdmapbl);
if (r < 0) {
derr << argv[0] << ": error creating monfs: " << cpp_strerror(r) << dendl;
exit(1);
}
store.close();
dout(0) << argv[0] << ": created monfs at " << g_conf()->mon_data
<< " for " << g_conf()->name << dendl;
return 0;
}
err = check_mon_data_exists();
if (err == -ENOENT) {
derr << "monitor data directory at '" << g_conf()->mon_data << "'"
<< " does not exist: have you run 'mkfs'?" << dendl;
exit(1);
} else if (err < 0) {
derr << "error accessing monitor data directory at '"
<< g_conf()->mon_data << "': " << cpp_strerror(-err) << dendl;
exit(1);
}
err = check_mon_data_empty();
if (err == 0) {
derr << "monitor data directory at '" << g_conf()->mon_data
<< "' is empty: have you run 'mkfs'?" << dendl;
exit(1);
} else if (err < 0 && err != -ENOTEMPTY) {
// we don't want an empty data dir by now
derr << "error accessing '" << g_conf()->mon_data << "': "
<< cpp_strerror(-err) << dendl;
exit(1);
}
{
// check fs stats. don't start if it's critically close to full.
ceph_data_stats_t stats;
int err = get_fs_stats(stats, g_conf()->mon_data.c_str());
if (err < 0) {
derr << "error checking monitor data's fs stats: " << cpp_strerror(err)
<< dendl;
exit(-err);
}
if (stats.avail_percent <= g_conf()->mon_data_avail_crit) {
derr << "error: monitor data filesystem reached concerning levels of"
<< " available storage space (available: "
<< stats.avail_percent << "% " << byte_u_t(stats.byte_avail)
<< ")\nyou may adjust 'mon data avail crit' to a lower value"
<< " to make this go away (default: " << g_conf()->mon_data_avail_crit
<< "%)\n" << dendl;
exit(ENOSPC);
}
}
Preforker prefork;
if (!(flags & CINIT_FLAG_NO_DAEMON_ACTIONS)) {
if (global_init_prefork(g_ceph_context) >= 0) {
string err_msg;
err = prefork.prefork(err_msg);
if (err < 0) {
derr << err_msg << dendl;
prefork.exit(err);
}
if (prefork.is_parent()) {
err = prefork.parent_wait(err_msg);
if (err < 0)
derr << err_msg << dendl;
prefork.exit(err);
}
setsid();
global_init_postfork_start(g_ceph_context);
}
common_init_finish(g_ceph_context);
global_init_chdir(g_ceph_context);
if (global_init_preload_erasure_code(g_ceph_context) < 0)
prefork.exit(1);
}
// set up signal handlers, now that we've daemonized/forked.
init_async_signal_handler();
MonitorDBStore *store = new MonitorDBStore(g_conf()->mon_data);
// make sure we aren't upgrading too fast
{
string val;
int r = store->read_meta("min_mon_release", &val);
if (r >= 0 && val.size()) {
ceph_release_t from_release = ceph_release_from_name(val);
ostringstream err;
if (!can_upgrade_from(from_release, "min_mon_release", err)) {
derr << err.str() << dendl;
prefork.exit(1);
}
}
}
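  // (an absent or empty min_mon_release is tolerated above: stores created
  // before this meta key existed simply never wrote it)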
{
ostringstream oss;
err = store->open(oss);
if (oss.tellp())
derr << oss.str() << dendl;
if (err < 0) {
derr << "error opening mon data directory at '"
<< g_conf()->mon_data << "': " << cpp_strerror(err) << dendl;
prefork.exit(1);
}
}
bufferlist magicbl;
err = store->get(Monitor::MONITOR_NAME, "magic", magicbl);
if (err || !magicbl.length()) {
derr << "unable to read magic from mon data" << dendl;
prefork.exit(1);
}
string magic(magicbl.c_str(), magicbl.length()-1); // ignore trailing \n
if (strcmp(magic.c_str(), CEPH_MON_ONDISK_MAGIC)) {
derr << "mon fs magic '" << magic << "' != current '" << CEPH_MON_ONDISK_MAGIC << "'" << dendl;
prefork.exit(1);
}
err = Monitor::check_features(store);
if (err < 0) {
derr << "error checking features: " << cpp_strerror(err) << dendl;
prefork.exit(1);
}
// inject new monmap?
if (!inject_monmap.empty()) {
bufferlist bl;
std::string error;
int r = bl.read_file(inject_monmap.c_str(), &error);
if (r) {
derr << "unable to read monmap from " << inject_monmap << ": "
<< error << dendl;
prefork.exit(1);
}
// get next version
version_t v = store->get("monmap", "last_committed");
dout(0) << "last committed monmap epoch is " << v << ", injected map will be " << (v+1)
<< dendl;
v++;
// set the version
MonMap tmp;
tmp.decode(bl);
if (tmp.get_epoch() != v) {
dout(0) << "changing monmap epoch from " << tmp.get_epoch()
<< " to " << v << dendl;
tmp.set_epoch(v);
}
bufferlist mapbl;
tmp.encode(mapbl, CEPH_FEATURES_ALL);
bufferlist final;
encode(v, final);
encode(mapbl, final);
auto t(std::make_shared<MonitorDBStore::Transaction>());
// save it
t->put("monmap", v, mapbl);
t->put("monmap", "latest", final);
t->put("monmap", "last_committed", v);
store->apply_transaction(t);
dout(0) << "done." << dendl;
prefork.exit(0);
}
// monmap?
MonMap monmap;
{
// note that even if we don't find a viable monmap, we should go ahead
// and try to build it up in the next if-else block.
bufferlist mapbl;
int err = obtain_monmap(*store, mapbl);
if (err >= 0) {
try {
monmap.decode(mapbl);
} catch (const ceph::buffer::error& e) {
derr << "can't decode monmap: " << e.what() << dendl;
}
} else {
derr << "unable to obtain a monmap: " << cpp_strerror(err) << dendl;
}
dout(10) << __func__ << " monmap:\n";
JSONFormatter jf(true);
jf.dump_object("monmap", monmap);
jf.flush(*_dout);
*_dout << dendl;
if (!extract_monmap.empty()) {
int r = mapbl.write_file(extract_monmap.c_str());
if (r < 0) {
r = -errno;
derr << "error writing monmap to " << extract_monmap << ": " << cpp_strerror(r) << dendl;
prefork.exit(1);
}
derr << "wrote monmap to " << extract_monmap << dendl;
prefork.exit(0);
}
}
  // this is what I will bind to
entity_addrvec_t ipaddrs;
if (monmap.contains(g_conf()->name.get_id())) {
ipaddrs = monmap.get_addrs(g_conf()->name.get_id());
// print helpful warning if the conf file doesn't match
std::vector<std::string> my_sections = g_conf().get_my_sections();
std::string mon_addr_str;
if (g_conf().get_val_from_conf_file(my_sections, "mon addr",
mon_addr_str, true) == 0) {
entity_addr_t conf_addr;
if (conf_addr.parse(mon_addr_str)) {
entity_addrvec_t conf_addrs = make_mon_addrs(conf_addr);
if (ipaddrs != conf_addrs) {
derr << "WARNING: 'mon addr' config option " << conf_addrs
<< " does not match monmap file" << std::endl
<< " continuing with monmap configuration" << dendl;
}
} else
derr << "WARNING: invalid 'mon addr' config option" << std::endl
<< " continuing with monmap configuration" << dendl;
}
} else {
dout(0) << g_conf()->name << " does not exist in monmap, will attempt to join an existing cluster" << dendl;
pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC);
if (!g_conf()->public_addrv.empty()) {
ipaddrs = g_conf()->public_addrv;
dout(0) << "using public_addrv " << ipaddrs << dendl;
} else if (!g_conf()->public_addr.is_blank_ip()) {
ipaddrs = make_mon_addrs(g_conf()->public_addr);
dout(0) << "using public_addr " << g_conf()->public_addr << " -> "
<< ipaddrs << dendl;
} else {
MonMap tmpmap;
ostringstream oss;
int err = tmpmap.build_initial(g_ceph_context, true, oss);
if (oss.tellp())
derr << oss.str() << dendl;
if (err < 0) {
derr << argv[0] << ": error generating initial monmap: "
<< cpp_strerror(err) << dendl;
prefork.exit(1);
}
if (tmpmap.contains(g_conf()->name.get_id())) {
ipaddrs = tmpmap.get_addrs(g_conf()->name.get_id());
} else {
derr << "no public_addr or public_network specified, and "
<< g_conf()->name << " not present in monmap or ceph.conf" << dendl;
prefork.exit(1);
}
}
}
// bind
int rank = monmap.get_rank(g_conf()->name.get_id());
  std::string public_msgr_type = g_conf()->ms_public_type.empty() ?
    g_conf().get_val<std::string>("ms_type") : g_conf()->ms_public_type;
Messenger *msgr = Messenger::create(g_ceph_context, public_msgr_type,
entity_name_t::MON(rank), "mon", 0);
if (!msgr)
exit(1);
msgr->set_cluster_protocol(CEPH_MON_PROTOCOL);
msgr->set_default_send_priority(CEPH_MSG_PRIO_HIGH);
msgr->set_default_policy(Messenger::Policy::stateless_server(0));
msgr->set_policy(entity_name_t::TYPE_MON,
Messenger::Policy::lossless_peer_reuse(
CEPH_FEATURE_SERVER_LUMINOUS));
msgr->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::stateless_server(
CEPH_FEATURE_SERVER_LUMINOUS));
msgr->set_policy(entity_name_t::TYPE_CLIENT,
Messenger::Policy::stateless_server(0));
msgr->set_policy(entity_name_t::TYPE_MDS,
Messenger::Policy::stateless_server(0));
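  // Rationale: mon<->mon sessions must survive reconnects without message
  // loss (lossless_peer_reuse), while OSDs, MDSs and clients re-establish
  // their own sessions, so stateless server policies suffice for them.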
// throttle client traffic
Throttle *client_throttler = new Throttle(g_ceph_context, "mon_client_bytes",
g_conf()->mon_client_bytes);
msgr->set_policy_throttlers(entity_name_t::TYPE_CLIENT,
client_throttler, NULL);
// throttle daemon traffic
// NOTE: actual usage on the leader may multiply by the number of
// monitors if they forward large update messages from daemons.
Throttle *daemon_throttler = new Throttle(g_ceph_context, "mon_daemon_bytes",
g_conf()->mon_daemon_bytes);
msgr->set_policy_throttlers(entity_name_t::TYPE_OSD, daemon_throttler,
NULL);
msgr->set_policy_throttlers(entity_name_t::TYPE_MDS, daemon_throttler,
NULL);
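  // (mon_client_bytes and mon_daemon_bytes bound the amount of in-flight
  // message data per type -- on the order of 100 MB and 400 MB by default;
  // see the option definitions for the authoritative values.)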
entity_addrvec_t bind_addrs = ipaddrs;
entity_addrvec_t public_addrs = ipaddrs;
// check if the public_bind_addr option is set
if (!g_conf()->public_bind_addr.is_blank_ip()) {
bind_addrs = make_mon_addrs(g_conf()->public_bind_addr);
}
dout(0) << "starting " << g_conf()->name << " rank " << rank
<< " at public addrs " << public_addrs
<< " at bind addrs " << bind_addrs
<< " mon_data " << g_conf()->mon_data
<< " fsid " << monmap.get_fsid()
<< dendl;
Messenger *mgr_msgr = Messenger::create(g_ceph_context, public_msgr_type,
entity_name_t::MON(rank), "mon-mgrc",
Messenger::get_random_nonce());
if (!mgr_msgr) {
derr << "unable to create mgr_msgr" << dendl;
prefork.exit(1);
}
mon = new Monitor(g_ceph_context, g_conf()->name.get_id(), store,
msgr, mgr_msgr, &monmap);
mon->orig_argc = argc;
mon->orig_argv = argv;
if (force_sync) {
derr << "flagging a forced sync ..." << dendl;
ostringstream oss;
JSONFormatter jf(true);
mon->sync_force(&jf);
derr << "out:\n";
jf.flush(*_dout);
*_dout << dendl;
}
err = mon->preinit();
if (err < 0) {
derr << "failed to initialize" << dendl;
prefork.exit(1);
}
if (compact || g_conf()->mon_compact_on_start) {
derr << "compacting monitor store ..." << dendl;
mon->store->compact();
derr << "done compacting" << dendl;
}
// bind
err = msgr->bindv(bind_addrs, public_addrs);
if (err < 0) {
derr << "unable to bind monitor to " << bind_addrs << dendl;
prefork.exit(1);
}
if (g_conf()->daemonize) {
global_init_postfork_finish(g_ceph_context);
prefork.daemonize();
}
msgr->start();
mgr_msgr->start();
mon->set_mon_crush_location(crush_loc);
mon->init();
register_async_signal_handler_oneshot(SIGINT, handle_mon_signal);
register_async_signal_handler_oneshot(SIGTERM, handle_mon_signal);
register_async_signal_handler(SIGHUP, handle_mon_signal);
if (g_conf()->inject_early_sigterm)
kill(getpid(), SIGTERM);
msgr->wait();
mgr_msgr->wait();
store->close();
unregister_async_signal_handler(SIGHUP, handle_mon_signal);
unregister_async_signal_handler(SIGINT, handle_mon_signal);
unregister_async_signal_handler(SIGTERM, handle_mon_signal);
shutdown_async_signal_handler();
delete mon;
delete store;
delete msgr;
delete mgr_msgr;
delete client_throttler;
delete daemon_throttler;
// cd on exit, so that gmon.out (if any) goes into a separate directory for each node.
char s[20];
snprintf(s, sizeof(s), "gmon/%d", getpid());
if ((mkdir(s, 0755) == 0) && (chdir(s) == 0)) {
dout(0) << "ceph-mon: gmon.out should be in " << s << dendl;
}
prefork.signal_exit(0);
return 0;
}
| 28,712 | 29.97411 | 136 |
cc
|
null |
ceph-main/src/ceph_osd.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <boost/scoped_ptr.hpp>
#include <iostream>
#include <string>
#include "auth/KeyRing.h"
#include "osd/OSD.h"
#include "os/ObjectStore.h"
#include "mon/MonClient.h"
#include "include/ceph_features.h"
#include "common/config.h"
#include "extblkdev/ExtBlkDevPlugin.h"
#include "mon/MonMap.h"
#include "msg/Messenger.h"
#include "common/Throttle.h"
#include "common/Timer.h"
#include "common/TracepointProvider.h"
#include "common/ceph_argparse.h"
#include "common/numa.h"
#include "global/global_init.h"
#include "global/signal_handler.h"
#include "include/color.h"
#include "common/errno.h"
#include "common/pick_address.h"
#include "perfglue/heap_profiler.h"
#include "include/ceph_assert.h"
#include "common/Preforker.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
using std::cerr;
using std::cout;
using std::map;
using std::ostringstream;
using std::string;
using std::vector;
using ceph::bufferlist;
namespace {
TracepointProvider::Traits osd_tracepoint_traits("libosd_tp.so",
"osd_tracing");
TracepointProvider::Traits os_tracepoint_traits("libos_tp.so",
"osd_objectstore_tracing");
TracepointProvider::Traits bluestore_tracepoint_traits("libbluestore_tp.so",
"bluestore_tracing");
#ifdef WITH_OSD_INSTRUMENT_FUNCTIONS
TracepointProvider::Traits cyg_profile_traits("libcyg_profile_tp.so",
"osd_function_tracing");
#endif
} // anonymous namespace
OSD *osdptr = nullptr;
void handle_osd_signal(int signum)
{
if (osdptr)
osdptr->handle_signal(signum);
}
static void usage()
{
cout << "usage: ceph-osd -i <ID> [flags]\n"
<< " --osd-data PATH data directory\n"
<< " --osd-journal PATH\n"
<< " journal file or block device\n"
<< " --mkfs create a [new] data directory\n"
<< " --mkkey generate a new secret key. This is normally used in combination with --mkfs\n"
<< " --monmap specify the path to the monitor map. This is normally used in combination with --mkfs\n"
<< " --osd-uuid specify the OSD's fsid. This is normally used in combination with --mkfs\n"
<< " --keyring specify a path to the osd keyring. This is normally used in combination with --mkfs\n"
<< " --convert-filestore\n"
<< " run any pending upgrade operations\n"
<< " --flush-journal flush all data out of journal\n"
<< " --osdspec-affinity\n"
<< " set affinity to an osdspec\n"
<< " --dump-journal dump all data of journal\n"
<< " --mkjournal initialize a new journal\n"
<< " --check-wants-journal\n"
<< " check whether a journal is desired\n"
<< " --check-allows-journal\n"
<< " check whether a journal is allowed\n"
<< " --check-needs-journal\n"
<< " check whether a journal is required\n"
<< " --debug_osd <N> set debug level (e.g. 10)\n"
<< " --get-device-fsid PATH\n"
<< " get OSD fsid for the given block device\n"
<< std::endl;
generic_server_usage();
}
int main(int argc, const char **argv)
{
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
auto cct = global_init(
nullptr,
args, CEPH_ENTITY_TYPE_OSD,
CODE_ENVIRONMENT_DAEMON, 0);
ceph_heap_profiler_init();
Preforker forker;
// osd specific args
bool mkfs = false;
bool mkjournal = false;
bool check_wants_journal = false;
bool check_allows_journal = false;
bool check_needs_journal = false;
bool mkkey = false;
bool flushjournal = false;
bool dump_journal = false;
bool convertfilestore = false;
bool get_osd_fsid = false;
bool get_cluster_fsid = false;
bool get_journal_fsid = false;
bool get_device_fsid = false;
string device_path;
std::string dump_pg_log;
std::string osdspec_affinity;
std::string val;
for (std::vector<const char*>::iterator i = args.begin(); i != args.end(); ) {
if (ceph_argparse_double_dash(args, i)) {
break;
} else if (ceph_argparse_flag(args, i, "--mkfs", (char*)NULL)) {
mkfs = true;
} else if (ceph_argparse_witharg(args, i, &val, "--osdspec-affinity", (char*)NULL)) {
osdspec_affinity = val;
} else if (ceph_argparse_flag(args, i, "--mkjournal", (char*)NULL)) {
mkjournal = true;
} else if (ceph_argparse_flag(args, i, "--check-allows-journal", (char*)NULL)) {
check_allows_journal = true;
} else if (ceph_argparse_flag(args, i, "--check-wants-journal", (char*)NULL)) {
check_wants_journal = true;
} else if (ceph_argparse_flag(args, i, "--check-needs-journal", (char*)NULL)) {
check_needs_journal = true;
} else if (ceph_argparse_flag(args, i, "--mkkey", (char*)NULL)) {
mkkey = true;
} else if (ceph_argparse_flag(args, i, "--flush-journal", (char*)NULL)) {
flushjournal = true;
} else if (ceph_argparse_flag(args, i, "--convert-filestore", (char*)NULL)) {
convertfilestore = true;
} else if (ceph_argparse_witharg(args, i, &val, "--dump-pg-log", (char*)NULL)) {
dump_pg_log = val;
} else if (ceph_argparse_flag(args, i, "--dump-journal", (char*)NULL)) {
dump_journal = true;
} else if (ceph_argparse_flag(args, i, "--get-cluster-fsid", (char*)NULL)) {
get_cluster_fsid = true;
} else if (ceph_argparse_flag(args, i, "--get-osd-fsid", "--get-osd-uuid", (char*)NULL)) {
get_osd_fsid = true;
} else if (ceph_argparse_flag(args, i, "--get-journal-fsid", "--get-journal-uuid", (char*)NULL)) {
get_journal_fsid = true;
} else if (ceph_argparse_witharg(args, i, &device_path,
"--get-device-fsid", (char*)NULL)) {
get_device_fsid = true;
} else {
++i;
}
}
if (!args.empty()) {
cerr << "unrecognized arg " << args[0] << std::endl;
exit(1);
}
if (global_init_prefork(g_ceph_context) >= 0) {
std::string err;
int r = forker.prefork(err);
if (r < 0) {
cerr << err << std::endl;
return r;
}
if (forker.is_parent()) {
g_ceph_context->_log->start();
if (forker.parent_wait(err) != 0) {
return -ENXIO;
}
return 0;
}
setsid();
global_init_postfork_start(g_ceph_context);
}
common_init_finish(g_ceph_context);
global_init_chdir(g_ceph_context);
if (get_journal_fsid) {
device_path = g_conf().get_val<std::string>("osd_journal");
get_device_fsid = true;
}
if (get_device_fsid) {
uuid_d uuid;
int r = ObjectStore::probe_block_device_fsid(g_ceph_context, device_path,
&uuid);
if (r < 0) {
cerr << "failed to get device fsid for " << device_path
<< ": " << cpp_strerror(r) << std::endl;
forker.exit(1);
}
cout << uuid << std::endl;
forker.exit(0);
}
if (!dump_pg_log.empty()) {
common_init_finish(g_ceph_context);
bufferlist bl;
std::string error;
if (bl.read_file(dump_pg_log.c_str(), &error) >= 0) {
pg_log_entry_t e;
auto p = bl.cbegin();
while (!p.end()) {
uint64_t pos = p.get_off();
try {
decode(e, p);
}
catch (const ceph::buffer::error &e) {
derr << "failed to decode LogEntry at offset " << pos << dendl;
forker.exit(1);
}
derr << pos << ":\t" << e << dendl;
}
} else {
derr << "unable to open " << dump_pg_log << ": " << error << dendl;
}
forker.exit(0);
}
// whoami
char *end;
const char *id = g_conf()->name.get_id().c_str();
int whoami = strtol(id, &end, 10);
std::string data_path = g_conf().get_val<std::string>("osd_data");
if (*end || end == id || whoami < 0) {
derr << "must specify '-i #' where # is the osd number" << dendl;
forker.exit(1);
}
if (data_path.empty()) {
derr << "must specify '--osd-data=foo' data path" << dendl;
forker.exit(1);
}
// the store
std::string store_type;
{
char fn[PATH_MAX];
snprintf(fn, sizeof(fn), "%s/type", data_path.c_str());
int fd = ::open(fn, O_RDONLY|O_CLOEXEC);
if (fd >= 0) {
bufferlist bl;
bl.read_fd(fd, 64);
if (bl.length()) {
store_type = string(bl.c_str(), bl.length() - 1); // drop \n
dout(5) << "object store type is " << store_type << dendl;
}
::close(fd);
} else if (mkfs) {
store_type = g_conf().get_val<std::string>("osd_objectstore");
} else {
// hrm, infer the type
snprintf(fn, sizeof(fn), "%s/current", data_path.c_str());
struct stat st;
if (::stat(fn, &st) == 0 &&
S_ISDIR(st.st_mode)) {
derr << "missing 'type' file, inferring filestore from current/ dir"
<< dendl;
store_type = "filestore";
} else {
snprintf(fn, sizeof(fn), "%s/block", data_path.c_str());
if (::stat(fn, &st) == 0 &&
S_ISLNK(st.st_mode)) {
derr << "missing 'type' file, inferring bluestore from block symlink"
<< dendl;
store_type = "bluestore";
} else {
derr << "missing 'type' file and unable to infer osd type" << dendl;
forker.exit(1);
}
}
}
}
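  // (the 'type' file is a single line such as "bluestore\n" written when the
  // OSD is created; the trailing newline is stripped when read above)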
std::string journal_path = g_conf().get_val<std::string>("osd_journal");
uint32_t flags = g_conf().get_val<uint64_t>("osd_os_flags");
std::unique_ptr<ObjectStore> store = ObjectStore::create(g_ceph_context,
store_type,
data_path,
journal_path,
flags);
if (!store) {
derr << "unable to create object store" << dendl;
forker.exit(-ENODEV);
}
if (mkkey) {
common_init_finish(g_ceph_context);
KeyRing keyring;
EntityName ename{g_conf()->name};
EntityAuth eauth;
std::string keyring_path = g_conf().get_val<std::string>("keyring");
int ret = keyring.load(g_ceph_context, keyring_path);
if (ret == 0 &&
keyring.get_auth(ename, eauth)) {
derr << "already have key in keyring " << keyring_path << dendl;
} else {
eauth.key.create(g_ceph_context, CEPH_CRYPTO_AES);
keyring.add(ename, eauth);
bufferlist bl;
keyring.encode_plaintext(bl);
int r = bl.write_file(keyring_path.c_str(), 0600);
if (r)
derr << TEXT_RED << " ** ERROR: writing new keyring to "
<< keyring_path << ": " << cpp_strerror(r) << TEXT_NORMAL
<< dendl;
else
derr << "created new key in keyring " << keyring_path << dendl;
}
}
if (mkfs) {
common_init_finish(g_ceph_context);
if (g_conf().get_val<uuid_d>("fsid").is_zero()) {
derr << "must specify cluster fsid" << dendl;
forker.exit(-EINVAL);
}
int err = OSD::mkfs(g_ceph_context, std::move(store), g_conf().get_val<uuid_d>("fsid"),
whoami, osdspec_affinity);
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error creating empty object store in "
<< data_path << ": " << cpp_strerror(-err) << TEXT_NORMAL << dendl;
forker.exit(1);
}
dout(0) << "created object store " << data_path
<< " for osd." << whoami
<< " fsid " << g_conf().get_val<uuid_d>("fsid")
<< dendl;
}
if (mkfs || mkkey) {
forker.exit(0);
}
if (mkjournal) {
common_init_finish(g_ceph_context);
int err = store->mkjournal();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error creating fresh journal "
<< journal_path << " for object store " << data_path << ": "
<< cpp_strerror(-err) << TEXT_NORMAL << dendl;
forker.exit(1);
}
derr << "created new journal " << journal_path
<< " for object store " << data_path << dendl;
forker.exit(0);
}
if (check_wants_journal) {
if (store->wants_journal()) {
cout << "wants journal: yes" << std::endl;
forker.exit(0);
} else {
cout << "wants journal: no" << std::endl;
forker.exit(1);
}
}
if (check_allows_journal) {
if (store->allows_journal()) {
cout << "allows journal: yes" << std::endl;
forker.exit(0);
} else {
cout << "allows journal: no" << std::endl;
forker.exit(1);
}
}
if (check_needs_journal) {
if (store->needs_journal()) {
cout << "needs journal: yes" << std::endl;
forker.exit(0);
} else {
cout << "needs journal: no" << std::endl;
forker.exit(1);
}
}
if (flushjournal) {
common_init_finish(g_ceph_context);
int err = store->mount();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error flushing journal " << journal_path
<< " for object store " << data_path
<< ": " << cpp_strerror(-err) << TEXT_NORMAL << dendl;
goto flushjournal_out;
}
store->umount();
derr << "flushed journal " << journal_path
<< " for object store " << data_path
<< dendl;
flushjournal_out:
store.reset();
forker.exit(err < 0 ? 1 : 0);
}
if (dump_journal) {
common_init_finish(g_ceph_context);
int err = store->dump_journal(cout);
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error dumping journal " << journal_path
<< " for object store " << data_path
<< ": " << cpp_strerror(-err) << TEXT_NORMAL << dendl;
forker.exit(1);
}
derr << "dumped journal " << journal_path
<< " for object store " << data_path
<< dendl;
forker.exit(0);
}
if (convertfilestore) {
int err = store->mount();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error mounting store " << data_path
<< ": " << cpp_strerror(-err) << TEXT_NORMAL << dendl;
forker.exit(1);
}
err = store->upgrade();
store->umount();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: error converting store " << data_path
<< ": " << cpp_strerror(-err) << TEXT_NORMAL << dendl;
forker.exit(1);
}
forker.exit(0);
}
{
int r = extblkdev::preload(g_ceph_context);
if (r < 0) {
derr << "Failed preloading extblkdev plugins, error code: " << r << dendl;
forker.exit(1);
}
}
string magic;
uuid_d cluster_fsid, osd_fsid;
ceph_release_t require_osd_release = ceph_release_t::unknown;
int w;
int r = OSD::peek_meta(store.get(), &magic, &cluster_fsid, &osd_fsid, &w,
&require_osd_release);
if (r < 0) {
derr << TEXT_RED << " ** ERROR: unable to open OSD superblock on "
<< data_path << ": " << cpp_strerror(-r)
<< TEXT_NORMAL << dendl;
if (r == -ENOTSUP) {
derr << TEXT_RED << " ** please verify that underlying storage "
<< "supports xattrs" << TEXT_NORMAL << dendl;
}
forker.exit(1);
}
if (w != whoami) {
derr << "OSD id " << w << " != my id " << whoami << dendl;
forker.exit(1);
}
if (strcmp(magic.c_str(), CEPH_OSD_ONDISK_MAGIC)) {
derr << "OSD magic " << magic << " != my " << CEPH_OSD_ONDISK_MAGIC
<< dendl;
forker.exit(1);
}
if (get_cluster_fsid) {
cout << cluster_fsid << std::endl;
forker.exit(0);
}
if (get_osd_fsid) {
cout << osd_fsid << std::endl;
forker.exit(0);
}
{
ostringstream err;
if (!can_upgrade_from(require_osd_release, "require_osd_release", err)) {
derr << err.str() << dendl;
forker.exit(1);
}
}
// consider objectstore numa node
int os_numa_node = -1;
r = store->get_numa_node(&os_numa_node, nullptr, nullptr);
if (r >= 0 && os_numa_node >= 0) {
dout(1) << " objectstore numa_node " << os_numa_node << dendl;
}
int iface_preferred_numa_node = -1;
if (g_conf().get_val<bool>("osd_numa_prefer_iface")) {
iface_preferred_numa_node = os_numa_node;
}
// messengers
std::string msg_type = g_conf().get_val<std::string>("ms_type");
std::string public_msg_type =
g_conf().get_val<std::string>("ms_public_type");
std::string cluster_msg_type =
g_conf().get_val<std::string>("ms_cluster_type");
public_msg_type = public_msg_type.empty() ? msg_type : public_msg_type;
cluster_msg_type = cluster_msg_type.empty() ? msg_type : cluster_msg_type;
uint64_t nonce = Messenger::get_random_nonce();
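  // one nonce is shared by all of this OSD's messengers so that peers can
  // distinguish this incarnation's addresses from those of a previous one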
Messenger *ms_public = Messenger::create(g_ceph_context, public_msg_type,
entity_name_t::OSD(whoami), "client", nonce);
Messenger *ms_cluster = Messenger::create(g_ceph_context, cluster_msg_type,
entity_name_t::OSD(whoami), "cluster", nonce);
Messenger *ms_hb_back_client = Messenger::create(g_ceph_context, cluster_msg_type,
entity_name_t::OSD(whoami), "hb_back_client", nonce);
Messenger *ms_hb_front_client = Messenger::create(g_ceph_context, public_msg_type,
entity_name_t::OSD(whoami), "hb_front_client", nonce);
Messenger *ms_hb_back_server = Messenger::create(g_ceph_context, cluster_msg_type,
entity_name_t::OSD(whoami), "hb_back_server", nonce);
Messenger *ms_hb_front_server = Messenger::create(g_ceph_context, public_msg_type,
entity_name_t::OSD(whoami), "hb_front_server", nonce);
Messenger *ms_objecter = Messenger::create(g_ceph_context, public_msg_type,
entity_name_t::OSD(whoami), "ms_objecter", nonce);
  if (!ms_public || !ms_cluster || !ms_hb_front_client || !ms_hb_back_client ||
      !ms_hb_back_server || !ms_hb_front_server || !ms_objecter)
forker.exit(1);
ms_cluster->set_cluster_protocol(CEPH_OSD_PROTOCOL);
ms_hb_front_client->set_cluster_protocol(CEPH_OSD_PROTOCOL);
ms_hb_back_client->set_cluster_protocol(CEPH_OSD_PROTOCOL);
ms_hb_back_server->set_cluster_protocol(CEPH_OSD_PROTOCOL);
ms_hb_front_server->set_cluster_protocol(CEPH_OSD_PROTOCOL);
dout(0) << "starting osd." << whoami
<< " osd_data " << data_path
<< " " << ((journal_path.empty()) ?
"(no journal)" : journal_path)
<< dendl;
uint64_t message_size =
g_conf().get_val<Option::size_t>("osd_client_message_size_cap");
boost::scoped_ptr<Throttle> client_byte_throttler(
new Throttle(g_ceph_context, "osd_client_bytes", message_size));
uint64_t message_cap = g_conf().get_val<uint64_t>("osd_client_message_cap");
boost::scoped_ptr<Throttle> client_msg_throttler(
new Throttle(g_ceph_context, "osd_client_messages", message_cap));
// All feature bits 0 - 34 should be present from dumpling v0.67 forward
uint64_t osd_required =
CEPH_FEATURE_UID |
CEPH_FEATURE_PGID64 |
CEPH_FEATURE_OSDENC;
ms_public->set_default_policy(Messenger::Policy::stateless_registered_server(0));
ms_public->set_policy_throttlers(entity_name_t::TYPE_CLIENT,
client_byte_throttler.get(),
client_msg_throttler.get());
ms_public->set_policy(entity_name_t::TYPE_MON,
Messenger::Policy::lossy_client(osd_required));
ms_public->set_policy(entity_name_t::TYPE_MGR,
Messenger::Policy::lossy_client(osd_required));
ms_cluster->set_default_policy(Messenger::Policy::stateless_server(0));
ms_cluster->set_policy(entity_name_t::TYPE_MON, Messenger::Policy::lossy_client(0));
ms_cluster->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::lossless_peer(osd_required));
ms_cluster->set_policy(entity_name_t::TYPE_CLIENT,
Messenger::Policy::stateless_server(0));
ms_hb_front_client->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::lossy_client(0));
ms_hb_back_client->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::lossy_client(0));
ms_hb_back_server->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::stateless_server(0));
ms_hb_front_server->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::stateless_server(0));
ms_objecter->set_default_policy(Messenger::Policy::lossy_client(CEPH_FEATURE_OSDREPLYMUX));
entity_addrvec_t public_addrs, public_bind_addrs, cluster_addrs;
r = pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC, &public_addrs,
iface_preferred_numa_node);
if (r < 0) {
derr << "Failed to pick public address." << dendl;
forker.exit(1);
} else {
dout(10) << "picked public_addrs " << public_addrs << dendl;
}
r = pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC_BIND,
&public_bind_addrs, iface_preferred_numa_node);
if (r == -ENOENT) {
dout(10) << "there is no public_bind_addrs, defaulting to public_addrs"
<< dendl;
public_bind_addrs = public_addrs;
} else if (r < 0) {
derr << "Failed to pick public bind address." << dendl;
forker.exit(1);
} else {
dout(10) << "picked public_bind_addrs " << public_bind_addrs << dendl;
}
r = pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_CLUSTER, &cluster_addrs,
iface_preferred_numa_node);
if (r < 0) {
derr << "Failed to pick cluster address." << dendl;
forker.exit(1);
}
if (ms_public->bindv(public_bind_addrs, public_addrs) < 0) {
derr << "Failed to bind to " << public_bind_addrs << dendl;
forker.exit(1);
}
if (ms_cluster->bindv(cluster_addrs) < 0)
forker.exit(1);
bool is_delay = g_conf().get_val<bool>("osd_heartbeat_use_min_delay_socket");
if (is_delay) {
ms_hb_front_client->set_socket_priority(SOCKET_PRIORITY_MIN_DELAY);
ms_hb_back_client->set_socket_priority(SOCKET_PRIORITY_MIN_DELAY);
ms_hb_back_server->set_socket_priority(SOCKET_PRIORITY_MIN_DELAY);
ms_hb_front_server->set_socket_priority(SOCKET_PRIORITY_MIN_DELAY);
}
entity_addrvec_t hb_front_addrs = public_bind_addrs;
for (auto& a : hb_front_addrs.v) {
a.set_port(0);
}
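  // binding with port 0 lets the messenger pick an ephemeral port while
  // reusing the public/cluster addresses for the heartbeat endpoints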
if (ms_hb_front_server->bindv(hb_front_addrs) < 0)
forker.exit(1);
if (ms_hb_front_client->client_bind(hb_front_addrs.front()) < 0)
forker.exit(1);
entity_addrvec_t hb_back_addrs = cluster_addrs;
for (auto& a : hb_back_addrs.v) {
a.set_port(0);
}
if (ms_hb_back_server->bindv(hb_back_addrs) < 0)
forker.exit(1);
if (ms_hb_back_client->client_bind(hb_back_addrs.front()) < 0)
forker.exit(1);
// install signal handlers
init_async_signal_handler();
register_async_signal_handler(SIGHUP, sighup_handler);
TracepointProvider::initialize<osd_tracepoint_traits>(g_ceph_context);
TracepointProvider::initialize<os_tracepoint_traits>(g_ceph_context);
TracepointProvider::initialize<bluestore_tracepoint_traits>(g_ceph_context);
#ifdef WITH_OSD_INSTRUMENT_FUNCTIONS
TracepointProvider::initialize<cyg_profile_traits>(g_ceph_context);
#endif
srand(time(NULL) + getpid());
ceph::async::io_context_pool poolctx(
cct->_conf.get_val<std::uint64_t>("osd_asio_thread_count"));
MonClient mc(g_ceph_context, poolctx);
if (mc.build_initial_monmap() < 0)
return -1;
global_init_chdir(g_ceph_context);
if (global_init_preload_erasure_code(g_ceph_context) < 0) {
forker.exit(1);
}
osdptr = new OSD(g_ceph_context,
std::move(store),
whoami,
ms_cluster,
ms_public,
ms_hb_front_client,
ms_hb_back_client,
ms_hb_front_server,
ms_hb_back_server,
ms_objecter,
&mc,
data_path,
journal_path,
poolctx);
int err = osdptr->pre_init();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: osd pre_init failed: " << cpp_strerror(-err)
<< TEXT_NORMAL << dendl;
forker.exit(1);
}
ms_public->start();
ms_hb_front_client->start();
ms_hb_back_client->start();
ms_hb_front_server->start();
ms_hb_back_server->start();
ms_cluster->start();
ms_objecter->start();
// start osd
err = osdptr->init();
if (err < 0) {
derr << TEXT_RED << " ** ERROR: osd init failed: " << cpp_strerror(-err)
<< TEXT_NORMAL << dendl;
forker.exit(1);
}
// -- daemonize --
if (g_conf()->daemonize) {
global_init_postfork_finish(g_ceph_context);
forker.daemonize();
}
register_async_signal_handler_oneshot(SIGINT, handle_osd_signal);
register_async_signal_handler_oneshot(SIGTERM, handle_osd_signal);
osdptr->final_init();
if (g_conf().get_val<bool>("inject_early_sigterm"))
kill(getpid(), SIGTERM);
ms_public->wait();
ms_hb_front_client->wait();
ms_hb_back_client->wait();
ms_hb_front_server->wait();
ms_hb_back_server->wait();
ms_cluster->wait();
ms_objecter->wait();
unregister_async_signal_handler(SIGHUP, sighup_handler);
unregister_async_signal_handler(SIGINT, handle_osd_signal);
unregister_async_signal_handler(SIGTERM, handle_osd_signal);
shutdown_async_signal_handler();
// done
poolctx.stop();
delete osdptr;
delete ms_public;
delete ms_hb_front_client;
delete ms_hb_back_client;
delete ms_hb_front_server;
delete ms_hb_back_server;
delete ms_cluster;
delete ms_objecter;
client_byte_throttler.reset();
client_msg_throttler.reset();
// cd on exit, so that gmon.out (if any) goes into a separate directory for each node.
char s[20];
snprintf(s, sizeof(s), "gmon/%d", getpid());
if ((mkdir(s, 0755) == 0) && (chdir(s) == 0)) {
dout(0) << "ceph-osd: gmon.out should be in " << s << dendl;
}
return 0;
}
| 25,438 | 31.242079 | 138 |
cc
|
null |
ceph-main/src/ceph_syn.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/stat.h>
#include <iostream>
#include <string>
#include "common/config.h"
#include "common/async/context_pool.h"
#include "client/SyntheticClient.h"
#include "client/Client.h"
#include "msg/Messenger.h"
#include "mon/MonClient.h"
#include "common/Timer.h"
#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "common/pick_address.h"
#include <sys/types.h>
#include <fcntl.h>
using namespace std;
extern int syn_filer_flags;
int main(int argc, const char **argv, char *envp[])
{
//cerr << "ceph-syn starting" << std::endl;
auto args = argv_to_vec(argc, argv);
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY, 0);
common_init_finish(g_ceph_context);
parse_syn_options(args); // for SyntheticClient
pick_addresses(g_ceph_context, CEPH_PICK_ADDRESS_PUBLIC);
// get monmap
ceph::async::io_context_pool poolctx(1);
MonClient mc(g_ceph_context, poolctx);
if (mc.build_initial_monmap() < 0)
return -1;
list<Client*> clients;
list<SyntheticClient*> synclients;
vector<Messenger*> messengers{static_cast<unsigned>(num_client), nullptr};
vector<MonClient*> mclients{static_cast<unsigned>(num_client), nullptr};
cout << "ceph-syn: starting " << num_client << " syn client(s)" << std::endl;
for (int i=0; i<num_client; i++) {
messengers[i] = Messenger::create_client_messenger(g_ceph_context,
"synclient");
mclients[i] = new MonClient(g_ceph_context, poolctx);
mclients[i]->build_initial_monmap();
auto client = new StandaloneClient(messengers[i], mclients[i], poolctx);
client->set_filer_flags(syn_filer_flags);
SyntheticClient *syn = new SyntheticClient(client);
clients.push_back(client);
synclients.push_back(syn);
messengers[i]->start();
}
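  // each synthetic client gets its own messenger and MonClient so the
  // instances behave like independent cluster clients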
for (list<SyntheticClient*>::iterator p = synclients.begin();
p != synclients.end();
++p)
(*p)->start_thread();
poolctx.stop();
//cout << "waiting for client(s) to finish" << std::endl;
while (!clients.empty()) {
Client *client = clients.front();
SyntheticClient *syn = synclients.front();
clients.pop_front();
synclients.pop_front();
syn->join_thread();
delete syn;
delete client;
}
for (int i = 0; i < num_client; ++i) {
// wait for messenger to finish
delete mclients[i];
messengers[i]->shutdown();
messengers[i]->wait();
delete messengers[i];
}
return 0;
}
| 2,891 | 26.283019 | 79 |
cc
|
null |
ceph-main/src/ceph_ver.c
|
#include "ceph_ver.h"
#define CONCAT_VER_SYMBOL(x) ceph_ver__##x
#define DEFINE_VER_SYMBOL(x) int CONCAT_VER_SYMBOL(x)
DEFINE_VER_SYMBOL(CEPH_GIT_VER);
| 159 | 12.333333 | 53 |
c
|
null |
ceph-main/src/ckill.sh
|
#!/bin/bash -e
if [ -e CMakeCache.txt ]; then
[ -z "$CEPH_BIN" ] && CEPH_BIN=bin
fi
if [ -z "$CEPHADM" ]; then
CEPHADM="${CEPH_BIN}/cephadm"
fi
# fsid
if [ -e fsid ] ; then
fsid=`cat fsid`
else
echo 'no fsid file, so no cluster?'
exit 0
fi
echo "fsid $fsid"
sudo $CEPHADM rm-cluster --force --fsid $fsid
| 329 | 14 | 45 |
sh
|
null |
ceph-main/src/cls_acl.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include "include/types.h"
#include "objclass/objclass.h"
CLS_VER(1,0)
CLS_NAME(acl)
int get_method(cls_method_context_t ctx, char *indata, int datalen,
char **outdata, int *outdatalen)
{
cls_log("acl test method");
cls_log("indata=%.*s data_len=%d", datalen, indata, datalen);
cls_getxattr(ctx, "acls", outdata, outdatalen);
return 0;
}
int set_method(cls_method_context_t ctx, char *indata, int datalen,
char **outdata, int *outdatalen)
{
cls_log("acl test method");
cls_log("indata=%.*s data_len=%d", datalen, indata, datalen);
cls_setxattr(ctx, "acls", indata, datalen);
return 0;
}
CLS_INIT(acl)
{
cls_log("Loaded acl class!");
cls_handle_t h_class;
cls_method_handle_t h_get;
cls_method_handle_t h_set;
cls_register("acl", &h_class);
cls_register_method(h_class, "get", CLS_METHOD_RD, get_method, &h_get);
cls_register_method(h_class, "set", CLS_METHOD_WR, set_method, &h_set);
return;
}
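/*
 * Client-side sketch (hypothetical object name; assumes a connected
 * librados::IoCtx, which is outside this class):
 *
 *   ceph::bufferlist in, out;
 *   ioctx.exec("someobj", "acl", "get", in, out);  // returns the "acls" xattr
 *   in.append("allow *");
 *   ioctx.exec("someobj", "acl", "set", in, out);  // stores it back
 */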
| 1,223 | 20.103448 | 74 |
cc
|
null |
ceph-main/src/cls_crypto.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <openssl/md5.h>
#include <openssl/sha.h>
#include "include/types.h"
#include "objclass/objclass.h"
CLS_VER(1,0)
CLS_NAME(crypto)
int md5_method(cls_method_context_t ctx, char *indata, int datalen,
char **outdata, int *outdatalen)
{
MD5_CTX c;
unsigned char *md;
cls_log("md5 method");
cls_log("indata=%.*s data_len=%d", datalen, indata, datalen);
md = (unsigned char *)cls_alloc(MD5_DIGEST_LENGTH);
if (!md)
return -ENOMEM;
MD5_Init(&c);
MD5_Update(&c, indata, (unsigned long)datalen);
MD5_Final(md,&c);
*outdata = (char *)md;
*outdatalen = MD5_DIGEST_LENGTH;
return 0;
}
int sha1_method(cls_method_context_t ctx, char *indata, int datalen,
char **outdata, int *outdatalen)
{
SHA_CTX c;
unsigned char *md;
cls_log("sha1 method");
cls_log("indata=%.*s data_len=%d", datalen, indata, datalen);
md = (unsigned char *)cls_alloc(SHA_DIGEST_LENGTH);
if (!md)
return -ENOMEM;
SHA1_Init(&c);
SHA1_Update(&c, indata, (unsigned long)datalen);
SHA1_Final(md,&c);
*outdata = (char *)md;
*outdatalen = SHA_DIGEST_LENGTH;
return 0;
}
CLS_INIT(crypto)
{
cls_log("Loaded crypto class!");
cls_handle_t h_class;
cls_method_handle_t h_md5;
cls_method_handle_t h_sha1;
cls_register("crypto", &h_class);
cls_register_method(h_class, "md5", CLS_METHOD_RD, md5_method, &h_md5);
cls_register_method(h_class, "sha1", CLS_METHOD_RD, sha1_method, &h_sha1);
return;
}
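/*
 * Client-side sketch (hypothetical object name; assumes a connected
 * librados::IoCtx, which is outside this class):
 *
 *   ceph::bufferlist in, out;
 *   in.append("hello");
 *   ioctx.exec("someobj", "crypto", "md5", in, out);  // out: 16 raw digest bytes
 */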
| 1,662 | 20.320513 | 77 |
cc
|
null |
ceph-main/src/cstart.sh
|
#!/bin/bash -e
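#
# Bootstrap a single-node containerized dev cluster with cephadm. Any extra
# arguments are passed straight through to 'cephadm bootstrap', e.g.
# (run from the build directory):
#
#   ../src/cstart.sh --skip-dashboard
#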
if [ -e CMakeCache.txt ]; then
[ -z "$CEPH_BIN" ] && CEPH_BIN=bin
fi
if [ -z "$CEPHADM" ]; then
CEPHADM="${CEPH_BIN}/cephadm"
fi
image_base="quay.io/ceph-ci/ceph"
if which podman > /dev/null 2>&1; then
runtime="podman"
else
runtime="docker"
fi
# fsid
if [ -e fsid ] ; then
fsid=`cat fsid`
else
fsid=`uuidgen`
echo $fsid > fsid
fi
echo "fsid $fsid"
shortid=`echo $fsid | cut -c 1-8`
echo $shortid > shortid
echo "shortid $shortid"
# ip
if [ -z "$ip" ]; then
if [ -x "$(which ip 2>/dev/null)" ]; then
IP_CMD="ip addr"
else
IP_CMD="ifconfig"
fi
# pick the first non-loopback IPv4 address (127.0.0.1 is filtered out)
ip="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
# if nothing left, try using localhost address, it might work
if [ -z "$ip" ]; then ip="127.0.0.1"; fi
fi
echo "ip $ip"
# port
if [ -e port ] ; then
port=`cat port`
else
while true
do
port="$(echo $(( RANDOM % 1000 + 40000 )))"
ss -a -n | grep LISTEN | grep "${ip}:${port} " >/dev/null 2>&1 || break
done
echo $port > port
fi
echo "mon port $port"
# make sure we have an image
if ! sudo $runtime image inspect $image_base:$shortid 1>/dev/null 2>/dev/null; then
echo "building initial $image_base:$shortid image..."
sudo ../src/script/cpatch -t $image_base:$shortid
fi
sudo $CEPHADM rm-cluster --force --fsid $fsid
sudo $CEPHADM --image ${image_base}:${shortid} bootstrap \
--skip-pull \
--fsid $fsid \
--mon-addrv "[v2:$ip:$port]" \
--output-dir . \
--allow-overwrite \
$@
# kludge to make 'bin/ceph ...' work
sudo chmod 755 ceph.client.admin.keyring
echo 'keyring = ceph.client.admin.keyring' >> ceph.conf
# don't use repo digests; this implicitly does a pull and we don't want that
${CEPH_BIN}/ceph config set mgr mgr/cephadm/use_repo_digest false
echo
echo "sudo ../src/script/cpatch -t $image_base:$shortid"
echo
| 1,965 | 22.129412 | 102 |
sh
|
null |
ceph-main/src/krbd.cc
|
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include <fcntl.h>
#include <iostream>
#include <memory>
#include <optional>
#include <poll.h>
#include <regex>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <tuple>
#include <unistd.h>
#include <utility>
#include "auth/KeyRing.h"
#include "common/errno.h"
#include "common/Formatter.h"
#include "common/module.h"
#include "common/run_cmd.h"
#include "common/safe_io.h"
#include "common/secret.h"
#include "common/TextTable.h"
#include "common/Thread.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#include "include/krbd.h"
#include "mon/MonMap.h"
#include <blkid/blkid.h>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/tokenizer.hpp>
#include <libudev.h>
static const int UDEV_BUF_SIZE = 1 << 20; /* doubled to 2M (SO_RCVBUFFORCE) */
static const char DEVNODE_PREFIX[] = "/dev/rbd";
static const char SNAP_HEAD_NAME[] = "-";
#define DEFINE_UDEV_UPTR(what) \
struct udev_##what##_deleter { \
void operator()(udev_##what *p) { \
udev_##what##_unref(p); \
} \
}; \
using udev_##what##_uptr = \
std::unique_ptr<udev_##what, udev_##what##_deleter>;
DEFINE_UDEV_UPTR(monitor) /* udev_monitor_uptr */
DEFINE_UDEV_UPTR(enumerate) /* udev_enumerate_uptr */
DEFINE_UDEV_UPTR(device) /* udev_device_uptr */
using std::string;
struct krbd_ctx {
CephContext *cct;
struct udev *udev;
uint32_t flags; /* KRBD_CTX_F_* */
};
struct krbd_spec {
std::string pool_name;
std::string nspace_name;
std::string image_name;
std::string snap_name;
krbd_spec(const char *pool_name, const char *nspace_name,
const char *image_name, const char *snap_name)
: pool_name(pool_name),
nspace_name(nspace_name),
image_name(image_name),
snap_name(*snap_name ? snap_name : SNAP_HEAD_NAME) { }
bool operator==(const krbd_spec& rhs) const {
return pool_name == rhs.pool_name &&
nspace_name == rhs.nspace_name &&
image_name == rhs.image_name &&
snap_name == rhs.snap_name;
}
};
static std::ostream& operator<<(std::ostream& os, const krbd_spec& spec)
{
os << spec.pool_name << "/";
if (!spec.nspace_name.empty())
os << spec.nspace_name << "/";
os << spec.image_name;
if (spec.snap_name != SNAP_HEAD_NAME)
os << "@" << spec.snap_name;
return os;
}
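// e.g. "rbd/myimage" or "rbd/ns1/myimage@snap1"; the head revision ("-") is
// printed without a snap suffix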
static std::optional<krbd_spec> spec_from_dev(udev_device *dev)
{
const char *pool_name = udev_device_get_sysattr_value(dev, "pool");
const char *nspace_name = udev_device_get_sysattr_value(dev, "pool_ns");
const char *image_name = udev_device_get_sysattr_value(dev, "name");
const char *snap_name = udev_device_get_sysattr_value(dev, "current_snap");
if (!pool_name || !image_name || !snap_name)
return std::nullopt;
return std::make_optional<krbd_spec>(
pool_name, nspace_name ?: "", image_name, snap_name);
}
static udev_device_uptr dev_from_list_entry(udev *udev, udev_list_entry *l)
{
return udev_device_uptr(
udev_device_new_from_syspath(udev, udev_list_entry_get_name(l)));
}
static std::string get_devnode(udev_device *dev)
{
std::string devnode = DEVNODE_PREFIX;
devnode += udev_device_get_sysname(dev);
return devnode;
}
static int sysfs_write_rbd(const char *which, const string& buf)
{
const string s = string("/sys/bus/rbd/") + which;
const string t = s + "_single_major";
int fd;
int r;
/*
* 'add' and 'add_single_major' interfaces are identical, but if rbd
* kernel module is new enough and is configured to use single-major
* scheme, 'add' is disabled in order to prevent old userspace from
* doing weird things at unmap time.
*
* Same goes for 'remove' vs 'remove_single_major'.
*/
fd = open(t.c_str(), O_WRONLY);
if (fd < 0) {
if (errno == ENOENT) {
fd = open(s.c_str(), O_WRONLY);
if (fd < 0)
return -errno;
} else {
return -errno;
}
}
r = safe_write(fd, buf.c_str(), buf.size());
close(fd);
return r;
}
static int sysfs_write_rbd_add(const string& buf)
{
return sysfs_write_rbd("add", buf);
}
static int sysfs_write_rbd_remove(const string& buf)
{
return sysfs_write_rbd("remove", buf);
}
static int have_minor_attr(void)
{
/*
* 'minor' attribute was added as part of single_major merge, which
* exposed the 'single_major' parameter. 'minor' is always present,
* regardless of whether single-major scheme is turned on or not.
*
* (Something like ver >= KERNEL_VERSION(3, 14, 0) is a no-go because
* this has to work with rbd.ko backported to various kernels.)
*/
return access("/sys/module/rbd/parameters/single_major", F_OK) == 0;
}
static int build_map_buf(CephContext *cct, const krbd_spec& spec,
const string& options, string *pbuf)
{
bool msgr2 = false;
std::ostringstream oss;
int r;
boost::char_separator<char> sep(",");
boost::tokenizer<boost::char_separator<char>> tok(options, sep);
for (const auto& t : tok) {
if (boost::starts_with(t, "ms_mode=")) {
/* msgr2 unless ms_mode=legacy */
      msgr2 = (t.compare(8, t.npos, "legacy") != 0);
}
}
MonMap monmap;
r = monmap.build_initial(cct, false, std::cerr);
if (r < 0)
return r;
/*
* If msgr2, filter TYPE_MSGR2 addresses. Otherwise, filter
* TYPE_LEGACY addresses.
*/
for (const auto& p : monmap.mon_info) {
for (const auto& a : p.second.public_addrs.v) {
if ((msgr2 && a.is_msgr2()) || (!msgr2 && a.is_legacy())) {
if (oss.tellp() > 0) {
oss << ",";
}
oss << a.get_sockaddr();
}
}
}
if (oss.tellp() == 0) {
std::cerr << "rbd: failed to get mon address (possible ms_mode mismatch)" << std::endl;
return -ENOENT;
}
oss << " name=" << cct->_conf->name.get_id();
KeyRing keyring;
auto auth_client_required =
cct->_conf.get_val<std::string>("auth_client_required");
if (auth_client_required != "none") {
r = keyring.from_ceph_context(cct);
auto keyfile = cct->_conf.get_val<std::string>("keyfile");
auto key = cct->_conf.get_val<std::string>("key");
if (r == -ENOENT && keyfile.empty() && key.empty())
r = 0;
if (r < 0) {
std::cerr << "rbd: failed to get secret" << std::endl;
return r;
}
}
CryptoKey secret;
string key_name = string("client.") + cct->_conf->name.get_id();
if (keyring.get_secret(cct->_conf->name, secret)) {
string secret_str;
secret.encode_base64(secret_str);
r = set_kernel_secret(secret_str.c_str(), key_name.c_str());
if (r >= 0) {
if (r == 0)
std::cerr << "rbd: warning: secret has length 0" << std::endl;
oss << ",key=" << key_name;
} else if (r == -ENODEV || r == -ENOSYS) {
// running against older kernel; fall back to secret= in options
oss << ",secret=" << secret_str;
} else {
std::cerr << "rbd: failed to add secret '" << key_name << "' to kernel"
<< std::endl;
return r;
}
} else if (is_kernel_secret(key_name.c_str())) {
oss << ",key=" << key_name;
}
if (!options.empty())
oss << "," << options;
if (!spec.nspace_name.empty())
oss << ",_pool_ns=" << spec.nspace_name;
oss << " " << spec.pool_name << " " << spec.image_name << " "
<< spec.snap_name;
*pbuf = oss.str();
return 0;
}
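/*
 * A representative (not verbatim) result, in the format expected by
 * /sys/bus/rbd/add{,_single_major}:
 *
 *   "1.2.3.4:6789 name=admin,key=client.admin rbd myimage -"
 *
 * i.e. "<mon_addrs> <options> <pool> <image> <snap>".
 */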
/*
* Return:
* <kernel error, false> - didn't map
* <0 or udev error, true> - mapped
*/
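/*
 * Implementation note: poll both the pipe carrying the sysfs write result
 * and the udev monitor fd. A sysfs failure returns immediately; a sysfs
 * success still waits for the matching udev event (or a udev error).
 */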
template <typename F>
static std::pair<int, bool> wait_for_mapping(int sysfs_r_fd, udev_monitor *mon,
F udev_device_handler)
{
struct pollfd fds[2];
int sysfs_r = INT_MAX, udev_r = INT_MAX;
int r;
fds[0].fd = sysfs_r_fd;
fds[0].events = POLLIN;
fds[1].fd = udev_monitor_get_fd(mon);
fds[1].events = POLLIN;
for (;;) {
if (poll(fds, 2, -1) < 0) {
ceph_abort_msgf("poll failed: %d", -errno);
}
if (fds[0].revents) {
r = safe_read_exact(sysfs_r_fd, &sysfs_r, sizeof(sysfs_r));
if (r < 0) {
ceph_abort_msgf("safe_read_exact failed: %d", r);
}
if (sysfs_r < 0) {
return std::make_pair(sysfs_r, false);
}
if (udev_r != INT_MAX) {
ceph_assert(!sysfs_r);
return std::make_pair(udev_r, true);
}
fds[0].fd = -1;
}
if (fds[1].revents) {
for (;;) {
udev_device_uptr dev(udev_monitor_receive_device(mon));
if (!dev) {
if (errno != EINTR && errno != EAGAIN) {
udev_r = -errno;
if (sysfs_r != INT_MAX) {
ceph_assert(!sysfs_r);
return std::make_pair(udev_r, true);
}
fds[1].fd = -1;
}
break;
}
if (udev_device_handler(std::move(dev))) {
udev_r = 0;
if (sysfs_r != INT_MAX) {
ceph_assert(!sysfs_r);
return std::make_pair(udev_r, true);
}
fds[1].fd = -1;
break;
}
}
}
}
}
class UdevMapHandler {
public:
UdevMapHandler(const krbd_spec *spec, std::string *pdevnode,
std::string *majnum, std::string *minnum) :
m_spec(spec), m_pdevnode(pdevnode), m_majnum(majnum), m_minnum(minnum) {}
/*
* Catch /sys/devices/rbd/<id>/ and wait for the corresponding
* block device to show up. This is necessary because rbd devices
* and block devices aren't linked together in our sysfs layout.
*
* Note that our "block" event can come before the "rbd" event, so
* all potential "block" events are gathered in m_block_devs before
* m_bus_dev is caught.
*/
bool operator()(udev_device_uptr dev) {
if (strcmp(udev_device_get_action(dev.get()), "add")) {
return false;
}
if (!strcmp(udev_device_get_subsystem(dev.get()), "rbd")) {
if (!m_bus_dev) {
auto spec = spec_from_dev(dev.get());
if (spec && *spec == *m_spec) {
m_bus_dev = std::move(dev);
m_devnode = get_devnode(m_bus_dev.get());
}
}
} else if (!strcmp(udev_device_get_subsystem(dev.get()), "block")) {
if (boost::starts_with(udev_device_get_devnode(dev.get()),
DEVNODE_PREFIX)) {
m_block_devs.push_back(std::move(dev));
}
}
if (m_bus_dev && !m_block_devs.empty()) {
for (const auto& p : m_block_devs) {
if (udev_device_get_devnode(p.get()) == m_devnode) {
*m_pdevnode = std::move(m_devnode);
*m_majnum = udev_device_get_property_value(p.get(), "MAJOR");
*m_minnum = udev_device_get_property_value(p.get(), "MINOR");
ceph_assert(*m_majnum == udev_device_get_sysattr_value(
m_bus_dev.get(), "major"));
ceph_assert(!have_minor_attr() ||
*m_minnum == udev_device_get_sysattr_value(
m_bus_dev.get(), "minor"));
return true;
}
}
m_block_devs.clear();
}
return false;
}
private:
udev_device_uptr m_bus_dev;
std::vector<udev_device_uptr> m_block_devs;
std::string m_devnode;
const krbd_spec *m_spec;
std::string *m_pdevnode;
std::string *m_majnum;
std::string *m_minnum;
};
static const char *get_event_source(const krbd_ctx *ctx)
{
if (ctx->flags & KRBD_CTX_F_NOUDEV) {
/*
* For block devices (unlike network interfaces, they don't
* carry any namespace tags), the kernel broadcasts uevents
* into all network namespaces that are owned by the initial
* user namespace. This restriction is new in 4.18: starting
* with 2.6.35 and through 4.17 the kernel broadcast uevents
* into all network namespaces, period.
*
* However, when invoked from a non-initial user namespace,
* udev_monitor_receive_device() has always ignored both kernel
* and udev uevents by virtue of requiring SCM_CREDENTIALS and
* checking that ucred->uid == 0. When UIDs and GIDs are sent to
* a process in a user namespace, they are translated according
* to that process's UID and GID mappings and, unless root in the
* user namespace is mapped to the global root, that check fails.
* Normally they show up as 65534(nobody) because the global root
* is not mapped.
*/
return "kernel";
}
/*
* Like most netlink messages, udev uevents don't cross network
* namespace boundaries and are therefore confined to the initial
* network namespace.
*/
return "udev";
}
static int do_map(krbd_ctx *ctx, const krbd_spec& spec, const string& buf,
string *pname)
{
std::string majnum, minnum;
struct stat sb;
bool mapped;
int fds[2];
int r;
udev_monitor_uptr mon(udev_monitor_new_from_netlink(ctx->udev,
get_event_source(ctx)));
if (!mon)
return -ENOMEM;
r = udev_monitor_filter_add_match_subsystem_devtype(mon.get(), "rbd",
nullptr);
if (r < 0)
return r;
r = udev_monitor_filter_add_match_subsystem_devtype(mon.get(), "block",
"disk");
if (r < 0)
return r;
r = udev_monitor_set_receive_buffer_size(mon.get(), UDEV_BUF_SIZE);
if (r < 0) {
std::cerr << "rbd: failed to set udev buffer size: " << cpp_strerror(r)
<< std::endl;
/* not fatal */
}
r = udev_monitor_enable_receiving(mon.get());
if (r < 0)
return r;
if (pipe2(fds, O_NONBLOCK) < 0)
return -errno;
auto mapper = make_named_thread("mapper", [&buf, sysfs_r_fd = fds[1]]() {
int sysfs_r = sysfs_write_rbd_add(buf);
int r = safe_write(sysfs_r_fd, &sysfs_r, sizeof(sysfs_r));
if (r < 0) {
ceph_abort_msgf("safe_write failed: %d", r);
}
});
std::tie(r, mapped) = wait_for_mapping(fds[0], mon.get(),
UdevMapHandler(&spec, pname, &majnum,
&minnum));
if (r < 0) {
if (!mapped) {
std::cerr << "rbd: sysfs write failed" << std::endl;
} else {
std::cerr << "rbd: udev wait failed" << std::endl;
/* TODO: fall back to enumeration */
}
}
mapper.join();
close(fds[0]);
close(fds[1]);
if (r < 0)
return r;
/*
* Make sure our device node is there. This is intended to help
* diagnose environments where "rbd map" is run from a container with
* a private /dev and some external mechanism (e.g. udev) is used to
* add the device to the container asynchronously, possibly seconds
* after "rbd map" successfully exits. These setups are very fragile
* and in some cases can even lead to data loss, depending on higher
* level logic and orchestration layers involved.
*/
ceph_assert(mapped);
if (stat(pname->c_str(), &sb) < 0 || !S_ISBLK(sb.st_mode)) {
std::cerr << "rbd: mapping succeeded but " << *pname
<< " is not accessible, is host /dev mounted?" << std::endl;
return -EINVAL;
}
if (stringify(major(sb.st_rdev)) != majnum ||
stringify(minor(sb.st_rdev)) != minnum) {
std::cerr << "rbd: mapping succeeded but " << *pname
<< " (" << major(sb.st_rdev) << ":" << minor(sb.st_rdev)
<< ") does not match expected " << majnum << ":" << minnum
<< std::endl;
return -EINVAL;
}
return 0;
}
static int map_image(struct krbd_ctx *ctx, const krbd_spec& spec,
const char *options, string *pname)
{
string buf;
int r;
/*
* Modprobe rbd kernel module. If it supports single-major device
* number allocation scheme, make sure it's turned on.
*
* Do this before calling build_map_buf() - it wants "ceph" key type
* registered.
*/
if (access("/sys/bus/rbd", F_OK) != 0) {
const char *module_options = NULL;
if (module_has_param("rbd", "single_major"))
module_options = "single_major=Y";
r = module_load("rbd", module_options);
if (r) {
std::cerr << "rbd: failed to load rbd kernel module (" << r << ")"
<< std::endl;
/*
       * Ignore the error: a modprobe failure doesn't necessarily prevent
       * the subsequent map attempt from working.
*/
}
}
r = build_map_buf(ctx->cct, spec, options, &buf);
if (r < 0)
return r;
return do_map(ctx, spec, buf, pname);
}
static int devno_to_krbd_id(struct udev *udev, dev_t devno, string *pid)
{
udev_enumerate_uptr enm;
struct udev_list_entry *l;
int r;
retry:
enm.reset(udev_enumerate_new(udev));
if (!enm)
return -ENOMEM;
r = udev_enumerate_add_match_subsystem(enm.get(), "rbd");
if (r < 0)
return r;
r = udev_enumerate_add_match_sysattr(enm.get(), "major",
stringify(major(devno)).c_str());
if (r < 0)
return r;
if (have_minor_attr()) {
r = udev_enumerate_add_match_sysattr(enm.get(), "minor",
stringify(minor(devno)).c_str());
if (r < 0)
return r;
}
r = udev_enumerate_scan_devices(enm.get());
if (r < 0) {
if (r == -ENOENT || r == -ENODEV) {
std::cerr << "rbd: udev enumerate failed, retrying" << std::endl;
goto retry;
}
return r;
}
l = udev_enumerate_get_list_entry(enm.get());
if (!l)
return -ENOENT;
/* make sure there is only one match */
ceph_assert(!udev_list_entry_get_next(l));
auto dev = dev_from_list_entry(udev, l);
if (!dev)
return -ENOMEM;
*pid = udev_device_get_sysname(dev.get());
return 0;
}
// wrap any of * ? [ between square brackets
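// e.g. escape_glob("img*1") returns "img[*]1", so udev matches the name
// literally instead of treating it as a glob pattern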
static std::string escape_glob(const std::string& s)
{
std::regex glob_meta("([*?[])");
return std::regex_replace(s, glob_meta, "[$1]");
}
static int __enumerate_devices(struct udev *udev, const krbd_spec& spec,
bool match_nspace, udev_enumerate_uptr *penm)
{
udev_enumerate_uptr enm;
int r;
retry:
enm.reset(udev_enumerate_new(udev));
if (!enm)
return -ENOMEM;
r = udev_enumerate_add_match_subsystem(enm.get(), "rbd");
if (r < 0)
return r;
r = udev_enumerate_add_match_sysattr(enm.get(), "pool",
escape_glob(spec.pool_name).c_str());
if (r < 0)
return r;
if (match_nspace) {
r = udev_enumerate_add_match_sysattr(enm.get(), "pool_ns",
escape_glob(spec.nspace_name).c_str());
} else {
/*
* Match _only_ devices that don't have pool_ns attribute.
* If the kernel supports namespaces, the result will be empty.
*/
r = udev_enumerate_add_nomatch_sysattr(enm.get(), "pool_ns", nullptr);
}
if (r < 0)
return r;
r = udev_enumerate_add_match_sysattr(enm.get(), "name",
escape_glob(spec.image_name).c_str());
if (r < 0)
return r;
r = udev_enumerate_add_match_sysattr(enm.get(), "current_snap",
escape_glob(spec.snap_name).c_str());
if (r < 0)
return r;
r = udev_enumerate_scan_devices(enm.get());
if (r < 0) {
if (r == -ENOENT || r == -ENODEV) {
std::cerr << "rbd: udev enumerate failed, retrying" << std::endl;
goto retry;
}
return r;
}
*penm = std::move(enm);
return 0;
}
static int enumerate_devices(struct udev *udev, const krbd_spec& spec,
udev_enumerate_uptr *penm)
{
udev_enumerate_uptr enm;
int r;
r = __enumerate_devices(udev, spec, true, &enm);
if (r < 0)
return r;
/*
* If no namespace is set, try again with match_nspace=false to
* handle older kernels. On a newer kernel the result will remain
* the same (i.e. empty).
*/
if (!udev_enumerate_get_list_entry(enm.get()) && spec.nspace_name.empty()) {
r = __enumerate_devices(udev, spec, false, &enm);
if (r < 0)
return r;
}
*penm = std::move(enm);
return 0;
}
static int spec_to_devno_and_krbd_id(struct udev *udev, const krbd_spec& spec,
dev_t *pdevno, string *pid)
{
udev_enumerate_uptr enm;
struct udev_list_entry *l;
unsigned int maj, min = 0;
string err;
int r;
r = enumerate_devices(udev, spec, &enm);
if (r < 0)
return r;
l = udev_enumerate_get_list_entry(enm.get());
if (!l)
return -ENOENT;
auto dev = dev_from_list_entry(udev, l);
if (!dev)
return -ENOMEM;
maj = strict_strtoll(udev_device_get_sysattr_value(dev.get(), "major"), 10,
&err);
if (!err.empty()) {
std::cerr << "rbd: couldn't parse major: " << err << std::endl;
return -EINVAL;
}
if (have_minor_attr()) {
min = strict_strtoll(udev_device_get_sysattr_value(dev.get(), "minor"), 10,
&err);
if (!err.empty()) {
std::cerr << "rbd: couldn't parse minor: " << err << std::endl;
return -EINVAL;
}
}
/*
   * If an image is mapped more than once, don't bother trying to unmap
   * all devices - let users run unmap the same number of times they
   * ran map.
*/
if (udev_list_entry_get_next(l))
std::cerr << "rbd: " << spec << ": mapped more than once, unmapping "
<< get_devnode(dev.get()) << " only" << std::endl;
*pdevno = makedev(maj, min);
*pid = udev_device_get_sysname(dev.get());
return 0;
}
static void append_unmap_options(std::string *buf, const char *options)
{
if (strcmp(options, "") != 0) {
*buf += " ";
*buf += options;
}
}
class UdevUnmapHandler {
public:
UdevUnmapHandler(dev_t devno) : m_devno(devno) {}
bool operator()(udev_device_uptr dev) {
if (strcmp(udev_device_get_action(dev.get()), "remove")) {
return false;
}
return udev_device_get_devnum(dev.get()) == m_devno;
}
private:
dev_t m_devno;
};
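/*
 * Unmap via the sysfs interface while listening for the corresponding
 * udev "remove" event.  The sysfs write runs on a helper thread (see
 * the EBUSY workaround below) and reports its result over a pipe, so
 * the udev monitor can be drained concurrently.
 */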
static int do_unmap(krbd_ctx *ctx, dev_t devno, const string& buf)
{
bool unmapped;
int fds[2];
int r;
udev_monitor_uptr mon(udev_monitor_new_from_netlink(ctx->udev,
get_event_source(ctx)));
if (!mon)
return -ENOMEM;
r = udev_monitor_filter_add_match_subsystem_devtype(mon.get(), "block",
"disk");
if (r < 0)
return r;
r = udev_monitor_set_receive_buffer_size(mon.get(), UDEV_BUF_SIZE);
if (r < 0) {
std::cerr << "rbd: failed to set udev buffer size: " << cpp_strerror(r)
<< std::endl;
/* not fatal */
}
r = udev_monitor_enable_receiving(mon.get());
if (r < 0)
return r;
if (pipe2(fds, O_NONBLOCK) < 0)
return -errno;
auto unmapper = make_named_thread(
"unmapper", [&buf, sysfs_r_fd = fds[1], flags = ctx->flags]() {
/*
* On final device close(), kernel sends a block change event, in
* response to which udev apparently runs blkid on the device. This
* makes unmap fail with EBUSY, if issued right after final close().
* Try to circumvent this with a retry before turning to udev.
*/
for (int tries = 0; ; tries++) {
int sysfs_r = sysfs_write_rbd_remove(buf);
if (sysfs_r == -EBUSY && tries < 2) {
if (!tries) {
usleep(250 * 1000);
} else if (!(flags & KRBD_CTX_F_NOUDEV)) {
/*
* libudev does not provide the "wait until the queue is empty"
             * API or sufficient primitives to build one from.
*/
std::string err = run_cmd("udevadm", "settle", "--timeout", "10",
(char *)NULL);
if (!err.empty())
std::cerr << "rbd: " << err << std::endl;
}
} else {
int r = safe_write(sysfs_r_fd, &sysfs_r, sizeof(sysfs_r));
if (r < 0) {
ceph_abort_msgf("safe_write failed: %d", r);
}
break;
}
}
});
std::tie(r, unmapped) = wait_for_mapping(fds[0], mon.get(),
UdevUnmapHandler(devno));
if (r < 0) {
if (!unmapped) {
std::cerr << "rbd: sysfs write failed" << std::endl;
} else {
std::cerr << "rbd: udev wait failed: " << cpp_strerror(r) << std::endl;
r = 0;
}
}
unmapper.join();
close(fds[0]);
close(fds[1]);
return r;
}
static int unmap_image(struct krbd_ctx *ctx, const char *devnode,
const char *options)
{
struct stat sb;
dev_t wholedevno = 0;
std::string buf;
int r;
if (stat(devnode, &sb) < 0 || !S_ISBLK(sb.st_mode)) {
std::cerr << "rbd: '" << devnode << "' is not a block device" << std::endl;
return -EINVAL;
}
r = blkid_devno_to_wholedisk(sb.st_rdev, NULL, 0, &wholedevno);
if (r < 0) {
std::cerr << "rbd: couldn't compute wholedevno: " << cpp_strerror(r)
<< std::endl;
/*
* Ignore the error: we are given whole disks most of the time, and
* if it turns out this is a partition we will fail later anyway.
*/
wholedevno = sb.st_rdev;
}
for (int tries = 0; ; tries++) {
r = devno_to_krbd_id(ctx->udev, wholedevno, &buf);
if (r == -ENOENT && tries < 2) {
usleep(250 * 1000);
} else {
if (r < 0) {
if (r == -ENOENT) {
std::cerr << "rbd: '" << devnode << "' is not an rbd device"
<< std::endl;
r = -EINVAL;
}
return r;
}
if (tries) {
std::cerr << "rbd: udev enumerate missed a device, tries = " << tries
<< std::endl;
}
break;
}
}
append_unmap_options(&buf, options);
return do_unmap(ctx, wholedevno, buf);
}
static int unmap_image(struct krbd_ctx *ctx, const krbd_spec& spec,
const char *options)
{
dev_t devno = 0;
std::string buf;
int r;
for (int tries = 0; ; tries++) {
r = spec_to_devno_and_krbd_id(ctx->udev, spec, &devno, &buf);
if (r == -ENOENT && tries < 2) {
usleep(250 * 1000);
} else {
if (r < 0) {
if (r == -ENOENT) {
std::cerr << "rbd: " << spec << ": not a mapped image or snapshot"
<< std::endl;
r = -EINVAL;
}
return r;
}
if (tries) {
std::cerr << "rbd: udev enumerate missed a device, tries = " << tries
<< std::endl;
}
break;
}
}
append_unmap_options(&buf, options);
return do_unmap(ctx, devno, buf);
}
static bool dump_one_image(Formatter *f, TextTable *tbl,
struct udev_device *dev)
{
auto spec = spec_from_dev(dev);
std::string devnode = get_devnode(dev);
const char *id = devnode.c_str() + sizeof(DEVNODE_PREFIX) - 1;
if (!spec)
return false;
if (f) {
f->open_object_section("device");
f->dump_string("id", id);
f->dump_string("pool", spec->pool_name);
f->dump_string("namespace", spec->nspace_name);
f->dump_string("name", spec->image_name);
f->dump_string("snap", spec->snap_name);
f->dump_string("device", devnode);
f->close_section();
} else {
*tbl << id << spec->pool_name << spec->nspace_name << spec->image_name
<< spec->snap_name << devnode << TextTable::endrow;
}
return true;
}
static int do_dump(struct udev *udev, Formatter *f, TextTable *tbl)
{
udev_enumerate_uptr enm;
struct udev_list_entry *l = NULL;
bool have_output = false;
int r;
retry:
enm.reset(udev_enumerate_new(udev));
if (!enm)
return -ENOMEM;
r = udev_enumerate_add_match_subsystem(enm.get(), "rbd");
if (r < 0)
return r;
r = udev_enumerate_scan_devices(enm.get());
if (r < 0) {
if (r == -ENOENT || r == -ENODEV) {
std::cerr << "rbd: udev enumerate failed, retrying" << std::endl;
goto retry;
}
return r;
}
udev_list_entry_foreach(l, udev_enumerate_get_list_entry(enm.get())) {
auto dev = dev_from_list_entry(udev, l);
if (dev) {
have_output |= dump_one_image(f, tbl, dev.get());
}
}
return have_output;
}
static int dump_images(struct krbd_ctx *ctx, Formatter *f)
{
TextTable tbl;
int r;
if (f) {
f->open_array_section("devices");
} else {
tbl.define_column("id", TextTable::LEFT, TextTable::LEFT);
tbl.define_column("pool", TextTable::LEFT, TextTable::LEFT);
tbl.define_column("namespace", TextTable::LEFT, TextTable::LEFT);
tbl.define_column("image", TextTable::LEFT, TextTable::LEFT);
tbl.define_column("snap", TextTable::LEFT, TextTable::LEFT);
tbl.define_column("device", TextTable::LEFT, TextTable::LEFT);
}
r = do_dump(ctx->udev, f, &tbl);
if (f) {
f->close_section();
f->flush(std::cout);
} else {
if (r > 0)
std::cout << tbl;
}
return r;
}
static int is_mapped_image(struct udev *udev, const krbd_spec& spec,
string *pname)
{
udev_enumerate_uptr enm;
struct udev_list_entry *l;
int r;
r = enumerate_devices(udev, spec, &enm);
if (r < 0)
return r;
l = udev_enumerate_get_list_entry(enm.get());
if (l) {
auto dev = dev_from_list_entry(udev, l);
if (!dev)
return -ENOMEM;
*pname = get_devnode(dev.get());
return 1;
}
return 0; /* not mapped */
}
extern "C" int krbd_create_from_context(rados_config_t cct, uint32_t flags,
struct krbd_ctx **pctx)
{
struct krbd_ctx *ctx = new struct krbd_ctx();
ctx->cct = reinterpret_cast<CephContext *>(cct);
ctx->udev = udev_new();
if (!ctx->udev) {
delete ctx;
return -ENOMEM;
}
ctx->flags = flags;
*pctx = ctx;
return 0;
}
extern "C" void krbd_destroy(struct krbd_ctx *ctx)
{
if (!ctx)
return;
udev_unref(ctx->udev);
delete ctx;
}
extern "C" int krbd_map(struct krbd_ctx *ctx,
const char *pool_name,
const char *nspace_name,
const char *image_name,
const char *snap_name,
const char *options,
char **pdevnode)
{
krbd_spec spec(pool_name, nspace_name, image_name, snap_name);
string name;
char *devnode;
int r;
r = map_image(ctx, spec, options, &name);
if (r < 0)
return r;
devnode = strdup(name.c_str());
if (!devnode)
return -ENOMEM;
*pdevnode = devnode;
return r;
}
extern "C" int krbd_unmap(struct krbd_ctx *ctx, const char *devnode,
const char *options)
{
return unmap_image(ctx, devnode, options);
}
extern "C" int krbd_unmap_by_spec(struct krbd_ctx *ctx,
const char *pool_name,
const char *nspace_name,
const char *image_name,
const char *snap_name,
const char *options)
{
krbd_spec spec(pool_name, nspace_name, image_name, snap_name);
return unmap_image(ctx, spec, options);
}
int krbd_showmapped(struct krbd_ctx *ctx, Formatter *f)
{
return dump_images(ctx, f);
}
extern "C" int krbd_is_mapped(struct krbd_ctx *ctx,
const char *pool_name,
const char *nspace_name,
const char *image_name,
const char *snap_name,
char **pdevnode)
{
krbd_spec spec(pool_name, nspace_name, image_name, snap_name);
string name;
char *devnode;
int r;
r = is_mapped_image(ctx->udev, spec, &name);
if (r <= 0) /* error or not mapped */
return r;
devnode = strdup(name.c_str());
if (!devnode)
return -ENOMEM;
*pdevnode = devnode;
return r;
}
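/*
 * Usage sketch for the krbd C API (illustrative only, not part of the
 * build; error handling is trimmed, the pool/image names are made up,
 * and the empty namespace/snapshot strings are assumed to select the
 * defaults; rados_cct would be a rados_config_t obtained from the
 * caller, e.g. via librados' rados_cct()):
 *
 *   struct krbd_ctx *ctx;
 *   char *devnode;
 *
 *   if (krbd_create_from_context(rados_cct, 0, &ctx) < 0)
 *     return;
 *   if (krbd_map(ctx, "rbd", "", "myimage", "", "", &devnode) == 0) {
 *     // ... use devnode (e.g. "/dev/rbd0") ...
 *     krbd_unmap(ctx, devnode, "");
 *     free(devnode);  // krbd_map() strdup()ed it
 *   }
 *   krbd_destroy(ctx);
 */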
| 32,172 | 26.80726 | 91 |
cc
|
null |
ceph-main/src/libcephfs.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2009-2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <fcntl.h>
#include <iostream>
#include <string.h>
#include <string>
#include "auth/Crypto.h"
#include "client/Client.h"
#include "client/Inode.h"
#include "librados/RadosClient.h"
#include "common/async/context_pool.h"
#include "common/ceph_argparse.h"
#include "common/common_init.h"
#include "common/config.h"
#include "common/version.h"
#include "mon/MonClient.h"
#include "include/str_list.h"
#include "include/stringify.h"
#include "include/object.h"
#include "messages/MMonMap.h"
#include "msg/Messenger.h"
#include "include/ceph_assert.h"
#include "mds/MDSMap.h"
#include "include/cephfs/libcephfs.h"
#define DEFAULT_UMASK 002
using namespace std;
static mode_t umask_cb(void *);
namespace {
// Set things up this way so we don't start up threads until mount time,
// kill them off when the last mount goes away, and stay tolerant of
// multiple mounts of overlapping duration.
std::shared_ptr<ceph::async::io_context_pool> get_icp(CephContext* cct)
{
static std::mutex m;
static std::weak_ptr<ceph::async::io_context_pool> icwp;
std::unique_lock l(m);
auto icp = icwp.lock();
if (icp)
return icp;
icp = std::make_shared<ceph::async::io_context_pool>();
icwp = icp;
icp->start(cct->_conf.get_val<std::uint64_t>("client_asio_thread_count"));
return icp;
}
}
struct ceph_mount_info
{
mode_t umask = DEFAULT_UMASK;
std::shared_ptr<ceph::async::io_context_pool> icp;
public:
explicit ceph_mount_info(CephContext *cct_)
: default_perms(),
mounted(false),
inited(false),
client(nullptr),
monclient(nullptr),
messenger(nullptr),
cct(cct_)
{
if (cct_) {
cct->get();
}
}
~ceph_mount_info()
{
try {
shutdown();
if (cct) {
cct->put();
cct = nullptr;
}
}
catch (const std::exception& e) {
// we shouldn't get here, but if we do, we want to know about it.
lderr(cct) << "ceph_mount_info::~ceph_mount_info: caught exception: "
<< e.what() << dendl;
}
catch (...) {
// ignore
}
}
int init()
{
int ret;
if (!cct->_log->is_started()) {
cct->_log->start();
}
icp = get_icp(cct);
{
MonClient mc_bootstrap(cct, icp->get_io_context());
ret = mc_bootstrap.get_monmap_and_config();
if (ret < 0)
return ret;
}
common_init_finish(cct);
    // monmap
    monclient = new MonClient(cct, icp->get_io_context());
    ret = -CEPHFS_ERROR_MON_MAP_BUILD; // defined in libcephfs.h
    if (monclient->build_initial_monmap() < 0)
      goto fail;
    // network connection
    messenger = Messenger::create_client_messenger(cct, "client");
    // at last, the client
    ret = -CEPHFS_ERROR_NEW_CLIENT; // defined in libcephfs.h
    client = new StandaloneClient(messenger, monclient, icp->get_io_context());
    if (!client)
      goto fail;
    ret = -CEPHFS_ERROR_MESSENGER_START; // defined in libcephfs.h
if (messenger->start() != 0)
goto fail;
ret = client->init();
if (ret)
goto fail;
{
ceph_client_callback_args args = {};
args.handle = this;
args.umask_cb = umask_cb;
client->ll_register_callbacks(&args);
}
default_perms = Client::pick_my_perms(cct);
inited = true;
return 0;
fail:
shutdown();
return ret;
}
int select_filesystem(const std::string &fs_name_)
{
if (mounted) {
return -CEPHFS_EISCONN;
}
fs_name = fs_name_;
return 0;
}
const std::string& get_filesystem(void)
{
return fs_name;
}
int mount(const std::string &mount_root, const UserPerm& perms)
{
int ret;
if (mounted)
return -CEPHFS_EISCONN;
if (!inited) {
ret = init();
if (ret != 0) {
return ret;
}
}
ret = client->mount(mount_root, perms, false, fs_name);
if (ret) {
shutdown();
return ret;
} else {
mounted = true;
return 0;
}
}
int unmount()
{
if (!mounted)
return -CEPHFS_ENOTCONN;
shutdown();
return 0;
}
int abort_conn()
{
if (mounted) {
client->abort_conn();
mounted = false;
}
return 0;
}
void shutdown()
{
if (mounted) {
client->unmount();
mounted = false;
}
if (inited) {
client->shutdown();
inited = false;
}
if (messenger) {
messenger->shutdown();
messenger->wait();
delete messenger;
messenger = nullptr;
}
icp.reset();
if (monclient) {
delete monclient;
monclient = nullptr;
}
if (client) {
delete client;
client = nullptr;
}
}
bool is_initialized() const
{
return inited;
}
bool is_mounted()
{
return mounted;
}
mode_t set_umask(mode_t umask)
{
this->umask = umask;
return umask;
}
std::string getaddrs()
{
CachedStackStringStream cos;
*cos << messenger->get_myaddrs();
return std::string(cos->strv());
}
int conf_read_file(const char *path_list)
{
int ret = cct->_conf.parse_config_files(path_list, nullptr, 0);
if (ret)
return ret;
cct->_conf.apply_changes(nullptr);
cct->_conf.complain_about_parse_error(cct);
return 0;
}
int conf_parse_argv(int argc, const char **argv)
{
auto args = argv_to_vec(argc, argv);
int ret = cct->_conf.parse_argv(args);
if (ret)
return ret;
cct->_conf.apply_changes(nullptr);
return 0;
}
int conf_parse_env(const char *name)
{
auto& conf = cct->_conf;
conf.parse_env(cct->get_module_type(), name);
conf.apply_changes(nullptr);
return 0;
}
int conf_set(const char *option, const char *value)
{
int ret = cct->_conf.set_val(option, value);
if (ret)
return ret;
cct->_conf.apply_changes(nullptr);
return 0;
}
int conf_get(const char *option, char *buf, size_t len)
{
char *tmp = buf;
return cct->_conf.get_val(option, &tmp, len);
}
Client *get_client()
{
return client;
}
const char *get_cwd(const UserPerm& perms)
{
client->getcwd(cwd, perms);
return cwd.c_str();
}
int chdir(const char *to, const UserPerm& perms)
{
return client->chdir(to, cwd, perms);
}
CephContext *get_ceph_context() const {
return cct;
}
UserPerm default_perms;
private:
bool mounted;
bool inited;
StandaloneClient *client;
MonClient *monclient;
Messenger *messenger;
CephContext *cct;
std::string cwd;
std::string fs_name;
};
static mode_t umask_cb(void *handle)
{
return ((struct ceph_mount_info *)handle)->umask;
}
static void do_out_buffer(bufferlist& outbl, char **outbuf, size_t *outbuflen)
{
if (outbuf) {
if (outbl.length() > 0) {
*outbuf = (char *)malloc(outbl.length());
memcpy(*outbuf, outbl.c_str(), outbl.length());
} else {
*outbuf = nullptr;
}
}
if (outbuflen)
*outbuflen = outbl.length();
}
static void do_out_buffer(string& outbl, char **outbuf, size_t *outbuflen)
{
if (outbuf) {
if (outbl.length() > 0) {
*outbuf = (char *)malloc(outbl.length());
memcpy(*outbuf, outbl.c_str(), outbl.length());
} else {
*outbuf = nullptr;
}
}
if (outbuflen)
*outbuflen = outbl.length();
}
extern "C" UserPerm *ceph_userperm_new(uid_t uid, gid_t gid, int ngids,
gid_t *gidlist)
{
return new (std::nothrow) UserPerm(uid, gid, ngids, gidlist);
}
extern "C" void ceph_userperm_destroy(UserPerm *perm)
{
delete perm;
}
extern "C" const char *ceph_version(int *pmajor, int *pminor, int *ppatch)
{
int major, minor, patch;
const char *v = ceph_version_to_str();
int n = sscanf(v, "%d.%d.%d", &major, &minor, &patch);
if (pmajor)
*pmajor = (n >= 1) ? major : 0;
if (pminor)
*pminor = (n >= 2) ? minor : 0;
if (ppatch)
*ppatch = (n >= 3) ? patch : 0;
return PROJECT_VERSION;
}
extern "C" int ceph_create_with_context(struct ceph_mount_info **cmount, CephContext *cct)
{
*cmount = new struct ceph_mount_info(cct);
return 0;
}
extern "C" int ceph_create_from_rados(struct ceph_mount_info **cmount,
rados_t cluster)
{
auto rados = (librados::RadosClient *) cluster;
auto cct = rados->cct;
return ceph_create_with_context(cmount, cct);
}
extern "C" int ceph_create(struct ceph_mount_info **cmount, const char * const id)
{
CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
if (id) {
iparams.name.set(CEPH_ENTITY_TYPE_CLIENT, id);
}
CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0);
  cct->_conf.parse_env(cct->get_module_type()); // environment variables override
cct->_conf.apply_changes(nullptr);
int ret = ceph_create_with_context(cmount, cct);
cct->put();
cct = nullptr;
return ret;
}
extern "C" int ceph_unmount(struct ceph_mount_info *cmount)
{
return cmount->unmount();
}
extern "C" int ceph_abort_conn(struct ceph_mount_info *cmount)
{
return cmount->abort_conn();
}
extern "C" int ceph_release(struct ceph_mount_info *cmount)
{
if (cmount->is_mounted())
return -CEPHFS_EISCONN;
delete cmount;
cmount = nullptr;
return 0;
}
extern "C" void ceph_shutdown(struct ceph_mount_info *cmount)
{
cmount->shutdown();
delete cmount;
cmount = nullptr;
}
extern "C" uint64_t ceph_get_instance_id(struct ceph_mount_info *cmount)
{
if (cmount->is_initialized())
return cmount->get_client()->get_nodeid().v;
return 0;
}
extern "C" int ceph_getaddrs(struct ceph_mount_info *cmount, char** addrs)
{
if (!cmount->is_initialized())
return -CEPHFS_ENOTCONN;
auto s = cmount->getaddrs();
*addrs = strdup(s.c_str());
return 0;
}
extern "C" int ceph_conf_read_file(struct ceph_mount_info *cmount, const char *path)
{
return cmount->conf_read_file(path);
}
extern "C" mode_t ceph_umask(struct ceph_mount_info *cmount, mode_t mode)
{
return cmount->set_umask(mode);
}
extern "C" int ceph_conf_parse_argv(struct ceph_mount_info *cmount, int argc,
const char **argv)
{
return cmount->conf_parse_argv(argc, argv);
}
extern "C" int ceph_conf_parse_env(struct ceph_mount_info *cmount, const char *name)
{
return cmount->conf_parse_env(name);
}
extern "C" int ceph_conf_set(struct ceph_mount_info *cmount, const char *option,
const char *value)
{
return cmount->conf_set(option, value);
}
extern "C" int ceph_conf_get(struct ceph_mount_info *cmount, const char *option,
char *buf, size_t len)
{
if (!buf) {
return -CEPHFS_EINVAL;
}
return cmount->conf_get(option, buf, len);
}
extern "C" int ceph_set_mount_timeout(struct ceph_mount_info *cmount, uint32_t timeout) {
if (cmount->is_mounted()) {
return -CEPHFS_EINVAL;
}
auto timeout_str = stringify(timeout);
return ceph_conf_set(cmount, "client_mount_timeout", timeout_str.c_str());
}
extern "C" int ceph_mds_command(struct ceph_mount_info *cmount,
const char *mds_spec,
const char **cmd,
size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outsbuf, size_t *outsbuflen)
{
bufferlist inbl;
bufferlist outbl;
std::vector<string> cmdv;
std::string outs;
if (!cmount->is_initialized()) {
return -CEPHFS_ENOTCONN;
}
// Construct inputs
for (size_t i = 0; i < cmdlen; ++i) {
cmdv.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
// Issue remote command
C_SaferCond cond;
int r = cmount->get_client()->mds_command(
mds_spec,
cmdv, inbl,
&outbl, &outs,
&cond);
if (r != 0) {
goto out;
}
// Wait for completion
r = cond.wait();
// Construct outputs
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outs, outsbuf, outsbuflen);
out:
return r;
}
extern "C" int ceph_init(struct ceph_mount_info *cmount)
{
return cmount->init();
}
extern "C" int ceph_select_filesystem(struct ceph_mount_info *cmount,
const char *fs_name)
{
if (fs_name == nullptr) {
return -CEPHFS_EINVAL;
}
return cmount->select_filesystem(fs_name);
}
extern "C" int ceph_mount(struct ceph_mount_info *cmount, const char *root)
{
std::string mount_root;
if (root)
mount_root = root;
return cmount->mount(mount_root, cmount->default_perms);
}
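/*
 * Typical mount lifecycle (a sketch, assuming a reachable cluster and a
 * keyring discoverable through the usual config paths):
 *
 *   struct ceph_mount_info *cmount;
 *
 *   ceph_create(&cmount, NULL);         // "client.admin" by default
 *   ceph_conf_read_file(cmount, NULL);  // search the default conf paths
 *   if (ceph_mount(cmount, "/") == 0) {
 *     // ... ceph_open()/ceph_read()/... against the mounted tree ...
 *     ceph_unmount(cmount);
 *   }
 *   ceph_release(cmount);               // frees cmount once unmounted
 */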
extern "C" int ceph_is_mounted(struct ceph_mount_info *cmount)
{
return cmount->is_mounted() ? 1 : 0;
}
extern "C" struct UserPerm *ceph_mount_perms(struct ceph_mount_info *cmount)
{
return &cmount->default_perms;
}
extern "C" int64_t ceph_get_fs_cid(struct ceph_mount_info *cmount)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->get_fs_cid();
}
extern "C" int ceph_mount_perms_set(struct ceph_mount_info *cmount,
struct UserPerm *perms)
{
if (cmount->is_mounted())
return -CEPHFS_EISCONN;
cmount->default_perms = *perms;
return 0;
}
extern "C" int ceph_statfs(struct ceph_mount_info *cmount, const char *path,
struct statvfs *stbuf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->statfs(path, stbuf, cmount->default_perms);
}
extern "C" int ceph_get_local_osd(struct ceph_mount_info *cmount)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->get_local_osd();
}
extern "C" const char* ceph_getcwd(struct ceph_mount_info *cmount)
{
return cmount->get_cwd(cmount->default_perms);
}
extern "C" int ceph_chdir (struct ceph_mount_info *cmount, const char *s)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->chdir(s, cmount->default_perms);
}
extern "C" int ceph_opendir(struct ceph_mount_info *cmount,
const char *name, struct ceph_dir_result **dirpp)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->opendir(name, (dir_result_t **)dirpp, cmount->default_perms);
}
extern "C" int ceph_fdopendir(struct ceph_mount_info *cmount, int dirfd,
struct ceph_dir_result **dirpp)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fdopendir(dirfd, (dir_result_t **)dirpp, cmount->default_perms);
}
extern "C" int ceph_closedir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->closedir(reinterpret_cast<dir_result_t*>(dirp));
}
extern "C" struct dirent * ceph_readdir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp)
{
if (!cmount->is_mounted()) {
/* Client::readdir also sets errno to signal errors. */
errno = CEPHFS_ENOTCONN;
return nullptr;
}
return cmount->get_client()->readdir(reinterpret_cast<dir_result_t*>(dirp));
}
extern "C" int ceph_readdir_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->readdir_r(reinterpret_cast<dir_result_t*>(dirp), de);
}
extern "C" int ceph_readdirplus_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp,
struct dirent *de, struct ceph_statx *stx, unsigned want,
unsigned flags, struct Inode **out)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->readdirplus_r(reinterpret_cast<dir_result_t*>(dirp), de, stx, want, flags, out);
}
extern "C" int ceph_open_snapdiff(struct ceph_mount_info* cmount,
const char* root_path,
const char* rel_path,
const char* snap1,
const char* snap2,
struct ceph_snapdiff_info* out)
{
if (!cmount->is_mounted()) {
/* we set errno to signal errors. */
errno = ENOTCONN;
return -errno;
}
if (!out || !root_path || !rel_path ||
!snap1 || !*snap1 || !snap2 || !*snap2) {
errno = EINVAL;
return -errno;
}
out->cmount = cmount;
out->dir1 = out->dir_aux = nullptr;
char full_path1[PATH_MAX];
char snapdir[PATH_MAX];
cmount->conf_get("client_snapdir", snapdir, sizeof(snapdir) - 1);
int n = snprintf(full_path1, PATH_MAX,
"%s/%s/%s/%s", root_path, snapdir, snap1, rel_path);
  if (n < 0 || n >= PATH_MAX) { // n >= PATH_MAX means snprintf truncated
errno = ENAMETOOLONG;
return -errno;
}
char full_path2[PATH_MAX];
n = snprintf(full_path2, PATH_MAX,
"%s/%s/%s/%s", root_path, snapdir, snap2, rel_path);
  if (n < 0 || n >= PATH_MAX) { // n >= PATH_MAX means snprintf truncated
errno = ENAMETOOLONG;
return -errno;
}
int r = ceph_opendir(cmount, full_path1, &(out->dir1));
if (r != 0) {
    // it's OK for one of the snap paths to be absent - try the other one
r = ceph_opendir(cmount, full_path2, &(out->dir1));
if (r != 0) {
// both snaps are absent, giving up
errno = ENOENT;
return -errno;
}
    std::swap(snap1, snap2); // the code below uses snap2 to learn the other snapid
} else {
    // try to open the second snapshot to learn its snapid and to get
    // the entry loaded into the client cache, if any.
r = ceph_opendir(cmount, full_path2, &(out->dir_aux));
    // paranoia: we rely on this value below
    out->dir_aux = r == 0 ? out->dir_aux : nullptr;
}
if (!out->dir_aux) {
    // now try to learn the second snapshot's id using the snapshot's root
n = snprintf(full_path2, PATH_MAX,
"%s/%s/%s", root_path, snapdir, snap2);
    // we've already checked above that the longer string fits,
    // hence this is unlikely to assert
    ceph_assert(n > 0 && n < PATH_MAX);
r = ceph_opendir(cmount, full_path2, &(out->dir_aux));
if (r != 0) {
goto close_err;
}
}
return 0;
close_err:
ceph_close_snapdiff(out);
return r;
}
extern "C" int ceph_readdir_snapdiff(struct ceph_snapdiff_info* snapdiff,
struct ceph_snapdiff_entry_t* out)
{
if (!snapdiff->cmount->is_mounted()) {
/* also sets errno to signal errors. */
errno = ENOTCONN;
return -errno;
}
dir_result_t* d1 = reinterpret_cast<dir_result_t*>(snapdiff->dir1);
dir_result_t* d2 = reinterpret_cast<dir_result_t*>(snapdiff->dir_aux);
if (!d1 || !d2 || !d1->inode || !d2->inode) {
errno = EINVAL;
return -errno;
}
snapid_t snapid;
int r = snapdiff->cmount->get_client()->readdir_snapdiff(
d1,
d2->inode->snapid,
&(out->dir_entry),
&snapid);
if (r >= 0) {
// converting snapid_t to uint64_t to avoid snapid_t exposure
out->snapid = snapid;
}
return r;
}
extern "C" int ceph_close_snapdiff(struct ceph_snapdiff_info* snapdiff)
{
if (!snapdiff->cmount || !snapdiff->cmount->is_mounted()) {
/* also sets errno to signal errors. */
errno = ENOTCONN;
return -errno;
}
if (snapdiff->dir_aux) {
ceph_closedir(snapdiff->cmount, snapdiff->dir_aux);
}
if (snapdiff->dir1) {
ceph_closedir(snapdiff->cmount, snapdiff->dir1);
}
snapdiff->cmount = nullptr;
snapdiff->dir1 = snapdiff->dir_aux = nullptr;
return 0;
}
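/*
 * Snapdiff usage sketch (illustrative; the path and snapshot names are
 * made up, and the readdir-style convention of a positive return while
 * entries remain is assumed):
 *
 *   struct ceph_snapdiff_info info;
 *   struct ceph_snapdiff_entry_t entry;
 *
 *   if (ceph_open_snapdiff(cmount, "/", "dir", "snap1", "snap2",
 *                          &info) == 0) {
 *     while (ceph_readdir_snapdiff(&info, &entry) > 0) {
 *       // entry.dir_entry names something that differs between the two
 *       // snapshots; entry.snapid says which snapshot it belongs to
 *     }
 *     ceph_close_snapdiff(&info);
 *   }
 */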
extern "C" int ceph_getdents(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp,
char *buf, int buflen)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->getdents(reinterpret_cast<dir_result_t*>(dirp), buf, buflen);
}
extern "C" int ceph_getdnames(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp,
char *buf, int buflen)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->getdnames(reinterpret_cast<dir_result_t*>(dirp), buf, buflen);
}
extern "C" void ceph_rewinddir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp)
{
if (!cmount->is_mounted())
return;
cmount->get_client()->rewinddir(reinterpret_cast<dir_result_t*>(dirp));
}
extern "C" int64_t ceph_telldir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->telldir(reinterpret_cast<dir_result_t*>(dirp));
}
extern "C" void ceph_seekdir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, int64_t offset)
{
if (!cmount->is_mounted())
return;
cmount->get_client()->seekdir(reinterpret_cast<dir_result_t*>(dirp), offset);
}
extern "C" int ceph_may_delete(struct ceph_mount_info *cmount, const char *path)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->may_delete(path, cmount->default_perms);
}
extern "C" int ceph_link (struct ceph_mount_info *cmount, const char *existing,
const char *newname)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->link(existing, newname, cmount->default_perms);
}
extern "C" int ceph_unlink(struct ceph_mount_info *cmount, const char *path)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->unlink(path, cmount->default_perms);
}
extern "C" int ceph_unlinkat(struct ceph_mount_info *cmount, int dirfd, const char *relpath, int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->unlinkat(dirfd, relpath, flags, cmount->default_perms);
}
extern "C" int ceph_rename(struct ceph_mount_info *cmount, const char *from,
const char *to)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->rename(from, to, cmount->default_perms);
}
// dirs
extern "C" int ceph_mkdir(struct ceph_mount_info *cmount, const char *path, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->mkdir(path, mode, cmount->default_perms);
}
extern "C" int ceph_mkdirat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->mkdirat(dirfd, relpath, mode, cmount->default_perms);
}
extern "C" int ceph_mksnap(struct ceph_mount_info *cmount, const char *path, const char *name,
mode_t mode, struct snap_metadata *snap_metadata, size_t nr_snap_metadata)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
size_t i = 0;
std::map<std::string, std::string> metadata;
while (i < nr_snap_metadata) {
metadata.emplace(snap_metadata[i].key, snap_metadata[i].value);
++i;
}
return cmount->get_client()->mksnap(path, name, cmount->default_perms, mode, metadata);
}
extern "C" int ceph_rmsnap(struct ceph_mount_info *cmount, const char *path, const char *name)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->rmsnap(path, name, cmount->default_perms, true);
}
extern "C" int ceph_mkdirs(struct ceph_mount_info *cmount, const char *path, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->mkdirs(path, mode, cmount->default_perms);
}
extern "C" int ceph_rmdir(struct ceph_mount_info *cmount, const char *path)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->rmdir(path, cmount->default_perms);
}
// symlinks
extern "C" int ceph_readlink(struct ceph_mount_info *cmount, const char *path,
char *buf, int64_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->readlink(path, buf, size, cmount->default_perms);
}
extern "C" int ceph_readlinkat(struct ceph_mount_info *cmount, int dirfd,
const char *relpath, char *buf, int64_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->readlinkat(dirfd, relpath, buf, size, cmount->default_perms);
}
extern "C" int ceph_symlink(struct ceph_mount_info *cmount, const char *existing,
const char *newname)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->symlink(existing, newname, cmount->default_perms);
}
extern "C" int ceph_symlinkat(struct ceph_mount_info *cmount, const char *existing, int dirfd,
const char *newname)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->symlinkat(existing, dirfd, newname, cmount->default_perms);
}
extern "C" int ceph_fstatx(struct ceph_mount_info *cmount, int fd, struct ceph_statx *stx,
unsigned int want, unsigned int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->fstatx(fd, stx, cmount->default_perms,
want, flags);
}
extern "C" int ceph_statxat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
struct ceph_statx *stx, unsigned int want, unsigned int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->statxat(dirfd, relpath, stx, cmount->default_perms,
want, flags);
}
extern "C" int ceph_statx(struct ceph_mount_info *cmount, const char *path,
struct ceph_statx *stx, unsigned int want, unsigned int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->statx(path, stx, cmount->default_perms,
want, flags);
}
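/*
 * Example (sketch): fetch only size and mtime, without following a final
 * symlink; stx_mask reports which of the requested fields came back.
 *
 *   struct ceph_statx stx;
 *   if (ceph_statx(cmount, "/some/path", &stx,
 *                  CEPH_STATX_SIZE | CEPH_STATX_MTIME,
 *                  AT_SYMLINK_NOFOLLOW) == 0) {
 *     // use stx.stx_size / stx.stx_mtime if set in stx.stx_mask
 *   }
 */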
extern "C" int ceph_fsetattrx(struct ceph_mount_info *cmount, int fd,
struct ceph_statx *stx, int mask)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fsetattrx(fd, stx, mask, cmount->default_perms);
}
extern "C" int ceph_setattrx(struct ceph_mount_info *cmount, const char *relpath,
struct ceph_statx *stx, int mask, int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->setattrx(relpath, stx, mask,
cmount->default_perms, flags);
}
// *xattr() calls supporting samba/vfs
extern "C" int ceph_getxattr(struct ceph_mount_info *cmount, const char *path, const char *name, void *value, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->getxattr(path, name, value, size, cmount->default_perms);
}
extern "C" int ceph_lgetxattr(struct ceph_mount_info *cmount, const char *path, const char *name, void *value, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lgetxattr(path, name, value, size, cmount->default_perms);
}
extern "C" int ceph_fgetxattr(struct ceph_mount_info *cmount, int fd, const char *name, void *value, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fgetxattr(fd, name, value, size, cmount->default_perms);
}
extern "C" int ceph_listxattr(struct ceph_mount_info *cmount, const char *path, char *list, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->listxattr(path, list, size, cmount->default_perms);
}
extern "C" int ceph_llistxattr(struct ceph_mount_info *cmount, const char *path, char *list, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->llistxattr(path, list, size, cmount->default_perms);
}
extern "C" int ceph_flistxattr(struct ceph_mount_info *cmount, int fd, char *list, size_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->flistxattr(fd, list, size, cmount->default_perms);
}
extern "C" int ceph_removexattr(struct ceph_mount_info *cmount, const char *path, const char *name)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->removexattr(path, name, cmount->default_perms);
}
extern "C" int ceph_lremovexattr(struct ceph_mount_info *cmount, const char *path, const char *name)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lremovexattr(path, name, cmount->default_perms);
}
extern "C" int ceph_fremovexattr(struct ceph_mount_info *cmount, int fd, const char *name)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fremovexattr(fd, name, cmount->default_perms);
}
extern "C" int ceph_setxattr(struct ceph_mount_info *cmount, const char *path, const char *name, const void *value, size_t size, int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->setxattr(path, name, value, size, flags, cmount->default_perms);
}
extern "C" int ceph_lsetxattr(struct ceph_mount_info *cmount, const char *path, const char *name, const void *value, size_t size, int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lsetxattr(path, name, value, size, flags, cmount->default_perms);
}
extern "C" int ceph_fsetxattr(struct ceph_mount_info *cmount, int fd, const char *name, const void *value, size_t size, int flags)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fsetxattr(fd, name, value, size, flags, cmount->default_perms);
}
/* end xattr support */
extern "C" int ceph_stat(struct ceph_mount_info *cmount, const char *path, struct stat *stbuf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->stat(path, stbuf, cmount->default_perms);
}
extern "C" int ceph_fstat(struct ceph_mount_info *cmount, int fd, struct stat *stbuf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fstat(fd, stbuf, cmount->default_perms);
}
extern "C" int ceph_lstat(struct ceph_mount_info *cmount, const char *path, struct stat *stbuf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lstat(path, stbuf, cmount->default_perms);
}
extern "C" int ceph_chmod(struct ceph_mount_info *cmount, const char *path, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->chmod(path, mode, cmount->default_perms);
}
extern "C" int ceph_lchmod(struct ceph_mount_info *cmount, const char *path, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lchmod(path, mode, cmount->default_perms);
}
extern "C" int ceph_fchmod(struct ceph_mount_info *cmount, int fd, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fchmod(fd, mode, cmount->default_perms);
}
extern "C" int ceph_chmodat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
mode_t mode, int flags) {
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->chmodat(dirfd, relpath, mode, flags, cmount->default_perms);
}
extern "C" int ceph_chown(struct ceph_mount_info *cmount, const char *path,
int uid, int gid)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->chown(path, uid, gid, cmount->default_perms);
}
extern "C" int ceph_fchown(struct ceph_mount_info *cmount, int fd,
int uid, int gid)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fchown(fd, uid, gid, cmount->default_perms);
}
extern "C" int ceph_lchown(struct ceph_mount_info *cmount, const char *path,
int uid, int gid)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lchown(path, uid, gid, cmount->default_perms);
}
extern "C" int ceph_chownat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
uid_t uid, gid_t gid, int flags) {
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->chownat(dirfd, relpath, uid, gid, flags, cmount->default_perms);
}
extern "C" int ceph_utime(struct ceph_mount_info *cmount, const char *path,
struct utimbuf *buf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->utime(path, buf, cmount->default_perms);
}
extern "C" int ceph_futime(struct ceph_mount_info *cmount, int fd,
struct utimbuf *buf)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->futime(fd, buf, cmount->default_perms);
}
extern "C" int ceph_utimes(struct ceph_mount_info *cmount, const char *path,
struct timeval times[2])
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->utimes(path, times, cmount->default_perms);
}
extern "C" int ceph_lutimes(struct ceph_mount_info *cmount, const char *path,
struct timeval times[2])
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lutimes(path, times, cmount->default_perms);
}
extern "C" int ceph_futimes(struct ceph_mount_info *cmount, int fd,
struct timeval times[2])
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->futimes(fd, times, cmount->default_perms);
}
extern "C" int ceph_futimens(struct ceph_mount_info *cmount, int fd,
struct timespec times[2])
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->futimens(fd, times, cmount->default_perms);
}
extern "C" int ceph_utimensat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
struct timespec times[2], int flags) {
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->utimensat(dirfd, relpath, times, flags, cmount->default_perms);
}
extern "C" int ceph_flock(struct ceph_mount_info *cmount, int fd, int operation,
uint64_t owner)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->flock(fd, operation, owner);
}
extern "C" int ceph_truncate(struct ceph_mount_info *cmount, const char *path,
int64_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->truncate(path, size, cmount->default_perms);
}
// file ops
extern "C" int ceph_mknod(struct ceph_mount_info *cmount, const char *path,
mode_t mode, dev_t rdev)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->mknod(path, mode, cmount->default_perms, rdev);
}
extern "C" int ceph_open(struct ceph_mount_info *cmount, const char *path,
int flags, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->open(path, flags, cmount->default_perms, mode);
}
extern "C" int ceph_openat(struct ceph_mount_info *cmount, int dirfd, const char *relpath,
int flags, mode_t mode)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->openat(dirfd, relpath, flags, cmount->default_perms, mode);
}
extern "C" int ceph_open_layout(struct ceph_mount_info *cmount, const char *path, int flags,
mode_t mode, int stripe_unit, int stripe_count, int object_size, const char *data_pool)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->open(path, flags, cmount->default_perms, mode,
stripe_unit, stripe_count,
object_size, data_pool);
}
extern "C" int ceph_close(struct ceph_mount_info *cmount, int fd)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->close(fd);
}
extern "C" int64_t ceph_lseek(struct ceph_mount_info *cmount, int fd,
int64_t offset, int whence)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->lseek(fd, offset, whence);
}
extern "C" int ceph_read(struct ceph_mount_info *cmount, int fd, char *buf,
int64_t size, int64_t offset)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->read(fd, buf, size, offset);
}
extern "C" int ceph_preadv(struct ceph_mount_info *cmount, int fd,
const struct iovec *iov, int iovcnt, int64_t offset)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->preadv(fd, iov, iovcnt, offset);
}
extern "C" int ceph_write(struct ceph_mount_info *cmount, int fd, const char *buf,
int64_t size, int64_t offset)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->write(fd, buf, size, offset);
}
extern "C" int ceph_pwritev(struct ceph_mount_info *cmount, int fd,
const struct iovec *iov, int iovcnt, int64_t offset)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->pwritev(fd, iov, iovcnt, offset);
}
extern "C" int ceph_ftruncate(struct ceph_mount_info *cmount, int fd, int64_t size)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->ftruncate(fd, size, cmount->default_perms);
}
extern "C" int ceph_fsync(struct ceph_mount_info *cmount, int fd, int syncdataonly)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fsync(fd, syncdataonly);
}
extern "C" int ceph_fallocate(struct ceph_mount_info *cmount, int fd, int mode,
int64_t offset, int64_t length)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->fallocate(fd, mode, offset, length);
}
extern "C" int ceph_lazyio(class ceph_mount_info *cmount,
int fd, int enable)
{
return (cmount->get_client()->lazyio(fd, enable));
}
extern "C" int ceph_lazyio_propagate(class ceph_mount_info *cmount,
int fd, int64_t offset, size_t count)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return (cmount->get_client()->lazyio_propagate(fd, offset, count));
}
extern "C" int ceph_lazyio_synchronize(class ceph_mount_info *cmount,
int fd, int64_t offset, size_t count)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return (cmount->get_client()->lazyio_synchronize(fd, offset, count));
}
extern "C" int ceph_sync_fs(struct ceph_mount_info *cmount)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->sync_fs();
}
extern "C" int ceph_get_file_stripe_unit(struct ceph_mount_info *cmount, int fh)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
return l.stripe_unit;
}
extern "C" int ceph_get_path_stripe_unit(struct ceph_mount_info *cmount, const char *path)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
return l.stripe_unit;
}
extern "C" int ceph_get_file_stripe_count(struct ceph_mount_info *cmount, int fh)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
return l.stripe_count;
}
extern "C" int ceph_get_path_stripe_count(struct ceph_mount_info *cmount, const char *path)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
return l.stripe_count;
}
extern "C" int ceph_get_file_object_size(struct ceph_mount_info *cmount, int fh)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
return l.object_size;
}
extern "C" int ceph_get_path_object_size(struct ceph_mount_info *cmount, const char *path)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
return l.object_size;
}
extern "C" int ceph_get_file_pool(struct ceph_mount_info *cmount, int fh)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
return l.pool_id;
}
extern "C" int ceph_get_path_pool(struct ceph_mount_info *cmount, const char *path)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
return l.pool_id;
}
extern "C" int ceph_get_file_pool_name(struct ceph_mount_info *cmount, int fh, char *buf, size_t len)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
string name = cmount->get_client()->get_pool_name(l.pool_id);
if (len == 0)
return name.length();
if (name.length() > len)
return -CEPHFS_ERANGE;
strncpy(buf, name.c_str(), len);
return name.length();
}
extern "C" int ceph_get_pool_name(struct ceph_mount_info *cmount, int pool, char *buf, size_t len)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
string name = cmount->get_client()->get_pool_name(pool);
if (len == 0)
return name.length();
if (name.length() > len)
return -CEPHFS_ERANGE;
strncpy(buf, name.c_str(), len);
return name.length();
}
extern "C" int ceph_get_path_pool_name(struct ceph_mount_info *cmount, const char *path, char *buf, size_t len)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
string name = cmount->get_client()->get_pool_name(l.pool_id);
if (len == 0)
return name.length();
if (name.length() > len)
return -CEPHFS_ERANGE;
strncpy(buf, name.c_str(), len);
return name.length();
}
extern "C" int ceph_get_default_data_pool_name(struct ceph_mount_info *cmount, char *buf, size_t len)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
int64_t pool_id = cmount->get_client()->get_default_pool_id();
string name = cmount->get_client()->get_pool_name(pool_id);
if (len == 0)
return name.length();
if (name.length() > len)
return -CEPHFS_ERANGE;
strncpy(buf, name.c_str(), len);
return name.length();
}
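/*
 * The *_pool_name() getters above share a sizing convention: a call with
 * len == 0 returns the required length, so callers can probe first and
 * then fetch (sketch; note that the result is not NUL-terminated when
 * name.length() == len, hence the +1):
 *
 *   int n = ceph_get_path_pool_name(cmount, path, NULL, 0);
 *   if (n > 0) {
 *     std::vector<char> buf(n + 1, '\0');
 *     n = ceph_get_path_pool_name(cmount, path, buf.data(), buf.size());
 *   }
 */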
extern "C" int ceph_get_file_layout(struct ceph_mount_info *cmount, int fh, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
if (stripe_unit)
*stripe_unit = l.stripe_unit;
if (stripe_count)
*stripe_count = l.stripe_count;
if (object_size)
*object_size = l.object_size;
if (pg_pool)
*pg_pool = l.pool_id;
return 0;
}
extern "C" int ceph_get_path_layout(struct ceph_mount_info *cmount, const char *path, int *stripe_unit, int *stripe_count, int *object_size, int *pg_pool)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
if (stripe_unit)
*stripe_unit = l.stripe_unit;
if (stripe_count)
*stripe_count = l.stripe_count;
if (object_size)
*object_size = l.object_size;
if (pg_pool)
*pg_pool = l.pool_id;
return 0;
}
extern "C" int ceph_get_file_replication(struct ceph_mount_info *cmount, int fh)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->fdescribe_layout(fh, &l);
if (r < 0)
return r;
int rep = cmount->get_client()->get_pool_replication(l.pool_id);
return rep;
}
extern "C" int ceph_get_path_replication(struct ceph_mount_info *cmount, const char *path)
{
file_layout_t l;
int r;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->describe_layout(path, &l, cmount->default_perms);
if (r < 0)
return r;
int rep = cmount->get_client()->get_pool_replication(l.pool_id);
return rep;
}
extern "C" int ceph_set_default_file_stripe_unit(struct ceph_mount_info *cmount,
int stripe)
{
// this option no longer exists
return -CEPHFS_EOPNOTSUPP;
}
extern "C" int ceph_set_default_file_stripe_count(struct ceph_mount_info *cmount,
int count)
{
// this option no longer exists
return -CEPHFS_EOPNOTSUPP;
}
extern "C" int ceph_set_default_object_size(struct ceph_mount_info *cmount, int size)
{
// this option no longer exists
return -CEPHFS_EOPNOTSUPP;
}
extern "C" int ceph_set_default_file_replication(struct ceph_mount_info *cmount,
int replication)
{
// this option no longer exists
return -CEPHFS_EOPNOTSUPP;
}
extern "C" int ceph_set_default_preferred_pg(struct ceph_mount_info *cmount, int osd)
{
// this option no longer exists
return -CEPHFS_EOPNOTSUPP;
}
extern "C" int ceph_get_file_extent_osds(struct ceph_mount_info *cmount, int fh,
int64_t offset, int64_t *length, int *osds, int nosds)
{
if (nosds < 0)
return -CEPHFS_EINVAL;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
vector<int> vosds;
int ret = cmount->get_client()->get_file_extent_osds(fh, offset, length, vosds);
if (ret < 0)
return ret;
if (!nosds)
return vosds.size();
if ((int)vosds.size() > nosds)
return -CEPHFS_ERANGE;
for (int i = 0; i < (int)vosds.size(); i++)
osds[i] = vosds[i];
return vosds.size();
}
extern "C" int ceph_get_osd_crush_location(struct ceph_mount_info *cmount,
int osd, char *path, size_t len)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (!path && len)
return -CEPHFS_EINVAL;
vector<pair<string, string> > loc;
int ret = cmount->get_client()->get_osd_crush_location(osd, loc);
if (ret)
return ret;
size_t needed = 0;
size_t cur = 0;
vector<pair<string, string> >::iterator it;
for (it = loc.begin(); it != loc.end(); ++it) {
string& type = it->first;
string& name = it->second;
needed += type.size() + name.size() + 2;
if (needed <= len) {
if (path)
strcpy(path + cur, type.c_str());
cur += type.size() + 1;
if (path)
strcpy(path + cur, name.c_str());
cur += name.size() + 1;
}
}
if (len == 0)
return needed;
if (needed > len)
return -CEPHFS_ERANGE;
return needed;
}
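/*
 * The returned buffer is a sequence of NUL-terminated (type, name)
 * string pairs, e.g. "host\0node1\0rack\0r1\0".  Walking it (sketch):
 *
 *   int n = ceph_get_osd_crush_location(cmount, osd, NULL, 0);
 *   if (n > 0) {
 *     std::vector<char> buf(n);
 *     ceph_get_osd_crush_location(cmount, osd, buf.data(), buf.size());
 *     for (char *p = buf.data(); p < buf.data() + n; ) {
 *       const char *type = p;  p += strlen(p) + 1;
 *       const char *name = p;  p += strlen(p) + 1;
 *       // (type, name) is one level of the CRUSH hierarchy
 *     }
 *   }
 */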
extern "C" int ceph_get_osd_addr(struct ceph_mount_info *cmount, int osd,
struct sockaddr_storage *addr)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (!addr)
return -CEPHFS_EINVAL;
entity_addr_t address;
int ret = cmount->get_client()->get_osd_addr(osd, address);
if (ret < 0)
return ret;
*addr = address.get_sockaddr_storage();
return 0;
}
extern "C" int ceph_get_file_stripe_address(struct ceph_mount_info *cmount, int fh,
int64_t offset, struct sockaddr_storage *addr, int naddr)
{
vector<entity_addr_t> address;
unsigned i;
int r;
if (naddr < 0)
return -CEPHFS_EINVAL;
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
r = cmount->get_client()->get_file_stripe_address(fh, offset, address);
if (r < 0)
return r;
for (i = 0; i < (unsigned)naddr && i < address.size(); i++)
addr[i] = address[i].get_sockaddr_storage();
/* naddr == 0: drop through and return actual size */
if (naddr && (address.size() > (unsigned)naddr))
return -CEPHFS_ERANGE;
return address.size();
}
extern "C" int ceph_localize_reads(struct ceph_mount_info *cmount, int val)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (!val)
cmount->get_client()->clear_filer_flags(CEPH_OSD_FLAG_LOCALIZE_READS);
else
cmount->get_client()->set_filer_flags(CEPH_OSD_FLAG_LOCALIZE_READS);
return 0;
}
extern "C" CephContext *ceph_get_mount_context(struct ceph_mount_info *cmount)
{
return cmount->get_ceph_context();
}
extern "C" int ceph_debug_get_fd_caps(struct ceph_mount_info *cmount, int fd)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->get_caps_issued(fd);
}
extern "C" int ceph_debug_get_file_caps(struct ceph_mount_info *cmount, const char *path)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->get_caps_issued(path, cmount->default_perms);
}
extern "C" int ceph_get_stripe_unit_granularity(struct ceph_mount_info *cmount)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return CEPH_MIN_STRIPE_UNIT;
}
extern "C" int ceph_get_pool_id(struct ceph_mount_info *cmount, const char *pool_name)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
if (!pool_name || !pool_name[0])
return -CEPHFS_EINVAL;
/* negative range reserved for errors */
int64_t pool_id = cmount->get_client()->get_pool_id(pool_name);
if (pool_id > 0x7fffffff)
return -CEPHFS_ERANGE;
/* get_pool_id error codes fit in int */
return (int)pool_id;
}
extern "C" int ceph_get_pool_replication(struct ceph_mount_info *cmount,
int pool_id)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->get_pool_replication(pool_id);
}
/* Low-level exports */
extern "C" int ceph_ll_lookup_root(struct ceph_mount_info *cmount,
Inode **parent)
{
*parent = cmount->get_client()->get_root();
if (*parent)
return 0;
return -CEPHFS_EFAULT;
}
extern "C" struct Inode *ceph_ll_get_inode(class ceph_mount_info *cmount,
vinodeno_t vino)
{
return (cmount->get_client())->ll_get_inode(vino);
}
extern "C" int ceph_ll_lookup_vino(
struct ceph_mount_info *cmount,
vinodeno_t vino,
Inode **inode)
{
return (cmount->get_client())->ll_lookup_vino(vino, cmount->default_perms, inode);
}
/**
* Populates the client cache with the requested inode, and its
* parent dentry.
*/
extern "C" int ceph_ll_lookup_inode(
struct ceph_mount_info *cmount,
struct inodeno_t ino,
Inode **inode)
{
return (cmount->get_client())->ll_lookup_inode(ino, cmount->default_perms, inode);
}
extern "C" int ceph_ll_lookup(struct ceph_mount_info *cmount,
Inode *parent, const char *name, Inode **out,
struct ceph_statx *stx, unsigned want,
unsigned flags, const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return (cmount->get_client())->ll_lookupx(parent, name, out, stx, want,
flags, *perms);
}
extern "C" int ceph_ll_put(class ceph_mount_info *cmount, Inode *in)
{
return (cmount->get_client()->ll_put(in));
}
extern "C" int ceph_ll_forget(class ceph_mount_info *cmount, Inode *in,
int count)
{
return (cmount->get_client()->ll_forget(in, count));
}
extern "C" int ceph_ll_walk(struct ceph_mount_info *cmount, const char* name, Inode **i,
struct ceph_statx *stx, unsigned int want, unsigned int flags,
const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return(cmount->get_client()->ll_walk(name, i, stx, want, flags, *perms));
}
extern "C" int ceph_ll_getattr(class ceph_mount_info *cmount,
Inode *in, struct ceph_statx *stx,
unsigned int want, unsigned int flags,
const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return (cmount->get_client()->ll_getattrx(in, stx, want, flags, *perms));
}
extern "C" int ceph_ll_setattr(class ceph_mount_info *cmount,
Inode *in, struct ceph_statx *stx,
int mask, const UserPerm *perms)
{
return (cmount->get_client()->ll_setattrx(in, stx, mask, *perms));
}
extern "C" int ceph_ll_open(class ceph_mount_info *cmount, Inode *in,
int flags, Fh **fh, const UserPerm *perms)
{
return (cmount->get_client()->ll_open(in, flags, fh, *perms));
}
extern "C" int ceph_ll_read(class ceph_mount_info *cmount, Fh* filehandle,
int64_t off, uint64_t len, char* buf)
{
bufferlist bl;
int r = 0;
r = cmount->get_client()->ll_read(filehandle, off, len, &bl);
if (r >= 0)
{
bl.begin().copy(bl.length(), buf);
r = bl.length();
}
return r;
}
extern "C" int ceph_ll_read_block(class ceph_mount_info *cmount,
Inode *in, uint64_t blockid,
char* buf, uint64_t offset,
uint64_t length,
struct ceph_file_layout* layout)
{
file_layout_t l;
int r = (cmount->get_client()->ll_read_block(in, blockid, buf, offset,
length, &l));
l.to_legacy(layout);
return r;
}
extern "C" int ceph_ll_write_block(class ceph_mount_info *cmount,
Inode *in, uint64_t blockid,
char *buf, uint64_t offset,
uint64_t length,
struct ceph_file_layout *layout,
uint64_t snapseq, uint32_t sync)
{
file_layout_t l;
int r = (cmount->get_client()->ll_write_block(in, blockid, buf, offset,
length, &l, snapseq, sync));
l.to_legacy(layout);
return r;
}
extern "C" int ceph_ll_commit_blocks(class ceph_mount_info *cmount,
Inode *in, uint64_t offset,
uint64_t range)
{
return (cmount->get_client()->ll_commit_blocks(in, offset, range));
}
extern "C" int ceph_ll_fsync(class ceph_mount_info *cmount,
Fh *fh, int syncdataonly)
{
return (cmount->get_client()->ll_fsync(fh, syncdataonly));
}
extern "C" int ceph_ll_sync_inode(class ceph_mount_info *cmount,
Inode *in, int syncdataonly)
{
return (cmount->get_client()->ll_sync_inode(in, syncdataonly));
}
extern "C" int ceph_ll_fallocate(class ceph_mount_info *cmount, Fh *fh,
int mode, int64_t offset, int64_t length)
{
return cmount->get_client()->ll_fallocate(fh, mode, offset, length);
}
extern "C" off_t ceph_ll_lseek(class ceph_mount_info *cmount,
Fh *fh, off_t offset, int whence)
{
return (cmount->get_client()->ll_lseek(fh, offset, whence));
}
extern "C" int ceph_ll_write(class ceph_mount_info *cmount,
Fh *fh, int64_t off, uint64_t len,
const char *data)
{
return (cmount->get_client()->ll_write(fh, off, len, data));
}
extern "C" int64_t ceph_ll_readv(class ceph_mount_info *cmount,
struct Fh *fh, const struct iovec *iov,
int iovcnt, int64_t off)
{
return (cmount->get_client()->ll_readv(fh, iov, iovcnt, off));
}
extern "C" int64_t ceph_ll_writev(class ceph_mount_info *cmount,
struct Fh *fh, const struct iovec *iov,
int iovcnt, int64_t off)
{
return (cmount->get_client()->ll_writev(fh, iov, iovcnt, off));
}
extern "C" int ceph_ll_close(class ceph_mount_info *cmount, Fh* fh)
{
return (cmount->get_client()->ll_release(fh));
}
extern "C" int ceph_ll_create(class ceph_mount_info *cmount,
Inode *parent, const char *name, mode_t mode,
int oflags, Inode **outp, Fh **fhp,
struct ceph_statx *stx, unsigned want,
unsigned lflags, const UserPerm *perms)
{
if (lflags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return (cmount->get_client())->ll_createx(parent, name, mode, oflags, outp,
fhp, stx, want, lflags, *perms);
}
extern "C" int ceph_ll_mknod(class ceph_mount_info *cmount, Inode *parent,
const char *name, mode_t mode, dev_t rdev,
Inode **out, struct ceph_statx *stx,
unsigned want, unsigned flags,
const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return (cmount->get_client())->ll_mknodx(parent, name, mode, rdev,
out, stx, want, flags, *perms);
}
extern "C" int ceph_ll_mkdir(class ceph_mount_info *cmount, Inode *parent,
const char *name, mode_t mode, Inode **out,
struct ceph_statx *stx, unsigned want,
unsigned flags, const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return cmount->get_client()->ll_mkdirx(parent, name, mode, out, stx, want,
flags, *perms);
}
extern "C" int ceph_ll_link(class ceph_mount_info *cmount,
Inode *in, Inode *newparent,
const char *name, const UserPerm *perms)
{
return cmount->get_client()->ll_link(in, newparent, name, *perms);
}
extern "C" int ceph_ll_opendir(class ceph_mount_info *cmount,
Inode *in,
struct ceph_dir_result **dirpp,
const UserPerm *perms)
{
return (cmount->get_client()->ll_opendir(in, O_RDONLY, (dir_result_t**) dirpp,
*perms));
}
extern "C" int ceph_ll_releasedir(class ceph_mount_info *cmount,
ceph_dir_result *dir)
{
return cmount->get_client()->ll_releasedir(reinterpret_cast<dir_result_t*>(dir));
}
extern "C" int ceph_ll_rename(class ceph_mount_info *cmount,
Inode *parent, const char *name,
Inode *newparent, const char *newname,
const UserPerm *perms)
{
return cmount->get_client()->ll_rename(parent, name, newparent,
newname, *perms);
}
extern "C" int ceph_ll_unlink(class ceph_mount_info *cmount, Inode *in,
const char *name, const UserPerm *perms)
{
return cmount->get_client()->ll_unlink(in, name, *perms);
}
extern "C" int ceph_ll_statfs(class ceph_mount_info *cmount,
Inode *in, struct statvfs *stbuf)
{
return (cmount->get_client()->ll_statfs(in, stbuf, cmount->default_perms));
}
extern "C" int ceph_ll_readlink(class ceph_mount_info *cmount, Inode *in,
char *buf, size_t bufsiz,
const UserPerm *perms)
{
return cmount->get_client()->ll_readlink(in, buf, bufsiz, *perms);
}
extern "C" int ceph_ll_symlink(class ceph_mount_info *cmount,
Inode *in, const char *name,
const char *value, Inode **out,
struct ceph_statx *stx, unsigned want,
unsigned flags, const UserPerm *perms)
{
if (flags & ~CEPH_REQ_FLAG_MASK)
return -CEPHFS_EINVAL;
return (cmount->get_client()->ll_symlinkx(in, name, value, out, stx, want,
flags, *perms));
}
extern "C" int ceph_ll_rmdir(class ceph_mount_info *cmount,
Inode *in, const char *name,
const UserPerm *perms)
{
return cmount->get_client()->ll_rmdir(in, name, *perms);
}
extern "C" int ceph_ll_getxattr(class ceph_mount_info *cmount,
Inode *in, const char *name, void *value,
size_t size, const UserPerm *perms)
{
return (cmount->get_client()->ll_getxattr(in, name, value, size, *perms));
}
extern "C" int ceph_ll_listxattr(struct ceph_mount_info *cmount,
Inode *in, char *list,
size_t buf_size, size_t *list_size,
const UserPerm *perms)
{
int res = cmount->get_client()->ll_listxattr(in, list, buf_size, *perms);
if (res >= 0) {
*list_size = (size_t)res;
return 0;
}
return res;
}
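/* A minimal usage sketch (buffer size is arbitrary): the wrapper converts
 * the client's byte-count return value into the list_size out-parameter.
 *
 *   char names[1024];
 *   size_t used = 0;
 *   if (ceph_ll_listxattr(cmount, in, names, sizeof(names), &used, perms) == 0) {
 *     // names now holds `used` bytes of NUL-separated xattr names
 *   }
 */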
extern "C" int ceph_ll_setxattr(class ceph_mount_info *cmount,
Inode *in, const char *name,
const void *value, size_t size,
int flags, const UserPerm *perms)
{
return (cmount->get_client()->ll_setxattr(in, name, value, size, flags, *perms));
}
extern "C" int ceph_ll_removexattr(class ceph_mount_info *cmount,
Inode *in, const char *name,
const UserPerm *perms)
{
return (cmount->get_client()->ll_removexattr(in, name, *perms));
}
extern "C" int ceph_ll_getlk(struct ceph_mount_info *cmount,
Fh *fh, struct flock *fl, uint64_t owner)
{
return (cmount->get_client()->ll_getlk(fh, fl, owner));
}
extern "C" int ceph_ll_setlk(struct ceph_mount_info *cmount,
Fh *fh, struct flock *fl, uint64_t owner,
int sleep)
{
return (cmount->get_client()->ll_setlk(fh, fl, owner, sleep));
}
extern "C" int ceph_ll_lazyio(class ceph_mount_info *cmount,
Fh *fh, int enable)
{
return (cmount->get_client()->ll_lazyio(fh, enable));
}
extern "C" int ceph_ll_delegation(struct ceph_mount_info *cmount, Fh *fh,
unsigned cmd, ceph_deleg_cb_t cb, void *priv)
{
return (cmount->get_client()->ll_delegation(fh, cmd, cb, priv));
}
extern "C" uint32_t ceph_ll_stripe_unit(class ceph_mount_info *cmount,
Inode *in)
{
return (cmount->get_client()->ll_stripe_unit(in));
}
extern "C" uint32_t ceph_ll_file_layout(class ceph_mount_info *cmount,
Inode *in,
struct ceph_file_layout *layout)
{
file_layout_t l;
int r = (cmount->get_client()->ll_file_layout(in, &l));
l.to_legacy(layout);
return r;
}
uint64_t ceph_ll_snap_seq(class ceph_mount_info *cmount, Inode *in)
{
return (cmount->get_client()->ll_snap_seq(in));
}
extern "C" int ceph_ll_get_stripe_osd(class ceph_mount_info *cmount,
Inode *in, uint64_t blockno,
struct ceph_file_layout* layout)
{
file_layout_t l;
int r = (cmount->get_client()->ll_get_stripe_osd(in, blockno, &l));
l.to_legacy(layout);
return r;
}
extern "C" int ceph_ll_num_osds(class ceph_mount_info *cmount)
{
return (cmount->get_client()->ll_num_osds());
}
extern "C" int ceph_ll_osdaddr(class ceph_mount_info *cmount,
int osd, uint32_t *addr)
{
return (cmount->get_client()->ll_osdaddr(osd, addr));
}
extern "C" uint64_t ceph_ll_get_internal_offset(class ceph_mount_info *cmount,
Inode *in,
uint64_t blockno)
{
return (cmount->get_client()->ll_get_internal_offset(in, blockno));
}
extern "C" void ceph_buffer_free(char *buf)
{
if (buf) {
free(buf);
}
}
extern "C" uint32_t ceph_get_cap_return_timeout(class ceph_mount_info *cmount)
{
if (!cmount->is_mounted())
return 0;
return cmount->get_client()->mdsmap->get_session_autoclose().sec();
}
extern "C" int ceph_set_deleg_timeout(class ceph_mount_info *cmount, uint32_t timeout)
{
if (!cmount->is_mounted())
return -CEPHFS_ENOTCONN;
return cmount->get_client()->set_deleg_timeout(timeout);
}
extern "C" void ceph_set_session_timeout(class ceph_mount_info *cmount, unsigned timeout)
{
cmount->get_client()->set_session_timeout(timeout);
}
extern "C" void ceph_set_uuid(class ceph_mount_info *cmount, const char *uuid)
{
cmount->get_client()->set_uuid(std::string(uuid));
}
extern "C" int ceph_start_reclaim(class ceph_mount_info *cmount,
const char *uuid, unsigned flags)
{
if (!cmount->is_initialized()) {
int ret = cmount->init();
if (ret != 0)
return ret;
}
return cmount->get_client()->start_reclaim(std::string(uuid), flags,
cmount->get_filesystem());
}
extern "C" void ceph_finish_reclaim(class ceph_mount_info *cmount)
{
cmount->get_client()->finish_reclaim();
}
// This is deprecated, use ceph_ll_register_callbacks2 instead.
extern "C" void ceph_ll_register_callbacks(class ceph_mount_info *cmount,
struct ceph_client_callback_args *args)
{
cmount->get_client()->ll_register_callbacks(args);
}
extern "C" int ceph_ll_register_callbacks2(class ceph_mount_info *cmount,
struct ceph_client_callback_args *args)
{
return cmount->get_client()->ll_register_callbacks2(args);
}
extern "C" int ceph_get_snap_info(struct ceph_mount_info *cmount,
const char *path, struct snap_info *snap_info) {
Client::SnapInfo info;
int r = cmount->get_client()->get_snap_info(path, cmount->default_perms, &info);
if (r < 0) {
return r;
}
size_t i = 0;
auto nr_metadata = info.metadata.size();
snap_info->id = info.id.val;
snap_info->nr_snap_metadata = nr_metadata;
if (nr_metadata) {
snap_info->snap_metadata = (struct snap_metadata *)calloc(nr_metadata, sizeof(struct snap_metadata));
if (!snap_info->snap_metadata) {
return -CEPHFS_ENOMEM;
}
// fill with key, value pairs
for (auto &[key, value] : info.metadata) {
// len(key) + '\0' + len(value) + '\0'
char *kvp = (char *)malloc(key.size() + value.size() + 2);
if (!kvp) {
break;
}
char *_key = kvp;
char *_value = kvp + key.size() + 1;
memcpy(_key, key.c_str(), key.size());
_key[key.size()] = '\0';
memcpy(_value, value.c_str(), value.size());
_value[value.size()] = '\0';
snap_info->snap_metadata[i].key = _key;
snap_info->snap_metadata[i].value = _value;
++i;
}
}
if (nr_metadata && i != nr_metadata) {
ceph_free_snap_info_buffer(snap_info);
return -CEPHFS_ENOMEM;
}
return 0;
}
extern "C" void ceph_free_snap_info_buffer(struct snap_info *snap_info) {
for (size_t i = 0; i < snap_info->nr_snap_metadata; ++i) {
free((void *)snap_info->snap_metadata[i].key); // malloc'd memory is key+value composite
}
free(snap_info->snap_metadata);
}
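/* A hypothetical caller sketch (the snapshot path is made up): fetch
 * snapshot info and release the metadata with the paired free function.
 *
 *   struct snap_info info;
 *   if (ceph_get_snap_info(cmount, "/dir/.snap/mysnap", &info) == 0) {
 *     for (size_t i = 0; i < info.nr_snap_metadata; ++i)
 *       printf("%s=%s\n", info.snap_metadata[i].key,
 *              info.snap_metadata[i].value);
 *     ceph_free_snap_info_buffer(&info);
 *   }
 */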
| 64,989 | 26.596603 | 154 |
cc
|
null |
ceph-main/src/libcephsqlite.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2021 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License version 2.1, as published by
* the Free Software Foundation. See file COPYING.
*
*/
#include <boost/smart_ptr/intrusive_ptr.hpp>
#include <fmt/format.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <regex>
#include <sstream>
#include <string_view>
#include <limits.h>
#include <string.h>
#include <sqlite3ext.h>
SQLITE_EXTENSION_INIT1
#include "include/ceph_assert.h"
#include "include/rados/librados.hpp"
#include "common/Clock.h"
#include "common/Formatter.h"
#include "common/ceph_argparse.h"
#include "common/ceph_mutex.h"
#include "common/common_init.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "common/version.h"
#include "include/libcephsqlite.h"
#include "SimpleRADOSStriper.h"
#define dout_subsys ceph_subsys_cephsqlite
#undef dout_prefix
#define dout_prefix *_dout << "cephsqlite: " << __func__ << ": "
#define d(cct,cluster,lvl) ldout((cct), (lvl)) << "(client." << cluster->get_instance_id() << ") "
#define dv(lvl) d(cct,cluster,(lvl))
#define df(lvl) d(f->io.cct,f->io.cluster,(lvl)) << f->loc << " "
enum {
P_FIRST = 0xf0000,
P_OP_OPEN,
P_OP_DELETE,
P_OP_ACCESS,
P_OP_FULLPATHNAME,
P_OP_CURRENTTIME,
P_OPF_CLOSE,
P_OPF_READ,
P_OPF_WRITE,
P_OPF_TRUNCATE,
P_OPF_SYNC,
P_OPF_FILESIZE,
P_OPF_LOCK,
P_OPF_UNLOCK,
P_OPF_CHECKRESERVEDLOCK,
P_OPF_FILECONTROL,
P_OPF_SECTORSIZE,
P_OPF_DEVICECHARACTERISTICS,
P_LAST,
};
using cctptr = boost::intrusive_ptr<CephContext>;
using rsptr = std::shared_ptr<librados::Rados>;
struct cephsqlite_appdata {
~cephsqlite_appdata() {
{
std::scoped_lock lock(cluster_mutex);
_disconnect();
}
if (logger) {
cct->get_perfcounters_collection()->remove(logger.get());
}
if (striper_logger) {
cct->get_perfcounters_collection()->remove(striper_logger.get());
}
}
int setup_perf() {
ceph_assert(cct);
PerfCountersBuilder plb(cct.get(), "libcephsqlite_vfs", P_FIRST, P_LAST);
plb.add_time_avg(P_OP_OPEN, "op_open", "Time average of Open operations");
plb.add_time_avg(P_OP_DELETE, "op_delete", "Time average of Delete operations");
plb.add_time_avg(P_OP_ACCESS, "op_access", "Time average of Access operations");
plb.add_time_avg(P_OP_FULLPATHNAME, "op_fullpathname", "Time average of FullPathname operations");
plb.add_time_avg(P_OP_CURRENTTIME, "op_currenttime", "Time average of Currenttime operations");
plb.add_time_avg(P_OPF_CLOSE, "opf_close", "Time average of Close file operations");
plb.add_time_avg(P_OPF_READ, "opf_read", "Time average of Read file operations");
plb.add_time_avg(P_OPF_WRITE, "opf_write", "Time average of Write file operations");
plb.add_time_avg(P_OPF_TRUNCATE, "opf_truncate", "Time average of Truncate file operations");
plb.add_time_avg(P_OPF_SYNC, "opf_sync", "Time average of Sync file operations");
plb.add_time_avg(P_OPF_FILESIZE, "opf_filesize", "Time average of FileSize file operations");
plb.add_time_avg(P_OPF_LOCK, "opf_lock", "Time average of Lock file operations");
plb.add_time_avg(P_OPF_UNLOCK, "opf_unlock", "Time average of Unlock file operations");
plb.add_time_avg(P_OPF_CHECKRESERVEDLOCK, "opf_checkreservedlock", "Time average of CheckReservedLock file operations");
plb.add_time_avg(P_OPF_FILECONTROL, "opf_filecontrol", "Time average of FileControl file operations");
plb.add_time_avg(P_OPF_SECTORSIZE, "opf_sectorsize", "Time average of SectorSize file operations");
plb.add_time_avg(P_OPF_DEVICECHARACTERISTICS, "opf_devicecharacteristics", "Time average of DeviceCharacteristics file operations");
logger.reset(plb.create_perf_counters());
if (int rc = SimpleRADOSStriper::config_logger(cct.get(), "libcephsqlite_striper", &striper_logger); rc < 0) {
return rc;
}
cct->get_perfcounters_collection()->add(logger.get());
cct->get_perfcounters_collection()->add(striper_logger.get());
return 0;
}
std::pair<cctptr, rsptr> get_cluster() {
std::scoped_lock lock(cluster_mutex);
if (!cct) {
if (int rc = _open(nullptr); rc < 0) {
ceph_abort("could not open connection to ceph");
}
}
return {cct, cluster};
}
int connect() {
std::scoped_lock lock(cluster_mutex);
return _connect();
}
int reconnect() {
std::scoped_lock lock(cluster_mutex);
_disconnect();
return _connect();
}
int maybe_reconnect(rsptr _cluster) {
std::scoped_lock lock(cluster_mutex);
if (!cluster || cluster == _cluster) {
ldout(cct, 10) << "reconnecting to RADOS" << dendl;
_disconnect();
return _connect();
} else {
ldout(cct, 10) << "already reconnected" << dendl;
return 0;
}
}
int open(CephContext* _cct) {
std::scoped_lock lock(cluster_mutex);
return _open(_cct);
}
std::unique_ptr<PerfCounters> logger;
std::shared_ptr<PerfCounters> striper_logger;
private:
int _open(CephContext* _cct) {
if (!_cct) {
std::vector<const char*> env_args;
env_to_vec(env_args, "CEPH_ARGS");
std::string cluster, conf_file_list; // unused
CephInitParameters iparams = ceph_argparse_early_args(env_args, CEPH_ENTITY_TYPE_CLIENT, &cluster, &conf_file_list);
cct = cctptr(common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0), false);
cct->_conf.parse_config_files(nullptr, &std::cerr, 0);
cct->_conf.parse_env(cct->get_module_type()); // environment variables override
cct->_conf.apply_changes(nullptr);
common_init_finish(cct.get());
} else {
cct = cctptr(_cct);
}
if (int rc = setup_perf(); rc < 0) {
return rc;
}
if (int rc = _connect(); rc < 0) {
return rc;
}
return 0;
}
void _disconnect() {
if (cluster) {
cluster.reset();
}
}
int _connect() {
ceph_assert(cct);
auto _cluster = rsptr(new librados::Rados());
ldout(cct, 5) << "initializing RADOS handle as " << cct->_conf->name << dendl;
if (int rc = _cluster->init_with_context(cct.get()); rc < 0) {
lderr(cct) << "cannot initialize RADOS: " << cpp_strerror(rc) << dendl;
return rc;
}
if (int rc = _cluster->connect(); rc < 0) {
lderr(cct) << "cannot connect: " << cpp_strerror(rc) << dendl;
return rc;
}
auto s = _cluster->get_addrs();
ldout(cct, 5) << "completed connection to RADOS with address " << s << dendl;
cluster = std::move(_cluster);
return 0;
}
  ceph::mutex cluster_mutex = ceph::make_mutex("libcephsqlite");
cctptr cct;
rsptr cluster;
};
struct cephsqlite_fileloc {
std::string pool;
std::string radosns;
std::string name;
};
struct cephsqlite_fileio {
cctptr cct;
rsptr cluster; // anchor for ioctx
librados::IoCtx ioctx;
std::unique_ptr<SimpleRADOSStriper> rs;
};
std::ostream& operator<<(std::ostream &out, const cephsqlite_fileloc& fileloc) {
return out
<< "["
<< fileloc.pool
<< ":"
<< fileloc.radosns
<< "/"
<< fileloc.name
<< "]"
;
}
struct cephsqlite_file {
sqlite3_file base;
struct sqlite3_vfs* vfs = nullptr;
int flags = 0;
// There are 5 lock states: https://sqlite.org/c3ref/c_lock_exclusive.html
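  // (SQLITE_LOCK_NONE < SHARED < RESERVED < PENDING < EXCLUSIVE); this VFS
  // maps every state above NONE onto the striper's single exclusive lock.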
int lock = 0;
struct cephsqlite_fileloc loc{};
struct cephsqlite_fileio io{};
};
#define getdata(vfs) (*((cephsqlite_appdata*)((vfs)->pAppData)))
static int Lock(sqlite3_file *file, int ilock)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << std::hex << ilock << dendl;
auto& lock = f->lock;
ceph_assert(!f->io.rs->is_locked() || lock > SQLITE_LOCK_NONE);
ceph_assert(lock <= ilock);
if (!f->io.rs->is_locked() && ilock > SQLITE_LOCK_NONE) {
if (int rc = f->io.rs->lock(0); rc < 0) {
df(5) << "failed: " << rc << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR;
}
}
lock = ilock;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_LOCK, end-start);
return SQLITE_OK;
}
static int Unlock(sqlite3_file *file, int ilock)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << std::hex << ilock << dendl;
auto& lock = f->lock;
ceph_assert(lock == SQLITE_LOCK_NONE || (lock > SQLITE_LOCK_NONE && f->io.rs->is_locked()));
ceph_assert(lock >= ilock);
if (ilock <= SQLITE_LOCK_NONE && SQLITE_LOCK_NONE < lock) {
if (int rc = f->io.rs->unlock(); rc < 0) {
df(5) << "failed: " << rc << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR;
}
}
lock = ilock;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_UNLOCK, end-start);
return SQLITE_OK;
}
static int CheckReservedLock(sqlite3_file *file, int *result)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << dendl;
*result = 0;
auto& lock = f->lock;
if (lock > SQLITE_LOCK_SHARED) {
*result = 1;
}
df(10);
f->io.rs->print_lockers(*_dout);
*_dout << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_CHECKRESERVEDLOCK, end-start);
return SQLITE_OK;
}
static int Close(sqlite3_file *file)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << dendl;
f->~cephsqlite_file();
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_CLOSE, end-start);
return SQLITE_OK;
}
static int Read(sqlite3_file *file, void *buf, int len, sqlite_int64 off)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << buf << " " << off << "~" << len << dendl;
if (int rc = f->io.rs->read(buf, len, off); rc < 0) {
df(5) << "read failed: " << cpp_strerror(rc) << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR_READ;
} else {
df(5) << "= " << rc << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_READ, end-start);
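    /* SQLite's VFS contract requires a short read to zero-fill the unread
     * tail of the buffer and report SQLITE_IOERR_SHORT_READ. */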
if (rc < len) {
memset(buf, 0, len-rc);
return SQLITE_IOERR_SHORT_READ;
} else {
return SQLITE_OK;
}
}
}
static int Write(sqlite3_file *file, const void *buf, int len, sqlite_int64 off)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << off << "~" << len << dendl;
if (int rc = f->io.rs->write(buf, len, off); rc < 0) {
df(5) << "write failed: " << cpp_strerror(rc) << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR_WRITE;
} else {
df(5) << "= " << rc << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_WRITE, end-start);
return SQLITE_OK;
}
}
static int Truncate(sqlite3_file *file, sqlite_int64 size)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << size << dendl;
if (int rc = f->io.rs->truncate(size); rc < 0) {
df(5) << "truncate failed: " << cpp_strerror(rc) << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR;
}
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_TRUNCATE, end-start);
return SQLITE_OK;
}
static int Sync(sqlite3_file *file, int flags)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << flags << dendl;
if (int rc = f->io.rs->flush(); rc < 0) {
df(5) << "failed: " << cpp_strerror(rc) << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_IOERR;
}
df(5) << " = 0" << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_SYNC, end-start);
return SQLITE_OK;
}
static int FileSize(sqlite3_file *file, sqlite_int64 *osize)
{
auto f = (cephsqlite_file*)file;
auto start = ceph::coarse_mono_clock::now();
df(5) << dendl;
uint64_t size = 0;
if (int rc = f->io.rs->stat(&size); rc < 0) {
df(5) << "stat failed: " << cpp_strerror(rc) << dendl;
if (rc == -EBLOCKLISTED) {
getdata(f->vfs).maybe_reconnect(f->io.cluster);
}
return SQLITE_NOTFOUND;
}
*osize = (sqlite_int64)size;
df(5) << "= " << size << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_FILESIZE, end-start);
return SQLITE_OK;
}
static bool parsepath(std::string_view path, struct cephsqlite_fileloc* fileloc)
{
static const std::regex re1{"^/*(\\*[[:digit:]]+):([[:alnum:]\\-_.]*)/([[:alnum:]\\-._]+)$"};
static const std::regex re2{"^/*([[:alnum:]\\-_.]+):([[:alnum:]\\-_.]*)/([[:alnum:]\\-._]+)$"};
std::cmatch cm;
if (!std::regex_match(path.data(), cm, re1)) {
if (!std::regex_match(path.data(), cm, re2)) {
return false;
}
}
fileloc->pool = cm[1];
fileloc->radosns = cm[2];
fileloc->name = cm[3];
return true;
}
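/* Examples accepted by parsepath (names are hypothetical):
 *   "mypool:ns/a.db"  - pool by name, explicit namespace
 *   "*2:/a.db"        - pool by id, empty namespace
 * Any leading slashes are ignored.
 */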
static int makestriper(sqlite3_vfs* vfs, cctptr cct, rsptr cluster, const cephsqlite_fileloc& loc, cephsqlite_fileio* io)
{
bool gotmap = false;
d(cct,cluster,10) << loc << dendl;
enoent_retry:
if (loc.pool[0] == '*') {
std::string err;
int64_t id = strict_strtoll(loc.pool.c_str()+1, 10, &err);
ceph_assert(err.empty());
if (int rc = cluster->ioctx_create2(id, io->ioctx); rc < 0) {
if (rc == -ENOENT && !gotmap) {
cluster->wait_for_latest_osdmap();
gotmap = true;
goto enoent_retry;
}
d(cct,cluster,1) << "cannot create ioctx: " << cpp_strerror(rc) << dendl;
return rc;
}
} else {
if (int rc = cluster->ioctx_create(loc.pool.c_str(), io->ioctx); rc < 0) {
if (rc == -ENOENT && !gotmap) {
cluster->wait_for_latest_osdmap();
gotmap = true;
goto enoent_retry;
}
d(cct,cluster,1) << "cannot create ioctx: " << cpp_strerror(rc) << dendl;
return rc;
}
}
if (!loc.radosns.empty())
io->ioctx.set_namespace(loc.radosns);
io->rs = std::make_unique<SimpleRADOSStriper>(io->ioctx, loc.name);
io->rs->set_logger(getdata(vfs).striper_logger);
io->rs->set_lock_timeout(cct->_conf.get_val<std::chrono::milliseconds>("cephsqlite_lock_renewal_timeout"));
io->rs->set_lock_interval(cct->_conf.get_val<std::chrono::milliseconds>("cephsqlite_lock_renewal_interval"));
io->rs->set_blocklist_the_dead(cct->_conf.get_val<bool>("cephsqlite_blocklist_dead_locker"));
io->cluster = std::move(cluster);
io->cct = cct;
return 0;
}
static int SectorSize(sqlite3_file* sf)
{
static const int size = 65536;
auto start = ceph::coarse_mono_clock::now();
auto f = (cephsqlite_file*)sf;
df(5) << " = " << size << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_SECTORSIZE, end-start);
return size;
}
static int FileControl(sqlite3_file* sf, int op, void *arg)
{
auto f = (cephsqlite_file*)sf;
auto start = ceph::coarse_mono_clock::now();
df(5) << op << ", " << arg << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_FILECONTROL, end-start);
return SQLITE_NOTFOUND;
}
static int DeviceCharacteristics(sqlite3_file* sf)
{
auto f = (cephsqlite_file*)sf;
auto start = ceph::coarse_mono_clock::now();
df(5) << dendl;
static const int c = 0
|SQLITE_IOCAP_ATOMIC
|SQLITE_IOCAP_POWERSAFE_OVERWRITE
|SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
|SQLITE_IOCAP_SAFE_APPEND
;
auto end = ceph::coarse_mono_clock::now();
getdata(f->vfs).logger->tinc(P_OPF_DEVICECHARACTERISTICS, end-start);
return c;
}
static int Open(sqlite3_vfs *vfs, const char *name, sqlite3_file *file,
int flags, int *oflags)
{
static const sqlite3_io_methods io = {
1, /* iVersion */
Close, /* xClose */
Read, /* xRead */
Write, /* xWrite */
Truncate, /* xTruncate */
Sync, /* xSync */
FileSize, /* xFileSize */
Lock, /* xLock */
Unlock, /* xUnlock */
CheckReservedLock, /* xCheckReservedLock */
FileControl, /* xFileControl */
SectorSize, /* xSectorSize */
DeviceCharacteristics /* xDeviceCharacteristics */
};
auto start = ceph::coarse_mono_clock::now();
bool gotmap = false;
auto [cct, cluster] = getdata(vfs).get_cluster();
/* we are not going to create temporary files */
if (name == NULL) {
dv(-1) << " cannot open temporary database" << dendl;
return SQLITE_CANTOPEN;
}
auto path = std::string_view(name);
if (path == ":memory:") {
dv(-1) << " cannot open temporary database" << dendl;
return SQLITE_IOERR;
}
dv(5) << path << " flags=" << std::hex << flags << dendl;
auto f = new (file)cephsqlite_file();
f->vfs = vfs;
if (!parsepath(path, &f->loc)) {
ceph_assert(0); /* xFullPathname validates! */
}
f->flags = flags;
enoent_retry:
if (int rc = makestriper(vfs, cct, cluster, f->loc, &f->io); rc < 0) {
f->~cephsqlite_file();
dv(-1) << "cannot open striper" << dendl;
return SQLITE_IOERR;
}
if (flags & SQLITE_OPEN_CREATE) {
dv(10) << "OPEN_CREATE" << dendl;
if (int rc = f->io.rs->create(); rc < 0 && rc != -EEXIST) {
if (rc == -ENOENT && !gotmap) {
/* we may have an out of date OSDMap which cancels the op in the
       * Objecter. Try to get a new one and retry. This is mostly noticeable
* in testing when pools are getting created/deleted left and right.
*/
dv(5) << "retrying create after getting latest OSDMap" << dendl;
cluster->wait_for_latest_osdmap();
gotmap = true;
goto enoent_retry;
}
dv(5) << "file cannot be created: " << cpp_strerror(rc) << dendl;
return SQLITE_IOERR;
}
}
if (int rc = f->io.rs->open(); rc < 0) {
if (rc == -ENOENT && !gotmap) {
/* See comment above for create case. */
dv(5) << "retrying open after getting latest OSDMap" << dendl;
cluster->wait_for_latest_osdmap();
gotmap = true;
goto enoent_retry;
}
dv(10) << "cannot open striper: " << cpp_strerror(rc) << dendl;
return rc;
}
if (oflags) {
*oflags = flags;
}
f->base.pMethods = &io;
auto end = ceph::coarse_mono_clock::now();
getdata(vfs).logger->tinc(P_OP_OPEN, end-start);
return SQLITE_OK;
}
/*
** Delete the file identified by argument path. If the dsync parameter
** is non-zero, then ensure the file-system modification to delete the
** file has been synced to disk before returning.
*/
static int Delete(sqlite3_vfs* vfs, const char* path, int dsync)
{
auto start = ceph::coarse_mono_clock::now();
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(5) << "'" << path << "', " << dsync << dendl;
cephsqlite_fileloc fileloc;
if (!parsepath(path, &fileloc)) {
dv(5) << "path does not parse!" << dendl;
return SQLITE_NOTFOUND;
}
cephsqlite_fileio io;
if (int rc = makestriper(vfs, cct, cluster, fileloc, &io); rc < 0) {
dv(-1) << "cannot open striper" << dendl;
return SQLITE_IOERR;
}
if (int rc = io.rs->lock(0); rc < 0) {
return SQLITE_IOERR;
}
if (int rc = io.rs->remove(); rc < 0) {
dv(5) << "= " << rc << dendl;
return SQLITE_IOERR_DELETE;
}
/* No need to unlock */
dv(5) << "= 0" << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(vfs).logger->tinc(P_OP_DELETE, end-start);
return SQLITE_OK;
}
/*
** Query the file-system to see if the named file exists, is readable or
** is both readable and writable.
*/
static int Access(sqlite3_vfs* vfs, const char* path, int flags, int* result)
{
auto start = ceph::coarse_mono_clock::now();
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(5) << path << " " << std::hex << flags << dendl;
cephsqlite_fileloc fileloc;
if (!parsepath(path, &fileloc)) {
dv(5) << "path does not parse!" << dendl;
return SQLITE_NOTFOUND;
}
cephsqlite_fileio io;
if (int rc = makestriper(vfs, cct, cluster, fileloc, &io); rc < 0) {
dv(-1) << "cannot open striper" << dendl;
return SQLITE_IOERR;
}
if (int rc = io.rs->open(); rc < 0) {
if (rc == -ENOENT) {
*result = 0;
return SQLITE_OK;
} else {
dv(10) << "cannot open striper: " << cpp_strerror(rc) << dendl;
*result = 0;
return SQLITE_IOERR;
}
}
uint64_t size = 0;
if (int rc = io.rs->stat(&size); rc < 0) {
dv(5) << "= " << rc << " (" << cpp_strerror(rc) << ")" << dendl;
*result = 0;
} else {
dv(5) << "= 0" << dendl;
*result = 1;
}
auto end = ceph::coarse_mono_clock::now();
getdata(vfs).logger->tinc(P_OP_ACCESS, end-start);
return SQLITE_OK;
}
/* This method is only called once for each database. It provides a chance to
* reformat the path into a canonical format.
*/
static int FullPathname(sqlite3_vfs* vfs, const char* ipath, int opathlen, char* opath)
{
auto start = ceph::coarse_mono_clock::now();
auto path = std::string_view(ipath);
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(5) << "1: " << path << dendl;
cephsqlite_fileloc fileloc;
if (!parsepath(path, &fileloc)) {
dv(5) << "path does not parse!" << dendl;
return SQLITE_NOTFOUND;
}
dv(5) << " parsed " << fileloc << dendl;
auto p = fmt::format("{}:{}/{}", fileloc.pool, fileloc.radosns, fileloc.name);
if (p.size() >= (size_t)opathlen) {
dv(5) << "path too long!" << dendl;
return SQLITE_CANTOPEN;
}
strcpy(opath, p.c_str());
dv(5) << " output " << p << dendl;
auto end = ceph::coarse_mono_clock::now();
getdata(vfs).logger->tinc(P_OP_FULLPATHNAME, end-start);
return SQLITE_OK;
}
static int CurrentTime(sqlite3_vfs* vfs, sqlite3_int64* time)
{
auto start = ceph::coarse_mono_clock::now();
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(5) << time << dendl;
auto t = ceph_clock_now();
  *time = t.to_msec() + 2440587.5*86400000; /* unix time in ms shifted to the julian epoch (1970-01-01 UTC == julian day 2440587.5) */
auto end = ceph::coarse_mono_clock::now();
getdata(vfs).logger->tinc(P_OP_CURRENTTIME, end-start);
return SQLITE_OK;
}
LIBCEPHSQLITE_API int cephsqlite_setcct(CephContext* _cct, char** ident)
{
ldout(_cct, 1) << "cct: " << _cct << dendl;
if (sqlite3_api == nullptr) {
lderr(_cct) << "API violation: must have sqlite3 init libcephsqlite" << dendl;
return -EINVAL;
}
auto vfs = sqlite3_vfs_find("ceph");
if (!vfs) {
lderr(_cct) << "API violation: must have sqlite3 init libcephsqlite" << dendl;
return -EINVAL;
}
auto& appd = getdata(vfs);
if (int rc = appd.open(_cct); rc < 0) {
return rc;
}
auto [cct, cluster] = appd.get_cluster();
auto s = cluster->get_addrs();
if (ident) {
*ident = strdup(s.c_str());
}
ldout(cct, 1) << "complete" << dendl;
return 0;
}
static void f_perf(sqlite3_context* ctx, int argc, sqlite3_value** argv)
{
auto vfs = (sqlite3_vfs*)sqlite3_user_data(ctx);
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(10) << dendl;
auto&& appd = getdata(vfs);
JSONFormatter f(false);
f.open_object_section("ceph_perf");
appd.logger->dump_formatted(&f, false, false);
appd.striper_logger->dump_formatted(&f, false, false);
f.close_section();
{
CachedStackStringStream css;
f.flush(*css);
auto sv = css->strv();
dv(20) << " = " << sv << dendl;
sqlite3_result_text(ctx, sv.data(), sv.size(), SQLITE_TRANSIENT);
}
}
static void f_status(sqlite3_context* ctx, int argc, sqlite3_value** argv)
{
auto vfs = (sqlite3_vfs*)sqlite3_user_data(ctx);
auto [cct, cluster] = getdata(vfs).get_cluster();
dv(10) << dendl;
JSONFormatter f(false);
f.open_object_section("ceph_status");
f.dump_int("id", cluster->get_instance_id());
f.dump_string("addr", cluster->get_addrs());
f.close_section();
{
CachedStackStringStream css;
f.flush(*css);
auto sv = css->strv();
dv(20) << " = " << sv << dendl;
sqlite3_result_text(ctx, sv.data(), sv.size(), SQLITE_TRANSIENT);
}
}
static int autoreg(sqlite3* db, char** err, const struct sqlite3_api_routines* thunk)
{
auto vfs = sqlite3_vfs_find("ceph");
if (!vfs) {
ceph_abort("ceph vfs not found");
}
if (int rc = sqlite3_create_function(db, "ceph_perf", 0, SQLITE_UTF8, vfs, f_perf, nullptr, nullptr); rc) {
return rc;
}
if (int rc = sqlite3_create_function(db, "ceph_status", 0, SQLITE_UTF8, vfs, f_status, nullptr, nullptr); rc) {
return rc;
}
return SQLITE_OK;
}
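/* Once autoreg has run, the functions are callable from SQL, e.g.:
 *
 *   SELECT ceph_perf();    -- JSON dump of VFS and striper perf counters
 *   SELECT ceph_status();  -- JSON with the RADOS instance id and address
 */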
/* You may wonder why we have an atexit handler? After all, atexit/exit creates
* a mess for multithreaded programs. Well, sqlite3 does not have an API for
* orderly removal of extensions. And, in fact, any API we might make
* unofficially (such as "sqlite3_cephsqlite_fini") would potentially race with
* other threads interacting with sqlite3 + the "ceph" VFS. There is a method
* for removing a VFS but it's not called by sqlite3 in any error scenario and
* there is no mechanism within sqlite3 to tell a VFS to unregister itself.
*
* This all would be mostly okay if /bin/sqlite3 did not call exit(3), but it
* does. (This occurs only for the sqlite3 binary, not when used as a library.)
* exit(3) calls destructors on all static-duration structures for the program.
* This breaks any outstanding threads created by the librados handle in all
* sorts of fantastic ways from C++ exceptions to memory faults. In general,
* Ceph libraries are not tolerant of exit(3) (_exit(3) is okay!). Applications
* must clean up after themselves or _exit(3).
*
* So, we have an atexit handler for libcephsqlite. This simply shuts down the
* RADOS handle. We can be assured that this occurs before any ceph library
* static-duration structures are destructed due to ordering guarantees by
* exit(3). Generally, we only see this called when the VFS is used by
* /bin/sqlite3 and only during sqlite3 error scenarios (like I/O errors
 * arising from blocklisting).
*/
static void cephsqlite_atexit()
{
if (auto vfs = sqlite3_vfs_find("ceph"); vfs) {
if (vfs->pAppData) {
auto&& appd = getdata(vfs);
delete &appd;
vfs->pAppData = nullptr;
}
}
}
LIBCEPHSQLITE_API int sqlite3_cephsqlite_init(sqlite3* db, char** err, const sqlite3_api_routines* api)
{
SQLITE_EXTENSION_INIT2(api);
auto vfs = sqlite3_vfs_find("ceph");
if (!vfs) {
vfs = (sqlite3_vfs*) calloc(1, sizeof(sqlite3_vfs));
auto appd = new cephsqlite_appdata;
vfs->iVersion = 2;
vfs->szOsFile = sizeof(struct cephsqlite_file);
vfs->mxPathname = 4096;
vfs->zName = "ceph";
vfs->pAppData = appd;
vfs->xOpen = Open;
vfs->xDelete = Delete;
vfs->xAccess = Access;
vfs->xFullPathname = FullPathname;
vfs->xCurrentTimeInt64 = CurrentTime;
if (int rc = sqlite3_vfs_register(vfs, 0); rc) {
delete appd;
free(vfs);
return rc;
}
}
if (int rc = std::atexit(cephsqlite_atexit); rc) {
return SQLITE_INTERNAL;
}
if (int rc = sqlite3_auto_extension((void(*)(void))autoreg); rc) {
return rc;
}
if (int rc = autoreg(db, err, api); rc) {
return rc;
}
return SQLITE_OK_LOAD_PERMANENTLY;
}
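/* A hypothetical usage sketch (pool and database names are made up): after
 * loading the extension, open a database stored in RADOS via the "ceph" VFS.
 *
 *   sqlite3 *db = nullptr;
 *   int rc = sqlite3_open_v2("mypool:/test.db", &db,
 *                            SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
 *                            "ceph");
 */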
| 27,898 | 28.87045 | 136 |
cc
|
null |
ceph-main/src/librados-config.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <iostream>
#include <boost/program_options/cmdline.hpp>
#include <boost/program_options/option.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/program_options/variables_map.hpp>
#include "include/rados/librados.h"
#include "ceph_ver.h"
namespace po = boost::program_options;
int main(int argc, const char **argv)
{
po::options_description desc{"usage: librados-config [option]"};
desc.add_options()
("help,h", "print this help message")
("version", "library version")
("vernum", "library version code")
("release", "print release name");
po::parsed_options parsed =
po::command_line_parser(argc, argv).options(desc).run();
po::variables_map vm;
po::store(parsed, vm);
po::notify(vm);
if (vm.count("help")) {
std::cout << desc << std::endl;
} else if (vm.count("version")) {
int maj, min, ext;
rados_version(&maj, &min, &ext);
std::cout << maj << "." << min << "." << ext << std::endl;
} else if (vm.count("vernum")) {
std::cout << std::hex << LIBRADOS_VERSION_CODE << std::dec << std::endl;
} else if (vm.count("release")) {
std::cout << CEPH_RELEASE_NAME << ' '
<< '(' << CEPH_RELEASE_TYPE << ')'
<< std::endl;
} else {
std::cerr << argv[0] << ": -h or --help for usage" << std::endl;
return 1;
}
}
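// Example invocations (output values are illustrative):
//
//   $ librados-config --version   # e.g. 3.0.0 (major.minor.extra)
//   $ librados-config --release   # e.g. reef (stable)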
| 1,799 | 29 | 76 |
cc
|
null |
ceph-main/src/loadclass.sh
|
#!/usr/bin/env bash
fname=$1
[ -z "$fname" ] && exit
[ -e $fname ] || { echo "file not found: $fname"; exit; }
name="`nm $fname | grep __cls_name__ | sed 's/.*__cls_name__//g' | head -1`"
[ -z "$name" ] && exit
ver="`nm $fname | grep __cls_ver__ | sed 's/.*__cls_ver__//g' | sed 's/_/\./g' | head -1`"
[ -z "$ver" ] && exit
echo loading $name v$ver
fl=`file $fname`
arch=""
[ `echo "$fl" | grep -c i386` -gt 0 ] && arch="i386"
[ `echo "$fl" | grep -c x86-64` -gt 0 ] && arch="x86-64"
[ -z "$arch" ] && { echo "lib architecture not identified"; exit; }
`dirname $0`/ceph class add $name $ver $arch --in-data=$fname
| 624 | 22.148148 | 90 |
sh
|
null |
ceph-main/src/mrgw.sh
|
#!/usr/bin/env bash
set -e
rgw_frontend=${RGW_FRONTEND:-"beast"}
script_root=$(dirname "$0")
script_root=$(cd "$script_root" && pwd)
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
if [ -e CMakeCache.txt ]; then
script_root=$PWD
elif [ -e "$script_root"/../${BUILD_DIR}/CMakeCache.txt ]; then
cd "$script_root"/../${BUILD_DIR}
script_root=$PWD
fi
#ceph_bin=$script_root/bin
vstart_path=$(dirname "$0")
[ "$#" -lt 3 ] && echo "usage: $0 <name> <port> <ssl-port> [params...]" && exit 1
name=$1
port=$2
ssl_port=$3
cert_param=""
port_param="port=$port"
if [ "$ssl_port" -gt 0 ]; then
cert_param="ssl_certificate=./cert.pem"
if [ "$rgw_frontend" = "civetweb" ]; then
port_param="port=${port} port=${ssl_port}s"
else
port_param="port=${port} ssl_port=${ssl_port}"
fi
fi
if [ -n "$RGW_FRONTEND_THREADS" ]; then
set_frontend_threads="num_threads=$RGW_FRONTEND_THREADS"
fi
shift 3
run_root=$script_root/run/$name
pidfile=$run_root/out/radosgw.${port}.pid
asokfile=$run_root/out/radosgw.${port}.asok
logfile=$run_root/out/radosgw.${port}.log
"$vstart_path"/mstop.sh "$name" radosgw "$port"
"$vstart_path"/mrun "$name" ceph -c "$run_root"/ceph.conf \
-k "$run_root"/keyring auth get-or-create client.rgw."$port" mon \
'allow rw' osd 'allow rwx' mgr 'allow rw' >> "$run_root"/keyring
"$vstart_path"/mrun "$name" radosgw --rgw-frontends="$rgw_frontend $port_param $set_frontend_threads $cert_param" \
-n client.rgw."$port" --pid-file="$pidfile" \
--admin-socket="$asokfile" "$@" --log-file="$logfile"
| 1,543 | 27.072727 | 115 |
sh
|
null |
ceph-main/src/mstart.sh
|
#!/bin/sh
usage="usage: $0 <name> [vstart options]..\n"
usage_exit() {
printf "$usage"
exit
}
[ $# -lt 1 ] && usage_exit
instance=$1
shift
vstart_path=`dirname $0`
root_path=`dirname $0`
root_path=`(cd $root_path; pwd)`
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
if [ -e CMakeCache.txt ]; then
root_path=$PWD
elif [ -e $root_path/../${BUILD_DIR}/CMakeCache.txt ]; then
cd $root_path/../${BUILD_DIR}
root_path=$PWD
fi
RUN_ROOT_PATH=${root_path}/run
mkdir -p $RUN_ROOT_PATH
if [ -z "$CLUSTERS_LIST" ]
then
CLUSTERS_LIST=$RUN_ROOT_PATH/.clusters.list
fi
if [ ! -f $CLUSTERS_LIST ]; then
touch $CLUSTERS_LIST
fi
pos=`grep -n -w $instance $CLUSTERS_LIST`
if [ $? -ne 0 ]; then
echo $instance >> $CLUSTERS_LIST
pos=`grep -n -w $instance $CLUSTERS_LIST`
fi
pos=`echo $pos | cut -d: -f1`
base_port=$((6800+pos*20))
rgw_port=$((8000+pos*1))
[ -z "$VSTART_DEST" ] && export VSTART_DEST=$RUN_ROOT_PATH/$instance
[ -z "$CEPH_PORT" ] && export CEPH_PORT=$base_port
[ -z "$CEPH_RGW_PORT" ] && export CEPH_RGW_PORT=$rgw_port
mkdir -p $VSTART_DEST
echo "Cluster dest path: $VSTART_DEST"
echo "monitors base port: $CEPH_PORT"
echo "rgw base port: $CEPH_RGW_PORT"
$vstart_path/vstart.sh "$@"
| 1,208 | 18.190476 | 68 |
sh
|
null |
ceph-main/src/mstop.sh
|
#!/usr/bin/env bash
set -e
script_root=`dirname $0`
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
if [ -e CMakeCache.txt ]; then
script_root=$PWD
elif [ -e $script_root/../${BUILD_DIR}/CMakeCache.txt ]; then
script_root=`(cd $script_root/../${BUILD_DIR}; pwd)`
fi
[ "$#" -lt 1 ] && echo "usage: $0 <name> [entity [id]]" && exit 1
name=$1
entity=$2
id=$3
run_root=$script_root/run/$name
pidpath=$run_root/out
if [ "$entity" == "" ]; then
pfiles=`ls $pidpath/*.pid` || true
elif [ "$id" == "" ]; then
pfiles=`ls $pidpath/$entity.*.pid` || true
else
pfiles=`ls $pidpath/$entity.$id.pid` || true
fi
MAX_RETRIES=20
for pidfile in $pfiles; do
pid=`cat $pidfile`
fname=`echo $pidfile | sed 's/.*\///g'`
[ "$pid" == "" ] && exit
[ $pid -eq 0 ] && exit
echo pid=$pid
extra_check=""
entity=`echo $fname | sed 's/\..*//g'`
name=`echo $fname | sed 's/\.pid$//g'`
[ "$entity" == "radosgw" ] && extra_check="-e lt-radosgw"
echo entity=$entity pid=$pid name=$name
counter=0
signal=""
  while ps -p $pid -o args= | grep -q -e $entity $extra_check ; do
    if [[ "$counter" -gt $MAX_RETRIES ]]; then
signal="-9"
fi
cmd="kill $signal $pid"
printf "$cmd...\n"
$cmd
sleep 1
counter=$((counter+1))
continue
done
done
| 1,274 | 20.25 | 65 |
sh
|
null |
ceph-main/src/multi-dump.sh
|
#!/usr/bin/env bash
#
# multi-dump.sh
#
# Dumps interesting information about the Ceph cluster at a series of epochs.
#
### Functions
usage() {
cat <<EOF
multi-dump.sh: dumps out ceph maps
-D Enable diff-mode
 -e <end-epoch>       What epoch to end with.
-h This help message
-s <start-epoch> What epoch to start with. Defaults to 1.
-t <map-type> What type of map to dump. Defaults to osdmap.
Valid map types are: osdmap,
EOF
}
cleanup() {
        [ -n "${TEMPDIR}" ] && rm -rf "${TEMPDIR}"
}
die() {
echo $@
exit 1
}
dump_osdmap() {
for v in `seq $START_EPOCH $END_EPOCH`; do
./ceph osd getmap $v -o $TEMPDIR/$v >> $TEMPDIR/cephtool-out \
|| die "cephtool failed to dump epoch $v"
done
if [ $DIFFMODE -eq 1 ]; then
for v in `seq $START_EPOCH $END_EPOCH`; do
./osdmaptool --print $TEMPDIR/$v > $TEMPDIR/$v.out
done
cat $TEMPDIR/$START_EPOCH.out
E=$((END_EPOCH-1))
for v in `seq $START_EPOCH $E`; do
S=$((v+1))
echo "************** $S **************"
diff $TEMPDIR/$v.out $TEMPDIR/$S.out
done
else
for v in `seq $START_EPOCH $END_EPOCH`; do
echo "************** $v **************"
./osdmaptool --print $TEMPDIR/$v \
|| die "osdmaptool failed to print epoch $v"
done
fi
}
### Setup
trap cleanup INT TERM EXIT
TEMPDIR=`mktemp -d`
MYDIR=`dirname $0`
MYDIR=`readlink -f $MYDIR`
MAP_TYPE=osdmap
cd $MYDIR
### Parse arguments
DIFFMODE=0
START_EPOCH=1
END_EPOCH=0
while getopts "De:hs:t:" flag; do
case $flag in
D) DIFFMODE=1;;
e) END_EPOCH=$OPTARG;;
h) usage
exit 0
;;
s) START_EPOCH=$OPTARG;;
t) MAP_TYPE=$OPTARG;;
*) usage
exit 1;;
esac
done
[ $END_EPOCH -eq 0 ] && die "You must supply an end epoch with -e"
### Dump maps
case $MAP_TYPE in
"osdmap") dump_osdmap;;
*) die "sorry, don't know how to handle MAP_TYPE '$MAP_TYPE'"
esac
exit 0
| 2,364 | 23.132653 | 78 |
sh
|
null |
ceph-main/src/perf_histogram.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 OVH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_PERF_HISTOGRAM_H
#define CEPH_COMMON_PERF_HISTOGRAM_H
#include "common/Formatter.h"
#include "include/int_types.h"
#include <array>
#include <atomic>
#include <memory>
#include "include/ceph_assert.h"
class PerfHistogramCommon {
public:
enum scale_type_d : uint8_t {
SCALE_LINEAR = 1,
SCALE_LOG2 = 2,
};
struct axis_config_d {
const char *m_name = nullptr;
scale_type_d m_scale_type = SCALE_LINEAR;
int64_t m_min = 0;
int64_t m_quant_size = 0;
int32_t m_buckets = 0;
axis_config_d() = default;
axis_config_d(const char* name,
scale_type_d scale_type,
int64_t min,
int64_t quant_size,
int32_t buckets)
: m_name(name),
m_scale_type(scale_type),
m_min(min),
m_quant_size(quant_size),
m_buckets(buckets)
{}
};
protected:
/// Dump configuration of one axis to a formatter
static void dump_formatted_axis(ceph::Formatter *f, const axis_config_d &ac);
/// Quantize given value and convert to bucket number on given axis
static int64_t get_bucket_for_axis(int64_t value, const axis_config_d &ac);
/// Calculate inclusive ranges of axis values for each bucket on that axis
static std::vector<std::pair<int64_t, int64_t>> get_axis_bucket_ranges(
const axis_config_d &ac);
};
/// PerfHistogram traces a histogram of input values. It is an extended
/// version of a standard histogram, which traces the characteristics of only
/// a single value. In this implementation, values can be traced in multiple
/// dimensions - i.e. we can create a histogram of input request size (first
/// dimension) and processing latency (second dimension). Creating a standard
/// histogram out of such a multidimensional one is trivial: it only requires
/// summing values across the dimensions we are not interested in.
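// A minimal usage sketch (axis names and sample values are hypothetical):
// a two-dimensional histogram of request size vs. latency, with one sample
// recorded.
//
//   PerfHistogram<2> h{
//     {"request_size", PerfHistogramCommon::SCALE_LOG2, 0, 512, 16},
//     {"latency", PerfHistogramCommon::SCALE_LINEAR, 0, 10, 32},
//   };
//   h.inc(4096, 25);  // one 4 KiB request that took 25 time units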
template <int DIM = 2>
class PerfHistogram : public PerfHistogramCommon {
public:
/// Initialize new histogram object
PerfHistogram(std::initializer_list<axis_config_d> axes_config) {
ceph_assert(axes_config.size() == DIM &&
"Invalid number of axis configuration objects");
int i = 0;
for (const auto &ac : axes_config) {
ceph_assertf(ac.m_buckets > 0,
"Must have at least one bucket on axis");
ceph_assertf(ac.m_quant_size > 0,
"Quantization unit must be non-zero positive integer value");
m_axes_config[i++] = ac;
}
m_rawData.reset(new std::atomic<uint64_t>[get_raw_size()]);
}
/// Copy from other histogram object
PerfHistogram(const PerfHistogram &other)
: m_axes_config(other.m_axes_config) {
int64_t size = get_raw_size();
m_rawData.reset(new std::atomic<uint64_t>[size]);
for (int64_t i = 0; i < size; i++) {
m_rawData[i] = other.m_rawData[i];
}
}
/// Set all histogram values to 0
void reset() {
auto size = get_raw_size();
for (auto i = size; --i >= 0;) {
m_rawData[i] = 0;
}
}
/// Increase counter for given axis values by one
template <typename... T>
void inc(T... axis) {
auto index = get_raw_index_for_value(axis...);
m_rawData[index] += 1;
}
/// Increase counter for given axis buckets by one
template <typename... T>
void inc_bucket(T... bucket) {
auto index = get_raw_index_for_bucket(bucket...);
m_rawData[index] += 1;
}
/// Read value from given bucket
template <typename... T>
uint64_t read_bucket(T... bucket) const {
auto index = get_raw_index_for_bucket(bucket...);
return m_rawData[index];
}
/// Dump data to a Formatter object
void dump_formatted(ceph::Formatter *f) const {
// Dump axes configuration
f->open_array_section("axes");
for (auto &ac : m_axes_config) {
dump_formatted_axis(f, ac);
}
f->close_section();
// Dump histogram values
dump_formatted_values(f);
}
protected:
/// Raw data stored as linear space, internal indexes are calculated on
/// demand.
std::unique_ptr<std::atomic<uint64_t>[]> m_rawData;
/// Configuration of axes
std::array<axis_config_d, DIM> m_axes_config;
/// Dump histogram counters to a formatter
void dump_formatted_values(ceph::Formatter *f) const {
visit_values([f](int) { f->open_array_section("values"); },
[f](int64_t value) { f->dump_unsigned("value", value); },
[f](int) { f->close_section(); });
}
/// Get number of all histogram counters
int64_t get_raw_size() {
int64_t ret = 1;
for (const auto &ac : m_axes_config) {
ret *= ac.m_buckets;
}
return ret;
}
/// Calculate m_rawData index from axis values
template <typename... T>
int64_t get_raw_index_for_value(T... axes) const {
static_assert(sizeof...(T) == DIM, "Incorrect number of arguments");
return get_raw_index_internal<0>(get_bucket_for_axis, 0, axes...);
}
/// Calculate m_rawData index from axis bucket numbers
template <typename... T>
int64_t get_raw_index_for_bucket(T... buckets) const {
static_assert(sizeof...(T) == DIM, "Incorrect number of arguments");
return get_raw_index_internal<0>(
[](int64_t bucket, const axis_config_d &ac) {
ceph_assertf(bucket >= 0, "Bucket index can not be negative");
ceph_assertf(bucket < ac.m_buckets, "Bucket index too large");
return bucket;
},
0, buckets...);
}
template <int level = 0, typename F, typename... T>
int64_t get_raw_index_internal(F bucket_evaluator, int64_t startIndex,
int64_t value, T... tail) const {
static_assert(level + 1 + sizeof...(T) == DIM,
"Internal consistency check");
auto &ac = m_axes_config[level];
auto bucket = bucket_evaluator(value, ac);
return get_raw_index_internal<level + 1>(
bucket_evaluator, ac.m_buckets * startIndex + bucket, tail...);
}
template <int level, typename F>
int64_t get_raw_index_internal(F, int64_t startIndex) const {
static_assert(level == DIM, "Internal consistency check");
return startIndex;
}
/// Visit all histogram counters, call onDimensionEnter / onDimensionLeave
/// when starting / finishing traversal
/// on given axis, call onValue when dumping raw histogram counter value.
template <typename FDE, typename FV, typename FDL>
void visit_values(FDE onDimensionEnter, FV onValue, FDL onDimensionLeave,
int level = 0, int startIndex = 0) const {
if (level == DIM) {
onValue(m_rawData[startIndex]);
return;
}
onDimensionEnter(level);
auto &ac = m_axes_config[level];
startIndex *= ac.m_buckets;
for (int32_t i = 0; i < ac.m_buckets; ++i, ++startIndex) {
visit_values(onDimensionEnter, onValue, onDimensionLeave, level + 1,
startIndex);
}
onDimensionLeave(level);
}
};
#endif
| 7,206 | 30.334783 | 80 |
h
|
null |
ceph-main/src/ps-ceph.pl
|
#!/usr/bin/perl
use strict;
#
# ps-ceph.pl: Displays a list of ceph processes running locally
#
# Copyright (C) 2010, Dreamhost
#
# This is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 2.1, as published by the Free Software
# Foundation. See file COPYING.
#
sub is_ceph_proc {
    my $cmdline = $_[0];
return 0 if $cmdline =~ /\bps-ceph.pl\b/;
return 1 if $cmdline =~ /\bceph\b/;
return 1 if $cmdline =~ /\bceph-fuse\b/;
return 1 if $cmdline =~ /\brbd-nbd\b/;
return 1 if $cmdline =~ /\brbd-fuse\b/;
return 1 if $cmdline =~ /\bceph-mds\b/;
return 1 if $cmdline =~ /\bceph-mon\b/;
return 1 if $cmdline =~ /\bceph-osd\b/;
return 1 if $cmdline =~ /\bceph-mgr\b/;
return 1 if $cmdline =~ /\brbd-mirror\b/;
return 1 if $cmdline =~ /\bradosgw\b/;
return 1 if $cmdline =~ /\bosdmaptool\b/;
return 1 if $cmdline =~ /\brados\b/;
return 1 if $cmdline =~ /test_/;
return 1 if $cmdline =~ /\bvstart.sh\b/;
return 0;
}
opendir PROC, "/proc";
while(my $pid = readdir PROC) {
next if $pid =~ /\D/; # not a pid
next if !-o "/proc/$pid"; # not ours
open CMDLINE, "/proc/$pid/cmdline" or next;
my $cmdline = <CMDLINE>;
$cmdline =~ s/[^\x20-\x7e]/ /g;
close CMDLINE;
if (is_ceph_proc($cmdline)) {
print "$pid\t$cmdline\n";
}
}
| 1,516 | 29.959184 | 63 |
pl
|
null |
ceph-main/src/push_to_qemu.pl
|
#!/usr/bin/perl
use strict;
my $usage = "./push_to_qemu.pl <path_to_qemu>\n";
my $qemu = shift @ARGV || die $usage;
die $usage unless -d $qemu;
die "not in a git tree" unless `cd $qemu && git rev-parse HEAD`;
my $dir = '.';
until (-d "$dir/.git") {
$dir .= "/..";
}
print "pushing changed shared files from $dir to $qemu...\n";
system "cat $dir/src/include/rbd_types.h | sed 's/__u32/uint32_t/g; s/__u8/uint8_t/g; s/__.*16/uint16_t/g; s/__.*32/uint32_t/g; s/__.*64/uint64_t/g; s/_FS_CEPH_RBD/QEMU_BLOCK_RBD_TYPES_H/g; s/^\t/ /g' | expand | grep -v \"linux/types.h\" > $qemu/block/rbd_types.h";
print "done.\n";
| 626 | 27.5 | 268 |
pl
|
null |
ceph-main/src/stop.sh
|
#!/usr/bin/env bash
# -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
# vim: softtabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2013 Inktank <[email protected]>
# Copyright (C) 2013 Cloudwatt <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
test -d dev/osd0/. && test -e dev/sudo && SUDO="sudo"
if [ -e CMakeCache.txt ]; then
[ -z "$CEPH_BIN" ] && CEPH_BIN=bin
fi
if [ -n "$VSTART_DEST" ]; then
CEPH_CONF_PATH=$VSTART_DEST
else
CEPH_CONF_PATH="$PWD"
fi
conf_fn="$CEPH_CONF_PATH/ceph.conf"
if [ -z "$CEPHADM" ]; then
CEPHADM="${CEPH_BIN}/cephadm"
fi
MYUID=$(id -u)
MYNAME=$(id -nu)
do_killall() {
local pname="ceph-run.*$1"
if [ $1 == "ganesha.nfsd" ]; then
pname=$1
fi
pg=`pgrep -u $MYUID -f $pname`
[ -n "$pg" ] && kill $pg
$SUDO killall -u $MYNAME $1
}
maybe_kill() {
local p=$1
shift
local step=$1
shift
case $step in
0)
# killing processes
pkill -SIGTERM -u $MYUID $p
return 1
;;
[1-5])
# wait for processes to stop
if pkill -0 -u $MYUID $p; then
# $p is still alive
return 1
fi
;;
8)
# kill and print if some left
if pkill -0 -u $MYUID $p; then
echo "WARNING: $p did not orderly shutdown, killing it hard!" >&2
pkill -SIGKILL -u $MYUID $p
fi
;;
esac
}
do_killcephadm() {
local FSID=$($CEPH_BIN/ceph -c $conf_fn fsid)
if [ -n "$FSID" ]; then
sudo $CEPHADM rm-cluster --fsid $FSID --force
fi
}
do_umountall() {
# VSTART_IP_PORTS has the following format:
# "[v[num]:IP:PORT/0,v[num]:IP:PORT/0][v[num]:IP:PORT/0,v[num]:IP:PORT/0]..."
VSTART_IP_PORTS=$("${CEPH_BIN}"/ceph -c $conf_fn mon metadata 2>/dev/null | jq -j '.[].addrs')
# SRC_MNT_ARRAY has the following format:
# SRC_MNT_ARRAY[0] = IP:PORT,IP:PORT,IP:PORT:/
# SRC_MNT_ARRAY[1] = MNT_POINT1
# SRC_MNT_ARRAY[2] = IP:PORT:/ # could be mounted using a single mon IP
# SRC_MNT_ARRAY[3] = MNT_POINT2
# ...
SRC_MNT_ARRAY=($(findmnt -t ceph -n --raw --output=source,target))
LEN_SRC_MNT_ARRAY=${#SRC_MNT_ARRAY[@]}
for (( i=0; i<${LEN_SRC_MNT_ARRAY}; i=$((i+2)) ))
do
# the first IP:PORT in the list is checked against the vstart monitor IP:PORTs
IP_PORT1=$(echo ${SRC_MNT_ARRAY[$i]} | awk -F ':/' '{print $1}' | awk -F ',' '{print $1}')
if [[ "$VSTART_IP_PORTS" == *"$IP_PORT1"* ]]
then
CEPH_MNT=${SRC_MNT_ARRAY[$((i+1))]}
[ -n "$CEPH_MNT" ] && sudo umount -f $CEPH_MNT
fi
done
# get the ceph-fuse mounts of the cluster
num_of_ceph_mdss=$(ps -e | grep ' ceph-mds$' | wc -l)
if test $num_of_ceph_mdss -ne 0; then
CEPH_FUSE_MNTS=$("${CEPH_BIN}"/ceph -c $conf_fn tell mds.* client ls 2>/dev/null | grep mount_point | tr -d '",' | awk '{print $2}')
[ -n "$CEPH_FUSE_MNTS" ] && sudo umount -f $CEPH_FUSE_MNTS
fi
}
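# Illustration (addresses and paths hypothetical): with a vstart monitor at
# 127.0.0.1:40168, "findmnt -t ceph -n --raw --output=source,target" might
# print "127.0.0.1:40168:/ /mnt/cephfs"; the IP:PORT prefix of the source is
# matched against the monitor addresses above to decide whether the mount
# belongs to this vstart cluster and should be unmounted.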
usage="usage: $0 [all] [mon] [mds] [mgr] [osd] [rgw] [nfs] [--crimson] [--cephadm]\n"
stop_all=1
stop_mon=0
stop_mds=0
stop_osd=0
stop_mgr=0
stop_rgw=0
stop_ganesha=0
ceph_osd=ceph-osd
stop_cephadm=0
while [ $# -ge 1 ]; do
case $1 in
all )
stop_all=1
;;
mon | ceph-mon )
stop_mon=1
stop_all=0
;;
mgr | ceph-mgr )
stop_mgr=1
stop_all=0
;;
mds | ceph-mds )
stop_mds=1
stop_all=0
;;
osd | ceph-osd )
stop_osd=1
stop_all=0
;;
rgw | ceph-rgw )
stop_rgw=1
stop_all=0
;;
nfs | ganesha.nfsd )
stop_ganesha=1
stop_all=0
;;
--crimson)
ceph_osd=crimson-osd
;;
--cephadm)
stop_cephadm=1
stop_all=0
;;
* )
printf "$usage"
exit 1
esac
shift
done
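# Examples (a sketch of typical invocations):
#   ./stop.sh              # stop everything started by vstart.sh
#   ./stop.sh osd mds      # stop only the OSD and MDS daemons
#   ./stop.sh --cephadm    # tear down a cephadm-deployed test cluster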
if [ $stop_all -eq 1 ]; then
if "${CEPH_BIN}"/ceph -s --connect-timeout 1 -c $conf_fn >/dev/null 2>&1; then
# Umount mounted filesystems from vstart cluster
do_umountall
fi
if "${CEPH_BIN}"/rbd device list -c $conf_fn >/dev/null 2>&1; then
"${CEPH_BIN}"/rbd device list -c $conf_fn | tail -n +2 |
while read DEV; do
# While it is currently possible to create an rbd image with
# whitespace chars in its name, krbd will refuse mapping such
# an image, so we can safely split on whitespace here. (The
# same goes for whitespace chars in names of the pools that
# contain rbd images).
DEV="$(echo "${DEV}" | tr -s '[:space:]' | awk '{ print $5 }')"
sudo "${CEPH_BIN}"/rbd device unmap "${DEV}" -c $conf_fn
done
if [ -n "$("${CEPH_BIN}"/rbd device list -c $conf_fn)" ]; then
echo "WARNING: Some rbd images are still mapped!" >&2
fi
fi
daemons="$(sudo $CEPHADM ls 2> /dev/null)"
if [ $? -eq 0 -a "$daemons" != "[]" ]; then
do_killcephadm
fi
# killing processes
to_kill="$ceph_osd ceph-mon ceph-mds ceph-mgr radosgw lt-radosgw apache2 ganesha.nfsd cephfs-top"
since_kill=0
for step in 0 1 1 2 3 5 8; do
sleep $step
since_kill=$((since_kill + step))
survivors=''
for p in $to_kill; do
if ! maybe_kill "$p" $step; then
survivors+=" $p"
fi
done
if [ -z "$survivors" ]; then
break
fi
to_kill=$survivors
if [ $since_kill -gt 0 ]; then
echo "WARNING: $to_kill still alive after $since_kill seconds" >&2
fi
done
pkill -u $MYUID -f valgrind.bin.\*ceph-mon
$SUDO pkill -u $MYUID -f valgrind.bin.\*$ceph_osd
pkill -u $MYUID -f valgrind.bin.\*ceph-mds
asok_dir=`dirname $("${CEPH_BIN}"/ceph-conf -c ${conf_fn} --show-config-value admin_socket)`
rm -rf "${asok_dir}"
else
[ $stop_mon -eq 1 ] && do_killall ceph-mon
[ $stop_mds -eq 1 ] && do_killall ceph-mds
[ $stop_osd -eq 1 ] && do_killall $ceph_osd
[ $stop_mgr -eq 1 ] && do_killall ceph-mgr
[ $stop_ganesha -eq 1 ] && do_killall ganesha.nfsd
[ $stop_rgw -eq 1 ] && do_killall radosgw lt-radosgw apache2
[ $stop_cephadm -eq 1 ] && do_killcephadm
fi
| 6,966 | 28.396624 | 140 |
sh
|