| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
null | ceph-main/qa/standalone/mon/mon-created-time.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 SUSE LINUX GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7125" # git grep '\<7125\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_mon_created_time() {
local dir=$1
run_mon $dir a || return 1
ceph mon dump || return 1
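# "ceph mon dump" is expected to print a creation timestamp line such as
# "created 2015-06-29 14:42:19.966046" (illustrative format only); the checks
# below fail if that field is missing or still the zero timestamp.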
if test "$(ceph mon dump 2>/dev/null | sed -n '/created/p' | awk '{print $NF}')"x = ""x ; then
return 1
fi
if test "$(ceph mon dump 2>/dev/null | sed -n '/created/p' | awk '{print $NF}')"x = "0.000000"x ; then
return 1
fi
}
main mon-created-time "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/mon-created-time.sh"
# End:
| 1,548 | 27.163636 | 106 | sh |
null | ceph-main/qa/standalone/mon/mon-handle-forward.sh | #!/usr/bin/env bash
#
# Copyright (C) 2013 Cloudwatt <[email protected]>
# Copyright (C) 2014,2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
setup $dir || return 1
MONA=127.0.0.1:7300
MONB=127.0.0.1:7301
(
FSID=$(uuidgen)
export CEPH_ARGS
CEPH_ARGS+="--fsid=$FSID --auth-supported=none "
CEPH_ARGS+="--mon-host=$MONA,$MONB "
run_mon $dir a --public-addr $MONA || return 1
run_mon $dir b --public-addr $MONB || return 1
)
timeout 360 ceph --mon-host-override $MONA mon stat || return 1
# check that MONB is indeed a peon
ceph --admin-daemon $(get_asok_path mon.b) mon_status |
grep '"peon"' || return 1
# when the leader ( MONA ) is used, there is no message forwarding
ceph --mon-host-override $MONA osd pool create POOL1 12
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep 'mon_command(.*"POOL1"' $dir/mon.a.log || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.b) log flush || return 1
grep 'mon_command(.*"POOL1"' $dir/mon.b.log && return 1
# when the peon ( MONB ) is used, the message is forwarded to the leader
ceph --mon-host-override $MONB osd pool create POOL2 12
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.b) log flush || return 1
grep 'forward_request.*mon_command(.*"POOL2"' $dir/mon.b.log || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep ' forward(mon_command(.*"POOL2"' $dir/mon.a.log || return 1
# forwarded messages must retain features from the original connection
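# (hedged assumption about the log format: mon.b.log should contain an accept line
#  roughly like "... 127.0.0.1:0/... accept features 4611087854031667199", from which
#  the sed below extracts the numeric feature bits; the value shown is illustrative)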
features=$(sed -n -e 's|.*127.0.0.1:0.*accept features \([0-9][0-9]*\)|\1|p' < \
$dir/mon.b.log)
grep ' forward(mon_command(.*"POOL2".*con_features '$features $dir/mon.a.log || return 1
teardown $dir || return 1
}
main mon-handle-forward "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 TESTS=test/mon/mon-handle-forward.sh check"
# End:
| 2,612 | 39.2 | 92 | sh |
null | ceph-main/qa/standalone/mon/mon-last-epoch-clean.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7302" # git grep '\<7105\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
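# The helpers below all parse "ceph report"; based on the jq queries they use, the
# relevant part of the report is assumed to look roughly like (illustrative sketch):
#   "osdmap_clean_epochs": {
#     "min_last_epoch_clean": 42,
#     "last_epoch_clean": { "per_pool": [ { "poolid": 1, "floor": 42 }, ... ] },
#     "osd_epochs": [ { "id": 0, "epoch": 42 }, ... ]
#   }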
function check_lec_equals_pools() {
local pool_id=$1
report=$(ceph report)
lec=$(echo $report | \
jq '.osdmap_clean_epochs.min_last_epoch_clean')
if [[ -z "$pool_id" ]]; then
pools=($(echo $report | \
jq \
".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
" select(.floor == $lec) | .poolid"))
[[ ${#pools[*]} -eq 2 ]] || ( echo $report ; return 1 )
else
floor=($(echo $report | \
jq \
".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
" select(.poolid == $pool_id) | .floor"))
[[ $lec -eq $floor ]] || ( echo $report ; return 1 )
fi
return 0
}
function check_lec_lower_than_pool() {
local pool_id=$1
[[ -z "$pool_id" ]] && ( echo "expected pool_id as parameter" ; exit 1 )
report=$(ceph report)
lec=$(echo $report | \
jq '.osdmap_clean_epochs.min_last_epoch_clean')
floor=($(echo $report | \
jq \
".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
" select(.poolid == $pool_id) | .floor"))
[[ $lec -lt $floor ]] || ( echo $report ; return 1 )
return 0
}
function check_floor_pool_greater_than_pool() {
local pool_a=$1
local pool_b=$2
[[ -z "$pool_a" ]] && ( echo "expected id as first parameter" ; exit 1 )
[[ -z "$pool_b" ]] && ( echo "expected id as second parameter" ; exit 1 )
report=$(ceph report)
floor_a=($(echo $report | \
jq \
".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
" select(.poolid == $pool_a) | .floor"))
floor_b=($(echo $report | \
jq \
".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
" select(.poolid == $pool_b) | .floor"))
[[ $floor_a -gt $floor_b ]] || ( echo $report ; return 1 )
return 0
}
function check_lec_honours_osd() {
local osd=$1
report=$(ceph report)
lec=$(echo $report | \
jq '.osdmap_clean_epochs.min_last_epoch_clean')
if [[ -z "$osd" ]]; then
osds=($(echo $report | \
jq \
".osdmap_clean_epochs.osd_epochs[] |" \
" select(.epoch >= $lec) | .id"))
[[ ${#osds[*]} -eq 3 ]] || ( echo $report ; return 1 )
else
epoch=($(echo $report | \
jq \
".osdmap_clean_epochs.osd_epochs[] |" \
" select(.id == $id) | .epoch"))
[[ ${#epoch[*]} -eq 1 ]] || ( echo $report ; return 1 )
[[ ${epoch[0]} -ge $lec ]] || ( echo $report ; return 1 )
fi
return 0
}
function validate_fc() {
report=$(ceph report)
lec=$(echo $report | \
jq '.osdmap_clean_epochs.min_last_epoch_clean')
osdm_fc=$(echo $report | \
jq '.osdmap_first_committed')
[[ $lec -eq $osdm_fc ]] || ( echo $report ; return 1 )
return 0
}
function get_fc_lc_diff() {
report=$(ceph report)
osdm_fc=$(echo $report | \
jq '.osdmap_first_committed')
osdm_lc=$(echo $report | \
jq '.osdmap_last_committed')
echo $((osdm_lc - osdm_fc))
}
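# get_fc_lc_diff reports how many committed osdmap epochs the mon currently keeps,
# e.g. "diff=$(get_fc_lc_diff)"; a small value means the old maps have been trimmed.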
function get_pool_id() {
local pn=$1
[[ -z "$pn" ]] && ( echo "expected pool name as argument" ; exit 1 )
report=$(ceph report)
pool_id=$(echo $report | \
jq ".osdmap.pools[] | select(.pool_name == \"$pn\") | .pool")
[[ $pool_id -ge 0 ]] || \
( echo "unexpected pool id for pool \'$pn\': $pool_id" ; return -1 )
echo $pool_id
return 0
}
function wait_for_total_num_maps() {
# ripped off from wait_for_health, because it's easier than deduplicating the code
local -a delays=($(get_timeout_delays $TIMEOUT .1))
local -i loop=0
local -i v_diff=$1
while [[ $(get_fc_lc_diff) -gt $v_diff ]]; do
if (( $loop >= ${#delays[*]} )) ; then
echo "maps were not trimmed"
return 1
fi
sleep ${delays[$loop]}
loop+=1
done
}
function TEST_mon_last_clean_epoch() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
osd_pid=$(cat $dir/osd.2.pid)
sleep 5
ceph tell 'osd.*' injectargs '--osd-beacon-report-interval 10' || exit 1
ceph tell 'mon.*' injectargs \
'--mon-min-osdmap-epochs 2 --paxos-service-trim-min 1' || exit 1
create_pool foo 32
create_pool bar 32
foo_id=$(get_pool_id "foo")
bar_id=$(get_pool_id "bar")
[[ $foo_id -lt 0 ]] && ( echo "couldn't find pool 'foo' id" ; exit 1 )
[[ $bar_id -lt 0 ]] && ( echo "couldn't find pool 'bar' id" ; exit 1 )
# no real clue why we are getting these warnings, but let's make them go
# away so we can be happy.
ceph osd set-full-ratio 0.97
ceph osd set-backfillfull-ratio 0.97
wait_for_health_ok || exit 1
pre_map_diff=$(get_fc_lc_diff)
wait_for_total_num_maps 2
post_map_diff=$(get_fc_lc_diff)
[[ $post_map_diff -le $pre_map_diff ]] || exit 1
pre_map_diff=$post_map_diff
ceph osd pool set foo size 3
ceph osd pool set bar size 3
wait_for_health_ok || exit 1
check_lec_equals_pools || exit 1
check_lec_honours_osd || exit 1
validate_fc || exit 1
# down osd.2; expected result (because all pools' size equals 3):
# - number of committed maps increase over 2
# - lec equals fc
# - lec equals osd.2's epoch
# - all pools have floor equal to lec
while kill $osd_pid ; do sleep 1 ; done
ceph osd out 2
sleep 5 # seriously, just to make sure things settle; we may not need this.
# generate some maps
for ((i=0; i <= 10; ++i)); do
ceph osd set noup
sleep 1
ceph osd unset noup
sleep 1
done
post_map_diff=$(get_fc_lc_diff)
[[ $post_map_diff -gt 2 ]] || exit 1
validate_fc || exit 1
check_lec_equals_pools || exit 1
check_lec_honours_osd 2 || exit 1
# adjust pool 'bar' size to 2; expect:
# - number of committed maps still over 2
# - lec equals fc
# - lec equals pool 'foo' floor
# - pool 'bar' floor greater than pool 'foo'
ceph osd pool set bar size 2
diff_ver=$(get_fc_lc_diff)
[[ $diff_ver -gt 2 ]] || exit 1
validate_fc || exit 1
check_lec_equals_pools $foo_id || exit 1
check_lec_lower_than_pool $bar_id || exit 1
check_floor_pool_greater_than_pool $bar_id $foo_id || exit 1
# set pool 'foo' size to 2; expect:
# - health_ok
# - lec equals pools
# - number of committed maps decreases
# - lec equals fc
pre_map_diff=$(get_fc_lc_diff)
ceph osd pool set foo size 2 || exit 1
wait_for_clean || exit 1
check_lec_equals_pools || exit 1
validate_fc || exit 1
if ! wait_for_total_num_maps 2 ; then
post_map_diff=$(get_fc_lc_diff)
# number of maps is decreasing though, right?
[[ $post_map_diff -lt $pre_map_diff ]] || exit 1
fi
# bring back osd.2; expect:
# - health_ok
# - lec equals fc
# - number of committed maps equals 2
# - all pools have floor equal to lec
pre_map_diff=$(get_fc_lc_diff)
activate_osd $dir 2 || exit 1
wait_for_health_ok || exit 1
validate_fc || exit 1
check_lec_equals_pools || exit 1
if ! wait_for_total_num_maps 2 ; then
post_map_diff=$(get_fc_lc_diff)
# number of maps is decreasing though, right?
[[ $post_map_diff -lt $pre_map_diff ]] || exit 1
fi
return 0
}
main mon-last-clean-epoch "$@"
| 7,495 | 23.337662 | 83 | sh |
null | ceph-main/qa/standalone/mon/mon-osdmap-prune.sh | #!/bin/bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
base_test=$CEPH_ROOT/qa/workunits/mon/test_mon_osdmap_prune.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7115"
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none --mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_osdmap_prune() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
sleep 5
# we are getting OSD_OUT_OF_ORDER_FULL health errors, and it's not clear
# why. so, to make the health checks happy, mask those errors.
ceph osd set-full-ratio 0.97
ceph osd set-backfillfull-ratio 0.97
ceph config set osd osd_beacon_report_interval 10 || return 1
ceph config set mon mon_debug_extra_checks true || return 1
ceph config set mon mon_min_osdmap_epochs 100 || return 1
ceph config set mon mon_osdmap_full_prune_enabled true || return 1
ceph config set mon mon_osdmap_full_prune_min 200 || return 1
ceph config set mon mon_osdmap_full_prune_interval 10 || return 1
ceph config set mon mon_osdmap_full_prune_txsize 100 || return 1
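# (rough, hedged summary of the prune tunables set above: full_prune_enabled turns
#  full-osdmap pruning on, full_prune_min is how many full maps must exist before
#  pruning kicks in, full_prune_interval is the spacing between the full maps that
#  are kept, and full_prune_txsize caps how many maps are pruned per transaction)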
bash -x $base_test || return 1
return 0
}
main mon-osdmap-prune "$@"
| 1,440 | 23.844828 | 76 | sh |
null | ceph-main/qa/standalone/mon/mon-ping.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 SUSE LINUX GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7119" # git grep '\<7119\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_mon_ping() {
local dir=$1
run_mon $dir a || return 1
ceph ping mon.a || return 1
}
main mon-ping "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/mon-ping.sh"
# End:
| 1,270 | 26.042553 | 83 | sh |
null | ceph-main/qa/standalone/mon/mon-scrub.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Cloudwatt <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7120" # git grep '\<7120\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_mon_scrub() {
local dir=$1
run_mon $dir a || return 1
ceph mon scrub || return 1
}
main mon-scrub "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/mon-scrub.sh"
# End:
| 1,398 | 26.98 | 83 | sh |
null | ceph-main/qa/standalone/mon/mon-seesaw.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON_A="127.0.0.1:7139" # git grep '\<7139\>' : there must be only one
export CEPH_MON_B="127.0.0.1:7141" # git grep '\<7141\>' : there must be only one
export CEPH_MON_C="127.0.0.1:7142" # git grep '\<7142\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
export BASE_CEPH_ARGS=$CEPH_ARGS
CEPH_ARGS+="--mon-host=$CEPH_MON_A "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_mon_seesaw() {
local dir=$1
setup $dir || return
# start with 1 mon
run_mon $dir aa --public-addr $CEPH_MON_A || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
wait_for_quorum 300 1 || return 1
# add in a second
run_mon $dir bb --public-addr $CEPH_MON_B || return 1
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B"
wait_for_quorum 300 2 || return 1
# remove the first one
ceph mon rm aa || return 1
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_B"
sleep 5
wait_for_quorum 300 1 || return 1
# do some stuff that requires the osds be able to communicate with the
# mons. (see http://tracker.ceph.com/issues/17558)
ceph osd pool create foo 8
rados -p foo bench 1 write
wait_for_clean || return 1
# nuke monstore so that it will rejoin (otherwise we get
# "not in monmap and have been in a quorum before; must have been removed"
rm -rf $dir/aa
# add a back in
# (use a different addr to avoid bind issues)
run_mon $dir aa --public-addr $CEPH_MON_C || return 1
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_C,$CEPH_MON_B"
wait_for_quorum 300 2 || return 1
}
main mon-seesaw "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/mon-ping.sh"
# End:
| 2,144 | 28.383562 | 85 | sh |
null | ceph-main/qa/standalone/mon/osd-crush.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Cloudwatt <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7104" # git grep '\<7104\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | ${SED} -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_crush_rule_create_simple() {
local dir=$1
run_mon $dir a || return 1
ceph --format xml osd crush rule dump replicated_rule | \
egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
grep '<op>choose_firstn</op><num>0</num><type>osd</type>' || return 1
local rule=rule0
local root=host1
ceph osd crush add-bucket $root host
local failure_domain=osd
ceph osd crush rule create-simple $rule $root $failure_domain || return 1
ceph osd crush rule create-simple $rule $root $failure_domain 2>&1 | \
grep "$rule already exists" || return 1
ceph --format xml osd crush rule dump $rule | \
egrep '<op>take</op><item>[^<]+</item><item_name>'$root'</item_name>' | \
grep '<op>choose_firstn</op><num>0</num><type>'$failure_domain'</type>' || return 1
ceph osd crush rule rm $rule || return 1
}
function TEST_crush_rule_dump() {
local dir=$1
run_mon $dir a || return 1
local rule=rule1
ceph osd crush rule create-erasure $rule || return 1
test $(ceph --format json osd crush rule dump $rule | \
jq ".rule_name == \"$rule\"") == true || return 1
test $(ceph --format json osd crush rule dump | \
jq "map(select(.rule_name == \"$rule\")) | length == 1") == true || return 1
! ceph osd crush rule dump non_existent_rule || return 1
ceph osd crush rule rm $rule || return 1
}
function TEST_crush_rule_rm() {
local rule=erasure2
run_mon $dir a || return 1
ceph osd crush rule create-erasure $rule default || return 1
ceph osd crush rule ls | grep $rule || return 1
ceph osd crush rule rm $rule || return 1
! ceph osd crush rule ls | grep $rule || return 1
}
function TEST_crush_rule_create_erasure() {
local dir=$1
run_mon $dir a || return 1
# should have at least one OSD
run_osd $dir 0 || return 1
local rule=rule3
#
# create a new rule with the default profile, implicitly
#
ceph osd crush rule create-erasure $rule || return 1
ceph osd crush rule create-erasure $rule 2>&1 | \
grep "$rule already exists" || return 1
ceph --format xml osd crush rule dump $rule | \
egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
grep '<op>chooseleaf_indep</op><num>0</num><type>host</type>' || return 1
ceph osd crush rule rm $rule || return 1
! ceph osd crush rule ls | grep $rule || return 1
#
# create a new rule with the default profile, explicitly
#
ceph osd crush rule create-erasure $rule default || return 1
ceph osd crush rule ls | grep $rule || return 1
ceph osd crush rule rm $rule || return 1
! ceph osd crush rule ls | grep $rule || return 1
#
# create a new rule and the default profile, implicitly
#
ceph osd erasure-code-profile rm default || return 1
! ceph osd erasure-code-profile ls | grep default || return 1
ceph osd crush rule create-erasure $rule || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep 'profile set default' $dir/mon.a.log || return 1
ceph osd erasure-code-profile ls | grep default || return 1
ceph osd crush rule rm $rule || return 1
! ceph osd crush rule ls | grep $rule || return 1
}
function TEST_add_rule_failed() {
local dir=$1
run_mon $dir a || return 1
local root=host1
ceph osd crush add-bucket $root host
ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
ceph osd getcrushmap > $dir/crushmap || return 1
crushtool --decompile $dir/crushmap > $dir/crushmap.txt || return 1
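# append rules with ids 3..255 so that all rule ids up to 255 are taken; the
# create-simple call after setcrushmap is then expected to fail with ENOSPC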
for i in $(seq 3 255)
do
cat <<EOF
rule test_rule$i {
id $i
type replicated
step take $root
step choose firstn 0 type osd
step emit
}
EOF
done >> $dir/crushmap.txt
crushtool --compile $dir/crushmap.txt -o $dir/crushmap || return 1
ceph osd setcrushmap -i $dir/crushmap || return 1
ceph osd crush rule create-simple test_rule_nospace $root osd firstn 2>&1 | grep "Error ENOSPC" || return 1
}
function TEST_crush_rename_bucket() {
local dir=$1
run_mon $dir a || return 1
ceph osd crush add-bucket host1 host
ceph osd tree
! ceph osd tree | grep host2 || return 1
ceph osd crush rename-bucket host1 host2 || return 1
ceph osd tree
ceph osd tree | grep host2 || return 1
ceph osd crush rename-bucket host1 host2 || return 1 # idempotency
ceph osd crush rename-bucket nonexistent something 2>&1 | grep "Error ENOENT" || return 1
}
function TEST_crush_ls_node() {
local dir=$1
run_mon $dir a || return 1
ceph osd crush add-bucket default1 root
ceph osd crush add-bucket host1 host
ceph osd crush move host1 root=default1
ceph osd crush ls default1 | grep host1 || return 1
ceph osd crush ls default2 2>&1 | grep "Error ENOENT" || return 1
}
function TEST_crush_reject_empty() {
local dir=$1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
# should have at least one OSD
run_osd $dir 0 || return 1
create_rbd_pool || return 1
local empty_map=$dir/empty_map
:> $empty_map.txt
crushtool -c $empty_map.txt -o $empty_map.map || return 1
expect_failure $dir "Error EINVAL" \
ceph osd setcrushmap -i $empty_map.map || return 1
}
main osd-crush "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/osd-crush.sh"
# End:
| 6,754 | 33.28934 | 111 | sh |
null | ceph-main/qa/standalone/mon/osd-df.sh | #!/bin/bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7113" # git grep '\<7113\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_osd_df() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
# normal case
ceph osd df --f json-pretty | grep osd.0 || return 1
ceph osd df --f json-pretty | grep osd.1 || return 1
ceph osd df --f json-pretty | grep osd.2 || return 1
ceph osd df --f json-pretty | grep osd.3 || return 1
ceph osd df --f json-pretty | grep osd.4 || return 1
ceph osd df --f json-pretty | grep osd.5 || return 1
# filter by device class
osd_class=$(ceph osd crush get-device-class 0)
ceph osd df class $osd_class --f json-pretty | grep 'osd.0' || return 1
# post-nautilus the filter-type keyword is no longer required
ceph osd df $osd_class --f json-pretty | grep 'osd.0' || return 1
ceph osd crush rm-device-class 0 || return 1
ceph osd crush set-device-class aaa 0 || return 1
ceph osd df aaa --f json-pretty | grep 'osd.0' || return 1
ceph osd df aaa --f json-pretty | grep 'osd.1' && return 1
# reset osd.0's device class
ceph osd crush rm-device-class 0 || return 1
ceph osd crush set-device-class $osd_class 0 || return 1
# filter by crush node
ceph osd df osd.0 --f json-pretty | grep osd.0 || return 1
ceph osd df osd.0 --f json-pretty | grep osd.1 && return 1
ceph osd crush move osd.0 root=default host=foo || return 1
ceph osd crush move osd.1 root=default host=foo || return 1
ceph osd crush move osd.2 root=default host=foo || return 1
ceph osd crush move osd.3 root=default host=bar || return 1
ceph osd crush move osd.4 root=default host=bar || return 1
ceph osd crush move osd.5 root=default host=bar || return 1
ceph osd df tree foo --f json-pretty | grep foo || return 1
ceph osd df tree foo --f json-pretty | grep bar && return 1
ceph osd df foo --f json-pretty | grep osd.0 || return 1
ceph osd df foo --f json-pretty | grep osd.1 || return 1
ceph osd df foo --f json-pretty | grep osd.2 || return 1
ceph osd df foo --f json-pretty | grep osd.3 && return 1
ceph osd df foo --f json-pretty | grep osd.4 && return 1
ceph osd df foo --f json-pretty | grep osd.5 && return 1
ceph osd df tree bar --f json-pretty | grep bar || return 1
ceph osd df tree bar --f json-pretty | grep foo && return 1
ceph osd df bar --f json-pretty | grep osd.0 && return 1
ceph osd df bar --f json-pretty | grep osd.1 && return 1
ceph osd df bar --f json-pretty | grep osd.2 && return 1
ceph osd df bar --f json-pretty | grep osd.3 || return 1
ceph osd df bar --f json-pretty | grep osd.4 || return 1
ceph osd df bar --f json-pretty | grep osd.5 || return 1
# filter by pool
ceph osd crush rm-device-class all || return 1
ceph osd crush set-device-class nvme 0 1 3 4 || return 1
ceph osd crush rule create-replicated nvme-rule default host nvme || return 1
ceph osd pool create nvme-pool 12 12 nvme-rule || return 1
ceph osd df nvme-pool --f json-pretty | grep osd.0 || return 1
ceph osd df nvme-pool --f json-pretty | grep osd.1 || return 1
ceph osd df nvme-pool --f json-pretty | grep osd.2 && return 1
ceph osd df nvme-pool --f json-pretty | grep osd.3 || return 1
ceph osd df nvme-pool --f json-pretty | grep osd.4 || return 1
ceph osd df nvme-pool --f json-pretty | grep osd.5 && return 1
teardown $dir || return 1
}
main osd-df "$@"
| 4,107 | 40.918367 | 83 | sh |
null | ceph-main/qa/standalone/mon/osd-erasure-code-profile.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Cloudwatt <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7220" # git grep '\<7220\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_set() {
local dir=$1
local id=$2
run_mon $dir a || return 1
local profile=myprofile
#
# no key=value pairs : use the default configuration
#
ceph osd erasure-code-profile set $profile 2>&1 || return 1
ceph osd erasure-code-profile get $profile | \
grep plugin=jerasure || return 1
ceph osd erasure-code-profile rm $profile
#
# key=value pairs override the default
#
ceph osd erasure-code-profile set $profile \
key=value plugin=isa || return 1
ceph osd erasure-code-profile get $profile | \
grep -e key=value -e plugin=isa || return 1
#
# --force is required to override an existing profile
#
! ceph osd erasure-code-profile set $profile > $dir/out 2>&1 || return 1
grep 'will not override' $dir/out || return 1
ceph osd erasure-code-profile set $profile key=other --force || return 1
ceph osd erasure-code-profile get $profile | \
grep key=other || return 1
ceph osd erasure-code-profile rm $profile # cleanup
}
function TEST_ls() {
local dir=$1
local id=$2
run_mon $dir a || return 1
local profile=myprofile
! ceph osd erasure-code-profile ls | grep $profile || return 1
ceph osd erasure-code-profile set $profile 2>&1 || return 1
ceph osd erasure-code-profile ls | grep $profile || return 1
ceph --format xml osd erasure-code-profile ls | \
grep "<profile>$profile</profile>" || return 1
ceph osd erasure-code-profile rm $profile # cleanup
}
function TEST_rm() {
local dir=$1
local id=$2
run_mon $dir a || return 1
local profile=myprofile
ceph osd erasure-code-profile set $profile 2>&1 || return 1
ceph osd erasure-code-profile ls | grep $profile || return 1
ceph osd erasure-code-profile rm $profile || return 1
! ceph osd erasure-code-profile ls | grep $profile || return 1
ceph osd erasure-code-profile rm WRONG 2>&1 | \
grep "WRONG does not exist" || return 1
ceph osd erasure-code-profile set $profile || return 1
create_pool poolname 12 12 erasure $profile || return 1
! ceph osd erasure-code-profile rm $profile > $dir/out 2>&1 || return 1
grep "poolname.*using.*$profile" $dir/out || return 1
ceph osd pool delete poolname poolname --yes-i-really-really-mean-it || return 1
ceph osd erasure-code-profile rm $profile || return 1
ceph osd erasure-code-profile rm $profile # cleanup
}
function TEST_get() {
local dir=$1
local id=$2
run_mon $dir a || return 1
local default_profile=default
ceph osd erasure-code-profile get $default_profile | \
grep plugin=jerasure || return 1
ceph --format xml osd erasure-code-profile get $default_profile | \
grep '<plugin>jerasure</plugin>' || return 1
! ceph osd erasure-code-profile get WRONG > $dir/out 2>&1 || return 1
grep -q "unknown erasure code profile 'WRONG'" $dir/out || return 1
}
function TEST_set_idempotent() {
local dir=$1
local id=$2
run_mon $dir a || return 1
#
# The default profile is set using a code path different from
# ceph osd erasure-code-profile set: verify that it is idempotent,
# as if it was using the same code path.
#
ceph osd erasure-code-profile set default k=2 m=2 2>&1 || return 1
local profile
#
# Because plugin=jerasure is the default, it uses a slightly
# different code path where defaults (m=1 for instance) are added
# implicitly.
#
profile=profileidempotent1
! ceph osd erasure-code-profile ls | grep $profile || return 1
ceph osd erasure-code-profile set $profile k=2 crush-failure-domain=osd 2>&1 || return 1
ceph osd erasure-code-profile ls | grep $profile || return 1
ceph osd erasure-code-profile set $profile k=2 crush-failure-domain=osd 2>&1 || return 1
ceph osd erasure-code-profile rm $profile # cleanup
#
# In the general case the profile is stored exactly as given on the command line
#
profile=profileidempotent2
! ceph osd erasure-code-profile ls | grep $profile || return 1
ceph osd erasure-code-profile set $profile plugin=lrc k=4 m=2 l=3 crush-failure-domain=osd 2>&1 || return 1
ceph osd erasure-code-profile ls | grep $profile || return 1
ceph osd erasure-code-profile set $profile plugin=lrc k=4 m=2 l=3 crush-failure-domain=osd 2>&1 || return 1
ceph osd erasure-code-profile rm $profile # cleanup
}
function TEST_format_invalid() {
local dir=$1
local profile=profile
# osd_pool_default_erasure-code-profile is
# valid JSON but not of the expected type
run_mon $dir a \
--osd_pool_default_erasure-code-profile 1 || return 1
! ceph osd erasure-code-profile set $profile > $dir/out 2>&1 || return 1
cat $dir/out
grep 'must be a JSON object' $dir/out || return 1
}
function TEST_format_json() {
local dir=$1
# osd_pool_default_erasure-code-profile is JSON
expected='"plugin":"isa"'
run_mon $dir a \
--osd_pool_default_erasure-code-profile "{$expected}" || return 1
ceph --format json osd erasure-code-profile get default | \
grep "$expected" || return 1
}
function TEST_format_plain() {
local dir=$1
# osd_pool_default_erasure-code-profile is plain text
expected='"plugin":"isa"'
run_mon $dir a \
--osd_pool_default_erasure-code-profile "plugin=isa" || return 1
ceph --format json osd erasure-code-profile get default | \
grep "$expected" || return 1
}
function TEST_profile_k_sanity() {
local dir=$1
local profile=profile-sanity
run_mon $dir a || return 1
expect_failure $dir 'k must be a multiple of (k + m) / l' \
ceph osd erasure-code-profile set $profile \
plugin=lrc \
l=1 \
k=1 \
m=1 || return 1
if erasure_code_plugin_exists isa ; then
expect_failure $dir 'k=1 must be >= 2' \
ceph osd erasure-code-profile set $profile \
plugin=isa \
k=1 \
m=1 || return 1
else
echo "SKIP because plugin isa has not been built"
fi
expect_failure $dir 'k=1 must be >= 2' \
ceph osd erasure-code-profile set $profile \
plugin=jerasure \
k=1 \
m=1 || return 1
}
function TEST_invalid_crush_failure_domain() {
local dir=$1
run_mon $dir a || return 1
local profile=ec_profile
local crush_failure_domain=invalid_failure_domain
! ceph osd erasure-code-profile set $profile k=4 m=2 crush-failure-domain=$crush_failure_domain 2>&1 || return 1
}
main osd-erasure-code-profile "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/osd-erasure-code-profile.sh"
# End:
| 7,883 | 31.713693 | 116 | sh |
null | ceph-main/qa/standalone/mon/osd-pool-create.sh | #!/usr/bin/env bash
#
# Copyright (C) 2013, 2014 Cloudwatt <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7105" # git grep '\<7105\>' : there must be only one
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
export CEPH_ARGS
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
# Before http://tracker.ceph.com/issues/8307 the invalid profile was created
function TEST_erasure_invalid_profile() {
local dir=$1
run_mon $dir a || return 1
local poolname=pool_erasure
local notaprofile=not-a-valid-erasure-code-profile
! ceph osd pool create $poolname 12 12 erasure $notaprofile || return 1
! ceph osd erasure-code-profile ls | grep $notaprofile || return 1
}
function TEST_erasure_crush_rule() {
local dir=$1
run_mon $dir a || return 1
#
# choose the crush rule used with an erasure coded pool
#
local crush_rule=myrule
! ceph osd crush rule ls | grep $crush_rule || return 1
ceph osd crush rule create-erasure $crush_rule
ceph osd crush rule ls | grep $crush_rule
local poolname
poolname=pool_erasure1
! ceph --format json osd dump | grep '"crush_rule":1' || return 1
ceph osd pool create $poolname 12 12 erasure default $crush_rule
ceph --format json osd dump | grep '"crush_rule":1' || return 1
#
# a crush rule by the name of the pool is implicitly created
#
poolname=pool_erasure2
ceph osd erasure-code-profile set myprofile
ceph osd pool create $poolname 12 12 erasure myprofile
ceph osd crush rule ls | grep $poolname || return 1
#
# a non-existent crush rule given as an argument is an error
# http://tracker.ceph.com/issues/9304
#
poolname=pool_erasure3
! ceph osd pool create $poolname 12 12 erasure myprofile INVALIDRULE || return 1
}
function TEST_erasure_code_profile_default() {
local dir=$1
local poolname=pool_default
run_mon $dir a || return 1
ceph osd erasure-code-profile rm default || return 1
! ceph osd erasure-code-profile ls | grep default || return 1
ceph osd pool create $poolname 12 12 erasure default
ceph osd erasure-code-profile ls | grep default || return 1
}
function TEST_erasure_crush_stripe_unit() {
local dir=$1
# the default stripe unit is used to initialize the pool
run_mon $dir a --public-addr $CEPH_MON
stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
eval local $(ceph osd erasure-code-profile get myprofile | grep k=)
stripe_width=$((stripe_unit * k))
ceph osd pool create pool_erasure 12 12 erasure
ceph --format json osd dump | tee $dir/osd.json
grep '"stripe_width":'$stripe_width $dir/osd.json > /dev/null || return 1
}
function TEST_erasure_crush_stripe_unit_padded() {
local dir=$1
# setting osd_pool_erasure_code_stripe_unit modifies the stripe_width
# and it is padded as required by the default plugin
profile+=" plugin=jerasure"
profile+=" technique=reed_sol_van"
k=4
profile+=" k=$k"
profile+=" m=2"
actual_stripe_unit=2048
desired_stripe_unit=$((actual_stripe_unit - 1))
actual_stripe_width=$((actual_stripe_unit * k))
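# i.e. we request a stripe unit of 2047, expect the plugin to pad it up to 2048,
# and therefore expect a stripe_width of 2048 * 4 = 8192 in the osd dump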
run_mon $dir a \
--osd_pool_erasure_code_stripe_unit $desired_stripe_unit \
--osd_pool_default_erasure_code_profile "$profile" || return 1
ceph osd pool create pool_erasure 12 12 erasure
ceph osd dump | tee $dir/osd.json
grep "stripe_width $actual_stripe_width" $dir/osd.json > /dev/null || return 1
}
function TEST_erasure_code_pool() {
local dir=$1
run_mon $dir a || return 1
ceph --format json osd dump > $dir/osd.json
local expected='"erasure_code_profile":"default"'
! grep "$expected" $dir/osd.json || return 1
ceph osd pool create erasurecodes 12 12 erasure
ceph --format json osd dump | tee $dir/osd.json
grep "$expected" $dir/osd.json > /dev/null || return 1
ceph osd pool create erasurecodes 12 12 erasure 2>&1 | \
grep 'already exists' || return 1
ceph osd pool create erasurecodes 12 12 2>&1 | \
grep 'cannot change to type replicated' || return 1
}
function TEST_replicated_pool_with_rule() {
local dir=$1
run_mon $dir a
local rule=rule0
local root=host1
ceph osd crush add-bucket $root host
local failure_domain=osd
local poolname=mypool
ceph osd crush rule create-simple $rule $root $failure_domain || return 1
ceph osd crush rule ls | grep $rule
ceph osd pool create $poolname 12 12 replicated $rule || return 1
rule_id=`ceph osd crush rule dump $rule | grep "rule_id" | awk -F[' ':,] '{print $4}'`
ceph osd pool get $poolname crush_rule 2>&1 | \
grep "crush_rule: $rule_id" || return 1
# non-existent crush rule
ceph osd pool create newpool 12 12 replicated non-existent 2>&1 | \
grep "doesn't exist" || return 1
}
function TEST_erasure_code_pool_lrc() {
local dir=$1
run_mon $dir a || return 1
ceph osd erasure-code-profile set LRCprofile \
plugin=lrc \
mapping=DD_ \
layers='[ [ "DDc", "" ] ]' || return 1
ceph --format json osd dump > $dir/osd.json
local expected='"erasure_code_profile":"LRCprofile"'
local poolname=erasurecodes
! grep "$expected" $dir/osd.json || return 1
ceph osd pool create $poolname 12 12 erasure LRCprofile
ceph --format json osd dump | tee $dir/osd.json
grep "$expected" $dir/osd.json > /dev/null || return 1
ceph osd crush rule ls | grep $poolname || return 1
}
function TEST_replicated_pool() {
local dir=$1
run_mon $dir a || return 1
ceph osd pool create replicated 12 12 replicated replicated_rule || return 1
ceph osd pool create replicated 12 12 replicated replicated_rule 2>&1 | \
grep 'already exists' || return 1
# default is replicated
ceph osd pool create replicated1 12 12 || return 1
# default is replicated, pgp_num = pg_num
ceph osd pool create replicated2 12 || return 1
ceph osd pool create replicated 12 12 erasure 2>&1 | \
grep 'cannot change to type erasure' || return 1
}
function TEST_no_pool_delete() {
local dir=$1
run_mon $dir a || return 1
ceph osd pool create foo 1 || return 1
ceph tell mon.a injectargs -- --no-mon-allow-pool-delete || return 1
! ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
ceph tell mon.a injectargs -- --mon-allow-pool-delete || return 1
ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
}
function TEST_utf8_cli() {
local dir=$1
run_mon $dir a || return 1
# Hopefully it's safe to include literal UTF-8 characters to test
# the fix for http://tracker.ceph.com/issues/7387. If it turns out
# to not be OK (when is the default encoding *not* UTF-8?), maybe
# the character '黄' can be replaced with the escape $'\xe9\xbb\x84'
OLDLANG="$LANG"
export LANG=en_US.UTF-8
ceph osd pool create 黄 16 || return 1
ceph osd lspools 2>&1 | \
grep "黄" || return 1
ceph -f json-pretty osd dump | \
python3 -c "import json; import sys; json.load(sys.stdin)" || return 1
ceph osd pool delete 黄 黄 --yes-i-really-really-mean-it
export LANG="$OLDLANG"
}
function check_pool_priority() {
local dir=$1
shift
local pools=$1
shift
local spread="$1"
shift
local results="$1"
setup $dir || return 1
EXTRA_OPTS="--debug_allow_any_pool_priority=true"
export EXTRA_OPTS
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
# Add pool 0 too
for i in $(seq 0 $pools)
do
num=$(expr $i + 1)
ceph osd pool create test${num} 1 1
done
wait_for_clean || return 1
for i in $(seq 0 $pools)
do
num=$(expr $i + 1)
ceph osd pool set test${num} recovery_priority $(expr $i \* $spread)
done
#grep "recovery_priority.*pool set" out/mon.a.log
ceph osd dump
# Restart everything so mon converts the priorities
kill_daemons
run_mon $dir a || return 1
run_mgr $dir x || return 1
activate_osd $dir 0 || return 1
activate_osd $dir 1 || return 1
activate_osd $dir 2 || return 1
sleep 5
grep convert $dir/mon.a.log
ceph osd dump
pos=1
for i in $(ceph osd dump | grep ^pool | sed 's/.*recovery_priority //' | awk '{ print $1 }')
do
result=$(echo $results | awk "{ print \$${pos} }")
# A value of 0 is an unset value so sed/awk gets "pool"
if test $result = "0"
then
result="pool"
fi
test "$result" = "$i" || return 1
pos=$(expr $pos + 1)
done
}
function TEST_pool_pos_only_prio() {
local dir=$1
check_pool_priority $dir 20 5 "0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10" || return 1
}
function TEST_pool_neg_only_prio() {
local dir=$1
check_pool_priority $dir 20 -5 "0 0 -1 -1 -2 -2 -3 -3 -4 -4 -5 -5 -6 -6 -7 -7 -8 -8 -9 -9 -10" || return 1
}
function TEST_pool_both_prio() {
local dir=$1
check_pool_priority $dir 20 "5 - 50" "-10 -9 -8 -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10" || return 1
}
function TEST_pool_both_prio_no_neg() {
local dir=$1
check_pool_priority $dir 20 "2 - 4" "-4 -2 0 0 1 1 2 2 3 3 4 5 5 6 6 7 7 8 8 9 10" || return 1
}
function TEST_pool_both_prio_no_pos() {
local dir=$1
check_pool_priority $dir 20 "2 - 36" "-10 -9 -8 -8 -7 -7 -6 -6 -5 -5 -4 -3 -3 -2 -2 -1 -1 0 0 2 4" || return 1
}
main osd-pool-create "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/osd-pool-create.sh"
# End:
| 10,491 | 33.064935 | 113 | sh |
null | ceph-main/qa/standalone/mon/osd-pool-df.sh | #!/usr/bin/env bash
#
# Copyright (C) 2017 Tencent <[email protected]>
#
# Author: Chang Liu <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7113" # git grep '\<7113\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_ceph_df() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
run_mgr $dir x || return 1
profile+=" plugin=jerasure"
profile+=" technique=reed_sol_van"
profile+=" k=4"
profile+=" m=2"
profile+=" crush-failure-domain=osd"
ceph osd erasure-code-profile set ec42profile ${profile}
local rep_poolname=testcephdf_replicate
local ec_poolname=testcephdf_erasurecode
create_pool $rep_poolname 6 6 replicated
create_pool $ec_poolname 6 6 erasure ec42profile
flush_pg_stats
local global_avail=`ceph df -f json | jq '.stats.total_avail_bytes'`
local rep_avail=`ceph df -f json | jq '.pools | map(select(.name == "'$rep_poolname'"))[0].stats.max_avail'`
local ec_avail=`ceph df -f json | jq '.pools | map(select(.name == "'$ec_poolname'"))[0].stats.max_avail'`
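# sanity reasoning behind the checks below: a size-3 replicated pool can expose at
# most 1/3 of the raw free space as max_avail, and a k=4 m=2 EC pool at most 1/1.5,
# so global_avail should be at least 3x and 1.5x the respective max_avail values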
echo "${global_avail} >= ${rep_avail}*3" | bc || return 1
echo "${global_avail} >= ${ec_avail}*1.5" | bc || return 1
ceph osd pool delete $rep_poolname $rep_poolname --yes-i-really-really-mean-it
ceph osd pool delete $ec_poolname $ec_poolname --yes-i-really-really-mean-it
ceph osd erasure-code-profile rm ec42profile
teardown $dir || return 1
}
main osd-pool-df "$@"
| 2,547 | 32.090909 | 112 | sh |
null | ceph-main/qa/standalone/mon/test_pool_quota.sh | #!/usr/bin/env bash
#
# Generic pool quota test
#
# Includes
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:17108" # git grep '\<17108\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
$func $dir || return 1
done
}
function TEST_pool_quota() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
local poolname=testquota
create_pool $poolname 20
local objects=`ceph df detail | grep -w $poolname|awk '{print $3}'`
local bytes=`ceph df detail | grep -w $poolname|awk '{print $4}'`
echo $objects
echo $bytes
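# with no quota configured yet, "ceph df detail" is expected to show N/A in the
# quota (objects/bytes) columns parsed above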
if [ $objects != 'N/A' ] || [ $bytes != 'N/A' ] ;
then
return 1
fi
ceph osd pool set-quota $poolname max_objects 1000
ceph osd pool set-quota $poolname max_bytes 1024
objects=`ceph df detail | grep -w $poolname|awk '{print $3}'`
bytes=`ceph df detail | grep -w $poolname|awk '{print $4}'`
if [ $objects != '1000' ] || [ $bytes != '1K' ] ;
then
return 1
fi
ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
teardown $dir || return 1
}
main testpoolquota
| 1,496 | 22.390625 | 85 | sh |
null | ceph-main/qa/standalone/osd-backfill/osd-backfill-prio.sh | #!/usr/bin/env bash
#
# Copyright (C) 2019 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
# Fix port????
export CEPH_MON="127.0.0.1:7114" # git grep '\<7114\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20 "
CEPH_ARGS+="--osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10 "
# Set osd op queue = wpq for the tests. Backfill priority is not
# considered by mclock_scheduler leading to unexpected results.
CEPH_ARGS+="--osd-op-queue=wpq "
export objects=50
export poolprefix=test
export FORCE_PRIO="254" # See OSD_BACKFILL_PRIORITY_FORCED
export DEGRADED_PRIO="150" # See OSD_BACKFILL_DEGRADED_PRIORITY_BASE + 10
export NORMAL_PRIO="110" # See OSD_BACKFILL_PRIORITY_BASE + 10
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_backfill_priority() {
local dir=$1
local pools=10
local OSDS=5
# size 2 -> 1 means degraded by 1, so add 1 to base prio
local degraded_prio=$(expr $DEGRADED_PRIO + 1)
local max_tries=10
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
ceph osd pool set "${poolprefix}$p" size 2
done
sleep 5
wait_for_clean || return 1
ceph pg dump pgs
# Find 3 pools whose pg has the same primary OSD but whose second
# replica is on a different OSD in each pool.
local PG1
local POOLNUM1
local pool1
local chk_osd1_1
local chk_osd1_2
local PG2
local POOLNUM2
local pool2
local chk_osd2
local PG3
local POOLNUM3
local pool3
for p in $(seq 1 $pools)
do
ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting
local test_osd1=$(head -1 $dir/acting)
local test_osd2=$(tail -1 $dir/acting)
if [ -z "$PG1" ];
then
PG1="${p}.0"
POOLNUM1=$p
pool1="${poolprefix}$p"
chk_osd1_1=$test_osd1
chk_osd1_2=$test_osd2
elif [ -z "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 ];
then
PG2="${p}.0"
POOLNUM2=$p
pool2="${poolprefix}$p"
chk_osd2=$test_osd2
elif [ -n "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 -a "$chk_osd2" != $test_osd2 ];
then
PG3="${p}.0"
POOLNUM3=$p
pool3="${poolprefix}$p"
break
fi
done
rm -f $dir/acting
if [ "$pool2" = "" -o "pool3" = "" ];
then
echo "Failure to find appropirate PGs"
return 1
fi
for p in $(seq 1 $pools)
do
if [ $p != $POOLNUM1 -a $p != $POOLNUM2 -a $p != $POOLNUM3 ];
then
delete_pool ${poolprefix}$p
fi
done
ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
ceph osd pool set $pool3 size 1 --yes-i-really-mean-it
wait_for_clean || return 1
dd if=/dev/urandom of=$dir/data bs=1M count=10
p=1
for pname in $pool1 $pool2 $pool3
do
for i in $(seq 1 $objects)
do
rados -p ${pname} put obj${i}-p${p} $dir/data
done
p=$(expr $p + 1)
done
local otherosd=$(get_not_primary $pool1 obj1-p1)
ceph pg dump pgs
ERRORS=0
ceph osd set nobackfill
ceph osd set noout
# Get a pg to want to backfill and quickly force it
# to be preempted.
ceph osd pool set $pool3 size 2
sleep 2
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
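# hedged sketch of the dump_recovery_reservations output consumed by the jq
# expressions below (illustrative, not verbatim):
#   { "local_reservations": {
#       "in_progress": [ { "item": "1.0", "prio": 254, ... } ],
#       "queues": [ { "items": [ { "item": "2.0", "prio": 110, ... } ] } ] },
#     "remote_reservations": { ... } }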
# 3. Item is in progress, adjust priority with no higher priority waiting
for i in $(seq 1 $max_tries)
do
if ! ceph pg force-backfill $PG3 2>&1 | grep -q "doesn't require backfilling"; then
break
fi
if [ "$i" = "$max_tries" ]; then
echo "ERROR: Didn't appear to be able to force-backfill"
ERRORS=$(expr $ERRORS + 1)
fi
sleep 2
done
flush_pg_stats || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
ceph osd out osd.$chk_osd1_2
sleep 2
flush_pg_stats || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
ceph pg dump pgs
ceph osd pool set $pool2 size 2
sleep 2
flush_pg_stats || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
cat $dir/out
ceph pg dump pgs
PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG1}\")).prio")
if [ "$PRIO" != "$NORMAL_PRIO" ];
then
echo "The normal PG ${PG1} doesn't have prio $NORMAL_PRIO queued waiting"
ERRORS=$(expr $ERRORS + 1)
fi
# Using eval will strip double-quotes from item
eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG3} ];
then
echo "The force-backfill PG $PG3 didn't become the in progress item"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
if [ "$PRIO" != $FORCE_PRIO ];
then
echo "The force-backfill PG ${PG3} doesn't have prio $FORCE_PRIO"
ERRORS=$(expr $ERRORS + 1)
fi
fi
# 1. Item is queued, re-queue with new priority
for i in $(seq 1 $max_tries)
do
if ! ceph pg force-backfill $PG2 2>&1 | grep -q "doesn't require backfilling"; then
break
fi
if [ "$i" = "$max_tries" ]; then
echo "ERROR: Didn't appear to be able to force-backfill"
ERRORS=$(expr $ERRORS + 1)
fi
sleep 2
done
sleep 2
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
cat $dir/out
PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio")
if [ "$PRIO" != "$FORCE_PRIO" ];
then
echo "The second force-backfill PG ${PG2} doesn't have prio $FORCE_PRIO"
ERRORS=$(expr $ERRORS + 1)
fi
flush_pg_stats || return 1
# 4. Item is in progress; if higher-priority items are waiting, they preempt the item
ceph pg cancel-force-backfill $PG3 || return 1
sleep 2
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
cat $dir/out
PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG3}\")).prio")
if [ "$PRIO" != "$degraded_prio" ];
then
echo "After cancel-force-backfill PG ${PG3} doesn't have prio $degraded_prio"
ERRORS=$(expr $ERRORS + 1)
fi
eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG2} ];
then
echo "The force-recovery PG $PG2 didn't become the in progress item"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
if [ "$PRIO" != $FORCE_PRIO ];
then
echo "The first force-recovery PG ${PG2} doesn't have prio $FORCE_PRIO"
ERRORS=$(expr $ERRORS + 1)
fi
fi
ceph pg cancel-force-backfill $PG2 || return 1
sleep 5
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
# 2. Item is queued; re-queue and preempt because the new priority is higher than that of an in-progress item
flush_pg_stats || return 1
ceph pg force-backfill $PG3 || return 1
sleep 2
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
cat $dir/out
PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio")
if [ "$PRIO" != "$degraded_prio" ];
then
echo "After cancel-force-backfill PG ${PG2} doesn't have prio $degraded_prio"
ERRORS=$(expr $ERRORS + 1)
fi
eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG3} ];
then
echo "The force-backfill PG $PG3 didn't get promoted to an in progress item"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
if [ "$PRIO" != $FORCE_PRIO ];
then
echo "The force-backfill PG ${PG2} doesn't have prio $FORCE_PRIO"
ERRORS=$(expr $ERRORS + 1)
fi
fi
ceph osd unset noout
ceph osd unset nobackfill
wait_for_clean "CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations" || return 1
ceph pg dump pgs
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_pgstate_history
if [ $ERRORS != "0" ];
then
echo "$ERRORS error(s) found"
else
echo TEST PASSED
fi
delete_pool $pool1
delete_pool $pool2
delete_pool $pool3
kill_daemons $dir || return 1
return $ERRORS
}
#
# Show that pool recovery_priority is added to the backfill priority
#
# Create 2 pools with 2 OSDs with different primarys
# pool 1 with recovery_priority 1
# pool 2 with recovery_priority 2
#
# Start backfill by changing the pool sizes from 1 to 2
# Use dump_recovery_reservations to verify priorities
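# The expected priorities checked below are base $DEGRADED_PRIO + 1 (one
# missing replica) + the pool's recovery_priority, so recovery_priority 1
# and 2 map to DEGRADED_PRIO+2 and DEGRADED_PRIO+3 respectively (see the
# pool1_prio/pool2_prio calculations inside the function).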
function TEST_backfill_pool_priority() {
local dir=$1
local pools=3 # Don't assume the first 2 pools are exactly what we want
local OSDS=2
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
ceph osd pool set "${poolprefix}$p" size 2
done
sleep 5
wait_for_clean || return 1
ceph pg dump pgs
# Find 2 pools with different primaries which
# means the replica must be on another osd.
local PG1
local POOLNUM1
local pool1
local chk_osd1_1
local chk_osd1_2
local PG2
local POOLNUM2
local pool2
local chk_osd2_1
local chk_osd2_2
for p in $(seq 1 $pools)
do
ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting
local test_osd1=$(head -1 $dir/acting)
local test_osd2=$(tail -1 $dir/acting)
if [ -z "$PG1" ];
then
PG1="${p}.0"
POOLNUM1=$p
pool1="${poolprefix}$p"
chk_osd1_1=$test_osd1
chk_osd1_2=$test_osd2
elif [ $chk_osd1_1 != $test_osd1 ];
then
PG2="${p}.0"
POOLNUM2=$p
pool2="${poolprefix}$p"
chk_osd2_1=$test_osd1
chk_osd2_2=$test_osd2
break
fi
done
rm -f $dir/acting
if [ "$pool2" = "" ];
then
echo "Failure to find appropirate PGs"
return 1
fi
for p in $(seq 1 $pools)
do
if [ $p != $POOLNUM1 -a $p != $POOLNUM2 ];
then
delete_pool ${poolprefix}$p
fi
done
pool1_extra_prio=1
pool2_extra_prio=2
# size 2 -> 1 means degraded by 1, so add 1 to base prio
pool1_prio=$(expr $DEGRADED_PRIO + 1 + $pool1_extra_prio)
pool2_prio=$(expr $DEGRADED_PRIO + 1 + $pool2_extra_prio)
ceph osd pool set $pool1 size 1 --yes-i-really-mean-it
ceph osd pool set $pool1 recovery_priority $pool1_extra_prio
ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
ceph osd pool set $pool2 recovery_priority $pool2_extra_prio
wait_for_clean || return 1
dd if=/dev/urandom of=$dir/data bs=1M count=10
p=1
for pname in $pool1 $pool2
do
for i in $(seq 1 $objects)
do
rados -p ${pname} put obj${i}-p${p} $dir/data
done
p=$(expr $p + 1)
done
local otherosd=$(get_not_primary $pool1 obj1-p1)
ceph pg dump pgs
ERRORS=0
ceph osd pool set $pool1 size 2
ceph osd pool set $pool2 size 2
sleep 5
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/dump.${chk_osd1_1}.out
echo osd.${chk_osd1_1}
cat $dir/dump.${chk_osd1_1}.out
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_2}) dump_recovery_reservations > $dir/dump.${chk_osd1_2}.out
echo osd.${chk_osd1_2}
cat $dir/dump.${chk_osd1_2}.out
# Using eval will strip double-quotes from item
eval ITEM=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG1} ];
then
echo "The primary PG ${PG1} didn't become the in progress item"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].prio')
if [ "$PRIO" != $pool1_prio ];
then
echo "The primary PG ${PG1} doesn't have prio $pool1_prio"
ERRORS=$(expr $ERRORS + 1)
fi
fi
# Using eval will strip double-quotes from item
eval ITEM=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG1} ];
then
echo "The primary PG ${PG1} didn't become the in progress item on remote"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].prio')
if [ "$PRIO" != $pool1_prio ];
then
echo "The primary PG ${PG1} doesn't have prio $pool1_prio on remote"
ERRORS=$(expr $ERRORS + 1)
fi
fi
# Using eval will strip double-quotes from item
eval ITEM=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG2} ];
then
echo "The primary PG ${PG2} didn't become the in progress item"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].prio')
if [ "$PRIO" != $pool2_prio ];
then
echo "The primary PG ${PG2} doesn't have prio $pool2_prio"
ERRORS=$(expr $ERRORS + 1)
fi
fi
# Using eval will strip double-quotes from item
eval ITEM=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG2} ];
then
echo "The primary PG $PG2 didn't become the in progress item on remote"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].prio')
if [ "$PRIO" != $pool2_prio ];
then
echo "The primary PG ${PG2} doesn't have prio $pool2_prio on remote"
ERRORS=$(expr $ERRORS + 1)
fi
fi
wait_for_clean || return 1
if [ $ERRORS != "0" ];
then
echo "$ERRORS error(s) found"
else
echo TEST PASSED
fi
delete_pool $pool1
delete_pool $pool2
kill_daemons $dir || return 1
return $ERRORS
}
main osd-backfill-prio "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-prio.sh"
# End:
| 15,875 | 29.355641 | 128 | sh |
null | ceph-main/qa/standalone/osd-backfill/osd-backfill-recovery-log.sh | #!/usr/bin/env bash
#
# Copyright (C) 2019 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
# Fix port????
export CEPH_MON="127.0.0.1:7129" # git grep '\<7129\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20 "
CEPH_ARGS+="--osd_mclock_override_recovery_settings=true "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
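# _common_test <dir> <extra_opts> <expected_log_len> <expected_dups_len> <objects> [moreobjects]
#
# Brings up a 6-OSD cluster with the given pg-log tuning, writes <objects>
# (plus optional <moreobjects> after the whole up set is marked out), then
# runs ceph-objectstore-tool "--op log" against the new primary and checks
# that pg_log_t.log and pg_log_t.dups have the expected lengths.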
function _common_test() {
local dir=$1
local extra_opts="$2"
local loglen="$3"
local dupslen="$4"
local objects="$5"
local moreobjects=${6:-0}
local OSDS=6
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
export EXTRA_OPTS=" $extra_opts"
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
create_pool test 1 1
for j in $(seq 1 $objects)
do
rados -p test put obj-${j} /etc/passwd
done
# Mark out all OSDs for this pool
ceph osd out $(ceph pg dump pgs --format=json | jq '.pg_stats[0].up[]')
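# Marking every OSD in the up set out forces the PG onto new OSDs, which
# exercises the copy_up_to()/copy_after() log-copy paths that the grep
# near the end of this function looks for in the OSD logs.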
if [ "$moreobjects" != "0" ]; then
for j in $(seq 1 $moreobjects)
do
rados -p test put obj-more-${j} /etc/passwd
done
fi
sleep 1
wait_for_clean
flush_pg_stats
newprimary=$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')
kill_daemons
ERRORS=0
_objectstore_tool_nodown $dir $newprimary --no-mon-config --pgid 1.0 --op log | tee $dir/result.log
LOGLEN=$(jq '.pg_log_t.log | length' $dir/result.log)
if [ $LOGLEN != "$loglen" ]; then
echo "FAILED: Wrong log length got $LOGLEN (expected $loglen)"
ERRORS=$(expr $ERRORS + 1)
fi
DUPSLEN=$(jq '.pg_log_t.dups | length' $dir/result.log)
if [ $DUPSLEN != "$dupslen" ]; then
echo "FAILED: Wrong dups length got $DUPSLEN (expected $dupslen)"
ERRORS=$(expr $ERRORS + 1)
fi
grep "copy_up_to\|copy_after" $dir/osd.*.log
rm -f $dir/result.log
if [ $ERRORS != "0" ]; then
echo TEST FAILED
return 1
fi
}
# Cause copy_up_to() to only partially copy logs, copy additional dups, and trim dups
function TEST_backfill_log_1() {
local dir=$1
_common_test $dir "--osd_min_pg_log_entries=1 --osd_max_pg_log_entries=2 --osd_pg_log_dups_tracked=10" 2 8 150
}
# Cause copy_up_to() to only partially copy logs, copy additional dups
function TEST_backfill_log_2() {
local dir=$1
_common_test $dir "--osd_min_pg_log_entries=1 --osd_max_pg_log_entries=2" 2 148 150
}
# Cause copy_after() to only copy logs, no dups
function TEST_recovery_1() {
local dir=$1
_common_test $dir "--osd_min_pg_log_entries=50 --osd_max_pg_log_entries=50 --osd_pg_log_dups_tracked=60 --osd_pg_log_trim_min=10" 40 0 40
}
# Cause copy_after() to copy logs with dups
function TEST_recovery_2() {
local dir=$1
_common_test $dir "--osd_min_pg_log_entries=150 --osd_max_pg_log_entries=150 --osd_pg_log_dups_tracked=3000 --osd_pg_log_trim_min=10" 151 10 141 20
}
main osd-backfill-recovery-log "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-recovery-log.sh"
# End:
| 3,941 | 27.157143 | 151 | sh |
null | ceph-main/qa/standalone/osd-backfill/osd-backfill-space.sh | #!/usr/bin/env bash
#
# Copyright (C) 2018 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7180" # git grep '\<7180\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10 "
CEPH_ARGS+="--fake_statfs_for_testing=3686400 "
CEPH_ARGS+="--osd_max_backfills=10 "
CEPH_ARGS+="--osd_mclock_override_recovery_settings=true "
export objects=600
export poolprefix=test
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function get_num_in_state() {
local state=$1
local expression
expression+="select(contains(\"${state}\"))"
ceph --format json pg dump pgs 2>/dev/null | \
jq ".pg_stats | [.[] | .state | $expression] | length"
}
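# Example: "get_num_in_state backfilling" counts PGs whose state string
# contains "backfilling" (substring match, so it also matches combined
# states such as "active+remapped+backfilling").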
function wait_for_not_state() {
local state=$1
local num_in_state=-1
local cur_in_state
local -a delays=($(get_timeout_delays $2 5))
local -i loop=0
flush_pg_stats || return 1
while test $(get_num_pgs) == 0 ; do
sleep 1
done
while true ; do
cur_in_state=$(get_num_in_state ${state})
test $cur_in_state = "0" && break
if test $cur_in_state != $num_in_state ; then
loop=0
num_in_state=$cur_in_state
elif (( $loop >= ${#delays[*]} )) ; then
ceph pg dump pgs
return 1
fi
sleep ${delays[$loop]}
loop+=1
done
return 0
}
function wait_for_not_backfilling() {
local timeout=$1
wait_for_not_state backfilling $timeout
}
function wait_for_not_activating() {
local timeout=$1
wait_for_not_state activating $timeout
}
# All tests run in an environment with a fake total space of 3600K
# (3686400 bytes) per OSD, which can hold 600 6K replicated objects or
# 200 18K shards of erasure-coded objects. For a k=3, m=2 EC pool the
# theoretical maximum object would be 54K, but with the 4K chunk size
# and 4K of rounding to account for the chunks the practical maximum is
# a 36K object, which costs ((36K / 3) + 4K) * 200 = 3200K per shard,
# i.e. 88% of the 3600K.
# Create 2 pools with size 1
# Write enough data that only 1 pool pg can fit per osd
# Increase the pool size to 2
# On 3 OSDs this should result in 1 OSD with overlapping replicas,
# so both pools can't fit. We assume pgid 1.0 and 2.0 won't
# map to the same 2 OSDs.
# At least 1 pool shouldn't have room to backfill
# All other pools should go active+clean
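# Rule of thumb for the tests below: with --fake_statfs_for_testing=3686400
# and "ceph osd set-backfillfull-ratio .85", a target OSD whose existing data
# plus incoming backfill would exceed roughly 85% of 3686400 bytes is expected
# to leave a PG in backfill_toofull instead of granting the reservation (this
# is my reading of the checks below, not a statement of the exact algorithm).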
function TEST_backfill_test_simple() {
local dir=$1
local pools=2
local OSDS=3
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
ceph osd set-backfillfull-ratio .85
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
ceph osd pool set "${poolprefix}$p" size 1 --yes-i-really-mean-it
done
wait_for_clean || return 1
# This won't work if the 2 pools' primary and only OSDs
# are the same.
dd if=/dev/urandom of=$dir/datafile bs=1024 count=4
for o in $(seq 1 $objects)
do
for p in $(seq 1 $pools)
do
rados -p "${poolprefix}$p" put obj$o $dir/datafile
done
done
ceph pg dump pgs
for p in $(seq 1 $pools)
do
ceph osd pool set "${poolprefix}$p" size 2
done
sleep 30
wait_for_not_backfilling 1200 || return 1
wait_for_not_activating 60 || return 1
ERRORS=0
if [ "$(ceph pg dump pgs | grep +backfill_toofull | wc -l)" != "1" ];
then
echo "One pool should have been in backfill_toofull"
ERRORS="$(expr $ERRORS + 1)"
fi
expected="$(expr $pools - 1)"
if [ "$(ceph pg dump pgs | grep active+clean | wc -l)" != "$expected" ];
then
echo "$expected didn't finish backfill"
ERRORS="$(expr $ERRORS + 1)"
fi
ceph pg dump pgs
if [ $ERRORS != "0" ];
then
return 1
fi
for i in $(seq 1 $pools)
do
delete_pool "${poolprefix}$i"
done
kill_daemons $dir || return 1
! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1
}
# Create 8 pools of size 1 on 20 OSDs
# Write 4K * 600 objects (only 1 pool pg can fit on any given osd)
# Increase pool size to 2
# At least 1 pool shouldn't have room to backfill
# All other pools should go active+clean
function TEST_backfill_test_multi() {
local dir=$1
local pools=8
local OSDS=20
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
ceph osd set-backfillfull-ratio .85
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
ceph osd pool set "${poolprefix}$p" size 1 --yes-i-really-mean-it
done
wait_for_clean || return 1
dd if=/dev/urandom of=$dir/datafile bs=1024 count=4
for o in $(seq 1 $objects)
do
for p in $(seq 1 $pools)
do
rados -p "${poolprefix}$p" put obj$o $dir/datafile
done
done
ceph pg dump pgs
for p in $(seq 1 $pools)
do
ceph osd pool set "${poolprefix}$p" size 2
done
sleep 30
wait_for_not_backfilling 1200 || return 1
wait_for_not_activating 60 || return 1
ERRORS=0
full="$(ceph pg dump pgs | grep +backfill_toofull | wc -l)"
if [ "$full" -lt "1" ];
then
echo "At least one pool should have been in backfill_toofull"
ERRORS="$(expr $ERRORS + 1)"
fi
expected="$(expr $pools - $full)"
if [ "$(ceph pg dump pgs | grep active+clean | wc -l)" != "$expected" ];
then
echo "$expected didn't finish backfill"
ERRORS="$(expr $ERRORS + 1)"
fi
ceph pg dump pgs
ceph status
ceph status --format=json-pretty > $dir/stat.json
eval SEV=$(jq '.health.checks.PG_BACKFILL_FULL.severity' $dir/stat.json)
if [ "$SEV" != "HEALTH_WARN" ]; then
echo "PG_BACKFILL_FULL severity $SEV not HEALTH_WARN"
ERRORS="$(expr $ERRORS + 1)"
fi
eval MSG=$(jq '.health.checks.PG_BACKFILL_FULL.summary.message' $dir/stat.json)
if [ "$MSG" != "Low space hindering backfill (add storage if this doesn't resolve itself): 4 pgs backfill_toofull" ]; then
echo "PG_BACKFILL_FULL message '$MSG' mismatched"
ERRORS="$(expr $ERRORS + 1)"
fi
rm -f $dir/stat.json
if [ $ERRORS != "0" ];
then
return 1
fi
for i in $(seq 1 $pools)
do
delete_pool "${poolprefix}$i"
done
# Work around for http://tracker.ceph.com/issues/38195
kill_daemons $dir #|| return 1
! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1
}
# Make sure that when 2 PGs try to backfill at the same time to
# the same target we handle it. This might be covered by the simple test
# above, but this makes sure we exercise it.
#
# Create 10 pools of size 2 and identify 2 that have the same
# non-primary osd.
# Delete all other pools
# Set size to 1 and write 4K * 600 to each pool
# Set size back to 2
# The 2 pools should race to backfill.
# One pool goes active+clean
# The other goes active+...+backfill_toofull
function TEST_backfill_test_sametarget() {
local dir=$1
local pools=10
local OSDS=5
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
ceph osd set-backfillfull-ratio .85
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
ceph osd pool set "${poolprefix}$p" size 2
done
sleep 5
wait_for_clean || return 1
ceph pg dump pgs
# Find 2 pools with PGs that have distinct primaries but whose second
# replica is on the same OSD.
local PG1
local POOLNUM1
local pool1
local chk_osd1
local chk_osd2
local PG2
local POOLNUM2
local pool2
for p in $(seq 1 $pools)
do
ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting
local test_osd1=$(head -1 $dir/acting)
local test_osd2=$(tail -1 $dir/acting)
if [ $p = "1" ];
then
PG1="${p}.0"
POOLNUM1=$p
pool1="${poolprefix}$p"
chk_osd1=$test_osd1
chk_osd2=$test_osd2
elif [ $chk_osd1 != $test_osd1 -a $chk_osd2 = $test_osd2 ];
then
PG2="${p}.0"
POOLNUM2=$p
pool2="${poolprefix}$p"
break
fi
done
rm -f $dir/acting
if [ "$pool2" = "" ];
then
echo "Failure to find appropirate PGs"
return 1
fi
for p in $(seq 1 $pools)
do
if [ $p != $POOLNUM1 -a $p != $POOLNUM2 ];
then
delete_pool ${poolprefix}$p
fi
done
ceph osd pool set $pool1 size 1 --yes-i-really-mean-it
ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
wait_for_clean || return 1
dd if=/dev/urandom of=$dir/datafile bs=1024 count=4
for i in $(seq 1 $objects)
do
rados -p $pool1 put obj$i $dir/datafile
rados -p $pool2 put obj$i $dir/datafile
done
ceph osd pool set $pool1 size 2
ceph osd pool set $pool2 size 2
sleep 30
wait_for_not_backfilling 1200 || return 1
wait_for_not_activating 60 || return 1
ERRORS=0
if [ "$(ceph pg dump pgs | grep +backfill_toofull | wc -l)" != "1" ];
then
echo "One pool should have been in backfill_toofull"
ERRORS="$(expr $ERRORS + 1)"
fi
if [ "$(ceph pg dump pgs | grep active+clean | wc -l)" != "1" ];
then
echo "One didn't finish backfill"
ERRORS="$(expr $ERRORS + 1)"
fi
ceph pg dump pgs
if [ $ERRORS != "0" ];
then
return 1
fi
delete_pool $pool1
delete_pool $pool2
kill_daemons $dir || return 1
! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1
}
# 2 pools can't both backfill to a target which has other data
# 1 of the pools has objects that increase from 1024 to 2611 bytes
#
# Write to fill pool which is size 1
# Take the fill pool OSD down (the other 2 pools must go to the remaining OSDs)
# Save an export of data on fill OSD and restart it
# Write an initial 1K to pool1 which has pg 2.0
# Export 2.0 from a non-fillpool OSD (don't wait for it to start up)
# Take down fillpool OSD
# Put 1K object version of 2.0 on fillpool OSD
# Put back fillpool data on fillpool OSD
# With fillpool down write 2611 byte objects
# Take down $osd and bring back $fillosd simultaneously
# Wait for backfilling
# One PG will be able to backfill its remaining data
# One PG must get backfill_toofull
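# (Reading of the steps above: the export/import dance leaves the fill OSD
# holding its original fillpool data plus a stale 1K-object copy of pg 2.0,
# so when it returns and osd.$osd is marked out, both pool PGs want to
# backfill onto it but only one of them can fit.)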
function TEST_backfill_multi_partial() {
local dir=$1
local EC=$2
local pools=2
local OSDS=3
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
ceph osd set-backfillfull-ratio .85
ceph osd set-require-min-compat-client luminous
create_pool fillpool 1 1
ceph osd pool set fillpool size 1 --yes-i-really-mean-it
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
ceph osd pool set "${poolprefix}$p" size 2
done
wait_for_clean || return 1
# Partially fill an osd
# We have room for 600 6K replicated objects. If we create 2611-byte objects
# there is 3600K - (2611 * 600) = 2070K left, so the fill pool plus one
# replica from the other 2 pools is about 85% of 3600K.
dd if=/dev/urandom of=$dir/datafile bs=2611 count=1
for o in $(seq 1 $objects)
do
rados -p fillpool put obj-fill-${o} $dir/datafile
done
local fillosd=$(get_primary fillpool obj-fill-1)
osd=$(expr $fillosd + 1)
if [ "$osd" = "$OSDS" ]; then
osd="0"
fi
kill_daemon $dir/osd.$fillosd.pid TERM
ceph osd out osd.$fillosd
_objectstore_tool_nodown $dir $fillosd --op export-remove --pgid 1.0 --file $dir/fillexport.out || return 1
activate_osd $dir $fillosd || return 1
ceph pg dump pgs
dd if=/dev/urandom of=$dir/datafile bs=1024 count=1
for o in $(seq 1 $objects)
do
rados -p "${poolprefix}1" put obj-1-${o} $dir/datafile
done
ceph pg dump pgs
# The $osd OSD is started, but we don't wait so we can kill $fillosd at the same time
_objectstore_tool_nowait $dir $osd --op export --pgid 2.0 --file $dir/export.out
kill_daemon $dir/osd.$fillosd.pid TERM
_objectstore_tool_nodown $dir $fillosd --force --op remove --pgid 2.0
_objectstore_tool_nodown $dir $fillosd --op import --pgid 2.0 --file $dir/export.out || return 1
_objectstore_tool_nodown $dir $fillosd --op import --pgid 1.0 --file $dir/fillexport.out || return 1
ceph pg dump pgs
sleep 20
ceph pg dump pgs
# re-write everything
dd if=/dev/urandom of=$dir/datafile bs=2611 count=1
for o in $(seq 1 $objects)
do
for p in $(seq 1 $pools)
do
rados -p "${poolprefix}$p" put obj-${p}-${o} $dir/datafile
done
done
kill_daemon $dir/osd.$osd.pid TERM
ceph osd out osd.$osd
activate_osd $dir $fillosd || return 1
ceph osd in osd.$fillosd
sleep 30
wait_for_not_backfilling 1200 || return 1
wait_for_not_activating 60 || return 1
flush_pg_stats || return 1
ceph pg dump pgs
ERRORS=0
if [ "$(get_num_in_state backfill_toofull)" != "1" ];
then
echo "One PG should be in backfill_toofull"
ERRORS="$(expr $ERRORS + 1)"
fi
if [ "$(get_num_in_state active+clean)" != "2" ];
then
echo "Two PGs should be active+clean after one PG completed backfill"
ERRORS="$(expr $ERRORS + 1)"
fi
if [ $ERRORS != "0" ];
then
return 1
fi
delete_pool fillpool
for i in $(seq 1 $pools)
do
delete_pool "${poolprefix}$i"
done
kill_daemons $dir || return 1
! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1
}
# Make sure that the amount of bytes already on the replica doesn't
# cause an out of space condition
#
# Create 1 pool and write 4K * 600 objects
# Remove 25% (150) of the objects with one OSD down (noout set)
# Increase the size of the remaining 75% (450) of the objects to 6K
# Bring the down OSD back up
# The pool should go active+clean
function TEST_backfill_grow() {
local dir=$1
local poolname="test"
local OSDS=3
run_mon $dir a || return 1
run_mgr $dir x || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
ceph osd set-backfillfull-ratio .85
create_pool $poolname 1 1
ceph osd pool set $poolname size 3
sleep 5
wait_for_clean || return 1
dd if=/dev/urandom of=${dir}/4kdata bs=1k count=4
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i $dir/4kdata
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
local otherosd=$(get_not_primary $poolname obj1)
ceph osd set noout
kill_daemons $dir TERM $otherosd || return 1
rmobjects=$(expr $objects / 4)
for i in $(seq 1 $rmobjects)
do
rados -p $poolname rm obj$i
done
dd if=/dev/urandom of=${dir}/6kdata bs=6k count=1
for i in $(seq $(expr $rmobjects + 1) $objects)
do
rados -p $poolname put obj$i $dir/6kdata
done
activate_osd $dir $otherosd || return 1
ceph tell osd.$primary debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
delete_pool $poolname
kill_daemons $dir || return 1
! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1
}
# Create a 5 shard EC pool on a 6 OSD cluster
# Fill 1 OSD with 2600K of data, then take that OSD down.
# Write the EC pool on 5 OSDs
# Take down 1 (it must contain an EC shard)
# Bring up the OSD with the fill data
# There is not enough room to backfill to the partially full OSD
function TEST_ec_backfill_simple() {
local dir=$1
local EC=$2
local pools=1
local OSDS=6
local k=3
local m=2
local ecobjects=$(expr $objects / $k)
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
ceph osd set-backfillfull-ratio .85
create_pool fillpool 1 1
ceph osd pool set fillpool size 1 --yes-i-really-mean-it
# Partially fill an osd
# We have room for 200 18K replicated objects. If we create 13K objects
# there is only 3600K - (13K * 200) = 1000K left, which won't hold
# a k=3 shard that needs ((18K / 3) + 4K) * 200 = 2000K.
# Actual usage per shard is 8K * 200 = 1600K because 18K/3 is 6K, which
# rounds to 8K. The 2000K is the ceiling on the 18K * 200 = 3600K logical
# bytes in the pool.
dd if=/dev/urandom of=$dir/datafile bs=1024 count=13
for o in $(seq 1 $ecobjects)
do
rados -p fillpool put obj$o $dir/datafile
done
local fillosd=$(get_primary fillpool obj1)
osd=$(expr $fillosd + 1)
if [ "$osd" = "$OSDS" ]; then
osd="0"
fi
sleep 5
kill_daemon $dir/osd.$fillosd.pid TERM
ceph osd out osd.$fillosd
sleep 2
ceph osd erasure-code-profile set ec-profile k=$k m=$m crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
for p in $(seq 1 $pools)
do
ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile
done
# Can't wait for clean here because we created a stale pg
#wait_for_clean || return 1
sleep 5
ceph pg dump pgs
dd if=/dev/urandom of=$dir/datafile bs=1024 count=18
for o in $(seq 1 $ecobjects)
do
for p in $(seq 1 $pools)
do
rados -p "${poolprefix}$p" put obj$o $dir/datafile
done
done
kill_daemon $dir/osd.$osd.pid TERM
ceph osd out osd.$osd
activate_osd $dir $fillosd || return 1
ceph osd in osd.$fillosd
sleep 30
ceph pg dump pgs
wait_for_not_backfilling 1200 || return 1
wait_for_not_activating 60 || return 1
ceph pg dump pgs
ERRORS=0
if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ]; then
echo "One pool should have been in backfill_toofull"
ERRORS="$(expr $ERRORS + 1)"
fi
if [ $ERRORS != "0" ];
then
return 1
fi
delete_pool fillpool
for i in $(seq 1 $pools)
do
delete_pool "${poolprefix}$i"
done
kill_daemons $dir || return 1
}
function osdlist() {
local OSDS=$1
local excludeosd=$2
osds=""
for osd in $(seq 0 $(expr $OSDS - 1))
do
if [ $osd = $excludeosd ];
then
continue
fi
if [ -n "$osds" ]; then
osds="${osds} "
fi
osds="${osds}${osd}"
done
echo $osds
}
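# Example: with OSDS=6 and excludeosd=2, osdlist echoes "0 1 3 4 5", which is
# then fed to "ceph osd pg-upmap" to keep the EC PGs off the filled OSD.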
# Create a pool with size 1 and fill with data so that only 1 EC shard can fit.
# Write data to 2 EC pools mapped to the same OSDs (excluding filled one)
# Remap the last OSD to partially full OSD on both pools
# The 2 pools should race to backfill.
# One pool goes active+clean
# The other goes active+...+backfill_toofull
function TEST_ec_backfill_multi() {
local dir=$1
local EC=$2
local pools=2
local OSDS=6
local k=3
local m=2
local ecobjects=$(expr $objects / $k)
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
# This test requires that shards from 2 different pools
# fit on a given OSD, but both will not fit. I'm making
# the fillosd data plus 1 shard use 75% of the space,
# leaving not enough room to stay under the 85% ratio set here.
ceph osd set-backfillfull-ratio .85
ceph osd set-require-min-compat-client luminous
create_pool fillpool 1 1
ceph osd pool set fillpool size 1 --yes-i-really-mean-it
# Partially fill an osd
# We have room for 200 18K replicated objects. If we create 9K objects
# there is only 3600K - (9K * 200) = 1800K left, which will only hold
# one k=3 shard at ((12K / 3) + 4K) * 200 = 1600K.
# The actual data will be (12K / 3) * 200 = 800K because the extra
# is the reservation padding for chunking.
dd if=/dev/urandom of=$dir/datafile bs=1024 count=9
for o in $(seq 1 $ecobjects)
do
rados -p fillpool put obj$o $dir/datafile
done
local fillosd=$(get_primary fillpool obj1)
ceph osd erasure-code-profile set ec-profile k=3 m=2 crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
nonfillosds="$(osdlist $OSDS $fillosd)"
for p in $(seq 1 $pools)
do
ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile
ceph osd pg-upmap "$(expr $p + 1).0" $nonfillosds
done
# Can't wait for clean here because we created a stale pg
#wait_for_clean || return 1
sleep 15
ceph pg dump pgs
dd if=/dev/urandom of=$dir/datafile bs=1024 count=12
for o in $(seq 1 $ecobjects)
do
for p in $(seq 1 $pools)
do
rados -p "${poolprefix}$p" put obj$o-$p $dir/datafile
done
done
ceph pg dump pgs
for p in $(seq 1 $pools)
do
ceph osd pg-upmap $(expr $p + 1).0 ${nonfillosds% *} $fillosd
done
sleep 30
wait_for_not_backfilling 1200 || return 1
wait_for_not_activating 60 || return 1
ceph pg dump pgs
ERRORS=0
if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ];
then
echo "One pool should have been in backfill_toofull"
ERRORS="$(expr $ERRORS + 1)"
fi
if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep active+clean | wc -l)" != "1" ];
then
echo "One didn't finish backfill"
ERRORS="$(expr $ERRORS + 1)"
fi
if [ $ERRORS != "0" ];
then
return 1
fi
delete_pool fillpool
for i in $(seq 1 $pools)
do
delete_pool "${poolprefix}$i"
done
kill_daemons $dir || return 1
}
# Similar to TEST_ec_backfill_multi but one of the ec pools
# already had some data on the target OSD
# Create a pool with size 1 and fill with data so that only 1 EC shard can fit.
# Write a small amount of data to 1 EC pool that still includes the filled one
# Take down fillosd with noout set
# Write data to 2 EC pools mapped to the same OSDs (excluding filled one)
# Remap the last OSD to partially full OSD on both pools
# The 2 pools should race to backfill.
# One pool goes active+clean
# The other goes active+...+backfill_toofull
function SKIP_TEST_ec_backfill_multi_partial() {
local dir=$1
local EC=$2
local pools=2
local OSDS=5
local k=3
local m=2
local ecobjects=$(expr $objects / $k)
local lastosd=$(expr $OSDS - 1)
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
# This test requires that shards from 2 different pools
# fit on a given OSD, but both will not fit. I'm making
# the fillosd data plus 1 shard use 75% of the space,
# leaving not enough room to stay under the 85% ratio set here.
ceph osd set-backfillfull-ratio .85
ceph osd set-require-min-compat-client luminous
create_pool fillpool 1 1
ceph osd pool set fillpool size 1 --yes-i-really-mean-it
# last osd
ceph osd pg-upmap 1.0 $lastosd
# Partially fill an osd
# We have room for 200 18K replicated objects. If we create 9K objects
# there is only 3600K - (9K * 200) = 1800K left, which will only hold
# one k=3 shard at ((12K / 3) + 4K) * 200 = 1600K.
# The actual data will be (12K / 3) * 200 = 800K because the extra
# is the reservation padding for chunking.
dd if=/dev/urandom of=$dir/datafile bs=1024 count=9
for o in $(seq 1 $ecobjects)
do
rados -p fillpool put obj$o $dir/datafile
done
local fillosd=$(get_primary fillpool obj1)
ceph osd erasure-code-profile set ec-profile k=3 m=2 crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
nonfillosds="$(osdlist $OSDS $fillosd)"
for p in $(seq 1 $pools)
do
ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile
ceph osd pg-upmap "$(expr $p + 1).0" $(seq 0 $lastosd)
done
# Can't wait for clean here because we created a stale pg
#wait_for_clean || return 1
sleep 15
ceph pg dump pgs
dd if=/dev/urandom of=$dir/datafile bs=1024 count=1
for o in $(seq 1 $ecobjects)
do
rados -p "${poolprefix}1" put obj$o-1 $dir/datafile
done
for p in $(seq 1 $pools)
do
ceph osd pg-upmap "$(expr $p + 1).0" $(seq 0 $(expr $lastosd - 1))
done
ceph pg dump pgs
#ceph osd set noout
#kill_daemons $dir TERM osd.$lastosd || return 1
dd if=/dev/urandom of=$dir/datafile bs=1024 count=12
for o in $(seq 1 $ecobjects)
do
for p in $(seq 1 $pools)
do
rados -p "${poolprefix}$p" put obj$o-$p $dir/datafile
done
done
ceph pg dump pgs
# Now backfill lastosd by adding back into the upmap
for p in $(seq 1 $pools)
do
ceph osd pg-upmap "$(expr $p + 1).0" $(seq 0 $lastosd)
done
#activate_osd $dir $lastosd || return 1
#ceph tell osd.0 debug kick_recovery_wq 0
sleep 30
ceph pg dump pgs
wait_for_not_backfilling 1200 || return 1
wait_for_not_activating 60 || return 1
ceph pg dump pgs
ERRORS=0
if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ];
then
echo "One pool should have been in backfill_toofull"
ERRORS="$(expr $ERRORS + 1)"
fi
if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep active+clean | wc -l)" != "1" ];
then
echo "One didn't finish backfill"
ERRORS="$(expr $ERRORS + 1)"
fi
if [ $ERRORS != "0" ];
then
return 1
fi
delete_pool fillpool
for i in $(seq 1 $pools)
do
delete_pool "${poolprefix}$i"
done
kill_daemons $dir || return 1
}
function SKIP_TEST_ec_backfill_multi_partial() {
local dir=$1
local EC=$2
local pools=2
local OSDS=6
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
# Below we need to fit 3200K in 3600K which is 88%
# so set to 90%
ceph osd set-backfillfull-ratio .90
ceph osd set-require-min-compat-client luminous
create_pool fillpool 1 1
ceph osd pool set fillpool size 1 --yes-i-really-mean-it
# Partially fill an osd
# We have room for 200 48K EC objects. If we create 4K replicated objects
# there is 3600K - (4K * 200) = 2800K left, which won't hold 2 k=3 shards
# of 200 12K objects, which take ((12K / 3) + 4K) * 200 = 1600K each.
# On the other OSDs 2 * 1600K = 3200K, which is 88% of 3600K.
dd if=/dev/urandom of=$dir/datafile bs=1024 count=4
for o in $(seq 1 $objects)
do
rados -p fillpool put obj$o $dir/datafile
done
local fillosd=$(get_primary fillpool obj1)
osd=$(expr $fillosd + 1)
if [ "$osd" = "$OSDS" ]; then
osd="0"
fi
sleep 5
kill_daemon $dir/osd.$fillosd.pid TERM
ceph osd out osd.$fillosd
sleep 2
ceph osd erasure-code-profile set ec-profile k=3 m=2 crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
for p in $(seq 1 $pools)
do
ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile
done
# Can't wait for clean here because we created a stale pg
#wait_for_clean || return 1
sleep 5
ceph pg dump pgs
dd if=/dev/urandom of=$dir/datafile bs=1024 count=12
for o in $(seq 1 $objects)
do
for p in $(seq 1 $pools)
do
rados -p "${poolprefix}$p" put obj$o $dir/datafile
done
done
#ceph pg map 2.0 --format=json | jq '.'
kill_daemon $dir/osd.$osd.pid TERM
ceph osd out osd.$osd
_objectstore_tool_nodown $dir $osd --op export --pgid 2.0 --file $dir/export.out
_objectstore_tool_nodown $dir $fillosd --op import --pgid 2.0 --file $dir/export.out
activate_osd $dir $fillosd || return 1
ceph osd in osd.$fillosd
sleep 30
wait_for_not_backfilling 1200 || return 1
wait_for_not_activating 60 || return 1
ERRORS=0
if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ];
then
echo "One pool should have been in backfill_toofull"
ERRORS="$(expr $ERRORS + 1)"
fi
if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep active+clean | wc -l)" != "1" ];
then
echo "One didn't finish backfill"
ERRORS="$(expr $ERRORS + 1)"
fi
ceph pg dump pgs
if [ $ERRORS != "0" ];
then
return 1
fi
delete_pool fillpool
for i in $(seq 1 $pools)
do
delete_pool "${poolprefix}$i"
done
kill_daemons $dir || return 1
}
# Create 1 EC pool
# Write 200 12K objects: ((12K / 3) + 4K) * 200 = 1600K
# Take 1 shard's OSD down (with noout set)
# Remove 50 objects: ((12K / 3) + 4K) * 50 = 400K
# Write 150 36K objects (grow 150 objects): 2400K
# But there is already 1600K of usage, so backfill
# would be too full if it didn't account for existing data
# Bring the down OSD back up so it must backfill
# It should go active+clean taking into account the data already there
function TEST_ec_backfill_grow() {
local dir=$1
local poolname="test"
local OSDS=6
local k=3
local m=2
local ecobjects=$(expr $objects / $k)
run_mon $dir a || return 1
run_mgr $dir x || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
ceph osd set-backfillfull-ratio .85
ceph osd set-require-min-compat-client luminous
ceph osd erasure-code-profile set ec-profile k=$k m=$m crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
ceph osd pool create $poolname 1 1 erasure ec-profile
wait_for_clean || return 1
dd if=/dev/urandom of=${dir}/12kdata bs=1k count=12
for i in $(seq 1 $ecobjects)
do
rados -p $poolname put obj$i $dir/12kdata
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
local otherosd=$(get_not_primary $poolname obj1)
ceph osd set noout
kill_daemons $dir TERM $otherosd || return 1
rmobjects=$(expr $ecobjects / 4)
for i in $(seq 1 $rmobjects)
do
rados -p $poolname rm obj$i
done
dd if=/dev/urandom of=${dir}/36kdata bs=1k count=36
for i in $(seq $(expr $rmobjects + 1) $ecobjects)
do
rados -p $poolname put obj$i $dir/36kdata
done
activate_osd $dir $otherosd || return 1
ceph tell osd.$primary debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
main osd-backfill-space "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-space.sh"
# End:
| 31,780 | 26.001699 | 134 | sh |
null | ceph-main/qa/standalone/osd-backfill/osd-backfill-stats.sh | #!/usr/bin/env bash
#
# Copyright (C) 2017 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
# Fix port????
export CEPH_MON="127.0.0.1:7114" # git grep '\<7114\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10 "
export margin=10
export objects=200
export poolname=test
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function below_margin() {
local -i check=$1
shift
local -i target=$1
return $(( $check <= $target && $check >= $target - $margin ? 0 : 1 ))
}
function above_margin() {
local -i check=$1
shift
local -i target=$1
return $(( $check >= $target && $check <= $target + $margin ? 0 : 1 ))
}
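# below_margin/above_margin accept a sampled value that is up to $margin below
# (respectively above) the expected figure, since the numbers are read from
# periodic log lines rather than at the exact start/finish of recovery.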
FIND_UPACT='grep "pg[[]${PG}.*backfilling.*PeeringState::update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/"'
FIND_FIRST='grep "pg[[]${PG}.*backfilling.*PeeringState::update_calc_stats $which " $log | grep -F " ${UPACT}${addp}" | grep -v est | head -1 | sed "s/.* \([0-9]*\)$/\1/"'
FIND_LAST='grep "pg[[]${PG}.*backfilling.*PeeringState::update_calc_stats $which " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/"'
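# These are eval'ed templates: $log, $PG, $which, $UPACT and $addp are filled
# in by check() below. Each pipeline extracts the first matching or the last
# degraded/misplaced/shard count that PeeringState::update_calc_stats logged
# on the backfilling primary.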
function check() {
local dir=$1
local PG=$2
local primary=$3
local type=$4
local degraded_start=$5
local degraded_end=$6
local misplaced_start=$7
local misplaced_end=$8
local primary_start=${9:-}
local primary_end=${10:-}
local check_setup=${11:-true}
local log=$(grep -l +backfilling $dir/osd.$primary.log)
if [ $check_setup = "true" ];
then
local alllogs=$(grep -l +backfilling $dir/osd.*.log)
if [ "$(echo "$alllogs" | wc -w)" != "1" ];
then
echo "Test setup failure, a single OSD should have performed backfill"
return 1
fi
fi
local addp=" "
if [ "$type" = "erasure" ];
then
addp="p"
fi
UPACT=$(eval $FIND_UPACT)
[ -n "$UPACT" ] || return 1
# Check 3rd line at start because of false recovery starts
local which="degraded"
FIRST=$(eval $FIND_FIRST)
[ -n "$FIRST" ] || return 1
below_margin $FIRST $degraded_start || return 1
LAST=$(eval $FIND_LAST)
[ -n "$LAST" ] || return 1
above_margin $LAST $degraded_end || return 1
# Check 3rd line at start because of false recovery starts
which="misplaced"
FIRST=$(eval $FIND_FIRST)
[ -n "$FIRST" ] || return 1
below_margin $FIRST $misplaced_start || return 1
LAST=$(eval $FIND_LAST)
[ -n "$LAST" ] || return 1
above_margin $LAST $misplaced_end || return 1
# This is the value of set into MISSING_ON_PRIMARY
if [ -n "$primary_start" ];
then
which="shard $primary"
FIRST=$(eval $FIND_FIRST)
[ -n "$FIRST" ] || return 1
below_margin $FIRST $primary_start || return 1
LAST=$(eval $FIND_LAST)
[ -n "$LAST" ] || return 1
above_margin $LAST $primary_end || return 1
fi
}
# [1] -> [1, 0, 2]
# degraded 1000 -> 0
# state: active+undersized+degraded+remapped+backfilling
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 1000 0 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:44:23.531466 22'500 26:617 [1,0,2] 1 [1] 1 0'0 2017-10-27 09:43:44.654882 0'0 2017-10-27 09:43:44.654882
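# The sample above was captured with 500 objects; this script writes $objects
# (200), so growing the pool from size 1 to size 3 leaves 2 missing copies per
# object and the degraded count should start near objects * 2 and drain to 0,
# which is what "check ... $degraded 0 0 0" verifies below.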
function TEST_backfill_sizeup() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
create_pool $poolname 1 1
ceph osd pool set $poolname size 1 --yes-i-really-mean-it
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
ceph osd set nobackfill
ceph osd pool set $poolname size 3
sleep 2
ceph osd unset nobackfill
wait_for_clean || return 1
local primary=$(get_primary $poolname obj1)
local PG=$(get_pg $poolname obj1)
local degraded=$(expr $objects \* 2)
check $dir $PG $primary replicated $degraded 0 0 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [1] -> [0, 2, 4]
# degraded 1000 -> 0
# misplaced 500 -> 0
# state: active+undersized+degraded+remapped+backfilling
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 1000 500 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:48:53.326849 22'500 26:603 [0,2,4] 0 [1] 1 0'0 2017-10-27 09:48:13.236253 0'0 2017-10-27 09:48:13.236253
function TEST_backfill_sizeup_out() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
create_pool $poolname 1 1
ceph osd pool set $poolname size 1 --yes-i-really-mean-it
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
ceph osd set nobackfill
ceph osd out osd.$primary
ceph osd pool set $poolname size 3
sleep 2
ceph osd unset nobackfill
wait_for_clean || return 1
local degraded=$(expr $objects \* 2)
check $dir $PG $primary replicated $degraded 0 $objects 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [1 0] -> [1,2]/[1,0]
# misplaced 500 -> 0
# state: active+remapped+backfilling
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 0 500 0 0 100 100 active+remapped+backfilling 2017-10-27 09:51:18.800517 22'500 25:570 [1,2] 1 [1,0] 1 0'0 2017-10-27 09:50:40.441274 0'0 2017-10-27 09:50:40.441274
function TEST_backfill_out() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
create_pool $poolname 1 1
ceph osd pool set $poolname size 2
sleep 5
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
ceph osd set nobackfill
ceph osd out osd.$(get_not_primary $poolname obj1)
sleep 2
ceph osd unset nobackfill
wait_for_clean || return 1
check $dir $PG $primary replicated 0 0 $objects 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [0, 1] -> [0, 2]/[0]
# osd 1 down/out
# degraded 500 -> 0
# state: active+undersized+degraded+remapped+backfilling
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 500 0 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:53:24.051091 22'500 27:719 [0,2] 0 [0] 0 0'0 2017-10-27 09:52:43.188368 0'0 2017-10-27 09:52:43.188368
function TEST_backfill_down_out() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
create_pool $poolname 1 1
ceph osd pool set $poolname size 2
sleep 5
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
local otherosd=$(get_not_primary $poolname obj1)
ceph osd set nobackfill
kill $(cat $dir/osd.${otherosd}.pid)
ceph osd down osd.${otherosd}
ceph osd out osd.${otherosd}
sleep 2
ceph osd unset nobackfill
wait_for_clean || return 1
check $dir $PG $primary replicated $objects 0 0 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [1, 0] -> [2, 3, 4]
# degraded 500 -> 0
# misplaced 1000 -> 0
# state: active+undersized+degraded+remapped+backfilling
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 500 1000 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:55:50.375722 23'500 27:553 [2,4,3] 2 [1,0] 1 0'0 2017-10-27 09:55:10.230919 0'0 2017-10-27 09:55:10.230919
function TEST_backfill_out2() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
create_pool $poolname 1 1
ceph osd pool set $poolname size 2
sleep 5
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
local otherosd=$(get_not_primary $poolname obj1)
ceph osd set nobackfill
ceph osd pool set $poolname size 3
ceph osd out osd.${otherosd}
ceph osd out osd.${primary}
# Primary might change before backfill starts
sleep 2
primary=$(get_primary $poolname obj1)
ceph osd unset nobackfill
ceph tell osd.$primary get_latest_osdmap
ceph tell osd.$primary debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
local misplaced=$(expr $objects \* 2)
check $dir $PG $primary replicated $objects 0 $misplaced 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [0,1] -> [2,4,3]/[0,1]
# degraded 1000 -> 0
# misplaced 1000 -> 500
# state ends at active+clean+remapped [2,4,3]/[2,4,3,0]
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 1000 1000 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-30 18:21:45.995149 19'500 23:1817 [2,4,3] 2 [0,1] 0 0'0 2017-10-30 18:21:05.109904 0'0 2017-10-30 18:21:05.109904
# ENDS:
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 0 500 0 0 5 5 active+clean+remapped 2017-10-30 18:22:42.293730 19'500 25:2557 [2,4,3] 2 [2,4,3,0] 2 0'0 2017-10-30 18:21:05.109904 0'0 2017-10-30 18:21:05.109904
function TEST_backfill_sizeup4_allout() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
create_pool $poolname 1 1
ceph osd pool set $poolname size 2
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
local otherosd=$(get_not_primary $poolname obj1)
ceph osd set nobackfill
ceph osd out osd.$otherosd
ceph osd out osd.$primary
ceph osd pool set $poolname size 4
# Primary might change before backfill starts
sleep 2
primary=$(get_primary $poolname obj1)
ceph osd unset nobackfill
ceph tell osd.$primary get_latest_osdmap
ceph tell osd.$primary debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
local misdeg=$(expr $objects \* 2)
check $dir $PG $primary replicated $misdeg 0 $misdeg $objects || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [1,2,0] -> [3]/[1,2]
# misplaced 1000 -> 500
# state ends at active+clean+remapped [3]/[3,1]
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 0 1000 0 0 100 100 active+remapped+backfilling 2017-11-28 19:13:56.092439 21'500 31:790 [3] 3 [1,2] 1 0'0 2017-11-28 19:13:28.698661 0'0 2017-11-28 19:13:28.698661
function TEST_backfill_remapped() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
create_pool $poolname 1 1
ceph osd pool set $poolname size 3
sleep 5
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
local otherosd=$(get_not_primary $poolname obj1)
ceph osd set nobackfill
ceph osd out osd.${otherosd}
for i in $(get_osds $poolname obj1)
do
if [ $i = $primary -o $i = $otherosd ];
then
continue
fi
ceph osd out osd.$i
break
done
ceph osd out osd.${primary}
ceph osd pool set $poolname size 2
sleep 2
# primary may change due to invalidating the old pg_temp, which was [1,2,0],
# but up_primary (3) chooses [0,1] for acting.
primary=$(get_primary $poolname obj1)
ceph osd unset nobackfill
ceph tell osd.$primary get_latest_osdmap
ceph tell osd.$primary debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
local misplaced=$(expr $objects \* 2)
check $dir $PG $primary replicated 0 0 $misplaced $objects "" "" false || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [1,0,2] -> [4,3,NONE]/[1,0,2]
# misplaced 1500 -> 500
# state ends at active+clean+remapped [4,3,NONE]/[4,3,2]
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 0 1500 0 0 100 100 active+degraded+remapped+backfilling 2017-10-31 16:53:39.467126 19'500 23:615 [4,3,NONE] 4 [1,0,2] 1 0'0 2017-10-31 16:52:59.624429 0'0 2017-10-31 16:52:59.624429
# ENDS:
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 0 500 0 0 5 5 active+clean+remapped 2017-10-31 16:48:34.414040 19'500 25:2049 [4,3,NONE] 4 [4,3,2] 4 0'0 2017-10-31 16:46:58.203440 0'0 2017-10-31 16:46:58.203440
function TEST_backfill_ec_all_out() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
create_pool $poolname 1 1 erasure myprofile
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
ceph osd set nobackfill
for o in $(get_osds $poolname obj1)
do
ceph osd out osd.$o
done
# Primary might change before backfill starts
sleep 2
primary=$(get_primary $poolname obj1)
ceph osd unset nobackfill
ceph tell osd.$primary get_latest_osdmap
ceph tell osd.$primary debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
local misplaced=$(expr $objects \* 3)
check $dir $PG $primary erasure 0 0 $misplaced $objects || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [1,0,2] -> [4, 0, 2]
# misplaced 500 -> 0
# active+remapped+backfilling
#
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 0 500 0 0 100 100 active+remapped+backfilling 2017-11-08 18:05:39.036420 24'500 27:742 [4,0,2] 4 [1,0,2] 1 0'0 2017-11-08 18:04:58.697315 0'0 2017-11-08 18:04:58.697315
function TEST_backfill_ec_prim_out() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
create_pool $poolname 1 1 erasure myprofile
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
ceph osd set nobackfill
ceph osd out osd.$primary
# Primary might change before backfill starts
sleep 2
primary=$(get_primary $poolname obj1)
ceph osd unset nobackfill
ceph tell osd.$primary get_latest_osdmap
ceph tell osd.$primary debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
local misplaced=$(expr $objects \* 3)
check $dir $PG $primary erasure 0 0 $objects 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [1,0] -> [1,2]
# degraded 500 -> 0
# misplaced 1000 -> 0
#
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 500 1000 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-11-06 14:02:29.439105 24'500 29:1020 [4,3,5] 4 [1,NONE,2] 1 0'0 2017-11-06 14:01:46.509963 0'0 2017-11-06 14:01:46.509963
function TEST_backfill_ec_down_all_out() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
create_pool $poolname 1 1 erasure myprofile
ceph osd pool set $poolname min_size 2
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
local otherosd=$(get_not_primary $poolname obj1)
local allosds=$(get_osds $poolname obj1)
ceph osd set nobackfill
kill $(cat $dir/osd.${otherosd}.pid)
ceph osd down osd.${otherosd}
for o in $allosds
do
ceph osd out osd.$o
done
# Primary might change before backfill starts
sleep 2
primary=$(get_primary $poolname obj1)
ceph osd unset nobackfill
ceph tell osd.$primary get_latest_osdmap
ceph tell osd.$primary debug kick_recovery_wq 0
sleep 2
flush_pg_stats
# Wait for recovery to finish
# Can't use wait_for_clean() because state goes from active+undersized+degraded+remapped+backfilling
# to active+undersized+remapped
while(true)
do
if test "$(ceph --format json pg dump pgs |
jq '.pg_stats | [.[] | .state | select(. == "incomplete")] | length')" -ne "0"
then
sleep 2
continue
fi
break
done
ceph pg dump pgs
for i in $(seq 1 240)
do
if ceph pg dump pgs | grep ^$PG | grep -qv backfilling
then
break
fi
if [ $i = "240" ];
then
echo "Timeout waiting for recovery to finish"
return 1
fi
sleep 1
done
ceph pg dump pgs
local misplaced=$(expr $objects \* 2)
check $dir $PG $primary erasure $objects 0 $misplaced 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [1,0,2] -> [1,3,2]
# degraded 500 -> 0
# active+backfilling+degraded
#
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 500 0 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-11-06 13:57:25.412322 22'500 28:794 [1,3,2] 1 [1,NONE,2] 1 0'0 2017-11-06 13:54:58.033906 0'0 2017-11-06 13:54:58.033906
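# Here only one acting OSD goes down and out, so each object is missing just
# the shard being rebuilt on its replacement (degraded == 500 above) and the
# sample line shows nothing counted as misplaced.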
function TEST_backfill_ec_down_out() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
create_pool $poolname 1 1 erasure myprofile
ceph osd pool set $poolname min_size 2
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local PG=$(get_pg $poolname obj1)
# Remember primary during the backfill
local primary=$(get_primary $poolname obj1)
local otherosd=$(get_not_primary $poolname obj1)
ceph osd set nobackfill
kill $(cat $dir/osd.${otherosd}.pid)
ceph osd down osd.${otherosd}
ceph osd out osd.${otherosd}
# Primary might change before backfill starts
sleep 2
primary=$(get_primary $poolname obj1)
ceph osd unset nobackfill
ceph tell osd.$primary get_latest_osdmap
ceph tell osd.$primary debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
local misplaced=$(expr $objects \* 2)
check $dir $PG $primary erasure $objects 0 0 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
main osd-backfill-stats "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-stats.sh"
# End:
| 26,343 | 33.572178 | 300 | sh |
null | ceph-main/qa/standalone/osd/bad-inc-map.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
mon_port=$(get_unused_port)
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:$mon_port"
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
set -e
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_bad_inc_map() {
local dir=$1
run_mon $dir a
run_mgr $dir x
run_osd $dir 0
run_osd $dir 1
run_osd $dir 2
ceph config set osd.2 osd_inject_bad_map_crc_probability 1
# osd map churn
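    # each of the pool commands below publishes a new osdmap epoch; with
    # osd_inject_bad_map_crc_probability=1, osd.2 should hit the injected crc
    # failure while applying them, which is what the log greps below verify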
create_pool foo 8
ceph osd pool set foo min_size 1
ceph osd pool set foo min_size 2
sleep 5
# make sure all the OSDs are still up
TIMEOUT=10 wait_for_osd up 0
TIMEOUT=10 wait_for_osd up 1
TIMEOUT=10 wait_for_osd up 2
# check for the signature in the log
grep "injecting map crc failure" $dir/osd.2.log || return 1
grep "bailing because last" $dir/osd.2.log || return 1
echo success
delete_pool foo
kill_daemons $dir || return 1
}
main bad-inc-map "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh bad-inc-map.sh"
# End:
| 1,360 | 20.603175 | 73 | sh |
null | ceph-main/qa/standalone/osd/divergent-priors.sh | #!/usr/bin/env bash
#
# Copyright (C) 2019 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
    # This should be a multiple of 6
export loglen=12
export divisor=3
export trim=$(expr $loglen / 2)
export DIVERGENT_WRITE=$(expr $trim / $divisor)
export DIVERGENT_REMOVE=$(expr $trim / $divisor)
export DIVERGENT_CREATE=$(expr $trim / $divisor)
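    # with loglen=12 this works out to trim=6 and two entries each for
    # DIVERGENT_WRITE, DIVERGENT_REMOVE and DIVERGENT_CREATE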
export poolname=test
export testobjects=100
# Fix port????
export CEPH_MON="127.0.0.1:7115" # git grep '\<7115\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
# so we will not force auth_log_shard to be acting_primary
CEPH_ARGS+="--osd_force_auth_primary_missing_objects=1000000 "
CEPH_ARGS+="--osd_debug_pg_log_writeout=true "
CEPH_ARGS+="--osd_min_pg_log_entries=$loglen --osd_max_pg_log_entries=$loglen --osd_pg_log_trim_min=$trim "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
# Special case divergence test
# Test handling of divergent entries with prior_version
# prior to log_tail
# based on qa/tasks/divergent_prior.py
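# Rough shape of the test below: write a baseline of objects, blackhole the
# replicas so only the primary applies a few more operations, kill everything,
# bring the replicas back without the primary and write again so the primary's
# extra log entries become divergent, then revive the primary and verify that
# all of the original objects are still readable.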
function TEST_divergent() {
local dir=$1
# something that is always there
local dummyfile='/etc/fstab'
local dummyfile2='/etc/resolv.conf'
local num_osds=3
local osds="$(seq 0 $(expr $num_osds - 1))"
run_mon $dir a || return 1
run_mgr $dir x || return 1
for i in $osds
do
run_osd $dir $i || return 1
done
ceph osd set noout
ceph osd set noin
ceph osd set nodown
create_pool $poolname 1 1
ceph osd pool set $poolname size 3
ceph osd pool set $poolname min_size 2
flush_pg_stats || return 1
wait_for_clean || return 1
# determine primary
local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')"
echo "primary and soon to be divergent is $divergent"
ceph pg dump pgs
local non_divergent=""
for i in $osds
do
if [ "$i" = "$divergent" ]; then
continue
fi
non_divergent="$non_divergent $i"
done
echo "writing initial objects"
# write a bunch of objects
for i in $(seq 1 $testobjects)
do
rados -p $poolname put existing_$i $dummyfile
done
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
local pgid=$(get_pg $poolname existing_1)
# blackhole non_divergent
echo "blackholing osds $non_divergent"
ceph pg dump pgs
for i in $non_divergent
do
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1
done
local case5=$testobjects
local case3=$(expr $testobjects - 1)
# Write some soon to be divergent
echo 'writing divergent object'
rados -p $poolname put existing_$case5 $dummyfile &
echo 'create missing divergent object'
inject_eio rep data $poolname existing_$case3 $dir 0 || return 1
rados -p $poolname get existing_$case3 $dir/existing &
sleep 10
killall -9 rados
# kill all the osds but leave divergent in
echo 'killing all the osds'
ceph pg dump pgs
kill_daemons $dir KILL osd || return 1
for i in $osds
do
ceph osd down osd.$i
done
for i in $non_divergent
do
ceph osd out osd.$i
done
# bring up non-divergent
echo "bringing up non_divergent $non_divergent"
ceph pg dump pgs
for i in $non_divergent
do
activate_osd $dir $i || return 1
done
for i in $non_divergent
do
ceph osd in osd.$i
done
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
# write 1 non-divergent object (ensure that old divergent one is divergent)
objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)"
echo "writing non-divergent object $objname"
ceph pg dump pgs
rados -p $poolname put $objname $dummyfile2
# ensure no recovery of up osds first
echo 'delay recovery'
ceph pg dump pgs
for i in $non_divergent
do
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000
done
# bring in our divergent friend
echo "revive divergent $divergent"
ceph pg dump pgs
ceph osd set noup
activate_osd $dir $divergent
sleep 5
echo 'delay recovery divergent'
ceph pg dump pgs
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000
ceph osd unset noup
wait_for_osd up 0
wait_for_osd up 1
wait_for_osd up 2
ceph pg dump pgs
echo 'wait for peering'
ceph pg dump pgs
rados -p $poolname put foo $dummyfile
echo "killing divergent $divergent"
ceph pg dump pgs
kill_daemons $dir KILL osd.$divergent
#_objectstore_tool_nodown $dir $divergent --op log --pgid $pgid
echo "reviving divergent $divergent"
ceph pg dump pgs
activate_osd $dir $divergent
sleep 20
echo "allowing recovery"
ceph pg dump pgs
# Set osd_recovery_delay_start back to 0 and kick the queue
for i in $osds
do
ceph tell osd.$i debug kick_recovery_wq 0
done
echo 'reading divergent objects'
ceph pg dump pgs
for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE))
do
rados -p $poolname get existing_$i $dir/existing || return 1
done
rm -f $dir/existing
grep _merge_object_divergent_entries $(find $dir -name '*osd*log')
# Check for _merge_object_divergent_entries for case #5
if ! grep -q "_merge_object_divergent_entries.*cannot roll back, removing and adding to missing" $(find $dir -name '*osd*log')
then
echo failure
return 1
fi
echo "success"
delete_pool $poolname
kill_daemons $dir || return 1
}
function TEST_divergent_ec() {
local dir=$1
# something that is always there
local dummyfile='/etc/fstab'
local dummyfile2='/etc/resolv.conf'
local num_osds=3
local osds="$(seq 0 $(expr $num_osds - 1))"
run_mon $dir a || return 1
run_mgr $dir x || return 1
for i in $osds
do
run_osd $dir $i || return 1
done
ceph osd set noout
ceph osd set noin
ceph osd set nodown
create_ec_pool $poolname true k=2 m=1 || return 1
flush_pg_stats || return 1
wait_for_clean || return 1
# determine primary
local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')"
echo "primary and soon to be divergent is $divergent"
ceph pg dump pgs
local non_divergent=""
for i in $osds
do
if [ "$i" = "$divergent" ]; then
continue
fi
non_divergent="$non_divergent $i"
done
echo "writing initial objects"
# write a bunch of objects
for i in $(seq 1 $testobjects)
do
rados -p $poolname put existing_$i $dummyfile
done
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
local pgid=$(get_pg $poolname existing_1)
# blackhole non_divergent
echo "blackholing osds $non_divergent"
ceph pg dump pgs
for i in $non_divergent
do
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1
done
# Write some soon to be divergent
echo 'writing divergent object'
rados -p $poolname put existing_$testobjects $dummyfile2 &
sleep 1
rados -p $poolname put existing_$testobjects $dummyfile &
rados -p $poolname mksnap snap1
rados -p $poolname put existing_$(expr $testobjects - 1) $dummyfile &
sleep 10
killall -9 rados
# kill all the osds but leave divergent in
echo 'killing all the osds'
ceph pg dump pgs
kill_daemons $dir KILL osd || return 1
for i in $osds
do
ceph osd down osd.$i
done
for i in $non_divergent
do
ceph osd out osd.$i
done
# bring up non-divergent
echo "bringing up non_divergent $non_divergent"
ceph pg dump pgs
for i in $non_divergent
do
activate_osd $dir $i || return 1
done
for i in $non_divergent
do
ceph osd in osd.$i
done
sleep 5
#WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
# write 1 non-divergent object (ensure that old divergent one is divergent)
objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)"
echo "writing non-divergent object $objname"
ceph pg dump pgs
rados -p $poolname put $objname $dummyfile2
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
# Dump logs
for i in $non_divergent
do
kill_daemons $dir KILL osd.$i || return 1
_objectstore_tool_nodown $dir $i --op log --pgid $pgid
activate_osd $dir $i || return 1
done
_objectstore_tool_nodown $dir $divergent --op log --pgid $pgid
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
# ensure no recovery of up osds first
echo 'delay recovery'
ceph pg dump pgs
for i in $non_divergent
do
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000
done
# bring in our divergent friend
echo "revive divergent $divergent"
ceph pg dump pgs
ceph osd set noup
activate_osd $dir $divergent
sleep 5
echo 'delay recovery divergent'
ceph pg dump pgs
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000
ceph osd unset noup
wait_for_osd up 0
wait_for_osd up 1
wait_for_osd up 2
ceph pg dump pgs
echo 'wait for peering'
ceph pg dump pgs
rados -p $poolname put foo $dummyfile
echo "killing divergent $divergent"
ceph pg dump pgs
kill_daemons $dir KILL osd.$divergent
#_objectstore_tool_nodown $dir $divergent --op log --pgid $pgid
echo "reviving divergent $divergent"
ceph pg dump pgs
activate_osd $dir $divergent
sleep 20
echo "allowing recovery"
ceph pg dump pgs
# Set osd_recovery_delay_start back to 0 and kick the queue
for i in $osds
do
ceph tell osd.$i debug kick_recovery_wq 0
done
echo 'reading divergent objects'
ceph pg dump pgs
for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE))
do
rados -p $poolname get existing_$i $dir/existing || return 1
done
rm -f $dir/existing
grep _merge_object_divergent_entries $(find $dir -name '*osd*log')
# Check for _merge_object_divergent_entries for case #3
# XXX: Not reproducing this case
# if ! grep -q "_merge_object_divergent_entries.* missing, .* adjusting" $(find $dir -name '*osd*log')
# then
# echo failure
# return 1
# fi
# Check for _merge_object_divergent_entries for case #4
if ! grep -q "_merge_object_divergent_entries.*rolled back" $(find $dir -name '*osd*log')
then
echo failure
return 1
fi
echo "success"
delete_pool $poolname
kill_daemons $dir || return 1
}
# Special case divergence test with ceph-objectstore-tool export/remove/import
# Test handling of divergent entries with prior_version
# prior to log_tail and a ceph-objectstore-tool export/import
# based on qa/tasks/divergent_prior2.py
function TEST_divergent_2() {
local dir=$1
# something that is always there
local dummyfile='/etc/fstab'
local dummyfile2='/etc/resolv.conf'
local num_osds=3
local osds="$(seq 0 $(expr $num_osds - 1))"
run_mon $dir a || return 1
run_mgr $dir x || return 1
for i in $osds
do
run_osd $dir $i || return 1
done
ceph osd set noout
ceph osd set noin
ceph osd set nodown
create_pool $poolname 1 1
ceph osd pool set $poolname size 3
ceph osd pool set $poolname min_size 2
flush_pg_stats || return 1
wait_for_clean || return 1
# determine primary
local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')"
echo "primary and soon to be divergent is $divergent"
ceph pg dump pgs
local non_divergent=""
for i in $osds
do
if [ "$i" = "$divergent" ]; then
continue
fi
non_divergent="$non_divergent $i"
done
echo "writing initial objects"
# write a bunch of objects
for i in $(seq 1 $testobjects)
do
rados -p $poolname put existing_$i $dummyfile
done
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
local pgid=$(get_pg $poolname existing_1)
# blackhole non_divergent
echo "blackholing osds $non_divergent"
ceph pg dump pgs
for i in $non_divergent
do
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1
done
# Do some creates to hit case 2
echo 'create new divergent objects'
for i in $(seq 1 $DIVERGENT_CREATE)
do
rados -p $poolname create newobject_$i &
done
# Write some soon to be divergent
echo 'writing divergent objects'
for i in $(seq 1 $DIVERGENT_WRITE)
do
rados -p $poolname put existing_$i $dummyfile2 &
done
# Remove some soon to be divergent
echo 'remove divergent objects'
for i in $(seq 1 $DIVERGENT_REMOVE)
do
rmi=$(expr $i + $DIVERGENT_WRITE)
rados -p $poolname rm existing_$rmi &
done
sleep 10
killall -9 rados
# kill all the osds but leave divergent in
echo 'killing all the osds'
ceph pg dump pgs
kill_daemons $dir KILL osd || return 1
for i in $osds
do
ceph osd down osd.$i
done
for i in $non_divergent
do
ceph osd out osd.$i
done
# bring up non-divergent
echo "bringing up non_divergent $non_divergent"
ceph pg dump pgs
for i in $non_divergent
do
activate_osd $dir $i || return 1
done
for i in $non_divergent
do
ceph osd in osd.$i
done
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
# write 1 non-divergent object (ensure that old divergent one is divergent)
objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)"
echo "writing non-divergent object $objname"
ceph pg dump pgs
rados -p $poolname put $objname $dummyfile2
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
# ensure no recovery of up osds first
echo 'delay recovery'
ceph pg dump pgs
for i in $non_divergent
do
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000
done
# bring in our divergent friend
echo "revive divergent $divergent"
ceph pg dump pgs
ceph osd set noup
activate_osd $dir $divergent
sleep 5
echo 'delay recovery divergent'
ceph pg dump pgs
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000
ceph osd unset noup
wait_for_osd up 0
wait_for_osd up 1
wait_for_osd up 2
ceph pg dump pgs
echo 'wait for peering'
ceph pg dump pgs
rados -p $poolname put foo $dummyfile
# At this point the divergent_priors should have been detected
echo "killing divergent $divergent"
ceph pg dump pgs
kill_daemons $dir KILL osd.$divergent
# export a pg
expfile=$dir/exp.$$.out
_objectstore_tool_nodown $dir $divergent --op export-remove --pgid $pgid --file $expfile
_objectstore_tool_nodown $dir $divergent --op import --file $expfile
echo "reviving divergent $divergent"
ceph pg dump pgs
activate_osd $dir $divergent
wait_for_osd up $divergent
sleep 20
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) dump_ops_in_flight
echo "allowing recovery"
ceph pg dump pgs
# Set osd_recovery_delay_start back to 0 and kick the queue
for i in $osds
do
ceph tell osd.$i debug kick_recovery_wq 0
done
echo 'reading divergent objects'
ceph pg dump pgs
for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE))
do
rados -p $poolname get existing_$i $dir/existing || return 1
done
for i in $(seq 1 $DIVERGENT_CREATE)
do
rados -p $poolname get newobject_$i $dir/existing
done
rm -f $dir/existing
grep _merge_object_divergent_entries $(find $dir -name '*osd*log')
# Check for _merge_object_divergent_entries for case #1
if ! grep -q "_merge_object_divergent_entries: more recent entry found:" $(find $dir -name '*osd*log')
then
echo failure
return 1
fi
# Check for _merge_object_divergent_entries for case #2
if ! grep -q "_merge_object_divergent_entries.*prior_version or op type indicates creation" $(find $dir -name '*osd*log')
then
echo failure
return 1
fi
echo "success"
rm $dir/$expfile
delete_pool $poolname
kill_daemons $dir || return 1
}
# this is the same as case _2 above, except we enable pg autoscaling in order
# to reproduce https://tracker.ceph.com/issues/41816
function TEST_divergent_3() {
local dir=$1
# something that is always there
local dummyfile='/etc/fstab'
local dummyfile2='/etc/resolv.conf'
local num_osds=3
local osds="$(seq 0 $(expr $num_osds - 1))"
run_mon $dir a || return 1
run_mgr $dir x || return 1
for i in $osds
do
run_osd $dir $i || return 1
done
ceph osd set noout
ceph osd set noin
ceph osd set nodown
create_pool $poolname 1 1
ceph osd pool set $poolname size 3
ceph osd pool set $poolname min_size 2
# reproduce https://tracker.ceph.com/issues/41816
ceph osd pool set $poolname pg_autoscale_mode on
divergent=-1
start_time=$(date +%s)
max_duration=300
while [ "$divergent" -le -1 ]
do
flush_pg_stats || return 1
wait_for_clean || return 1
# determine primary
divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')"
echo "primary and soon to be divergent is $divergent"
ceph pg dump pgs
current_time=$(date +%s)
elapsed_time=$(expr $current_time - $start_time)
if [ "$elapsed_time" -gt "$max_duration" ]; then
echo "timed out waiting for divergent"
return 1
fi
done
local non_divergent=""
for i in $osds
do
if [ "$i" = "$divergent" ]; then
continue
fi
non_divergent="$non_divergent $i"
done
echo "writing initial objects"
# write a bunch of objects
for i in $(seq 1 $testobjects)
do
rados -p $poolname put existing_$i $dummyfile
done
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
local pgid=$(get_pg $poolname existing_1)
# blackhole non_divergent
echo "blackholing osds $non_divergent"
ceph pg dump pgs
for i in $non_divergent
do
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1
done
# Do some creates to hit case 2
echo 'create new divergent objects'
for i in $(seq 1 $DIVERGENT_CREATE)
do
rados -p $poolname create newobject_$i &
done
# Write some soon to be divergent
echo 'writing divergent objects'
for i in $(seq 1 $DIVERGENT_WRITE)
do
rados -p $poolname put existing_$i $dummyfile2 &
done
# Remove some soon to be divergent
echo 'remove divergent objects'
for i in $(seq 1 $DIVERGENT_REMOVE)
do
rmi=$(expr $i + $DIVERGENT_WRITE)
rados -p $poolname rm existing_$rmi &
done
sleep 10
killall -9 rados
# kill all the osds but leave divergent in
echo 'killing all the osds'
ceph pg dump pgs
kill_daemons $dir KILL osd || return 1
for i in $osds
do
ceph osd down osd.$i
done
for i in $non_divergent
do
ceph osd out osd.$i
done
# bring up non-divergent
echo "bringing up non_divergent $non_divergent"
ceph pg dump pgs
for i in $non_divergent
do
activate_osd $dir $i || return 1
done
for i in $non_divergent
do
ceph osd in osd.$i
done
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
# write 1 non-divergent object (ensure that old divergent one is divergent)
objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)"
echo "writing non-divergent object $objname"
ceph pg dump pgs
rados -p $poolname put $objname $dummyfile2
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
# ensure no recovery of up osds first
echo 'delay recovery'
ceph pg dump pgs
for i in $non_divergent
do
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000
done
# bring in our divergent friend
echo "revive divergent $divergent"
ceph pg dump pgs
ceph osd set noup
activate_osd $dir $divergent
sleep 5
echo 'delay recovery divergent'
ceph pg dump pgs
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000
ceph osd unset noup
wait_for_osd up 0
wait_for_osd up 1
wait_for_osd up 2
ceph pg dump pgs
echo 'wait for peering'
ceph pg dump pgs
rados -p $poolname put foo $dummyfile
# At this point the divergent_priors should have been detected
echo "killing divergent $divergent"
ceph pg dump pgs
kill_daemons $dir KILL osd.$divergent
# export a pg
expfile=$dir/exp.$$.out
_objectstore_tool_nodown $dir $divergent --op export-remove --pgid $pgid --file $expfile
_objectstore_tool_nodown $dir $divergent --op import --file $expfile
echo "reviving divergent $divergent"
ceph pg dump pgs
activate_osd $dir $divergent
wait_for_osd up $divergent
sleep 20
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) dump_ops_in_flight
echo "allowing recovery"
ceph pg dump pgs
# Set osd_recovery_delay_start back to 0 and kick the queue
for i in $osds
do
ceph tell osd.$i debug kick_recovery_wq 0
done
echo 'reading divergent objects'
ceph pg dump pgs
for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE))
do
rados -p $poolname get existing_$i $dir/existing || return 1
done
for i in $(seq 1 $DIVERGENT_CREATE)
do
rados -p $poolname get newobject_$i $dir/existing
done
rm -f $dir/existing
grep _merge_object_divergent_entries $(find $dir -name '*osd*log')
# Check for _merge_object_divergent_entries for case #1
if ! grep -q "_merge_object_divergent_entries: more recent entry found:" $(find $dir -name '*osd*log')
then
echo failure
return 1
fi
# Check for _merge_object_divergent_entries for case #2
if ! grep -q "_merge_object_divergent_entries.*prior_version or op type indicates creation" $(find $dir -name '*osd*log')
then
echo failure
return 1
fi
echo "success"
rm $dir/$expfile
delete_pool $poolname
kill_daemons $dir || return 1
}
main divergent-priors "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh divergent-priors.sh"
# End:
| 23,442 | 26.386682 | 130 | sh |
null | ceph-main/qa/standalone/osd/ec-error-rollforward.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
# Fix port????
export CEPH_MON="127.0.0.1:7132" # git grep '\<7132\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
export margin=10
export objects=200
export poolname=test
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_ec_error_rollforward() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
ceph osd erasure-code-profile set ec-profile m=2 k=2 crush-failure-domain=osd
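    # k=2 m=2 means four shards per object across the four OSDs started above,
    # so the pool tolerates the loss of any two shards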
ceph osd pool create ec 1 1 erasure ec-profile
rados -p ec put foo /etc/passwd
kill -STOP $(cat $dir/osd.2.pid)
rados -p ec rm foo &
pids="$!"
sleep 1
rados -p ec rm a &
pids+=" $!"
rados -p ec rm b &
pids+=" $!"
rados -p ec rm c &
pids+=" $!"
sleep 1
# Use SIGKILL so stopped osd.2 will terminate
# and kill_daemons waits for daemons to die
kill_daemons $dir KILL osd
kill $pids
wait
activate_osd $dir 0 || return 1
activate_osd $dir 1 || return 1
activate_osd $dir 2 || return 1
activate_osd $dir 3 || return 1
wait_for_clean || return 1
}
main ec-error-rollforward "$@"
| 1,612 | 23.074627 | 83 | sh |
null | ceph-main/qa/standalone/osd/osd-bench.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Cloudwatt <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7106" # git grep '\<7106\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--debug-bluestore 20 "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_bench() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
local osd_bench_small_size_max_iops=$(CEPH_ARGS='' ceph-conf \
--show-config-value osd_bench_small_size_max_iops)
local osd_bench_large_size_max_throughput=$(CEPH_ARGS='' ceph-conf \
--show-config-value osd_bench_large_size_max_throughput)
local osd_bench_max_block_size=$(CEPH_ARGS='' ceph-conf \
--show-config-value osd_bench_max_block_size)
local osd_bench_duration=$(CEPH_ARGS='' ceph-conf \
--show-config-value osd_bench_duration)
#
# block size too high
#
expect_failure $dir osd_bench_max_block_size \
ceph tell osd.0 bench 1024 $((osd_bench_max_block_size + 1)) || return 1
#
# count too high for small (< 1MB) block sizes
#
local bsize=1024
local max_count=$(($bsize * $osd_bench_duration * $osd_bench_small_size_max_iops))
expect_failure $dir bench_small_size_max_iops \
ceph tell osd.0 bench $(($max_count + 1)) $bsize || return 1
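    # A worked example of the limit above, assuming the defaults of 100 for
    # osd_bench_small_size_max_iops and 30 s for osd_bench_duration (the test
    # reads the real values at runtime instead of hardcoding them):
    #   max_count = 1024 * 30 * 100 = 3072000 bytes
    # so asking to write even one byte more than that should be rejected.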
#
# count too high for large (>= 1MB) block sizes
#
local bsize=$((1024 * 1024 + 1))
local max_count=$(($osd_bench_large_size_max_throughput * $osd_bench_duration))
expect_failure $dir osd_bench_large_size_max_throughput \
ceph tell osd.0 bench $(($max_count + 1)) $bsize || return 1
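    # Likewise for large blocks, assuming the default 100 MB/s
    # osd_bench_large_size_max_throughput: at most roughly 100 MB/s * 30 s of
    # data may be requested in a single bench call.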
#
# default values should work
#
ceph tell osd.0 bench || return 1
    #
    # test object_size < block_size
    #
    ceph tell osd.0 bench 10 14456 4444 3
    #
    # test object_size < block_size & object_size = 0 (default value)
    #
ceph tell osd.0 bench 1 14456
}
main osd-bench "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/osd-bench.sh"
# End:
| 3,021 | 29.836735 | 86 | sh |
null | ceph-main/qa/standalone/osd/osd-bluefs-volume-ops.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
[ `uname` = FreeBSD ] && exit 0
function run() {
local dir=$1
shift
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_bluestore() {
local dir=$1
local flimit=$(ulimit -n)
if [ $flimit -lt 1536 ]; then
echo "Low open file limit ($flimit), test may fail. Increase to 1536 or higher and retry if that happens."
fi
export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--bluestore_block_size=2147483648 "
CEPH_ARGS+="--bluestore_block_db_create=true "
CEPH_ARGS+="--bluestore_block_db_size=1073741824 "
CEPH_ARGS+="--bluestore_block_wal_size=536870912 "
CEPH_ARGS+="--bluestore_block_wal_create=true "
CEPH_ARGS+="--bluestore_fsck_on_mount=true "
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
osd_pid0=$(cat $dir/osd.0.pid)
run_osd $dir 1 || return 1
osd_pid1=$(cat $dir/osd.1.pid)
run_osd $dir 2 || return 1
osd_pid2=$(cat $dir/osd.2.pid)
run_osd $dir 3 || return 1
osd_pid3=$(cat $dir/osd.3.pid)
sleep 5
create_pool foo 16
# write some objects
timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
echo "after bench"
# kill
while kill $osd_pid0; do sleep 1 ; done
ceph osd down 0
while kill $osd_pid1; do sleep 1 ; done
ceph osd down 1
while kill $osd_pid2; do sleep 1 ; done
ceph osd down 2
while kill $osd_pid3; do sleep 1 ; done
ceph osd down 3
# expand slow devices
ceph-bluestore-tool --path $dir/0 fsck || return 1
ceph-bluestore-tool --path $dir/1 fsck || return 1
ceph-bluestore-tool --path $dir/2 fsck || return 1
ceph-bluestore-tool --path $dir/3 fsck || return 1
truncate $dir/0/block -s 4294967296 # 4GB
ceph-bluestore-tool --path $dir/0 bluefs-bdev-expand || return 1
truncate $dir/1/block -s 4311744512 # 4GB + 16MB
ceph-bluestore-tool --path $dir/1 bluefs-bdev-expand || return 1
truncate $dir/2/block -s 4295099392 # 4GB + 129KB
ceph-bluestore-tool --path $dir/2 bluefs-bdev-expand || return 1
truncate $dir/3/block -s 4293918720 # 4GB - 1MB
ceph-bluestore-tool --path $dir/3 bluefs-bdev-expand || return 1
# slow, DB, WAL -> slow, DB
ceph-bluestore-tool --path $dir/0 fsck || return 1
ceph-bluestore-tool --path $dir/1 fsck || return 1
ceph-bluestore-tool --path $dir/2 fsck || return 1
ceph-bluestore-tool --path $dir/3 fsck || return 1
ceph-bluestore-tool --path $dir/0 bluefs-bdev-sizes
ceph-bluestore-tool --path $dir/0 \
--devs-source $dir/0/block.wal \
--dev-target $dir/0/block.db \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/0 fsck || return 1
# slow, DB, WAL -> slow, WAL
ceph-bluestore-tool --path $dir/1 \
--devs-source $dir/1/block.db \
--dev-target $dir/1/block \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/1 fsck || return 1
# slow, DB, WAL -> slow
ceph-bluestore-tool --path $dir/2 \
--devs-source $dir/2/block.wal \
--devs-source $dir/2/block.db \
--dev-target $dir/2/block \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/2 fsck || return 1
# slow, DB, WAL -> slow, WAL (negative case)
ceph-bluestore-tool --path $dir/3 \
--devs-source $dir/3/block.db \
--dev-target $dir/3/block.wal \
--command bluefs-bdev-migrate
# Migration to WAL is unsupported
if [ $? -eq 0 ]; then
return 1
fi
ceph-bluestore-tool --path $dir/3 fsck || return 1
# slow, DB, WAL -> slow, DB (WAL to slow then slow to DB)
ceph-bluestore-tool --path $dir/3 \
--devs-source $dir/3/block.wal \
--dev-target $dir/3/block \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/3 fsck || return 1
ceph-bluestore-tool --path $dir/3 \
--devs-source $dir/3/block \
--dev-target $dir/3/block.db \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/3 fsck || return 1
activate_osd $dir 0 || return 1
osd_pid0=$(cat $dir/osd.0.pid)
activate_osd $dir 1 || return 1
osd_pid1=$(cat $dir/osd.1.pid)
activate_osd $dir 2 || return 1
osd_pid2=$(cat $dir/osd.2.pid)
activate_osd $dir 3 || return 1
osd_pid3=$(cat $dir/osd.3.pid)
wait_for_clean || return 1
# write some objects
timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
# kill
while kill $osd_pid0; do sleep 1 ; done
ceph osd down 0
while kill $osd_pid1; do sleep 1 ; done
ceph osd down 1
while kill $osd_pid2; do sleep 1 ; done
ceph osd down 2
while kill $osd_pid3; do sleep 1 ; done
ceph osd down 3
# slow, DB -> slow, DB, WAL
ceph-bluestore-tool --path $dir/0 fsck || return 1
dd if=/dev/zero of=$dir/0/wal count=512 bs=1M
ceph-bluestore-tool --path $dir/0 \
--dev-target $dir/0/wal \
--command bluefs-bdev-new-wal || return 1
ceph-bluestore-tool --path $dir/0 fsck || return 1
# slow, WAL -> slow, DB, WAL
ceph-bluestore-tool --path $dir/1 fsck || return 1
dd if=/dev/zero of=$dir/1/db count=1024 bs=1M
ceph-bluestore-tool --path $dir/1 \
--dev-target $dir/1/db \
--command bluefs-bdev-new-db || return 1
ceph-bluestore-tool --path $dir/1 \
--devs-source $dir/1/block \
--dev-target $dir/1/block.db \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/1 fsck || return 1
# slow -> slow, DB, WAL
ceph-bluestore-tool --path $dir/2 fsck || return 1
ceph-bluestore-tool --path $dir/2 \
--command bluefs-bdev-new-db || return 1
ceph-bluestore-tool --path $dir/2 \
--command bluefs-bdev-new-wal || return 1
ceph-bluestore-tool --path $dir/2 \
--devs-source $dir/2/block \
--dev-target $dir/2/block.db \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/2 fsck || return 1
# slow, DB -> slow, WAL
ceph-bluestore-tool --path $dir/3 fsck || return 1
ceph-bluestore-tool --path $dir/3 \
--command bluefs-bdev-new-wal || return 1
ceph-bluestore-tool --path $dir/3 \
--devs-source $dir/3/block.db \
--dev-target $dir/3/block \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/3 fsck || return 1
activate_osd $dir 0 || return 1
osd_pid0=$(cat $dir/osd.0.pid)
activate_osd $dir 1 || return 1
osd_pid1=$(cat $dir/osd.1.pid)
activate_osd $dir 2 || return 1
osd_pid2=$(cat $dir/osd.2.pid)
activate_osd $dir 3 || return 1
osd_pid3=$(cat $dir/osd.3.pid)
# write some objects
timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
# kill
while kill $osd_pid0; do sleep 1 ; done
ceph osd down 0
while kill $osd_pid1; do sleep 1 ; done
ceph osd down 1
while kill $osd_pid2; do sleep 1 ; done
ceph osd down 2
while kill $osd_pid3; do sleep 1 ; done
ceph osd down 3
# slow, DB1, WAL -> slow, DB2, WAL
ceph-bluestore-tool --path $dir/0 fsck || return 1
dd if=/dev/zero of=$dir/0/db2 count=1024 bs=1M
ceph-bluestore-tool --path $dir/0 \
--devs-source $dir/0/block.db \
--dev-target $dir/0/db2 \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/0 fsck || return 1
# slow, DB, WAL1 -> slow, DB, WAL2
dd if=/dev/zero of=$dir/0/wal2 count=512 bs=1M
ceph-bluestore-tool --path $dir/0 \
--devs-source $dir/0/block.wal \
--dev-target $dir/0/wal2 \
--command bluefs-bdev-migrate || return 1
rm -rf $dir/0/wal
ceph-bluestore-tool --path $dir/0 fsck || return 1
# slow, DB + WAL -> slow, DB2 -> slow
ceph-bluestore-tool --path $dir/1 fsck || return 1
dd if=/dev/zero of=$dir/1/db2 count=1024 bs=1M
ceph-bluestore-tool --path $dir/1 \
--devs-source $dir/1/block.db \
--devs-source $dir/1/block.wal \
--dev-target $dir/1/db2 \
--command bluefs-bdev-migrate || return 1
rm -rf $dir/1/db
ceph-bluestore-tool --path $dir/1 fsck || return 1
ceph-bluestore-tool --path $dir/1 \
--devs-source $dir/1/block.db \
--dev-target $dir/1/block \
--command bluefs-bdev-migrate || return 1
rm -rf $dir/1/db2
ceph-bluestore-tool --path $dir/1 fsck || return 1
# slow -> slow, DB (negative case)
ceph-objectstore-tool --type bluestore --data-path $dir/2 \
--op fsck --no-mon-config || return 1
dd if=/dev/zero of=$dir/2/db2 count=1024 bs=1M
ceph-bluestore-tool --path $dir/2 \
--devs-source $dir/2/block \
--dev-target $dir/2/db2 \
--command bluefs-bdev-migrate
# Migration from slow-only to new device is unsupported
if [ $? -eq 0 ]; then
return 1
fi
ceph-bluestore-tool --path $dir/2 fsck || return 1
# slow + DB + WAL -> slow, DB2
dd if=/dev/zero of=$dir/2/db2 count=1024 bs=1M
ceph-bluestore-tool --path $dir/2 \
--devs-source $dir/2/block \
--devs-source $dir/2/block.db \
--devs-source $dir/2/block.wal \
--dev-target $dir/2/db2 \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/2 fsck || return 1
# slow + WAL -> slow2, WAL2
dd if=/dev/zero of=$dir/3/wal2 count=1024 bs=1M
ceph-bluestore-tool --path $dir/3 \
--devs-source $dir/3/block \
--devs-source $dir/3/block.wal \
--dev-target $dir/3/wal2 \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/3 fsck || return 1
activate_osd $dir 0 || return 1
osd_pid0=$(cat $dir/osd.0.pid)
activate_osd $dir 1 || return 1
osd_pid1=$(cat $dir/osd.1.pid)
activate_osd $dir 2 || return 1
osd_pid2=$(cat $dir/osd.2.pid)
activate_osd $dir 3 || return 1
osd_pid3=$(cat $dir/osd.3.pid)
# write some objects
timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
wait_for_clean || return 1
}
function TEST_bluestore2() {
local dir=$1
local flimit=$(ulimit -n)
if [ $flimit -lt 1536 ]; then
echo "Low open file limit ($flimit), test may fail. Increase to 1536 or higher and retry if that happens."
fi
export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--bluestore_block_size=4294967296 "
CEPH_ARGS+="--bluestore_block_db_create=true "
CEPH_ARGS+="--bluestore_block_db_size=1073741824 "
CEPH_ARGS+="--bluestore_block_wal_create=false "
CEPH_ARGS+="--bluestore_fsck_on_mount=true "
CEPH_ARGS+="--osd_pool_default_size=1 "
CEPH_ARGS+="--osd_pool_default_min_size=1 "
CEPH_ARGS+="--bluestore_debug_enforce_settings=ssd "
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
osd_pid0=$(cat $dir/osd.0.pid)
sleep 5
create_pool foo 16
    retry=0
while [[ $retry -le 5 ]]; do
# write some objects
timeout 60 rados bench -p foo 10 write --write-omap --no-cleanup #|| return 1
#give RocksDB some time to cooldown and put files to slow level(s)
sleep 10
db_used=$( ceph tell osd.0 perf dump bluefs | jq ".bluefs.db_used_bytes" )
spilled_over=$( ceph tell osd.0 perf dump bluefs | jq ".bluefs.slow_used_bytes" )
((retry+=1))
test $spilled_over -eq 0 || break
done
test $spilled_over -gt 0 || return 1
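    # A non-zero slow_used_bytes means RocksDB overflowed the 1 GiB DB volume
    # and spilled data onto the main device, which is what makes the
    # DB -> slow bluefs-bdev-migrate below a meaningful test case.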
while kill $osd_pid0; do sleep 1 ; done
ceph osd down 0
ceph-bluestore-tool --path $dir/0 \
--devs-source $dir/0/block.db \
--dev-target $dir/0/block \
--command bluefs-bdev-migrate || return 1
ceph-bluestore-tool --path $dir/0 \
--command bluefs-bdev-sizes || return 1
ceph-bluestore-tool --path $dir/0 \
--command fsck || return 1
activate_osd $dir 0 || return 1
osd_pid0=$(cat $dir/osd.0.pid)
wait_for_clean || return 1
}
function TEST_bluestore_expand() {
local dir=$1
local flimit=$(ulimit -n)
if [ $flimit -lt 1536 ]; then
echo "Low open file limit ($flimit), test may fail. Increase to 1536 or higher and retry if that happens."
fi
export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--bluestore_block_size=4294967296 "
CEPH_ARGS+="--bluestore_block_db_create=true "
CEPH_ARGS+="--bluestore_block_db_size=1073741824 "
CEPH_ARGS+="--bluestore_block_wal_create=false "
CEPH_ARGS+="--bluestore_fsck_on_mount=true "
CEPH_ARGS+="--osd_pool_default_size=1 "
CEPH_ARGS+="--osd_pool_default_min_size=1 "
CEPH_ARGS+="--bluestore_debug_enforce_settings=ssd "
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
osd_pid0=$(cat $dir/osd.0.pid)
sleep 5
create_pool foo 16
# write some objects
timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
sleep 5
total_space_before=$( ceph tell osd.0 perf dump bluefs | jq ".bluefs.slow_total_bytes" )
free_space_before=`ceph tell osd.0 bluestore bluefs device info | grep "BDEV_SLOW" -A 2 | grep free | cut -d':' -f 2 | cut -d"," -f 1 | cut -d' ' -f 2`
# kill
while kill $osd_pid0; do sleep 1 ; done
ceph osd down 0
# destage allocation to file before expand (in case fast-shutdown skipped that step)
ceph-bluestore-tool --log-file $dir/bluestore_tool.log --path $dir/0 allocmap || return 1
# expand slow devices
ceph-bluestore-tool --log-file $dir/bluestore_tool.log --path $dir/0 fsck || return 1
requested_space=4294967296 # 4GB
truncate $dir/0/block -s $requested_space
ceph-bluestore-tool --log-file $dir/bluestore_tool.log --path $dir/0 bluefs-bdev-expand || return 1
    # fsck again after the expand
ceph-bluestore-tool --log-file $dir/bluestore_tool.log --path $dir/0 fsck || return 1
# compare allocation-file with RocksDB state
ceph-bluestore-tool --log-file $dir/bluestore_tool.log --path $dir/0 qfsck || return 1
ceph-bluestore-tool --log-file $dir/bluestore_tool.log --path $dir/0 bluefs-bdev-sizes
activate_osd $dir 0 || return 1
osd_pid0=$(cat $dir/osd.0.pid)
wait_for_clean || return 1
total_space_after=$( ceph tell osd.0 perf dump bluefs | jq ".bluefs.slow_total_bytes" )
free_space_after=`ceph tell osd.0 bluestore bluefs device info | grep "BDEV_SLOW" -A 2 | grep free | cut -d':' -f 2 | cut -d"," -f 1 | cut -d' ' -f 2`
    if [ "$total_space_after" != "$requested_space" ]; then
echo "total_space_after = $total_space_after"
echo "requested_space = $requested_space"
return 1;
fi
total_space_added=$((total_space_after - total_space_before))
free_space_added=$((free_space_after - free_space_before))
let new_used_space=($total_space_added - $free_space_added)
echo $new_used_space
    # allow up to 128KB to be consumed
if [ $new_used_space -gt 131072 ]; then
echo "total_space_added = $total_space_added"
echo "free_space_added = $free_space_added"
return 1;
fi
# kill
while kill $osd_pid0; do sleep 1 ; done
ceph osd down 0
ceph-bluestore-tool --log-file $dir/bluestore_tool.log --path $dir/0 qfsck || return 1
}
main osd-bluefs-volume-ops "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/osd-bluefs-volume-ops.sh"
# End:
| 16,101 | 31.333333 | 155 | sh |
null | ceph-main/qa/standalone/osd/osd-config.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Cloudwatt <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7100" # git grep '\<7100\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_config_init() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
local stale=1000
local cache=500
run_osd $dir 0 \
--osd-map-cache-size=$cache \
--osd-pg-epoch-persisted-max-stale=$stale \
|| return 1
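    # cache (500) is not greater than stale (1000), so the warning grepped for
    # below should be logged at startup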
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
}
function TEST_config_track() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
local osd_map_cache_size=$(CEPH_ARGS='' ceph-conf \
--show-config-value osd_map_cache_size)
local osd_pg_epoch_persisted_max_stale=$(CEPH_ARGS='' ceph-conf \
--show-config-value osd_pg_epoch_persisted_max_stale)
#
# increase the osd_pg_epoch_persisted_max_stale above the default cache_size
#
! grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
local stale=$(($osd_map_cache_size * 2))
ceph tell osd.0 injectargs "--osd-pg-epoch-persisted-max-stale $stale" || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
rm $dir/osd.0.log
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log reopen || return 1
}
function TEST_default_adjustment() {
a=$(ceph-osd --no-mon-config --show-config-value rgw_torrent_origin)
b=$(ceph-osd --no-mon-config --show-config-value rgw_torrent_origin --default-rgw-torrent-origin default)
c=$(ceph-osd --no-mon-config --show-config-value rgw_torrent_origin --default-rgw-torrent-origin arg)
[ "$a" != "default" ] || return 1
[ "$b" = "default" ] || return 1
[ "$c" = "arg" ] || return 1
a=$(ceph-osd --no-mon-config --show-config-value log_to_file)
b=$(ceph-osd --no-mon-config --show-config-value log_to_file --default-log-to-file=false)
c=$(ceph-osd --no-mon-config --show-config-value log_to_file --default-log-to-file=false --log-to-file)
[ "$a" = "true" ] || return 1
[ "$b" = "false" ] || return 1
[ "$c" = "true" ] || return 1
}
main osd-config "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/osd-config.sh"
# End:
| 3,502 | 34.744898 | 109 | sh |
null | ceph-main/qa/standalone/osd/osd-copy-from.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Cloudwatt <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
# Author: Sage Weil <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7111" # git grep '\<7111\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_copy_from() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
create_rbd_pool || return 1
# success
rados -p rbd put foo $(which rados)
rados -p rbd cp foo foo2
rados -p rbd stat foo2
# failure
ceph tell osd.\* injectargs -- --osd-debug-inject-copyfrom-error
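    # with the error injected the OSD should fail the internal copy-from op,
    # so the cp fails and foo3 is never created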
! rados -p rbd cp foo foo3
! rados -p rbd stat foo3
# success again
ceph tell osd.\* injectargs -- --no-osd-debug-inject-copyfrom-error
    rados -p rbd cp foo foo3
rados -p rbd stat foo3
}
main osd-copy-from "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/osd-bench.sh"
# End:
| 1,940 | 27.130435 | 83 | sh |
null | ceph-main/qa/standalone/osd/osd-dup.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
[ `uname` = FreeBSD ] && exit 0
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
# avoid running out of fds in rados bench
CEPH_ARGS+="--filestore_wbthrottle_xfs_ios_hard_limit=900 "
CEPH_ARGS+="--filestore_wbthrottle_btrfs_ios_hard_limit=900 "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
main osd-dup "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/osd-dup.sh"
# End:
| 845 | 26.290323 | 83 | sh |
null | ceph-main/qa/standalone/osd/osd-fast-mark-down.sh | #!/usr/bin/env bash
#
# Copyright (C) 2016 Piotr Dałek <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Piotr Dałek <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
MAX_PROPAGATION_TIME=30
function run() {
local dir=$1
shift
rm -f $dir/*.pid
export CEPH_MON="127.0.0.1:7126" # git grep '\<7126\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
OLD_ARGS=$CEPH_ARGS
CEPH_ARGS+="--osd-fast-fail-on-connection-refused=false "
echo "Ensuring old behavior is there..."
    test_fast_kill $dir && { echo "OSDs died too early! Old behavior doesn't work." ; return 1; }
CEPH_ARGS=$OLD_ARGS"--osd-fast-fail-on-connection-refused=true "
OLD_ARGS=$CEPH_ARGS
CEPH_ARGS=$OLD_ARGS"--ms_type=async --mon-host=$CEPH_MON"
echo "Testing async msgr..."
test_fast_kill $dir || return 1
return 0
}
function test_fast_kill() {
# create cluster with 3 osds
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=3 || return 1
run_mgr $dir x || return 1
for oi in {0..2}; do
run_osd $dir $oi || return 1
pids[$oi]=$(cat $dir/osd.$oi.pid)
done
create_rbd_pool || return 1
    # write some objects so the osds establish connections to each other
timeout 20 rados -p rbd bench 10 write -b 4096 --max-objects 128 --no-cleanup || return 1
sleep 1
killid=0
previd=0
    # kill a random osd and check that, within at most MAX_PROPAGATION_TIME
    # seconds, it gets marked down in the osdmap
for i in {1..2}; do
while [ $killid -eq $previd ]; do
killid=${pids[$RANDOM%${#pids[@]}]}
done
previd=$killid
kill -9 $killid
time_left=$MAX_PROPAGATION_TIME
down_osds=0
while [ $time_left -gt 0 ]; do
sleep 1
time_left=$[$time_left - 1];
grep -m 1 -c -F "ms_handle_refused" $dir/osd.*.log > /dev/null
if [ $? -ne 0 ]; then
continue
fi
down_osds=$(ceph osd tree | grep -c down)
if [ $down_osds -lt $i ]; then
# osds not marked down yet, try again in a second
continue
elif [ $down_osds -gt $i ]; then
echo Too many \($down_osds\) osds died!
return 1
else
break
fi
done
if [ $down_osds -lt $i ]; then
echo Killed the OSD, yet it is not marked down
ceph osd tree
return 1
fi
done
pkill -SIGTERM rados
teardown $dir || return 1
}
main osd-fast-mark-down "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/osd-fast-mark-down.sh"
# End:
| 3,112 | 26.794643 | 94 | sh |
null | ceph-main/qa/standalone/osd/osd-force-create-pg.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7145" # git grep '\<7145\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_reuse_id() {
local dir=$1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
ceph osd pool create foo 50 || return 1
wait_for_clean || return 1
kill_daemons $dir TERM osd.0
kill_daemons $dir TERM osd.1
kill_daemons $dir TERM osd.2
ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force
ceph-objectstore-tool --data-path $dir/1 --op remove --pgid 1.0 --force
ceph-objectstore-tool --data-path $dir/2 --op remove --pgid 1.0 --force
activate_osd $dir 0 || return 1
activate_osd $dir 1 || return 1
activate_osd $dir 2 || return 1
sleep 10
ceph pg ls | grep 1.0 | grep stale || return 1
ceph osd force-create-pg 1.0 --yes-i-really-mean-it || return 1
wait_for_clean || return 1
}
main osd-force-create-pg "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/osd-force-create-pg.sh"
# End:
| 1,599 | 28.62963 | 87 | sh |
null | ceph-main/qa/standalone/osd/osd-markdown.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 Intel <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Xiaoxi Chen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7108" # git grep '\<7108\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function markdown_N_impl() {
markdown_times=$1
total_time=$2
sleeptime=$3
for i in `seq 1 $markdown_times`
do
# check the OSD is UP
ceph tell osd.0 get_latest_osdmap || return 1
ceph osd tree
ceph osd tree | grep osd.0 |grep up || return 1
# mark the OSD down.
# override any dup setting in the environment to ensure we do this
# exactly once (modulo messenger failures, at least; we can't *actually*
# provide exactly-once semantics for mon commands).
( unset CEPH_CLI_TEST_DUP_COMMAND ; ceph osd down 0 )
sleep $sleeptime
done
}
function TEST_markdown_exceed_maxdown_count() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
create_rbd_pool || return 1
# 3+1 times within 300s, osd should stay dead on the 4th time
local count=3
local sleeptime=10
local period=300
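    # i.e. 4 markdowns about 10s apart all fall inside the 300s window, which
    # exceeds osd_max_markdown_count=3, so osd.0 should give up and stay down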
ceph tell osd.0 injectargs '--osd_max_markdown_count '$count'' || return 1
ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1
markdown_N_impl $(($count+1)) $period $sleeptime
    # after being marked down N+1 times, osd.0 should stay down
ceph osd tree | grep down | grep osd.0 || return 1
}
function TEST_markdown_boot() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
create_rbd_pool || return 1
# 3 times within 120s, should stay up
local count=3
local sleeptime=10
local period=120
ceph tell osd.0 injectargs '--osd_max_markdown_count '$count'' || return 1
ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1
markdown_N_impl $count $period $sleeptime
    # down N times, osd.0 should be up
sleep 15 # give osd plenty of time to notice and come back up
ceph tell osd.0 get_latest_osdmap || return 1
ceph osd tree | grep up | grep osd.0 || return 1
}
function TEST_markdown_boot_exceed_time() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
create_rbd_pool || return 1
# 3+1 times, but over 40s, > 20s, so should stay up
local count=3
local period=20
local sleeptime=10
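    # i.e. 4 markdowns about 10s apart span roughly 40s, so no 20s window ever
    # holds more than the allowed 3 and osd.0 should keep coming back up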
ceph tell osd.0 injectargs '--osd_max_markdown_count '$count'' || return 1
ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1
markdown_N_impl $(($count+1)) $period $sleeptime
sleep 15 # give osd plenty of time to notice and come back up
ceph tell osd.0 get_latest_osdmap || return 1
ceph osd tree | grep up | grep osd.0 || return 1
}
function TEST_osd_stop() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
osd_0_pid=$(cat $dir/osd.0.pid)
ps -p $osd_0_pid || return 1
ceph osd tree | grep osd.0 | grep up || return 1
ceph osd stop osd.0
sleep 15 # give osd plenty of time to notice and exit
ceph osd tree | grep down | grep osd.0 || return 1
! ps -p $osd_0_pid || return 1
}
main osd-markdown "$@"
| 4,487 | 28.92 | 83 | sh |
null | ceph-main/qa/standalone/osd/osd-reactivate.sh | #!/usr/bin/env bash
#
# Author: Vicente Cheng <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7122" # git grep '\<7122\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_reactivate() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
kill_daemons $dir TERM osd || return 1
ready_path=$dir"/0/ready"
activate_path=$dir"/0/active"
# trigger mkfs again
rm -rf $ready_path $activate_path
activate_osd $dir 0 || return 1
}
main osd-reactivate "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/osd-reactivate.sh"
# End:
| 1,537 | 25.982456 | 83 | sh |
null | ceph-main/qa/standalone/osd/osd-recovery-prio.sh | #!/usr/bin/env bash
#
# Copyright (C) 2019 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
# Fix port????
export CEPH_MON="127.0.0.1:7114" # git grep '\<7114\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20 "
# Set osd op queue = wpq for the tests. Recovery priority is not
# considered by mclock_scheduler leading to unexpected results.
CEPH_ARGS+="--osd-op-queue=wpq "
export objects=200
export poolprefix=test
export FORCE_PRIO="255" # See OSD_RECOVERY_PRIORITY_FORCED
export NORMAL_PRIO="190" # See OSD_RECOVERY_PRIORITY_BASE + 10
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_recovery_priority() {
local dir=$1
local pools=10
local OSDS=5
local max_tries=10
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
ceph osd pool set "${poolprefix}$p" size 2
done
sleep 5
wait_for_clean || return 1
ceph pg dump pgs
    # Find 3 pools whose pg has the same primary but whose second
    # replicas are on different osds.
local PG1
local POOLNUM1
local pool1
local chk_osd1_1
local chk_osd1_2
local PG2
local POOLNUM2
local pool2
local chk_osd2
local PG3
local POOLNUM3
local pool3
for p in $(seq 1 $pools)
do
ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting
local test_osd1=$(head -1 $dir/acting)
local test_osd2=$(tail -1 $dir/acting)
if [ -z "$PG1" ];
then
PG1="${p}.0"
POOLNUM1=$p
pool1="${poolprefix}$p"
chk_osd1_1=$test_osd1
chk_osd1_2=$test_osd2
elif [ -z "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 ];
then
PG2="${p}.0"
POOLNUM2=$p
pool2="${poolprefix}$p"
chk_osd2=$test_osd2
elif [ -n "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 -a "$chk_osd2" != $test_osd2 ];
then
PG3="${p}.0"
POOLNUM3=$p
pool3="${poolprefix}$p"
break
fi
done
rm -f $dir/acting
    if [ "$pool2" = "" -o "$pool3" = "" ];
    then
        echo "Failure to find appropriate PGs"
return 1
fi
for p in $(seq 1 $pools)
do
if [ $p != $POOLNUM1 -a $p != $POOLNUM2 -a $p != $POOLNUM3 ];
then
delete_pool ${poolprefix}$p
fi
done
ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
ceph osd pool set $pool3 size 1 --yes-i-really-mean-it
wait_for_clean || return 1
dd if=/dev/urandom of=$dir/data bs=1M count=10
p=1
for pname in $pool1 $pool2 $pool3
do
for i in $(seq 1 $objects)
do
rados -p ${pname} put obj${i}-p${p} $dir/data
done
p=$(expr $p + 1)
done
local otherosd=$(get_not_primary $pool1 obj1-p1)
ceph pg dump pgs
ERRORS=0
ceph osd set norecover
ceph osd set noout
# Get a pg to want to recover and quickly force it
# to be preempted.
ceph osd pool set $pool3 size 2
sleep 2
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
# 3. Item is in progress, adjust priority with no higher priority waiting
for i in $(seq 1 $max_tries)
do
if ! ceph pg force-recovery $PG3 2>&1 | grep -q "doesn't require recovery"; then
break
fi
if [ "$i" = "$max_tries" ]; then
echo "ERROR: Didn't appear to be able to force-recovery"
ERRORS=$(expr $ERRORS + 1)
fi
sleep 2
done
flush_pg_stats || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
ceph osd out osd.$chk_osd1_2
sleep 2
flush_pg_stats || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
ceph pg dump pgs
ceph osd pool set $pool2 size 2
sleep 2
flush_pg_stats || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
cat $dir/out
ceph pg dump pgs
PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG1}\")).prio")
if [ "$PRIO" != "$NORMAL_PRIO" ];
then
echo "The normal PG ${PG1} doesn't have prio $NORMAL_PRIO queued waiting"
ERRORS=$(expr $ERRORS + 1)
fi
# Using eval will strip double-quotes from item
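    # For example (illustrative): jq emits the item as a quoted JSON string
    # such as "1.0"; eval re-parses the assignment so ITEM ends up as 1.0
    # without the quotes.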
eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG3} ];
then
echo "The first force-recovery PG $PG3 didn't become the in progress item"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
if [ "$PRIO" != $FORCE_PRIO ];
then
echo "The first force-recovery PG ${PG3} doesn't have prio $FORCE_PRIO"
ERRORS=$(expr $ERRORS + 1)
fi
fi
# 1. Item is queued, re-queue with new priority
for i in $(seq 1 $max_tries)
do
if ! ceph pg force-recovery $PG2 2>&1 | grep -q "doesn't require recovery"; then
break
fi
if [ "$i" = "$max_tries" ]; then
echo "ERROR: Didn't appear to be able to force-recovery"
ERRORS=$(expr $ERRORS + 1)
fi
sleep 2
done
sleep 2
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
cat $dir/out
PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio")
if [ "$PRIO" != "$FORCE_PRIO" ];
then
echo "The second force-recovery PG ${PG2} doesn't have prio $FORCE_PRIO"
ERRORS=$(expr $ERRORS + 1)
fi
flush_pg_stats || return 1
    # 4. Item is in progress; if higher priority items are waiting, preempt the item
#ceph osd unset norecover
ceph pg cancel-force-recovery $PG3 || return 1
sleep 2
#ceph osd set norecover
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
cat $dir/out
PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG3}\")).prio")
if [ "$PRIO" != "$NORMAL_PRIO" ];
then
echo "After cancel-recovery PG ${PG3} doesn't have prio $NORMAL_PRIO"
ERRORS=$(expr $ERRORS + 1)
fi
eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG2} ];
then
echo "The force-recovery PG $PG2 didn't become the in progress item"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
if [ "$PRIO" != $FORCE_PRIO ];
then
            echo "The force-recovery PG ${PG2} doesn't have prio $FORCE_PRIO"
ERRORS=$(expr $ERRORS + 1)
fi
fi
ceph pg cancel-force-recovery $PG2 || return 1
sleep 5
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
    # 2. Item is queued; re-queue and preempt because the new priority is higher than the in-progress item's
flush_pg_stats || return 1
ceph pg force-recovery $PG3 || return 1
sleep 2
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
cat $dir/out
PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio")
if [ "$PRIO" != "$NORMAL_PRIO" ];
then
      echo "After cancel-force-recovery PG ${PG2} doesn't have prio $NORMAL_PRIO"
ERRORS=$(expr $ERRORS + 1)
fi
eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG3} ];
then
echo "The force-recovery PG $PG3 didn't get promoted to an in progress item"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
if [ "$PRIO" != $FORCE_PRIO ];
then
            echo "The force-recovery PG ${PG3} doesn't have prio $FORCE_PRIO"
ERRORS=$(expr $ERRORS + 1)
fi
fi
ceph osd unset noout
ceph osd unset norecover
wait_for_clean "CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations" || return 1
ceph pg dump pgs
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_pgstate_history
if [ $ERRORS != "0" ];
then
echo "$ERRORS error(s) found"
else
echo TEST PASSED
fi
delete_pool $pool1
delete_pool $pool2
delete_pool $pool3
kill_daemons $dir || return 1
return $ERRORS
}
#
# Show that pool recovery_priority is added to recovery priority
#
# Create 2 pools on 2 OSDs with different primaries
# pool 1 with recovery_priority 1
# pool 2 with recovery_priority 2
#
# Start recovery by changing the pool sizes from 1 to 2
# Use dump_recovery_reservations to verify priorities
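#
# A minimal sketch of the query pattern used below (assuming the admin socket
# path comes from get_asok_path, as elsewhere in this file):
#   ceph --admin-daemon $(get_asok_path osd.0) dump_recovery_reservations |
#       jq '.local_reservations.in_progress[0].prio'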
function TEST_recovery_pool_priority() {
local dir=$1
    local pools=3             # Don't assume the first 2 pools are exactly what we want
local OSDS=2
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
ceph osd pool set "${poolprefix}$p" size 2
done
sleep 5
wait_for_clean || return 1
ceph pg dump pgs
# Find 2 pools with different primaries which
# means the replica must be on another osd.
local PG1
local POOLNUM1
local pool1
local chk_osd1_1
local chk_osd1_2
local PG2
local POOLNUM2
local pool2
local chk_osd2_1
local chk_osd2_2
for p in $(seq 1 $pools)
do
ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting
local test_osd1=$(head -1 $dir/acting)
local test_osd2=$(tail -1 $dir/acting)
if [ -z "$PG1" ];
then
PG1="${p}.0"
POOLNUM1=$p
pool1="${poolprefix}$p"
chk_osd1_1=$test_osd1
chk_osd1_2=$test_osd2
elif [ $chk_osd1_1 != $test_osd1 ];
then
PG2="${p}.0"
POOLNUM2=$p
pool2="${poolprefix}$p"
chk_osd2_1=$test_osd1
chk_osd2_2=$test_osd2
break
fi
done
rm -f $dir/acting
if [ "$pool2" = "" ];
then
        echo "Failure to find appropriate PGs"
return 1
fi
for p in $(seq 1 $pools)
do
if [ $p != $POOLNUM1 -a $p != $POOLNUM2 ];
then
delete_pool ${poolprefix}$p
fi
done
pool1_extra_prio=1
pool2_extra_prio=2
pool1_prio=$(expr $NORMAL_PRIO + $pool1_extra_prio)
pool2_prio=$(expr $NORMAL_PRIO + $pool2_extra_prio)
ceph osd pool set $pool1 size 1 --yes-i-really-mean-it
ceph osd pool set $pool1 recovery_priority $pool1_extra_prio
ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
ceph osd pool set $pool2 recovery_priority $pool2_extra_prio
wait_for_clean || return 1
dd if=/dev/urandom of=$dir/data bs=1M count=10
p=1
for pname in $pool1 $pool2
do
for i in $(seq 1 $objects)
do
rados -p ${pname} put obj${i}-p${p} $dir/data
done
p=$(expr $p + 1)
done
local otherosd=$(get_not_primary $pool1 obj1-p1)
ceph pg dump pgs
ERRORS=0
ceph osd pool set $pool1 size 2
ceph osd pool set $pool2 size 2
# Wait for both PGs to be in recovering state
ceph pg dump pgs
# Wait for recovery to start
set -o pipefail
count=0
while(true)
do
if test $(ceph --format json pg dump pgs |
jq '.pg_stats | .[] | .state | contains("recovering")' | grep -c true) == "2"
then
break
fi
sleep 2
if test "$count" -eq "10"
then
echo "Recovery never started on both PGs"
return 1
fi
count=$(expr $count + 1)
done
set +o pipefail
ceph pg dump pgs
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/dump.${chk_osd1_1}.out
echo osd.${chk_osd1_1}
cat $dir/dump.${chk_osd1_1}.out
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_2}) dump_recovery_reservations > $dir/dump.${chk_osd1_2}.out
echo osd.${chk_osd1_2}
cat $dir/dump.${chk_osd1_2}.out
# Using eval will strip double-quotes from item
eval ITEM=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG1} ];
then
echo "The primary PG for $pool1 didn't become the in progress item"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].prio')
if [ "$PRIO" != $pool1_prio ];
then
echo "The primary PG ${PG1} doesn't have prio $pool1_prio"
ERRORS=$(expr $ERRORS + 1)
fi
fi
# Using eval will strip double-quotes from item
eval ITEM=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG1} ];
then
echo "The primary PG for $pool1 didn't become the in progress item on remote"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].prio')
if [ "$PRIO" != $pool1_prio ];
then
echo "The primary PG ${PG1} doesn't have prio $pool1_prio on remote"
ERRORS=$(expr $ERRORS + 1)
fi
fi
# Using eval will strip double-quotes from item
eval ITEM=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG2} ];
then
echo "The primary PG for $pool2 didn't become the in progress item"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].prio')
if [ "$PRIO" != $pool2_prio ];
then
echo "The primary PG ${PG2} doesn't have prio $pool2_prio"
ERRORS=$(expr $ERRORS + 1)
fi
fi
# Using eval will strip double-quotes from item
eval ITEM=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].item')
if [ "$ITEM" != ${PG2} ];
then
echo "The primary PG $PG2 didn't become the in progress item on remote"
ERRORS=$(expr $ERRORS + 1)
else
PRIO=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].prio')
if [ "$PRIO" != $pool2_prio ];
then
echo "The primary PG ${PG2} doesn't have prio $pool2_prio on remote"
ERRORS=$(expr $ERRORS + 1)
fi
fi
wait_for_clean || return 1
if [ $ERRORS != "0" ];
then
echo "$ERRORS error(s) found"
else
echo TEST PASSED
fi
delete_pool $pool1
delete_pool $pool2
kill_daemons $dir || return 1
return $ERRORS
}
main osd-recovery-prio "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh osd-recovery-prio.sh"
# End:
| 16,123 | 28.694291 | 128 | sh |
null | ceph-main/qa/standalone/osd/osd-recovery-space.sh | #!/usr/bin/env bash
#
# Copyright (C) 2018 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7221" # git grep '\<7221\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--osd_max_backfills=10 "
CEPH_ARGS+="--osd_mclock_override_recovery_settings=true "
export objects=600
export poolprefix=test
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function get_num_in_state() {
local state=$1
local expression
expression+="select(contains(\"${state}\"))"
ceph --format json pg dump pgs 2>/dev/null | \
jq ".pg_stats | [.[] | .state | $expression] | length"
}
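# Illustrative: "get_num_in_state recovery_toofull" prints how many PGs
# currently report a state containing "recovery_toofull" (0 if none do).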
function wait_for_state() {
local state=$1
local cur_in_state
local -a delays=($(get_timeout_delays $2 5))
local -i loop=0
flush_pg_stats || return 1
while test $(get_num_pgs) == 0 ; do
sleep 1
done
while true ; do
cur_in_state=$(get_num_in_state ${state})
test $cur_in_state -gt 0 && break
if (( $loop >= ${#delays[*]} )) ; then
ceph pg dump pgs
return 1
fi
sleep ${delays[$loop]}
loop+=1
done
return 0
}
function wait_for_recovery_toofull() {
local timeout=$1
wait_for_state recovery_toofull $timeout
}
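# Example, as used in the test below: "wait_for_recovery_toofull 30" polls with
# increasing delays until at least one PG reports recovery_toofull, failing once
# roughly 30 seconds of delays have been exhausted.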
# Create 1 pool with size 1
# set full-ratio to 50%
# Write 600 5K objects (3000K total)
# Inject fake_statfs_for_testing to 3600K (83% full)
# Increase the pool size to 2
# The pool shouldn't have room to recover
function TEST_recovery_test_simple() {
local dir=$1
local pools=1
local OSDS=2
run_mon $dir a || return 1
run_mgr $dir x || return 1
export CEPH_ARGS
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
ceph osd set-nearfull-ratio .40
ceph osd set-backfillfull-ratio .45
ceph osd set-full-ratio .50
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
ceph osd pool set "${poolprefix}$p" size 1 --yes-i-really-mean-it
done
wait_for_clean || return 1
dd if=/dev/urandom of=$dir/datafile bs=1024 count=5
for o in $(seq 1 $objects)
do
rados -p "${poolprefix}$p" put obj$o $dir/datafile
done
for o in $(seq 0 $(expr $OSDS - 1))
do
ceph tell osd.$o injectargs '--fake_statfs_for_testing 3686400' || return 1
done
sleep 5
ceph pg dump pgs
for p in $(seq 1 $pools)
do
ceph osd pool set "${poolprefix}$p" size 2
done
    # If this times out, we'll detect the errors below
wait_for_recovery_toofull 30
ERRORS=0
if [ "$(ceph pg dump pgs | grep +recovery_toofull | wc -l)" != "1" ];
then
echo "One pool should have been in recovery_toofull"
ERRORS="$(expr $ERRORS + 1)"
fi
ceph pg dump pgs
ceph status
ceph status --format=json-pretty > $dir/stat.json
eval SEV=$(jq '.health.checks.PG_RECOVERY_FULL.severity' $dir/stat.json)
if [ "$SEV" != "HEALTH_ERR" ]; then
echo "PG_RECOVERY_FULL severity $SEV not HEALTH_ERR"
ERRORS="$(expr $ERRORS + 1)"
fi
eval MSG=$(jq '.health.checks.PG_RECOVERY_FULL.summary.message' $dir/stat.json)
if [ "$MSG" != "Full OSDs blocking recovery: 1 pg recovery_toofull" ]; then
echo "PG_RECOVERY_FULL message '$MSG' mismatched"
ERRORS="$(expr $ERRORS + 1)"
fi
rm -f $dir/stat.json
if [ $ERRORS != "0" ];
then
return 1
fi
for i in $(seq 1 $pools)
do
delete_pool "${poolprefix}$i"
done
kill_daemons $dir || return 1
}
main osd-recovery-space "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh osd-recovery-space.sh"
# End:
| 4,533 | 24.615819 | 83 | sh |
null | ceph-main/qa/standalone/osd/osd-recovery-stats.sh | #!/usr/bin/env bash
#
# Copyright (C) 2017 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
# Fix port????
export CEPH_MON="127.0.0.1:7115" # git grep '\<7115\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
# so we will not force auth_log_shard to be acting_primary
CEPH_ARGS+="--osd_force_auth_primary_missing_objects=1000000 "
export margin=10
export objects=200
export poolname=test
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function below_margin() {
local -i check=$1
shift
local -i target=$1
return $(( $check <= $target && $check >= $target - $margin ? 0 : 1 ))
}
function above_margin() {
local -i check=$1
shift
local -i target=$1
return $(( $check >= $target && $check <= $target + $margin ? 0 : 1 ))
}
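# Worked example with margin=10 (exported in run above): below_margin 195 200
# succeeds because 195 lies in [190, 200], and above_margin 205 200 succeeds
# because 205 lies in [200, 210]; below_margin 205 200 and above_margin 189 200
# both fail.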
FIND_UPACT='grep "pg[[]${PG}.*recovering.*PeeringState::update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/"'
FIND_FIRST='grep "pg[[]${PG}.*recovering.*PeeringState::update_calc_stats $which " $log | grep -F " ${UPACT}${addp}" | grep -v est | head -1 | sed "s/.* \([0-9]*\)$/\1/"'
FIND_LAST='grep "pg[[]${PG}.*recovering.*PeeringState::update_calc_stats $which " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/"'
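# The one-liners above are eval'd from check() with $log, $PG, $which, $UPACT
# and $addp already set. An illustrative (assumed) expansion for a replicated pg:
#   log=$dir/osd.3.log PG=1.0 which=degraded UPACT='[3,1]' addp=' '
#   FIRST=$(eval $FIND_FIRST)   # first degraded count logged for this up/acting set
#   LAST=$(eval $FIND_LAST)     # most recent degraded count logged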
function check() {
local dir=$1
local PG=$2
local primary=$3
local type=$4
local degraded_start=$5
local degraded_end=$6
local misplaced_start=$7
local misplaced_end=$8
local primary_start=${9:-}
local primary_end=${10:-}
local log=$dir/osd.${primary}.log
local addp=" "
if [ "$type" = "erasure" ];
then
addp="p"
fi
UPACT=$(eval $FIND_UPACT)
# Check 3rd line at start because of false recovery starts
local which="degraded"
FIRST=$(eval $FIND_FIRST)
below_margin $FIRST $degraded_start || return 1
LAST=$(eval $FIND_LAST)
above_margin $LAST $degraded_end || return 1
# Check 3rd line at start because of false recovery starts
which="misplaced"
FIRST=$(eval $FIND_FIRST)
below_margin $FIRST $misplaced_start || return 1
LAST=$(eval $FIND_LAST)
above_margin $LAST $misplaced_end || return 1
# This is the value of set into MISSING_ON_PRIMARY
if [ -n "$primary_start" ];
then
which="shard $primary"
FIRST=$(eval $FIND_FIRST)
below_margin $FIRST $primary_start || return 1
LAST=$(eval $FIND_LAST)
above_margin $LAST $primary_end || return 1
fi
}
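# Example invocation, mirroring TEST_recovery_sizeup below:
#   check $dir $PG $primary replicated 400 0 400 0 200 0
# asserts (to within $margin) that the degraded count fell from ~400 to 0,
# misplaced from ~400 to 0, and the objects missing on the primary from ~200 to 0.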
# [1,0,?] -> [1,2,4]
# degraded 500 -> 0
# active+recovering+degraded
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 0 500 0 0 0 500 500 active+recovering+degraded 2017-11-17 19:27:36.493828 28'500 32:603 [1,2,4] 1 [1,2,4] 1 0'0 2017-11-17 19:27:05.915467 0'0 2017-11-17 19:27:05.915467
function do_recovery_out1() {
local dir=$1
shift
local type=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
if [ $type = "erasure" ];
then
ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
create_pool $poolname 1 1 $type myprofile
else
create_pool $poolname 1 1 $type
fi
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local primary=$(get_primary $poolname obj1)
local PG=$(get_pg $poolname obj1)
# Only 2 OSDs so only 1 not primary
local otherosd=$(get_not_primary $poolname obj1)
ceph osd set norecover
kill $(cat $dir/osd.${otherosd}.pid)
ceph osd down osd.${otherosd}
ceph osd out osd.${otherosd}
ceph osd unset norecover
ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
check $dir $PG $primary $type $objects 0 0 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
function TEST_recovery_replicated_out1() {
local dir=$1
do_recovery_out1 $dir replicated || return 1
}
function TEST_recovery_erasure_out1() {
local dir=$1
do_recovery_out1 $dir erasure || return 1
}
# [0, 1] -> [2,3,4,5]
# degraded 1000 -> 0
# misplaced 1000 -> 0
# missing on primary 500 -> 0
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 500 1000 1000 0 0 500 500 active+recovering+degraded 2017-10-27 09:38:37.453438 22'500 25:394 [2,4,3,5] 2 [2,4,3,5] 2 0'0 2017-10-27 09:37:58.046748 0'0 2017-10-27 09:37:58.046748
function TEST_recovery_sizeup() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
create_pool $poolname 1 1
ceph osd pool set $poolname size 2
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local primary=$(get_primary $poolname obj1)
local PG=$(get_pg $poolname obj1)
# Only 2 OSDs so only 1 not primary
local otherosd=$(get_not_primary $poolname obj1)
ceph osd set norecover
ceph osd out osd.$primary osd.$otherosd
    ceph osd pool set $poolname size 4
ceph osd unset norecover
# Get new primary
primary=$(get_primary $poolname obj1)
ceph tell osd.${primary} debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
local degraded=$(expr $objects \* 2)
local misplaced=$(expr $objects \* 2)
local log=$dir/osd.${primary}.log
check $dir $PG $primary replicated $degraded 0 $misplaced 0 $objects 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [0, 1, 2, 4] -> [3, 5]
# misplaced 1000 -> 0
# missing on primary 500 -> 0
# active+recovering+degraded
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 500 500 0 1000 0 0 500 500 active+recovering+degraded 2017-10-27 09:34:50.012261 22'500 27:118 [3,5] 3 [3,5] 3 0'0 2017-10-27 09:34:08.617248 0'0 2017-10-27 09:34:08.617248
function TEST_recovery_sizedown() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
run_osd $dir 4 || return 1
run_osd $dir 5 || return 1
create_pool $poolname 1 1
ceph osd pool set $poolname size 4
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local primary=$(get_primary $poolname obj1)
local PG=$(get_pg $poolname obj1)
    # Pool size is 4, so grab all OSDs in the acting set
local allosds=$(get_osds $poolname obj1)
ceph osd set norecover
for osd in $allosds
do
ceph osd out osd.$osd
done
    ceph osd pool set $poolname size 2
ceph osd unset norecover
ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
# Get new primary
primary=$(get_primary $poolname obj1)
local misplaced=$(expr $objects \* 2)
local log=$dir/osd.${primary}.log
check $dir $PG $primary replicated 0 0 $misplaced 0 || return 1
UPACT=$(grep "pg[[]${PG}.*recovering.*update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/")
# This is the value of set into MISSING_ON_PRIMARY
FIRST=$(grep "pg[[]${PG}.*recovering.*update_calc_stats shard $primary " $log | grep -F " $UPACT " | head -1 | sed "s/.* \([0-9]*\)$/\1/")
below_margin $FIRST $objects || return 1
LAST=$(grep "pg[[]${PG}.*recovering.*update_calc_stats shard $primary " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/")
above_margin $LAST 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [1] -> [1,2]
# degraded 300 -> 200
# active+recovering+undersized+degraded
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 100 0 300 0 0 0 100 100 active+recovering+undersized+degraded 2017-11-17 17:16:15.302943 13'500 16:643 [1,2] 1 [1,2] 1 0'0 2017-11-17 17:15:34.985563 0'0 2017-11-17 17:15:34.985563
function TEST_recovery_undersized() {
local dir=$1
local osds=3
run_mon $dir a || return 1
run_mgr $dir x || return 1
for i in $(seq 0 $(expr $osds - 1))
do
run_osd $dir $i || return 1
done
create_pool $poolname 1 1
ceph osd pool set $poolname size 1 --yes-i-really-mean-it
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local primary=$(get_primary $poolname obj1)
local PG=$(get_pg $poolname obj1)
ceph osd set norecover
    # Mark out some osd that is not the primary (pool size is 1, so the pg has no replica)
for i in $(seq 0 $(expr $osds - 1))
do
if [ $i = $primary ];
then
continue
fi
ceph osd out osd.$i
break
done
    ceph osd pool set $poolname size 4
ceph osd unset norecover
ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0
# Give extra sleep time because code below doesn't have the sophistication of wait_for_clean()
sleep 10
flush_pg_stats || return 1
# Wait for recovery to finish
# Can't use wait_for_clean() because state goes from active+recovering+undersized+degraded
# to active+undersized+degraded
for i in $(seq 1 300)
do
if ceph pg dump pgs | grep ^$PG | grep -qv recovering
then
break
fi
if [ $i = "300" ];
then
echo "Timeout waiting for recovery to finish"
return 1
fi
sleep 1
done
# Get new primary
primary=$(get_primary $poolname obj1)
local log=$dir/osd.${primary}.log
local first_degraded=$(expr $objects \* 3)
local last_degraded=$(expr $objects \* 2)
check $dir $PG $primary replicated $first_degraded $last_degraded 0 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
# [1,0,2] -> [1,3,NONE]/[1,3,2]
# degraded 100 -> 0
# misplaced 100 -> 100
# active+recovering+degraded+remapped
# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
# 1.0 100 0 100 100 0 0 100 100 active+recovering+degraded+remapped 2017-11-27 21:24:20.851243 18'500 23:618 [1,3,NONE] 1 [1,3,2] 1 0'0 2017-11-27 21:23:39.395242 0'0 2017-11-27 21:23:39.395242
function TEST_recovery_erasure_remapped() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
create_pool $poolname 1 1 erasure myprofile
ceph osd pool set $poolname min_size 2
wait_for_clean || return 1
for i in $(seq 1 $objects)
do
rados -p $poolname put obj$i /dev/null
done
local primary=$(get_primary $poolname obj1)
local PG=$(get_pg $poolname obj1)
local otherosd=$(get_not_primary $poolname obj1)
ceph osd set norecover
kill $(cat $dir/osd.${otherosd}.pid)
ceph osd down osd.${otherosd}
ceph osd out osd.${otherosd}
    # Mark out an osd that is neither the primary nor the already down/out osd
for i in 0 1 2 3
do
if [ $i = $primary ];
then
continue
fi
if [ $i = $otherosd ];
then
continue
fi
ceph osd out osd.$i
break
done
ceph osd unset norecover
ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
local log=$dir/osd.${primary}.log
check $dir $PG $primary erasure $objects 0 $objects $objects || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
function TEST_recovery_multi() {
local dir=$1
local osds=6
run_mon $dir a || return 1
run_mgr $dir x || return 1
for i in $(seq 0 $(expr $osds - 1))
do
run_osd $dir $i || return 1
done
create_pool $poolname 1 1
ceph osd pool set $poolname size 3
ceph osd pool set $poolname min_size 1
wait_for_clean || return 1
rados -p $poolname put obj1 /dev/null
local primary=$(get_primary $poolname obj1)
local otherosd=$(get_not_primary $poolname obj1)
ceph osd set noout
ceph osd set norecover
kill $(cat $dir/osd.${otherosd}.pid)
ceph osd down osd.${otherosd}
local half=$(expr $objects / 2)
for i in $(seq 2 $half)
do
rados -p $poolname put obj$i /dev/null
done
kill $(cat $dir/osd.${primary}.pid)
ceph osd down osd.${primary}
activate_osd $dir ${otherosd}
sleep 3
for i in $(seq $(expr $half + 1) $objects)
do
rados -p $poolname put obj$i /dev/null
done
local PG=$(get_pg $poolname obj1)
local otherosd=$(get_not_primary $poolname obj$objects)
ceph osd unset noout
ceph osd out osd.$primary osd.$otherosd
activate_osd $dir ${primary}
sleep 3
    ceph osd pool set $poolname size 4
ceph osd unset norecover
ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0
sleep 2
wait_for_clean || return 1
# Get new primary
primary=$(get_primary $poolname obj1)
local log=$dir/osd.${primary}.log
check $dir $PG $primary replicated 399 0 300 0 99 0 || return 1
delete_pool $poolname
kill_daemons $dir || return 1
}
main osd-recovery-stats "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh osd-recovery-stats.sh"
# End:
| 16,055 | 30.298246 | 289 | sh |
null | ceph-main/qa/standalone/osd/osd-rep-recov-eio.sh | #!/usr/bin/env bash
#
# Copyright (C) 2017 Red Hat <[email protected]>
#
#
# Author: Kefu Chai <[email protected]>
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
warnings=10
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7140" # git grep '\<7140\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
# set warning amount in case default changes
run_mon $dir a --mon_osd_warn_num_repaired=$warnings || return 1
run_mgr $dir x || return 1
ceph osd pool create foo 8 || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function setup_osds() {
local count=$1
shift
local type=$1
for id in $(seq 0 $(expr $count - 1)) ; do
run_osd${type} $dir $id || return 1
done
wait_for_clean || return 1
}
function get_state() {
local pgid=$1
local sname=state
ceph --format json pg dump pgs 2>/dev/null | \
jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname"
}
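# Illustrative: "get_state 2.0" prints the state string of pg 2.0,
# e.g. "active+clean".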
function rados_put() {
local dir=$1
local poolname=$2
local objname=${3:-SOMETHING}
for marker in AAA BBB CCCC DDDD ; do
printf "%*s" 1024 $marker
done > $dir/ORIGINAL
#
# get and put an object, compare they are equal
#
rados --pool $poolname put $objname $dir/ORIGINAL || return 1
}
function rados_get() {
local dir=$1
local poolname=$2
local objname=${3:-SOMETHING}
local expect=${4:-ok}
#
# Expect a failure to get object
#
if [ $expect = "fail" ];
then
! rados --pool $poolname get $objname $dir/COPY
return
fi
#
# Expect hang trying to get object
#
if [ $expect = "hang" ];
then
timeout 5 rados --pool $poolname get $objname $dir/COPY
test "$?" = "124"
return
fi
#
# get an object, compare with $dir/ORIGINAL
#
rados --pool $poolname get $objname $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
rm $dir/COPY
}
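# Example usage, as in the tests below: "rados_get $dir $poolname obj" expects a
# clean read matching $dir/ORIGINAL, passing "fail" expects the read to error
# out, and passing "hang" expects the read to block until the 5s timeout fires.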
function rados_get_data() {
local inject=$1
shift
local dir=$1
local poolname=pool-rep
local objname=obj-$inject-$$
local pgid=$(get_pg $poolname $objname)
rados_put $dir $poolname $objname || return 1
inject_$inject rep data $poolname $objname $dir 0 || return 1
rados_get $dir $poolname $objname || return 1
wait_for_clean
COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
test "$COUNT" = "1" || return 1
flush_pg_stats
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
test "$COUNT" = "1" || return 1
local object_osds=($(get_osds $poolname $objname))
local primary=${object_osds[0]}
local bad_peer=${object_osds[1]}
inject_$inject rep data $poolname $objname $dir 0 || return 1
inject_$inject rep data $poolname $objname $dir 1 || return 1
# Force primary to pull from the bad peer, so we can repair it too!
set_config osd $primary osd_debug_feed_pullee $bad_peer || return 1
rados_get $dir $poolname $objname || return 1
# Wait until automatic repair of bad peer is done
wait_for_clean || return 1
inject_$inject rep data $poolname $objname $dir 0 || return 1
inject_$inject rep data $poolname $objname $dir 2 || return 1
rados_get $dir $poolname $objname || return 1
wait_for_clean
COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
test "$COUNT" = "3" || return 1
flush_pg_stats
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
test "$COUNT" = "4" || return 1
inject_$inject rep data $poolname $objname $dir 0 || return 1
inject_$inject rep data $poolname $objname $dir 1 || return 1
inject_$inject rep data $poolname $objname $dir 2 || return 1
rados_get $dir $poolname $objname hang || return 1
wait_for_clean
# After hang another repair couldn't happen, so count stays the same
COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
test "$COUNT" = "3" || return 1
flush_pg_stats
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
test "$COUNT" = "4" || return 1
}
function TEST_rados_get_with_eio() {
local dir=$1
setup_osds 4 || return 1
local poolname=pool-rep
create_pool $poolname 1 1 || return 1
wait_for_clean || return 1
rados_get_data eio $dir || return 1
delete_pool $poolname
}
function TEST_rados_repair_warning() {
local dir=$1
local OBJS=$(expr $warnings + 1)
setup_osds 4 || return 1
local poolname=pool-rep
create_pool $poolname 1 1 || return 1
wait_for_clean || return 1
local poolname=pool-rep
local objbase=obj-warn
local inject=eio
for i in $(seq 1 $OBJS)
do
rados_put $dir $poolname ${objbase}-$i || return 1
inject_$inject rep data $poolname ${objbase}-$i $dir 0 || return 1
rados_get $dir $poolname ${objbase}-$i || return 1
done
local pgid=$(get_pg $poolname ${objbase}-1)
local object_osds=($(get_osds $poolname ${objbase}-1))
local primary=${object_osds[0]}
local bad_peer=${object_osds[1]}
wait_for_clean
COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
test "$COUNT" = "$OBJS" || return 1
flush_pg_stats
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
test "$COUNT" = "$OBJS" || return 1
ceph health | grep -q "Too many repaired reads on 1 OSDs" || return 1
ceph health detail | grep -q "osd.$primary had $OBJS reads repaired" || return 1
ceph health mute OSD_TOO_MANY_REPAIRS
set -o pipefail
# Should mute this
ceph health | $(! grep -q "Too many repaired reads on 1 OSDs") || return 1
set +o pipefail
for i in $(seq 1 $OBJS)
do
inject_$inject rep data $poolname ${objbase}-$i $dir 0 || return 1
inject_$inject rep data $poolname ${objbase}-$i $dir 1 || return 1
# Force primary to pull from the bad peer, so we can repair it too!
set_config osd $primary osd_debug_feed_pullee $bad_peer || return 1
rados_get $dir $poolname ${objbase}-$i || return 1
done
wait_for_clean
COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
test "$COUNT" = "$(expr $OBJS \* 2)" || return 1
flush_pg_stats
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
test "$COUNT" = "$(expr $OBJS \* 3)" || return 1
# Give mon a chance to notice additional OSD and unmute
# The default tick time is 5 seconds
CHECKTIME=10
LOOPS=0
while(true)
do
sleep 1
if ceph health | grep -q "Too many repaired reads on 2 OSDs"
then
break
fi
LOOPS=$(expr $LOOPS + 1)
if test "$LOOPS" = "$CHECKTIME"
then
echo "Too many repaired reads not seen after $CHECKTIME seconds"
return 1
fi
done
ceph health detail | grep -q "osd.$primary had $(expr $OBJS \* 2) reads repaired" || return 1
ceph health detail | grep -q "osd.$bad_peer had $OBJS reads repaired" || return 1
delete_pool $poolname
}
# Test backfill with unfound object
function TEST_rep_backfill_unfound() {
local dir=$1
local objname=myobject
local lastobj=300
# Must be between 1 and $lastobj
local testobj=obj250
export CEPH_ARGS
CEPH_ARGS+=' --osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10'
setup_osds 3 || return 1
local poolname=test-pool
create_pool $poolname 1 1 || return 1
wait_for_clean || return 1
ceph pg dump pgs
rados_put $dir $poolname $objname || return 1
local -a initial_osds=($(get_osds $poolname $objname))
local last_osd=${initial_osds[-1]}
    kill_daemons $dir TERM osd.${last_osd} 2>&1 < /dev/null || return 1
ceph osd down ${last_osd} || return 1
ceph osd out ${last_osd} || return 1
ceph pg dump pgs
dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4
for i in $(seq 1 $lastobj)
do
rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1
done
inject_eio rep data $poolname $testobj $dir 0 || return 1
inject_eio rep data $poolname $testobj $dir 1 || return 1
activate_osd $dir ${last_osd} || return 1
ceph osd in ${last_osd} || return 1
sleep 15
for tmp in $(seq 1 360); do
state=$(get_state 2.0)
echo $state | grep backfill_unfound
if [ "$?" = "0" ]; then
break
fi
echo "$state "
sleep 1
done
ceph pg dump pgs
ceph pg 2.0 list_unfound | grep -q $testobj || return 1
# Command should hang because object is unfound
timeout 5 rados -p $poolname get $testobj $dir/CHECK
test $? = "124" || return 1
ceph pg 2.0 mark_unfound_lost delete
wait_for_clean || return 1
for i in $(seq 1 $lastobj)
do
if [ obj${i} = "$testobj" ]; then
# Doesn't exist anymore
! rados -p $poolname get $testobj $dir/CHECK || return 1
else
rados --pool $poolname get obj${i} $dir/CHECK || return 1
diff -q $dir/ORIGINAL $dir/CHECK || return 1
fi
done
rm -f ${dir}/ORIGINAL ${dir}/CHECK
delete_pool $poolname
}
# Test recovery with unfound object
function TEST_rep_recovery_unfound() {
local dir=$1
local objname=myobject
local lastobj=100
# Must be between 1 and $lastobj
local testobj=obj75
setup_osds 3 || return 1
local poolname=test-pool
create_pool $poolname 1 1 || return 1
wait_for_clean || return 1
ceph pg dump pgs
rados_put $dir $poolname $objname || return 1
local -a initial_osds=($(get_osds $poolname $objname))
local last_osd=${initial_osds[-1]}
    kill_daemons $dir TERM osd.${last_osd} 2>&1 < /dev/null || return 1
ceph osd down ${last_osd} || return 1
ceph osd out ${last_osd} || return 1
ceph pg dump pgs
dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4
for i in $(seq 1 $lastobj)
do
rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1
done
inject_eio rep data $poolname $testobj $dir 0 || return 1
inject_eio rep data $poolname $testobj $dir 1 || return 1
activate_osd $dir ${last_osd} || return 1
ceph osd in ${last_osd} || return 1
sleep 15
for tmp in $(seq 1 100); do
state=$(get_state 2.0)
echo $state | grep -v recovering
if [ "$?" = "0" ]; then
break
fi
echo "$state "
sleep 1
done
ceph pg dump pgs
ceph pg 2.0 list_unfound | grep -q $testobj || return 1
# Command should hang because object is unfound
timeout 5 rados -p $poolname get $testobj $dir/CHECK
test $? = "124" || return 1
ceph pg 2.0 mark_unfound_lost delete
wait_for_clean || return 1
for i in $(seq 1 $lastobj)
do
if [ obj${i} = "$testobj" ]; then
# Doesn't exist anymore
! rados -p $poolname get $testobj $dir/CHECK || return 1
else
rados --pool $poolname get obj${i} $dir/CHECK || return 1
diff -q $dir/ORIGINAL $dir/CHECK || return 1
fi
done
rm -f ${dir}/ORIGINAL ${dir}/CHECK
delete_pool $poolname
}
main osd-rep-recov-eio.sh "$@"
# Local Variables:
# compile-command: "cd ../../../build ; make -j4 && ../qa/run-standalone.sh osd-rep-recov-eio.sh"
# End:
| 12,222 | 27.895981 | 97 | sh |
null | ceph-main/qa/standalone/osd/osd-reuse-id.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7123" # git grep '\<7123\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_reuse_id() {
local dir=$1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
destroy_osd $dir 1 || return 1
run_osd $dir 1 || return 1
}
main osd-reuse-id "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/osd-reuse-id.sh"
# End:
| 1,581 | 28.296296 | 87 | sh |
null | ceph-main/qa/standalone/osd/pg-split-merge.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7147" # git grep '\<7147\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON --mon_min_osdmap_epochs=50 --paxos_service_trim_min=10"
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_a_merge_empty() {
local dir=$1
run_mon $dir a --osd_pool_default_size=3 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
ceph osd pool create foo 2 || return 1
ceph osd pool set foo pgp_num 1 || return 1
wait_for_clean || return 1
    # note: we need 1.0 to have at least as many objects as 1.1
# 1.1
rados -p foo put foo1 /etc/passwd
rados -p foo put foo2 /etc/passwd
rados -p foo put foo3 /etc/passwd
rados -p foo put foo4 /etc/passwd
# 1.0
rados -p foo put foo5 /etc/passwd
rados -p foo put foo6 /etc/passwd
rados -p foo put foo8 /etc/passwd
rados -p foo put foo10 /etc/passwd
rados -p foo put foo11 /etc/passwd
rados -p foo put foo12 /etc/passwd
rados -p foo put foo16 /etc/passwd
wait_for_clean || return 1
ceph tell osd.1 config set osd_debug_no_purge_strays true
ceph osd pool set foo size 2 || return 1
wait_for_clean || return 1
kill_daemons $dir TERM osd.2 || return 1
ceph-objectstore-tool --data-path $dir/2 --op remove --pgid 1.1 --force || return 1
activate_osd $dir 2 || return 1
wait_for_clean || return 1
# osd.2: now 1.0 is there but 1.1 is not
# instantiate 1.1 on osd.2 with last_update=0'0 ('empty'), which is
# the problematic state... then let it merge with 1.0
ceph tell osd.2 config set osd_debug_no_acting_change true
ceph osd out 0 1
ceph osd pool set foo pg_num 1
sleep 5
ceph tell osd.2 config set osd_debug_no_acting_change false
# go back to osd.1 being primary, and 3x so the osd.2 copy doesn't get
# removed
ceph osd in 0 1
ceph osd pool set foo size 3
wait_for_clean || return 1
    # scrub to ensure the osd.2 copy of 1.0 was treated as incomplete
    # (rather than clean but missing half of its objects).
ceph pg scrub 1.0
sleep 10
ceph log last debug
ceph pg ls
ceph pg ls | grep ' active.clean ' || return 1
}
function TEST_import_after_merge_and_gap() {
local dir=$1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
ceph osd pool create foo 2 || return 1
wait_for_clean || return 1
rados -p foo bench 3 write -b 1024 --no-cleanup || return 1
kill_daemons $dir TERM osd.0 || return 1
ceph-objectstore-tool --data-path $dir/0 --op export --pgid 1.1 --file $dir/1.1 --force || return 1
ceph-objectstore-tool --data-path $dir/0 --op export --pgid 1.0 --file $dir/1.0 --force || return 1
activate_osd $dir 0 || return 1
ceph osd pool set foo pg_num 1
sleep 5
while ceph daemon osd.0 perf dump | jq '.osd.numpg' | grep 2 ; do sleep 1 ; done
wait_for_clean || return 1
#
kill_daemons $dir TERM osd.0 || return 1
ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force || return 1
    # this will import both halves of the original pg
ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 || return 1
ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1
activate_osd $dir 0 || return 1
wait_for_clean || return 1
# make a map gap
for f in `seq 1 50` ; do
ceph osd set nodown
ceph osd unset nodown
done
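    # each set/unset pair publishes new osdmap epochs, so 50 iterations open a
    # sizeable gap between the exported pg's map epoch and the current one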
    # poke and prod to ensure last_epoch_clean is big, reported to mon, and
# the osd is able to trim old maps
rados -p foo bench 1 write -b 1024 --no-cleanup || return 1
wait_for_clean || return 1
ceph tell osd.0 send_beacon
sleep 5
ceph osd set nodown
ceph osd unset nodown
sleep 5
kill_daemons $dir TERM osd.0 || return 1
# this should fail.. 1.1 still doesn't exist
! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 || return 1
ceph-objectstore-tool --data-path $dir/0 --op export-remove --pgid 1.0 --force --file $dir/1.0.later || return 1
# this should fail too because of the gap
! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 || return 1
! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1
# we can force it...
ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 --force || return 1
ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 --force || return 1
# ...but the osd won't start, so remove it again.
ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force || return 1
ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.1 --force || return 1
ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0.later --force || return 1
activate_osd $dir 0 || return 1
wait_for_clean || return 1
}
function TEST_import_after_split() {
local dir=$1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
ceph osd pool create foo 1 || return 1
wait_for_clean || return 1
rados -p foo bench 3 write -b 1024 --no-cleanup || return 1
kill_daemons $dir TERM osd.0 || return 1
ceph-objectstore-tool --data-path $dir/0 --op export --pgid 1.0 --file $dir/1.0 --force || return 1
activate_osd $dir 0 || return 1
ceph osd pool set foo pg_num 2
sleep 5
while ceph daemon osd.0 perf dump | jq '.osd.numpg' | grep 1 ; do sleep 1 ; done
wait_for_clean || return 1
kill_daemons $dir TERM osd.0 || return 1
ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force || return 1
# this should fail because 1.1 (split child) is there
! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1
ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.1 --force || return 1
    # now it will work (1.1 is gone)
ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1
activate_osd $dir 0 || return 1
wait_for_clean || return 1
}
main pg-split-merge "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/pg-split-merge.sh"
# End:
| 6,885 | 32.754902 | 116 | sh |
null | ceph-main/qa/standalone/osd/repeer-on-acting-back.sh | #!/usr/bin/env bash
#
# Copyright (C) 2020 ZTE Corporation <[email protected]>
#
# Author: xie xingguo <[email protected]>
# Author: Yan Jun <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export poolname=test
export testobjects=100
export loglen=12
export trim=$(expr $loglen / 2)
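    # e.g. loglen=12 gives trim=6; these small pg_log limits are what push a
    # rejoining OSD into backfill rather than log-based recovery (see below)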
export CEPH_MON="127.0.0.1:7115" # git grep '\<7115\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
# so we will not force auth_log_shard to be acting_primary
CEPH_ARGS+="--osd_force_auth_primary_missing_objects=1000000 "
# use small pg_log settings, so we always do backfill instead of recovery
CEPH_ARGS+="--osd_min_pg_log_entries=$loglen --osd_max_pg_log_entries=$loglen --osd_pg_log_trim_min=$trim "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_repeer_on_down_acting_member_coming_back() {
local dir=$1
local dummyfile='/etc/fstab'
local num_osds=6
local osds="$(seq 0 $(expr $num_osds - 1))"
run_mon $dir a || return 1
run_mgr $dir x || return 1
for i in $osds
do
run_osd $dir $i || return 1
done
create_pool $poolname 1 1
ceph osd pool set $poolname size 3
ceph osd pool set $poolname min_size 2
local poolid=$(ceph pg dump pools -f json | jq '.pool_stats' | jq '.[].poolid')
local pgid=$poolid.0
# enable required feature-bits for upmap
ceph osd set-require-min-compat-client luminous
# reset up to [1,2,3]
ceph osd pg-upmap $pgid 1 2 3 || return 1
flush_pg_stats || return 1
wait_for_clean || return 1
echo "writing initial objects"
# write a bunch of objects
for i in $(seq 1 $testobjects)
do
rados -p $poolname put existing_$i $dummyfile
done
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
# reset up to [1,4,5]
ceph osd pg-upmap $pgid 1 4 5 || return 1
# wait for peering to complete
sleep 2
# make sure osd.2 belongs to current acting set
ceph pg $pgid query | jq '.acting' | grep 2 || return 1
# kill osd.2
kill_daemons $dir KILL osd.2 || return 1
ceph osd down osd.2
# again, wait for peering to complete
sleep 2
# osd.2 should have been moved out from acting set
ceph pg $pgid query | jq '.acting' | grep 2 && return 1
# bring up osd.2
activate_osd $dir 2 || return 1
wait_for_osd up 2
# again, wait for peering to complete
sleep 2
# primary should be able to re-add osd.2 into acting
ceph pg $pgid query | jq '.acting' | grep 2 || return 1
WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
if ! grep -q "Active: got notify from previous acting member.*, requesting pg_temp change" $(find $dir -name '*osd*log')
then
echo failure
return 1
fi
echo "success"
delete_pool $poolname
kill_daemons $dir || return 1
}
main repeer-on-acting-back "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh repeer-on-acting-back.sh"
# End:
| 3,773 | 28.030769 | 124 | sh |
null | ceph-main/qa/standalone/osd/repro_long_log.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Cloudwatt <[email protected]>
# Copyright (C) 2018 Red Hat <[email protected]>
#
# Author: Josh Durgin <[email protected]>
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7100" # git grep '\<7100\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
PGID=
function test_log_size()
{
local PGID=$1
local EXPECTED=$2
local DUPS_EXPECTED=${3:-0}
ceph tell osd.\* flush_pg_stats
sleep 3
ceph pg $PGID query | jq .info.stats.log_size
ceph pg $PGID query | jq .info.stats.log_size | grep "${EXPECTED}"
ceph pg $PGID query | jq .info.stats.log_dups_size
ceph pg $PGID query | jq .info.stats.log_dups_size | grep "${DUPS_EXPECTED}"
}
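# Illustrative usage of the helper above (the values mirror TEST_repro_long_log2
# below): expect a pg log of 21 entries and 18 dup entries for $PGID
# test_log_size $PGID 21 18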
function setup_log_test() {
local dir=$1
local which=$2
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
ceph osd pool create test 1 1 || true
POOL_ID=$(ceph osd dump --format json | jq '.pools[] | select(.pool_name == "test") | .pool')
PGID="${POOL_ID}.0"
    # With a single PG, setting osd_target_pg_log_entries_per_osd to 20 results in a per-PG target log of 20 entries
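    # (worked example, roughly: the per-OSD budget is shared by the PGs on the
    #  OSD, so with one PG the per-PG target is 20 / 1 = 20, which the min/max
    #  bounds of 20/30 set below keep in range)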
ceph tell osd.\* injectargs -- --osd_target_pg_log_entries_per_osd 20 || return 1
ceph tell osd.\* injectargs -- --osd-min-pg-log-entries 20 || return 1
ceph tell osd.\* injectargs -- --osd-max-pg-log-entries 30 || return 1
ceph tell osd.\* injectargs -- --osd-pg-log-trim-min 10 || return 1
ceph tell osd.\* injectargs -- --osd_pg_log_dups_tracked 20 || return 1
touch $dir/foo
for i in $(seq 1 20)
do
rados -p test put foo $dir/foo || return 1
done
test_log_size $PGID 20 || return 1
rados -p test rm foo || return 1
# generate error entries
for i in $(seq 1 20)
do
rados -p test rm foo
done
# log should have been trimmed down to min_entries with one extra
test_log_size $PGID 21 || return 1
}
function TEST_repro_long_log1()
{
local dir=$1
setup_log_test $dir || return 1
# regular write should trim the log
rados -p test put foo $dir/foo || return 1
test_log_size $PGID 22 || return 1
}
function TEST_repro_long_log2()
{
local dir=$1
setup_log_test $dir || return 1
local PRIMARY=$(ceph pg $PGID query | jq '.info.stats.up_primary')
kill_daemons $dir TERM osd.$PRIMARY || return 1
CEPH_ARGS="--osd-max-pg-log-entries=2 --osd-pg-log-dups-tracked=3 --no-mon-config" ceph-objectstore-tool --data-path $dir/$PRIMARY --pgid $PGID --op trim-pg-log || return 1
activate_osd $dir $PRIMARY || return 1
wait_for_clean || return 1
test_log_size $PGID 21 18 || return 1
}
function TEST_trim_max_entries()
{
local dir=$1
setup_log_test $dir || return 1
ceph tell osd.\* injectargs -- --osd_target_pg_log_entries_per_osd 2 || return 1
ceph tell osd.\* injectargs -- --osd-min-pg-log-entries 2
ceph tell osd.\* injectargs -- --osd-pg-log-trim-min 2
ceph tell osd.\* injectargs -- --osd-pg-log-trim-max 4
ceph tell osd.\* injectargs -- --osd_pg_log_dups_tracked 0
# adding log entries, should only trim 4 and add one each time
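    # worked example of the arithmetic behind the assertions below: each rm adds
    # one log entry and trims up to osd-pg-log-trim-max (4) entries, so the log
    # shrinks by 3 per op: 21 -> 18 -> 15 -> 12 -> 9 -> 6 -> 3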
rados -p test rm foo
test_log_size $PGID 18 || return 1
rados -p test rm foo
test_log_size $PGID 15 || return 1
rados -p test rm foo
test_log_size $PGID 12 || return 1
rados -p test rm foo
test_log_size $PGID 9 || return 1
rados -p test rm foo
test_log_size $PGID 6 || return 1
rados -p test rm foo
test_log_size $PGID 3 || return 1
# below trim_min
rados -p test rm foo
test_log_size $PGID 4 || return 1
rados -p test rm foo
test_log_size $PGID 3 || return 1
rados -p test rm foo
test_log_size $PGID 4 || return 1
rados -p test rm foo
test_log_size $PGID 3 || return 1
}
function TEST_trim_max_entries_with_dups()
{
local dir=$1
setup_log_test $dir || return 1
ceph tell osd.\* injectargs -- --osd_target_pg_log_entries_per_osd 2 || return 1
ceph tell osd.\* injectargs -- --osd-min-pg-log-entries 2
ceph tell osd.\* injectargs -- --osd-pg-log-trim-min 2
ceph tell osd.\* injectargs -- --osd-pg-log-trim-max 4
ceph tell osd.\* injectargs -- --osd_pg_log_dups_tracked 20 || return 1
    # keep adding log entries; each op should only trim 4 and add one,
    # with the trimmed entries accumulating as dups until capped at
    # osd_pg_log_dups_tracked (20)
rados -p test rm foo
test_log_size $PGID 18 2 || return 1
rados -p test rm foo
test_log_size $PGID 15 6 || return 1
rados -p test rm foo
test_log_size $PGID 12 10 || return 1
rados -p test rm foo
test_log_size $PGID 9 14 || return 1
rados -p test rm foo
test_log_size $PGID 6 18 || return 1
rados -p test rm foo
test_log_size $PGID 3 20 || return 1
# below trim_min
rados -p test rm foo
test_log_size $PGID 4 20 || return 1
rados -p test rm foo
test_log_size $PGID 3 20 || return 1
rados -p test rm foo
test_log_size $PGID 4 20 || return 1
rados -p test rm foo
test_log_size $PGID 3 20 || return 1
}
main repro-long-log "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && ../qa/run-standalone.sh repro_long_log.sh"
# End:
| 6,109 | 29.858586 | 176 | sh |
null | ceph-main/qa/standalone/scrub/osd-mapper.sh | #!/usr/bin/env bash
# -*- mode:text; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=2 smarttab
#
# test the handling of a corrupted SnapMapper DB by Scrub
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
source $CEPH_ROOT/qa/standalone/scrub/scrub-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7144" # git grep '\<7144\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
export -n CEPH_CLI_TEST_DUP_COMMAND
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
# one clone & multiple snaps (according to the number of parameters)
function make_a_clone()
{
    # turn off '-x' (but remember previous state)
local saved_echo_flag=${-//[^x]/}
set +x
local pool=$1
local obj=$2
echo $RANDOM | rados -p $pool put $obj - || return 1
shift 2
for snap in $@ ; do
rados -p $pool mksnap $snap || return 1
done
if [[ -n "$saved_echo_flag" ]]; then set -x; fi
}
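# Illustrative usage of the helper above (hypothetical pool/object/snap names,
# shown here only as an example):
# make_a_clone mypool myobj snap-a snap-b
# i.e. write myobj, then take the pool snapshots snap-a and snap-b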
function TEST_truncated_sna_record() {
local dir=$1
local -A cluster_conf=(
['osds_num']="3"
['pgs_in_pool']="4"
['pool_name']="test"
)
local extr_dbg=3
(( extr_dbg > 1 )) && echo "Dir: $dir"
standard_scrub_cluster $dir cluster_conf
ceph tell osd.* config set osd_stats_update_period_not_scrubbing "1"
ceph tell osd.* config set osd_stats_update_period_scrubbing "1"
local osdn=${cluster_conf['osds_num']}
local poolid=${cluster_conf['pool_id']}
local poolname=${cluster_conf['pool_name']}
local objname="objxxx"
# create an object and clone it
make_a_clone $poolname $objname snap01 snap02 || return 1
make_a_clone $poolname $objname snap13 || return 1
make_a_clone $poolname $objname snap24 snap25 || return 1
echo $RANDOM | rados -p $poolname put $objname - || return 1
    # identify the PG and the primary OSD
local pgid=`ceph --format=json-pretty osd map $poolname $objname | jq -r '.pgid'`
local osd=`ceph --format=json-pretty osd map $poolname $objname | jq -r '.up[0]'`
echo "pgid is $pgid (primary: osd.$osd)"
# turn on the publishing of test data in the 'scrubber' section of 'pg query' output
set_query_debug $pgid
# verify the existence of these clones
(( extr_dbg >= 1 )) && rados --format json-pretty -p $poolname listsnaps $objname
# scrub the PG
ceph pg $pgid deep_scrub || return 1
# we aren't just waiting for the scrub to terminate, but also for the
# logs to be published
sleep 3
ceph pg dump pgs
until grep -a -q -- "event: --^^^^---- ScrubFinished" $dir/osd.$osd.log ; do
sleep 0.2
done
ceph pg dump pgs
ceph osd set noscrub || return 1
ceph osd set nodeep-scrub || return 1
sleep 5
grep -a -q -v "ERR" $dir/osd.$osd.log || return 1
# kill the OSDs
kill_daemons $dir TERM osd || return 1
(( extr_dbg >= 2 )) && ceph-kvstore-tool bluestore-kv $dir/0 dump "p"
(( extr_dbg >= 2 )) && ceph-kvstore-tool bluestore-kv $dir/2 dump "p" | grep -a SNA_
(( extr_dbg >= 2 )) && grep -a SNA_ /tmp/oo2.dump
(( extr_dbg >= 2 )) && ceph-kvstore-tool bluestore-kv $dir/2 dump p 2> /dev/null
local num_sna_b4=`ceph-kvstore-tool bluestore-kv $dir/$osd dump p 2> /dev/null | grep -a -e 'SNA_[0-9]_000000000000000[0-9]_000000000000000' \
| awk -e '{print $2;}' | wc -l`
for sdn in $(seq 0 $(expr $osdn - 1))
do
kvdir=$dir/$sdn
echo "corrupting the SnapMapper DB of osd.$sdn (db: $kvdir)"
(( extr_dbg >= 3 )) && ceph-kvstore-tool bluestore-kv $kvdir dump "p"
# truncate the 'mapping' (SNA_) entry corresponding to the snap13 clone
KY=`ceph-kvstore-tool bluestore-kv $kvdir dump p 2> /dev/null | grep -a -e 'SNA_[0-9]_0000000000000003_000000000000000' \
| awk -e '{print $2;}'`
(( extr_dbg >= 1 )) && echo "SNA key: $KY" | cat -v
tmp_fn1=`mktemp -p /tmp --suffix="_the_val"`
(( extr_dbg >= 1 )) && echo "Value dumped in: $tmp_fn1"
ceph-kvstore-tool bluestore-kv $kvdir get p "$KY" out $tmp_fn1 2> /dev/null
(( extr_dbg >= 2 )) && od -xc $tmp_fn1
NKY=${KY:0:-30}
ceph-kvstore-tool bluestore-kv $kvdir rm "p" "$KY" 2> /dev/null
ceph-kvstore-tool bluestore-kv $kvdir set "p" "$NKY" in $tmp_fn1 2> /dev/null
(( extr_dbg >= 1 )) || rm $tmp_fn1
done
orig_osd_args=" ${cluster_conf['osd_args']}"
orig_osd_args=" $(echo $orig_osd_args)"
(( extr_dbg >= 2 )) && echo "Copied OSD args: /$orig_osd_args/ /${orig_osd_args:1}/"
for sdn in $(seq 0 $(expr $osdn - 1))
do
CEPH_ARGS="$CEPH_ARGS $orig_osd_args" activate_osd $dir $sdn
done
sleep 1
for sdn in $(seq 0 $(expr $osdn - 1))
do
timeout 60 ceph tell osd.$sdn version
done
rados --format json-pretty -p $poolname listsnaps $objname
# when scrubbing now - we expect the scrub to emit a cluster log ERR message regarding SnapMapper internal inconsistency
ceph osd unset nodeep-scrub || return 1
ceph osd unset noscrub || return 1
# what is the primary now?
local cur_prim=`ceph --format=json-pretty osd map $poolname $objname | jq -r '.up[0]'`
ceph pg dump pgs
sleep 2
ceph pg $pgid deep_scrub || return 1
sleep 5
ceph pg dump pgs
(( extr_dbg >= 1 )) && grep -a "ERR" $dir/osd.$cur_prim.log
grep -a -q "ERR" $dir/osd.$cur_prim.log || return 1
# but did we fix the snap issue? let's try scrubbing again
local prev_err_cnt=`grep -a "ERR" $dir/osd.$cur_prim.log | wc -l`
echo "prev count: $prev_err_cnt"
# scrub again. No errors expected this time
ceph pg $pgid deep_scrub || return 1
sleep 5
ceph pg dump pgs
(( extr_dbg >= 1 )) && grep -a "ERR" $dir/osd.$cur_prim.log
local current_err_cnt=`grep -a "ERR" $dir/osd.$cur_prim.log | wc -l`
(( extr_dbg >= 1 )) && echo "current count: $current_err_cnt"
(( current_err_cnt == prev_err_cnt )) || return 1
kill_daemons $dir TERM osd || return 1
kvdir=$dir/$cur_prim
(( extr_dbg >= 2 )) && ceph-kvstore-tool bluestore-kv $kvdir dump p 2> /dev/null | grep -a -e 'SNA_[0-9]_' \
| awk -e '{print $2;}'
local num_sna_full=`ceph-kvstore-tool bluestore-kv $kvdir dump p 2> /dev/null | grep -a -e 'SNA_[0-9]_000000000000000[0-9]_000000000000000' \
| awk -e '{print $2;}' | wc -l`
(( num_sna_full == num_sna_b4 )) || return 1
return 0
}
main osd-mapper "$@"
| 6,662 | 35.409836 | 146 | sh |
null | ceph-main/qa/standalone/scrub/osd-recovery-scrub.sh | #! /usr/bin/env bash
#
# Copyright (C) 2017 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7124" # git grep '\<7124\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--osd-op-queue=wpq "
export -n CEPH_CLI_TEST_DUP_COMMAND
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
$func $dir || return 1
done
}
# Simple test for "not scheduling scrubs due to active recovery"
# OSD::sched_scrub() called on all OSDs during ticks
function TEST_recovery_scrub_1() {
local dir=$1
local poolname=test
TESTDATA="testdata.$$"
OSDS=4
PGS=1
OBJECTS=100
ERRORS=0
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-interval-randomize-ratio=0 "
ceph_osd_args+="--osd_scrub_backoff_ratio=0 "
ceph_osd_args+="--osd_stats_update_period_not_scrubbing=3 "
ceph_osd_args+="--osd_stats_update_period_scrubbing=2"
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd --osd_scrub_during_recovery=false || return 1
done
# Create a pool with $PGS pgs
create_pool $poolname $PGS $PGS
wait_for_clean || return 1
poolid=$(ceph osd dump | grep "^pool.*[']test[']" | awk '{ print $2 }')
ceph pg dump pgs
dd if=/dev/urandom of=$TESTDATA bs=1M count=50
for i in $(seq 1 $OBJECTS)
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
ceph osd pool set $poolname size 4
# Wait for recovery to start
set -o pipefail
count=0
while(true)
do
if ceph --format json pg dump pgs |
jq '.pg_stats | [.[] | .state | contains("recovering")]' | grep -q true
then
break
fi
sleep 2
if test "$count" -eq "10"
then
echo "Recovery never started"
return 1
fi
count=$(expr $count + 1)
done
set +o pipefail
ceph pg dump pgs
sleep 10
# Work around for http://tracker.ceph.com/issues/38195
kill_daemons $dir #|| return 1
declare -a err_strings
err_strings[0]="not scheduling scrubs due to active recovery"
for osd in $(seq 0 $(expr $OSDS - 1))
do
grep "not scheduling scrubs" $dir/osd.${osd}.log
done
for err_string in "${err_strings[@]}"
do
found=false
count=0
for osd in $(seq 0 $(expr $OSDS - 1))
do
if grep -q "$err_string" $dir/osd.${osd}.log
then
found=true
count=$(expr $count + 1)
fi
done
if [ "$found" = "false" ]; then
echo "Missing log message '$err_string'"
ERRORS=$(expr $ERRORS + 1)
fi
[ $count -eq $OSDS ] || return 1
done
teardown $dir || return 1
if [ $ERRORS != "0" ];
then
echo "TEST FAILED WITH $ERRORS ERRORS"
return 1
fi
echo "TEST PASSED"
return 0
}
##
# a modified version of wait_for_scrub(), which terminates if the Primary
# of the to-be-scrubbed PG changes
#
# Given the *last_scrub*, wait for scrub to happen on **pgid**. It
# will fail if scrub does not complete within $TIMEOUT seconds. The
# scrub is considered complete whenever the **get_last_scrub_stamp** function
# reports a timestamp different from the one given in argument.
#
# @param pgid the id of the PG
# @param the primary OSD when started
# @param last_scrub timestamp of the last scrub for *pgid*
# @return 0 on success, 1 on error
#
function wait_for_scrub_mod() {
local pgid=$1
local orig_primary=$2
local last_scrub="$3"
local sname=${4:-last_scrub_stamp}
for ((i=0; i < $TIMEOUT; i++)); do
sleep 0.2
if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
return 0
fi
sleep 1
# are we still the primary?
local current_primary=`bin/ceph pg $pgid query | jq '.acting[0]' `
if [ $orig_primary != $current_primary ]; then
echo $orig_primary no longer primary for $pgid
return 0
fi
done
return 1
}
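# Illustrative usage (placeholder values; the real call site is in pg_scrub_mod
# below):
# wait_for_scrub_mod 1.0 3 "$(get_last_scrub_stamp 1.0)" || return 1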
##
# A modified version of pg_scrub()
#
# Run scrub on **pgid** and wait until it completes. The pg_scrub_mod
# function will fail if the scrub does not complete within $TIMEOUT
# seconds. The scrub is considered complete whenever the
# **get_last_scrub_stamp** function reports a timestamp different from
# the one stored before starting the scrub, or whenever the Primary
# changes.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
function pg_scrub_mod() {
local pgid=$1
local last_scrub=$(get_last_scrub_stamp $pgid)
# locate the primary
local my_primary=`bin/ceph pg $pgid query | jq '.acting[0]' `
local recovery=false
ceph pg scrub $pgid
#ceph --format json pg dump pgs | jq ".pg_stats | .[] | select(.pgid == \"$pgid\") | .state"
if ceph --format json pg dump pgs | jq ".pg_stats | .[] | select(.pgid == \"$pgid\") | .state" | grep -q recovering
then
recovery=true
fi
wait_for_scrub_mod $pgid $my_primary "$last_scrub" || return 1
if test $recovery = "true"
then
return 2
fi
}
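# Illustrative usage (placeholder pgid): pg_scrub_mod returns 0 when the scrub
# completed (or the primary changed), 2 when the PG was recovering at the time
# the scrub was requested, and 1 on timeout:
# pg_scrub_mod 1.0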
# Same as wait_background() except that it checks for exit code 2 and bumps recov_scrub_count
function wait_background_check() {
# We extract the PIDS from the variable name
pids=${!1}
return_code=0
for pid in $pids; do
wait $pid
retcode=$?
if test $retcode -eq 2
then
recov_scrub_count=$(expr $recov_scrub_count + 1)
elif test $retcode -ne 0
then
# If one process failed then return 1
return_code=1
fi
done
    # We empty the variable, reporting that all processes ended
eval "$1=''"
return $return_code
}
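# Illustrative usage, mirroring the pattern used in TEST_recovery_scrub_2 below
# (run_in_background and the pids variable come from the sourced helpers):
# pids=""
# run_in_background pids pg_scrub_mod 1.0
# wait_background_check pids || echo "a background scrub failed"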
# osd_scrub_during_recovery=true makes sure scrubs happen during recovery
function TEST_recovery_scrub_2() {
local dir=$1
local poolname=test
TESTDATA="testdata.$$"
OSDS=8
PGS=32
OBJECTS=40
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-interval-randomize-ratio=0 "
ceph_osd_args+="--osd_scrub_backoff_ratio=0 "
ceph_osd_args+="--osd_stats_update_period_not_scrubbing=3 "
ceph_osd_args+="--osd_stats_update_period_scrubbing=2"
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd --osd_scrub_during_recovery=true --osd_recovery_sleep=10 \
$ceph_osd_args || return 1
done
# Create a pool with $PGS pgs
create_pool $poolname $PGS $PGS
wait_for_clean || return 1
poolid=$(ceph osd dump | grep "^pool.*[']test[']" | awk '{ print $2 }')
dd if=/dev/urandom of=$TESTDATA bs=1M count=50
for i in $(seq 1 $OBJECTS)
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
ceph osd pool set $poolname size 3
ceph pg dump pgs
# note that the following will be needed if the mclock scheduler is specified
#ceph tell osd.* config get osd_mclock_override_recovery_settings
# the '_max_active' is expected to be 0
ceph tell osd.1 config get osd_recovery_max_active
# both next parameters are expected to be >=3
ceph tell osd.1 config get osd_recovery_max_active_hdd
ceph tell osd.1 config get osd_recovery_max_active_ssd
# Wait for recovery to start
count=0
while(true)
do
#ceph --format json pg dump pgs | jq '.pg_stats | [.[].state]'
if test $(ceph --format json pg dump pgs |
jq '.pg_stats | [.[].state]'| grep recovering | wc -l) -ge 2
then
break
fi
sleep 2
if test "$count" -eq "10"
then
echo "Not enough recovery started simultaneously"
return 1
fi
count=$(expr $count + 1)
done
ceph pg dump pgs
pids=""
recov_scrub_count=0
for pg in $(seq 0 $(expr $PGS - 1))
do
run_in_background pids pg_scrub_mod $poolid.$(printf "%x" $pg)
done
wait_background_check pids
return_code=$?
if [ $return_code -ne 0 ]; then return $return_code; fi
ERRORS=0
if test $recov_scrub_count -eq 0
then
echo "No scrubs occurred while PG recovering"
ERRORS=$(expr $ERRORS + 1)
fi
pidfile=$(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid')
pid=$(cat $pidfile)
if ! kill -0 $pid
then
echo "OSD crash occurred"
#tail -100 $dir/osd.0.log
ERRORS=$(expr $ERRORS + 1)
fi
# Work around for http://tracker.ceph.com/issues/38195
kill_daemons $dir #|| return 1
declare -a err_strings
err_strings[0]="not scheduling scrubs due to active recovery"
for osd in $(seq 0 $(expr $OSDS - 1))
do
grep "not scheduling scrubs" $dir/osd.${osd}.log
done
for err_string in "${err_strings[@]}"
do
found=false
for osd in $(seq 0 $(expr $OSDS - 1))
do
if grep "$err_string" $dir/osd.${osd}.log > /dev/null;
then
found=true
fi
done
if [ "$found" = "true" ]; then
echo "Found log message not expected '$err_string'"
ERRORS=$(expr $ERRORS + 1)
fi
done
teardown $dir || return 1
if [ $ERRORS != "0" ];
then
echo "TEST FAILED WITH $ERRORS ERRORS"
return 1
fi
echo "TEST PASSED"
return 0
}
main osd-recovery-scrub "$@"
# Local Variables:
# compile-command: "cd build ; make -j4 && \
# ../qa/run-standalone.sh osd-recovery-scrub.sh"
# End:
| 10,362 | 27.008108 | 119 | sh |
null | ceph-main/qa/standalone/scrub/osd-scrub-dump.sh | #!/usr/bin/env bash
#
# Copyright (C) 2019 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
MAX_SCRUBS=4
SCRUB_SLEEP=3
POOL_SIZE=3
function run() {
local dir=$1
shift
local CHUNK_MAX=5
export CEPH_MON="127.0.0.1:7184" # git grep '\<7184\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--osd_max_scrubs=$MAX_SCRUBS "
CEPH_ARGS+="--osd_shallow_scrub_chunk_max=$CHUNK_MAX "
CEPH_ARGS+="--osd_scrub_sleep=$SCRUB_SLEEP "
CEPH_ARGS+="--osd_pool_default_size=$POOL_SIZE "
# Set scheduler to "wpq" until there's a reliable way to query scrub states
# with "--osd-scrub-sleep" set to 0. The "mclock_scheduler" overrides the
# scrub sleep to 0 and as a result the checks in the test fail.
CEPH_ARGS+="--osd_op_queue=wpq "
export -n CEPH_CLI_TEST_DUP_COMMAND
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_recover_unexpected() {
local dir=$1
shift
local OSDS=6
local PGS=16
local POOLS=3
local OBJS=1000
run_mon $dir a || return 1
run_mgr $dir x || return 1
for o in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $o
done
for i in $(seq 1 $POOLS)
do
create_pool test$i $PGS $PGS
done
wait_for_clean || return 1
dd if=/dev/urandom of=datafile bs=4k count=2
for i in $(seq 1 $POOLS)
do
for j in $(seq 1 $OBJS)
do
rados -p test$i put obj$j datafile
done
done
rm datafile
ceph osd set noscrub
ceph osd set nodeep-scrub
for qpg in $(ceph pg dump pgs --format=json-pretty | jq '.pg_stats[].pgid')
do
eval pg=$qpg # strip quotes around qpg
ceph tell $pg scrub
done
ceph pg dump pgs
max=$(CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_scrub_reservations | jq '.osd_max_scrubs')
if [ $max != $MAX_SCRUBS ]; then
echo "ERROR: Incorrect osd_max_scrubs from dump_scrub_reservations"
return 1
fi
ceph osd unset noscrub
ok=false
for i in $(seq 0 300)
do
ceph pg dump pgs
if ceph pg dump pgs | grep '+scrubbing'; then
ok=true
break
fi
sleep 1
done
if test $ok = "false"; then
echo "ERROR: Test set-up failed no scrubbing"
return 1
fi
local total=0
local zerocount=0
local maxzerocount=3
while(true)
do
pass=0
for o in $(seq 0 $(expr $OSDS - 1))
do
CEPH_ARGS='' ceph daemon $(get_asok_path osd.$o) dump_scrub_reservations
scrubs=$(CEPH_ARGS='' ceph daemon $(get_asok_path osd.$o) dump_scrub_reservations | jq '.scrubs_local + .scrubs_remote')
if [ $scrubs -gt $MAX_SCRUBS ]; then
echo "ERROR: More than $MAX_SCRUBS currently reserved"
return 1
fi
pass=$(expr $pass + $scrubs)
done
if [ $pass = "0" ]; then
zerocount=$(expr $zerocount + 1)
fi
if [ $zerocount -gt $maxzerocount ]; then
break
fi
total=$(expr $total + $pass)
if [ $total -gt 0 ]; then
# already saw some reservations, so wait longer to avoid excessive over-counting.
# Note the loop itself takes about 2-3 seconds
sleep $(expr $SCRUB_SLEEP - 2)
else
sleep 0.5
fi
done
# Check that there are no more scrubs
for i in $(seq 0 5)
do
if ceph pg dump pgs | grep '+scrubbing'; then
echo "ERROR: Extra scrubs after test completion...not expected"
return 1
fi
sleep $SCRUB_SLEEP
done
echo $total total reservations seen
    # A somewhat arbitrary lower bound: PGS * POOLS * POOL_SIZE is the number of total scrub
    # reservations that must occur. However, the loop above might see the same reservation more
    # than once.
actual_reservations=$(expr $PGS \* $POOLS \* $POOL_SIZE)
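    # worked example with the values used in this test:
    # 16 (PGS) * 3 (POOLS) * 3 (POOL_SIZE) = 144 expected reservations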
if [ $total -lt $actual_reservations ]; then
echo "ERROR: Unexpectedly low amount of scrub reservations seen during test"
return 1
fi
return 0
}
main osd-scrub-dump "$@"
# Local Variables:
# compile-command: "cd build ; make check && \
# ../qa/run-standalone.sh osd-scrub-dump.sh"
# End:
| 4,814 | 25.75 | 122 | sh |
null | ceph-main/qa/standalone/scrub/osd-scrub-repair.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
set -x
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
if [ `uname` = FreeBSD ]; then
# erasure coding overwrites are only tested on Bluestore
# erasure coding on filestore is unsafe
# http://docs.ceph.com/en/latest/rados/operations/erasure-code/#erasure-coding-with-overwrites
use_ec_overwrite=false
else
use_ec_overwrite=true
fi
# Test development and debugging
# Set to "yes" in order to ignore diff errors and save results to update test
getjson="no"
# Filter out mtime and local_mtime dates, version, prior_version and last_reqid (client) from any object_info.
jqfilter='def walk(f):
. as $in
| if type == "object" then
reduce keys[] as $key
( {}; . + { ($key): ($in[$key] | walk(f)) } ) | f
elif type == "array" then map( walk(f) ) | f
else f
end;
walk(if type == "object" then del(.mtime) else . end)
| walk(if type == "object" then del(.local_mtime) else . end)
| walk(if type == "object" then del(.last_reqid) else . end)
| walk(if type == "object" then del(.version) else . end)
| walk(if type == "object" then del(.prior_version) else . end)'
sortkeys='import json; import sys ; JSON=sys.stdin.read() ; ud = json.loads(JSON) ; print(json.dumps(ud, sort_keys=True, indent=2))'
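# A minimal sketch of how the two helpers above are typically combined (the
# exact pipelines used by the tests further below may differ):
# rados list-inconsistent-obj $pg --format=json | jq "$jqfilter" | python3 -c "$sortkeys"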
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7107" # git grep '\<7107\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--osd-skip-data-digest=false "
export -n CEPH_CLI_TEST_DUP_COMMAND
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function add_something() {
local dir=$1
local poolname=$2
local obj=${3:-SOMETHING}
local scrub=${4:-noscrub}
if [ "$scrub" = "noscrub" ];
then
ceph osd set noscrub || return 1
ceph osd set nodeep-scrub || return 1
else
ceph osd unset noscrub || return 1
ceph osd unset nodeep-scrub || return 1
fi
local payload=ABCDEF
echo $payload > $dir/ORIGINAL
rados --pool $poolname put $obj $dir/ORIGINAL || return 1
}
#
# Corrupt one copy of a replicated pool
#
function TEST_corrupt_and_repair_replicated() {
local dir=$1
local poolname=rbd
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
add_something $dir $poolname || return 1
corrupt_and_repair_one $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1
# Reproduces http://tracker.ceph.com/issues/8914
corrupt_and_repair_one $dir $poolname $(get_primary $poolname SOMETHING) || return 1
}
#
# Allow repair to be scheduled when some recovering is still undergoing on the same OSD
#
function TEST_allow_repair_during_recovery() {
local dir=$1
local poolname=rbd
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_scrub_during_recovery=false \
--osd_repair_during_recovery=true \
--osd_debug_pretend_recovery_active=true || return 1
run_osd $dir 1 --osd_scrub_during_recovery=false \
--osd_repair_during_recovery=true \
--osd_debug_pretend_recovery_active=true || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
add_something $dir $poolname || return 1
corrupt_and_repair_one $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1
}
#
# Skip non-repair scrub correctly during recovery
#
function TEST_skip_non_repair_during_recovery() {
local dir=$1
local poolname=rbd
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_scrub_during_recovery=false \
--osd_repair_during_recovery=true \
--osd_debug_pretend_recovery_active=true || return 1
run_osd $dir 1 --osd_scrub_during_recovery=false \
--osd_repair_during_recovery=true \
--osd_debug_pretend_recovery_active=true || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
add_something $dir $poolname || return 1
scrub_and_not_schedule $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1
}
function scrub_and_not_schedule() {
local dir=$1
local poolname=$2
local osd=$3
#
# 1) start a non-repair scrub
#
local pg=$(get_pg $poolname SOMETHING)
local last_scrub=$(get_last_scrub_stamp $pg)
ceph pg scrub $pg
#
# 2) Assure the scrub is not scheduled
#
for ((i=0; i < 3; i++)); do
if test "$(get_last_scrub_stamp $pg)" '>' "$last_scrub" ; then
return 1
fi
sleep 1
done
#
# 3) Access to the file must OK
#
objectstore_tool $dir $osd SOMETHING list-attrs || return 1
rados --pool $poolname get SOMETHING $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
}
function corrupt_and_repair_two() {
local dir=$1
local poolname=$2
local first=$3
local second=$4
#
# 1) remove the corresponding file from the OSDs
#
pids=""
run_in_background pids objectstore_tool $dir $first SOMETHING remove
run_in_background pids objectstore_tool $dir $second SOMETHING remove
wait_background pids
return_code=$?
if [ $return_code -ne 0 ]; then return $return_code; fi
#
# 2) repair the PG
#
local pg=$(get_pg $poolname SOMETHING)
repair $pg
#
# 3) The files must be back
#
pids=""
run_in_background pids objectstore_tool $dir $first SOMETHING list-attrs
run_in_background pids objectstore_tool $dir $second SOMETHING list-attrs
wait_background pids
return_code=$?
if [ $return_code -ne 0 ]; then return $return_code; fi
rados --pool $poolname get SOMETHING $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
}
#
# 1) add an object
# 2) remove the corresponding file from a designated OSD
# 3) repair the PG
# 4) check that the file has been restored in the designated OSD
#
function corrupt_and_repair_one() {
local dir=$1
local poolname=$2
local osd=$3
#
# 1) remove the corresponding file from the OSD
#
objectstore_tool $dir $osd SOMETHING remove || return 1
#
# 2) repair the PG
#
local pg=$(get_pg $poolname SOMETHING)
repair $pg
#
# 3) The file must be back
#
objectstore_tool $dir $osd SOMETHING list-attrs || return 1
rados --pool $poolname get SOMETHING $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
}
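# Illustrative usage (as in TEST_corrupt_and_repair_replicated above):
# corrupt_and_repair_one $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1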
function corrupt_and_repair_erasure_coded() {
local dir=$1
local poolname=$2
add_something $dir $poolname || return 1
local primary=$(get_primary $poolname SOMETHING)
local -a osds=($(get_osds $poolname SOMETHING | sed -e "s/$primary//"))
local not_primary_first=${osds[0]}
local not_primary_second=${osds[1]}
# Reproduces http://tracker.ceph.com/issues/10017
corrupt_and_repair_one $dir $poolname $primary || return 1
# Reproduces http://tracker.ceph.com/issues/10409
corrupt_and_repair_one $dir $poolname $not_primary_first || return 1
corrupt_and_repair_two $dir $poolname $not_primary_first $not_primary_second || return 1
corrupt_and_repair_two $dir $poolname $primary $not_primary_first || return 1
}
function auto_repair_erasure_coded() {
local dir=$1
local allow_overwrites=$2
local poolname=ecpool
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-auto-repair=true \
--osd-deep-scrub-interval=5 \
--osd-scrub-max-interval=5 \
--osd-scrub-min-interval=5 \
--osd-scrub-interval-randomize-ratio=0"
for id in $(seq 0 2) ; do
run_osd $dir $id $ceph_osd_args || return 1
done
create_rbd_pool || return 1
wait_for_clean || return 1
# Create an EC pool
create_ec_pool $poolname $allow_overwrites k=2 m=1 || return 1
# Put an object
local payload=ABCDEF
echo $payload > $dir/ORIGINAL
rados --pool $poolname put SOMETHING $dir/ORIGINAL || return 1
# Remove the object from one shard physically
# Restarted osd get $ceph_osd_args passed
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING remove || return 1
# Wait for auto repair
local pgid=$(get_pg $poolname SOMETHING)
wait_for_scrub $pgid "$(get_last_scrub_stamp $pgid)"
wait_for_clean || return 1
# Verify - the file should be back
# Restarted osd get $ceph_osd_args passed
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING list-attrs || return 1
rados --pool $poolname get SOMETHING $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
}
function TEST_auto_repair_erasure_coded_appends() {
auto_repair_erasure_coded $1 false
}
function TEST_auto_repair_erasure_coded_overwrites() {
if [ "$use_ec_overwrite" = "true" ]; then
auto_repair_erasure_coded $1 true
fi
}
# initiate a scrub, then check for the (expected) 'scrubbing' and the
# (not expected until an error was identified) 'repair'
# Arguments: osd#, pg, sleep time
function initiate_and_fetch_state() {
local the_osd="osd.$1"
local pgid=$2
local last_scrub=$(get_last_scrub_stamp $pgid)
set_config "osd" "$1" "osd_scrub_sleep" "$3"
set_config "osd" "$1" "osd_scrub_auto_repair" "true"
flush_pg_stats
date --rfc-3339=ns
# note: must initiate a "regular" (periodic) deep scrub - not an operator-initiated one
env CEPH_ARGS= ceph --format json daemon $(get_asok_path $the_osd) deep_scrub "$pgid"
env CEPH_ARGS= ceph --format json daemon $(get_asok_path $the_osd) scrub "$pgid"
# wait for 'scrubbing' to appear
for ((i=0; i < 80; i++)); do
st=`ceph pg $pgid query --format json | jq '.state' `
echo $i ") state now: " $st
case "$st" in
*scrubbing*repair* ) echo "found scrub+repair"; return 1;; # PR #41258 should have prevented this
*scrubbing* ) echo "found scrub"; return 0;;
*inconsistent* ) echo "Got here too late. Scrub has already finished"; return 1;;
*recovery* ) echo "Got here too late. Scrub has already finished."; return 1;;
* ) echo $st;;
esac
if [ $((i % 10)) == 4 ]; then
echo "loop --------> " $i
fi
sleep 0.3
done
echo "Timeout waiting for deep-scrub of " $pgid " on " $the_osd " to start"
return 1
}
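# Illustrative usage (matching the call in TEST_auto_repair_bluestore_tag below;
# the third argument is the osd_scrub_sleep value to inject, in seconds):
# initiate_and_fetch_state "$primary" "$pgid" "3.0"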
function wait_end_of_scrub() { # osd# pg
local the_osd="osd.$1"
local pgid=$2
for ((i=0; i < 40; i++)); do
st=`ceph pg $pgid query --format json | jq '.state' `
echo "wait-scrub-end state now: " $st
[[ $st =~ (.*scrubbing.*) ]] || break
if [ $((i % 5)) == 4 ] ; then
flush_pg_stats
fi
sleep 0.3
done
if [[ $st =~ (.*scrubbing.*) ]]
then
# a timeout
return 1
fi
return 0
}
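# Illustrative usage (as in TEST_auto_repair_bluestore_tag below):
# wait_end_of_scrub "$primary" "$pgid" || return 1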
function TEST_auto_repair_bluestore_tag() {
local dir=$1
local poolname=testpool
# Launch a cluster with 3 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
# Set scheduler to "wpq" until there's a reliable way to query scrub states
# with "--osd-scrub-sleep" set to 0. The "mclock_scheduler" overrides the
# scrub sleep to 0 and as a result the checks in the test fail.
local ceph_osd_args="--osd-scrub-auto-repair=true \
--osd_deep_scrub_randomize_ratio=0 \
--osd-scrub-interval-randomize-ratio=0 \
--osd-op-queue=wpq"
for id in $(seq 0 2) ; do
run_osd $dir $id $ceph_osd_args || return 1
done
create_pool $poolname 1 1 || return 1
ceph osd pool set $poolname size 2
wait_for_clean || return 1
# Put an object
local payload=ABCDEF
echo $payload > $dir/ORIGINAL
rados --pool $poolname put SOMETHING $dir/ORIGINAL || return 1
# Remove the object from one shard physically
# Restarted osd get $ceph_osd_args passed
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING remove || return 1
local pgid=$(get_pg $poolname SOMETHING)
local primary=$(get_primary $poolname SOMETHING)
echo "Affected PG " $pgid " w/ primary " $primary
local last_scrub_stamp="$(get_last_scrub_stamp $pgid)"
initiate_and_fetch_state $primary $pgid "3.0"
r=$?
echo "initiate_and_fetch_state ret: " $r
set_config "osd" "$1" "osd_scrub_sleep" "0"
if [ $r -ne 0 ]; then
return 1
fi
wait_end_of_scrub "$primary" "$pgid" || return 1
ceph pg dump pgs
# Verify - the file should be back
# Restarted osd get $ceph_osd_args passed
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING list-attrs || return 1
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING get-bytes $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
grep scrub_finish $dir/osd.${primary}.log
}
function TEST_auto_repair_bluestore_basic() {
local dir=$1
local poolname=testpool
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-auto-repair=true \
--osd_deep_scrub_randomize_ratio=0 \
--osd-scrub-interval-randomize-ratio=0"
for id in $(seq 0 2) ; do
run_osd $dir $id $ceph_osd_args || return 1
done
create_pool $poolname 1 1 || return 1
ceph osd pool set $poolname size 2
wait_for_clean || return 1
# Put an object
local payload=ABCDEF
echo $payload > $dir/ORIGINAL
rados --pool $poolname put SOMETHING $dir/ORIGINAL || return 1
# Remove the object from one shard physically
# Restarted osd get $ceph_osd_args passed
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING remove || return 1
local pgid=$(get_pg $poolname SOMETHING)
local primary=$(get_primary $poolname SOMETHING)
local last_scrub_stamp="$(get_last_scrub_stamp $pgid)"
ceph tell $pgid deep_scrub
ceph tell $pgid scrub
# Wait for auto repair
wait_for_scrub $pgid "$last_scrub_stamp" || return 1
wait_for_clean || return 1
ceph pg dump pgs
# Verify - the file should be back
# Restarted osd get $ceph_osd_args passed
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING list-attrs || return 1
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING get-bytes $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
grep scrub_finish $dir/osd.${primary}.log
}
function TEST_auto_repair_bluestore_scrub() {
local dir=$1
local poolname=testpool
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-auto-repair=true \
--osd_deep_scrub_randomize_ratio=0 \
--osd-scrub-interval-randomize-ratio=0 \
--osd-scrub-backoff-ratio=0"
for id in $(seq 0 2) ; do
run_osd $dir $id $ceph_osd_args || return 1
done
create_pool $poolname 1 1 || return 1
ceph osd pool set $poolname size 2
wait_for_clean || return 1
# Put an object
local payload=ABCDEF
echo $payload > $dir/ORIGINAL
rados --pool $poolname put SOMETHING $dir/ORIGINAL || return 1
# Remove the object from one shard physically
# Restarted osd get $ceph_osd_args passed
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING remove || return 1
local pgid=$(get_pg $poolname SOMETHING)
local primary=$(get_primary $poolname SOMETHING)
local last_scrub_stamp="$(get_last_scrub_stamp $pgid)"
ceph tell $pgid scrub
# Wait for scrub -> auto repair
wait_for_scrub $pgid "$last_scrub_stamp" || return 1
ceph pg dump pgs
# Actually this causes 2 scrubs, so we better wait a little longer
sleep 5
wait_for_clean || return 1
ceph pg dump pgs
# Verify - the file should be back
# Restarted osd get $ceph_osd_args passed
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING list-attrs || return 1
rados --pool $poolname get SOMETHING $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
grep scrub_finish $dir/osd.${primary}.log
# This should have caused 1 object to be repaired
COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
test "$COUNT" = "1" || return 1
}
function TEST_auto_repair_bluestore_failed() {
local dir=$1
local poolname=testpool
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-auto-repair=true \
--osd_deep_scrub_randomize_ratio=0 \
--osd-scrub-interval-randomize-ratio=0"
for id in $(seq 0 2) ; do
run_osd $dir $id $ceph_osd_args || return 1
done
create_pool $poolname 1 1 || return 1
ceph osd pool set $poolname size 2
wait_for_clean || return 1
# Put an object
local payload=ABCDEF
echo $payload > $dir/ORIGINAL
for i in $(seq 1 10)
do
rados --pool $poolname put obj$i $dir/ORIGINAL || return 1
done
# Remove the object from one shard physically
# Restarted osd get $ceph_osd_args passed
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) obj1 remove || return 1
# obj2 can't be repaired
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) obj2 remove || return 1
objectstore_tool $dir $(get_primary $poolname SOMETHING) obj2 rm-attr _ || return 1
local pgid=$(get_pg $poolname obj1)
local primary=$(get_primary $poolname obj1)
local last_scrub_stamp="$(get_last_scrub_stamp $pgid)"
ceph tell $pgid deep_scrub
ceph tell $pgid scrub
# Wait for auto repair
wait_for_scrub $pgid "$last_scrub_stamp" || return 1
wait_for_clean || return 1
flush_pg_stats
grep scrub_finish $dir/osd.${primary}.log
grep -q "scrub_finish.*still present after re-scrub" $dir/osd.${primary}.log || return 1
ceph pg dump pgs
ceph pg dump pgs | grep -q "^${pgid}.*+failed_repair" || return 1
# Verify - obj1 should be back
# Restarted osd get $ceph_osd_args passed
objectstore_tool $dir $(get_not_primary $poolname obj1) obj1 list-attrs || return 1
rados --pool $poolname get obj1 $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
grep scrub_finish $dir/osd.${primary}.log
# Make it repairable
objectstore_tool $dir $(get_primary $poolname SOMETHING) obj2 remove || return 1
repair $pgid
sleep 2
flush_pg_stats
ceph pg dump pgs
ceph pg dump pgs | grep -q -e "^${pgid}.* active+clean " -e "^${pgid}.* active+clean+wait " || return 1
grep scrub_finish $dir/osd.${primary}.log
}
function TEST_auto_repair_bluestore_failed_norecov() {
local dir=$1
local poolname=testpool
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-auto-repair=true \
--osd_deep_scrub_randomize_ratio=0 \
--osd-scrub-interval-randomize-ratio=0"
for id in $(seq 0 2) ; do
run_osd $dir $id $ceph_osd_args || return 1
done
create_pool $poolname 1 1 || return 1
ceph osd pool set $poolname size 2
wait_for_clean || return 1
# Put an object
local payload=ABCDEF
echo $payload > $dir/ORIGINAL
for i in $(seq 1 10)
do
rados --pool $poolname put obj$i $dir/ORIGINAL || return 1
done
# Remove the object from one shard physically
# Restarted osd get $ceph_osd_args passed
# obj1 can't be repaired
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) obj1 remove || return 1
objectstore_tool $dir $(get_primary $poolname SOMETHING) obj1 rm-attr _ || return 1
# obj2 can't be repaired
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) obj2 remove || return 1
objectstore_tool $dir $(get_primary $poolname SOMETHING) obj2 rm-attr _ || return 1
local pgid=$(get_pg $poolname obj1)
local primary=$(get_primary $poolname obj1)
local last_scrub_stamp="$(get_last_scrub_stamp $pgid)"
ceph tell $pgid deep_scrub
ceph tell $pgid scrub
# Wait for auto repair
wait_for_scrub $pgid "$last_scrub_stamp" || return 1
wait_for_clean || return 1
flush_pg_stats
grep -q "scrub_finish.*present with no repair possible" $dir/osd.${primary}.log || return 1
ceph pg dump pgs
ceph pg dump pgs | grep -q "^${pgid}.*+failed_repair" || return 1
}
function TEST_repair_stats() {
local dir=$1
local poolname=testpool
local OSDS=2
local OBJS=30
    # This needs to be an even number
local REPAIRS=20
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd_deep_scrub_randomize_ratio=0 \
--osd-scrub-interval-randomize-ratio=0"
for id in $(seq 0 $(expr $OSDS - 1)) ; do
run_osd $dir $id $ceph_osd_args || return 1
done
create_pool $poolname 1 1 || return 1
ceph osd pool set $poolname size 2
wait_for_clean || return 1
# Put an object
local payload=ABCDEF
echo $payload > $dir/ORIGINAL
for i in $(seq 1 $OBJS)
do
rados --pool $poolname put obj$i $dir/ORIGINAL || return 1
done
# Remove the object from one shard physically
# Restarted osd get $ceph_osd_args passed
local other=$(get_not_primary $poolname obj1)
local pgid=$(get_pg $poolname obj1)
local primary=$(get_primary $poolname obj1)
kill_daemons $dir TERM osd.$other >&2 < /dev/null || return 1
kill_daemons $dir TERM osd.$primary >&2 < /dev/null || return 1
for i in $(seq 1 $REPAIRS)
do
      # Alternate removals between osd.0 and osd.1
OSD=$(expr $i % 2)
_objectstore_tool_nodown $dir $OSD obj$i remove || return 1
done
activate_osd $dir $primary $ceph_osd_args || return 1
activate_osd $dir $other $ceph_osd_args || return 1
wait_for_clean || return 1
repair $pgid
wait_for_clean || return 1
ceph pg dump pgs
flush_pg_stats
    # This should have caused $REPAIRS objects to be repaired
ceph pg $pgid query | jq '.info.stats.stat_sum'
COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
test "$COUNT" = "$REPAIRS" || return 1
ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats[] | select(.osd == $primary )"
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats[] | select(.osd == $primary ).num_shards_repaired")
test "$COUNT" = "$(expr $REPAIRS / 2)" || return 1
ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats[] | select(.osd == $other )"
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats[] | select(.osd == $other ).num_shards_repaired")
test "$COUNT" = "$(expr $REPAIRS / 2)" || return 1
ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum"
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
test "$COUNT" = "$REPAIRS" || return 1
}
function TEST_repair_stats_ec() {
local dir=$1
local poolname=testpool
local OSDS=3
local OBJS=30
    # This needs to be an even number
local REPAIRS=26
local allow_overwrites=false
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd_deep_scrub_randomize_ratio=0 \
--osd-scrub-interval-randomize-ratio=0"
for id in $(seq 0 $(expr $OSDS - 1)) ; do
run_osd $dir $id $ceph_osd_args || return 1
done
# Create an EC pool
create_ec_pool $poolname $allow_overwrites k=2 m=1 || return 1
# Put an object
local payload=ABCDEF
echo $payload > $dir/ORIGINAL
for i in $(seq 1 $OBJS)
do
rados --pool $poolname put obj$i $dir/ORIGINAL || return 1
done
# Remove the object from one shard physically
# Restarted osd get $ceph_osd_args passed
local other=$(get_not_primary $poolname obj1)
local pgid=$(get_pg $poolname obj1)
local primary=$(get_primary $poolname obj1)
kill_daemons $dir TERM osd.$other >&2 < /dev/null || return 1
kill_daemons $dir TERM osd.$primary >&2 < /dev/null || return 1
for i in $(seq 1 $REPAIRS)
do
      # Alternate removals between osd.0 and osd.1
OSD=$(expr $i % 2)
_objectstore_tool_nodown $dir $OSD obj$i remove || return 1
done
activate_osd $dir $primary $ceph_osd_args || return 1
activate_osd $dir $other $ceph_osd_args || return 1
wait_for_clean || return 1
repair $pgid
wait_for_clean || return 1
ceph pg dump pgs
flush_pg_stats
    # This should have caused $REPAIRS objects to be repaired
ceph pg $pgid query | jq '.info.stats.stat_sum'
COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
test "$COUNT" = "$REPAIRS" || return 1
for osd in $(seq 0 $(expr $OSDS - 1)) ; do
if [ $osd = $other -o $osd = $primary ]; then
repair=$(expr $REPAIRS / 2)
else
repair="0"
fi
ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats[] | select(.osd == $osd )"
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats[] | select(.osd == $osd ).num_shards_repaired")
test "$COUNT" = "$repair" || return 1
done
ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum"
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
test "$COUNT" = "$REPAIRS" || return 1
}
function corrupt_and_repair_jerasure() {
local dir=$1
local allow_overwrites=$2
local poolname=ecpool
run_mon $dir a || return 1
run_mgr $dir x || return 1
for id in $(seq 0 3) ; do
run_osd $dir $id || return 1
done
create_rbd_pool || return 1
wait_for_clean || return 1
create_ec_pool $poolname $allow_overwrites k=2 m=2 || return 1
corrupt_and_repair_erasure_coded $dir $poolname || return 1
}
function TEST_corrupt_and_repair_jerasure_appends() {
corrupt_and_repair_jerasure $1 false
}
function TEST_corrupt_and_repair_jerasure_overwrites() {
if [ "$use_ec_overwrite" = "true" ]; then
corrupt_and_repair_jerasure $1 true
fi
}
function corrupt_and_repair_lrc() {
local dir=$1
local allow_overwrites=$2
local poolname=ecpool
run_mon $dir a || return 1
run_mgr $dir x || return 1
for id in $(seq 0 9) ; do
run_osd $dir $id || return 1
done
create_rbd_pool || return 1
wait_for_clean || return 1
create_ec_pool $poolname $allow_overwrites k=4 m=2 l=3 plugin=lrc || return 1
corrupt_and_repair_erasure_coded $dir $poolname || return 1
}
function TEST_corrupt_and_repair_lrc_appends() {
corrupt_and_repair_lrc $1 false
}
function TEST_corrupt_and_repair_lrc_overwrites() {
if [ "$use_ec_overwrite" = "true" ]; then
corrupt_and_repair_lrc $1 true
fi
}
function unfound_erasure_coded() {
local dir=$1
local allow_overwrites=$2
local poolname=ecpool
local payload=ABCDEF
run_mon $dir a || return 1
run_mgr $dir x || return 1
for id in $(seq 0 3) ; do
run_osd $dir $id || return 1
done
create_ec_pool $poolname $allow_overwrites k=2 m=2 || return 1
add_something $dir $poolname || return 1
local primary=$(get_primary $poolname SOMETHING)
local -a osds=($(get_osds $poolname SOMETHING | sed -e "s/$primary//"))
local not_primary_first=${osds[0]}
local not_primary_second=${osds[1]}
local not_primary_third=${osds[2]}
#
# 1) remove the corresponding file from the OSDs
#
pids=""
run_in_background pids objectstore_tool $dir $not_primary_first SOMETHING remove
run_in_background pids objectstore_tool $dir $not_primary_second SOMETHING remove
run_in_background pids objectstore_tool $dir $not_primary_third SOMETHING remove
wait_background pids
return_code=$?
if [ $return_code -ne 0 ]; then return $return_code; fi
#
# 2) repair the PG
#
local pg=$(get_pg $poolname SOMETHING)
repair $pg
#
# 3) check pg state
#
# it may take a bit to appear due to mon/mgr asynchrony
for f in `seq 1 60`; do
ceph -s | grep "1/1 objects unfound" && break
sleep 1
done
ceph -s|grep "4 up" || return 1
ceph -s|grep "4 in" || return 1
ceph -s|grep "1/1 objects unfound" || return 1
}
function TEST_unfound_erasure_coded_appends() {
unfound_erasure_coded $1 false
}
function TEST_unfound_erasure_coded_overwrites() {
if [ "$use_ec_overwrite" = "true" ]; then
unfound_erasure_coded $1 true
fi
}
#
# list_missing for EC pool
#
function list_missing_erasure_coded() {
local dir=$1
local allow_overwrites=$2
local poolname=ecpool
run_mon $dir a || return 1
run_mgr $dir x || return 1
for id in $(seq 0 2) ; do
run_osd $dir $id || return 1
done
create_rbd_pool || return 1
wait_for_clean || return 1
create_ec_pool $poolname $allow_overwrites k=2 m=1 || return 1
# Put an object and remove the two shards (including primary)
add_something $dir $poolname MOBJ0 || return 1
local -a osds0=($(get_osds $poolname MOBJ0))
# Put another object and remove two shards (excluding primary)
add_something $dir $poolname MOBJ1 || return 1
local -a osds1=($(get_osds $poolname MOBJ1))
# Stop all osd daemons
for id in $(seq 0 2) ; do
kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1
done
id=${osds0[0]}
ceph-objectstore-tool --data-path $dir/$id \
MOBJ0 remove || return 1
id=${osds0[1]}
ceph-objectstore-tool --data-path $dir/$id \
MOBJ0 remove || return 1
id=${osds1[1]}
ceph-objectstore-tool --data-path $dir/$id \
MOBJ1 remove || return 1
id=${osds1[2]}
ceph-objectstore-tool --data-path $dir/$id \
MOBJ1 remove || return 1
for id in $(seq 0 2) ; do
activate_osd $dir $id >&2 || return 1
done
create_rbd_pool || return 1
wait_for_clean || return 1
    # Get the PG - both objects should be in the same PG
local pg=$(get_pg $poolname MOBJ0)
    # Repair the PG, which triggers recovery
    # and should mark the objects as unfound
repair $pg
for i in $(seq 0 120) ; do
[ $i -lt 60 ] || return 1
matches=$(ceph pg $pg list_unfound | egrep "MOBJ0|MOBJ1" | wc -l)
[ $matches -eq 2 ] && break
done
}
function TEST_list_missing_erasure_coded_appends() {
list_missing_erasure_coded $1 false
}
function TEST_list_missing_erasure_coded_overwrites() {
if [ "$use_ec_overwrite" = "true" ]; then
list_missing_erasure_coded $1 true
fi
}
#
# Corrupt one copy of a replicated pool
#
function TEST_corrupt_scrub_replicated() {
local dir=$1
local poolname=csr_pool
local total_objs=19
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
create_pool foo 1 || return 1
create_pool $poolname 1 1 || return 1
wait_for_clean || return 1
for i in $(seq 1 $total_objs) ; do
objname=ROBJ${i}
add_something $dir $poolname $objname || return 1
rados --pool $poolname setomapheader $objname hdr-$objname || return 1
rados --pool $poolname setomapval $objname key-$objname val-$objname || return 1
done
    # Increase the file size to 1 MB + 1 KB
dd if=/dev/zero of=$dir/new.ROBJ19 bs=1024 count=1025
rados --pool $poolname put $objname $dir/new.ROBJ19 || return 1
rm -f $dir/new.ROBJ19
local pg=$(get_pg $poolname ROBJ0)
local primary=$(get_primary $poolname ROBJ0)
# Compute an old omap digest and save oi
CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) \
config set osd_deep_scrub_update_digest_min_age 0
CEPH_ARGS='' ceph daemon $(get_asok_path osd.1) \
config set osd_deep_scrub_update_digest_min_age 0
pg_deep_scrub $pg
for i in $(seq 1 $total_objs) ; do
objname=ROBJ${i}
# Alternate corruption between osd.0 and osd.1
local osd=$(expr $i % 2)
case $i in
1)
# Size (deep scrub data_digest too)
local payload=UVWXYZZZ
echo $payload > $dir/CORRUPT
objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1
;;
2)
# digest (deep scrub only)
local payload=UVWXYZ
echo $payload > $dir/CORRUPT
objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1
;;
3)
# missing
objectstore_tool $dir $osd $objname remove || return 1
;;
4)
# Modify omap value (deep scrub only)
objectstore_tool $dir $osd $objname set-omap key-$objname $dir/CORRUPT || return 1
;;
5)
# Delete omap key (deep scrub only)
objectstore_tool $dir $osd $objname rm-omap key-$objname || return 1
;;
6)
# Add extra omap key (deep scrub only)
echo extra > $dir/extra-val
objectstore_tool $dir $osd $objname set-omap key2-$objname $dir/extra-val || return 1
rm $dir/extra-val
;;
7)
# Modify omap header (deep scrub only)
echo -n newheader > $dir/hdr
objectstore_tool $dir $osd $objname set-omaphdr $dir/hdr || return 1
rm $dir/hdr
;;
8)
rados --pool $poolname setxattr $objname key1-$objname val1-$objname || return 1
rados --pool $poolname setxattr $objname key2-$objname val2-$objname || return 1
# Break xattrs
echo -n bad-val > $dir/bad-val
objectstore_tool $dir $osd $objname set-attr _key1-$objname $dir/bad-val || return 1
objectstore_tool $dir $osd $objname rm-attr _key2-$objname || return 1
echo -n val3-$objname > $dir/newval
objectstore_tool $dir $osd $objname set-attr _key3-$objname $dir/newval || return 1
rm $dir/bad-val $dir/newval
;;
9)
objectstore_tool $dir $osd $objname get-attr _ > $dir/robj9-oi
echo -n D > $dir/change
rados --pool $poolname put $objname $dir/change
objectstore_tool $dir $osd $objname set-attr _ $dir/robj9-oi
rm $dir/oi $dir/change
;;
# ROBJ10 must be handled after digests are re-computed by a deep scrub below
# ROBJ11 must be handled with config change before deep scrub
# ROBJ12 must be handled with config change before scrubs
# ROBJ13 must be handled before scrubs
14)
echo -n bad-val > $dir/bad-val
objectstore_tool $dir 0 $objname set-attr _ $dir/bad-val || return 1
objectstore_tool $dir 1 $objname rm-attr _ || return 1
rm $dir/bad-val
;;
15)
objectstore_tool $dir $osd $objname rm-attr _ || return 1
;;
16)
objectstore_tool $dir 0 $objname rm-attr snapset || return 1
echo -n bad-val > $dir/bad-val
objectstore_tool $dir 1 $objname set-attr snapset $dir/bad-val || return 1
;;
17)
            # Deep-scrub only (all replicas are different from the object info)
local payload=ROBJ17
echo $payload > $dir/new.ROBJ17
objectstore_tool $dir 0 $objname set-bytes $dir/new.ROBJ17 || return 1
objectstore_tool $dir 1 $objname set-bytes $dir/new.ROBJ17 || return 1
;;
18)
            # Deep-scrub only (all replicas are different from the object info)
local payload=ROBJ18
echo $payload > $dir/new.ROBJ18
objectstore_tool $dir 0 $objname set-bytes $dir/new.ROBJ18 || return 1
objectstore_tool $dir 1 $objname set-bytes $dir/new.ROBJ18 || return 1
# Make one replica have a different object info, so a full repair must happen too
objectstore_tool $dir $osd $objname corrupt-info || return 1
;;
19)
# Set osd-max-object-size smaller than this object's size
esac
done
local pg=$(get_pg $poolname ROBJ0)
ceph tell osd.\* injectargs -- --osd-max-object-size=1048576
inject_eio rep data $poolname ROBJ11 $dir 0 || return 1 # shard 0 of [1, 0], osd.1
inject_eio rep mdata $poolname ROBJ12 $dir 1 || return 1 # shard 1 of [1, 0], osd.0
inject_eio rep mdata $poolname ROBJ13 $dir 1 || return 1 # shard 1 of [1, 0], osd.0
inject_eio rep data $poolname ROBJ13 $dir 0 || return 1 # shard 0 of [1, 0], osd.1
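# The mdata eio injections above surface as stat errors in the regular scrub
# below; the data eio on ROBJ11 only shows up once a deep scrub reads the data.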
pg_scrub $pg
ERRORS=0
declare -a err_strings
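# Each entry is a regular expression that must appear in the primary OSD's
# cluster log; every pattern that fails to match counts as a test error.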
err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:30259878:::ROBJ15:head : candidate had a missing info key"
err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:33aca486:::ROBJ18:head : object info inconsistent "
err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:5c7b2c47:::ROBJ16:head : candidate had a corrupt snapset"
err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:5c7b2c47:::ROBJ16:head : candidate had a missing snapset key"
err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:5c7b2c47:::ROBJ16:head : failed to pick suitable object info"
err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:86586531:::ROBJ8:head : attr value mismatch '_key1-ROBJ8', attr name mismatch '_key3-ROBJ8', attr name mismatch '_key2-ROBJ8'"
err_strings[6]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:bc819597:::ROBJ12:head : candidate had a stat error"
err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:c0c86b1d:::ROBJ14:head : candidate had a missing info key"
err_strings[8]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:c0c86b1d:::ROBJ14:head : candidate had a corrupt info"
err_strings[9]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:c0c86b1d:::ROBJ14:head : failed to pick suitable object info"
err_strings[10]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : candidate size 9 info size 7 mismatch"
err_strings[11]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : size 9 != size 7 from auth oi 3:ce3f1d6a:::ROBJ1:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 3 dd 2ddbf8f5 od f5fba2c6 alloc_hint [[]0 0 0[]][)], size 9 != size 7 from shard 0"
err_strings[12]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:d60617f9:::ROBJ13:head : candidate had a stat error"
err_strings[13]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 3:f2a5b2a4:::ROBJ3:head : missing"
err_strings[14]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ffdb2004:::ROBJ9:head : candidate size 1 info size 7 mismatch"
err_strings[15]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ffdb2004:::ROBJ9:head : object info inconsistent "
err_strings[16]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 3:c0c86b1d:::ROBJ14:head : no '_' attr"
err_strings[17]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 3:5c7b2c47:::ROBJ16:head : can't decode 'snapset' attr .* no longer understand old encoding version 3 < 97: Malformed input"
err_strings[18]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub : stat mismatch, got 19/19 objects, 0/0 clones, 18/19 dirty, 18/19 omap, 0/0 pinned, 0/0 hit_set_archive, 0/0 whiteouts, 1049713/1049720 bytes, 0/0 manifest objects, 0/0 hit_set_archive bytes."
err_strings[19]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 1 missing, 8 inconsistent objects"
err_strings[20]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 18 errors"
err_strings[21]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:123a5f55:::ROBJ19:head : size 1049600 > 1048576 is too large"
for err_string in "${err_strings[@]}"
do
if ! grep -q "$err_string" $dir/osd.${primary}.log
then
echo "Missing log message '$err_string'"
ERRORS=$(expr $ERRORS + 1)
fi
done
rados list-inconsistent-pg $poolname > $dir/json || return 1
# Check pg count
test $(jq '. | length' $dir/json) = "1" || return 1
# Check pgid
test $(jq -r '.[0]' $dir/json) = $pg || return 1
rados list-inconsistent-obj $pg > $dir/json || return 1
# Get epoch for repair-get requests
epoch=$(jq .epoch $dir/json)
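# The heredoc below holds the expected list-inconsistent-obj output; both it
# and the live output are passed through the same jq filter and key-sorting
# helper so that multidiff compares normalized JSON.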
jq "$jqfilter" << EOF | jq '.inconsistents' | python3 -c "$sortkeys" > $dir/checkcsjson
{
"inconsistents": [
{
"shards": [
{
"size": 7,
"errors": [],
"osd": 0,
"primary": false
},
{
"object_info": {
"oid": {
"oid": "ROBJ1",
"key": "",
"snapid": -2,
"hash": 1454963827,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'58",
"prior_version": "21'3",
"last_reqid": "osd.1.0:57",
"user_version": 3,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xf5fba2c6",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"size": 9,
"errors": [
"size_mismatch_info",
"obj_size_info_mismatch"
],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ1",
"key": "",
"snapid": -2,
"hash": 1454963827,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'58",
"prior_version": "21'3",
"last_reqid": "osd.1.0:57",
"user_version": 3,
"size": 7,
"mtime": "2018-04-05 14:33:19.804040",
"local_mtime": "2018-04-05 14:33:19.804839",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xf5fba2c6",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"size_mismatch_info",
"obj_size_info_mismatch"
],
"errors": [
"size_mismatch"
],
"object": {
"version": 3,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ1"
}
},
{
"shards": [
{
"errors": [
"stat_error"
],
"osd": 0,
"primary": false
},
{
"size": 7,
"errors": [],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ12",
"key": "",
"snapid": -2,
"hash": 3920199997,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'56",
"prior_version": "43'36",
"last_reqid": "osd.1.0:55",
"user_version": 36,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x067f306a",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"stat_error"
],
"errors": [],
"object": {
"version": 36,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ12"
}
},
{
"shards": [
{
"errors": [
"stat_error"
],
"osd": 0,
"primary": false
},
{
"size": 7,
"errors": [],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ13",
"key": "",
"snapid": -2,
"hash": 2682806379,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'59",
"prior_version": "45'39",
"last_reqid": "osd.1.0:58",
"user_version": 39,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x6441854d",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"stat_error"
],
"errors": [],
"object": {
"version": 39,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ13"
}
},
{
"shards": [
{
"object_info": "bad-val",
"size": 7,
"errors": [
"info_corrupted"
],
"osd": 0,
"primary": false
},
{
"size": 7,
"errors": [
"info_missing"
],
"osd": 1,
"primary": true
}
],
"union_shard_errors": [
"info_missing",
"info_corrupted"
],
"errors": [],
"object": {
"version": 0,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ14"
}
},
{
"shards": [
{
"object_info": {
"oid": {
"oid": "ROBJ15",
"key": "",
"snapid": -2,
"hash": 504996876,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'49",
"prior_version": "49'45",
"last_reqid": "osd.1.0:48",
"user_version": 45,
"size": 7,
"mtime": "2018-04-05 14:33:29.498969",
"local_mtime": "2018-04-05 14:33:29.499890",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x2d2a4d6e",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"size": 7,
"errors": [],
"osd": 0,
"primary": false
},
{
"size": 7,
"errors": [
"info_missing"
],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ15",
"key": "",
"snapid": -2,
"hash": 504996876,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'49",
"prior_version": "49'45",
"last_reqid": "osd.1.0:48",
"user_version": 45,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x2d2a4d6e",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"info_missing"
],
"errors": [],
"object": {
"version": 45,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ15"
}
},
{
"errors": [],
"object": {
"locator": "",
"name": "ROBJ16",
"nspace": "",
"snap": "head",
"version": 0
},
"shards": [
{
"errors": [
"snapset_missing"
],
"osd": 0,
"primary": false,
"size": 7
},
{
"errors": [
"snapset_corrupted"
],
"osd": 1,
"primary": true,
"snapset": "bad-val",
"size": 7
}
],
"union_shard_errors": [
"snapset_missing",
"snapset_corrupted"
]
},
{
"errors": [
"object_info_inconsistency"
],
"object": {
"locator": "",
"name": "ROBJ18",
"nspace": "",
"snap": "head"
},
"selected_object_info": {
"alloc_hint_flags": 255,
"data_digest": "0x2ddbf8f5",
"expected_object_size": 0,
"expected_write_size": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"lost": 0,
"manifest": {
"type": 0
},
"oid": {
"hash": 1629828556,
"key": "",
"max": 0,
"namespace": "",
"oid": "ROBJ18",
"pool": 3,
"snapid": -2
},
"omap_digest": "0xddc3680f",
"size": 7,
"truncate_seq": 0,
"truncate_size": 0,
"user_version": 54,
"watchers": {}
},
"shards": [
{
"errors": [],
"object_info": {
"alloc_hint_flags": 0,
"data_digest": "0x2ddbf8f5",
"expected_object_size": 0,
"expected_write_size": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"lost": 0,
"manifest": {
"type": 0
},
"oid": {
"hash": 1629828556,
"key": "",
"max": 0,
"namespace": "",
"oid": "ROBJ18",
"pool": 3,
"snapid": -2
},
"omap_digest": "0xddc3680f",
"size": 7,
"truncate_seq": 0,
"truncate_size": 0,
"user_version": 54,
"watchers": {}
},
"osd": 0,
"primary": false,
"size": 7
},
{
"errors": [],
"object_info": {
"alloc_hint_flags": 255,
"data_digest": "0x2ddbf8f5",
"expected_object_size": 0,
"expected_write_size": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"lost": 0,
"manifest": {
"type": 0
},
"oid": {
"hash": 1629828556,
"key": "",
"max": 0,
"namespace": "",
"oid": "ROBJ18",
"pool": 3,
"snapid": -2
},
"omap_digest": "0xddc3680f",
"size": 7,
"truncate_seq": 0,
"truncate_size": 0,
"user_version": 54,
"watchers": {}
},
"osd": 1,
"primary": true,
"size": 7
}
],
"union_shard_errors": []
},
{
"object": {
"name": "ROBJ19",
"nspace": "",
"locator": "",
"snap": "head",
"version": 58
},
"errors": [
"size_too_large"
],
"union_shard_errors": [],
"selected_object_info": {
"oid": {
"oid": "ROBJ19",
"key": "",
"snapid": -2,
"hash": 2868534344,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "63'59",
"prior_version": "63'58",
"last_reqid": "osd.1.0:58",
"user_version": 58,
"size": 1049600,
"mtime": "2019-08-09T23:33:58.340709+0000",
"local_mtime": "2019-08-09T23:33:58.345676+0000",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x3dde0ef3",
"omap_digest": "0xbffddd28",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"shards": [
{
"osd": 0,
"primary": false,
"errors": [],
"size": 1049600
},
{
"osd": 1,
"primary": true,
"errors": [],
"size": 1049600
}
]
},
{
"shards": [
{
"size": 7,
"errors": [],
"osd": 0,
"primary": false
},
{
"errors": [
"missing"
],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ3",
"key": "",
"snapid": -2,
"hash": 625845583,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'61",
"prior_version": "25'9",
"last_reqid": "osd.1.0:60",
"user_version": 9,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x00b35dfd",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"missing"
],
"errors": [],
"object": {
"version": 9,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ3"
}
},
{
"shards": [
{
"attrs": [
{
"Base64": false,
"value": "bad-val",
"name": "key1-ROBJ8"
},
{
"Base64": false,
"value": "val2-ROBJ8",
"name": "key2-ROBJ8"
}
],
"size": 7,
"errors": [],
"osd": 0,
"primary": false
},
{
"attrs": [
{
"Base64": false,
"value": "val1-ROBJ8",
"name": "key1-ROBJ8"
},
{
"Base64": false,
"value": "val3-ROBJ8",
"name": "key3-ROBJ8"
}
],
"size": 7,
"errors": [],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ8",
"key": "",
"snapid": -2,
"hash": 2359695969,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "79'66",
"prior_version": "79'65",
"last_reqid": "client.4554.0:1",
"user_version": 79,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xd6be81dc",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [],
"errors": [
"attr_value_mismatch",
"attr_name_mismatch"
],
"object": {
"version": 66,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ8"
}
},
{
"shards": [
{
"object_info": {
"oid": {
"oid": "ROBJ9",
"key": "",
"snapid": -2,
"hash": 537189375,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "95'67",
"prior_version": "51'64",
"last_reqid": "client.4649.0:1",
"user_version": 80,
"size": 1,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2b63260d",
"omap_digest": "0x2eecc539",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"size": 1,
"errors": [],
"osd": 0,
"primary": false
},
{
"object_info": {
"oid": {
"oid": "ROBJ9",
"key": "",
"snapid": -2,
"hash": 537189375,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'64",
"prior_version": "37'27",
"last_reqid": "osd.1.0:63",
"user_version": 27,
"size": 7,
"mtime": "2018-04-05 14:33:25.352485",
"local_mtime": "2018-04-05 14:33:25.353746",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x2eecc539",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"size": 1,
"errors": [
"obj_size_info_mismatch"
],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ9",
"key": "",
"snapid": -2,
"hash": 537189375,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "95'67",
"prior_version": "51'64",
"last_reqid": "client.4649.0:1",
"user_version": 80,
"size": 1,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2b63260d",
"omap_digest": "0x2eecc539",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"obj_size_info_mismatch"
],
"errors": [
"object_info_inconsistency"
],
"object": {
"version": 67,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ9"
}
}
],
"epoch": 0
}
EOF
jq "$jqfilter" $dir/json | jq '.inconsistents' | python3 -c "$sortkeys" > $dir/csjson
multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1
if test $getjson = "yes"
then
jq '.' $dir/json > save1.json
fi
if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null;
then
jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1
fi
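# Second pass: corrupt objects in ways that only a deep scrub notices,
# re-arm the eio injections, and relax the object size limit so ROBJ19 is
# clean this time.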
objname=ROBJ9
# Change data and size again because digest was recomputed
echo -n ZZZ > $dir/change
rados --pool $poolname put $objname $dir/change
# Reset one replica's object info to the even older value saved earlier
objectstore_tool $dir 0 $objname set-attr _ $dir/robj9-oi
rm $dir/robj9-oi $dir/change
objname=ROBJ10
objectstore_tool $dir 1 $objname get-attr _ > $dir/oi
rados --pool $poolname setomapval $objname key2-$objname val2-$objname
objectstore_tool $dir 0 $objname set-attr _ $dir/oi
objectstore_tool $dir 1 $objname set-attr _ $dir/oi
rm $dir/oi
inject_eio rep data $poolname ROBJ11 $dir 0 || return 1 # shard 0 of [1, 0], osd.1
inject_eio rep mdata $poolname ROBJ12 $dir 1 || return 1 # shard 1 of [1, 0], osd.0
inject_eio rep mdata $poolname ROBJ13 $dir 1 || return 1 # shard 1 of [1, 0], osd.0
inject_eio rep data $poolname ROBJ13 $dir 0 || return 1 # shard 0 of [1, 0], osd.1
# ROBJ19 won't error this time
ceph tell osd.\* injectargs -- --osd-max-object-size=134217728
pg_deep_scrub $pg
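# A deep scrub also verifies data and omap digests, so the expected errors
# below include digest mismatches (e.g. ROBJ2, ROBJ4-ROBJ7, ROBJ10, ROBJ17,
# ROBJ18) on top of the shallow-scrub findings.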
err_strings=()
err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:30259878:::ROBJ15:head : candidate had a missing info key"
err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:33aca486:::ROBJ18:head : data_digest 0xbd89c912 != data_digest 0x2ddbf8f5 from auth oi 3:33aca486:::ROBJ18:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 54 dd 2ddbf8f5 od ddc3680f alloc_hint [[]0 0 255[]][)], object info inconsistent "
err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:33aca486:::ROBJ18:head : data_digest 0xbd89c912 != data_digest 0x2ddbf8f5 from auth oi 3:33aca486:::ROBJ18:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 54 dd 2ddbf8f5 od ddc3680f alloc_hint [[]0 0 255[]][)]"
err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:33aca486:::ROBJ18:head : failed to pick suitable auth object"
err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:5c7b2c47:::ROBJ16:head : candidate had a corrupt snapset"
err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:5c7b2c47:::ROBJ16:head : candidate had a missing snapset key"
err_strings[6]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:5c7b2c47:::ROBJ16:head : failed to pick suitable object info"
err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:86586531:::ROBJ8:head : attr value mismatch '_key1-ROBJ8', attr name mismatch '_key3-ROBJ8', attr name mismatch '_key2-ROBJ8'"
err_strings[8]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:87abbf36:::ROBJ11:head : candidate had a read error"
err_strings[9]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:8aa5320e:::ROBJ17:head : data_digest 0x5af0c3ef != data_digest 0x2ddbf8f5 from auth oi 3:8aa5320e:::ROBJ17:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 51 dd 2ddbf8f5 od e9572720 alloc_hint [[]0 0 0[]][)]"
err_strings[10]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:8aa5320e:::ROBJ17:head : data_digest 0x5af0c3ef != data_digest 0x2ddbf8f5 from auth oi 3:8aa5320e:::ROBJ17:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 51 dd 2ddbf8f5 od e9572720 alloc_hint [[]0 0 0[]][)]"
err_strings[11]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:8aa5320e:::ROBJ17:head : failed to pick suitable auth object"
err_strings[12]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:8b55fa4b:::ROBJ7:head : omap_digest 0xefced57a != omap_digest 0x6a73cc07 from shard 1"
err_strings[13]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:8b55fa4b:::ROBJ7:head : omap_digest 0x6a73cc07 != omap_digest 0xefced57a from auth oi 3:8b55fa4b:::ROBJ7:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 21 dd 2ddbf8f5 od efced57a alloc_hint [[]0 0 0[]][)]"
err_strings[14]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:a53c12e8:::ROBJ6:head : omap_digest 0x689ee887 != omap_digest 0x179c919f from shard 1, omap_digest 0x689ee887 != omap_digest 0x179c919f from auth oi 3:a53c12e8:::ROBJ6:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 18 dd 2ddbf8f5 od 179c919f alloc_hint [[]0 0 0[]][)]"
err_strings[15]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:b1f19cbd:::ROBJ10:head : omap_digest 0xa8dd5adc != omap_digest 0xc2025a24 from auth oi 3:b1f19cbd:::ROBJ10:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 30 dd 2ddbf8f5 od c2025a24 alloc_hint [[]0 0 0[]][)]"
err_strings[16]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:b1f19cbd:::ROBJ10:head : omap_digest 0xa8dd5adc != omap_digest 0xc2025a24 from auth oi 3:b1f19cbd:::ROBJ10:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 30 dd 2ddbf8f5 od c2025a24 alloc_hint [[]0 0 0[]][)]"
err_strings[17]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:b1f19cbd:::ROBJ10:head : failed to pick suitable auth object"
err_strings[18]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:bc819597:::ROBJ12:head : candidate had a stat error"
err_strings[19]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:c0c86b1d:::ROBJ14:head : candidate had a missing info key"
err_strings[20]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:c0c86b1d:::ROBJ14:head : candidate had a corrupt info"
err_strings[21]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:c0c86b1d:::ROBJ14:head : failed to pick suitable object info"
err_strings[22]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : candidate size 9 info size 7 mismatch"
err_strings[23]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : data_digest 0x2d4a11c2 != data_digest 0x2ddbf8f5 from shard 0, data_digest 0x2d4a11c2 != data_digest 0x2ddbf8f5 from auth oi 3:ce3f1d6a:::ROBJ1:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 3 dd 2ddbf8f5 od f5fba2c6 alloc_hint [[]0 0 0[]][)], size 9 != size 7 from auth oi 3:ce3f1d6a:::ROBJ1:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 3 dd 2ddbf8f5 od f5fba2c6 alloc_hint [[]0 0 0[]][)], size 9 != size 7 from shard 0"
err_strings[24]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:d60617f9:::ROBJ13:head : candidate had a read error"
err_strings[25]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:d60617f9:::ROBJ13:head : candidate had a stat error"
err_strings[26]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:d60617f9:::ROBJ13:head : failed to pick suitable object info"
err_strings[27]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:e97ce31e:::ROBJ2:head : data_digest 0x578a4830 != data_digest 0x2ddbf8f5 from shard 1, data_digest 0x578a4830 != data_digest 0x2ddbf8f5 from auth oi 3:e97ce31e:::ROBJ2:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 6 dd 2ddbf8f5 od f8e11918 alloc_hint [[]0 0 0[]][)]"
err_strings[28]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 3:f2a5b2a4:::ROBJ3:head : missing"
err_strings[29]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:f4981d31:::ROBJ4:head : omap_digest 0xd7178dfe != omap_digest 0xe2d46ea4 from shard 1, omap_digest 0xd7178dfe != omap_digest 0xe2d46ea4 from auth oi 3:f4981d31:::ROBJ4:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 12 dd 2ddbf8f5 od e2d46ea4 alloc_hint [[]0 0 0[]][)]"
err_strings[30]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:f4bfd4d1:::ROBJ5:head : omap_digest 0x1a862a41 != omap_digest 0x6cac8f6 from shard 1"
err_strings[31]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:f4bfd4d1:::ROBJ5:head : omap_digest 0x6cac8f6 != omap_digest 0x1a862a41 from auth oi 3:f4bfd4d1:::ROBJ5:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 15 dd 2ddbf8f5 od 1a862a41 alloc_hint [[]0 0 0[]][)]"
err_strings[32]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:ffdb2004:::ROBJ9:head : candidate size 3 info size 7 mismatch"
err_strings[33]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:ffdb2004:::ROBJ9:head : object info inconsistent "
err_strings[34]="log_channel[(]cluster[)] log [[]ERR[]] : deep-scrub [0-9]*[.]0 3:c0c86b1d:::ROBJ14:head : no '_' attr"
err_strings[35]="log_channel[(]cluster[)] log [[]ERR[]] : deep-scrub [0-9]*[.]0 3:5c7b2c47:::ROBJ16:head : can't decode 'snapset' attr .* no longer understand old encoding version 3 < 97: Malformed input"
err_strings[36]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 deep-scrub : stat mismatch, got 19/19 objects, 0/0 clones, 18/19 dirty, 18/19 omap, 0/0 pinned, 0/0 hit_set_archive, 0/0 whiteouts, 1049715/1049716 bytes, 0/0 manifest objects, 0/0 hit_set_archive bytes."
err_strings[37]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 deep-scrub 1 missing, 11 inconsistent objects"
err_strings[38]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 deep-scrub 35 errors"
for err_string in "${err_strings[@]}"
do
if ! grep -q "$err_string" $dir/osd.${primary}.log
then
echo "Missing log message '$err_string'"
ERRORS=$(expr $ERRORS + 1)
fi
done
rados list-inconsistent-pg $poolname > $dir/json || return 1
# Check pg count
test $(jq '. | length' $dir/json) = "1" || return 1
# Check pgid
test $(jq -r '.[0]' $dir/json) = $pg || return 1
rados list-inconsistent-obj $pg > $dir/json || return 1
# Get epoch for repair-get requests
epoch=$(jq .epoch $dir/json)
jq "$jqfilter" << EOF | jq '.inconsistents' | python3 -c "$sortkeys" > $dir/checkcsjson
{
"inconsistents": [
{
"shards": [
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xf5fba2c6",
"size": 7,
"errors": [],
"osd": 0,
"primary": false
},
{
"object_info": {
"oid": {
"oid": "ROBJ1",
"key": "",
"snapid": -2,
"hash": 1454963827,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'58",
"prior_version": "21'3",
"last_reqid": "osd.1.0:57",
"user_version": 3,
"size": 7,
"mtime": "2018-04-05 14:33:19.804040",
"local_mtime": "2018-04-05 14:33:19.804839",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xf5fba2c6",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"data_digest": "0x2d4a11c2",
"omap_digest": "0xf5fba2c6",
"size": 9,
"errors": [
"data_digest_mismatch_info",
"size_mismatch_info",
"obj_size_info_mismatch"
],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ1",
"key": "",
"snapid": -2,
"hash": 1454963827,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'58",
"prior_version": "21'3",
"last_reqid": "osd.1.0:57",
"user_version": 3,
"size": 7,
"mtime": "2018-04-05 14:33:19.804040",
"local_mtime": "2018-04-05 14:33:19.804839",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xf5fba2c6",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"data_digest_mismatch_info",
"size_mismatch_info",
"obj_size_info_mismatch"
],
"errors": [
"data_digest_mismatch",
"size_mismatch"
],
"object": {
"version": 3,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ1"
}
},
{
"shards": [
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xa8dd5adc",
"size": 7,
"errors": [
"omap_digest_mismatch_info"
],
"osd": 0,
"primary": false
},
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xa8dd5adc",
"size": 7,
"errors": [
"omap_digest_mismatch_info"
],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"alloc_hint_flags": 0,
"data_digest": "0x2ddbf8f5",
"expected_object_size": 0,
"expected_write_size": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"lost": 0,
"manifest": {
"type": 0
},
"oid": {
"hash": 3174666125,
"key": "",
"max": 0,
"namespace": "",
"oid": "ROBJ10",
"pool": 3,
"snapid": -2
},
"omap_digest": "0xc2025a24",
"size": 7,
"truncate_seq": 0,
"truncate_size": 0,
"user_version": 30,
"watchers": {}
},
"union_shard_errors": [
"omap_digest_mismatch_info"
],
"errors": [],
"object": {
"version": 30,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ10"
}
},
{
"shards": [
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xa03cef03",
"size": 7,
"errors": [],
"osd": 0,
"primary": false
},
{
"size": 7,
"errors": [
"read_error"
],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ11",
"key": "",
"snapid": -2,
"hash": 1828574689,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'52",
"prior_version": "41'33",
"last_reqid": "osd.1.0:51",
"user_version": 33,
"size": 7,
"mtime": "2018-04-05 14:33:26.761286",
"local_mtime": "2018-04-05 14:33:26.762368",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xa03cef03",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"read_error"
],
"errors": [],
"object": {
"version": 33,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ11"
}
},
{
"shards": [
{
"errors": [
"stat_error"
],
"osd": 0,
"primary": false
},
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x067f306a",
"size": 7,
"errors": [],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ12",
"key": "",
"snapid": -2,
"hash": 3920199997,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'56",
"prior_version": "43'36",
"last_reqid": "osd.1.0:55",
"user_version": 36,
"size": 7,
"mtime": "2018-04-05 14:33:27.460958",
"local_mtime": "2018-04-05 14:33:27.462109",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x067f306a",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"stat_error"
],
"errors": [],
"object": {
"version": 36,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ12"
}
},
{
"shards": [
{
"errors": [
"stat_error"
],
"osd": 0,
"primary": false
},
{
"size": 7,
"errors": [
"read_error"
],
"osd": 1,
"primary": true
}
],
"union_shard_errors": [
"stat_error",
"read_error"
],
"errors": [],
"object": {
"version": 0,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ13"
}
},
{
"shards": [
{
"object_info": "bad-val",
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x4f14f849",
"size": 7,
"errors": [
"info_corrupted"
],
"osd": 0,
"primary": false
},
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x4f14f849",
"size": 7,
"errors": [
"info_missing"
],
"osd": 1,
"primary": true
}
],
"union_shard_errors": [
"info_missing",
"info_corrupted"
],
"errors": [],
"object": {
"version": 0,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ14"
}
},
{
"shards": [
{
"object_info": {
"oid": {
"oid": "ROBJ15",
"key": "",
"snapid": -2,
"hash": 504996876,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'49",
"prior_version": "49'45",
"last_reqid": "osd.1.0:48",
"user_version": 45,
"size": 7,
"mtime": "2018-04-05 14:33:29.498969",
"local_mtime": "2018-04-05 14:33:29.499890",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x2d2a4d6e",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x2d2a4d6e",
"size": 7,
"errors": [],
"osd": 0,
"primary": false
},
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x2d2a4d6e",
"size": 7,
"errors": [
"info_missing"
],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ15",
"key": "",
"snapid": -2,
"hash": 504996876,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'49",
"prior_version": "49'45",
"last_reqid": "osd.1.0:48",
"user_version": 45,
"size": 7,
"mtime": "2018-04-05 14:33:29.498969",
"local_mtime": "2018-04-05 14:33:29.499890",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x2d2a4d6e",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"info_missing"
],
"errors": [],
"object": {
"version": 45,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ15"
}
},
{
"errors": [],
"object": {
"locator": "",
"name": "ROBJ16",
"nspace": "",
"snap": "head",
"version": 0
},
"shards": [
{
"data_digest": "0x2ddbf8f5",
"errors": [
"snapset_missing"
],
"omap_digest": "0x8b699207",
"osd": 0,
"primary": false,
"size": 7
},
{
"snapset": "bad-val",
"data_digest": "0x2ddbf8f5",
"errors": [
"snapset_corrupted"
],
"omap_digest": "0x8b699207",
"osd": 1,
"primary": true,
"size": 7
}
],
"union_shard_errors": [
"snapset_missing",
"snapset_corrupted"
]
},
{
"errors": [],
"object": {
"locator": "",
"name": "ROBJ17",
"nspace": "",
"snap": "head"
},
"selected_object_info": {
"alloc_hint_flags": 0,
"data_digest": "0x2ddbf8f5",
"expected_object_size": 0,
"expected_write_size": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"lost": 0,
"manifest": {
"type": 0
},
"oid": {
"hash": 1884071249,
"key": "",
"max": 0,
"namespace": "",
"oid": "ROBJ17",
"pool": 3,
"snapid": -2
},
"omap_digest": "0xe9572720",
"size": 7,
"truncate_seq": 0,
"truncate_size": 0,
"user_version": 51,
"watchers": {}
},
"shards": [
{
"data_digest": "0x5af0c3ef",
"errors": [
"data_digest_mismatch_info"
],
"omap_digest": "0xe9572720",
"osd": 0,
"primary": false,
"size": 7
},
{
"data_digest": "0x5af0c3ef",
"errors": [
"data_digest_mismatch_info"
],
"omap_digest": "0xe9572720",
"osd": 1,
"primary": true,
"size": 7
}
],
"union_shard_errors": [
"data_digest_mismatch_info"
]
},
{
"errors": [
"object_info_inconsistency"
],
"object": {
"locator": "",
"name": "ROBJ18",
"nspace": "",
"snap": "head"
},
"selected_object_info": {
"alloc_hint_flags": 255,
"data_digest": "0x2ddbf8f5",
"expected_object_size": 0,
"expected_write_size": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"lost": 0,
"manifest": {
"type": 0
},
"oid": {
"hash": 1629828556,
"key": "",
"max": 0,
"namespace": "",
"oid": "ROBJ18",
"pool": 3,
"snapid": -2
},
"omap_digest": "0xddc3680f",
"size": 7,
"truncate_seq": 0,
"truncate_size": 0,
"user_version": 54,
"watchers": {}
},
"shards": [
{
"data_digest": "0xbd89c912",
"errors": [
"data_digest_mismatch_info"
],
"object_info": {
"alloc_hint_flags": 0,
"data_digest": "0x2ddbf8f5",
"expected_object_size": 0,
"expected_write_size": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"lost": 0,
"manifest": {
"type": 0
},
"oid": {
"hash": 1629828556,
"key": "",
"max": 0,
"namespace": "",
"oid": "ROBJ18",
"pool": 3,
"snapid": -2
},
"omap_digest": "0xddc3680f",
"size": 7,
"truncate_seq": 0,
"truncate_size": 0,
"user_version": 54,
"watchers": {}
},
"omap_digest": "0xddc3680f",
"osd": 0,
"primary": false,
"size": 7
},
{
"data_digest": "0xbd89c912",
"errors": [
"data_digest_mismatch_info"
],
"object_info": {
"alloc_hint_flags": 255,
"data_digest": "0x2ddbf8f5",
"expected_object_size": 0,
"expected_write_size": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"lost": 0,
"manifest": {
"type": 0
},
"oid": {
"hash": 1629828556,
"key": "",
"max": 0,
"namespace": "",
"oid": "ROBJ18",
"pool": 3,
"snapid": -2
},
"omap_digest": "0xddc3680f",
"size": 7,
"truncate_seq": 0,
"truncate_size": 0,
"user_version": 54,
"watchers": {}
},
"omap_digest": "0xddc3680f",
"osd": 1,
"primary": true,
"size": 7
}
],
"union_shard_errors": [
"data_digest_mismatch_info"
]
},
{
"shards": [
{
"data_digest": "0x578a4830",
"omap_digest": "0xf8e11918",
"size": 7,
"errors": [
"data_digest_mismatch_info"
],
"osd": 0,
"primary": false
},
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xf8e11918",
"size": 7,
"errors": [],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ2",
"key": "",
"snapid": -2,
"hash": 2026323607,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'60",
"prior_version": "23'6",
"last_reqid": "osd.1.0:59",
"user_version": 6,
"size": 7,
"mtime": "2018-04-05 14:33:20.498756",
"local_mtime": "2018-04-05 14:33:20.499704",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xf8e11918",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"data_digest_mismatch_info"
],
"errors": [
"data_digest_mismatch"
],
"object": {
"version": 6,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ2"
}
},
{
"shards": [
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x00b35dfd",
"size": 7,
"errors": [],
"osd": 0,
"primary": false
},
{
"errors": [
"missing"
],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ3",
"key": "",
"snapid": -2,
"hash": 625845583,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'61",
"prior_version": "25'9",
"last_reqid": "osd.1.0:60",
"user_version": 9,
"size": 7,
"mtime": "2018-04-05 14:33:21.189382",
"local_mtime": "2018-04-05 14:33:21.190446",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x00b35dfd",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"missing"
],
"errors": [],
"object": {
"version": 9,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ3"
}
},
{
"shards": [
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xd7178dfe",
"size": 7,
"errors": [
"omap_digest_mismatch_info"
],
"osd": 0,
"primary": false
},
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xe2d46ea4",
"size": 7,
"errors": [],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ4",
"key": "",
"snapid": -2,
"hash": 2360875311,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'62",
"prior_version": "27'12",
"last_reqid": "osd.1.0:61",
"user_version": 12,
"size": 7,
"mtime": "2018-04-05 14:33:21.862313",
"local_mtime": "2018-04-05 14:33:21.863261",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xe2d46ea4",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"omap_digest_mismatch_info"
],
"errors": [
"omap_digest_mismatch"
],
"object": {
"version": 12,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ4"
}
},
{
"shards": [
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x1a862a41",
"size": 7,
"errors": [],
"osd": 0,
"primary": false
},
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x06cac8f6",
"size": 7,
"errors": [
"omap_digest_mismatch_info"
],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ5",
"key": "",
"snapid": -2,
"hash": 2334915887,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'63",
"prior_version": "29'15",
"last_reqid": "osd.1.0:62",
"user_version": 15,
"size": 7,
"mtime": "2018-04-05 14:33:22.589300",
"local_mtime": "2018-04-05 14:33:22.590376",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x1a862a41",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"omap_digest_mismatch_info"
],
"errors": [
"omap_digest_mismatch"
],
"object": {
"version": 15,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ5"
}
},
{
"shards": [
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x689ee887",
"size": 7,
"errors": [
"omap_digest_mismatch_info"
],
"osd": 0,
"primary": false
},
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x179c919f",
"size": 7,
"errors": [],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ6",
"key": "",
"snapid": -2,
"hash": 390610085,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'54",
"prior_version": "31'18",
"last_reqid": "osd.1.0:53",
"user_version": 18,
"size": 7,
"mtime": "2018-04-05 14:33:23.289188",
"local_mtime": "2018-04-05 14:33:23.290130",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x179c919f",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"omap_digest_mismatch_info"
],
"errors": [
"omap_digest_mismatch"
],
"object": {
"version": 18,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ6"
}
},
{
"shards": [
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xefced57a",
"size": 7,
"errors": [],
"osd": 0,
"primary": false
},
{
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x6a73cc07",
"size": 7,
"errors": [
"omap_digest_mismatch_info"
],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ7",
"key": "",
"snapid": -2,
"hash": 3529485009,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'53",
"prior_version": "33'21",
"last_reqid": "osd.1.0:52",
"user_version": 21,
"size": 7,
"mtime": "2018-04-05 14:33:23.979658",
"local_mtime": "2018-04-05 14:33:23.980731",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xefced57a",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"omap_digest_mismatch_info"
],
"errors": [
"omap_digest_mismatch"
],
"object": {
"version": 21,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ7"
}
},
{
"shards": [
{
"attrs": [
{
"Base64": false,
"value": "bad-val",
"name": "key1-ROBJ8"
},
{
"Base64": false,
"value": "val2-ROBJ8",
"name": "key2-ROBJ8"
}
],
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xd6be81dc",
"size": 7,
"errors": [],
"osd": 0,
"primary": false
},
{
"attrs": [
{
"Base64": false,
"value": "val1-ROBJ8",
"name": "key1-ROBJ8"
},
{
"Base64": false,
"value": "val3-ROBJ8",
"name": "key3-ROBJ8"
}
],
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xd6be81dc",
"size": 7,
"errors": [],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ8",
"key": "",
"snapid": -2,
"hash": 2359695969,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "79'66",
"prior_version": "79'65",
"last_reqid": "client.4554.0:1",
"user_version": 79,
"size": 7,
"mtime": "2018-04-05 14:34:05.598688",
"local_mtime": "2018-04-05 14:34:05.599698",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xd6be81dc",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [],
"errors": [
"attr_value_mismatch",
"attr_name_mismatch"
],
"object": {
"version": 66,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ8"
}
},
{
"shards": [
{
"object_info": {
"oid": {
"oid": "ROBJ9",
"key": "",
"snapid": -2,
"hash": 537189375,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "51'64",
"prior_version": "37'27",
"last_reqid": "osd.1.0:63",
"user_version": 27,
"size": 7,
"mtime": "2018-04-05 14:33:25.352485",
"local_mtime": "2018-04-05 14:33:25.353746",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0x2eecc539",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"data_digest": "0x1f26fb26",
"omap_digest": "0x2eecc539",
"size": 3,
"errors": [
"obj_size_info_mismatch"
],
"osd": 0,
"primary": false
},
{
"object_info": {
"oid": {
"oid": "ROBJ9",
"key": "",
"snapid": -2,
"hash": 537189375,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "119'68",
"prior_version": "51'64",
"last_reqid": "client.4834.0:1",
"user_version": 81,
"size": 3,
"mtime": "2018-04-05 14:35:01.500659",
"local_mtime": "2018-04-05 14:35:01.502117",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x1f26fb26",
"omap_digest": "0x2eecc539",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"data_digest": "0x1f26fb26",
"omap_digest": "0x2eecc539",
"size": 3,
"errors": [],
"osd": 1,
"primary": true
}
],
"selected_object_info": {
"oid": {
"oid": "ROBJ9",
"key": "",
"snapid": -2,
"hash": 537189375,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "119'68",
"prior_version": "51'64",
"last_reqid": "client.4834.0:1",
"user_version": 81,
"size": 3,
"mtime": "2018-04-05 14:35:01.500659",
"local_mtime": "2018-04-05 14:35:01.502117",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest",
"omap_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x1f26fb26",
"omap_digest": "0x2eecc539",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"obj_size_info_mismatch"
],
"errors": [
"object_info_inconsistency"
],
"object": {
"version": 68,
"snap": "head",
"locator": "",
"nspace": "",
"name": "ROBJ9"
}
}
],
"epoch": 0
}
EOF
jq "$jqfilter" $dir/json | jq '.inconsistents' | python3 -c "$sortkeys" > $dir/csjson
multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1
if test $getjson = "yes"
then
jq '.' $dir/json > save2.json
fi
if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null;
then
jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1
fi
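# Repair the PG and wait for it to go clean; the reads below confirm that
# the repaired objects are usable again.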
repair $pg
wait_for_clean
# These reads would hang (hence the timeout) if the repair didn't work
timeout 30 rados -p $poolname get ROBJ17 $dir/robj17.out || return 1
timeout 30 rados -p $poolname get ROBJ18 $dir/robj18.out || return 1
# Even though we couldn't repair all of the introduced errors, ROBJ17 and ROBJ18 should now match the data written above
diff -q $dir/new.ROBJ17 $dir/robj17.out || return 1
rm -f $dir/new.ROBJ17 $dir/robj17.out || return 1
diff -q $dir/new.ROBJ18 $dir/robj18.out || return 1
rm -f $dir/new.ROBJ18 $dir/robj18.out || return 1
if [ $ERRORS != "0" ];
then
echo "TEST FAILED WITH $ERRORS ERRORS"
return 1
fi
ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
}
#
# Test scrub errors for an erasure coded pool
#
function corrupt_scrub_erasure() {
local dir=$1
local allow_overwrites=$2
local poolname=ecpool
local total_objs=7
run_mon $dir a || return 1
run_mgr $dir x || return 1
for id in $(seq 0 2) ; do
run_osd $dir $id || return 1
done
create_rbd_pool || return 1
create_pool foo 1
create_ec_pool $poolname $allow_overwrites k=2 m=1 stripe_unit=2K --force || return 1
wait_for_clean || return 1
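# With k=2, m=1 and a 2K stripe unit every object is stored as three shards,
# which is why the per-shard sizes reported further below are 2048 bytes.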
for i in $(seq 1 $total_objs) ; do
objname=EOBJ${i}
add_something $dir $poolname $objname || return 1
local osd=$(expr $i % 2)
case $i in
1)
# Size (deep scrub data_digest too)
local payload=UVWXYZZZ
echo $payload > $dir/CORRUPT
objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1
;;
2)
# Corrupt EC shard
dd if=/dev/urandom of=$dir/CORRUPT bs=2048 count=1
objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1
;;
3)
# missing
objectstore_tool $dir $osd $objname remove || return 1
;;
4)
rados --pool $poolname setxattr $objname key1-$objname val1-$objname || return 1
rados --pool $poolname setxattr $objname key2-$objname val2-$objname || return 1
# Break xattrs
echo -n bad-val > $dir/bad-val
objectstore_tool $dir $osd $objname set-attr _key1-$objname $dir/bad-val || return 1
objectstore_tool $dir $osd $objname rm-attr _key2-$objname || return 1
echo -n val3-$objname > $dir/newval
objectstore_tool $dir $osd $objname set-attr _key3-$objname $dir/newval || return 1
rm $dir/bad-val $dir/newval
;;
5)
# Corrupt EC shard
dd if=/dev/urandom of=$dir/CORRUPT bs=2048 count=2
objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1
;;
6)
objectstore_tool $dir 0 $objname rm-attr hinfo_key || return 1
echo -n bad-val > $dir/bad-val
objectstore_tool $dir 1 $objname set-attr hinfo_key $dir/bad-val || return 1
;;
7)
local payload=MAKETHISDIFFERENTFROMOTHEROBJECTS
echo $payload > $dir/DIFFERENT
rados --pool $poolname put $objname $dir/DIFFERENT || return 1
# Get hinfo_key from EOBJ1
objectstore_tool $dir 0 EOBJ1 get-attr hinfo_key > $dir/hinfo
objectstore_tool $dir 0 $objname set-attr hinfo_key $dir/hinfo || return 1
rm -f $dir/hinfo
;;
esac
done
local pg=$(get_pg $poolname EOBJ0)
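# A regular scrub catches the size mismatches, the missing shard and the
# xattr/hinfo problems; the same-size data corruption in EOBJ2 is only
# visible to the deep scrub further below.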
pg_scrub $pg
rados list-inconsistent-pg $poolname > $dir/json || return 1
# Check pg count
test $(jq '. | length' $dir/json) = "1" || return 1
# Check pgid
test $(jq -r '.[0]' $dir/json) = $pg || return 1
rados list-inconsistent-obj $pg > $dir/json || return 1
# Get epoch for repair-get requests
epoch=$(jq .epoch $dir/json)
jq "$jqfilter" << EOF | jq '.inconsistents' | python3 -c "$sortkeys" > $dir/checkcsjson
{
"inconsistents": [
{
"shards": [
{
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"object_info": {
"oid": {
"oid": "EOBJ1",
"key": "",
"snapid": -2,
"hash": 560836233,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "27'1",
"prior_version": "0'0",
"last_reqid": "client.4184.0:1",
"user_version": 1,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"size": 9,
"shard": 0,
"errors": [
"size_mismatch_info",
"obj_size_info_mismatch"
],
"osd": 1,
"primary": true
},
{
"size": 2048,
"shard": 1,
"errors": [],
"osd": 2,
"primary": false
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ1",
"key": "",
"snapid": -2,
"hash": 560836233,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "27'1",
"prior_version": "0'0",
"last_reqid": "client.4184.0:1",
"user_version": 1,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"size_mismatch_info",
"obj_size_info_mismatch"
],
"errors": [
"size_mismatch"
],
"object": {
"version": 1,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ1"
}
},
{
"shards": [
{
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"shard": 0,
"errors": [
"missing"
],
"osd": 1,
"primary": true
},
{
"size": 2048,
"shard": 1,
"errors": [],
"osd": 2,
"primary": false
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ3",
"key": "",
"snapid": -2,
"hash": 3125668237,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "39'3",
"prior_version": "0'0",
"last_reqid": "client.4252.0:1",
"user_version": 3,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"missing"
],
"errors": [],
"object": {
"version": 3,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ3"
}
},
{
"shards": [
{
"attrs": [
{
"Base64": false,
"value": "bad-val",
"name": "key1-EOBJ4"
},
{
"Base64": false,
"value": "val2-EOBJ4",
"name": "key2-EOBJ4"
}
],
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"osd": 1,
"primary": true,
"shard": 0,
"errors": [],
"size": 2048,
"attrs": [
{
"Base64": false,
"value": "val1-EOBJ4",
"name": "key1-EOBJ4"
},
{
"Base64": false,
"value": "val2-EOBJ4",
"name": "key2-EOBJ4"
}
]
},
{
"osd": 2,
"primary": false,
"shard": 1,
"errors": [],
"size": 2048,
"attrs": [
{
"Base64": false,
"value": "val1-EOBJ4",
"name": "key1-EOBJ4"
},
{
"Base64": false,
"value": "val3-EOBJ4",
"name": "key3-EOBJ4"
}
]
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ4",
"key": "",
"snapid": -2,
"hash": 1618759290,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "45'6",
"prior_version": "45'5",
"last_reqid": "client.4294.0:1",
"user_version": 6,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [],
"errors": [
"attr_value_mismatch",
"attr_name_mismatch"
],
"object": {
"version": 6,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ4"
}
},
{
"shards": [
{
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"object_info": {
"oid": {
"oid": "EOBJ5",
"key": "",
"snapid": -2,
"hash": 2918945441,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "59'7",
"prior_version": "0'0",
"last_reqid": "client.4382.0:1",
"user_version": 7,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"size": 4096,
"shard": 0,
"errors": [
"size_mismatch_info",
"obj_size_info_mismatch"
],
"osd": 1,
"primary": true
},
{
"size": 2048,
"shard": 1,
"errors": [],
"osd": 2,
"primary": false
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ5",
"key": "",
"snapid": -2,
"hash": 2918945441,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "59'7",
"prior_version": "0'0",
"last_reqid": "client.4382.0:1",
"user_version": 7,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"size_mismatch_info",
"obj_size_info_mismatch"
],
"errors": [
"size_mismatch"
],
"object": {
"version": 7,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ5"
}
},
{
"errors": [],
"object": {
"locator": "",
"name": "EOBJ6",
"nspace": "",
"snap": "head",
"version": 8
},
"selected_object_info": {
"oid": {
"oid": "EOBJ6",
"key": "",
"snapid": -2,
"hash": 3050890866,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "65'8",
"prior_version": "0'0",
"last_reqid": "client.4418.0:1",
"user_version": 8,
"size": 7,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"shards": [
{
"errors": [
"hinfo_missing"
],
"osd": 0,
"primary": false,
"shard": 2,
"size": 2048
},
{
"errors": [
"hinfo_corrupted"
],
"osd": 1,
"primary": true,
"shard": 0,
"hashinfo": "bad-val",
"size": 2048
},
{
"errors": [],
"osd": 2,
"primary": false,
"shard": 1,
"size": 2048,
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 80717615,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 80717615,
"shard": 2
}
],
"total_chunk_size": 2048
}
}
],
"union_shard_errors": [
"hinfo_missing",
"hinfo_corrupted"
]
},
{
"errors": [
"hinfo_inconsistency"
],
"object": {
"locator": "",
"name": "EOBJ7",
"nspace": "",
"snap": "head",
"version": 10
},
"selected_object_info": {
"oid": {
"oid": "EOBJ7",
"key": "",
"snapid": -2,
"hash": 3258066308,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "75'10",
"prior_version": "75'9",
"last_reqid": "client.4482.0:1",
"user_version": 10,
"size": 34,
"mtime": "",
"local_mtime": "",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x136e4e27",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"shards": [
{
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 80717615,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 80717615,
"shard": 2
}
],
"total_chunk_size": 2048
},
"errors": [],
"osd": 0,
"primary": false,
"shard": 2,
"size": 2048
},
{
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 1534350760,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 1534350760,
"shard": 2
}
],
"total_chunk_size": 2048
},
"errors": [],
"osd": 1,
"primary": true,
"shard": 0,
"size": 2048
},
{
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 1534350760,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 1534350760,
"shard": 2
}
],
"total_chunk_size": 2048
},
"errors": [],
"osd": 2,
"primary": false,
"shard": 1,
"size": 2048
}
],
"union_shard_errors": []
}
],
"epoch": 0
}
EOF
jq "$jqfilter" $dir/json | jq '.inconsistents' | python3 -c "$sortkeys" > $dir/csjson
multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1
if test $getjson = "yes"
then
jq '.' $dir/json > save3.json
fi
if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null;
then
jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1
fi
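    # Now deep-scrub the same pg.  Reading the shards during a deep scrub
    # surfaces additional error types (such as read_error and ec_hash_error),
    # which is why the expected output below differs from the shallow-scrub
    # results checked above.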
pg_deep_scrub $pg
rados list-inconsistent-pg $poolname > $dir/json || return 1
# Check pg count
test $(jq '. | length' $dir/json) = "1" || return 1
# Check pgid
test $(jq -r '.[0]' $dir/json) = $pg || return 1
rados list-inconsistent-obj $pg > $dir/json || return 1
# Get epoch for repair-get requests
epoch=$(jq .epoch $dir/json)
if [ "$allow_overwrites" = "true" ]
then
jq "$jqfilter" << EOF | jq '.inconsistents' | python3 -c "$sortkeys" > $dir/checkcsjson
{
"inconsistents": [
{
"shards": [
{
"data_digest": "0x00000000",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"object_info": {
"oid": {
"oid": "EOBJ1",
"key": "",
"snapid": -2,
"hash": 560836233,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "27'1",
"prior_version": "0'0",
"last_reqid": "client.4184.0:1",
"user_version": 1,
"size": 7,
"mtime": "2018-04-05 14:31:33.837147",
"local_mtime": "2018-04-05 14:31:33.840763",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"size": 9,
"shard": 0,
"errors": [
"read_error",
"size_mismatch_info",
"obj_size_info_mismatch"
],
"osd": 1,
"primary": true
},
{
"data_digest": "0x00000000",
"omap_digest": "0xffffffff",
"size": 2048,
"shard": 1,
"errors": [],
"osd": 2,
"primary": false
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ1",
"key": "",
"snapid": -2,
"hash": 560836233,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "27'1",
"prior_version": "0'0",
"last_reqid": "client.4184.0:1",
"user_version": 1,
"size": 7,
"mtime": "2018-04-05 14:31:33.837147",
"local_mtime": "2018-04-05 14:31:33.840763",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"read_error",
"size_mismatch_info",
"obj_size_info_mismatch"
],
"errors": [
"size_mismatch"
],
"object": {
"version": 1,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ1"
}
},
{
"shards": [
{
"data_digest": "0x00000000",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"shard": 0,
"errors": [
"missing"
],
"osd": 1,
"primary": true
},
{
"data_digest": "0x00000000",
"omap_digest": "0xffffffff",
"size": 2048,
"shard": 1,
"errors": [],
"osd": 2,
"primary": false
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ3",
"key": "",
"snapid": -2,
"hash": 3125668237,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "39'3",
"prior_version": "0'0",
"last_reqid": "client.4252.0:1",
"user_version": 3,
"size": 7,
"mtime": "2018-04-05 14:31:46.841145",
"local_mtime": "2018-04-05 14:31:46.844996",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"missing"
],
"errors": [],
"object": {
"version": 3,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ3"
}
},
{
"shards": [
{
"attrs": [
{
"Base64": false,
"value": "bad-val",
"name": "key1-EOBJ4"
},
{
"Base64": false,
"value": "val2-EOBJ4",
"name": "key2-EOBJ4"
}
],
"data_digest": "0x00000000",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"attrs": [
{
"Base64": false,
"value": "val1-EOBJ4",
"name": "key1-EOBJ4"
},
{
"Base64": false,
"value": "val2-EOBJ4",
"name": "key2-EOBJ4"
}
],
"data_digest": "0x00000000",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 0,
"osd": 1,
"primary": true
},
{
"attrs": [
{
"Base64": false,
"value": "val1-EOBJ4",
"name": "key1-EOBJ4"
},
{
"Base64": false,
"value": "val3-EOBJ4",
"name": "key3-EOBJ4"
}
],
"data_digest": "0x00000000",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 1,
"osd": 2,
"primary": false
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ4",
"key": "",
"snapid": -2,
"hash": 1618759290,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "45'6",
"prior_version": "45'5",
"last_reqid": "client.4294.0:1",
"user_version": 6,
"size": 7,
"mtime": "2018-04-05 14:31:54.663622",
"local_mtime": "2018-04-05 14:31:54.664527",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [],
"errors": [
"attr_value_mismatch",
"attr_name_mismatch"
],
"object": {
"version": 6,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ4"
}
},
{
"shards": [
{
"data_digest": "0x00000000",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"object_info": {
"oid": {
"oid": "EOBJ5",
"key": "",
"snapid": -2,
"hash": 2918945441,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "59'7",
"prior_version": "0'0",
"last_reqid": "client.4382.0:1",
"user_version": 7,
"size": 7,
"mtime": "2018-04-05 14:32:12.929161",
"local_mtime": "2018-04-05 14:32:12.934707",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"size": 4096,
"errors": [
"read_error",
"size_mismatch_info",
"obj_size_info_mismatch"
],
"shard": 0,
"osd": 1,
"primary": true
},
{
"data_digest": "0x00000000",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 1,
"osd": 2,
"primary": false
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ5",
"key": "",
"snapid": -2,
"hash": 2918945441,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "59'7",
"prior_version": "0'0",
"last_reqid": "client.4382.0:1",
"user_version": 7,
"size": 7,
"mtime": "2018-04-05 14:32:12.929161",
"local_mtime": "2018-04-05 14:32:12.934707",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"read_error",
"size_mismatch_info",
"obj_size_info_mismatch"
],
"errors": [
"size_mismatch"
],
"object": {
"version": 7,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ5"
}
},
{
"object": {
"name": "EOBJ6",
"nspace": "",
"locator": "",
"snap": "head",
"version": 8
},
"errors": [],
"union_shard_errors": [
"read_error",
"hinfo_missing",
"hinfo_corrupted"
],
"selected_object_info": {
"oid": {
"oid": "EOBJ6",
"key": "",
"snapid": -2,
"hash": 3050890866,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "65'8",
"prior_version": "0'0",
"last_reqid": "client.4418.0:1",
"user_version": 8,
"size": 7,
"mtime": "2018-04-05 14:32:20.634116",
"local_mtime": "2018-04-05 14:32:20.637999",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"shards": [
{
"osd": 0,
"primary": false,
"shard": 2,
"errors": [
"read_error",
"hinfo_missing"
],
"size": 2048
},
{
"osd": 1,
"primary": true,
"shard": 0,
"errors": [
"read_error",
"hinfo_corrupted"
],
"size": 2048,
"hashinfo": "bad-val"
},
{
"osd": 2,
"primary": false,
"shard": 1,
"errors": [],
"size": 2048,
"omap_digest": "0xffffffff",
"data_digest": "0x00000000",
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 80717615,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 80717615,
"shard": 2
}
],
"total_chunk_size": 2048
}
}
]
},
{
"object": {
"name": "EOBJ7",
"nspace": "",
"locator": "",
"snap": "head",
"version": 10
},
"errors": [
"hinfo_inconsistency"
],
"union_shard_errors": [],
"selected_object_info": {
"oid": {
"oid": "EOBJ7",
"key": "",
"snapid": -2,
"hash": 3258066308,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "75'10",
"prior_version": "75'9",
"last_reqid": "client.4482.0:1",
"user_version": 10,
"size": 34,
"mtime": "2018-04-05 14:32:33.058782",
"local_mtime": "2018-04-05 14:32:33.059679",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x136e4e27",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"shards": [
{
"osd": 0,
"primary": false,
"shard": 2,
"errors": [],
"size": 2048,
"omap_digest": "0xffffffff",
"data_digest": "0x00000000",
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 80717615,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 80717615,
"shard": 2
}
],
"total_chunk_size": 2048
}
},
{
"osd": 1,
"primary": true,
"shard": 0,
"errors": [],
"size": 2048,
"omap_digest": "0xffffffff",
"data_digest": "0x00000000",
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 1534350760,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 1534350760,
"shard": 2
}
],
"total_chunk_size": 2048
}
},
{
"osd": 2,
"primary": false,
"shard": 1,
"errors": [],
"size": 2048,
"omap_digest": "0xffffffff",
"data_digest": "0x00000000",
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 1534350760,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 1534350760,
"shard": 2
}
],
"total_chunk_size": 2048
}
}
]
}
],
"epoch": 0
}
EOF
else
jq "$jqfilter" << EOF | jq '.inconsistents' | python3 -c "$sortkeys" > $dir/checkcsjson
{
"inconsistents": [
{
"shards": [
{
"data_digest": "0x04cfa72f",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"object_info": {
"oid": {
"oid": "EOBJ1",
"key": "",
"snapid": -2,
"hash": 560836233,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "27'1",
"prior_version": "0'0",
"last_reqid": "client.4192.0:1",
"user_version": 1,
"size": 7,
"mtime": "2018-04-05 14:30:10.688009",
"local_mtime": "2018-04-05 14:30:10.691774",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"size": 9,
"shard": 0,
"errors": [
"read_error",
"size_mismatch_info",
"obj_size_info_mismatch"
],
"osd": 1,
"primary": true
},
{
"data_digest": "0x04cfa72f",
"omap_digest": "0xffffffff",
"size": 2048,
"shard": 1,
"errors": [],
"osd": 2,
"primary": false
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ1",
"key": "",
"snapid": -2,
"hash": 560836233,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "27'1",
"prior_version": "0'0",
"last_reqid": "client.4192.0:1",
"user_version": 1,
"size": 7,
"mtime": "2018-04-05 14:30:10.688009",
"local_mtime": "2018-04-05 14:30:10.691774",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"read_error",
"size_mismatch_info",
"obj_size_info_mismatch"
],
"errors": [
"size_mismatch"
],
"object": {
"version": 1,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ1"
}
},
{
"shards": [
{
"size": 2048,
"errors": [
"ec_hash_error"
],
"shard": 2,
"osd": 0,
"primary": false
},
{
"data_digest": "0x04cfa72f",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 0,
"osd": 1,
"primary": true
},
{
"data_digest": "0x04cfa72f",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 1,
"osd": 2,
"primary": false
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ2",
"key": "",
"snapid": -2,
"hash": 562812377,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "33'2",
"prior_version": "0'0",
"last_reqid": "client.4224.0:1",
"user_version": 2,
"size": 7,
"mtime": "2018-04-05 14:30:14.152945",
"local_mtime": "2018-04-05 14:30:14.154014",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"ec_hash_error"
],
"errors": [],
"object": {
"version": 2,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ2"
}
},
{
"shards": [
{
"data_digest": "0x04cfa72f",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"osd": 1,
"primary": true,
"shard": 0,
"errors": [
"missing"
]
},
{
"data_digest": "0x04cfa72f",
"omap_digest": "0xffffffff",
"size": 2048,
"shard": 1,
"errors": [],
"osd": 2,
"primary": false
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ3",
"key": "",
"snapid": -2,
"hash": 3125668237,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "39'3",
"prior_version": "0'0",
"last_reqid": "client.4258.0:1",
"user_version": 3,
"size": 7,
"mtime": "2018-04-05 14:30:18.875544",
"local_mtime": "2018-04-05 14:30:18.880153",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"missing"
],
"errors": [],
"object": {
"version": 3,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ3"
}
},
{
"shards": [
{
"attrs": [
{
"Base64": false,
"value": "bad-val",
"name": "key1-EOBJ4"
},
{
"Base64": false,
"value": "val2-EOBJ4",
"name": "key2-EOBJ4"
}
],
"data_digest": "0x04cfa72f",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"osd": 1,
"primary": true,
"shard": 0,
"errors": [],
"size": 2048,
"omap_digest": "0xffffffff",
"data_digest": "0x04cfa72f",
"attrs": [
{
"Base64": false,
"value": "val1-EOBJ4",
"name": "key1-EOBJ4"
},
{
"Base64": false,
"value": "val2-EOBJ4",
"name": "key2-EOBJ4"
}
]
},
{
"osd": 2,
"primary": false,
"shard": 1,
"errors": [],
"size": 2048,
"omap_digest": "0xffffffff",
"data_digest": "0x04cfa72f",
"attrs": [
{
"Base64": false,
"value": "val1-EOBJ4",
"name": "key1-EOBJ4"
},
{
"Base64": false,
"value": "val3-EOBJ4",
"name": "key3-EOBJ4"
}
]
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ4",
"key": "",
"snapid": -2,
"hash": 1618759290,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "45'6",
"prior_version": "45'5",
"last_reqid": "client.4296.0:1",
"user_version": 6,
"size": 7,
"mtime": "2018-04-05 14:30:22.271983",
"local_mtime": "2018-04-05 14:30:22.272840",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [],
"errors": [
"attr_value_mismatch",
"attr_name_mismatch"
],
"object": {
"version": 6,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ4"
}
},
{
"shards": [
{
"data_digest": "0x04cfa72f",
"omap_digest": "0xffffffff",
"size": 2048,
"errors": [],
"shard": 2,
"osd": 0,
"primary": false
},
{
"object_info": {
"oid": {
"oid": "EOBJ5",
"key": "",
"snapid": -2,
"hash": 2918945441,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "59'7",
"prior_version": "0'0",
"last_reqid": "client.4384.0:1",
"user_version": 7,
"size": 7,
"mtime": "2018-04-05 14:30:35.162395",
"local_mtime": "2018-04-05 14:30:35.166390",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"size": 4096,
"shard": 0,
"errors": [
"read_error",
"size_mismatch_info",
"obj_size_info_mismatch"
],
"osd": 1,
"primary": true
},
{
"data_digest": "0x04cfa72f",
"omap_digest": "0xffffffff",
"size": 2048,
"shard": 1,
"errors": [],
"osd": 2,
"primary": false
}
],
"selected_object_info": {
"oid": {
"oid": "EOBJ5",
"key": "",
"snapid": -2,
"hash": 2918945441,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "59'7",
"prior_version": "0'0",
"last_reqid": "client.4384.0:1",
"user_version": 7,
"size": 7,
"mtime": "2018-04-05 14:30:35.162395",
"local_mtime": "2018-04-05 14:30:35.166390",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"union_shard_errors": [
"read_error",
"size_mismatch_info",
"obj_size_info_mismatch"
],
"errors": [
"size_mismatch"
],
"object": {
"version": 7,
"snap": "head",
"locator": "",
"nspace": "",
"name": "EOBJ5"
}
},
{
"object": {
"name": "EOBJ6",
"nspace": "",
"locator": "",
"snap": "head",
"version": 8
},
"errors": [],
"union_shard_errors": [
"read_error",
"hinfo_missing",
"hinfo_corrupted"
],
"selected_object_info": {
"oid": {
"oid": "EOBJ6",
"key": "",
"snapid": -2,
"hash": 3050890866,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "65'8",
"prior_version": "0'0",
"last_reqid": "client.4420.0:1",
"user_version": 8,
"size": 7,
"mtime": "2018-04-05 14:30:40.914673",
"local_mtime": "2018-04-05 14:30:40.917705",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x2ddbf8f5",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"shards": [
{
"osd": 0,
"primary": false,
"shard": 2,
"errors": [
"read_error",
"hinfo_missing"
],
"size": 2048
},
{
"osd": 1,
"primary": true,
"shard": 0,
"errors": [
"read_error",
"hinfo_corrupted"
],
"size": 2048,
"hashinfo": "bad-val"
},
{
"osd": 2,
"primary": false,
"shard": 1,
"errors": [],
"size": 2048,
"omap_digest": "0xffffffff",
"data_digest": "0x04cfa72f",
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 80717615,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 80717615,
"shard": 2
}
],
"total_chunk_size": 2048
}
}
]
},
{
"object": {
"name": "EOBJ7",
"nspace": "",
"locator": "",
"snap": "head",
"version": 10
},
"errors": [
"hinfo_inconsistency"
],
"union_shard_errors": [
"ec_hash_error"
],
"selected_object_info": {
"oid": {
"oid": "EOBJ7",
"key": "",
"snapid": -2,
"hash": 3258066308,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "75'10",
"prior_version": "75'9",
"last_reqid": "client.4486.0:1",
"user_version": 10,
"size": 34,
"mtime": "2018-04-05 14:30:50.995009",
"local_mtime": "2018-04-05 14:30:50.996112",
"lost": 0,
"flags": [
"dirty",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x136e4e27",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"shards": [
{
"osd": 0,
"primary": false,
"shard": 2,
"errors": [
"ec_hash_error"
],
"size": 2048,
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 80717615,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 80717615,
"shard": 2
}
],
"total_chunk_size": 2048
}
},
{
"osd": 1,
"primary": true,
"shard": 0,
"errors": [],
"size": 2048,
"omap_digest": "0xffffffff",
"data_digest": "0x5b7455a8",
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 1534350760,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 1534350760,
"shard": 2
}
],
"total_chunk_size": 2048
}
},
{
"osd": 2,
"primary": false,
"shard": 1,
"errors": [],
"size": 2048,
"omap_digest": "0xffffffff",
"data_digest": "0x5b7455a8",
"hashinfo": {
"cumulative_shard_hashes": [
{
"hash": 1534350760,
"shard": 0
},
{
"hash": 1534491824,
"shard": 1
},
{
"hash": 1534350760,
"shard": 2
}
],
"total_chunk_size": 2048
}
}
]
}
],
"epoch": 0
}
EOF
fi
jq "$jqfilter" $dir/json | jq '.inconsistents' | python3 -c "$sortkeys" > $dir/csjson
multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1
if test $getjson = "yes"
then
if [ "$allow_overwrites" = "true" ]
then
num=4
else
num=5
fi
jq '.' $dir/json > save${num}.json
fi
if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null;
then
jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1
fi
ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
}
function TEST_corrupt_scrub_erasure_appends() {
corrupt_scrub_erasure $1 false
}
function TEST_corrupt_scrub_erasure_overwrites() {
if [ "$use_ec_overwrite" = "true" ]; then
corrupt_scrub_erasure $1 true
fi
}
#
# Test to make sure that a periodic scrub won't cause deep-scrub info to be lost
#
function TEST_periodic_scrub_replicated() {
local dir=$1
local poolname=psr_pool
local objname=POBJ
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-interval-randomize-ratio=0 --osd-deep-scrub-randomize-ratio=0 "
ceph_osd_args+="--osd_scrub_backoff_ratio=0"
run_osd $dir 0 $ceph_osd_args || return 1
run_osd $dir 1 $ceph_osd_args || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
create_pool $poolname 1 1 || return 1
wait_for_clean || return 1
local osd=0
add_something $dir $poolname $objname scrub || return 1
local primary=$(get_primary $poolname $objname)
local pg=$(get_pg $poolname $objname)
    # Add a deep-scrub-only error
local payload=UVWXYZ
echo $payload > $dir/CORRUPT
# Uses $ceph_osd_args for osd restart
objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1
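    # The corruption above only touches the object's data, so a regular scrub
    # cannot see it; only a deep scrub, which reads and digests the data,
    # will detect the mismatch.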
# No scrub information available, so expect failure
set -o pipefail
! rados list-inconsistent-obj $pg | jq '.' || return 1
set +o pipefail
pg_deep_scrub $pg || return 1
# Make sure bad object found
rados list-inconsistent-obj $pg | jq '.' | grep -q $objname || return 1
flush_pg_stats
local last_scrub=$(get_last_scrub_stamp $pg)
    # Fake a scheduled scrub
ceph tell $pg scrub || return 1
    # Wait for the scheduled regular scrub
wait_for_scrub $pg "$last_scrub"
    # The regular scrub should have been upgraded to a deep-scrub
grep -q "Deep scrub errors, upgrading scrub to deep-scrub" $dir/osd.${primary}.log || return 1
# Bad object still known
rados list-inconsistent-obj $pg | jq '.' | grep -q $objname || return 1
    # With nodeep-scrub set, the scrub can't be upgraded to a deep-scrub
ceph osd set nodeep-scrub
# Let map change propagate to OSDs
ceph tell osd.0 get_latest_osdmap
flush_pg_stats
sleep 5
    # Fake a scheduled scrub
ceph tell $pg scrub || return 1
    # Wait for the scheduled regular scrub to be
    # noticed and skipped (there are deep-scrub errors and nodeep-scrub is set)
local found=false
for i in $(seq 14 -1 0)
do
sleep 1
! grep -q "Regular scrub skipped due to deep-scrub errors and nodeep-scrub set" $dir/osd.${primary}.log || { found=true ; break; }
echo Time left: $i seconds
done
test $found = "true" || return 1
# Bad object still known
rados list-inconsistent-obj $pg | jq '.' | grep -q $objname || return 1
flush_pg_stats
    # Request a regular scrub; this time it will be done
pg_scrub $pg
grep -q "Regular scrub request, deep-scrub details will be lost" $dir/osd.${primary}.log || return 1
# deep-scrub error is no longer present
rados list-inconsistent-obj $pg | jq '.' | grep -qv $objname || return 1
}
function TEST_scrub_warning() {
local dir=$1
local poolname=psr_pool
local objname=POBJ
local scrubs=5
local deep_scrubs=5
local i1_day=86400
local i7_days=$(calc $i1_day \* 7)
local i14_days=$(calc $i1_day \* 14)
local overdue=0.5
local conf_overdue_seconds=$(calc $i7_days + $i1_day + \( $i7_days \* $overdue \) )
local pool_overdue_seconds=$(calc $i14_days + $i1_day + \( $i14_days \* $overdue \) )
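    # With overdue=0.5 these work out to:
    #   conf_overdue_seconds = 604800 + 86400 + 302400 = 993600   (11.5 days)
    #   pool_overdue_seconds = 1209600 + 86400 + 604800 = 1900800 (22 days)
    # i.e. the ages beyond which pgs on the global (7 day) or pool-specific
    # (14 day) interval should be reported as not (deep-)scrubbed in time.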
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x --mon_warn_pg_not_scrubbed_ratio=${overdue} --mon_warn_pg_not_deep_scrubbed_ratio=${overdue} || return 1
run_osd $dir 0 $ceph_osd_args --osd_scrub_backoff_ratio=0 || return 1
for i in $(seq 1 $(expr $scrubs + $deep_scrubs))
do
create_pool $poolname-$i 1 1 || return 1
wait_for_clean || return 1
if [ $i = "1" ];
then
ceph osd pool set $poolname-$i scrub_max_interval $i14_days
fi
if [ $i = $(expr $scrubs + 1) ];
then
ceph osd pool set $poolname-$i deep_scrub_interval $i14_days
fi
done
# Only 1 osd
local primary=0
ceph osd set noscrub || return 1
ceph osd set nodeep-scrub || return 1
ceph config set global osd_scrub_interval_randomize_ratio 0
ceph config set global osd_deep_scrub_randomize_ratio 0
ceph config set global osd_scrub_max_interval ${i7_days}
ceph config set global osd_deep_scrub_interval ${i7_days}
    # Fake scheduled scrubs
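    # The numeric argument passed to 'ceph tell <pgid> scrub' (and deep_scrub
    # below) is used to back-date the pg's scrub stamps, so each pg looks as
    # if it has not been (deep-)scrubbed for longer than its overdue threshold.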
for i in $(seq 1 $scrubs)
do
if [ $i = "1" ];
then
overdue_seconds=$pool_overdue_seconds
else
overdue_seconds=$conf_overdue_seconds
fi
ceph tell ${i}.0 scrub $(expr ${overdue_seconds} + ${i}00) || return 1
done
    # Fake scheduled deep scrubs
for i in $(seq $(expr $scrubs + 1) $(expr $scrubs + $deep_scrubs))
do
if [ $i = "$(expr $scrubs + 1)" ];
then
overdue_seconds=$pool_overdue_seconds
else
overdue_seconds=$conf_overdue_seconds
fi
ceph tell ${i}.0 deep_scrub $(expr ${overdue_seconds} + ${i}00) || return 1
done
flush_pg_stats
ceph health
ceph health detail
ceph health | grep -q " pgs not deep-scrubbed in time" || return 1
ceph health | grep -q " pgs not scrubbed in time" || return 1
# note that the 'ceph tell pg deep_scrub' command now also sets the regular scrub
# time-stamp. I.e. - all 'late for deep scrubbing' pgs are also late for
# regular scrubbing. For now, we'll allow both responses.
COUNT=$(ceph health detail | grep "not scrubbed since" | wc -l)
    if (( $COUNT != $scrubs && $COUNT != $(($scrubs + $deep_scrubs)) )); then
ceph health detail | grep "not scrubbed since"
return 1
fi
COUNT=$(ceph health detail | grep "not deep-scrubbed since" | wc -l)
if [ "$COUNT" != $deep_scrubs ]; then
ceph health detail | grep "not deep-scrubbed since"
return 1
fi
}
#
# Corrupt snapset in replicated pool
#
function TEST_corrupt_snapset_scrub_rep() {
local dir=$1
local poolname=csr_pool
local total_objs=2
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
create_pool foo 1 || return 1
create_pool $poolname 1 1 || return 1
wait_for_clean || return 1
for i in $(seq 1 $total_objs) ; do
objname=ROBJ${i}
add_something $dir $poolname $objname || return 1
rados --pool $poolname setomapheader $objname hdr-$objname || return 1
rados --pool $poolname setomapval $objname key-$objname val-$objname || return 1
done
local pg=$(get_pg $poolname ROBJ0)
local primary=$(get_primary $poolname ROBJ0)
rados -p $poolname mksnap snap1
echo -n head_of_snapshot_data > $dir/change
for i in $(seq 1 $total_objs) ; do
objname=ROBJ${i}
# Alternate corruption between osd.0 and osd.1
local osd=$(expr $i % 2)
case $i in
1)
rados --pool $poolname put $objname $dir/change
objectstore_tool $dir $osd --head $objname clear-snapset corrupt || return 1
;;
2)
rados --pool $poolname put $objname $dir/change
objectstore_tool $dir $osd --head $objname clear-snapset corrupt || return 1
;;
esac
done
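    # Each object was overwritten after snap1, so its head should carry a
    # SnapSet with one clone; clearing the SnapSet on one replica makes the
    # two copies disagree, which scrub reports below as snapset_inconsistency
    # (and the now-orphaned clone as an unexpected clone).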
rm $dir/change
pg_scrub $pg
rados list-inconsistent-pg $poolname > $dir/json || return 1
# Check pg count
test $(jq '. | length' $dir/json) = "1" || return 1
# Check pgid
test $(jq -r '.[0]' $dir/json) = $pg || return 1
rados list-inconsistent-obj $pg > $dir/json || return 1
jq "$jqfilter" << EOF | jq '.inconsistents' | python3 -c "$sortkeys" > $dir/checkcsjson
{
"epoch": 34,
"inconsistents": [
{
"object": {
"name": "ROBJ1",
"nspace": "",
"locator": "",
"snap": "head",
"version": 8
},
"errors": [
"snapset_inconsistency"
],
"union_shard_errors": [],
"selected_object_info": {
"oid": {
"oid": "ROBJ1",
"key": "",
"snapid": -2,
"hash": 1454963827,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "24'8",
"prior_version": "21'3",
"last_reqid": "client.4195.0:1",
"user_version": 8,
"size": 21,
"mtime": "2018-04-05 14:35:43.286117",
"local_mtime": "2018-04-05 14:35:43.288990",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x53acb008",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"shards": [
{
"osd": 0,
"primary": false,
"errors": [],
"size": 21,
"snapset": {
"clones": [
{
"overlap": "[]",
"size": 7,
"snap": 1,
"snaps": [
1
]
}
],
"seq": 1
}
},
{
"osd": 1,
"primary": true,
"errors": [],
"size": 21,
"snapset": {
"clones": [],
"seq": 0
}
}
]
},
{
"object": {
"name": "ROBJ2",
"nspace": "",
"locator": "",
"snap": "head",
"version": 10
},
"errors": [
"snapset_inconsistency"
],
"union_shard_errors": [],
"selected_object_info": {
"oid": {
"oid": "ROBJ2",
"key": "",
"snapid": -2,
"hash": 2026323607,
"max": 0,
"pool": 3,
"namespace": ""
},
"version": "28'10",
"prior_version": "23'6",
"last_reqid": "client.4223.0:1",
"user_version": 10,
"size": 21,
"mtime": "2018-04-05 14:35:48.326856",
"local_mtime": "2018-04-05 14:35:48.328097",
"lost": 0,
"flags": [
"dirty",
"omap",
"data_digest"
],
"truncate_seq": 0,
"truncate_size": 0,
"data_digest": "0x53acb008",
"omap_digest": "0xffffffff",
"expected_object_size": 0,
"expected_write_size": 0,
"alloc_hint_flags": 0,
"manifest": {
"type": 0
},
"watchers": {}
},
"shards": [
{
"osd": 0,
"primary": false,
"errors": [],
"size": 21,
"snapset": {
"clones": [],
"seq": 0
}
},
{
"osd": 1,
"primary": true,
"errors": [],
"size": 21,
"snapset": {
"clones": [
{
"overlap": "[]",
"size": 7,
"snap": 1,
"snaps": [
1
]
}
],
"seq": 1
}
}
]
}
]
}
EOF
jq "$jqfilter" $dir/json | jq '.inconsistents' | python3 -c "$sortkeys" > $dir/csjson
multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1
if test $getjson = "yes"
then
jq '.' $dir/json > save6.json
fi
if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null;
then
jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1
fi
ERRORS=0
declare -a err_strings
err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid [0-9]*:.*:::ROBJ1:head : snapset inconsistent"
err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid [0-9]*:.*:::ROBJ2:head : snapset inconsistent"
err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 [0-9]*:.*:::ROBJ1:1 : is an unexpected clone"
err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub : stat mismatch, got 3/4 objects, 1/2 clones, 3/4 dirty, 3/4 omap, 0/0 pinned, 0/0 hit_set_archive, 0/0 whiteouts, 49/56 bytes, 0/0 manifest objects, 0/0 hit_set_archive bytes."
err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 0 missing, 2 inconsistent objects"
err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 4 errors"
for err_string in "${err_strings[@]}"
do
if ! grep -q "$err_string" $dir/osd.${primary}.log
then
echo "Missing log message '$err_string'"
ERRORS=$(expr $ERRORS + 1)
fi
done
if [ $ERRORS != "0" ];
then
echo "TEST FAILED WITH $ERRORS ERRORS"
return 1
fi
ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
}
function TEST_request_scrub_priority() {
local dir=$1
local poolname=psr_pool
local objname=POBJ
local OBJECTS=64
local PGS=8
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-interval-randomize-ratio=0 --osd-deep-scrub-randomize-ratio=0 "
ceph_osd_args+="--osd_scrub_backoff_ratio=0"
run_osd $dir 0 $ceph_osd_args || return 1
create_pool $poolname $PGS $PGS || return 1
wait_for_clean || return 1
local osd=0
add_something $dir $poolname $objname noscrub || return 1
local primary=$(get_primary $poolname $objname)
local pg=$(get_pg $poolname $objname)
poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')
local otherpgs
for i in $(seq 0 $(expr $PGS - 1))
do
opg="${poolid}.${i}"
if [ "$opg" = "$pg" ]; then
continue
fi
otherpgs="${otherpgs}${opg} "
local other_last_scrub=$(get_last_scrub_stamp $pg)
        # Fake a scheduled scrub
ceph tell $opg scrub $opg || return 1
done
sleep 15
flush_pg_stats
    # Request a regular scrub; it will be done
local last_scrub=$(get_last_scrub_stamp $pg)
ceph pg scrub $pg
ceph osd unset noscrub || return 1
ceph osd unset nodeep-scrub || return 1
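    # The other pgs only have the fake scheduled scrubs queued; once scrubbing
    # is allowed again the operator-requested scrub of $pg should be picked
    # ahead of them, which the log check at the end of this test verifies.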
wait_for_scrub $pg "$last_scrub"
for opg in $otherpgs $pg
do
wait_for_scrub $opg "$other_last_scrub"
done
# Verify that the requested scrub ran first
grep "log_channel.*scrub ok" $dir/osd.${primary}.log | grep -v purged_snaps | head -1 | sed 's/.*[[]DBG[]]//' | grep -q $pg || return 1
}
main osd-scrub-repair "$@"
# Local Variables:
# compile-command: "cd build ; make -j4 && \
# ../qa/run-standalone.sh osd-scrub-repair.sh"
# End:
| 173,160 | 26.679188 | 597 | sh |
null | ceph-main/qa/standalone/scrub/osd-scrub-snaps.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
# Test development and debugging
# Set to "yes" in order to ignore diff errors and save results to update test
getjson="no"
jqfilter='.inconsistents'
sortkeys='import json; import sys ; JSON=sys.stdin.read() ; ud = json.loads(JSON) ; print ( json.dumps(ud, sort_keys=True, indent=2) )'
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7121" # git grep '\<7121\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
export -n CEPH_CLI_TEST_DUP_COMMAND
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function create_scenario() {
local dir=$1
local poolname=$2
local TESTDATA=$3
local osd=$4
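    # Take snapshots snap1..snap7 while overwriting or removing some of
    # obj1..obj16, then stop the osd and use ceph-objectstore-tool to inject
    # a variety of snapshot corruptions: removed or extra clones, mismatched
    # sizes, missing or corrupt SnapSet attrs, and snap-mapper entries that
    # disagree with the on-disk objects.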
SNAP=1
rados -p $poolname mksnap snap${SNAP}
dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP}
rados -p $poolname put obj1 $TESTDATA
rados -p $poolname put obj5 $TESTDATA
rados -p $poolname put obj3 $TESTDATA
for i in `seq 6 14`
do rados -p $poolname put obj${i} $TESTDATA
done
SNAP=2
rados -p $poolname mksnap snap${SNAP}
dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP}
rados -p $poolname put obj5 $TESTDATA
SNAP=3
rados -p $poolname mksnap snap${SNAP}
dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP}
rados -p $poolname put obj3 $TESTDATA
SNAP=4
rados -p $poolname mksnap snap${SNAP}
dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP}
rados -p $poolname put obj5 $TESTDATA
rados -p $poolname put obj2 $TESTDATA
SNAP=5
rados -p $poolname mksnap snap${SNAP}
SNAP=6
rados -p $poolname mksnap snap${SNAP}
dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP}
rados -p $poolname put obj5 $TESTDATA
SNAP=7
rados -p $poolname mksnap snap${SNAP}
rados -p $poolname rm obj4
rados -p $poolname rm obj16
rados -p $poolname rm obj2
kill_daemons $dir TERM osd || return 1
    # No need to use the ceph_objectstore_tool() helper because the osd is stopped
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj1)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" --force remove || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj5 | grep \"snapid\":2)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" remove || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj5 | grep \"snapid\":1)"
OBJ5SAVE="$JSON"
    # The clone starts out with a snapmap entry
ceph-kvstore-tool bluestore-kv $dir/${osd} list 2> /dev/null > $dir/drk.log
grep SNA_ $dir/drk.log
grep "^[pm].*SNA_.*[.]1[.]obj5[.][.]$" $dir/drk.log || return 1
ceph-objectstore-tool --data-path $dir/${osd} --rmtype nosnapmap "$JSON" remove || return 1
    # Check that the snapmap is still there
ceph-kvstore-tool bluestore-kv $dir/${osd} list 2> /dev/null > $dir/drk.log
grep SNA_ $dir/drk.log
grep "^[pm].*SNA_.*[.]1[.]obj5[.][.]$" $dir/drk.log || return 1
rm -f $dir/drk.log
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj5 | grep \"snapid\":4)"
dd if=/dev/urandom of=$TESTDATA bs=256 count=18
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" set-bytes $TESTDATA || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj3)"
dd if=/dev/urandom of=$TESTDATA bs=256 count=15
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" set-bytes $TESTDATA || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj4 | grep \"snapid\":7)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" remove || return 1
    # The clone starts out with a snapmap entry
ceph-kvstore-tool bluestore-kv $dir/${osd} list 2> /dev/null > $dir/drk.log
grep SNA_ $dir/drk.log
grep "^[pm].*SNA_.*[.]7[.]obj16[.][.]$" $dir/drk.log || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj16 | grep \"snapid\":7)"
ceph-objectstore-tool --data-path $dir/${osd} --rmtype snapmap "$JSON" remove || return 1
# Check that snapmap is now removed
ceph-kvstore-tool bluestore-kv $dir/${osd} list 2> /dev/null > $dir/drk.log
grep SNA_ $dir/drk.log
! grep "^[pm].*SNA_.*[.]7[.]obj16[.][.]$" $dir/drk.log || return 1
rm -f $dir/drk.log
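    # At this point the snap mapper and the object store disagree in both
    # directions: obj5:1 was removed while its SNA_ key was kept
    # (--rmtype nosnapmap), and obj16:7 still exists but its SNA_ key is gone
    # (--rmtype snapmap).  Scrub is expected to notice and repair the obj16
    # case (see the "found snap mapper error ... repaired" log check below).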
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj2)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" rm-attr snapset || return 1
# Create a clone which isn't in snapset and doesn't have object info
JSON="$(echo "$OBJ5SAVE" | sed s/snapid\":1/snapid\":7/)"
dd if=/dev/urandom of=$TESTDATA bs=256 count=7
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" set-bytes $TESTDATA || return 1
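    # Each clear-snapset variant below corrupts a different SnapSet field
    # (seq, clone_size, clone_overlap, clones, head, snaps, size, ...); the
    # expected list-inconsistent-snapset output maps these to errors such as
    # extra_clones, snapset_error and size_mismatch.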
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj6)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj7)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset corrupt || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj8)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset seq || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj9)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset clone_size || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj10)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset clone_overlap || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj11)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset clones || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj12)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset head || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj13)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset snaps || return 1
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj14)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset size || return 1
echo "garbage" > $dir/bad
JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj15)"
ceph-objectstore-tool --data-path $dir/${osd} "$JSON" set-attr snapset $dir/bad || return 1
rm -f $dir/bad
return 0
}
function TEST_scrub_snaps() {
local dir=$1
local poolname=test
local OBJS=16
local OSDS=1
TESTDATA="testdata.$$"
run_mon $dir a --osd_pool_default_size=$OSDS || return 1
run_mgr $dir x || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
# All scrubs done manually. Don't want any unexpected scheduled scrubs.
ceph osd set noscrub || return 1
ceph osd set nodeep-scrub || return 1
# Create a pool with a single pg
create_pool $poolname 1 1
wait_for_clean || return 1
poolid=$(ceph osd dump | grep "^pool.*[']test[']" | awk '{ print $2 }')
dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
for i in `seq 1 $OBJS`
do
rados -p $poolname put obj${i} $TESTDATA
done
local primary=$(get_primary $poolname obj1)
create_scenario $dir $poolname $TESTDATA $primary || return 1
rm -f $TESTDATA
for osd in $(seq 0 $(expr $OSDS - 1))
do
activate_osd $dir $osd || return 1
done
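    # Use small shallow-scrub chunks so the pg is scrubbed in a predictable
    # number of chunks; the "_scan_snaps start" count checked below depends
    # on how the scrub is split up.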
ceph tell osd.* config set osd_shallow_scrub_chunk_max 25
ceph tell osd.* config set osd_shallow_scrub_chunk_min 5
ceph tell osd.* config set osd_pg_stat_report_interval_max 1
wait_for_clean || return 1
ceph tell osd.* config get osd_shallow_scrub_chunk_max
ceph tell osd.* config get osd_shallow_scrub_chunk_min
ceph tell osd.* config get osd_pg_stat_report_interval_max
ceph tell osd.* config get osd_scrub_chunk_max
ceph tell osd.* config get osd_scrub_chunk_min
local pgid="${poolid}.0"
if ! pg_scrub "$pgid" ; then
return 1
fi
test "$(grep "_scan_snaps start" $dir/osd.${primary}.log | wc -l)" = "2" || return 1
rados list-inconsistent-pg $poolname > $dir/json || return 1
# Check pg count
test $(jq '. | length' $dir/json) = "1" || return 1
# Check pgid
test $(jq -r '.[0]' $dir/json) = $pgid || return 1
rados list-inconsistent-obj $pgid > $dir/json || return 1
    # With a single-copy pool the injected snapshot errors don't produce any
    # object errors, because object errors are only detected by comparing
    # copies.
jq "$jqfilter" << EOF | python3 -c "$sortkeys" > $dir/checkcsjson
{
"epoch": 17,
"inconsistents": []
}
EOF
jq "$jqfilter" $dir/json | python3 -c "$sortkeys" > $dir/csjson
multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1
rados list-inconsistent-snapset $pgid > $dir/json || return 1
jq "$jqfilter" << EOF | python3 -c "$sortkeys" > $dir/checkcsjson
{
"inconsistents": [
{
"errors": [
"headless"
],
"snap": 1,
"locator": "",
"nspace": "",
"name": "obj1"
},
{
"errors": [
"size_mismatch"
],
"snap": 1,
"locator": "",
"nspace": "",
"name": "obj10"
},
{
"errors": [
"headless"
],
"snap": 1,
"locator": "",
"nspace": "",
"name": "obj11"
},
{
"errors": [
"size_mismatch"
],
"snap": 1,
"locator": "",
"nspace": "",
"name": "obj14"
},
{
"errors": [
"headless"
],
"snap": 1,
"locator": "",
"nspace": "",
"name": "obj6"
},
{
"errors": [
"headless"
],
"snap": 1,
"locator": "",
"nspace": "",
"name": "obj7"
},
{
"errors": [
"size_mismatch"
],
"snap": 1,
"locator": "",
"nspace": "",
"name": "obj9"
},
{
"errors": [
"headless"
],
"snap": 4,
"locator": "",
"nspace": "",
"name": "obj2"
},
{
"errors": [
"size_mismatch"
],
"snap": 4,
"locator": "",
"nspace": "",
"name": "obj5"
},
{
"errors": [
"headless"
],
"snap": 7,
"locator": "",
"nspace": "",
"name": "obj2"
},
{
"errors": [
"info_missing",
"headless"
],
"snap": 7,
"locator": "",
"nspace": "",
"name": "obj5"
},
{
"name": "obj10",
"nspace": "",
"locator": "",
"snap": "head",
"snapset": {
"seq": 1,
"clones": [
{
"snap": 1,
"size": 1032,
"overlap": "????",
"snaps": [
1
]
}
]
},
"errors": []
},
{
"extra clones": [
1
],
"errors": [
"extra_clones"
],
"snap": "head",
"locator": "",
"nspace": "",
"name": "obj11",
"snapset": {
"seq": 1,
"clones": []
}
},
{
"name": "obj14",
"nspace": "",
"locator": "",
"snap": "head",
"snapset": {
"seq": 1,
"clones": [
{
"snap": 1,
"size": 1033,
"overlap": "[]",
"snaps": [
1
]
}
]
},
"errors": []
},
{
"errors": [
"snapset_corrupted"
],
"snap": "head",
"locator": "",
"nspace": "",
"name": "obj15"
},
{
"extra clones": [
7,
4
],
"errors": [
"snapset_missing",
"extra_clones"
],
"snap": "head",
"locator": "",
"nspace": "",
"name": "obj2"
},
{
"errors": [
"size_mismatch"
],
"snap": "head",
"locator": "",
"nspace": "",
"name": "obj3",
"snapset": {
"seq": 3,
"clones": [
{
"snap": 1,
"size": 1032,
"overlap": "[]",
"snaps": [
1
]
},
{
"snap": 3,
"size": 256,
"overlap": "[]",
"snaps": [
3,
2
]
}
]
}
},
{
"missing": [
7
],
"errors": [
"clone_missing"
],
"snap": "head",
"locator": "",
"nspace": "",
"name": "obj4",
"snapset": {
"seq": 7,
"clones": [
{
"snap": 7,
"size": 1032,
"overlap": "[]",
"snaps": [
7,
6,
5,
4,
3,
2,
1
]
}
]
}
},
{
"missing": [
2,
1
],
"extra clones": [
7
],
"errors": [
"extra_clones",
"clone_missing"
],
"snap": "head",
"locator": "",
"nspace": "",
"name": "obj5",
"snapset": {
"seq": 6,
"clones": [
{
"snap": 1,
"size": 1032,
"overlap": "[]",
"snaps": [
1
]
},
{
"snap": 2,
"size": 256,
"overlap": "[]",
"snaps": [
2
]
},
{
"snap": 4,
"size": 512,
"overlap": "[]",
"snaps": [
4,
3
]
},
{
"snap": 6,
"size": 1024,
"overlap": "[]",
"snaps": [
6,
5
]
}
]
}
},
{
"extra clones": [
1
],
"errors": [
"extra_clones"
],
"snap": "head",
"locator": "",
"nspace": "",
"name": "obj6",
"snapset": {
"seq": 1,
"clones": []
}
},
{
"extra clones": [
1
],
"errors": [
"extra_clones"
],
"snap": "head",
"locator": "",
"nspace": "",
"name": "obj7",
"snapset": {
"seq": 0,
"clones": []
}
},
{
"errors": [
"snapset_error"
],
"snap": "head",
"locator": "",
"nspace": "",
"name": "obj8",
"snapset": {
"seq": 0,
"clones": [
{
"snap": 1,
"size": 1032,
"overlap": "[]",
"snaps": [
1
]
}
]
}
},
{
"name": "obj9",
"nspace": "",
"locator": "",
"snap": "head",
"snapset": {
"seq": 1,
"clones": [
{
"snap": 1,
"size": "????",
"overlap": "[]",
"snaps": [
1
]
}
]
},
"errors": []
}
],
"epoch": 20
}
EOF
jq "$jqfilter" $dir/json | python3 -c "$sortkeys" > $dir/csjson
multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1
if test $getjson = "yes"
then
jq '.' $dir/json > save1.json
fi
if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null;
then
jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-snap.json || return 1
fi
pidfiles=$(find $dir 2>/dev/null | grep 'osd[^/]*\.pid')
pids=""
for pidfile in ${pidfiles}
do
pids+="$(cat $pidfile) "
done
ERRORS=0
for i in `seq 1 7`
do
rados -p $poolname rmsnap snap$i
done
sleep 5
local -i loop=0
while ceph pg dump pgs | grep -q snaptrim;
do
if ceph pg dump pgs | grep -q snaptrim_error;
then
break
fi
sleep 2
loop+=1
if (( $loop >= 10 )) ; then
ERRORS=$(expr $ERRORS + 1)
break
fi
done
ceph pg dump pgs
for pid in $pids
do
if ! kill -0 $pid
then
echo "OSD Crash occurred"
ERRORS=$(expr $ERRORS + 1)
fi
done
kill_daemons $dir || return 1
declare -a err_strings
err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj10:.* : is missing in clone_overlap"
err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:7 : no '_' attr"
err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:7 : is an unexpected clone"
err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:4 : on disk size [(]4608[)] does not match object info size [(]512[)] adjusted for ondisk to [(]512[)]"
err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head : expected clone .*:::obj5:2"
err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head : expected clone .*:::obj5:1"
err_strings[6]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj5:head : 2 missing clone[(]s[)]"
err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj8:head : snaps.seq not set"
err_strings[8]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj7:1 : is an unexpected clone"
err_strings[9]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj3:head : on disk size [(]3840[)] does not match object info size [(]768[)] adjusted for ondisk to [(]768[)]"
err_strings[10]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj6:1 : is an unexpected clone"
err_strings[11]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:head : no 'snapset' attr"
err_strings[12]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:7 : clone ignored due to missing snapset"
err_strings[13]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:4 : clone ignored due to missing snapset"
err_strings[14]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj4:head : expected clone .*:::obj4:7"
err_strings[15]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj4:head : 1 missing clone[(]s[)]"
err_strings[16]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj1:1 : is an unexpected clone"
err_strings[17]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj9:1 : is missing in clone_size"
err_strings[18]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj11:1 : is an unexpected clone"
err_strings[19]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj14:1 : size 1032 != clone_size 1033"
err_strings[20]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 20 errors"
err_strings[21]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj15:head : can't decode 'snapset' attr "
err_strings[22]="log_channel[(]cluster[)] log [[]ERR[]] : osd[.][0-9]* found snap mapper error on pg 1.0 oid 1:461f8b5e:::obj16:7 snaps missing in mapper, should be: {1, 2, 3, 4, 5, 6, 7} ...repaired"
for err_string in "${err_strings[@]}"
do
if ! grep "$err_string" $dir/osd.${primary}.log > /dev/null;
then
echo "Missing log message '$err_string'"
ERRORS=$(expr $ERRORS + 1)
fi
done
if [ $ERRORS != "0" ];
then
echo "TEST FAILED WITH $ERRORS ERRORS"
return 1
fi
echo "TEST PASSED"
return 0
}
function _scrub_snaps_multi() {
local dir=$1
local poolname=test
local OBJS=16
local OSDS=2
local which=$2
TESTDATA="testdata.$$"
run_mon $dir a --osd_pool_default_size=$OSDS || return 1
run_mgr $dir x || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
# All scrubs done manually. Don't want any unexpected scheduled scrubs.
ceph osd set noscrub || return 1
ceph osd set nodeep-scrub || return 1
# Create a pool with a single pg
create_pool $poolname 1 1
wait_for_clean || return 1
poolid=$(ceph osd dump | grep "^pool.*[']test[']" | awk '{ print $2 }')
dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
for i in `seq 1 $OBJS`
do
rados -p $poolname put obj${i} $TESTDATA
done
local primary=$(get_primary $poolname obj1)
local replica=$(get_not_primary $poolname obj1)
eval create_scenario $dir $poolname $TESTDATA \$$which || return 1
rm -f $TESTDATA
for osd in $(seq 0 $(expr $OSDS - 1))
do
activate_osd $dir $osd || return 1
done
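    # As above, pin the scrub chunk sizes so the number of scrub chunks, and
    # therefore the "_scan_snaps start" counts checked below, stays predictable.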
ceph tell osd.* config set osd_shallow_scrub_chunk_max 3
ceph tell osd.* config set osd_shallow_scrub_chunk_min 3
ceph tell osd.* config set osd_scrub_chunk_min 3
ceph tell osd.* config set osd_pg_stat_report_interval_max 1
wait_for_clean || return 1
local pgid="${poolid}.0"
if ! pg_scrub "$pgid" ; then
return 1
fi
test "$(grep "_scan_snaps start" $dir/osd.${primary}.log | wc -l)" -gt "3" || return 1
test "$(grep "_scan_snaps start" $dir/osd.${replica}.log | wc -l)" -gt "3" || return 1
rados list-inconsistent-pg $poolname > $dir/json || return 1
# Check pg count
test $(jq '. | length' $dir/json) = "1" || return 1
# Check pgid
test $(jq -r '.[0]' $dir/json) = $pgid || return 1
rados list-inconsistent-obj $pgid --format=json-pretty
rados list-inconsistent-snapset $pgid > $dir/json || return 1
    # Since all of the snapshots on the primary are consistent, there are no errors here
if [ $which = "replica" ];
then
scruberrors="20"
jq "$jqfilter" << EOF | python3 -c "$sortkeys" > $dir/checkcsjson
{
"epoch": 23,
"inconsistents": []
}
EOF
else
scruberrors="30"
jq "$jqfilter" << EOF | python3 -c "$sortkeys" > $dir/checkcsjson
{
"epoch": 23,
"inconsistents": [
{
"name": "obj10",
"nspace": "",
"locator": "",
"snap": 1,
"errors": [
"size_mismatch"
]
},
{
"name": "obj11",
"nspace": "",
"locator": "",
"snap": 1,
"errors": [
"headless"
]
},
{
"name": "obj14",
"nspace": "",
"locator": "",
"snap": 1,
"errors": [
"size_mismatch"
]
},
{
"name": "obj6",
"nspace": "",
"locator": "",
"snap": 1,
"errors": [
"headless"
]
},
{
"name": "obj7",
"nspace": "",
"locator": "",
"snap": 1,
"errors": [
"headless"
]
},
{
"name": "obj9",
"nspace": "",
"locator": "",
"snap": 1,
"errors": [
"size_mismatch"
]
},
{
"name": "obj5",
"nspace": "",
"locator": "",
"snap": 7,
"errors": [
"info_missing",
"headless"
]
},
{
"name": "obj10",
"nspace": "",
"locator": "",
"snap": "head",
"snapset": {
"seq": 1,
"clones": [
{
"snap": 1,
"size": 1032,
"overlap": "????",
"snaps": [
1
]
}
]
},
"errors": []
},
{
"name": "obj11",
"nspace": "",
"locator": "",
"snap": "head",
"snapset": {
"seq": 1,
"clones": []
},
"errors": [
"extra_clones"
],
"extra clones": [
1
]
},
{
"name": "obj14",
"nspace": "",
"locator": "",
"snap": "head",
"snapset": {
"seq": 1,
"clones": [
{
"snap": 1,
"size": 1033,
"overlap": "[]",
"snaps": [
1
]
}
]
},
"errors": []
},
{
"name": "obj5",
"nspace": "",
"locator": "",
"snap": "head",
"snapset": {
"seq": 6,
"clones": [
{
"snap": 1,
"size": 1032,
"overlap": "[]",
"snaps": [
1
]
},
{
"snap": 2,
"size": 256,
"overlap": "[]",
"snaps": [
2
]
},
{
"snap": 4,
"size": 512,
"overlap": "[]",
"snaps": [
4,
3
]
},
{
"snap": 6,
"size": 1024,
"overlap": "[]",
"snaps": [
6,
5
]
}
]
},
"errors": [
"extra_clones"
],
"extra clones": [
7
]
},
{
"name": "obj6",
"nspace": "",
"locator": "",
"snap": "head",
"snapset": {
"seq": 1,
"clones": []
},
"errors": [
"extra_clones"
],
"extra clones": [
1
]
},
{
"name": "obj7",
"nspace": "",
"locator": "",
"snap": "head",
"snapset": {
"seq": 0,
"clones": []
},
"errors": [
"extra_clones"
],
"extra clones": [
1
]
},
{
"name": "obj8",
"nspace": "",
"locator": "",
"snap": "head",
"snapset": {
"seq": 0,
"clones": [
{
"snap": 1,
"size": 1032,
"overlap": "[]",
"snaps": [
1
]
}
]
},
"errors": [
"snapset_error"
]
},
{
"name": "obj9",
"nspace": "",
"locator": "",
"snap": "head",
"snapset": {
"seq": 1,
"clones": [
{
"snap": 1,
"size": "????",
"overlap": "[]",
"snaps": [
1
]
}
]
},
"errors": []
}
]
}
EOF
fi
jq "$jqfilter" $dir/json | python3 -c "$sortkeys" > $dir/csjson
multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1
if test $getjson = "yes"
then
jq '.' $dir/json > save1.json
fi
if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null;
then
jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-snap.json || return 1
fi
pidfiles=$(find $dir 2>/dev/null | grep 'osd[^/]*\.pid')
pids=""
for pidfile in ${pidfiles}
do
pids+="$(cat $pidfile) "
done
ERRORS=0
    # When removing snapshots with a corrupt replica, the osd crashes.
# See http://tracker.ceph.com/issues/23875
if [ $which = "primary" ];
then
for i in `seq 1 7`
do
rados -p $poolname rmsnap snap$i
done
sleep 5
local -i loop=0
while ceph pg dump pgs | grep -q snaptrim;
do
if ceph pg dump pgs | grep -q snaptrim_error;
then
break
fi
sleep 2
loop+=1
if (( $loop >= 10 )) ; then
ERRORS=$(expr $ERRORS + 1)
break
fi
done
fi
ceph pg dump pgs
for pid in $pids
do
if ! kill -0 $pid
then
echo "OSD Crash occurred"
ERRORS=$(expr $ERRORS + 1)
fi
done
kill_daemons $dir || return 1
declare -a err_strings
err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj4:7 : missing"
err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] soid .*:::obj3:head : size 3840 != size 768 from auth oi"
err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj5:1 : missing"
err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj5:2 : missing"
err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] soid .*:::obj5:4 : size 4608 != size 512 from auth oi"
err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid .*:::obj5:7 : failed to pick suitable object info"
err_strings[6]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj1:head : missing"
err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub ${scruberrors} errors"
for err_string in "${err_strings[@]}"
do
if ! grep "$err_string" $dir/osd.${primary}.log > /dev/null;
then
echo "Missing log message '$err_string'"
ERRORS=$(expr $ERRORS + 1)
fi
done
# Check replica specific messages
declare -a rep_err_strings
osd=$(eval echo \$$which)
rep_err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : osd[.][0-9]* found snap mapper error on pg 1.0 oid 1:461f8b5e:::obj16:7 snaps missing in mapper, should be: {1, 2, 3, 4, 5, 6, 7} ...repaired"
for err_string in "${rep_err_strings[@]}"
do
if ! grep "$err_string" $dir/osd.${osd}.log > /dev/null;
then
echo "Missing log message '$err_string'"
ERRORS=$(expr $ERRORS + 1)
fi
done
if [ $ERRORS != "0" ];
then
echo "TEST FAILED WITH $ERRORS ERRORS"
return 1
fi
echo "TEST PASSED"
return 0
}
function TEST_scrub_snaps_replica() {
local dir=$1
ORIG_ARGS=$CEPH_ARGS
CEPH_ARGS+=" --osd_scrub_chunk_min=3 --osd_scrub_chunk_max=20 --osd_shallow_scrub_chunk_min=3 --osd_shallow_scrub_chunk_max=3 --osd_pg_stat_report_interval_max=1"
_scrub_snaps_multi $dir replica
err=$?
CEPH_ARGS=$ORIG_ARGS
return $err
}
function TEST_scrub_snaps_primary() {
local dir=$1
ORIG_ARGS=$CEPH_ARGS
CEPH_ARGS+=" --osd_scrub_chunk_min=3 --osd_scrub_chunk_max=20 --osd_shallow_scrub_chunk_min=3 --osd_shallow_scrub_chunk_max=3 --osd_pg_stat_report_interval_max=1"
_scrub_snaps_multi $dir primary
err=$?
CEPH_ARGS=$ORIG_ARGS
return $err
}
main osd-scrub-snaps "$@"
# Local Variables:
# compile-command: "cd build ; make -j4 && \
# ../qa/run-standalone.sh osd-scrub-snaps.sh"
# End:
| 33,375 | 27.070648 | 207 | sh |
null | ceph-main/qa/standalone/scrub/osd-scrub-test.sh | #!/usr/bin/env bash
#
# Copyright (C) 2018 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
source $CEPH_ROOT/qa/standalone/scrub/scrub-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7138" # git grep '\<7138\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
export -n CEPH_CLI_TEST_DUP_COMMAND
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
echo "-------------- Prepare Test $func -------------------"
setup $dir || return 1
echo "-------------- Run Test $func -----------------------"
$func $dir || return 1
echo "-------------- Teardown Test $func ------------------"
teardown $dir || return 1
echo "-------------- Complete Test $func ------------------"
done
}
function TEST_scrub_test() {
local dir=$1
local poolname=test
local OSDS=3
local objects=15
TESTDATA="testdata.$$"
run_mon $dir a --osd_pool_default_size=3 || return 1
run_mgr $dir x || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
# Create a pool with a single pg
create_pool $poolname 1 1
wait_for_clean || return 1
poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')
dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
for i in `seq 1 $objects`
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
local primary=$(get_primary $poolname obj1)
local otherosd=$(get_not_primary $poolname obj1)
if [ "$otherosd" = "2" ];
then
local anotherosd="0"
else
local anotherosd="2"
fi
CORRUPT_DATA="corrupt-data.$$"
dd if=/dev/urandom of=$CORRUPT_DATA bs=512 count=1
objectstore_tool $dir $anotherosd obj1 set-bytes $CORRUPT_DATA
rm -f $CORRUPT_DATA
local pgid="${poolid}.0"
pg_deep_scrub "$pgid" || return 1
ceph pg dump pgs | grep ^${pgid} | grep -q -- +inconsistent || return 1
test "$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_scrub_errors')" = "2" || return 1
ceph osd out $primary
wait_for_clean || return 1
pg_deep_scrub "$pgid" || return 1
test "$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_scrub_errors')" = "2" || return 1
test "$(ceph pg $pgid query | jq '.peer_info[0].stats.stat_sum.num_scrub_errors')" = "2" || return 1
ceph pg dump pgs | grep ^${pgid} | grep -q -- +inconsistent || return 1
ceph osd in $primary
wait_for_clean || return 1
repair "$pgid" || return 1
wait_for_clean || return 1
    # This sets up the test: after the repair, the previous primary still has the old value
test "$(ceph pg $pgid query | jq '.peer_info[0].stats.stat_sum.num_scrub_errors')" = "2" || return 1
ceph pg dump pgs | grep ^${pgid} | grep -vq -- +inconsistent || return 1
ceph osd out $primary
wait_for_clean || return 1
test "$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_scrub_errors')" = "0" || return 1
test "$(ceph pg $pgid query | jq '.peer_info[0].stats.stat_sum.num_scrub_errors')" = "0" || return 1
test "$(ceph pg $pgid query | jq '.peer_info[1].stats.stat_sum.num_scrub_errors')" = "0" || return 1
ceph pg dump pgs | grep ^${pgid} | grep -vq -- +inconsistent || return 1
}
# Grab year-month-day
DATESED="s/\([0-9]*-[0-9]*-[0-9]*\).*/\1/"
DATEFORMAT="%Y-%m-%d"
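# e.g. "2021-10-12T20:32:43.645168+0000" is reduced to "2021-10-12"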
function check_dump_scrubs() {
local primary=$1
local sched_time_check="$2"
local deadline_check="$3"
DS="$(CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) dump_scrubs)"
# use eval to drop double-quotes
eval SCHED_TIME=$(echo $DS | jq '.[0].sched_time')
test $(echo $SCHED_TIME | sed $DATESED) = $(date +${DATEFORMAT} -d "now + $sched_time_check") || return 1
# use eval to drop double-quotes
eval DEADLINE=$(echo $DS | jq '.[0].deadline')
test $(echo $DEADLINE | sed $DATESED) = $(date +${DATEFORMAT} -d "now + $deadline_check") || return 1
}
function TEST_interval_changes() {
local poolname=test
local OSDS=2
local objects=10
# Don't assume how internal defaults are set
local day="$(expr 24 \* 60 \* 60)"
local week="$(expr $day \* 7)"
local min_interval=$day
local max_interval=$week
local WAIT_FOR_UPDATE=15
TESTDATA="testdata.$$"
# This min scrub interval results in 30 seconds backoff time
run_mon $dir a --osd_pool_default_size=$OSDS || return 1
run_mgr $dir x || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd --osd_scrub_min_interval=$min_interval --osd_scrub_max_interval=$max_interval --osd_scrub_interval_randomize_ratio=0 || return 1
done
# Create a pool with a single pg
create_pool $poolname 1 1
wait_for_clean || return 1
local poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')
dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
for i in `seq 1 $objects`
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
local primary=$(get_primary $poolname obj1)
    # Check initial settings from above (min 1 day, max 1 week)
check_dump_scrubs $primary "1 day" "1 week" || return 1
# Change global osd_scrub_min_interval to 2 days
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) config set osd_scrub_min_interval $(expr $day \* 2)
sleep $WAIT_FOR_UPDATE
check_dump_scrubs $primary "2 days" "1 week" || return 1
# Change global osd_scrub_max_interval to 2 weeks
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) config set osd_scrub_max_interval $(expr $week \* 2)
sleep $WAIT_FOR_UPDATE
check_dump_scrubs $primary "2 days" "2 week" || return 1
# Change pool osd_scrub_min_interval to 3 days
ceph osd pool set $poolname scrub_min_interval $(expr $day \* 3)
sleep $WAIT_FOR_UPDATE
check_dump_scrubs $primary "3 days" "2 week" || return 1
# Change pool osd_scrub_max_interval to 3 weeks
ceph osd pool set $poolname scrub_max_interval $(expr $week \* 3)
sleep $WAIT_FOR_UPDATE
check_dump_scrubs $primary "3 days" "3 week" || return 1
}
function TEST_scrub_extended_sleep() {
local dir=$1
local poolname=test
local OSDS=3
local objects=15
TESTDATA="testdata.$$"
DAY=$(date +%w)
# Handle wrap
if [ "$DAY" -ge "4" ];
then
DAY="0"
fi
# Start after 2 days in case we are near midnight
DAY_START=$(expr $DAY + 2)
DAY_END=$(expr $DAY + 3)
run_mon $dir a --osd_pool_default_size=3 || return 1
run_mgr $dir x || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd --osd_scrub_sleep=0 \
--osd_scrub_extended_sleep=20 \
--bluestore_cache_autotune=false \
--osd_deep_scrub_randomize_ratio=0.0 \
--osd_scrub_interval_randomize_ratio=0 \
--osd_scrub_begin_week_day=$DAY_START \
--osd_scrub_end_week_day=$DAY_END \
|| return 1
done
# Create a pool with a single pg
create_pool $poolname 1 1
wait_for_clean || return 1
# Trigger a scrub on a PG
local pgid=$(get_pg $poolname SOMETHING)
local primary=$(get_primary $poolname SOMETHING)
local last_scrub=$(get_last_scrub_stamp $pgid)
ceph tell $pgid scrub || return 1
# Allow scrub to start extended sleep
PASSED="false"
for ((i=0; i < 15; i++)); do
if grep -q "scrub state.*, sleeping" $dir/osd.${primary}.log
then
PASSED="true"
break
fi
sleep 1
done
# Check that extended sleep was triggered
if [ $PASSED = "false" ];
then
return 1
fi
# release scrub to run after extended sleep finishes
ceph tell osd.$primary config set osd_scrub_begin_week_day 0
ceph tell osd.$primary config set osd_scrub_end_week_day 0
    # Due to extended sleep, the scrub should not be done within 20 seconds,
    # so check that it does not finish within the first 10 seconds and that it happens by 25 seconds.
count=0
PASSED="false"
for ((i=0; i < 25; i++)); do
count=$(expr $count + 1)
if test "$(get_last_scrub_stamp $pgid)" '>' "$last_scrub" ; then
# Did scrub run too soon?
if [ $count -lt "10" ];
then
return 1
fi
PASSED="true"
break
fi
sleep 1
done
# Make sure scrub eventually ran
if [ $PASSED = "false" ];
then
return 1
fi
}
function _scrub_abort() {
local dir=$1
local poolname=test
local OSDS=3
local objects=1000
local type=$2
TESTDATA="testdata.$$"
if test $type = "scrub";
then
stopscrub="noscrub"
check="noscrub"
else
stopscrub="nodeep-scrub"
check="nodeep_scrub"
fi
run_mon $dir a --osd_pool_default_size=3 || return 1
run_mgr $dir x || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
# Set scheduler to "wpq" until there's a reliable way to query scrub
# states with "--osd-scrub-sleep" set to 0. The "mclock_scheduler"
# overrides the scrub sleep to 0 and as a result the checks in the
# test fail.
run_osd $dir $osd --osd_pool_default_pg_autoscale_mode=off \
--osd_deep_scrub_randomize_ratio=0.0 \
--osd_scrub_sleep=5.0 \
--osd_scrub_interval_randomize_ratio=0 \
--osd_op_queue=wpq || return 1
done
# Create a pool with a single pg
create_pool $poolname 1 1
wait_for_clean || return 1
poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')
dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
for i in `seq 1 $objects`
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
local primary=$(get_primary $poolname obj1)
local pgid="${poolid}.0"
ceph tell $pgid $type || return 1
    # a deep-scrub won't start unless a scrub is requested as well
if [ "$type" = "deep_scrub" ];
then
ceph tell $pgid scrub || return 1
fi
# Wait for scrubbing to start
set -o pipefail
found="no"
for i in $(seq 0 200)
do
flush_pg_stats
if ceph pg dump pgs | grep ^$pgid| grep -q "scrubbing"
then
found="yes"
#ceph pg dump pgs
break
fi
done
set +o pipefail
if test $found = "no";
then
echo "Scrubbing never started"
return 1
fi
ceph osd set $stopscrub
if [ "$type" = "deep_scrub" ];
then
ceph osd set noscrub
fi
# Wait for scrubbing to end
set -o pipefail
for i in $(seq 0 200)
do
flush_pg_stats
if ceph pg dump pgs | grep ^$pgid | grep -q "scrubbing"
then
continue
fi
#ceph pg dump pgs
break
done
set +o pipefail
sleep 5
if ! grep "$check set, aborting" $dir/osd.${primary}.log
then
echo "Abort not seen in log"
return 1
fi
local last_scrub=$(get_last_scrub_stamp $pgid)
ceph config set osd "osd_scrub_sleep" "0.1"
ceph osd unset $stopscrub
if [ "$type" = "deep_scrub" ];
then
ceph osd unset noscrub
fi
TIMEOUT=$(($objects / 2))
wait_for_scrub $pgid "$last_scrub" || return 1
}
function TEST_scrub_abort() {
local dir=$1
_scrub_abort $dir scrub
}
function TEST_deep_scrub_abort() {
local dir=$1
_scrub_abort $dir deep_scrub
}
function TEST_scrub_permit_time() {
local dir=$1
local poolname=test
local OSDS=3
local objects=15
TESTDATA="testdata.$$"
run_mon $dir a --osd_pool_default_size=3 || return 1
run_mgr $dir x || return 1
local scrub_begin_hour=$(date -d '2 hour ago' +"%H" | sed 's/^0//')
local scrub_end_hour=$(date -d '1 hour ago' +"%H" | sed 's/^0//')
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd --bluestore_cache_autotune=false \
--osd_deep_scrub_randomize_ratio=0.0 \
--osd_scrub_interval_randomize_ratio=0 \
--osd_scrub_begin_hour=$scrub_begin_hour \
--osd_scrub_end_hour=$scrub_end_hour || return 1
done
# Create a pool with a single pg
create_pool $poolname 1 1
wait_for_clean || return 1
# Trigger a scrub on a PG
local pgid=$(get_pg $poolname SOMETHING)
local primary=$(get_primary $poolname SOMETHING)
local last_scrub=$(get_last_scrub_stamp $pgid)
# If we don't specify an amount of time to subtract from
# current time to set last_scrub_stamp, it sets the deadline
    # back by osd_scrub_max_interval which would cause the time permit checking
# to be skipped. Set back 1 day, the default scrub_min_interval.
ceph tell $pgid scrub $(( 24 * 60 * 60 )) || return 1
# Scrub should not run
for ((i=0; i < 30; i++)); do
if test "$(get_last_scrub_stamp $pgid)" '>' "$last_scrub" ; then
return 1
fi
sleep 1
done
}
# a test to recreate the problem described in bug #52901 - setting 'noscrub'
# without explicitly preventing deep scrubs made the PG 'unscrubable'.
# Fixed by PR#43521
function TEST_just_deep_scrubs() {
local dir=$1
local -A cluster_conf=(
['osds_num']="3"
['pgs_in_pool']="4"
['pool_name']="test"
)
standard_scrub_cluster $dir cluster_conf
local poolid=${cluster_conf['pool_id']}
local poolname=${cluster_conf['pool_name']}
echo "Pool: $poolname : $poolid"
TESTDATA="testdata.$$"
local objects=15
dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
for i in `seq 1 $objects`
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
# set both 'no scrub' & 'no deep-scrub', then request a deep-scrub.
# we do not expect to see the scrub scheduled.
ceph osd set noscrub || return 1
ceph osd set nodeep-scrub || return 1
sleep 6 # the 'noscrub' command takes a long time to reach the OSDs
local now_is=`date -I"ns"`
declare -A sched_data
local pgid="${poolid}.2"
# turn on the publishing of test data in the 'scrubber' section of 'pg query' output
set_query_debug $pgid
extract_published_sch $pgid $now_is $now_is sched_data
local saved_last_stamp=${sched_data['query_last_stamp']}
local dbg_counter_at_start=${sched_data['query_scrub_seq']}
echo "test counter @ start: $dbg_counter_at_start"
ceph pg $pgid deep_scrub
sleep 5 # 5s is the 'pg dump' interval
declare -A sc_data_2
extract_published_sch $pgid $now_is $now_is sc_data_2
echo "test counter @ should show no change: " ${sc_data_2['query_scrub_seq']}
(( ${sc_data_2['dmp_last_duration']} == 0)) || return 1
(( ${sc_data_2['query_scrub_seq']} == $dbg_counter_at_start)) || return 1
# unset the 'no deep-scrub'. Deep scrubbing should start now.
ceph osd unset nodeep-scrub || return 1
sleep 5
declare -A expct_qry_duration=( ['query_last_duration']="0" ['query_last_duration_neg']="not0" )
sc_data_2=()
echo "test counter @ should be higher than before the unset: " ${sc_data_2['query_scrub_seq']}
wait_any_cond $pgid 10 $saved_last_stamp expct_qry_duration "WaitingAfterScrub " sc_data_2 || return 1
}
function TEST_dump_scrub_schedule() {
local dir=$1
local poolname=test
local OSDS=3
local objects=15
TESTDATA="testdata.$$"
run_mon $dir a --osd_pool_default_size=$OSDS || return 1
run_mgr $dir x || return 1
# Set scheduler to "wpq" until there's a reliable way to query scrub states
# with "--osd-scrub-sleep" set to 0. The "mclock_scheduler" overrides the
# scrub sleep to 0 and as a result the checks in the test fail.
local ceph_osd_args="--osd_deep_scrub_randomize_ratio=0 \
--osd_scrub_interval_randomize_ratio=0 \
--osd_scrub_backoff_ratio=0.0 \
--osd_op_queue=wpq \
--osd_scrub_sleep=0.2"
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd $ceph_osd_args|| return 1
done
# Create a pool with a single pg
create_pool $poolname 1 1
wait_for_clean || return 1
poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')
dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
for i in `seq 1 $objects`
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
local pgid="${poolid}.0"
local now_is=`date -I"ns"`
# before the scrubbing starts
# last scrub duration should be 0. The scheduling data should show
# a time in the future:
# e.g. 'periodic scrub scheduled @ 2021-10-12T20:32:43.645168+0000'
declare -A expct_starting=( ['query_active']="false" ['query_is_future']="true" ['query_schedule']="scrub scheduled" )
declare -A sched_data
extract_published_sch $pgid $now_is "2019-10-12T20:32:43.645168+0000" sched_data
schedule_against_expected sched_data expct_starting "initial"
(( ${sched_data['dmp_last_duration']} == 0)) || return 1
echo "last-scrub --- " ${sched_data['query_last_scrub']}
#
# step 1: scrub once (mainly to ensure there is no urgency to scrub)
#
saved_last_stamp=${sched_data['query_last_stamp']}
ceph tell osd.* config set osd_scrub_sleep "0"
ceph pg deep-scrub $pgid
ceph pg scrub $pgid
# wait for the 'last duration' entries to change. Note that the 'dump' one will need
# up to 5 seconds to sync
sleep 5
sched_data=()
declare -A expct_qry_duration=( ['query_last_duration']="0" ['query_last_duration_neg']="not0" )
wait_any_cond $pgid 10 $saved_last_stamp expct_qry_duration "WaitingAfterScrub " sched_data || return 1
# verify that 'pg dump' also shows the change in last_scrub_duration
sched_data=()
declare -A expct_dmp_duration=( ['dmp_last_duration']="0" ['dmp_last_duration_neg']="not0" )
wait_any_cond $pgid 10 $saved_last_stamp expct_dmp_duration "WaitingAfterScrub_dmp " sched_data || return 1
sleep 2
#
# step 2: set noscrub and request a "periodic scrub". Watch for the change in the 'is the scrub
# scheduled for the future' value
#
ceph tell osd.* config set osd_scrub_chunk_max "3" || return 1
ceph tell osd.* config set osd_scrub_sleep "1.0" || return 1
ceph osd set noscrub || return 1
sleep 2
saved_last_stamp=${sched_data['query_last_stamp']}
ceph pg $pgid scrub
sleep 1
sched_data=()
declare -A expct_scrub_peri_sched=( ['query_is_future']="false" )
wait_any_cond $pgid 10 $saved_last_stamp expct_scrub_peri_sched "waitingBeingScheduled" sched_data || return 1
    # note: the induced change in 'last_scrub_stamp' that we've caused above is by itself not a publish-stats
# trigger. Thus it might happen that the information in 'pg dump' will not get updated here. Do not expect
# 'dmp_is_future' to follow 'query_is_future' without a good reason
## declare -A expct_scrub_peri_sched_dmp=( ['dmp_is_future']="false" )
## wait_any_cond $pgid 15 $saved_last_stamp expct_scrub_peri_sched_dmp "waitingBeingScheduled" sched_data || echo "must be fixed"
#
# step 3: allow scrubs. Watch for the conditions during the scrubbing
#
saved_last_stamp=${sched_data['query_last_stamp']}
ceph osd unset noscrub
declare -A cond_active=( ['query_active']="true" )
sched_data=()
wait_any_cond $pgid 10 $saved_last_stamp cond_active "WaitingActive " sched_data || return 1
# check for pg-dump to show being active. But if we see 'query_active' being reset - we've just
# missed it.
declare -A cond_active_dmp=( ['dmp_state_has_scrubbing']="true" ['query_active']="false" )
sched_data=()
wait_any_cond $pgid 10 $saved_last_stamp cond_active_dmp "WaitingActive " sched_data || return 1
}
function TEST_pg_dump_objects_scrubbed() {
local dir=$1
local poolname=test
local OSDS=3
local objects=15
local timeout=10
TESTDATA="testdata.$$"
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=$OSDS || return 1
run_mgr $dir x || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
# Create a pool with a single pg
create_pool $poolname 1 1
wait_for_clean || return 1
poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')
dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
for i in `seq 1 $objects`
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
local pgid="${poolid}.0"
#Trigger a scrub on a PG
pg_scrub $pgid || return 1
test "$(ceph pg $pgid query | jq '.info.stats.objects_scrubbed')" '=' $objects || return 1
teardown $dir || return 1
}
main osd-scrub-test "$@"
# Local Variables:
# compile-command: "cd build ; make -j4 && \
# ../qa/run-standalone.sh osd-scrub-test.sh"
# End:
| 21,614 | 31.165179 | 152 | sh |
null | ceph-main/qa/standalone/scrub/osd-unexpected-clone.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 Intel <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Xiaoxi Chen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7144" # git grep '\<7144\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
export -n CEPH_CLI_TEST_DUP_COMMAND
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_recover_unexpected() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
ceph osd pool create foo 1
rados -p foo put foo /etc/passwd
rados -p foo mksnap snap
rados -p foo put foo /etc/group
wait_for_clean || return 1
local osd=$(get_primary foo foo)
JSON=`objectstore_tool $dir $osd --op list foo | grep snapid.:1`
echo "JSON is $JSON"
rm -f $dir/_ $dir/data
objectstore_tool $dir $osd "$JSON" get-attr _ > $dir/_ || return 1
objectstore_tool $dir $osd "$JSON" get-bytes $dir/data || return 1
rados -p foo rmsnap snap
sleep 5
objectstore_tool $dir $osd "$JSON" set-bytes $dir/data || return 1
objectstore_tool $dir $osd "$JSON" set-attr _ $dir/_ || return 1
sleep 5
ceph pg repair 1.0 || return 1
sleep 10
ceph log last
# make sure osds are still up
timeout 60 ceph tell osd.0 version || return 1
timeout 60 ceph tell osd.1 version || return 1
timeout 60 ceph tell osd.2 version || return 1
}
main osd-unexpected-clone "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/osd/osd-bench.sh"
# End:
| 2,466 | 26.411111 | 83 | sh |
null | ceph-main/qa/standalone/scrub/scrub-helpers.sh | #!/usr/bin/env bash
# @file scrub-helpers.sh
# @brief a collection of bash functions useful for scrub standalone tests
#
# extract_published_sch()
#
# Use the output from both 'ceph pg dump pgs' and 'ceph pg x.x query' commands to determine
# the published scrub scheduling status of a given PG.
#
# $1: pg id
# $2: 'current' time to compare to
# $3: an additional time-point to compare to
# $4: [out] dictionary
#
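# Illustrative usage (mirrors how the standalone scrub tests call this helper):
#   declare -A sched_data
#   now_is=`date -I"ns"`
#   extract_published_sch $pgid $now_is $now_is sched_data
#   echo "last duration: ${sched_data['dmp_last_duration']}"
#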
function extract_published_sch() {
local pgn="$1"
local -n dict=$4 # a ref to the in/out dictionary
local current_time=$2
local extra_time=$3
local extr_dbg=1 # note: 3 and above leave some temp files around
#turn off '-x' (but remember previous state)
local saved_echo_flag=${-//[^x]/}
set +x
(( extr_dbg >= 3 )) && ceph pg dump pgs -f json-pretty >> /tmp/a_dmp$$
(( extr_dbg >= 3 )) && ceph pg $1 query -f json-pretty >> /tmp/a_qry$$
from_dmp=`ceph pg dump pgs -f json-pretty | jq -r --arg pgn "$pgn" --arg extra_dt "$extra_time" --arg current_dt "$current_time" '[
[[.pg_stats[]] | group_by(.pg_stats)][0][0] |
[.[] |
select(has("pgid") and .pgid == $pgn) |
(.dmp_stat_part=(.scrub_schedule | if test(".*@.*") then (split(" @ ")|first) else . end)) |
(.dmp_when_part=(.scrub_schedule | if test(".*@.*") then (split(" @ ")|last) else "0" end)) |
[ {
dmp_pg_state: .state,
dmp_state_has_scrubbing: (.state | test(".*scrub.*";"i")),
dmp_last_duration:.last_scrub_duration,
dmp_schedule: .dmp_stat_part,
dmp_schedule_at: .dmp_when_part,
dmp_is_future: ( .dmp_when_part > $current_dt ),
dmp_vs_date: ( .dmp_when_part > $extra_dt ),
dmp_reported_epoch: .reported_epoch,
dmp_seq: .reported_seq
}] ]][][][]'`
(( extr_dbg >= 2 )) && echo "from pg dump pg: $from_dmp"
(( extr_dbg >= 2 )) && echo "query output:"
(( extr_dbg >= 2 )) && ceph pg $1 query -f json-pretty | awk -e '/scrubber/,/agent_state/ {print;}'
from_qry=`ceph pg $1 query -f json-pretty | jq -r --arg extra_dt "$extra_time" --arg current_dt "$current_time" --arg spt "'" '
. |
(.q_stat_part=((.scrubber.schedule// "-") | if test(".*@.*") then (split(" @ ")|first) else . end)) |
(.q_when_part=((.scrubber.schedule// "0") | if test(".*@.*") then (split(" @ ")|last) else "0" end)) |
(.q_when_is_future=(.q_when_part > $current_dt)) |
(.q_vs_date=(.q_when_part > $extra_dt)) |
{
query_epoch: .epoch,
query_seq: .info.stats.reported_seq,
query_active: (.scrubber | if has("active") then .active else "bug" end),
query_schedule: .q_stat_part,
query_schedule_at: .q_when_part,
query_last_duration: .info.stats.last_scrub_duration,
query_last_stamp: .info.history.last_scrub_stamp,
query_last_scrub: (.info.history.last_scrub| sub($spt;"x") ),
query_is_future: .q_when_is_future,
query_vs_date: .q_vs_date,
query_scrub_seq: .scrubber.test_sequence
}
'`
(( extr_dbg >= 1 )) && echo $from_qry " " $from_dmp | jq -s -r 'add | "(",(to_entries | .[] | "["+(.key)+"]="+(.value|@sh)),")"'
# note that using a ref to an associative array directly is tricky. Instead - we are copying:
local -A dict_src=`echo $from_qry " " $from_dmp | jq -s -r 'add | "(",(to_entries | .[] | "["+(.key)+"]="+(.value|@sh)),")"'`
dict=()
for k in "${!dict_src[@]}"; do dict[$k]=${dict_src[$k]}; done
if [[ -n "$saved_echo_flag" ]]; then set -x; fi
}
# query the PG, until any of the conditions in the 'expected' array are met
#
# A condition may be negated by an additional entry in the 'expected' array. Its
# form should be:
# key: the original key, with a "_neg" suffix;
# Value: not checked
#
# $1: pg id
# $2: max retries
# $3: a date to use in comparisons
# $4: set of K/V conditions
# $5: debug message
# $6: [out] the results array
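#
# Illustrative usage (as in osd-scrub-test.sh); a condition can be negated by
# adding a matching '<key>_neg' entry:
#   declare -A expct_qry_duration=( ['query_last_duration']="0" ['query_last_duration_neg']="not0" )
#   declare -A sched_data
#   wait_any_cond $pgid 10 $saved_last_stamp expct_qry_duration "WaitingAfterScrub" sched_data || return 1
#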
function wait_any_cond() {
local pgid="$1"
local retries=$2
local cmp_date=$3
local -n ep=$4
local -n out_array=$6
local -A sc_data
local extr_dbg=2
#turn off '-x' (but remember previous state)
local saved_echo_flag=${-//[^x]/}
set +x
local now_is=`date -I"ns"`
(( extr_dbg >= 2 )) && echo "waiting for any condition ($5): pg:$pgid dt:$cmp_date ($retries retries)"
for i in $(seq 1 $retries)
do
sleep 0.5
extract_published_sch $pgid $now_is $cmp_date sc_data
(( extr_dbg >= 4 )) && echo "${sc_data['dmp_last_duration']}"
(( extr_dbg >= 4 )) && echo "----> loop: $i ~ ${sc_data['dmp_last_duration']} / " ${sc_data['query_vs_date']} " / ${sc_data['dmp_is_future']}"
(( extr_dbg >= 2 )) && echo "--> loop: $i ~ ${sc_data['query_active']} / ${sc_data['query_seq']} / ${sc_data['dmp_seq']} " \
"/ ${sc_data['query_is_future']} / ${sc_data['query_last_stamp']} / ${sc_data['query_schedule']} %%% ${!ep[@]}"
# perform schedule_against_expected(), but with slightly different out-messages behaviour
for k_ref in "${!ep[@]}"
do
(( extr_dbg >= 3 )) && echo "key is $k_ref"
# is this a real key, or just a negation flag for another key??
[[ $k_ref =~ "_neg" ]] && continue
local act_val=${sc_data[$k_ref]}
local exp_val=${ep[$k_ref]}
# possible negation? look for a matching key
local neg_key="${k_ref}_neg"
(( extr_dbg >= 3 )) && echo "neg-key is $neg_key"
if [ -v 'ep[$neg_key]' ]; then
is_neg=1
else
is_neg=0
fi
(( extr_dbg >= 1 )) && echo "key is $k_ref: negation:$is_neg # expected: $exp_val # in actual: $act_val"
is_eq=0
[[ $exp_val == $act_val ]] && is_eq=1
if (($is_eq ^ $is_neg))
then
echo "$5 - '$k_ref' actual value ($act_val) matches expected ($exp_val) (negation: $is_neg)"
for k in "${!sc_data[@]}"; do out_array[$k]=${sc_data[$k]}; done
if [[ -n "$saved_echo_flag" ]]; then set -x; fi
return 0
fi
done
done
echo "$5: wait_any_cond(): failure. Note: query-active=${sc_data['query_active']}"
if [[ -n "$saved_echo_flag" ]]; then set -x; fi
return 1
}
# schedule_against_expected()
#
# Compare the scrub scheduling state collected by extract_published_sch() to a set of expected values.
# All values are expected to match.
#
# $1: the published scheduling state
# $2: a set of conditions to verify
# $3: text to be echoed for a failed match
#
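# Illustrative usage (as in TEST_dump_scrub_schedule):
#   declare -A expct_starting=( ['query_active']="false" ['query_is_future']="true" )
#   schedule_against_expected sched_data expct_starting "initial" || return 1
#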
function schedule_against_expected() {
local -n dict=$1 # a ref to the published state
local -n ep=$2 # the expected results
local extr_dbg=1
# turn off '-x' (but remember previous state)
local saved_echo_flag=${-//[^x]/}
set +x
(( extr_dbg >= 1 )) && echo "-- - comparing:"
for k_ref in "${!ep[@]}"
do
local act_val=${dict[$k_ref]}
local exp_val=${ep[$k_ref]}
(( extr_dbg >= 1 )) && echo "key is " $k_ref " expected: " $exp_val " in actual: " $act_val
if [[ $exp_val != $act_val ]]
then
echo "$3 - '$k_ref' actual value ($act_val) differs from expected ($exp_val)"
echo '####################################################^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
if [[ -n "$saved_echo_flag" ]]; then set -x; fi
return 1
fi
done
if [[ -n "$saved_echo_flag" ]]; then set -x; fi
return 0
}
# Start the cluster "nodes" and create a pool for testing.
#
# The OSDs are started with a set of parameters aimed at creating a repeatable
# and stable scrub sequence:
# - no scrub randomizations/backoffs
# - no autoscaler
#
# $1: the test directory
# $2: [in/out] an array of configuration values
#
# The function adds/updates the configuration dictionary with the name of the
# pool created, and its ID.
#
# Argument 2 might look like this:
#
# declare -A test_conf=(
# ['osds_num']="3"
# ['pgs_in_pool']="7"
# ['extras']="--extra1 --extra2"
# ['pool_name']="testpl"
# )
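#
# and a matching call (illustrative) would be:
#
#   standard_scrub_cluster $dir test_conf || return 1
#   echo "created pool ${test_conf['pool_name']} with id ${test_conf['pool_id']}"
#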
function standard_scrub_cluster() {
local dir=$1
local -n args=$2
local OSDS=${args['osds_num']:-"3"}
local pg_num=${args['pgs_in_pool']:-"8"}
local poolname="${args['pool_name']:-test}"
args['pool_name']=$poolname
local extra_pars=${args['extras']}
local debug_msg=${args['msg']:-"dbg"}
# turn off '-x' (but remember previous state)
local saved_echo_flag=${-//[^x]/}
set +x
run_mon $dir a --osd_pool_default_size=$OSDS || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd_deep_scrub_randomize_ratio=0 \
--osd_scrub_interval_randomize_ratio=0 \
--osd_scrub_backoff_ratio=0.0 \
--osd_pool_default_pg_autoscale_mode=off \
--osd_pg_stat_report_interval_max=1 \
$extra_pars"
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd $(echo $ceph_osd_args) || return 1
done
create_pool $poolname $pg_num $pg_num
wait_for_clean || return 1
# update the in/out 'args' with the ID of the new pool
sleep 1
name_n_id=`ceph osd dump | awk '/^pool.*'$poolname'/ { gsub(/'"'"'/," ",$3); print $3," ", $2}'`
echo "standard_scrub_cluster: $debug_msg: test pool is $name_n_id"
args['pool_id']="${name_n_id##* }"
args['osd_args']=$ceph_osd_args
if [[ -n "$saved_echo_flag" ]]; then set -x; fi
}
# Start the cluster "nodes" and create a pool for testing - wpq version.
#
# A variant of standard_scrub_cluster() that selects the wpq scheduler and sets a value for
# osd_scrub_sleep. To be used when the test is attempting to "catch" the scrubber during an
# ongoing scrub.
#
# See standard_scrub_cluster() for more details.
#
# $1: the test directory
# $2: [in/out] an array of configuration values
# $3: osd_scrub_sleep
#
# The function adds/updates the configuration dictionary with the name of the
# pool created, and its ID.
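#
# Illustrative call, assuming a 'test_conf' dictionary as shown for standard_scrub_cluster():
#   standard_scrub_wpq_cluster $dir test_conf 2.5 || return 1
#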
function standard_scrub_wpq_cluster() {
local dir=$1
local -n conf=$2
local osd_sleep=$3
conf['extras']=" --osd_op_queue=wpq --osd_scrub_sleep=$osd_sleep ${conf['extras']}"
standard_scrub_cluster $dir conf || return 1
}
# A debug flag is set for the PG specified, causing the 'pg query' command to display
# an additional 'scrub sessions counter' field.
#
# $1: PG id
#
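# Illustrative usage:
#   set_query_debug "${poolid}.0"
#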
function set_query_debug() {
local pgid=$1
local prim_osd=`ceph pg dump pgs_brief | \
awk -v pg="^$pgid" -n -e '$0 ~ pg { print(gensub(/[^0-9]*([0-9]+).*/,"\\\\1","g",$5)); }' `
echo "Setting scrub debug data. Primary for $pgid is $prim_osd"
CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.$prim_osd) \
scrubdebug $pgid set sessions
}
| 10,515 | 33.706271 | 151 | sh |
null | ceph-main/qa/standalone/special/test-failure.sh | #!/usr/bin/env bash
set -ex
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7202" # git grep '\<7202\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_failure_log() {
local dir=$1
cat > $dir/test_failure.log << EOF
This is a fake log file
*
*
*
*
*
This ends the fake log file
EOF
# Test fails
return 1
}
function TEST_failure_core_only() {
local dir=$1
run_mon $dir a || return 1
kill_daemons $dir SEGV mon 5
return 0
}
main test_failure "$@"
| 880 | 16.979592 | 83 | sh |
null | ceph-main/qa/workunits/ceph-helpers-root.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
#######################################################################
function distro_id() {
source /etc/os-release
echo $ID
}
function distro_version() {
source /etc/os-release
echo $VERSION
}
function install() {
if [ $(distro_id) = "ubuntu" ]; then
sudo apt-get purge -y gcc
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
fi
for package in "$@" ; do
install_one $package
done
if [ $(distro_id) = "ubuntu" ]; then
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-11 11
sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-11 11
sudo update-alternatives --install /usr/bin/cc cc /usr/bin/gcc 11
sudo update-alternatives --set cc /usr/bin/gcc
sudo update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++ 11
sudo update-alternatives --set c++ /usr/bin/g++
fi
}
function install_one() {
case $(distro_id) in
ubuntu|debian|devuan|softiron)
sudo env DEBIAN_FRONTEND=noninteractive apt-get install -y "$@"
;;
centos|fedora|rhel)
sudo yum install -y "$@"
;;
opensuse*|suse|sles)
sudo zypper --non-interactive install "$@"
;;
*)
echo "$(distro_id) is unknown, $@ will have to be installed manually."
;;
esac
}
function install_pkg_on_ubuntu {
local project=$1
shift
local sha1=$1
shift
local codename=$1
shift
local force=$1
shift
local pkgs=$@
local missing_pkgs
if [ $force = "force" ]; then
missing_pkgs="$@"
else
for pkg in $pkgs; do
if ! dpkg -s $pkg &> /dev/null; then
missing_pkgs+=" $pkg"
fi
done
fi
if test -n "$missing_pkgs"; then
local shaman_url="https://shaman.ceph.com/api/repos/${project}/master/${sha1}/ubuntu/${codename}/repo"
sudo curl --silent --location $shaman_url --output /etc/apt/sources.list.d/$project.list
sudo env DEBIAN_FRONTEND=noninteractive apt-get update -y -o Acquire::Languages=none -o Acquire::Translation=none || true
sudo env DEBIAN_FRONTEND=noninteractive apt-get install --allow-unauthenticated -y $missing_pkgs
fi
}
#######################################################################
function control_osd() {
local action=$1
local id=$2
sudo systemctl $action ceph-osd@$id
return 0
}
#######################################################################
function pool_read_write() {
local size=${1:-1}
local dir=/tmp
local timeout=360
local test_pool=test_pool
ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1
ceph osd pool create $test_pool 4 || return 1
ceph osd pool set $test_pool size $size --yes-i-really-mean-it || return 1
ceph osd pool set $test_pool min_size $size || return 1
ceph osd pool application enable $test_pool rados
echo FOO > $dir/BAR
timeout $timeout rados --pool $test_pool put BAR $dir/BAR || return 1
timeout $timeout rados --pool $test_pool get BAR $dir/BAR.copy || return 1
diff $dir/BAR $dir/BAR.copy || return 1
ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1
}
#######################################################################
set -x
"$@"
| 3,944 | 29.346154 | 122 | sh |
null | ceph-main/qa/workunits/false.sh | #!/bin/sh -ex
false | 20 | 6 | 13 | sh |
null | ceph-main/qa/workunits/kernel_untar_build.sh | #!/usr/bin/env bash
set -ex
wget -O linux.tar.gz http://download.ceph.com/qa/linux-5.4.tar.gz
mkdir t
cd t
tar xzf ../linux.tar.gz
cd linux*
make defconfig
make -j`grep -c processor /proc/cpuinfo`
cd ..
if ! rm -rv linux* ; then
echo "uh oh rm -r failed, it left behind:"
find .
exit 1
fi
cd ..
rm -rv t linux*
| 326 | 14.571429 | 65 | sh |
null | ceph-main/qa/workunits/post-file.sh | #!/usr/bin/env bash
set -ex
what="$1"
[ -z "$what" ] && what=/etc/udev/rules.d
sudo ceph-post-file -d ceph-test-workunit $what
echo OK
| 137 | 14.333333 | 47 | sh |
null | ceph-main/qa/workunits/test_telemetry_pacific.sh | #!/bin/bash -ex
# Set up ident details for cluster
ceph config set mgr mgr/telemetry/channel_ident true
ceph config set mgr mgr/telemetry/organization 'ceph-qa'
ceph config set mgr mgr/telemetry/description 'upgrade test cluster'
# Opt-in
ceph telemetry on --license sharing-1-0
# Check last_opt_revision
LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
if [ $LAST_OPT_REVISION -ne 3 ]; then
echo "last_opt_revision is incorrect."
exit 1
fi
# Check reports
ceph telemetry show
ceph telemetry show-device
ceph telemetry show-all
echo OK
| 573 | 22.916667 | 72 | sh |
null | ceph-main/qa/workunits/test_telemetry_pacific_x.sh | #!/bin/bash -ex
# Assert that we're still opted in
LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
if [ $LAST_OPT_REVISION -ne 3 ]; then
echo "last_opt_revision is incorrect"
exit 1
fi
# Check the warning:
STATUS=$(ceph -s)
if ! [[ $STATUS == *"Telemetry requires re-opt-in"* ]]
then
echo "STATUS does not contain re-opt-in warning"
exit 1
fi
# Check new collections
COLLECTIONS=$(ceph telemetry collection ls)
NEW_COLLECTIONS=("perf_perf" "basic_mds_metadata" "basic_pool_usage" "basic_rook_v01" "perf_memory_metrics")
for col in ${NEW_COLLECTIONS[@]}; do
if ! [[ $COLLECTIONS == *$col* ]];
then
echo "COLLECTIONS does not contain" "'"$col"'."
exit 1
fi
done
# Run preview commands
ceph telemetry preview
ceph telemetry preview-device
ceph telemetry preview-all
# Opt in to new collections
ceph telemetry on --license sharing-1-0
ceph telemetry enable channel perf
# Check the warning:
timeout=60
STATUS=$(ceph -s)
until [[ $STATUS != *"Telemetry requires re-opt-in"* ]] || [ $timeout -le 0 ]; do
STATUS=$(ceph -s)
sleep 1
timeout=$(( timeout - 1 ))
done
if [ $timeout -le 0 ]; then
echo "STATUS should not contain re-opt-in warning at this point"
exit 1
fi
# Run show commands
ceph telemetry show
ceph telemetry show-device
ceph telemetry show
# Opt out
ceph telemetry off
echo OK
| 1,374 | 21.916667 | 108 | sh |
null | ceph-main/qa/workunits/test_telemetry_quincy.sh | #!/bin/bash -ex
# Set up ident details for cluster
ceph config set mgr mgr/telemetry/channel_ident true
ceph config set mgr mgr/telemetry/organization 'ceph-qa'
ceph config set mgr mgr/telemetry/description 'upgrade test cluster'
#Run preview commands
ceph telemetry preview
ceph telemetry preview-device
ceph telemetry preview-all
# Assert that new collections are available
COLLECTIONS=$(ceph telemetry collection ls)
NEW_COLLECTIONS=("perf_perf" "basic_mds_metadata" "basic_pool_usage" "basic_rook_v01" "perf_memory_metrics")
for col in ${NEW_COLLECTIONS[@]}; do
if ! [[ $COLLECTIONS == *$col* ]];
then
echo "COLLECTIONS does not contain" "'"$col"'."
exit 1
fi
done
# Opt-in
ceph telemetry on --license sharing-1-0
# Enable perf channel
ceph telemetry enable channel perf
# For quincy, the last_opt_revision remains at 1 since last_opt_revision
# was phased out for fresh installs of quincy.
LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
if [ $LAST_OPT_REVISION -ne 1 ]; then
echo "last_opt_revision is incorrect"
exit 1
fi
# Run show commands
ceph telemetry show
ceph telemetry show-device
ceph telemetry show-all
echo OK
| 1,191 | 25.488889 | 108 | sh |
null | ceph-main/qa/workunits/test_telemetry_quincy_x.sh | #!/bin/bash -ex
# For quincy, the last_opt_revision remains at 1 since last_opt_revision
# was phased out for fresh installs of quincy.
LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
if [ $LAST_OPT_REVISION -ne 1 ]; then
echo "last_opt_revision is incorrect"
exit 1
fi
# Check the warning:
ceph -s
COLLECTIONS=$(ceph telemetry collection ls)
NEW_COLLECTIONS=("perf_perf" "basic_mds_metadata" "basic_pool_usage" "basic_rook_v01" "perf_memory_metrics")
for col in ${NEW_COLLECTIONS[@]}; do
if ! [[ $COLLECTIONS == *$col* ]];
then
echo "COLLECTIONS does not contain" "'"$col"'."
exit 1
fi
done
#Run preview commands
ceph telemetry preview
ceph telemetry preview-device
ceph telemetry preview-all
# Opt in to new collections
# Currently, no new collections between latest quincy and reef (dev)
# Run show commands
ceph telemetry show
ceph telemetry show-device
ceph telemetry show
# Opt out
ceph telemetry off
echo OK
| 977 | 22.853659 | 108 | sh |
null | ceph-main/qa/workunits/true.sh | #!/bin/sh -ex
true
| 20 | 4.25 | 13 | sh |
null | ceph-main/qa/workunits/caps/mon_commands.sh | #!/bin/sh -ex
ceph-authtool --create-keyring k --gen-key -p --name client.xx
ceph auth add -i k client.xx mon "allow command foo; allow command bar *; allow command baz ...; allow command foo add * mon allow\\ rwx osd allow\\ *"
( ceph -k k -n client.xx foo || true ) | grep 'unrecog'
( ceph -k k -n client.xx foo ooo || true ) | grep 'Access denied'
( ceph -k k -n client.xx fo || true ) | grep 'Access denied'
( ceph -k k -n client.xx fooo || true ) | grep 'Access denied'
( ceph -k k -n client.xx bar || true ) | grep 'Access denied'
( ceph -k k -n client.xx bar a || true ) | grep 'unrecog'
( ceph -k k -n client.xx bar a b c || true ) | grep 'Access denied'
( ceph -k k -n client.xx ba || true ) | grep 'Access denied'
( ceph -k k -n client.xx barr || true ) | grep 'Access denied'
( ceph -k k -n client.xx baz || true ) | grep -v 'Access denied'
( ceph -k k -n client.xx baz a || true ) | grep -v 'Access denied'
( ceph -k k -n client.xx baz a b || true ) | grep -v 'Access denied'
( ceph -k k -n client.xx foo add osd.1 -i k mon 'allow rwx' osd 'allow *' || true ) | grep 'unrecog'
( ceph -k k -n client.xx foo add osd a b c -i k mon 'allow rwx' osd 'allow *' || true ) | grep 'Access denied'
( ceph -k k -n client.xx foo add osd a b c -i k mon 'allow *' || true ) | grep 'Access denied'
echo OK | 1,352 | 53.12 | 151 | sh |
null | ceph-main/qa/workunits/ceph-tests/ceph-admin-commands.sh | #!/bin/sh -ex
ceph -s
rados lspools
rbd ls
# check that the monitors work
ceph osd set nodown
ceph osd unset nodown
exit 0
| 125 | 10.454545 | 30 | sh |
null | ceph-main/qa/workunits/cephadm/create_iscsi_disks.sh | #!/bin/bash -ex
# Create some file-backed iSCSI targets and attach them locally.
# Exit if it's not CentOS
if ! grep -q rhel /etc/*-release; then
echo "The script only supports CentOS."
exit 1
fi
[ -z "$SUDO" ] && SUDO=sudo
# 15 GB
DISK_FILE_SIZE="16106127360"
$SUDO yum install -y targetcli iscsi-initiator-utils
TARGET_NAME="iqn.2003-01.org.linux-iscsi.$(hostname).x8664:sn.foobar"
$SUDO targetcli /iscsi create ${TARGET_NAME}
$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1/portals delete 0.0.0.0 3260
$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1/portals create 127.0.0.1 3260
$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1 set attribute generate_node_acls=1
$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1 set attribute demo_mode_write_protect=0
for i in $(seq 3); do
# Create truncated files, and add them as luns
DISK_FILE="/tmp/disk${i}"
$SUDO truncate --size ${DISK_FILE_SIZE} ${DISK_FILE}
$SUDO targetcli /backstores/fileio create "lun${i}" ${DISK_FILE}
# Workaround for https://tracker.ceph.com/issues/47758
$SUDO targetcli "/backstores/fileio/lun${i}" set attribute optimal_sectors=0
$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1/luns create "/backstores/fileio/lun${i}"
done
$SUDO iscsiadm -m discovery -t sendtargets -p 127.0.0.1
$SUDO iscsiadm -m node -p 127.0.0.1 -T ${TARGET_NAME} -l
| 1,325 | 34.837838 | 87 | sh |
null | ceph-main/qa/workunits/cephadm/test_adoption.sh | #!/bin/bash -ex
SCRIPT_NAME=$(basename ${BASH_SOURCE[0]})
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CEPHADM_SRC_DIR=${SCRIPT_DIR}/../../../src/cephadm
CORPUS_COMMIT=9cd9ad020d93b0b420924fec55da307aff8bd422
[ -z "$SUDO" ] && SUDO=sudo
[ -d "$TMPDIR" ] || TMPDIR=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
trap "$SUDO rm -rf $TMPDIR" EXIT
if [ -z "$CEPHADM" ]; then
CEPHADM=`mktemp -p $TMPDIR tmp.cephadm.XXXXXX`
${CEPHADM_SRC_DIR}/build.sh "$CEPHADM"
fi
# at this point, we need $CEPHADM set
if ! [ -x "$CEPHADM" ]; then
echo "cephadm not found. Please set \$CEPHADM"
exit 1
fi
# combine into a single var
CEPHADM_BIN="$CEPHADM"
CEPHADM="$SUDO $CEPHADM_BIN"
## adopt
CORPUS_GIT_SUBMOD="cephadm-adoption-corpus"
GIT_CLONE_DIR=${TMPDIR}/${CORPUS_GIT_SUBMOD}
git clone https://github.com/ceph/$CORPUS_GIT_SUBMOD $GIT_CLONE_DIR
git -C $GIT_CLONE_DIR checkout $CORPUS_COMMIT
CORPUS_DIR=${GIT_CLONE_DIR}/archive
for subdir in `ls ${CORPUS_DIR}`; do
for tarfile in `ls ${CORPUS_DIR}/${subdir} | grep .tgz`; do
tarball=${CORPUS_DIR}/${subdir}/${tarfile}
FSID_LEGACY=`echo "$tarfile" | cut -c 1-36`
TMP_TAR_DIR=`mktemp -d -p $TMPDIR`
$SUDO tar xzvf $tarball -C $TMP_TAR_DIR
NAMES=$($CEPHADM ls --legacy-dir $TMP_TAR_DIR | jq -r '.[].name')
for name in $NAMES; do
$CEPHADM adopt \
--style legacy \
--legacy-dir $TMP_TAR_DIR \
--name $name
# validate after adopt
out=$($CEPHADM ls | jq '.[]' \
| jq 'select(.name == "'$name'")')
echo $out | jq -r '.style' | grep 'cephadm'
echo $out | jq -r '.fsid' | grep $FSID_LEGACY
done
# clean-up before next iter
$CEPHADM rm-cluster --fsid $FSID_LEGACY --force
$SUDO rm -rf $TMP_TAR_DIR
done
done
echo "OK"
| 1,838 | 29.147541 | 67 | sh |
null | ceph-main/qa/workunits/cephadm/test_cephadm.sh | #!/bin/bash -ex
SCRIPT_NAME=$(basename ${BASH_SOURCE[0]})
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# cleanup during exit
[ -z "$CLEANUP" ] && CLEANUP=true
FSID='00000000-0000-0000-0000-0000deadbeef'
# images that are used
IMAGE_MAIN=${IMAGE_MAIN:-'quay.ceph.io/ceph-ci/ceph:main'}
IMAGE_PACIFIC=${IMAGE_PACIFIC:-'quay.ceph.io/ceph-ci/ceph:pacific'}
#IMAGE_OCTOPUS=${IMAGE_OCTOPUS:-'quay.ceph.io/ceph-ci/ceph:octopus'}
IMAGE_DEFAULT=${IMAGE_MAIN}
OSD_IMAGE_NAME="${SCRIPT_NAME%.*}_osd.img"
OSD_IMAGE_SIZE='6G'
OSD_TO_CREATE=2
OSD_VG_NAME=${SCRIPT_NAME%.*}
OSD_LV_NAME=${SCRIPT_NAME%.*}
# TMPDIR for test data
[ -d "$TMPDIR" ] || TMPDIR=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
[ -d "$TMPDIR_TEST_MULTIPLE_MOUNTS" ] || TMPDIR_TEST_MULTIPLE_MOUNTS=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
CEPHADM_SRC_DIR=${SCRIPT_DIR}/../../../src/cephadm
CEPHADM_SAMPLES_DIR=${CEPHADM_SRC_DIR}/samples
[ -z "$SUDO" ] && SUDO=sudo
# If cephadm is already installed on the system, use that one; avoid building
# one if we can.
if [ -z "$CEPHADM" ] && command -v cephadm >/dev/null ; then
CEPHADM="$(command -v cephadm)"
fi
if [ -z "$CEPHADM" ]; then
CEPHADM=`mktemp -p $TMPDIR tmp.cephadm.XXXXXX`
${CEPHADM_SRC_DIR}/build.sh "$CEPHADM"
NO_BUILD_INFO=1
fi
# at this point, we need $CEPHADM set
if ! [ -x "$CEPHADM" ]; then
echo "cephadm not found. Please set \$CEPHADM"
exit 1
fi
# add image to args
CEPHADM_ARGS="$CEPHADM_ARGS --image $IMAGE_DEFAULT"
# combine into a single var
CEPHADM_BIN="$CEPHADM"
CEPHADM="$SUDO $CEPHADM_BIN $CEPHADM_ARGS"
# clean up previous run(s)?
$CEPHADM rm-cluster --fsid $FSID --force
$SUDO vgchange -an $OSD_VG_NAME || true
loopdev=$($SUDO losetup -a | grep $(basename $OSD_IMAGE_NAME) | awk -F : '{print $1}')
if ! [ "$loopdev" = "" ]; then
$SUDO losetup -d $loopdev
fi
function cleanup()
{
if [ $CLEANUP = false ]; then
# preserve the TMPDIR state
echo "========================"
echo "!!! CLEANUP=$CLEANUP !!!"
echo
echo "TMPDIR=$TMPDIR"
echo "========================"
return
fi
dump_all_logs $FSID
rm -rf $TMPDIR
}
trap cleanup EXIT
function expect_false()
{
set -x
if eval "$@"; then return 1; else return 0; fi
}
# expect_return_code $expected_code $command ...
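# e.g. (illustrative): expect_return_code 1 false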
function expect_return_code()
{
set -x
local expected_code="$1"
shift
local command="$@"
set +e
eval "$command"
local return_code="$?"
set -e
if [ ! "$return_code" -eq "$expected_code" ]; then return 1; else return 0; fi
}
function is_available()
{
local name="$1"
local condition="$2"
local tries="$3"
local num=0
while ! eval "$condition"; do
num=$(($num + 1))
if [ "$num" -ge $tries ]; then
echo "$name is not available"
false
fi
sleep 5
done
echo "$name is available"
true
}
function dump_log()
{
local fsid="$1"
local name="$2"
local num_lines="$3"
if [ -z $num_lines ]; then
num_lines=100
fi
echo '-------------------------'
echo 'dump daemon log:' $name
echo '-------------------------'
$CEPHADM logs --fsid $fsid --name $name -- --no-pager -n $num_lines
}
function dump_all_logs()
{
local fsid="$1"
local names=$($CEPHADM ls | jq -r '.[] | select(.fsid == "'$fsid'").name')
echo 'dumping logs for daemons: ' $names
for name in $names; do
dump_log $fsid $name
done
}
function nfs_stop()
{
# stop the running nfs server
local units="nfs-server nfs-kernel-server"
for unit in $units; do
if systemctl --no-pager status $unit > /dev/null; then
$SUDO systemctl stop $unit
fi
done
# ensure the NFS port is no longer in use
expect_false "$SUDO ss -tlnp '( sport = :nfs )' | grep LISTEN"
}
## prepare + check host
$SUDO $CEPHADM check-host
## run a gather-facts (output to stdout)
$SUDO $CEPHADM gather-facts
## NOTE: cephadm version is, as of around May 2023, no longer basing the
## output for `cephadm version` on the version of the containers. The version
## reported is that of the "binary" and is determined during the ceph build.
## `cephadm version` should NOT require sudo/root.
$CEPHADM_BIN version
$CEPHADM_BIN version | grep 'cephadm version'
# Typically cmake should be running the cephadm build script with CLI arguments
# that embed version info into the "binary". If not using a cephadm build via
# cmake you can set `NO_BUILD_INFO` to skip this check.
if [ -z "$NO_BUILD_INFO" ]; then
$CEPHADM_BIN version | grep -v 'UNSET'
$CEPHADM_BIN version | grep -v 'UNKNOWN'
fi
## test shell before bootstrap, when crash dir isn't (yet) present on this host
$CEPHADM shell --fsid $FSID -- ceph -v | grep 'ceph version'
$CEPHADM shell --fsid $FSID -e FOO=BAR -- printenv | grep FOO=BAR
# test stdin
echo foo | $CEPHADM shell -- cat | grep -q foo
# the shell commands a bit above this seem to cause the
# /var/lib/ceph/<fsid> directory to be made. Since we now
# check in bootstrap that there are no clusters with the same
# fsid based on the directory existing, we need to make sure
# this directory is gone before bootstrapping. We can
# accomplish this with another rm-cluster
$CEPHADM rm-cluster --fsid $FSID --force
## bootstrap
ORIG_CONFIG=`mktemp -p $TMPDIR`
CONFIG=`mktemp -p $TMPDIR`
MONCONFIG=`mktemp -p $TMPDIR`
KEYRING=`mktemp -p $TMPDIR`
IP=127.0.0.1
cat <<EOF > $ORIG_CONFIG
[global]
log to file = true
osd crush chooseleaf type = 0
EOF
$CEPHADM bootstrap \
--mon-id a \
--mgr-id x \
--mon-ip $IP \
--fsid $FSID \
--config $ORIG_CONFIG \
--output-config $CONFIG \
--output-keyring $KEYRING \
--output-pub-ssh-key $TMPDIR/ceph.pub \
--allow-overwrite \
--skip-mon-network \
--skip-monitoring-stack
test -e $CONFIG
test -e $KEYRING
rm -f $ORIG_CONFIG
$SUDO test -e /var/log/ceph/$FSID/ceph-mon.a.log
$SUDO test -e /var/log/ceph/$FSID/ceph-mgr.x.log
for u in ceph.target \
ceph-$FSID.target \
[email protected] \
[email protected]; do
systemctl is-enabled $u
systemctl is-active $u
done
systemctl | grep system-ceph | grep -q .slice # naming is escaped and annoying
# check ceph -s works (via shell w/ passed config/keyring)
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph -s | grep $FSID
for t in mon mgr node-exporter prometheus grafana; do
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph orch apply $t --unmanaged
done
## ls
$CEPHADM ls | jq '.[]' | jq 'select(.name == "mon.a").fsid' \
| grep $FSID
$CEPHADM ls | jq '.[]' | jq 'select(.name == "mgr.x").fsid' \
| grep $FSID
# make sure the version is returned correctly
$CEPHADM ls | jq '.[]' | jq 'select(.name == "mon.a").version' | grep -q \\.
## deploy
# add mon.b
cp $CONFIG $MONCONFIG
echo "public addrv = [v2:$IP:3301,v1:$IP:6790]" >> $MONCONFIG
jq --null-input \
--arg fsid $FSID \
--arg name mon.b \
--arg keyring /var/lib/ceph/$FSID/mon.a/keyring \
--arg config "$MONCONFIG" \
'{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config}}' | \
$CEPHADM _orch deploy
for u in [email protected]; do
systemctl is-enabled $u
systemctl is-active $u
done
cond="$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph mon stat | grep '2 mons'"
is_available "mon.b" "$cond" 30
# add mgr.y
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph auth get-or-create mgr.y \
mon 'allow profile mgr' \
osd 'allow *' \
mds 'allow *' > $TMPDIR/keyring.mgr.y
jq --null-input \
--arg fsid $FSID \
--arg name mgr.y \
--arg keyring $TMPDIR/keyring.mgr.y \
--arg config "$CONFIG" \
'{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config}}' | \
$CEPHADM _orch deploy
for u in [email protected]; do
systemctl is-enabled $u
systemctl is-active $u
done
for f in `seq 1 30`; do
if $CEPHADM shell --fsid $FSID \
--config $CONFIG --keyring $KEYRING -- \
ceph -s -f json-pretty \
| jq '.mgrmap.num_standbys' | grep -q 1 ; then break; fi
sleep 1
done
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph -s -f json-pretty \
| jq '.mgrmap.num_standbys' | grep -q 1
# add osd.{1,2,..}
dd if=/dev/zero of=$TMPDIR/$OSD_IMAGE_NAME bs=1 count=0 seek=$OSD_IMAGE_SIZE
loop_dev=$($SUDO losetup -f)
$SUDO vgremove -f $OSD_VG_NAME || true
$SUDO losetup $loop_dev $TMPDIR/$OSD_IMAGE_NAME
$SUDO pvcreate $loop_dev && $SUDO vgcreate $OSD_VG_NAME $loop_dev
# osd bootstrap keyring
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph auth get client.bootstrap-osd > $TMPDIR/keyring.bootstrap.osd
# create lvs first so ceph-volume doesn't overlap with lv creation
for id in `seq 0 $((--OSD_TO_CREATE))`; do
$SUDO lvcreate -l $((100/$OSD_TO_CREATE))%VG -n $OSD_LV_NAME.$id $OSD_VG_NAME
done
for id in `seq 0 $((--OSD_TO_CREATE))`; do
device_name=/dev/$OSD_VG_NAME/$OSD_LV_NAME.$id
CEPH_VOLUME="$CEPHADM ceph-volume \
--fsid $FSID \
--config $CONFIG \
--keyring $TMPDIR/keyring.bootstrap.osd --"
# prepare the osd
$CEPH_VOLUME lvm prepare --bluestore --data $device_name --no-systemd
$CEPH_VOLUME lvm batch --no-auto $device_name --yes --no-systemd
# osd id and osd fsid
$CEPH_VOLUME lvm list --format json $device_name > $TMPDIR/osd.map
osd_id=$($SUDO cat $TMPDIR/osd.map | jq -cr '.. | ."ceph.osd_id"? | select(.)')
osd_fsid=$($SUDO cat $TMPDIR/osd.map | jq -cr '.. | ."ceph.osd_fsid"? | select(.)')
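    # For reference, `ceph-volume lvm list --format json` returns a map keyed by
    # OSD id whose entries carry a "tags" object, roughly:
    #   { "0": [ { "tags": { "ceph.osd_id": "0", "ceph.osd_fsid": "<uuid>", ... } } ] }
    # so the recursive-descent filters above (`.. | ."ceph.osd_id"?`) pick the two
    # tags out wherever they appear in that structure.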
# deploy the osd
jq --null-input \
--arg fsid $FSID \
--arg name osd.$osd_id \
--arg keyring $TMPDIR/keyring.bootstrap.osd \
--arg config "$CONFIG" \
--arg osd_fsid $osd_fsid \
'{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config, "osd_fsid": $osd_fsid}}' | \
$CEPHADM _orch deploy
done
# add node-exporter
jq --null-input \
--arg fsid $FSID \
--arg name node-exporter.a \
'{"fsid": $fsid, "name": $name}' | \
${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
cond="curl 'http://localhost:9100' | grep -q 'Node Exporter'"
is_available "node-exporter" "$cond" 10
# add prometheus
jq --null-input \
--arg fsid $FSID \
--arg name prometheus.a \
--argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/prometheus.json)" \
'{"fsid": $fsid, "name": $name, "config_blobs": $config_blobs}' | \
${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
cond="curl 'localhost:9095/api/v1/query?query=up'"
is_available "prometheus" "$cond" 10
# add grafana
jq --null-input \
--arg fsid $FSID \
--arg name grafana.a \
--argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/grafana.json)" \
'{"fsid": $fsid, "name": $name, "config_blobs": $config_blobs}' | \
${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
cond="curl --insecure 'https://localhost:3000' | grep -q 'grafana'"
is_available "grafana" "$cond" 50
# add nfs-ganesha
nfs_stop
nfs_rados_pool=$(cat ${CEPHADM_SAMPLES_DIR}/nfs.json | jq -r '.["pool"]')
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph osd pool create $nfs_rados_pool 64
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
    rados --pool $nfs_rados_pool --namespace nfs-ns create conf-nfs.a
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph orch pause
jq --null-input \
--arg fsid $FSID \
--arg name nfs.a \
--arg keyring "$KEYRING" \
--arg config "$CONFIG" \
--argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/nfs.json)" \
'{"fsid": $fsid, "name": $name, "params": {"keyring": $keyring, "config": $config}, "config_blobs": $config_blobs}' | \
${CEPHADM} _orch deploy
cond="$SUDO ss -tlnp '( sport = :nfs )' | grep 'ganesha.nfsd'"
is_available "nfs" "$cond" 10
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph orch resume
# add alertmanager via custom container
alertmanager_image=$(cat ${CEPHADM_SAMPLES_DIR}/custom_container.json | jq -r '.image')
tcp_ports=$(jq .ports ${CEPHADM_SAMPLES_DIR}/custom_container.json)
jq --null-input \
--arg fsid $FSID \
--arg name container.alertmanager.a \
--arg keyring $TMPDIR/keyring.bootstrap.osd \
--arg config "$CONFIG" \
--arg image "$alertmanager_image" \
--argjson tcp_ports "${tcp_ports}" \
--argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/custom_container.json)" \
'{"fsid": $fsid, "name": $name, "image": $image, "params": {"keyring": $keyring, "config": $config, "tcp_ports": $tcp_ports}, "config_blobs": $config_blobs}' | \
${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
cond="$CEPHADM enter --fsid $FSID --name container.alertmanager.a -- test -f \
/etc/alertmanager/alertmanager.yml"
is_available "alertmanager.yml" "$cond" 10
cond="curl 'http://localhost:9093' | grep -q 'Alertmanager'"
is_available "alertmanager" "$cond" 10
## run
# WRITE ME
## unit
$CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
$CEPHADM unit --fsid $FSID --name mon.a -- is-active
expect_false $CEPHADM unit --fsid $FSID --name mon.xyz -- is-active
$CEPHADM unit --fsid $FSID --name mon.a -- disable
expect_false $CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
$CEPHADM unit --fsid $FSID --name mon.a -- enable
$CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
$CEPHADM unit --fsid $FSID --name mon.a -- status
$CEPHADM unit --fsid $FSID --name mon.a -- stop
expect_return_code 3 $CEPHADM unit --fsid $FSID --name mon.a -- status
$CEPHADM unit --fsid $FSID --name mon.a -- start
## shell
$CEPHADM shell --fsid $FSID -- true
$CEPHADM shell --fsid $FSID -- test -d /var/log/ceph
expect_false $CEPHADM --timeout 10 shell --fsid $FSID -- sleep 60
$CEPHADM --timeout 60 shell --fsid $FSID -- sleep 10
$CEPHADM shell --fsid $FSID --mount $TMPDIR $TMPDIR_TEST_MULTIPLE_MOUNTS -- stat /mnt/$(basename $TMPDIR)
## enter
expect_false $CEPHADM enter
$CEPHADM enter --fsid $FSID --name mon.a -- test -d /var/lib/ceph/mon/ceph-a
$CEPHADM enter --fsid $FSID --name mgr.x -- test -d /var/lib/ceph/mgr/ceph-x
$CEPHADM enter --fsid $FSID --name mon.a -- pidof ceph-mon
expect_false $CEPHADM enter --fsid $FSID --name mgr.x -- pidof ceph-mon
$CEPHADM enter --fsid $FSID --name mgr.x -- pidof ceph-mgr
# this triggers a bug in older versions of podman, including 18.04's 1.6.2
#expect_false $CEPHADM --timeout 5 enter --fsid $FSID --name mon.a -- sleep 30
$CEPHADM --timeout 60 enter --fsid $FSID --name mon.a -- sleep 10
## ceph-volume
$CEPHADM ceph-volume --fsid $FSID -- inventory --format=json \
| jq '.[]'
## preserve test state
[ $CLEANUP = false ] && exit 0
## rm-daemon
# mon and osd require --force
expect_false $CEPHADM rm-daemon --fsid $FSID --name mon.a
# mgr does not
$CEPHADM rm-daemon --fsid $FSID --name mgr.x
expect_false $CEPHADM zap-osds --fsid $FSID
$CEPHADM zap-osds --fsid $FSID --force
## rm-cluster
expect_false $CEPHADM rm-cluster --fsid $FSID --zap-osds
$CEPHADM rm-cluster --fsid $FSID --force --zap-osds
echo PASS
| 15,287 | 31.185263 | 165 | sh |
null | ceph-main/qa/workunits/cephadm/test_dashboard_e2e.sh | #!/bin/bash -ex
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DASHBOARD_FRONTEND_DIR=${SCRIPT_DIR}/../../../src/pybind/mgr/dashboard/frontend
[ -z "$SUDO" ] && SUDO=sudo
install_common () {
NODEJS_VERSION="16"
if grep -q debian /etc/*-release; then
$SUDO apt-get update
# https://github.com/nodesource/distributions#manual-installation
$SUDO apt-get install curl gpg
KEYRING=/usr/share/keyrings/nodesource.gpg
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource.gpg.key | gpg --dearmor | $SUDO tee "$KEYRING" >/dev/null
DISTRO="$(source /etc/lsb-release; echo $DISTRIB_CODENAME)"
VERSION="node_$NODEJS_VERSION.x"
echo "deb [signed-by=$KEYRING] https://deb.nodesource.com/$VERSION $DISTRO main" | $SUDO tee /etc/apt/sources.list.d/nodesource.list
echo "deb-src [signed-by=$KEYRING] https://deb.nodesource.com/$VERSION $DISTRO main" | $SUDO tee -a /etc/apt/sources.list.d/nodesource.list
$SUDO apt-get update
$SUDO apt-get install nodejs
elif grep -q rhel /etc/*-release; then
$SUDO yum module -y enable nodejs:$NODEJS_VERSION
$SUDO yum install -y jq npm
else
echo "Unsupported distribution."
exit 1
fi
}
install_chrome () {
if grep -q debian /etc/*-release; then
$SUDO bash -c 'echo "deb [arch=amd64] https://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google-chrome.list'
curl -fsSL https://dl.google.com/linux/linux_signing_key.pub | $SUDO apt-key add -
$SUDO apt-get update
$SUDO apt-get install -y google-chrome-stable
$SUDO apt-get install -y xvfb
$SUDO rm /etc/apt/sources.list.d/google-chrome.list
elif grep -q rhel /etc/*-release; then
$SUDO dd of=/etc/yum.repos.d/google-chrome.repo status=none <<EOF
[google-chrome]
name=google-chrome
baseurl=https://dl.google.com/linux/chrome/rpm/stable/\$basearch
enabled=1
gpgcheck=1
gpgkey=https://dl-ssl.google.com/linux/linux_signing_key.pub
EOF
$SUDO yum install -y google-chrome-stable
$SUDO rm /etc/yum.repos.d/google-chrome.repo
# Cypress dependencies
$SUDO yum install -y xorg-x11-server-Xvfb gtk2-devel gtk3-devel libnotify-devel GConf2 nss.x86_64 libXScrnSaver alsa-lib
else
echo "Unsupported distribution."
exit 1
fi
}
cypress_run () {
local specs="$1"
local timeout="$2"
local override_config="excludeSpecPattern=*.po.ts,retries=0,specPattern=${specs}"
if [ x"$timeout" != "x" ]; then
override_config="${override_config},defaultCommandTimeout=${timeout}"
fi
npx cypress run --browser chrome --headless --config "$override_config"
}
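# Example (illustrative): cypress_run "cypress/e2e/orchestrator/04-osds.e2e-spec.ts" 300000
# ends up invoking cypress with
#   --config 'excludeSpecPattern=*.po.ts,retries=0,specPattern=cypress/e2e/orchestrator/04-osds.e2e-spec.ts,defaultCommandTimeout=300000'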
install_common
install_chrome
CYPRESS_BASE_URL=$(ceph mgr services | jq -r .dashboard)
export CYPRESS_BASE_URL
cd $DASHBOARD_FRONTEND_DIR
# This is required for Cypress to understand typescript
npm ci --unsafe-perm
npx cypress verify
npx cypress info
# Take `orch device ls` and `orch ps` as ground truth.
ceph orch device ls --refresh
ceph orch ps --refresh
sleep 10 # the previous call is asynchronous
ceph orch device ls --format=json | tee cypress/fixtures/orchestrator/inventory.json
ceph orch ps --format=json | tee cypress/fixtures/orchestrator/services.json
DASHBOARD_ADMIN_SECRET_FILE="/tmp/dashboard-admin-secret.txt"
printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}"
ceph dashboard ac-user-set-password admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" --force-password
# Run Dashboard e2e tests.
# These tests are designed with execution order in mind: since orchestrator operations
# are likely to change cluster state, we can't just run tests in arbitrary order.
# See /ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/ folder.
find cypress # List all specs
cypress_run "cypress/e2e/orchestrator/01-hosts.e2e-spec.ts"
# Hosts are removed and added in the previous step. Do a refresh again.
ceph orch device ls --refresh
sleep 10
ceph orch device ls --format=json | tee cypress/fixtures/orchestrator/inventory.json
cypress_run "cypress/e2e/orchestrator/03-inventory.e2e-spec.ts"
cypress_run "cypress/e2e/orchestrator/04-osds.e2e-spec.ts" 300000
| 4,224 | 38.12037 | 147 | sh |
null | ceph-main/qa/workunits/cephadm/test_iscsi_etc_hosts.sh | #!/bin/bash
# checks if the container and host's /etc/hosts files match
# Necessary to avoid potential bugs caused by podman making
# edits to the /etc/hosts file in the container
# exits with code 1 if the host and iscsi container /etc/hosts do not match
set -ex
ISCSI_DAEMON=$(sudo /home/ubuntu/cephtest/cephadm ls | jq -r '.[] | select(.service_name == "iscsi.foo") | .name')
sudo /home/ubuntu/cephtest/cephadm enter --name $ISCSI_DAEMON -- cat /etc/hosts > iscsi_daemon_etc_hosts.txt
if cmp --silent /etc/hosts iscsi_daemon_etc_hosts.txt; then
echo "Daemon and host /etc/hosts files successfully matched"
else
echo "ERROR: /etc/hosts on host did not match /etc/hosts in the iscsi container!"
echo "Host /etc/hosts:"
cat /etc/hosts
echo "Iscsi container /etc/hosts:"
cat iscsi_daemon_etc_hosts.txt
exit 1
fi
| 819 | 36.272727 | 114 | sh |
null | ceph-main/qa/workunits/cephadm/test_iscsi_pids_limit.sh | #!/bin/bash
# checks that the container's default pids-limit (4096) is removed and the iscsi
# containers continue to run
# exits 1 on failure
set -ex
ISCSI_CONT_IDS=$(sudo podman ps -qa --filter='name=iscsi')
CONT_COUNT=$(echo ${ISCSI_CONT_IDS} | wc -w)
test ${CONT_COUNT} -eq 2
for i in ${ISCSI_CONT_IDS}
do
test $(sudo podman exec ${i} cat /sys/fs/cgroup/pids/pids.max) == max
done
for i in ${ISCSI_CONT_IDS}
do
sudo podman exec ${i} /bin/sh -c 'for j in {0..20000}; do sleep 300 & done'
done
for i in ${ISCSI_CONT_IDS}
do
SLEEP_COUNT=$(sudo podman exec ${i} /bin/sh -c 'ps -ef | grep -c sleep')
test ${SLEEP_COUNT} -gt 20000
done
echo OK
| 648 | 20.633333 | 77 | sh |
null | ceph-main/qa/workunits/cephadm/test_repos.sh | #!/bin/bash -ex
SCRIPT_NAME=$(basename ${BASH_SOURCE[0]})
SCRIPT_DIR=$(dirname ${BASH_SOURCE[0]})
CEPHADM_SRC_DIR=${SCRIPT_DIR}/../../../src/cephadm
[ -d "$TMPDIR" ] || TMPDIR=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
trap "$SUDO rm -rf $TMPDIR" EXIT
if [ -z "$CEPHADM" ]; then
CEPHADM=`mktemp -p $TMPDIR tmp.cephadm.XXXXXX`
${CEPHADM_SRC_DIR}/build.sh "$CEPHADM"
fi
# this is a pretty weak test, unfortunately, since the
# package may also be in the base OS.
function test_install_uninstall() {
( sudo apt update && \
sudo apt -y install cephadm && \
sudo $CEPHADM install && \
sudo apt -y remove cephadm ) || \
( sudo yum -y install cephadm && \
sudo $CEPHADM install && \
sudo yum -y remove cephadm ) || \
( sudo dnf -y install cephadm && \
sudo $CEPHADM install && \
sudo dnf -y remove cephadm ) || \
( sudo zypper -n install cephadm && \
sudo $CEPHADM install && \
sudo zypper -n remove cephadm )
}
sudo $CEPHADM -v add-repo --release octopus
test_install_uninstall
sudo $CEPHADM -v rm-repo
sudo $CEPHADM -v add-repo --dev main
test_install_uninstall
sudo $CEPHADM -v rm-repo
sudo $CEPHADM -v add-repo --release 15.2.7
test_install_uninstall
sudo $CEPHADM -v rm-repo
echo OK.
| 1,250 | 26.195652 | 63 | sh |
null | ceph-main/qa/workunits/cephtool/test.sh | #!/usr/bin/env bash
# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab
set -x
source $(dirname $0)/../../standalone/ceph-helpers.sh
set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}
export CEPH_DEV=1
function check_no_osd_down()
{
! ceph osd dump | grep ' down '
}
function wait_no_osd_down()
{
max_run=300
for i in $(seq 1 $max_run) ; do
if ! check_no_osd_down ; then
echo "waiting for osd(s) to come back up ($i/$max_run)"
sleep 1
else
break
fi
done
check_no_osd_down
}
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
function expect_true()
{
set -x
if ! "$@"; then return 1; else return 0; fi
}
TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0
TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
#
# retry_eagain max cmd args ...
#
# retry cmd args ... if it exits on error and its output contains the
# string EAGAIN, at most $max times
#
function retry_eagain()
{
local max=$1
shift
local status
local tmpfile=$TEMP_DIR/retry_eagain.$$
local count
for count in $(seq 1 $max) ; do
status=0
"$@" > $tmpfile 2>&1 || status=$?
if test $status = 0 ||
! grep --quiet EAGAIN $tmpfile ; then
break
fi
sleep 1
done
if test $count = $max ; then
echo retried with non zero exit status, $max times: "$@" >&2
fi
cat $tmpfile
rm $tmpfile
return $status
}
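# Example (illustrative): retry a command up to 5 times as long as it fails with
# EAGAIN in its output, keeping the last run's output and exit status:
#   retry_eagain 5 ceph osd dump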
#
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of cmd arg ... if the output contains
# ENXIO.
#
function map_enxio_to_eagain()
{
local status=0
local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
"$@" > $tmpfile 2>&1 || status=$?
if test $status != 0 &&
grep --quiet ENXIO $tmpfile ; then
echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
fi
cat $tmpfile
rm $tmpfile
return $status
}
function check_response()
{
expected_string=$1
retcode=$2
expected_retcode=$3
if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
echo "return code invalid: got $retcode, expected $expected_retcode" >&2
exit 1
fi
if ! grep --quiet -- "$expected_string" $TMPFILE ; then
echo "Didn't find $expected_string in output" >&2
cat $TMPFILE >&2
exit 1
fi
}
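# Typical usage (illustrative), mirroring the call sites below: capture stderr of
# a command that is expected to fail into $TMPFILE, then assert on the error text
# (and optionally on the return code via the 2nd/3rd arguments):
#   ceph osd tier remove slow cache3 2> $TMPFILE || true
#   check_response "EBUSY"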
function get_config_value_or_die()
{
local target config_opt raw val
target=$1
config_opt=$2
raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
if [[ $? -ne 0 ]]; then
echo "error obtaining config opt '$config_opt' from '$target': $raw"
exit 1
fi
raw=`echo $raw | sed -e 's/[{} "]//g'`
val=`echo $raw | cut -f2 -d:`
echo "$val"
return 0
}
function expect_config_value()
{
local target config_opt expected_val val
target=$1
config_opt=$2
expected_val=$3
val=$(get_config_value_or_die $target $config_opt)
if [[ "$val" != "$expected_val" ]]; then
echo "expected '$expected_val', got '$val'"
exit 1
fi
}
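# Example (illustrative): assert via the admin socket that mon.a currently has
# mon_pg_warn_min_objects set to 10:
#   expect_config_value "mon.a" "mon_pg_warn_min_objects" 10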
function ceph_watch_start()
{
local whatch_opt=--watch
if [ -n "$1" ]; then
whatch_opt=--watch-$1
if [ -n "$2" ]; then
whatch_opt+=" --watch-channel $2"
fi
fi
CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
ceph $whatch_opt > $CEPH_WATCH_FILE &
CEPH_WATCH_PID=$!
# wait until the "ceph" client is connected and receiving
# log messages from monitor
for i in `seq 3`; do
grep -q "cluster" $CEPH_WATCH_FILE && break
sleep 1
done
}
function ceph_watch_wait()
{
local regexp=$1
local timeout=30
if [ -n "$2" ]; then
timeout=$2
fi
for i in `seq ${timeout}`; do
grep -q "$regexp" $CEPH_WATCH_FILE && break
sleep 1
done
kill $CEPH_WATCH_PID
if ! grep "$regexp" $CEPH_WATCH_FILE; then
echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
cat $CEPH_WATCH_FILE >&2
return 1
fi
}
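# Typical flow (as used in test_mon_misc below): start the background watcher,
# emit a cluster log entry, then block until it shows up or the wait times out:
#   ceph_watch_start
#   ceph log "some unique message"
#   ceph_watch_wait "some unique message"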
function test_mon_injectargs()
{
ceph tell osd.0 injectargs --no-osd_enable_op_tracker
ceph tell osd.0 config get osd_enable_op_tracker | grep false
ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500'
ceph tell osd.0 config get osd_enable_op_tracker | grep true
ceph tell osd.0 config get osd_op_history_duration | grep 500
ceph tell osd.0 injectargs --no-osd_enable_op_tracker
ceph tell osd.0 config get osd_enable_op_tracker | grep false
ceph tell osd.0 injectargs -- --osd_enable_op_tracker
ceph tell osd.0 config get osd_enable_op_tracker | grep true
ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600'
ceph tell osd.0 config get osd_enable_op_tracker | grep true
ceph tell osd.0 config get osd_op_history_duration | grep 600
ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200'
ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200
ceph tell osd.0 injectargs -- '--mon_probe_timeout 2'
ceph tell osd.0 config get mon_probe_timeout | grep 2
ceph tell osd.0 injectargs -- '--mon-lease 6'
ceph tell osd.0 config get mon_lease | grep 6
# osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 2> $TMPFILE || return 1
check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
ceph tell osd.0 injectargs -- '--osd_op_history_duration'
}
function test_mon_injectargs_SI()
{
# Test SI units during injectargs and 'config set'
# We only aim at testing the units are parsed accordingly
# and don't intend to test whether the options being set
# actually expect SI units to be passed.
# Keep in mind that all integer based options that are not based on bytes
# (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed to
# base 10.
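  # For example, with these options "10K" parses to 10 * 10^3 = 10000 and "1G"
  # to 10^9 = 1000000000, while an unknown suffix such as "10F" is rejected.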
initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
check_response "(22) Invalid argument"
# now test with injectargs
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}
function test_mon_injectargs_IEC()
{
# Test IEC units during injectargs and 'config set'
# We only aim at testing the units are parsed accordingly
# and don't intend to test whether the options being set
# actually expect IEC units to be passed.
# Keep in mind that all integer based options that are based on bytes
# (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
# unit modifiers (for backwards compatibility and convenience) and be parsed
# to base 2.
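  # For example, for byte-based options "15G" is read as 15 * 2^30 = 16106127360
  # and "16Gi" as 16 * 2^30 = 17179869184, as the assertions below verify.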
initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
$SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
expect_config_value "mon.a" "mon_data_size_warn" 15000000000
$SUDO ceph daemon mon.a config set mon_data_size_warn 15G
expect_config_value "mon.a" "mon_data_size_warn" 16106127360
$SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
expect_config_value "mon.a" "mon_data_size_warn" 17179869184
$SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
check_response "(22) Invalid argument"
# now test with injectargs
ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
expect_config_value "mon.a" "mon_data_size_warn" 15000000000
ceph tell mon.a injectargs '--mon_data_size_warn 15G'
expect_config_value "mon.a" "mon_data_size_warn" 16106127360
ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
expect_config_value "mon.a" "mon_data_size_warn" 17179869184
expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
$SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
}
function test_tiering_agent()
{
local slow=slow_eviction
local fast=fast_eviction
ceph osd pool create $slow 1 1
ceph osd pool application enable $slow rados
ceph osd pool create $fast 1 1
ceph osd tier add $slow $fast
ceph osd tier cache-mode $fast writeback
ceph osd tier set-overlay $slow $fast
ceph osd pool set $fast hit_set_type bloom
rados -p $slow put obj1 /etc/group
ceph osd pool set $fast target_max_objects 1
ceph osd pool set $fast hit_set_count 1
ceph osd pool set $fast hit_set_period 5
# wait for the object to be evicted from the cache
local evicted
evicted=false
for i in `seq 1 300` ; do
if ! rados -p $fast ls | grep obj1 ; then
evicted=true
break
fi
sleep 1
done
$evicted # assert
# the object is proxy read and promoted to the cache
rados -p $slow get obj1 - >/dev/null
# wait for the promoted object to be evicted again
evicted=false
for i in `seq 1 300` ; do
if ! rados -p $fast ls | grep obj1 ; then
evicted=true
break
fi
sleep 1
done
$evicted # assert
ceph osd tier remove-overlay $slow
ceph osd tier remove $slow $fast
ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}
function test_tiering_1()
{
# tiering
ceph osd pool create slow 2
ceph osd pool application enable slow rados
ceph osd pool create slow2 2
ceph osd pool application enable slow2 rados
ceph osd pool create cache 2
ceph osd pool create cache2 2
ceph osd tier add slow cache
ceph osd tier add slow cache2
expect_false ceph osd tier add slow2 cache
# application metadata should propagate to the tiers
ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow") | .application_metadata["rados"]' | grep '{}'
ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow2") | .application_metadata["rados"]' | grep '{}'
ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache") | .application_metadata["rados"]' | grep '{}'
ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache2") | .application_metadata["rados"]' | grep '{}'
# forward is removed/deprecated
expect_false ceph osd tier cache-mode cache forward
expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
# test some state transitions
ceph osd tier cache-mode cache writeback
expect_false ceph osd tier cache-mode cache readonly
expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
ceph osd tier cache-mode cache proxy
ceph osd tier cache-mode cache readproxy
ceph osd tier cache-mode cache none
ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
ceph osd tier cache-mode cache none
ceph osd tier cache-mode cache writeback
ceph osd tier cache-mode cache proxy
ceph osd tier cache-mode cache writeback
expect_false ceph osd tier cache-mode cache none
expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
# test with dirty objects in the tier pool
# tier pool currently set to 'writeback'
rados -p cache put /etc/passwd /etc/passwd
flush_pg_stats
# 1 dirty object in pool 'cache'
ceph osd tier cache-mode cache proxy
expect_false ceph osd tier cache-mode cache none
expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
ceph osd tier cache-mode cache writeback
# remove object from tier pool
rados -p cache rm /etc/passwd
rados -p cache cache-flush-evict-all
flush_pg_stats
# no dirty objects in pool 'cache'
ceph osd tier cache-mode cache proxy
ceph osd tier cache-mode cache none
ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
TRIES=0
while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
do
grep 'currently creating pgs' $TMPFILE
TRIES=$(( $TRIES + 1 ))
test $TRIES -ne 60
sleep 3
done
expect_false ceph osd pool set cache pg_num 4
ceph osd tier cache-mode cache none
ceph osd tier set-overlay slow cache
expect_false ceph osd tier set-overlay slow cache2
expect_false ceph osd tier remove slow cache
ceph osd tier remove-overlay slow
ceph osd tier set-overlay slow cache2
ceph osd tier remove-overlay slow
ceph osd tier remove slow cache
ceph osd tier add slow2 cache
expect_false ceph osd tier set-overlay slow cache
ceph osd tier set-overlay slow2 cache
ceph osd tier remove-overlay slow2
ceph osd tier remove slow2 cache
ceph osd tier remove slow cache2
# make sure a non-empty pool fails
rados -p cache2 put /etc/passwd /etc/passwd
while ! ceph df | grep cache2 | grep ' 1 ' ; do
echo waiting for pg stats to flush
sleep 2
done
expect_false ceph osd tier add slow cache2
ceph osd tier add slow cache2 --force-nonempty
ceph osd tier remove slow cache2
ceph osd pool ls | grep cache2
ceph osd pool ls -f json-pretty | grep cache2
ceph osd pool ls detail | grep cache2
ceph osd pool ls detail -f json-pretty | grep cache2
ceph osd pool delete slow slow --yes-i-really-really-mean-it
ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
ceph osd pool delete cache cache --yes-i-really-really-mean-it
ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
}
function test_tiering_2()
{
# make sure we can't clobber snapshot state
ceph osd pool create snap_base 2
ceph osd pool application enable snap_base rados
ceph osd pool create snap_cache 2
ceph osd pool mksnap snap_cache snapname
expect_false ceph osd tier add snap_base snap_cache
ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}
function test_tiering_3()
{
# make sure we can't create snapshot on tier
ceph osd pool create basex 2
ceph osd pool application enable basex rados
ceph osd pool create cachex 2
ceph osd tier add basex cachex
  expect_false ceph osd pool mksnap cachex snapname
ceph osd tier remove basex cachex
ceph osd pool delete basex basex --yes-i-really-really-mean-it
ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}
function test_tiering_4()
{
# make sure we can't create an ec pool tier
ceph osd pool create eccache 2 2 erasure
expect_false ceph osd set-require-min-compat-client bobtail
ceph osd pool create repbase 2
ceph osd pool application enable repbase rados
expect_false ceph osd tier add repbase eccache
ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}
function test_tiering_5()
{
# convenient add-cache command
ceph osd pool create slow 2
ceph osd pool application enable slow rados
ceph osd pool create cache3 2
ceph osd tier add-cache slow cache3 1024000
ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
ceph osd tier remove slow cache3 2> $TMPFILE || true
check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
ceph osd tier remove-overlay slow
ceph osd tier remove slow cache3
ceph osd pool ls | grep cache3
ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
! ceph osd pool ls | grep cache3 || exit 1
ceph osd pool delete slow slow --yes-i-really-really-mean-it
}
function test_tiering_6()
{
# check add-cache whether work
ceph osd pool create datapool 2
ceph osd pool application enable datapool rados
ceph osd pool create cachepool 2
ceph osd tier add-cache datapool cachepool 1024000
ceph osd tier cache-mode cachepool writeback
rados -p datapool put object /etc/passwd
rados -p cachepool stat object
rados -p cachepool cache-flush object
rados -p datapool stat object
ceph osd tier remove-overlay datapool
ceph osd tier remove datapool cachepool
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_7()
{
# protection against pool removal when used as tiers
ceph osd pool create datapool 2
ceph osd pool application enable datapool rados
ceph osd pool create cachepool 2
ceph osd tier add-cache datapool cachepool 1024000
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
check_response "EBUSY: pool 'datapool' has tiers cachepool"
ceph osd tier remove-overlay datapool
ceph osd tier remove datapool cachepool
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_8()
{
## check health check
ceph osd set notieragent
ceph osd pool create datapool 2
ceph osd pool application enable datapool rados
ceph osd pool create cache4 2
ceph osd tier add-cache datapool cache4 1024000
ceph osd tier cache-mode cache4 writeback
  tmpfile=$(mktemp)
dd if=/dev/zero of=$tmpfile bs=4K count=1
ceph osd pool set cache4 target_max_objects 200
ceph osd pool set cache4 target_max_bytes 1000000
rados -p cache4 put foo1 $tmpfile
rados -p cache4 put foo2 $tmpfile
rm -f $tmpfile
flush_pg_stats
ceph df | grep datapool | grep ' 2 '
ceph osd tier remove-overlay datapool
ceph osd tier remove datapool cache4
ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
ceph osd unset notieragent
}
function test_tiering_9()
{
# make sure 'tier remove' behaves as we expect
# i.e., removing a tier from a pool that's not its base pool only
  # results in a 'pool foo is now (or already was) not a tier of bar' message
#
ceph osd pool create basepoolA 2
ceph osd pool application enable basepoolA rados
ceph osd pool create basepoolB 2
ceph osd pool application enable basepoolB rados
poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
ceph osd pool create cache5 2
ceph osd pool create cache6 2
ceph osd tier add basepoolA cache5
ceph osd tier add basepoolB cache6
ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}
function test_auth()
{
expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
ceph auth add client.xx mon 'allow *' osd "allow *"
ceph auth export client.xx >client.xx.keyring
ceph auth add client.xx -i client.xx.keyring
rm -f client.xx.keyring
ceph auth list | grep client.xx
ceph auth ls | grep client.xx
ceph auth get client.xx | grep caps | grep mon
ceph auth get client.xx | grep caps | grep osd
ceph auth get-key client.xx
ceph auth print-key client.xx
ceph auth print_key client.xx
ceph auth caps client.xx osd "allow rw"
expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
ceph auth get client.xx | grep osd | grep "allow rw"
ceph auth caps client.xx mon 'allow command "osd tree"'
ceph auth export | grep client.xx
ceph auth export -o authfile
ceph auth import -i authfile
ceph auth export -o authfile2
diff authfile authfile2
rm authfile authfile2
ceph auth del client.xx
expect_false ceph auth get client.xx
# (almost) interactive mode
echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
ceph auth get client.xx
# script mode
echo 'auth del client.xx' | ceph
expect_false ceph auth get client.xx
}
function test_auth_profiles()
{
ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
mgr 'allow profile read-only'
ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
mgr 'allow profile read-write'
ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
ceph auth export > client.xx.keyring
# read-only is allowed all read-only commands (auth excluded)
ceph -n client.xx-profile-ro -k client.xx.keyring status
ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
# read-only gets access denied for rw commands or auth commands
ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
check_response "EACCES: access denied"
# read-write is allowed for all read-write commands (except auth)
ceph -n client.xx-profile-rw -k client.xx.keyring status
ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
ceph -n client.xx-profile-rw -k client.xx.keyring log foo
ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
# read-write gets access denied for auth commands
ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
check_response "EACCES: access denied"
# role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
ceph -n client.xx-profile-rd -k client.xx.keyring auth export
ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
ceph -n client.xx-profile-rd -k client.xx.keyring status
ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
check_response "EACCES: access denied"
# read-only 'mon' subsystem commands are allowed
ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
# but read-write 'mon' commands are not
ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
# add a new role-definer with the existing role-definer
ceph -n client.xx-profile-rd -k client.xx.keyring \
auth add client.xx-profile-rd2 mon 'allow profile role-definer'
ceph -n client.xx-profile-rd -k client.xx.keyring \
auth export > client.xx.keyring.2
# remove old role-definer using the new role-definer
ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
auth del client.xx-profile-rd
# remove the remaining role-definer with admin
ceph auth del client.xx-profile-rd2
rm -f client.xx.keyring client.xx.keyring.2
}
function test_mon_caps()
{
ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
chmod +r $TEMP_DIR/ceph.client.bug.keyring
ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
# pass --no-mon-config since we are looking for the permission denied error
rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
cat $TMPFILE
check_response "Permission denied"
rm -rf $TEMP_DIR/ceph.client.bug.keyring
ceph auth del client.bug
ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
chmod +r $TEMP_DIR/ceph.client.bug.keyring
ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
check_response "Permission denied"
}
function test_mon_misc()
{
# with and without verbosity
ceph osd dump | grep '^epoch'
ceph --concise osd dump | grep '^epoch'
ceph osd df | grep 'MIN/MAX VAR'
# df
ceph df > $TMPFILE
grep RAW $TMPFILE
grep -v DIRTY $TMPFILE
ceph df detail > $TMPFILE
grep DIRTY $TMPFILE
ceph df --format json > $TMPFILE
grep 'total_bytes' $TMPFILE
grep -v 'dirty' $TMPFILE
ceph df detail --format json > $TMPFILE
grep 'rd_bytes' $TMPFILE
grep 'dirty' $TMPFILE
ceph df --format xml | grep '<total_bytes>'
ceph df detail --format xml | grep '<rd_bytes>'
ceph fsid
ceph health
ceph health detail
ceph health --format json-pretty
ceph health detail --format xml-pretty
ceph time-sync-status
ceph node ls
for t in mon osd mds mgr ; do
ceph node ls $t
done
ceph_watch_start
mymsg="this is a test log message $$.$(date)"
ceph log "$mymsg"
ceph log last | grep "$mymsg"
ceph log last 100 | grep "$mymsg"
ceph_watch_wait "$mymsg"
ceph mgr stat
ceph mgr dump
ceph mgr dump | jq -e '.active_clients[0].name'
ceph mgr module ls
ceph mgr module enable restful
expect_false ceph mgr module enable foodne
ceph mgr module enable foodne --force
ceph mgr module disable foodne
ceph mgr module disable foodnebizbangbash
ceph mon metadata a
ceph mon metadata
ceph mon count-metadata ceph_version
ceph mon versions
ceph mgr metadata
ceph mgr versions
ceph mgr count-metadata ceph_version
ceph versions
ceph node ls
}
function check_mds_active()
{
fs_name=$1
ceph fs get $fs_name | grep active
}
function wait_mds_active()
{
fs_name=$1
max_run=300
for i in $(seq 1 $max_run) ; do
if ! check_mds_active $fs_name ; then
echo "waiting for an active MDS daemon ($i/$max_run)"
sleep 5
else
break
fi
done
check_mds_active $fs_name
}
function get_mds_gids()
{
fs_name=$1
ceph fs get $fs_name --format=json | python3 -c "import json; import sys; print(' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()]))"
}
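# e.g. for a single-rank filesystem this prints one numeric GID; with several
# MDS daemons in the map it prints a space-separated list such as "4107 4115".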
function fail_all_mds()
{
fs_name=$1
ceph fs set $fs_name cluster_down true
mds_gids=$(get_mds_gids $fs_name)
for mds_gid in $mds_gids ; do
ceph mds fail $mds_gid
done
if check_mds_active $fs_name ; then
echo "An active MDS remains, something went wrong"
ceph fs get $fs_name
exit -1
fi
}
function remove_all_fs()
{
existing_fs=$(ceph fs ls --format=json | python3 -c "import json; import sys; print(' '.join([fs['name'] for fs in json.load(sys.stdin)]))")
for fs_name in $existing_fs ; do
echo "Removing fs ${fs_name}..."
fail_all_mds $fs_name
echo "Removing existing filesystem '${fs_name}'..."
ceph fs rm $fs_name --yes-i-really-mean-it
echo "Removed '${fs_name}'."
done
}
# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
ceph auth ls | grep "^mds"
}
# some of the commands are just not idempotent.
function without_test_dup_command()
{
if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
$@
else
local saved=${CEPH_CLI_TEST_DUP_COMMAND}
unset CEPH_CLI_TEST_DUP_COMMAND
$@
    CEPH_CLI_TEST_DUP_COMMAND=$saved
fi
}
function test_mds_tell()
{
local FS_NAME=cephfs
if ! mds_exists ; then
echo "Skipping test, no MDS found"
return
fi
remove_all_fs
ceph osd pool create fs_data 16
ceph osd pool create fs_metadata 16
ceph fs new $FS_NAME fs_metadata fs_data
wait_mds_active $FS_NAME
# Test injectargs by GID
old_mds_gids=$(get_mds_gids $FS_NAME)
echo Old GIDs: $old_mds_gids
for mds_gid in $old_mds_gids ; do
ceph tell mds.$mds_gid injectargs "--debug-mds 20"
done
expect_false ceph tell mds.a injectargs mds_max_file_recover -1
# Test respawn by rank
without_test_dup_command ceph tell mds.0 respawn
new_mds_gids=$old_mds_gids
while [ $new_mds_gids -eq $old_mds_gids ] ; do
sleep 5
new_mds_gids=$(get_mds_gids $FS_NAME)
done
echo New GIDs: $new_mds_gids
# Test respawn by ID
without_test_dup_command ceph tell mds.a respawn
new_mds_gids=$old_mds_gids
while [ $new_mds_gids -eq $old_mds_gids ] ; do
sleep 5
new_mds_gids=$(get_mds_gids $FS_NAME)
done
echo New GIDs: $new_mds_gids
remove_all_fs
ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mon_mds()
{
local FS_NAME=cephfs
remove_all_fs
ceph osd pool create fs_data 16
ceph osd pool create fs_metadata 16
ceph fs new $FS_NAME fs_metadata fs_data
ceph fs set $FS_NAME cluster_down true
ceph fs set $FS_NAME cluster_down false
ceph mds compat rm_incompat 4
ceph mds compat rm_incompat 4
  # We don't want any MDSs to be up; their activity can interfere with
# the "current_epoch + 1" checking below if they're generating updates
fail_all_mds $FS_NAME
ceph mds compat show
ceph fs dump
ceph fs get $FS_NAME
for mds_gid in $(get_mds_gids $FS_NAME) ; do
      ceph mds metadata $mds_gid
done
ceph mds metadata
ceph mds versions
ceph mds count-metadata os
# XXX mds fail, but how do you undo it?
mdsmapfile=$TEMP_DIR/mdsmap.$$
current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
[ -s $mdsmapfile ]
rm $mdsmapfile
ceph osd pool create data2 16
ceph osd pool create data3 16
data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
ceph fs add_data_pool cephfs $data2_pool
ceph fs add_data_pool cephfs $data3_pool
ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
check_response "Error ENOENT"
ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
check_response "Error ENOENT"
ceph fs rm_data_pool cephfs $data2_pool
ceph fs rm_data_pool cephfs $data3_pool
ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
ceph fs set cephfs max_mds 4
ceph fs set cephfs max_mds 3
ceph fs set cephfs max_mds 256
expect_false ceph fs set cephfs max_mds 257
ceph fs set cephfs max_mds 4
ceph fs set cephfs max_mds 256
expect_false ceph fs set cephfs max_mds 257
expect_false ceph fs set cephfs max_mds asdf
expect_false ceph fs set cephfs inline_data true
ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
ceph fs set cephfs inline_data yes --yes-i-really-really-mean-it
ceph fs set cephfs inline_data 1 --yes-i-really-really-mean-it
expect_false ceph fs set cephfs inline_data --yes-i-really-really-mean-it
ceph fs set cephfs inline_data false
ceph fs set cephfs inline_data no
ceph fs set cephfs inline_data 0
expect_false ceph fs set cephfs inline_data asdf
ceph fs set cephfs max_file_size 1048576
expect_false ceph fs set cephfs max_file_size 123asdf
expect_false ceph fs set cephfs allow_new_snaps
ceph fs set cephfs allow_new_snaps true
ceph fs set cephfs allow_new_snaps 0
ceph fs set cephfs allow_new_snaps false
ceph fs set cephfs allow_new_snaps no
expect_false ceph fs set cephfs allow_new_snaps taco
# we should never be able to add EC pools as data or metadata pools
# create an ec-pool...
ceph osd pool create mds-ec-pool 16 16 erasure
set +e
ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
check_response 'erasure-code' $? 22
set -e
ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
fail_all_mds $FS_NAME
set +e
# Check that rmfailed requires confirmation
expect_false ceph mds rmfailed 0
ceph mds rmfailed 0 --yes-i-really-mean-it
set -e
# Check that `fs new` is no longer permitted
expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
# Check that 'fs reset' runs
ceph fs reset $FS_NAME --yes-i-really-mean-it
# Check that creating a second FS fails by default
ceph osd pool create fs_metadata2 16
ceph osd pool create fs_data2 16
set +e
expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
set -e
# Check that setting enable_multiple enables creation of second fs
ceph fs flag set enable_multiple true --yes-i-really-mean-it
ceph fs new cephfs2 fs_metadata2 fs_data2
# Clean up multi-fs stuff
fail_all_mds cephfs2
ceph fs rm cephfs2 --yes-i-really-mean-it
ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
fail_all_mds $FS_NAME
# Clean up to enable subsequent fs new tests
ceph fs rm $FS_NAME --yes-i-really-mean-it
set +e
ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
check_response 'erasure-code' $? 22
ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
check_response 'already used by filesystem' $? 22
ceph fs new $FS_NAME mds-ec-pool fs_data --force 2>$TMPFILE
check_response 'erasure-code' $? 22
ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
check_response 'erasure-code' $? 22
set -e
  # ... now create a cache tier in front of the EC pool...
ceph osd pool create mds-tier 2
ceph osd tier add mds-ec-pool mds-tier
ceph osd tier set-overlay mds-ec-pool mds-tier
tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
# Use of a readonly tier should be forbidden
ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
set +e
ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
set -e
# Use of a writeback tier should enable FS creation
ceph osd tier cache-mode mds-tier writeback
ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
# While a FS exists using the tiered pools, I should not be allowed
# to remove the tier
set +e
ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
check_response 'in use by CephFS' $? 16
ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
check_response 'in use by CephFS' $? 16
set -e
fail_all_mds $FS_NAME
ceph fs rm $FS_NAME --yes-i-really-mean-it
# ... but we should be forbidden from using the cache pool in the FS directly.
set +e
ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
check_response 'in use as a cache tier' $? 22
ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
check_response 'already used by filesystem' $? 22
ceph fs new $FS_NAME mds-tier fs_data --force 2>$TMPFILE
check_response 'in use as a cache tier' $? 22
ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
check_response 'already used by filesystem' $? 22
ceph fs new $FS_NAME mds-tier mds-tier --force 2>$TMPFILE
check_response 'in use as a cache tier' $? 22
set -e
# Clean up tier + EC pools
ceph osd tier remove-overlay mds-ec-pool
ceph osd tier remove mds-ec-pool mds-tier
# Create a FS using the 'cache' pool now that it's no longer a tier
ceph fs new $FS_NAME fs_metadata mds-tier --force
# We should be forbidden from using this pool as a tier now that
# it's in use for CephFS
set +e
ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
check_response 'in use by CephFS' $? 16
set -e
fail_all_mds $FS_NAME
ceph fs rm $FS_NAME --yes-i-really-mean-it
# We should be permitted to use an EC pool with overwrites enabled
# as the data pool...
ceph osd pool set mds-ec-pool allow_ec_overwrites true
ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
fail_all_mds $FS_NAME
ceph fs rm $FS_NAME --yes-i-really-mean-it
# ...but not as the metadata pool
set +e
ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
check_response 'already used by filesystem' $? 22
ceph fs new $FS_NAME mds-ec-pool fs_data --force 2>$TMPFILE
check_response 'erasure-code' $? 22
set -e
ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
# Create a FS and check that we can subsequently add a cache tier to it
ceph fs new $FS_NAME fs_metadata fs_data --force
# Adding overlay to FS pool should be permitted, RADOS clients handle this.
ceph osd tier add fs_metadata mds-tier
ceph osd tier cache-mode mds-tier writeback
ceph osd tier set-overlay fs_metadata mds-tier
# Removing tier should be permitted because the underlying pool is
# replicated (#11504 case)
ceph osd tier cache-mode mds-tier proxy
ceph osd tier remove-overlay fs_metadata
ceph osd tier remove fs_metadata mds-tier
ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
# Clean up FS
fail_all_mds $FS_NAME
ceph fs rm $FS_NAME --yes-i-really-mean-it
ceph mds stat
# ceph mds tell mds.a getmap
# ceph mds rm
# ceph mds rmfailed
# ceph mds set_state
ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mon_mds_metadata()
{
local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
test "$nmons" -gt 0
ceph fs dump |
sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
while read gid id rank; do
ceph mds metadata ${gid} | grep '"hostname":'
ceph mds metadata ${id} | grep '"hostname":'
ceph mds metadata ${rank} | grep '"hostname":'
local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
test "$n" -eq "$nmons"
done
expect_false ceph mds metadata UNKNOWN
}
function test_mon_mon()
{
# print help message
ceph --help mon
# -h works even when some arguments are passed
ceph osd dump -h | grep 'osd dump'
ceph osd dump 123 -h | grep 'osd dump'
# no mon add/remove
ceph mon dump
ceph mon getmap -o $TEMP_DIR/monmap.$$
[ -s $TEMP_DIR/monmap.$$ ]
# ceph mon tell
first=$(ceph mon dump -f json | jq -r '.mons[0].name')
ceph tell mon.$first mon_status
# test mon features
ceph mon feature ls
ceph mon feature set kraken --yes-i-really-mean-it
expect_false ceph mon feature set abcd
expect_false ceph mon feature set abcd --yes-i-really-mean-it
# test elector
expect_failure $TEMP_DIR ceph mon add disallowed_leader $first
ceph mon set election_strategy disallow
ceph mon add disallowed_leader $first
ceph mon set election_strategy connectivity
ceph mon rm disallowed_leader $first
ceph mon set election_strategy classic
expect_failure $TEMP_DIR ceph mon rm disallowed_leader $first
# test mon stat
# don't check output, just ensure it does not fail.
ceph mon stat
ceph mon stat -f json | jq '.'
}
function test_mon_priority_and_weight()
{
for i in 0 1 65535; do
ceph mon set-weight a $i
w=$(ceph mon dump --format=json-pretty 2>/dev/null | jq '.mons[0].weight')
[[ "$w" == "$i" ]]
done
for i in -1 65536; do
expect_false ceph mon set-weight a $i
done
}
function gen_secrets_file()
{
  # let's assume we can have the following types
# all - generates both cephx and lockbox, with mock dm-crypt key
# cephx - only cephx
# no_cephx - lockbox and dm-crypt, no cephx
# no_lockbox - dm-crypt and cephx, no lockbox
# empty - empty file
# empty_json - correct json, empty map
# bad_json - bad json :)
#
local t=$1
if [[ -z "$t" ]]; then
t="all"
fi
fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
echo $fn
if [[ "$t" == "empty" ]]; then
return 0
fi
echo "{" > $fn
if [[ "$t" == "bad_json" ]]; then
echo "asd: ; }" >> $fn
return 0
elif [[ "$t" == "empty_json" ]]; then
echo "}" >> $fn
return 0
fi
cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
if [[ "$t" == "all" ]]; then
echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
elif [[ "$t" == "cephx" ]]; then
echo "$cephx_secret" >> $fn
elif [[ "$t" == "no_cephx" ]]; then
echo "$lb_secret,$dmcrypt_key" >> $fn
elif [[ "$t" == "no_lockbox" ]]; then
echo "$cephx_secret,$dmcrypt_key" >> $fn
else
    echo "unknown gen_secrets_file() type '$t'"
return 1
fi
echo "}" >> $fn
return 0
}
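# For illustration, `gen_secrets_file all` prints the path of a temp file whose
# contents look roughly like:
#   {
#   "cephx_secret": "AQ...==","cephx_lockbox_secret": "AQ...==","dmcrypt_key": "AQ...=="
#   }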
function test_mon_osd_create_destroy()
{
ceph osd new 2>&1 | grep 'EINVAL'
ceph osd new '' -1 2>&1 | grep 'EINVAL'
ceph osd new '' 10 2>&1 | grep 'EINVAL'
old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
old_osds=$(ceph osd ls)
num_osds=$(ceph osd ls | wc -l)
uuid=$(uuidgen)
id=$(ceph osd new $uuid 2>/dev/null)
for i in $old_osds; do
[[ "$i" != "$id" ]]
done
ceph osd find $id
id2=`ceph osd new $uuid 2>/dev/null`
[[ $id2 == $id ]]
ceph osd new $uuid $id
id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
uuid2=$(uuidgen)
id2=$(ceph osd new $uuid2)
ceph osd find $id2
[[ "$id2" != "$id" ]]
ceph osd new $uuid $id2 2>&1 | grep EEXIST
ceph osd new $uuid2 $id2
# test with secrets
empty_secrets=$(gen_secrets_file "empty")
empty_json=$(gen_secrets_file "empty_json")
all_secrets=$(gen_secrets_file "all")
cephx_only=$(gen_secrets_file "cephx")
no_cephx=$(gen_secrets_file "no_cephx")
no_lockbox=$(gen_secrets_file "no_lockbox")
bad_json=$(gen_secrets_file "bad_json")
# empty secrets should be idempotent
new_id=$(ceph osd new $uuid $id -i $empty_secrets)
[[ "$new_id" == "$id" ]]
# empty json, thus empty secrets
new_id=$(ceph osd new $uuid $id -i $empty_json)
[[ "$new_id" == "$id" ]]
ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
ceph osd rm $id
ceph osd rm $id2
ceph osd setmaxosd $old_maxosd
ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
osds=$(ceph osd ls)
id=$(ceph osd new $uuid -i $all_secrets)
for i in $osds; do
[[ "$i" != "$id" ]]
done
ceph osd find $id
# validate secrets and dm-crypt are set
k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
s=$(cat $all_secrets | jq '.cephx_secret')
[[ $k == $s ]]
k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
jq '.key')
s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
[[ $k == $s ]]
ceph config-key exists dm-crypt/osd/$uuid/luks
osds=$(ceph osd ls)
id2=$(ceph osd new $uuid2 -i $cephx_only)
for i in $osds; do
[[ "$i" != "$id2" ]]
done
ceph osd find $id2
k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
s=$(cat $all_secrets | jq '.cephx_secret')
[[ $k == $s ]]
expect_false ceph auth get-key client.osd-lockbox.$uuid2
expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
ceph osd destroy osd.$id2 --yes-i-really-mean-it
ceph osd destroy $id2 --yes-i-really-mean-it
ceph osd find $id2
expect_false ceph auth get-key osd.$id2
ceph osd dump | grep osd.$id2 | grep destroyed
id3=$id2
uuid3=$(uuidgen)
ceph osd new $uuid3 $id3 -i $all_secrets
ceph osd dump | grep osd.$id3 | expect_false grep destroyed
ceph auth get-key client.osd-lockbox.$uuid3
ceph auth get-key osd.$id3
ceph config-key exists dm-crypt/osd/$uuid3/luks
ceph osd purge-new osd.$id3 --yes-i-really-mean-it
expect_false ceph osd find $id2
expect_false ceph auth get-key osd.$id2
expect_false ceph auth get-key client.osd-lockbox.$uuid3
expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
ceph osd purge osd.$id3 --yes-i-really-mean-it
ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent
ceph osd purge osd.$id --yes-i-really-mean-it
ceph osd purge 123456 --yes-i-really-mean-it
expect_false ceph osd find $id
expect_false ceph auth get-key osd.$id
expect_false ceph auth get-key client.osd-lockbox.$uuid
expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
rm $empty_secrets $empty_json $all_secrets $cephx_only \
$no_cephx $no_lockbox $bad_json
for i in $(ceph osd ls); do
[[ "$i" != "$id" ]]
[[ "$i" != "$id2" ]]
[[ "$i" != "$id3" ]]
done
[[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
ceph osd setmaxosd $old_maxosd
}
function test_mon_config_key()
{
key=asdfasdfqwerqwreasdfuniquesa123df
ceph config-key list | grep -c $key | grep 0
ceph config-key get $key | grep -c bar | grep 0
ceph config-key set $key bar
ceph config-key get $key | grep bar
ceph config-key list | grep -c $key | grep 1
ceph config-key dump | grep $key | grep bar
ceph config-key rm $key
expect_false ceph config-key get $key
ceph config-key list | grep -c $key | grep 0
ceph config-key dump | grep -c $key | grep 0
}
function test_mon_osd()
{
#
# osd blocklist
#
bl=192.168.0.1:0/1000
ceph osd blocklist add $bl
ceph osd blocklist ls | grep $bl
ceph osd blocklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
ceph osd dump --format=json-pretty | grep $bl
ceph osd dump | grep $bl
ceph osd blocklist rm $bl
ceph osd blocklist ls | expect_false grep $bl
bl=192.168.0.1
# test without nonce, invalid nonce
ceph osd blocklist add $bl
ceph osd blocklist ls | grep $bl
ceph osd blocklist rm $bl
ceph osd blocklist ls | expect_false grep $bl
expect_false "ceph osd blocklist add $bl/-1"
expect_false "ceph osd blocklist add $bl/foo"
# test with invalid address
expect_false "ceph osd blocklist add 1234.56.78.90/100"
# test range blocklisting
bl=192.168.0.1:0/24
ceph osd blocklist range add $bl
ceph osd blocklist ls | grep $bl
ceph osd blocklist range rm $bl
ceph osd blocklist ls | expect_false grep $bl
bad_bl=192.168.0.1/33
expect_false ceph osd blocklist range add $bad_bl
# Test `clear`
ceph osd blocklist add $bl
ceph osd blocklist ls | grep $bl
ceph osd blocklist clear
ceph osd blocklist ls | expect_false grep $bl
# deprecated syntax?
ceph osd blacklist ls
#
# osd crush
#
ceph osd crush reweight-all
ceph osd crush tunables legacy
ceph osd crush show-tunables | grep argonaut
ceph osd crush tunables bobtail
ceph osd crush show-tunables | grep bobtail
ceph osd crush tunables firefly
ceph osd crush show-tunables | grep firefly
ceph osd crush set-tunable straw_calc_version 0
ceph osd crush get-tunable straw_calc_version | grep 0
ceph osd crush set-tunable straw_calc_version 1
ceph osd crush get-tunable straw_calc_version | grep 1
#
# require-min-compat-client
expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
ceph osd get-require-min-compat-client | grep luminous
ceph osd dump | grep 'require_min_compat_client luminous'
#
# osd scrub
#
# blocking
ceph osd scrub 0 --block
ceph osd deep-scrub 0 --block
# how do I tell when these are done?
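  # (one possible way to tell, not exercised here: poll `ceph pg dump pgs` and
  #  wait for the pg's last_scrub_stamp / last_deep_scrub_stamp to advance)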
ceph osd scrub 0
ceph osd deep-scrub 0
ceph osd repair 0
# pool scrub, force-recovery/backfill
pool_names=`rados lspools`
for pool_name in $pool_names
do
ceph osd pool scrub $pool_name
ceph osd pool deep-scrub $pool_name
ceph osd pool repair $pool_name
ceph osd pool force-recovery $pool_name
ceph osd pool cancel-force-recovery $pool_name
ceph osd pool force-backfill $pool_name
ceph osd pool cancel-force-backfill $pool_name
done
for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \
norebalance norecover notieragent
do
ceph osd set $f
ceph osd unset $f
done
expect_false ceph osd set bogus
expect_false ceph osd unset bogus
for f in sortbitwise recover_deletes require_jewel_osds \
require_kraken_osds
do
expect_false ceph osd set $f
expect_false ceph osd unset $f
done
ceph osd require-osd-release reef
# can't lower
expect_false ceph osd require-osd-release quincy
expect_false ceph osd require-osd-release pacific
# these are no-ops but should succeed.
ceph osd set noup
ceph osd down 0
ceph osd dump | grep 'osd.0 down'
ceph osd unset noup
max_run=1000
for ((i=0; i < $max_run; i++)); do
if ! ceph osd dump | grep 'osd.0 up'; then
echo "waiting for osd.0 to come back up ($i/$max_run)"
sleep 1
else
break
fi
done
ceph osd dump | grep 'osd.0 up'
# ceph osd find expects the OsdName, so both ints and osd.n should work.
ceph osd find 1
ceph osd find osd.1
expect_false ceph osd find osd.xyz
expect_false ceph osd find xyz
expect_false ceph osd find 0.1
ceph --format plain osd find 1 # falls back to json-pretty
if [ `uname` == Linux ]; then
ceph osd metadata 1 | grep 'distro'
ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
fi
ceph osd out 0
ceph osd dump | grep 'osd.0.*out'
ceph osd in 0
ceph osd dump | grep 'osd.0.*in'
ceph osd find 0
ceph osd info 0
ceph osd info osd.0
expect_false ceph osd info osd.xyz
expect_false ceph osd info xyz
expect_false ceph osd info 42
expect_false ceph osd info osd.42
ceph osd info
info_json=$(ceph osd info --format=json | jq -cM '.')
dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
if [[ "${info_json}" != "${dump_json}" ]]; then
echo "waiting for OSDs to settle"
sleep 10
info_json=$(ceph osd info --format=json | jq -cM '.')
dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
[[ "${info_json}" == "${dump_json}" ]]
fi
info_json=$(ceph osd info 0 --format=json | jq -cM '.')
dump_json=$(ceph osd dump --format=json | \
jq -cM '.osds[] | select(.osd == 0)')
[[ "${info_json}" == "${dump_json}" ]]
info_plain="$(ceph osd info)"
dump_plain="$(ceph osd dump | grep '^osd')"
[[ "${info_plain}" == "${dump_plain}" ]]
info_plain="$(ceph osd info 0)"
dump_plain="$(ceph osd dump | grep '^osd.0')"
[[ "${info_plain}" == "${dump_plain}" ]]
ceph osd add-nodown 0 1
ceph health detail | grep 'NODOWN'
ceph osd rm-nodown 0 1
! ceph health detail | grep 'NODOWN'
ceph osd out 0 # so we can mark it as noin later
ceph osd add-noin 0
ceph health detail | grep 'NOIN'
ceph osd rm-noin 0
! ceph health detail | grep 'NOIN'
ceph osd in 0
ceph osd add-noout 0
ceph health detail | grep 'NOOUT'
ceph osd rm-noout 0
! ceph health detail | grep 'NOOUT'
# test osd id parse
expect_false ceph osd add-noup 797er
expect_false ceph osd add-nodown u9uwer
expect_false ceph osd add-noin 78~15
expect_false ceph osd rm-noup 1234567
expect_false ceph osd rm-nodown fsadf7
expect_false ceph osd rm-noout 790-fd
ids=`ceph osd ls-tree default`
for osd in $ids
do
ceph osd add-nodown $osd
ceph osd add-noout $osd
done
ceph -s | grep 'NODOWN'
ceph -s | grep 'NOOUT'
ceph osd rm-nodown any
ceph osd rm-noout all
! ceph -s | grep 'NODOWN'
! ceph -s | grep 'NOOUT'
# test crush node flags
ceph osd add-noup osd.0
ceph osd add-nodown osd.0
ceph osd add-noin osd.0
ceph osd add-noout osd.0
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
ceph osd rm-noup osd.0
ceph osd rm-nodown osd.0
ceph osd rm-noin osd.0
ceph osd rm-noout osd.0
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
ceph osd crush add-bucket foo host root=default
ceph osd add-noup foo
ceph osd add-nodown foo
ceph osd add-noin foo
ceph osd add-noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
ceph osd rm-noup foo
ceph osd rm-nodown foo
ceph osd rm-noin foo
ceph osd rm-noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
ceph osd add-noup foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
ceph osd crush rm foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
ceph osd set-group noup osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd set-group noup,nodown osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
ceph osd set-group noup,nodown,noin osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
ceph osd set-group noup,nodown,noin,noout osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
ceph osd unset-group noup osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
ceph osd unset-group noup,nodown osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
ceph osd unset-group noup,nodown,noin osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
ceph osd unset-group noup,nodown,noin,noout osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
ceph osd set-group noup,nodown,noin,noout osd.0 osd.1
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin'
ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout'
ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout'
ceph osd set-group noup all
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd unset-group noup all
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
# crush node flags
ceph osd crush add-bucket foo host root=default
ceph osd set-group noup foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
ceph osd set-group noup,nodown foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
ceph osd set-group noup,nodown,noin foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd set-group noup,nodown,noin,noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd unset-group noup foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd unset-group noup,nodown foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd unset-group noup,nodown,noin foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd unset-group noup,nodown,noin,noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout'
ceph osd set-group noin,noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd unset-group noin,noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
ceph osd set-group noup,nodown,noin,noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd crush rm foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
# test device class flags
osd_0_device_class=$(ceph osd crush get-device-class osd.0)
ceph osd set-group noup $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
ceph osd set-group noup,nodown $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
ceph osd set-group noup,nodown,noin $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
ceph osd set-group noup,nodown,noin,noout $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
ceph osd unset-group noup $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
ceph osd unset-group noup,nodown $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
ceph osd unset-group noup,nodown,noin $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout'
ceph osd set-group noin,noout $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
ceph osd unset-group noin,noout $osd_0_device_class
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep $osd_0_device_class
# make sure mark out preserves weight
ceph osd reweight osd.0 .5
ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
ceph osd out 0
ceph osd in 0
ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
ceph osd getmap -o $f
[ -s $f ]
rm $f
save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
[ "$save" -gt 0 ]
ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
ceph osd setmaxosd 10
ceph osd getmaxosd | grep 'max_osd = 10'
ceph osd setmaxosd $save
ceph osd getmaxosd | grep "max_osd = $save"
for id in `ceph osd ls` ; do
retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
done
ceph osd rm 0 2>&1 | grep 'EBUSY'
local old_osds=$(echo $(ceph osd ls))
id=`ceph osd create`
ceph osd find $id
ceph osd lost $id --yes-i-really-mean-it
expect_false ceph osd setmaxosd $id
local new_osds=$(echo $(ceph osd ls))
for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
ceph osd rm $id
done
uuid=`uuidgen`
id=`ceph osd create $uuid`
id2=`ceph osd create $uuid`
[ "$id" = "$id2" ]
ceph osd rm $id
ceph --help osd
# reset max_osd.
ceph osd setmaxosd $id
ceph osd getmaxosd | grep "max_osd = $save"
local max_osd=$save
ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
id=`ceph osd create $uuid $max_osd`
[ "$id" = "$max_osd" ]
ceph osd find $id
max_osd=$((max_osd + 1))
ceph osd getmaxosd | grep "max_osd = $max_osd"
ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
id2=`ceph osd create $uuid`
[ "$id" = "$id2" ]
id2=`ceph osd create $uuid $id`
[ "$id" = "$id2" ]
uuid=`uuidgen`
local gap_start=$max_osd
id=`ceph osd create $uuid $((gap_start + 100))`
[ "$id" = "$((gap_start + 100))" ]
max_osd=$((id + 1))
ceph osd getmaxosd | grep "max_osd = $max_osd"
ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
#
# When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
  # is repeated and consumes two osd ids, not just one.
#
local next_osd=$gap_start
id=`ceph osd create $(uuidgen)`
[ "$id" = "$next_osd" ]
next_osd=$((id + 1))
id=`ceph osd create $(uuidgen) $next_osd`
[ "$id" = "$next_osd" ]
local new_osds=$(echo $(ceph osd ls))
for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
[ $id -ge $save ]
ceph osd rm $id
done
ceph osd setmaxosd $save
ceph osd ls
ceph osd pool create data 16
ceph osd pool application enable data rados
ceph osd lspools | grep data
ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
ceph osd pool delete data data --yes-i-really-really-mean-it
ceph osd pause
ceph osd dump | grep 'flags.*pauserd,pausewr'
ceph osd unpause
ceph osd tree
ceph osd tree up
ceph osd tree down
ceph osd tree in
ceph osd tree out
ceph osd tree destroyed
ceph osd tree up in
ceph osd tree up out
ceph osd tree down in
ceph osd tree down out
ceph osd tree out down
expect_false ceph osd tree up down
expect_false ceph osd tree up destroyed
expect_false ceph osd tree down destroyed
expect_false ceph osd tree up down destroyed
expect_false ceph osd tree in out
expect_false ceph osd tree up foo
ceph osd metadata
ceph osd count-metadata os
ceph osd versions
ceph osd perf
ceph osd blocked-by
ceph osd stat | grep up
}
function test_mon_crush()
{
f=$TEMP_DIR/map.$$
epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
[ -s $f ]
[ "$epoch" -gt 1 ]
nextepoch=$(( $epoch + 1 ))
echo epoch $epoch nextepoch $nextepoch
rm -f $f.epoch
expect_false ceph osd setcrushmap $nextepoch -i $f
gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
echo gotepoch $gotepoch
[ "$gotepoch" -eq "$nextepoch" ]
# should be idempotent
gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
echo epoch $gotepoch
[ "$gotepoch" -eq "$nextepoch" ]
rm $f
}
function test_mon_osd_pool()
{
#
# osd pool
#
ceph osd pool create data 16
ceph osd pool application enable data rados
ceph osd pool mksnap data datasnap
rados -p data lssnap | grep datasnap
ceph osd pool rmsnap data datasnap
expect_false ceph osd pool rmsnap pool_fake snapshot
ceph osd pool delete data data --yes-i-really-really-mean-it
ceph osd pool create data2 16
ceph osd pool application enable data2 rados
ceph osd pool rename data2 data3
ceph osd lspools | grep data3
ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
ceph osd pool create replicated 16 16 replicated
ceph osd pool create replicated 1 16 replicated
ceph osd pool create replicated 16 16 # default is replicated
ceph osd pool create replicated 16 # default is replicated, pgp_num = pg_num
ceph osd pool application enable replicated rados
# should fail because the type is not the same
expect_false ceph osd pool create replicated 16 16 erasure
ceph osd lspools | grep replicated
ceph osd pool create ec_test 1 1 erasure
ceph osd pool application enable ec_test rados
set +e
ceph osd count-metadata osd_objectstore | grep 'bluestore'
if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
else
ceph osd pool set ec_test allow_ec_overwrites true || return 1
expect_false ceph osd pool set ec_test allow_ec_overwrites false
fi
set -e
ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
# test create pool with rule
ceph osd erasure-code-profile set foo foo
ceph osd erasure-code-profile ls | grep foo
ceph osd crush rule create-erasure foo foo
ceph osd pool create erasure 16 16 erasure foo
expect_false ceph osd erasure-code-profile rm foo
ceph osd pool delete erasure erasure --yes-i-really-really-mean-it
ceph osd crush rule rm foo
ceph osd erasure-code-profile rm foo
# autoscale mode
ceph osd pool create modeon --autoscale-mode=on
ceph osd dump | grep modeon | grep 'autoscale_mode on'
ceph osd pool create modewarn --autoscale-mode=warn
ceph osd dump | grep modewarn | grep 'autoscale_mode warn'
ceph osd pool create modeoff --autoscale-mode=off
ceph osd dump | grep modeoff | grep 'autoscale_mode off'
ceph osd pool delete modeon modeon --yes-i-really-really-mean-it
ceph osd pool delete modewarn modewarn --yes-i-really-really-mean-it
ceph osd pool delete modeoff modeoff --yes-i-really-really-mean-it
}
function test_mon_osd_pool_quota()
{
#
# test osd pool set/get quota
#
# create tmp pool
ceph osd pool create tmp-quota-pool 32
ceph osd pool application enable tmp-quota-pool rados
#
# set erroneous quotas
#
expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
#
# set valid quotas
#
ceph osd pool set-quota tmp-quota-pool max_bytes 10
ceph osd pool set-quota tmp-quota-pool max_objects 10M
#
# get quotas in json-pretty format
#
ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
grep '"quota_max_objects":.*10000000'
ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
grep '"quota_max_bytes":.*10'
#
# get quotas
#
ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B'
ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects'
#
# set valid quotas with unit prefix
#
ceph osd pool set-quota tmp-quota-pool max_bytes 10K
#
# get quotas
#
ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
#
# set valid quotas with unit prefix
#
ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
#
# get quotas
#
ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
#
#
# reset pool quotas
#
ceph osd pool set-quota tmp-quota-pool max_bytes 0
ceph osd pool set-quota tmp-quota-pool max_objects 0
#
# test N/A quotas
#
ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
#
# cleanup tmp pool
ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
}
function test_mon_pg()
{
# Make sure we start healthy.
wait_for_health_ok
ceph pg debug unfound_objects_exist
ceph pg debug degraded_pgs_exist
ceph pg deep-scrub 1.0
ceph pg dump
ceph pg dump pgs_brief --format=json
ceph pg dump pgs --format=json
ceph pg dump pools --format=json
ceph pg dump osds --format=json
ceph pg dump sum --format=json
ceph pg dump all --format=json
ceph pg dump pgs_brief osds --format=json
ceph pg dump pools osds pgs_brief --format=json
ceph pg dump_json
ceph pg dump_pools_json
ceph pg dump_stuck inactive
ceph pg dump_stuck unclean
ceph pg dump_stuck stale
ceph pg dump_stuck undersized
ceph pg dump_stuck degraded
ceph pg ls
ceph pg ls 1
ceph pg ls stale
expect_false ceph pg ls scrubq
ceph pg ls active stale repair recovering
ceph pg ls 1 active
ceph pg ls 1 active stale
ceph pg ls-by-primary osd.0
ceph pg ls-by-primary osd.0 1
ceph pg ls-by-primary osd.0 active
ceph pg ls-by-primary osd.0 active stale
ceph pg ls-by-primary osd.0 1 active stale
ceph pg ls-by-osd osd.0
ceph pg ls-by-osd osd.0 1
ceph pg ls-by-osd osd.0 active
ceph pg ls-by-osd osd.0 active stale
ceph pg ls-by-osd osd.0 1 active stale
ceph pg ls-by-pool rbd
ceph pg ls-by-pool rbd active stale
# can't test this...
# ceph pg force_create_pg
ceph pg getmap -o $TEMP_DIR/map.$$
[ -s $TEMP_DIR/map.$$ ]
ceph pg map 1.0 | grep acting
ceph pg repair 1.0
ceph pg scrub 1.0
ceph osd set-full-ratio .962
ceph osd dump | grep '^full_ratio 0.962'
ceph osd set-backfillfull-ratio .912
ceph osd dump | grep '^backfillfull_ratio 0.912'
ceph osd set-nearfull-ratio .892
ceph osd dump | grep '^nearfull_ratio 0.892'
# Check health status
ceph osd set-nearfull-ratio .913
ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
ceph health detail | grep OSD_OUT_OF_ORDER_FULL
ceph osd set-nearfull-ratio .892
ceph osd set-backfillfull-ratio .963
ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
ceph health detail | grep OSD_OUT_OF_ORDER_FULL
ceph osd set-backfillfull-ratio .912
# Check injected full results
$SUDO ceph tell osd.0 injectfull nearfull
wait_for_health "OSD_NEARFULL"
ceph health detail | grep "osd.0 is near full"
$SUDO ceph tell osd.0 injectfull none
wait_for_health_ok
$SUDO ceph tell osd.1 injectfull backfillfull
wait_for_health "OSD_BACKFILLFULL"
ceph health detail | grep "osd.1 is backfill full"
$SUDO ceph tell osd.1 injectfull none
wait_for_health_ok
$SUDO ceph tell osd.2 injectfull failsafe
# failsafe and full are the same as far as the monitor is concerned
wait_for_health "OSD_FULL"
ceph health detail | grep "osd.2 is full"
$SUDO ceph tell osd.2 injectfull none
wait_for_health_ok
$SUDO ceph tell osd.0 injectfull full
wait_for_health "OSD_FULL"
ceph health detail | grep "osd.0 is full"
$SUDO ceph tell osd.0 injectfull none
wait_for_health_ok
ceph pg stat | grep 'pgs:'
ceph pg 1.0 query
ceph tell 1.0 query
first=$(ceph mon dump -f json | jq -r '.mons[0].name')
ceph tell mon.$first quorum enter
ceph quorum_status
ceph report | grep osd_stats
ceph status
ceph -s
#
# tell osd version
#
ceph tell osd.0 version
expect_false ceph tell osd.9999 version
expect_false ceph tell osd.foo version
# back to pg stuff
ceph tell osd.0 dump_pg_recovery_stats | grep Started
ceph osd reweight 0 0.9
expect_false ceph osd reweight 0 -1
ceph osd reweight osd.0 1
ceph osd primary-affinity osd.0 .9
expect_false ceph osd primary-affinity osd.0 -2
expect_false ceph osd primary-affinity osd.9999 .5
ceph osd primary-affinity osd.0 1
ceph osd pool set rbd size 2
ceph osd pg-temp 1.0 0 1
ceph osd pg-temp 1.0 osd.1 osd.0
expect_false ceph osd pg-temp 1.0 0 1 2
expect_false ceph osd pg-temp asdf qwer
expect_false ceph osd pg-temp 1.0 asdf
ceph osd pg-temp 1.0 # cleanup pg-temp
ceph pg repeer 1.0
expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore
# don't test ceph osd primary-temp for now
}
function test_mon_osd_pool_set()
{
TEST_POOL_GETSET=pool_getset
expect_false ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio -0.3
expect_true ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio 1
ceph osd pool application enable $TEST_POOL_GETSET rados
ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off
wait_for_clean
ceph osd pool get $TEST_POOL_GETSET all
for s in pg_num pgp_num size min_size crush_rule target_size_ratio; do
ceph osd pool get $TEST_POOL_GETSET $s
done
old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
(( new_size = old_size + 1 ))
ceph osd pool set $TEST_POOL_GETSET size $new_size --yes-i-really-mean-it
ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
ceph osd pool set $TEST_POOL_GETSET size $old_size --yes-i-really-mean-it
ceph osd pool create pool_erasure 1 1 erasure
ceph osd pool application enable pool_erasure rados
wait_for_clean
set +e
ceph osd pool set pool_erasure size 4444 2>$TMPFILE
check_response 'not change the size'
set -e
ceph osd pool get pool_erasure erasure_code_profile
ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it
for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub bulk; do
ceph osd pool set $TEST_POOL_GETSET $flag false
ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
ceph osd pool set $TEST_POOL_GETSET $flag true
ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
ceph osd pool set $TEST_POOL_GETSET $flag 1
ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
ceph osd pool set $TEST_POOL_GETSET $flag 0
ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
done
ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'
ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11
expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11
ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio -3
expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio abc
expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 0.1
expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 1
ceph osd pool get $TEST_POOL_GETSET target_size_ratio | grep 'target_size_ratio: 1'
ceph osd pool set $TEST_POOL_GETSET nopgchange 1
expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
ceph osd pool set $TEST_POOL_GETSET nopgchange 0
ceph osd pool set $TEST_POOL_GETSET pg_num 10
wait_for_clean
ceph osd pool set $TEST_POOL_GETSET pgp_num 10
expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0
expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0
old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
wait_for_clean
ceph osd pool set $TEST_POOL_GETSET nosizechange 1
expect_false ceph osd pool set $TEST_POOL_GETSET size 2
expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
ceph osd pool set $TEST_POOL_GETSET nosizechange 0
ceph osd pool set $TEST_POOL_GETSET size 2
wait_for_clean
ceph osd pool set $TEST_POOL_GETSET min_size 2
expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
ceph osd pool get rbd crush_rule | grep 'crush_rule: '
ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
ceph osd pool set $TEST_POOL_GETSET compression_mode unset
ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
ceph osd pool set $TEST_POOL_GETSET csum_type unset
ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET $size 100
ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
ceph osd pool set $TEST_POOL_GETSET $size 0
ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
done
ceph osd pool set $TEST_POOL_GETSET nodelete 1
expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
ceph osd pool set $TEST_POOL_GETSET nodelete 0
ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
}
function test_mon_osd_tiered_pool_set()
{
# this is really a tier pool
ceph osd pool create real-tier 2
ceph osd tier add rbd real-tier
# expect us to be unable to set negative values for hit_set_*
for o in hit_set_period hit_set_count hit_set_fpp; do
    expect_false ceph osd pool set real-tier $o -1
done
# and hit_set_fpp should be in range 0..1
  expect_false ceph osd pool set real-tier hit_set_fpp 2
ceph osd pool set real-tier hit_set_type explicit_hash
ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
ceph osd pool set real-tier hit_set_type explicit_object
ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
ceph osd pool set real-tier hit_set_type bloom
ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
ceph osd pool set real-tier hit_set_period 123
ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
ceph osd pool set real-tier hit_set_count 12
ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
ceph osd pool set real-tier hit_set_fpp .01
ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
ceph osd pool set real-tier target_max_objects 123
ceph osd pool get real-tier target_max_objects | \
grep 'target_max_objects:[ \t]\+123'
ceph osd pool set real-tier target_max_bytes 123456
ceph osd pool get real-tier target_max_bytes | \
grep 'target_max_bytes:[ \t]\+123456'
ceph osd pool set real-tier cache_target_dirty_ratio .123
ceph osd pool get real-tier cache_target_dirty_ratio | \
grep 'cache_target_dirty_ratio:[ \t]\+0.123'
expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
ceph osd pool set real-tier cache_target_dirty_high_ratio .123
ceph osd pool get real-tier cache_target_dirty_high_ratio | \
grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
ceph osd pool set real-tier cache_target_full_ratio .123
ceph osd pool get real-tier cache_target_full_ratio | \
grep 'cache_target_full_ratio:[ \t]\+0.123'
ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
ceph osd pool set real-tier cache_target_full_ratio 1.0
ceph osd pool set real-tier cache_target_full_ratio 0
expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
ceph osd pool set real-tier cache_min_flush_age 123
ceph osd pool get real-tier cache_min_flush_age | \
grep 'cache_min_flush_age:[ \t]\+123'
ceph osd pool set real-tier cache_min_evict_age 234
ceph osd pool get real-tier cache_min_evict_age | \
grep 'cache_min_evict_age:[ \t]\+234'
# iec vs si units
ceph osd pool set real-tier target_max_objects 1K
ceph osd pool get real-tier target_max_objects | grep 1000
for o in target_max_bytes target_size_bytes compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
    ceph osd pool set real-tier $o 1Ki # IEC (binary) suffix -> 1024
val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
[[ $val == 1024 ]]
    ceph osd pool set real-tier $o 1M # SI-style suffix; byte values still parse as binary -> 1048576
val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
[[ $val == 1048576 ]]
done
# this is not a tier pool
ceph osd pool create fake-tier 2
ceph osd pool application enable fake-tier rados
wait_for_clean
expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
expect_false ceph osd pool get fake-tier hit_set_type
expect_false ceph osd pool set fake-tier hit_set_type explicit_object
expect_false ceph osd pool get fake-tier hit_set_type
expect_false ceph osd pool set fake-tier hit_set_type bloom
expect_false ceph osd pool get fake-tier hit_set_type
expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
expect_false ceph osd pool set fake-tier hit_set_period 123
expect_false ceph osd pool get fake-tier hit_set_period
expect_false ceph osd pool set fake-tier hit_set_count 12
expect_false ceph osd pool get fake-tier hit_set_count
expect_false ceph osd pool set fake-tier hit_set_fpp .01
expect_false ceph osd pool get fake-tier hit_set_fpp
expect_false ceph osd pool set fake-tier target_max_objects 123
expect_false ceph osd pool get fake-tier target_max_objects
expect_false ceph osd pool set fake-tier target_max_bytes 123456
expect_false ceph osd pool get fake-tier target_max_bytes
expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
expect_false ceph osd pool get fake-tier cache_target_full_ratio
expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
expect_false ceph osd pool set fake-tier cache_min_flush_age 123
expect_false ceph osd pool get fake-tier cache_min_flush_age
expect_false ceph osd pool set fake-tier cache_min_evict_age 234
expect_false ceph osd pool get fake-tier cache_min_evict_age
ceph osd tier remove rbd real-tier
ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
}
function test_mon_osd_erasure_code()
{
ceph osd erasure-code-profile set fooprofile a=b c=d
ceph osd erasure-code-profile set fooprofile a=b c=d
expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
ceph osd erasure-code-profile set fooprofile a=b c=d e=f
expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
# make sure rule-foo doesn't work anymore
expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
ceph osd erasure-code-profile set barprofile crush-failure-domain=host
# clean up
ceph osd erasure-code-profile rm fooprofile
ceph osd erasure-code-profile rm barprofile
# try weird k and m values
expect_false ceph osd erasure-code-profile set badk k=1 m=1
expect_false ceph osd erasure-code-profile set badk k=1 m=2
expect_false ceph osd erasure-code-profile set badk k=0 m=2
expect_false ceph osd erasure-code-profile set badk k=-1 m=2
expect_false ceph osd erasure-code-profile set badm k=2 m=0
expect_false ceph osd erasure-code-profile set badm k=2 m=-1
ceph osd erasure-code-profile set good k=2 m=1
ceph osd erasure-code-profile rm good
}
function test_mon_osd_misc()
{
set +e
# expect error about missing 'pool' argument
ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
# expect error about unused argument foo
ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
# expect "not in range" for invalid overload percentage
ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
set -e
local old_bytes_per_osd=$(ceph config get mgr mon_reweight_min_bytes_per_osd)
local old_pgs_per_osd=$(ceph config get mgr mon_reweight_min_pgs_per_osd)
# otherwise ceph-mgr complains like:
# Error EDOM: Refusing to reweight: we only have 5372 kb used across all osds!
# Error EDOM: Refusing to reweight: we only have 20 PGs across 3 osds!
ceph config set mgr mon_reweight_min_bytes_per_osd 0
ceph config set mgr mon_reweight_min_pgs_per_osd 0
ceph osd reweight-by-utilization 110
ceph osd reweight-by-utilization 110 .5
expect_false ceph osd reweight-by-utilization 110 0
expect_false ceph osd reweight-by-utilization 110 -0.1
ceph osd test-reweight-by-utilization 110 .5 --no-increasing
ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
ceph osd reweight-by-pg 110
ceph osd test-reweight-by-pg 110 .5
ceph osd reweight-by-pg 110 rbd
ceph osd reweight-by-pg 110 .5 rbd
expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
# restore the setting
ceph config set mgr mon_reweight_min_bytes_per_osd $old_bytes_per_osd
ceph config set mgr mon_reweight_min_pgs_per_osd $old_pgs_per_osd
}
function test_admin_heap_profiler()
{
do_test=1
set +e
# expect 'heap' commands to be correctly parsed
ceph tell osd.0 heap stats 2>$TMPFILE
if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
echo "tcmalloc not enabled; skip heap profiler test"
do_test=0
fi
set -e
[[ $do_test -eq 0 ]] && return 0
$SUDO ceph tell osd.0 heap start_profiler
$SUDO ceph tell osd.0 heap dump
$SUDO ceph tell osd.0 heap stop_profiler
$SUDO ceph tell osd.0 heap release
}
function test_osd_bench()
{
# test osd bench limits
  # As we should not rely on defaults (as they may change over time),
  # let's inject some values and perform some simple tests
  # max iops: 10 # 10 IOPS
# max throughput: 10485760 # 10MB/s
# max block size: 2097152 # 2MB
# duration: 10 # 10 seconds
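  # Worked arithmetic for the caps asserted below, assuming the values injected
  # just after this comment:
  #   small block sizes are IOPS-bound:       max bytes = iops * duration * bs
  #                                           10 * 10 * 4096 = 409600
  #   large block sizes are throughput-bound: max bytes = throughput * duration
  #                                           10485760 * 10  = 104857600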
local args="\
--osd-bench-duration 10 \
--osd-bench-max-block-size 2097152 \
--osd-bench-large-size-max-throughput 10485760 \
--osd-bench-small-size-max-iops 10"
ceph tell osd.0 injectargs ${args## }
# anything with a bs larger than 2097152 must fail
expect_false ceph tell osd.0 bench 1 2097153
  # but using the maximum block size itself ('osd_bench_max_block_size') must succeed
ceph tell osd.0 bench 1 2097152
# we assume 1MB as a large bs; anything lower is a small bs
# for a 4096 bytes bs, for 10 seconds, we are limited by IOPS
# max count: 409600 (bytes)
# more than max count must not be allowed
expect_false ceph tell osd.0 bench 409601 4096
  # but 409600 must succeed
ceph tell osd.0 bench 409600 4096
# for a large bs, we are limited by throughput.
# for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
# the max count will be (10MB * 10s) = 100MB
# max count: 104857600 (bytes)
# more than max count must not be allowed
expect_false ceph tell osd.0 bench 104857601 2097152
# up to max count must be allowed
ceph tell osd.0 bench 104857600 2097152
}
function test_osd_negative_filestore_merge_threshold()
{
$SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
expect_config_value "osd.0" "filestore_merge_threshold" -1
}
function test_mon_tell()
{
for m in mon.a mon.b; do
ceph tell $m sessions
ceph_watch_start debug audit
    ceph tell $m sessions
ceph_watch_wait "${m} \[DBG\] from.*cmd='sessions' args=\[\]: dispatch"
done
expect_false ceph tell mon.foo version
}
function test_mon_ping()
{
ceph ping mon.a
ceph ping mon.b
expect_false ceph ping mon.foo
ceph ping mon.\*
}
function test_mon_deprecated_commands()
{
# current DEPRECATED commands are marked with FLAG(DEPRECATED)
#
# Testing should be accomplished by setting
# 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
# each one of these commands.
ceph tell mon.* injectargs '--mon-debug-deprecated-as-obsolete'
expect_false ceph config-key list 2> $TMPFILE
check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
ceph tell mon.* injectargs '--no-mon-debug-deprecated-as-obsolete'
}
function test_mon_cephdf_commands()
{
  # ceph df detail:
  # pool section:
  # RAW USED: the raw space consumed per pool, i.e. STORED multiplied by the
  # replication factor
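  # (illustrative: with size 2 and a single 4 KiB object, STORED is roughly
  #  4 KiB and RAW USED roughly 8 KiB, which is what the stored * 2 ==
  #  stored_raw comparison below relies on)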
ceph osd pool create cephdf_for_test 1 1 replicated
ceph osd pool application enable cephdf_for_test rados
ceph osd pool set cephdf_for_test size 2
dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
#wait for update
for i in `seq 1 10`; do
rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
sleep 1
done
# "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
# to sync mon with osd
flush_pg_stats
local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
stored=`ceph df detail --format=json | jq "$jq_filter.stored * 2"`
stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
rm ./cephdf_for_test
expect_false test $stored != $stored_raw
}
function test_mon_pool_application()
{
ceph osd pool create app_for_test 16
ceph osd pool application enable app_for_test rbd
expect_false ceph osd pool application enable app_for_test rgw
ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
ceph osd pool ls detail | grep "application rbd,rgw"
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
expect_false ceph osd pool application set app_for_test cephfs key value
ceph osd pool application set app_for_test rbd key1 value1
ceph osd pool application set app_for_test rbd key2 value2
ceph osd pool application set app_for_test rgw key1 value1
ceph osd pool application get app_for_test rbd key1 | grep 'value1'
ceph osd pool application get app_for_test rbd key2 | grep 'value2'
ceph osd pool application get app_for_test rgw key1 | grep 'value1'
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
ceph osd pool application rm app_for_test rgw key1
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
ceph osd pool application rm app_for_test rbd key2
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
ceph osd pool application rm app_for_test rbd key1
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
ceph osd pool application rm app_for_test rbd key1 # should be idempotent
expect_false ceph osd pool application disable app_for_test rgw
ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
ceph osd pool ls detail | grep "application rbd"
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
ceph osd pool ls detail | grep -v "application "
ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
}
function test_mon_tell_help_command()
{
ceph tell mon.a help | grep sync_force
ceph tell mon.a -h | grep sync_force
ceph tell mon.a config -h | grep 'config diff get'
# wrong target
expect_false ceph tell mon.zzz help
}
function test_mon_stdin_stdout()
{
echo foo | ceph config-key set test_key -i -
ceph config-key get test_key -o - | grep -c foo | grep -q 1
}
function test_osd_tell_help_command()
{
ceph tell osd.1 help
expect_false ceph tell osd.100 help
}
function test_osd_compact()
{
ceph tell osd.1 compact
$SUDO ceph daemon osd.1 compact
}
function test_mds_tell_help_command()
{
local FS_NAME=cephfs
if ! mds_exists ; then
echo "Skipping test, no MDS found"
return
fi
remove_all_fs
ceph osd pool create fs_data 16
ceph osd pool create fs_metadata 16
ceph fs new $FS_NAME fs_metadata fs_data
wait_mds_active $FS_NAME
ceph tell mds.a help
expect_false ceph tell mds.z help
remove_all_fs
ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mgr_tell()
{
ceph tell mgr version
}
function test_mgr_devices()
{
ceph device ls
expect_false ceph device info doesnotexist
expect_false ceph device get-health-metrics doesnotexist
}
function test_per_pool_scrub_status()
{
ceph osd pool create noscrub_pool 16
ceph osd pool create noscrub_pool2 16
ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
ceph -s --format json | \
jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail |
expect_false grep -q "Pool .* has .*scrub.* flag"
  ceph health detail | \
    expect_false grep -q "Pool .* has .*scrub.* flag"
ceph osd pool set noscrub_pool noscrub 1
ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
ceph -s --format json | \
jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
expect_true grep -q "Pool noscrub_pool has noscrub flag"
ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
ceph osd pool set noscrub_pool nodeep-scrub 1
ceph osd pool set noscrub_pool2 nodeep-scrub 1
ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
ceph -s --format json | \
jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
expect_true grep -q "Pool noscrub_pool has noscrub flag"
ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
ceph health detail | expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it
ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it
}
#
# New tests should be added to the TESTS array below
#
# Individual tests may be run using the '-t <testname>' argument
# The user can specify '-t <testname>' as many times as she wants
#
# Tests will be run in order presented in the TESTS array, or in
# the order specified by the '-t <testname>' options.
#
# '-l' will list all the available test names
# '-h' will show usage
#
# The test maintains backward compatibility: not specifying arguments
# will run all tests following the order they appear in the TESTS array.
#
set +x
MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
for i in `seq 9`; do
MON_TESTS+=" tiering_$i";
done
MON_TESTS+=" auth"
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
MON_TESTS+=" mon_config_key"
MON_TESTS+=" mon_crush"
MON_TESTS+=" mon_osd_create_destroy"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_pool_set"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
MON_TESTS+=" mon_tell"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"
MON_TESTS+=" mon_tell_help_command"
MON_TESTS+=" mon_stdin_stdout"
OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"
OSD_TESTS+=" admin_heap_profiler"
OSD_TESTS+=" osd_tell_help_command"
OSD_TESTS+=" osd_compact"
OSD_TESTS+=" per_pool_scrub_status"
MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"
MDS_TESTS+=" mds_tell_help_command"
MGR_TESTS+=" mgr_tell"
MGR_TESTS+=" mgr_devices"
TESTS+=$MON_TESTS
TESTS+=$OSD_TESTS
TESTS+=$MDS_TESTS
TESTS+=$MGR_TESTS
#
# "main" follows
#
function list_tests()
{
echo "AVAILABLE TESTS"
for i in $TESTS; do
echo " $i"
done
}
function usage()
{
echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
}
tests_to_run=()
sanity_check=true
while [[ $# -gt 0 ]]; do
opt=$1
case "$opt" in
"-l" )
do_list=1
;;
"--asok-does-not-need-root" )
SUDO=""
;;
"--no-sanity-check" )
sanity_check=false
;;
"--test-mon" )
tests_to_run+="$MON_TESTS"
;;
"--test-osd" )
tests_to_run+="$OSD_TESTS"
;;
"--test-mds" )
tests_to_run+="$MDS_TESTS"
;;
"--test-mgr" )
tests_to_run+="$MGR_TESTS"
;;
"-t" )
shift
if [[ -z "$1" ]]; then
echo "missing argument to '-t'"
usage ;
exit 1
fi
tests_to_run+=" $1"
;;
"-h" )
usage ;
exit 0
;;
esac
shift
done
if [[ $do_list -eq 1 ]]; then
list_tests ;
exit 0
fi
ceph osd pool create rbd 16
if test -z "$tests_to_run" ; then
tests_to_run="$TESTS"
fi
if $sanity_check ; then
wait_no_osd_down
fi
for i in $tests_to_run; do
if $sanity_check ; then
check_no_osd_down
fi
set -x
test_${i}
set +x
done
if $sanity_check ; then
check_no_osd_down
fi
set -x
echo OK
| 106,096 | 34.460227 | 173 | sh |
null | ceph-main/qa/workunits/cephtool/test_daemon.sh | #!/usr/bin/env bash
set -ex
expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
echo note: assuming mon.a is on the current host
# can set to 'sudo ./ceph' to execute tests from current dir for development
CEPH=${CEPH:-'sudo ceph'}
${CEPH} daemon mon.a version | grep version
# get debug_ms setting and strip it, painfully for reuse
old_ms=$(${CEPH} daemon mon.a config get debug_ms | \
grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
${CEPH} daemon mon.a config set debug_ms 13
new_ms=$(${CEPH} daemon mon.a config get debug_ms | \
grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
[ "$new_ms" = "13/13" ]
${CEPH} daemon mon.a config set debug_ms $old_ms
new_ms=$(${CEPH} daemon mon.a config get debug_ms | \
grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
[ "$new_ms" = "$old_ms" ]
# unregistered/non-existent command
expect_false ${CEPH} daemon mon.a bogus_command_blah foo
set +e
OUTPUT=$(${CEPH} -c /not/a/ceph.conf daemon mon.a help 2>&1)
# look for EINVAL
if [ $? != 22 ] ; then exit 1; fi
if ! echo "$OUTPUT" | grep -q '.*open.*/not/a/ceph.conf'; then
echo "didn't find expected error in bad conf search"
exit 1
fi
set -e
echo OK
| 1,184 | 25.931818 | 76 | sh |
null | ceph-main/qa/workunits/cephtool/test_kvstore_tool.sh | #!/usr/bin/env bash
set -x
source $(dirname $0)/../../standalone/ceph-helpers.sh
set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}
export CEPH_DEV=1
echo note: test ceph_kvstore_tool with bluestore
expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
TEMP_DIR=$(mktemp -d ./cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0
TEMP_FILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
function test_ceph_kvstore_tool()
{
# create a data directory
ceph-objectstore-tool --data-path ${TEMP_DIR} --op mkfs --no-mon-config
# list
origin_kv_nums=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | wc -l`
# exists
prefix=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | head -n 1 | awk '{print $1}'`
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists ${prefix}
expect_false ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists ${prefix}notexist
# list-crc
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list-crc
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list-crc ${prefix}
# list with prefix
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list ${prefix}
# set
echo "helloworld" >> ${TEMP_FILE}
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} set TESTPREFIX TESTKEY in ${TEMP_FILE}
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists TESTPREFIX TESTKEY
# get
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} get TESTPREFIX TESTKEY out ${TEMP_FILE}.bak
diff ${TEMP_FILE} ${TEMP_FILE}.bak
# rm
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} rm TESTPREFIX TESTKEY
expect_false ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists TESTPREFIX TESTKEY
# compact
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} compact
# destructive-repair
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} destructive-repair
current_kv_nums=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | wc -l`
test ${origin_kv_nums} -eq ${current_kv_nums}
}
test_ceph_kvstore_tool
echo OK
| 1,976 | 26.458333 | 101 | sh |
null | ceph-main/qa/workunits/client/test.sh | #!/bin/sh
set -ex
ceph_test_client
| 37 | 5.333333 | 16 | sh |
null | ceph-main/qa/workunits/cls/test_cls_2pc_queue.sh | #!/bin/sh -e
ceph_test_cls_2pc_queue
exit 0
| 46 | 6.833333 | 23 | sh |
null | ceph-main/qa/workunits/cls/test_cls_cas.sh | #!/bin/sh -e
GTEST_FILTER=${CLS_CAS_GTEST_FILTER:-*}
ceph_test_cls_cas --gtest_filter=${GTEST_FILTER}
exit 0
| 111 | 15 | 48 | sh |
null | ceph-main/qa/workunits/cls/test_cls_cmpomap.sh | #!/bin/sh -e
ceph_test_cls_cmpomap
exit 0
| 44 | 6.5 | 21 | sh |
null | ceph-main/qa/workunits/cls/test_cls_hello.sh | #!/bin/sh -e
ceph_test_cls_hello
exit 0
| 42 | 6.166667 | 19 | sh |
null | ceph-main/qa/workunits/cls/test_cls_journal.sh | #!/bin/sh -e
GTEST_FILTER=${CLS_JOURNAL_GTEST_FILTER:-*}
ceph_test_cls_journal --gtest_filter=${GTEST_FILTER}
exit 0
| 119 | 16.142857 | 52 | sh |
null | ceph-main/qa/workunits/cls/test_cls_lock.sh | #!/bin/sh -e
ceph_test_cls_lock
exit 0
| 41 | 6 | 18 | sh |
null | ceph-main/qa/workunits/cls/test_cls_log.sh | #!/bin/sh -e
ceph_test_cls_log
exit 0
| 40 | 5.833333 | 17 | sh |
null | ceph-main/qa/workunits/cls/test_cls_numops.sh | #!/bin/sh -e
ceph_test_cls_numops
exit 0
| 43 | 6.333333 | 20 | sh |
null | ceph-main/qa/workunits/cls/test_cls_rbd.sh | #!/bin/sh -e
GTEST_FILTER=${CLS_RBD_GTEST_FILTER:-*}
ceph_test_cls_rbd --gtest_filter=${GTEST_FILTER}
exit 0
| 111 | 15 | 48 | sh |
null | ceph-main/qa/workunits/cls/test_cls_refcount.sh | #!/bin/sh -e
ceph_test_cls_refcount
exit 0
| 45 | 6.666667 | 22 | sh |
null | ceph-main/qa/workunits/cls/test_cls_rgw.sh | #!/bin/sh -e
ceph_test_cls_rgw
#ceph_test_cls_rgw_meta
#ceph_test_cls_rgw_log
#ceph_test_cls_rgw_opstate
exit 0
| 114 | 11.777778 | 26 | sh |
null | ceph-main/qa/workunits/cls/test_cls_rgw_gc.sh | #!/bin/sh -e
ceph_test_cls_rgw_gc
exit 0
| 43 | 6.333333 | 20 | sh |
null | ceph-main/qa/workunits/cls/test_cls_rgw_stats.sh | #!/bin/sh -e
ceph_test_cls_rgw_stats
exit 0
| 46 | 6.833333 | 23 | sh |
null | ceph-main/qa/workunits/cls/test_cls_sdk.sh | #!/bin/sh -e
ceph_test_cls_sdk
exit 0
| 40 | 5.833333 | 17 | sh |
null | ceph-main/qa/workunits/direct_io/big.sh | #!/bin/sh -ex
echo "test large (16MB) dio write"
dd if=/dev/zero of=foo.big bs=16M count=1 oflag=direct
echo OK
| 114 | 15.428571 | 54 | sh |
null | ceph-main/qa/workunits/direct_io/misc.sh | #!/bin/sh -ex
# a few test cases from henry
echo "test read from hole"
dd if=/dev/zero of=dd3 bs=1 seek=1048576 count=0
dd if=dd3 of=/tmp/ddout1 skip=8 bs=512 count=2 iflag=direct
dd if=/dev/zero of=/tmp/dd3 bs=512 count=2
cmp /tmp/dd3 /tmp/ddout1
echo "test write at an offset then O_DIRECT read back"
dd if=/dev/urandom of=/tmp/dd10 bs=500 count=1
dd if=/tmp/dd10 of=dd10 bs=512 seek=8388 count=1
dd if=dd10 of=/tmp/dd10out bs=512 skip=8388 count=1 iflag=direct
cmp /tmp/dd10 /tmp/dd10out
echo OK
| 466 | 26.470588 | 64 | sh |
null | ceph-main/qa/workunits/erasure-code/bench.html | <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd" >
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Erasure Code Plugins Benchmarks</title>
<link href="examples.css" rel="stylesheet" type="text/css">
<script language="javascript" type="text/javascript" src="jquery.js"></script>
<script language="javascript" type="text/javascript" src="jquery.flot.js"></script>
<script language="javascript" type="text/javascript" src="jquery.flot.categories.js"></script>
<script language="javascript" type="text/javascript" src="bench.js"></script>
<script language="javascript" type="text/javascript" src="plot.js"></script>
</head>
<body>
<div id="header">
<h2>Erasure Code Plugins Benchmarks</h2>
</div>
<div id="content">
<div class="demo-container">
<div id="encode" class="demo-placeholder"></div>
</div>
<p>encode: Y = GB/s, X = K/M</p>
<div class="demo-container">
<div id="decode" class="demo-placeholder"></div>
</div>
<p>decode: Y = GB/s, X = K/M/erasures</p>
</div>
</body>
</html>
| 1,168 | 32.4 | 98 | html |
null | ceph-main/qa/workunits/erasure-code/bench.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 Red Hat <[email protected]>
# Copyright (C) 2013,2014 Cloudwatt <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
# Test that it works from sources with:
#
# CEPH_ERASURE_CODE_BENCHMARK=src/ceph_erasure_code_benchmark \
# PLUGIN_DIRECTORY=build/lib \
# qa/workunits/erasure-code/bench.sh fplot jerasure |
# tee qa/workunits/erasure-code/bench.js
#
# This should start immediately and display:
#
# ...
# [ '2/1', .48035538612887358583 ],
# [ '3/2', .21648470405675016626 ],
# etc.
#
# and complete within a few seconds. The result can then be displayed with:
#
# firefox qa/workunits/erasure-code/bench.html
#
# Once it is confirmed to work, it can be run with a more significant
# volume of data so that the measures are more reliable:
#
# TOTAL_SIZE=$((4 * 1024 * 1024 * 1024)) \
# CEPH_ERASURE_CODE_BENCHMARK=src/ceph_erasure_code_benchmark \
# PLUGIN_DIRECTORY=build/lib \
# qa/workunits/erasure-code/bench.sh fplot jerasure |
# tee qa/workunits/erasure-code/bench.js
#
set -e
export PATH=/sbin:$PATH
: ${VERBOSE:=false}
: ${CEPH_ERASURE_CODE_BENCHMARK:=ceph_erasure_code_benchmark}
: ${PLUGIN_DIRECTORY:=/usr/lib/ceph/erasure-code}
: ${PLUGINS:=isa jerasure}
: ${TECHNIQUES:=vandermonde cauchy}
: ${TOTAL_SIZE:=$((1024 * 1024))}
: ${SIZE:=4096}
: ${PARAMETERS:=--parameter jerasure-per-chunk-alignment=true}
function bench_header() {
echo -e "seconds\tKB\tplugin\tk\tm\twork.\titer.\tsize\teras.\tcommand."
}
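# bench <plugin> <k> <m> <workload> <iterations> <size> <erasures> [extra args...]
# runs ceph_erasure_code_benchmark once and prints a tab-separated result row.
# Illustrative call (example values only, not a tuned configuration):
#   bench jerasure 2 1 encode 256 4096 0 --parameter packetsize=256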
function bench() {
local plugin=$1
shift
local k=$1
shift
local m=$1
shift
local workload=$1
shift
local iterations=$1
shift
local size=$1
shift
local erasures=$1
shift
command=$(echo $CEPH_ERASURE_CODE_BENCHMARK \
--plugin $plugin \
--workload $workload \
--iterations $iterations \
--size $size \
--erasures $erasures \
--parameter k=$k \
--parameter m=$m \
--erasure-code-dir $PLUGIN_DIRECTORY)
result=$($command "$@")
echo -e "$result\t$plugin\t$k\t$m\t$workload\t$iterations\t$size\t$erasures\t$command ""$@"
}
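# packetsize <k> <w> <vector_wordsize> <size> prints the largest multiple of
# vector_wordsize that fits in size/k/w, capped at 3100.
# e.g. with the values used below: packetsize 2 8 16 4096 -> 256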
function packetsize() {
local k=$1
local w=$2
local vector_wordsize=$3
local size=$4
local p=$(( ($size / $k / $w / $vector_wordsize ) * $vector_wordsize))
if [ $p -gt 3100 ] ; then
p=3100
fi
echo $p
}
function bench_run() {
local plugin=jerasure
local w=8
local VECTOR_WORDSIZE=16
local ks="2 3 4 6 10"
declare -A k2ms
k2ms[2]="1"
k2ms[3]="2"
k2ms[4]="2 3"
k2ms[6]="2 3 4"
k2ms[10]="3 4"
local isa2technique_vandermonde='reed_sol_van'
local isa2technique_cauchy='cauchy'
local jerasure2technique_vandermonde='reed_sol_van'
local jerasure2technique_cauchy='cauchy_good'
for technique in ${TECHNIQUES} ; do
for plugin in ${PLUGINS} ; do
eval technique_parameter=\$${plugin}2technique_${technique}
echo "serie encode_${technique}_${plugin}"
for k in $ks ; do
for m in ${k2ms[$k]} ; do
bench $plugin $k $m encode $(($TOTAL_SIZE / $SIZE)) $SIZE 0 \
--parameter packetsize=$(packetsize $k $w $VECTOR_WORDSIZE $SIZE) \
${PARAMETERS} \
--parameter technique=$technique_parameter
done
done
done
done
for technique in ${TECHNIQUES} ; do
for plugin in ${PLUGINS} ; do
eval technique_parameter=\$${plugin}2technique_${technique}
echo "serie decode_${technique}_${plugin}"
for k in $ks ; do
for m in ${k2ms[$k]} ; do
echo
for erasures in $(seq 1 $m) ; do
bench $plugin $k $m decode $(($TOTAL_SIZE / $SIZE)) $SIZE $erasures \
--parameter packetsize=$(packetsize $k $w $VECTOR_WORDSIZE $SIZE) \
${PARAMETERS} \
--parameter technique=$technique_parameter
done
done
done
done
done
}
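# fplot rewrites the bench_run output as the Javascript arrays consumed by
# bench.html/plot.js: one "var <serie> = [ ... ];" block per serie, with
# X = k/m (or k/m/erasures for decode) and Y = throughput in GB/s.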
function fplot() {
local serie
bench_run | while read seconds total plugin k m workload iteration size erasures rest ; do
if [ -z $seconds ] ; then
echo null,
elif [ $seconds = serie ] ; then
if [ "$serie" ] ; then
echo '];'
fi
local serie=`echo $total | sed 's/cauchy_\([0-9]\)/cauchy_good_\1/g'`
echo "var $serie = ["
else
local x
if [ $workload = encode ] ; then
x=$k/$m
else
x=$k/$m/$erasures
fi
echo "[ '$x', " $(echo "( $total / 1024 / 1024 ) / $seconds" | bc -ql) " ], "
fi
done
echo '];'
}
function main() {
bench_header
bench_run
}
if [ "$1" = fplot ] ; then
"$@"
else
main
fi
# Local Variables:
# compile-command: "\
# CEPH_ERASURE_CODE_BENCHMARK=../../../src/ceph_erasure_code_benchmark \
# PLUGIN_DIRECTORY=../../../build/lib \
# ./bench.sh
# "
# End:
| 5,694 | 28.507772 | 96 | sh |
null | ceph-main/qa/workunits/erasure-code/encode-decode-non-regression.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
set -ex
: ${CORPUS:=https://github.com/ceph/ceph-erasure-code-corpus.git}
: ${DIRECTORY:=$CEPH_ROOT/ceph-erasure-code-corpus}
# when running from sources, the current directory must have precedence
export PATH=:$PATH
if ! test -d $DIRECTORY ; then
git clone $CORPUS $DIRECTORY
fi
my_version=v$(ceph --version | cut -f3 -d ' ')
all_versions=$((ls -d $DIRECTORY/v* ; echo $DIRECTORY/$my_version ) | sort)
for version in $all_versions ; do
if test -d $version ; then
$version/non-regression.sh
fi
if test $version = $DIRECTORY/$my_version ; then
break
fi
done
| 1,196 | 28.195122 | 75 | sh |
null | ceph-main/qa/workunits/fs/cephfs_mirror_ha_gen.sh | #!/bin/bash -ex
#
# cephfs_mirror_ha_gen.sh - generate workload to synchronize
#
. $(dirname $0)/cephfs_mirror_helpers.sh
cleanup()
{
for i in `seq 1 $NR_DIRECTORIES`
do
local repo_name="${REPO_PATH_PFX}_$i"
for j in `seq 1 $NR_SNAPSHOTS`
do
snap_name=$repo_name/.snap/snap_$j
if test -d $snap_name; then
rmdir $snap_name
fi
done
done
exit 1
}
trap cleanup EXIT
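# Enable the mirroring mgr module, turn on snapshot mirroring for the primary
# file system, register the backup file system as a peer and add each repo
# directory for mirroring. The peer spec "client.mirror_remote@ceph" assumes
# the remote cluster is named "ceph" and that the mirror_remote user was
# created by the test environment.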
configure_peer()
{
ceph mgr module enable mirroring
ceph fs snapshot mirror enable $PRIMARY_FS
ceph fs snapshot mirror peer_add $PRIMARY_FS client.mirror_remote@ceph $BACKUP_FS
for i in `seq 1 $NR_DIRECTORIES`
do
local repo_name="${REPO_PATH_PFX}_$i"
ceph fs snapshot mirror add $PRIMARY_FS "$MIRROR_SUBDIR/$repo_name"
done
}
create_snaps()
{
for i in `seq 1 $NR_DIRECTORIES`
do
local repo_name="${REPO_PATH_PFX}_$i"
for j in `seq 1 $NR_SNAPSHOTS`
do
snap_name=$repo_name/.snap/snap_$j
r=$(( $RANDOM % 100 + 5 ))
arr=($repo_name "reset" "--hard" "HEAD~$r")
exec_git_cmd "${arr[@]}"
mkdir $snap_name
store_checksum $snap_name
done
done
}
unset CEPH_CLI_TEST_DUP_COMMAND
echo "running generator on primary file system..."
# setup git repos to be used as data set
setup_repos
# turn on mirroring, add peers...
configure_peer
# snapshots on primary
create_snaps
# do not clean up when exiting on success
trap - EXIT
| 1,541 | 21.028571 | 85 | sh |
null | ceph-main/qa/workunits/fs/cephfs_mirror_ha_verify.sh | #!/bin/bash -ex
#
# cephfs_mirror_ha_verify.sh - verify synchronized snapshots
#
. $(dirname $0)/cephfs_mirror_helpers.sh
echo "running verifier on secondary file system..."
for i in `seq 1 $NR_DIRECTORIES`
do
repo_name="${REPO_PATH_PFX}_$i"
for j in `seq 1 $NR_SNAPSHOTS`
do
for s in 1 1 2 4 4 4 4 4 8 8 8 8 16 16 32 64 64 128 128
do
sleep $s
snap_name=$repo_name/.snap/snap_$j
if test -d $repo_name; then
echo "checking snapshot [$snap_name] in $repo_name"
if test -d $snap_name; then
echo "generating hash for $snap_name"
cksum=''
calc_checksum $snap_name cksum
ret=$(compare_checksum $cksum $snap_name)
if [ $ret -ne 0 ]; then
echo "checksum failed $snap_name ($cksum)"
                        exit $ret
else
echo "checksum matched $snap_name ($cksum)"
break
fi
fi
fi
done
echo "couldn't complete verification for: $snap_name"
done
done
echo "verify done!"
| 1,223 | 28.853659 | 67 | sh |
null | ceph-main/qa/workunits/fs/cephfs_mirror_helpers.sh | PRIMARY_FS='dc'
BACKUP_FS='dc-backup'
REPO=ceph-qa-suite
REPO_DIR=ceph_repo
REPO_PATH_PFX="$REPO_DIR/$REPO"
NR_DIRECTORIES=4
NR_SNAPSHOTS=4
MIRROR_SUBDIR='/mirror'
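# calc_checksum <path> <outvar> stores a recursive md5 of all regular files
# under <path> into the named variable (bash nameref). Illustrative use:
#   cksum=''; calc_checksum "$repo_name/.snap/snap_1" cksum; echo "$cksum"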
calc_checksum()
{
local path=$1
local -n ref=$2
ref=`find -L $path -type f -exec md5sum {} + | awk '{ print $1 }' | md5sum | awk '{ print $1 }'`
}
store_checksum()
{
local path=$1
local cksum='' #something invalid
local fhash=`echo -n $path | md5sum | awk '{ print $1 }'`
calc_checksum $path cksum
echo -n $cksum > "/tmp/primary-$fhash"
}
compare_checksum()
{
local ret=0
local cksum=$1
local path=$2
local fhash=`echo -n $path | md5sum | awk '{ print $1 }'`
local cksum_ondisk=`cat /tmp/primary-$fhash`
if [ $cksum != $cksum_ondisk ]; then
echo "$cksum <> $cksum_ondisk"
ret=1
fi
echo $ret
}
exec_git_cmd()
{
local arg=("$@")
local repo_name=${arg[0]}
local cmd=${arg[@]:1}
git --git-dir "$repo_name/.git" $cmd
}
clone_repo()
{
local repo_name=$1
git clone --branch giant "http://github.com/ceph/$REPO" $repo_name
}
setup_repos()
{
mkdir "$REPO_DIR"
for i in `seq 1 $NR_DIRECTORIES`
do
local repo_name="${REPO_PATH_PFX}_$i"
mkdir $repo_name
clone_repo $repo_name
done
}
| 1,289 | 18.253731 | 101 | sh |
null | ceph-main/qa/workunits/fs/fscrypt.sh | #!/usr/bin/env bash
set -xe
mydir=`dirname $0`
if [ $# -ne 2 ]
then
    echo "2 parameters are required!"
    echo "Usage:"
    echo "  fscrypt.sh <type> <testcase>"
    echo "  type: should be any of 'none', 'unlocked' or 'locked'"
    echo "  testcase: the name of a test script under ../suites, run inside the test directory"
exit 1
fi
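# Illustrative usage (the testcase maps to a script under ../suites):
#   fscrypt.sh unlocked iozone
#   fscrypt.sh none fsstress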
fscrypt=$1
testcase=$2
testdir=fscrypt_test_${fscrypt}_${testcase}
mkdir $testdir
XFSPROGS_DIR='xfsprogs-dev-dir'
XFSTESTS_DIR='xfstest-dev-dir'
export XFS_IO_PROG="$(type -P xfs_io)"
# Setup the xfstests env
setup_xfstests_env()
{
git clone https://git.ceph.com/xfstests-dev.git $XFSTESTS_DIR --depth 1
pushd $XFSTESTS_DIR
. common/encrypt
popd
}
install_deps()
{
local system_value=$(sudo lsb_release -is | awk '{print tolower($0)}')
case $system_value in
"centos" | "centosstream" | "fedora")
sudo yum install -y inih-devel userspace-rcu-devel \
libblkid-devel gettext libedit-devel \
libattr-devel device-mapper-devel libicu-devel
;;
"ubuntu" | "debian")
sudo apt-get install -y libinih-dev liburcu-dev \
libblkid-dev gettext libedit-dev libattr1-dev \
libdevmapper-dev libicu-dev pkg-config
;;
*)
echo "Unsupported distro $system_value"
exit 1
;;
esac
}
# Install xfsprogs-dev from source to support "add_enckey" for xfs_io
install_xfsprogs()
{
local install_xfsprogs=0
xfs_io -c "help add_enckey" | grep -q 'not found' && install_xfsprogs=1
if [ $install_xfsprogs -eq 1 ]; then
install_deps
git clone https://git.ceph.com/xfsprogs-dev.git $XFSPROGS_DIR --depth 1
pushd $XFSPROGS_DIR
make
sudo make install
popd
fi
}
clean_up()
{
rm -rf $XFSPROGS_DIR
rm -rf $XFSTESTS_DIR
rm -rf $testdir
}
# For now, test only the V2 encryption policy, as the
# V1 encryption policy is deprecated
install_xfsprogs
setup_xfstests_env
# Generate a fixed keying identifier
raw_key=$(_generate_raw_encryption_key)
keyid=$(_add_enckey $testdir "$raw_key" | awk '{print $NF}')
case ${fscrypt} in
"none")
        # do nothing to the test directory and test it
        # as a plain, non-encrypted directory
pushd $testdir
${mydir}/../suites/${testcase}.sh
popd
clean_up
;;
"unlocked")
        # set the encryption policy with the provided key; the test
        # directory is then encrypted and unlocked
_set_encpolicy $testdir $keyid
pushd $testdir
${mydir}/../suites/${testcase}.sh
popd
clean_up
;;
"locked")
        # remove the key, so the test directory becomes locked and any
        # modification is denied because the key is required
_rm_enckey $testdir $keyid
clean_up
;;
*)
clean_up
echo "Unknown parameter $1"
exit 1
esac
| 2,581 | 20.516667 | 73 | sh |
null | ceph-main/qa/workunits/fs/snap-hierarchy.sh | #!/bin/sh
set -ex
if [ -d "$1" ]; then
mkdir -p -- "$1" && cd "$1"
fi
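# When VERIFY=verify is set in the environment, the creation steps below are
# skipped and only the stat checks run, so the snapshot hierarchy built by an
# earlier (non-verify) run can be re-checked.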
[ "$VERIFY" != verify ] && mkdir 1
[ "$VERIFY" != verify ] && mkdir 1/.snap/first
stat 1/.snap/first
[ "$VERIFY" != verify ] && mkdir 1/2
stat 1/.snap/first/2 && exit 1
[ "$VERIFY" != verify ] && mkdir 1/2/.snap/second
stat 1/2/.snap/second
[ "$VERIFY" != verify ] && touch 1/foo
stat 1/.snap/first/foo && exit 1
[ "$VERIFY" != verify ] && mkdir 1/.snap/third
stat 1/.snap/third/foo || exit 1
[ "$VERIFY" != verify ] && mkdir 1/2/3
[ "$VERIFY" != verify ] && mkdir 1/2/.snap/fourth
stat 1/2/.snap/fourth/3
exit 0
| 589 | 22.6 | 49 | sh |
null | ceph-main/qa/workunits/fs/test_o_trunc.sh | #!/bin/sh -ex
mydir=`dirname $0`
$mydir/test_o_trunc trunc.foo 600
echo OK
| 78 | 8.875 | 33 | sh |
null | ceph-main/qa/workunits/fs/test_python.sh | #!/bin/sh -ex
# Running as root because the filesystem root directory will be
# owned by uid 0, and that's where we're writing.
sudo python3 -m nose -v $(dirname $0)/../../../src/test/pybind/test_cephfs.py
exit 0
| 214 | 29.714286 | 77 | sh |
null | ceph-main/qa/workunits/fs/damage/test-first-damage.sh | #!/bin/bash
set -ex
FIRST_DAMAGE="first-damage.py"
FS=cephfs
METADATA_POOL=cephfs_meta
MOUNT=~/mnt/mnt.0
PYTHON=python3
function usage {
  printf '%s: [--fs=<fs_name>] [--metadata-pool=<pool>] [--first-damage=</path/to/first-damage.py>] [--mount=<mountpoint>] [--python=<python3>]\n' "$0"
exit 1
}
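# Illustrative invocation (paths depend on the local checkout/vstart setup):
#   ./test-first-damage.sh --fs=cephfs --metadata-pool=cephfs_meta \
#       --first-damage=src/tools/cephfs/first-damage.py --mount=$HOME/mnt/mnt.0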
function create {
ceph config set mds mds_bal_fragment_dirs 0
mkdir dir
DIR_INODE=$(stat -c '%i' dir)
touch dir/a
touch dir/"a space"
touch -- $(printf 'dir/\xff')
mkdir dir/.snap/1
mkdir dir/.snap/2
# two snaps
rm dir/a
mkdir dir/.snap/3
# not present in HEAD
touch dir/a
mkdir dir/.snap/4
# one snap
rm dir/a
touch dir/a
mkdir dir/.snap/5
# unlink then create
rm dir/a
touch dir/a
# unlink then create, HEAD not snapped
ls dir/.snap/*/
mkdir big
BIG_DIR_INODE=$(stat -c '%i' big)
for i in `seq 1 15000`; do
touch $(printf 'big/%08d' $i)
done
}
function flush {
ceph tell mds."$FS":0 flush journal
}
function damage {
local IS=$(printf '%llx.%08llx' "$DIR_INODE" 0)
local LS=$(ceph tell mds."$FS":0 dump snaps | jq .last_created)
local T=$(mktemp -p /tmp)
# nuke snap 1 version of "a"
rados --pool="$METADATA_POOL" getomapval "$IS" a_$(printf %x $((LS-4))) "$T"
printf '\xff\xff\xff\xf0' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
rados --pool="$METADATA_POOL" setomapval "$IS" a_$(printf %x $((LS-4))) --input-file="$T"
# nuke snap 4 version of "a"
rados --pool="$METADATA_POOL" getomapval "$IS" a_$(printf %x $((LS-1))) "$T"
printf '\xff\xff\xff\xff' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
rados --pool="$METADATA_POOL" setomapval "$IS" a_$(printf %x $((LS-1))) --input-file="$T"
# screw up HEAD
rados --pool="$METADATA_POOL" getomapval "$IS" a_head "$T"
printf '\xfe\xff\xff\xff' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
rados --pool="$METADATA_POOL" setomapval "$IS" a_head --input-file="$T"
# screw up HEAD on what dentry in big
IS=$(printf '%llx.%08llx' "$BIG_DIR_INODE" 0)
rados --pool="$METADATA_POOL" getomapval "$IS" 00009999_head "$T"
printf '\xfe\xff\xff\xff' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
rados --pool="$METADATA_POOL" setomapval "$IS" 00009999_head --input-file="$T"
rm -f "$T"
}
function recover {
flush
ceph fs fail "$FS"
sleep 5
cephfs-journal-tool --rank="$FS":0 event recover_dentries summary
cephfs-journal-tool --rank="$FS":0 journal reset
"$PYTHON" $FIRST_DAMAGE --debug /tmp/debug1 --memo /tmp/memo1 "$METADATA_POOL"
"$PYTHON" $FIRST_DAMAGE --debug /tmp/debug2 --memo /tmp/memo2 --repair-nosnap "$METADATA_POOL"
"$PYTHON" $FIRST_DAMAGE --debug /tmp/debug3 --memo /tmp/memo3 --remove "$METADATA_POOL"
ceph fs set "$FS" joinable true
}
function check {
stat dir || exit 1
stat dir/a || exit 1
for i in `seq 1 5`; do
stat dir/.snap/$i || exit 2
done
stat dir/.snap/2/a || exit 3
stat dir/.snap/5/a || exit 4
if stat dir/.snap/1/a; then
echo should be gone
exit 5
fi
if stat dir/.snap/3/a; then
echo should not ever exist
exit 6
fi
if stat dir/.snap/4/a; then
echo should be gone
exit 7
fi
}
function cleanup {
rmdir dir/.snap/*
find dir
rm -rf dir
}
function mount {
sudo --preserve-env=CEPH_CONF bin/mount.ceph :/ "$MOUNT" -o name=admin,noshare
df -h "$MOUNT"
}
function main {
eval set -- $(getopt --name "$0" --options '' --longoptions 'help,fs:,metadata-pool:,first-damage:,mount:,python:' -- "$@")
while [ "$#" -gt 0 ]; do
echo "$*"
echo "$1"
case "$1" in
-h|--help)
usage
;;
--fs)
FS="$2"
shift 2
;;
--metadata-pool)
METADATA_POOL="$2"
shift 2
;;
--mount)
MOUNT="$2"
shift 2
;;
--first-damage)
FIRST_DAMAGE="$2"
shift 2
;;
--python)
PYTHON="$2"
shift 2
;;
--)
shift
break
;;
*)
usage
;;
esac
done
mount
pushd "$MOUNT"
create
popd
sudo umount -f "$MOUNT"
# flush dentries/inodes to omap
flush
damage
recover
sleep 5 # for mds to join
mount
pushd "$MOUNT"
check
cleanup
popd
sudo umount -f "$MOUNT"
}
main "$@"
| 4,385 | 21.492308 | 125 | sh |
null | ceph-main/qa/workunits/fs/full/subvolume_clone.sh | #!/usr/bin/env bash
set -ex
# This testcase tests 'ceph fs subvolume snapshot clone' when the osd is full.
# The clone fails with 'MetadataMgrException: -28 (error in write)' and
# truncates the config file of the corresponding subvolume while updating it.
# Hence the subsequent subvolume commands on the clone fail with a
# 'MetadataMgrException: -2 (section 'GLOBAL' does not exist)' traceback.
# The osd is 1GB in size. The full-ratios are set so that the osd is treated
# as full at around 600MB. The subvolume is created and 100MB is written.
# The subvolume is snapshotted and cloned ten times. Since the clone delay is
# set to 15 seconds, all the clones are guaranteed to reach the pending state.
# Of the ten clones, only a few succeed and the rest fail with ENOSPC.
# At this stage, the ".meta" config file of each failed clone is checked for
# truncation, and the clone status command is checked for a traceback.
# Note that the failed clones stay in a retry loop and their state remains
# 'pending' or 'in-progress'. The state is not updated to 'failed' because
# that config update also gets ENOSPC.
ignore_failure() {
if "$@"; then return 0; else return 0; fi
}
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
NUM_CLONES=10
ceph fs subvolume create cephfs sub_0
subvol_path_0=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
# For debugging
echo "Before ratios are set"
df $CEPH_MNT
ceph osd df
ceph osd set-full-ratio 0.6
ceph osd set-nearfull-ratio 0.50
ceph osd set-backfillfull-ratio 0.55
# For debugging
echo "After ratios are set"
df -h
ceph osd df
for i in {1..100};do sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path_0/1MB_file-$i status=progress bs=1M count=1 conv=fdatasync;done
# For debugging
echo "After subvolumes are written"
df -h $CEPH_MNT
ceph osd df
# snapshot
ceph fs subvolume snapshot create cephfs sub_0 snap_0
# Set clone snapshot delay
ceph config set mgr mgr/volumes/snapshot_clone_delay 15
# Schedule a few clones; some will fail with ENOSPC
for i in $(eval echo {1..$NUM_CLONES});do ceph fs subvolume snapshot clone cephfs sub_0 snap_0 clone_$i;done
# Wait for the osd to become full
timeout=90
while [ $timeout -gt 0 ]
do
health=$(ceph health detail)
[[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
  echo "Waiting for osd to be full: $timeout"
sleep 1
let "timeout-=1"
done
# For debugging
echo "After osd is full"
df -h $CEPH_MNT
ceph osd df
# Check clone status, this should not crash
for i in $(eval echo {1..$NUM_CLONES})
do
ignore_failure ceph fs clone status cephfs clone_$i >/tmp/out_${PID}_file 2>/tmp/error_${PID}_file
cat /tmp/error_${PID}_file
if grep "complete" /tmp/out_${PID}_file; then
echo "The clone_$i is completed"
else
#in-progress/pending clones, No traceback should be found in stderr
echo clone_$i in PENDING/IN-PROGRESS
expect_failure sudo grep "Traceback" /tmp/error_${PID}_file
#config file should not be truncated and GLOBAL section should be found
sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/clone_$i/.meta
fi
done
# Hard cleanup
ignore_failure sudo rm -rf $CEPH_MNT/_index/clone/*
ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/clone_*
ignore_failure sudo rmdir $CEPH_MNT/volumes/_nogroup/sub_0/.snap/snap_0
ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/sub_0
#Set the ratios back for other full tests to run
ceph osd set-full-ratio 0.95
ceph osd set-nearfull-ratio 0.95
ceph osd set-backfillfull-ratio 0.95
#After test
echo "After test"
df -h $CEPH_MNT
ceph osd df
echo OK
| 3,553 | 30.175439 | 133 | sh |
null | ceph-main/qa/workunits/fs/full/subvolume_rm.sh | #!/usr/bin/env bash
set -ex
# This testcase tests the scenario of the 'ceph fs subvolume rm' mgr command
# when the osd is full. The command used to hang. The osd is 1GB in size.
# The subvolume is created and a 500MB file is written. The full-ratios are
# set below 500MB such that the osd is treated as full. Now the subvolume
# is removed. This should be successful with the introduction of the FULL
# capabilities which the mgr holds.
set -e
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
ceph fs subvolume create cephfs sub_0
subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
#For debugging
echo "Before write"
df -h
ceph osd df
sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/500MB_file-1 status=progress bs=1M count=500
ceph osd set-full-ratio 0.2
ceph osd set-nearfull-ratio 0.16
ceph osd set-backfillfull-ratio 0.18
timeout=30
while [ $timeout -gt 0 ]
do
health=$(ceph health detail)
[[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
  echo "Waiting for osd to be full: $timeout"
sleep 1
let "timeout-=1"
done
#For debugging
echo "After ratio set"
df -h
ceph osd df
#Delete subvolume
ceph fs subvolume rm cephfs sub_0
#Validate subvolume is deleted
expect_failure ceph fs subvolume info cephfs sub_0
#Wait for subvolume to delete data
trashdir=$CEPH_MNT/volumes/_deleting
timeout=30
while [ $timeout -gt 0 ]
do
[ -z "$(sudo ls -A $trashdir)" ] && echo "Trash directory $trashdir is empty" && break
  echo "Waiting for trash dir to be empty: $timeout"
sleep 1
let "timeout-=1"
done
#Set the ratios back for other full tests to run
ceph osd set-full-ratio 0.95
ceph osd set-nearfull-ratio 0.95
ceph osd set-backfillfull-ratio 0.95
#After test
echo "After test"
df -h
ceph osd df
echo OK
| 1,766 | 23.205479 | 93 | sh |
null | ceph-main/qa/workunits/fs/full/subvolume_snapshot_rm.sh | #!/usr/bin/env bash
set -ex
# This testcase tests 'ceph fs subvolume snapshot rm' when the osd is full.
# The snapshot rm fails with 'MetadataMgrException: -28 (error in write)' and
# truncates the config file of the corresponding subvolume. Hence a subsequent
# snapshot rm of the same snapshot fails with a 'MetadataMgrException: -2
# (section 'GLOBAL' does not exist)' traceback.
# The osd is 1GB in size. The subvolume is created and an 800MB file is
# written. Then the full-ratios are set below 500MB such that the osd is
# treated as full. The subvolume snapshot is taken, which succeeds as no extra
# space is required for a snapshot. Now the removal of the snapshot fails with
# ENOSPC as it fails to remove the snapshot metadata set. The snapshot removal
# fails, but it should not throw a traceback or truncate the config file.
set -e
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
ignore_failure() {
if "$@"; then return 0; else return 0; fi
}
ceph fs subvolume create cephfs sub_0
subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
#For debugging
echo "Before write"
df $CEPH_MNT
ceph osd df
# Write 800MB file and set full ratio to around 200MB
ignore_failure sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/800MB_file-1 status=progress bs=1M count=800 conv=fdatasync
ceph osd set-full-ratio 0.2
ceph osd set-nearfull-ratio 0.16
ceph osd set-backfillfull-ratio 0.18
timeout=30
while [ $timeout -gt 0 ]
do
health=$(ceph health detail)
[[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
  echo "Waiting for osd to be full: $timeout"
sleep 1
let "timeout-=1"
done
#Take snapshot
ceph fs subvolume snapshot create cephfs sub_0 snap_0
#Remove snapshot fails but should not throw traceback
expect_failure ceph fs subvolume snapshot rm cephfs sub_0 snap_0 2>/tmp/error_${PID}_file
cat /tmp/error_${PID}_file
# No traceback should be found
expect_failure grep "Traceback" /tmp/error_${PID}_file
# Validate config file is not truncated and GLOBAL section exists
sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/sub_0/.meta
#For debugging
echo "After write"
df $CEPH_MNT
ceph osd df
# Snapshot removal with force option should succeed
ceph fs subvolume snapshot rm cephfs sub_0 snap_0 --force
#Cleanup from backend
ignore_failure sudo rm -f /tmp/error_${PID}_file
ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/sub_0
#Set the ratios back for other full tests to run
ceph osd set-full-ratio 0.95
ceph osd set-nearfull-ratio 0.95
ceph osd set-backfillfull-ratio 0.95
#After test
echo "After test"
df -h $CEPH_MNT
ceph osd df
echo OK
| 2,595 | 28.83908 | 123 | sh |
null | ceph-main/qa/workunits/fs/maxentries/maxentries.sh | #!/usr/bin/env bash
set -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
function make_files()
{
set +x
temp_dir=`mktemp -d mkfile_test_XXXXXX`
for i in $(seq 1 $1)
do
echo -n | dd of="${temp_dir}/file_$i" conv=fsync || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function make_dirs()
{
set +x
temp_dir=`mktemp -d mkdir_test_XXXXXX`
for i in $(seq 1 $1)
do
mkdir -p ${temp_dir}/dir_${i} || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function make_nodes()
{
set +x
temp_dir=`mktemp -d mknod_test_XXXXXX`
for i in $(seq 1 $1)
do
mknod ${temp_dir}/fifo_${i} p || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function rename_files()
{
set +x
temp_dir=`mktemp -d rename_test_XXXXXX`
mkdir -p ${temp_dir}/rename
for i in $(seq 1 $1)
do
touch ${temp_dir}/file_${i} || return 1
mv ${temp_dir}/file_${i} ${temp_dir}/rename/ || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function make_symlinks()
{
set +x
temp_dir=`mktemp -d symlink_test_XXXXXX`
mkdir -p ${temp_dir}/symlink
touch ${temp_dir}/file
for i in $(seq 1 $1)
do
ln -s ../file ${temp_dir}/symlink/sym_${i} || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function make_links()
{
set +x
temp_dir=`mktemp -d link_test_XXXXXX`
mkdir -p ${temp_dir}/link
touch ${temp_dir}/file
for i in $(seq 1 $1)
do
ln ${temp_dir}/file ${temp_dir}/link/link_${i} || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function cleanup()
{
rm -rf *
}
test_dir="max_entries"
mkdir -p $test_dir
pushd $test_dir
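# Cap directory entries at 100 via the MDS config; each helper below verifies
# that exactly dir_max_entries entries can be created and that creating one
# more fails (expect_false).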
dir_max_entries=100
ceph config set mds mds_dir_max_entries $dir_max_entries
ok_dir_max_entries=$dir_max_entries
fail_dir_max_entries=$((dir_max_entries+1))
# make files test
make_files $ok_dir_max_entries
expect_false make_files $fail_dir_max_entries
# make dirs test
make_dirs $ok_dir_max_entries
expect_false make_dirs $fail_dir_max_entries
# make nodes test
make_nodes $ok_dir_max_entries
expect_false make_nodes $fail_dir_max_entries
# rename files test
rename_files $ok_dir_max_entries
expect_false rename_files $fail_dir_max_entries
# symlink files test
make_symlinks $ok_dir_max_entries
expect_false make_symlinks $fail_dir_max_entries
# link files test
make_links $ok_dir_max_entries
expect_false make_links $fail_dir_max_entries
# no limit (e.g., default value)
dir_max_entries=0
ceph config set mds mds_dir_max_entries $dir_max_entries
make_files 500
make_dirs 500
make_nodes 500
rename_files 500
make_symlinks 500
make_links 500
cleanup
popd # $test_dir
echo OK
| 2,730 | 16.50641 | 64 | sh |
null | ceph-main/qa/workunits/fs/misc/acl.sh | #!/bin/sh -x
set -e
mkdir -p testdir
cd testdir
set +e
setfacl -d -m u:nobody:rw .
if test $? != 0; then
echo "Filesystem does not support ACL"
exit 0
fi
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
set -e
c=0
while [ $c -lt 100 ]
do
c=`expr $c + 1`
# inherited ACL from parent directory's default ACL
mkdir d1
c1=`getfacl d1 | grep -c "nobody:rw"`
echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null
c2=`getfacl d1 | grep -c "nobody:rw"`
rmdir d1
if [ $c1 -ne 2 ] || [ $c2 -ne 2 ]
then
echo "ERROR: incorrect ACLs"
exit 1
fi
done
mkdir d1
# The ACL xattr only contains ACL header. ACL should be removed
# in this case.
setfattr -n system.posix_acl_access -v 0x02000000 d1
setfattr -n system.posix_acl_default -v 0x02000000 .
expect_failure getfattr -n system.posix_acl_access d1
expect_failure getfattr -n system.posix_acl_default .
rmdir d1
cd ..
rmdir testdir
echo OK
| 919 | 17.039216 | 63 | sh |