repo (string, may be null) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string, 90 classes)
---|---|---|---|---|---|---
null | ceph-main/make-debs.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
set -xe
. /etc/os-release
base=${1:-/tmp/release}
releasedir=$base/$NAME/WORKDIR
rm -fr $(dirname $releasedir)
mkdir -p $releasedir
#
# remove all files not under git so they are not
# included in the distribution.
#
git clean -dxf
#
# git describe provides a version that is
# a) human readable
# b) unique for each commit
# c) compares higher than any previous commit
# d) contains the short hash of the commit
#
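# e.g. (illustrative only) a description such as "v18.2.0-42-g0123abc" becomes
# "18.2.0-42-g0123abc" once the leading "v" is stripped below.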
vers=$(git describe --match "v*" | sed s/^v//)
./make-dist $vers
#
# rename the tarball to match Debian conventions and extract it
#
mv ceph-$vers.tar.bz2 $releasedir/ceph_$vers.orig.tar.bz2
tar -C $releasedir -jxf $releasedir/ceph_$vers.orig.tar.bz2
#
# copy the debian directory over and remove -dbg packages
# because they are large and take time to build
#
cp -a debian $releasedir/ceph-$vers/debian
cd $releasedir
perl -ni -e 'print if(!(/^Package: .*-dbg$/../^$/))' ceph-$vers/debian/control
perl -pi -e 's/--dbg-package.*//' ceph-$vers/debian/rules
#
# always set the debian version to 1 which is ok because the debian
# directory is included in the sources and the upstream version will
# change each time it is modified.
#
dvers="$vers-1"
#
# update the changelog to match the desired version
#
cd ceph-$vers
chvers=$(head -1 debian/changelog | perl -ne 's/.*\(//; s/\).*//; print')
if [ "$chvers" != "$dvers" ]; then
DEBEMAIL="[email protected]" dch -D $VERSION_CODENAME --force-distribution -b -v "$dvers" "new version"
fi
#
# create the packages
# a) with ccache to speed things up when building repeatedly
# b) do not sign the packages
# c) use half of the available processors
#
: ${NPROC:=$(($(nproc) / 2))}
if test $NPROC -gt 1 ; then
j=-j${NPROC}
fi
PATH=/usr/lib/ccache:$PATH dpkg-buildpackage $j -uc -us
cd ../..
mkdir -p $VERSION_CODENAME/conf
cat > $VERSION_CODENAME/conf/distributions <<EOF
Codename: $VERSION_CODENAME
Suite: stable
Components: main
Architectures: $(dpkg --print-architecture) source
EOF
if [ ! -e conf ]; then
ln -s $VERSION_CODENAME/conf conf
fi
reprepro --basedir $(pwd) include $VERSION_CODENAME WORKDIR/*.changes
#
# teuthology needs the version in the version file
#
echo $dvers > $VERSION_CODENAME/version
| 2,777 | 29.195652 | 105 | sh |
null | ceph-main/make-srpm.sh | #!/bin/sh
#
# Create an SRPM which can be used to build Ceph
#
# ./make-srpm.sh <version>
# rpmbuild --rebuild /tmp/ceph/ceph-<version>-0.el7.centos.src.rpm
#
./make-dist $1
rpmbuild -D"_sourcedir `pwd`" -D"_specdir `pwd`" -D"_srcrpmdir `pwd`" -bs ceph.spec
| 259 | 20.666667 | 83 | sh |
null | ceph-main/mingw_conf.sh | # MINGW Settings:
# Due to inconsistencies between distributions, the mingw versions, binaries,
# and directories must be determined (or defined) prior to building.
# This script expects the following variables:
# * OS - currently ubuntu, rhel, or suse. In the future we may attempt to
# detect the platform.
# * MINGW_CMAKE_FILE - if set, a cmake toolchain file will be created
# * MINGW_POSIX_FLAGS - if set, Mingw Posix compatibility mode will be
#   enabled by defining the corresponding flags.
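# Example invocation (hypothetical values, for illustration only); the script is
# meant to be sourced with these variables already set:
#   OS=ubuntu MINGW_CMAKE_FILE=/tmp/mingw.cmake MINGW_POSIX_FLAGS=1 . ./mingw_conf.sh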
# -Common mingw settings-
MINGW_PREFIX="x86_64-w64-mingw32-"
MINGW_BASE="x86_64-w64-mingw32"
MINGW_CPP="${MINGW_BASE}-c++"
MINGW_DLLTOOL="${MINGW_BASE}-dlltool"
MINGW_WINDRES="${MINGW_BASE}-windres"
MINGW_STRIP="${MINGW_BASE}-strip"
MINGW_OBJCOPY="${MINGW_BASE}-objcopy"
# -Distribution specific mingw settings-
case "$OS" in
ubuntu)
mingwPosix="-posix"
mingwLibDir="/usr/lib/gcc"
mingwVersion="$(${MINGW_CPP}${mingwPosix} -dumpversion)"
mingwTargetLibDir="${mingwLibDir}/${MINGW_BASE}/${mingwVersion}"
mingwLibpthreadDir="/usr/${MINGW_BASE}/lib"
PTW32Include=/usr/share/mingw-w64/include
PTW32Lib=/usr/x86_64-w64-mingw32/lib
;;
rhel)
mingwPosix=""
mingwLibDir="/usr/lib64/gcc"
mingwVersion="$(${MINGW_CPP}${mingwPosix} -dumpversion)"
mingwTargetLibDir="/usr/${MINGW_BASE}/sys-root/mingw/bin"
mingwLibpthreadDir="$mingwTargetLibDir"
PTW32Include=/usr/x86_64-w64-mingw32/sys-root/mingw/include
PTW32Lib=/usr/x86_64-w64-mingw32/sys-root/mingw/lib
;;
suse)
mingwPosix=""
mingwLibDir="/usr/lib64/gcc"
mingwVersion="$(${MINGW_CPP}${mingwPosix} -dumpversion)"
mingwTargetLibDir="/usr/${MINGW_BASE}/sys-root/mingw/bin"
mingwLibpthreadDir="$mingwTargetLibDir"
PTW32Include=/usr/x86_64-w64-mingw32/sys-root/mingw/include
PTW32Lib=/usr/x86_64-w64-mingw32/sys-root/mingw/lib
;;
*)
echo "$ID is unknown, automatic mingw configuration is not possible."
exit 1
;;
esac
# -Common mingw settings, dependent upon distribution specific settings-
MINGW_FIND_ROOT_LIB_PATH="${mingwLibDir}/\${TOOLCHAIN_PREFIX}/${mingwVersion}"
MINGW_CC="${MINGW_BASE}-gcc${mingwPosix}"
MINGW_CXX="${MINGW_BASE}-g++${mingwPosix}"
# End MINGW configuration
if [[ -n $MINGW_CMAKE_FILE ]]; then
cat > $MINGW_CMAKE_FILE <<EOL
set(CMAKE_SYSTEM_NAME Windows)
set(TOOLCHAIN_PREFIX ${MINGW_BASE})
set(CMAKE_SYSTEM_PROCESSOR x86_64)
# We'll need to use posix threads in order to use
# C++11 features, such as std::thread.
set(CMAKE_C_COMPILER \${TOOLCHAIN_PREFIX}-gcc${mingwPosix})
set(CMAKE_CXX_COMPILER \${TOOLCHAIN_PREFIX}-g++${mingwPosix})
set(CMAKE_RC_COMPILER \${TOOLCHAIN_PREFIX}-windres)
set(CMAKE_FIND_ROOT_PATH /usr/\${TOOLCHAIN_PREFIX} ${MINGW_FIND_ROOT_LIB_PATH})
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# TODO: consider switching this to "ONLY". The issue with
# that is that all our libs should then be under
# CMAKE_FIND_ROOT_PATH and CMAKE_PREFIX_PATH would be ignored.
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH)
EOL
if [[ -n $MINGW_POSIX_FLAGS ]]; then
cat >> $MINGW_CMAKE_FILE <<EOL
# Some functions (e.g. localtime_r) will not be available unless we set
# the following flag.
add_definitions(-D_POSIX=1)
add_definitions(-D_POSIX_C_SOURCE=1)
add_definitions(-D_POSIX_=1)
add_definitions(-D_POSIX_THREADS=1)
EOL
fi
fi
| 3,506 | 37.538462 | 79 | sh |
null | ceph-main/run-make-check.sh | #!/usr/bin/env bash
#
# Ceph distributed storage system
#
# Copyright (C) 2014 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
#
# To just look at what this script will do, run it like this:
#
# $ DRY_RUN=echo ./run-make-check.sh
#
source src/script/run-make.sh
set -e
function run() {
# to prevent OSD EMFILE death on tests, make sure ulimit >= 1024
$DRY_RUN ulimit -n $(ulimit -Hn)
if [ $(ulimit -n) -lt 1024 ];then
echo "***ulimit -n too small, better bigger than 1024 for test***"
return 1
fi
# increase the aio-max-nr, which is by default 65536. we could reach this
# limit while running seastar tests and bluestore tests.
local m=16
local procs="$(($(get_processors) * 2))"
if [ "${procs}" -gt $m ]; then
m="${procs}"
fi
local aiomax="$((65536 * m))"
if [ "$(/sbin/sysctl -n fs.aio-max-nr )" -lt "${aiomax}" ]; then
$DRY_RUN sudo /sbin/sysctl -q -w fs.aio-max-nr="${aiomax}"
fi
CHECK_MAKEOPTS=${CHECK_MAKEOPTS:-$DEFAULT_MAKEOPTS}
if in_jenkins; then
if ! ctest $CHECK_MAKEOPTS --no-compress-output --output-on-failure --test-output-size-failed 1024000 -T Test; then
# do not return failure, as the jenkins publisher will take care of this
rm -fr ${TMPDIR:-/tmp}/ceph-asok.*
fi
else
if ! $DRY_RUN ctest $CHECK_MAKEOPTS --output-on-failure; then
rm -fr ${TMPDIR:-/tmp}/ceph-asok.*
return 1
fi
fi
}
function main() {
if [[ $EUID -eq 0 ]] ; then
echo "For best results, run this script as a normal user configured"
echo "with the ability to run commands as root via sudo."
fi
echo -n "Checking hostname sanity... "
if $DRY_RUN hostname --fqdn >/dev/null 2>&1 ; then
echo "OK"
else
echo "NOT OK"
echo "Please fix 'hostname --fqdn', otherwise 'make check' will fail"
return 1
fi
# uses run-make.sh to install-deps
FOR_MAKE_CHECK=1 prepare
configure "$@"
in_jenkins && echo "CI_DEBUG: Running 'build tests'"
build tests
echo "make check: successful build on $(git rev-parse HEAD)"
FOR_MAKE_CHECK=1 run
}
if [ "$0" = "$BASH_SOURCE" ]; then
main "$@"
fi
| 2,537 | 29.214286 | 123 | sh |
null | ceph-main/win32_build.sh | #!/usr/bin/env bash
set -e
set -o pipefail
SCRIPT_DIR="$(dirname "$BASH_SOURCE")"
SCRIPT_DIR="$(realpath "$SCRIPT_DIR")"
num_vcpus=$(nproc)
CEPH_DIR="${CEPH_DIR:-$SCRIPT_DIR}"
BUILD_DIR="${BUILD_DIR:-${CEPH_DIR}/build}"
DEPS_DIR="${DEPS_DIR:-$CEPH_DIR/build.deps}"
ZIP_DEST="${ZIP_DEST:-$BUILD_DIR/ceph.zip}"
CLEAN_BUILD=${CLEAN_BUILD:-}
SKIP_BUILD=${SKIP_BUILD:-}
# Useful when packaging existing binaries.
SKIP_CMAKE=${SKIP_CMAKE:-}
SKIP_DLL_COPY=${SKIP_DLL_COPY:-}
SKIP_TESTS=${SKIP_TESTS:-}
SKIP_BINDIR_CLEAN=${SKIP_BINDIR_CLEAN:-}
# Use Ninja's default (number of vCPUs + 2); overriding NUM_WORKERS may be useful on machines with few cores.
NUM_WORKERS_DEFAULT=$(( $num_vcpus + 2 ))
NUM_WORKERS=${NUM_WORKERS:-$NUM_WORKERS_DEFAULT}
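# Example invocation (hypothetical values, for illustration only):
#   CLEAN_BUILD=1 NUM_WORKERS=8 OS=ubuntu ./win32_build.sh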
DEV_BUILD=${DEV_BUILD:-}
# Unless SKIP_ZIP is set, we're preparing an archive that contains the Ceph
# binaries, debug symbols as well as the required DLLs.
SKIP_ZIP=${SKIP_ZIP:-}
# By default, we'll move the debug symbols to separate files located in the
# ".debug" directory. If "EMBEDDED_DBG_SYM" is set, the debug symbols will
# remain embedded in the binaries.
#
# Unfortunately we cannot use pdb symbols when cross compiling. cv2pdb
# as well as llvm rely on mspdb*.dll in order to support this proprietary format.
EMBEDDED_DBG_SYM=${EMBEDDED_DBG_SYM:-}
# Allow for OS specific customizations through the OS flag.
# Valid options are currently "ubuntu", "suse", and "rhel".
OS=${OS}
if [[ -z $OS ]]; then
source /etc/os-release
case "$ID" in
opensuse*|suse|sles)
OS="suse"
;;
rhel|centos)
OS="rhel"
;;
ubuntu)
OS="ubuntu"
;;
*)
echo "Unsupported Linux distro $ID."
echo "only SUSE, Ubuntu and RHEL are supported."
echo "Set the OS environment variable to override."
exit 1
;;
esac
fi
export OS="$OS"
# We'll have to be explicit here since auto-detecting doesn't work
# properly when cross compiling.
ALLOCATOR=${ALLOCATOR:-libc}
# Debug builds don't work with MINGW for the time being, failing with
# can't close <file>: File too big
# -Wa,-mbig-obj does not help.
CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-}
if [[ -z $CMAKE_BUILD_TYPE ]]; then
# By default, we're building release binaries with minimal debug information.
export CFLAGS="$CFLAGS -g1"
export CXXFLAGS="$CXXFLAGS -g1"
CMAKE_BUILD_TYPE=Release
fi
# Some tests can't use shared libraries yet due to unspecified dependencies.
# We'll do a static build by default for now.
ENABLE_SHARED=${ENABLE_SHARED:-OFF}
binDir="$BUILD_DIR/bin"
strippedBinDir="$BUILD_DIR/bin_stripped"
# GDB will look for this directory by default.
dbgDirname=".debug"
dbgSymbolDir="$strippedBinDir/${dbgDirname}"
depsSrcDir="$DEPS_DIR/src"
depsToolsetDir="$DEPS_DIR/mingw"
cmakeGenerator="Ninja"
lz4Dir="${depsToolsetDir}/lz4"
sslDir="${depsToolsetDir}/openssl"
boostDir="${depsToolsetDir}/boost"
zlibDir="${depsToolsetDir}/zlib"
backtraceDir="${depsToolsetDir}/libbacktrace"
snappyDir="${depsToolsetDir}/snappy"
winLibDir="${depsToolsetDir}/windows/lib"
wnbdSrcDir="${depsSrcDir}/wnbd"
wnbdLibDir="${depsToolsetDir}/wnbd/lib"
dokanSrcDir="${depsSrcDir}/dokany"
dokanLibDir="${depsToolsetDir}/dokany/lib"
depsDirs="$lz4Dir;$sslDir;$boostDir;$zlibDir;$backtraceDir;$snappyDir"
depsDirs+=";$winLibDir"
# CMake recommends using CMAKE_PREFIX_PATH instead of link_directories.
# Still, some library dependencies may not include the full path (e.g. Boost
# sets the "-lz" flag through INTERFACE_LINK_LIBRARIES), which is why
# we have to ensure that the linker will be able to locate them.
linkDirs="$zlibDir/lib"
lz4Lib="${lz4Dir}/lib/dll/liblz4-1.dll"
lz4Include="${lz4Dir}/lib"
if [[ -n $CLEAN_BUILD ]]; then
echo "Cleaning up build dir: $BUILD_DIR"
rm -rf $BUILD_DIR
rm -rf $DEPS_DIR
fi
if [[ -z $SKIP_BINDIR_CLEAN ]]; then
echo "Cleaning up bin dir: $binDir"
rm -rf $binDir
fi
mkdir -p $BUILD_DIR
cd $BUILD_DIR
if [[ ! -f ${depsToolsetDir}/completed ]]; then
echo "Preparing dependencies: $DEPS_DIR. Log: ${BUILD_DIR}/build_deps.log"
NUM_WORKERS=$NUM_WORKERS DEPS_DIR=$DEPS_DIR OS="$OS"\
"$SCRIPT_DIR/win32_deps_build.sh" | tee "${BUILD_DIR}/build_deps.log"
fi
# Due to distribution specific mingw settings, the mingw.cmake file
# must be built prior to running cmake.
MINGW_CMAKE_FILE="$BUILD_DIR/mingw32.cmake"
MINGW_POSIX_FLAGS=1
source "$SCRIPT_DIR/mingw_conf.sh"
if [[ -z $SKIP_CMAKE ]]; then
# We'll need to cross compile Boost.Python before enabling
# "WITH_MGR".
echo "Generating solution. Log: ${BUILD_DIR}/cmake.log"
# This isn't propagated to some of the subprojects, we'll use an env variable
# for now.
export CMAKE_PREFIX_PATH=$depsDirs
if [[ -n $DEV_BUILD ]]; then
echo "Dev build enabled."
echo "Git versioning will be disabled."
ENABLE_GIT_VERSION="OFF"
WITH_CEPH_DEBUG_MUTEX="ON"
else
ENABLE_GIT_VERSION="ON"
WITH_CEPH_DEBUG_MUTEX="OFF"
fi
# As opposed to Linux, Windows shared libraries can't have unresolved
# symbols. Until we fix the dependencies (which are either unspecified
# or circular), we'll have to stick to static linking.
cmake -D CMAKE_PREFIX_PATH=$depsDirs \
-D MINGW_LINK_DIRECTORIES="$linkDirs" \
-D CMAKE_TOOLCHAIN_FILE="$MINGW_CMAKE_FILE" \
-D WITH_LIBCEPHSQLITE=OFF \
-D WITH_RDMA=OFF -D WITH_OPENLDAP=OFF \
-D WITH_GSSAPI=OFF -D WITH_XFS=OFF \
-D WITH_FUSE=OFF -D WITH_DOKAN=ON \
-D WITH_BLUESTORE=OFF -D WITH_LEVELDB=OFF \
-D WITH_LTTNG=OFF -D WITH_BABELTRACE=OFF -D WITH_JAEGER=OFF \
-D WITH_SYSTEM_BOOST=ON -D WITH_MGR=OFF -D WITH_KVS=OFF \
-D WITH_LIBCEPHFS=ON -D WITH_KRBD=OFF -D WITH_RADOSGW=OFF \
-D ENABLE_SHARED=$ENABLE_SHARED -D WITH_RBD=ON -D BUILD_GMOCK=ON \
-D WITH_CEPHFS=OFF -D WITH_MANPAGE=OFF \
-D WITH_MGR_DASHBOARD_FRONTEND=OFF -D WITH_SYSTEMD=OFF -D WITH_TESTS=ON \
-D LZ4_INCLUDE_DIR=$lz4Include -D LZ4_LIBRARY=$lz4Lib \
-D Backtrace_INCLUDE_DIR="$backtraceDir/include" \
-D Backtrace_LIBRARY="$backtraceDir/lib/libbacktrace.a" \
-D ENABLE_GIT_VERSION=$ENABLE_GIT_VERSION \
-D ALLOCATOR="$ALLOCATOR" -D CMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE \
-D WNBD_INCLUDE_DIRS="$wnbdSrcDir/include" \
-D WNBD_LIBRARIES="$wnbdLibDir/libwnbd.a" \
-D WITH_CEPH_DEBUG_MUTEX=$WITH_CEPH_DEBUG_MUTEX \
-D DOKAN_INCLUDE_DIRS="$dokanSrcDir/dokan" \
-D DOKAN_LIBRARIES="$dokanLibDir/libdokan.a" \
-G "$cmakeGenerator" \
$CEPH_DIR 2>&1 | tee "${BUILD_DIR}/cmake.log"
fi # [[ -z $SKIP_CMAKE ]]
if [[ -z $SKIP_BUILD ]]; then
echo "Building using $NUM_WORKERS workers. Log: ${BUILD_DIR}/build.log"
echo "" > "${BUILD_DIR}/build.log"
cd $BUILD_DIR
ninja_targets="rados rbd rbd-wnbd "
ninja_targets+=" ceph-conf ceph-immutable-object-cache"
ninja_targets+=" cephfs ceph-dokan"
# TODO: do we actually need the ceph compression libs?
ninja_targets+=" compressor ceph_lz4 ceph_snappy ceph_zlib ceph_zstd"
if [[ -z $SKIP_TESTS ]]; then
ninja_targets+=" tests ceph_radosacl ceph_scratchtool "
ninja_targets+=`ninja -t targets | grep ceph_test | cut -d ":" -f 1 | grep -v exe`
fi
ninja -v $ninja_targets 2>&1 | tee "${BUILD_DIR}/build.log"
fi
if [[ -z $SKIP_DLL_COPY ]]; then
# To adjust mingw paths, see 'mingw_conf.sh'.
required_dlls=(
$zlibDir/zlib1.dll
$lz4Dir/lib/dll/liblz4-1.dll
$sslDir/bin/libcrypto-1_1-x64.dll
$sslDir/bin/libssl-1_1-x64.dll
$mingwTargetLibDir/libstdc++-6.dll
$mingwTargetLibDir/libgcc_s_seh-1.dll
$mingwTargetLibDir/libssp*.dll
$mingwLibpthreadDir/libwinpthread-1.dll
$boostDir/lib/*.dll)
echo "Copying required dlls to $binDir."
cp ${required_dlls[@]} $binDir
fi
if [[ -z $SKIP_ZIP ]]; then
# Use a temp directory, in order to create a clean zip file
ZIP_TMPDIR=$(mktemp -d win_binaries.XXXXX)
if [[ -z $EMBEDDED_DBG_SYM ]]; then
echo "Extracting debug symbols from binaries."
rm -rf $strippedBinDir; mkdir $strippedBinDir
rm -rf $dbgSymbolDir; mkdir $dbgSymbolDir
# Strip files individually, to save time and space
for file in $binDir/*.exe $binDir/*.dll; do
dbgFilename=$(basename $file).debug
dbgFile="$dbgSymbolDir/$dbgFilename"
strippedFile="$strippedBinDir/$(basename $file)"
echo "Copying debug symbols: $dbgFile"
$MINGW_OBJCOPY --only-keep-debug $file $dbgFile
$MINGW_STRIP --strip-debug --strip-unneeded -o $strippedFile $file
$MINGW_OBJCOPY --remove-section .gnu_debuglink $strippedFile
$MINGW_OBJCOPY --add-gnu-debuglink=$dbgFile $strippedFile
done
# Copy any remaining files to the stripped directory
for file in $binDir/*; do
[[ ! -f $strippedBinDir/$(basename $file) ]] && \
cp $file $strippedBinDir
done
ln -s $strippedBinDir $ZIP_TMPDIR/ceph
else
ln -s $binDir $ZIP_TMPDIR/ceph
fi
echo "Building zip archive $ZIP_DEST."
# Include the README file in the archive
ln -s $CEPH_DIR/README.windows.rst $ZIP_TMPDIR/ceph/README.windows.rst
cd $ZIP_TMPDIR
[[ -f $ZIP_DEST ]] && rm $ZIP_DEST
zip -r $ZIP_DEST ceph
cd -
rm -rf $ZIP_TMPDIR/ceph/README.windows.rst $ZIP_TMPDIR
echo -e '\n WIN32 files zipped to: '$ZIP_DEST'\n'
fi
| 9,371 | 34.908046 | 88 | sh |
null | ceph-main/win32_deps_build.sh | #!/usr/bin/env bash
set -e
SCRIPT_DIR="$(dirname "$BASH_SOURCE")"
SCRIPT_DIR="$(realpath "$SCRIPT_DIR")"
num_vcpus=$(nproc)
NUM_WORKERS=${NUM_WORKERS:-$num_vcpus}
DEPS_DIR="${DEPS_DIR:-$SCRIPT_DIR/build.deps}"
depsSrcDir="$DEPS_DIR/src"
depsToolsetDir="$DEPS_DIR/mingw"
lz4SrcDir="${depsSrcDir}/lz4"
lz4Dir="${depsToolsetDir}/lz4"
lz4Tag="v1.9.2"
sslTag="OpenSSL_1_1_1c"
sslDir="${depsToolsetDir}/openssl"
sslSrcDir="${depsSrcDir}/openssl"
# For now, we'll keep the version number within the file path when not using git.
boostUrl="https://boostorg.jfrog.io/artifactory/main/release/1.82.0/source/boost_1_82_0.tar.gz"
boostSrcDir="${depsSrcDir}/boost_1_82_0"
boostDir="${depsToolsetDir}/boost"
zlibDir="${depsToolsetDir}/zlib"
zlibSrcDir="${depsSrcDir}/zlib"
backtraceDir="${depsToolsetDir}/libbacktrace"
backtraceSrcDir="${depsSrcDir}/libbacktrace"
snappySrcDir="${depsSrcDir}/snappy"
snappyDir="${depsToolsetDir}/snappy"
snappyTag="1.1.9"
# Additional Windows libraries, which aren't provided by Mingw
winLibDir="${depsToolsetDir}/windows/lib"
wnbdUrl="https://github.com/cloudbase/wnbd"
wnbdTag="main"
wnbdSrcDir="${depsSrcDir}/wnbd"
wnbdLibDir="${depsToolsetDir}/wnbd/lib"
dokanUrl="https://github.com/dokan-dev/dokany"
dokanTag="v2.0.5.1000"
dokanSrcDir="${depsSrcDir}/dokany"
dokanLibDir="${depsToolsetDir}/dokany/lib"
# Allow for OS specific customizations through the OS flag (normally
# passed through from win32_build).
# Valid options are currently "ubuntu", "rhel", and "suse".
OS=${OS:-"ubuntu"}
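# Example invocation (hypothetical values, for illustration only):
#   OS=rhel NUM_WORKERS=8 DEPS_DIR=/tmp/ceph.deps ./win32_deps_build.sh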
function _make() {
make -j $NUM_WORKERS $@
}
if [[ -d $DEPS_DIR ]]; then
echo "Cleaning up dependency build dir: $DEPS_DIR"
rm -rf $DEPS_DIR
fi
mkdir -p $DEPS_DIR
mkdir -p $depsToolsetDir
mkdir -p $depsSrcDir
echo "Installing required packages."
case "$OS" in
rhel)
# pkgconf needs https://bugzilla.redhat.com/show_bug.cgi?id=1975416
sudo yum -y --setopt=skip_missing_names_on_install=False install \
mingw64-gcc-c++ \
cmake \
pkgconf \
python3-devel \
autoconf \
libtool \
ninja-build \
zip \
python3-PyYAML \
gcc \
diffutils \
patch \
wget \
perl \
git-core
;;
ubuntu)
sudo apt-get update
sudo env DEBIAN_FRONTEND=noninteractive apt-get -y install \
mingw-w64 g++ cmake pkg-config \
python3-dev python3-yaml \
autoconf libtool ninja-build wget zip \
git
;;
suse)
for PKG in mingw64-cross-gcc-c++ mingw64-libgcc_s_seh1 mingw64-libstdc++6 \
cmake pkgconf python3-devel autoconf libtool ninja zip \
python3-PyYAML \
gcc patch wget git; do
rpm -q $PKG >/dev/null || zypper -n install $PKG
done
;;
esac
MINGW_CMAKE_FILE="$DEPS_DIR/mingw.cmake"
source "$SCRIPT_DIR/mingw_conf.sh"
echo "Building zlib."
cd $depsSrcDir
if [[ ! -d $zlibSrcDir ]]; then
git clone --depth 1 https://github.com/madler/zlib
fi
cd $zlibSrcDir
# Apparently the configure script is broken...
sed -e s/"PREFIX = *$"/"PREFIX = ${MINGW_PREFIX}"/ -i win32/Makefile.gcc
_make -f win32/Makefile.gcc
_make BINARY_PATH=$zlibDir \
INCLUDE_PATH=$zlibDir/include \
LIBRARY_PATH=$zlibDir/lib \
SHARED_MODE=1 \
-f win32/Makefile.gcc install
echo "Building lz4."
cd $depsToolsetDir
if [[ ! -d $lz4Dir ]]; then
git clone --branch $lz4Tag --depth 1 https://github.com/lz4/lz4
cd $lz4Dir
fi
cd $lz4Dir
_make BUILD_STATIC=no CC=${MINGW_CC%-posix*} \
DLLTOOL=${MINGW_DLLTOOL} \
WINDRES=${MINGW_WINDRES} \
TARGET_OS=Windows_NT
echo "Building OpenSSL."
cd $depsSrcDir
if [[ ! -d $sslSrcDir ]]; then
git clone --branch $sslTag --depth 1 https://github.com/openssl/openssl
cd $sslSrcDir
fi
cd $sslSrcDir
mkdir -p $sslDir
CROSS_COMPILE="${MINGW_PREFIX}" ./Configure \
mingw64 shared --prefix=$sslDir --libdir="$sslDir/lib"
_make depend
_make
_make install_sw
echo "Building boost."
cd $depsSrcDir
if [[ ! -d $boostSrcDir ]]; then
echo "Downloading boost."
wget -qO- $boostUrl | tar xz
fi
cd $boostSrcDir
echo "using gcc : mingw32 : ${MINGW_CXX} ;" > user-config.jam
# Workaround for https://github.com/boostorg/thread/issues/156
# Older versions of mingw provided a different pthread lib.
sed -i 's/lib$(libname)GC2.a/lib$(libname).a/g' ./libs/thread/build/Jamfile.v2
sed -i 's/mthreads/pthreads/g' ./tools/build/src/tools/gcc.py
sed -i 's/mthreads/pthreads/g' ./tools/build/src/tools/gcc.jam
sed -i 's/pthreads/mthreads/g' ./tools/build/src/tools/gcc.py
sed -i 's/pthreads/mthreads/g' ./tools/build/src/tools/gcc.jam
export PTW32_INCLUDE=${PTW32Include}
export PTW32_LIB=${PTW32Lib}
echo "Patching boost."
# Fix getting Windows page size
# TODO: send this upstream and maybe use a fork until it merges.
# Meanwhile, we might consider moving those to ceph/cmake/modules/BuildBoost.cmake.
# This cmake module will first have to be updated to support Mingw though.
patch -N boost/thread/pthread/thread_data.hpp <<EOL
--- boost/thread/pthread/thread_data.hpp 2019-10-11 15:26:15.678703586 +0300
+++ boost/thread/pthread/thread_data.hpp.new 2019-10-11 15:26:07.321463698 +0300
@@ -32,6 +32,10 @@
# endif
#endif
+#if defined(_WIN32)
+#include <windows.h>
+#endif
+
#include <pthread.h>
#include <unistd.h>
@@ -54,6 +58,10 @@
if (size==0) return;
#ifdef BOOST_THREAD_USES_GETPAGESIZE
std::size_t page_size = getpagesize();
+#elif _WIN32
+ SYSTEM_INFO system_info;
+ ::GetSystemInfo (&system_info);
+ std::size_t page_size = system_info.dwPageSize;
#else
std::size_t page_size = ::sysconf( _SC_PAGESIZE);
#endif
EOL
./bootstrap.sh
./b2 install --user-config=user-config.jam toolset=gcc-mingw32 \
target-os=windows release \
link=static,shared \
threadapi=win32 --prefix=$boostDir \
address-model=64 architecture=x86 \
binary-format=pe abi=ms -j $NUM_WORKERS \
-sZLIB_INCLUDE=$zlibDir/include -sZLIB_LIBRARY_PATH=$zlibDir/lib \
--without-python --without-mpi --without-log --without-wave
echo "Building libbacktrace."
cd $depsSrcDir
if [[ ! -d $backtraceSrcDir ]]; then
git clone --depth 1 https://github.com/ianlancetaylor/libbacktrace
fi
mkdir -p $backtraceSrcDir/build
cd $backtraceSrcDir/build
../configure --prefix=$backtraceDir --exec-prefix=$backtraceDir \
--host ${MINGW_BASE} --enable-host-shared \
--libdir="$backtraceDir/lib"
_make LDFLAGS="-no-undefined"
_make install
echo "Building snappy."
cd $depsSrcDir
if [[ ! -d $snappySrcDir ]]; then
git clone --branch $snappyTag --depth 1 https://github.com/google/snappy
cd $snappySrcDir
fi
mkdir -p $snappySrcDir/build
cd $snappySrcDir/build
cmake -DCMAKE_INSTALL_PREFIX=$snappyDir \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=ON \
-DSNAPPY_BUILD_TESTS=OFF \
-DSNAPPY_BUILD_BENCHMARKS=OFF \
-DCMAKE_TOOLCHAIN_FILE=$MINGW_CMAKE_FILE \
../
_make
_make install
cmake -DCMAKE_INSTALL_PREFIX=$snappyDir \
-DCMAKE_BUILD_TYPE=Release \
-DBUILD_SHARED_LIBS=OFF \
-DSNAPPY_BUILD_TESTS=OFF \
-DCMAKE_TOOLCHAIN_FILE=$MINGW_CMAKE_FILE \
../
_make
_make install
echo "Generating mswsock.lib."
# mswsock.lib is not provided by mingw, so we'll have to generate
# it.
mkdir -p $winLibDir
cat > $winLibDir/mswsock.def <<EOF
LIBRARY MSWSOCK.DLL
EXPORTS
AcceptEx@32
EnumProtocolsA@12
EnumProtocolsW@12
GetAcceptExSockaddrs@32
GetAddressByNameA@40
GetAddressByNameW@40
GetNameByTypeA@12
GetNameByTypeW@12
GetServiceA@28
GetServiceW@28
GetTypeByNameA@8
GetTypeByNameW@8
MigrateWinsockConfiguration@12
NPLoadNameSpaces@12
SetServiceA@24
SetServiceW@24
TransmitFile@28
WSARecvEx@16
dn_expand@20
getnetbyname@4
inet_network@4
rcmd@24
rexec@24
rresvport@4
s_perror@8
sethostname@8
EOF
$MINGW_DLLTOOL -d $winLibDir/mswsock.def \
-l $winLibDir/libmswsock.a
echo "Fetching libwnbd."
cd $depsSrcDir
if [[ ! -d $wnbdSrcDir ]]; then
git clone --branch $wnbdTag --depth 1 $wnbdUrl
fi
cd $wnbdSrcDir
mkdir -p $wnbdLibDir
$MINGW_DLLTOOL -d $wnbdSrcDir/libwnbd/libwnbd.def \
-D libwnbd.dll \
-l $wnbdLibDir/libwnbd.a
echo "Fetching dokany."
cd $depsSrcDir
if [[ ! -d $dokanSrcDir ]]; then
git clone --branch $dokanTag --depth 1 $dokanUrl
fi
mkdir -p $dokanLibDir
$MINGW_DLLTOOL -d $dokanSrcDir/dokan/dokan.def \
-l $dokanLibDir/libdokan.a
# That's probably the easiest way to deal with the dokan imports.
# dokan.h is defined in both ./dokan and ./sys while both are using
# sys/public.h without the "sys" prefix.
cp $dokanSrcDir/sys/public.h $dokanSrcDir/dokan
echo "Finished building Ceph dependencies."
touch $depsToolsetDir/completed
| 8,885 | 27.031546 | 95 | sh |
null | ceph-main/bin/git-archive-all.sh | #!/usr/bin/env bash
#
# File: git-archive-all.sh
#
# Description: A utility script that builds an archive file(s) of all
# git repositories and submodules in the current path.
# Useful for creating a single tarfile of a git super-
# project that contains other submodules.
#
# Examples: Use git-archive-all.sh to create archive distributions
# from git repositories. To use, simply do:
#
# cd $GIT_DIR; git-archive-all.sh
#
# where $GIT_DIR is the root of your git superproject.
#
# License: GPL3
#
###############################################################################
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
###############################################################################
# DEBUGGING
set -e
set -C # noclobber
# TRAP SIGNALS
trap 'cleanup' QUIT EXIT
# For security reasons, explicitly set the internal field separator
# to newline, space, tab
OLD_IFS=$IFS
IFS='
'
function cleanup () {
rm -rf $TMPDIR
IFS="$OLD_IFS"
}
function usage () {
echo "Usage is as follows:"
echo
echo "$PROGRAM <--version>"
echo " Prints the program version number on a line by itself and exits."
echo
echo "$PROGRAM <--usage|--help|-?>"
echo " Prints this usage output and exits."
echo
echo "$PROGRAM [--format <fmt>] [--prefix <path>] [--verbose|-v] [--separate|-s]"
echo " [--tree-ish|-t <tree-ish>] [--ignore pattern] [output_file]"
echo " Creates an archive for the entire git superproject, and its submodules"
echo " using the passed parameters, described below."
echo
echo " If '--format' is specified, the archive is created with the named"
echo " git archiver backend. Obviously, this must be a backend that git archive"
echo " understands. The format defaults to 'tar' if not specified."
echo
echo " If '--prefix' is specified, the archive's superproject and all submodules"
echo " are created with the <path> prefix named. The default is to not use one."
echo
echo " If '--separate' or '-s' is specified, individual archives will be created"
echo " for each of the superproject itself and its submodules. The default is to"
echo " concatenate individual archives into one larger archive."
echo
echo " If '--tree-ish' is specified, the archive will be created based on whatever"
echo " you define the tree-ish to be. Branch names, commit hash, etc. are acceptable."
echo " Defaults to HEAD if not specified. See git archive's documentation for more"
echo " information on what a tree-ish is."
echo
echo " If '--ignore' is specified, we will filter out any submodules that"
echo " match the specified pattern."
echo
echo " If 'output_file' is specified, the resulting archive is created as the"
echo " file named. This parameter is essentially a path that must be writeable."
echo " When combined with '--separate' ('-s') this path must refer to a directory."
echo " Without this parameter or when combined with '--separate' the resulting"
echo " archive(s) are named with a dot-separated path of the archived directory and"
echo " a file extension equal to their format (e.g., 'superdir.submodule1dir.tar')."
echo
echo " If '--verbose' or '-v' is specified, progress will be printed."
}
function version () {
echo "$PROGRAM version $VERSION"
}
# Internal variables and initializations.
readonly PROGRAM=`basename "$0"`
readonly VERSION=0.2
OLD_PWD="`pwd`"
TMPDIR=`mktemp -d "${TMPDIR:-/tmp}/$PROGRAM.XXXXXX"`
TMPFILE=`mktemp "$TMPDIR/$PROGRAM.XXXXXX"` # Create a place to store our work's progress
TOARCHIVE=`mktemp "$TMPDIR/$PROGRAM.toarchive.XXXXXX"`
OUT_FILE=$OLD_PWD # assume "this directory" without a name change by default
SEPARATE=0
VERBOSE=0
TARCMD=tar
[[ $(uname) == "Darwin" ]] && TARCMD=gnutar
FORMAT=tar
PREFIX=
TREEISH=HEAD
IGNORE=
# RETURN VALUES/EXIT STATUS CODES
readonly E_BAD_OPTION=254
readonly E_UNKNOWN=255
# Process command-line arguments.
while test $# -gt 0; do
case $1 in
--format )
shift
FORMAT="$1"
shift
;;
--prefix )
shift
PREFIX="$1"
shift
;;
--separate | -s )
shift
SEPARATE=1
;;
--tree-ish | -t )
shift
TREEISH="$1"
shift
;;
--ignore )
shift
IGNORE="$1"
shift
;;
--version )
version
exit
;;
--verbose | -v )
shift
VERBOSE=1
;;
-? | --usage | --help )
usage
exit
;;
-* )
echo "Unrecognized option: $1" >&2
usage
exit $E_BAD_OPTION
;;
* )
break
;;
esac
done
if [ ! -z "$1" ]; then
OUT_FILE="$1"
shift
fi
# Validate parameters; error early, error often.
if [ $SEPARATE -eq 1 -a ! -d $OUT_FILE ]; then
echo "When creating multiple archives, your destination must be a directory."
echo "If it's not, you risk being surprised when your files are overwritten."
exit
elif [ `git config -l | grep -q '^core\.bare=false'; echo $?` -ne 0 ]; then
echo "$PROGRAM must be run from a git working copy (i.e., not a bare repository)."
exit
fi
# Create the superproject's git-archive
if [ $VERBOSE -eq 1 ]; then
echo -n "creating superproject archive..."
fi
git archive --format=$FORMAT --prefix="$PREFIX" $TREEISH > $TMPDIR/$(basename "$(pwd)").$FORMAT
if [ $VERBOSE -eq 1 ]; then
echo "done"
fi
echo $TMPDIR/$(basename "$(pwd)").$FORMAT >| $TMPFILE # clobber on purpose
superfile=`head -n 1 $TMPFILE`
if [ $VERBOSE -eq 1 ]; then
echo -n "looking for subprojects..."
fi
# find all '.git' dirs, these show us the remaining to-be-archived dirs
# we only want directories that are below the current directory
find . -mindepth 2 -name '.git' -type d -print | sed -e 's/^\.\///' -e 's/\.git$//' >> $TOARCHIVE
# as of version 1.7.8, git places the submodule .git directories under the superproject's .git dir
# the submodules get a .git file that points to their .git dir. we need to find all of these too
find . -mindepth 2 -name '.git' -type f -print | xargs grep -l "gitdir" | sed -e 's/^\.\///' -e 's/\.git$//' >> $TOARCHIVE
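# (a submodule's .git file holds a single "gitdir: <path>" line pointing at the
# real .git directory, which is what the "gitdir" grep above relies on)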
if [ -n "$IGNORE" ]; then
cat $TOARCHIVE | grep -v $IGNORE > $TOARCHIVE.new
mv $TOARCHIVE.new $TOARCHIVE
fi
if [ $VERBOSE -eq 1 ]; then
echo "done"
echo " found:"
cat $TOARCHIVE | while read arch
do
echo " $arch"
done
fi
if [ $VERBOSE -eq 1 ]; then
echo -n "archiving submodules..."
fi
while read path; do
TREEISH=$(git submodule | grep "^ .*${path%/} " | cut -d ' ' -f 2) # git submodule does not list trailing slashes in $path
cd "$path"
git archive --format=$FORMAT --prefix="${PREFIX}$path" ${TREEISH:-HEAD} > "$TMPDIR"/"$(echo "$path" | sed -e 's/\//./g')"$FORMAT
if [ $FORMAT == 'zip' ]; then
# delete the empty directory entry; zipped submodules won't unzip if we don't do this
zip -d "$(tail -n 1 $TMPFILE)" "${PREFIX}${path%/}" >/dev/null # remove trailing '/'
fi
echo "$TMPDIR"/"$(echo "$path" | sed -e 's/\//./g')"$FORMAT >> $TMPFILE
cd "$OLD_PWD"
done < $TOARCHIVE
if [ $VERBOSE -eq 1 ]; then
echo "done"
fi
if [ $VERBOSE -eq 1 ]; then
echo -n "concatenating archives into single archive..."
fi
# Concatenate archives into a super-archive.
if [ $SEPARATE -eq 0 ]; then
if [ $FORMAT == 'tar' ]; then
sed -e '1d' $TMPFILE | while read file; do
$TARCMD --concatenate -f "$superfile" "$file" && rm -f "$file"
done
elif [ $FORMAT == 'zip' ]; then
sed -e '1d' $TMPFILE | while read file; do
# zip incorrectly stores the full path, so cd and then grow
cd `dirname "$file"`
zip -g "$superfile" `basename "$file"` && rm -f "$file"
done
cd "$OLD_PWD"
fi
echo "$superfile" >| $TMPFILE # clobber on purpose
fi
if [ $VERBOSE -eq 1 ]; then
echo "done"
fi
if [ $VERBOSE -eq 1 ]; then
echo -n "moving archive to $OUT_FILE..."
fi
while read file; do
mv "$file" "$OUT_FILE"
done < $TMPFILE
if [ $VERBOSE -eq 1 ]; then
echo "done"
fi
| 9,140 | 31.073684 | 132 | sh |
null | ceph-main/ceph-menv/build_links.sh | #!/bin/bash
DIR=`dirname $0`
ROOT=$1
[ "$ROOT" == "" ] && ROOT="$HOME/ceph"
mkdir -p $DIR/bin
echo $PWD
for f in `ls $ROOT/build/bin`; do
echo $f
ln -sf ../mdo.sh $DIR/bin/$f
done
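# e.g. (illustrative) after this loop, "bin/ceph" and friends are symlinks to
# mdo.sh, which dispatches each command to the cluster selected via MRUN_CLUSTER.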
echo "MRUN_CEPH_ROOT=$ROOT" > $DIR/.menvroot
| 246 | 13.529412 | 44 | sh |
null | ceph-main/ceph-menv/mdo.sh | #!/bin/bash
cmd=`basename $0`
MENV_ROOT=`dirname $0`/..
if [ -f $MENV_ROOT/.menvroot ]; then
. $MENV_ROOT/.menvroot
fi
[ "$MRUN_CEPH_ROOT" == "" ] && MRUN_CEPH_ROOT=$HOME/ceph
if [ "$MRUN_CLUSTER" == "" ]; then
${MRUN_CEPH_ROOT}/build/bin/$cmd "$@"
exit $?
fi
${MRUN_CEPH_ROOT}/src/mrun $MRUN_CLUSTER $cmd "$@"
| 327 | 18.294118 | 56 | sh |
null | ceph-main/ceph-menv/mset.sh | get_color() {
s=$1
sum=1 # offset so that 'c1' doesn't map to a green that blends into the rest of my prompt
for i in `seq 1 ${#s}`; do
c=${s:$((i-1)):1};
o=`printf '%d' "'$c"`
sum=$((sum+$o))
done
echo $sum
}
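# Assumed usage: source this file with an optional cluster name, e.g.
#   . mset.sh c1    # select cluster "c1" and set a prompt label/color for it
#   . mset.sh       # unset MRUN_CLUSTER and fall back to the default cluster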
if [ "$1" == "" ]; then
unset MRUN_CLUSTER
unset MRUN_PROMPT
else
export MRUN_CLUSTER=$1
export MRUN_PROMPT='['${MRUN_CLUSTER}'] '
col=$(get_color $1)
MRUN_PROMPT_COLOR=$((col%7+31))
fi
| 533 | 23.272727 | 96 | sh |
null | ceph-main/cmake/modules/FindStdFilesystem_test.cc | #include <filesystem>
namespace fs = std::filesystem;
int main() {
fs::create_directory("sandbox");
fs::remove_all("sandbox");
}
| 139 | 14.555556 | 36 | cc |
null | ceph-main/cmake/modules/patch-dpdk-conf.sh | #!/bin/sh
# -*- mode:sh; tab-width:4; indent-tabs-mode:nil -*
setconf() {
local key=$1
local val=$2
if grep -q ^$key= ${conf}; then
sed -i -e "s:^$key=.*$:$key=$val:g" ${conf}
else
echo $key=$val >> ${conf}
fi
}
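# Assumed usage: patch-dpdk-conf.sh <dpdk-build-dir> <machine> <arch> <numa>
# e.g. (illustrative) patch-dpdk-conf.sh ./dpdk default x86_64 y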
conf=$1/.config
shift
machine=$1
shift
arch=$1
shift
numa=$1
shift
setconf CONFIG_RTE_MACHINE "${machine}"
setconf CONFIG_RTE_ARCH "${arch}"
# Disable experimental features
setconf CONFIG_RTE_NEXT_ABI n
setconf CONFIG_RTE_LIBRTE_MBUF_OFFLOAD n
# Disable unmaintained features
setconf CONFIG_RTE_LIBRTE_POWER n
setconf CONFIG_RTE_EAL_IGB_UIO n
setconf CONFIG_RTE_LIBRTE_KNI n
setconf CONFIG_RTE_KNI_KMOD n
setconf CONFIG_RTE_KNI_PREEMPT_DEFAULT n
# no pdump
setconf CONFIG_RTE_LIBRTE_PDUMP n
# no vm support
setconf CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT n
setconf CONFIG_RTE_LIBRTE_VHOST n
setconf CONFIG_RTE_LIBRTE_VHOST_NUMA n
setconf CONFIG_RTE_LIBRTE_VMXNET3_PMD n
setconf CONFIG_RTE_LIBRTE_PMD_VHOST n
setconf CONFIG_RTE_APP_EVENTDEV n
setconf CONFIG_RTE_MAX_VFIO_GROUPS 64
# no test
setconf CONFIG_RTE_APP_TEST n
setconf CONFIG_RTE_TEST_PMD n
# async/dpdk does not like it
setconf CONFIG_RTE_MBUF_REFCNT_ATOMIC n
# balanced allocation of hugepages
setconf CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES "${numa}"
| 1,272 | 20.948276 | 54 | sh |
null | ceph-main/doc/_templates/page.html | {% extends "!page.html" %}
{% block body %}
{%- if release == 'dev' %}
<div id="dev-warning" class="admonition note">
<p class="first admonition-title">Notice</p>
<p class="last">This document is for a development version of Ceph.</p>
</div>
{%- endif %}
{%- if is_release_eol %}
<div id="eol-warning" class="admonition warning">
<p class="first admonition-title">Warning</p>
<p class="last">This document is for an unsupported version of Ceph.</p>
</div>
{%- endif %}
{%- if not is_release_eol %}
<div id="docubetter" align="right" style="padding: 5px; font-weight: bold;">
<a href="https://pad.ceph.com/p/Report_Documentation_Bugs">Report a Documentation Bug</a>
</div>
{%- endif %}
{{ super() }}
{% endblock %}
| 736 | 27.346154 | 93 | html |
null | ceph-main/doc/_templates/smarttoc.html | {#
Sphinx sidebar template: smart table of contents.
Shows a sidebar ToC that gives you a more global view of the
documentation, and not the confusing cur/prev/next which is the
default sidebar.
The ToC will open and collapse automatically to show the part of the
hierarchy you are in. Top-level items will always be visible.
#}
<h3><a href="{{ pathto(master_doc) }}">{{ _('Table Of Contents') }}</a></h3>
{{ toctree(maxdepth=-1, includehidden=True) }}
<!-- ugly kludge to make genindex look like it's part of the toc -->
<ul style="margin-top: -10px"><li class="toctree-l1"><a class="reference internal" href="{{ pathto('genindex') }}">Index</a></li></ul>
| 685 | 39.352941 | 134 | html |
null | ceph-main/doc/_themes/ceph/layout.html | {# TEMPLATE VAR SETTINGS #}
{%- set url_root = pathto('', 1) %}
{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
{%- if not embedded and docstitle %}
{%- set titlesuffix = " — "|safe + docstitle|e %}
{%- else %}
{%- set titlesuffix = "" %}
{%- endif %}
{%- set lang_attr = 'en' if language == None else (language | replace('_', '-')) %}
{%- set sphinx_writer = 'writer-html5' if html5_doctype else 'writer-html4' %}
<!DOCTYPE html>
<html class="{{ sphinx_writer }}" lang="{{ lang_attr }}" >
<head>
<meta charset="utf-8" />
{{ metatags }}
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
{% block htmltitle %}
<title>{{ title|striptags|e }}{{ titlesuffix }}</title>
{% endblock %}
{# CSS #}
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
<link rel="stylesheet" href="{{ pathto('_static/pygments.css', 1) }}" type="text/css" />
{%- for css in css_files %}
{%- if css|attr("rel") %}
<link rel="{{ css.rel }}" href="{{ pathto(css.filename, 1) }}" type="text/css"{% if css.title is not none %} title="{{ css.title }}"{% endif %} />
{%- else %}
<link rel="stylesheet" href="{{ pathto(css, 1) }}" type="text/css" />
{%- endif %}
{%- endfor %}
{%- for cssfile in extra_css_files %}
<link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" />
{%- endfor %}
{# FAVICON #}
{% if favicon %}
<link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1) }}"/>
{% endif %}
{# CANONICAL URL (deprecated) #}
{% if theme_canonical_url and not pageurl %}
<link rel="canonical" href="{{ theme_canonical_url }}{{ pagename }}.html"/>
{% endif %}
{# CANONICAL URL #}
{%- if pageurl %}
<link rel="canonical" href="{{ pageurl|e }}" />
{%- endif %}
{# JAVASCRIPTS #}
{%- block scripts %}
<!--[if lt IE 9]>
<script src="{{ pathto('_static/js/html5shiv.min.js', 1) }}"></script>
<![endif]-->
{%- if not embedded %}
{# XXX Sphinx 1.8.0 made this an external js-file, quick fix until we refactor the template to inherit more blocks directly from sphinx #}
{% if sphinx_version >= "1.8.0" %}
<script type="text/javascript" id="documentation_options" data-url_root="{{ url_root }}" src="{{ pathto('_static/documentation_options.js', 1) }}"></script>
{%- for scriptfile in script_files %}
{{ js_tag(scriptfile) }}
{%- endfor %}
{% else %}
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT:'{{ url_root }}',
VERSION:'{{ release|e }}',
LANGUAGE:'{{ language }}',
COLLAPSE_INDEX:false,
FILE_SUFFIX:'{{ '' if no_search_suffix else file_suffix }}',
HAS_SOURCE: {{ has_source|lower }},
SOURCELINK_SUFFIX: '{{ sourcelink_suffix }}'
};
</script>
{%- for scriptfile in script_files %}
<script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script>
{%- endfor %}
{% endif %}
<script type="text/javascript" src="{{ pathto('_static/js/theme.js', 1) }}"></script>
{# OPENSEARCH #}
{%- if use_opensearch %}
<link rel="search" type="application/opensearchdescription+xml"
title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}"
href="{{ pathto('_static/opensearch.xml', 1) }}"/>
{%- endif %}
{%- endif %}
{%- endblock %}
{%- block linktags %}
{%- if hasdoc('about') %}
<link rel="author" title="{{ _('About these documents') }}" href="{{ pathto('about') }}" />
{%- endif %}
{%- if hasdoc('genindex') %}
<link rel="index" title="{{ _('Index') }}" href="{{ pathto('genindex') }}" />
{%- endif %}
{%- if hasdoc('search') %}
<link rel="search" title="{{ _('Search') }}" href="{{ pathto('search') }}" />
{%- endif %}
{%- if hasdoc('copyright') %}
<link rel="copyright" title="{{ _('Copyright') }}" href="{{ pathto('copyright') }}" />
{%- endif %}
{%- if next %}
<link rel="next" title="{{ next.title|striptags|e }}" href="{{ next.link|e }}" />
{%- endif %}
{%- if prev %}
<link rel="prev" title="{{ prev.title|striptags|e }}" href="{{ prev.link|e }}" />
{%- endif %}
{%- endblock %}
{%- block extrahead %} {% endblock %}
</head>
<body class="wy-body-for-nav">
{% block extrabody %} {% endblock %}
<header class="top-bar">
{% include "breadcrumbs.html" %}
</header>
<div class="wy-grid-for-nav">
{# SIDE NAV, TOGGLES ON MOBILE #}
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
<div class="wy-side-scroll">
<div class="wy-side-nav-search" {% if theme_style_nav_header_background %} style="background: {{theme_style_nav_header_background}}" {% endif %}>
{% block sidebartitle %}
{% if logo and theme_logo_only %}
<a href="{{ pathto(master_doc) }}">
{% else %}
<a href="{{ pathto(master_doc) }}" class="icon icon-home"> {{ project }}
{% endif %}
{% if logo %}
{# Not strictly valid HTML, but it's the only way to display/scale
it properly, without weird scripting or heaps of work
#}
<img src="{{ pathto('_static/' + logo, 1) }}" class="logo" alt="{{ _('Logo') }}"/>
{% endif %}
</a>
{% if theme_display_version %}
{%- set nav_version = version %}
{% if READTHEDOCS and current_version %}
{%- set nav_version = current_version %}
{% endif %}
{% if nav_version %}
<div class="version">
{{ nav_version }}
</div>
{% endif %}
{% endif %}
{% include "searchbox.html" %}
{% endblock %}
</div>
{% block navigation %}
<div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
{% block menu %}
{#
The singlehtml builder doesn't handle this toctree call when the
toctree is empty. Skip building this for now.
#}
{% if 'singlehtml' not in builder %}
{% set global_toc = toctree(maxdepth=theme_navigation_depth|int,
collapse=theme_collapse_navigation|tobool,
includehidden=theme_includehidden|tobool,
titles_only=theme_titles_only|tobool) %}
{% endif %}
{% if global_toc %}
{{ global_toc }}
{% else %}
<!-- Local TOC -->
<div class="local-toc">{{ toc }}</div>
{% endif %}
{% endblock %}
</div>
{% endblock %}
</div>
</nav>
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
{# MOBILE NAV, TRIGGERS SIDE NAV ON TOGGLE #}
<nav class="wy-nav-top" aria-label="top navigation">
{% block mobile_nav %}
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
<a href="{{ pathto(master_doc) }}">{{ project }}</a>
{% endblock %}
</nav>
<div class="wy-nav-content">
{%- block content %}
{% if theme_style_external_links|tobool %}
<div class="rst-content style-external-links">
{% else %}
<div class="rst-content">
{% endif %}
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
{%- block document %}
<div itemprop="articleBody">
{% block body %}{% endblock %}
</div>
{% if self.comments()|trim %}
<div class="articleComments">
{% block comments %}{% endblock %}
</div>
{% endif%}
</div>
{%- endblock %}
{% include "footer.html" %}
</div>
{%- endblock %}
</div>
</section>
</div>
{% include "versions.html" %}
<script type="text/javascript">
jQuery(function () {
SphinxRtdTheme.Navigation.enable({{ 'true' if theme_sticky_navigation|tobool else 'false' }});
});
</script>
{# Do not conflict with RTD insertion of analytics script #}
{% if not READTHEDOCS %}
{% if theme_analytics_id %}
<!-- Theme Analytics -->
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', '{{ theme_analytics_id }}', 'auto');
{% if theme_analytics_anonymize_ip|tobool %}
ga('set', 'anonymizeIp', true);
{% endif %}
ga('send', 'pageview');
</script>
{% endif %}
{% endif %}
{%- block footer %} {% endblock %}
</body>
</html>
| 9,080 | 34.893281 | 162 | html |
null | ceph-main/examples/librados/hello_radosstriper.cc | #include "rados/librados.hpp"
#include "radosstriper/libradosstriper.hpp"
#include <iostream>
#include <string>
int main(int argc, char* argv[])
{
if(argc != 6)
{
std::cout <<"Please put in correct params\n"<<
"Stripe Count:\n"<<
"Object Size:\n" <<
"File Name:\n" <<
"Object Name:\n"
"Pool Name:"<< std::endl;
return EXIT_FAILURE;
}
uint32_t strip_count = std::stoi(argv[1]);
uint32_t obj_size = std::stoi(argv[2]);
std::string fname = argv[3];
std::string obj_name = argv[4];
std::string pool_name = argv[5];
int ret = 0;
librados::IoCtx io_ctx;
librados::Rados cluster;
libradosstriper::RadosStriper* rs = new libradosstriper::RadosStriper;
// make sure the keyring file is in /etc/ceph/ and is world readable
ret = cluster.init2("client.admin","ceph",0);
if( ret < 0)
{
std::cerr << "Couldn't init cluster "<< ret << std::endl;
}
// make sure ceph.conf is in /etc/ceph/ and is world readable
ret = cluster.conf_read_file("ceph.conf");
if( ret < 0)
{
std::cerr << "Couldn't read conf file "<< ret << std::endl;
}
ret = cluster.connect();
if(ret < 0)
{
std::cerr << "Couldn't connect to cluster "<< ret << std::endl;
}
else
{
std::cout << "Connected to Cluster"<< std::endl;
}
ret = cluster.ioctx_create(pool_name.c_str(), io_ctx);
if(ret < 0)
{
std::cerr << "Couldn't Create IO_CTX"<< ret << std::endl;
}
ret = libradosstriper::RadosStriper::striper_create(io_ctx,rs);
if(ret < 0)
{
std::cerr << "Couldn't Create RadosStriper"<< ret << std::endl;
delete rs;
}
uint64_t alignment = 0;
ret = io_ctx.pool_required_alignment2(&alignment);
if(ret < 0)
{
std::cerr << "IO_CTX didn't give alignment "<< ret
<< "\n Is this an erasure coded pool? "<< std::endl;
delete rs;
io_ctx.close();
cluster.shutdown();
return EXIT_FAILURE;
}
std::cout << "Pool alignment: "<< alignment << std::endl;
rs->set_object_layout_stripe_unit(alignment);
// how many objects are we striping across?
rs->set_object_layout_stripe_count(strip_count);
// how big should each object be?
rs->set_object_layout_object_size(obj_size);
std::string err = "no_err";
librados::bufferlist bl;
bl.read_file(fname.c_str(),&err);
if(err != "no_err")
{
std::cout << "Error reading file into bufferlist: "<< err << std::endl;
delete rs;
io_ctx.close();
cluster.shutdown();
return EXIT_FAILURE;
}
std::cout << "Writing: " << fname << "\nas: "<< obj_name << std::endl;
rs->write_full(obj_name,bl);
std::cout << "done with: " << fname << std::endl;
delete rs;
io_ctx.close();
cluster.shutdown();
}
| 2,756 | 25.76699 | 77 | cc |
null | ceph-main/examples/librados/hello_world.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
* Copyright 2013 Inktank
*/
// install the librados-dev package to get this
#include <rados/librados.hpp>
#include <iostream>
#include <string>
int main(int argc, const char **argv)
{
int ret = 0;
// we will use all of these below
const char *pool_name = "hello_world_pool";
std::string hello("hello world!");
std::string object_name("hello_object");
librados::IoCtx io_ctx;
// first, we create a Rados object and initialize it
librados::Rados rados;
{
ret = rados.init("admin"); // just use the client.admin keyring
if (ret < 0) { // let's handle any error that might have come back
std::cerr << "couldn't initialize rados! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we just set up a rados cluster object" << std::endl;
}
/*
* Now we need to get the rados object its config info. It can
* parse argv for us to find the id, monitors, etc, so let's just
* use that.
*/
{
ret = rados.conf_parse_argv(argc, argv);
if (ret < 0) {
// This really can't happen, but we need to check to be a good citizen.
std::cerr << "failed to parse config options! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we just parsed our config options" << std::endl;
// We also want to apply the config file if the user specified
// one, and conf_parse_argv won't do that for us.
for (int i = 0; i < argc; ++i) {
if ((strcmp(argv[i], "-c") == 0) || (strcmp(argv[i], "--conf") == 0)) {
ret = rados.conf_read_file(argv[i+1]);
if (ret < 0) {
// This could fail if the config file is malformed, but it'd be hard.
std::cerr << "failed to parse config file " << argv[i+1]
<< "! error" << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
break;
}
}
}
/*
* next, we actually connect to the cluster
*/
{
ret = rados.connect();
if (ret < 0) {
std::cerr << "couldn't connect to cluster! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we just connected to the rados cluster" << std::endl;
}
/*
* let's create our own pool instead of scribbling over real data.
* Note that this command creates pools with default PG counts specified
* by the monitors, which may not be appropriate for real use -- it's fine
* for testing, though.
*/
{
ret = rados.pool_create(pool_name);
if (ret < 0) {
std::cerr << "couldn't create pool! error " << ret << std::endl;
return EXIT_FAILURE;
}
std::cout << "we just created a new pool named " << pool_name << std::endl;
}
/*
* create an "IoCtx" which is used to do IO to a pool
*/
{
ret = rados.ioctx_create(pool_name, io_ctx);
if (ret < 0) {
std::cerr << "couldn't set up ioctx! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we just created an ioctx for our pool" << std::endl;
}
/*
* now let's do some IO to the pool! We'll write "hello world!" to a
* new object.
*/
{
/*
* "bufferlist"s are Ceph's native transfer type, and are carefully
* designed to be efficient about copying. You can fill them
* up from a lot of different data types, but strings or c strings
* are often convenient. Just make sure not to deallocate the memory
* until the bufferlist goes out of scope and any requests using it
* have been finished!
*/
librados::bufferlist bl;
bl.append(hello);
/*
* now that we have the data to write, let's send it to an object.
* We'll use the synchronous interface for simplicity.
*/
ret = io_ctx.write_full(object_name, bl);
if (ret < 0) {
std::cerr << "couldn't write object! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we just wrote new object " << object_name
<< ", with contents\n" << hello << std::endl;
}
/*
* now let's read that object back! Just for fun, we'll do it using
* async IO instead of synchronous. (This would be more useful if we
* wanted to send off multiple reads at once; see
* http://docs.ceph.com/docs/master/rados/api/librados/#asychronous-io )
*/
{
librados::bufferlist read_buf;
int read_len = 4194304; // this is way more than we need
// allocate the completion from librados
librados::AioCompletion *read_completion = librados::Rados::aio_create_completion();
// send off the request.
ret = io_ctx.aio_read(object_name, read_completion, &read_buf, read_len, 0);
if (ret < 0) {
std::cerr << "couldn't start read object! error " << ret << std::endl;
ret = EXIT_FAILURE;
read_completion->release();
goto out;
}
// wait for the request to complete, and check that it succeeded.
read_completion->wait_for_complete();
ret = read_completion->get_return_value();
if (ret < 0) {
std::cerr << "couldn't read object! error " << ret << std::endl;
ret = EXIT_FAILURE;
read_completion->release();
goto out;
}
std::cout << "we read our object " << object_name
<< ", and got back " << ret << " bytes with contents\n";
std::string read_string;
read_buf.begin().copy(ret, read_string);
std::cout << read_string << std::endl;
read_completion->release();
}
/*
* We can also use xattrs that go alongside the object.
*/
{
librados::bufferlist version_bl;
version_bl.append('1');
ret = io_ctx.setxattr(object_name, "version", version_bl);
if (ret < 0) {
std::cerr << "failed to set xattr version entry! error "
<< ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we set the xattr 'version' on our object!" << std::endl;
}
/*
* And if we want to be really cool, we can do multiple things in a single
* atomic operation. For instance, we can update the contents of our object
* and set the version at the same time.
*/
{
librados::bufferlist bl;
bl.append(hello);
bl.append("v2");
librados::ObjectWriteOperation write_op;
write_op.write_full(bl);
librados::bufferlist version_bl;
version_bl.append('2');
write_op.setxattr("version", version_bl);
ret = io_ctx.operate(object_name, &write_op);
if (ret < 0) {
std::cerr << "failed to do compound write! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we overwrote our object " << object_name
<< " with contents\n" << bl.c_str() << std::endl;
}
/*
* And to be even cooler, we can make sure that the object looks the
* way we expect before doing the write! Notice how this attempt fails
* because the xattr differs.
*/
{
librados::ObjectWriteOperation failed_write_op;
librados::bufferlist bl;
bl.append(hello);
bl.append("v2");
librados::ObjectWriteOperation write_op;
write_op.write_full(bl);
librados::bufferlist version_bl;
version_bl.append('2');
librados::bufferlist old_version_bl;
old_version_bl.append('1');
failed_write_op.cmpxattr("version", LIBRADOS_CMPXATTR_OP_EQ, old_version_bl);
failed_write_op.write_full(bl);
failed_write_op.setxattr("version", version_bl);
ret = io_ctx.operate(object_name, &failed_write_op);
if (ret < 0) {
std::cout << "we just failed a write because the xattr wasn't as specified"
<< std::endl;
} else {
std::cerr << "we succeeded on writing despite an xattr comparison mismatch!"
<< std::endl;
ret = EXIT_FAILURE;
goto out;
}
/*
* Now let's do the update with the correct xattr values so it
* actually goes through
*/
bl.clear();
bl.append(hello);
bl.append("v3");
old_version_bl.clear();
old_version_bl.append('2');
version_bl.clear();
version_bl.append('3');
librados::ObjectWriteOperation update_op;
update_op.cmpxattr("version", LIBRADOS_CMPXATTR_OP_EQ, old_version_bl);
update_op.write_full(bl);
update_op.setxattr("version", version_bl);
ret = io_ctx.operate(object_name, &update_op);
if (ret < 0) {
std::cerr << "failed to do a compound write update! error " << ret
<< std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::cout << "we overwrote our object " << object_name
<< " following an xattr test with contents\n" << bl.c_str()
<< std::endl;
}
ret = EXIT_SUCCESS;
out:
/*
* And now we're done, so let's remove our pool and then
* shut down the connection gracefully.
*/
int delete_ret = rados.pool_delete(pool_name);
if (delete_ret < 0) {
// be careful not to return success if cleanup of the test pool failed
std::cerr << "We failed to delete our test pool!" << std::endl;
ret = EXIT_FAILURE;
}
rados.shutdown();
return ret;
}
| 9,293 | 30.720137 | 88 | cc |
null | ceph-main/examples/librbd/hello_world.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
// install the librados-dev and librbd package to get this
#include <rados/librados.hpp>
#include <rbd/librbd.hpp>
#include <iostream>
#include <string>
#include <sstream>
int main(int argc, const char **argv)
{
int ret = 0;
// we will use all of these below
const char *pool_name = "hello_world_pool";
std::string hello("hello world!");
std::string object_name("hello_object");
librados::IoCtx io_ctx;
// first, we create a Rados object and initialize it
librados::Rados rados;
{
ret = rados.init("admin"); // just use the client.admin keyring
if (ret < 0) { // let's handle any error that might have come back
std::cerr << "couldn't initialize rados! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just set up a rados cluster object" << std::endl;
}
}
/*
* Now we need to get the rados object its config info. It can
* parse argv for us to find the id, monitors, etc, so let's just
* use that.
*/
{
ret = rados.conf_parse_argv(argc, argv);
if (ret < 0) {
// This really can't happen, but we need to check to be a good citizen.
std::cerr << "failed to parse config options! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just parsed our config options" << std::endl;
// We also want to apply the config file if the user specified
// one, and conf_parse_argv won't do that for us.
for (int i = 0; i < argc; ++i) {
if ((strcmp(argv[i], "-c") == 0) || (strcmp(argv[i], "--conf") == 0)) {
ret = rados.conf_read_file(argv[i+1]);
if (ret < 0) {
// This could fail if the config file is malformed, but it'd be hard.
std::cerr << "failed to parse config file " << argv[i+1]
<< "! error" << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
}
break;
}
}
}
}
/*
* next, we actually connect to the cluster
*/
{
ret = rados.connect();
if (ret < 0) {
std::cerr << "couldn't connect to cluster! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just connected to the rados cluster" << std::endl;
}
}
/*
* let's create our own pool instead of scribbling over real data.
* Note that this command creates pools with default PG counts specified
* by the monitors, which may not be appropriate for real use -- it's fine
* for testing, though.
*/
{
ret = rados.pool_create(pool_name);
if (ret < 0) {
std::cerr << "couldn't create pool! error " << ret << std::endl;
return EXIT_FAILURE;
} else {
std::cout << "we just created a new pool named " << pool_name << std::endl;
}
}
/*
* create an "IoCtx" which is used to do IO to a pool
*/
{
ret = rados.ioctx_create(pool_name, io_ctx);
if (ret < 0) {
std::cerr << "couldn't set up ioctx! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just created an ioctx for our pool" << std::endl;
}
}
/*
* create an rbd image and write data to it
*/
{
std::string name = "librbd_test";
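    // 2 MiB image; passing order == 0 lets librbd choose its default object size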
uint64_t size = 2 << 20;
int order = 0;
librbd::RBD rbd;
librbd::Image image;
ret = rbd.create(io_ctx, name.c_str(), size, &order);
if (ret < 0) {
std::cerr << "couldn't create an rbd image! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just created an rbd image" << std::endl;
}
ret = rbd.open(io_ctx, image, name.c_str(), NULL);
if (ret < 0) {
std::cerr << "couldn't open the rbd image! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just opened the rbd image" << std::endl;
}
int TEST_IO_SIZE = 512;
char test_data[TEST_IO_SIZE + 1];
int i;
for (i = 0; i < TEST_IO_SIZE; ++i) {
test_data[i] = (char) (rand() % (126 - 33) + 33);
}
test_data[TEST_IO_SIZE] = '\0';
size_t len = strlen(test_data);
ceph::bufferlist bl;
bl.append(test_data, len);
ret = image.write(0, len, bl);
if (ret < 0) {
std::cerr << "couldn't write to the rbd image! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just wrote data to our rbd image " << std::endl;
}
/*
* let's read the image and compare it to the data we wrote
*/
ceph::bufferlist bl_r;
int read;
read = image.read(0, TEST_IO_SIZE, bl_r);
if (read < 0) {
std::cerr << "we couldn't read data from the image! error" << std::endl;
ret = EXIT_FAILURE;
goto out;
}
std::string bl_res(bl_r.c_str(), read);
int res = memcmp(bl_res.c_str(), test_data, TEST_IO_SIZE);
if (res != 0) {
std::cerr << "what we read didn't match expected! error" << std::endl;
} else {
std::cout << "we read our data on the image successfully" << std::endl;
}
image.close();
/*
     * let's now delete the image
*/
ret = rbd.remove(io_ctx, name.c_str());
if (ret < 0) {
std::cerr << "failed to delete rbd image! error " << ret << std::endl;
ret = EXIT_FAILURE;
goto out;
} else {
std::cout << "we just deleted our rbd image " << std::endl;
}
}
ret = EXIT_SUCCESS;
out:
/*
* And now we're done, so let's remove our pool and then
* shut down the connection gracefully.
*/
int delete_ret = rados.pool_delete(pool_name);
if (delete_ret < 0) {
    // be careful not to return the raw (negative) error code from main;
    // just flag the failure
std::cerr << "We failed to delete our test pool!" << std::endl;
ret = EXIT_FAILURE;
}
rados.shutdown();
return ret;
}
| 6,157 | 26.864253 | 81 | cc |
null | ceph-main/examples/rgw/rgw_admin_curl.sh | #!/usr/bin/env bash
show_help()
{
echo "Usage: `basename $0` -a <access-key> -s <secret-key>" \
"-e <rgw-endpoint> -r <http-request>" \
"-p <admin-resource> -q \"<http-query-string>\""
echo " -a Access key of rgw user"
echo " -s Secret key of rgw user"
echo " -e RGW endpoint in <ipaddr:port> format"
echo " -r HTTP request type GET/PUT/DELETE"
echo " -p RGW admin resource e.g user, bucket etc"
echo " -q HTTP query string"
echo " -j (Optional) Print output in pretty JSON format"
echo " Examples :"
echo " - To create rgw user"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r PUT -p user" \
"-q \"uid=admin&display-name=Administrator\""
echo " - To get rgw user info"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r GET -p user -q \"uid=admin\""
echo " - To list buckets"
echo " (List all buckets)"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r GET -p bucket"
echo " (For specific rgw user)"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r GET -p bucket -q \"uid=admin\""
echo " - To delete bucket"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r DELETE -p bucket -q \"bucket=foo\""
echo " - To delete rgw user"
echo " # `basename $0` -a ABCD1234EFGH5678IJ90" \
"-s klmnopqrstuvwxyz12345ABCD987654321efghij" \
"-e 10.0.0.1:8080 -r DELETE -p user -q \"uid=admin\""
exit 1
}
access_key=""
secret_key=""
rgw_endpoint=""
http_request=""
admin_resource=""
http_query=""
use_jq=false
while getopts "a:s:e:r:p:q:j" opt; do
case "$opt" in
a)
access_key=${OPTARG}
;;
s) secret_key=${OPTARG}
;;
e) rgw_endpoint=${OPTARG}
;;
r) http_request=${OPTARG}
;;
p) admin_resource=${OPTARG}
;;
q) http_query=${OPTARG}
;;
j) use_jq=true
;;
*)
show_help
exit 1
;;
esac
done
shift $((OPTIND-1))
if [ -z "${access_key}" ] || [ -z "${secret_key}" ] || \
[ -z "${rgw_endpoint}" ] || [ -z "${http_request}" ] || \
[ -z "${admin_resource}" ] || [ -z "${http_query}" ]; then
if [ "${http_request}" = "GET" ] && [ "${admin_resource}" = "bucket" ] && \
[ -z "${http_query}" ]; then
:
else
show_help
fi
fi
resource="/admin/${admin_resource}"
contentType="application/x-compressed-tar"
dateTime=`date -R -u`
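# Sign the request the way S3 clients do with AWS auth v2: build the
# string-to-sign from the verb, content type, date and resource, then
# HMAC-SHA1 it with the secret key and base64-encode the digest.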
headerToSign="${http_request}
${contentType}
${dateTime}
${resource}"
signature=`echo -en "$headerToSign" | \
openssl sha1 -hmac ${secret_key} -binary | base64`
if "$use_jq";
then
curl -X ${http_request} -H "Content-Type: ${contentType}" -H "Date: ${dateTime}" \
-H "Authorization: AWS ${access_key}:${signature}" -H "Host: ${rgw_endpoint}" \
"http://${rgw_endpoint}${resource}?${http_query}" 2> /dev/null|jq "."
else
curl -X ${http_request} -H "Content-Type: ${contentType}" -H "Date: ${dateTime}" \
-H "Authorization: AWS ${access_key}:${signature}" -H "Host: ${rgw_endpoint}" \
"http://${rgw_endpoint}${resource}?${http_query}"
fi
echo ""
| 3,813 | 32.752212 | 88 | sh |
null | ceph-main/fusetrace/fusetrace_ll.cc | // -*- mode:C++; tab-width:8; c-basic-offset:4; indent-tabs-mode:t -*-
// vim: ts=8 sw=4 smarttab
/*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2007 Miklos Szeredi <[email protected]>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
gcc -Wall `pkg-config fuse --cflags --libs` -lulockmgr fusexmp_fh.c -o fusexmp_fh
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <fuse/fuse_lowlevel.h>
#include <ulockmgr.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <dirent.h>
#include <errno.h>
#include <sys/time.h>
#ifdef HAVE_SETXATTR
#include <sys/xattr.h>
#endif
#include <time.h>
#include <utime.h> // utime()/struct utimbuf used in ft_ll_setattr
#include "include/unordered_map.h"
#include "include/hash_namespace.h"
#ifndef __LP64__
CEPH_HASH_NAMESPACE_START
template<> struct hash<uint64_t> {
size_t operator()(uint64_t __x) const {
static hash<uint32_t> H;
return H((__x >> 32) ^ (__x & 0xffffffff));
}
};
CEPH_HASH_NAMESPACE_END
#endif
#include <iostream>
#include <fstream>
#include <map>
#include <set>
using namespace std;
#include "common/ceph_mutex.h"
ceph::mutex trace_lock;
ofstream tracefile;
#define traceout (tracefile.is_open() ? tracefile : cout)
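// Each handler below appends a record to the trace: the op name ("ll_lookup",
// "ll_open", ...) followed by its arguments, one per line, optionally preceded
// by an '@'/sec/usec timestamp triple (see print_time()).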
char *basedir = 0;
int debug = 0;
bool do_timestamps = true;
#define dout if (debug) cout
ceph::mutex lock;
struct Inode {
struct stat stbuf;
int ref;
set<int> fds;
map<pair<string,ino_t>,Inode*> parents;
// if dir,
map<string,Inode*> dentries;
Inode() : ref(0) {}
Inode *lookup(const string& dname) {
if (dentries.count(dname))
return dentries[dname];
return 0;
}
};
Inode *root = 0;
ceph::unordered_map<ino_t, Inode*> inode_map;
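// The helpers below keep a shadow of the namespace exposed to the kernel:
// every Inode is indexed by st_ino and linked to its parents and children,
// so an absolute path under basedir can be rebuilt from any ino.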
bool make_inode_path(string &buf, Inode *in)
{
if (!in->parents.empty()) {
if (!make_inode_path(buf, in->parents.begin()->second))
return false;
buf += "/";
buf += in->parents.begin()->first.first;
} else {
if (in != root) return false;
assert(in->stbuf.st_ino == 1);
buf = basedir;
buf += "/";
}
return true;
//dout << "path: " << in->stbuf.st_ino << " -> " << buf << endl;
}
bool make_inode_path(string &buf, Inode *in, const char *name)
{
if (!make_inode_path(buf, in)) return false;
buf += "/";
buf += name;
return true;
}
bool make_ino_path(string &buf, ino_t ino)
{
Inode *in = inode_map[ino];
assert(in);
return make_inode_path(buf, in);
}
bool make_ino_path(string &buf, ino_t ino, const char *name)
{
Inode *in = inode_map[ino];
assert(in);
if (!make_inode_path(buf, in))
return false;
buf += "/";
buf += name;
return true;
}
void remove_dentry(Inode *pin, const string& dname)
{
dout << "remove_dentry " << pin->stbuf.st_ino << " " << dname << endl;
Inode *in = pin->lookup(dname);
assert(in);
pin->dentries.erase(dname);
in->parents.erase(pair<string,ino_t>(dname,pin->stbuf.st_ino));
dout << "remove_dentry " << pin->stbuf.st_ino << " " << dname
<< " ... inode " << in->stbuf.st_ino << " ref " << in->ref
<< endl;
}
void add_dentry(Inode *parent, const string& dname, Inode *in)
{
dout << "add_dentry " << parent->stbuf.st_ino << " " << dname << " to " << in->stbuf.st_ino << endl;
if (parent->dentries.count(dname))
remove_dentry(parent, dname); // e.g., when renaming over another file..
parent->dentries[dname] = in;
in->parents[pair<string,ino_t>(dname,parent->stbuf.st_ino)] = parent;
}
void unlink_inode(Inode *in)
{
dout << "unlink_inode " << in->stbuf.st_ino << " ref " << in->ref << endl;
// remove parent links
while (!in->parents.empty()) {
Inode *parent = in->parents.begin()->second;
string dname = in->parents.begin()->first.first;
remove_dentry(parent, dname);
}
// remove children
while (!in->dentries.empty())
remove_dentry(in, in->dentries.begin()->first);
while (!in->fds.empty()) {
int fd = *in->fds.begin();
::close(fd);
in->fds.erase(in->fds.begin());
dout << "remove_inode closeing stray fd " << fd << endl;
}
}
void remove_inode(Inode *in)
{
dout << "remove_inode " << in->stbuf.st_ino << " ref " << in->ref << endl;
unlink_inode(in);
inode_map.erase(in->stbuf.st_ino);
dout << "remove_inode " << in->stbuf.st_ino << " done" << endl;
delete in;
}
Inode *add_inode(Inode *parent, const char *name, struct stat *attr)
{
dout << "add_inode " << parent->stbuf.st_ino << " " << name << " " << attr->st_ino << endl;
Inode *in;
if (inode_map.count(attr->st_ino)) {
// reuse inode
in = inode_map[attr->st_ino];
unlink_inode(in); // hrm.. should this close open fds? probably.
dout << "** REUSING INODE **" << endl;
} else {
inode_map[attr->st_ino] = in = new Inode;
}
memcpy(&in->stbuf, attr, sizeof(*attr));
string dname(name);
add_dentry(parent, dname, in);
return in;
}
void print_time()
{
if (do_timestamps) {
struct timeval tv;
gettimeofday(&tv, 0);
traceout << "@" << endl
<< tv.tv_sec << endl
<< tv.tv_usec << endl;
}
}
bool has_perm(int mask, Inode *in, int uid, int gid)
{
dout << "hash_perm " << uid << "." << gid << " " << oct << mask << " in " << in->stbuf.st_mode
<< " " << in->stbuf.st_uid << "." << in->stbuf.st_gid << endl;
if (in->stbuf.st_mode & mask) return true;
if (in->stbuf.st_gid == gid && in->stbuf.st_mode & (mask << 3)) return true;
if (in->stbuf.st_uid == uid && in->stbuf.st_mode & (mask << 6)) return true;
return false;
}
static void ft_ll_lookup(fuse_req_t req, fuse_ino_t pino, const char *name)
{
int res = 0;
//dout << "lookup " << pino << " " << name << endl;
struct fuse_entry_param fe;
memset(&fe, 0, sizeof(fe));
lock.lock();
Inode *parent = inode_map[pino];
assert(parent);
// check permissions
string dname(name);
string path;
Inode *in = 0;
if (!has_perm(0001, parent, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid)) {
res = EPERM;
}
else if (!make_inode_path(path, parent, name)) {
res = ENOENT;
} else {
in = parent->lookup(dname);
if (in && res == 0) {
// re-stat, for good measure
res = ::lstat(path.c_str(), &in->stbuf);
// hrm!
if (res != 0) {
dout << "** WEIRD ** lookup on " << pino << " " << name << " inode went away!" << endl;
in = 0;
res = errno;
}
//dout << "have " << in->stbuf.st_ino << endl;
} else {
in = new Inode;
res = ::lstat(path.c_str(), &in->stbuf);
//dout << "stat " << path << " res = " << res << endl;
if (res == 0) {
inode_map[in->stbuf.st_ino] = in;
add_dentry(parent, dname, in);
} else {
delete in;
in = 0;
res = errno;
}
}
if (in) {
in->ref++;
fe.ino = in->stbuf.st_ino;
memcpy(&fe.attr, &in->stbuf, sizeof(in->stbuf));
}
}
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_lookup" << endl << pino << endl << name << endl << fe.attr.st_ino << endl;
trace_lock.unlock();
if (in)
fuse_reply_entry(req, &fe);
else
fuse_reply_err(req, res);
}
static void ft_ll_forget(fuse_req_t req, fuse_ino_t ino, long unsigned nlookup)
{
if (ino != 1) {
std::scoped_lock l{lock};
Inode *in = inode_map[ino];
if (in) {
dout << "forget on " << ino << " ref " << in->ref << ", forget " << nlookup << endl;
if (in->ref < nlookup)
dout << "**** BAD **** forget on " << ino << " ref " << in->ref << ", forget " << nlookup << endl;
in->ref -= nlookup;
if (in->ref <= 0)
remove_inode(in);
} else {
dout << "**** BAD **** forget " << nlookup << " on nonexistent inode " << ino << endl;
}
}
{
std::scoped_lock l{trace_lock};
print_time();
traceout << "ll_forget" << endl << ino << endl << nlookup << endl;
}
fuse_reply_none(req);
}
static void ft_ll_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
int res = 0;
string path;
int fd = 0;
Inode *in = 0;
struct stat attr;
lock.lock();
in = inode_map[ino];
if (in->fds.empty()) {
if (!make_inode_path(path, in))
res = ENOENT;
} else
fd = *in->fds.begin();
lock.unlock();
if (fd > 0) {
res = ::fstat(fd, &attr);
dout << "getattr fstat on fd " << fd << " res " << res << endl;
} else if (res == 0) {
res = ::lstat(path.c_str(), &attr);
dout << "getattr lstat on " << path << " res " << res << endl;
}
if (res < 0) res = errno;
if (ino == 1) attr.st_ino = 1;
trace_lock.lock();
print_time();
traceout << "ll_getattr" << endl << ino << endl;
trace_lock.unlock();
if (res == 0) {
lock.lock();
memcpy(&in->stbuf, &attr, sizeof(attr));
lock.unlock();
fuse_reply_attr(req, &attr, 0);
} else
fuse_reply_err(req, res);
}
static void ft_ll_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr,
int to_set, struct fuse_file_info *fi)
{
string path;
Inode *in = 0;
int fd = 0;
int res = 0;
lock.lock();
in = inode_map[ino];
if (in->fds.empty() || (to_set & FUSE_SET_ATTR_MTIME)) {
if (!make_inode_path(path, in))
res = ENOENT;
} else
fd = *in->fds.begin();
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_setattr" << endl << ino << endl;
traceout << attr->st_mode << endl;
traceout << attr->st_uid << endl << attr->st_gid << endl;
traceout << attr->st_size << endl;
traceout << attr->st_mtime << endl;
traceout << attr->st_atime << endl;
traceout << to_set << endl;
trace_lock.unlock();
if (res == 0 && !has_perm(0010, in, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid)) {
res = EPERM;
} else if (res == 0) {
if (to_set & FUSE_SET_ATTR_MODE) {
if (fd > 0)
res = ::fchmod(fd, attr->st_mode);
else
res = ::chmod(path.c_str(), attr->st_mode);
}
if (!res && to_set & FUSE_SET_ATTR_UID) {
if (fd > 0)
res = ::fchown(fd, attr->st_uid, attr->st_gid);
else
res = ::chown(path.c_str(), attr->st_uid, attr->st_gid);
}
if (!res && to_set & FUSE_SET_ATTR_SIZE) {
if (fd > 0)
res = ::ftruncate(fd, attr->st_size);
else
res = ::truncate(path.c_str(), attr->st_size);
}
if (!res && to_set & FUSE_SET_ATTR_MTIME) {
struct utimbuf ut;
ut.actime = attr->st_atime;
ut.modtime = attr->st_mtime;
res = ::utime(path.c_str(), &ut);
}
if (res < 0) res = errno;
}
if (res == 0) {
lock.lock();
::lstat(path.c_str(), &in->stbuf);
if (ino == 1) in->stbuf.st_ino = 1;
memcpy(attr, &in->stbuf, sizeof(*attr));
lock.unlock();
fuse_reply_attr(req, attr, 0);
} else
fuse_reply_err(req, res);
}
static void ft_ll_readlink(fuse_req_t req, fuse_ino_t ino)
{
string path;
int res = 0;
lock.lock();
if (!make_ino_path(path, ino))
res = ENOENT;
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_readlink" << endl << ino << endl;
trace_lock.unlock();
char buf[256];
if (res == 0) res = readlink(path.c_str(), buf, 255);
if (res < 0) res = errno;
if (res >= 0) {
buf[res] = 0;
fuse_reply_readlink(req, buf);
} else {
fuse_reply_err(req, res);
}
}
static void ft_ll_opendir(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
string path;
int res = 0;
lock.lock();
Inode *in = inode_map[ino];
if (!make_inode_path(path, in))
res = ENOENT;
lock.unlock();
DIR *dir = 0;
if (res == 0 && !has_perm(0100, in, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) dir = opendir(path.c_str());
if (res < 0) res = errno;
trace_lock.lock();
print_time();
traceout << "ll_opendir" << endl << ino << endl << (unsigned long)dir << endl;
trace_lock.unlock();
if (dir) {
fi->fh = (long)dir;
fuse_reply_open(req, fi);
} else
fuse_reply_err(req, res);
}
static void ft_ll_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
off_t off, struct fuse_file_info *fi)
{
struct dirent *de;
DIR *dp = (DIR*)fi->fh;
// buffer
char *buf;
size_t pos = 0;
buf = new char[size];
if (!buf) {
fuse_reply_err(req, ENOMEM);
return;
}
seekdir(dp, off);
while ((de = readdir(dp)) != NULL) {
struct stat st;
memset(&st, 0, sizeof(st));
st.st_ino = de->d_ino;
st.st_mode = de->d_type << 12;
size_t entrysize = fuse_add_direntry(req, buf + pos, size - pos,
de->d_name, &st, telldir(dp));
if (entrysize > size - pos)
break; // didn't fit, done for now.
pos += entrysize;
}
fuse_reply_buf(req, buf, pos);
delete[] buf;
}
static void ft_ll_releasedir(fuse_req_t req, fuse_ino_t ino,
struct fuse_file_info *fi)
{
DIR *dir = (DIR*)fi->fh;
trace_lock.lock();
print_time();
traceout << "ll_releasedir" << endl << (unsigned long)dir << endl;
trace_lock.unlock();
closedir(dir);
fuse_reply_err(req, 0);
}
static void ft_ll_mknod(fuse_req_t req, fuse_ino_t parent, const char *name,
mode_t mode, dev_t rdev)
{
string path;
Inode *pin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
dout << "mknod " << path << endl;
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) res = ::mknod(path.c_str(), mode, rdev);
if (res < 0)
res = errno;
else
::chown(path.c_str(), fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid);
struct fuse_entry_param fe;
if (res == 0) {
memset(&fe, 0, sizeof(fe));
::lstat(path.c_str(), &fe.attr);
fe.ino = fe.attr.st_ino;
lock.lock();
Inode *in = add_inode(pin, name, &fe.attr);
in->ref++;
lock.unlock();
}
trace_lock.lock();
print_time();
traceout << "ll_mknod" << endl << parent << endl << name << endl << mode << endl << rdev << endl;
traceout << (res == 0 ? fe.ino:0) << endl;
trace_lock.unlock();
if (res == 0)
fuse_reply_entry(req, &fe);
else
fuse_reply_err(req, res);
}
static void ft_ll_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name,
mode_t mode)
{
string path;
Inode *pin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) res = ::mkdir(path.c_str(), mode);
if (res < 0)
res = errno;
else
::chown(path.c_str(), fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid);
struct fuse_entry_param fe;
if (res == 0) {
memset(&fe, 0, sizeof(fe));
::lstat(path.c_str(), &fe.attr);
fe.ino = fe.attr.st_ino;
lock.lock();
Inode *in = add_inode(pin, name, &fe.attr);
in->ref++;
lock.unlock();
}
trace_lock.lock();
print_time();
traceout << "ll_mkdir" << endl << parent << endl << name << endl << mode << endl;
traceout << (res == 0 ? fe.ino:0) << endl;
trace_lock.unlock();
if (res == 0)
fuse_reply_entry(req, &fe);
else
fuse_reply_err(req, res);
}
static void ft_ll_symlink(fuse_req_t req, const char *value, fuse_ino_t parent, const char *name)
{
string path;
Inode *pin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) res = ::symlink(value, path.c_str());
if (res < 0)
res = errno;
else
::chown(path.c_str(), fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid);
struct fuse_entry_param fe;
if (res == 0) {
memset(&fe, 0, sizeof(fe));
::lstat(path.c_str(), &fe.attr);
fe.ino = fe.attr.st_ino;
lock.lock();
Inode *in = add_inode(pin, name, &fe.attr);
in->ref++;
lock.unlock();
}
trace_lock.lock();
print_time();
traceout << "ll_symlink" << endl << parent << endl << name << endl << value << endl;
traceout << (res == 0 ? fe.ino:0) << endl;
trace_lock.unlock();
if (res == 0)
fuse_reply_entry(req, &fe);
else
fuse_reply_err(req, res);
}
static void ft_ll_create(fuse_req_t req, fuse_ino_t parent, const char *name,
mode_t mode, struct fuse_file_info *fi)
{
string path;
Inode *pin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
dout << "create " << path << endl;
int fd = 0;
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) {
fd = ::open(path.c_str(), fi->flags|O_CREAT, mode);
if (fd < 0) {
res = errno;
} else {
::fchown(fd, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid);
}
}
struct fuse_entry_param fe;
memset(&fe, 0, sizeof(fe));
if (res == 0) {
::lstat(path.c_str(), &fe.attr);
fe.ino = fe.attr.st_ino;
lock.lock();
Inode *in = add_inode(pin, name, &fe.attr);
in->ref++;
in->fds.insert(fd);
lock.unlock();
fi->fh = fd;
}
trace_lock.lock();
print_time();
traceout << "ll_create" << endl
<< parent << endl
<< name << endl
<< mode << endl
<< fi->flags << endl
<< (res == 0 ? fd:0) << endl
<< fe.ino << endl;
trace_lock.unlock();
if (res == 0)
fuse_reply_create(req, &fe, fi);
else
fuse_reply_err(req, res);
}
static void ft_ll_statfs(fuse_req_t req, fuse_ino_t ino)
{
string path;
int res = 0;
if (ino) {
lock.lock();
if (!make_ino_path(path, ino))
res = ENOENT;
lock.unlock();
} else {
path = basedir;
}
trace_lock.lock();
print_time();
traceout << "ll_statfs" << endl << ino << endl;
trace_lock.unlock();
struct statvfs stbuf;
if (res == 0) res = statvfs(path.c_str(), &stbuf);
if (res < 0) res = errno;
if (res == 0)
fuse_reply_statfs(req, &stbuf);
else
fuse_reply_err(req, res);
}
static void ft_ll_unlink(fuse_req_t req, fuse_ino_t parent, const char *name)
{
string path;
Inode *pin = 0;
Inode *in = 0;
string dname(name);
int res = 0;
lock.lock();
pin = inode_map[parent];
in = pin->lookup(dname);
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_unlink" << endl << parent << endl << name << endl;
trace_lock.unlock();
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) {
if (in && in->fds.empty()) {
int fd = ::open(path.c_str(), O_RDWR);
if (fd > 0)
in->fds.insert(fd); // for slow getattrs.. wtf
dout << "unlink opening paranoia fd " << fd << endl;
}
res = ::unlink(path.c_str());
if (res < 0) res = errno;
}
if (res == 0) {
// remove from out cache
lock.lock();
string dname(name);
if (pin->lookup(dname))
remove_dentry(pin, dname);
lock.unlock();
fuse_reply_err(req, 0);
} else
fuse_reply_err(req, res);
}
static void ft_ll_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name)
{
string path;
Inode *pin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_rmdir" << endl << parent << endl << name << endl;
trace_lock.unlock();
if (res == 0 && !has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) res = ::rmdir(path.c_str());
if (res < 0) res = errno;
if (res == 0) {
// remove from out cache
lock.lock();
string dname(name);
if (pin->lookup(dname))
remove_dentry(pin, dname);
lock.unlock();
fuse_reply_err(req, 0);
} else
fuse_reply_err(req, res);
}
static void ft_ll_rename(fuse_req_t req, fuse_ino_t parent, const char *name,
fuse_ino_t newparent, const char *newname)
{
string path;
string newpath;
Inode *pin = 0;
Inode *newpin = 0;
int res = 0;
lock.lock();
pin = inode_map[parent];
if (!make_inode_path(path, pin, name))
res = ENOENT;
newpin = inode_map[newparent];
if (!make_inode_path(newpath, newpin, newname))
res = ENOENT;
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_rename" << endl
<< parent << endl
<< name << endl
<< newparent << endl
<< newname << endl;
trace_lock.unlock();
if (res == 0 && (!has_perm(0010, pin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid) ||
!has_perm(0010, newpin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid)))
res = EPERM;
else if (res == 0) res = ::rename(path.c_str(), newpath.c_str());
if (res < 0) res = errno;
if (res == 0) {
string dname(name);
string newdname(newname);
lock.lock();
Inode *in = pin->lookup(dname);
if (in) {
add_dentry(newpin, newdname, in);
remove_dentry(pin, dname);
} else {
dout << "hrm, rename didn't have renamed inode.. " << path << " to " << newpath << endl;
}
lock.unlock();
fuse_reply_err(req, 0);
} else
fuse_reply_err(req, res);
}
static void ft_ll_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t newparent,
const char *newname)
{
string path;
string newpath;
Inode *in = 0;
Inode *newpin = 0;
int res = 0;
lock.lock();
in = inode_map[ino];
if (!make_inode_path(path, in))
res = ENOENT;
newpin = inode_map[newparent];
if (!make_inode_path(newpath, newpin, newname))
res = ENOENT;
lock.unlock();
trace_lock.lock();
print_time();
traceout << "ll_link" << endl
<< ino << endl
<< newparent << endl
<< newname << endl;
trace_lock.unlock();
//cout << "link " << path << " newpath " << newpath << endl;
if (res == 0 && (!has_perm(0010, in, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid) ||
!has_perm(0010, newpin, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid)))
res = EPERM;
else if (res == 0) res = ::link(path.c_str(), newpath.c_str());
if (res < 0) res = errno;
if (res == 0) {
struct fuse_entry_param fe;
memset(&fe, 0, sizeof(fe));
::lstat(newpath.c_str(), &fe.attr);
lock.lock();
string newdname(newname);
add_dentry(newpin, newdname, in);
in->ref++;
memcpy(&in->stbuf, &fe.attr, sizeof(fe.attr)); // re-read, bc we changed the link count
lock.unlock();
fe.ino = fe.attr.st_ino;
fuse_reply_entry(req, &fe);
} else
fuse_reply_err(req, res);
}
static void ft_ll_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
string path;
Inode *in = 0;
int res = 0;
lock.lock();
in = inode_map[ino];
if (!make_inode_path(path, in))
res = ENOENT;
lock.unlock();
int want = 0100;
if (fi->flags & O_RDWR) want |= 0010;
    if ((fi->flags & O_ACCMODE) == O_WRONLY) want = 0010; // compare only the access mode bits
int fd = 0;
if (res == 0 && !has_perm(want, in, fuse_req_ctx(req)->uid, fuse_req_ctx(req)->gid))
res = EPERM;
else if (res == 0) {
fd = ::open(path.c_str(), fi->flags);
if (fd <= 0) res = errno;
}
trace_lock.lock();
print_time();
traceout << "ll_open" << endl
<< ino << endl
<< fi->flags << endl
<< (fd > 0 ? fd:0) << endl;
trace_lock.unlock();
if (res == 0) {
lock.lock();
in->fds.insert(fd);
lock.unlock();
fi->fh = fd;
fuse_reply_open(req, fi);
} else
fuse_reply_err(req, res);
}
static void ft_ll_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
struct fuse_file_info *fi)
{
char *buf = new char[size];
int res = ::pread(fi->fh, buf, size, off);
//cout << "read " << path << " " << off << "~" << size << endl;
trace_lock.lock();
print_time();
traceout << "ll_read" << endl
<< fi->fh << endl
<< off << endl
<< size << endl;
trace_lock.unlock();
if (res >= 0)
fuse_reply_buf(req, buf, res);
else
fuse_reply_err(req, errno);
delete[] buf;
}
static void ft_ll_write(fuse_req_t req, fuse_ino_t ino, const char *buf,
size_t size, off_t off, struct fuse_file_info *fi)
{
int res = ::pwrite(fi->fh, buf, size, off);
trace_lock.lock();
print_time();
traceout << "ll_write" << endl
<< fi->fh << endl
<< off << endl
<< size << endl;
trace_lock.unlock();
if (res >= 0)
fuse_reply_write(req, res);
else
fuse_reply_err(req, errno);
}
static void ft_ll_flush(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
trace_lock.lock();
print_time();
traceout << "ll_flush" << endl << fi->fh << endl;
trace_lock.unlock();
int res = ::fdatasync(fi->fh);
//int res = ::close(dup(fi->fh));
if (res >= 0)
fuse_reply_err(req, 0);
else
fuse_reply_err(req, errno);
}
static void ft_ll_release(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
trace_lock.lock();
print_time();
traceout << "ll_release" << endl << fi->fh << endl;
trace_lock.unlock();
lock.lock();
Inode *in = inode_map[ino];
in->fds.erase(fi->fh);
lock.unlock();
int res = ::close(fi->fh);
if (res >= 0)
fuse_reply_err(req, 0);
else
fuse_reply_err(req, errno);
}
static void ft_ll_fsync(fuse_req_t req, fuse_ino_t ino, int datasync,
struct fuse_file_info *fi)
{
trace_lock.lock();
print_time();
traceout << "ll_fsync" << endl << fi->fh << endl;
trace_lock.unlock();
int res = ::fsync(fi->fh);
if (res >= 0)
fuse_reply_err(req, 0);
else
fuse_reply_err(req, errno);
}
static struct fuse_lowlevel_ops ft_ll_oper = {
init: 0,
destroy: 0,
lookup: ft_ll_lookup,
forget: ft_ll_forget,
getattr: ft_ll_getattr,
setattr: ft_ll_setattr,
readlink: ft_ll_readlink,
mknod: ft_ll_mknod,
mkdir: ft_ll_mkdir,
unlink: ft_ll_unlink,
rmdir: ft_ll_rmdir,
symlink: ft_ll_symlink,
rename: ft_ll_rename,
link: ft_ll_link,
open: ft_ll_open,
read: ft_ll_read,
write: ft_ll_write,
flush: ft_ll_flush,
release: ft_ll_release,
fsync: ft_ll_fsync,
opendir: ft_ll_opendir,
readdir: ft_ll_readdir,
releasedir: ft_ll_releasedir,
fsyncdir: 0,
statfs: ft_ll_statfs,
setxattr: 0,
getxattr: 0,
listxattr: 0,
removexattr: 0,
access: 0,
create: ft_ll_create,
getlk: 0,
setlk: 0,
bmap: 0
};
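// Command-line flags (consumed here before the rest is handed to FUSE):
// --basedir <dir> directory to mirror (required)
// --trace <file> write the trace to this file instead of stdout
// --timestamps 0|1 include @/sec/usec records before each op
// --debug enable dout output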
int main(int argc, char *argv[])
{
// open trace
// figure base dir
char *newargv[100];
int newargc = 0;
for (int i=0; i<argc; i++) {
if (strcmp(argv[i], "--basedir") == 0) {
basedir = argv[++i];
} else if (strcmp(argv[i], "--timestamps") == 0) {
do_timestamps = atoi(argv[++i]);
} else if (strcmp(argv[i], "--trace") == 0) {
tracefile.open(argv[++i], ios::out|ios::trunc);
if (!tracefile.is_open())
cerr << "** couldn't open trace file " << argv[i] << endl;
} else if (strcmp(argv[i], "--debug") == 0) {
debug = 1;
} else {
cout << "arg: " << newargc << " " << argv[i] << endl;
newargv[newargc++] = argv[i];
}
}
newargv[newargc++] = "-o";
newargv[newargc++] = "allow_other";
// newargv[newargc++] = "-o";
// newargv[newargc++] = "default_permissions";
if (!basedir) return 1;
cout << "basedir is " << basedir << endl;
// create root ino
root = new Inode;
::lstat(basedir, &root->stbuf);
root->stbuf.st_ino = 1;
inode_map[1] = root;
root->ref++;
umask(0);
// go go gadget fuse
struct fuse_args args = FUSE_ARGS_INIT(newargc, newargv);
struct fuse_chan *ch;
char *mountpoint;
if (fuse_parse_cmdline(&args, &mountpoint, NULL, NULL) != -1 &&
(ch = fuse_mount(mountpoint, &args)) != NULL) {
struct fuse_session *se;
// init fuse
se = fuse_lowlevel_new(&args, &ft_ll_oper, sizeof(ft_ll_oper),
NULL);
if (se != NULL) {
if (fuse_set_signal_handlers(se) != -1) {
fuse_session_add_chan(se, ch);
if (fuse_session_loop(se) <= -1) {
cout << "Failed fuse_session_loop() call." << endl;
return 1;
}
fuse_remove_signal_handlers(se);
fuse_session_remove_chan(ch);
}
fuse_session_destroy(se);
}
fuse_unmount(mountpoint, ch);
}
fuse_opt_free_args(&args);
}
| 28,326 | 22.904641 | 104 | cc |
null | ceph-main/mirroring/mirror-ceph.sh | #!/usr/bin/env bash
set -e
#
# Script to mirror Ceph locally
#
# Please, choose a local source and do not sync in a shorter interval than
# 3 hours.
#
SILENT=0
# All available source mirrors
declare -A SOURCES
SOURCES[eu]="eu.ceph.com"
SOURCES[de]="de.ceph.com"
SOURCES[se]="se.ceph.com"
SOURCES[au]="au.ceph.com"
SOURCES[us]="download.ceph.com"
SOURCES[fr]="fr.ceph.com"
SOURCES[ca]="ca.ceph.com"
SOURCES[us-west]="us-west.ceph.com"
SOURCES[global]="download.ceph.com"
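# Example invocation (target path is illustrative):
# ./mirror-ceph.sh -q -s eu -t /srv/mirror/ceph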
function print_usage() {
echo "$0 [-q ] -s <source mirror> -t <target directory>"
}
while getopts ":qhs:t:" opt; do
case $opt in
q)
SILENT=1
;;
s)
SOURCE=$OPTARG
;;
t)
TARGET=$OPTARG
;;
h)
HELP=1
;;
\?)
print_usage
exit 1
;;
esac
done
if [ ! -z "$HELP" ] || [ -z "$TARGET" ] || [ -z "$SOURCE" ]; then
print_usage
exit 1
fi
if [ ! -d "$TARGET" ]; then
echo "$TARGET is not a valid target directory"
exit 1
fi
for i in "${!SOURCES[@]}"; do
if [ "$i" == "$SOURCE" ]; then
SOURCE_HOST=${SOURCES[$i]}
fi
done
if [ -z "$SOURCE_HOST" ]; then
echo -n "Please select one of the following sources:"
for i in "${!SOURCES[@]}"; do
echo -n " $i"
done
echo ""
exit 1
fi
RSYNC_OPTS="--stats --progress"
if [ $SILENT -eq 1 ]; then
RSYNC_OPTS="--quiet"
fi
# We start a two-stage sync here for DEB and RPM
# Based on: https://www.debian.org/mirror/ftpmirror
#
# The idea is to prevent temporary situations where metadata points to files
# which do not exist
#
# Exclude all metadata files
rsync ${RSYNC_OPTS} ${SOURCE_HOST}::ceph --recursive --times --links \
--hard-links \
--exclude Packages* \
--exclude Sources* \
--exclude Release* \
--exclude InRelease \
--exclude i18n/* \
--exclude ls-lR* \
--exclude repodata/* \
${TARGET}
# Now also transfer the metadata and delete afterwards
rsync ${RSYNC_OPTS} ${SOURCE_HOST}::ceph --recursive --times --links \
--hard-links --delete-after \
${TARGET}
| 2,560 | 24.356436 | 76 | sh |
null | ceph-main/mirroring/test-mirrors.sh | #!/usr/bin/env bash
#
# Simple script which performs a HTTP and rsync check on
# all Ceph mirrors over IPv4 and IPv6 to see if they are online
#
# Requires IPv4, IPv6, rsync and curl
#
# Example usage:
# - ./test-mirrors.sh eu.ceph.com,de.ceph.com,au.ceph.com
# - cat MIRRORS |cut -d ':' -f 1|xargs -n 1 ./test-mirrors.sh
#
function print_usage {
echo "Usage: $0 mirror1,mirror2,mirror3,mirror4,etc"
}
function test_http {
HOST=$1
echo -n "$HOST HTTP IPv4: "
curl -s -I -4 -o /dev/null http://$HOST
if [ "$?" -ne 0 ]; then
echo "FAIL"
else
echo "OK"
fi
echo -n "$HOST HTTP IPv6: "
curl -s -I -6 -o /dev/null http://$HOST
if [ "$?" -ne 0 ]; then
echo "FAIL"
else
echo "OK"
fi
}
function test_rsync {
HOST=$1
echo -n "$HOST RSYNC IPv4: "
rsync -4 -avrqn ${HOST}::ceph /tmp 2>/dev/null
if [ "$?" -ne 0 ]; then
echo "FAIL"
else
echo "OK"
fi
echo -n "$HOST RSYNC IPv6: "
rsync -6 -avrqn ${HOST}::ceph /tmp 2>/dev/null
if [ "$?" -ne 0 ]; then
echo "FAIL"
else
echo "OK"
fi
}
MIRRORS=$1
if [ -z "$MIRRORS" ]; then
print_usage
exit 1
fi
IFS=', ' read -r -a array <<< "$MIRRORS"
for MIRROR in "${array[@]}"; do
test_http $MIRROR
test_rsync $MIRROR
done
| 1,327 | 17.971429 | 63 | sh |
null | ceph-main/monitoring/ceph-mixin/jsonnet-bundler-build.sh | #!/bin/sh -ex
JSONNET_VERSION="v0.4.0"
OUTPUT_DIR=${1:-$(pwd)}
git clone -b ${JSONNET_VERSION} --depth 1 https://github.com/jsonnet-bundler/jsonnet-bundler
make -C jsonnet-bundler build
mv jsonnet-bundler/_output/jb ${OUTPUT_DIR}
| 233 | 25 | 92 | sh |
null | ceph-main/monitoring/ceph-mixin/lint-jsonnet.sh | #!/bin/sh -e
JSONNETS_FILES=$(find . -name 'vendor' -prune -o \
-name '*.jsonnet' -print -o -name '*.libsonnet' -print)
jsonnetfmt "$@" ${JSONNETS_FILES}
| 179 | 29 | 79 | sh |
null | ceph-main/monitoring/ceph-mixin/test-jsonnet.sh | #!/bin/sh -e
TEMPDIR=$(mktemp -d)
BASEDIR=$(dirname "$0")
jsonnet -J vendor -m ${TEMPDIR} $BASEDIR/dashboards.jsonnet
truncate -s 0 ${TEMPDIR}/json_difference.log
for file in ${BASEDIR}/dashboards_out/*.json
do
file_name="$(basename $file)"
for generated_file in ${TEMPDIR}/*.json
do
generated_file_name="$(basename $generated_file)"
if [ "$file_name" == "$generated_file_name" ]; then
jsondiff --indent 2 "${generated_file}" "${file}" \
| tee -a ${TEMPDIR}/json_difference.log
fi
done
done
jsonnet -J vendor -S alerts.jsonnet -o ${TEMPDIR}/prometheus_alerts.yml
jsondiff --indent 2 "prometheus_alerts.yml" "${TEMPDIR}/prometheus_alerts.yml" \
| tee -a ${TEMPDIR}/json_difference.log
err=0
if [ $(wc -l < ${TEMPDIR}/json_difference.log) -eq 0 ]
then
rm -rf ${TEMPDIR}
echo "Congratulations! Grafonnet Check Passed"
else
rm -rf ${TEMPDIR}
echo "Grafonnet Check Failed, failed comparing generated file with existing"
exit 1
fi
| 1,022 | 27.416667 | 80 | sh |
null | ceph-main/qa/find-used-ports.sh | #!/bin/bash
git --no-pager grep -n '127.0.0.1:[0-9]\+' | sed -n 's/.*127.0.0.1:\([0-9]\+\).*/\1/p' | sort -n | uniq -u
| 120 | 29.25 | 106 | sh |
null | ceph-main/qa/loopall.sh | #!/usr/bin/env bash
set -ex
basedir=`echo $0 | sed 's/[^/]*$//g'`.
testdir="$1"
[ -n "$2" ] && logdir=$2 || logdir=$1
[ ${basedir:0:1} == "." ] && basedir=`pwd`/${basedir:1}
PATH="$basedir/src:$PATH"
[ -z "$testdir" ] || [ ! -d "$testdir" ] && echo "specify test dir" && exit 1
cd $testdir
while true
do
for test in `cd $basedir/workunits && find . -executable -type f | $basedir/../src/script/permute`
do
echo "------ running test $test ------"
pwd
[ -d $test ] && rm -r $test
mkdir -p $test
mkdir -p `dirname $logdir/$test.log`
test -e $logdir/$test.log && rm $logdir/$test.log
sh -c "cd $test && $basedir/workunits/$test" 2>&1 | tee $logdir/$test.log
done
done
| 689 | 22.793103 | 102 | sh |
null | ceph-main/qa/run-standalone.sh | #!/usr/bin/env bash
set -e
if [ ! -e CMakeCache.txt -o ! -d bin ]; then
echo 'run this from the build dir'
exit 1
fi
function get_cmake_variable() {
local variable=$1
grep "$variable" CMakeCache.txt | cut -d "=" -f 2
}
function get_python_path() {
python_common=$(realpath ../src/python-common)
echo $(realpath ../src/pybind):$(pwd)/lib/cython_modules/lib.3:$python_common
}
if [ `uname` = FreeBSD ]; then
# otherwise module prettytable will not be found
export PYTHONPATH=$(get_python_path):/usr/local/lib/python3.6/site-packages
exec_mode=+111
KERNCORE="kern.corefile"
COREPATTERN="core.%N.%P"
else
export PYTHONPATH=$(get_python_path)
exec_mode=/111
KERNCORE="kernel.core_pattern"
COREPATTERN="core.%e.%p.%t"
fi
function cleanup() {
if [ -n "$precore" ]; then
sudo sysctl -w "${KERNCORE}=${precore}"
fi
}
function finish() {
cleanup
exit 0
}
trap finish TERM HUP INT
PATH=$(pwd)/bin:$PATH
# add /sbin and /usr/sbin to PATH to find sysctl in those cases where the
# user's PATH does not get these directories by default (e.g., tumbleweed)
PATH=$PATH:/sbin:/usr/sbin
export LD_LIBRARY_PATH="$(pwd)/lib"
# TODO: Use getops
dryrun=false
if [[ "$1" = "--dry-run" ]]; then
dryrun=true
shift
fi
all=false
if [ "$1" = "" ]; then
all=true
fi
select=("$@")
location="../qa/standalone"
count=0
errors=0
userargs=""
precore="$(sysctl -n $KERNCORE)"
# If corepattern already set, avoid having to use sudo
if [ "$precore" = "$COREPATTERN" ]; then
precore=""
else
sudo sysctl -w "${KERNCORE}=${COREPATTERN}"
fi
# Clean out any cores in core target directory (currently .)
if ls $(dirname $(sysctl -n $KERNCORE)) | grep -q '^core\|core$' ; then
mkdir found.cores.$$ 2> /dev/null || true
for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
mv $i found.cores.$$
done
echo "Stray cores put in $(pwd)/found.cores.$$"
fi
ulimit -c unlimited
for f in $(cd $location ; find . -mindepth 2 -perm $exec_mode -type f)
do
f=$(echo $f | sed 's/\.\///')
if [[ "$all" = "false" ]]; then
found=false
for c in "${!select[@]}"
do
# Get command and any arguments of subset of tests to run
allargs="${select[$c]}"
arg1=$(echo "$allargs" | cut --delimiter " " --field 1)
# Get user args for this selection for use below
userargs="$(echo $allargs | cut -s --delimiter " " --field 2-)"
if [[ "$arg1" = $(basename $f) ]] || [[ "$arg1" = $(dirname $f) ]]; then
found=true
break
fi
if [[ "$arg1" = "$f" ]]; then
found=true
break
fi
done
if [[ "$found" = "false" ]]; then
continue
fi
fi
# Don't run test-failure.sh unless explicitly specified
if [ "$all" = "true" -a "$f" = "special/test-failure.sh" ]; then
continue
fi
cmd="$location/$f $userargs"
count=$(expr $count + 1)
echo "--- $cmd ---"
if [[ "$dryrun" != "true" ]]; then
if ! PATH=$PATH:bin \
CEPH_ROOT=.. \
CEPH_LIB=lib \
LOCALRUN=yes \
time -f "Elapsed %E (%e seconds)" $cmd ; then
echo "$f .............. FAILED"
errors=$(expr $errors + 1)
fi
fi
done
cleanup
if [ "$errors" != "0" ]; then
echo "$errors TESTS FAILED, $count TOTAL TESTS"
exit 1
fi
echo "ALL $count TESTS PASSED"
exit 0
| 3,517 | 23.774648 | 85 | sh |
null | ceph-main/qa/run_xfstests-obsolete.sh | #!/usr/bin/env bash
# Copyright (C) 2012 Dreamhost, LLC
#
# This is free software; see the source for copying conditions.
# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as
# published by the Free Software Foundation version 2.
# Usage:
# run_xfs_tests -t /dev/<testdev> -s /dev/<scratchdev> -f <fstype> <tests>
# - test device and scratch device will both get trashed
# - fstypes can be xfs, ext4, or btrfs (xfs default)
# - tests can be listed individually or in ranges: 1 3-5 8
# tests can also be specified by group: -g quick
#
# Exit status:
# 0: success
# 1: usage error
# 2: other runtime error
# 99: argument count error (programming error)
# 100: getopt error (internal error)
# Alex Elder <[email protected]>
# April 13, 2012
set -e
PROGNAME=$(basename $0)
# xfstests is downloaded from this git repository and then built.
# XFSTESTS_REPO="git://oss.sgi.com/xfs/cmds/xfstests.git"
XFSTESTS_REPO="git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git"
# Default command line option values
COUNT="1"
FS_TYPE="xfs"
SCRATCH_DEV="" # MUST BE SPECIFIED
TEST_DEV="" # MUST BE SPECIFIED
TESTS="-g auto" # The "auto" group is supposed to be "known good"
# rbd presents geometry information that causes mkfs.xfs to
# issue a warning. This option avoids this class of problems.
XFS_MKFS_OPTIONS="-l su=32k"
# Override the default test list with a list of tests known to pass
# until we can work through getting them all passing reliably.
TESTS="1-7 9 11-15 17 19-21 26-29 31-34 41 46-48 50-54 56 61 63-67 69-70 74-76"
TESTS="${TESTS} 78 79 84-89 91-92 100 103 105 108 110 116-121 124 126"
TESTS="${TESTS} 129-135 137-141 164-167 182 184 187-190 192 194"
TESTS="${TESTS} 196 199 201 203 214-216 220-227 234 236-238 241 243-249"
TESTS="${TESTS} 253 257-259 261 262 269 273 275 277 278 280 285 286"
# 275 was the highest available test as of 4/10/12.
# 289 was the highest available test as of 11/15/12.
######
# Some explanation of why tests have been excluded above:
#
# Test 008 was pulled because it contained a race condition leading to
# spurious failures.
#
# Test 049 was pulled because it caused a kernel fault.
# http://tracker.newdream.net/issues/2260
# Test 232 was pulled because it caused an XFS error
# http://tracker.newdream.net/issues/2302
#
# This test passes but takes a LONG time (1+ hours): 127
#
# These were not run for one (anticipated) reason or another:
# 010 016 030 035 040 044 057 058-060 072 077 090 093-095 097-099 104
# 112 113 122 123 125 128 142 147-163 168 175-178 180 185 191 193
# 195 197 198 207-213 217 228 230-233 235 239 240 252 254 255 264-266
# 270-272 276 278-279 281-284 288 289
#
# These tests all failed (produced output different from golden):
# 042 073 083 096 109 169 170 200 202 204-206 218 229 240 242 250
# 263 276 277 279 287
#
# The rest were not part of the "auto" group:
# 018 022 023 024 025 036 037 038 039 043 055 071 080 081 082 101
# 102 106 107 111 114 115 136 171 172 173 251 267 268
######
# print an error message and quit with non-zero status
function err() {
if [ $# -gt 0 ]; then
echo "" >&2
echo "${PROGNAME}: ${FUNCNAME[1]}: $@" >&2
fi
exit 2
}
# routine used to validate argument counts to all shell functions
function arg_count() {
local func
local want
local got
if [ $# -eq 2 ]; then
func="${FUNCNAME[1]}" # calling function
want=$1
got=$2
else
func="${FUNCNAME[0]}" # i.e., arg_count
want=2
got=$#
fi
[ "${want}" -eq "${got}" ] && return 0
echo "${PROGNAME}: ${func}: arg count bad (want ${want} got ${got})" >&2
exit 99
}
# validation function for repeat count argument
function count_valid() {
arg_count 1 $#
test "$1" -gt 0 # 0 is pointless; negative is wrong
}
# validation function for filesystem type argument
function fs_type_valid() {
arg_count 1 $#
case "$1" in
xfs|ext4|btrfs) return 0 ;;
*) return 1 ;;
esac
}
# validation function for device arguments
function device_valid() {
arg_count 1 $#
# Very simple testing--really should try to be more careful...
test -b "$1"
}
# print a usage message and quit
#
# if a message is supplied, print that first, and then exit
# with non-zero status
function usage() {
if [ $# -gt 0 ]; then
echo "" >&2
echo "$@" >&2
fi
echo "" >&2
echo "Usage: ${PROGNAME} <options> <tests>" >&2
echo "" >&2
echo " options:" >&2
echo " -h or --help" >&2
echo " show this message" >&2
echo " -c or --count" >&2
echo " iteration count (1 or more)" >&2
echo " -f or --fs-type" >&2
echo " one of: xfs, ext4, btrfs" >&2
echo " (default fs-type: xfs)" >&2
echo " -s or --scratch-dev (REQUIRED)" >&2
echo " name of device used for scratch filesystem" >&2
echo " -t or --test-dev (REQUIRED)" >&2
echo " name of device used for test filesystem" >&2
echo " tests:" >&2
echo " list of test numbers or ranges, e.g.:" >&2
echo " 1-9 11-15 17 19-21 26-28 31-34 41" >&2
echo " or possibly an xfstests test group, e.g.:" >&2
echo " -g quick" >&2
echo " (default tests: -g auto)" >&2
echo "" >&2
[ $# -gt 0 ] && exit 1
exit 0 # This is used for a --help
}
# parse command line arguments
function parseargs() {
# Short option flags
SHORT_OPTS=""
SHORT_OPTS="${SHORT_OPTS},h"
SHORT_OPTS="${SHORT_OPTS},c:"
SHORT_OPTS="${SHORT_OPTS},f:"
SHORT_OPTS="${SHORT_OPTS},s:"
SHORT_OPTS="${SHORT_OPTS},t:"
# Short option flags
LONG_OPTS=""
LONG_OPTS="${LONG_OPTS},help"
LONG_OPTS="${LONG_OPTS},count:"
LONG_OPTS="${LONG_OPTS},fs-type:"
LONG_OPTS="${LONG_OPTS},scratch-dev:"
LONG_OPTS="${LONG_OPTS},test-dev:"
TEMP=$(getopt --name "${PROGNAME}" \
--options "${SHORT_OPTS}" \
--longoptions "${LONG_OPTS}" \
-- "$@")
eval set -- "$TEMP"
while [ "$1" != "--" ]; do
case "$1" in
-h|--help)
usage
;;
-c|--count)
count_valid "$2" ||
usage "invalid count '$2'"
COUNT="$2"
shift
;;
-f|--fs-type)
fs_type_valid "$2" ||
usage "invalid fs_type '$2'"
FS_TYPE="$2"
shift
;;
-s|--scratch-dev)
device_valid "$2" ||
usage "invalid scratch-dev '$2'"
SCRATCH_DEV="$2"
shift
;;
-t|--test-dev)
device_valid "$2" ||
usage "invalid test-dev '$2'"
TEST_DEV="$2"
shift
;;
*)
exit 100 # Internal error
;;
esac
shift
done
shift
[ -n "${TEST_DEV}" ] || usage "test-dev must be supplied"
[ -n "${SCRATCH_DEV}" ] || usage "scratch-dev must be supplied"
[ $# -eq 0 ] || TESTS="$@"
}
################################################################
[ -z "$TESTDIR" ] && export TESTDIR="/tmp/cephtest"
# Set up some environment for normal teuthology test setup.
# This really should not be necessary but I found it was.
export CEPH_ARGS="--conf ${TESTDIR}/ceph.conf"
export CEPH_ARGS="${CEPH_ARGS} --keyring ${TESTDIR}/data/client.0.keyring"
export CEPH_ARGS="${CEPH_ARGS} --name client.0"
export LD_LIBRARY_PATH="${TESTDIR}/binary/usr/local/lib:${LD_LIBRARY_PATH}"
export PATH="${TESTDIR}/binary/usr/local/bin:${PATH}"
export PATH="${TESTDIR}/binary/usr/local/sbin:${PATH}"
################################################################
# Filesystem-specific mkfs options--set if not supplied
export XFS_MKFS_OPTIONS="${XFS_MKFS_OPTIONS:--f -l su=65536}"
export EXT4_MKFS_OPTIONS="${EXT4_MKFS_OPTIONS:--F}"
export BTRFS_MKFS_OPTION # No defaults
XFSTESTS_DIR="/var/lib/xfstests" # Where the tests live
# download, build, and install xfstests
function install_xfstests() {
arg_count 0 $#
local multiple=""
local ncpu
pushd "${TESTDIR}"
git clone "${XFSTESTS_REPO}"
cd xfstests-dev
# FIXME: use an older version before the tests were rearranged!
git reset --hard e5f1a13792f20cfac097fef98007610b422f2cac
ncpu=$(getconf _NPROCESSORS_ONLN 2>&1)
[ -n "${ncpu}" -a "${ncpu}" -gt 1 ] && multiple="-j ${ncpu}"
make realclean
make ${multiple}
make -k install
popd
}
# remove previously-installed xfstests files
function remove_xfstests() {
arg_count 0 $#
rm -rf "${TESTDIR}/xfstests-dev"
rm -rf "${XFSTESTS_DIR}"
}
# create a host options file that uses the specified devices
function setup_host_options() {
arg_count 0 $#
# Create mount points for the test and scratch filesystems
local test_dir="$(mktemp -d ${TESTDIR}/test_dir.XXXXXXXXXX)"
local scratch_dir="$(mktemp -d ${TESTDIR}/scratch_mnt.XXXXXXXXXX)"
# Write a host options file that uses these devices.
# xfstests uses the file defined by HOST_OPTIONS as the
# place to get configuration variables for its run, and
# all (or most) of the variables set here are required.
export HOST_OPTIONS="$(mktemp ${TESTDIR}/host_options.XXXXXXXXXX)"
cat > "${HOST_OPTIONS}" <<-!
# Created by ${PROGNAME} on $(date)
# HOST_OPTIONS="${HOST_OPTIONS}"
TEST_DEV="${TEST_DEV}"
SCRATCH_DEV="${SCRATCH_DEV}"
TEST_DIR="${test_dir}"
SCRATCH_MNT="${scratch_dir}"
FSTYP="${FS_TYPE}"
export TEST_DEV SCRATCH_DEV TEST_DIR SCRATCH_MNT FSTYP
#
export XFS_MKFS_OPTIONS="${XFS_MKFS_OPTIONS}"
!
# Now ensure we are using the same values
. "${HOST_OPTIONS}"
}
# remove the host options file, plus the directories it refers to
function cleanup_host_options() {
arg_count 0 $#
rm -rf "${TEST_DIR}" "${SCRATCH_MNT}"
rm -f "${HOST_OPTIONS}"
}
# run mkfs on the given device using the specified filesystem type
function do_mkfs() {
arg_count 1 $#
local dev="${1}"
local options
case "${FSTYP}" in
xfs) options="${XFS_MKFS_OPTIONS}" ;;
ext4) options="${EXT4_MKFS_OPTIONS}" ;;
btrfs) options="${BTRFS_MKFS_OPTIONS}" ;;
esac
"mkfs.${FSTYP}" ${options} "${dev}" ||
err "unable to make ${FSTYP} file system on device \"${dev}\""
}
# mount the given device on the given mount point
function do_mount() {
arg_count 2 $#
local dev="${1}"
local dir="${2}"
mount "${dev}" "${dir}" ||
err "unable to mount file system \"${dev}\" on \"${dir}\""
}
# unmount a previously-mounted device
function do_umount() {
arg_count 1 $#
local dev="${1}"
if mount | grep "${dev}" > /dev/null; then
if ! umount "${dev}"; then
err "unable to unmount device \"${dev}\""
fi
else
# Report it but don't error out
echo "device \"${dev}\" was not mounted" >&2
fi
}
# do basic xfstests setup--make and mount the test and scratch filesystems
function setup_xfstests() {
arg_count 0 $#
# TEST_DEV can persist across test runs, but for now we
# don't bother. I believe xfstests prefers its devices to
# have been already been formatted for the desired
# filesystem type--it uses blkid to identify things or
# something. So we mkfs both here for a fresh start.
do_mkfs "${TEST_DEV}"
do_mkfs "${SCRATCH_DEV}"
# I believe the test device is expected to be mounted; the
# scratch doesn't need to be (but it doesn't hurt).
do_mount "${TEST_DEV}" "${TEST_DIR}"
do_mount "${SCRATCH_DEV}" "${SCRATCH_MNT}"
}
# clean up changes made by setup_xfstests
function cleanup_xfstests() {
arg_count 0 $#
# Unmount these in case a test left them mounted (plus
# the corresponding setup function mounted them...)
do_umount "${TEST_DEV}"
do_umount "${SCRATCH_DEV}"
}
# top-level setup routine
function setup() {
arg_count 0 $#
setup_host_options
install_xfstests
setup_xfstests
}
# top-level (final) cleanup routine
function cleanup() {
arg_count 0 $#
cd /
cleanup_xfstests
remove_xfstests
cleanup_host_options
}
trap cleanup EXIT ERR HUP INT QUIT
# ################################################################
start_date="$(date)"
parseargs "$@"
setup
pushd "${XFSTESTS_DIR}"
for (( i = 1 ; i <= "${COUNT}" ; i++ )); do
[ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" starting at: $(date)"
./check ${TESTS} # Here we actually run the tests
status=$?
[ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" complete at: $(date)"
done
popd
# cleanup is called via the trap call, above
echo "This xfstests run started at: ${start_date}"
echo "xfstests run completed at: $(date)"
[ "${COUNT}" -gt 1 ] && echo "xfstests run consisted of ${COUNT} iterations"
exit "${status}"
| 12,288 | 25.77342 | 79 | sh |
null | ceph-main/qa/run_xfstests.sh | #!/usr/bin/env bash
# Copyright (C) 2012 Dreamhost, LLC
#
# This is free software; see the source for copying conditions.
# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as
# published by the Free Software Foundation version 2.
# Usage:
# run_xfstests -t /dev/<testdev> -s /dev/<scratchdev> [-f <fstype>] -- <tests>
# - test device and scratch device will both get trashed
# - fstypes can be xfs, ext4, or btrfs (xfs default)
# - tests can be listed individually: generic/001 xfs/008 xfs/009
# tests can also be specified by group: -g quick
#
# Exit status:
# 0: success
# 1: usage error
# 2: other runtime error
# 99: argument count error (programming error)
# 100: getopt error (internal error)
# Alex Elder <[email protected]>
# April 13, 2012
set -e
PROGNAME=$(basename $0)
# Default command line option values
COUNT="1"
EXPUNGE_FILE=""
DO_RANDOMIZE="" # false
FSTYP="xfs"
SCRATCH_DEV="" # MUST BE SPECIFIED
TEST_DEV="" # MUST BE SPECIFIED
TESTS="-g auto" # The "auto" group is supposed to be "known good"
# print an error message and quit with non-zero status
function err() {
if [ $# -gt 0 ]; then
echo "" >&2
echo "${PROGNAME}: ${FUNCNAME[1]}: $@" >&2
fi
exit 2
}
# routine used to validate argument counts to all shell functions
function arg_count() {
local func
local want
local got
if [ $# -eq 2 ]; then
func="${FUNCNAME[1]}" # calling function
want=$1
got=$2
else
func="${FUNCNAME[0]}" # i.e., arg_count
want=2
got=$#
fi
[ "${want}" -eq "${got}" ] && return 0
echo "${PROGNAME}: ${func}: arg count bad (want ${want} got ${got})" >&2
exit 99
}
# validation function for repeat count argument
function count_valid() {
arg_count 1 $#
test "$1" -gt 0 # 0 is pointless; negative is wrong
}
# validation function for filesystem type argument
function fs_type_valid() {
arg_count 1 $#
case "$1" in
xfs|ext4|btrfs) return 0 ;;
*) return 1 ;;
esac
}
# validation function for device arguments
function device_valid() {
arg_count 1 $#
# Very simple testing--really should try to be more careful...
test -b "$1"
}
# validation function for expunge file argument
function expunge_file_valid() {
arg_count 1 $#
test -s "$1"
}
# print a usage message and quit
#
# if a message is supplied, print that first, and then exit
# with non-zero status
function usage() {
if [ $# -gt 0 ]; then
echo "" >&2
echo "$@" >&2
fi
echo "" >&2
echo "Usage: ${PROGNAME} <options> -- <tests>" >&2
echo "" >&2
echo " options:" >&2
echo " -h or --help" >&2
echo " show this message" >&2
echo " -c or --count" >&2
echo " iteration count (1 or more)" >&2
echo " -f or --fs-type" >&2
echo " one of: xfs, ext4, btrfs" >&2
echo " (default fs-type: xfs)" >&2
echo " -r or --randomize" >&2
echo " randomize test order" >&2
echo " -s or --scratch-dev (REQUIRED)" >&2
echo " name of device used for scratch filesystem" >&2
echo " -t or --test-dev (REQUIRED)" >&2
echo " name of device used for test filesystem" >&2
echo " -x or --expunge-file" >&2
echo " name of file with list of tests to skip" >&2
echo " tests:" >&2
echo " list of test numbers, e.g.:" >&2
echo " generic/001 xfs/008 shared/032 btrfs/009" >&2
echo " or possibly an xfstests test group, e.g.:" >&2
echo " -g quick" >&2
echo " (default tests: -g auto)" >&2
echo "" >&2
[ $# -gt 0 ] && exit 1
exit 0 # This is used for a --help
}
# parse command line arguments
function parseargs() {
# Short option flags
SHORT_OPTS=""
SHORT_OPTS="${SHORT_OPTS},h"
SHORT_OPTS="${SHORT_OPTS},c:"
SHORT_OPTS="${SHORT_OPTS},f:"
SHORT_OPTS="${SHORT_OPTS},r"
SHORT_OPTS="${SHORT_OPTS},s:"
SHORT_OPTS="${SHORT_OPTS},t:"
SHORT_OPTS="${SHORT_OPTS},x:"
# Long option flags
LONG_OPTS=""
LONG_OPTS="${LONG_OPTS},help"
LONG_OPTS="${LONG_OPTS},count:"
LONG_OPTS="${LONG_OPTS},fs-type:"
LONG_OPTS="${LONG_OPTS},randomize"
LONG_OPTS="${LONG_OPTS},scratch-dev:"
LONG_OPTS="${LONG_OPTS},test-dev:"
LONG_OPTS="${LONG_OPTS},expunge-file:"
TEMP=$(getopt --name "${PROGNAME}" \
--options "${SHORT_OPTS}" \
--longoptions "${LONG_OPTS}" \
-- "$@")
eval set -- "$TEMP"
while [ "$1" != "--" ]; do
case "$1" in
-h|--help)
usage
;;
-c|--count)
count_valid "$2" ||
usage "invalid count '$2'"
COUNT="$2"
shift
;;
-f|--fs-type)
fs_type_valid "$2" ||
usage "invalid fs_type '$2'"
FSTYP="$2"
shift
;;
-r|--randomize)
DO_RANDOMIZE="t"
;;
-s|--scratch-dev)
device_valid "$2" ||
usage "invalid scratch-dev '$2'"
SCRATCH_DEV="$2"
shift
;;
-t|--test-dev)
device_valid "$2" ||
usage "invalid test-dev '$2'"
TEST_DEV="$2"
shift
;;
-x|--expunge-file)
expunge_file_valid "$2" ||
usage "invalid expunge-file '$2'"
EXPUNGE_FILE="$2"
shift
;;
*)
exit 100 # Internal error
;;
esac
shift
done
shift
[ -n "${TEST_DEV}" ] || usage "test-dev must be supplied"
[ -n "${SCRATCH_DEV}" ] || usage "scratch-dev must be supplied"
[ $# -eq 0 ] || TESTS="$@"
}
################################################################
# run mkfs on the given device using the specified filesystem type
function do_mkfs() {
arg_count 1 $#
local dev="${1}"
local options
case "${FSTYP}" in
xfs) options="-f" ;;
ext4) options="-F" ;;
btrfs) options="-f" ;;
esac
"mkfs.${FSTYP}" ${options} "${dev}" ||
err "unable to make ${FSTYP} file system on device \"${dev}\""
}
# top-level setup routine
function setup() {
arg_count 0 $#
wget -P "${TESTDIR}" http://download.ceph.com/qa/xfstests.tar.gz
tar zxf "${TESTDIR}/xfstests.tar.gz" -C "$(dirname "${XFSTESTS_DIR}")"
mkdir "${TEST_DIR}"
mkdir "${SCRATCH_MNT}"
do_mkfs "${TEST_DEV}"
}
# top-level (final) cleanup routine
function cleanup() {
arg_count 0 $#
# ensure teuthology can clean up the logs
chmod -R a+rw "${TESTDIR}/archive"
findmnt "${TEST_DEV}" && umount "${TEST_DEV}"
[ -d "${SCRATCH_MNT}" ] && rmdir "${SCRATCH_MNT}"
[ -d "${TEST_DIR}" ] && rmdir "${TEST_DIR}"
rm -rf "${XFSTESTS_DIR}"
rm -f "${TESTDIR}/xfstests.tar.gz"
}
# ################################################################
start_date="$(date)"
parseargs "$@"
[ -n "${TESTDIR}" ] || usage "TESTDIR env variable must be set"
[ -d "${TESTDIR}/archive" ] || usage "\$TESTDIR/archive directory must exist"
TESTDIR="$(readlink -e "${TESTDIR}")"
[ -n "${EXPUNGE_FILE}" ] && EXPUNGE_FILE="$(readlink -e "${EXPUNGE_FILE}")"
XFSTESTS_DIR="/var/lib/xfstests" # hardcoded into dbench binary
TEST_DIR="/mnt/test_dir"
SCRATCH_MNT="/mnt/scratch_mnt"
MKFS_OPTIONS=""
EXT_MOUNT_OPTIONS="-o block_validity,dioread_nolock"
trap cleanup EXIT ERR HUP INT QUIT
setup
export TEST_DEV
export TEST_DIR
export SCRATCH_DEV
export SCRATCH_MNT
export FSTYP
export MKFS_OPTIONS
export EXT_MOUNT_OPTIONS
pushd "${XFSTESTS_DIR}"
for (( i = 1 ; i <= "${COUNT}" ; i++ )); do
[ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" starting at: $(date)"
RESULT_BASE="${TESTDIR}/archive/results-${i}"
mkdir "${RESULT_BASE}"
export RESULT_BASE
EXPUNGE=""
[ -n "${EXPUNGE_FILE}" ] && EXPUNGE="-E ${EXPUNGE_FILE}"
RANDOMIZE=""
[ -n "${DO_RANDOMIZE}" ] && RANDOMIZE="-r"
# -T output timestamps
PATH="${PWD}/bin:${PATH}" ./check -T ${RANDOMIZE} ${EXPUNGE} ${TESTS}
findmnt "${TEST_DEV}" && umount "${TEST_DEV}"
[ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" complete at: $(date)"
done
popd
# cleanup is called via the trap call, above
echo "This xfstests run started at: ${start_date}"
echo "xfstests run completed at: $(date)"
[ "${COUNT}" -gt 1 ] && echo "xfstests run consisted of ${COUNT} iterations"
echo OK
| 8,000 | 23.694444 | 78 | sh |
null | ceph-main/qa/run_xfstests_qemu.sh | #!/usr/bin/env bash
#
# TODO switch to run_xfstests.sh (see run_xfstests_krbd.sh)
set -x
[ -n "${TESTDIR}" ] || export TESTDIR="/tmp/cephtest"
[ -d "${TESTDIR}" ] || mkdir "${TESTDIR}"
URL_BASE="https://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa"
SCRIPT="run_xfstests-obsolete.sh"
cd "${TESTDIR}"
curl -O "${URL_BASE}/${SCRIPT}"
# mark executable only if the file isn't empty since ./"${SCRIPT}"
# on an empty file would succeed
if [[ -s "${SCRIPT}" ]]; then
chmod +x "${SCRIPT}"
fi
TEST_DEV="/dev/vdb"
if [[ ! -b "${TEST_DEV}" ]]; then
TEST_DEV="/dev/sdb"
fi
SCRATCH_DEV="/dev/vdc"
if [[ ! -b "${SCRATCH_DEV}" ]]; then
SCRATCH_DEV="/dev/sdc"
fi
# the excluded tests fail in the current testing vm regardless of whether
# rbd is used
./"${SCRIPT}" -c 1 -f xfs -t "${TEST_DEV}" -s "${SCRATCH_DEV}" \
1-7 9-17 19-26 28-49 51-61 63 66-67 69-79 83 85-105 108-110 112-135 \
137-170 174-191 193-204 206-217 220-227 230-231 233 235-241 243-249 \
252-259 261-262 264-278 281-286 289
STATUS=$?
rm -f "${SCRIPT}"
exit "${STATUS}"
| 1,050 | 23.44186 | 73 | sh |
null | ceph-main/qa/runallonce.sh | #!/usr/bin/env bash
set -ex
basedir=`echo $0 | sed 's/[^/]*$//g'`.
testdir="$1"
[ -n "$2" ] && logdir=$2 || logdir=$1
[ ${basedir:0:1} == "." ] && basedir=`pwd`/${basedir:1}
PATH="$basedir/src:$PATH"
[ -z "$testdir" ] || [ ! -d "$testdir" ] && echo "specify test dir" && exit 1
cd $testdir
for test in `cd $basedir/workunits && find . -executable -type f | $basedir/../src/script/permute`
do
echo "------ running test $test ------"
pwd
[ -d $test ] && rm -r $test
mkdir -p $test
mkdir -p `dirname $logdir/$test.log`
test -e $logdir/$test.log && rm $logdir/$test.log
sh -c "cd $test && $basedir/workunits/$test" 2>&1 | tee $logdir/$test.log
done
| 665 | 24.615385 | 98 | sh |
null | ceph-main/qa/runoncfuse.sh | #!/usr/bin/env bash
set -x
mkdir -p testspace
ceph-fuse testspace -m $1
./runallonce.sh testspace
killall ceph-fuse
| 118 | 12.222222 | 25 | sh |
null | ceph-main/qa/runonkclient.sh | #!/usr/bin/env bash
set -x
mkdir -p testspace
/bin/mount -t ceph $1 testspace
./runallonce.sh testspace
/bin/umount testspace
| 129 | 12 | 31 | sh |
null | ceph-main/qa/setup-chroot.sh | #!/usr/bin/env bash
die() {
echo ${@}
exit 1
}
usage()
{
cat << EOF
$0: sets up a chroot environment for building the ceph server
usage:
-h Show this message
-r [install_dir] location of the root filesystem to install to
example: -r /images/sepia/
-s [src_dir] location of the directory with the source code
example: -s ./src/ceph
EOF
}
cleanup() {
umount -l "${INSTALL_DIR}/mnt/tmp"
umount -l "${INSTALL_DIR}/proc"
umount -l "${INSTALL_DIR}/sys"
}
INSTALL_DIR=
SRC_DIR=
while getopts "hr:s:" OPTION; do
case $OPTION in
h) usage; exit 1 ;;
r) INSTALL_DIR=$OPTARG ;;
s) SRC_DIR=$OPTARG ;;
?) usage; exit
;;
esac
done
[ $EUID -eq 0 ] || die "This script uses chroot, which requires root permissions."
[ -d "${INSTALL_DIR}" ] || die "No such directory as '${INSTALL_DIR}'. \
You must specify an install directory with -r"
[ -d "${SRC_DIR}" ] || die "no such directory as '${SRC_DIR}'. \
You must specify a source directory with -s"
readlink -f ${SRC_DIR} || die "readlink failed on ${SRC_DIR}"
ABS_SRC_DIR=`readlink -f ${SRC_DIR}`
trap cleanup INT TERM EXIT
mount --bind "${ABS_SRC_DIR}" "${INSTALL_DIR}/mnt/tmp" || die "bind mount failed"
mount -t proc none "${INSTALL_DIR}/proc" || die "mounting proc failed"
mount -t sysfs none "${INSTALL_DIR}/sys" || die "mounting sys failed"
echo "$0: starting chroot."
echo "cd /mnt/tmp before building"
echo
chroot ${INSTALL_DIR} env HOME=/mnt/tmp /bin/bash
echo "$0: exiting chroot."
exit 0
| 1,636 | 23.80303 | 82 | sh |
null | ceph-main/qa/cephfs/unshare_ns_mount.sh | #!/usr/bin/env bash
# This is one helper for mounting the ceph-fuse/kernel clients by
# unsharing the network namespace, let's call it netns container.
# With the netns container, you can easily suspend or resume the
# virtual network interface to simulate the client node hard
# shutdown for some test cases.
#
# netnsX netnsY netnsZ
# -------------- -------------- --------------
# | mount client | | mount client | | mount client |
# | default | ... | default | ... | default |
# |192.168.0.1/16| |192.168.0.2/16| |192.168.0.3/16|
# | veth0 | | veth0 | | veth0 |
# -------------- -------------- -------------
# | | |
# \ | brx.Y /
# \ ---------------------- /
# \ brx.X | ceph-brx | brx.Z /
# \------>| default |<------/
# | | 192.168.255.254/16 | |
# | ---------------------- |
# (suspend/resume) | (suspend/resume)
# -----------
# | Physical |
# | A.B.C.D/M |
# -----------
#
# By default it will use the 192.168.X.Y/16 private network IPs for
# the ceph-brx and netnses as above. You can also specify your
# own new ip/mask for the ceph-brx, like:
#
# $ unshare_ns_mount.sh --fuse /mnt/cephfs --brxip 172.19.100.100/12
#
# Then each netns will get a new ip from the ranges:
# [172.16.0.1 ~ 172.19.100.99]/12 and [172.19.100.101 ~ 172.31.255.254]/12
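#
# For illustration only, here is a rough sketch of the plumbing that
# ceph_mount() below automates for each client (names such as netnsX,
# veth0 and brx.X follow the diagram above; the real code also picks a
# unique nsid and a free IP, and sets up the ceph-brx bridge and NAT first):
#
# ip netns add netnsX
# ip link add veth0 netns netnsX type veth peer name brx.X
# ip netns exec netnsX ip addr add 192.168.0.1/16 dev veth0
# ip netns exec netnsX ip link set veth0 up
# ip netns exec netnsX ip route add default via 192.168.255.254
# nmcli connection add type bridge-slave con-name brx.X ifname brx.X master ceph-brx
#
# Suspending a client is then just "ip link set brx.X down" on the host.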
usage() {
echo ""
echo "This will help to isolate the network namespace from OS for the mount client!"
echo ""
echo "usage: unshare_ns_mount.sh [OPTIONS [paramters]] [--brxip <ip_address/mask>]"
echo "OPTIONS:"
echo -e " --fuse <ceph-fuse options>"
echo -e "\tThe ceph-fuse command options"
echo -e "\t $ unshare_ns_mount.sh --fuse -m 192.168.0.1:6789 /mnt/cephfs -o nonempty"
echo ""
echo -e " --kernel <mount options>"
echo -e "\tThe mount command options"
echo -e "\t $ unshare_ns_mount.sh --kernel -t ceph 192.168.0.1:6789:/ /mnt/cephfs -o fs=a"
echo ""
echo -e " --suspend <mountpoint>"
echo -e "\tDown the veth interface in the network namespace"
echo -e "\t $ unshare_ns_mount.sh --suspend /mnt/cephfs"
echo ""
echo -e " --resume <mountpoint>"
echo -e "\tUp the veth interface in the network namespace"
echo -e "\t $ unshare_ns_mount.sh --resume /mnt/cephfs"
echo ""
echo -e " --umount <mountpoint>"
echo -e "\tUmount and delete the network namespace"
echo -e "\t $ unshare_ns_mount.sh --umount /mnt/cephfs"
echo ""
echo -e " --brxip <ip_address/mask>"
echo -e "\tSpecify ip/mask for ceph-brx and it only makes sense for --fuse/--kernel options"
echo -e "\t(default: 192.168.255.254/16, netns ip: 192.168.0.1/16 ~ 192.168.255.253/16)"
echo -e "\t $ unshare_ns_mount.sh --fuse -m 192.168.0.1:6789 /mnt/cephfs --brxip 172.19.255.254/12"
echo -e "\t $ unshare_ns_mount.sh --kernel 192.168.0.1:6789:/ /mnt/cephfs --brxip 172.19.255.254/12"
echo ""
echo -e " -h, --help"
echo -e "\tPrint help"
echo ""
}
CEPH_BRX=ceph-brx
CEPH_BRX_IP_DEF=192.168.255.254
NET_MASK_DEF=16
BRD_DEF=192.168.255.255
CEPH_BRX_IP=$CEPH_BRX_IP_DEF
NET_MASK=$NET_MASK_DEF
BRD=$BRD_DEF
mountpoint=""
new_netns=""
fuse_type=false
function get_mountpoint() {
for param in $@
do
if [ -d $param ]; then
# skipping "--client_mountpoint/-r root_directory"
# option for ceph-fuse command
if [ "$last" == "-r" -o "$last" == "--client_mountpoint" ]; then
last=$param
continue
fi
if [ "0$mountpoint" != "0" ]; then
echo "Oops: too many mountpiont options!"
exit 1
fi
mountpoint=$param
fi
last=$param
done
if [ "0$mountpoint" == "0" ]; then
echo "Oops: mountpoint path is not a directory or no mountpoint specified!"
exit 1
fi
}
function get_new_netns() {
# prune the repeating slashes:
# "/mnt///cephfs///" --> "/mnt/cephfs/"
__mountpoint=`echo "$mountpoint" | sed 's/\/\+/\//g'`
# prune the leading slashes
while [ ${__mountpoint:0:1} == "/" ]
do
__mountpoint=${__mountpoint:1}
done
# prune the trailing slashes
while [ ${__mountpoint: -1} == "/" ]
do
__mountpoint=${__mountpoint:0:-1}
done
# replace '/' with '-'
__mountpoint=${__mountpoint//\//-}
# "mnt/cephfs" --> "ceph-fuse-mnt-cephfs"
if [ "$1" == "--fuse" ]; then
new_netns=`echo ceph-fuse-$__mountpoint`
fuse_type=true
return
fi
# "mnt/cephfs" --> "ceph-kernel-mnt-cephfs"
if [ "$1" == "--kernel" ]; then
new_netns=`echo ceph-kernel-$__mountpoint`
return
fi
# we are in umount/suspend/resume routines
for ns in `ip netns list | awk '{print $1}'`
do
if [ "$ns" == "ceph-fuse-$__mountpoint" ]; then
new_netns=$ns
fuse_type=true
return
fi
if [ "$ns" == "ceph-kernel-$__mountpoint" ]; then
new_netns=$ns
return
fi
done
if [ "0$new_netns" == "0" ]; then
echo "Oops, netns 'ceph-{fuse/kernel}-$__mountpoint' does not exists!"
exit 1
fi
}
# the peer veth name will be "brx.$nsid" on the host node
function get_netns_brx() {
get_new_netns
nsid=`ip netns list-id | grep "$new_netns" | awk '{print $2}'`
netns_veth=brx.$nsid
eval $1="$netns_veth"
}
function suspend_netns_veth() {
get_mountpoint $@
get_netns_brx brx
ip link set $brx down
exit 0
}
function resume_netns_veth() {
get_mountpoint $@
get_netns_brx brx
ip link set $brx up
exit 0
}
# help and usage
if [ $# == 0 -o "$1" == "-h" -o "$1" == "--help" ]; then
usage
exit 0
fi
# suspend the veth from network namespace
if [ $1 == "--suspend" ]; then
suspend_netns_veth $@
exit 0
fi
# resume the veth from network namespace
if [ $1 == "--resume" ]; then
resume_netns_veth $@
exit 0
fi
function ceph_umount() {
get_mountpoint $@
get_new_netns
if [ $fuse_type == true ]; then
nsenter --net=/var/run/netns/$new_netns fusermount -u $mountpoint 2>/dev/null
else
nsenter --net=/var/run/netns/$new_netns umount $mountpoint 2>/dev/null
fi
# let's wait for a while to let the umount operation
# finish before deleting the netns
while [ 1 ]
do
for pid in `ip netns pids $new_netns 2>/dev/null`
do
name=`cat /proc/$pid/comm 2>/dev/null`
if [ "$name" == "ceph-fuse" ]; then
break
fi
done
if [ "$name" == "ceph-fuse" ]; then
name=""
usleep 100000
continue
fi
break
done
nsid=`ip netns list-id | grep "$new_netns" | awk '{print $2}'`
netns_brx=brx.$nsid
# brctl delif $CEPH_BRX $netns_brx 2>/dev/null
nmcli connection down $netns_brx 2>/dev/null
nmcli connection delete $netns_brx 2>/dev/null
ip netns delete $new_netns 2>/dev/null
# if this is the last netns_brx, we will delete
# the $CEPH_BRX and restore the OS configuration
# rc=`brctl show ceph-brx 2>/dev/null | grep 'brx\.'|wc -l`
rc=`nmcli connection show 2>/dev/null | grep 'brx\.' | wc -l`
if [ $rc == 0 ]; then
ip link set $CEPH_BRX down 2>/dev/null
# brctl delbr $CEPH_BRX 2>/dev/null
nmcli connection delete $CEPH_BRX 2>/dev/null
# restore the ip forward
tmpfile=`ls /tmp/ | grep "$CEPH_BRX\."`
tmpfile=/tmp/$tmpfile
if [ ! -f $tmpfile ]; then
echo "Oops, the $CEPH_BRX.XXX temp file does not exist!"
else
save=`cat $tmpfile`
echo $save > /proc/sys/net/ipv4/ip_forward
rm -rf $tmpfile
fi
# drop the iptables NAT rules
host_nic=`route | grep default | awk '{print $8}'`
iptables -D FORWARD -o $host_nic -i $CEPH_BRX -j ACCEPT
iptables -D FORWARD -i $host_nic -o $CEPH_BRX -j ACCEPT
iptables -t nat -D POSTROUTING -s $CEPH_BRX_IP/$NET_MASK -o $host_nic -j MASQUERADE
fi
}
function get_brd_mask() {
first=`echo "$CEPH_BRX_IP" | awk -F. '{print $1}'`
second=`echo "$CEPH_BRX_IP" | awk -F. '{print $2}'`
third=`echo "$CEPH_BRX_IP" | awk -F. '{print $3}'`
fourth=`echo "$CEPH_BRX_IP" | awk -F. '{print $4}'`
if [ "$first" == "172" ]; then
second_max=31
else
second_max=255
fi
third_max=255
fourth_max=255
if [ $NET_MASK -lt 16 ]; then
let power=16-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
second=$((second&~m))
let second_max=$second+$m
elif [ $NET_MASK -lt 24 ]; then
let power=24-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
third=$((third&~m))
let third_max=$third+$m
second_max=$second
elif [ $NET_MASK -lt 32 ]; then
let power=32-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
fourth=$((fourth&~m))
let fourth_max=$fourth+$m
second_max=$second
third_max=$third
fi
BRD=$first.$second_max.$third_max.$fourth_max
}
# By default:
# The netns IP will be 192.168.0.1 ~ 192.168.255.253,
# and 192.168.255.254 is saved for $CEPH_BRX
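#
# Worked example (illustrative only): with "--brxip 172.19.255.254/12"
# we get NET_MASK=12, so in the code below power=16-12=4 and
# m=2^4-1=15; the second octet 19 is masked down to 16 and
# second_max becomes 16+15=31, so candidate netns addresses run from
# 172.16.0.1 up to 172.31.255.254, matching the ranges quoted in the
# header comment.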
function get_new_ns_ip() {
first=`echo "$CEPH_BRX_IP" | awk -F. '{print $1}'`
second=`echo "$CEPH_BRX_IP" | awk -F. '{print $2}'`
third=`echo "$CEPH_BRX_IP" | awk -F. '{print $3}'`
fourth=`echo "$CEPH_BRX_IP" | awk -F. '{print $4}'`
if [ "$first" == ""172 ]; then
second_max=31
else
second_max=255
fi
third_max=255
fourth_max=254
if [ $NET_MASK -lt 16 ]; then
let power=16-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
second=$((second&~m))
let second_max=$second+$m
third=0
fourth=1
elif [ $NET_MASK -lt 24 ]; then
let power=24-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
third=$((third&~m))
let third_max=$third+$m
second_max=$second
fourth=1
elif [ $NET_MASK -lt 32 ]; then
let power=32-$NET_MASK
m=`awk 'BEGIN{printf 2^"'$power'"-1}'`
fourth=$((fourth&~m))
let fourth+=1
let fourth_max=$fourth+$m-1
second_max=$second
third_max=$third
fi
while [ $second -le $second_max -a $third -le $third_max -a $fourth -le $fourth_max ]
do
conflict=false
# check from the existing network namespaces
for netns in `ip netns list | awk '{print $1}'`
do
ip=`ip netns exec $netns ip addr | grep "inet " | grep "veth0"`
ip=`echo "$ip" | awk '{print $2}' | awk -F/ '{print $1}'`
if [ "0$ip" == "0" ]; then
continue
fi
if [ "$first.$second.$third.$fourth" == "$ip" ]; then
conflict=true
let fourth+=1
if [ $fourth -le $fourth_max ]; then
break
fi
fourth=0
let third+=1
if [ $third -le $third_max ]; then
break
fi
third=0
let second+=1
if [ $second -le $second_max ]; then
break
fi
echo "Oops: we have ran out of the ip addresses!"
exit 1
fi
done
# have we found one ?
if [ $conflict == false ]; then
break
fi
done
ip=$first.$second.$third.$fourth
max=$first.$second_max.$third_max.$fourth_max
if [ "$ip" == "$max" ]; then
echo "Oops: we have ran out of the ip addresses!"
exit 1
fi
eval $1="$ip"
}
function check_valid_private_ip() {
first=`echo "$1" | awk -F. '{print $1}'`
second=`echo "$1" | awk -F. '{print $2}'`
# private network class A 10.0.0.0 - 10.255.255.255
if [ "$first" == "10" -a $NET_MASK -ge 8 ]; then
return
fi
# private network class B 172.16.0.0 - 172.31.255.255
if [ "$first" == "172" -a $second -ge 16 -a $second -le 31 -a $NET_MASK -ge 12 ]; then
return
fi
# private network class C 192.168.0.0 - 192.168.255.255
if [ "$first" == "192" -a "$second" == "168" -a $NET_MASK -ge 16 ]; then
return
fi
echo "Oops: invalid private ip address '$CEPH_BRX_IP/$NET_MASK'!"
exit 1
}
function setup_bridge_and_nat() {
# check and parse the --brxip parameter
is_brxip=false
for ip in $@
do
if [ "$ip" == "--brxip" ]; then
is_brxip=true
continue
fi
if [ $is_brxip == true ]; then
new_brxip=$ip
break
fi
done
# if the $CEPH_BRX already exists, then check the new
# brxip; if they do not match, fail without doing anything.
rc=`ip addr | grep "inet " | grep " $CEPH_BRX"`
if [ "0$rc" != "0" ]; then
existing_brxip=`echo "$rc" | awk '{print $2}'`
if [ "0$new_brxip" != "0" -a "$existing_brxip" != "$new_brxip" ]; then
echo "Oops: conflict with the existing $CEPH_BRX ip '$existing_brxip', new '$new_brxip'!"
exit 1
fi
CEPH_BRX_IP=`echo "$existing_brxip" | awk -F/ '{print $1}'`
NET_MASK=`echo "$existing_brxip" | awk -F/ '{print $2}'`
get_brd_mask
return
fi
# if this is the first time the script is run, or no network
# namespace exists yet, we need to set up the $CEPH_BRX; if no
# --brxip is specified, the default $CEPH_BRX_IP/$NET_MASK is used
if [ "0$new_brxip" != "0" ]; then
CEPH_BRX_IP=`echo "$new_brxip" | awk -F/ '{print $1}'`
NET_MASK=`echo "$new_brxip" | awk -F/ '{print $2}'`
get_brd_mask
check_valid_private_ip $CEPH_BRX_IP
fi
# brctl addbr $CEPH_BRX
nmcli connection add type bridge con-name $CEPH_BRX ifname $CEPH_BRX stp no
# ip link set $CEPH_BRX up
# ip addr add $CEPH_BRX_IP/$NET_MASK brd $BRD dev $CEPH_BRX
nmcli connection modify $CEPH_BRX ipv4.addresses $CEPH_BRX_IP/$NET_MASK ipv4.method manual
nmcli connection up $CEPH_BRX
# setup the NAT
rm -rf /tmp/ceph-brx.*
tmpfile=$(mktemp /tmp/ceph-brx.XXXXXXXX)
save=`cat /proc/sys/net/ipv4/ip_forward`
echo $save > $tmpfile
echo 1 > /proc/sys/net/ipv4/ip_forward
host_nic=`route | grep default | awk '{print $8}'`
iptables -A FORWARD -o $host_nic -i $CEPH_BRX -j ACCEPT
iptables -A FORWARD -i $host_nic -o $CEPH_BRX -j ACCEPT
iptables -t nat -A POSTROUTING -s $CEPH_BRX_IP/$NET_MASK -o $host_nic -j MASQUERADE
}
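# A quick way to sanity-check the result of setup_bridge_and_nat()
# (illustrative commands only, not executed by this script):
#
# nmcli connection show | grep ceph-brx # bridge connection exists
# ip addr show ceph-brx # carries $CEPH_BRX_IP/$NET_MASK
# iptables -t nat -S POSTROUTING | grep MASQUERADE # NAT rule present
# cat /proc/sys/net/ipv4/ip_forward # should print 1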
function __ceph_mount() {
# for some options, like '-t' in the mount command, the
# nsenter command would consume them itself, so it is
# hard to pass them directly into the netns.
# here we will create one temp file with execute (x) mode
tmpfile=$(mktemp /tmp/ceph-nsenter.XXXXXXXX)
chmod +x $tmpfile
if [ "$1" == "--kernel" ]; then
cmd=`echo "$@" | sed 's/--kernel/mount/'`
else
cmd=`echo "$@" | sed 's/--fuse/ceph-fuse/'`
fi
# remove the --brxip parameter
cmd=`echo "$cmd" | sed 's/--brxip.*\/[0-9]* //'`
# enter $new_netns and run the ceph fuse client mount;
# we can't use 'ip netns exec' here because it
# would unshare the mount namespace.
echo "$cmd" > $tmpfile
nsenter --net=/var/run/netns/$new_netns /bin/bash $tmpfile ; echo $? > $tmpfile
rc=`cat $tmpfile`
rm -f $tmpfile
# fall back
if [ $rc != 0 ]; then
m=$mountpoint
mountpoint=""
ceph_umount $m
fi
}
function get_new_nsid() {
# get one unique netns id
uniq_id=0
while [ 1 ]
do
rc=`ip netns list-id | grep "nsid $uniq_id "`
if [ "0$rc" == "0" ]; then
break
fi
let uniq_id+=1
done
eval $1="$uniq_id"
}
function ceph_mount() {
get_mountpoint $@
setup_bridge_and_nat $@
get_new_netns $1
rc=`ip netns list | grep "$new_netns" | awk '{print $1}'`
if [ "0$rc" != "0" ]; then
echo "Oops: the netns "$new_netns" already exists!"
exit 1
fi
get_new_nsid new_nsid
# create a new network namespace
ip netns add $new_netns
ip netns set $new_netns $new_nsid
get_new_ns_ip ns_ip
if [ 0"$ns_ip" == "0" ]; then
echo "Oops: there is no ip address could be used any more!"
exit 1
fi
# veth interface in netns
ns_veth=veth0
netns_brx=brx.$new_nsid
# setup veth interfaces
ip link add $ns_veth netns $new_netns type veth peer name $netns_brx
ip netns exec $new_netns ip addr add $ns_ip/$NET_MASK brd $BRD dev $ns_veth
ip netns exec $new_netns ip link set $ns_veth up
ip netns exec $new_netns ip link set lo up
ip netns exec $new_netns ip route add default via $CEPH_BRX_IP
# bring up the bridge interface and join it to $CEPH_BRX
# brctl addif $CEPH_BRX $netns_brx
nmcli connection add type bridge-slave con-name $netns_brx ifname $netns_brx master $CEPH_BRX
nmcli connection up $netns_brx
# ip link set $netns_brx up
__ceph_mount $@
}
if [ "$1" == "--umount" ]; then
ceph_umount $@
exit 0
fi
# mount in the netns
if [ "$1" != "--kernel" -a "$1" != "--fuse" ]; then
echo "Oops: invalid mount options '$1'!"
exit 1
fi
ceph_mount $@
| 17,928 | 29.132773 | 105 | sh |
null | ceph-main/qa/client/30_subdir_mount.sh | #!/usr/bin/env bash
set -x
basedir=`echo $0 | sed 's/[^/]*$//g'`.
. $basedir/common.sh
client_mount
mkdir -p $mnt/sub
echo sub > $mnt/sub/file
client_umount
mkdir -p $mnt/1
mkdir -p $mnt/2
/bin/mount -t ceph $monhost:/sub $mnt/1
grep sub $mnt/1/file
/bin/mount -t ceph $monhost:/ $mnt/2
grep sub $mnt/2/sub/file
/bin/umount $mnt/1
grep sub $mnt/2/sub/file
/bin/umount $mnt/2
| 381 | 14.916667 | 39 | sh |
null | ceph-main/qa/client/common.sh |
# defaults
[ -z "$bindir" ] && bindir=$PWD # location of init-ceph
[ -z "$conf" ] && conf="$basedir/ceph.conf"
[ -z "$mnt" ] && mnt="/c"
[ -z "$monhost" ] && monhost="cosd0"
set -e
mydir=`hostname`_`echo $0 | sed 's/\//_/g'`
client_mount()
{
/bin/mount -t ceph $monhost:/ $mnt
}
client_umount()
{
/bin/umount $mnt
# look for VFS complaints
if dmesg | tail -n 50 | grep -c "VFS: Busy inodes" ; then
echo "looks like we left inodes pinned"
exit 1
fi
}
ceph_start()
{
$bindir/init-ceph -c $conf start ${1}
}
ceph_stop()
{
$bindir/init-ceph -c $conf stop ${1}
}
ceph_restart()
{
$bindir/init-ceph -c $conf restart ${1}
}
ceph_command()
{
$bindir/ceph -c $conf $*
}
client_enter_mydir()
{
pushd .
test -d $mnt/$mydir && rm -r $mnt/$mydir
mkdir $mnt/$mydir
cd $mnt/$mydir
}
client_leave_mydir()
{
popd
}
| 873 | 13.813559 | 61 | sh |
null | ceph-main/qa/client/gen-1774.sh | #!/usr/bin/env bash
set -e
mount () { :; }
umount () { :; }
list="\
abiword.control
abiword.list
abiword-plugin-latex.control
abiword-plugin-latex.list
abiword-plugin-opendocument.control
abiword-plugin-opendocument.list
abiword-plugin-openxml.control
abiword-plugin-openxml.list
abiword-plugin-pdf.control
abiword-plugin-pdf.list
abiword-plugin-wikipedia.control
abiword-plugin-wikipedia.list
abiword.postinst
aceofpenguins.control
aceofpenguins-launcher.control
aceofpenguins-launcher.list
aceofpenguins.list
aceofpenguins.postinst
alsa-conf-base.control
alsa-conf-base.list
alsa-scenarii-shr.conffiles
alsa-scenarii-shr.control
alsa-scenarii-shr.list
alsa-utils-alsactl.control
alsa-utils-alsactl.list
alsa-utils-alsamixer.control
alsa-utils-alsamixer.list
alsa-utils-amixer.control
alsa-utils-amixer.list
alsa-utils-aplay.control
alsa-utils-aplay.list
angstrom-libc-fixup-hack.control
angstrom-libc-fixup-hack.list
angstrom-libc-fixup-hack.postinst
apmd.control
apmd.list
apmd.postinst
apmd.postrm
apmd.prerm
aspell.control
aspell.list
atd-over-fso.control
atd-over-fso.list
atd-over-fso.postinst
atd-over-fso.postrm
atd-over-fso.prerm
base-files.conffiles
base-files.control
base-files.list
base-passwd.control
base-passwd.list
base-passwd.postinst
bash.control
bash.list
bash.postinst
bluez4.control
bluez4.list
bluez4.postinst
bluez4.postrm
bluez4.prerm
boost-signals.control
boost-signals.list
boost-signals.postinst
busybox.control
busybox.list
busybox-mountall.control
busybox-mountall.list
busybox-mountall.postinst
busybox-mountall.prerm
busybox.postinst
busybox.prerm
busybox-syslog.conffiles
busybox-syslog.control
busybox-syslog.list
busybox-syslog.postinst
busybox-syslog.postrm
busybox-syslog.prerm
ca-certificates.conffiles
ca-certificates.control
ca-certificates.list
ca-certificates.postinst
calc.control
calc.list
connman.control
connman.list
connman-plugin-udhcp.control
connman-plugin-udhcp.list
connman-plugin-wifi.control
connman-plugin-wifi.list
connman.postinst
connman.postrm
connman.prerm
connman-scripts.control
connman-scripts.list
cpio.control
cpio.list
cpio.postinst
cpio.prerm
cpp.control
cpp.list
cpp-symlinks.control
cpp-symlinks.list
cron.control
cron.list
cron.postinst
cron.postrm
cron.prerm
curl.control
curl.list
dbus.conffiles
dbus.control
dbus-daemon-proxy.control
dbus-daemon-proxy.list
dbus-hlid.control
dbus-hlid.list
dbus.list
dbus.postinst
dbus.postrm
dbus.prerm
dbus-x11.control
dbus-x11.list
devmem2.control
devmem2.list
distro-feed-configs.conffiles
distro-feed-configs.control
distro-feed-configs.list
dosfstools.control
dosfstools.list
e2fsprogs-badblocks.control
e2fsprogs-badblocks.list
e2fsprogs.control
e2fsprogs-e2fsck.control
e2fsprogs-e2fsck.list
e2fsprogs-e2fsck.postinst
e2fsprogs-e2fsck.prerm
e2fsprogs.list
e2fsprogs-mke2fs.control
e2fsprogs-mke2fs.list
e2fsprogs-mke2fs.postinst
e2fsprogs-mke2fs.prerm
e2fsprogs.postinst
e2fsprogs.prerm
ecore-con.control
ecore-con.list
ecore-con.postinst
ecore.control
ecore-evas.control
ecore-evas.list
ecore-evas.postinst
ecore-fb.control
ecore-fb.list
ecore-fb.postinst
ecore-file.control
ecore-file.list
ecore-file.postinst
ecore-imf.control
ecore-imf-evas.control
ecore-imf-evas.list
ecore-imf-evas.postinst
ecore-imf.list
ecore-imf.postinst
ecore-input.control
ecore-input.list
ecore-input.postinst
ecore-ipc.control
ecore-ipc.list
ecore-ipc.postinst
ecore.list
ecore.postinst
ecore-x.control
ecore-x.list
ecore-x.postinst
edbus.control
edbus.list
edbus.postinst
edje.control
edje.list
edje.postinst
edje-utils.control
edje-utils.list
efreet.control
efreet.list
efreet.postinst
eggdbus.control
eggdbus.list
eggdbus.postinst
eglibc-binary-localedata-en-us.control
eglibc-binary-localedata-en-us.list
eglibc-charmap-utf-8.control
eglibc-charmap-utf-8.list
eglibc-gconv.control
eglibc-gconv-cp1252.control
eglibc-gconv-cp1252.list
eglibc-gconv-ibm850.control
eglibc-gconv-ibm850.list
eglibc-gconv-iso8859-15.control
eglibc-gconv-iso8859-15.list
eglibc-gconv-iso8859-1.control
eglibc-gconv-iso8859-1.list
eglibc-gconv.list
eglibc-localedata-i18n.control
eglibc-localedata-i18n.list
eglibc-localedata-iso14651-t1-common.control
eglibc-localedata-iso14651-t1-common.list
eglibc-localedata-iso14651-t1.control
eglibc-localedata-iso14651-t1.list
eglibc-localedata-translit-circle.control
eglibc-localedata-translit-circle.list
eglibc-localedata-translit-cjk-compat.control
eglibc-localedata-translit-cjk-compat.list
eglibc-localedata-translit-compat.control
eglibc-localedata-translit-compat.list
eglibc-localedata-translit-font.control
eglibc-localedata-translit-font.list
eglibc-localedata-translit-fraction.control
eglibc-localedata-translit-fraction.list
eglibc-localedata-translit-narrow.control
eglibc-localedata-translit-narrow.list
eglibc-localedata-translit-neutral.control
eglibc-localedata-translit-neutral.list
eglibc-localedata-translit-small.control
eglibc-localedata-translit-small.list
eglibc-localedata-translit-wide.control
eglibc-localedata-translit-wide.list
eglibc-utils.control
eglibc-utils.list
eina.control
eina.list
eina.postinst
eject.control
eject.list
elementary-theme-gry.control
elementary-theme-gry.list
emacs-x11.control
emacs-x11.list
embryo.control
embryo.list
embryo.postinst
embryo-tests.control
embryo-tests.list
enchant.control
enchant.list
enchant.postinst
epdfview.control
epdfview.list
espeak.control
espeak.list
espeak.postinst
evas.control
evas-engine-buffer.control
evas-engine-buffer.list
evas-engine-fb.control
evas-engine-fb.list
evas-engine-software-16.control
evas-engine-software-16.list
evas-engine-software-16-x11.control
evas-engine-software-16-x11.list
evas-engine-software-generic.control
evas-engine-software-generic.list
evas-engine-software-x11.control
evas-engine-software-x11.list
evas-engine-xrender-x11.control
evas-engine-xrender-x11.list
evas.list
evas-loader-eet.control
evas-loader-eet.list
evas-loader-jpeg.control
evas-loader-jpeg.list
evas-loader-png.control
evas-loader-png.list
evas.postinst
evas-saver-eet.control
evas-saver-eet.list
evas-saver-jpeg.control
evas-saver-jpeg.list
evas-saver-png.control
evas-saver-png.list
evtest.control
evtest.list
e-wm-config-default.control
e-wm-config-default.list
e-wm-config-illume2-shr.control
e-wm-config-illume2-shr.list
e-wm-config-illume-shr.control
e-wm-config-illume-shr.list
e-wm.control
e-wm-icons.control
e-wm-icons.list
e-wm-images.control
e-wm-images.list
e-wm-input-methods.control
e-wm-input-methods.list
e-wm.list
e-wm-menu-shr.control
e-wm-menu-shr.list
e-wm-other.control
e-wm-other.list
e-wm.postinst
e-wm.postrm
e-wm-sysactions-shr.control
e-wm-sysactions-shr.list
e-wm-theme-default.control
e-wm-theme-default.list
e-wm-theme-illume-gry.control
e-wm-theme-illume-gry.list
e-wm-theme-illume-shr.control
e-wm-theme-illume-shr.list
e-wm-utils.control
e-wm-utils.list
fbreader.control
fbreader.list
fbreader.postinst
fbset.control
fbset.list
fbset-modes.conffiles
fbset-modes.control
fbset-modes.list
fbset.postinst
fbset.postrm
ffalarms.control
ffalarms.list
file.control
file.list
file.postinst
findutils.control
findutils.list
findutils.postinst
findutils.prerm
flac.control
flac.list
flite.control
flite.list
fontconfig-utils.control
fontconfig-utils.list
font-update-common.control
font-update-common.list
frameworkd-config-shr.conffiles
frameworkd-config-shr.control
frameworkd-config-shr.list
frameworkd.control
frameworkd.list
frameworkd.postinst
frameworkd.postrm
frameworkd.prerm
fso-abyss-config.conffiles
fso-abyss-config.control
fso-abyss-config.list
fso-abyss.control
fso-abyss.list
fso-apm.control
fso-apm.list
fsodatad.control
fsodatad.list
fsodatad.postinst
fsodeviced.control
fsodeviced.list
fsodeviced.postinst
fsodeviced.postrm
fsodeviced.prerm
fso-gpsd.control
fso-gpsd.list
fso-gpsd.postinst
fso-gpsd.postrm
fso-gpsd.prerm
fsogsmd.control
fsogsmd.list
fsogsmd.postinst
fsonetworkd.control
fsonetworkd.list
fsonetworkd.postinst
fsoraw.control
fsoraw.list
fsotdld.control
fsotdld.list
fsotdld.postinst
fsousaged.control
fsousaged.list
fsousaged.postinst
gcc.control
gcc.list
gconf.control
gconf.list
gconf.postinst
g++.control
gdb.control
gdb.list
gdk-pixbuf-loader-gif.control
gdk-pixbuf-loader-gif.list
gdk-pixbuf-loader-gif.postinst
gdk-pixbuf-loader-jpeg.control
gdk-pixbuf-loader-jpeg.list
gdk-pixbuf-loader-jpeg.postinst
gdk-pixbuf-loader-png.control
gdk-pixbuf-loader-png.list
gdk-pixbuf-loader-png.postinst
gdk-pixbuf-loader-xpm.control
gdk-pixbuf-loader-xpm.list
gdk-pixbuf-loader-xpm.postinst
git.control
git.list
g++.list
gnome-pty-helper.control
gnome-pty-helper.list
gnome-vfs.control
gnome-vfs.list
gnome-vfs-plugin-file.control
gnome-vfs-plugin-file.list
gnome-vfs.postinst
gnome-vfs.prerm
gnupg.control
gnupg.list
gpe-icons.control
gpe-icons.list
gpe-icons.postinst
gpe-icons.postrm
gpe-scap.control
gpe-scap.list
gpe-sketchbook.control
gpe-sketchbook.list
gpgv.control
gpgv.list
gridpad.control
gridpad.list
gst-plugin-alsa.control
gst-plugin-alsa.list
gst-plugin-audioconvert.control
gst-plugin-audioconvert.list
gst-plugin-autodetect.control
gst-plugin-autodetect.list
gst-plugin-gconfelements.control
gst-plugin-gconfelements.list
gst-plugin-gconfelements.postinst
gst-plugin-gconfelements.prerm
gst-plugin-mad.control
gst-plugin-mad.list
gstreamer.control
gstreamer.list
gstreamer.postinst
gtk+.control
gtk+.list
gtk+.postinst
hal.control
hal-info.control
hal-info.list
hal.list
hal.postinst
hal.postrm
hdparm.control
hdparm.list
hdparm.postinst
hdparm.prerm
hicolor-icon-theme.control
hicolor-icon-theme.list
hicolor-icon-theme.postinst
hicolor-icon-theme.postrm
htop.control
htop.list
i2c-tools.control
i2c-tools.list
id3lib.control
id3lib.list
id3lib.postinst
iliwi.control
iliwi.list
illume-keyboard-default-alpha.control
illume-keyboard-default-alpha.list
illume-keyboard-default-terminal.control
illume-keyboard-default-terminal.list
illume-keyboard-numeric-alt.control
illume-keyboard-numeric-alt.list
imagemagick.control
imagemagick.list
imagemagick.postinst
initscripts-shr.control
initscripts-shr.list
intone.control
intone.list
iptables.control
iptables.list
iptables.postinst
kernel-2.6.29-rc3.control
kernel-2.6.29-rc3.list
kernel.control
kernel-image-2.6.29-rc3.control
kernel-image-2.6.29-rc3.list
kernel-image-2.6.29-rc3.postinst
kernel.list
kernel-module-ar6000.control
kernel-module-ar6000.list
kernel-module-ar6000.postinst
kernel-module-ar6000.postrm
kernel-module-arc4.control
kernel-module-arc4.list
kernel-module-arc4.postinst
kernel-module-arc4.postrm
kernel-module-asix.control
kernel-module-asix.list
kernel-module-asix.postinst
kernel-module-asix.postrm
kernel-module-bluetooth.control
kernel-module-bluetooth.list
kernel-module-bluetooth.postinst
kernel-module-bluetooth.postrm
kernel-module-bnep.control
kernel-module-bnep.list
kernel-module-bnep.postinst
kernel-module-bnep.postrm
kernel-module-btusb.control
kernel-module-btusb.list
kernel-module-btusb.postinst
kernel-module-btusb.postrm
kernel-module-crc-ccitt.control
kernel-module-crc-ccitt.list
kernel-module-crc-ccitt.postinst
kernel-module-crc-ccitt.postrm
kernel-module-ecb.control
kernel-module-ecb.list
kernel-module-ecb.postinst
kernel-module-ecb.postrm
kernel-module-exportfs.control
kernel-module-exportfs.list
kernel-module-exportfs.postinst
kernel-module-exportfs.postrm
kernel-module-gadgetfs.control
kernel-module-gadgetfs.list
kernel-module-gadgetfs.postinst
kernel-module-gadgetfs.postrm
kernel-module-g-ether.control
kernel-module-g-ether.list
kernel-module-g-ether.postinst
kernel-module-g-ether.postrm
kernel-module-g-file-storage.control
kernel-module-g-file-storage.list
kernel-module-g-file-storage.postinst
kernel-module-g-file-storage.postrm
kernel-module-g-serial.control
kernel-module-g-serial.list
kernel-module-g-serial.postinst
kernel-module-g-serial.postrm
kernel-module-hidp.control
kernel-module-hidp.list
kernel-module-hidp.postinst
kernel-module-hidp.postrm
kernel-module-iptable-filter.control
kernel-module-iptable-filter.list
kernel-module-iptable-filter.postinst
kernel-module-iptable-filter.postrm
kernel-module-iptable-nat.control
kernel-module-iptable-nat.list
kernel-module-iptable-nat.postinst
kernel-module-iptable-nat.postrm
kernel-module-ip-tables.control
kernel-module-ip-tables.list
kernel-module-ip-tables.postinst
kernel-module-ip-tables.postrm
kernel-module-ipt-masquerade.control
kernel-module-ipt-masquerade.list
kernel-module-ipt-masquerade.postinst
kernel-module-ipt-masquerade.postrm
kernel-module-l2cap.control
kernel-module-l2cap.list
kernel-module-l2cap.postinst
kernel-module-l2cap.postrm
kernel-module-lockd.control
kernel-module-lockd.list
kernel-module-lockd.postinst
kernel-module-lockd.postrm
kernel-module-michael-mic.control
kernel-module-michael-mic.list
kernel-module-michael-mic.postinst
kernel-module-michael-mic.postrm
kernel-module-nf-conntrack.control
kernel-module-nf-conntrack-ipv4.control
kernel-module-nf-conntrack-ipv4.list
kernel-module-nf-conntrack-ipv4.postinst
kernel-module-nf-conntrack-ipv4.postrm
kernel-module-nf-conntrack.list
kernel-module-nf-conntrack.postinst
kernel-module-nf-conntrack.postrm
kernel-module-nf-defrag-ipv4.control
kernel-module-nf-defrag-ipv4.list
kernel-module-nf-defrag-ipv4.postinst
kernel-module-nf-defrag-ipv4.postrm
kernel-module-nf-nat.control
kernel-module-nf-nat.list
kernel-module-nf-nat.postinst
kernel-module-nf-nat.postrm
kernel-module-nfs-acl.control
kernel-module-nfs-acl.list
kernel-module-nfs-acl.postinst
kernel-module-nfs-acl.postrm
kernel-module-nfsd.control
kernel-module-nfsd.list
kernel-module-nfsd.postinst
kernel-module-nfsd.postrm
kernel-module-nls-utf8.control
kernel-module-nls-utf8.list
kernel-module-nls-utf8.postinst
kernel-module-nls-utf8.postrm
kernel-module-ohci-hcd.control
kernel-module-ohci-hcd.list
kernel-module-ohci-hcd.postinst
kernel-module-ohci-hcd.postrm
kernel-module-pegasus.control
kernel-module-pegasus.list
kernel-module-pegasus.postinst
kernel-module-pegasus.postrm
kernel-module-ppp-async.control
kernel-module-ppp-async.list
kernel-module-ppp-async.postinst
kernel-module-ppp-async.postrm
kernel-module-ppp-deflate.control
kernel-module-ppp-deflate.list
kernel-module-ppp-deflate.postinst
kernel-module-ppp-deflate.postrm
kernel-module-ppp-generic.control
kernel-module-ppp-generic.list
kernel-module-ppp-generic.postinst
kernel-module-ppp-generic.postrm
kernel-module-ppp-mppe.control
kernel-module-ppp-mppe.list
kernel-module-ppp-mppe.postinst
kernel-module-ppp-mppe.postrm
kernel-module-rfcomm.control
kernel-module-rfcomm.list
kernel-module-rfcomm.postinst
kernel-module-rfcomm.postrm
kernel-module-s3cmci.control
kernel-module-s3cmci.list
kernel-module-s3cmci.postinst
kernel-module-s3cmci.postrm
kernel-module-sco.control
kernel-module-sco.list
kernel-module-sco.postinst
kernel-module-sco.postrm
kernel-module-scsi-mod.control
kernel-module-scsi-mod.list
kernel-module-scsi-mod.postinst
kernel-module-scsi-mod.postrm
kernel-module-sd-mod.control
kernel-module-sd-mod.list
kernel-module-sd-mod.postinst
kernel-module-sd-mod.postrm
kernel-module-slhc.control
kernel-module-slhc.list
kernel-module-slhc.postinst
kernel-module-slhc.postrm
kernel-module-snd.control
kernel-module-snd.list
kernel-module-snd-page-alloc.control
kernel-module-snd-page-alloc.list
kernel-module-snd-page-alloc.postinst
kernel-module-snd-page-alloc.postrm
kernel-module-snd-pcm.control
kernel-module-snd-pcm.list
kernel-module-snd-pcm.postinst
kernel-module-snd-pcm.postrm
kernel-module-snd.postinst
kernel-module-snd.postrm
kernel-module-snd-soc-core.control
kernel-module-snd-soc-core.list
kernel-module-snd-soc-core.postinst
kernel-module-snd-soc-core.postrm
kernel-module-snd-soc-neo1973-gta02-wm8753.control
kernel-module-snd-soc-neo1973-gta02-wm8753.list
kernel-module-snd-soc-neo1973-gta02-wm8753.postinst
kernel-module-snd-soc-neo1973-gta02-wm8753.postrm
kernel-module-snd-soc-s3c24xx.control
kernel-module-snd-soc-s3c24xx-i2s.control
kernel-module-snd-soc-s3c24xx-i2s.list
kernel-module-snd-soc-s3c24xx-i2s.postinst
kernel-module-snd-soc-s3c24xx-i2s.postrm
kernel-module-snd-soc-s3c24xx.list
kernel-module-snd-soc-s3c24xx.postinst
kernel-module-snd-soc-s3c24xx.postrm
kernel-module-snd-soc-wm8753.control
kernel-module-snd-soc-wm8753.list
kernel-module-snd-soc-wm8753.postinst
kernel-module-snd-soc-wm8753.postrm
kernel-module-snd-timer.control
kernel-module-snd-timer.list
kernel-module-snd-timer.postinst
kernel-module-snd-timer.postrm
kernel-module-sunrpc.control
kernel-module-sunrpc.list
kernel-module-sunrpc.postinst
kernel-module-sunrpc.postrm
kernel-module-tun.control
kernel-module-tun.list
kernel-module-tun.postinst
kernel-module-tun.postrm
kernel-module-uinput.control
kernel-module-uinput.list
kernel-module-uinput.postinst
kernel-module-uinput.postrm
kernel-module-usbserial.control
kernel-module-usbserial.list
kernel-module-usbserial.postinst
kernel-module-usbserial.postrm
kernel-module-usb-storage.control
kernel-module-usb-storage.list
kernel-module-usb-storage.postinst
kernel-module-usb-storage.postrm
kernel-module-x-tables.control
kernel-module-x-tables.list
kernel-module-x-tables.postinst
kernel-module-x-tables.postrm
kernel.postinst
kernel.postrm
lame.control
lame.list
liba52-0.control
liba52-0.list
liba52-0.postinst
libacl1.control
libacl1.list
libacl1.postinst
libapm1.control
libapm1.list
libapm1.postinst
libasound2.control
libasound2.list
libasound2.postinst
libaspell15.control
libaspell15.list
libaspell15.postinst
libatk-1.0-0.control
libatk-1.0-0.list
libatk-1.0-0.postinst
libattr1.control
libattr1.list
libattr1.postinst
libavahi-client3.control
libavahi-client3.list
libavahi-client3.postinst
libavahi-common3.control
libavahi-common3.list
libavahi-common3.postinst
libavahi-glib1.control
libavahi-glib1.list
libavahi-glib1.postinst
libavcodec52.control
libavcodec52.list
libavcodec52.postinst
libavformat52.control
libavformat52.list
libavformat52.postinst
libavutil50.control
libavutil50.list
libavutil50.postinst
libblkid1.control
libblkid1.list
libblkid1.postinst
libbz2-1.control
libbz2-1.list
libbz2-1.postinst
libc6.control
libc6.list
libc6.postinst
libcairo2.control
libcairo2.list
libcairo2.postinst
libcanberra0.control
libcanberra0.list
libcanberra0.postinst
libcanberra-alsa.control
libcanberra-alsa.list
libcom-err2.control
libcom-err2.list
libcom-err2.postinst
libcroco.control
libcroco.list
libcroco.postinst
libcrypto0.9.8.control
libcrypto0.9.8.list
libcrypto0.9.8.postinst
libcups2.control
libcups2.list
libcups2.postinst
libcurl4.control
libcurl4.list
libcurl4.postinst
libdbus-1-3.control
libdbus-1-3.list
libdbus-1-3.postinst
libdbus-glib-1-2.control
libdbus-glib-1-2.list
libdbus-glib-1-2.postinst
libdmx1.control
libdmx1.list
libdmx1.postinst
libdrm.control
libdrm.list
libdrm.postinst
libdvdcss2.control
libdvdcss2.list
libdvdcss2.postinst
libdvdread3.control
libdvdread3.list
libdvdread3.postinst
libeet1.control
libeet1.list
libeet1.postinst
libelementary-ver-pre-svn-05-0.control
libelementary-ver-pre-svn-05-0.list
libelementary-ver-pre-svn-05-0.postinst
libelementary-ver-pre-svn-05-themes.control
libelementary-ver-pre-svn-05-themes.list
libelf0.control
libelf0.list
libelf0.postinst
libewebkit0.control
libewebkit0.list
libewebkit0.postinst
libexif12.control
libexif12.list
libexif12.postinst
libexosip2.control
libexosip2.list
libexosip2.postinst
libexpat1.control
libexpat1.list
libexpat1.postinst
libfaac0.control
libfaac0.list
libfaac0.postinst
libfakekey0.control
libfakekey0.list
libfakekey0.postinst
libffi5.control
libffi5.list
libffi5.postinst
libflac8.control
libflac8.list
libflac8.postinst
libfontconfig1.control
libfontconfig1.list
libfontconfig1.postinst
libfontenc1.control
libfontenc1.list
libfontenc1.postinst
libframeworkd-glib0.control
libframeworkd-glib0.list
libframeworkd-glib0.postinst
libfreetype6.control
libfreetype6.list
libfreetype6.postinst
libfribidi0.control
libfribidi0.list
libfribidi0.postinst
libfsobasics0.control
libfsobasics0.list
libfsobasics0.postinst
libfsoframework0.control
libfsoframework0.list
libfsoframework0.postinst
libfso-glib0.control
libfso-glib0.list
libfso-glib0.postinst
libfsoresource0.control
libfsoresource0.list
libfsoresource0.postinst
libfsotransport0.control
libfsotransport0.list
libfsotransport0.postinst
libgcc1.control
libgcc1.list
libgcc1.postinst
libgcrypt11.control
libgcrypt11.list
libgcrypt11.postinst
libgee2.control
libgee2.list
libgee2.postinst
libgio-2.0-0.control
libgio-2.0-0.list
libgio-2.0-0.postinst
libgl1.control
libgl1.list
libgl1.postinst
libglade-2.0-0.control
libglade-2.0-0.list
libglade-2.0-0.postinst
libglib-2.0-0.control
libglib-2.0-0.list
libglib-2.0-0.postinst
libglu1.control
libglu1.list
libglu1.postinst
libgmodule-2.0-0.control
libgmodule-2.0-0.list
libgmodule-2.0-0.postinst
libgmp3.control
libgmp3.list
libgmp3.postinst
libgnt0.control
libgnt0.list
libgnt0.postinst
libgnutls26.control
libgnutls26.list
libgnutls26.postinst
libgnutls-extra26.control
libgnutls-extra26.list
libgnutls-extra26.postinst
libgobject-2.0-0.control
libgobject-2.0-0.list
libgobject-2.0-0.postinst
libgoffice-0.8-8.control
libgoffice-0.8-8.list
libgoffice-0.8-8.postinst
libgoffice-0.8-plugin-plot-barcol.control
libgoffice-0.8-plugin-plot-barcol.list
libgoffice-0.8-plugin-plot-distrib.control
libgoffice-0.8-plugin-plot-distrib.list
libgoffice-0.8-plugin-plot-pie.control
libgoffice-0.8-plugin-plot-pie.list
libgoffice-0.8-plugin-plot-radar.control
libgoffice-0.8-plugin-plot-radar.list
libgoffice-0.8-plugin-plot-surface.control
libgoffice-0.8-plugin-plot-surface.list
libgoffice-0.8-plugin-plot-xy.control
libgoffice-0.8-plugin-plot-xy.list
libgoffice-0.8-plugin-reg-linear.control
libgoffice-0.8-plugin-reg-linear.list
libgoffice-0.8-plugin-reg-logfit.control
libgoffice-0.8-plugin-reg-logfit.list
libgoffice-0.8-plugin-smoothing.control
libgoffice-0.8-plugin-smoothing.list
libgpewidget1.control
libgpewidget1.list
libgpewidget1.postinst
libgpg-error0.control
libgpg-error0.list
libgpg-error0.postinst
libgpgme11.control
libgpgme11.list
libgpgme11.postinst
libgsf.control
libgsf.list
libgsf.postinst
libgsf.prerm
libgsm0710-0.control
libgsm0710-0.list
libgsm0710-0.postinst
libgsm0710mux0.control
libgsm0710mux0.list
libgsm0710mux0.postinst
libgsm1.control
libgsm1.list
libgsm1.postinst
libgstaudio-0.10-0.control
libgstaudio-0.10-0.list
libgstaudio-0.10-0.postinst
libgstfarsight-0.10-0.control
libgstfarsight-0.10-0.list
libgstfarsight-0.10-0.postinst
libgstinterfaces-0.10-0.control
libgstinterfaces-0.10-0.list
libgstinterfaces-0.10-0.postinst
libgstnetbuffer-0.10-0.control
libgstnetbuffer-0.10-0.list
libgstnetbuffer-0.10-0.postinst
libgstpbutils-0.10-0.control
libgstpbutils-0.10-0.list
libgstpbutils-0.10-0.postinst
libgstrtp-0.10-0.control
libgstrtp-0.10-0.list
libgstrtp-0.10-0.postinst
libgsttag-0.10-0.control
libgsttag-0.10-0.list
libgsttag-0.10-0.postinst
libgstvideo-0.10-0.control
libgstvideo-0.10-0.list
libgstvideo-0.10-0.postinst
libgthread-2.0-0.control
libgthread-2.0-0.list
libgthread-2.0-0.postinst
libgypsy0.control
libgypsy0.list
libgypsy0.postinst
libical.control
libical.list
libical.postinst
libice6.control
libice6.list
libice6.postinst
libicudata36.control
libicudata36.list
libicudata36.postinst
libicui18n36.control
libicui18n36.list
libicui18n36.postinst
libicuuc36.control
libicuuc36.list
libicuuc36.postinst
libid3tag0.control
libid3tag0.list
libid3tag0.postinst
libidl-2-0.control
libidl-2-0.list
libidl-2-0.postinst
libidn.control
libidn.list
libidn.postinst
libimlib2-1.control
libimlib2-1.list
libimlib2-1.postinst
libjasper1.control
libjasper1.list
libjasper1.postinst
libjpeg62.control
libjpeg62.list
libjpeg62.postinst
liblinebreak1.control
liblinebreak1.list
liblinebreak1.postinst
liblinphone3.control
liblinphone3.list
liblinphone3.postinst
liblockfile.control
liblockfile.list
liblockfile.postinst
libltdl7.control
libltdl7.list
libltdl7.postinst
liblzo1.control
liblzo1.list
liblzo1.postinst
libmad0.control
libmad0.list
libmad0.postinst
libmediastreamer0.control
libmediastreamer0.list
libmediastreamer0.postinst
libmp3lame0.control
libmp3lame0.list
libmp3lame0.postinst
libmpfr1.control
libmpfr1.list
libmpfr1.postinst
libnice.control
libnice.list
libnice.postinst
libnl2.control
libnl2.list
libnl2.postinst
libnl-genl2.control
libnl-genl2.list
libnl-genl2.postinst
libnl-nf2.control
libnl-nf2.list
libnl-nf2.postinst
libnl-route2.control
libnl-route2.list
libnl-route2.postinst
libode0.control
libode0.list
libode0.postinst
libogg0.control
libogg0.list
libogg0.postinst
liboil.control
liboil.list
liboil.postinst
libopkg0.control
libopkg0.list
libopkg0.postinst
libortp8.control
libortp8.list
libortp8.postinst
libosip2-3.control
libosip2-3.list
libosip2-3.postinst
libpam-base-files.control
libpam-base-files.list
libpam.control
libpam.list
libpam-meta.control
libpam-meta.list
libpam.postinst
libpcap.control
libpcap.list
libpcap.postinst
libpciaccess0.control
libpciaccess0.list
libpciaccess0.postinst
libperl5.control
libperl5.list
libperl5.postinst
libphone-ui0.conffiles
libphone-ui0.control
libphone-ui0.list
libphone-ui0.postinst
libphone-ui-shr.control
libphone-ui-shr.list
libphone-utils0.conffiles
libphone-utils0.control
libphone-utils0.list
libphone-utils0.postinst
libpixman-1-0.control
libpixman-1-0.list
libpixman-1-0.postinst
libpng12-0.control
libpng12-0.list
libpng12-0.postinst
libpng.control
libpng.list
libpoppler5.control
libpoppler5.list
libpoppler5.postinst
libpoppler-glib4.control
libpoppler-glib4.list
libpoppler-glib4.postinst
libpopt0.control
libpopt0.list
libpopt0.postinst
libportaudio2.control
libportaudio2.list
libportaudio2.postinst
libpostproc51.control
libpostproc51.list
libpostproc51.postinst
libpthread-stubs0.control
libpthread-stubs0.list
libpthread-stubs0.postinst
libpurple.control
libpurple.list
libpurple-plugin-ssl.control
libpurple-plugin-ssl-gnutls.control
libpurple-plugin-ssl-gnutls.list
libpurple-plugin-ssl.list
libpurple.postinst
libpurple.prerm
libpurple-protocol-icq.control
libpurple-protocol-icq.list
libpurple-protocol-irc.control
libpurple-protocol-irc.list
libpurple-protocol-msn.control
libpurple-protocol-msn.list
libpurple-protocol-xmpp.control
libpurple-protocol-xmpp.list
libpyglib-2.0-python0.control
libpyglib-2.0-python0.list
libpyglib-2.0-python0.postinst
libpython2.6-1.0.control
libpython2.6-1.0.list
libpython2.6-1.0.postinst
libreadline5.control
libreadline5.list
libreadline5.postinst
librsvg-2-2.control
librsvg-2-2.list
librsvg-2-2.postinst
librsvg-2-gtk.control
librsvg-2-gtk.list
librsvg-2-gtk.postinst
libschroedinger-1.0-0.control
libschroedinger-1.0-0.list
libschroedinger-1.0-0.postinst
libsdl-1.2-0.control
libsdl-1.2-0.list
libsdl-1.2-0.postinst
libsdl-image-1.2-0.control
libsdl-image-1.2-0.list
libsdl-image-1.2-0.postinst
libsdl-mixer-1.2-0.control
libsdl-mixer-1.2-0.list
libsdl-mixer-1.2-0.postinst
libsdl-ttf-2.0-0.control
libsdl-ttf-2.0-0.list
libsdl-ttf-2.0-0.postinst
libsm6.control
libsm6.list
libsm6.postinst
libsoup-2.2-8.control
libsoup-2.2-8.list
libsoup-2.2-8.postinst
libsoup-2.4-1.control
libsoup-2.4-1.list
libsoup-2.4-1.postinst
libspeex1.control
libspeex1.list
libspeex1.postinst
libspeexdsp1.control
libspeexdsp1.list
libspeexdsp1.postinst
libsqlite0.control
libsqlite0.list
libsqlite0.postinst
libsqlite3-0.control
libsqlite3-0.list
libsqlite3-0.postinst
libss2.control
libss2.list
libss2.postinst
libssl0.9.8.control
libssl0.9.8.list
libssl0.9.8.postinst
libstartup-notification-1-0.control
libstartup-notification-1-0.list
libstartup-notification-1-0.postinst
libstdc++6.control
libstdc++6.list
libstdc++6.postinst
libswscale0.control
libswscale0.list
libswscale0.postinst
libsysfs2.control
libsysfs2.list
libsysfs2.postinst
libtheora0.control
libtheora0.list
libtheora0.postinst
libthread-db1.control
libthread-db1.list
libthread-db1.postinst
libtiff5.control
libtiff5.list
libtiff5.postinst
libts-1.0-0.control
libts-1.0-0.list
libts-1.0-0.postinst
libungif4.control
libungif4.list
libungif4.postinst
libusb-0.1-4.control
libusb-0.1-4.list
libusb-0.1-4.postinst
libuuid1.control
libuuid1.list
libuuid1.postinst
libvorbis0.control
libvorbis0.list
libvorbis0.postinst
libvte9.control
libvte9.list
libvte9.postinst
libwebkit-1.0-2.control
libwebkit-1.0-2.list
libwebkit-1.0-2.postinst
libwrap0.control
libwrap0.list
libwrap0.postinst
libx11-6.control
libx11-6.list
libx11-6.postinst
libx11-locale.control
libx11-locale.list
libxau6.control
libxau6.list
libxau6.postinst
libxaw7-7.control
libxaw7-7.list
libxaw7-7.postinst
libxcalibrate0.control
libxcalibrate0.list
libxcalibrate0.postinst
libxcomposite1.control
libxcomposite1.list
libxcomposite1.postinst
libxcursor1.control
libxcursor1.list
libxcursor1.postinst
libxdamage1.control
libxdamage1.list
libxdamage1.postinst
libxdmcp6.control
libxdmcp6.list
libxdmcp6.postinst
libxext6.control
libxext6.list
libxext6.postinst
libxfixes3.control
libxfixes3.list
libxfixes3.postinst
libxfont1.control
libxfont1.list
libxfont1.postinst
libxfontcache1.control
libxfontcache1.list
libxfontcache1.postinst
libxft2.control
libxft2.list
libxft2.postinst
libxi6.control
libxi6.list
libxi6.postinst
libxinerama1.control
libxinerama1.list
libxinerama1.postinst
libxkbfile1.control
libxkbfile1.list
libxkbfile1.postinst
libxml2.control
libxml2.list
libxml2.postinst
libxmu6.control
libxmu6.list
libxmu6.postinst
libxmuu1.control
libxmuu1.list
libxmuu1.postinst
libxp6.control
libxp6.list
libxp6.postinst
libxpm4.control
libxpm4.list
libxpm4.postinst
libxrandr2.control
libxrandr2.list
libxrandr2.postinst
libxrender1.control
libxrender1.list
libxrender1.postinst
libxslt.control
libxslt.list
libxslt.postinst
libxss1.control
libxss1.list
libxss1.postinst
libxt6.control
libxt6.list
libxt6.postinst
libxtst6.control
libxtst6.list
libxtst6.postinst
libxv1.control
libxv1.list
libxv1.postinst
libxxf86dga1.control
libxxf86dga1.list
libxxf86dga1.postinst
libxxf86misc1.control
libxxf86misc1.list
libxxf86misc1.postinst
libxxf86vm1.control
libxxf86vm1.list
libxxf86vm1.postinst
libyaml-0-2.control
libyaml-0-2.list
libyaml-0-2.postinst
libz1.control
libz1.list
libz1.postinst
linphone.control
linphone.list
locale-base-en-us.control
locale-base-en-us.list
logrotate.conffiles
logrotate.control
logrotate.list
logrotate.postinst
logrotate.postrm
lsof.control
lsof.list
ltrace.control
ltrace.list
make.control
make.list
matchbox-keyboard-im.control
matchbox-keyboard-im.list
matchbox-keyboard-im.postinst
matchbox-keyboard-im.postrm
mbuffer.control
mbuffer.list
mdbus2.control
mdbus2.list
mesa-dri.control
mesa-dri.list
mesa-dri.postinst
mime-support.control
mime-support.list
mioctl.control
mioctl.list
mkdump.control
mkdump.list
mobile-broadband-provider-info.control
mobile-broadband-provider-info.list
module-init-tools.control
module-init-tools-depmod.control
module-init-tools-depmod.list
module-init-tools-depmod.postinst
module-init-tools-depmod.prerm
module-init-tools.list
module-init-tools.postinst
module-init-tools.prerm
modutils-initscripts.control
modutils-initscripts.list
modutils-initscripts.postinst
modutils-initscripts.postrm
modutils-initscripts.prerm
mokomaze.control
mokomaze.list
mplayer-common.control
mplayer-common.list
mplayer.conffiles
mplayer.control
mplayer.list
mtd-utils.control
mtd-utils.list
mterm2.control
mterm2.list
nano.control
nano.list
navit.conffiles
navit.control
navit-icons.control
navit-icons.list
navit.list
ncurses.control
ncurses.list
ncurses.postinst
netbase.conffiles
netbase.control
netbase.list
netbase.postinst
netbase.postrm
netbase.prerm
nfs-utils-client.control
nfs-utils-client.list
nmon.control
nmon.list
numptyphysics.control
numptyphysics.list
openssh.control
openssh-keygen.control
openssh-keygen.list
openssh.list
openssh-scp.control
openssh-scp.list
openssh-scp.postinst
openssh-scp.postrm
openssh-sftp-server.control
openssh-sftp-server.list
openssh-ssh.conffiles
openssh-ssh.control
openssh-sshd.conffiles
openssh-sshd.control
openssh-sshd.list
openssh-sshd.postinst
openssh-sshd.postrm
openssh-ssh.list
openssh-ssh.postinst
openssh-ssh.postrm
openssl.control
openssl.list
openvpn.control
openvpn.list
opimd-utils-cli.control
opimd-utils-cli.list
opimd-utils-data.control
opimd-utils-data.list
opimd-utils-notes.control
opimd-utils-notes.list
opkg-collateral.conffiles
opkg-collateral.control
opkg-collateral.list
opkg.control
opkg.list
opkg.postinst
opkg.postrm
orbit2.control
orbit2.list
orbit2.postinst
pam-plugin-access.control
pam-plugin-access.list
pam-plugin-debug.control
pam-plugin-debug.list
pam-plugin-deny.control
pam-plugin-deny.list
pam-plugin-echo.control
pam-plugin-echo.list
pam-plugin-env.control
pam-plugin-env.list
pam-plugin-exec.control
pam-plugin-exec.list
pam-plugin-faildelay.control
pam-plugin-faildelay.list
pam-plugin-filter.control
pam-plugin-filter.list
pam-plugin-ftp.control
pam-plugin-ftp.list
pam-plugin-group.control
pam-plugin-group.list
pam-plugin-issue.control
pam-plugin-issue.list
pam-plugin-keyinit.control
pam-plugin-keyinit.list
pam-plugin-lastlog.control
pam-plugin-lastlog.list
pam-plugin-limits.control
pam-plugin-limits.list
pam-plugin-listfile.control
pam-plugin-listfile.list
pam-plugin-localuser.control
pam-plugin-localuser.list
pam-plugin-loginuid.control
pam-plugin-loginuid.list
pam-plugin-mail.control
pam-plugin-mail.list
pam-plugin-mkhomedir.control
pam-plugin-mkhomedir.list
pam-plugin-motd.control
pam-plugin-motd.list
pam-plugin-namespace.control
pam-plugin-namespace.list
pam-plugin-nologin.control
pam-plugin-nologin.list
pam-plugin-permit.control
pam-plugin-permit.list
pam-plugin-pwhistory.control
pam-plugin-pwhistory.list
pam-plugin-rhosts.control
pam-plugin-rhosts.list
pam-plugin-rootok.control
pam-plugin-rootok.list
pam-plugin-securetty.control
pam-plugin-securetty.list
pam-plugin-shells.control
pam-plugin-shells.list
pam-plugin-stress.control
pam-plugin-stress.list
pam-plugin-succeed-if.control
pam-plugin-succeed-if.list
pam-plugin-tally2.control
pam-plugin-tally2.list
pam-plugin-tally.control
pam-plugin-tally.list
pam-plugin-time.control
pam-plugin-time.list
pam-plugin-timestamp.control
pam-plugin-timestamp.list
pam-plugin-umask.control
pam-plugin-umask.list
pam-plugin-unix.control
pam-plugin-unix.list
pam-plugin-warn.control
pam-plugin-warn.list
pam-plugin-wheel.control
pam-plugin-wheel.list
pam-plugin-xauth.control
pam-plugin-xauth.list
pango.control
pango.list
pango-module-basic-fc.control
pango-module-basic-fc.list
pango-module-basic-fc.postinst
pango-module-basic-x.control
pango-module-basic-x.list
pango-module-basic-x.postinst
pango.postinst
perl.control
perl.list
perl-module-carp.control
perl-module-carp.list
perl-module-exporter.control
perl-module-exporter.list
perl-module-file-basename.control
perl-module-file-basename.list
perl-module-file-path.control
perl-module-file-path.list
perl-module-strict.control
perl-module-strict.list
perl-module-warnings.control
perl-module-warnings.list
phonefsod.conffiles
phonefsod.control
phonefsod.list
phonefsod.postinst
phonefsod.postrm
phonefsod.prerm
phoneui-apps-contacts.control
phoneui-apps-contacts.list
phoneui-apps-dialer.control
phoneui-apps-dialer.list
phoneui-apps-messages.control
phoneui-apps-messages.list
phoneui-apps-quick-settings.control
phoneui-apps-quick-settings.list
phoneuid.conffiles
phoneuid.control
phoneuid.list
pidgin.control
pidgin-data.control
pidgin-data.list
pidgin.list
pingus.control
pingus.list
pointercal.control
pointercal.list
policykit.control
policykit.list
policykit.postinst
policykit.postrm
poppler-data.control
poppler-data.list
portmap.control
portmap.list
portmap.postinst
portmap.postrm
portmap.prerm
powertop.control
powertop.list
ppp.conffiles
ppp.control
ppp-dialin.control
ppp-dialin.list
ppp-dialin.postinst
ppp-dialin.postrm
ppp.list
ppp.postinst
procps.conffiles
procps.control
procps.list
procps.postinst
procps.postrm
procps.prerm
pth.control
pth.list
pth.postinst
pxaregs.control
pxaregs.list
pyefl-sudoku.control
pyefl-sudoku.list
pyphonelog.control
pyphonelog.list
python-codecs.control
python-codecs.list
python-core.control
python-core.list
python-crypt.control
python-crypt.list
python-ctypes.control
python-ctypes.list
python-datetime.control
python-datetime.list
python-dateutil.control
python-dateutil.list
python-dbus.control
python-dbus.list
python-difflib.control
python-difflib.list
python-ecore.control
python-ecore.list
python-edbus.control
python-edbus.list
python-edje.control
python-edje.list
python-elementary.control
python-elementary.list
python-evas.control
python-evas.list
python-fcntl.control
python-fcntl.list
python-gst.control
python-gst.list
python-io.control
python-io.list
python-lang.control
python-lang.list
python-logging.control
python-logging.list
python-math.control
python-math.list
python-multiprocessing.control
python-multiprocessing.list
python-pexpect.control
python-pexpect.list
python-phoneutils.control
python-phoneutils.list
python-pickle.control
python-pickle.list
python-pprint.control
python-pprint.list
python-pyalsaaudio.control
python-pyalsaaudio.list
python-pycairo.control
python-pycairo.list
python-pygobject.control
python-pygobject.list
python-pygtk.control
python-pygtk.list
python-pyrtc.control
python-pyrtc.list
python-pyserial.control
python-pyserial.list
python-pyyaml.control
python-pyyaml.list
python-readline.control
python-readline.list
python-re.control
python-re.list
python-resource.control
python-resource.list
python-shell.control
python-shell.list
python-sqlite3.control
python-sqlite3.list
python-stringold.control
python-stringold.list
python-subprocess.control
python-subprocess.list
python-syslog.control
python-syslog.list
python-terminal.control
python-terminal.list
python-textutils.control
python-textutils.list
python-threading.control
python-threading.list
python-vobject.control
python-vobject.list
python-xml.control
python-xml.list
python-zlib.control
python-zlib.list
rgb.control
rgb.list
rsync.control
rsync.list
s3c24xx-gpio.control
s3c24xx-gpio.list
s3c64xx-gpio.control
s3c64xx-gpio.list
screen.control
screen.list
sed.control
sed.list
sed.postinst
sed.prerm
serial-forward.control
serial-forward.list
shared-mime-info.control
shared-mime-info.list
shr-settings-addons-illume.control
shr-settings-addons-illume.list
shr-settings-backup-configuration.conffiles
shr-settings-backup-configuration.control
shr-settings-backup-configuration.list
shr-settings.control
shr-settings.list
shr-splash.control
shr-splash.list
shr-splash.postinst
shr-splash.postrm
shr-splash.prerm
shr-splash-theme-simple.control
shr-splash-theme-simple.list
shr-splash-theme-simple.postinst
shr-splash-theme-simple.postrm
shr-theme.control
shr-theme-gry.control
shr-theme-gry.list
shr-theme-gtk-e17lookalike.control
shr-theme-gtk-e17lookalike.list
shr-theme-gtk-e17lookalike.postinst
shr-theme-gtk-e17lookalike.postrm
shr-theme.list
shr-wizard.control
shr-wizard.list
socat.control
socat.list
strace.control
strace.list
synergy.control
synergy.list
sysfsutils.control
sysfsutils.list
sysstat.control
sysstat.list
sysvinit.control
sysvinit-inittab.conffiles
sysvinit-inittab.control
sysvinit-inittab.list
sysvinit.list
sysvinit-pidof.control
sysvinit-pidof.list
sysvinit-pidof.postinst
sysvinit-pidof.prerm
sysvinit.postinst
sysvinit.postrm
sysvinit.prerm
sysvinit-utils.control
sysvinit-utils.list
sysvinit-utils.postinst
sysvinit-utils.prerm
tangogps.control
tangogps.list
task-base-apm.control
task-base-apm.list
task-base-bluetooth.control
task-base-bluetooth.list
task-base.control
task-base-ext2.control
task-base-ext2.list
task-base-kernel26.control
task-base-kernel26.list
task-base.list
task-base-ppp.control
task-base-ppp.list
task-base-usbgadget.control
task-base-usbgadget.list
task-base-usbhost.control
task-base-usbhost.list
task-base-vfat.control
task-base-vfat.list
task-base-wifi.control
task-base-wifi.list
task-boot.control
task-boot.list
task-cli-tools.control
task-cli-tools-debug.control
task-cli-tools-debug.list
task-cli-tools.list
task-distro-base.control
task-distro-base.list
task-fonts-truetype-core.control
task-fonts-truetype-core.list
task-fso2-compliance.control
task-fso2-compliance.list
task-machine-base.control
task-machine-base.list
task-shr-apps.control
task-shr-apps.list
task-shr-cli.control
task-shr-cli.list
task-shr-games.control
task-shr-games.list
task-shr-gtk.control
task-shr-gtk.list
task-shr-minimal-apps.control
task-shr-minimal-apps.list
task-shr-minimal-audio.control
task-shr-minimal-audio.list
task-shr-minimal-base.control
task-shr-minimal-base.list
task-shr-minimal-cli.control
task-shr-minimal-cli.list
task-shr-minimal-fso.control
task-shr-minimal-fso.list
task-shr-minimal-gtk.control
task-shr-minimal-gtk.list
task-shr-minimal-x.control
task-shr-minimal-x.list
task-x11-illume.control
task-x11-illume.list
task-x11-server.control
task-x11-server.list
task-x11-utils.control
task-x11-utils.list
tcpdump.control
tcpdump.list
tinylogin.control
tinylogin.list
tinylogin.postinst
tinylogin.prerm
tslib-calibrate.control
tslib-calibrate.list
tslib-conf.control
tslib-conf.list
ttf-dejavu-common.control
ttf-dejavu-common.list
ttf-dejavu-common.postinst
ttf-dejavu-common.postrm
ttf-dejavu-sans.control
ttf-dejavu-sans.list
ttf-dejavu-sans-mono.control
ttf-dejavu-sans-mono.list
ttf-dejavu-sans-mono.postinst
ttf-dejavu-sans-mono.postrm
ttf-dejavu-sans.postinst
ttf-dejavu-sans.postrm
ttf-liberation-mono.control
ttf-liberation-mono.list
ttf-liberation-mono.postinst
ttf-liberation-mono.postrm
tzdata-africa.control
tzdata-africa.list
tzdata-americas.control
tzdata-americas.list
tzdata-asia.control
tzdata-asia.list
tzdata-australia.control
tzdata-australia.list
tzdata.conffiles
tzdata.control
tzdata-europe.control
tzdata-europe.list
tzdata.list
udev.control
udev.list
udev.postinst
udev.postrm
udev.prerm
udev-utils.control
udev-utils.list
update-modules.control
update-modules.list
update-modules.postinst
update-rc.d.control
update-rc.d.list
usb-gadget-mode.control
usb-gadget-mode.list
usb-gadget-mode.postinst
usb-gadget-mode.postrm
usbutils.control
usbutils.list
util-linux-ng-blkid.control
util-linux-ng-blkid.list
util-linux-ng-blkid.postinst
util-linux-ng-blkid.prerm
util-linux-ng-cfdisk.control
util-linux-ng-cfdisk.list
util-linux-ng.control
util-linux-ng-fdisk.control
util-linux-ng-fdisk.list
util-linux-ng-fdisk.postinst
util-linux-ng-fdisk.prerm
util-linux-ng-fsck.control
util-linux-ng-fsck.list
util-linux-ng-fsck.postinst
util-linux-ng-fsck.prerm
util-linux-ng.list
util-linux-ng-losetup.control
util-linux-ng-losetup.list
util-linux-ng-losetup.postinst
util-linux-ng-losetup.prerm
util-linux-ng-mountall.control
util-linux-ng-mountall.list
util-linux-ng-mountall.postinst
util-linux-ng-mountall.prerm
util-linux-ng-mount.control
util-linux-ng-mount.list
util-linux-ng-mount.postinst
util-linux-ng-mount.prerm
util-linux-ng.postinst
util-linux-ng.prerm
util-linux-ng-readprofile.control
util-linux-ng-readprofile.list
util-linux-ng-readprofile.postinst
util-linux-ng-readprofile.prerm
util-linux-ng-sfdisk.control
util-linux-ng-sfdisk.list
util-linux-ng-swaponoff.control
util-linux-ng-swaponoff.list
util-linux-ng-swaponoff.postinst
util-linux-ng-swaponoff.prerm
util-linux-ng-umount.control
util-linux-ng-umount.list
util-linux-ng-umount.postinst
util-linux-ng-umount.prerm
vagalume.control
vagalume.list
vala-terminal.control
vala-terminal.list
ventura.control
ventura.list
vnc.control
vnc.list
vpnc.conffiles
vpnc.control
vpnc.list
vte-termcap.control
vte-termcap.list
wireless-tools.control
wireless-tools.list
wmiconfig.control
wmiconfig.list
wpa-supplicant.control
wpa-supplicant.list
wpa-supplicant-passphrase.control
wpa-supplicant-passphrase.list
wv.control
wv.list
wv.postinst
x11vnc.control
x11vnc.list
xauth.control
xauth.list
xcursor-transparent-theme.control
xcursor-transparent-theme.list
xdpyinfo.control
xdpyinfo.list
xf86-input-evdev.control
xf86-input-evdev.list
xf86-input-keyboard.control
xf86-input-keyboard.list
xf86-input-mouse.control
xf86-input-mouse.list
xf86-input-tslib.control
xf86-input-tslib.list
xf86-video-glamo.control
xf86-video-glamo.list
xhost.control
xhost.list
xinit.control
xinit.list
xinput-calibrator.control
xinput-calibrator.list
xinput.control
xinput.list
xkbcomp.control
xkbcomp.list
xkeyboard-config.control
xkeyboard-config.list
xmodmap.control
xmodmap.list
xorg-minimal-fonts.control
xorg-minimal-fonts.list
xrandr.control
xrandr.list
xserver-kdrive-common.control
xserver-kdrive-common.list
xserver-nodm-init.control
xserver-nodm-init.list
xserver-nodm-init.postinst
xserver-nodm-init.postrm
xserver-nodm-init.prerm
xserver-xorg-conf.conffiles
xserver-xorg-conf.control
xserver-xorg-conf.list
xserver-xorg.control
xserver-xorg-extension-dri2.control
xserver-xorg-extension-dri2.list
xserver-xorg-extension-dri.control
xserver-xorg-extension-dri.list
xserver-xorg-extension-glx.control
xserver-xorg-extension-glx.list
xserver-xorg.list
xset.control
xset.list
xtscal.control
xtscal.list"
mount /mnt/ceph-fuse
cd /mnt/ceph-fuse
mkdir test-1774
cd test-1774
for f in $list; do
touch $f
done
cd
umount /mnt/ceph-fuse
mount /mnt/ceph-fuse
cd -
# this worked before the 1774 fix
diff <(ls) <(echo "$list")
# but this failed, because we cached the dirlist wrong
# update-modules.postinst used to be the missing file,
# the last one in the first dirent set passed to ceph-fuse
diff <(ls) <(echo "$list")
cd ..
rm -rf test-1774
cd
umount /mnt/ceph-fuse
| 45,147 | 20.82117 | 58 | sh |
null | ceph-main/qa/machine_types/schedule_rados_ovh.sh | #!/usr/bin/env bash
# $1 - part
# $2 - branch name
# $3 - machine name
# $4 - email address
# $5 - filter out (for now this arg must be the last one on the command line)
## example #1
## (date +%U) week number
## % 2 - mod 2 (e.g. 0,1,0,1 ...)
## * 7 - multiplied by 7 (e.g. 0,7,0,7...)
## $1 day of the week (0-6)
## /14 for 2 weeks
## example #2
## (date +%U) week number
## % 4 - mod 4 (e.g. 0,1,2,3,0,1,2,3 ...)
## * 7 - multiplied by 7 (e.g. 0,7,14,21,0,7,14,21...)
## $1 day of the week (0-6)
## /28 for 4 weeks
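## worked example for the 4-week case (illustrative numbers):
## in week 23 with $1=3, (23 % 4) * 7 + 3 = 24, so the suite runs subset 24/28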
echo "Scheduling " $2 " branch"
if [ $2 = "master" ] ; then
# run master branch with --newest option looking for good sha1 7 builds back
teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/28 --newest 7 -e $4 ~/vps.yaml $5
elif [ $2 = "jewel" ] ; then
# run jewel branch with /40 jobs
teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/40 -e $4 ~/vps.yaml $5
else
# run NON master branches without --newest
teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/28 -e $4 ~/vps.yaml $5
fi
| 1,217 | 33.8 | 145 | sh |
null | ceph-main/qa/machine_types/schedule_subset.sh | #!/bin/bash -e
#command line => CEPH_BRANCH=<branch>; MACHINE_NAME=<machine_type>; SUITE_NAME=<suite>; ../schedule_subset.sh <day_of_week> $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL <$FILTER>
partitions="$1"
shift
branch="$1"
shift
machine="$1"
shift
suite="$1"
shift
email="$1"
shift
kernel="$1"
shift
# rest of arguments passed directly to teuthology-suite
echo "Scheduling $branch branch"
teuthology-suite -v -c "$branch" -m "$machine" -k "$kernel" -s "$suite" --ceph-repo https://git.ceph.com/ceph.git --suite-repo https://git.ceph.com/ceph.git --subset "$((RANDOM % partitions))/$partitions" --newest 100 -e "$email" "$@"
| 649 | 29.952381 | 234 | sh |
null | ceph-main/qa/mds/test_anchortable.sh | #!/usr/bin/env bash
set -x
mkdir links
for f in `seq 1 8`
do
mkdir $f
for g in `seq 1 20`
do
touch $f/$g
ln $f/$g links/$f.$g
done
done
for f in `seq 1 8`
do
echo testing failure point $f
bash -c "pushd . ; cd $bindir ; sleep 10; ./ceph -c $conf mds tell \* injectargs \"--mds_kill_mdstable_at $f\" ; popd" &
bash -c "pushd . ; cd $bindir ; sleep 11 ; ./init-ceph -c $conf start mds ; popd" &
for g in `seq 1 20`
do
rm $f/$g
rm links/$f.$g
sleep 1
done
done
| 506 | 17.107143 | 124 | sh |
null | ceph-main/qa/mds/test_mdstable_failures.sh | #!/usr/bin/env bash
set -x
for f in `seq 1 8`
do
echo testing failure point $f
pushd . ; cd $bindir ; ./ceph -c $conf mds tell \* injectargs "--mds_kill_mdstable_at $f" ; popd
sleep 1 # wait for mds command to go thru
bash -c "pushd . ; cd $bindir ; sleep 10 ; ./init-ceph -c $conf start mds ; popd" &
touch $f
ln $f $f.link
sleep 10
done
| 370 | 23.733333 | 100 | sh |
null | ceph-main/qa/mon/bootstrap/host.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[global]
mon host = 127.0.0.1:6789
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
EOF
rm -f mm
fsid=`uuidgen`
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c conf -i a --mkfs --fsid $fsid --mon-data mon.a -k keyring
ceph-mon -c conf -i a --mon-data $cwd/mon.a
ceph -c conf -k keyring health
killall ceph-mon
echo OK | 477 | 15.482759 | 69 | sh |
null | ceph-main/qa/mon/bootstrap/initial_members.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
mon initial members = a,b,d
EOF
rm -f mm
monmaptool --create mm \
--add a 127.0.0.1:6789 \
--add b 127.0.0.1:6790 \
--add c 127.0.0.1:6791
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring
ceph-mon -c conf -i b --mkfs --monmap mm --mon-data $cwd/mon.b -k keyring
ceph-mon -c conf -i c --mkfs --monmap mm --mon-data $cwd/mon.c -k keyring
ceph-mon -c conf -i a --mon-data $cwd/mon.a
ceph-mon -c conf -i c --mon-data $cwd/mon.b
ceph-mon -c conf -i b --mon-data $cwd/mon.c
while true; do
    ceph -c conf -k keyring --monmap mm health
    if ceph -c conf -k keyring --monmap mm mon stat | grep a= | grep b= | grep c= ; then
        break
    fi
    sleep 1
done
killall ceph-mon
echo OK
| 959 | 23 | 84 | sh |
null | ceph-main/qa/mon/bootstrap/initial_members_asok.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
debug asok = 20
mon initial members = a,b,d
admin socket = $cwd/\$name.asok
EOF
rm -f mm
fsid=`uuidgen`
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c conf -i a --mkfs --fsid $fsid --mon-data $cwd/mon.a -k keyring
ceph-mon -c conf -i b --mkfs --fsid $fsid --mon-data $cwd/mon.b -k keyring
ceph-mon -c conf -i c --mkfs --fsid $fsid --mon-data $cwd/mon.c -k keyring
ceph-mon -c conf -i a --mon-data $cwd/mon.a --public-addr 127.0.0.1:6789
ceph-mon -c conf -i b --mon-data $cwd/mon.c --public-addr 127.0.0.1:6790
ceph-mon -c conf -i c --mon-data $cwd/mon.b --public-addr 127.0.0.1:6791
sleep 1
if timeout 5 ceph -c conf -k keyring -m localhost mon stat | grep "a,b,c" ; then
echo WTF
exit 1
fi
ceph --admin-daemon mon.a.asok add_bootstrap_peer_hint 127.0.0.1:6790
while true; do
if ceph -c conf -k keyring -m 127.0.0.1 mon stat | grep 'a,b'; then
break
fi
sleep 1
done
ceph --admin-daemon mon.c.asok add_bootstrap_peer_hint 127.0.0.1:6790
while true; do
if ceph -c conf -k keyring -m 127.0.0.1 mon stat | grep 'a,b,c'; then
break
fi
sleep 1
done
ceph-mon -c conf -i d --mkfs --fsid $fsid --mon-data $cwd/mon.d -k keyring
ceph-mon -c conf -i d --mon-data $cwd/mon.d --public-addr 127.0.0.1:6792
ceph --admin-daemon mon.d.asok add_bootstrap_peer_hint 127.0.0.1:6790
while true; do
if ceph -c conf -k keyring -m 127.0.0.1 mon stat | grep 'a,b,c,d'; then
break
fi
sleep 1
done
killall ceph-mon
echo OK
| 1,642 | 23.522388 | 80 | sh |
null | ceph-main/qa/mon/bootstrap/simple.sh | #!/bin/sh -e
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
EOF
rm -f mm
monmaptool --create mm \
--add a 127.0.0.1:6789 \
--add b 127.0.0.1:6790 \
--add c 127.0.0.1:6791
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring
ceph-mon -c conf -i b --mkfs --monmap mm --mon-data $cwd/mon.b -k keyring
ceph-mon -c conf -i c --mkfs --monmap mm --mon-data $cwd/mon.c -k keyring
ceph-mon -c conf -i a --mon-data $cwd/mon.a
ceph-mon -c conf -i c --mon-data $cwd/mon.b
ceph-mon -c conf -i b --mon-data $cwd/mon.c
while true; do
ceph -c conf -k keyring --monmap mm health
if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1,2'; then
break
fi
sleep 1
done
killall ceph-mon
echo OK
| 863 | 22.351351 | 79 | sh |
null | ceph-main/qa/mon/bootstrap/simple_expand.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
EOF
rm -f mm
monmaptool --create mm \
--add a 127.0.0.1:6789 \
--add b 127.0.0.1:6790 \
--add c 127.0.0.1:6791
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring
ceph-mon -c conf -i b --mkfs --monmap mm --mon-data $cwd/mon.b -k keyring
ceph-mon -c conf -i c --mkfs --monmap mm --mon-data $cwd/mon.c -k keyring
ceph-mon -c conf -i a --mon-data $cwd/mon.a
ceph-mon -c conf -i c --mon-data $cwd/mon.b
ceph-mon -c conf -i b --mon-data $cwd/mon.c
ceph -c conf -k keyring --monmap mm health
## expand via a kludged monmap
monmaptool mm --add d 127.0.0.1:6792
ceph-mon -c conf -i d --mkfs --monmap mm --mon-data $cwd/mon.d -k keyring
ceph-mon -c conf -i d --mon-data $cwd/mon.d
while true; do
ceph -c conf -k keyring --monmap mm health
if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1,2,3'; then
break
fi
sleep 1
done
# again
monmaptool mm --add e 127.0.0.1:6793
ceph-mon -c conf -i e --mkfs --monmap mm --mon-data $cwd/mon.e -k keyring
ceph-mon -c conf -i e --mon-data $cwd/mon.e
while true; do
ceph -c conf -k keyring --monmap mm health
if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1,2,3,4'; then
break
fi
sleep 1
done
killall ceph-mon
echo OK
| 1,495 | 23.52459 | 83 | sh |
null | ceph-main/qa/mon/bootstrap/simple_expand_monmap.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
EOF
rm -f mm
monmaptool --create mm \
--add a 127.0.0.1:6789 \
--add b 127.0.0.1:6790 \
--add c 127.0.0.1:6791
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring
ceph-mon -c conf -i b --mkfs --monmap mm --mon-data $cwd/mon.b -k keyring
ceph-mon -c conf -i c --mkfs --monmap mm --mon-data $cwd/mon.c -k keyring
ceph-mon -c conf -i a --mon-data $cwd/mon.a
ceph-mon -c conf -i c --mon-data $cwd/mon.b
ceph-mon -c conf -i b --mon-data $cwd/mon.c
ceph -c conf -k keyring --monmap mm health
## expand via a kludged monmap
monmaptool mm --add d 127.0.0.1:6792
ceph-mon -c conf -i d --mkfs --monmap mm --mon-data $cwd/mon.d -k keyring
ceph-mon -c conf -i d --mon-data $cwd/mon.d
while true; do
ceph -c conf -k keyring --monmap mm health
if ceph -c conf -k keyring --monmap mm mon stat | grep d=; then
break
fi
sleep 1
done
killall ceph-mon
echo OK
| 1,084 | 23.111111 | 73 | sh |
null | ceph-main/qa/mon/bootstrap/simple_single_expand.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
EOF
rm -f mm
monmaptool --create mm \
--add a 127.0.0.1:6789
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring
ceph-mon -c conf -i a --mon-data $cwd/mon.a
ceph -c conf -k keyring --monmap mm health
## expand via a kludged monmap
monmaptool mm --add d 127.0.0.1:6702
ceph-mon -c conf -i d --mkfs --monmap mm --mon-data $cwd/mon.d -k keyring
ceph-mon -c conf -i d --mon-data $cwd/mon.d
while true; do
ceph -c conf -k keyring --monmap mm health
if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1'; then
break
fi
sleep 1
done
# again
monmaptool mm --add e 127.0.0.1:6793
ceph-mon -c conf -i e --mkfs --monmap mm --mon-data $cwd/mon.e -k keyring
ceph-mon -c conf -i e --mon-data $cwd/mon.e
while true; do
ceph -c conf -k keyring --monmap mm health
if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1,2'; then
break
fi
sleep 1
done
killall ceph-mon
echo OK
| 1,193 | 20.709091 | 79 | sh |
null | ceph-main/qa/mon/bootstrap/simple_single_expand2.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
EOF
rm -f mm
ip=`host \`hostname\` | awk '{print $4}'`
monmaptool --create mm \
--add a $ip:6779
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c conf -i a --mkfs --monmap mm --mon-data $cwd/mon.a -k keyring
ceph-mon -c conf -i a --mon-data $cwd/mon.a
ceph -c conf -k keyring --monmap mm health
## expand via a local_network
ceph-mon -c conf -i d --mkfs --monmap mm --mon-data $cwd/mon.d -k keyring
ceph-mon -c conf -i d --mon-data $cwd/mon.d --public-network 127.0.0.1/32
while true; do
ceph -c conf -k keyring --monmap mm health
if ceph -c conf -k keyring --monmap mm mon stat | grep 'quorum 0,1'; then
break
fi
sleep 1
done
killall ceph-mon
echo OK
| 882 | 20.536585 | 77 | sh |
null | ceph-main/qa/mon/bootstrap/single_host.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[global]
mon host = 127.0.0.1:6789
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
EOF
rm -f mm
fsid=`uuidgen`
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c conf -i a --mkfs --fsid $fsid --mon-data $cwd/mon.a -k keyring
ceph-mon -c conf -i a --mon-data $cwd/mon.a
ceph -c conf -k keyring health
killall ceph-mon
echo OK | 482 | 15.655172 | 74 | sh |
null | ceph-main/qa/mon/bootstrap/single_host_multi.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[global]
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
mon host = 127.0.0.1:6789 127.0.0.1:6790 127.0.0.1:6791
EOF
rm -f mm
fsid=`uuidgen`
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c conf -i a --mkfs --fsid $fsid --mon-data $cwd/mon.a -k keyring --public-addr 127.0.0.1:6789
ceph-mon -c conf -i b --mkfs --fsid $fsid --mon-data $cwd/mon.b -k keyring --public-addr 127.0.0.1:6790
ceph-mon -c conf -i c --mkfs --fsid $fsid --mon-data $cwd/mon.c -k keyring --public-addr 127.0.0.1:6791
ceph-mon -c conf -i a --mon-data $cwd/mon.a
ceph-mon -c conf -i b --mon-data $cwd/mon.b
ceph-mon -c conf -i c --mon-data $cwd/mon.c
ceph -c conf -k keyring health -m 127.0.0.1
while true; do
if ceph -c conf -k keyring -m 127.0.0.1 mon stat | grep 'a,b,c'; then
break
fi
sleep 1
done
killall ceph-mon
echo OK | 970 | 23.897436 | 103 | sh |
null | ceph-main/qa/qa_scripts/cephscrub.sh | # remove the ceph directories
sudo rm -rf /var/log/ceph
sudo rm -rf /var/lib/ceph
sudo rm -rf /etc/ceph
sudo rm -rf /var/run/ceph
# remove the ceph packages
sudo apt-get -y purge ceph
sudo apt-get -y purge ceph-dbg
sudo apt-get -y purge ceph-mds
sudo apt-get -y purge ceph-mds-dbg
sudo apt-get -y purge ceph-fuse
sudo apt-get -y purge ceph-fuse-dbg
sudo apt-get -y purge ceph-common
sudo apt-get -y purge ceph-common-dbg
sudo apt-get -y purge ceph-resource-agents
sudo apt-get -y purge librados2
sudo apt-get -y purge librados2-dbg
sudo apt-get -y purge librados-dev
sudo apt-get -y purge librbd1
sudo apt-get -y purge librbd1-dbg
sudo apt-get -y purge librbd-dev
sudo apt-get -y purge libcephfs2
sudo apt-get -y purge libcephfs2-dbg
sudo apt-get -y purge libcephfs-dev
sudo apt-get -y purge radosgw
sudo apt-get -y purge radosgw-dbg
sudo apt-get -y purge obsync
sudo apt-get -y purge python-rados
sudo apt-get -y purge python-rbd
sudo apt-get -y purge python-cephfs
| 991 | 31 | 43 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install.sh | #!/usr/bin/env bash
#
# Install a simple ceph cluster upon which openstack images will be stored.
#
set -fv
ceph_node=${1}
source copy_func.sh
copy_file files/$OS_CEPH_ISO $ceph_node .
copy_file execs/ceph_cluster.sh $ceph_node . 0777
copy_file execs/ceph-pool-create.sh $ceph_node . 0777
ssh $ceph_node ./ceph_cluster.sh $*
| 326 | 26.25 | 75 | sh |
null | ceph-main/qa/qa_scripts/openstack/connectceph.sh | #!/usr/bin/env bash
#
# Connect openstack node just installed to a ceph cluster.
#
# Essentially implements:
#
# http://docs.ceph.com/en/latest/rbd/rbd-openstack/
#
# The directory named files contains templates for the /etc/glance/glance-api.conf,
# /etc/cinder/cinder.conf, /etc/nova/nova.conf Openstack files
#
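# As an illustration of the template convention (the placeholder names are the
# ones substituted by fix_conf_file.sh), a template line such as
#   rbd_secret_uuid = RBDSECRET
# in files/cinder.template.conf is rewritten with the libvirt secret uuid, and
# VARHOSTNAME / VARINET4ADDR are replaced with the node's hostname and IPv4
# address, before the file is copied into place on the openstack node.
#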
set -fv
source ./copy_func.sh
source ./fix_conf_file.sh
openstack_node=${1}
ceph_node=${2}
scp $ceph_node:/etc/ceph/ceph.conf ./ceph.conf
ssh $openstack_node sudo mkdir /etc/ceph
copy_file ceph.conf $openstack_node /etc/ceph 0644
rm -f ceph.conf
ssh $openstack_node sudo yum -y install python-rbd
ssh $openstack_node sudo yum -y install ceph-common
ssh $ceph_node "sudo ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'"
ssh $ceph_node "sudo ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'"
ssh $ceph_node "sudo ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'"
ssh $ceph_node sudo ceph auth get-or-create client.glance | ssh $openstack_node sudo tee /etc/ceph/ceph.client.glance.keyring
ssh $openstack_node sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring
ssh $ceph_node sudo ceph auth get-or-create client.cinder | ssh $openstack_node sudo tee /etc/ceph/ceph.client.cinder.keyring
ssh $openstack_node sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
ssh $ceph_node sudo ceph auth get-or-create client.cinder-backup | ssh $openstack_node sudo tee /etc/ceph/ceph.client.cinder-backup.keyring
ssh $openstack_node sudo chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
ssh $ceph_node sudo ceph auth get-key client.cinder | ssh $openstack_node tee client.cinder.key
copy_file execs/libvirt-secret.sh $openstack_node .
secret_msg=`ssh $openstack_node sudo ./libvirt-secret.sh $openstack_node`
secret_virt=`echo $secret_msg | sed 's/.* set //'`
echo $secret_virt
fix_conf_file $openstack_node glance-api /etc/glance
fix_conf_file $openstack_node cinder /etc/cinder $secret_virt
fix_conf_file $openstack_node nova /etc/nova $secret_virt
copy_file execs/start_openstack.sh $openstack_node . 0755
ssh $openstack_node ./start_openstack.sh
| 2,662 | 58.177778 | 189 | sh |
null | ceph-main/qa/qa_scripts/openstack/copy_func.sh | #
# copy_file(<filename>, <node>, <directory>, [<permissions>], [<owner>])
#
# copy a file -- this is needed because passwordless ssh does not
# work when sudo'ing.
# <file> -- name of local file to be copied
# <node> -- node where we want the file
# <directory> -- location where we want the file on <node>
# <permissions> -- (optional) permissions on the copied file
# <owner> -- (optional) owner of the copied file
#
function copy_file() {
fname=`basename ${1}`
scp ${1} ${2}:/tmp/${fname}
ssh ${2} sudo cp /tmp/${fname} ${3}
if [ $# -gt 3 ]; then
ssh ${2} sudo chmod ${4} ${3}/${fname}
fi
if [ $# -gt 4 ]; then
ssh ${2} sudo chown ${5} ${3}/${fname}
fi
}
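#
# Example usage (hypothetical node and file names):
# copy_file ./ceph.conf mynode1 /etc/ceph 0644 "root:root"
#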
| 731 | 30.826087 | 71 | sh |
null | ceph-main/qa/qa_scripts/openstack/fix_conf_file.sh | source ./copy_func.sh
#
# Take a templated file, modify a local copy, and write it to the
# remote site.
#
# Usage: fix_conf_file <remote-site> <file-name> <remote-location> [<rbd-secret>]
# <remote-site> -- site where we want this modified file stored.
# <file-name> -- name of the remote file.
# <remote-location> -- directory where the file will be stored
# <rbd-secret> -- (optional) rbd_secret used by libvirt
#
function fix_conf_file() {
    if [[ $# -lt 3 ]]; then
echo 'fix_conf_file: Too few parameters'
exit 1
fi
openstack_node_local=${1}
cp files/${2}.template.conf ${2}.conf
hostname=`ssh $openstack_node_local hostname`
inet4addr=`ssh $openstack_node_local hostname -i`
sed -i s/VARHOSTNAME/$hostname/g ${2}.conf
sed -i s/VARINET4ADDR/$inet4addr/g ${2}.conf
if [[ $# == 4 ]]; then
sed -i s/RBDSECRET/${4}/g ${2}.conf
fi
copy_file ${2}.conf $openstack_node_local ${3} 0644 "root:root"
rm ${2}.conf
}
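#
# Example usage (hypothetical node name; assumes files/cinder.template.conf exists):
# fix_conf_file mynode1 cinder /etc/cinder $secret_virt
#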
| 999 | 33.482759 | 81 | sh |
null | ceph-main/qa/qa_scripts/openstack/image_create.sh | #!/usr/bin/env bash
#
# Set up a vm on packstack. Use the iso in RHEL_ISO (defaults to home dir)
#
set -fv
source ./copy_func.sh
source ./fix_conf_file.sh
openstack_node=${1}
ceph_node=${2}
RHEL_ISO=${RHEL_ISO:-~/rhel-server-7.2-x86_64-boot.iso}
copy_file ${RHEL_ISO} $openstack_node .
copy_file execs/run_openstack.sh $openstack_node . 0755
filler=`date +%s`
ssh $openstack_node ./run_openstack.sh "${openstack_node}X${filler}" rhel-server-7.2-x86_64-boot.iso
ssh $ceph_node sudo ceph df
| 491 | 27.941176 | 100 | sh |
null | ceph-main/qa/qa_scripts/openstack/openstack.sh | #!/usr/bin/env bash
#
# Install Openstack.
# Usage: openstack <openstack-site> <ceph-monitor>
#
# This script installs Openstack on one node, and connects it to a ceph
# cluster on another set of nodes. It is intended to run from a third
# node.
#
# Assumes a single node Openstack cluster and a single monitor ceph
# cluster.
#
# The execs directory contains scripts to be run on remote sites.
# The files directory contains files to be copied to remote sites.
#
set -fv
source ./copy_func.sh
source ./fix_conf_file.sh
openstack_node=${1}
ceph_node=${2}
./packstack.sh $openstack_node $ceph_node
echo 'done running packstack'
sleep 60
./connectceph.sh $openstack_node $ceph_node
echo 'done connecting'
sleep 60
./image_create.sh $openstack_node $ceph_node
| 763 | 25.344828 | 71 | sh |
null | ceph-main/qa/qa_scripts/openstack/packstack.sh | #!/usr/bin/env bash
#
# Install openstack by running packstack.
#
# Implements the operations in:
# https://docs.google.com/document/d/1us18KR3LuLyINgGk2rmI-SVj9UksCE7y4C2D_68Aa8o/edit?ts=56a78fcb
#
# The directory named files contains a template for the kilo.conf file used by packstack.
#
set -fv
source ./copy_func.sh
source ./fix_conf_file.sh
openstack_node=${1}
ceph_node=${2}
copy_file execs/openstack-preinstall.sh $openstack_node . 0777
fix_conf_file $openstack_node kilo .
ssh $openstack_node sudo ./openstack-preinstall.sh
sleep 240
ssh $openstack_node sudo packstack --answer-file kilo.conf
| 604 | 27.809524 | 98 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/ceph_install.sh | #! /usr/bin/env bash
if [ $# -ne 5 ]; then
echo 'Usage: ceph_install.sh <admin-node> <mon-node> <osd-node> <osd-node> <osd-node>'
exit -1
fi
allnodes=$*
adminnode=$1
shift
cephnodes=$*
monnode=$1
shift
osdnodes=$*
./multi_action.sh cdn_setup.sh $allnodes
./talknice.sh $allnodes
for mac in $allnodes; do
ssh $mac sudo yum -y install yum-utils
done
source ./repolocs.sh
ssh $adminnode sudo yum-config-manager --add ${CEPH_REPO_TOOLS}
ssh $monnode sudo yum-config-manager --add ${CEPH_REPO_MON}
for mac in $osdnodes; do
ssh $mac sudo yum-config-manager --add ${CEPH_REPO_OSD}
done
ssh $adminnode sudo yum-config-manager --add ${INSTALLER_REPO_LOC}
for mac in $allnodes; do
ssh $mac sudo sed -i 's/gpgcheck=1/gpgcheck=0/' /etc/yum.conf
done
source copy_func.sh
copy_file execs/ceph_ansible.sh $adminnode . 0777 ubuntu:ubuntu
copy_file execs/edit_ansible_hosts.sh $adminnode . 0777 ubuntu:ubuntu
copy_file execs/edit_groupvars_osds.sh $adminnode . 0777 ubuntu:ubuntu
copy_file ../execs/ceph-pool-create.sh $monnode . 0777 ubuntu:ubuntu
if [ -e ~/ip_info ]; then
copy_file ~/ip_info $adminnode . 0777 ubuntu:ubuntu
fi
ssh $adminnode ./ceph_ansible.sh $cephnodes
| 1,184 | 28.625 | 90 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/copy_func.sh | ../copy_func.sh | 15 | 15 | 15 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/multi_action.sh | #! /usr/bin/env bash
source copy_func.sh
allparms=$*
cmdv=$1
shift
sites=$*
for mac in $sites; do
echo $cmdv $mac
if [ -f ~/secrets ]; then
copy_file ~/secrets $mac . 0777 ubuntu:ubuntu
fi
copy_file execs/${cmdv} $mac . 0777 ubuntu:ubuntu
ssh $mac ./${cmdv} &
done
./staller.sh $allparms
for mac in $sites; do
ssh $mac sudo rm -rf secrets
done
echo "DONE"
| 388 | 18.45 | 53 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/repolocs.sh | #! /usr/bin/env bash
SPECIFIC_VERSION=latest-Ceph-2-RHEL-7
#SPECIFIC_VERSION=Ceph-2-RHEL-7-20160630.t.0
#SPECIFIC_VERSION=Ceph-2.0-RHEL-7-20160718.t.0
export CEPH_REPO_TOOLS=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/ceph-2-rhel-7-compose/${SPECIFIC_VERSION}/compose/Tools/x86_64/os/
export CEPH_REPO_MON=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/ceph-2-rhel-7-compose/${SPECIFIC_VERSION}/compose/MON/x86_64/os/
export CEPH_REPO_OSD=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/ceph-2-rhel-7-compose/${SPECIFIC_VERSION}/compose/OSD/x86_64/os/
export INSTALLER_REPO_LOC=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/rhscon-2-rhel-7-compose/latest-RHSCON-2-RHEL-7/compose/Installer/x86_64/os/
| 760 | 83.555556 | 162 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/staller.sh | #! /usr/bin/env bash
cmd_wait=$1
shift
sites=$*
donebit=0
while [ $donebit -ne 1 ]; do
sleep 10
donebit=1
for rem in $sites; do
rval=`ssh $rem ps aux | grep $cmd_wait | wc -l`
if [ $rval -gt 0 ]; then
donebit=0
fi
done
done
| 277 | 16.375 | 56 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/talknice.sh | #!/usr/bin/env bash
declare -A rsapub
for fulln in $*; do
sname=`echo $fulln | sed 's/\..*//'`
nhead=`echo $sname | sed 's/[0-9]*//g'`
x=`ssh $fulln "ls .ssh/id_rsa"`
if [ -z $x ]; then
ssh $fulln "ssh-keygen -N '' -f .ssh/id_rsa";
fi
xx=`ssh $fulln "ls .ssh/config"`
if [ -z $xx ]; then
scp config $fulln:/home/ubuntu/.ssh/config
fi
ssh $fulln "chown ubuntu:ubuntu .ssh/config"
ssh $fulln "chmod 0600 .ssh/config"
rsapub[$fulln]=`ssh $fulln "cat .ssh/id_rsa.pub"`
done
for ii in $*; do
ssh $ii sudo iptables -F
for jj in $*; do
pval=${rsapub[$jj]}
if [ "$ii" != "$jj" ]; then
xxxx=`ssh $ii "grep $jj .ssh/authorized_keys"`
if [ -z "$xxxx" ]; then
ssh $ii "echo '$pval' | sudo tee -a /home/ubuntu/.ssh/authorized_keys"
fi
fi
done;
done
| 884 | 28.5 | 86 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/cdn_setup.sh | #! /usr/bin/env bash
if [ -f ~/secrets ]; then
source ~/secrets
fi
subm=`which subscription-manager`
if [ ${#subm} -eq 0 ]; then
sudo yum -y update
exit
fi
subst=`sudo subscription-manager status | grep "^Overall" | awk '{print $NF}'`
if [ $subst == 'Unknown' ]; then
mynameis=${subscrname:-'inigomontoya'}
mypassis=${subscrpassword:-'youkeelmyfatherpreparetodie'}
sudo subscription-manager register --username=$mynameis --password=$mypassis --force
sudo subscription-manager refresh
if [ $? -eq 1 ]; then exit 1; fi
sudo subscription-manager attach --pool=8a85f9823e3d5e43013e3ddd4e2a0977
fi
sudo subscription-manager repos --enable=rhel-7-server-rpms
sudo yum -y update
| 708 | 32.761905 | 88 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/ceph_ansible.sh | #! /usr/bin/env bash
cephnodes=$*
monnode=$1
sudo yum -y install ceph-ansible
cd
sudo ./edit_ansible_hosts.sh $cephnodes
mkdir ceph-ansible-keys
cd /usr/share/ceph-ansible/group_vars/
if [ -f ~/ip_info ]; then
source ~/ip_info
fi
mon_intf=${mon_intf:-'eno1'}
pub_netw=${pub_netw:-'10.8.128.0\/21'}
sudo cp all.sample all
sudo sed -i 's/#ceph_origin:.*/ceph_origin: distro/' all
sudo sed -i 's/#fetch_directory:.*/fetch_directory: ~\/ceph-ansible-keys/' all
sudo sed -i 's/#ceph_stable:.*/ceph_stable: true/' all
sudo sed -i 's/#ceph_stable_rh_storage:.*/ceph_stable_rh_storage: false/' all
sudo sed -i 's/#ceph_stable_rh_storage_cdn_install:.*/ceph_stable_rh_storage_cdn_install: true/' all
sudo sed -i 's/#cephx:.*/cephx: true/' all
sudo sed -i "s/#monitor_interface:.*/monitor_interface: ${mon_intf}/" all
sudo sed -i 's/#journal_size:.*/journal_size: 1024/' all
sudo sed -i "s/#public_network:.*/public_network: ${pub_netw}/" all
sudo cp osds.sample osds
sudo sed -i 's/#fetch_directory:.*/fetch_directory: ~\/ceph-ansible-keys/' osds
sudo sed -i 's/#crush_location:/crush_location:/' osds
sudo sed -i 's/#osd_crush_location:/osd_crush_location:/' osds
sudo sed -i 's/#cephx:/cephx:/' osds
sudo sed -i 's/#devices:/devices:/' osds
sudo sed -i 's/#journal_collocation:.*/journal_collocation: true/' osds
cd
sudo ./edit_groupvars_osds.sh
cd /usr/share/ceph-ansible
sudo cp site.yml.sample site.yml
ansible-playbook site.yml
ssh $monnode ~/ceph-pool-create.sh
| 1,464 | 38.594595 | 100 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_ansible_hosts.sh | #! /usr/bin/env bash
ed /etc/ansible/hosts << EOF
$
a
[mons]
${1}
[osds]
${2}
${3}
${4}
.
w
q
EOF
| 101 | 4.666667 | 28 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_groupvars_osds.sh | #! /usr/bin/env bash
ed /usr/share/ceph-ansible/group_vars/osds << EOF
$
/^devices:
.+1
i
- /dev/sdb
- /dev/sdc
- /dev/sdd
.
w
q
EOF
| 142 | 9.214286 | 49 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/ceph-pool-create.sh | #!/usr/bin/env bash
set -f
#
# On the ceph site, make the pools required for Openstack
#
#
# Make a pool, if it does not already exist.
#
function make_pool {
if [[ -z `sudo ceph osd lspools | grep " $1,"` ]]; then
echo "making $1"
sudo ceph osd pool create $1 128
fi
}
#
# Make sure the pg_num and pgp_num values are good.
#
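# (For example, if the pool currently has pg_num 64, the loop below applies
# 64 and then 96, sleeping in between, and 128 is set explicitly afterwards.)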
count=`sudo ceph osd pool get rbd pg_num | sed 's/pg_num: //'`
while [ $count -lt 128 ]; do
sudo ceph osd pool set rbd pg_num $count
count=`expr $count + 32`
sleep 30
done
sudo ceph osd pool set rbd pg_num 128
sleep 30
sudo ceph osd pool set rbd pgp_num 128
sleep 30
make_pool volumes
make_pool images
make_pool backups
make_pool vms
| 699 | 19 | 62 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/ceph_cluster.sh | #!/usr/bin/env bash
set -f
echo $OS_CEPH_ISO
if [[ $# -ne 4 ]]; then
echo "Usage: ceph_cluster mon.0 osd.0 osd.1 osd.2"
exit -1
fi
allsites=$*
mon=$1
shift
osds=$*
ISOVAL=${OS_CEPH_ISO-rhceph-1.3.1-rhel-7-x86_64-dvd.iso}
sudo mount -o loop ${ISOVAL} /mnt
fqdn=`hostname -f`
lsetup=`ls /mnt/Installer | grep "^ice_setup"`
sudo yum -y install /mnt/Installer/${lsetup}
sudo ice_setup -d /mnt << EOF
yes
/mnt
$fqdn
http
EOF
ceph-deploy new ${mon}
ceph-deploy install --repo --release=ceph-mon ${mon}
ceph-deploy install --repo --release=ceph-osd ${allsites}
ceph-deploy install --mon ${mon}
ceph-deploy install --osd ${allsites}
ceph-deploy mon create-initial
sudo service ceph -a start osd
for d in b c d; do
for m in $osds; do
ceph-deploy disk zap ${m}:sd${d}
done
for m in $osds; do
ceph-deploy osd prepare ${m}:sd${d}
done
for m in $osds; do
ceph-deploy osd activate ${m}:sd${d}1:sd${d}2
done
done
sudo ./ceph-pool-create.sh
hchk=`sudo ceph health`
while [[ $hchk != 'HEALTH_OK' ]]; do
sleep 30
hchk=`sudo ceph health`
done
| 1,092 | 20.431373 | 57 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/libvirt-secret.sh | #!/usr/bin/env bash
set -f
#
# Generate a libvirt secret on the Openstack node.
#
openstack_node=${1}
uuid=`uuidgen`
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
<uuid>${uuid}</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
EOF
sudo virsh secret-define --file secret.xml
sudo virsh secret-set-value --secret ${uuid} --base64 $(cat client.cinder.key)
echo ${uuid}
| 422 | 20.15 | 78 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/openstack-preinstall.sh | #!/usr/bin/env bash
set -f
#
# Remotely setup the stuff needed to run packstack. This should do items 1-4 in
# https://docs.google.com/document/d/1us18KR3LuLyINgGk2rmI-SVj9UksCE7y4C2D_68Aa8o/edit?ts=56a78fcb
#
yum remove -y rhos-release
rpm -ivh http://rhos-release.virt.bos.redhat.com/repos/rhos-release/rhos-release-latest.noarch.rpm
rm -rf /etc/yum.repos.d/*
rm -rf /var/cache/yum/*
rhos-release 8
yum update -y
yum install -y nc puppet vim screen setroubleshoot crudini bpython openstack-packstack
systemctl disable ntpd
systemctl stop ntpd
reboot
| 554 | 29.833333 | 98 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/run_openstack.sh | #!/usr/bin/env bash
set -fv
#
# Create a glance image, a corresponding cinder volume, a nova instance, attach, the cinder volume to the
# nova instance, and create a backup.
#
image_name=${1}X
file_name=${2-rhel-server-7.2-x86_64-boot.iso}
source ./keystonerc_admin
glance image-create --name $image_name --disk-format iso --container-format bare --file $file_name
glance_id=`glance image-list | grep ${image_name} | sed 's/^| //' | sed 's/ |.*//'`
cinder create --image-id ${glance_id} --display-name ${image_name}-volume 8
nova boot --image ${image_name} --flavor 1 ${image_name}-inst
cinder_id=`cinder list | grep ${image_name} | sed 's/^| //' | sed 's/ |.*//'`
chkr=`cinder list | grep ${image_name}-volume | grep available`
while [ -z "$chkr" ]; do
sleep 30
chkr=`cinder list | grep ${image_name}-volume | grep available`
done
nova volume-attach ${image_name}-inst ${cinder_id} auto
sleep 30
cinder backup-create --name ${image_name}-backup ${image_name}-volume --force
| 986 | 40.125 | 105 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/start_openstack.sh | #!/usr/bin/env bash
set -fv
#
# start the Openstack services
#
sudo cp /root/keystonerc_admin ./keystonerc_admin
sudo chmod 0644 ./keystonerc_admin
source ./keystonerc_admin
sudo service httpd stop
sudo service openstack-keystone restart
sudo service openstack-glance-api restart
sudo service openstack-nova-compute restart
sudo service openstack-cinder-volume restart
sudo service openstack-cinder-backup restart
| 415 | 25 | 49 | sh |
null | ceph-main/qa/rbd/common.sh | #!/usr/bin/env bash
die() {
echo "$*"
exit 1
}
cleanup() {
rm -rf $TDIR
TDIR=""
}
set_variables() {
# defaults
[ -z "$bindir" ] && bindir=$PWD # location of init-ceph
if [ -z "$conf" ]; then
conf="$basedir/ceph.conf"
[ -e $conf ] || conf="/etc/ceph/ceph.conf"
fi
[ -e $conf ] || die "conf file not found"
CCONF="ceph-conf -c $conf"
[ -z "$mnt" ] && mnt="/c"
if [ -z "$monhost" ]; then
$CCONF -t mon -i 0 'mon addr' > $TDIR/cconf_mon
if [ $? -ne 0 ]; then
$CCONF -t mon.a -i 0 'mon addr' > $TDIR/cconf_mon
[ $? -ne 0 ] && die "can't figure out \$monhost"
fi
read monhost < $TDIR/cconf_mon
fi
[ -z "$imgsize" ] && imgsize=1024
[ -z "$user" ] && user=admin
[ -z "$keyring" ] && keyring="`$CCONF keyring`"
[ -z "$secret" ] && secret="`ceph-authtool $keyring -n client.$user -p`"
monip="`echo $monhost | sed 's/:/ /g' | awk '{print $1}'`"
monport="`echo $monhost | sed 's/:/ /g' | awk '{print $2}'`"
[ -z "$monip" ] && die "bad mon address"
[ -z "$monport" ] && monport=6789
set -e
mydir=`hostname`_`echo $0 | sed 's/\//_/g'`
img_name=test.`hostname`.$$
}
rbd_load() {
modprobe rbd
}
rbd_create_image() {
id=$1
rbd create $img_name.$id --size=$imgsize
}
rbd_add() {
id=$1
echo "$monip:$monport name=$user,secret=$secret rbd $img_name.$id" \
> /sys/bus/rbd/add
pushd /sys/bus/rbd/devices &> /dev/null
[ $? -eq 0 ] || die "failed to cd"
devid=""
rm -f "$TDIR/rbd_devs"
for f in *; do echo $f >> "$TDIR/rbd_devs"; done
sort -nr "$TDIR/rbd_devs" > "$TDIR/rev_rbd_devs"
while read f < "$TDIR/rev_rbd_devs"; do
read d_img_name < "$f/name"
if [ "x$d_img_name" == "x$img_name.$id" ]; then
devid=$f
break
fi
done
popd &> /dev/null
[ "x$devid" == "x" ] && die "failed to find $img_name.$id"
export rbd$id=$devid
while [ ! -e /dev/rbd$devid ]; do sleep 1; done
}
rbd_test_init() {
rbd_load
}
rbd_remove() {
echo $1 > /sys/bus/rbd/remove
}
rbd_rm_image() {
id=$1
	rbd rm $img_name.$id
}
TDIR=`mktemp -d`
trap cleanup INT TERM EXIT
set_variables
| 2,161 | 19.788462 | 76 | sh |
null | ceph-main/qa/rbd/rbd.sh | #!/usr/bin/env bash
set -x
basedir=`echo $0 | sed 's/[^/]*$//g'`.
. $basedir/common.sh
rbd_test_init
create_multiple() {
for i in `seq 1 10`; do
rbd_create_image $i
done
for i in `seq 1 10`; do
rbd_add $i
done
for i in `seq 1 10`; do
devname=/dev/rbd`eval echo \\$rbd$i`
echo $devname
done
for i in `seq 1 10`; do
devid=`eval echo \\$rbd$i`
rbd_remove $devid
done
for i in `seq 1 10`; do
rbd_rm_image $i
done
}
test_dbench() {
rbd_create_image 0
rbd_add 0
devname=/dev/rbd$rbd0
mkfs -t ext3 $devname
mount -t ext3 $devname $mnt
dbench -D $mnt -t 30 5
sync
umount $mnt
rbd_remove $rbd0
rbd_rm_image 0
}
create_multiple
test_dbench
| 676 | 12.27451 | 38 | sh |
null | ceph-main/qa/standalone/ceph-helpers.sh | #!/usr/bin/env bash
#
# Copyright (C) 2013,2014 Cloudwatt <[email protected]>
# Copyright (C) 2014,2015 Red Hat <[email protected]>
# Copyright (C) 2014 Federico Gimenez <[email protected]>
#
# Author: Loic Dachary <[email protected]>
# Author: Federico Gimenez <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
TIMEOUT=300
WAIT_FOR_CLEAN_TIMEOUT=90
MAX_TIMEOUT=15
PG_NUM=4
TMPDIR=${TMPDIR:-/tmp}
CEPH_BUILD_VIRTUALENV=${TMPDIR}
TESTDIR=${TESTDIR:-${TMPDIR}}
if type xmlstarlet > /dev/null 2>&1; then
XMLSTARLET=xmlstarlet
elif type xml > /dev/null 2>&1; then
XMLSTARLET=xml
else
echo "Missing xmlstarlet binary!"
exit 1
fi
if [ `uname` = FreeBSD ]; then
SED=gsed
AWK=gawk
DIFFCOLOPTS=""
KERNCORE="kern.corefile"
else
SED=sed
AWK=awk
termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/')
if [ -n "$termwidth" -a "$termwidth" != "0" ]; then
termwidth="-W ${termwidth}"
fi
DIFFCOLOPTS="-y $termwidth"
KERNCORE="kernel.core_pattern"
fi
EXTRA_OPTS=""
#! @file ceph-helpers.sh
# @brief Toolbox to manage Ceph cluster dedicated to testing
#
# Example use case:
#
# ~~~~~~~~~~~~~~~~{.sh}
# source ceph-helpers.sh
#
# function mytest() {
# # cleanup leftovers and reset mydir
# setup mydir
# # create a cluster with one monitor and three osds
# run_mon mydir a
# run_osd mydir 0
# run_osd mydir 2
# run_osd mydir 3
# # put and get an object
# rados --pool rbd put GROUP /etc/group
# rados --pool rbd get GROUP /tmp/GROUP
# # stop the cluster and cleanup the directory
# teardown mydir
# }
# ~~~~~~~~~~~~~~~~
#
# The focus is on simplicity and efficiency, in the context of
# functional tests. The output is intentionally very verbose
# and functions return as soon as an error is found. The caller
# is also expected to abort on the first error so that debugging
# can be done by looking at the end of the output.
#
# Each function is documented, implemented and tested independently.
# When modifying a helper, the test and the documentation are
# expected to be updated and it is easier if they are collocated. A
# test for a given function can be run with
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS test_get_osds
# ~~~~~~~~~~~~~~~~
#
# and all the tests (i.e. all functions matching test_*) are run
# with:
#
# ~~~~~~~~~~~~~~~~{.sh}
# ceph-helpers.sh TESTS
# ~~~~~~~~~~~~~~~~
#
# A test function takes a single argument : the directory dedicated
# to the tests. It is expected to not create any file outside of this
# directory and remove it entirely when it completes successfully.
#
function get_asok_dir() {
if [ -n "$CEPH_ASOK_DIR" ]; then
echo "$CEPH_ASOK_DIR"
else
echo ${TMPDIR:-/tmp}/ceph-asok.$$
fi
}
function get_asok_path() {
local name=$1
if [ -n "$name" ]; then
echo $(get_asok_dir)/ceph-$name.asok
else
echo $(get_asok_dir)/\$cluster-\$name.asok
fi
}
##
# Cleanup any leftovers found in **dir** via **teardown**
# and reset **dir** as an empty environment.
#
# @param dir path name of the environment
# @return 0 on success, 1 on error
#
function setup() {
local dir=$1
teardown $dir || return 1
mkdir -p $dir
mkdir -p $(get_asok_dir)
if [ $(ulimit -n) -le 1024 ]; then
ulimit -n 4096 || return 1
fi
if [ -z "$LOCALRUN" ]; then
trap "teardown $dir 1" TERM HUP INT
fi
}
function test_setup() {
local dir=$dir
setup $dir || return 1
test -d $dir || return 1
setup $dir || return 1
test -d $dir || return 1
teardown $dir
}
#######################################################################
##
# Kill all daemons for which a .pid file exists in **dir** and remove
# **dir**. If the file system in which **dir** is btrfs, delete all
# subvolumes that relate to it.
#
# @param dir path name of the environment
# @param dumplogs pass "1" to always dump logs; otherwise logs are dumped only if cores are found
# @return 0 on success, 1 on error
#
function teardown() {
local dir=$1
local dumplogs=$2
kill_daemons $dir KILL
if [ `uname` != FreeBSD ] \
&& [ $(stat -f -c '%T' .) == "btrfs" ]; then
__teardown_btrfs $dir
fi
local cores="no"
local pattern="$(sysctl -n $KERNCORE)"
# See if we have apport core handling
if [ "${pattern:0:1}" = "|" ]; then
# TODO: Where can we get the dumps?
# Not sure where the dumps really are so this will look in the CWD
pattern=""
fi
    # Locally the core file name starts with "core"; on teuthology it ends with "core"
if ls $(dirname "$pattern") | grep -q '^core\|core$' ; then
cores="yes"
if [ -n "$LOCALRUN" ]; then
mkdir /tmp/cores.$$ 2> /dev/null || true
for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do
mv $i /tmp/cores.$$
done
fi
fi
if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then
if [ -n "$LOCALRUN" ]; then
display_logs $dir
else
# Move logs to where Teuthology will archive it
mkdir -p $TESTDIR/archive/log
mv $dir/*.log $TESTDIR/archive/log
fi
fi
rm -fr $dir
rm -rf $(get_asok_dir)
if [ "$cores" = "yes" ]; then
echo "ERROR: Failure due to cores found"
if [ -n "$LOCALRUN" ]; then
echo "Find saved core files in /tmp/cores.$$"
fi
return 1
fi
return 0
}
function __teardown_btrfs() {
local btrfs_base_dir=$1
local btrfs_root=$(df -P . | tail -1 | $AWK '{print $NF}')
local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list -t . | $AWK '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/$btrfs_dir")
for subvolume in $btrfs_dirs; do
sudo btrfs subvolume delete $btrfs_root/$subvolume
done
}
function test_teardown() {
local dir=$dir
setup $dir || return 1
teardown $dir || return 1
! test -d $dir || return 1
}
#######################################################################
##
# Sends a signal to a single daemon.
# This is a helper function for kill_daemons
#
# After the daemon is sent **signal**, its actual termination
# will be verified by sending it signal 0. If the daemon is
# still alive, kill_daemon will pause for a few seconds and
# try again. This will repeat for a fixed number of times
# before kill_daemon returns on failure. The list of
# sleep intervals can be specified as **delays** and defaults
# to:
#
# 0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120
#
# This sequence is designed to run first a very short sleep time (0.1)
# if the machine is fast enough and the daemon terminates in a fraction of a
# second. The increasing sleep numbers should give plenty of time for
# the daemon to die even on the slowest running machine. If a daemon
# takes more than a few minutes to stop (the sum of all sleep times),
# there probably is no point in waiting more and a number of things
# are likely to go wrong anyway: better give up and return on error.
#
# @param pid the process id to send a signal
# @param send_signal the signal to send
# @param delays sequence of sleep times before failure
#
function kill_daemon() {
local pid=$(cat $1)
local send_signal=$2
local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
local exit_code=1
# In order to try after the last large sleep add 0 at the end so we check
# one last time before dropping out of the loop
for try in $delays 0 ; do
if kill -$send_signal $pid 2> /dev/null ; then
exit_code=1
else
exit_code=0
break
fi
send_signal=0
sleep $try
done;
return $exit_code
}
function test_kill_daemon() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
name_prefix=osd
for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
#
# sending signal 0 won't kill the daemon
# waiting just for one second instead of the default schedule
# allows us to quickly verify what happens when kill fails
# to stop the daemon (i.e. it must return false)
#
! kill_daemon $pidfile 0 1 || return 1
#
# killing just the osd and verify the mon still is responsive
#
kill_daemon $pidfile TERM || return 1
done
name_prefix=mgr
for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
#
# kill the mgr
#
kill_daemon $pidfile TERM || return 1
done
name_prefix=mon
for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
#
# kill the mon and verify it cannot be reached
#
kill_daemon $pidfile TERM || return 1
! timeout 5 ceph status || return 1
done
teardown $dir || return 1
}
##
# Kill all daemons for which a .pid file exists in **dir**. Each
# daemon is sent a **signal** and kill_daemons waits up to a few
# minutes for it to exit. By default all daemons are killed. If a
# **name_prefix** is provided, only the daemons for which a pid
# file is found matching the prefix are killed. See run_osd and
# run_mon for more information about the name conventions for
# the pid files.
#
# Send TERM to all daemons : kill_daemons $dir
# Send KILL to all daemons : kill_daemons $dir KILL
# Send KILL to all osds : kill_daemons $dir KILL osd
# Send KILL to osd 1 : kill_daemons $dir KILL osd.1
#
# If a daemon is sent the TERM signal and does not terminate
# within a few minutes, it will still be running even after
# kill_daemons returns.
#
# If all daemons are killed successfully the function returns 0;
# if at least one daemon remains, this is treated as an
# error and the function returns 1.
#
# @param dir path name of the environment
# @param signal name of the first signal (defaults to TERM)
# @param name_prefix only kill matching daemons (defaults to all)
# @param delays sequence of sleep times before failure
# @return 0 on success, 1 on error
#
function kill_daemons() {
local trace=$(shopt -q -o xtrace && echo true || echo false)
$trace && shopt -u -o xtrace
local dir=$1
local signal=${2:-TERM}
local name_prefix=$3 # optional, osd, mon, osd.1
local delays=$4 #optional timing
local status=0
local pids=""
for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
run_in_background pids kill_daemon $pidfile $signal $delays
done
wait_background pids
status=$?
$trace && shopt -s -o xtrace
return $status
}
function test_kill_daemons() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
#
# sending signal 0 won't kill the daemon
# waiting just for one second instead of the default schedule
# allows us to quickly verify what happens when kill fails
# to stop the daemon (i.e. it must return false)
#
! kill_daemons $dir 0 osd 1 || return 1
#
# killing just the osd and verify the mon still is responsive
#
kill_daemons $dir TERM osd || return 1
#
# kill the mgr
#
kill_daemons $dir TERM mgr || return 1
#
# kill the mon and verify it cannot be reached
#
kill_daemons $dir TERM || return 1
! timeout 5 ceph status || return 1
teardown $dir || return 1
}
#
# return a random TCP port which is not used yet
#
# please note, there is a race window between getting a free port
# with this function and actually binding to it.
#
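# Example (illustrative; the mon address is an assumption, not a fixed value):
#
# CEPH_MON="127.0.0.1:$(get_unused_port)"
#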
function get_unused_port() {
local ip=127.0.0.1
python3 -c "import socket; s=socket.socket(); s.bind(('$ip', 0)); print(s.getsockname()[1]); s.close()"
}
#######################################################################
##
# Run a monitor by the name mon.**id** with data in **dir**/**id**.
# The logs can be found in **dir**/mon.**id**.log and the pid file
# is **dir**/mon.**id**.pid and the admin socket is
# **dir**/**id**/ceph-mon.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-mon --mkfs
# and the ceph-mon daemon.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_mon, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_mon $dir a # spawn a mon and bind port 7018
# run_mon $dir a --debug-filestore=20 # spawn with filestore debugging
#
# If mon_initial_members is not set, the default rbd pool is deleted
# and replaced with a replicated pool with fewer placement groups to
# speed up initialization. If mon_initial_members is set, no attempt
# is made to recreate the rbd pool because it would hang forever,
# waiting for other mons to join.
#
# A **dir**/ceph.conf file is created but not meant to be used by any
# function. It is convenient for debugging a failure with:
#
# ceph --conf **dir**/ceph.conf -s
#
# @param dir path name of the environment
# @param id mon identifier
# @param ... can be any option valid for ceph-mon
# @return 0 on success, 1 on error
#
function run_mon() {
local dir=$1
shift
local id=$1
shift
local data=$dir/$id
ceph-mon \
--id $id \
--mkfs \
--mon-data=$data \
--run-dir=$dir \
"$@" || return 1
ceph-mon \
--id $id \
--osd-failsafe-full-ratio=.99 \
--mon-osd-full-ratio=.99 \
--mon-data-avail-crit=1 \
--mon-data-avail-warn=5 \
--paxos-propose-interval=0.1 \
--osd-crush-chooseleaf-type=0 \
$EXTRA_OPTS \
--debug-mon 20 \
--debug-ms 20 \
--debug-paxos 20 \
--chdir= \
--mon-data=$data \
--log-file=$dir/\$name.log \
--admin-socket=$(get_asok_path) \
--mon-cluster-log-file=$dir/log \
--run-dir=$dir \
--pid-file=$dir/\$name.pid \
--mon-allow-pool-delete \
--mon-allow-pool-size-one \
--osd-pool-default-pg-autoscale-mode off \
--mon-osd-backfillfull-ratio .99 \
--mon-warn-on-insecure-global-id-reclaim-allowed=false \
"$@" || return 1
cat > $dir/ceph.conf <<EOF
[global]
fsid = $(get_config mon $id fsid)
mon host = $(get_config mon $id mon_host)
EOF
}
function test_run_mon() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
ceph mon dump | grep "mon.a" || return 1
kill_daemons $dir || return 1
run_mon $dir a --osd_pool_default_size=3 || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
create_rbd_pool || return 1
ceph osd dump | grep "pool 1 'rbd'" || return 1
local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
config get osd_pool_default_size)
test "$size" = '{"osd_pool_default_size":"3"}' || return 1
! CEPH_ARGS='' ceph status || return 1
CEPH_ARGS='' ceph --conf $dir/ceph.conf status || return 1
kill_daemons $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
config get osd_pool_default_size)
test "$size" = '{"osd_pool_default_size":"1"}' || return 1
kill_daemons $dir || return 1
CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
run_mon $dir a || return 1
local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
config get osd_pool_default_size)
test "$size" = '{"osd_pool_default_size":"2"}' || return 1
kill_daemons $dir || return 1
teardown $dir || return 1
}
function create_rbd_pool() {
ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
create_pool rbd $PG_NUM || return 1
rbd pool init rbd
}
function create_pool() {
ceph osd pool create "$@"
sleep 1
}
function delete_pool() {
local poolname=$1
ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
}
#######################################################################
function run_mgr() {
local dir=$1
shift
local id=$1
shift
local data=$dir/$id
ceph config set mgr mgr_pool false --force
ceph-mgr \
--id $id \
$EXTRA_OPTS \
--osd-failsafe-full-ratio=.99 \
--debug-mgr 20 \
--debug-objecter 20 \
--debug-ms 20 \
--debug-paxos 20 \
--chdir= \
--mgr-data=$data \
--log-file=$dir/\$name.log \
--admin-socket=$(get_asok_path) \
--run-dir=$dir \
--pid-file=$dir/\$name.pid \
--mgr-module-path=$(realpath ${CEPH_ROOT}/src/pybind/mgr) \
"$@" || return 1
}
function run_mds() {
local dir=$1
shift
local id=$1
shift
local data=$dir/$id
ceph-mds \
--id $id \
$EXTRA_OPTS \
--debug-mds 20 \
--debug-objecter 20 \
--debug-ms 20 \
--chdir= \
--mds-data=$data \
--log-file=$dir/\$name.log \
--admin-socket=$(get_asok_path) \
--run-dir=$dir \
--pid-file=$dir/\$name.pid \
"$@" || return 1
}
#######################################################################
##
# Create (prepare) and run (activate) an osd by the name osd.**id**
# with data in **dir**/**id**. The logs can be found in
# **dir**/osd.**id**.log, the pid file is **dir**/osd.**id**.pid and
# the admin socket is **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to run_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The run_osd function creates the OSD data directory on the **dir**/**id**
# directory and relies on the activate_osd function to run the daemon.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# run_osd $dir 0 # prepare and activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function run_osd() {
local dir=$1
shift
local id=$1
shift
local osd_data=$dir/$id
local ceph_args="$CEPH_ARGS"
ceph_args+=" --osd-failsafe-full-ratio=.99"
ceph_args+=" --osd-journal-size=100"
ceph_args+=" --osd-scrub-load-threshold=2000"
ceph_args+=" --osd-data=$osd_data"
ceph_args+=" --osd-journal=${osd_data}/journal"
ceph_args+=" --chdir="
ceph_args+=$EXTRA_OPTS
ceph_args+=" --run-dir=$dir"
ceph_args+=" --admin-socket=$(get_asok_path)"
ceph_args+=" --debug-osd=20"
ceph_args+=" --debug-ms=1"
ceph_args+=" --debug-monc=20"
ceph_args+=" --log-file=$dir/\$name.log"
ceph_args+=" --pid-file=$dir/\$name.pid"
ceph_args+=" --osd-max-object-name-len=460"
ceph_args+=" --osd-max-object-namespace-len=64"
ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
ceph_args+=" --osd-mclock-profile=high_recovery_ops"
ceph_args+=" "
ceph_args+="$@"
mkdir -p $osd_data
local uuid=`uuidgen`
echo "add osd$id $uuid"
OSD_SECRET=$(ceph-authtool --gen-print-key)
echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
ceph osd new $uuid -i $osd_data/new.json
rm $osd_data/new.json
ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid
local key_fn=$osd_data/keyring
cat > $key_fn<<EOF
[osd.$id]
key = $OSD_SECRET
EOF
echo adding osd$id key to auth repository
ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
echo start osd.$id
ceph-osd -i $id $ceph_args &
# If noup is set, then can't wait for this osd
if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
return 0
fi
wait_for_osd up $id || return 1
}
function run_osd_filestore() {
local dir=$1
shift
local id=$1
shift
local osd_data=$dir/$id
local ceph_args="$CEPH_ARGS"
ceph_args+=" --osd-failsafe-full-ratio=.99"
ceph_args+=" --osd-journal-size=100"
ceph_args+=" --osd-scrub-load-threshold=2000"
ceph_args+=" --osd-data=$osd_data"
ceph_args+=" --osd-journal=${osd_data}/journal"
ceph_args+=" --chdir="
ceph_args+=$EXTRA_OPTS
ceph_args+=" --run-dir=$dir"
ceph_args+=" --admin-socket=$(get_asok_path)"
ceph_args+=" --debug-osd=20"
ceph_args+=" --debug-ms=1"
ceph_args+=" --debug-monc=20"
ceph_args+=" --log-file=$dir/\$name.log"
ceph_args+=" --pid-file=$dir/\$name.pid"
ceph_args+=" --osd-max-object-name-len=460"
ceph_args+=" --osd-max-object-namespace-len=64"
ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
ceph_args+=" "
ceph_args+="$@"
mkdir -p $osd_data
local uuid=`uuidgen`
echo "add osd$osd $uuid"
OSD_SECRET=$(ceph-authtool --gen-print-key)
echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $osd_data/new.json
ceph osd new $uuid -i $osd_data/new.json
rm $osd_data/new.json
ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid --osd-objectstore=filestore
local key_fn=$osd_data/keyring
cat > $key_fn<<EOF
[osd.$id]
key = $OSD_SECRET
EOF
echo adding osd$id key to auth repository
ceph -i "$key_fn" auth add osd.$id osd "allow *" mon "allow profile osd" mgr "allow profile osd"
echo start osd.$id
ceph-osd -i $id $ceph_args &
# If noup is set, then can't wait for this osd
if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
return 0
fi
wait_for_osd up $id || return 1
}
function test_run_osd() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills)
echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
run_osd $dir 1 --osd-max-backfills 20 || return 1
local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
config get osd_max_backfills)
test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
config get osd_max_backfills)
test "$backfills" = '{"osd_max_backfills":"30"}' || return 1
teardown $dir || return 1
}
#######################################################################
##
# Shutdown and remove all traces of the osd by the name osd.**id**.
#
# The OSD is shutdown with the TERM signal. It is then removed from
# the auth list, crush map, osd map etc and the files associated with
# it are also removed.
#
# @param dir path name of the environment
# @param id osd identifier
# @return 0 on success, 1 on error
#
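# Example (as exercised by test_destroy_osd below):
#
# destroy_osd $dir 0 || return 1
#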
function destroy_osd() {
local dir=$1
local id=$2
ceph osd out osd.$id || return 1
kill_daemons $dir TERM osd.$id || return 1
ceph osd down osd.$id || return 1
ceph osd purge osd.$id --yes-i-really-mean-it || return 1
teardown $dir/$id || return 1
rm -fr $dir/$id
}
function test_destroy_osd() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
destroy_osd $dir 0 || return 1
! ceph osd dump | grep "osd.0 " || return 1
teardown $dir || return 1
}
#######################################################################
##
# Run (activate) an osd by the name osd.**id** with data in
# **dir**/**id**. The logs can be found in **dir**/osd.**id**.log,
# the pid file is **dir**/osd.**id**.pid and the admin socket is
# **dir**/**id**/ceph-osd.**id**.asok.
#
# The remaining arguments are passed verbatim to ceph-osd.
#
# Two mandatory arguments must be provided: --fsid and --mon-host
# Instead of adding them to every call to activate_osd, they can be
# set in the CEPH_ARGS environment variable to be read implicitly
# by every ceph command.
#
# The CEPH_CONF variable is expected to be set to /dev/null to
# only rely on arguments for configuration.
#
# The activate_osd function expects a valid OSD data directory
# in **dir**/**id**, either just created via run_osd or re-using
# one left by a previous run of ceph-osd. The ceph-osd daemon is
# run directly on the foreground
#
# The activate_osd function blocks until the monitor reports the osd
# up. If it fails to do so within $TIMEOUT seconds, activate_osd
# fails.
#
# Examples:
#
# CEPH_ARGS="--fsid=$(uuidgen) "
# CEPH_ARGS+="--mon-host=127.0.0.1:7018 "
# activate_osd $dir 0 # activate an osd using the monitor listening on 7018
#
# @param dir path name of the environment
# @param id osd identifier
# @param ... can be any option valid for ceph-osd
# @return 0 on success, 1 on error
#
function activate_osd() {
local dir=$1
shift
local id=$1
shift
local osd_data=$dir/$id
local ceph_args="$CEPH_ARGS"
ceph_args+=" --osd-failsafe-full-ratio=.99"
ceph_args+=" --osd-journal-size=100"
ceph_args+=" --osd-scrub-load-threshold=2000"
ceph_args+=" --osd-data=$osd_data"
ceph_args+=" --osd-journal=${osd_data}/journal"
ceph_args+=" --chdir="
ceph_args+=$EXTRA_OPTS
ceph_args+=" --run-dir=$dir"
ceph_args+=" --admin-socket=$(get_asok_path)"
ceph_args+=" --debug-osd=20"
ceph_args+=" --log-file=$dir/\$name.log"
ceph_args+=" --pid-file=$dir/\$name.pid"
ceph_args+=" --osd-max-object-name-len=460"
ceph_args+=" --osd-max-object-namespace-len=64"
ceph_args+=" --enable-experimental-unrecoverable-data-corrupting-features=*"
ceph_args+=" --osd-mclock-profile=high_recovery_ops"
ceph_args+=" "
ceph_args+="$@"
mkdir -p $osd_data
echo start osd.$id
ceph-osd -i $id $ceph_args &
[ "$id" = "$(cat $osd_data/whoami)" ] || return 1
# If noup is set, then can't wait for this osd
if ceph osd dump --format=json | jq '.flags_set[]' | grep -q '"noup"' ; then
return 0
fi
wait_for_osd up $id || return 1
}
function test_activate_osd() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills)
echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
kill_daemons $dir TERM osd || return 1
activate_osd $dir 0 --osd-max-backfills 20 || return 1
local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills)
test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
teardown $dir || return 1
}
function test_activate_osd_after_mark_down() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills)
echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
kill_daemons $dir TERM osd || return 1
ceph osd down 0 || return 1
wait_for_osd down 0 || return 1
activate_osd $dir 0 --osd-max-backfills 20 || return 1
local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills)
test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
teardown $dir || return 1
}
function test_activate_osd_skip_benchmark() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
# Skip the osd benchmark during first osd bring-up.
run_osd $dir 0 --osd-op-queue=mclock_scheduler \
--osd-mclock-skip-benchmark=true || return 1
local max_iops_hdd_def=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_hdd)
local max_iops_ssd_def=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_ssd)
kill_daemons $dir TERM osd || return 1
ceph osd down 0 || return 1
wait_for_osd down 0 || return 1
# Skip the osd benchmark during activation as well. Validate that
# the max osd capacities are left unchanged.
activate_osd $dir 0 --osd-op-queue=mclock_scheduler \
--osd-mclock-skip-benchmark=true || return 1
local max_iops_hdd_after_boot=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_hdd)
local max_iops_ssd_after_boot=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get osd_mclock_max_capacity_iops_ssd)
test "$max_iops_hdd_def" = "$max_iops_hdd_after_boot" || return 1
test "$max_iops_ssd_def" = "$max_iops_ssd_after_boot" || return 1
teardown $dir || return 1
}
#######################################################################
##
# Wait until the OSD **id** is either up or down, as specified by
# **state**. It fails after $TIMEOUT seconds.
#
# @param state either up or down
# @param id osd identifier
# @return 0 on success, 1 on error
#
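# Example (as exercised by test_wait_for_osd below):
#
# wait_for_osd up 0 || return 1
# wait_for_osd down 0 || return 1
#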
function wait_for_osd() {
local state=$1
local id=$2
status=1
for ((i=0; i < $TIMEOUT; i++)); do
echo $i
if ! ceph osd dump | grep "osd.$id $state"; then
sleep 1
else
status=0
break
fi
done
return $status
}
function test_wait_for_osd() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
wait_for_osd up 0 || return 1
wait_for_osd up 1 || return 1
kill_daemons $dir TERM osd.0 || return 1
wait_for_osd down 0 || return 1
( TIMEOUT=1 ; ! wait_for_osd up 0 ) || return 1
teardown $dir || return 1
}
#######################################################################
##
# Display the list of OSD ids supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT white space separated list of OSD ids
# @return 0 on success, 1 on error
#
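# Example (mirrors test_get_osds; the object name does not need to exist):
#
# osds=$(get_osds rbd GROUP)
#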
function get_osds() {
local poolname=$1
local objectname=$2
local osds=$(ceph --format json osd map $poolname $objectname 2>/dev/null | \
jq '.acting | .[]')
# get rid of the trailing space
echo $osds
}
function test_get_osds() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
create_rbd_pool || return 1
get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1
teardown $dir || return 1
}
#######################################################################
##
# Wait for the monitor to form quorum (optionally, of size N)
#
# @param timeout duration (lower-bound) to wait for quorum to be formed
# @param quorumsize size of quorum to wait for
# @return 0 on success, 1 on error
#
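# Example (illustrative; a quorum size of 3 is an assumption, 300 is the
# default timeout):
#
# wait_for_quorum 300 3 || return 1
#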
function wait_for_quorum() {
local timeout=$1
local quorumsize=$2
if [[ -z "$timeout" ]]; then
timeout=300
fi
if [[ -z "$quorumsize" ]]; then
timeout $timeout ceph quorum_status --format=json >&/dev/null || return 1
return 0
fi
no_quorum=1
wait_until=$((`date +%s` + $timeout))
while [[ $(date +%s) -lt $wait_until ]]; do
jqfilter='.quorum | length == '$quorumsize
jqinput="$(timeout $timeout ceph quorum_status --format=json 2>/dev/null)"
res=$(echo $jqinput | jq "$jqfilter")
if [[ "$res" == "true" ]]; then
no_quorum=0
break
fi
done
return $no_quorum
}
#######################################################################
##
# Return the PG supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT a PG
# @return 0 on success, 1 on error
#
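# Example (mirrors test_get_pg):
#
# pgid=$(get_pg rbd GROUP)
#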
function get_pg() {
local poolname=$1
local objectname=$2
ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid'
}
function test_get_pg() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
teardown $dir || return 1
}
#######################################################################
##
# Return the value of the **config**, obtained via the config get command
# of the admin socket of **daemon**.**id**.
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param STDOUT the config value
# @return 0 on success, 1 on error
#
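# Example (mirrors test_get_config):
#
# get_config osd 0 osd_max_scrubs
#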
function get_config() {
local daemon=$1
local id=$2
local config=$3
CEPH_ARGS='' \
ceph --format json daemon $(get_asok_path $daemon.$id) \
config get $config 2> /dev/null | \
jq -r ".$config"
}
function test_get_config() {
local dir=$1
# override the default config using command line arg and check it
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
test $(get_config mon a osd_pool_default_size) = 1 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_max_scrubs=3 || return 1
test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
teardown $dir || return 1
}
#######################################################################
##
# Set the **config** to specified **value**, via the config set command
# of the admin socket of **daemon**.**id**
#
# @param daemon mon or osd
# @param id mon or osd ID
# @param config the configuration variable name as found in config_opts.h
# @param value the config value
# @return 0 on success, 1 on error
#
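# Example (mirrors test_set_config):
#
# set_config mon a ms_crc_header false || return 1
#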
function set_config() {
local daemon=$1
local id=$2
local config=$3
local value=$4
test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
config set $config $value 2> /dev/null | \
jq 'has("success")') == true
}
function test_set_config() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
test $(get_config mon a ms_crc_header) = true || return 1
set_config mon a ms_crc_header false || return 1
test $(get_config mon a ms_crc_header) = false || return 1
set_config mon a ms_crc_header true || return 1
test $(get_config mon a ms_crc_header) = true || return 1
teardown $dir || return 1
}
#######################################################################
##
# Return the OSD id of the primary OSD supporting the **objectname**
# stored in **poolname**, as reported by ceph osd map.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the primary OSD id
# @return 0 on success, 1 on error
#
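# Example (mirrors test_get_primary):
#
# primary=$(get_primary rbd GROUP)
#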
function get_primary() {
local poolname=$1
local objectname=$2
ceph --format json osd map $poolname $objectname 2>/dev/null | \
jq '.acting_primary'
}
function test_get_primary() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
local osd=0
run_mgr $dir x || return 1
run_osd $dir $osd || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
test $(get_primary rbd GROUP) = $osd || return 1
teardown $dir || return 1
}
#######################################################################
##
# Return the id of any OSD supporting the **objectname** stored in
# **poolname**, as reported by ceph osd map, except the primary.
#
# @param poolname an existing pool
# @param objectname an objectname (may or may not exist)
# @param STDOUT the OSD id
# @return 0 on success, 1 on error
#
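# Example (mirrors test_get_not_primary):
#
# not_primary=$(get_not_primary rbd GROUP)
#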
function get_not_primary() {
local poolname=$1
local objectname=$2
local primary=$(get_primary $poolname $objectname)
ceph --format json osd map $poolname $objectname 2>/dev/null | \
jq ".acting | map(select (. != $primary)) | .[0]"
}
function test_get_not_primary() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
local primary=$(get_primary rbd GROUP)
local not_primary=$(get_not_primary rbd GROUP)
test $not_primary != $primary || return 1
test $not_primary = 0 -o $not_primary = 1 || return 1
teardown $dir || return 1
}
#######################################################################
function _objectstore_tool_nodown() {
local dir=$1
shift
local id=$1
shift
local osd_data=$dir/$id
ceph-objectstore-tool \
--data-path $osd_data \
"$@" || return 1
}
function _objectstore_tool_nowait() {
local dir=$1
shift
local id=$1
shift
kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1
_objectstore_tool_nodown $dir $id "$@" || return 1
activate_osd $dir $id $ceph_osd_args >&2 || return 1
}
##
# Run ceph-objectstore-tool against the OSD **id** using the data path
# **dir**. The OSD is killed with TERM prior to running
# ceph-objectstore-tool because access to the data path is
# exclusive. The OSD is restarted after the command completes. The
# objectstore_tool returns after all PG are active+clean again.
#
# @param dir the data path of the OSD
# @param id the OSD id
# @param ... arguments to ceph-objectstore-tool
# @param STDIN the input of ceph-objectstore-tool
# @param STDOUT the output of ceph-objectstore-tool
# @return 0 on success, 1 on error
#
# The value of $ceph_osd_args will be passed to restarted osds
#
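# Example (mirrors test_objectstore_tool; assumes the object GROUP exists
# on osd.0):
#
# objectstore_tool $dir 0 GROUP get-bytes | diff - /etc/group
#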
function objectstore_tool() {
local dir=$1
shift
local id=$1
shift
_objectstore_tool_nowait $dir $id "$@" || return 1
wait_for_clean >&2
}
function test_objectstore_tool() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
local osd=0
run_mgr $dir x || return 1
run_osd $dir $osd || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
rados --pool rbd put GROUP /etc/group || return 1
objectstore_tool $dir $osd GROUP get-bytes | \
diff - /etc/group
! objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1
teardown $dir || return 1
}
#######################################################################
##
# Predicate checking if there is an ongoing recovery in the
# cluster. If any of the recovering_{keys,bytes,objects}_per_sec
# counters are reported by ceph status, it means recovery is in
# progress.
#
# @return 0 if recovery in progress, 1 otherwise
#
function get_is_making_recovery_progress() {
local recovery_progress
recovery_progress+=".recovering_keys_per_sec + "
recovery_progress+=".recovering_bytes_per_sec + "
recovery_progress+=".recovering_objects_per_sec"
local progress=$(ceph --format json status 2>/dev/null | \
jq -r ".pgmap | $recovery_progress")
test "$progress" != null
}
function test_get_is_making_recovery_progress() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
! get_is_making_recovery_progress || return 1
teardown $dir || return 1
}
#######################################################################
##
# Return the number of active+clean PGs in the cluster. A PG counts if
# ceph pg dump pgs reports it both **active** and **clean** and
# not **stale**.
#
# @param STDOUT the number of active PGs
# @return 0 on success, 1 on error
#
function get_num_active_clean() {
local expression
expression+="select(contains(\"active\") and contains(\"clean\")) | "
expression+="select(contains(\"stale\") | not)"
ceph --format json pg dump pgs 2>/dev/null | \
jq ".pg_stats | [.[] | .state | $expression] | length"
}
function test_get_num_active_clean() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
local num_active_clean=$(get_num_active_clean)
test "$num_active_clean" = $PG_NUM || return 1
teardown $dir || return 1
}
##
# Return the number of active or peered PGs in the cluster. A PG matches if
# ceph pg dump pgs reports it is either **active** or **peered** and
# not **stale**.
#
# @param STDOUT the number of active PGs
# @return 0 on success, 1 on error
#
function get_num_active_or_peered() {
local expression
expression+="select(contains(\"active\") or contains(\"peered\")) | "
expression+="select(contains(\"stale\") | not)"
ceph --format json pg dump pgs 2>/dev/null | \
jq ".pg_stats | [.[] | .state | $expression] | length"
}
function test_get_num_active_or_peered() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
local num_peered=$(get_num_active_or_peered)
test "$num_peered" = $PG_NUM || return 1
teardown $dir || return 1
}
#######################################################################
##
# Return the number of PGs in the cluster, according to
# ceph pg dump pgs.
#
# @param STDOUT the number of PGs
# @return 0 on success, 1 on error
#
function get_num_pgs() {
ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs'
}
function test_get_num_pgs() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
local num_pgs=$(get_num_pgs)
test "$num_pgs" -gt 0 || return 1
teardown $dir || return 1
}
#######################################################################
##
# Return the OSD ids in use by at least one PG in the cluster (either
# in the up or the acting set), according to ceph pg dump pgs. Every
# OSD id shows as many times as they are used in up and acting sets.
# If an OSD id is in both the up and acting set of a given PG, it will
# show twice.
#
# @param STDOUT a sorted list of OSD ids
# @return 0 on success, 1 on error
#
function get_osd_id_used_by_pgs() {
ceph --format json pg dump pgs 2>/dev/null | jq '.pg_stats | .[] | .up[], .acting[]' | sort
}
function test_get_osd_id_used_by_pgs() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
local osd_ids=$(get_osd_id_used_by_pgs | uniq)
test "$osd_ids" = "0" || return 1
teardown $dir || return 1
}
#######################################################################
##
# Wait until the OSD **id** shows **count** times in the
# PGs (see get_osd_id_used_by_pgs for more information about
# how OSD ids are counted).
#
# @param id the OSD id
# @param count the number of times it must show in the PGs
# @return 0 on success, 1 on error
#
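# Example (mirrors test_wait_osd_id_used_by_pgs):
#
# wait_osd_id_used_by_pgs 0 8 || return 1
#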
function wait_osd_id_used_by_pgs() {
local id=$1
local count=$2
status=1
for ((i=0; i < $TIMEOUT / 5; i++)); do
echo $i
if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then
sleep 5
else
status=0
break
fi
done
return $status
}
function test_wait_osd_id_used_by_pgs() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
wait_osd_id_used_by_pgs 0 8 || return 1
! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1
teardown $dir || return 1
}
#######################################################################
##
# Return the date and time of the last completed scrub for **pgid**,
# as reported by ceph pg dump pgs. Note that a repair also sets this
# date.
#
# @param pgid the id of the PG
# @param STDOUT the date and time of the last scrub
# @return 0 on success, 1 on error
#
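# Example (the optional second argument selects the stamp field, as used
# by pg_deep_scrub below):
#
# get_last_scrub_stamp 1.0
# get_last_scrub_stamp 1.0 last_deep_scrub_stamp
#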
function get_last_scrub_stamp() {
local pgid=$1
local sname=${2:-last_scrub_stamp}
ceph --format json pg dump pgs 2>/dev/null | \
jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname"
}
function test_get_last_scrub_stamp() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
stamp=$(get_last_scrub_stamp 1.0)
test -n "$stamp" || return 1
teardown $dir || return 1
}
#######################################################################
##
# Predicate checking if the cluster is clean, i.e. all of its PGs are
# in a clean state (see get_num_active_clean for a definition).
#
# @return 0 if the cluster is clean, 1 otherwise
#
function is_clean() {
num_pgs=$(get_num_pgs)
test $num_pgs != 0 || return 1
test $(get_num_active_clean) = $num_pgs || return 1
}
function test_is_clean() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
is_clean || return 1
teardown $dir || return 1
}
#######################################################################
calc() { $AWK "BEGIN{print $*}"; }
##
# Return a list of increasingly larger numbers whose total is
# **timeout** seconds. It can be used to sleep only briefly while
# waiting for an event on a fast machine. On a slow machine, the
# larger delays avoid stressing it further or spamming the logs.
#
# @param timeout sum of all delays, in seconds
# @return a list of sleep delays
#
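# Example (outputs taken from test_get_timeout_delays below):
#
# get_timeout_delays 8 # prints "1 2 4 1 "
# get_timeout_delays 1 .1 # prints "0.1 0.2 0.4 0.3 "
#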
function get_timeout_delays() {
local trace=$(shopt -q -o xtrace && echo true || echo false)
$trace && shopt -u -o xtrace
local timeout=$1
local first_step=${2:-1}
local max_timeout=${3:-$MAX_TIMEOUT}
local i
local total="0"
i=$first_step
while test "$(calc $total + $i \<= $timeout)" = "1"; do
echo -n "$(calc $i) "
total=$(calc $total + $i)
i=$(calc $i \* 2)
if [ $max_timeout -gt 0 ]; then
# Did we reach max timeout ?
if [ ${i%.*} -eq ${max_timeout%.*} ] && [ ${i#*.} \> ${max_timeout#*.} ] || [ ${i%.*} -gt ${max_timeout%.*} ]; then
# Yes, so let's cap the max wait time to max
i=$max_timeout
fi
fi
done
if test "$(calc $total \< $timeout)" = "1"; then
echo -n "$(calc $timeout - $total) "
fi
$trace && shopt -s -o xtrace
}
function test_get_timeout_delays() {
test "$(get_timeout_delays 1)" = "1 " || return 1
test "$(get_timeout_delays 5)" = "1 2 2 " || return 1
test "$(get_timeout_delays 6)" = "1 2 3 " || return 1
test "$(get_timeout_delays 7)" = "1 2 4 " || return 1
test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1
test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1
test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1
test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1
test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1
test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1
test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1
test "$(get_timeout_delays 300 .1 0)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 12.8 25.6 51.2 102.4 95.3 " || return 1
test "$(get_timeout_delays 300 .1 10)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 7.3 " || return 1
}
#######################################################################
##
# Wait until the cluster becomes clean, or give up if it does not make
# progress for $WAIT_FOR_CLEAN_TIMEOUT seconds.
# Progress is measured either via the **get_is_making_recovery_progress**
# predicate or if the number of clean PGs changes (as returned by get_num_active_clean)
#
# @return 0 if the cluster is clean, 1 otherwise
#
function wait_for_clean() {
local cmd=$1
local num_active_clean=-1
local cur_active_clean
local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
local -i loop=0
flush_pg_stats || return 1
while test $(get_num_pgs) == 0 ; do
sleep 1
done
while true ; do
# Comparing get_num_active_clean & get_num_pgs is used to determine
# if the cluster is clean. That's almost an inline of is_clean() to
# get more performance by avoiding multiple calls of get_num_active_clean.
cur_active_clean=$(get_num_active_clean)
test $cur_active_clean = $(get_num_pgs) && break
if test $cur_active_clean != $num_active_clean ; then
loop=0
num_active_clean=$cur_active_clean
elif get_is_making_recovery_progress ; then
loop=0
elif (( $loop >= ${#delays[*]} )) ; then
ceph report
return 1
fi
# eval is a no-op if cmd is empty
eval $cmd
sleep ${delays[$loop]}
loop+=1
done
return 0
}
function test_wait_for_clean() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=2 || return 1
run_osd $dir 0 || return 1
run_mgr $dir x || return 1
create_rbd_pool || return 1
! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
run_osd $dir 1 || return 1
wait_for_clean || return 1
teardown $dir || return 1
}
##
# Wait until the cluster becomes peered, or give up if it does not make
# progress for $WAIT_FOR_CLEAN_TIMEOUT seconds.
# Progress is measured either via the **get_is_making_recovery_progress**
# predicate or if the number of peered PGs changes (as returned by get_num_active_or_peered)
#
# @return 0 if the cluster is peered, 1 otherwise
#
function wait_for_peered() {
local cmd=$1
local num_peered=-1
local cur_peered
local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1))
local -i loop=0
flush_pg_stats || return 1
while test $(get_num_pgs) == 0 ; do
sleep 1
done
while true ; do
# Comparing get_num_active_or_peered & get_num_pgs is used to determine
# if the cluster is peered. That's almost an inline of is_clean() to
# get more performance by avoiding multiple calls of get_num_active_or_peered.
cur_peered=$(get_num_active_or_peered)
test $cur_peered = $(get_num_pgs) && break
if test $cur_peered != $num_peered ; then
loop=0
num_peered=$cur_peered
elif get_is_making_recovery_progress ; then
loop=0
elif (( $loop >= ${#delays[*]} )) ; then
ceph report
return 1
fi
# eval is a no-op if cmd is empty
eval $cmd
sleep ${delays[$loop]}
loop+=1
done
return 0
}
function test_wait_for_peered() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=2 || return 1
run_osd $dir 0 || return 1
run_mgr $dir x || return 1
create_rbd_pool || return 1
! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1
run_osd $dir 1 || return 1
wait_for_peered || return 1
teardown $dir || return 1
}
#######################################################################
##
# Wait until the given health condition disappears from the cluster,
# waiting up to $TIMEOUT seconds by default.
#
# @param string to grep for in health detail
# @return 0 once the cluster health no longer matches the string,
# 1 if the health condition remains after $TIMEOUT seconds.
#
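# Example (illustrative; any health-detail substring works, HEALTH_WARN is
# just an assumption here):
#
# wait_for_health_gone HEALTH_WARN || return 1
#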
function wait_for_health_gone() {
local grepstr=$1
local -a delays=($(get_timeout_delays $TIMEOUT .1))
local -i loop=0
while ceph health detail | grep "$grepstr" ; do
if (( $loop >= ${#delays[*]} )) ; then
ceph health detail
return 1
fi
sleep ${delays[$loop]}
loop+=1
done
}
##
# Wait until the cluster health matches the condition passed as arg,
# waiting up to $TIMEOUT seconds.
#
# @param string to grep for in health detail
# @return 0 if the cluster health matches request, 1 otherwise
#
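# Example (as used by wait_for_health_ok below):
#
# wait_for_health "HEALTH_OK" || return 1
#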
function wait_for_health() {
local grepstr=$1
local -a delays=($(get_timeout_delays $TIMEOUT .1))
local -i loop=0
while ! ceph health detail | grep "$grepstr" ; do
if (( $loop >= ${#delays[*]} )) ; then
ceph health detail
return 1
fi
sleep ${delays[$loop]}
loop+=1
done
}
##
# Wait until the cluster becomes HEALTH_OK again, giving up after
# $TIMEOUT seconds.
#
# @return 0 if the cluster is HEALTHY, 1 otherwise
#
function wait_for_health_ok() {
wait_for_health "HEALTH_OK" || return 1
}
function test_wait_for_health_ok() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
# start osd_pool_default_size OSDs
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
kill_daemons $dir TERM osd || return 1
ceph osd down 0 || return 1
# expect TOO_FEW_OSDS warning
! TIMEOUT=1 wait_for_health_ok || return 1
# resurrect all OSDs
activate_osd $dir 0 || return 1
activate_osd $dir 1 || return 1
activate_osd $dir 2 || return 1
wait_for_health_ok || return 1
teardown $dir || return 1
}
#######################################################################
##
# Run repair on **pgid** and wait until it completes. The repair
# function will fail if repair does not complete within $TIMEOUT
# seconds.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
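# Example (mirrors test_repair):
#
# repair 1.0 || return 1
#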
function repair() {
local pgid=$1
local last_scrub=$(get_last_scrub_stamp $pgid)
ceph pg repair $pgid
wait_for_scrub $pgid "$last_scrub"
}
function test_repair() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
repair 1.0 || return 1
kill_daemons $dir KILL osd || return 1
! TIMEOUT=1 repair 1.0 || return 1
teardown $dir || return 1
}
#######################################################################
##
# Run scrub on **pgid** and wait until it completes. The pg_scrub
# function will fail if the scrub does not complete within $TIMEOUT
# seconds. The scrub is complete whenever the
# **get_last_scrub_stamp** function reports a timestamp different from
# the one stored before starting the scrub.
#
# @param pgid the id of the PG
# @return 0 on success, 1 on error
#
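# Example (mirrors test_pg_scrub):
#
# pg_scrub 1.0 || return 1
#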
function pg_scrub() {
local pgid=$1
local last_scrub=$(get_last_scrub_stamp $pgid)
ceph pg scrub $pgid
wait_for_scrub $pgid "$last_scrub"
}
function pg_deep_scrub() {
local pgid=$1
local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp)
ceph pg deep-scrub $pgid
wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp
}
function test_pg_scrub() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
pg_scrub 1.0 || return 1
kill_daemons $dir KILL osd || return 1
! TIMEOUT=1 pg_scrub 1.0 || return 1
teardown $dir || return 1
}
#######################################################################
##
# Run the *command* and expect it to fail (i.e. return a non zero status).
# The output (stderr and stdout) is stored in a temporary file in *dir*
# and is expected to contain the string *expected*.
#
# Return 0 if the command failed and the string was found. Otherwise
# return 1 and cat the full output of the command on stderr for debug.
#
# @param dir temporary directory to store the output
# @param expected string to look for in the output
# @param command ... the command and its arguments
# @return 0 on success, 1 on error
#
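# Example (taken from test_expect_failure below):
#
# expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
#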
function expect_failure() {
local dir=$1
shift
local expected="$1"
shift
local success
if "$@" > $dir/out 2>&1 ; then
success=true
else
success=false
fi
if $success || ! grep --quiet "$expected" $dir/out ; then
cat $dir/out >&2
return 1
else
return 0
fi
}
function test_expect_failure() {
local dir=$1
setup $dir || return 1
expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1
# the command did not fail
! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1
grep --quiet FAIL $dir/out || return 1
# the command failed but the output does not contain the expected string
! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1
! grep --quiet FAIL $dir/out || return 1
teardown $dir || return 1
}
#######################################################################
##
# Given the *last_scrub*, wait for scrub to happen on **pgid**. It
# will fail if the scrub does not complete within $TIMEOUT seconds. The
# scrub is complete whenever the **get_last_scrub_stamp** function
# reports a timestamp different from the one given in argument.
#
# @param pgid the id of the PG
# @param last_scrub timestamp of the last scrub for *pgid*
# @return 0 on success, 1 on error
#
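# Example (mirrors test_wait_for_scrub):
#
# last_scrub=$(get_last_scrub_stamp 1.0)
# ceph pg scrub 1.0
# wait_for_scrub 1.0 "$last_scrub" || return 1
#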
function wait_for_scrub() {
local pgid=$1
local last_scrub="$2"
local sname=${3:-last_scrub_stamp}
for ((i=0; i < $TIMEOUT; i++)); do
if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then
return 0
fi
sleep 1
done
return 1
}
function test_wait_for_scrub() {
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
local pgid=1.0
ceph pg repair $pgid
local last_scrub=$(get_last_scrub_stamp $pgid)
wait_for_scrub $pgid "$last_scrub" || return 1
kill_daemons $dir KILL osd || return 1
last_scrub=$(get_last_scrub_stamp $pgid)
! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1
teardown $dir || return 1
}
#######################################################################
##
# Return 0 if the erasure code *plugin* is available, 1 otherwise.
#
# @param plugin erasure code plugin
# @return 0 on success, 1 on error
#
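# Example (mirrors test_erasure_code_plugin_exists):
#
# erasure_code_plugin_exists jerasure || return 1
#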
function erasure_code_plugin_exists() {
local plugin=$1
local status
local grepstr
local s
case `uname` in
FreeBSD) grepstr="Cannot open.*$plugin" ;;
*) grepstr="$plugin.*No such file" ;;
esac
s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1)
local status=$?
if [ $status -eq 0 ]; then
ceph osd erasure-code-profile rm TESTPROFILE
elif ! echo $s | grep --quiet "$grepstr" ; then
status=1
# display why the string was rejected.
echo $s
fi
return $status
}
function test_erasure_code_plugin_exists() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
erasure_code_plugin_exists jerasure || return 1
! erasure_code_plugin_exists FAKE || return 1
teardown $dir || return 1
}
#######################################################################
##
# Display all log files from **dir** on stdout.
#
# @param dir directory in which all data is stored
#
function display_logs() {
local dir=$1
find $dir -maxdepth 1 -name '*.log' | \
while read file ; do
echo "======================= $file"
cat $file
done
}
function test_display_logs() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
kill_daemons $dir || return 1
display_logs $dir > $dir/log.out
grep --quiet mon.a.log $dir/log.out || return 1
teardown $dir || return 1
}
#######################################################################
##
# Spawn a command in background and save the pid in the variable name
# passed in argument. To make the output reading easier, the output is
# prepend with the process id.
#
# Example:
# pids1=""
# run_in_background pids1 bash -c 'sleep 1; exit 1'
#
# @param pid_variable the variable name (not value) where the pids will be stored
# @param ... the command to execute
# @return only the pid_variable output should be considered and used with **wait_background**
#
function run_in_background() {
local pid_variable=$1
shift
# Execute the command and prepend the output with its pid
# We enforce to return the exit status of the command and not the sed one.
("$@" |& sed 's/^/'$BASHPID': /'; return "${PIPESTATUS[0]}") >&2 &
eval "$pid_variable+=\" $!\""
}
function save_stdout {
local out="$1"
shift
"$@" > "$out"
}
function test_run_in_background() {
local pids
run_in_background pids sleep 1
run_in_background pids sleep 1
test $(echo $pids | wc -w) = 2 || return 1
wait $pids || return 1
}
#######################################################################
##
# Wait for pids running in background to complete.
# This function is usually used after a **run_in_background** call
# Example:
# pids1=""
# run_in_background pids1 bash -c 'sleep 1; exit 1'
# wait_background pids1
#
# @param pids The variable name that contains the active PIDS. Set to empty at the end of the function.
# @return 1 if at least one process exits in error, 0 otherwise
#
function wait_background() {
# We extract the PIDS from the variable name
pids=${!1}
return_code=0
for pid in $pids; do
if ! wait $pid; then
# If one process failed then return 1
return_code=1
fi
done
# We empty the variable reporting that all process ended
eval "$1=''"
return $return_code
}
function test_wait_background() {
local pids=""
run_in_background pids bash -c "sleep 1; exit 1"
run_in_background pids bash -c "sleep 2; exit 0"
wait_background pids
if [ $? -ne 1 ]; then return 1; fi
run_in_background pids bash -c "sleep 1; exit 0"
run_in_background pids bash -c "sleep 2; exit 0"
wait_background pids
if [ $? -ne 0 ]; then return 1; fi
if [ ! -z "$pids" ]; then return 1; fi
}
function flush_pg_stats()
{
local timeout=${1:-$TIMEOUT}
ids=`ceph osd ls`
seqs=''
for osd in $ids; do
seq=`ceph tell osd.$osd flush_pg_stats`
if test -z "$seq"
then
continue
fi
seqs="$seqs $osd-$seq"
done
for s in $seqs; do
osd=`echo $s | cut -d - -f 1`
seq=`echo $s | cut -d - -f 2`
echo "waiting osd.$osd seq $seq"
while test $(ceph osd last-stat-seq $osd) -lt $seq; do
sleep 1
if [ $((timeout--)) -eq 0 ]; then
return 1
fi
done
done
}
function test_flush_pg_stats()
{
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
create_rbd_pool || return 1
rados -p rbd put obj /etc/group
flush_pg_stats || return 1
local jq_filter='.pools | .[] | select(.name == "rbd") | .stats'
stored=`ceph df detail --format=json | jq "$jq_filter.stored"`
stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
test $stored -gt 0 || return 1
test $stored == $stored_raw || return 1
teardown $dir
}
########################################################################
##
# Get the current op scheduler enabled on an osd by reading the
# osd_op_queue config option
#
# Example:
# get_op_scheduler $osdid
#
# @param id the id of the OSD
# @return the name of the op scheduler enabled for the OSD
#
function get_op_scheduler() {
local id=$1
get_config osd $id osd_op_queue
}
function test_get_op_scheduler() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_op_queue=wpq || return 1
test $(get_op_scheduler 0) = "wpq" || return 1
run_osd $dir 1 --osd_op_queue=mclock_scheduler || return 1
test $(get_op_scheduler 1) = "mclock_scheduler" || return 1
teardown $dir || return 1
}
#######################################################################
##
# Call the **run** function (which must be defined by the caller) with
# the **dir** argument followed by the caller argument list.
#
# If the **run** function returns on error, all logs found in **dir**
# are displayed for diagnostic purposes.
#
# **teardown** function is called when the **run** function returns
# (on success or on error), to clean up leftovers. The CEPH_CONF is set
# to /dev/null and CEPH_ARGS is unset so that the tests are protected from
# external interference.
#
# It is the responsibility of the **run** function to call the
# **setup** function to prepare the test environment (create a temporary
# directory etc.).
#
# The shell is required (via PS4) to display the function and line
# number whenever a statement is executed to help debugging.
#
# @param dir directory in which all data is stored
# @param ... arguments passed transparently to **run**
# @return 0 on success, 1 on error
#
function main() {
local dir=td/$1
shift
shopt -s -o xtrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
export PATH=.:$PATH # make sure program from sources are preferred
export PYTHONWARNINGS=ignore
export CEPH_CONF=/dev/null
unset CEPH_ARGS
local code
if run $dir "$@" ; then
code=0
else
code=1
fi
teardown $dir $code || return 1
return $code
}
#######################################################################
function run_tests() {
shopt -s -o xtrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
export PATH=.:$PATH # make sure program from sources are preferred
export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
export CEPH_CONF=/dev/null
local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')}
local dir=td/ceph-helpers
for func in $funcs ; do
if ! $func $dir; then
teardown $dir 1
return 1
fi
done
}
if test "$1" = TESTS ; then
shift
run_tests "$@"
exit $?
fi
# NOTE:
# jq only support --exit-status|-e from version 1.4 forwards, which makes
# returning on error waaaay prettier and straightforward.
# However, the current automated upstream build is running with v1.3,
# which has no idea what -e is. Hence the convoluted error checking we
# need. Sad.
# The next time someone changes this code, please check if v1.4 is now
# a thing, and, if so, please change these to use -e. Thanks.
# jq '.all.supported | select([.[] == "foo"] | any)'
function jq_success() {
input="$1"
filter="$2"
expects="\"$3\""
in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g")
filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g")
ret=$(echo "$in_escaped" | jq "$filter_escaped")
if [[ "$ret" == "true" ]]; then
return 0
elif [[ -n "$expects" ]]; then
if [[ "$ret" == "$expects" ]]; then
return 0
fi
fi
return 1
}
function inject_eio() {
local pooltype=$1
shift
local which=$1
shift
local poolname=$1
shift
local objname=$1
shift
local dir=$1
shift
local shard_id=$1
shift
local -a initial_osds=($(get_osds $poolname $objname))
local osd_id=${initial_osds[$shard_id]}
if [ "$pooltype" != "ec" ]; then
shard_id=""
fi
type=$(cat $dir/$osd_id/type)
set_config osd $osd_id ${type}_debug_inject_read_err true || return 1
local loop=0
while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do
loop=$(expr $loop + 1)
if [ $loop = "10" ]; then
return 1
fi
sleep 1
done
}
function multidiff() {
if ! diff $@ ; then
if [ "$DIFFCOLOPTS" = "" ]; then
return 1
fi
diff $DIFFCOLOPTS $@
fi
}
function create_ec_pool() {
local pool_name=$1
shift
local allow_overwrites=$1
shift
ceph osd erasure-code-profile set myprofile crush-failure-domain=osd "$@" || return 1
create_pool "$poolname" 1 1 erasure myprofile || return 1
if [ "$allow_overwrites" = "true" ]; then
ceph osd pool set "$poolname" allow_ec_overwrites true || return 1
fi
wait_for_clean || return 1
return 0
}
# Local Variables:
# compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config"
# End:
| 72,141 | 28.93444 | 175 | sh |
null | ceph-main/qa/standalone/c2c/c2c.sh | #!/usr/bin/env bash
set -ex
function run_perf_c2c() {
# First get some background system info
uname -a > uname.out
lscpu > lscpu.out
cat /proc/cmdline > cmdline.out
timeout -s INT 10 vmstat -w 1 > vmstat.out || true
sudo dmesg >& dmesg.out
cat /proc/cpuinfo > cpuinfo.out
ps axo psr,time,stat,ppid,pid,pcpu,comm > ps.1.out
ps -eafT > ps.2.out
sudo sysctl -a > sysctl.out
nodecnt=`lscpu|grep "NUMA node(" |awk '{print $3}'`
for ((i=0; i<$nodecnt; i++))
do
sudo cat /sys/devices/system/node/node${i}/meminfo > meminfo.$i.out
done
sudo more `sudo find /proc -name status` > proc_parent_child_status.out
sudo more /proc/*/numa_maps > numa_maps.out
#
# Get separate kernel and user perf-c2c stats
#
sudo perf c2c record -a --ldlat=70 --all-user -o perf_c2c_a_all_user.data sleep 5
sudo perf c2c report --stdio -i perf_c2c_a_all_user.data > perf_c2c_a_all_user.out 2>&1
sudo perf c2c report --full-symbols --stdio -i perf_c2c_a_all_user.data > perf_c2c_full-sym_a_all_user.out 2>&1
sudo perf c2c record --call-graph dwarf -a --ldlat=70 --all-user -o perf_c2c_g_a_all_user.data sleep 5
sudo perf c2c report -g --stdio -i perf_c2c_g_a_all_user.data > perf_c2c_g_a_all_user.out 2>&1
sudo perf c2c record -a --ldlat=70 --all-kernel -o perf_c2c_a_all_kernel.data sleep 4
sudo perf c2c report --stdio -i perf_c2c_a_all_kernel.data > perf_c2c_a_all_kernel.out 2>&1
sudo perf c2c record --call-graph dwarf --ldlat=70 -a --all-kernel -o perf_c2c_g_a_all_kernel.data sleep 4
sudo perf c2c report -g --stdio -i perf_c2c_g_a_all_kernel.data > perf_c2c_g_a_all_kernel.out 2>&1
#
# Get combined kernel and user perf-c2c stats
#
sudo perf c2c record -a --ldlat=70 -o perf_c2c_a_both.data sleep 4
sudo perf c2c report --stdio -i perf_c2c_a_both.data > perf_c2c_a_both.out 2>&1
sudo perf c2c record --call-graph dwarf --ldlat=70 -a -o perf_c2c_g_a_both.data sleep 4
sudo perf c2c report -g --stdio -i perf_c2c_g_a_both.data > perf_c2c_g_a_both.out 2>&1
#
# Get all-user physical addr stats, in case multiple threads or processes are
# accessing shared memory with different vaddrs.
#
sudo perf c2c record --phys-data -a --ldlat=70 --all-user -o perf_c2c_a_all_user_phys_data.data sleep 5
sudo perf c2c report --stdio -i perf_c2c_a_all_user_phys_data.data > perf_c2c_a_all_user_phys_data.out 2>&1
}
function run() {
local dir=$1
shift
(
rm -fr $dir
mkdir $dir
cd $dir
ceph_test_c2c --threads $(($(nproc) * 2)) "$@" &
sleep 30 # let it warm up
run_perf_c2c
kill $! || { echo "ceph_test_c2c WAS NOT RUNNING" ; exit 1 ; }
) || exit 1
}
function bench() {
optimized=$(timeout 30 ceph_test_c2c --threads $(($(nproc) * 2)) --sharding 2> /dev/null || true)
not_optimized=$(timeout 30 ceph_test_c2c --threads $(($(nproc) * 2)) 2> /dev/null || true)
if ! (( $optimized > ( $not_optimized * 2 ) )) ; then
echo "the optimization is expected to be at least x2 faster"
exit 1
fi
}
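# bench() runs each variant for 30 seconds and captures the single count
# that ceph_test_c2c prints on stdout. With hypothetical outputs of, say,
# optimized=2000000 and not_optimized=800000 the test becomes
# (( 2000000 > 800000 * 2 )): the sharded build has to report more than
# twice the unsharded count, otherwise the benchmark fails.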
run with-sharding --sharding
run without-sharding
bench
| 3,139 | 35.941176 | 115 | sh |
null | ceph-main/qa/standalone/crush/crush-choose-args.sh | #!/usr/bin/env bash
#
# Copyright (C) 2017 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7131" # git grep '\<7131\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--crush-location=root=default,host=HOST "
CEPH_ARGS+="--osd-crush-initial-weight=3 "
#
# Disable device auto class feature for now.
# The device class is non-deterministic and will
# crash the crushmap comparison below.
#
CEPH_ARGS+="--osd-class-update-on-start=false "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_choose_args_update() {
#
# adding a weighted OSD updates the weight up to the top
#
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
ceph osd set-require-min-compat-client luminous
ceph osd getcrushmap > $dir/map || return 1
crushtool -d $dir/map -o $dir/map.txt || return 1
sed -i -e '/end crush map/d' $dir/map.txt
cat >> $dir/map.txt <<EOF
# choose_args
choose_args 0 {
{
bucket_id -1
weight_set [
[ 2.00000 ]
[ 2.00000 ]
]
ids [ -10 ]
}
{
bucket_id -2
weight_set [
[ 2.00000 ]
[ 2.00000 ]
]
ids [ -20 ]
}
}
# end crush map
EOF
crushtool -c $dir/map.txt -o $dir/map-new || return 1
ceph osd setcrushmap -i $dir/map-new || return 1
ceph osd crush tree
run_osd $dir 1 || return 1
ceph osd crush tree
ceph osd getcrushmap > $dir/map-one-more || return 1
crushtool -d $dir/map-one-more -o $dir/map-one-more.txt || return 1
cat $dir/map-one-more.txt
diff -u $dir/map-one-more.txt $CEPH_ROOT/src/test/crush/crush-choose-args-expected-one-more-3.txt || return 1
destroy_osd $dir 1 || return 1
ceph osd crush tree
ceph osd getcrushmap > $dir/map-one-less || return 1
crushtool -d $dir/map-one-less -o $dir/map-one-less.txt || return 1
diff -u $dir/map-one-less.txt $dir/map.txt || return 1
}
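#
# The injected choose_args/weight_set can also be inspected from a live
# cluster; one possible way (paths and names arbitrary):
#
#   ceph osd getcrushmap > /tmp/cm && crushtool -d /tmp/cm -o /tmp/cm.txt
#   ceph osd crush dump | jq .choose_args
#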
function TEST_no_update_weight_set() {
#
# adding a zero weight OSD does not update the weight set at all
#
local dir=$1
ORIG_CEPH_ARGS="$CEPH_ARGS"
CEPH_ARGS+="--osd-crush-update-weight-set=false "
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
ceph osd set-require-min-compat-client luminous
ceph osd crush tree
ceph osd getcrushmap > $dir/map || return 1
crushtool -d $dir/map -o $dir/map.txt || return 1
sed -i -e '/end crush map/d' $dir/map.txt
cat >> $dir/map.txt <<EOF
# choose_args
choose_args 0 {
{
bucket_id -1
weight_set [
[ 2.00000 ]
[ 1.00000 ]
]
ids [ -10 ]
}
{
bucket_id -2
weight_set [
[ 2.00000 ]
[ 1.00000 ]
]
ids [ -20 ]
}
}
# end crush map
EOF
crushtool -c $dir/map.txt -o $dir/map-new || return 1
ceph osd setcrushmap -i $dir/map-new || return 1
ceph osd crush tree
run_osd $dir 1 || return 1
ceph osd crush tree
ceph osd getcrushmap > $dir/map-one-more || return 1
crushtool -d $dir/map-one-more -o $dir/map-one-more.txt || return 1
cat $dir/map-one-more.txt
diff -u $dir/map-one-more.txt $CEPH_ROOT/src/test/crush/crush-choose-args-expected-one-more-0.txt || return 1
destroy_osd $dir 1 || return 1
ceph osd crush tree
ceph osd getcrushmap > $dir/map-one-less || return 1
crushtool -d $dir/map-one-less -o $dir/map-one-less.txt || return 1
diff -u $dir/map-one-less.txt $dir/map.txt || return 1
CEPH_ARGS="$ORIG_CEPH_ARGS"
}
function TEST_reweight() {
# reweight and reweight-compat behave appropriately
local dir=$1
ORIG_CEPH_ARGS="$CEPH_ARGS"
CEPH_ARGS+="--osd-crush-update-weight-set=false "
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
ceph osd crush weight-set create-compat || return 1
ceph osd crush tree
ceph osd crush weight-set reweight-compat osd.0 2 || return 1
ceph osd crush tree
ceph osd crush tree | grep host | grep '6.00000 5.00000' || return 1
run_osd $dir 2 || return 1
ceph osd crush tree
ceph osd crush tree | grep host | grep '9.00000 5.00000' || return 1
ceph osd crush reweight osd.2 4
ceph osd crush tree
ceph osd crush tree | grep host | grep '10.00000 5.00000' || return 1
ceph osd crush weight-set reweight-compat osd.2 4
ceph osd crush tree
ceph osd crush tree | grep host | grep '10.00000 9.00000' || return 1
}
function TEST_move_bucket() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
ceph osd crush weight-set create-compat || return 1
ceph osd crush weight-set reweight-compat osd.0 2 || return 1
ceph osd crush weight-set reweight-compat osd.1 2 || return 1
ceph osd crush tree
ceph osd crush tree | grep HOST | grep '6.00000 4.00000' || return 1
# moving a bucket adjusts the weights
ceph osd crush add-bucket RACK rack root=default || return 1
ceph osd crush move HOST rack=RACK || return 1
ceph osd crush tree
ceph osd crush tree | grep HOST | grep '6.00000 4.00000' || return 1
ceph osd crush tree | grep RACK | grep '6.00000 4.00000' || return 1
# weight-set reweight adjusts containing buckets
ceph osd crush weight-set reweight-compat osd.0 1 || return 1
ceph osd crush tree
ceph osd crush tree | grep HOST | grep '6.00000 3.00000' || return 1
ceph osd crush tree | grep RACK | grep '6.00000 3.00000' || return 1
# moving a leaf resets its weight-set to the canonical weight...
ceph config set mon osd_crush_update_weight_set true || return 1
ceph osd crush add-bucket FOO host root=default || return 1
ceph osd crush move osd.0 host=FOO || return 1
ceph osd crush tree
ceph osd crush tree | grep osd.0 | grep '3.00000 3.00000' || return 1
ceph osd crush tree | grep HOST | grep '3.00000 2.00000' || return 1
ceph osd crush tree | grep RACK | grep '3.00000 2.00000' || return 1
# ...or to zero.
ceph config set mon osd_crush_update_weight_set false || return 1
ceph osd crush move osd.1 host=FOO || return 1
ceph osd crush tree
ceph osd crush tree | grep osd.0 | grep '3.00000 3.00000' || return 1
ceph osd crush tree | grep osd.1 | grep '3.00000 0' || return 1
ceph osd crush tree | grep FOO | grep '6.00000 3.00000' || return 1
}
main crush-choose-args "$@"
# Local Variables:
# compile-command: "cd ../../../build ; ln -sf ../src/ceph-disk/ceph_disk/main.py bin/ceph-disk && make -j4 && ../src/test/crush/crush-choose-args.sh"
# End:
| 7,592 | 30.118852 | 150 | sh |
null | ceph-main/qa/standalone/crush/crush-classes.sh | #!/usr/bin/env bash
#
# Copyright (C) 2017 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7130" # git grep '\<7130\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
#
# Disable auto-class, so we can inject device class manually below
#
CEPH_ARGS+="--osd-class-update-on-start=false "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function add_something() {
local dir=$1
local obj=${2:-SOMETHING}
local payload=ABCDEF
echo $payload > $dir/ORIGINAL
rados --pool rbd put $obj $dir/ORIGINAL || return 1
}
function get_osds_up() {
local poolname=$1
local objectname=$2
local osds=$(ceph --format xml osd map $poolname $objectname 2>/dev/null | \
$XMLSTARLET sel -t -m "//up/osd" -v . -o ' ')
# get rid of the trailing space
echo $osds
}
function TEST_reweight_vs_classes() {
local dir=$1
# CrushWrapper::update_item (and ceph osd crush set) must rebuild the shadow
# tree too. https://tracker.ceph.com/issues/48065
run_mon $dir a || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
ceph osd crush set-device-class ssd osd.0 || return 1
ceph osd crush class ls-osd ssd | grep 0 || return 1
ceph osd crush set-device-class ssd osd.1 || return 1
ceph osd crush class ls-osd ssd | grep 1 || return 1
ceph osd crush reweight osd.0 1
h=`hostname -s`
ceph osd crush dump | jq ".buckets[] | select(.name==\"$h\") | .items[0].weight" | grep 65536
ceph osd crush dump | jq ".buckets[] | select(.name==\"$h~ssd\") | .items[0].weight" | grep 65536
ceph osd crush set 0 2 host=$h
ceph osd crush dump | jq ".buckets[] | select(.name==\"$h\") | .items[0].weight" | grep 131072
ceph osd crush dump | jq ".buckets[] | select(.name==\"$h~ssd\") | .items[0].weight" | grep 131072
}
function TEST_classes() {
local dir=$1
run_mon $dir a || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
create_rbd_pool || return 1
test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1
add_something $dir SOMETHING || return 1
#
# osd.0 has class ssd and the rule is modified
# to only take ssd devices.
#
ceph osd getcrushmap > $dir/map || return 1
crushtool -d $dir/map -o $dir/map.txt || return 1
${SED} -i \
-e '/device 0 osd.0/s/$/ class ssd/' \
-e '/step take default/s/$/ class ssd/' \
$dir/map.txt || return 1
crushtool -c $dir/map.txt -o $dir/map-new || return 1
ceph osd setcrushmap -i $dir/map-new || return 1
#
# There can only be one mapping since there only is
# one device with ssd class.
#
ok=false
for delay in 2 4 8 16 32 64 128 256 ; do
if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0" ; then
ok=true
break
fi
sleep $delay
ceph osd dump # for debugging purposes
ceph pg dump # for debugging purposes
done
$ok || return 1
#
# Writing keeps working because the pool is min_size 1 by
# default.
#
add_something $dir SOMETHING_ELSE || return 1
#
# Sanity check that the rule indeed has ssd
# generated bucket with a name including ~ssd.
#
ceph osd crush dump | grep -q '~ssd' || return 1
}
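#
# A sketch of the same setup done through the CLI instead of editing the
# decompiled map (rule, class and pool names are arbitrary):
#
#   ceph osd crush set-device-class ssd osd.0
#   ceph osd crush rule create-replicated fast default host ssd
#   ceph osd pool set rbd crush_rule fast
#
# which is equivalent to the "step take default class ssd" edit above.
#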
function TEST_set_device_class() {
local dir=$1
TEST_classes $dir || return 1
ceph osd crush set-device-class ssd osd.0 || return 1
ceph osd crush class ls-osd ssd | grep 0 || return 1
ceph osd crush set-device-class ssd osd.1 || return 1
ceph osd crush class ls-osd ssd | grep 1 || return 1
ceph osd crush set-device-class ssd 0 1 || return 1 # should be idempotent
ok=false
for delay in 2 4 8 16 32 64 128 256 ; do
if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0 1" ; then
ok=true
break
fi
sleep $delay
ceph osd crush dump
ceph osd dump # for debugging purposes
ceph pg dump # for debugging purposes
done
$ok || return 1
}
function TEST_mon_classes() {
local dir=$1
run_mon $dir a || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
create_rbd_pool || return 1
test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1
add_something $dir SOMETHING || return 1
# test create and remove class
ceph osd crush class create CLASS || return 1
ceph osd crush class create CLASS || return 1 # idempotent
ceph osd crush class ls | grep CLASS || return 1
ceph osd crush class rename CLASS TEMP || return 1
ceph osd crush class ls | grep TEMP || return 1
ceph osd crush class rename TEMP CLASS || return 1
ceph osd crush class ls | grep CLASS || return 1
ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd crush-device-class=CLASS || return 1
expect_failure $dir EBUSY ceph osd crush class rm CLASS || return 1
ceph osd erasure-code-profile rm myprofile || return 1
ceph osd crush class rm CLASS || return 1
ceph osd crush class rm CLASS || return 1 # test idempotence
# test rm-device-class
ceph osd crush set-device-class aaa osd.0 || return 1
ceph osd tree | grep -q 'aaa' || return 1
ceph osd crush dump | grep -q '~aaa' || return 1
ceph osd crush tree --show-shadow | grep -q '~aaa' || return 1
ceph osd crush set-device-class bbb osd.1 || return 1
ceph osd tree | grep -q 'bbb' || return 1
ceph osd crush dump | grep -q '~bbb' || return 1
ceph osd crush tree --show-shadow | grep -q '~bbb' || return 1
ceph osd crush set-device-class ccc osd.2 || return 1
ceph osd tree | grep -q 'ccc' || return 1
ceph osd crush dump | grep -q '~ccc' || return 1
ceph osd crush tree --show-shadow | grep -q '~ccc' || return 1
ceph osd crush rm-device-class 0 || return 1
ceph osd tree | grep -q 'aaa' && return 1
ceph osd crush class ls | grep -q 'aaa' && return 1 # class 'aaa' should be gone
ceph osd crush rm-device-class 1 || return 1
ceph osd tree | grep -q 'bbb' && return 1
ceph osd crush class ls | grep -q 'bbb' && return 1 # class 'bbb' should be gone
ceph osd crush rm-device-class 2 || return 1
ceph osd tree | grep -q 'ccc' && return 1
ceph osd crush class ls | grep -q 'ccc' && return 1 # class 'ccc' should be gone
ceph osd crush set-device-class asdf all || return 1
ceph osd tree | grep -q 'asdf' || return 1
ceph osd crush dump | grep -q '~asdf' || return 1
ceph osd crush tree --show-shadow | grep -q '~asdf' || return 1
ceph osd crush rule create-replicated asdf-rule default host asdf || return 1
ceph osd crush rm-device-class all || return 1
ceph osd tree | grep -q 'asdf' && return 1
ceph osd crush class ls | grep -q 'asdf' || return 1 # still referenced by asdf-rule
ceph osd crush set-device-class abc osd.2 || return 1
ceph osd crush move osd.2 root=foo rack=foo-rack host=foo-host || return 1
out=`ceph osd tree |awk '$1 == 2 && $2 == "abc" {print $0}'`
if [ "$out" == "" ]; then
return 1
fi
# verify 'crush move' too
ceph osd crush dump | grep -q 'foo~abc' || return 1
ceph osd crush tree --show-shadow | grep -q 'foo~abc' || return 1
ceph osd crush dump | grep -q 'foo-rack~abc' || return 1
ceph osd crush tree --show-shadow | grep -q 'foo-rack~abc' || return 1
ceph osd crush dump | grep -q 'foo-host~abc' || return 1
ceph osd crush tree --show-shadow | grep -q 'foo-host~abc' || return 1
ceph osd crush rm-device-class osd.2 || return 1
# restore class, so we can continue to test create-replicated
ceph osd crush set-device-class abc osd.2 || return 1
ceph osd crush rule create-replicated foo-rule foo host abc || return 1
# test that set-device-class cannot implicitly change an existing class
ceph osd crush set-device-class hdd osd.0 || return 1
expect_failure $dir EBUSY ceph osd crush set-device-class nvme osd.0 || return 1
# test class rename
ceph osd crush rm-device-class all || return 1
ceph osd crush set-device-class class_1 all || return 1
ceph osd crush class ls | grep 'class_1' || return 1
ceph osd crush tree --show-shadow | grep 'class_1' || return 1
ceph osd crush rule create-replicated class_1_rule default host class_1 || return 1
ceph osd crush class rename class_1 class_2
ceph osd crush class rename class_1 class_2 # idempotent
ceph osd crush class ls | grep 'class_1' && return 1
ceph osd crush tree --show-shadow | grep 'class_1' && return 1
ceph osd crush class ls | grep 'class_2' || return 1
ceph osd crush tree --show-shadow | grep 'class_2' || return 1
}
main crush-classes "$@"
# Local Variables:
# compile-command: "cd ../../../build ; ln -sf ../src/ceph-disk/ceph_disk/main.py bin/ceph-disk && make -j4 && ../src/test/crush/crush-classes.sh"
# End:
| 9,876 | 36.131579 | 156 | sh |
null | ceph-main/qa/standalone/erasure-code/test-erasure-code-plugins.sh | #!/usr/bin/env bash
set -x
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
arch=$(uname -m)
case $arch in
i[[3456]]86*|x86_64*|amd64*)
legacy_jerasure_plugins=(jerasure_generic jerasure_sse3 jerasure_sse4)
legacy_shec_plugins=(shec_generic shec_sse3 shec_sse4)
plugins=(jerasure shec lrc isa)
;;
aarch64*|arm*)
legacy_jerasure_plugins=(jerasure_generic jerasure_neon)
legacy_shec_plugins=(shec_generic shec_neon)
plugins=(jerasure shec lrc)
;;
*)
echo "unsupported platform ${arch}."
return 1
;;
esac
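# The legacy per-SIMD plugin names above only matter when checking the
# upgrade warnings; a profile written today would simply name the
# consolidated plugin and let it pick the SIMD implementation at load
# time, e.g. (illustrative values):
#
#   ceph osd erasure-code-profile set myprofile \
#     plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd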
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:17110" # git grep '\<17110\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
$func $dir || return 1
done
}
function TEST_preload_warning() {
local dir=$1
for plugin in ${legacy_jerasure_plugins[*]} ${legacy_shec_plugins[*]}; do
setup $dir || return 1
run_mon $dir a --osd_erasure_code_plugins="${plugin}" || return 1
run_mgr $dir x || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
run_osd $dir 0 --osd_erasure_code_plugins="${plugin}" || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep "WARNING: osd_erasure_code_plugins contains plugin ${plugin}" $dir/mon.a.log || return 1
grep "WARNING: osd_erasure_code_plugins contains plugin ${plugin}" $dir/osd.0.log || return 1
teardown $dir || return 1
done
return 0
}
function TEST_preload_no_warning() {
local dir=$1
for plugin in ${plugins[*]}; do
setup $dir || return 1
run_mon $dir a --osd_erasure_code_plugins="${plugin}" || return 1
run_mgr $dir x || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
run_osd $dir 0 --osd_erasure_code_plugins="${plugin}" || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
! grep "WARNING: osd_erasure_code_plugins contains plugin" $dir/mon.a.log || return 1
! grep "WARNING: osd_erasure_code_plugins contains plugin" $dir/osd.0.log || return 1
teardown $dir || return 1
done
return 0
}
function TEST_preload_no_warning_default() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
! grep "WARNING: osd_erasure_code_plugins" $dir/mon.a.log || return 1
! grep "WARNING: osd_erasure_code_plugins" $dir/osd.0.log || return 1
teardown $dir || return 1
return 0
}
function TEST_ec_profile_warning() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
for id in $(seq 0 2) ; do
run_osd $dir $id || return 1
done
create_rbd_pool || return 1
wait_for_clean || return 1
for plugin in ${legacy_jerasure_plugins[*]}; do
ceph osd erasure-code-profile set prof-${plugin} crush-failure-domain=osd technique=reed_sol_van plugin=${plugin} || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep "WARNING: erasure coding profile prof-${plugin} uses plugin ${plugin}" $dir/mon.a.log || return 1
done
for plugin in ${legacy_shec_plugins[*]}; do
ceph osd erasure-code-profile set prof-${plugin} crush-failure-domain=osd plugin=${plugin} || return 1
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep "WARNING: erasure coding profile prof-${plugin} uses plugin ${plugin}" $dir/mon.a.log || return 1
done
teardown $dir || return 1
}
main test-erasure-code-plugins "$@"
| 4,144 | 33.831933 | 133 | sh |
null | ceph-main/qa/standalone/erasure-code/test-erasure-code.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Cloudwatt <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7101" # git grep '\<7101\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON --mon-osd-prime-pg-temp=false"
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
# check that erasure code plugins are preloaded
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
for id in $(seq 0 10) ; do
run_osd $dir $id || return 1
done
create_rbd_pool || return 1
wait_for_clean || return 1
# check that erasure code plugins are preloaded
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
create_erasure_coded_pool ecpool || return 1
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
$func $dir || return 1
done
delete_pool ecpool || return 1
teardown $dir || return 1
}
function create_erasure_coded_pool() {
local poolname=$1
ceph osd erasure-code-profile set myprofile \
crush-failure-domain=osd || return 1
create_pool $poolname 12 12 erasure myprofile \
|| return 1
wait_for_clean || return 1
}
function rados_put_get() {
local dir=$1
local poolname=$2
local objname=${3:-SOMETHING}
for marker in AAA BBB CCCC DDDD ; do
printf "%*s" 1024 $marker
done > $dir/ORIGINAL
#
# get and put an object, compare they are equal
#
rados --pool $poolname put $objname $dir/ORIGINAL || return 1
rados --pool $poolname get $objname $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
rm $dir/COPY
#
# take out an OSD used to store the object and
# check the object can still be retrieved, which implies
# recovery
#
local -a initial_osds=($(get_osds $poolname $objname))
local last=$((${#initial_osds[@]} - 1))
ceph osd out ${initial_osds[$last]} || return 1
# give the osdmap up to 5 seconds to refresh
sleep 5
! get_osds $poolname $objname | grep '\<'${initial_osds[$last]}'\>' || return 1
rados --pool $poolname get $objname $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
ceph osd in ${initial_osds[$last]} || return 1
rm $dir/ORIGINAL
}
function rados_osds_out_in() {
local dir=$1
local poolname=$2
local objname=${3:-SOMETHING}
for marker in FFFF GGGG HHHH IIII ; do
printf "%*s" 1024 $marker
done > $dir/ORIGINAL
#
# get and put an object, compare they are equal
#
rados --pool $poolname put $objname $dir/ORIGINAL || return 1
rados --pool $poolname get $objname $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
rm $dir/COPY
#
# take out two OSDs used to store the object, wait for the cluster
# to be clean (i.e. all PGs are clean and active) again, which
# implies the PGs have been moved to use the remaining OSDs. Check
# that the object can still be retrieved.
#
wait_for_clean || return 1
local osds_list=$(get_osds $poolname $objname)
local -a osds=($osds_list)
for osd in 0 1 ; do
ceph osd out ${osds[$osd]} || return 1
done
wait_for_clean || return 1
#
# verify the object is no longer mapped to the osds that are out
#
for osd in 0 1 ; do
! get_osds $poolname $objname | grep '\<'${osds[$osd]}'\>' || return 1
done
rados --pool $poolname get $objname $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
#
# bring the osds back in, wait for the cluster
# to be clean (i.e. all PGs are clean and active) again, which
# implies the PGs go back to using the same osds as before
#
for osd in 0 1 ; do
ceph osd in ${osds[$osd]} || return 1
done
wait_for_clean || return 1
test "$osds_list" = "$(get_osds $poolname $objname)" || return 1
rm $dir/ORIGINAL
}
function TEST_rados_put_get_lrc_advanced() {
local dir=$1
local poolname=pool-lrc-a
local profile=profile-lrc-a
ceph osd erasure-code-profile set $profile \
plugin=lrc \
mapping=DD_ \
crush-steps='[ [ "chooseleaf", "osd", 0 ] ]' \
layers='[ [ "DDc", "" ] ]' || return 1
create_pool $poolname 12 12 erasure $profile \
|| return 1
rados_put_get $dir $poolname || return 1
delete_pool $poolname
ceph osd erasure-code-profile rm $profile
}
function TEST_rados_put_get_lrc_kml() {
local dir=$1
local poolname=pool-lrc
local profile=profile-lrc
ceph osd erasure-code-profile set $profile \
plugin=lrc \
k=4 m=2 l=3 \
crush-failure-domain=osd || return 1
create_pool $poolname 12 12 erasure $profile \
|| return 1
rados_put_get $dir $poolname || return 1
delete_pool $poolname
ceph osd erasure-code-profile rm $profile
}
function TEST_rados_put_get_isa() {
if ! erasure_code_plugin_exists isa ; then
echo "SKIP because plugin isa has not been built"
return 0
fi
local dir=$1
local poolname=pool-isa
ceph osd erasure-code-profile set profile-isa \
plugin=isa \
crush-failure-domain=osd || return 1
create_pool $poolname 1 1 erasure profile-isa \
|| return 1
rados_put_get $dir $poolname || return 1
delete_pool $poolname
}
function TEST_rados_put_get_jerasure() {
local dir=$1
rados_put_get $dir ecpool || return 1
local poolname=pool-jerasure
local profile=profile-jerasure
ceph osd erasure-code-profile set $profile \
plugin=jerasure \
k=4 m=2 \
crush-failure-domain=osd || return 1
create_pool $poolname 12 12 erasure $profile \
|| return 1
rados_put_get $dir $poolname || return 1
rados_osds_out_in $dir $poolname || return 1
delete_pool $poolname
ceph osd erasure-code-profile rm $profile
}
function TEST_rados_put_get_shec() {
local dir=$1
local poolname=pool-shec
local profile=profile-shec
ceph osd erasure-code-profile set $profile \
plugin=shec \
k=2 m=1 c=1 \
crush-failure-domain=osd || return 1
create_pool $poolname 12 12 erasure $profile \
|| return 1
rados_put_get $dir $poolname || return 1
delete_pool $poolname
ceph osd erasure-code-profile rm $profile
}
function TEST_alignment_constraints() {
local payload=ABC
echo "$payload" > $dir/ORIGINAL
#
# Verify that the rados command enforces alignment constraints
# imposed by the stripe width
# See http://tracker.ceph.com/issues/8622
#
local stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
eval local $(ceph osd erasure-code-profile get myprofile | grep k=)
local block_size=$((stripe_unit * k - 1))
dd if=/dev/zero of=$dir/ORIGINAL bs=$block_size count=2
rados --block-size=$block_size \
--pool ecpool put UNALIGNED $dir/ORIGINAL || return 1
rm $dir/ORIGINAL
}
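# Worked example of the arithmetic above, assuming the default
# osd_pool_erasure_code_stripe_unit of 4096 and k=2: the stripe width is
# 4096 * 2 = 8192 bytes, so block_size = 8192 - 1 = 8191 is deliberately
# misaligned and exercises the handling described in issue 8622.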
function chunk_size() {
echo $(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
}
#
# By default an object will be split in two (k=2) with the first part
# of the object in the first OSD of the up set and the second part in
# the next OSD in the up set. This layout is defined by the mapping
# parameter and this function helps verify that the first and second
# part of the object are located in the OSD where they should be.
#
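# For example, verify_chunk_mapping writes a payload made of two
# chunk-sized halves tagged FIRST<pool> and SECOND<pool>, then uses
# objectstore_tool to check that the FIRST half really lives on the OSD
# at position $first of the up set and the SECOND half at position
# $second. TEST_chunk_mapping below passes 0/1 for the default DD_
# mapping and 1/2 for the remapped _DD profile.
#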
function verify_chunk_mapping() {
local dir=$1
local poolname=$2
local first=$3
local second=$4
local payload=$(printf '%*s' $(chunk_size) FIRST$poolname ; printf '%*s' $(chunk_size) SECOND$poolname)
echo -n "$payload" > $dir/ORIGINAL
rados --pool $poolname put SOMETHING$poolname $dir/ORIGINAL || return 1
rados --pool $poolname get SOMETHING$poolname $dir/COPY || return 1
local -a osds=($(get_osds $poolname SOMETHING$poolname))
for (( i = 0; i < ${#osds[@]}; i++ )) ; do
ceph daemon osd.${osds[$i]} flush_journal
done
diff $dir/ORIGINAL $dir/COPY || return 1
rm $dir/COPY
local -a osds=($(get_osds $poolname SOMETHING$poolname))
objectstore_tool $dir ${osds[$first]} SOMETHING$poolname get-bytes | grep --quiet FIRST$poolname || return 1
objectstore_tool $dir ${osds[$second]} SOMETHING$poolname get-bytes | grep --quiet SECOND$poolname || return 1
}
function TEST_chunk_mapping() {
local dir=$1
#
# mapping=DD_ is the default:
# first OSD (i.e. 0) in the up set has the first part of the object
# second OSD (i.e. 1) in the up set has the second part of the object
#
verify_chunk_mapping $dir ecpool 0 1 || return 1
ceph osd erasure-code-profile set remap-profile \
plugin=lrc \
layers='[ [ "cDD", "" ] ]' \
mapping='_DD' \
crush-steps='[ [ "choose", "osd", 0 ] ]' || return 1
ceph osd erasure-code-profile get remap-profile
create_pool remap-pool 12 12 erasure remap-profile \
|| return 1
#
# mapping=_DD
# second OSD (i.e. 1) in the up set has the first part of the object
# third OSD (i.e. 2) in the up set has the second part of the object
#
verify_chunk_mapping $dir remap-pool 1 2 || return 1
delete_pool remap-pool
ceph osd erasure-code-profile rm remap-profile
}
main test-erasure-code "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/erasure-code/test-erasure-code.sh"
# End:
| 10,393 | 29.751479 | 114 | sh |
null | ceph-main/qa/standalone/erasure-code/test-erasure-eio.sh | #!/usr/bin/env bash
#
# Copyright (C) 2015 Red Hat <[email protected]>
#
#
# Author: Kefu Chai <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7112" # git grep '\<7112\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--osd_mclock_override_recovery_settings=true "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
create_pool rbd 4 || return 1
# check that erasure code plugins are preloaded
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function setup_osds() {
local count=$1
shift
for id in $(seq 0 $(expr $count - 1)) ; do
run_osd $dir $id || return 1
done
# check that erasure code plugins are preloaded
CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
}
function get_state() {
local pgid=$1
local sname=state
ceph --format json pg dump pgs 2>/dev/null | \
jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname"
}
function create_erasure_coded_pool() {
local poolname=$1
shift
local k=$1
shift
local m=$1
shift
ceph osd erasure-code-profile set myprofile \
plugin=jerasure \
k=$k m=$m \
crush-failure-domain=osd || return 1
create_pool $poolname 1 1 erasure myprofile \
|| return 1
wait_for_clean || return 1
}
function delete_erasure_coded_pool() {
local poolname=$1
ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
ceph osd erasure-code-profile rm myprofile
}
function rados_put() {
local dir=$1
local poolname=$2
local objname=${3:-SOMETHING}
for marker in AAA BBB CCCC DDDD ; do
printf "%*s" 1024 $marker
done > $dir/ORIGINAL
#
# get and put an object, compare they are equal
#
rados --pool $poolname put $objname $dir/ORIGINAL || return 1
}
function rados_get() {
local dir=$1
local poolname=$2
local objname=${3:-SOMETHING}
local expect=${4:-ok}
#
# Expect a failure to get object
#
if [ $expect = "fail" ];
then
! rados --pool $poolname get $objname $dir/COPY
return
fi
#
# get an object, compare with $dir/ORIGINAL
#
rados --pool $poolname get $objname $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
rm $dir/COPY
}
function inject_remove() {
local pooltype=$1
shift
local which=$1
shift
local poolname=$1
shift
local objname=$1
shift
local dir=$1
shift
local shard_id=$1
shift
local -a initial_osds=($(get_osds $poolname $objname))
local osd_id=${initial_osds[$shard_id]}
objectstore_tool $dir $osd_id $objname remove || return 1
}
# Test with an inject error
function rados_put_get_data() {
local inject=$1
shift
local dir=$1
shift
local shard_id=$1
shift
local arg=$1
# inject eio to the specified shard
#
local poolname=pool-jerasure
local objname=obj-$inject-$$-$shard_id
rados_put $dir $poolname $objname || return 1
inject_$inject ec data $poolname $objname $dir $shard_id || return 1
rados_get $dir $poolname $objname || return 1
if [ "$arg" = "recovery" ];
then
#
# take out the last OSD used to store the object,
# bring it back, and check for clean PGs which means
# recovery didn't crash the primary.
#
local -a initial_osds=($(get_osds $poolname $objname))
local last_osd=${initial_osds[-1]}
# Kill OSD
kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1
ceph osd out ${last_osd} || return 1
! get_osds $poolname $objname | grep '\<'${last_osd}'\>' || return 1
ceph osd in ${last_osd} || return 1
activate_osd $dir ${last_osd} || return 1
wait_for_clean || return 1
# Won't check for eio on get here -- recovery above might have fixed it
else
shard_id=$(expr $shard_id + 1)
inject_$inject ec data $poolname $objname $dir $shard_id || return 1
rados_get $dir $poolname $objname fail || return 1
rm $dir/ORIGINAL
fi
}
# Change the size of the specified shard
#
function set_size() {
local objname=$1
shift
local dir=$1
shift
local shard_id=$1
shift
local bytes=$1
shift
local mode=${1}
local poolname=pool-jerasure
local -a initial_osds=($(get_osds $poolname $objname))
local osd_id=${initial_osds[$shard_id]}
ceph osd set noout
if [ "$mode" = "add" ];
then
objectstore_tool $dir $osd_id $objname get-bytes $dir/CORRUPT || return 1
dd if=/dev/urandom bs=$bytes count=1 >> $dir/CORRUPT
elif [ "$bytes" = "0" ];
then
touch $dir/CORRUPT
else
dd if=/dev/urandom bs=$bytes count=1 of=$dir/CORRUPT
fi
objectstore_tool $dir $osd_id $objname set-bytes $dir/CORRUPT || return 1
rm -f $dir/CORRUPT
ceph osd unset noout
}
function rados_get_data_bad_size() {
local dir=$1
shift
local shard_id=$1
shift
local bytes=$1
shift
local mode=${1:-set}
local poolname=pool-jerasure
local objname=obj-size-$$-$shard_id-$bytes
rados_put $dir $poolname $objname || return 1
# Change the size of the specified shard
#
set_size $objname $dir $shard_id $bytes $mode || return 1
rados_get $dir $poolname $objname || return 1
# Leave objname and modify another shard
shard_id=$(expr $shard_id + 1)
set_size $objname $dir $shard_id $bytes $mode || return 1
rados_get $dir $poolname $objname fail || return 1
rm $dir/ORIGINAL
}
#
# These two test cases try to validate the following behavior:
# For an object on an EC pool, if one shard has a read error
# (either primary or replica), the client can still read the object.
#
# If 2 shards have read errors the client will get an error.
#
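# With the k=2 m=1 profile created below, any 2 of the 3 shards are
# enough to serve a read, so a single injected EIO is worked around
# transparently; injecting a second EIO leaves only one healthy shard
# and the client read is expected to fail.
#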
function TEST_rados_get_subread_eio_shard_0() {
local dir=$1
setup_osds 4 || return 1
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 2 1 || return 1
# inject eio on primary OSD (0) and replica OSD (1)
local shard_id=0
rados_put_get_data eio $dir $shard_id || return 1
delete_erasure_coded_pool $poolname
}
function TEST_rados_get_subread_eio_shard_1() {
local dir=$1
setup_osds 4 || return 1
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 2 1 || return 1
# inject eio into replicas OSD (1) and OSD (2)
local shard_id=1
rados_put_get_data eio $dir $shard_id || return 1
delete_erasure_coded_pool $poolname
}
# We don't remove the object from the primary because
# that just causes it to appear to be missing
function TEST_rados_get_subread_missing() {
local dir=$1
setup_osds 4 || return 1
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 2 1 || return 1
# inject remove into replicas OSD (1) and OSD (2)
local shard_id=1
rados_put_get_data remove $dir $shard_id || return 1
delete_erasure_coded_pool $poolname
}
#
#
# These two test cases try to validate the following behavior:
# For an object on an EC pool, if one shard has an incorrect size
# (which causes an internal read error), the client can still read the object.
#
# If 2 shards have incorrect size the client will get an error.
#
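# The same 2-of-3 argument applies here: one shard with a bad on-disk
# size is reconstructed from the remaining two, while corrupting a
# second shard makes the read fail.
#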
function TEST_rados_get_bad_size_shard_0() {
local dir=$1
setup_osds 4 || return 1
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 2 1 || return 1
# Set incorrect size into primary OSD (0) and replica OSD (1)
local shard_id=0
rados_get_data_bad_size $dir $shard_id 10 || return 1
rados_get_data_bad_size $dir $shard_id 0 || return 1
rados_get_data_bad_size $dir $shard_id 256 add || return 1
delete_erasure_coded_pool $poolname
}
function TEST_rados_get_bad_size_shard_1() {
local dir=$1
setup_osds 4 || return 1
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 2 1 || return 1
# Set incorrect size into replicas OSD (1) and OSD (2)
local shard_id=1
rados_get_data_bad_size $dir $shard_id 10 || return 1
rados_get_data_bad_size $dir $shard_id 0 || return 1
rados_get_data_bad_size $dir $shard_id 256 add || return 1
delete_erasure_coded_pool $poolname
}
function TEST_rados_get_with_subreadall_eio_shard_0() {
local dir=$1
local shard_id=0
setup_osds 4 || return 1
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 2 1 || return 1
# inject eio on primary OSD (0)
rados_put_get_data eio $dir $shard_id recovery || return 1
delete_erasure_coded_pool $poolname
}
function TEST_rados_get_with_subreadall_eio_shard_1() {
local dir=$1
local shard_id=1
setup_osds 4 || return 1
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 2 1 || return 1
# inject eio on replica OSD (1)
rados_put_get_data eio $dir $shard_id recovery || return 1
delete_erasure_coded_pool $poolname
}
# Test recovery the object attr read error
function TEST_ec_object_attr_read_error() {
local dir=$1
local objname=myobject
setup_osds 7 || return 1
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 3 2 || return 1
local primary_osd=$(get_primary $poolname $objname)
# Kill primary OSD
kill_daemons $dir TERM osd.${primary_osd} >&2 < /dev/null || return 1
# Write data
rados_put $dir $poolname $objname || return 1
# Inject eio; shard 1 is the one the object attr is read from
inject_eio ec mdata $poolname $objname $dir 1 || return 1
# Restart OSD
activate_osd $dir ${primary_osd} || return 1
# Cluster should recover this object
wait_for_clean || return 1
rados_get $dir $poolname myobject || return 1
delete_erasure_coded_pool $poolname
}
# Test recovery the first k copies aren't all available
function TEST_ec_single_recovery_error() {
local dir=$1
local objname=myobject
setup_osds 7 || return 1
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 3 2 || return 1
rados_put $dir $poolname $objname || return 1
inject_eio ec data $poolname $objname $dir 0 || return 1
local -a initial_osds=($(get_osds $poolname $objname))
local last_osd=${initial_osds[-1]}
# Kill OSD
kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1
ceph osd down ${last_osd} || return 1
ceph osd out ${last_osd} || return 1
# Cluster should recover this object
wait_for_clean || return 1
rados_get $dir $poolname myobject || return 1
delete_erasure_coded_pool $poolname
}
# Test recovery when repeated reads are needed due to EIO
function TEST_ec_recovery_multiple_errors() {
local dir=$1
local objname=myobject
setup_osds 9 || return 1
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 4 4 || return 1
rados_put $dir $poolname $objname || return 1
inject_eio ec data $poolname $objname $dir 0 || return 1
# the first read will try shards 0,1,2; when 0 gets EIO, shard 3 gets
# tried as well. Make that fail too, to test multiple-EIO handling.
inject_eio ec data $poolname $objname $dir 3 || return 1
inject_eio ec data $poolname $objname $dir 4 || return 1
local -a initial_osds=($(get_osds $poolname $objname))
local last_osd=${initial_osds[-1]}
# Kill OSD
kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1
ceph osd down ${last_osd} || return 1
ceph osd out ${last_osd} || return 1
# Cluster should recover this object
wait_for_clean || return 1
rados_get $dir $poolname myobject || return 1
delete_erasure_coded_pool $poolname
}
# Test recovery when there's only one shard to recover, but multiple
# objects recovering in one RecoveryOp
function TEST_ec_recovery_multiple_objects() {
local dir=$1
local objname=myobject
ORIG_ARGS=$CEPH_ARGS
CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 '
setup_osds 7 || return 1
CEPH_ARGS=$ORIG_ARGS
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 3 2 || return 1
rados_put $dir $poolname test1
rados_put $dir $poolname test2
rados_put $dir $poolname test3
ceph osd out 0 || return 1
# Cluster should recover these objects all at once
wait_for_clean || return 1
rados_get $dir $poolname test1
rados_get $dir $poolname test2
rados_get $dir $poolname test3
delete_erasure_coded_pool $poolname
}
# test multi-object recovery when the one missing shard gets EIO
function TEST_ec_recovery_multiple_objects_eio() {
local dir=$1
local objname=myobject
ORIG_ARGS=$CEPH_ARGS
CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 '
setup_osds 7 || return 1
CEPH_ARGS=$ORIG_ARGS
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 3 2 || return 1
rados_put $dir $poolname test1
rados_put $dir $poolname test2
rados_put $dir $poolname test3
# can't read from this shard anymore
inject_eio ec data $poolname $objname $dir 0 || return 1
ceph osd out 0 || return 1
# Cluster should recover these objects all at once
wait_for_clean || return 1
rados_get $dir $poolname test1
rados_get $dir $poolname test2
rados_get $dir $poolname test3
delete_erasure_coded_pool $poolname
}
# Test backfill with unfound object
function TEST_ec_backfill_unfound() {
local dir=$1
local objname=myobject
local lastobj=300
# Must be between 1 and $lastobj
local testobj=obj250
ORIG_ARGS=$CEPH_ARGS
CEPH_ARGS+=' --osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10'
setup_osds 5 || return 1
CEPH_ARGS=$ORIG_ARGS
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 3 2 || return 1
ceph pg dump pgs
rados_put $dir $poolname $objname || return 1
local primary=$(get_primary $poolname $objname)
local -a initial_osds=($(get_osds $poolname $objname))
local last_osd=${initial_osds[-1]}
kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1
ceph osd down ${last_osd} || return 1
ceph osd out ${last_osd} || return 1
ceph pg dump pgs
dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4
for i in $(seq 1 $lastobj)
do
rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1
done
inject_eio ec data $poolname $testobj $dir 0 || return 1
inject_eio ec data $poolname $testobj $dir 1 || return 1
activate_osd $dir ${last_osd} || return 1
ceph osd in ${last_osd} || return 1
sleep 15
for tmp in $(seq 1 240); do
state=$(get_state 2.0)
echo $state | grep backfill_unfound
if [ "$?" = "0" ]; then
break
fi
echo $state
sleep 1
done
ceph pg dump pgs
kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1
sleep 5
ceph pg dump pgs
ceph pg 2.0 list_unfound
ceph pg 2.0 query
ceph pg 2.0 list_unfound | grep -q $testobj || return 1
check=$(ceph pg 2.0 list_unfound | jq ".available_might_have_unfound")
test "$check" == "true" || return 1
eval check=$(ceph pg 2.0 list_unfound | jq .might_have_unfound[0].status)
test "$check" == "osd is down" || return 1
eval check=$(ceph pg 2.0 list_unfound | jq .might_have_unfound[0].osd)
test "$check" == "2(4)" || return 1
activate_osd $dir ${last_osd} || return 1
# Command should hang because object is unfound
timeout 5 rados -p $poolname get $testobj $dir/CHECK
test $? = "124" || return 1
ceph pg 2.0 mark_unfound_lost delete
wait_for_clean || return 1
for i in $(seq 1 $lastobj)
do
if [ obj${i} = "$testobj" ]; then
# Doesn't exist anymore
! rados -p $poolname get $testobj $dir/CHECK || return 1
else
rados --pool $poolname get obj${i} $dir/CHECK || return 1
diff -q $dir/ORIGINAL $dir/CHECK || return 1
fi
done
rm -f ${dir}/ORIGINAL ${dir}/CHECK
delete_erasure_coded_pool $poolname
}
# Test recovery with unfound object
function TEST_ec_recovery_unfound() {
local dir=$1
local objname=myobject
local lastobj=100
# Must be between 1 and $lastobj
local testobj=obj75
ORIG_ARGS=$CEPH_ARGS
CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 '
CEPH_ARGS+=' --osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10'
setup_osds 5 || return 1
CEPH_ARGS=$ORIG_ARGS
local poolname=pool-jerasure
create_erasure_coded_pool $poolname 3 2 || return 1
ceph pg dump pgs
rados_put $dir $poolname $objname || return 1
local -a initial_osds=($(get_osds $poolname $objname))
local last_osd=${initial_osds[-1]}
kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1
ceph osd down ${last_osd} || return 1
ceph osd out ${last_osd} || return 1
ceph pg dump pgs
dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4
for i in $(seq 1 $lastobj)
do
rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1
done
inject_eio ec data $poolname $testobj $dir 0 || return 1
inject_eio ec data $poolname $testobj $dir 1 || return 1
activate_osd $dir ${last_osd} || return 1
ceph osd in ${last_osd} || return 1
sleep 15
for tmp in $(seq 1 100); do
state=$(get_state 2.0)
echo $state | grep recovery_unfound
if [ "$?" = "0" ]; then
break
fi
echo "$state "
sleep 1
done
ceph pg dump pgs
ceph pg 2.0 list_unfound
ceph pg 2.0 query
ceph pg 2.0 list_unfound | grep -q $testobj || return 1
check=$(ceph pg 2.0 list_unfound | jq ".available_might_have_unfound")
test "$check" == "true" || return 1
check=$(ceph pg 2.0 list_unfound | jq ".might_have_unfound | length")
test $check == 0 || return 1
# Command should hang because object is unfound
timeout 5 rados -p $poolname get $testobj $dir/CHECK
test $? = "124" || return 1
ceph pg 2.0 mark_unfound_lost delete
wait_for_clean || return 1
for i in $(seq 1 $lastobj)
do
if [ obj${i} = "$testobj" ]; then
# Doesn't exist anymore
! rados -p $poolname get $testobj $dir/CHECK || return 1
else
rados --pool $poolname get obj${i} $dir/CHECK || return 1
diff -q $dir/ORIGINAL $dir/CHECK || return 1
fi
done
rm -f ${dir}/ORIGINAL ${dir}/CHECK
delete_erasure_coded_pool $poolname
}
main test-erasure-eio "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/erasure-code/test-erasure-eio.sh"
# End:
| 19,747 | 27.171184 | 85 | sh |
null | ceph-main/qa/standalone/mgr/balancer.sh | #!/usr/bin/env bash
#
# Copyright (C) 2019 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7102" # git grep '\<7102\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
$func $dir || return 1
done
}
TEST_POOL1=test1
TEST_POOL2=test2
function TEST_balancer() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
create_pool $TEST_POOL1 8
create_pool $TEST_POOL2 8
wait_for_clean || return 1
ceph pg dump pgs
ceph balancer status || return 1
eval MODE=$(ceph balancer status | jq '.mode')
test $MODE = "upmap" || return 1
ACTIVE=$(ceph balancer status | jq '.active')
test $ACTIVE = "true" || return 1
ceph balancer ls || return 1
PLANS=$(ceph balancer ls)
test "$PLANS" = "[]" || return 1
ceph balancer eval || return 1
EVAL="$(ceph balancer eval)"
test "$EVAL" = "current cluster score 0.000000 (lower is better)"
ceph balancer eval-verbose || return 1
ceph balancer pool add $TEST_POOL1 || return 1
ceph balancer pool add $TEST_POOL2 || return 1
ceph balancer pool ls || return 1
eval POOL=$(ceph balancer pool ls | jq 'sort | .[0]')
test "$POOL" = "$TEST_POOL1" || return 1
eval POOL=$(ceph balancer pool ls | jq 'sort | .[1]')
test "$POOL" = "$TEST_POOL2" || return 1
ceph balancer pool rm $TEST_POOL1 || return 1
ceph balancer pool rm $TEST_POOL2 || return 1
ceph balancer pool ls || return 1
ceph balancer pool add $TEST_POOL1 || return 1
ceph balancer mode crush-compat || return 1
ceph balancer status || return 1
eval MODE=$(ceph balancer status | jq '.mode')
test $MODE = "crush-compat" || return 1
ceph balancer off || return 1
! ceph balancer optimize plan_crush $TEST_POOL1 || return 1
ceph balancer status || return 1
eval RESULT=$(ceph balancer status | jq '.optimize_result')
test "$RESULT" = "Distribution is already perfect" || return 1
ceph balancer on || return 1
ACTIVE=$(ceph balancer status | jq '.active')
test $ACTIVE = "true" || return 1
sleep 2
ceph balancer status || return 1
ceph balancer off || return 1
ACTIVE=$(ceph balancer status | jq '.active')
test $ACTIVE = "false" || return 1
sleep 2
ceph balancer reset || return 1
ceph balancer mode upmap || return 1
ceph balancer status || return 1
eval MODE=$(ceph balancer status | jq '.mode')
test $MODE = "upmap" || return 1
! ceph balancer optimize plan_upmap $TEST_POOL1 || return 1
ceph balancer status || return 1
eval RESULT=$(ceph balancer status | jq '.optimize_result')
test "$RESULT" = "Unable to find further optimization, or pool(s) pg_num is decreasing, or distribution is already perfect" || return 1
ceph balancer on || return 1
ACTIVE=$(ceph balancer status | jq '.active')
test $ACTIVE = "true" || return 1
sleep 2
ceph balancer status || return 1
ceph balancer off || return 1
ACTIVE=$(ceph balancer status | jq '.active')
test $ACTIVE = "false" || return 1
teardown $dir || return 1
}
function TEST_balancer2() {
local dir=$1
TEST_PGS1=118
TEST_PGS2=132
TOTAL_PGS=$(expr $TEST_PGS1 + $TEST_PGS2)
OSDS=5
DEFAULT_REPLICAS=3
# Integer average of PGS per OSD (70.8), so each OSD >= this
FINAL_PER_OSD1=$(expr \( $TEST_PGS1 \* $DEFAULT_REPLICAS \) / $OSDS)
# Integer average of PGS per OSD (150)
FINAL_PER_OSD2=$(expr \( \( $TEST_PGS1 + $TEST_PGS2 \) \* $DEFAULT_REPLICAS \) / $OSDS)
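# i.e. 118 * 3 / 5 = 70 (expr truncates 70.8) and (118 + 132) * 3 / 5 = 150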
CEPH_ARGS+="--osd_pool_default_pg_autoscale_mode=off "
CEPH_ARGS+="--debug_osd=20 "
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
for i in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $i || return 1
done
ceph osd set-require-min-compat-client luminous
ceph config set mgr mgr/balancer/upmap_max_deviation 1
ceph balancer mode upmap || return 1
ceph balancer on || return 1
ceph config set mgr mgr/balancer/sleep_interval 5
create_pool $TEST_POOL1 $TEST_PGS1
wait_for_clean || return 1
# Wait up to 2 minutes
OK=no
for i in $(seq 1 25)
do
sleep 5
if grep -q "Optimization plan is almost perfect" $dir/mgr.x.log
then
OK=yes
break
fi
done
test $OK = "yes" || return 1
# Plan is found, but PGs still need to move
sleep 10
wait_for_clean || return 1
ceph osd df
PGS=$(ceph osd df --format=json-pretty | jq '.nodes[0].pgs')
test $PGS -ge $FINAL_PER_OSD1 || return 1
PGS=$(ceph osd df --format=json-pretty | jq '.nodes[1].pgs')
test $PGS -ge $FINAL_PER_OSD1 || return 1
PGS=$(ceph osd df --format=json-pretty | jq '.nodes[2].pgs')
test $PGS -ge $FINAL_PER_OSD1 || return 1
PGS=$(ceph osd df --format=json-pretty | jq '.nodes[3].pgs')
test $PGS -ge $FINAL_PER_OSD1 || return 1
PGS=$(ceph osd df --format=json-pretty | jq '.nodes[4].pgs')
test $PGS -ge $FINAL_PER_OSD1 || return 1
create_pool $TEST_POOL2 $TEST_PGS2
# Wait up to 2 minutes
OK=no
for i in $(seq 1 25)
do
sleep 5
COUNT=$(grep "Optimization plan is almost perfect" $dir/mgr.x.log | wc -l)
if test $COUNT = "2"
then
OK=yes
break
fi
done
test $OK = "yes" || return 1
# Plan is found, but PGs still need to move
sleep 10
wait_for_clean || return 1
ceph osd df
# We should be within plus or minus 2 of FINAL_PER_OSD2
# This is because here each pool is balanced independently
MIN=$(expr $FINAL_PER_OSD2 - 2)
MAX=$(expr $FINAL_PER_OSD2 + 2)
PGS=$(ceph osd df --format=json-pretty | jq '.nodes[0].pgs')
test $PGS -ge $MIN -a $PGS -le $MAX || return 1
PGS=$(ceph osd df --format=json-pretty | jq '.nodes[1].pgs')
test $PGS -ge $MIN -a $PGS -le $MAX || return 1
PGS=$(ceph osd df --format=json-pretty | jq '.nodes[2].pgs')
test $PGS -ge $MIN -a $PGS -le $MAX || return 1
PGS=$(ceph osd df --format=json-pretty | jq '.nodes[3].pgs')
test $PGS -ge $MIN -a $PGS -le $MAX || return 1
PGS=$(ceph osd df --format=json-pretty | jq '.nodes[4].pgs')
test $PGS -ge $MIN -a $PGS -le $MAX || return 1
teardown $dir || return 1
}
main balancer "$@"
# Local Variables:
# compile-command: "make -j4 && ../qa/run-standalone.sh balancer.sh"
# End:
| 7,323 | 31.696429 | 139 | sh |
null | ceph-main/qa/standalone/misc/mclock-config.sh | #!/usr/bin/env bash
#
# Copyright (C) 2022 Red Hat <[email protected]>
#
# Author: Sridhar Seshasayee <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7124" # git grep '\<7124\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--debug-mclock 20 "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
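# Note on the harness: run() enumerates every TEST_* function defined in this
# file and executes each one inside its own setup/teardown sandbox.  Passing
# function names as script arguments restricts the run to just those tests,
# e.g. (illustrative, assuming the standalone environment is already set up):
#   bash mclock-config.sh TEST_profile_builtin_to_custom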
function TEST_profile_builtin_to_custom() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_op_queue=mclock_scheduler || return 1
# Verify the default mclock profile on the OSD
local mclock_profile=$(ceph config get osd.0 osd_mclock_profile)
test "$mclock_profile" = "balanced" || return 1
# Verify the running mClock profile
mclock_profile=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get osd_mclock_profile |\
jq .osd_mclock_profile)
mclock_profile=$(eval echo $mclock_profile)
test "$mclock_profile" = "high_recovery_ops" || return 1
# Change the mclock profile to 'custom'
ceph tell osd.0 config set osd_mclock_profile custom || return 1
# Verify that the mclock profile is set to 'custom' on the OSDs
mclock_profile=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \
osd.0) config get osd_mclock_profile | jq .osd_mclock_profile)
mclock_profile=$(eval echo $mclock_profile)
test "$mclock_profile" = "custom" || return 1
# Change a mclock config param and confirm the change
local client_res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \
osd.0) config get osd_mclock_scheduler_client_res | \
jq .osd_mclock_scheduler_client_res | bc)
echo "client_res = $client_res"
local client_res_new=$(echo "$client_res + 0.1" | bc -l)
echo "client_res_new = $client_res_new"
ceph config set osd.0 osd_mclock_scheduler_client_res \
$client_res_new || return 1
# Check value in config monitor db
local res=$(ceph config get osd.0 \
osd_mclock_scheduler_client_res) || return 1
if (( $(echo "$res != $client_res_new" | bc -l) )); then
return 1
fi
# Check value in the in-memory 'values' map
res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \
osd.0) config get osd_mclock_scheduler_client_res | \
jq .osd_mclock_scheduler_client_res | bc)
if (( $(echo "$res != $client_res_new" | bc -l) )); then
return 1
fi
teardown $dir || return 1
}
function TEST_profile_custom_to_builtin() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_op_queue=mclock_scheduler || return 1
# Verify the default mclock profile on the OSD
local def_mclock_profile
def_mclock_profile=$(ceph config get osd.0 osd_mclock_profile)
test "$def_mclock_profile" = "balanced" || return 1
# Verify the running mClock profile
local orig_mclock_profile=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get osd_mclock_profile |\
jq .osd_mclock_profile)
orig_mclock_profile=$(eval echo $orig_mclock_profile)
test $orig_mclock_profile = "high_recovery_ops" || return 1
# Change the mclock profile to 'custom'
ceph tell osd.0 config set osd_mclock_profile custom || return 1
# Verify that the mclock profile is set to 'custom' on the OSDs
local mclock_profile=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get osd_mclock_profile | \
jq .osd_mclock_profile)
mclock_profile=$(eval echo $mclock_profile)
test $mclock_profile = "custom" || return 1
# Save the original client reservations allocated to the OSDs
local client_res
client_res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \
osd.0) config get osd_mclock_scheduler_client_res | \
jq .osd_mclock_scheduler_client_res | bc)
echo "Original client_res for osd.0 = $client_res"
# Change a mclock config param and confirm the change
local client_res_new=$(echo "$client_res + 0.1" | bc -l)
echo "client_res_new = $client_res_new"
ceph config set osd osd_mclock_scheduler_client_res \
$client_res_new || return 1
# Check value in config monitor db
local res=$(ceph config get osd.0 \
osd_mclock_scheduler_client_res) || return 1
if (( $(echo "$res != $client_res_new" | bc -l) )); then
return 1
fi
# Check value in the in-memory 'values' map
res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \
osd.0) config get osd_mclock_scheduler_client_res | \
jq .osd_mclock_scheduler_client_res | bc)
if (( $(echo "$res != $client_res_new" | bc -l) )); then
return 1
fi
# Switch the mclock profile back to the original built-in profile.
# The config subsystem prevents the overwrite of the changed QoS config
# option above i.e. osd_mclock_scheduler_client_res. This fact is verified
# before proceeding to remove the entry from the config monitor db. After
# the config entry is removed, the original value for the config option is
# restored and is verified.
ceph tell osd.0 config set osd_mclock_profile $orig_mclock_profile || return 1
# Verify that the mclock profile is set back to the original on the OSD
eval mclock_profile=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get osd_mclock_profile | \
jq .osd_mclock_profile)
#mclock_profile=$(ceph config get osd.0 osd_mclock_profile)
test "$mclock_profile" = "$orig_mclock_profile" || return 1
# Verify that the new value is still in effect
# Check value in config monitor db
local res=$(ceph config get osd.0 \
osd_mclock_scheduler_client_res) || return 1
if (( $(echo "$res != $client_res_new" | bc -l) )); then
return 1
fi
# Check value in the in-memory 'values' map
res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \
osd.0) config get osd_mclock_scheduler_client_res | \
jq .osd_mclock_scheduler_client_res | bc)
if (( $(echo "$res != $client_res_new" | bc -l) )); then
return 1
fi
# Remove the changed QoS config option from monitor db
ceph config rm osd osd_mclock_scheduler_client_res || return 1
sleep 5 # Allow time for change to take effect
# Verify that the original values are now restored
# Check value in config monitor db
res=$(ceph config get osd.0 \
osd_mclock_scheduler_client_res) || return 1
if (( $(echo "$res != 0.0" | bc -l) )); then
return 1
fi
# Check value in the in-memory 'values' map
res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \
osd.0) config get osd_mclock_scheduler_client_res | \
jq .osd_mclock_scheduler_client_res | bc)
if (( $(echo "$res != $client_res" | bc -l) )); then
return 1
fi
teardown $dir || return 1
}
function TEST_recovery_limit_adjustment_mclock() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_op_queue=mclock_scheduler || return 1
local recoveries=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_recovery_max_active)
# Get default value
echo "$recoveries" | grep --quiet 'osd_recovery_max_active' || return 1
# Change the recovery limit without setting
# osd_mclock_override_recovery_settings option. Verify that the recovery
# limit is retained at its default value.
ceph config set osd.0 osd_recovery_max_active 10 || return 1
sleep 2 # Allow time for change to take effect
local max_recoveries=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_recovery_max_active)
test "$max_recoveries" = "$recoveries" || return 1
# Change recovery limit after setting osd_mclock_override_recovery_settings.
# Verify that the recovery limit is modified.
ceph config set osd.0 osd_mclock_override_recovery_settings true || return 1
ceph config set osd.0 osd_recovery_max_active 10 || return 1
sleep 2 # Allow time for change to take effect
max_recoveries=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_recovery_max_active)
test "$max_recoveries" = '{"osd_recovery_max_active":"10"}' || return 1
teardown $dir || return 1
}
function TEST_backfill_limit_adjustment_mclock() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_op_queue=mclock_scheduler || return 1
local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills | jq .osd_max_backfills | bc)
# Get default value
echo "osd_max_backfills: $backfills" || return 1
# Change the backfill limit without setting
# osd_mclock_override_recovery_settings option. Verify that the backfill
# limit is retained at its default value.
ceph config set osd.0 osd_max_backfills 20 || return 1
sleep 2 # Allow time for change to take effect
local max_backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills | jq .osd_max_backfills | bc)
test $max_backfills = $backfills || return 1
# Verify local and async reserver settings are not changed
max_backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
dump_recovery_reservations | jq .local_reservations.max_allowed | bc)
test $max_backfills = $backfills || return 1
max_backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
dump_recovery_reservations | jq .remote_reservations.max_allowed | bc)
test $max_backfills = $backfills || return 1
# Change backfills limit after setting osd_mclock_override_recovery_settings.
# Verify that the backfills limit is modified.
ceph config set osd.0 osd_mclock_override_recovery_settings true || return 1
ceph config set osd.0 osd_max_backfills 20 || return 1
sleep 2 # Allow time for change to take effect
max_backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills | jq .osd_max_backfills | bc)
test $max_backfills = 20 || return 1
# Verify local and async reserver settings are changed
max_backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
dump_recovery_reservations | jq .local_reservations.max_allowed | bc)
test $max_backfills = 20 || return 1
max_backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
dump_recovery_reservations | jq .remote_reservations.max_allowed | bc)
test $max_backfills = 20 || return 1
# Kill osd and bring it back up.
# Confirm that the backfill settings are retained.
kill_daemons $dir TERM osd || return 1
ceph osd down 0 || return 1
wait_for_osd down 0 || return 1
activate_osd $dir 0 --osd-op-queue=mclock_scheduler || return 1
max_backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills | jq .osd_max_backfills | bc)
test $max_backfills = 20 || return 1
# Verify local and async reserver settings are changed
max_backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
dump_recovery_reservations | jq .local_reservations.max_allowed | bc)
test $max_backfills = 20 || return 1
max_backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
dump_recovery_reservations | jq .remote_reservations.max_allowed | bc)
test $max_backfills = 20 || return 1
teardown $dir || return 1
}
function TEST_profile_disallow_builtin_params_modify() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_op_queue=mclock_scheduler || return 1
# Verify that the default mclock profile is set on the OSD
local def_mclock_profile=$(ceph config get osd.0 osd_mclock_profile)
test "$def_mclock_profile" = "balanced" || return 1
# Verify the running mClock profile
local cur_mclock_profile=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get osd_mclock_profile |\
jq .osd_mclock_profile)
cur_mclock_profile=$(eval echo $cur_mclock_profile)
test $cur_mclock_profile = "high_recovery_ops" || return 1
declare -a options=("osd_mclock_scheduler_background_recovery_res"
"osd_mclock_scheduler_client_res")
local retries=10
local errors=0
for opt in "${options[@]}"
do
# Try and change a mclock config param and confirm that no change occurred
local opt_val_orig=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get $opt | jq .$opt | bc)
local opt_val_new=$(echo "$opt_val_orig + 0.1" | bc -l)
ceph config set osd.0 $opt $opt_val_new || return 1
# Check configuration values
for count in $(seq 0 $(expr $retries - 1))
do
errors=0
sleep 2 # Allow time for changes to take effect
echo "Check configuration values - Attempt#: $count"
# Check configuration value on Mon store (or the default) for the osd
local res=$(ceph config get osd.0 $opt) || return 1
echo "Mon db (or default): osd.0 $opt = $res"
if (( $(echo "$res == $opt_val_new" | bc -l) )); then
errors=$(expr $errors + 1)
fi
# Check running configuration value using "config show" cmd
res=$(ceph config show osd.0 | grep $opt |\
awk '{ print $2 }' | bc ) || return 1
echo "Running config: osd.0 $opt = $res"
if (( $(echo "$res == $opt_val_new" | bc -l) || \
$(echo "$res != $opt_val_orig" | bc -l) )); then
errors=$(expr $errors + 1)
fi
# Check value in the in-memory 'values' map is unmodified
res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \
osd.0) config get $opt | jq .$opt | bc)
echo "Values map: osd.0 $opt = $res"
if (( $(echo "$res == $opt_val_new" | bc -l) || \
$(echo "$res != $opt_val_orig" | bc -l) )); then
errors=$(expr $errors + 1)
fi
# Check if we succeeded or exhausted retry count
if [ $errors -eq 0 ]
then
break
elif [ $count -eq $(expr $retries - 1) ]
then
return 1
fi
done
done
teardown $dir || return 1
}
function TEST_profile_disallow_builtin_params_override() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_op_queue=mclock_scheduler || return 1
# Verify that the default mclock profile is set on the OSD
local def_mclock_profile=$(ceph config get osd.0 osd_mclock_profile)
test "$def_mclock_profile" = "balanced" || return 1
# Verify the running mClock profile
local cur_mclock_profile=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get osd_mclock_profile |\
jq .osd_mclock_profile)
cur_mclock_profile=$(eval echo $cur_mclock_profile)
test $cur_mclock_profile = "high_recovery_ops" || return 1
declare -a options=("osd_mclock_scheduler_background_recovery_res"
"osd_mclock_scheduler_client_res")
local retries=10
local errors=0
for opt in "${options[@]}"
do
# Override a mclock config param and confirm that no change occurred
local opt_val_orig=$(CEPH_ARGS='' ceph --format=json daemon \
$(get_asok_path osd.0) config get $opt | jq .$opt | bc)
local opt_val_new=$(echo "$opt_val_orig + 0.1" | bc -l)
ceph tell osd.0 config set $opt $opt_val_new || return 1
# Check configuration values
for count in $(seq 0 $(expr $retries - 1))
do
errors=0
sleep 2 # Allow time for changes to take effect
echo "Check configuration values - Attempt#: $count"
# Check configuration value on Mon store (or the default) for the osd
local res=$(ceph config get osd.0 $opt) || return 1
echo "Mon db (or default): osd.0 $opt = $res"
if (( $(echo "$res == $opt_val_new" | bc -l) )); then
errors=$(expr $errors + 1)
fi
# Check running configuration value using "config show" cmd
res=$(ceph config show osd.0 | grep $opt |\
awk '{ print $2 }' | bc ) || return 1
echo "Running config: osd.0 $opt = $res"
if (( $(echo "$res == $opt_val_new" | bc -l) || \
$(echo "$res != $opt_val_orig" | bc -l) )); then
errors=$(expr $errors + 1)
fi
# Check value in the in-memory 'values' map is unmodified
res=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path \
osd.0) config get $opt | jq .$opt | bc)
echo "Values map: osd.0 $opt = $res"
if (( $(echo "$res == $opt_val_new" | bc -l) || \
$(echo "$res != $opt_val_orig" | bc -l) )); then
errors=$(expr $errors + 1)
fi
# Check if we succeeded or exhausted retry count
if [ $errors -eq 0 ]
then
break
elif [ $count -eq $(expr $retries - 1) ]
then
return 1
fi
done
done
teardown $dir || return 1
}
main mclock-config "$@"
# Local Variables:
# compile-command: "cd build ; make -j4 && \
# ../qa/run-standalone.sh mclock-config.sh"
# End:
| 18,490 | 38.510684 | 90 | sh |
null | ceph-main/qa/standalone/misc/network-ping.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--debug_disable_randomized_ping=true "
CEPH_ARGS+="--debug_heartbeat_testing_span=5 "
CEPH_ARGS+="--osd_heartbeat_interval=1 "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_network_ping_test1() {
local dir=$1
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
sleep 5
create_pool foo 16
# write some objects
timeout 20 rados bench -p foo 10 write -b 4096 --no-cleanup || return 1
# Get 1 cycle worth of ping data "1 minute"
sleep 10
flush_pg_stats
CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1
test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1
CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1
test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1
CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network 0 | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "4" || return 1
test "$(cat $dir/json | jq '.threshold')" = "0" || return 1
CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network 0 | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "12" || return 1
test "$(cat $dir/json | jq '.threshold')" = "0" || return 1
# Wait another 4 cycles to get "5 minute interval"
sleep 20
flush_pg_stats
CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1
test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1
CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1
test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1
CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network 0 | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "4" || return 1
test "$(cat $dir/json | jq '.threshold')" = "0" || return 1
CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network 0 | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "12" || return 1
test "$(cat $dir/json | jq '.threshold')" = "0" || return 1
# Wait another 10 cycles to get "15 minute interval"
sleep 50
flush_pg_stats
CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1
test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1
CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1
test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1
CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network 0 | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "4" || return 1
test "$(cat $dir/json | jq '.threshold')" = "0" || return 1
CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network 0 | tee $dir/json
test "$(cat $dir/json | jq '.entries | length')" = "12" || return 1
test "$(cat $dir/json | jq '.threshold')" = "0" || return 1
# Just check the threshold output matches the input
CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network 99 | tee $dir/json
test "$(cat $dir/json | jq '.threshold')" = "99" || return 1
CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network 98 | tee $dir/json
test "$(cat $dir/json | jq '.threshold')" = "98" || return 1
rm -f $dir/json
}
# Test setting of mon_warn_on_slow_ping_time very low to
# get health warning
function TEST_network_ping_test2() {
local dir=$1
export CEPH_ARGS
export EXTRA_OPTS=" --mon_warn_on_slow_ping_time=0.001"
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
sleep 5
ceph osd crush add-bucket dc1 datacenter
ceph osd crush add-bucket dc2 datacenter
ceph osd crush add-bucket dc3 datacenter
ceph osd crush add-bucket rack1 rack
ceph osd crush add-bucket rack2 rack
ceph osd crush add-bucket rack3 rack
ceph osd crush add-bucket host1 host
ceph osd crush add-bucket host2 host
ceph osd crush add-bucket host3 host
ceph osd crush move dc1 root=default
ceph osd crush move dc2 root=default
ceph osd crush move dc3 root=default
ceph osd crush move rack1 datacenter=dc1
ceph osd crush move rack2 datacenter=dc2
ceph osd crush move rack3 datacenter=dc3
ceph osd crush move host1 rack=rack1
ceph osd crush move host2 rack=rack2
ceph osd crush move host3 rack=rack3
ceph osd crush set osd.0 1.0 host=host1
ceph osd crush set osd.1 1.0 host=host2
ceph osd crush set osd.2 1.0 host=host3
ceph osd crush rule create-simple myrule default host firstn
create_pool foo 16 16 replicated myrule
# write some objects
timeout 20 rados bench -p foo 10 write -b 4096 --no-cleanup || return 1
# Get at least 1 cycle of ping data (this test runs with 5 second cycles of 1 second pings)
sleep 10
flush_pg_stats
ceph health | tee $dir/health
grep -q "Slow OSD heartbeats" $dir/health || return 1
ceph health detail | tee $dir/health
grep -q "OSD_SLOW_PING_TIME_BACK" $dir/health || return 1
grep -q "OSD_SLOW_PING_TIME_FRONT" $dir/health || return 1
grep -q "Slow OSD heartbeats on front from osd[.][0-2] [[]dc[1-3],rack[1-3][]] \
to osd[.][0-2] [[]dc[1-3],rack[1-3][]]" $dir/health || return 1
rm -f $dir/health
}
main network-ping "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && ../qa/run-standalone.sh network-ping.sh"
# End:
| 6,542 | 37.488235 | 95 | sh |
null | ceph-main/qa/standalone/misc/ok-to-stop.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON_A="127.0.0.1:7150" # git grep '\<7150\>' : there must be only one
export CEPH_MON_B="127.0.0.1:7151" # git grep '\<7151\>' : there must be only one
export CEPH_MON_C="127.0.0.1:7152" # git grep '\<7152\>' : there must be only one
export CEPH_MON_D="127.0.0.1:7153" # git grep '\<7153\>' : there must be only one
export CEPH_MON_E="127.0.0.1:7154" # git grep '\<7154\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
export ORIG_CEPH_ARGS="$CEPH_ARGS"
local funcs=${@:-$(set | ${SED} -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
kill_daemons $dir KILL || return 1
teardown $dir || return 1
done
}
function TEST_1_mon_checks() {
local dir=$1
CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A "
run_mon $dir a --public-addr=$CEPH_MON_A || return 1
ceph mon ok-to-stop dne || return 1
! ceph mon ok-to-stop a || return 1
! ceph mon ok-to-add-offline || return 1
! ceph mon ok-to-rm a || return 1
ceph mon ok-to-rm dne || return 1
}
function TEST_2_mons_checks() {
local dir=$1
CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B "
run_mon $dir a --public-addr=$CEPH_MON_A || return 1
run_mon $dir b --public-addr=$CEPH_MON_B || return 1
ceph mon ok-to-stop dne || return 1
! ceph mon ok-to-stop a || return 1
! ceph mon ok-to-stop b || return 1
! ceph mon ok-to-stop a b || return 1
ceph mon ok-to-add-offline || return 1
ceph mon ok-to-rm a || return 1
ceph mon ok-to-rm b || return 1
ceph mon ok-to-rm dne || return 1
}
function TEST_3_mons_checks() {
local dir=$1
CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C "
run_mon $dir a --public-addr=$CEPH_MON_A || return 1
run_mon $dir b --public-addr=$CEPH_MON_B || return 1
run_mon $dir c --public-addr=$CEPH_MON_C || return 1
wait_for_quorum 60 3
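    # With 3 monitors a quorum needs 2, so stopping any single mon is fine
    # but stopping any pair is not; the ok-to-add-offline and ok-to-rm checks
    # below follow the same majority arithmetic.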
ceph mon ok-to-stop dne || return 1
ceph mon ok-to-stop a || return 1
ceph mon ok-to-stop b || return 1
ceph mon ok-to-stop c || return 1
! ceph mon ok-to-stop a b || return 1
! ceph mon ok-to-stop b c || return 1
! ceph mon ok-to-stop a b c || return 1
ceph mon ok-to-add-offline || return 1
ceph mon ok-to-rm a || return 1
ceph mon ok-to-rm b || return 1
ceph mon ok-to-rm c || return 1
kill_daemons $dir KILL mon.b
wait_for_quorum 60 2
! ceph mon ok-to-stop a || return 1
ceph mon ok-to-stop b || return 1
! ceph mon ok-to-stop c || return 1
! ceph mon ok-to-add-offline || return 1
! ceph mon ok-to-rm a || return 1
ceph mon ok-to-rm b || return 1
! ceph mon ok-to-rm c || return 1
}
function TEST_4_mons_checks() {
local dir=$1
CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C,$CEPH_MON_D "
run_mon $dir a --public-addr=$CEPH_MON_A || return 1
run_mon $dir b --public-addr=$CEPH_MON_B || return 1
run_mon $dir c --public-addr=$CEPH_MON_C || return 1
run_mon $dir d --public-addr=$CEPH_MON_D || return 1
wait_for_quorum 60 4
ceph mon ok-to-stop dne || return 1
ceph mon ok-to-stop a || return 1
ceph mon ok-to-stop b || return 1
ceph mon ok-to-stop c || return 1
ceph mon ok-to-stop d || return 1
! ceph mon ok-to-stop a b || return 1
! ceph mon ok-to-stop c d || return 1
ceph mon ok-to-add-offline || return 1
ceph mon ok-to-rm a || return 1
ceph mon ok-to-rm b || return 1
ceph mon ok-to-rm c || return 1
kill_daemons $dir KILL mon.a
wait_for_quorum 60 3
ceph mon ok-to-stop a || return 1
! ceph mon ok-to-stop b || return 1
! ceph mon ok-to-stop c || return 1
! ceph mon ok-to-stop d || return 1
ceph mon ok-to-add-offline || return 1
ceph mon ok-to-rm a || return 1
ceph mon ok-to-rm b || return 1
ceph mon ok-to-rm c || return 1
ceph mon ok-to-rm d || return 1
}
function TEST_5_mons_checks() {
local dir=$1
CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C,$CEPH_MON_D,$CEPH_MON_E "
run_mon $dir a --public-addr=$CEPH_MON_A || return 1
run_mon $dir b --public-addr=$CEPH_MON_B || return 1
run_mon $dir c --public-addr=$CEPH_MON_C || return 1
run_mon $dir d --public-addr=$CEPH_MON_D || return 1
run_mon $dir e --public-addr=$CEPH_MON_E || return 1
wait_for_quorum 60 5
ceph mon ok-to-stop dne || return 1
ceph mon ok-to-stop a || return 1
ceph mon ok-to-stop b || return 1
ceph mon ok-to-stop c || return 1
ceph mon ok-to-stop d || return 1
ceph mon ok-to-stop e || return 1
ceph mon ok-to-stop a b || return 1
ceph mon ok-to-stop c d || return 1
! ceph mon ok-to-stop a b c || return 1
ceph mon ok-to-add-offline || return 1
ceph mon ok-to-rm a || return 1
ceph mon ok-to-rm b || return 1
ceph mon ok-to-rm c || return 1
ceph mon ok-to-rm d || return 1
ceph mon ok-to-rm e || return 1
kill_daemons $dir KILL mon.a
wait_for_quorum 60 4
ceph mon ok-to-stop a || return 1
ceph mon ok-to-stop b || return 1
ceph mon ok-to-stop c || return 1
ceph mon ok-to-stop d || return 1
ceph mon ok-to-stop e || return 1
ceph mon ok-to-add-offline || return 1
ceph mon ok-to-rm a || return 1
ceph mon ok-to-rm b || return 1
ceph mon ok-to-rm c || return 1
ceph mon ok-to-rm d || return 1
ceph mon ok-to-rm e || return 1
kill_daemons $dir KILL mon.e
wait_for_quorum 60 3
ceph mon ok-to-stop a || return 1
! ceph mon ok-to-stop b || return 1
! ceph mon ok-to-stop c || return 1
! ceph mon ok-to-stop d || return 1
ceph mon ok-to-stop e || return 1
! ceph mon ok-to-add-offline || return 1
ceph mon ok-to-rm a || return 1
! ceph mon ok-to-rm b || return 1
! ceph mon ok-to-rm c || return 1
! ceph mon ok-to-rm d || return 1
ceph mon ok-to-rm e || return 1
}
function TEST_0_mds() {
local dir=$1
CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A "
run_mon $dir a --public-addr=$CEPH_MON_A || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_mds $dir a || return 1
ceph osd pool create meta 1 || return 1
ceph osd pool create data 1 || return 1
ceph fs new myfs meta data || return 1
sleep 5
! ceph mds ok-to-stop a || return 1
! ceph mds ok-to-stop a dne || return 1
ceph mds ok-to-stop dne || return 1
run_mds $dir b || return 1
sleep 5
ceph mds ok-to-stop a || return 1
ceph mds ok-to-stop b || return 1
! ceph mds ok-to-stop a b || return 1
ceph mds ok-to-stop a dne1 dne2 || return 1
ceph mds ok-to-stop b dne || return 1
! ceph mds ok-to-stop a b dne || return 1
ceph mds ok-to-stop dne1 dne2 || return 1
kill_daemons $dir KILL mds.a
}
function TEST_0_osd() {
local dir=$1
CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A "
run_mon $dir a --public-addr=$CEPH_MON_A || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_osd $dir 3 || return 1
ceph osd erasure-code-profile set ec-profile m=2 k=2 crush-failure-domain=osd || return 1
ceph osd pool create ec erasure ec-profile || return 1
wait_for_clean || return 1
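    # The ec-profile above (k=2, m=2, failure domain osd) gives a pool of
    # size 4, so with exactly four OSDs every PG keeps one shard on each of
    # them; the min_size settings below therefore directly bound how many
    # OSDs may be stopped at once.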
# with min_size 3, we can stop only 1 osd
ceph osd pool set ec min_size 3 || return 1
wait_for_clean || return 1
ceph osd ok-to-stop 0 || return 1
ceph osd ok-to-stop 1 || return 1
ceph osd ok-to-stop 2 || return 1
ceph osd ok-to-stop 3 || return 1
! ceph osd ok-to-stop 0 1 || return 1
! ceph osd ok-to-stop 2 3 || return 1
ceph osd ok-to-stop 0 --max 2 | grep '[0]' || return 1
ceph osd ok-to-stop 1 --max 2 | grep '[1]' || return 1
    # with min_size 2 we can stop 2 osds
ceph osd pool set ec min_size 2 || return 1
wait_for_clean || return 1
ceph osd ok-to-stop 0 1 || return 1
ceph osd ok-to-stop 2 3 || return 1
! ceph osd ok-to-stop 0 1 2 || return 1
! ceph osd ok-to-stop 1 2 3 || return 1
ceph osd ok-to-stop 0 --max 2 | grep '[0,1]' || return 1
ceph osd ok-to-stop 0 --max 20 | grep '[0,1]' || return 1
ceph osd ok-to-stop 2 --max 2 | grep '[2,3]' || return 1
ceph osd ok-to-stop 2 --max 20 | grep '[2,3]' || return 1
# we should get the same result with one of the osds already down
kill_daemons $dir TERM osd.0 || return 1
ceph osd down 0 || return 1
wait_for_peered || return 1
ceph osd ok-to-stop 0 || return 1
ceph osd ok-to-stop 0 1 || return 1
! ceph osd ok-to-stop 0 1 2 || return 1
! ceph osd ok-to-stop 1 2 3 || return 1
}
main ok-to-stop "$@"
| 8,982 | 29.245791 | 103 | sh |
null | ceph-main/qa/standalone/misc/rados-striper.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Red Hat <[email protected]>
#
# Author: Sebastien Ponce <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7116" # git grep '\<7116\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
# setup
setup $dir || return 1
# create a cluster with one monitor and three osds
run_mon $dir a || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
create_rbd_pool || return 1
# create toyfile
dd if=/dev/urandom of=$dir/toyfile bs=1234 count=1
# put a striped object
rados --pool rbd --striper put toyfile $dir/toyfile || return 1
# stat it, with and without striping
rados --pool rbd --striper stat toyfile | cut -d ',' -f 2 > $dir/stripedStat || return 1
rados --pool rbd stat toyfile.0000000000000000 | cut -d ',' -f 2 > $dir/stat || return 1
echo ' size 1234' > $dir/refstat
diff -w $dir/stripedStat $dir/refstat || return 1
diff -w $dir/stat $dir/refstat || return 1
rados --pool rbd stat toyfile >& $dir/staterror
grep -q 'No such file or directory' $dir/staterror || return 1
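    # Background for the object names used here: the striper stores data in
    # RADOS objects named <name>.<16-hex-digit stripe index>, and the
    # 1234-byte toyfile fits in the first stripe, so toyfile.0000000000000000
    # is the only backing object while a plain "toyfile" object never exists
    # (hence the ENOENT check above).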
# get the file back with and without striping
rados --pool rbd --striper get toyfile $dir/stripedGroup || return 1
diff -w $dir/toyfile $dir/stripedGroup || return 1
rados --pool rbd get toyfile.0000000000000000 $dir/nonSTripedGroup || return 1
diff -w $dir/toyfile $dir/nonSTripedGroup || return 1
# test truncate
rados --pool rbd --striper truncate toyfile 12
rados --pool rbd --striper stat toyfile | cut -d ',' -f 2 > $dir/stripedStat || return 1
rados --pool rbd stat toyfile.0000000000000000 | cut -d ',' -f 2 > $dir/stat || return 1
echo ' size 12' > $dir/reftrunc
diff -w $dir/stripedStat $dir/reftrunc || return 1
diff -w $dir/stat $dir/reftrunc || return 1
# test xattrs
rados --pool rbd --striper setxattr toyfile somexattr somevalue || return 1
rados --pool rbd --striper getxattr toyfile somexattr > $dir/xattrvalue || return 1
rados --pool rbd getxattr toyfile.0000000000000000 somexattr > $dir/xattrvalue2 || return 1
echo 'somevalue' > $dir/refvalue
diff -w $dir/xattrvalue $dir/refvalue || return 1
diff -w $dir/xattrvalue2 $dir/refvalue || return 1
rados --pool rbd --striper listxattr toyfile > $dir/xattrlist || return 1
echo 'somexattr' > $dir/reflist
diff -w $dir/xattrlist $dir/reflist || return 1
rados --pool rbd listxattr toyfile.0000000000000000 | grep -v striper > $dir/xattrlist2 || return 1
diff -w $dir/xattrlist2 $dir/reflist || return 1
rados --pool rbd --striper rmxattr toyfile somexattr || return 1
local attr_not_found_str="No data available"
[ `uname` = FreeBSD ] && \
attr_not_found_str="Attribute not found"
expect_failure $dir "$attr_not_found_str" \
rados --pool rbd --striper getxattr toyfile somexattr || return 1
expect_failure $dir "$attr_not_found_str" \
rados --pool rbd getxattr toyfile.0000000000000000 somexattr || return 1
# test rm
rados --pool rbd --striper rm toyfile || return 1
expect_failure $dir 'No such file or directory' \
rados --pool rbd --striper stat toyfile || return 1
expect_failure $dir 'No such file or directory' \
rados --pool rbd stat toyfile.0000000000000000 || return 1
# cleanup
teardown $dir || return 1
}
main rados-striper "$@"
| 4,113 | 39.333333 | 103 | sh |
null | ceph-main/qa/standalone/misc/test-ceph-helpers.sh | #!/usr/bin/env bash
#
# Copyright (C) 2013,2014 Cloudwatt <[email protected]>
# Copyright (C) 2014 Red Hat <[email protected]>
# Copyright (C) 2014 Federico Gimenez <[email protected]>
#
# Author: Loic Dachary <[email protected]>
# Author: Federico Gimenez <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
$CEPH_ROOT/qa/standalone/ceph-helpers.sh TESTS "$@"
| 821 | 36.363636 | 70 | sh |
null | ceph-main/qa/standalone/misc/test-snaptrim-stats.sh | #!/usr/bin/env bash
#
# Copyright (C) 2022 Red Hat <[email protected]>
#
# Author: Sridhar Seshasayee <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7124" # git grep '\<7124\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--debug-bluestore 20 "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_snaptrim_stats() {
local dir=$1
local poolname=test
local OSDS=3
local PGNUM=8
local PGPNUM=8
local objects=10
local WAIT_FOR_UPDATE=10
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=$OSDS || return 1
run_mgr $dir x || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd --osd_pool_default_pg_autoscale_mode=off || return 1
done
# disable scrubs
ceph osd set noscrub || return 1
ceph osd set nodeep-scrub || return 1
# Create a pool
create_pool $poolname $PGNUM $PGPNUM
wait_for_clean || return 1
poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')
# write a few objects
TESTDATA="testdata.1"
dd if=/dev/urandom of=$TESTDATA bs=4096 count=1
for i in `seq 1 $objects`
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
# create a snapshot, clones
SNAP=1
rados -p $poolname mksnap snap${SNAP}
TESTDATA="testdata.2"
dd if=/dev/urandom of=$TESTDATA bs=4096 count=1
for i in `seq 1 $objects`
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
# remove the snapshot, should trigger snaptrim
rados -p $poolname rmsnap snap${SNAP}
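    # Expectation: overwriting every object after the snapshot produced one
    # clone per object, so trimming the snapshot should account for exactly
    # $objects trimmed clones across all PGs, with a non-zero total
    # snaptrim_duration; the sums below verify this.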
# check for snaptrim stats
wait_for_clean || return 1
sleep $WAIT_FOR_UPDATE
local objects_trimmed=0
local snaptrim_duration_total=0.0
for i in $(seq 0 $(expr $PGNUM - 1))
do
local pgid="${poolid}.${i}"
objects_trimmed=$(expr $objects_trimmed + $(ceph pg $pgid query | \
jq '.info.stats.objects_trimmed'))
snaptrim_duration_total=`echo $snaptrim_duration_total + $(ceph pg \
$pgid query | jq '.info.stats.snaptrim_duration') | bc`
done
test $objects_trimmed -eq $objects || return 1
echo "$snaptrim_duration_total > 0.0" | bc || return 1
teardown $dir || return 1
}
function TEST_snaptrim_stats_multiple_snaps() {
local dir=$1
local poolname=test
local OSDS=3
local PGNUM=8
local PGPNUM=8
local objects=10
local WAIT_FOR_UPDATE=10
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=$OSDS || return 1
run_mgr $dir x || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd --osd_pool_default_pg_autoscale_mode=off || return 1
done
# disable scrubs
ceph osd set noscrub || return 1
ceph osd set nodeep-scrub || return 1
# Create a pool
create_pool $poolname $PGNUM $PGPNUM
wait_for_clean || return 1
poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')
# write a few objects
local TESTDATA="testdata.0"
dd if=/dev/urandom of=$TESTDATA bs=4096 count=1
for i in `seq 1 $objects`
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
# create snapshots, clones
NUMSNAPS=2
for i in `seq 1 $NUMSNAPS`
do
rados -p $poolname mksnap snap${i}
TESTDATA="testdata".${i}
dd if=/dev/urandom of=$TESTDATA bs=4096 count=1
for i in `seq 1 $objects`
do
rados -p $poolname put obj${i} $TESTDATA
done
rm -f $TESTDATA
done
# remove the snapshots, should trigger snaptrim
local total_objects_trimmed=0
for i in `seq 1 $NUMSNAPS`
do
rados -p $poolname rmsnap snap${i}
# check for snaptrim stats
wait_for_clean || return 1
sleep $WAIT_FOR_UPDATE
local objects_trimmed=0
local snaptrim_duration_total=0.0
for i in $(seq 0 $(expr $PGNUM - 1))
do
local pgid="${poolid}.${i}"
objects_trimmed=$(expr $objects_trimmed + $(ceph pg $pgid query | \
jq '.info.stats.objects_trimmed'))
snaptrim_duration_total=`echo $snaptrim_duration_total + $(ceph pg \
$pgid query | jq '.info.stats.snaptrim_duration') | bc`
done
test $objects_trimmed -eq $objects || return 1
echo "$snaptrim_duration_total > 0.0" | bc || return 1
total_objects_trimmed=$(expr $total_objects_trimmed + $objects_trimmed)
done
test $total_objects_trimmed -eq $((objects * NUMSNAPS)) || return 1
teardown $dir || return 1
}
main test-snaptrim-stats "$@"
# Local Variables:
# compile-command: "cd build ; make -j4 && \
# ../qa/run-standalone.sh test-snaptrim-stats.sh"
# End:
| 5,635 | 28.820106 | 83 | sh |
null | ceph-main/qa/standalone/misc/ver-health.sh | #!/usr/bin/env bash
#
# Copyright (C) 2020 Red Hat <[email protected]>
#
# Author: David Zafman <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON_A="127.0.0.1:7165" # git grep '\<7165\>' : there must be only one
export CEPH_MON_B="127.0.0.1:7166" # git grep '\<7166\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
CEPH_ARGS+="--mon_health_to_clog_tick_interval=1.0 "
export ORIG_CEPH_ARGS="$CEPH_ARGS"
local funcs=${@:-$(set | ${SED} -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function wait_for_health_string() {
local grep_string=$1
local seconds=${2:-20}
# Allow mon to notice version difference
set -o pipefail
PASSED="false"
for ((i=0; i < $seconds; i++)); do
if ceph health | grep -q "$grep_string"
then
PASSED="true"
break
fi
sleep 1
done
set +o pipefail
# Make sure health changed
if [ $PASSED = "false" ];
then
return 1
fi
return 0
}
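# Example usage (mirrors the calls below); the second argument is an optional
# timeout in seconds, defaulting to 20:
#   wait_for_health_string "HEALTH_WARN .*older version of ceph" 30 || return 1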
# Test a single OSD with an old version and multiple OSDs with 2 different old versions
function TEST_check_version_health_1() {
local dir=$1
    # Assume MON_A is leader?
CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A "
# setup
setup $dir || return 1
# create a cluster with two monitors and three osds
run_mon $dir a --public-addr=$CEPH_MON_A --mon_warn_older_version_delay=0 || return 1
run_mon $dir b --public-addr=$CEPH_MON_B --mon_warn_older_version_delay=0 || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
sleep 5
ceph health detail
# should not see this yet
ceph health detail | grep DAEMON_OLD_VERSION && return 1
kill_daemons $dir KILL osd.1
ceph_debug_version_for_testing=01.00.00-gversion-test activate_osd $dir 1
wait_for_health_string "HEALTH_WARN .*There is a daemon running an older version of ceph" || return 1
ceph health detail
# Should notice that osd.1 is a different version
ceph health detail | grep -q "HEALTH_WARN .*There is a daemon running an older version of ceph" || return 1
ceph health detail | grep -q "^[[]WRN[]] DAEMON_OLD_VERSION: There is a daemon running an older version of ceph" || return 1
ceph health detail | grep -q "osd.1 is running an older version of ceph: 01.00.00-gversion-test" || return 1
kill_daemons $dir KILL osd.2
ceph_debug_version_for_testing=01.00.00-gversion-test activate_osd $dir 2
kill_daemons $dir KILL osd.0
ceph_debug_version_for_testing=02.00.00-gversion-test activate_osd $dir 0
wait_for_health_string "HEALTH_ERR .*There are daemons running multiple old versions of ceph" || return 1
ceph health detail
ceph health detail | grep -q "HEALTH_ERR .*There are daemons running multiple old versions of ceph" || return 1
ceph health detail | grep -q "^[[]ERR[]] DAEMON_OLD_VERSION: There are daemons running multiple old versions of ceph" || return 1
ceph health detail | grep -q "osd.1 osd.2 are running an older version of ceph: 01.00.00-gversion-test" || return 1
ceph health detail | grep -q "osd.0 is running an older version of ceph: 02.00.00-gversion-test" || return 1
}
# Test with 1 MON and 1 MDS with an older version, and add 2 OSDs with different versions
function TEST_check_version_health_2() {
local dir=$1
    # Assume MON_A is leader?
CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A "
# setup
setup $dir || return 1
# create a cluster with all daemon types
run_mon $dir a --public-addr=$CEPH_MON_A --mon_warn_older_version_delay=0 || return 1
run_mon $dir b --public-addr=$CEPH_MON_B --mon_warn_older_version_delay=0 || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
run_mgr $dir x || return 1
run_mgr $dir y || return 1
run_mds $dir m || return 1
run_mds $dir n || return 1
sleep 5
ceph health detail
# should not see this yet
ceph health detail | grep DAEMON_OLD_VERSION && return 1
kill_daemons $dir KILL mon.b
ceph_debug_version_for_testing=01.00.00-gversion-test run_mon $dir b --mon_warn_older_version_delay=0
# XXX: Manager doesn't seem to use the test specific config for version
#kill_daemons $dir KILL mgr.x
#ceph_debug_version_for_testing=02.00.00-gversion-test run_mgr $dir x
kill_daemons $dir KILL mds.m
ceph_debug_version_for_testing=01.00.00-gversion-test run_mds $dir m
wait_for_health_string "HEALTH_WARN .*There are daemons running an older version of ceph" || return 1
ceph health detail
# Should notice that mon.b and mds.m is a different version
ceph health detail | grep -q "HEALTH_WARN .*There are daemons running an older version of ceph" || return 1
ceph health detail | grep -q "^[[]WRN[]] DAEMON_OLD_VERSION: There are daemons running an older version of ceph" || return 1
ceph health detail | grep -q "mon.b mds.m are running an older version of ceph: 01.00.00-gversion-test" || return 1
kill_daemons $dir KILL osd.2
ceph_debug_version_for_testing=01.00.00-gversion-test activate_osd $dir 2
kill_daemons $dir KILL osd.0
ceph_debug_version_for_testing=02.00.00-gversion-test activate_osd $dir 0
wait_for_health_string "HEALTH_ERR .*There are daemons running multiple old versions of ceph" || return 1
ceph health detail
ceph health | grep -q "HEALTH_ERR .*There are daemons running multiple old versions of ceph" || return 1
ceph health detail | grep -q "HEALTH_ERR .*There are daemons running multiple old versions of ceph" || return 1
ceph health detail | grep -q "^[[]ERR[]] DAEMON_OLD_VERSION: There are daemons running multiple old versions of ceph" || return 1
ceph health detail | grep -q "mon.b osd.2 mds.m are running an older version of ceph: 01.00.00-gversion-test" || return 1
ceph health detail | grep -q "osd.0 is running an older version of ceph: 02.00.00-gversion-test" || return 1
}
# Verify delay handling with same setup as test 1
function TEST_check_version_health_3() {
local dir=$1
    # Assume MON_A is leader?
CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A "
# setup
setup $dir || return 1
# create a cluster with two monitors and three osds
run_mon $dir a --public-addr=$CEPH_MON_A || return 1
run_mon $dir b --public-addr=$CEPH_MON_B || return 1
local start_osd_time=$SECONDS
# use memstore for faster bootup
EXTRA_OPTS=" --osd-objectstore=memstore" run_osd $dir 0 || return 1
EXTRA_OPTS=" --osd-objectstore=memstore" run_osd $dir 1 || return 1
EXTRA_OPTS=" --osd-objectstore=memstore" run_osd $dir 2 || return 1
    # take the time taken to boot the OSDs into consideration
local warn_older_version_delay=$(($SECONDS - $start_osd_time + 20))
sleep 5
ceph health detail
# should not see this yet
ceph health detail | grep DAEMON_OLD_VERSION && return 1
ceph tell 'mon.*' injectargs "--mon_warn_older_version_delay $warn_older_version_delay"
kill_daemons $dir KILL osd.1
EXTRA_OPTS=" --osd-objectstore=memstore" \
ceph_debug_version_for_testing=01.00.00-gversion-test \
activate_osd $dir 1
# Wait 50% of 20 second delay config
sleep 10
# should not see this yet
ceph health detail | grep DAEMON_OLD_VERSION && return 1
# Now make sure that at least 20 seconds have passed
wait_for_health_string "HEALTH_WARN .*There is a daemon running an older version of ceph" 20 || return 1
ceph health detail
# Should notice that osd.1 is a different version
ceph health detail | grep -q "HEALTH_WARN .*There is a daemon running an older version of ceph" || return 1
ceph health detail | grep -q "^[[]WRN[]] DAEMON_OLD_VERSION: There is a daemon running an older version of ceph" || return 1
ceph health detail | grep -q "osd.1 is running an older version of ceph: 01.00.00-gversion-test" || return 1
kill_daemons $dir KILL osd.2
ceph_debug_version_for_testing=01.00.00-gversion-test activate_osd $dir 2
kill_daemons $dir KILL osd.0
ceph_debug_version_for_testing=02.00.00-gversion-test activate_osd $dir 0
wait_for_health_string "HEALTH_ERR .*There are daemons running multiple old versions of ceph" || return 1
ceph health detail
ceph health detail | grep -q "HEALTH_ERR .*There are daemons running multiple old versions of ceph" || return 1
ceph health detail | grep -q "^[[]ERR[]] DAEMON_OLD_VERSION: There are daemons running multiple old versions of ceph" || return 1
ceph health detail | grep -q "osd.1 osd.2 are running an older version of ceph: 01.00.00-gversion-test" || return 1
ceph health detail | grep -q "osd.0 is running an older version of ceph: 02.00.00-gversion-test" || return 1
}
main ver-health "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && ../qa/run-standalone.sh ver-health.sh"
# End:
| 9,707 | 40.844828 | 133 | sh |
null | ceph-main/qa/standalone/mon-stretch/mon-stretch-fail-recovery.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON_A="127.0.0.1:7139" # git grep '\<7139\>' : there must be only one
export CEPH_MON_B="127.0.0.1:7141" # git grep '\<7141\>' : there must be only one
export CEPH_MON_C="127.0.0.1:7142" # git grep '\<7142\>' : there must be only one
export CEPH_MON_D="127.0.0.1:7143" # git grep '\<7143\>' : there must be only one
export CEPH_MON_E="127.0.0.1:7144" # git grep '\<7144\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
export BASE_CEPH_ARGS=$CEPH_ARGS
CEPH_ARGS+="--mon-host=$CEPH_MON_A"
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
TEST_stretched_cluster_failover_add_three_osds(){
local dir=$1
local OSDS=8
setup $dir || return 1
run_mon $dir a --public-addr $CEPH_MON_A || return 1
wait_for_quorum 300 1 || return 1
run_mon $dir b --public-addr $CEPH_MON_B || return 1
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B"
wait_for_quorum 300 2 || return 1
run_mon $dir c --public-addr $CEPH_MON_C || return 1
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C"
wait_for_quorum 300 3 || return 1
run_mon $dir d --public-addr $CEPH_MON_D || return 1
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C,$CEPH_MON_D"
wait_for_quorum 300 4 || return 1
run_mon $dir e --public-addr $CEPH_MON_E || return 1
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C,$CEPH_MON_D,$CEPH_MON_E"
wait_for_quorum 300 5 || return 1
ceph mon set election_strategy connectivity
ceph mon add disallowed_leader e
run_mgr $dir x || return 1
run_mgr $dir y || return 1
run_mgr $dir z || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
for zone in iris pze
do
ceph osd crush add-bucket $zone zone
ceph osd crush move $zone root=default
done
ceph osd crush add-bucket node-2 host
ceph osd crush add-bucket node-3 host
ceph osd crush add-bucket node-4 host
ceph osd crush add-bucket node-5 host
ceph osd crush move node-2 zone=iris
ceph osd crush move node-3 zone=iris
ceph osd crush move node-4 zone=pze
ceph osd crush move node-5 zone=pze
ceph osd crush move osd.0 host=node-2
ceph osd crush move osd.1 host=node-2
ceph osd crush move osd.2 host=node-3
ceph osd crush move osd.3 host=node-3
ceph osd crush move osd.4 host=node-4
ceph osd crush move osd.5 host=node-4
ceph osd crush move osd.6 host=node-5
ceph osd crush move osd.7 host=node-5
ceph mon set_location a zone=iris host=node-2
ceph mon set_location b zone=iris host=node-3
ceph mon set_location c zone=pze host=node-4
ceph mon set_location d zone=pze host=node-5
hostname=$(hostname -s)
ceph osd crush remove $hostname || return 1
ceph osd getcrushmap > crushmap || return 1
crushtool --decompile crushmap > crushmap.txt || return 1
sed 's/^# end crush map$//' crushmap.txt > crushmap_modified.txt || return 1
cat >> crushmap_modified.txt << EOF
rule stretch_rule {
id 1
type replicated
min_size 1
max_size 10
step take iris
step chooseleaf firstn 2 type host
step emit
step take pze
step chooseleaf firstn 2 type host
step emit
}
# end crush map
EOF
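    # The stretch_rule appended above takes 2 hosts from zone iris and 2 from
    # zone pze, so the size-4 pool created below keeps two replicas per site;
    # this 2+2 placement is what enable_stretch_mode is pointed at later.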
crushtool --compile crushmap_modified.txt -o crushmap.bin || return 1
ceph osd setcrushmap -i crushmap.bin || return 1
local stretched_poolname=stretched_rbdpool
ceph osd pool create $stretched_poolname 32 32 stretch_rule || return 1
ceph osd pool set $stretched_poolname size 4 || return 1
sleep 3
ceph mon set_location e zone=arbiter host=node-1
ceph mon enable_stretch_mode e stretch_rule zone
kill_daemons $dir KILL mon.c || return 1
kill_daemons $dir KILL mon.d || return 1
kill_daemons $dir KILL osd.4 || return 1
kill_daemons $dir KILL osd.5 || return 1
kill_daemons $dir KILL osd.6 || return 1
kill_daemons $dir KILL osd.7 || return 1
ceph -s
sleep 3
run_osd $dir 8 || return 1
run_osd $dir 9 || return 1
run_osd $dir 10 || return 1
ceph -s
sleep 3
teardown $dir || return 1
}
main mon-stretch-fail-recovery "$@" | 4,639 | 30.351351 | 102 | sh |
null | ceph-main/qa/standalone/mon-stretch/mon-stretch-uneven-crush-weights.sh | #!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON_A="127.0.0.1:7139" # git grep '\<7139\>' : there must be only one
export CEPH_MON_B="127.0.0.1:7141" # git grep '\<7141\>' : there must be only one
export CEPH_MON_C="127.0.0.1:7142" # git grep '\<7142\>' : there must be only one
export CEPH_MON_D="127.0.0.1:7143" # git grep '\<7143\>' : there must be only one
export CEPH_MON_E="127.0.0.1:7144" # git grep '\<7144\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
export BASE_CEPH_ARGS=$CEPH_ARGS
CEPH_ARGS+="--mon-host=$CEPH_MON_A"
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
TEST_stretched_cluster_uneven_weight() {
local dir=$1
local OSDS=4
local weight=0.09000
setup $dir || return 1
run_mon $dir a --public-addr $CEPH_MON_A || return 1
wait_for_quorum 300 1 || return 1
run_mon $dir b --public-addr $CEPH_MON_B || return 1
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B"
wait_for_quorum 300 2 || return 1
run_mon $dir c --public-addr $CEPH_MON_C || return 1
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C"
wait_for_quorum 300 3 || return 1
run_mon $dir d --public-addr $CEPH_MON_D || return 1
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C,$CEPH_MON_D"
wait_for_quorum 300 4 || return 1
run_mon $dir e --public-addr $CEPH_MON_E || return 1
CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C,$CEPH_MON_D,$CEPH_MON_E"
wait_for_quorum 300 5 || return 1
ceph mon set election_strategy connectivity
ceph mon add disallowed_leader e
run_mgr $dir x || return 1
run_mgr $dir y || return 1
run_mgr $dir z || return 1
for osd in $(seq 0 $(expr $OSDS - 1))
do
run_osd $dir $osd || return 1
done
for zone in iris pze
do
ceph osd crush add-bucket $zone zone
ceph osd crush move $zone root=default
done
ceph osd crush add-bucket node-2 host
ceph osd crush add-bucket node-3 host
ceph osd crush add-bucket node-4 host
ceph osd crush add-bucket node-5 host
ceph osd crush move node-2 zone=iris
ceph osd crush move node-3 zone=iris
ceph osd crush move node-4 zone=pze
ceph osd crush move node-5 zone=pze
ceph osd crush move osd.0 host=node-2
ceph osd crush move osd.1 host=node-3
ceph osd crush move osd.2 host=node-4
ceph osd crush move osd.3 host=node-5
ceph mon set_location a zone=iris host=node-2
ceph mon set_location b zone=iris host=node-3
ceph mon set_location c zone=pze host=node-4
ceph mon set_location d zone=pze host=node-5
hostname=$(hostname -s)
ceph osd crush remove $hostname || return 1
ceph osd getcrushmap > crushmap || return 1
crushtool --decompile crushmap > crushmap.txt || return 1
sed 's/^# end crush map$//' crushmap.txt > crushmap_modified.txt || return 1
cat >> crushmap_modified.txt << EOF
rule stretch_rule {
id 1
type replicated
min_size 1
max_size 10
step take iris
step chooseleaf firstn 2 type host
step emit
step take pze
step chooseleaf firstn 2 type host
step emit
}
# end crush map
EOF
crushtool --compile crushmap_modified.txt -o crushmap.bin || return 1
ceph osd setcrushmap -i crushmap.bin || return 1
local stretched_poolname=stretched_rbdpool
ceph osd pool create $stretched_poolname 32 32 stretch_rule || return 1
ceph osd pool set $stretched_poolname size 4 || return 1
ceph mon set_location e zone=arbiter host=node-1 || return 1
    ceph mon enable_stretch_mode e stretch_rule zone || return 1 # Enter stretch mode
# reweight to a more round decimal.
ceph osd crush reweight osd.0 $weight
ceph osd crush reweight osd.1 $weight
ceph osd crush reweight osd.2 $weight
ceph osd crush reweight osd.3 $weight
# Firstly, we test for stretch mode buckets != 2
ceph osd crush add-bucket sham zone || return 1
ceph osd crush move sham root=default || return 1
wait_for_health "INCORRECT_NUM_BUCKETS_STRETCH_MODE" || return 1
ceph osd crush rm sham # clear the health warn
wait_for_health_gone "INCORRECT_NUM_BUCKETS_STRETCH_MODE" || return 1
# Next, we test for uneven weights across buckets
ceph osd crush reweight osd.0 0.07000
wait_for_health "UNEVEN_WEIGHTS_STRETCH_MODE" || return 1
ceph osd crush reweight osd.0 $weight # clear the health warn
wait_for_health_gone "UNEVEN_WEIGHTS_STRETCH_MODE" || return 1
teardown $dir || return 1
}
main mon-stretched-cluster-uneven-weight "$@" | 4,966 | 33.255172 | 102 | sh |
null | ceph-main/qa/standalone/mon/health-mute.sh | #!/bin/bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
    export CEPH_MON="127.0.0.1:7143" # git grep '\<7143\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none --mon-pg-warn-min-per-osd 0 --mon-max-pg-per-osd 1000 "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_mute() {
local dir=$1
setup $dir || return 1
set -o pipefail
run_mon $dir a || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
ceph osd pool create foo 8
ceph osd pool application enable foo rbd --yes-i-really-mean-it
wait_for_clean || return 1
ceph -s
ceph health | grep HEALTH_OK || return 1
# test warning on setting pool size=1
ceph osd pool set foo size 1 --yes-i-really-mean-it
ceph -s
ceph health | grep HEALTH_WARN || return 1
ceph health detail | grep POOL_NO_REDUNDANCY || return 1
ceph health mute POOL_NO_REDUNDANCY
ceph -s
ceph health | grep HEALTH_OK | grep POOL_NO_REDUNDANCY || return 1
ceph health unmute POOL_NO_REDUNDANCY
ceph -s
ceph health | grep HEALTH_WARN || return 1
# restore pool size to default
ceph osd pool set foo size 3
ceph -s
ceph health | grep HEALTH_OK || return 1
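    # mute two codes at once (OSDMAP_FLAGS and OSD_DOWN) and verify both show as muted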
ceph osd set noup
ceph -s
ceph health detail | grep OSDMAP_FLAGS || return 1
ceph osd down 0
ceph -s
ceph health detail | grep OSD_DOWN || return 1
ceph health detail | grep HEALTH_WARN || return 1
ceph health mute OSD_DOWN
ceph health mute OSDMAP_FLAGS
ceph -s
ceph health | grep HEALTH_OK | grep OSD_DOWN | grep OSDMAP_FLAGS || return 1
ceph health unmute OSD_DOWN
ceph -s
ceph health | grep HEALTH_WARN || return 1
    # ttl: a mute given a time-to-live should expire on its own
ceph health mute OSD_DOWN 10s
ceph -s
ceph health | grep HEALTH_OK || return 1
sleep 15
ceph -s
ceph health | grep HEALTH_WARN || return 1
    # sticky: a sticky mute persists after the alert clears and still applies when it returns
ceph health mute OSDMAP_FLAGS --sticky
ceph osd unset noup
sleep 5
ceph -s
ceph health | grep OSDMAP_FLAGS || return 1
ceph osd set noup
ceph -s
ceph health | grep HEALTH_OK || return 1
    # ratchet down on OSD_DOWN count
ceph osd down 0 1
ceph -s
ceph health detail | grep OSD_DOWN || return 1
ceph health mute OSD_DOWN
kill_daemons $dir TERM osd.0
ceph osd unset noup
sleep 10
ceph -s
ceph health detail | grep OSD_DOWN || return 1
ceph health detail | grep '1 osds down' || return 1
ceph health | grep HEALTH_OK || return 1
    sleep 10 # give time for mon tick to ratchet the mute
ceph osd set noup
ceph health mute OSDMAP_FLAGS
ceph -s
ceph health detail
ceph health | grep HEALTH_OK || return 1
ceph osd down 1
ceph -s
ceph health detail
ceph health detail | grep '2 osds down' || return 1
sleep 10 # give time for mute to clear
ceph -s
ceph health detail
ceph health | grep HEALTH_WARN || return 1
ceph health detail | grep '2 osds down' || return 1
teardown $dir || return 1
}
main health-mute "$@"
| 3,385 | 26.088 | 111 | sh |
null | ceph-main/qa/standalone/mon/misc.sh | #!/usr/bin/env bash
#
# Copyright (C) 2014 Cloudwatt <[email protected]>
# Copyright (C) 2014, 2015 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:7102" # git grep '\<7102\>' : there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
$func $dir || return 1
done
}
TEST_POOL=rbd
function TEST_osd_pool_get_set() {
local dir=$1
setup $dir || return 1
run_mon $dir a || return 1
create_pool $TEST_POOL 8
local flag
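    # toggle each boolean pool flag with 0/1 and true/false and verify
    # 'ceph osd dump' reflects every change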
for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
ceph osd pool set $TEST_POOL $flag 0 || return 1
! ceph osd dump | grep 'pool ' | grep $flag || return 1
ceph osd pool set $TEST_POOL $flag 1 || return 1
ceph osd dump | grep 'pool ' | grep $flag || return 1
ceph osd pool set $TEST_POOL $flag false || return 1
! ceph osd dump | grep 'pool ' | grep $flag || return 1
ceph osd pool set $TEST_POOL $flag false || return 1
# check that setting false twice does not toggle to true (bug)
! ceph osd dump | grep 'pool ' | grep $flag || return 1
ceph osd pool set $TEST_POOL $flag true || return 1
ceph osd dump | grep 'pool ' | grep $flag || return 1
# cleanup
ceph osd pool set $TEST_POOL $flag 0 || return 1
done
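    # a replicated pool's default min_size is size - size/2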
local size=$(ceph osd pool get $TEST_POOL size|awk '{print $2}')
local min_size=$(ceph osd pool get $TEST_POOL min_size|awk '{print $2}')
local expected_min_size=$(expr $size - $size / 2)
if [ $min_size -ne $expected_min_size ]; then
echo "default min_size is wrong: expected $expected_min_size, got $min_size"
return 1
fi
ceph osd pool set $TEST_POOL scrub_min_interval 123456 || return 1
ceph osd dump | grep 'pool ' | grep 'scrub_min_interval 123456' || return 1
ceph osd pool set $TEST_POOL scrub_min_interval 0 || return 1
ceph osd dump | grep 'pool ' | grep 'scrub_min_interval' && return 1
ceph osd pool set $TEST_POOL scrub_max_interval 123456 || return 1
ceph osd dump | grep 'pool ' | grep 'scrub_max_interval 123456' || return 1
ceph osd pool set $TEST_POOL scrub_max_interval 0 || return 1
ceph osd dump | grep 'pool ' | grep 'scrub_max_interval' && return 1
ceph osd pool set $TEST_POOL deep_scrub_interval 123456 || return 1
ceph osd dump | grep 'pool ' | grep 'deep_scrub_interval 123456' || return 1
ceph osd pool set $TEST_POOL deep_scrub_interval 0 || return 1
ceph osd dump | grep 'pool ' | grep 'deep_scrub_interval' && return 1
    #replicated pool size is restricted to the range 1..10
    ! ceph osd pool set $TEST_POOL size 11 || return 1
    #replicated pool min_size must be between 1 and size
! ceph osd pool set $TEST_POOL min_size $(expr $size + 1) || return 1
! ceph osd pool set $TEST_POOL min_size 0 || return 1
local ecpool=erasepool
create_pool $ecpool 12 12 erasure default || return 1
    #erasure pool size=k+m; default min_size=k+1
local size=$(ceph osd pool get $ecpool size|awk '{print $2}')
local min_size=$(ceph osd pool get $ecpool min_size|awk '{print $2}')
local k=$(expr $min_size - 1) # default min_size=k+1
#erasure pool size can't change
! ceph osd pool set $ecpool size $(expr $size + 1) || return 1
    #erasure pool min_size must be between k and size
ceph osd pool set $ecpool min_size $(expr $k + 1) || return 1
! ceph osd pool set $ecpool min_size $(expr $k - 1) || return 1
! ceph osd pool set $ecpool min_size $(expr $size + 1) || return 1
teardown $dir || return 1
}
function TEST_mon_add_to_single_mon() {
local dir=$1
fsid=$(uuidgen)
MONA=127.0.0.1:7117 # git grep '\<7117\>' : there must be only one
MONB=127.0.0.1:7118 # git grep '\<7118\>' : there must be only one
CEPH_ARGS_orig=$CEPH_ARGS
CEPH_ARGS="--fsid=$fsid --auth-supported=none "
CEPH_ARGS+="--mon-initial-members=a "
CEPH_ARGS+="--mon-host=$MONA "
setup $dir || return 1
run_mon $dir a --public-addr $MONA || return 1
# wait for the quorum
timeout 120 ceph -s > /dev/null || return 1
run_mon $dir b --public-addr $MONB || return 1
teardown $dir || return 1
setup $dir || return 1
run_mon $dir a --public-addr $MONA || return 1
    # without the fix of #5454, mon.a would hit an assertion failure on seeing
    # the MMonJoin from mon.b
run_mon $dir b --public-addr $MONB || return 1
    # make sure mon.b gets its join request in first, then
sleep 2
# wait for the quorum
timeout 120 ceph -s > /dev/null || return 1
ceph mon dump
ceph mon dump -f json-pretty
local num_mons
num_mons=$(ceph mon dump --format=json 2>/dev/null | jq ".mons | length") || return 1
[ $num_mons == 2 ] || return 1
# no reason to take more than 120 secs to get this submitted
timeout 120 ceph mon add b $MONB || return 1
teardown $dir || return 1
}
function TEST_no_segfault_for_bad_keyring() {
local dir=$1
setup $dir || return 1
# create a client.admin key and add it to ceph.mon.keyring
ceph-authtool --create-keyring $dir/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring $dir/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *'
ceph-authtool $dir/ceph.mon.keyring --import-keyring $dir/ceph.client.admin.keyring
CEPH_ARGS_TMP="--fsid=$(uuidgen) --mon-host=127.0.0.1:7102 --auth-supported=cephx "
CEPH_ARGS_orig=$CEPH_ARGS
CEPH_ARGS="$CEPH_ARGS_TMP --keyring=$dir/ceph.mon.keyring "
run_mon $dir a
# create a bad keyring and make sure no segfault occurs when using the bad keyring
echo -e "[client.admin]\nkey = BQAUlgtWoFePIxAAQ9YLzJSVgJX5V1lh5gyctg==" > $dir/bad.keyring
CEPH_ARGS="$CEPH_ARGS_TMP --keyring=$dir/bad.keyring"
ceph osd dump 2> /dev/null
    # exit status 139 (128 + SIGSEGV/11) means segfault with core dumped
[ $? -eq 139 ] && return 1
CEPH_ARGS=$CEPH_ARGS_orig
teardown $dir || return 1
}
function TEST_mon_features() {
local dir=$1
setup $dir || return 1
fsid=$(uuidgen)
MONA=127.0.0.1:7127 # git grep '\<7127\>' ; there must be only one
MONB=127.0.0.1:7128 # git grep '\<7128\>' ; there must be only one
MONC=127.0.0.1:7129 # git grep '\<7129\>' ; there must be only one
CEPH_ARGS_orig=$CEPH_ARGS
CEPH_ARGS="--fsid=$fsid --auth-supported=none "
CEPH_ARGS+="--mon-host=$MONA,$MONB,$MONC "
CEPH_ARGS+="--mon-debug-no-initial-persistent-features "
CEPH_ARGS+="--mon-debug-no-require-reef "
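    # the debug flags keep the initial monmap free of persistent features, so
    # the test can watch them being set only once all three mons form quorum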
run_mon $dir a --public-addr $MONA || return 1
run_mon $dir b --public-addr $MONB || return 1
timeout 120 ceph -s > /dev/null || return 1
# expect monmap to contain 3 monitors (a, b, and c)
jqinput="$(ceph quorum_status --format=json 2>/dev/null)"
jq_success "$jqinput" '.monmap.mons | length == 3' || return 1
# quorum contains two monitors
jq_success "$jqinput" '.quorum | length == 2' || return 1
    # quorum's monitor features contain kraken, luminous, mimic, nautilus,
    # octopus, pacific, quincy, reef
jqfilter='.features.quorum_mon[]|select(. == "kraken")'
jq_success "$jqinput" "$jqfilter" "kraken" || return 1
jqfilter='.features.quorum_mon[]|select(. == "luminous")'
jq_success "$jqinput" "$jqfilter" "luminous" || return 1
jqfilter='.features.quorum_mon[]|select(. == "mimic")'
jq_success "$jqinput" "$jqfilter" "mimic" || return 1
jqfilter='.features.quorum_mon[]|select(. == "nautilus")'
jq_success "$jqinput" "$jqfilter" "nautilus" || return 1
jqfilter='.features.quorum_mon[]|select(. == "octopus")'
jq_success "$jqinput" "$jqfilter" "octopus" || return 1
jqfilter='.features.quorum_mon[]|select(. == "pacific")'
jq_success "$jqinput" "$jqfilter" "pacific" || return 1
jqfilter='.features.quorum_mon[]|select(. == "quincy")'
jq_success "$jqinput" "$jqfilter" "quincy" || return 1
jqfilter='.features.quorum_mon[]|select(. == "reef")'
jq_success "$jqinput" "$jqfilter" "reef" || return 1
# monmap must have no persistent features set, because we
# don't currently have a quorum made out of all the monitors
# in the monmap.
jqfilter='.monmap.features.persistent | length == 0'
jq_success "$jqinput" "$jqfilter" || return 1
# nor do we have any optional features, for that matter.
jqfilter='.monmap.features.optional | length == 0'
jq_success "$jqinput" "$jqfilter" || return 1
# validate 'mon feature ls'
jqinput="$(ceph mon feature ls --format=json 2>/dev/null)"
    # k l m n o p q r are supported
jqfilter='.all.supported[] | select(. == "kraken")'
jq_success "$jqinput" "$jqfilter" "kraken" || return 1
jqfilter='.all.supported[] | select(. == "luminous")'
jq_success "$jqinput" "$jqfilter" "luminous" || return 1
jqfilter='.all.supported[] | select(. == "mimic")'
jq_success "$jqinput" "$jqfilter" "mimic" || return 1
jqfilter='.all.supported[] | select(. == "nautilus")'
jq_success "$jqinput" "$jqfilter" "nautilus" || return 1
jqfilter='.all.supported[] | select(. == "octopus")'
jq_success "$jqinput" "$jqfilter" "octopus" || return 1
jqfilter='.all.supported[] | select(. == "pacific")'
jq_success "$jqinput" "$jqfilter" "pacific" || return 1
jqfilter='.all.supported[] | select(. == "quincy")'
jq_success "$jqinput" "$jqfilter" "quincy" || return 1
jqfilter='.all.supported[] | select(. == "reef")'
jq_success "$jqinput" "$jqfilter" "reef" || return 1
# start third monitor
run_mon $dir c --public-addr $MONC || return 1
wait_for_quorum 300 3 || return 1
timeout 300 ceph -s > /dev/null || return 1
jqinput="$(ceph quorum_status --format=json 2>/dev/null)"
# expect quorum to have all three monitors
jqfilter='.quorum | length == 3'
jq_success "$jqinput" "$jqfilter" || return 1
# quorum's monitor features should have p now too
jqfilter='.features.quorum_mon[]|select(. == "pacific")'
jq_success "$jqinput" "$jqfilter" "pacific" || return 1
# persistent too
jqfilter='.monmap.features.persistent[]|select(. == "kraken")'
jq_success "$jqinput" "$jqfilter" "kraken" || return 1
jqfilter='.monmap.features.persistent[]|select(. == "luminous")'
jq_success "$jqinput" "$jqfilter" "luminous" || return 1
jqfilter='.monmap.features.persistent[]|select(. == "mimic")'
jq_success "$jqinput" "$jqfilter" "mimic" || return 1
jqfilter='.monmap.features.persistent[]|select(. == "osdmap-prune")'
jq_success "$jqinput" "$jqfilter" "osdmap-prune" || return 1
jqfilter='.monmap.features.persistent[]|select(. == "nautilus")'
jq_success "$jqinput" "$jqfilter" "nautilus" || return 1
jqfilter='.monmap.features.persistent[]|select(. == "octopus")'
jq_success "$jqinput" "$jqfilter" "octopus" || return 1
jqfilter='.monmap.features.persistent[]|select(. == "pacific")'
jq_success "$jqinput" "$jqfilter" "pacific" || return 1
jqfilter='.monmap.features.persistent[]|select(. == "elector-pinging")'
jq_success "$jqinput" "$jqfilter" "elector-pinging" || return 1
jqfilter='.monmap.features.persistent | length == 10'
jq_success "$jqinput" "$jqfilter" || return 1
jqfilter='.monmap.features.persistent[]|select(. == "quincy")'
jq_success "$jqinput" "$jqfilter" "quincy" || return 1
jqfilter='.monmap.features.persistent[]|select(. == "reef")'
jq_success "$jqinput" "$jqfilter" "reef" || return 1
CEPH_ARGS=$CEPH_ARGS_orig
# that's all folks. thank you for tuning in.
teardown $dir || return 1
}
main misc "$@"
# Local Variables:
# compile-command: "cd ../.. ; make -j4 && test/mon/misc.sh"
# End:
| 12,390 | 42.477193 | 111 | sh |
null | ceph-main/qa/standalone/mon/mkfs.sh | #!/usr/bin/env bash
#
# Copyright (C) 2013 Cloudwatt <[email protected]>
# Copyright (C) 2014 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
set -xe
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
DIR=mkfs
export CEPH_CONF=/dev/null
unset CEPH_ARGS
MON_ID=a
MON_DIR=$DIR/$MON_ID
CEPH_MON=127.0.0.1:7110 # git grep '\<7110\>' : there must be only one
TIMEOUT=360
EXTRAOPTS=""
function setup() {
teardown
mkdir $DIR
}
function teardown() {
kill_daemons
rm -fr $DIR
}
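# run ceph-mon --mkfs for mon.$MON_ID, passing through any extra arguments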
function mon_mkfs() {
local fsid=$(uuidgen)
ceph-mon \
--id $MON_ID \
--fsid $fsid \
$EXTRAOPTS \
--mkfs \
--mon-data=$MON_DIR \
--mon-initial-members=$MON_ID \
--mon-host=$CEPH_MON \
"$@"
}
function mon_run() {
ceph-mon \
--id $MON_ID \
--chdir= \
--mon-osd-full-ratio=.99 \
--mon-data-avail-crit=1 \
$EXTRAOPTS \
--mon-data=$MON_DIR \
--log-file=$MON_DIR/log \
--mon-cluster-log-file=$MON_DIR/log \
--run-dir=$MON_DIR \
--pid-file=$MON_DIR/pidfile \
--public-addr $CEPH_MON \
"$@"
}
function kill_daemons() {
for pidfile in $(find $DIR -name pidfile) ; do
pid=$(cat $pidfile)
for try in 0 1 1 1 2 3 ; do
kill $pid || break
sleep $try
done
done
}
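# mkfs with cephx disabled: a monmap must be written but no keyring generated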
function auth_none() {
mon_mkfs --auth-supported=none
ceph-mon \
--id $MON_ID \
--mon-osd-full-ratio=.99 \
--mon-data-avail-crit=1 \
$EXTRAOPTS \
--mon-data=$MON_DIR \
--extract-monmap $MON_DIR/monmap
[ -f $MON_DIR/monmap ] || return 1
[ ! -f $MON_DIR/keyring ] || return 1
mon_run --auth-supported=none
timeout $TIMEOUT ceph --mon-host $CEPH_MON mon stat || return 1
}
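# mkfs with an explicit --keyring: the keyring must be copied into the mon data dir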
function auth_cephx_keyring() {
cat > $DIR/keyring <<EOF
[mon.]
key = AQDUS79S0AF9FRAA2cgRLFscVce0gROn/s9WMg==
caps mon = "allow *"
EOF
mon_mkfs --keyring=$DIR/keyring
[ -f $MON_DIR/keyring ] || return 1
mon_run
timeout $TIMEOUT ceph \
--name mon. \
--keyring $MON_DIR/keyring \
--mon-host $CEPH_MON mon stat || return 1
}
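# mkfs with --key: a corrupted key must be rejected, and a valid key must end
# up in the generated keyring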
function auth_cephx_key() {
if [ -f /etc/ceph/keyring ] ; then
echo "Please move /etc/ceph/keyring away for testing!"
return 1
fi
local key=$(ceph-authtool --gen-print-key)
if mon_mkfs --key='corrupted key' ; then
return 1
else
rm -fr $MON_DIR/store.db
rm -fr $MON_DIR/kv_backend
fi
mon_mkfs --key=$key
[ -f $MON_DIR/keyring ] || return 1
grep $key $MON_DIR/keyring
mon_run
timeout $TIMEOUT ceph \
--name mon. \
--keyring $MON_DIR/keyring \
--mon-host $CEPH_MON mon stat || return 1
}
function makedir() {
local toodeep=$MON_DIR/toodeep
# fail if recursive directory creation is needed
ceph-mon \
--id $MON_ID \
--mon-osd-full-ratio=.99 \
--mon-data-avail-crit=1 \
$EXTRAOPTS \
--mkfs \
--mon-data=$toodeep 2>&1 | tee $DIR/makedir.log
grep 'toodeep.*No such file' $DIR/makedir.log > /dev/null
rm $DIR/makedir.log
# an empty directory does not mean the mon exists
mkdir $MON_DIR
mon_mkfs --auth-supported=none 2>&1 | tee $DIR/makedir.log
! grep "$MON_DIR already exists" $DIR/makedir.log || return 1
}
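# a second mkfs against the same data directory must notice the store already exists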
function idempotent() {
mon_mkfs --auth-supported=none
mon_mkfs --auth-supported=none 2>&1 | tee $DIR/makedir.log
grep "'$MON_DIR' already exists" $DIR/makedir.log > /dev/null || return 1
}
function run() {
local actions
actions+="makedir "
actions+="idempotent "
actions+="auth_cephx_key "
actions+="auth_cephx_keyring "
actions+="auth_none "
for action in $actions ; do
setup
$action || return 1
teardown
done
}
run
# Local Variables:
# compile-command: "cd ../.. ; make TESTS=test/mon/mkfs.sh check"
# End:
| 4,454 | 21.963918 | 77 | sh |
null | ceph-main/qa/standalone/mon/mon-bind.sh | #!/usr/bin/env bash
#
# Copyright (C) 2017 Quantum Corp.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
SOCAT_PIDS=()
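# forward a local TCP port to another local port via socat, remembering the
# child pid so cleanup() can kill it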
function port_forward() {
local source_port=$1
local target_port=$2
socat TCP-LISTEN:${source_port},fork,reuseaddr TCP:localhost:${target_port} &
SOCAT_PIDS+=( $! )
}
function cleanup() {
for p in "${SOCAT_PIDS[@]}"; do
kill $p
done
SOCAT_PIDS=()
}
trap cleanup SIGTERM SIGKILL SIGQUIT SIGINT
function run() {
local dir=$1
shift
export MON_IP=127.0.0.1
export MONA_PUBLIC=7132 # git grep '\<7132\>' ; there must be only one
export MONB_PUBLIC=7133 # git grep '\<7133\>' ; there must be only one
export MONC_PUBLIC=7134 # git grep '\<7134\>' ; there must be only one
export MONA_BIND=7135 # git grep '\<7135\>' ; there must be only one
export MONB_BIND=7136 # git grep '\<7136\>' ; there must be only one
export MONC_BIND=7137 # git grep '\<7137\>' ; there must be only one
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir && cleanup || { cleanup; return 1; }
teardown $dir
done
}
function TEST_mon_client_connect_fails() {
local dir=$1
# start the mon with a public-bind-addr that is different
# from the public-addr.
CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC} "
run_mon $dir a --mon-host=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1
    # now attempt to ping it; that should fail.
timeout 3 ceph ping mon.a || return 0
return 1
}
function TEST_mon_client_connect() {
local dir=$1
# start the mon with a public-bind-addr that is different
# from the public-addr.
CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC} "
run_mon $dir a --mon-host=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1
# now forward the public port to the bind port.
port_forward ${MONA_PUBLIC} ${MONA_BIND}
# attempt to connect. we expect that to work
ceph ping mon.a || return 1
}
function TEST_mon_quorum() {
local dir=$1
# start the mon with a public-bind-addr that is different
# from the public-addr.
CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC},${MON_IP}:${MONB_PUBLIC},${MON_IP}:${MONC_PUBLIC} "
run_mon $dir a --public-addr=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1
run_mon $dir b --public-addr=${MON_IP}:${MONB_PUBLIC} --public-bind-addr=${MON_IP}:${MONB_BIND} || return 1
run_mon $dir c --public-addr=${MON_IP}:${MONC_PUBLIC} --public-bind-addr=${MON_IP}:${MONC_BIND} || return 1
# now forward the public port to the bind port.
port_forward ${MONA_PUBLIC} ${MONA_BIND}
port_forward ${MONB_PUBLIC} ${MONB_BIND}
port_forward ${MONC_PUBLIC} ${MONC_BIND}
# expect monmap to contain 3 monitors (a, b, and c)
jqinput="$(ceph quorum_status --format=json 2>/dev/null)"
jq_success "$jqinput" '.monmap.mons | length == 3' || return 1
# quorum should form
wait_for_quorum 300 3 || return 1
# expect quorum to have all three monitors
jqfilter='.quorum | length == 3'
jq_success "$jqinput" "$jqfilter" || return 1
}
function TEST_put_get() {
local dir=$1
# start the mon with a public-bind-addr that is different
# from the public-addr.
CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC},${MON_IP}:${MONB_PUBLIC},${MON_IP}:${MONC_PUBLIC} "
run_mon $dir a --public-addr=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1
run_mon $dir b --public-addr=${MON_IP}:${MONB_PUBLIC} --public-bind-addr=${MON_IP}:${MONB_BIND} || return 1
run_mon $dir c --public-addr=${MON_IP}:${MONC_PUBLIC} --public-bind-addr=${MON_IP}:${MONC_BIND} || return 1
# now forward the public port to the bind port.
port_forward ${MONA_PUBLIC} ${MONA_BIND}
port_forward ${MONB_PUBLIC} ${MONB_BIND}
port_forward ${MONC_PUBLIC} ${MONC_BIND}
# quorum should form
wait_for_quorum 300 3 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
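    # end-to-end check: write an object through the forwarded mon addresses and read it back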
create_pool hello 8 || return 1
echo "hello world" > $dir/hello
rados --pool hello put foo $dir/hello || return 1
rados --pool hello get foo $dir/hello2 || return 1
diff $dir/hello $dir/hello2 || return 1
}
main mon-bind "$@"
| 5,024 | 33.895833 | 111 | sh |