Search is not available for this dataset
repo
stringlengths 2
152
⌀ | file
stringlengths 15
239
| code
stringlengths 0
58.4M
| file_length
int64 0
58.4M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 364
values |
---|---|---|---|---|---|---|
null |
ceph-main/src/vnewosd.sh
|
#!/bin/bash -ex

# Add one new OSD to a running vstart cluster: generate a cephx key and a
# uuid, register the OSD with the monitors, mkfs its data directory, and
# record its keyring plus a [osd.N] host entry in ceph.conf.
# Must be run from the build directory (uses ./bin and ./dev).

OSD_SECRET=$(bin/ceph-authtool --gen-print-key)
# Use mktemp instead of the predictable /tmp/$$ (avoids symlink/temp races).
TMP_JSON=$(mktemp "${TMPDIR:-/tmp}/vnewosd.XXXXXX")
echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > "$TMP_JSON"
OSD_UUID=$(uuidgen)
OSD_ID=$(bin/ceph osd new "$OSD_UUID" -i "$TMP_JSON")
rm -f "$TMP_JSON"
# Clean any stale data dir from a previous osd with the same id.
rm dev/osd$OSD_ID/* || true
mkdir -p dev/osd$OSD_ID
bin/ceph-osd -i "$OSD_ID" --mkfs --key "$OSD_SECRET" --osd-uuid "$OSD_UUID"
echo "[osd.$OSD_ID]
key = $OSD_SECRET" > dev/osd$OSD_ID/keyring
H=$(hostname)
echo "[osd.$OSD_ID]
host = $H" >> ceph.conf
| 437 | 26.375 | 69 |
sh
|
null |
ceph-main/src/vstart.sh
|
#!/usr/bin/env bash
# -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
# vim: softtabstop=4 shiftwidth=4 expandtab

# vstart.sh: spin up a local development Ceph cluster out of a build tree.
# abort on failure
set -e
quoted_print() {
    # Echo the arguments on one line, single-quoting any argument that
    # contains a space so the output can be pasted back into a shell.
    for s in "$@"; do
        if [[ "$s" =~ \  ]]; then
            printf -- "'%s' " "$s"
        else
            # Pass the value as data, not as the printf FORMAT: the original
            # `printf -- "$s "` corrupted arguments containing '%' or '\'.
            printf -- '%s ' "$s"
        fi
    done
    printf '\n'
}
debug() {
    # Run a command with its stdout diverted to stderr, so diagnostic chatter
    # does not pollute streams that callers may capture.
    "$@" >&2
}

prunb() {
    # Print the command (to stderr) then run it in the BACKGROUND with the
    # build's bin dir prepended to PATH.
    debug quoted_print "$@" '&'
    PATH=$CEPH_BIN:$PATH "$@" &
}

prun() {
    # Print the command (to stderr) then run it in the foreground with the
    # build's bin dir prepended to PATH.
    debug quoted_print "$@"
    PATH=$CEPH_BIN:$PATH "$@"
}
# When VSTART_DEST is set, keep all generated cluster state (conf, dev dirs,
# logs, admin sockets) under that directory instead of the current build dir.
if [ -n "$VSTART_DEST" ]; then
    SRC_PATH=`dirname $0`
    SRC_PATH=`(cd $SRC_PATH; pwd)`

    CEPH_DIR=$SRC_PATH
    CEPH_BIN=${CEPH_BIN:-${PWD}/bin}
    CEPH_LIB=${CEPH_LIB:-${PWD}/lib}

    CEPH_CONF_PATH=$VSTART_DEST
    CEPH_DEV_DIR=$VSTART_DEST/dev
    CEPH_OUT_DIR=$VSTART_DEST/out
    CEPH_ASOK_DIR=$VSTART_DEST/asok
    CEPH_OUT_CLIENT_DIR=${CEPH_OUT_CLIENT_DIR:-$CEPH_OUT_DIR}
fi
get_cmake_variable() {
    # Look up a cached CMake variable in ./CMakeCache.txt.
    # Cache lines look like "NAME:TYPE=VALUE"; print the VALUE part.
    local name="$1"
    grep "${name}:" CMakeCache.txt | cut -d '=' -f 2
}
# for running out of the CMake build directory
if [ -e CMakeCache.txt ]; then
    # Out of tree build, learn source location from CMakeCache.txt
    CEPH_ROOT=$(get_cmake_variable ceph_SOURCE_DIR)
    CEPH_BUILD_DIR=`pwd`
    [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
fi

# use CEPH_BUILD_ROOT to vstart from a 'make install'
if [ -n "$CEPH_BUILD_ROOT" ]; then
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
    [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_ROOT/external/lib
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
    # make install should install python extensions into PYTHONPATH
elif [ -n "$CEPH_ROOT" ]; then
    # Running straight from a build tree: point everything at the build dir.
    [ -z "$CEPHFS_SHELL" ] && CEPHFS_SHELL=$CEPH_ROOT/src/tools/cephfs/shell/cephfs-shell
    [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
    [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
    [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
    [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
    [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
    [ -z "$CEPH_EXT_LIB" ] && CEPH_EXT_LIB=$CEPH_BUILD_DIR/external/lib
    [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
    [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
    [ -z "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON=$CEPH_ROOT/src/python-common
fi

if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
    PATH=$(pwd):$PATH
fi
[ -z "$PYBIND" ] && PYBIND=./pybind

# Build PYTHONPATH and dynamic-linker paths so in-tree binaries, cython
# bindings and mgr modules are found without installing anything.
[ -n "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON="$CEPH_PYTHON_COMMON:"
CYTHON_PYTHONPATH="$CEPH_LIB/cython_modules/lib.3"
export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON$PYTHONPATH

export LD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$LD_LIBRARY_PATH
export DYLD_LIBRARY_PATH=$CEPH_LIB:$CEPH_EXT_LIB:$DYLD_LIBRARY_PATH
# Suppress logging for regular use that indicated that we are using a
# development version. vstart.sh is only used during testing and
# development
export CEPH_DEV=1

# Daemon counts: the CEPH_NUM_* variables win; the short env vars
# (MON=3 OSD=1 ...) are the user-facing shorthand.
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM="$NFS"

# if none of the CEPH_NUM_* number is specified, kill the existing
# cluster.
if [ -z "$CEPH_NUM_MON" -a \
     -z "$CEPH_NUM_OSD" -a \
     -z "$CEPH_NUM_MDS" -a \
     -z "$CEPH_NUM_MGR" -a \
     -z "$GANESHA_DAEMON_NUM" ]; then
    kill_all=1
else
    kill_all=0
fi

# Defaults for anything still unset.
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
[ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM=0

[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_ASOK_DIR" ] && CEPH_ASOK_DIR="$CEPH_DIR/asok"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR
CEPH_OUT_CLIENT_DIR=${CEPH_OUT_CLIENT_DIR:-$CEPH_OUT_DIR}

# Pool replica count cannot usefully exceed the OSD count; cap at 3.
if [ $CEPH_NUM_OSD -gt 3 ]; then
    OSD_POOL_DEFAULT_SIZE=3
else
    OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
fi
# Defaults for all state that the command-line options below can override.
extra_conf=""
new=0
standby=0
debug=0
trace=0
ip=""
nodaemon=0
redirect=0
smallmds=0
short=0
crimson=0
ec=0
cephadm=0
parallel=true
restart=1
hitset=""
overwrite_conf=0
cephx=1 #turn cephx on by default
gssapi_authx=0
cache=""
# FreeBSD builds have no bluestore backend; fall back to memstore there.
if [ `uname` = FreeBSD ]; then
    objectstore="memstore"
else
    objectstore="bluestore"
fi
ceph_osd=ceph-osd
rgw_frontend="beast"
rgw_compression=""
lockdep=${LOCKDEP:-1}
spdk_enabled=0 # disable SPDK by default
pmem_enabled=0
zoned_enabled=0
io_uring_enabled=0
with_jaeger=0

# The dashboard needs both its frontend and RBD compiled in.
with_mgr_dashboard=true
if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
   [[ "$(get_cmake_variable WITH_RBD)" != "ON" ]]; then
    debug echo "ceph-mgr dashboard not built - disabling."
    with_mgr_dashboard=false
fi
with_mgr_restful=false

kstore_path=
declare -a block_devs
declare -a bluestore_db_devs
declare -a bluestore_wal_devs
declare -a secondary_block_devs
secondary_block_devs_type="SSD"

# Conf section used to remember this cluster's daemon counts.
VSTART_SEC="client.vstart.sh"

MON_ADDR=""
DASH_URLS=""
RESTFUL_URLS=""

conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
monmap_fn="/tmp/ceph_monmap.$$"
inc_osd_num=0

# Messenger protocol selection: "21" = both msgr2 and legacy msgr1.
msgr="21"
# Help text. `read -d ''` slurps the whole heredoc into $usage; the trailing
# `|| true` absorbs read's non-zero exit at EOF so `set -e` does not abort.
read -r -d '' usage <<EOF || true
usage: $0 [option]... \nex: MON=3 OSD=1 MDS=1 MGR=1 RGW=1 NFS=1 $0 -n -d
options:
-d, --debug
-t, --trace
-s, --standby_mds: Generate standby-replay MDS for each active
-l, --localhost: use localhost instead of hostname
-i <ip>: bind to specific ip
-n, --new
--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'
--nodaemon: use ceph-run as wrapper for mon/osd/mds
--redirect-output: only useful with nodaemon, directs output to log file
--smallmds: limit mds cache memory limit
-m ip:port specify monitor address
-k keep old configuration files (default)
-x enable cephx (on by default)
-X disable cephx
-g --gssapi enable Kerberos/GSSApi authentication
-G disable Kerberos/GSSApi authentication
--hitset <pool> <hit_set_type>: enable hitset tracking
-e : create an erasure pool
-o config add extra config parameters to all sections
--rgw_port specify ceph rgw http listen port
--rgw_frontend specify the rgw frontend configuration
--rgw_arrow_flight start arrow flight frontend
--rgw_compression specify the rgw compression plugin
--seastore use seastore as crimson osd backend
-b, --bluestore use bluestore as the osd objectstore backend (default)
-K, --kstore use kstore as the osd objectstore backend
--cyanstore use cyanstore as the osd objectstore backend
--memstore use memstore as the osd objectstore backend
--cache <pool>: enable cache tiering on pool
--short: short object names only; necessary for ext4 dev
--nolockdep disable lockdep
--multimds <count> allow multimds with maximum active count
--without-dashboard: do not run using mgr dashboard
--bluestore-spdk: enable SPDK and with a comma-delimited list of PCI-IDs of NVME device (e.g, 0000:81:00.0)
--bluestore-pmem: enable PMEM and with path to a file mapped to PMEM
--msgr1: use msgr1 only
--msgr2: use msgr2 only
--msgr21: use msgr2 and msgr1
--crimson: use crimson-osd instead of ceph-osd
--crimson-foreground: use crimson-osd, but run it in the foreground
--osd-args: specify any extra osd specific options
--bluestore-devs: comma-separated list of blockdevs to use for bluestore
--bluestore-db-devs: comma-separated list of db-devs to use for bluestore
--bluestore-wal-devs: comma-separated list of wal-devs to use for bluestore
--bluestore-zoned: blockdevs listed by --bluestore-devs are zoned devices (HM-SMR HDD or ZNS SSD)
--bluestore-io-uring: enable io_uring backend
--inc-osd: append some more osds into existing vcluster
--cephadm: enable cephadm orchestrator with ~/.ssh/id_rsa[.pub]
--no-parallel: dont start all OSDs in parallel
--no-restart: dont restart process when using ceph-run
--jaeger: use jaegertracing for tracing
--seastore-devs: comma-separated list of blockdevs to use for seastore
--seastore-secondary-devs: comma-separated list of secondary blockdevs to use for seastore
--seastore-secondary-devs-type: device type of all secondary blockdevs. HDD, SSD(default), ZNS or RANDOM_BLOCK_SSD
--crimson-smp: number of cores to use for crimson
\n
EOF
usage_exit() {
    # Print the usage text and exit 0.  %b expands the \n escapes embedded
    # in $usage without treating the text itself as a printf format string
    # (the original `printf "$usage"` would misbehave if the text ever
    # gained a '%').
    printf '%b' "$usage"
    exit
}
parse_block_devs() {
    # parse_block_devs <opt-name> <comma-separated-devs>
    # Split the device list into the global block_devs array and verify each
    # entry is a writable block device; exit 1 otherwise.
    local opt_name=$1
    shift
    local devs=$1
    shift
    local dev
    IFS=',' read -r -a block_devs <<< "$devs"
    for dev in "${block_devs[@]}"; do
        # Quote $dev so paths containing spaces or glob chars don't break
        # (or silently pass) the test.
        if [ ! -b "$dev" ] || [ ! -w "$dev" ]; then
            echo "All $opt_name must refer to writable block devices"
            exit 1
        fi
    done
}
parse_bluestore_db_devs() {
    # parse_bluestore_db_devs <opt-name> <comma-separated-devs>
    # Split the device list into the global bluestore_db_devs array and
    # verify each entry is a writable block device; exit 1 otherwise.
    local opt_name=$1
    shift
    local devs=$1
    shift
    local dev
    IFS=',' read -r -a bluestore_db_devs <<< "$devs"
    for dev in "${bluestore_db_devs[@]}"; do
        # Quote $dev so odd paths can't break the file tests.
        if [ ! -b "$dev" ] || [ ! -w "$dev" ]; then
            echo "All $opt_name must refer to writable block devices"
            exit 1
        fi
    done
}
parse_bluestore_wal_devs() {
    # parse_bluestore_wal_devs <opt-name> <comma-separated-devs>
    # Split the device list into the global bluestore_wal_devs array and
    # verify each entry is a writable block device; exit 1 otherwise.
    local opt_name=$1
    shift
    local devs=$1
    shift
    local dev
    IFS=',' read -r -a bluestore_wal_devs <<< "$devs"
    for dev in "${bluestore_wal_devs[@]}"; do
        # Quote $dev so odd paths can't break the file tests.
        if [ ! -b "$dev" ] || [ ! -w "$dev" ]; then
            echo "All $opt_name must refer to writable block devices"
            exit 1
        fi
    done
}
parse_secondary_devs() {
    # parse_secondary_devs <opt-name> <comma-separated-devs>
    # Split the device list into the global secondary_block_devs array and
    # verify each entry is a writable block device; exit 1 otherwise.
    local opt_name=$1
    shift
    local devs=$1
    shift
    local dev
    IFS=',' read -r -a secondary_block_devs <<< "$devs"
    for dev in "${secondary_block_devs[@]}"; do
        # Quote $dev so odd paths can't break the file tests.
        if [ ! -b "$dev" ] || [ ! -w "$dev" ]; then
            echo "All $opt_name must refer to writable block devices"
            exit 1
        fi
    done
}
crimson_smp=1
# Parse command-line options.  Options that take a value shift it off inside
# their case arm; the loop's trailing shift drops the option word itself.
while [ $# -ge 1 ]; do
    case $1 in
    -d | --debug)
        debug=1
        ;;
    -t | --trace)
        trace=1
        ;;
    -s | --standby_mds)
        standby=1
        ;;
    -l | --localhost)
        ip="127.0.0.1"
        ;;
    -i)
        [ -z "$2" ] && usage_exit
        ip="$2"
        shift
        ;;
    -e)
        ec=1
        ;;
    --new | -n)
        new=1
        ;;
    --inc-osd)
        # Append OSDs to an existing cluster; optional numeric argument.
        new=0
        kill_all=0
        inc_osd_num=$2
        if [ "$inc_osd_num" == "" ]; then
            inc_osd_num=1
        else
            shift
        fi
        ;;
    --short)
        short=1
        ;;
    --crimson)
        crimson=1
        ceph_osd=crimson-osd
        nodaemon=1
        msgr=2
        ;;
    --crimson-foreground)
        crimson=1
        ceph_osd=crimson-osd
        nodaemon=0
        msgr=2
        ;;
    --osd-args)
        extra_osd_args="$2"
        shift
        ;;
    --msgr1)
        msgr="1"
        ;;
    --msgr2)
        msgr="2"
        ;;
    --msgr21)
        msgr="21"
        ;;
    --cephadm)
        cephadm=1
        ;;
    --no-parallel)
        parallel=false
        ;;
    --no-restart)
        restart=0
        ;;
    --valgrind)
        [ -z "$2" ] && usage_exit
        valgrind=$2
        shift
        ;;
    --valgrind_args)
        valgrind_args="$2"
        shift
        ;;
    --valgrind_mds)
        [ -z "$2" ] && usage_exit
        valgrind_mds=$2
        shift
        ;;
    --valgrind_osd)
        [ -z "$2" ] && usage_exit
        valgrind_osd=$2
        shift
        ;;
    --valgrind_mon)
        [ -z "$2" ] && usage_exit
        valgrind_mon=$2
        shift
        ;;
    --valgrind_mgr)
        [ -z "$2" ] && usage_exit
        valgrind_mgr=$2
        shift
        ;;
    --valgrind_rgw)
        [ -z "$2" ] && usage_exit
        valgrind_rgw=$2
        shift
        ;;
    --nodaemon)
        nodaemon=1
        ;;
    --redirect-output)
        redirect=1
        ;;
    --smallmds)
        smallmds=1
        ;;
    --rgw_port)
        CEPH_RGW_PORT=$2
        shift
        ;;
    --rgw_frontend)
        rgw_frontend=$2
        shift
        ;;
    --rgw_arrow_flight)
        rgw_flight_frontend="yes"
        ;;
    --rgw_compression)
        rgw_compression=$2
        shift
        ;;
    --kstore_path)
        kstore_path=$2
        shift
        ;;
    -m)
        [ -z "$2" ] && usage_exit
        MON_ADDR=$2
        shift
        ;;
    -x)
        cephx=1 # this is on be default, flag exists for historical consistency
        ;;
    -X)
        cephx=0
        ;;
    -g | --gssapi)
        gssapi_authx=1
        ;;
    -G)
        gssapi_authx=0
        ;;
    -k)
        if [ ! -r $conf_fn ]; then
            echo "cannot use old configuration: $conf_fn not readable." >&2
            exit
        fi
        new=0
        ;;
    --memstore)
        objectstore="memstore"
        ;;
    --cyanstore)
        objectstore="cyanstore"
        ;;
    --seastore)
        objectstore="seastore"
        ;;
    -b | --bluestore)
        objectstore="bluestore"
        ;;
    -K | --kstore)
        objectstore="kstore"
        ;;
    --hitset)
        # Takes two value arguments: <pool> <hit_set_type>.
        hitset="$hitset $2 $3"
        shift
        shift
        ;;
    -o)
        extra_conf+=$'\n'"$2"
        shift
        ;;
    --cache)
        if [ -z "$cache" ]; then
            cache="$2"
        else
            cache="$cache $2"
        fi
        shift
        ;;
    --nolockdep)
        lockdep=0
        ;;
    --multimds)
        CEPH_MAX_MDS="$2"
        shift
        ;;
    --without-dashboard)
        with_mgr_dashboard=false
        ;;
    --with-restful)
        with_mgr_restful=true
        ;;
    --seastore-devs)
        parse_block_devs --seastore-devs "$2"
        shift
        ;;
    --seastore-secondary-devs)
        # BUGFIX: report the correct option name in validation errors
        # (previously passed "--seastore-devs").
        parse_secondary_devs --seastore-secondary-devs "$2"
        shift
        ;;
    --seastore-secondary-devs-type)
        secondary_block_devs_type="$2"
        shift
        ;;
    --crimson-smp)
        crimson_smp=$2
        shift
        ;;
    --bluestore-spdk)
        [ -z "$2" ] && usage_exit
        IFS=',' read -r -a bluestore_spdk_dev <<< "$2"
        spdk_enabled=1
        shift
        ;;
    --bluestore-pmem)
        [ -z "$2" ] && usage_exit
        bluestore_pmem_file="$2"
        pmem_enabled=1
        shift
        ;;
    --bluestore-devs)
        parse_block_devs --bluestore-devs "$2"
        shift
        ;;
    --bluestore-db-devs)
        parse_bluestore_db_devs --bluestore-db-devs "$2"
        shift
        ;;
    --bluestore-wal-devs)
        parse_bluestore_wal_devs --bluestore-wal-devs "$2"
        shift
        ;;
    --bluestore-zoned)
        zoned_enabled=1
        ;;
    --bluestore-io-uring)
        io_uring_enabled=1
        # BUGFIX: this flag takes no value, so there must be no extra shift
        # here.  The original shifted once too often, which swallowed the
        # following option (and, when this was the last argument, made the
        # loop's trailing shift fail and abort the script under `set -e`).
        ;;
    --jaeger)
        with_jaeger=1
        echo "with_jaeger $with_jaeger"
        ;;
    *)
        usage_exit
    esac
    shift
done
# No daemon counts and no --inc-osd were given: stop any running cluster.
if [ $kill_all -eq 1 ]; then
    $SUDO $INIT_CEPH stop
fi

if [ "$new" -eq 0 ]; then
    # Reusing an existing cluster (-k / default): recover the daemon counts
    # recorded in the [client.vstart.sh] section of the old ceph.conf.
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
    fi
    mkdir -p $CEPH_ASOK_DIR
    MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mon 2>/dev/null` && \
        CEPH_NUM_MON="$MON"
    OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_osd 2>/dev/null` && \
        CEPH_NUM_OSD="$OSD"
    MDS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mds 2>/dev/null` && \
        CEPH_NUM_MDS="$MDS"
    MGR=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mgr 2>/dev/null` && \
        CEPH_NUM_MGR="$MGR"
    RGW=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_rgw 2>/dev/null` && \
        CEPH_NUM_RGW="$RGW"
    NFS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_ganesha 2>/dev/null` && \
        GANESHA_DAEMON_NUM="$NFS"
else
    # only delete if -n
    if [ -e "$conf_fn" ]; then
        # Remove the old conf and its admin-socket dir (but never the
        # system-wide /var/run/ceph).
        asok_dir=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
        rm -- "$conf_fn"
        if [ $asok_dir != /var/run/ceph ]; then
            [ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir
        fi
    fi
    if [ -z "$CEPH_ASOK_DIR" ]; then
        CEPH_ASOK_DIR=`mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX"`
    fi
fi

# Common arguments passed to every daemon.
ARGS="-c $conf_fn"
run() {
    # Launch one daemon: run <type> <id> <command...>.
    # Honors a per-daemon-type valgrind setting ($valgrind_<type>, falling
    # back to the generic $valgrind), and wraps the command with ceph-run
    # when --nodaemon was requested.
    type=$1
    shift
    num=$1
    shift
    # Indirect lookup of valgrind_mon / valgrind_osd / ... for this type.
    eval "valg=\$valgrind_$type"
    [ -z "$valg" ] && valg="$valgrind"

    if [ -n "$valg" ]; then
        prunb valgrind --tool="$valg" $valgrind_args "$@" -f
        sleep 1
    else
        if [ "$nodaemon" -eq 0 ]; then
            prun "$@"
        else
            if [ "$restart" -eq 0 ]; then
                # Prepend ceph-run's --no-restart flag before the command.
                set -- '--no-restart' "$@"
            fi
            if [ "$redirect" -eq 0 ]; then
                prunb ${CEPH_ROOT}/src/ceph-run "$@" -f
            else
                # --redirect-output: capture the wrapped daemon's stdout/err.
                ( prunb ${CEPH_ROOT}/src/ceph-run "$@" -f ) >$CEPH_OUT_DIR/$type.$num.stdout 2>&1
            fi
        fi
    fi
}
wconf() {
    # Append stdin to the cluster conf file -- but only when we are writing
    # a fresh configuration (-n) or explicitly overwriting; otherwise the
    # input is silently discarded so an existing conf stays untouched.
    if [ "$new" -eq 1 ] || [ "$overwrite_conf" -eq 1 ]; then
        cat >> "$conf_fn"
    fi
}
do_rgw_conf() {
    # Emit one [client.rgw.<port>] conf section per requested rgw daemon;
    # each rgw is identified by the port it listens on.
    if [ $CEPH_NUM_RGW -eq 0 ]; then
        return 0
    fi

    # setup each rgw on a sequential port, starting at $CEPH_RGW_PORT.
    # individual rgw's ids will be their ports.
    current_port=$CEPH_RGW_PORT
    # allow only first rgw to start arrow_flight server/port
    local flight_conf=$rgw_flight_frontend
    for n in $(seq 1 $CEPH_NUM_RGW); do
        wconf << EOF
[client.rgw.${current_port}]
rgw frontends = $rgw_frontend port=${current_port}${flight_conf:+,arrow_flight}
admin socket = ${CEPH_OUT_DIR}/radosgw.${current_port}.asok
debug rgw_flight = 20
EOF
        current_port=$((current_port + 1))
        # Only the first rgw gets the arrow_flight frontend.
        unset flight_conf
    done
}
format_conf() {
    # Re-emit a (possibly multi-line) option string with every line after
    # the first prefixed by $indent, so extra options line up under their
    # conf-file section when spliced into a heredoc.
    local opts=$1
    # NOTE(review): this literal looks whitespace-collapsed by formatting;
    # upstream uses a wider (8-space) indent here -- verify before relying
    # on the exact emitted alignment.
    local indent=" "
    local opt
    local formatted
    while read -r opt; do
        if [ -z "$formatted" ]; then
            formatted="${opt}"
        else
            formatted+=$'\n'${indent}${opt}
        fi
    done <<< "$opts"
    echo "$formatted"
}
prepare_conf() {
    # Write the entire ceph.conf for the new cluster via wconf (which is a
    # no-op unless -n or overwrite_conf is in effect).  Sections: the
    # vstart bookkeeping section, [global], auth, [client], rgw, [mds],
    # [mgr], [osd] and [mon].
    # Options shared by every daemon section (\$name/\$id expand at
    # ceph-conf parse time, not here).
    local DAEMONOPTS="
log file = $CEPH_OUT_DIR/\$name.log
admin socket = $CEPH_ASOK_DIR/\$name.asok
chdir = \"\"
pid file = $CEPH_OUT_DIR/\$name.pid
heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
"

    local mgr_modules="iostat nfs"
    if $with_mgr_dashboard; then
        mgr_modules+=" dashboard"
    fi
    if $with_mgr_restful; then
        mgr_modules+=" restful"
    fi

    # Messenger protocol selection per --msgr1/--msgr2/--msgr21.
    local msgr_conf=''
    if [ $msgr -eq 21 ]; then
        msgr_conf="ms bind msgr2 = true
ms bind msgr1 = true"
    fi
    if [ $msgr -eq 2 ]; then
        msgr_conf="ms bind msgr2 = true
ms bind msgr1 = false"
    fi
    if [ $msgr -eq 1 ]; then
        msgr_conf="ms bind msgr2 = false
ms bind msgr1 = true"
    fi

    wconf <<EOF
; generated by vstart.sh on `date`
[$VSTART_SEC]
num mon = $CEPH_NUM_MON
num osd = $CEPH_NUM_OSD
num mds = $CEPH_NUM_MDS
num mgr = $CEPH_NUM_MGR
num rgw = $CEPH_NUM_RGW
num ganesha = $GANESHA_DAEMON_NUM
[global]
fsid = $(uuidgen)
osd failsafe full ratio = .99
mon osd full ratio = .99
mon osd nearfull ratio = .99
mon osd backfillfull ratio = .99
mon_max_pg_per_osd = ${MON_MAX_PG_PER_OSD:-1000}
erasure code dir = $EC_PATH
plugin dir = $CEPH_LIB
run dir = $CEPH_OUT_DIR
crash dir = $CEPH_OUT_DIR
enable experimental unrecoverable data corrupting features = *
osd_crush_chooseleaf_type = 0
debug asok assert abort = true
$(format_conf "${msgr_conf}")
$(format_conf "${extra_conf}")
$AUTOSCALER_OPTS
EOF
    if [ "$with_jaeger" -eq 1 ] ; then
        wconf <<EOF
jaeger_agent_port = 6831
EOF
    fi
    if [ "$lockdep" -eq 1 ] ; then
        wconf <<EOF
lockdep = true
EOF
    fi
    # Authentication: cephx (default), GSSAPI/Kerberos, or none.
    if [ "$cephx" -eq 1 ] ; then
        wconf <<EOF
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
EOF
    elif [ "$gssapi_authx" -eq 1 ] ; then
        wconf <<EOF
auth cluster required = gss
auth service required = gss
auth client required = gss
gss ktab client file = $CEPH_DEV_DIR/gss_\$name.keytab
EOF
    else
        wconf <<EOF
auth cluster required = none
auth service required = none
auth client required = none
ms mon client mode = crc
EOF
    fi
    if [ "$short" -eq 1 ]; then
        COSDSHORT=" osd max object name len = 460
osd max object namespace len = 64"
    fi
    if [ "$objectstore" == "bluestore" ]; then
        if [ "$spdk_enabled" -eq 1 ] || [ "$pmem_enabled" -eq 1 ]; then
            # SPDK/PMEM devices supply their own block; disable db/wal files.
            BLUESTORE_OPTS=" bluestore_block_db_path = \"\"
bluestore_block_db_size = 0
bluestore_block_db_create = false
bluestore_block_wal_path = \"\"
bluestore_block_wal_size = 0
bluestore_block_wal_create = false
bluestore_spdk_mem = 2048"
        else
            # Default: back db/wal with plain files in the osd data dir.
            BLUESTORE_OPTS=" bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
bluestore block db size = 1073741824
bluestore block db create = true
bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
bluestore block wal size = 1048576000
bluestore block wal create = true"
            if [ ${#block_devs[@]} -gt 0 ] || \
               [ ${#bluestore_db_devs[@]} -gt 0 ] || \
               [ ${#bluestore_wal_devs[@]} -gt 0 ]; then
                # when use physical disk, not create file for db/wal
                BLUESTORE_OPTS=""
            fi
        fi
        if [ "$zoned_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
bluestore min alloc size = 65536
bluestore prefer deferred size = 0
bluestore prefer deferred size hdd = 0
bluestore prefer deferred size ssd = 0
bluestore allocator = zoned"
        fi
        if [ "$io_uring_enabled" -eq 1 ]; then
            BLUESTORE_OPTS+="
bdev ioring = true"
        fi
    fi
    wconf <<EOF
[client]
$CCLIENTDEBUG
keyring = $keyring_fn
log file = $CEPH_OUT_CLIENT_DIR/\$name.\$pid.log
admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok
; needed for s3tests
rgw crypt s3 kms backend = testing
rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
rgw crypt require ssl = false
; uncomment the following to set LC days as the value in seconds;
; needed for passing lc time based s3-tests (can be verbose)
; rgw lc debug interval = 10
$(format_conf "${extra_conf}")
EOF
    do_rgw_conf
    wconf << EOF
[mds]
$CMDSDEBUG
$DAEMONOPTS
mds data = $CEPH_DEV_DIR/mds.\$id
mds root ino uid = `id -u`
mds root ino gid = `id -g`
$(format_conf "${extra_conf}")
[mgr]
mgr disabled modules = rook
mgr data = $CEPH_DEV_DIR/mgr.\$id
mgr module path = $MGR_PYTHON_PATH
cephadm path = $CEPH_BIN/cephadm
$DAEMONOPTS
$(format_conf "${extra_conf}")
[osd]
$DAEMONOPTS
osd_check_max_object_name_len_on_startup = false
osd data = $CEPH_DEV_DIR/osd\$id
osd journal = $CEPH_DEV_DIR/osd\$id/journal
osd journal size = 100
osd class tmp = out
osd class dir = $OBJCLASS_PATH
osd class load list = *
osd class default list = *
osd fast shutdown = false
bluestore fsck on mount = true
bluestore block create = true
$BLUESTORE_OPTS
; kstore
kstore fsck on mount = true
osd objectstore = $objectstore
$COSDSHORT
$(format_conf "${extra_conf}")
[mon]
mon_data_avail_crit = 1
mgr initial modules = $mgr_modules
$DAEMONOPTS
$CMONDEBUG
$(format_conf "${extra_conf}")
mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
auth allow insecure global id reclaim = false
EOF
    if [ "$crimson" -eq 1 ]; then
        wconf <<EOF
osd pool default crimson = true
EOF
    fi
}
write_logrotate_conf() {
    # Print (to stdout) a logrotate configuration covering every vstart
    # daemon log under ./out.  The heredoc body below IS the emitted config
    # file, including its embedded comments.
    out_dir=$(pwd)"/out/*.log"

    cat << EOF
$out_dir
{
rotate 5
size 1G
copytruncate
compress
notifempty
missingok
sharedscripts
postrotate
# NOTE: assuring that the absence of one of the following processes
# won't abort the logrotate command.
killall -u $USER -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || echo ""
endscript
}
EOF
}
init_logrotate() {
    # Create ./logrotate.conf once; if it is missing, also drop any stale
    # logrotate state so rotation starts fresh.
    logrotate_conf_path=$(pwd)"/logrotate.conf"
    logrotate_state_path=$(pwd)"/logrotate.state"

    # Use the standard -e file test (unary -a is an obsolete bash-ism) and
    # quote the paths in case the build dir contains spaces.
    if ! test -e "$logrotate_conf_path"; then
        if test -e "$logrotate_state_path"; then
            rm -f "$logrotate_state_path"
        fi
        write_logrotate_conf > "$logrotate_conf_path"
    fi
}
start_mon() {
    # Build the monitor name list (a b c ...), and on -n create the cluster
    # keyring, per-mon conf sections and monmap, then mkfs and launch every
    # monitor.
    local MONS=""
    local count=0
    for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
    do
        [ $count -eq $CEPH_NUM_MON ] && break;
        count=$(($count + 1))
        if [ -z "$MONS" ]; then
	    MONS="$f"
        else
	    MONS="$MONS $f"
        fi
    done

    if [ "$new" -eq 1 ]; then
        if [ `echo $IP | grep '^127\\.'` ]; then
            echo
            echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
            echo " connect. either adjust /etc/hosts, or edit this script to use your"
            echo " machine's real IP."
            echo
        fi

        # Cluster keyring: mon. key plus a full-caps client.admin key.
        prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
        prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin \
            --cap mon 'allow *' \
            --cap osd 'allow *' \
            --cap mds 'allow *' \
            --cap mgr 'allow *' \
            "$keyring_fn"

        # build a fresh fs monmap, mon fs
        local params=()
        local count=0
        local mon_host=""
        for f in $MONS
        do
            # Each mon gets two consecutive ports (v2 at base, v1 at base+1).
            if [ $msgr -eq 1 ]; then
                A="v1:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 2 ]; then
                A="v2:$IP:$(($CEPH_PORT+$count+1))"
            fi
            if [ $msgr -eq 21 ]; then
                A="[v2:$IP:$(($CEPH_PORT+$count)),v1:$IP:$(($CEPH_PORT+$count+1))]"
            fi
            params+=("--addv" "$f" "$A")
            mon_host="$mon_host $A"
            wconf <<EOF
[mon.$f]
host = $HOSTNAME
mon data = $CEPH_DEV_DIR/mon.$f
EOF
            count=$(($count + 2))
        done
        wconf <<EOF
[global]
mon host = $mon_host
EOF
        prun "$CEPH_BIN/monmaptool" --create --clobber "${params[@]}" --print "$monmap_fn"

        for f in $MONS
        do
            prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
            prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
            prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
        done
        prun rm -- "$monmap_fn"
    fi

    # start monitors
    for f in $MONS
    do
        run 'mon' $f $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
    done

    if [ "$crimson" -eq 1 ]; then
        $CEPH_BIN/ceph osd set-allow-crimson --yes-i-really-mean-it
    fi
}
start_osd() {
    # Create (on -n or --inc-osd) and launch OSD daemons with ids
    # [start..end].  --inc-osd appends new ids after the cluster's current
    # max osd id.
    if [ $inc_osd_num -gt 0 ]; then
        old_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        start=$old_maxosd
        end=$(($start-1+$inc_osd_num))
        overwrite_conf=1 # fake wconf
    else
        start=0
        end=$(($CEPH_NUM_OSD-1))
    fi
    local osds_wait
    for osd in `seq $start $end`
    do
	local extra_seastar_args
	if [ "$ceph_osd" == "crimson-osd" ]; then
	    # Pin each crimson OSD to its own contiguous range of
	    # crimson_smp CPUs.
	    bottom_cpu=$(( osd * crimson_smp ))
	    top_cpu=$(( bottom_cpu + crimson_smp - 1 ))
	    # set a single CPU nodes for each osd
	    extra_seastar_args="--cpuset $bottom_cpu-$top_cpu"
	    if [ "$debug" -ne 0 ]; then
		extra_seastar_args+=" --debug"
	    fi
	    if [ "$trace" -ne 0 ]; then
		extra_seastar_args+=" --trace"
	    fi
	fi
	if [ "$new" -eq 1 -o $inc_osd_num -gt 0 ]; then
            wconf <<EOF
[osd.$osd]
host = $HOSTNAME
EOF
            if [ "$spdk_enabled" -eq 1 ]; then
                wconf <<EOF
bluestore_block_path = spdk:${bluestore_spdk_dev[$osd]}
EOF
            elif [ "$pmem_enabled" -eq 1 ]; then
                wconf <<EOF
bluestore_block_path = ${bluestore_pmem_file}
EOF
            fi
            rm -rf $CEPH_DEV_DIR/osd$osd || true
            if command -v btrfs > /dev/null; then
                for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
            fi
	    if [ -n "$kstore_path" ]; then
		ln -s $kstore_path $CEPH_DEV_DIR/osd$osd
	    else
		mkdir -p $CEPH_DEV_DIR/osd$osd
		# Zero the first MB of any user-supplied device (wipes old
		# labels) and link it into the osd data dir.
		if [ -n "${block_devs[$osd]}" ]; then
		    dd if=/dev/zero of=${block_devs[$osd]} bs=1M count=1
		    ln -s ${block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block
		fi
		if [ -n "${bluestore_db_devs[$osd]}" ]; then
		    dd if=/dev/zero of=${bluestore_db_devs[$osd]} bs=1M count=1
		    ln -s ${bluestore_db_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block.db
		fi
		if [ -n "${bluestore_wal_devs[$osd]}" ]; then
		    dd if=/dev/zero of=${bluestore_wal_devs[$osd]} bs=1M count=1
		    ln -s ${bluestore_wal_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block.wal
		fi
		if [ -n "${secondary_block_devs[$osd]}" ]; then
		    dd if=/dev/zero of=${secondary_block_devs[$osd]} bs=1M count=1
		    mkdir -p $CEPH_DEV_DIR/osd$osd/block.${secondary_block_devs_type}.1
		    ln -s ${secondary_block_devs[$osd]} $CEPH_DEV_DIR/osd$osd/block.${secondary_block_devs_type}.1/block
		fi
	    fi
            if [ "$objectstore" == "bluestore" ]; then
                wconf <<EOF
bluestore fsck on mount = false
EOF
            fi

            # Register the osd with the cluster, then mkfs its store.
            local uuid=`uuidgen`
            echo "add osd$osd $uuid"
            OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
            echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd$osd/new.json
            ceph_adm osd new $uuid -i $CEPH_DEV_DIR/osd$osd/new.json
            rm $CEPH_DEV_DIR/osd$osd/new.json
            prun $SUDO $CEPH_BIN/$ceph_osd $extra_osd_args -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid $extra_seastar_args \
                2>&1 | tee $CEPH_OUT_DIR/osd-mkfs.$osd.log

            local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
            cat > $key_fn<<EOF
[osd.$osd]
key = $OSD_SECRET
EOF
        fi
        echo start osd.$osd
        local osd_pid
        echo 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS
        run 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
            $extra_seastar_args $extra_osd_args \
            -i $osd $ARGS $COSD_ARGS &
        osd_pid=$!
        if $parallel; then
            # NOTE(review): only the most recent pid is kept here, so the
            # wait loop below effectively waits on the last OSD only --
            # accumulating ("$osds_wait $osd_pid") looks intended; verify
            # before changing.
            osds_wait=$osd_pid
        else
            wait $osd_pid
        fi
    done
    if $parallel; then
        for p in $osds_wait; do
            wait $p
        done
        debug echo OSDs started
    fi
    if [ $inc_osd_num -gt 0 ]; then
        # update num osd
        new_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
        sed -i "s/num osd = .*/num osd = $new_maxosd/g" $conf_fn
    fi
}
create_mgr_restful_secret() {
    # Wait for the mgr restful module to register, then create a self-signed
    # cert and an admin API key; the key is exported via $RESTFUL_SECRET.
    while ! ceph_adm -h | grep -c -q ^restful ; do
        debug echo 'waiting for mgr restful module to start'
        sleep 1
    done
    local secret_file
    if ceph_adm restful create-self-signed-cert > /dev/null; then
        # Modern $() instead of backticks; quote the temp path throughout.
        secret_file=$(mktemp)
        ceph_adm restful create-key admin -o "$secret_file"
        RESTFUL_SECRET=$(cat "$secret_file")
        rm -f "$secret_file"
    else
        debug echo MGR Restful is not working, perhaps the package is not installed?
    fi
}
start_mgr() {
    # Create (on -n) and launch the mgr daemons, configure dashboard /
    # prometheus / restful module ports, and optionally bootstrap the
    # cephadm orchestrator.
    local mgr=0
    local ssl=${DASHBOARD_SSL:-1}
    # avoid monitors on nearby ports (which test/*.sh use extensively)
    MGR_PORT=$(($CEPH_PORT + 1000))
    PROMETHEUS_PORT=9283
    for name in x y z a b c d e f g h i j k l m n o p
    do
        [ $mgr -eq $CEPH_NUM_MGR ] && break
        mgr=$(($mgr + 1))
        if [ "$new" -eq 1 ]; then
            mkdir -p $CEPH_DEV_DIR/mgr.$name
            key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
            $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
            ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'

            wconf <<EOF
[mgr.$name]
host = $HOSTNAME
EOF

            if $with_mgr_dashboard ; then
                local port_option="ssl_server_port"
                local http_proto="https"
                if [ "$ssl" == "0" ]; then
                    port_option="server_port"
                    http_proto="http"
                    ceph_adm config set mgr mgr/dashboard/ssl false --force
                fi
                ceph_adm config set mgr mgr/dashboard/$name/$port_option $MGR_PORT --force
                if [ $mgr -eq 1 ]; then
                    DASH_URLS="$http_proto://$IP:$MGR_PORT"
                else
                    DASH_URLS+=", $http_proto://$IP:$MGR_PORT"
                fi
            fi
	    MGR_PORT=$(($MGR_PORT + 1000))
	    ceph_adm config set mgr mgr/prometheus/$name/server_port $PROMETHEUS_PORT --force
	    PROMETHEUS_PORT=$(($PROMETHEUS_PORT + 1000))

	    ceph_adm config set mgr mgr/restful/$name/server_port $MGR_PORT --force
            if [ $mgr -eq 1 ]; then
                RESTFUL_URLS="https://$IP:$MGR_PORT"
            else
                RESTFUL_URLS+=", https://$IP:$MGR_PORT"
            fi
	    MGR_PORT=$(($MGR_PORT + 1000))
        fi

        debug echo "Starting mgr.${name}"
        run 'mgr' $name $CEPH_BIN/ceph-mgr -i $name $ARGS
    done

    # Block until at least one mgr reports available.
    while ! ceph_adm mgr stat | jq -e '.available'; do
        debug echo 'waiting for mgr to become available'
        sleep 1
    done

    if [ "$new" -eq 1 ]; then
        # setting login credentials for dashboard
        if $with_mgr_dashboard; then
            while ! ceph_adm -h | grep -c -q ^dashboard ; do
                debug echo 'waiting for mgr dashboard module to start'
                sleep 1
            done
            DASHBOARD_ADMIN_SECRET_FILE="${CEPH_CONF_PATH}/dashboard-admin-secret.txt"
            printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}"
            ceph_adm dashboard ac-user-create admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" \
                administrator --force-password
            if [ "$ssl" != "0" ]; then
                if ! ceph_adm dashboard create-self-signed-cert;  then
                    debug echo dashboard module not working correctly!
                fi
            fi
        fi
        if $with_mgr_restful; then
            create_mgr_restful_secret
        fi
    fi

    if [ "$cephadm" -eq 1 ]; then
        debug echo Enabling cephadm orchestrator
        if [ "$new" -eq 1 ]; then
            # Pin the container image to the current latest-master digest.
            digest=$(curl -s \
                https://hub.docker.com/v2/repositories/ceph/daemon-base/tags/latest-master-devel \
                | jq -r '.images[0].digest')
            ceph_adm config set global container_image "docker.io/ceph/daemon-base@$digest"
        fi
        ceph_adm config-key set mgr/cephadm/ssh_identity_key -i ~/.ssh/id_rsa
        ceph_adm config-key set mgr/cephadm/ssh_identity_pub -i ~/.ssh/id_rsa.pub
        ceph_adm mgr module enable cephadm
        ceph_adm orch set backend cephadm
        ceph_adm orch host add "$(hostname)"
        ceph_adm orch apply crash '*'
        ceph_adm config set mgr mgr/cephadm/allow_ptrace true
    fi
}
start_mds() {
    # Create (on -n) and launch the MDS daemons -- plus a standby-replay
    # twin per active MDS when -s was given -- then create the requested
    # cephfs filesystems.
    local mds=0
    for name in a b c d e f g h i j k l m n o p
    do
        [ $mds -eq $CEPH_NUM_MDS ] && break
        mds=$(($mds + 1))

        if [ "$new" -eq 1 ]; then
            prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
            key_fn=$CEPH_DEV_DIR/mds.$name/keyring
            wconf <<EOF
[mds.$name]
host = $HOSTNAME
EOF
            if [ "$standby" -eq 1 ]; then
                # Standby-replay daemon is named "<name>s".
                mkdir -p $CEPH_DEV_DIR/mds.${name}s
                wconf <<EOF
mds standby for rank = $mds
[mds.${name}s]
mds standby replay = true
mds standby for name = ${name}
EOF
            fi
            prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
            ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow rw tag cephfs *=*' mds 'allow' mgr 'allow profile mds'
            if [ "$standby" -eq 1 ]; then
                prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
                    "$CEPH_DEV_DIR/mds.${name}s/keyring"
                ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
                    mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
            fi
        fi

        run 'mds' $name $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
        if [ "$standby" -eq 1 ]; then
            # NOTE(review): the daemon id passed to `run` is $name while the
            # actual daemon started is ${name}s -- presumably only affects
            # the nodaemon stdout filename; verify.
            run 'mds' $name $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
        fi

        #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m  #--debug_ms 20
        #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
        #ceph_adm mds set max_mds 2
    done

    if [ $new -eq 1 ]; then
        if [ "$CEPH_NUM_FS" -gt "0" ] ; then
            sleep 5 # time for MDS to come up as standby to avoid health warnings on fs creation
            if [ "$CEPH_NUM_FS" -gt "1" ] ; then
                ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
            fi

	    # wait for volume module to load
	    while ! ceph_adm fs volume ls ; do sleep 1 ; done
            local fs=0
            for name in a b c d e f g h i j k l m n o p
            do
                ceph_adm fs volume create ${name}
                ceph_adm fs authorize ${name} "client.fs_${name}" / rwp >> "$keyring_fn"
                fs=$(($fs + 1))
                [ $fs -eq $CEPH_NUM_FS ] && break
            done
        fi
    fi
}
# Ganesha Daemons requires nfs-ganesha nfs-ganesha-ceph nfs-ganesha-rados-grace
# nfs-ganesha-rados-urls (version 3.3 and above) packages installed. On
# Fedora>=31 these packages can be installed directly with 'dnf'. For CentOS>=8
# the packages are available at
# https://wiki.centos.org/SpecialInterestGroup/Storage
# Similarly for Ubuntu>=16.04 follow the instructions on
# https://launchpad.net/~nfs-ganesha
# Start $GANESHA_DAEMON_NUM NFS-Ganesha daemons backed by CephFS "a".
# Stores each daemon's config in RADOS (rados_cluster recovery backend)
# and writes a per-daemon ganesha-<name>.conf under $CEPH_DEV_DIR.
# Requires the nfs-ganesha packages noted in the comment above.
start_ganesha() {
    cluster_id="vstart"
    GANESHA_PORT=$(($CEPH_PORT + 4000))
    local ganesha=0
    test_user="$cluster_id"
    pool_name=".nfs"
    namespace=$cluster_id
    url="rados://$pool_name/$namespace/conf-nfs.$test_user"
    # a single shared client identity used by all ganesha daemons
    prun ceph_adm auth get-or-create client.$test_user \
        mon "allow r" \
        osd "allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \
        mds "allow rw path=/" \
        >> "$keyring_fn"
    ceph_adm mgr module enable test_orchestrator
    ceph_adm orch set backend test_orchestrator
    ceph_adm test_orchestrator load_data -i $CEPH_ROOT/src/pybind/mgr/test_orchestrator/dummy_data.json
    prun ceph_adm nfs cluster create $cluster_id
    prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path "/cephfs"
    for name in a b c d e f g h i j k l m n o p
    do
        [ $ganesha -eq $GANESHA_DAEMON_NUM ] && break
        port=$(($GANESHA_PORT + ganesha))
        ganesha=$(($ganesha + 1))
        ganesha_dir="$CEPH_DEV_DIR/ganesha.$name"
        prun rm -rf $ganesha_dir
        prun mkdir -p $ganesha_dir
        # NFSv4.1/4.2 only; export definitions come from the rados URL below
        echo "NFS_CORE_PARAM {
        Enable_NLM = false;
        Enable_RQUOTA = false;
        Protocols = 4;
        NFS_Port = $port;
}
MDCACHE {
        Dir_Chunk = 0;
}
NFSv4 {
        RecoveryBackend = rados_cluster;
        Minor_Versions = 1, 2;
}
RADOS_KV {
        pool = '$pool_name';
        namespace = $namespace;
        UserId = $test_user;
        nodeid = $name;
}
RADOS_URLS {
        Userid = $test_user;
        watch_url = '$url';
}
%url $url" > "$ganesha_dir/ganesha-$name.conf"
        wconf <<EOF
[ganesha.$name]
        host = $HOSTNAME
        ip = $IP
        port = $port
        ganesha data = $ganesha_dir
        pid file = $CEPH_OUT_DIR/ganesha-$name.pid
EOF
        # register this node in the grace db, then start the daemon
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace add $name
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace
        prun env CEPH_CONF="${conf_fn}" ganesha.nfsd -L "$CEPH_OUT_DIR/ganesha-$name.log" -f "$ganesha_dir/ganesha-$name.conf" -p "$CEPH_OUT_DIR/ganesha-$name.pid" -N NIV_DEBUG
        # Wait few seconds for grace period to be removed
        sleep 2
        prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace
        echo "$test_user ganesha daemon $name started on port: $port"
    done
}
# Build the per-daemon debug-option snippets that are appended to the
# daemon command lines / config. With -d (verbose) crank the subsystem
# log levels up; otherwise keep the monitor at a modest level.
if [ "$debug" -eq 0 ]; then
    CMONDEBUG='
        debug mon = 10
        debug ms = 1'
    CCLIENTDEBUG=''
    CMDSDEBUG=''
else
    debug echo "** going verbose **"
    # fix: 'debug osd = 20' was previously listed twice in this snippet;
    # keep a single entry.
    CMONDEBUG='
        debug osd = 20
        debug mon = 20
        debug paxos = 20
        debug auth = 20
        debug mgrc = 20
        debug ms = 1'
    CCLIENTDEBUG='
        debug client = 20'
    CMDSDEBUG='
        debug mds = 20'
fi
# Crimson doesn't support PG merge/split yet.
if [ "$ceph_osd" == "crimson-osd" ]; then
    AUTOSCALER_OPTS='
        osd_pool_default_pg_autoscale_mode = off'
fi
# If an explicit monitor address was given, pass it to every daemon.
if [ -n "$MON_ADDR" ]; then
    CMON_ARGS=" -m "$MON_ADDR
    COSD_ARGS=" -m "$MON_ADDR
    CMDS_ARGS=" -m "$MON_ADDR
fi
# Pick a random unused port in [40000, 41000) for the first monitor.
if [ -z "$CEPH_PORT" ]; then
    while [ true ]
    do
        CEPH_PORT="$(echo $(( RANDOM % 1000 + 40000 )))"
        ss -a -n | egrep "\<LISTEN\>.+:${CEPH_PORT}\s+" 1>/dev/null 2>&1 || break
    done
fi
[ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
# sudo if btrfs
[ -d $CEPH_DEV_DIR/osd0/. ] && [ -e $CEPH_DEV_DIR/sudo ] && SUDO="sudo"
# Fresh run (not just adding OSDs): clean up old cores and output files.
if [ $inc_osd_num -eq 0 ]; then
    prun $SUDO rm -f core*
fi
[ -d $CEPH_ASOK_DIR ] || mkdir -p $CEPH_ASOK_DIR
[ -d $CEPH_OUT_DIR ] || mkdir -p $CEPH_OUT_DIR
[ -d $CEPH_DEV_DIR ] || mkdir -p $CEPH_DEV_DIR
[ -d $CEPH_OUT_CLIENT_DIR ] || mkdir -p $CEPH_OUT_CLIENT_DIR
if [ $inc_osd_num -eq 0 ]; then
    $SUDO find "$CEPH_OUT_DIR" -type f -delete
fi
[ -d gmon ] && $SUDO rm -rf gmon/*
[ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && [ -e $keyring_fn ] && rm $keyring_fn
# figure machine's ip
HOSTNAME=`hostname -s`
if [ -n "$ip" ]; then
    IP="$ip"
else
    echo hostname $HOSTNAME
    if [ -x "$(which ip 2>/dev/null)" ]; then
        IP_CMD="ip addr"
    else
        IP_CMD="ifconfig"
    fi
    # filter out IPv4 and localhost addresses
    IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
    # if nothing left, try using localhost address, it might work
    if [ -z "$IP" ]; then IP="127.0.0.1"; fi
fi
echo "ip $IP"
echo "port $CEPH_PORT"
# Default CLI binary used by ceph_adm().
[ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph
# Run the ceph CLI against this vstart cluster; when cephx is enabled the
# local keyring is supplied explicitly so auth works before /etc/ceph exists.
ceph_adm() {
    local extra_args=()
    if [ "$cephx" -eq 1 ]; then
        extra_args=(-k "$keyring_fn")
    fi
    prun $SUDO "$CEPH_ADM" -c "$conf_fn" "${extra_args[@]}" "$@"
}
# Incremental mode: only add OSDs to an already-running cluster, then stop.
if [ $inc_osd_num -gt 0 ]; then
    start_osd
    exit
fi
if [ "$new" -eq 1 ]; then
    prepare_conf
fi
if [ $CEPH_NUM_MON -gt 0 ]; then
    start_mon
    # Seed the cluster's central config with vstart-friendly defaults.
    debug echo Populating config ...
    cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[global]
osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
osd_pool_default_min_size = 1
[mon]
mon_osd_reporter_subtree_level = osd
mon_data_avail_warn = 2
mon_data_avail_crit = 1
mon_allow_pool_delete = true
mon_allow_pool_size_one = true
[osd]
osd_scrub_load_threshold = 2000
osd_debug_op_order = true
osd_debug_misdirected_ops = true
osd_copyfrom_max_chunk = 524288
[mds]
mds_debug_frag = true
mds_debug_auth_pins = true
mds_debug_subtrees = true
[mgr]
mgr/telemetry/nag = false
mgr/telemetry/enable = false
EOF
    if [ "$debug" -ne 0 ]; then
        debug echo Setting debug configs ...
        cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
[mgr]
debug_ms = 1
debug_mgr = 20
debug_monc = 20
debug_mon = 20
[osd]
debug_ms = 1
debug_osd = 25
debug_objecter = 20
debug_monc = 20
debug_mgrc = 20
debug_journal = 20
debug_bluestore = 20
debug_bluefs = 20
debug_rocksdb = 20
debug_bdev = 20
debug_reserver = 10
debug_objclass = 20
[mds]
debug_ms = 1
debug_mds = 20
debug_monc = 20
debug_mgrc = 20
mds_debug_scatterstat = true
mds_verify_scatter = true
EOF
    fi
    # cephadm needs to know which subnet the monitors live on
    if [ "$cephadm" -gt 0 ]; then
        debug echo Setting mon public_network ...
        public_network=$(ip route list | grep -w "$IP" | awk '{print $1}')
        ceph_adm config set mon public_network $public_network
    fi
fi
if [ "$ceph_osd" == "crimson-osd" ]; then
    $CEPH_BIN/ceph -c $conf_fn config set osd crimson_seastar_smp $crimson_smp
fi
if [ $CEPH_NUM_MGR -gt 0 ]; then
    start_mgr
fi
# osd
if [ $CEPH_NUM_OSD -gt 0 ]; then
    start_osd
fi
# mds
if [ "$smallmds" -eq 1 ]; then
    wconf <<EOF
[mds]
        mds log max segments = 2
        # Default 'mds cache memory limit' is 1GiB, and here we set it to 100MiB.
        mds cache memory limit = 100M
EOF
fi
if [ $CEPH_NUM_MDS -gt 0 ]; then
    start_mds
    # key with access to all FS
    ceph_adm fs authorize \* "client.fs" / rwp >> "$keyring_fn"
fi
# Don't set max_mds until all the daemons are started, otherwise
# the intended standbys might end up in active roles.
if [ "$CEPH_MAX_MDS" -gt 1 ]; then
    sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
fi
fs=0
for name in a b c d e f g h i j k l m n o p
do
    [ $fs -eq $CEPH_NUM_FS ] && break
    fs=$(($fs + 1))
    if [ "$CEPH_MAX_MDS" -gt 1 ]; then
        ceph_adm fs set "${name}" max_mds "$CEPH_MAX_MDS"
    fi
done
# mgr
# Optionally create a sample 2+2 erasure-coded pool.
if [ "$ec" -eq 1 ]; then
    ceph_adm <<EOF
osd erasure-code-profile set ec-profile m=2 k=2
osd pool create ec erasure ec-profile
EOF
fi
# For each pool name passed in, create a <pool>-cache pool and attach it
# as a writeback cache tier in front of the base pool.
do_cache() {
    while [ -n "$*" ]; do
        p="$1"
        shift
        debug echo "creating cache for pool $p ..."
        ceph_adm <<EOF
osd pool create ${p}-cache
osd tier add $p ${p}-cache
osd tier cache-mode ${p}-cache writeback
osd tier set-overlay $p ${p}-cache
EOF
    done
}
do_cache $cache
# Configure hit-set tracking on pools. Arguments come in (pool, type)
# pairs, e.g. "mypool bloom"; an odd number of arguments would leave
# $type empty for the last pool.
do_hitsets() {
    while [ -n "$*" ]; do
        pool="$1"
        type="$2"
        shift
        shift
        debug echo "setting hit_set on pool $pool type $type ..."
        ceph_adm <<EOF
osd pool set $pool hit_set_type $type
osd pool set $pool hit_set_count 8
osd pool set $pool hit_set_period 30
EOF
    done
}
do_hitsets $hitset
# Create the S3 bucket "nfs-bucket" used by the RGW NFS export, by
# generating and running a small boto script against the local RGW on
# port 80. Relies on $s3_akey/$s3_skey set by do_rgw_create_users.
do_rgw_create_bucket()
{
   # Create RGW Bucket
   local rgw_python_file='rgw-create-bucket.py'
   echo "import boto
import boto.s3.connection
conn = boto.connect_s3(
        aws_access_key_id = '$s3_akey',
        aws_secret_access_key = '$s3_skey',
        host = '$HOSTNAME',
        port = 80,
        is_secure=False,
        calling_format = boto.s3.connection.OrdinaryCallingFormat(),
        )
bucket = conn.create_bucket('nfs-bucket')
print('created new bucket')" > "$CEPH_OUT_DIR/$rgw_python_file"
   prun python $CEPH_OUT_DIR/$rgw_python_file
}
# Create the well-known RGW test users: one generic S3 user ("testid"),
# the three users expected by the ceph/s3-tests suite (including one in
# a separate tenant), and a Swift subuser. Prints the credentials.
do_rgw_create_users()
{
    # Create S3 user
    s3_akey='0555b35654ad1656d804'
    s3_skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
    debug echo "setting up user testid"
    $CEPH_BIN/radosgw-admin user create --uid testid --access-key $s3_akey --secret $s3_skey --display-name 'M. Tester' --email [email protected] -c $conf_fn > /dev/null
    # Create S3-test users
    # See: https://github.com/ceph/s3-tests
    debug echo "setting up s3-test users"
    $CEPH_BIN/radosgw-admin user create \
        --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key ABCDEFGHIJKLMNOPQRST \
        --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
        --display-name youruseridhere \
        --email [email protected] --caps="user-policy=*" -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
        --access-key NOPQRSTUVWXYZABCDEFG \
        --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
        --display-name john.doe \
        --email [email protected] -c $conf_fn > /dev/null
    $CEPH_BIN/radosgw-admin user create \
        --tenant testx \
        --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
        --access-key HIJKLMNOPQRSTUVWXYZA \
        --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
        --display-name tenanteduser \
        --email [email protected] -c $conf_fn > /dev/null
    # Create Swift user
    debug echo "setting up user tester"
    $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null
    echo ""
    echo "S3 User Info:"
    echo "  access key:  $s3_akey"
    echo "  secret key:  $s3_skey"
    echo ""
    echo "Swift User Info:"
    echo "  account   : test"
    echo "  user      : tester"
    echo "  password  : testing"
    echo ""
}
# Start $CEPH_NUM_RGW radosgw daemons on consecutive ports starting at
# $CEPH_RGW_PORT (a trailing 's' on the port enables HTTPS). On a fresh
# cluster also create the test users and optional zone compression.
# Under cephadm the daemons are deployed via the orchestrator instead.
do_rgw()
{
    if [ "$new" -eq 1 ]; then
        do_rgw_create_users
        if [ -n "$rgw_compression" ]; then
            debug echo "setting compression type=$rgw_compression"
            $CEPH_BIN/radosgw-admin zone placement modify -c $conf_fn --rgw-zone=default --placement-id=default-placement --compression=$rgw_compression > /dev/null
        fi
    fi
    if [ -n "$rgw_flight_frontend" ] ;then
        debug echo "starting arrow_flight frontend on first rgw"
    fi
    # Start server
    if [ "$cephadm" -gt 0 ]; then
        ceph_adm orch apply rgw rgwTest
        return
    fi
    RGWDEBUG=""
    if [ "$debug" -ne 0 ]; then
        RGWDEBUG="--debug-rgw=20 --debug-ms=1"
    fi
    local CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT}"
    # a trailing 's' in the port spec (e.g. "8000s") selects HTTPS
    local CEPH_RGW_HTTPS="${CEPH_RGW_PORT: -1}"
    if [[ "${CEPH_RGW_HTTPS}" = "s" ]]; then
        CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT::-1}"
    else
        CEPH_RGW_HTTPS=""
    fi
    RGWSUDO=
    # privileged ports need root
    [ $CEPH_RGW_PORT_NUM -lt 1024 ] && RGWSUDO=sudo
    current_port=$CEPH_RGW_PORT
    # allow only first rgw to start arrow_flight server/port
    local flight_conf=$rgw_flight_frontend
    for n in $(seq 1 $CEPH_NUM_RGW); do
        rgw_name="client.rgw.${current_port}"
        ceph_adm auth get-or-create $rgw_name \
            mon 'allow rw' \
            osd 'allow rwx' \
            mgr 'allow rw' \
            >> "$keyring_fn"
        debug echo start rgw on http${CEPH_RGW_HTTPS}://localhost:${current_port}
        run 'rgw' $current_port $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn \
            --log-file=${CEPH_OUT_DIR}/radosgw.${current_port}.log \
            --admin-socket=${CEPH_OUT_DIR}/radosgw.${current_port}.asok \
            --pid-file=${CEPH_OUT_DIR}/radosgw.${current_port}.pid \
            --rgw_luarocks_location=${CEPH_OUT_DIR}/luarocks \
            ${RGWDEBUG} \
            -n ${rgw_name} \
            "--rgw_frontends=${rgw_frontend} port=${current_port}${CEPH_RGW_HTTPS}${flight_conf:+,arrow_flight}"
        # NOTE(review): $i is never initialized before this increment (it
        # starts at 0 via arithmetic expansion); the 'for n in $(seq ...)'
        # loop already bounds the iteration count, so this break looks
        # redundant -- TODO confirm and simplify.
        i=$(($i + 1))
        [ $i -eq $CEPH_NUM_RGW ] && break
        current_port=$((current_port+1))
        unset flight_conf
    done
}
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
    do_rgw
fi
# Ganesha Daemons
# With cephadm the mgr nfs module deploys ganesha; otherwise run the
# daemons directly via start_ganesha.
if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
    pseudo_path="/cephfs"
    if [ "$cephadm" -gt 0 ]; then
        cluster_id="vstart"
        port="2049"
        prun ceph_adm nfs cluster create $cluster_id
        if [ $CEPH_NUM_MDS -gt 0 ]; then
            prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path $pseudo_path
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
        if [ "$CEPH_NUM_RGW" -gt 0 ]; then
            pseudo_path="/rgw"
            do_rgw_create_bucket
            prun ceph_adm nfs export create rgw --cluster-id $cluster_id --pseudo-path $pseudo_path --bucket "nfs-bucket"
            echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
    else
        start_ganesha
        echo "Mount using: mount -t nfs -o port=<ganesha-port-num> $IP:$pseudo_path mountpoint"
    fi
fi
# Run a container-engine command (the arguments) for the jaeger
# deployment, preferring podman over docker. Removes a previously
# exited 'jaeger' container and only starts a new one when no container
# of that name exists.
docker_service(){
     local service=''
     #prefer podman
     if command -v podman > /dev/null; then
	 service="podman"
     elif pgrep -f docker > /dev/null; then
	 service="docker"
     fi
     if [ -n "$service" ]; then
	  echo "using $service for deploying jaeger..."
	  #check for exited container, remove them and restart container
	  if [ "$($service ps -aq -f status=exited -f name=jaeger)" ]; then
	     $service rm jaeger
	  fi
	  # fix: previously this queried 'podman ps' unconditionally, which
	  # was wrong when docker had been selected above.
	  if [ ! "$($service ps -aq -f name=jaeger)" ]; then
	     $service "$@"
	  fi
     else
	 echo "cannot find docker or podman, please restart service and rerun."
     fi
}
echo ""
# Optionally run the jaeger all-in-one tracing container.
if [ $with_jaeger -eq 1 ]; then
    debug echo "Enabling jaegertracing..."
    docker_service run -d --name jaeger \
        -p 5775:5775/udp \
        -p 6831:6831/udp \
        -p 6832:6832/udp \
        -p 5778:5778 \
        -p 16686:16686 \
        -p 14268:14268 \
        -p 14250:14250 \
        quay.io/jaegertracing/all-in-one
fi
debug echo "vstart cluster complete. Use stop.sh to stop. See out/* (e.g. 'tail -f out/????') for debug output."
echo ""
if [ "$new" -eq 1 ]; then
    if $with_mgr_dashboard; then
        cat <<EOF
dashboard urls: $DASH_URLS
  w/ user/pass: admin / admin
EOF
    fi
    if $with_mgr_restful; then
        cat <<EOF
restful urls: $RESTFUL_URLS
  w/ user/pass: admin / $RESTFUL_SECRET
EOF
    fi
fi
echo ""
# add header to the environment file
{
    echo "#"
    echo "# source this file into your shell to set up the environment."
    echo "# For example:"
    echo "# $ . $CEPH_DIR/vstart_environment.sh"
    echo "#"
} > $CEPH_DIR/vstart_environment.sh
# Append the exports, echoing them to the console as well via tee.
{
    echo "export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON\$PYTHONPATH"
    echo "export LD_LIBRARY_PATH=$CEPH_LIB:\$LD_LIBRARY_PATH"
    echo "export PATH=$CEPH_DIR/bin:\$PATH"
    if [ "$CEPH_DIR" != "$PWD" ]; then
        echo "export CEPH_CONF=$conf_fn"
        echo "export CEPH_KEYRING=$keyring_fn"
    fi
    if [ -n "$CEPHFS_SHELL" ]; then
        echo "alias cephfs-shell=$CEPHFS_SHELL"
    fi
} | tee -a $CEPH_DIR/vstart_environment.sh
# NOTE(review): this line only prints to stdout; it is NOT appended to
# vstart_environment.sh -- confirm whether it was meant to be inside the
# tee block above.
echo "CEPH_DEV=1"
# always keep this section at the very bottom of this file
STRAY_CONF_PATH="/etc/ceph/ceph.conf"
if [ -f "$STRAY_CONF_PATH" -a -n "$conf_fn" -a ! "$conf_fn" -ef "$STRAY_CONF_PATH" ]; then
    echo ""
    echo ""
    echo "WARNING:"
    echo "    Please remove stray $STRAY_CONF_PATH if not needed."
    echo "    Your conf files $conf_fn and $STRAY_CONF_PATH may not be in sync"
    echo "    and may lead to undesired results."
    echo ""
    echo "NOTE:"
    echo "    Remember to restart cluster after removing $STRAY_CONF_PATH"
fi
init_logrotate
| 55,884 | 28.121939 | 176 |
sh
|
null |
ceph-main/src/arch/arm.c
|
#include "acconfig.h"
#include "arch/probe.h"
/* flags we export */
int ceph_arch_neon = 0;
int ceph_arch_aarch64_crc32 = 0;
int ceph_arch_aarch64_pmull = 0;
#include <stdio.h>
#if __linux__
#include <elf.h>
#include <link.h> // ElfW macro
#include <sys/auxv.h>
#if __arm__ || __aarch64__
#include <asm/hwcap.h>
#endif // __arm__
#endif // __linux__
/*
 * Probe ARM CPU features via the ELF auxiliary vector and latch the
 * results into the ceph_arch_* globals. On non-Linux platforms this is
 * a no-op (the flags stay 0). Always returns 0.
 */
int ceph_arch_arm_probe(void)
{
#if __linux__
  unsigned long hwcap = getauxval(AT_HWCAP);
#if __arm__
  ceph_arch_neon = (hwcap & HWCAP_NEON) == HWCAP_NEON;
#elif __aarch64__
  /* on aarch64 NEON is reported as ASIMD; CRC32/PMULL are optional */
  ceph_arch_neon = (hwcap & HWCAP_ASIMD) == HWCAP_ASIMD;
  ceph_arch_aarch64_crc32 = (hwcap & HWCAP_CRC32) == HWCAP_CRC32;
  ceph_arch_aarch64_pmull = (hwcap & HWCAP_PMULL) == HWCAP_PMULL;
#endif
#endif // __linux__
  return 0;
}
| 758 | 18.973684 | 64 |
c
|
null |
ceph-main/src/arch/arm.h
|
#ifndef CEPH_ARCH_ARM_H
#define CEPH_ARCH_ARM_H
#ifdef __cplusplus
extern "C" {
#endif
extern int ceph_arch_neon; /* true if we have ARM NEON or ASIMD abilities */
extern int ceph_arch_aarch64_crc32; /* true if we have AArch64 CRC32/CRC32C abilities */
extern int ceph_arch_aarch64_pmull; /* true if we have AArch64 PMULL abilities */
extern int ceph_arch_arm_probe(void);
#ifdef __cplusplus
}
#endif
#endif
| 416 | 20.947368 | 89 |
h
|
null |
ceph-main/src/arch/intel.c
|
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013,2014 Inktank Storage, Inc.
* Copyright (C) 2014 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <stdio.h>
#include "arch/probe.h"
/* flags we export */
int ceph_arch_intel_pclmul = 0;
int ceph_arch_intel_sse42 = 0;
int ceph_arch_intel_sse41 = 0;
int ceph_arch_intel_ssse3 = 0;
int ceph_arch_intel_sse3 = 0;
int ceph_arch_intel_sse2 = 0;
int ceph_arch_intel_aesni = 0;
#ifdef __x86_64__
#include <cpuid.h>
/* http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits */
#define CPUID_PCLMUL (1 << 1)
#define CPUID_SSE42 (1 << 20)
#define CPUID_SSE41 (1 << 19)
#define CPUID_SSSE3 (1 << 9)
#define CPUID_SSE3 (1)
#define CPUID_SSE2 (1 << 26)
#define CPUID_AESNI (1 << 25)
/*
 * Query CPUID leaf 1 and latch a flag for each SIMD/crypto feature the
 * running CPU advertises. Returns 0 on success, 1 when CPUID leaf 1 is
 * not available.
 */
int ceph_arch_intel_probe(void)
{
  unsigned int eax, ebx, ecx = 0, edx = 0;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return 1;
  /* every feature bit of interest is in ECX, except SSE2 which is in EDX */
  if (ecx & CPUID_PCLMUL)
    ceph_arch_intel_pclmul = 1;
  if (ecx & CPUID_SSE42)
    ceph_arch_intel_sse42 = 1;
  if (ecx & CPUID_SSE41)
    ceph_arch_intel_sse41 = 1;
  if (ecx & CPUID_SSSE3)
    ceph_arch_intel_ssse3 = 1;
  if (ecx & CPUID_SSE3)
    ceph_arch_intel_sse3 = 1;
  if (edx & CPUID_SSE2)
    ceph_arch_intel_sse2 = 1;
  if (ecx & CPUID_AESNI)
    ceph_arch_intel_aesni = 1;
  return 0;
}
#else // __x86_64__
int ceph_arch_intel_probe(void)
{
/* no features */
return 0;
}
#endif // __x86_64__
| 1,883 | 22.259259 | 81 |
c
|
null |
ceph-main/src/arch/intel.h
|
#ifndef CEPH_ARCH_INTEL_H
#define CEPH_ARCH_INTEL_H
#ifdef __cplusplus
extern "C" {
#endif
extern int ceph_arch_intel_pclmul; /* true if we have PCLMUL features */
extern int ceph_arch_intel_sse42; /* true if we have sse 4.2 features */
extern int ceph_arch_intel_sse41; /* true if we have sse 4.1 features */
extern int ceph_arch_intel_ssse3; /* true if we have ssse 3 features */
extern int ceph_arch_intel_sse3; /* true if we have sse 3 features */
extern int ceph_arch_intel_sse2; /* true if we have sse 2 features */
extern int ceph_arch_intel_aesni; /* true if we have aesni features */
extern int ceph_arch_intel_probe(void);
#ifdef __cplusplus
}
#endif
#endif
| 681 | 28.652174 | 73 |
h
|
null |
ceph-main/src/arch/ppc.c
|
/* Copyright (C) 2017 International Business Machines Corp.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include "arch/ppc.h"
#include "arch/probe.h"
/* flags we export */
int ceph_arch_ppc_crc32 = 0;
#include <stdio.h>
#ifdef HAVE_PPC64LE
#include <sys/auxv.h>
#include <asm/cputable.h>
#endif /* HAVE_PPC64LE */
#ifndef PPC_FEATURE2_VEC_CRYPTO
#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
#endif
#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif
/*
 * Detect the POWER8 in-core vector crypto facility (which implies the
 * accelerated CRC32 path) via the AT_HWCAP2 auxval. The flag is reset
 * first so repeated probes stay consistent. Always returns 0.
 */
int ceph_arch_ppc_probe(void)
{
  ceph_arch_ppc_crc32 = 0;

#ifdef HAVE_PPC64LE
  unsigned long hwcap2 = getauxval(AT_HWCAP2);
  if (hwcap2 & PPC_FEATURE2_VEC_CRYPTO)
    ceph_arch_ppc_crc32 = 1;
#endif /* HAVE_PPC64LE */

  return 0;
}
| 889 | 20.707317 | 78 |
c
|
null |
ceph-main/src/arch/ppc.h
|
/* Copyright (C) 2017 International Business Machines Corp.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef CEPH_ARCH_PPC_H
#define CEPH_ARCH_PPC_H
#ifdef __cplusplus
extern "C" {
#endif
extern int ceph_arch_ppc_crc32;
extern int ceph_arch_ppc_probe(void);
#ifdef __cplusplus
}
#endif
#endif
| 540 | 20.64 | 64 |
h
|
null |
ceph-main/src/arch/probe.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "arch/probe.h"
#include "arch/intel.h"
#include "arch/arm.h"
#include "arch/ppc.h"
/*
 * Dispatch to the architecture-specific feature probe exactly once;
 * subsequent calls are no-ops guarded by ceph_arch_probed. Returns 1.
 */
int ceph_arch_probe(void)
{
  if (ceph_arch_probed)
    return 1;
#if defined(__i386__) || defined(__x86_64__)
  ceph_arch_intel_probe();
#elif defined(__arm__) || defined(__aarch64__)
  ceph_arch_arm_probe();
#elif defined(__powerpc__) || defined(__ppc__)
  ceph_arch_ppc_probe();
#endif
  ceph_arch_probed = 1;
  return 1;
}

// do this once using the magic of c++.
// NOTE: relies on dynamic initialization of this global running before
// any user of the feature flags (static init order applies).
int ceph_arch_probed = ceph_arch_probe();
| 603 | 21.37037 | 70 |
cc
|
null |
ceph-main/src/arch/probe.h
|
#ifndef CEPH_ARCH_PROBE_H
#define CEPH_ARCH_PROBE_H
#ifdef __cplusplus
extern "C" {
#endif
extern int ceph_arch_probed; /* non-zero if we've probed features */
extern int ceph_arch_probe(void);
#ifdef __cplusplus
}
#endif
#endif
| 235 | 12.882353 | 69 |
h
|
null |
ceph-main/src/auth/Auth.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_AUTHTYPES_H
#define CEPH_AUTHTYPES_H
#include "Crypto.h"
#include "common/entity_name.h"
// The _MAX values are a bit wonky here because we are overloading the first
// byte of the auth payload to identify both the type of authentication to be
// used *and* the encoding version for the authenticator. So, we define a
// range.
enum {
AUTH_MODE_NONE = 0,
AUTH_MODE_AUTHORIZER = 1,
AUTH_MODE_AUTHORIZER_MAX = 9,
AUTH_MODE_MON = 10,
AUTH_MODE_MON_MAX = 19,
};
/**
 * Per-entity auth database record: the entity's secret key, its
 * capability strings per service, and an optional not-yet-committed
 * replacement key.
 */
struct EntityAuth {
  CryptoKey key;
  std::map<std::string, ceph::buffer::list> caps;
  CryptoKey pending_key;              ///< new but uncommitted key

  void encode(ceph::buffer::list& bl) const {
    __u8 struct_v = 3;
    using ceph::encode;
    encode(struct_v, bl);
    // v2 carried an auid; encode the default as a placeholder for compat
    encode((uint64_t)CEPH_AUTH_UID_DEFAULT, bl);
    encode(key, bl);
    encode(caps, bl);
    encode(pending_key, bl);          // added in v3
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    if (struct_v >= 2) {
      // skip the legacy auid field
      uint64_t old_auid;
      decode(old_auid, bl);
    }
    decode(key, bl);
    decode(caps, bl);
    if (struct_v >= 3) {
      decode(pending_key, bl);
    }
  }
};
WRITE_CLASS_ENCODER(EntityAuth)
// Render an EntityAuth for logging; the pending key is printed only
// when one is actually set.
inline std::ostream& operator<<(std::ostream& out, const EntityAuth& a)
{
  out << "auth(key=" << a.key;
  if (!a.pending_key.empty())
    out << " pending_key=" << a.pending_key;
  return out << ")";
}
/**
 * Capability payload attached to a ticket: either a blanket allow-all
 * flag or an encoded capability blob to be parsed by the service.
 */
struct AuthCapsInfo {
  bool allow_all;                 // true => caps blob is irrelevant
  ceph::buffer::list caps;

  AuthCapsInfo() : allow_all(false) {}

  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 1;
    encode(struct_v, bl);
    // bool is encoded as a single byte for wire stability
    __u8 a = (__u8)allow_all;
    encode(a, bl);
    encode(caps, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    __u8 a;
    decode(a, bl);
    allow_all = (bool)a;
    decode(caps, bl);
  }
};
WRITE_CLASS_ENCODER(AuthCapsInfo)
/*
* The ticket (if properly validated) authorizes the principal use
* services as described by 'caps' during the specified validity
* period.
*/
struct AuthTicket {
  EntityName name;
  uint64_t global_id; /* global instance id */
  utime_t created, renew_after, expires;
  AuthCapsInfo caps;
  __u32 flags;

  AuthTicket() : global_id(0), flags(0){}

  // Set creation/expiry from 'now' and a ttl; renewal is suggested at
  // the ttl midpoint.
  void init_timestamps(utime_t now, double ttl) {
    created = now;
    expires = now;
    expires += ttl;
    renew_after = now;
    renew_after += ttl / 2.0;
  }

  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 2;
    encode(struct_v, bl);
    encode(name, bl);
    encode(global_id, bl);
    // v2 compat: legacy auid slot, always the default value now
    encode((uint64_t)CEPH_AUTH_UID_DEFAULT, bl);
    encode(created, bl);
    encode(expires, bl);
    encode(caps, bl);
    encode(flags, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(name, bl);
    decode(global_id, bl);
    if (struct_v >= 2) {
      // skip the legacy auid field
      uint64_t old_auid;
      decode(old_auid, bl);
    }
    decode(created, bl);
    decode(expires, bl);
    decode(caps, bl);
    decode(flags, bl);
  }
};
WRITE_CLASS_ENCODER(AuthTicket)
/*
* abstract authorizer class
*/
/**
 * Base class for a per-connection authorizer blob ('bl') that a client
 * presents to a service, plus the session key negotiated with it.
 */
struct AuthAuthorizer {
  __u32 protocol;              // CEPH_AUTH_* protocol that produced this
  ceph::buffer::list bl;       // opaque authorizer payload sent to the peer
  CryptoKey session_key;

  explicit AuthAuthorizer(__u32 p) : protocol(p) {}
  virtual ~AuthAuthorizer() {}
  /// validate the server's reply; may also yield a connection secret
  virtual bool verify_reply(ceph::buffer::list::const_iterator& reply,
			    std::string *connection_secret) = 0;
  /// incorporate a server-issued challenge into the authorizer
  virtual bool add_challenge(CephContext *cct,
			     const ceph::buffer::list& challenge) = 0;
};

/// opaque server-side state for an authorizer challenge round-trip
struct AuthAuthorizerChallenge {
  virtual ~AuthAuthorizerChallenge() {}
};
/**
 * Mutable per-connection authentication state shared between the
 * messenger and the auth client/server handlers during the handshake.
 */
struct AuthConnectionMeta {
  uint32_t auth_method = CEPH_AUTH_UNKNOWN;  //< CEPH_AUTH_*

  /// client: initial empty, but populated if server said bad method
  std::vector<uint32_t> allowed_methods;

  int auth_mode = AUTH_MODE_NONE;  ///< AUTH_MODE_*

  int con_mode = 0;  ///< negotiated mode

  bool is_mode_crc() const {
    return con_mode == CEPH_CON_MODE_CRC;
  }
  bool is_mode_secure() const {
    return con_mode == CEPH_CON_MODE_SECURE;
  }

  CryptoKey session_key;           ///< per-ticket key

  // Length of the connection secret required by the negotiated mode:
  // secure mode needs 64 bytes, crc mode none.
  size_t get_connection_secret_length() const {
    switch (con_mode) {
    case CEPH_CON_MODE_CRC:
      return 0;
    case CEPH_CON_MODE_SECURE:
      return 16 * 4;
    }
    return 0;
  }

  std::string connection_secret;   ///< per-connection key

  std::unique_ptr<AuthAuthorizer> authorizer;
  std::unique_ptr<AuthAuthorizerChallenge> authorizer_challenge;

  ///< set if msgr1 peer doesn't support CEPHX_V2
  bool skip_authorizer_challenge = false;
};
/*
* Key management
*/
#define KEY_ROTATE_NUM 3 /* prev, current, next */
/// A rotating service key together with its absolute expiration time.
struct ExpiringCryptoKey {
  CryptoKey key;
  utime_t expiration;

  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 1;
    encode(struct_v, bl);
    encode(key, bl);
    encode(expiration, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(key, bl);
    decode(expiration, bl);
  }
};
WRITE_CLASS_ENCODER(ExpiringCryptoKey)

// Log helper: "<key> expires <time>".
inline std::ostream& operator<<(std::ostream& out, const ExpiringCryptoKey& c)
{
  return out << c.key << " expires " << c.expiration;
}
/**
 * The rotating service-key window: a version-indexed map that is kept
 * at KEY_ROTATE_NUM entries (prev, current, next). The map is ordered
 * by version, so begin() is the oldest key and rbegin() the newest.
 */
struct RotatingSecrets {
  std::map<uint64_t, ExpiringCryptoKey> secrets;
  version_t max_ver;              // version assigned to the newest key

  RotatingSecrets() : max_ver(0) {}

  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 1;
    encode(struct_v, bl);
    encode(secrets, bl);
    encode(max_ver, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(secrets, bl);
    decode(max_ver, bl);
  }

  // Append a new key and drop the oldest entries so at most
  // KEY_ROTATE_NUM remain; returns the new key's version.
  uint64_t add(ExpiringCryptoKey& key) {
    secrets[++max_ver] = key;
    while (secrets.size() > KEY_ROTATE_NUM)
      secrets.erase(secrets.begin());
    return max_ver;
  }

  bool need_new_secrets() const {
    return secrets.size() < KEY_ROTATE_NUM;
  }
  bool need_new_secrets(const utime_t& now) const {
    return secrets.size() < KEY_ROTATE_NUM || current().expiration <= now;
  }

  // NOTE: previous()/current()/next() assume the window is populated
  // (current() dereferences begin()+1, i.e. needs >= 2 entries).
  ExpiringCryptoKey& previous() {
    return secrets.begin()->second;
  }
  ExpiringCryptoKey& current() {
    auto p = secrets.begin();
    ++p;
    return p->second;
  }
  const ExpiringCryptoKey& current() const {
    auto p = secrets.begin();
    ++p;
    return p->second;
  }
  ExpiringCryptoKey& next() {
    return secrets.rbegin()->second;
  }
  bool empty() {
    return secrets.empty();
  }

  void dump();
};
WRITE_CLASS_ENCODER(RotatingSecrets)
/// Abstract lookup interface for entity secrets and rotating service keys.
class KeyStore {
public:
  virtual ~KeyStore() {}
  /// fetch the named entity's secret; false if unknown
  virtual bool get_secret(const EntityName& name, CryptoKey& secret) const = 0;
  /// fetch a specific rotating key for a service; false if unknown
  virtual bool get_service_secret(uint32_t service_id, uint64_t secret_id,
				  CryptoKey& secret) const = 0;
};
// Only cluster daemons (OSD/MDS/MGR) consume the rotating service keys;
// clients and monitors do not.
inline bool auth_principal_needs_rotating_keys(EntityName& name)
{
  switch (name.get_type()) {
  case CEPH_ENTITY_TYPE_OSD:
  case CEPH_ENTITY_TYPE_MDS:
  case CEPH_ENTITY_TYPE_MGR:
    return true;
  default:
    return false;
  }
}
#endif
| 7,619 | 22.8125 | 79 |
h
|
null |
ceph-main/src/auth/AuthAuthorizeHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_AUTHAUTHORIZEHANDLER_H
#define CEPH_AUTHAUTHORIZEHANDLER_H
#include "Auth.h"
#include "include/common_fwd.h"
#include "include/types.h"
#include "common/ceph_mutex.h"
// Different classes of session crypto handling
#define SESSION_CRYPTO_NONE 0
#define SESSION_SYMMETRIC_AUTHENTICATE 1
#define SESSION_SYMMETRIC_ENCRYPT 2
class KeyRing;
/**
 * Server-side interface for validating an authorizer blob received on a
 * new connection, yielding the peer's identity, caps and session keys.
 */
struct AuthAuthorizeHandler {
  virtual ~AuthAuthorizeHandler() {}
  // Returns true if the authorizer is valid; on success fills in the
  // peer's entity name, global id, caps, session key and (optionally)
  // connection secret, and may emit a reply/challenge blob.
  virtual bool verify_authorizer(
    CephContext *cct,
    const KeyStore& keys,
    const ceph::buffer::list& authorizer_data,
    size_t connection_secret_required_len,
    ceph::buffer::list *authorizer_reply,
    EntityName *entity_name,
    uint64_t *global_id,
    AuthCapsInfo *caps_info,
    CryptoKey *session_key,
    std::string *connection_secret,
    std::unique_ptr<AuthAuthorizerChallenge> *challenge) = 0;
  /// one of the SESSION_* crypto classes defined above
  virtual int authorizer_session_crypto() = 0;
};
#endif
| 1,331 | 26.75 | 71 |
h
|
null |
ceph-main/src/auth/AuthClient.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstdint>
#include <vector>
#include "include/buffer_fwd.h"
class AuthConnectionMeta;
class Connection;
class CryptoKey;
/**
 * Client-side callbacks driven by the messenger during the msgr2
 * authentication handshake with a server.
 */
class AuthClient {
public:
  virtual ~AuthClient() {}

  /// Build an authentication request to begin the handshake
  virtual int get_auth_request(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint32_t *method,
    std::vector<uint32_t> *preferred_modes,
    ceph::buffer::list *out) = 0;

  /// Handle server's request to continue the handshake
  virtual int handle_auth_reply_more(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    const ceph::buffer::list& bl,
    ceph::buffer::list *reply) = 0;

  /// Handle server's indication that authentication succeeded
  virtual int handle_auth_done(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint64_t global_id,
    uint32_t con_mode,
    const ceph::buffer::list& bl,
    CryptoKey *session_key,
    std::string *connection_secret) = 0;

  /// Handle server's indication that the previous auth attempt failed
  virtual int handle_auth_bad_method(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    uint32_t old_auth_method,
    int result,
    const std::vector<uint32_t>& allowed_methods,
    const std::vector<uint32_t>& allowed_modes) = 0;
};
| 1,390 | 25.75 | 70 |
h
|
null |
ceph-main/src/auth/AuthClientHandler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include "AuthClientHandler.h"
#include "cephx/CephxClientHandler.h"
#ifdef HAVE_GSSAPI
#include "krb/KrbClientHandler.hpp"
#endif
#include "none/AuthNoneClientHandler.h"
// Factory: instantiate the concrete client-side handler for the given
// auth protocol (CEPH_AUTH_*).  Returns NULL for protocols that are
// unknown or compiled out.
AuthClientHandler*
AuthClientHandler::create(CephContext* cct, int proto,
			  RotatingKeyRing* rkeys)
{
  if (proto == CEPH_AUTH_CEPHX) {
    return new CephxClientHandler(cct, rkeys);
  }
  if (proto == CEPH_AUTH_NONE) {
    return new AuthNoneClientHandler{cct};
  }
#ifdef HAVE_GSSAPI
  if (proto == CEPH_AUTH_GSS) {
    return new KrbClientHandler(cct);
  }
#endif
  return NULL;
}
| 1,005 | 22.395349 | 71 |
cc
|
null |
ceph-main/src/auth/AuthClientHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_AUTHCLIENTHANDLER_H
#define CEPH_AUTHCLIENTHANDLER_H
#include "auth/Auth.h"
#include "include/common_fwd.h"
class RotatingKeyRing;
/**
 * Base class for the per-protocol client auth state machines
 * (cephx, none, krb5).  Instances are obtained via the create()
 * factory below.
 */
class AuthClientHandler {
protected:
  CephContext *cct;
  EntityName name;     // entity we authenticate as (set by init())
  uint64_t global_id;  // id assigned by the auth service; 0 until set
  uint32_t want;       // CEPH_ENTITY_TYPE_* bitmask of tickets wanted
  uint32_t have;       // bitmask of tickets held (maintained by subclasses)
  uint32_t need;       // bitmask of tickets still needed (maintained by subclasses)

public:
  explicit AuthClientHandler(CephContext *cct_)
    : cct(cct_), global_id(0), want(CEPH_ENTITY_TYPE_AUTH), have(0), need(0)
  {}
  virtual ~AuthClientHandler() {}

  virtual AuthClientHandler* clone() const = 0;
  void init(const EntityName& n) { name = n; }

  /// request tickets for 'keys'; AUTH is always implicitly included
  void set_want_keys(__u32 keys) {
    want = keys | CEPH_ENTITY_TYPE_AUTH;
    validate_tickets();
  }

  virtual int get_protocol() const = 0;

  virtual void reset() = 0;
  virtual void prepare_build_request() = 0;
  virtual void build_initial_request(ceph::buffer::list *bl) const {
    // this is empty for methods cephx and none.
  }
  virtual int build_request(ceph::buffer::list& bl) const = 0;
  virtual int handle_response(int ret, ceph::buffer::list::const_iterator& iter,
			      CryptoKey *session_key,
			      std::string *connection_secret) = 0;
  virtual bool build_rotating_request(ceph::buffer::list& bl) const = 0;

  virtual AuthAuthorizer *build_authorizer(uint32_t service_id) const = 0;

  virtual bool need_tickets() = 0;

  virtual void set_global_id(uint64_t id) = 0;

  static AuthClientHandler* create(CephContext* cct, int proto, RotatingKeyRing* rkeys);
protected:
  /// recompute have/need from want; implemented per protocol
  virtual void validate_tickets() = 0;
};
#endif
| 1,947 | 25.324324 | 88 |
h
|
null |
ceph-main/src/auth/AuthMethodList.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <algorithm>
#include "common/debug.h"
#include "include/str_list.h"
#include "AuthMethodList.h"
const static int dout_subsys = ceph_subsys_auth;
// Parse a comma/space-separated protocol list ("cephx, none", ...) into
// CEPH_AUTH_* ids, preserving order.  Unknown names are recorded as
// CEPH_AUTH_UNKNOWN; an empty result falls back to cephx alone.
AuthMethodList::AuthMethodList(CephContext *cct, std::string str)
{
  std::list<std::string> sup_list;
  get_str_list(str, sup_list);
  if (sup_list.empty()) {
    lderr(cct) << "WARNING: empty auth protocol list" << dendl;
  }
  for (const auto& proto_name : sup_list) {
    ldout(cct, 5) << "adding auth protocol: " << proto_name << dendl;

    if (proto_name == "cephx") {
      auth_supported.push_back(CEPH_AUTH_CEPHX);
    } else if (proto_name == "none") {
      auth_supported.push_back(CEPH_AUTH_NONE);
    } else if (proto_name == "gss") {
      auth_supported.push_back(CEPH_AUTH_GSS);
    } else {
      auth_supported.push_back(CEPH_AUTH_UNKNOWN);
      lderr(cct) << "WARNING: unknown auth protocol defined: " << proto_name << dendl;
    }
  }
  if (auth_supported.empty()) {
    lderr(cct) << "WARNING: no auth protocol defined, use 'cephx' by default" << dendl;
    auth_supported.push_back(CEPH_AUTH_CEPHX);
  }
}
bool AuthMethodList::is_supported_auth(int auth_type)
{
return std::find(auth_supported.begin(), auth_supported.end(), auth_type) != auth_supported.end();
}
int AuthMethodList::pick(const std::set<__u32>& supported)
{
for (auto p = supported.rbegin(); p != supported.rend(); ++p)
if (is_supported_auth(*p))
return *p;
return CEPH_AUTH_UNKNOWN;
}
// Drop every occurrence of 'auth_type' from the list.
void AuthMethodList::remove_supported_auth(int auth_type)
{
  // std::list::remove erases all matching elements in a single pass,
  // replacing the manual iterate-and-erase loop.
  auth_supported.remove((__u32)auth_type);
}
| 2,140 | 28.736111 | 100 |
cc
|
null |
ceph-main/src/auth/AuthMethodList.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_AUTHMETHODLIST_H
#define CEPH_AUTHMETHODLIST_H
#include "include/common_fwd.h"
#include "include/int_types.h"
#include <list>
#include <set>
#include <string>
/**
 * Ordered list of authentication protocols (CEPH_AUTH_* ids) parsed
 * from a comma/space-separated config string such as "cephx, none".
 */
class AuthMethodList {
  std::list<__u32> auth_supported;  // CEPH_AUTH_* ids, in configured order
public:
  /// parse 'str'; an empty/unparsable list falls back to cephx
  AuthMethodList(CephContext *cct, std::string str);

  /// true if 'auth_type' appears in the list
  bool is_supported_auth(int auth_type);
  /// pick the highest-valued protocol present in both 'supported' and here
  int pick(const std::set<__u32>& supported);

  const std::list<__u32>& get_supported_set() const {
    return auth_supported;
  }
  /// remove every occurrence of 'auth_type'
  void remove_supported_auth(int auth_type);
};
#endif
| 955 | 21.761905 | 71 |
h
|
null |
ceph-main/src/auth/AuthRegistry.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "AuthRegistry.h"
#include "cephx/CephxAuthorizeHandler.h"
#ifdef HAVE_GSSAPI
#include "krb/KrbAuthorizeHandler.hpp"
#endif
#include "none/AuthNoneAuthorizeHandler.h"
#include "common/ceph_context.h"
#include "common/debug.h"
#include "auth/KeyRing.h"
#define dout_subsys ceph_subsys_auth
#undef dout_prefix
#define dout_prefix *_dout << "AuthRegistry(" << this << ") "
using std::string;
// Register as a config observer so the cached method/mode lists are
// rebuilt whenever a tracked option changes (see handle_conf_change()).
AuthRegistry::AuthRegistry(CephContext *cct)
  : cct(cct)
{
  cct->_conf.add_observer(this);
}
// Unregister from config observation and free the lazily-created,
// cached authorize handlers (see get_handler()).
AuthRegistry::~AuthRegistry()
{
  cct->_conf.remove_observer(this);
  for (auto& [method, handler] : authorize_handlers) {
    delete handler;
  }
}
// Config keys we observe; any change to one of these triggers
// handle_conf_change(), which rebuilds all cached method/mode lists.
const char** AuthRegistry::get_tracked_conf_keys() const
{
  static const char *keys[] = {
    "auth_supported",
    "auth_client_required",
    "auth_cluster_required",
    "auth_service_required",
    "ms_mon_cluster_mode",
    "ms_mon_service_mode",
    "ms_mon_client_mode",
    "ms_cluster_mode",
    "ms_service_mode",
    "ms_client_mode",
    "keyring",
    NULL
  };
  return keys;
}
// md_config_obs_t hook.  The 'changed' set is intentionally ignored:
// all tracked keys feed the same cached state, so we rebuild it all.
void AuthRegistry::handle_conf_change(
  const ConfigProxy& conf,
  const std::set<std::string>& changed)
{
  std::scoped_lock l(lock);
  _refresh_config();
}
// Parse a comma/space-separated protocol list ("cephx", "none", "gss")
// into CEPH_AUTH_* ids in *v, replacing any previous contents.  Unknown
// names are warned about and skipped; *v may legitimately end up empty.
void AuthRegistry::_parse_method_list(const string& s,
				      std::vector<uint32_t> *v)
{
  std::list<std::string> sup_list;
  get_str_list(s, sup_list);
  if (sup_list.empty()) {
    lderr(cct) << "WARNING: empty auth protocol list" << dendl;
  }
  v->clear();
  for (const auto& proto_name : sup_list) {
    ldout(cct, 5) << "adding auth protocol: " << proto_name << dendl;

    if (proto_name == "cephx") {
      v->push_back(CEPH_AUTH_CEPHX);
      continue;
    }
    if (proto_name == "none") {
      v->push_back(CEPH_AUTH_NONE);
      continue;
    }
    if (proto_name == "gss") {
      v->push_back(CEPH_AUTH_GSS);
      continue;
    }
    lderr(cct) << "WARNING: unknown auth protocol defined: " << proto_name << dendl;
  }
  if (v->empty()) {
    lderr(cct) << "WARNING: no auth protocol defined" << dendl;
  }
  ldout(cct,20) << __func__ << " " << s << " -> " << *v << dendl;
}
// Parse a comma/space-separated connection-mode list ("crc", "secure")
// into CEPH_CON_MODE_* ids in *v, replacing any previous contents.
// Unknown names are warned about and skipped; *v may end up empty.
void AuthRegistry::_parse_mode_list(const string& s,
				    std::vector<uint32_t> *v)
{
  std::list<std::string> sup_list;
  get_str_list(s, sup_list);
  if (sup_list.empty()) {
    lderr(cct) << "WARNING: empty auth protocol list" << dendl;
  }
  v->clear();
  for (auto& i : sup_list) {
    ldout(cct, 5) << "adding con mode: " << i << dendl;
    if (i == "crc") {
      v->push_back(CEPH_CON_MODE_CRC);
    } else if (i == "secure") {
      v->push_back(CEPH_CON_MODE_SECURE);
    } else {
      lderr(cct) << "WARNING: unknown connection mode " << i << dendl;
    }
  }
  if (v->empty()) {
    lderr(cct) << "WARNING: no connection modes defined" << dendl;
  }
  ldout(cct,20) << __func__ << " " << s << " -> " << *v << dendl;
}
// Rebuild every cached method/mode list from the current config.
// Caller must hold 'lock'.
//
// 'auth_supported', when set, overrides the three per-role
// auth_{cluster,service,client}_required options.  If cephx appears in
// any list but no keyring can be loaded, cephx is stripped from all
// lists and _no_keyring_disabled_cephx is set so callers can surface
// the condition.
void AuthRegistry::_refresh_config()
{
  if (cct->_conf->auth_supported.size()) {
    _parse_method_list(cct->_conf->auth_supported, &cluster_methods);
    _parse_method_list(cct->_conf->auth_supported, &service_methods);
    _parse_method_list(cct->_conf->auth_supported, &client_methods);
  } else {
    _parse_method_list(cct->_conf->auth_cluster_required, &cluster_methods);
    _parse_method_list(cct->_conf->auth_service_required, &service_methods);
    _parse_method_list(cct->_conf->auth_client_required, &client_methods);
  }
  _parse_mode_list(cct->_conf.get_val<string>("ms_mon_cluster_mode"),
		   &mon_cluster_modes);
  _parse_mode_list(cct->_conf.get_val<string>("ms_mon_service_mode"),
		   &mon_service_modes);
  _parse_mode_list(cct->_conf.get_val<string>("ms_mon_client_mode"),
		   &mon_client_modes);
  _parse_mode_list(cct->_conf.get_val<string>("ms_cluster_mode"),
		   &cluster_modes);
  _parse_mode_list(cct->_conf.get_val<string>("ms_service_mode"),
		   &service_modes);
  _parse_mode_list(cct->_conf.get_val<string>("ms_client_mode"),
		   &client_modes);

  ldout(cct,10) << __func__ << " cluster_methods " << cluster_methods
		<< " service_methods " << service_methods
		<< " client_methods " << client_methods
		<< dendl;
  ldout(cct,10) << __func__ << " mon_cluster_modes " << mon_cluster_modes
		<< " mon_service_modes " << mon_service_modes
		<< " mon_client_modes " << mon_client_modes
		<< "; cluster_modes " << cluster_modes
		<< " service_modes " << service_modes
		<< " client_modes " << client_modes
		<< dendl;

  // if we have no keyring, filter out cephx
  _no_keyring_disabled_cephx = false;
  bool any_cephx = false;
  for (auto *p : {&cluster_methods, &service_methods, &client_methods}) {
    auto q = std::find(p->begin(), p->end(), CEPH_AUTH_CEPHX);
    if (q != p->end()) {
      any_cephx = true;
      break;
    }
  }
  if (any_cephx) {
    KeyRing k;
    int r = k.from_ceph_context(cct);
    if (r == -ENOENT) {
      // keyring file is missing entirely: cephx cannot possibly work
      for (auto *p : {&cluster_methods, &service_methods, &client_methods}) {
	auto q = std::find(p->begin(), p->end(), CEPH_AUTH_CEPHX);
	if (q != p->end()) {
	  p->erase(q);
	  _no_keyring_disabled_cephx = true;
	}
      }
    }
    if (_no_keyring_disabled_cephx) {
      lderr(cct) << "no keyring found at " << cct->_conf->keyring
		 << ", disabling cephx" << dendl;
    }
  }
}
// Fill *methods and *modes (either may be null) with the auth methods
// and connection modes we are willing to use with a peer of
// 'peer_type'.  Which cached list applies depends on both our own
// module type and the peer's type (client<->mon, daemon<->daemon, ...).
void AuthRegistry::get_supported_methods(
  int peer_type,
  std::vector<uint32_t> *methods,
  std::vector<uint32_t> *modes) const
{
  if (methods) {
    methods->clear();
  }
  if (modes) {
    modes->clear();
  }
  std::scoped_lock l(lock);
  switch (cct->get_module_type()) {
  case CEPH_ENTITY_TYPE_CLIENT:
    // i am client
    if (methods) {
      *methods = client_methods;
    }
    if (modes) {
      switch (peer_type) {
      case CEPH_ENTITY_TYPE_MON:
      case CEPH_ENTITY_TYPE_MGR:
	*modes = mon_client_modes;
	break;
      default:
	*modes = client_modes;
      }
    }
    return;
  case CEPH_ENTITY_TYPE_MON:
  case CEPH_ENTITY_TYPE_MGR:
    // i am mon/mgr
    switch (peer_type) {
    case CEPH_ENTITY_TYPE_MON:
    case CEPH_ENTITY_TYPE_MGR:
      // they are mon/mgr
      if (methods) {
	*methods = cluster_methods;
      }
      if (modes) {
	*modes = mon_cluster_modes;
      }
      break;
    default:
      // they are anything but mons
      if (methods) {
	*methods = service_methods;
      }
      if (modes) {
	*modes = mon_service_modes;
      }
    }
    return;
  default:
    // i am a non-mon daemon
    switch (peer_type) {
    case CEPH_ENTITY_TYPE_MON:
    case CEPH_ENTITY_TYPE_MGR:
      // they are a mon daemon
      if (methods) {
	*methods = cluster_methods;
      }
      if (modes) {
	*modes = mon_cluster_modes;
      }
      break;
    case CEPH_ENTITY_TYPE_MDS:
    case CEPH_ENTITY_TYPE_OSD:
      // they are another daemon
      if (methods) {
	*methods = cluster_methods;
      }
      if (modes) {
	*modes = cluster_modes;
      }
      break;
    default:
      // they are a client
      if (methods) {
	*methods = service_methods;
      }
      if (modes) {
	*modes = service_modes;
      }
      break;
    }
  }
}
// True when 'method' is among the auth methods allowed for 'peer_type'.
bool AuthRegistry::is_supported_method(int peer_type, int method) const
{
  std::vector<uint32_t> allowed;
  get_supported_methods(peer_type, &allowed);
  for (uint32_t m : allowed) {
    if (m == (uint32_t)method) {
      return true;
    }
  }
  return false;
}
// True when at least one auth method is enabled for 'peer_type'.
bool AuthRegistry::any_supported_methods(int peer_type) const
{
  std::vector<uint32_t> allowed;
  get_supported_methods(peer_type, &allowed);
  return !allowed.empty();
}
// Fill *modes with the connection modes usable with 'auth_method' when
// talking to a peer of 'peer_type'.
void AuthRegistry::get_supported_modes(
  int peer_type,
  uint32_t auth_method,
  std::vector<uint32_t> *modes) const
{
  std::vector<uint32_t> all_modes;
  get_supported_methods(peer_type, nullptr, &all_modes);
  if (auth_method != CEPH_AUTH_NONE) {
    *modes = all_modes;
    return;
  }
  // AUTH_NONE cannot set up a secure channel, so keep only crc
  modes->clear();
  for (uint32_t mode : all_modes) {
    if (mode == CEPH_CON_MODE_CRC) {
      modes->push_back(mode);
    }
  }
}
// Pick the first of the client's preference-ordered modes that we also
// allow for this peer/method.  Returns CEPH_CON_MODE_UNKNOWN (and logs)
// when there is no overlap.
uint32_t AuthRegistry::pick_mode(
  int peer_type,
  uint32_t auth_method,
  const std::vector<uint32_t>& preferred_modes)
{
  std::vector<uint32_t> allowed_modes;
  get_supported_modes(peer_type, auth_method, &allowed_modes);
  for (uint32_t preferred : preferred_modes) {
    for (uint32_t allowed : allowed_modes) {
      if (preferred == allowed) {
	return preferred;
      }
    }
  }
  ldout(cct,1) << "failed to pick con mode from client's " << preferred_modes
	       << " and our " << allowed_modes << dendl;
  return CEPH_CON_MODE_UNKNOWN;
}
// Return the AuthAuthorizeHandler used to verify authorizers from a
// peer of 'peer_type' with auth 'method'.  Returns nullptr when we are
// a client process, or when 'method' is not enabled for that peer
// class.  Handlers are created lazily, cached in authorize_handlers,
// and freed by the destructor.
AuthAuthorizeHandler *AuthRegistry::get_handler(int peer_type, int method)
{
  std::scoped_lock l{lock};
  ldout(cct,20) << __func__ << " peer_type " << peer_type << " method " << method
		<< " cluster_methods " << cluster_methods
		<< " service_methods " << service_methods
		<< " client_methods " << client_methods
		<< dendl;
  if (cct->get_module_type() == CEPH_ENTITY_TYPE_CLIENT) {
    return nullptr;
  }
  // daemons/mons use cluster_methods for peers of those types,
  // service_methods for everything else (i.e. clients)
  switch (peer_type) {
  case CEPH_ENTITY_TYPE_MON:
  case CEPH_ENTITY_TYPE_MGR:
  case CEPH_ENTITY_TYPE_MDS:
  case CEPH_ENTITY_TYPE_OSD:
    if (std::find(cluster_methods.begin(), cluster_methods.end(), method) ==
	cluster_methods.end()) {
      return nullptr;
    }
    break;
  default:
    if (std::find(service_methods.begin(), service_methods.end(), method) ==
	service_methods.end()) {
      return nullptr;
    }
    break;
  }

  // return the cached handler if we already built one
  auto iter = authorize_handlers.find(method);
  if (iter != authorize_handlers.end()) {
    return iter->second;
  }
  AuthAuthorizeHandler *ah = nullptr;
  switch (method) {
  case CEPH_AUTH_NONE:
    ah = new AuthNoneAuthorizeHandler();
    break;
  case CEPH_AUTH_CEPHX:
    ah = new CephxAuthorizeHandler();
    break;
#ifdef HAVE_GSSAPI
  case CEPH_AUTH_GSS:
    ah = new KrbAuthorizeHandler();
    break;
#endif
  }
  if (ah) {
    authorize_handlers[method] = ah;
  }
  return ah;
}
| 9,647 | 24.796791 | 81 |
cc
|
null |
ceph-main/src/auth/AuthRegistry.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <map>
#include <vector>
#include "AuthAuthorizeHandler.h"
#include "AuthMethodList.h"
#include "common/ceph_mutex.h"
#include "common/ceph_context.h"
#include "common/config_cacher.h"
/**
 * Central registry of the auth methods and connection modes this
 * process will accept, derived from config options and kept current by
 * observing config changes.  Also lazily builds and caches the
 * per-method AuthAuthorizeHandler instances.
 */
class AuthRegistry : public md_config_obs_t {
  CephContext *cct;
  mutable ceph::mutex lock = ceph::make_mutex("AuthRegistry::lock");

  // lazily-built verification handlers, keyed by CEPH_AUTH_* method;
  // owned by this object (deleted in the destructor)
  std::map<int,AuthAuthorizeHandler*> authorize_handlers;

  // set when cephx was configured but removed because no keyring exists
  bool _no_keyring_disabled_cephx = false;

  // CEPH_AUTH_*
  std::vector<uint32_t> cluster_methods;
  std::vector<uint32_t> service_methods;
  std::vector<uint32_t> client_methods;

  // CEPH_CON_MODE_*
  std::vector<uint32_t> mon_cluster_modes;
  std::vector<uint32_t> mon_service_modes;
  std::vector<uint32_t> mon_client_modes;
  std::vector<uint32_t> cluster_modes;
  std::vector<uint32_t> service_modes;
  std::vector<uint32_t> client_modes;

  void _parse_method_list(const std::string& str, std::vector<uint32_t> *v);
  void _parse_mode_list(const std::string& str, std::vector<uint32_t> *v);
  void _refresh_config();

public:
  AuthRegistry(CephContext *cct);
  ~AuthRegistry();

  /// rebuild all cached lists from the current config
  void refresh_config() {
    std::scoped_lock l(lock);
    _refresh_config();
  }

  void get_supported_methods(int peer_type,
			     std::vector<uint32_t> *methods,
			     std::vector<uint32_t> *modes=nullptr) const;
  bool is_supported_method(int peer_type, int method) const;
  bool any_supported_methods(int peer_type) const;

  void get_supported_modes(int peer_type,
			   uint32_t auth_method,
			   std::vector<uint32_t> *modes) const;

  uint32_t pick_mode(int peer_type,
		     uint32_t auth_method,
		     const std::vector<uint32_t>& preferred_modes);

  static bool is_secure_method(uint32_t method) {
    return (method == CEPH_AUTH_CEPHX);
  }
  static bool is_secure_mode(uint32_t mode) {
    return (mode == CEPH_CON_MODE_SECURE);
  }

  AuthAuthorizeHandler *get_handler(int peer_type, int method);

  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const ConfigProxy& conf,
			  const std::set<std::string>& changed) override;

  /// true if cephx was disabled solely because the keyring is missing
  bool no_keyring_disabled_cephx() {
    std::scoped_lock l(lock);
    return _no_keyring_disabled_cephx;
  }
};
| 2,310 | 27.182927 | 76 |
h
|
null |
ceph-main/src/auth/AuthServer.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "AuthRegistry.h"
#include "include/common_fwd.h"
#include <vector>
class Connection;
/**
 * Server-side interface for a Connection's authentication handshake.
 * Wraps an AuthRegistry for method/mode negotiation and delegates the
 * actual request handling to the concrete subclass.
 */
class AuthServer {
public:
  AuthRegistry auth_registry;

  AuthServer(CephContext *cct) : auth_registry(cct) {}
  virtual ~AuthServer() {}

  /// Get authentication methods and connection modes for the given peer type
  virtual void get_supported_auth_methods(
    int peer_type,
    std::vector<uint32_t> *methods,
    std::vector<uint32_t> *modes = nullptr) {
    auth_registry.get_supported_methods(peer_type, methods, modes);
  }
  /// Get support connection modes for the given peer type and auth method
  virtual uint32_t pick_con_mode(
    int peer_type,
    uint32_t auth_method,
    const std::vector<uint32_t>& preferred_modes) {
    return auth_registry.pick_mode(peer_type, auth_method, preferred_modes);
  }
  /// return an AuthAuthorizeHandler for the given peer type and auth method
  AuthAuthorizeHandler *get_auth_authorize_handler(
    int peer_type,
    int auth_method) {
    return auth_registry.get_handler(peer_type, auth_method);
  }
  /// Handle an authentication request on an incoming connection
  virtual int handle_auth_request(
    Connection *con,
    AuthConnectionMeta *auth_meta,
    bool more,           ///< true if this is not the first part of the handshake
    uint32_t auth_method,
    const ceph::buffer::list& bl,
    ceph::buffer::list *reply) = 0;
};
| 1,509 | 28.038462 | 81 |
h
|
null |
ceph-main/src/auth/AuthServiceHandler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "AuthServiceHandler.h"
#include "cephx/CephxServiceHandler.h"
#ifdef HAVE_GSSAPI
#include "krb/KrbServiceHandler.hpp"
#endif
#include "none/AuthNoneServiceHandler.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_auth
// Render a global_id_status_t as its lowercase name; aborts on an
// out-of-range value (all enumerators are covered).
std::ostream& operator<<(std::ostream& os,
			 global_id_status_t global_id_status)
{
  const char *name = nullptr;
  switch (global_id_status) {
  case global_id_status_t::NONE:
    name = "none";
    break;
  case global_id_status_t::NEW_PENDING:
    name = "new_pending";
    break;
  case global_id_status_t::NEW_OK:
    name = "new_ok";
    break;
  case global_id_status_t::NEW_NOT_EXPOSED:
    name = "new_not_exposed";
    break;
  case global_id_status_t::RECLAIM_PENDING:
    name = "reclaim_pending";
    break;
  case global_id_status_t::RECLAIM_OK:
    name = "reclaim_ok";
    break;
  case global_id_status_t::RECLAIM_INSECURE:
    name = "reclaim_insecure";
    break;
  default:
    ceph_abort();
  }
  return os << name;
}
// Begin an auth session for 'entity_name'.  Must be called at most once
// per handler instance (asserted).  Records the entity and global_id,
// then defers to the protocol-specific do_start_session().
int AuthServiceHandler::start_session(const EntityName& entity_name,
				      uint64_t global_id,
				      bool is_new_global_id,
				      ceph::buffer::list *result,
				      AuthCapsInfo *caps)
{
  ceph_assert(!this->entity_name.get_type() && !this->global_id &&
	      global_id_status == global_id_status_t::NONE);

  ldout(cct, 10) << __func__ << " entity_name=" << entity_name
		 << " global_id=" << global_id << " is_new_global_id="
		 << is_new_global_id << dendl;
  this->entity_name = entity_name;
  this->global_id = global_id;

  return do_start_session(is_new_global_id, result, caps);
}
// Factory: instantiate the server-side handler for an auth protocol
// (CEPH_AUTH_*).  Returns nullptr for unknown or compiled-out types.
AuthServiceHandler *get_auth_service_handler(int type, CephContext *cct, KeyServer *ks)
{
  if (type == CEPH_AUTH_CEPHX) {
    return new CephxServiceHandler(cct, ks);
  }
  if (type == CEPH_AUTH_NONE) {
    return new AuthNoneServiceHandler(cct);
  }
#ifdef HAVE_GSSAPI
  if (type == CEPH_AUTH_GSS) {
    return new KrbServiceHandler(cct, ks);
  }
#endif
  return nullptr;
}
| 2,304 | 27.109756 | 87 |
cc
|
null |
ceph-main/src/auth/AuthServiceHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_AUTHSERVICEHANDLER_H
#define CEPH_AUTHSERVICEHANDLER_H
#include <stddef.h> // for NULL
#include <stdint.h> // for uint64_t
#include "common/entity_name.h" // for EntityName
#include "include/common_fwd.h"
#include "include/buffer_fwd.h" // for ceph::buffer::list
class KeyServer;
class CryptoKey;
struct AuthCapsInfo;
// Lifecycle of a client's global_id as tracked by AuthServiceHandler.
enum class global_id_status_t {
  NONE,
  // fresh client (global_id == 0); waiting for CephXAuthenticate
  NEW_PENDING,
  // connected client; new enough to correctly reclaim global_id
  NEW_OK,
  // connected client; unknown whether it can reclaim global_id correctly
  NEW_NOT_EXPOSED,
  // reconnecting client (global_id != 0); waiting for CephXAuthenticate
  RECLAIM_PENDING,
  // reconnected client; correctly reclaimed global_id
  RECLAIM_OK,
  // reconnected client; did not properly prove prior global_id ownership
  RECLAIM_INSECURE
};
std::ostream& operator<<(std::ostream& os,
global_id_status_t global_id_status);
/**
 * Per-session, server-side auth protocol state machine.  Obtained from
 * get_auth_service_handler(); start_session() is called once, then
 * handle_request() for each subsequent client payload.
 */
struct AuthServiceHandler {
protected:
  CephContext *cct;
  EntityName entity_name;  // set by start_session()
  uint64_t global_id = 0;  // set by start_session()
  global_id_status_t global_id_status = global_id_status_t::NONE;

public:
  explicit AuthServiceHandler(CephContext *cct_) : cct(cct_) {}
  virtual ~AuthServiceHandler() { }

  /// record entity/global_id, then run the protocol-specific start
  int start_session(const EntityName& entity_name,
		    uint64_t global_id,
		    bool is_new_global_id,
		    ceph::buffer::list *result,
		    AuthCapsInfo *caps);
  /// process one client payload; may emit a session key and secret
  virtual int handle_request(ceph::buffer::list::const_iterator& indata,
			     size_t connection_secret_required_length,
			     ceph::buffer::list *result,
			     AuthCapsInfo *caps,
			     CryptoKey *session_key,
			     std::string *connection_secret) = 0;

  const EntityName& get_entity_name() { return entity_name; }
  uint64_t get_global_id() { return global_id; }
  global_id_status_t get_global_id_status() { return global_id_status; }

private:
  /// protocol-specific half of start_session()
  virtual int do_start_session(bool is_new_global_id,
			       ceph::buffer::list *result,
			       AuthCapsInfo *caps) = 0;
};
extern AuthServiceHandler *get_auth_service_handler(int type, CephContext *cct, KeyServer *ks);
#endif
| 2,573 | 29.642857 | 95 |
h
|
null |
ceph-main/src/auth/AuthSessionHandler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/debug.h"
#include "AuthSessionHandler.h"
#include "cephx/CephxSessionHandler.h"
#ifdef HAVE_GSSAPI
#include "krb/KrbSessionHandler.hpp"
#endif
#include "none/AuthNoneSessionHandler.h"
#include "common/ceph_crypto.h"
#define dout_subsys ceph_subsys_auth
// Factory for the per-session message handler applied once a session is
// established.  Returns nullptr for cephx without a session key, and
// for unknown or compiled-out protocols.
AuthSessionHandler *get_auth_session_handler(
  CephContext *cct, int protocol,
  const CryptoKey& key,
  uint64_t features)
{

  // Should add code to only print the SHA1 hash of the key, unless in secure debugging mode

#ifndef WITH_SEASTAR
  ldout(cct,10) << "In get_auth_session_handler for protocol " << protocol << dendl;
#endif

  if (protocol == CEPH_AUTH_CEPHX) {
    // if there is no session key, there is no session handler.
    if (key.get_type() == CEPH_CRYPTO_NONE) {
      return nullptr;
    }
    return new CephxSessionHandler(cct, key, features);
  }
  if (protocol == CEPH_AUTH_NONE) {
    return new AuthNoneSessionHandler();
  }
#ifdef HAVE_GSSAPI
  if (protocol == CEPH_AUTH_GSS) {
    return new KrbSessionHandler();
  }
#endif
  return nullptr;
}
| 1,463 | 25.618182 | 92 |
cc
|
null |
ceph-main/src/auth/AuthSessionHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_AUTHSESSIONHANDLER_H
#define CEPH_AUTHSESSIONHANDLER_H
#include "include/common_fwd.h"
#include "include/types.h"
#include "Auth.h"
#define SESSION_SIGNATURE_FAILURE -1
// Defines the security applied to ongoing messages in a session, once the session is established. PLR
class Message;
// Interface for signing outgoing messages and verifying the signatures
// of incoming ones within an established session.
struct AuthSessionHandler {
  virtual ~AuthSessionHandler() = default;
  virtual int sign_message(Message *message) = 0;
  virtual int check_message_signature(Message *message) = 0;
};
// No-op handler: both signing and verification always succeed.
struct DummyAuthSessionHandler : AuthSessionHandler {
  int sign_message(Message*) final {
    return 0;
  }
  int check_message_signature(Message*) final {
    return 0;
  }
};

// Thrown by session handlers when a payload fails to decrypt.
struct DecryptionError : public std::exception {};
extern AuthSessionHandler *get_auth_session_handler(
CephContext *cct, int protocol,
const CryptoKey& key,
uint64_t features);
#endif
| 1,290 | 23.826923 | 102 |
h
|
null |
ceph-main/src/auth/Crypto.cc
|
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <array>
#include <sstream>
#include <limits>
#include <fcntl.h>
#include <openssl/aes.h>
#include "Crypto.h"
#include "include/ceph_assert.h"
#include "common/Clock.h"
#include "common/armor.h"
#include "common/ceph_context.h"
#include "common/ceph_crypto.h"
#include "common/hex.h"
#include "common/safe_io.h"
#include "include/ceph_fs.h"
#include "include/compat.h"
#include "common/Formatter.h"
#include "common/debug.h"
#include <errno.h>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
using std::ostringstream;
using std::string;
using ceph::bufferlist;
using ceph::bufferptr;
using ceph::Formatter;
// use getentropy() if available. it uses the same source of randomness
// as /dev/urandom without the filesystem overhead
#ifdef HAVE_GETENTROPY
#include <unistd.h>
// Probe whether getentropy(3) is usable here.  Returns false when the
// kernel lacks the syscall (ENOSYS) or it is blocked (EPERM, e.g. by
// seccomp); any other failure is treated as fatal.
static bool getentropy_works()
{
  char buf;
  auto ret = TEMP_FAILURE_RETRY(::getentropy(&buf, sizeof(buf)));
  if (ret == 0) {
    return true;
  } else if (errno == ENOSYS || errno == EPERM) {
    return false;
  } else {
    throw std::system_error(errno, std::system_category());
  }
}
// fd == -1 means "use getentropy()"; otherwise fd is an open
// /dev/urandom descriptor used as the fallback source.
CryptoRandom::CryptoRandom() : fd(getentropy_works() ? -1 : open_urandom())
{}

CryptoRandom::~CryptoRandom()
{
  if (fd >= 0) {
    VOID_TEMP_FAILURE_RETRY(::close(fd));
  }
}
// Fill 'buf' with 'len' bytes of randomness, preferring getentropy()
// (fd == -1) and falling back to the /dev/urandom fd opened by the
// constructor.  Throws std::system_error on failure.
void CryptoRandom::get_bytes(char *buf, int len)
{
  if (unlikely(fd >= 0)) {
    // safe_read_exact() reports failure via a negative errno *return
    // value* and does not necessarily set errno, so take the code from
    // the return value -- matching the plain-urandom implementation of
    // this function below.  (The old code threw errno here, which could
    // be stale.)
    auto ret = safe_read_exact(fd, buf, len);
    if (ret < 0) {
      throw std::system_error(-ret, std::system_category());
    }
  } else {
    // getentropy() reads up to 256 bytes per call; it returns -1 with
    // errno set on failure.
    assert(len <= 256);
    auto ret = TEMP_FAILURE_RETRY(::getentropy(buf, len));
    if (ret < 0) {
      throw std::system_error(errno, std::system_category());
    }
  }
}
#elif defined(_WIN32) // !HAVE_GETENTROPY
#include <bcrypt.h>
// Windows build: no fd is needed; randomness comes from BCryptGenRandom.
CryptoRandom::CryptoRandom() : fd(0) {}

CryptoRandom::~CryptoRandom() = default;

// Fill 'buf' with 'len' bytes from the system-preferred RNG; throws
// std::system_error on a non-zero NTSTATUS.
void CryptoRandom::get_bytes(char *buf, int len)
{
  auto ret = BCryptGenRandom (
    NULL,
    (unsigned char*)buf,
    len,
    BCRYPT_USE_SYSTEM_PREFERRED_RNG);
  if (ret != 0) {
    throw std::system_error(ret, std::system_category());
  }
}
#else // !HAVE_GETENTROPY && !_WIN32
// open /dev/urandom once on construction and reuse the fd for all reads
// open /dev/urandom once on construction and reuse the fd for all reads
CryptoRandom::CryptoRandom()
  : fd{open_urandom()}
{
  if (fd < 0) {
    throw std::system_error(errno, std::system_category());
  }
}

CryptoRandom::~CryptoRandom()
{
  VOID_TEMP_FAILURE_RETRY(::close(fd));
}

// Fill 'buf' with 'len' bytes from /dev/urandom.  safe_read_exact()
// returns a negative errno value on failure, hence -ret below.
void CryptoRandom::get_bytes(char *buf, int len)
{
  auto ret = safe_read_exact(fd, buf, len);
  if (ret < 0) {
    throw std::system_error(-ret, std::system_category());
  }
}
#endif
// Open /dev/urandom (close-on-exec); throws std::system_error on failure.
int CryptoRandom::open_urandom()
{
  int fd = TEMP_FAILURE_RETRY(::open("/dev/urandom", O_CLOEXEC|O_RDONLY));
  if (fd < 0) {
    throw std::system_error(errno, std::system_category());
  }
  return fd;
}
// ---------------------------------------------------
// fallback implementation of the bufferlist-free
// interface.
// Fallback slice-based encrypt: wraps the input in a bufferlist,
// delegates to the virtual bufferlist overload, and copies at most
// out.max_length bytes of ciphertext into out.buf.  Returns the number
// of bytes written; throws std::runtime_error if the bufferlist-based
// implementation reports an error.
std::size_t CryptoKeyHandler::encrypt(
  const CryptoKeyHandler::in_slice_t& in,
  const CryptoKeyHandler::out_slice_t& out) const
{
  ceph::bufferptr inptr(reinterpret_cast<const char*>(in.buf), in.length);
  ceph::bufferlist plaintext;
  plaintext.append(std::move(inptr));

  ceph::bufferlist ciphertext;
  std::string error;
  const int ret = encrypt(plaintext, ciphertext, &error);
  if (ret != 0 || !error.empty()) {
    throw std::runtime_error(std::move(error));
  }

  // we need to specify the template parameter explicitly as ::length()
  // returns unsigned int, not size_t.
  const auto todo_len = \
    std::min<std::size_t>(ciphertext.length(), out.max_length);
  memcpy(out.buf, ciphertext.c_str(), todo_len);

  return todo_len;
}
// Fallback slice-based decrypt, mirroring encrypt() above: delegates to
// the virtual bufferlist overload and copies at most out.max_length
// bytes of plaintext into out.buf.  Returns the number of bytes
// written; throws std::runtime_error on error.
std::size_t CryptoKeyHandler::decrypt(
  const CryptoKeyHandler::in_slice_t& in,
  const CryptoKeyHandler::out_slice_t& out) const
{
  ceph::bufferptr inptr(reinterpret_cast<const char*>(in.buf), in.length);
  ceph::bufferlist ciphertext;
  ciphertext.append(std::move(inptr));

  ceph::bufferlist plaintext;
  std::string error;
  const int ret = decrypt(ciphertext, plaintext, &error);
  if (ret != 0 || !error.empty()) {
    throw std::runtime_error(std::move(error));
  }

  // we need to specify the template parameter explicitly as ::length()
  // returns unsigned int, not size_t.
  const auto todo_len = \
    std::min<std::size_t>(plaintext.length(), out.max_length);
  memcpy(out.buf, plaintext.c_str(), todo_len);

  return todo_len;
}
// HMAC-SHA256 over all of 'in', keyed with this handler's secret.
// Feeds the bufferlist segment by segment so it need not be flattened.
sha256_digest_t CryptoKeyHandler::hmac_sha256(
  const ceph::bufferlist& in) const
{
  TOPNSPC::crypto::HMACSHA256 hmac((const unsigned char*)secret.c_str(), secret.length());

  for (const auto& bptr : in.buffers()) {
    hmac.Update((const unsigned char *)bptr.c_str(), bptr.length());
  }

  sha256_digest_t ret;
  hmac.Final(ret.v);

  return ret;
}
// ---------------------------------------------------
// Key handler for CEPH_CRYPTO_NONE: "encryption" is the identity
// transform (output == input), with no padding or block alignment.
class CryptoNoneKeyHandler : public CryptoKeyHandler {
public:
  CryptoNoneKeyHandler()
    : CryptoKeyHandler(CryptoKeyHandler::BLOCK_SIZE_0B()) {
  }

  using CryptoKeyHandler::encrypt;
  using CryptoKeyHandler::decrypt;

  int encrypt(const bufferlist& in,
	      bufferlist& out, std::string *error) const override {
    out = in;
    return 0;
  }
  int decrypt(const bufferlist& in,
	      bufferlist& out, std::string *error) const override {
    out = in;
    return 0;
  }
};
// CryptoHandler for the 'none' type: key creation/validation are no-ops
// and the key handler passes data through unchanged.
class CryptoNone : public CryptoHandler {
public:
  CryptoNone() { }
  ~CryptoNone() override {}
  int get_type() const override {
    return CEPH_CRYPTO_NONE;
  }
  int create(CryptoRandom *random, bufferptr& secret) override {
    return 0;
  }
  int validate_secret(const bufferptr& secret) override {
    return 0;
  }
  CryptoKeyHandler *get_key_handler(const bufferptr& secret, string& error) override {
    return new CryptoNoneKeyHandler;
  }
};
// ---------------------------------------------------
// CryptoHandler for AES (AES-128-CBC); the heavy lifting lives in
// CryptoAESKeyHandler, created by get_key_handler().
class CryptoAES : public CryptoHandler {
public:
  CryptoAES() { }
  ~CryptoAES() override {}
  int get_type() const override {
    return CEPH_CRYPTO_AES;
  }
  int create(CryptoRandom *random, bufferptr& secret) override;
  int validate_secret(const bufferptr& secret) override;
  CryptoKeyHandler *get_key_handler(const bufferptr& secret, string& error) override;
};
// when we say AES, we mean AES-128
static constexpr const std::size_t AES_KEY_LEN{16};
static constexpr const std::size_t AES_BLOCK_LEN{16};
// AES-128-CBC key handler built on OpenSSL's low-level AES API (not EVP;
// see the performance note in encrypt()). The IV is the compile-time
// constant CEPH_AES_IV, re-initialized for every operation. Padding is
// PKCS#7; decryption deliberately never reports why it failed, to avoid
// acting as a padding oracle.
class CryptoAESKeyHandler : public CryptoKeyHandler {
  AES_KEY enc_key;
  AES_KEY dec_key;
public:
  CryptoAESKeyHandler()
    : CryptoKeyHandler(CryptoKeyHandler::BLOCK_SIZE_16B()) {
  }
  // Expand the raw secret into OpenSSL round-key schedules for both
  // directions. Returns 0 on success, -1 with a message in `err`.
  int init(const bufferptr& s, ostringstream& err) {
    secret = s;
    const int enc_key_ret = \
      AES_set_encrypt_key((const unsigned char*)secret.c_str(),
			  AES_KEY_LEN * CHAR_BIT, &enc_key);
    if (enc_key_ret != 0) {
      err << "cannot set OpenSSL encrypt key for AES: " << enc_key_ret;
      return -1;
    }
    const int dec_key_ret = \
      AES_set_decrypt_key((const unsigned char*)secret.c_str(),
			  AES_KEY_LEN * CHAR_BIT, &dec_key);
    if (dec_key_ret != 0) {
      err << "cannot set OpenSSL decrypt key for AES: " << dec_key_ret;
      return -1;
    }
    return 0;
  }
  // CBC-encrypt `in` (with PKCS#7 padding appended) into `out`.
  // Always succeeds (returns 0); `error` is unused.
  int encrypt(const ceph::bufferlist& in,
	      ceph::bufferlist& out,
	      std::string* /* unused */) const override {
    // we need to take into account the PKCS#7 padding. There *always* will
    // be at least one byte of padding. This stays even to input aligned to
    // AES_BLOCK_LEN. Otherwise we would face ambiguities during decryption.
    // To exemplify:
    //   16 + p2align(10, 16) -> 16
    //   16 + p2align(16, 16) -> 32 including 16 bytes for padding.
    ceph::bufferptr out_tmp{static_cast<unsigned>(
      AES_BLOCK_LEN + p2align<std::size_t>(in.length(), AES_BLOCK_LEN))};
    // let's pad the data
    std::uint8_t pad_len = out_tmp.length() - in.length();
    ceph::bufferptr pad_buf{pad_len};
    // FIPS zeroization audit 20191115: this memset is not intended to
    // wipe out a secret after use.
    memset(pad_buf.c_str(), pad_len, pad_len);
    // form contiguous buffer for block cipher. The ctor copies shallowly.
    ceph::bufferlist incopy(in);
    incopy.append(std::move(pad_buf));
    const auto in_buf = reinterpret_cast<unsigned char*>(incopy.c_str());
    // reinitialize IV each time. It might be unnecessary depending on
    // actual implementation but at the interface layer we are obliged
    // to deliver IV as non-const.
    static_assert(strlen_ct(CEPH_AES_IV) == AES_BLOCK_LEN);
    unsigned char iv[AES_BLOCK_LEN];
    memcpy(iv, CEPH_AES_IV, AES_BLOCK_LEN);
    // we aren't using EVP because of performance concerns. Profiling
    // shows the cost is quite high. Endianness might be an issue.
    // However, as they would affect Cephx, any fallout should pop up
    // rather early, hopefully.
    AES_cbc_encrypt(in_buf, reinterpret_cast<unsigned char*>(out_tmp.c_str()),
		    out_tmp.length(), &enc_key, iv, AES_ENCRYPT);
    out.append(out_tmp);
    return 0;
  }
  // CBC-decrypt `in` into `out`, stripping PKCS#7 padding.
  // Returns 0 on success, -1 only for a length that cannot be valid
  // ciphertext; bad padding is silently clamped (oracle avoidance).
  int decrypt(const ceph::bufferlist& in,
	      ceph::bufferlist& out,
	      std::string* /* unused */) const override {
    // PKCS#7 padding enlarges even empty plain-text to take 16 bytes.
    if (in.length() < AES_BLOCK_LEN || in.length() % AES_BLOCK_LEN) {
      return -1;
    }
    // needed because of .c_str() on const. It's a shallow copy.
    ceph::bufferlist incopy(in);
    const auto in_buf = reinterpret_cast<unsigned char*>(incopy.c_str());
    // make a local, modifiable copy of IV.
    static_assert(strlen_ct(CEPH_AES_IV) == AES_BLOCK_LEN);
    unsigned char iv[AES_BLOCK_LEN];
    memcpy(iv, CEPH_AES_IV, AES_BLOCK_LEN);
    ceph::bufferptr out_tmp{in.length()};
    AES_cbc_encrypt(in_buf, reinterpret_cast<unsigned char*>(out_tmp.c_str()),
		    in.length(), &dec_key, iv, AES_DECRYPT);
    // BE CAREFUL: we cannot expose any single bit of information about
    // the cause of failure. Otherwise we'll face padding oracle attack.
    // See: https://en.wikipedia.org/wiki/Padding_oracle_attack.
    const auto pad_len = \
      std::min<std::uint8_t>(out_tmp[in.length() - 1], AES_BLOCK_LEN);
    out_tmp.set_length(in.length() - pad_len);
    out.append(std::move(out_tmp));
    return 0;
  }
  // Slice-based encrypt. With out.buf == nullptr, returns the required
  // output size; otherwise encrypts main body + padded tail block and
  // returns the number of bytes produced.
  std::size_t encrypt(const in_slice_t& in,
		      const out_slice_t& out) const override {
    if (out.buf == nullptr) {
      // 16 + p2align(10, 16) -> 16
      // 16 + p2align(16, 16) -> 32
      return AES_BLOCK_LEN + p2align<std::size_t>(in.length, AES_BLOCK_LEN);
    }
    // how many bytes of in.buf hang outside the alignment boundary and how
    // much padding we need.
    //   length = 23 -> tail_len = 7, pad_len = 9
    //   length = 32 -> tail_len = 0, pad_len = 16
    const std::uint8_t tail_len = in.length % AES_BLOCK_LEN;
    const std::uint8_t pad_len = AES_BLOCK_LEN - tail_len;
    static_assert(std::numeric_limits<std::uint8_t>::max() > AES_BLOCK_LEN);
    std::array<unsigned char, AES_BLOCK_LEN> last_block;
    memcpy(last_block.data(), in.buf + in.length - tail_len, tail_len);
    // FIPS zeroization audit 20191115: this memset is not intended to
    // wipe out a secret after use.
    memset(last_block.data() + tail_len, pad_len, pad_len);
    // need a local copy because AES_cbc_encrypt takes `iv` as non-const.
    // Useful because it allows us to encrypt in two steps: main + tail.
    static_assert(strlen_ct(CEPH_AES_IV) == AES_BLOCK_LEN);
    std::array<unsigned char, AES_BLOCK_LEN> iv;
    memcpy(iv.data(), CEPH_AES_IV, AES_BLOCK_LEN);
    const std::size_t main_encrypt_size = \
      std::min(in.length - tail_len, out.max_length);
    AES_cbc_encrypt(in.buf, out.buf, main_encrypt_size, &enc_key, iv.data(),
		    AES_ENCRYPT);
    const std::size_t tail_encrypt_size = \
      std::min(AES_BLOCK_LEN, out.max_length - main_encrypt_size);
    AES_cbc_encrypt(last_block.data(), out.buf + main_encrypt_size,
		    tail_encrypt_size, &enc_key, iv.data(), AES_ENCRYPT);
    return main_encrypt_size + tail_encrypt_size;
  }
  // Slice-based decrypt. With out.buf == nullptr, returns the required
  // output size (an upper bound; padding is not subtracted). Throws on
  // misaligned input or an undersized output buffer.
  std::size_t decrypt(const in_slice_t& in,
		      const out_slice_t& out) const override {
    if (in.length % AES_BLOCK_LEN != 0 || in.length < AES_BLOCK_LEN) {
      throw std::runtime_error("input not aligned to AES_BLOCK_LEN");
    } else if (out.buf == nullptr) {
      // essentially it would be possible to decrypt into a buffer that
      // doesn't include space for any PKCS#7 padding. We don't do that
      // for the sake of performance and simplicity.
      return in.length;
    } else if (out.max_length < in.length) {
      throw std::runtime_error("output buffer too small");
    }
    static_assert(strlen_ct(CEPH_AES_IV) == AES_BLOCK_LEN);
    std::array<unsigned char, AES_BLOCK_LEN> iv;
    memcpy(iv.data(), CEPH_AES_IV, AES_BLOCK_LEN);
    AES_cbc_encrypt(in.buf, out.buf, in.length, &dec_key, iv.data(),
		    AES_DECRYPT);
    // NOTE: we aren't handling partial decrypt. PKCS#7 padding must be
    // at the end. If it's malformed, don't say a word to avoid risk of
    // having an oracle. All we need to ensure is valid buffer boundary.
    const auto pad_len = \
      std::min<std::uint8_t>(out.buf[in.length - 1], AES_BLOCK_LEN);
    return in.length - pad_len;
  }
};
// ------------------------------------------------------------
// Draw a fresh 128-bit AES key from the cryptographic RNG.
int CryptoAES::create(CryptoRandom *random, bufferptr& secret)
{
  bufferptr key(AES_KEY_LEN);
  random->get_bytes(key.c_str(), key.length());
  secret = std::move(key);
  return 0;
}
// A secret must hold at least one full AES-128 key's worth of bytes.
int CryptoAES::validate_secret(const bufferptr& secret)
{
  return secret.length() < AES_KEY_LEN ? -EINVAL : 0;
}
// Build an AES key handler for `secret`. On failure, returns nullptr and
// stores the reason in `error`.
CryptoKeyHandler *CryptoAES::get_key_handler(const bufferptr& secret,
					     string& error)
{
  auto *kh = new CryptoAESKeyHandler;
  ostringstream oss;
  if (kh->init(secret, oss) < 0) {
    error = oss.str();
    delete kh;
    return nullptr;
  }
  return kh;
}
// --
// ---------------------------------------------------
// Wire format: type, creation time, 16-bit secret length, raw secret bytes.
void CryptoKey::encode(bufferlist& bl) const
{
  using ceph::encode;
  encode(type, bl);
  encode(created, bl);
  __u16 secret_len = secret.length();
  encode(secret_len, bl);
  bl.append(secret);
}
// Inverse of encode(); throws malformed_input if the secret cannot be
// installed (unknown type or invalid key material).
void CryptoKey::decode(bufferlist::const_iterator& bl)
{
  using ceph::decode;
  decode(type, bl);
  decode(created, bl);
  __u16 secret_len;
  decode(secret_len, bl);
  // deep-copy so the secret owns its memory independently of the input
  bufferptr sec;
  bl.copy_deep(secret_len, sec);
  if (_set_secret(type, sec) < 0)
    throw ceph::buffer::malformed_input("malformed secret");
}
int CryptoKey::set_secret(int type, const bufferptr& s, utime_t c)
{
int r = _set_secret(type, s);
if (r < 0)
return r;
this->created = c;
return 0;
}
// Validate `s` for type `t`, build its key handler, and commit type and
// secret only on success. An empty secret clears the key.
// Returns 0, -EOPNOTSUPP (unknown type), -EIO (handler creation failed),
// or the validation error.
int CryptoKey::_set_secret(int t, const bufferptr& s)
{
  if (s.length() == 0) {
    secret = s;
    ckh.reset();
    return 0;
  }

  CryptoHandler *ch = CryptoHandler::create(t);
  if (!ch) {
    return -EOPNOTSUPP;
  }
  int ret = ch->validate_secret(s);
  if (ret == 0) {
    string error;
    ckh.reset(ch->get_key_handler(s, error));
    if (!error.empty()) {
      ret = -EIO;
    }
  }
  delete ch;
  if (ret < 0) {
    return ret;
  }
  type = t;
  secret = s;
  return 0;
}
// Generate a brand-new key of crypto type `t` and install it, stamping
// `created` with the current time. Returns 0 on success, -EOPNOTSUPP for
// an unknown type, or the handler's error.
// NOTE(review): the lderr() guard suggests cct may be null, yet
// cct->random() below would then dereference null — confirm callers
// always pass a valid cct.
int CryptoKey::create(CephContext *cct, int t)
{
  CryptoHandler *ch = CryptoHandler::create(t);
  if (!ch) {
    if (cct)
      lderr(cct) << "ERROR: cct->get_crypto_handler(type=" << t << ") returned NULL" << dendl;
    return -EOPNOTSUPP;
  }
  bufferptr s;
  int r = ch->create(cct->random(), s);
  delete ch;
  if (r < 0)
    return r;
  r = _set_secret(t, s);
  if (r < 0)
    return r;
  created = ceph_clock_now();
  return r;
}
// Human-readable representation: keys are always shown base64-encoded.
void CryptoKey::print(std::ostream &out) const
{
  out << encode_base64();
}
// Render the raw secret bytes as a hex string into `s`.
//
// Fixes two defects of the previous version: it used a C variable-length
// array (`char buf[len]`), which is not standard C++, and for an empty
// secret it created a zero-length buffer that was handed to hex2str() and
// then read back uninitialized. A std::string buffer is standard and
// always NUL-terminated.
void CryptoKey::to_str(std::string& s) const
{
  if (secret.length() == 0) {
    s.clear();
    return;
  }
  // hex needs 2 output chars per input byte plus a terminator; keep the
  // original 4x sizing for headroom.
  const int len = secret.length() * 4;
  std::string buf(len, '\0');
  hex2str(secret.c_str(), secret.length(), &buf[0], len);
  s = buf.c_str();  // truncate at the NUL hex2str wrote
}
// Emit the key as a formatted (e.g. JSON) object section named `label`,
// with a single base64-encoded "key" field, and flush it into `bl`.
void CryptoKey::encode_formatted(string label, Formatter *f, bufferlist &bl)
{
  f->open_object_section(label.c_str());
  f->dump_string("key", encode_base64());
  f->close_section();
  f->flush(bl);
}
// Append the base64 form of the key to `bl` with no surrounding structure.
void CryptoKey::encode_plaintext(bufferlist &bl)
{
  bl.append(encode_base64());
}
// ------------------
// Factory for the supported crypto algorithms; unknown types yield nullptr.
// The caller owns (and must delete) the returned handler.
CryptoHandler *CryptoHandler::create(int type)
{
  switch (type) {
  case CEPH_CRYPTO_NONE:
    return new CryptoNone;
  case CEPH_CRYPTO_AES:
    return new CryptoAES;
  }
  return nullptr;
}
#pragma clang diagnostic pop
#pragma GCC diagnostic pop
| 16,749 | 26.191558 | 94 |
cc
|
null |
ceph-main/src/auth/Crypto.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_AUTH_CRYPTO_H
#define CEPH_AUTH_CRYPTO_H
#include "include/common_fwd.h"
#include "include/types.h"
#include "include/utime.h"
#include "include/buffer.h"
#include <string>
class CryptoKeyContext;
namespace ceph { class Formatter; }
/*
* Random byte stream generator suitable for cryptographic use
*/
class CryptoRandom {
public:
  CryptoRandom(); // throws on failure
  ~CryptoRandom();
  /// copy up to 256 random bytes into the given buffer. throws on failure
  void get_bytes(char *buf, int len);
private:
  static int open_urandom();
  // descriptor for the entropy source opened at construction time
  // NOTE(review): presumably /dev/urandom per open_urandom() — the
  // definition is not visible here.
  const int fd;
};
/*
* some per-key context that is specific to a particular crypto backend
*/
// Abstract per-key crypto context. Concrete backends (none, AES) expand
// the secret once here so per-operation work stays cheap.
class CryptoKeyHandler {
public:
  // The maximum size of a single block for all descendants of the class.
  static constexpr std::size_t MAX_BLOCK_SIZE {16};
  // A descendant picks one of these and passes it to the ctor template.
  typedef std::integral_constant<std::size_t,  0> BLOCK_SIZE_0B;
  typedef std::integral_constant<std::size_t, 16> BLOCK_SIZE_16B;
  // Read-only input span for the slice-based interface.
  struct in_slice_t {
    const std::size_t length;
    const unsigned char* const buf;
  };
  // Writable output span; `buf == nullptr` asks for a size estimate.
  struct out_slice_t {
    const std::size_t max_length;
    unsigned char* const buf;
  };
  ceph::bufferptr secret;
  template <class BlockSizeT>
  CryptoKeyHandler(BlockSizeT) {
    // compile-time guard: no backend may exceed MAX_BLOCK_SIZE
    static_assert(BlockSizeT::value <= MAX_BLOCK_SIZE);
  }
  virtual ~CryptoKeyHandler() {}
  virtual int encrypt(const ceph::buffer::list& in,
		      ceph::buffer::list& out, std::string *error) const = 0;
  virtual int decrypt(const ceph::buffer::list& in,
		      ceph::buffer::list& out, std::string *error) const = 0;
  // TODO: provide nullptr in the out::buf to get/estimate size requirements?
  // Or maybe dedicated methods?
  virtual std::size_t encrypt(const in_slice_t& in,
			      const out_slice_t& out) const;
  virtual std::size_t decrypt(const in_slice_t& in,
			      const out_slice_t& out) const;
  // HMAC-SHA256 of `in` keyed with this handler's secret.
  sha256_digest_t hmac_sha256(const ceph::bufferlist& in) const;
};
/*
* match encoding of struct ceph_secret
*/
// A typed secret key plus its cached backend handler; matches the wire
// encoding of struct ceph_secret.
//
// Fix: the slice-based decrypt() below previously forwarded to
// ckh->encrypt(), silently encrypting when asked to decrypt; it now calls
// ckh->decrypt().
class CryptoKey {
protected:
  __u16 type;
  utime_t created;
  ceph::buffer::ptr secret;   // must set this via set_secret()!

  // cache a pointer to the implementation-specific key handler, so we
  // don't have to create it for every crypto operation.
  mutable std::shared_ptr<CryptoKeyHandler> ckh;

  int _set_secret(int type, const ceph::buffer::ptr& s);

public:
  CryptoKey() : type(0) { }
  CryptoKey(int t, utime_t c, ceph::buffer::ptr& s)
    : created(c) {
    _set_secret(t, s);
  }
  ~CryptoKey() {
  }

  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator& bl);

  void clear() {
    *this = CryptoKey();
  }

  int get_type() const { return type; }
  utime_t get_created() const { return created; }
  void print(std::ostream& out) const;

  int set_secret(int type, const ceph::buffer::ptr& s, utime_t created);
  const ceph::buffer::ptr& get_secret() { return secret; }
  const ceph::buffer::ptr& get_secret() const { return secret; }

  // a key is "empty" until a handler has been built for it
  bool empty() const { return ckh.get() == nullptr; }

  // base64 of the full encode()d key (type + timestamp + secret)
  void encode_base64(std::string& s) const {
    ceph::buffer::list bl;
    encode(bl);
    ceph::bufferlist e;
    bl.encode_base64(e);
    e.append('\0');
    s = e.c_str();
  }
  std::string encode_base64() const {
    std::string s;
    encode_base64(s);
    return s;
  }
  // inverse of encode_base64(); throws ceph::buffer::error on bad input
  void decode_base64(const std::string& s) {
    ceph::buffer::list e;
    e.append(s);
    ceph::buffer::list bl;
    bl.decode_base64(e);
    auto p = std::cbegin(bl);
    decode(p);
  }

  void encode_formatted(std::string label, ceph::Formatter *f,
			ceph::buffer::list &bl);
  void encode_plaintext(ceph::buffer::list &bl);

  // --
  int create(CephContext *cct, int type);
  int encrypt(CephContext *cct, const ceph::buffer::list& in,
	      ceph::buffer::list& out,
	      std::string *error) const {
    ceph_assert(ckh); // Bad key?
    return ckh->encrypt(in, out, error);
  }
  int decrypt(CephContext *cct, const ceph::buffer::list& in,
	      ceph::buffer::list& out,
	      std::string *error) const {
    ceph_assert(ckh); // Bad key?
    return ckh->decrypt(in, out, error);
  }

  using in_slice_t = CryptoKeyHandler::in_slice_t;
  using out_slice_t = CryptoKeyHandler::out_slice_t;

  std::size_t encrypt(CephContext*, const in_slice_t& in,
		      const out_slice_t& out) {
    ceph_assert(ckh);
    return ckh->encrypt(in, out);
  }
  std::size_t decrypt(CephContext*, const in_slice_t& in,
		      const out_slice_t& out) {
    ceph_assert(ckh);
    // was ckh->encrypt(in, out) — wrong direction
    return ckh->decrypt(in, out);
  }

  sha256_digest_t hmac_sha256(CephContext*, const ceph::buffer::list& in) {
    ceph_assert(ckh);
    return ckh->hmac_sha256(in);
  }

  // worst-case output size for a ciphertext of `want_size` plaintext bytes
  static constexpr std::size_t get_max_outbuf_size(std::size_t want_size) {
    return want_size + CryptoKeyHandler::MAX_BLOCK_SIZE;
  }

  void to_str(std::string& s) const;
};
WRITE_CLASS_ENCODER(CryptoKey)
// Stream a key in its printable (base64) form; see CryptoKey::print().
inline std::ostream& operator<<(std::ostream& out, const CryptoKey& k)
{
  k.print(out);
  return out;
}
/*
* Driver for a particular algorithm
*
* To use these functions, you need to call ceph::crypto::init(), see
* common/ceph_crypto.h. common_init_finish does this for you.
*/
// Per-algorithm driver: key generation/validation and key-handler factory.
// Instantiate via the static create(type) factory below.
class CryptoHandler {
public:
  virtual ~CryptoHandler() {}
  // CEPH_CRYPTO_* identifier implemented by this driver
  virtual int get_type() const = 0;
  // generate a fresh secret using `random`; 0 on success
  virtual int create(CryptoRandom *random, ceph::buffer::ptr& secret) = 0;
  // 0 if `secret` is acceptable key material for this algorithm
  virtual int validate_secret(const ceph::buffer::ptr& secret) = 0;
  // build a per-key handler; returns nullptr and fills `error` on failure
  virtual CryptoKeyHandler *get_key_handler(const ceph::buffer::ptr& secret,
					    std::string& error) = 0;
  // factory; returns nullptr for unknown types, caller owns the result
  static CryptoHandler *create(int type);
};
#endif
| 6,046 | 25.995536 | 77 |
h
|
null |
ceph-main/src/auth/DummyAuth.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "AuthClient.h"
#include "AuthServer.h"
class DummyAuthClientServer : public AuthClient,
public AuthServer {
public:
DummyAuthClientServer(CephContext *cct) : AuthServer(cct) {}
// client
int get_auth_request(
Connection *con,
AuthConnectionMeta *auth_meta,
uint32_t *method,
std::vector<uint32_t> *preferred_modes,
bufferlist *out) override {
*method = CEPH_AUTH_NONE;
*preferred_modes = { CEPH_CON_MODE_CRC };
return 0;
}
int handle_auth_reply_more(
Connection *con,
AuthConnectionMeta *auth_meta,
const bufferlist& bl,
bufferlist *reply) override {
ceph_abort();
}
int handle_auth_done(
Connection *con,
AuthConnectionMeta *auth_meta,
uint64_t global_id,
uint32_t con_mode,
const bufferlist& bl,
CryptoKey *session_key,
std::string *connection_secret) {
return 0;
}
int handle_auth_bad_method(
Connection *con,
AuthConnectionMeta *auth_meta,
uint32_t old_auth_method,
int result,
const std::vector<uint32_t>& allowed_methods,
const std::vector<uint32_t>& allowed_modes) override {
ceph_abort();
}
// server
int handle_auth_request(
Connection *con,
AuthConnectionMeta *auth_meta,
bool more,
uint32_t auth_method,
const bufferlist& bl,
bufferlist *reply) override {
return 1;
}
};
| 1,471 | 22 | 70 |
h
|
null |
ceph-main/src/auth/KeyRing.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include <map>
#include <memory>
#include <sstream>
#include <algorithm>
#include <boost/algorithm/string/replace.hpp>
#include "auth/KeyRing.h"
#include "include/stringify.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/Formatter.h"
#define dout_subsys ceph_subsys_auth
#undef dout_prefix
#define dout_prefix *_dout << "auth: "
using std::map;
using std::ostream;
using std::ostringstream;
using std::string;
using ceph::bufferlist;
using ceph::Formatter;
// Populate the keyring from the context's configuration, in priority
// order: the resolved `keyring` file, then an inline `key` value, then a
// `keyfile`. An inline key/keyfile succeeds even when the keyring file
// could not be found or loaded (the earlier failure is logged but its
// error code is superseded).
int KeyRing::from_ceph_context(CephContext *cct)
{
  const auto& conf = cct->_conf;
  string filename;

  // try the configured keyring search path first
  int ret = ceph_resolve_file_search(conf->keyring, filename);
  if (!ret) {
    ret = load(cct, filename);
    if (ret < 0)
      lderr(cct) << "failed to load " << filename
		 << ": " << cpp_strerror(ret) << dendl;
  } else if (conf->key.empty() && conf->keyfile.empty()) {
    // only complain about a missing keyring when there is no fallback
    lderr(cct) << "unable to find a keyring on " << conf->keyring
	       << ": " << cpp_strerror(ret) << dendl;
  }

  // inline base64 key takes precedence over keyfile
  if (!conf->key.empty()) {
    EntityAuth ea;
    try {
      ea.key.decode_base64(conf->key);
      add(conf->name, ea);
      return 0;
    }
    catch (ceph::buffer::error& e) {
      lderr(cct) << "failed to decode key '" << conf->key << "'" << dendl;
      return -EINVAL;
    }
  }

  // keyfile: a file containing just the base64 key
  if (!conf->keyfile.empty()) {
    bufferlist bl;
    string err;
    int r = bl.read_file(conf->keyfile.c_str(), &err);
    if (r < 0) {
      lderr(cct) << err << dendl;
      return r;
    }
    string k(bl.c_str(), bl.length());
    EntityAuth ea;
    try {
      ea.key.decode_base64(k);
      add(conf->name, ea);
    }
    catch (ceph::buffer::error& e) {
      lderr(cct) << "failed to decode key '" << k << "'" << dendl;
      return -EINVAL;
    }
    return 0;
  }

  // no fallback configured: report the keyring load/search result
  return ret;
}
int KeyRing::set_modifier(const char *type,
const char *val,
EntityName& name,
map<string, bufferlist>& caps)
{
if (!val)
return -EINVAL;
if (strcmp(type, "key") == 0) {
CryptoKey key;
string l(val);
try {
key.decode_base64(l);
} catch (const ceph::buffer::error& err) {
return -EINVAL;
}
set_key(name, key);
} else if (strncmp(type, "caps ", 5) == 0) {
const char *caps_entity = type + 5;
if (!*caps_entity)
return -EINVAL;
string l(val);
bufferlist bl;
encode(l, bl);
caps[caps_entity] = bl;
set_caps(name, caps);
} else if (strcmp(type, "auid") == 0) {
// just ignore it so we can still decode "old" keyrings that have an auid
} else
return -EINVAL;
return 0;
}
// Serialize the keyring in its human-readable INI-like text format.
void KeyRing::encode_plaintext(bufferlist& bl)
{
  std::ostringstream ss;
  print(ss);
  bl.append(ss.str());
}
// Dump every entity (key, optional pending key, decoded caps) as a
// formatted array section named `label`, flushed into `bl`.
void KeyRing::encode_formatted(string label, Formatter *f, bufferlist& bl)
{
  f->open_array_section(label.c_str());
  for (const auto& [name, auth] : keys) {
    f->open_object_section("auth_entities");
    f->dump_string("entity", name.to_str().c_str());
    f->dump_string("key", stringify(auth.key));
    if (!auth.pending_key.empty()) {
      f->dump_string("pending_key", stringify(auth.pending_key));
    }
    f->open_object_section("caps");
    for (const auto& [svc, capbl] : auth.caps) {
      // caps are stored encoded; decode back to the plain string for output
      auto it = capbl.cbegin();
      string capstr;
      ceph::decode(capstr, it);
      f->dump_string(svc.c_str(), capstr);
    }
    f->close_section(); /* caps */
    f->close_section(); /* auth_entities */
  }
  f->close_section(); /* auth_dump */
  f->flush(bl);
}
// Parse a plaintext keyring (INI-style, reusing ConfFile) from the
// iterator. Throws malformed_input on parse errors, bad entity names, or
// bad field values.
void KeyRing::decode(bufferlist::const_iterator& bli)
{
  bufferlist data;
  bli.copy_all(data);

  ConfFile cf;
  if (cf.parse_bufferlist(&data, nullptr) != 0) {
    throw ceph::buffer::malformed_input("cannot parse buffer");
  }

  for (auto& [secname, section] : cf) {
    if (secname == "global")
      continue;

    EntityName ename;
    if (!ename.from_str(secname)) {
      ostringstream oss;
      oss << "bad entity name in keyring: " << secname;
      throw ceph::buffer::malformed_input(oss.str().c_str());
    }

    map<string, bufferlist> caps;
    for (auto& [k, val] : section) {
      if (k.empty())
	continue;
      // ConfFile turns spaces into '_'; undo that to recover e.g. "caps mon"
      string key;
      std::replace_copy(k.begin(), k.end(), back_inserter(key), '_', ' ');
      if (set_modifier(key.c_str(), val.c_str(), ename, caps) < 0) {
	ostringstream oss;
	oss << "error setting modifier for [" << secname << "] type=" << key
	    << " val=" << val;
	throw ceph::buffer::malformed_input(oss.str().c_str());
      }
    }
  }
}
// Load and parse a keyring file. Returns 0 on success, -EINVAL for an
// empty filename, the read error, or -EIO on a parse failure.
int KeyRing::load(CephContext *cct, const std::string &filename)
{
  if (filename.empty())
    return -EINVAL;

  bufferlist bl;
  std::string err;
  int r = bl.read_file(filename.c_str(), &err);
  if (r < 0) {
    lderr(cct) << "error reading file: " << filename << ": " << err << dendl;
    return r;
  }

  try {
    auto p = bl.cbegin();
    decode(p);
  } catch (const ceph::buffer::error& e) {
    lderr(cct) << "error parsing file " << filename << ": " << e.what() << dendl;
    return -EIO;
  }

  ldout(cct, 2) << "KeyRing::load: loaded key file " << filename << dendl;
  return 0;
}
// Write the keyring in its canonical plaintext file format.
void KeyRing::print(ostream& out)
{
  for (auto& [name, auth] : keys) {
    out << "[" << name << "]" << std::endl;
    out << "\tkey = " << auth.key << std::endl;
    if (!auth.pending_key.empty()) {
      out << "\tpending key = " << auth.pending_key << std::endl;
    }
    for (auto& [svc, capbl] : auth.caps) {
      auto it = capbl.cbegin();
      string caps;
      ceph::decode(caps, it);
      // escape embedded quotes so the output parses back as a keyring file
      boost::replace_all(caps, "\"", "\\\"");
      out << "\tcaps " << svc << " = \"" << caps << '"' << std::endl;
    }
  }
}
// Merge every entry of `other` into this keyring, overwriting entities
// we already hold.
void KeyRing::import(CephContext *cct, KeyRing& other)
{
  for (auto& [name, auth] : other.keys) {
    ldout(cct, 10) << " importing " << name << dendl;
    ldout(cct, 30) << "    " << auth << dendl;
    keys[name] = auth;
  }
}
| 6,441 | 24.066148 | 83 |
cc
|
null |
ceph-main/src/auth/KeyRing.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_KEYRING_H
#define CEPH_KEYRING_H
#include "auth/Auth.h"
#include "include/common_fwd.h"
// An in-memory map of entity name -> (key, caps), loadable from and
// printable to the plaintext keyring file format.
class KeyRing : public KeyStore {
  std::map<EntityName, EntityAuth> keys;
  // apply one parsed keyring field ("key", "caps <svc>", legacy "auid")
  int set_modifier(const char *type, const char *val, EntityName& name, std::map<std::string, ceph::buffer::list>& caps);
public:
  void decode_plaintext(ceph::buffer::list::const_iterator& bl);
  /* Create a KeyRing from a Ceph context.
   * We will use the configuration stored inside the context. */
  int from_ceph_context(CephContext *cct);
  std::map<EntityName, EntityAuth>& get_keys() { return keys; }  // yuck
  int load(CephContext *cct, const std::string &filename);
  void print(std::ostream& out);
  // accessors
  bool exists(const EntityName& name) const {
    auto p = keys.find(name);
    return p != keys.end();
  }
  // copy out the full auth record; false if the entity is unknown
  bool get_auth(const EntityName& name, EntityAuth &a) const {
    std::map<EntityName, EntityAuth>::const_iterator k = keys.find(name);
    if (k == keys.end())
      return false;
    a = k->second;
    return true;
  }
  bool get_secret(const EntityName& name, CryptoKey& secret) const override {
    std::map<EntityName, EntityAuth>::const_iterator k = keys.find(name);
    if (k == keys.end())
      return false;
    secret = k->second.key;
    return true;
  }
  // plain keyrings hold no rotating service secrets
  bool get_service_secret(uint32_t service_id, uint64_t secret_id,
			  CryptoKey& secret) const override {
    return false;
  }
  // NOTE: returns true for a known entity even when it has no caps of
  // the requested type (caps is then left untouched).
  bool get_caps(const EntityName& name,
		const std::string& type, AuthCapsInfo& caps) const {
    std::map<EntityName, EntityAuth>::const_iterator k = keys.find(name);
    if (k == keys.end())
      return false;
    std::map<std::string,ceph::buffer::list>::const_iterator i = k->second.caps.find(type);
    if (i != k->second.caps.end()) {
      caps.caps = i->second;
    }
    return true;
  }
  size_t size() const {
    return keys.size();
  }
  // modifiers
  void add(const EntityName& name, const EntityAuth &a) {
    keys[name] = a;
  }
  void add(const EntityName& name, const CryptoKey &k) {
    EntityAuth a;
    a.key = k;
    keys[name] = a;
  }
  // add with both an active and a pending (to-be-rotated-in) key
  void add(const EntityName& name, const CryptoKey &k, const CryptoKey &pk) {
    EntityAuth a;
    a.key = k;
    a.pending_key = pk;
    keys[name] = a;
  }
  void remove(const EntityName& name) {
    keys.erase(name);
  }
  void set_caps(const EntityName& name, std::map<std::string, ceph::buffer::list>& caps) {
    keys[name].caps = caps;
  }
  void set_key(EntityName& ename, CryptoKey& key) {
    keys[ename].key = key;
  }
  void import(CephContext *cct, KeyRing& other);
  // decode as plaintext
  void decode(ceph::buffer::list::const_iterator& bl);
  void encode_plaintext(ceph::buffer::list& bl);
  void encode_formatted(std::string label, ceph::Formatter *f, ceph::buffer::list& bl);
};
// don't use WRITE_CLASS_ENCODER macro because we don't have an encode
// macro. don't juse encode_plaintext in that case because it is not
// wrappable; it assumes it gets the entire ceph::buffer::list.
static inline void decode(KeyRing& kr, ceph::buffer::list::const_iterator& p) {
  // free-function shim so generic decode(obj, iter) code works on KeyRing
  kr.decode(p);
}
#endif
| 3,520 | 29.617391 | 121 |
h
|
null |
ceph-main/src/auth/RotatingKeyRing.cc
|
#include <map>
#include "common/debug.h"
#include "auth/RotatingKeyRing.h"
#include "auth/KeyRing.h"
#define dout_subsys ceph_subsys_auth
#undef dout_prefix
#define dout_prefix *_dout << "auth: "
// Whether the rotating secret set wants a refresh (lock-protected).
bool RotatingKeyRing::need_new_secrets() const
{
  std::lock_guard l{lock};
  return secrets.need_new_secrets();
}
// Same as above, evaluated against the caller-supplied time `now`.
bool RotatingKeyRing::need_new_secrets(utime_t now) const
{
  std::lock_guard l{lock};
  return secrets.need_new_secrets(now);
}
// Replace the rotating secret set wholesale and log the new contents.
void RotatingKeyRing::set_secrets(RotatingSecrets&& s)
{
  std::lock_guard l{lock};
  secrets = std::move(s);
  dump_rotating();
}
// Log every rotating secret we currently hold (debug level 10).
// Callers are expected to hold `lock`.
void RotatingKeyRing::dump_rotating() const
{
  ldout(cct, 10) << "dump_rotating:" << dendl;
  for (const auto& [id, expiring_key] : secrets.secrets) {
    ldout(cct, 10) << " id " << id << " " << expiring_key << dendl;
  }
}
// Entity-key lookup is delegated to the wrapped plain keyring.
bool RotatingKeyRing::get_secret(const EntityName& name, CryptoKey& secret) const
{
  std::lock_guard l{lock};
  return keyring->get_secret(name, secret);
}
// Look up a rotating secret by id, but only for our own service type.
// Returns false (with a log line) for a foreign service or unknown id.
bool RotatingKeyRing::get_service_secret(uint32_t service_id_, uint64_t secret_id,
					 CryptoKey& secret) const
{
  std::lock_guard l{lock};

  if (service_id_ != this->service_id) {
    ldout(cct, 0) << "do not have service " << ceph_entity_type_name(service_id_)
		  << ", i am " << ceph_entity_type_name(this->service_id) << dendl;
    return false;
  }

  auto it = secrets.secrets.find(secret_id);
  if (it == secrets.secrets.end()) {
    ldout(cct, 0) << "could not find secret_id=" << secret_id << dendl;
    dump_rotating();
    return false;
  }

  secret = it->second.key;
  return true;
}
// Expose the wrapped entity keyring (not owned by this object).
KeyRing* RotatingKeyRing::get_keyring()
{
  return keyring;
}
| 1,678 | 22.319444 | 82 |
cc
|
null |
ceph-main/src/auth/RotatingKeyRing.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ROTATINGKEYRING_H
#define CEPH_ROTATINGKEYRING_H
#include "common/ceph_mutex.h"
#include "auth/Auth.h"
#include "include/common_fwd.h"
/*
* mediate access to a service's keyring and rotating secrets
*/
class KeyRing;
// KeyStore that pairs a plain entity keyring with the rotating service
// secrets for one service type; all access is serialized by `lock`.
class RotatingKeyRing : public KeyStore {
  CephContext *cct;
  uint32_t service_id;       // CEPH_ENTITY_TYPE_* whose secrets we hold
  RotatingSecrets secrets;   // rotating service secrets, keyed by id
  KeyRing *keyring;          // entity keyring (not owned)
  mutable ceph::mutex lock;  // guards secrets (and keyring access)

public:
  RotatingKeyRing(CephContext *cct_, uint32_t s, KeyRing *kr) :
    cct(cct_),
    service_id(s),
    keyring(kr),
    lock{ceph::make_mutex("RotatingKeyRing::lock")}
  {}

  bool need_new_secrets() const;
  bool need_new_secrets(utime_t now) const;

  void set_secrets(RotatingSecrets&& s);
  void dump_rotating() const;
  bool get_secret(const EntityName& name, CryptoKey& secret) const override;
  bool get_service_secret(uint32_t service_id, uint64_t secret_id,
			  CryptoKey& secret) const override;
  KeyRing *get_keyring();
};
#endif
| 1,376 | 24.5 | 76 |
h
|
null |
ceph-main/src/auth/cephx/CephxAuthorizeHandler.cc
|
#include "CephxProtocol.h"
#include "CephxAuthorizeHandler.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_auth
// Verify a cephx authorizer blob against `keys`. On success, fills in the
// authenticated identity (entity, global_id, caps) and session material;
// on failure returns false without touching the outputs.
bool CephxAuthorizeHandler::verify_authorizer(
  CephContext *cct,
  const KeyStore& keys,
  const ceph::bufferlist& authorizer_data,
  size_t connection_secret_required_len,
  ceph::bufferlist *authorizer_reply,
  EntityName *entity_name,
  uint64_t *global_id,
  AuthCapsInfo *caps_info,
  CryptoKey *session_key,
  std::string *connection_secret,
  std::unique_ptr<AuthAuthorizerChallenge> *challenge)
{
  if (!authorizer_data.length()) {
    ldout(cct, 1) << "verify authorizer, authorizer_data.length()=0" << dendl;
    return false;
  }

  auto p = authorizer_data.cbegin();
  CephXServiceTicketInfo ticket_info;
  const bool ok = cephx_verify_authorizer(cct, keys, p,
					  connection_secret_required_len,
					  ticket_info,
					  challenge, connection_secret,
					  authorizer_reply);
  if (!ok) {
    return false;
  }

  *caps_info = ticket_info.ticket.caps;
  *entity_name = ticket_info.ticket.name;
  *global_id = ticket_info.ticket.global_id;
  *session_key = ticket_info.session_key;
  return true;
}
// Return type of crypto used for this session's data; for cephx, symmetric authentication
// cephx uses a symmetric session key to authenticate session data.
int CephxAuthorizeHandler::authorizer_session_crypto()
{
  return SESSION_SYMMETRIC_AUTHENTICATE;
}
| 1,371 | 25.901961 | 91 |
cc
|
null |
ceph-main/src/auth/cephx/CephxAuthorizeHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CEPHXAUTHORIZEHANDLER_H
#define CEPH_CEPHXAUTHORIZEHANDLER_H
#include "auth/AuthAuthorizeHandler.h"
#include "include/common_fwd.h"
// AuthAuthorizeHandler for the cephx protocol; see the .cc for semantics.
struct CephxAuthorizeHandler : public AuthAuthorizeHandler {
  // validate an incoming authorizer and extract identity/session material
  bool verify_authorizer(
    CephContext *cct,
    const KeyStore& keys,
    const ceph::buffer::list& authorizer_data,
    size_t connection_secret_required_len,
    ceph::buffer::list *authorizer_reply,
    EntityName *entity_name,
    uint64_t *global_id,
    AuthCapsInfo *caps_info,
    CryptoKey *session_key,
    std::string *connection_secret,
    std::unique_ptr<AuthAuthorizerChallenge> *challenge) override;
  // reports SESSION_SYMMETRIC_AUTHENTICATE
  int authorizer_session_crypto() override;
};
#endif
| 1,114 | 26.875 | 71 |
h
|
null |
ceph-main/src/auth/cephx/CephxClientHandler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include "CephxClientHandler.h"
#include "CephxProtocol.h"
#include "auth/KeyRing.h"
#include "include/random.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_auth
#undef dout_prefix
#define dout_prefix *_dout << "cephx client: "
using std::string;
using ceph::bufferlist;
// Forget any in-flight handshake state and return to the initial
// challenge phase.
void CephxClientHandler::reset()
{
  ldout(cct,10) << __func__ << dendl;
  server_challenge = 0;
  starting = true;
}
// Compose the next cephx request into `bl`: either a
// CEPHX_GET_AUTH_SESSION_KEY authentication (when we still need the AUTH
// ticket) or a CEPHX_GET_PRINCIPAL_SESSION_KEY service-ticket request.
// Returns 0 on success or a negative errno (missing/invalid local secret,
// challenge-computation failure, authorizer build failure).
int CephxClientHandler::build_request(bufferlist& bl) const
{
  ldout(cct, 10) << "build_request" << dendl;

  if (need & CEPH_ENTITY_TYPE_AUTH) {
    /* authenticate */
    CephXRequestHeader header;
    header.request_type = CEPHX_GET_AUTH_SESSION_KEY;
    encode(header, bl);

    // our own long-lived secret from the local keyring
    CryptoKey secret;
    const bool got = keyring->get_secret(cct->_conf->name, secret);
    if (!got) {
      ldout(cct, 20) << "no secret found for entity: " << cct->_conf->name << dendl;
      return -ENOENT;
    }

    // is the key OK?
    if (!secret.get_secret().length()) {
      ldout(cct, 20) << "secret for entity " << cct->_conf->name << " is invalid" << dendl;
      return -EINVAL;
    }

    CephXAuthenticate req;
    req.client_challenge = ceph::util::generate_random_number<uint64_t>();
    std::string error;
    // mix the server's and our challenge with the secret into req.key
    cephx_calc_client_server_challenge(cct, secret, server_challenge,
				       req.client_challenge, &req.key, error);
    if (!error.empty()) {
      ldout(cct, 20) << "cephx_calc_client_server_challenge error: " << error << dendl;
      return -EIO;
    }

    // include any previous ticket so the monitor can reclaim our global_id
    req.old_ticket = ticket_handler->ticket;

    // for nautilus+ servers: request other keys at the same time
    req.other_keys = need;

    if (req.old_ticket.blob.length()) {
      ldout(cct, 20) << "old ticket len=" << req.old_ticket.blob.length() << dendl;
    }

    encode(req, bl);

    ldout(cct, 10) << "get auth session key: client_challenge "
		   << std::hex << req.client_challenge << std::dec << dendl;
    return 0;
  }

  if (_need_tickets()) {
    /* get service tickets */
    ldout(cct, 10) << "get service keys: want=" << want << " need=" << need << " have=" << have << dendl;

    CephXRequestHeader header;
    header.request_type = CEPHX_GET_PRINCIPAL_SESSION_KEY;
    encode(header, bl);

    // prove we hold the AUTH ticket by attaching an authorizer
    CephXAuthorizer *authorizer = ticket_handler->build_authorizer(global_id);
    if (!authorizer)
      return -EINVAL;
    bl.claim_append(authorizer->bl);
    delete authorizer;

    CephXServiceTicketRequest req;
    req.keys = need;
    encode(req, bl);
  }

  return 0;
}
bool CephxClientHandler::_need_tickets() const
{
  // Request tickets whenever something is missing -- except when the MGR
  // ticket is the only gap.  During an upgrade that case would loop, and
  // the MGR ticket gets re-requested anyway when the rotating secrets
  // refresh.
  if (need == 0)
    return false;
  return need != CEPH_ENTITY_TYPE_MGR;
}
// Process one cephx reply from the server.
//
// Returns 0 when authentication is complete, -EAGAIN when another
// round-trip is required, or a negative errno on failure.  On the very
// first reply only a server challenge is expected; afterwards the reply
// type is read from the CephXResponseHeader.
// `session_key`/`connection_secret` are only filled for msgr2 callers
// (both non-NULL); msgr1 passes NULL and takes the extra round-trip path.
int CephxClientHandler::handle_response(
  int ret,
  bufferlist::const_iterator& indata,
  CryptoKey *session_key,
  std::string *connection_secret)
{
  ldout(cct, 10) << this << " handle_response ret = " << ret << dendl;
  if (ret < 0)
    return ret; // hrm!
  if (starting) {
    // First exchange: the server sends its challenge; stash it for
    // build_request() and ask the caller to go another round.
    CephXServerChallenge ch;
    try {
      decode(ch, indata);
    } catch (ceph::buffer::error& e) {
      ldout(cct, 1) << __func__ << " failed to decode CephXServerChallenge: "
                    << e.what() << dendl;
      return -EPERM;
    }
    server_challenge = ch.server_challenge;
    ldout(cct, 10) << " got initial server challenge "
                   << std::hex << server_challenge << std::dec << dendl;
    starting = false;
    tickets.invalidate_ticket(CEPH_ENTITY_TYPE_AUTH);
    return -EAGAIN;
  }
  struct CephXResponseHeader header;
  try {
    decode(header, indata);
  } catch (ceph::buffer::error& e) {
    ldout(cct, 1) << __func__ << " failed to decode CephXResponseHeader: "
                  << e.what() << dendl;
    return -EPERM;
  }
  switch (header.request_type) {
  case CEPHX_GET_AUTH_SESSION_KEY:
    {
      ldout(cct, 10) << " get_auth_session_key" << dendl;
      CryptoKey secret;
      const bool got = keyring->get_secret(cct->_conf->name, secret);
      if (!got) {
        ldout(cct, 0) << "key not found for " << cct->_conf->name << dendl;
        return -ENOENT;
      }
      // The AUTH ticket reply is sealed with our permanent secret.
      if (!tickets.verify_service_ticket_reply(secret, indata)) {
        ldout(cct, 0) << "could not verify service_ticket reply" << dendl;
        return -EACCES;
      }
      ldout(cct, 10) << " want=" << want << " need=" << need << " have=" << have << dendl;
      // Nautilus+ servers append a connection secret and extra service
      // tickets after the AUTH ticket.
      if (!indata.end()) {
        bufferlist cbl, extra_tickets;
        using ceph::decode;
        try {
          decode(cbl, indata);
          decode(extra_tickets, indata);
        } catch (ceph::buffer::error& e) {
          ldout(cct, 1) << __func__ << " failed to decode tickets: "
                        << e.what() << dendl;
          return -EPERM;
        }
        ldout(cct, 10) << " got connection bl " << cbl.length()
                       << " and extra tickets " << extra_tickets.length()
                       << dendl;
        // for msgr1, both session_key and connection_secret are NULL
        // so we skip extra_tickets and incur an additional round-trip
        // to get service tickets via CEPHX_GET_PRINCIPAL_SESSION_KEY
        // as if talking to a pre-nautilus mon
        // this wasn't intended but turns out to be needed because in
        // msgr1 case MonClient doesn't explicitly wait for the monmap
        // (which is shared together with CEPHX_GET_AUTH_SESSION_KEY
        // reply)
        // instead, it waits for CEPHX_GET_PRINCIPAL_SESSION_KEY reply
        // which comes after the monmap and hence the monmap is always
        // handled by the time authentication is considered finished
        // if we start to always process extra_tickets here, MonClient
        // would have no reason to send CEPHX_GET_PRINCIPAL_SESSION_KEY
        // and RadosClient::connect() or similar could return with no
        // actual monmap but just an initial bootstrap stub, leading
        // to mon commands going out with zero fsid and other issues
        if (session_key && connection_secret) {
          CephXTicketHandler& ticket_handler =
            tickets.get_handler(CEPH_ENTITY_TYPE_AUTH);
          if (session_key) {
            *session_key = ticket_handler.session_key;
          }
          // The connection secret is encrypted with the new session key.
          if (cbl.length() && connection_secret) {
            auto p = cbl.cbegin();
            string err;
            if (decode_decrypt(cct, *connection_secret, *session_key, p,
                               err)) {
              lderr(cct) << __func__ << " failed to decrypt connection_secret"
                         << dendl;
            } else {
              ldout(cct, 10) << " got connection_secret "
                             << connection_secret->size() << " bytes" << dendl;
            }
          }
          // Extra (non-AUTH) service tickets are sealed with the session
          // key; failures here are logged but non-fatal.
          if (extra_tickets.length()) {
            auto p = extra_tickets.cbegin();
            if (!tickets.verify_service_ticket_reply(
                  *session_key, p)) {
              lderr(cct) << "could not verify extra service_tickets" << dendl;
            } else {
              ldout(cct, 10) << " got extra service_tickets" << dendl;
            }
          }
        }
      }
      validate_tickets();
      if (_need_tickets())
        ret = -EAGAIN;
      else
        ret = 0;
    }
    break;
  case CEPHX_GET_PRINCIPAL_SESSION_KEY:
    {
      CephXTicketHandler& ticket_handler = tickets.get_handler(CEPH_ENTITY_TYPE_AUTH);
      ldout(cct, 10) << " get_principal_session_key session_key " << ticket_handler.session_key << dendl;
      // Service tickets are sealed with the AUTH session key.
      if (!tickets.verify_service_ticket_reply(ticket_handler.session_key, indata)) {
        ldout(cct, 0) << "could not verify service_ticket reply" << dendl;
        return -EACCES;
      }
      validate_tickets();
      if (!_need_tickets()) {
        ret = 0;
      }
    }
    break;
  case CEPHX_GET_ROTATING_KEY:
    {
      ldout(cct, 10) << " get_rotating_key" << dendl;
      // Only daemons carry a rotating keyring; clients ignore this reply.
      if (rotating_secrets) {
        RotatingSecrets secrets;
        CryptoKey secret_key;
        const bool got = keyring->get_secret(cct->_conf->name, secret_key);
        if (!got) {
          ldout(cct, 0) << "key not found for " << cct->_conf->name << dendl;
          return -ENOENT;
        }
        std::string error;
        if (decode_decrypt(cct, secrets, secret_key, indata, error)) {
          ldout(cct, 0) << "could not set rotating key: decode_decrypt failed. error:"
                        << error << dendl;
          return -EINVAL;
        } else {
          rotating_secrets->set_secrets(std::move(secrets));
        }
      }
    }
    break;
  default:
    ldout(cct, 0) << " unknown request_type " << header.request_type << dendl;
    ceph_abort();
  }
  return ret;
}
AuthAuthorizer *CephxClientHandler::build_authorizer(uint32_t service_id) const
{
  // The ticket manager owns the per-service tickets; delegate to it.
  ldout(cct, 10) << "build_authorizer for service " << ceph_entity_type_name(service_id) << dendl;
  AuthAuthorizer *authorizer = tickets.build_authorizer(service_id);
  return authorizer;
}
bool CephxClientHandler::build_rotating_request(bufferlist& bl) const
{
  // A rotating-key request is just a bare header; the server identifies us
  // from the authenticated session, so no payload is needed.
  ldout(cct, 10) << "build_rotating_request" << dendl;
  CephXRequestHeader hdr;
  hdr.request_type = CEPHX_GET_ROTATING_KEY;
  encode(hdr, bl);
  return true;
}
// Refresh want/have/need from the ticket state and cache the AUTH ticket
// handler pointer ahead of the next build_request() call.
void CephxClientHandler::prepare_build_request()
{
  ldout(cct, 10) << "validate_tickets: want=" << want << " need=" << need
                 << " have=" << have << dendl;
  validate_tickets();
  ldout(cct, 10) << "want=" << want << " need=" << need << " have=" << have
                 << dendl;
  // build_request() dereferences ticket_handler; keep it current here.
  ticket_handler = &(tickets.get_handler(CEPH_ENTITY_TYPE_AUTH));
}
// Recompute the need mask from want/have against ticket expiry.
void CephxClientHandler::validate_tickets()
{
  // lock should be held for write
  tickets.validate_tickets(want, have, need);
}
// Public query: refresh ticket state, then report whether another ticket
// request is warranted (see _need_tickets for the MGR-only exception).
bool CephxClientHandler::need_tickets()
{
  validate_tickets();
  ldout(cct, 20) << "need_tickets: want=" << want
                 << " have=" << have
                 << " need=" << need
                 << dendl;
  return _need_tickets();
}
| 9,770 | 28.254491 | 105 |
cc
|
null |
ceph-main/src/auth/cephx/CephxClientHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CEPHXCLIENTHANDLER_H
#define CEPH_CEPHXCLIENTHANDLER_H
#include "auth/AuthClientHandler.h"
#include "CephxProtocol.h"
#include "auth/RotatingKeyRing.h"
#include "include/common_fwd.h"
class KeyRing;
// Client-side driver of the cephx authentication protocol: builds the
// request/response exchanges with the monitor and manages the resulting
// service tickets.
class CephxClientHandler : public AuthClientHandler {
  bool starting;              // true until the initial server challenge arrives
  /* envelope protocol parameters */
  uint64_t server_challenge;  // challenge received from the server
  CephXTicketManager tickets;           // per-service ticket state
  CephXTicketHandler* ticket_handler;   // cached AUTH ticket handler
  RotatingKeyRing* rotating_secrets;    // daemon rotating keys (may be unused by clients)
  KeyRing *keyring;                     // permanent entity secrets
public:
  CephxClientHandler(CephContext *cct_,
                     RotatingKeyRing *rsecrets)
    : AuthClientHandler(cct_),
      starting(false),
      server_challenge(0),
      tickets(cct_),
      ticket_handler(NULL),
      rotating_secrets(rsecrets),
      keyring(rsecrets->get_keyring())
  {
    reset();
  }
  CephxClientHandler* clone() const override {
    return new CephxClientHandler(*this);
  }
  void reset() override;
  void prepare_build_request() override;
  int build_request(ceph::buffer::list& bl) const override;
  int handle_response(int ret, ceph::buffer::list::const_iterator& iter,
                      CryptoKey *session_key,
                      std::string *connection_secret) override;
  bool build_rotating_request(ceph::buffer::list& bl) const override;
  int get_protocol() const override { return CEPH_AUTH_CEPHX; }
  AuthAuthorizer *build_authorizer(uint32_t service_id) const override;
  bool need_tickets() override;
  void set_global_id(uint64_t id) override {
    // Keep the ticket manager's notion of our global_id in sync.
    global_id = id;
    tickets.global_id = id;
  }
private:
  void validate_tickets() override;
  bool _need_tickets() const;
};
#endif
| 2,017 | 24.544304 | 72 |
h
|
null |
ceph-main/src/auth/cephx/CephxKeyServer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/config.h"
#include "CephxKeyServer.h"
#include "common/dout.h"
#include <sstream>
#define dout_subsys ceph_subsys_auth
#undef dout_prefix
#define dout_prefix *_dout << "cephx keyserverdata: "
using std::ostringstream;
using std::string;
using std::stringstream;
using ceph::bufferptr;
using ceph::bufferlist;
using ceph::Formatter;
// Return the "current" rotating secret for a service, its id, and the ttl
// a ticket sealed with it should be granted.  Returns false if the service
// has no rotating secrets.
bool KeyServerData::get_service_secret(CephContext *cct, uint32_t service_id,
                                       CryptoKey& secret, uint64_t& secret_id,
                                       double& ttl) const
{
  auto iter = rotating_secrets.find(service_id);
  if (iter == rotating_secrets.end()) {
    ldout(cct, 10) << "get_service_secret service " << ceph_entity_type_name(service_id) << " not found " << dendl;
    return false;
  }
  const RotatingSecrets& secrets = iter->second;
  // second to oldest, unless it's expired
  auto riter = secrets.secrets.begin();
  if (secrets.secrets.size() > 1)
    ++riter;
  utime_t now = ceph_clock_now();
  if (riter->second.expiration < now)
    ++riter; // "current" key has expired, use "next" key instead
  // NOTE(review): with a single, already-expired secret the ++ above
  // appears able to step past end(); presumably rotation always keeps
  // enough live keys -- confirm before reusing this pattern.
  secret_id = riter->first;
  secret = riter->second.key;
  // ttl may have just been increased by the user
  // cap it by expiration of "next" key to prevent handing out a ticket
  // with a bogus, possibly way into the future, validity
  ttl = service_id == CEPH_ENTITY_TYPE_AUTH ?
      cct->_conf->auth_mon_ticket_ttl : cct->_conf->auth_service_ticket_ttl;
  ttl = std::min(ttl, static_cast<double>(
                     secrets.secrets.rbegin()->second.expiration - now));
  ldout(cct, 30) << __func__ << " service "
                 << ceph_entity_type_name(service_id) << " secret_id "
                 << secret_id << " " << riter->second << " ttl " << ttl
                 << dendl;
  return true;
}
// Look up a specific rotating secret by (service, secret_id); used to
// validate tickets sealed with an older key.  Returns false if either the
// service or the requested id is unknown.
bool KeyServerData::get_service_secret(CephContext *cct, uint32_t service_id,
                                       uint64_t secret_id, CryptoKey& secret) const
{
  auto iter = rotating_secrets.find(service_id);
  if (iter == rotating_secrets.end()) {
    ldout(cct, 10) << __func__ << " no rotating_secrets for service " << service_id
                   << " " << ceph_entity_type_name(service_id) << dendl;
    return false;
  }
  const RotatingSecrets& secrets = iter->second;
  auto riter = secrets.secrets.find(secret_id);
  if (riter == secrets.secrets.end()) {
    ldout(cct, 10) << "get_service_secret service " << ceph_entity_type_name(service_id)
                   << " secret " << secret_id << " not found" << dendl;
    ldout(cct, 30) << " I have:" << dendl;
    for (auto iter = secrets.secrets.begin();
         iter != secrets.secrets.end();
         ++iter)
      ldout(cct, 30) << " id " << iter->first << " " << iter->second << dendl;
    return false;
  }
  secret = riter->second.key;
  return true;
}
bool KeyServerData::get_auth(const EntityName& name, EntityAuth& auth) const {
  // Prefer the locally stored secrets; fall back to the extra keyring.
  const auto it = secrets.find(name);
  if (it == secrets.end())
    return extra_secrets->get_auth(name, auth);
  auth = it->second;
  return true;
}
bool KeyServerData::get_secret(const EntityName& name, CryptoKey& secret) const {
  // Prefer the locally stored secrets; fall back to the extra keyring.
  const auto it = secrets.find(name);
  if (it == secrets.end())
    return extra_secrets->get_secret(name, secret);
  secret = it->second.key;
  return true;
}
// Fetch the caps blob of entity `name` for service `type` ("mon", "osd",
// ...).  Returns true if the entity is known (even with empty caps for
// this type); falls back to the extra keyring for unknown entities.
bool KeyServerData::get_caps(CephContext *cct, const EntityName& name,
                             const string& type, AuthCapsInfo& caps_info) const
{
  caps_info.allow_all = false;
  ldout(cct, 10) << "get_caps: name=" << name.to_str() << dendl;
  auto iter = secrets.find(name);
  if (iter != secrets.end()) {
    ldout(cct, 10) << "get_caps: num of caps=" << iter->second.caps.size() << dendl;
    auto capsiter = iter->second.caps.find(type);
    if (capsiter != iter->second.caps.end()) {
      caps_info.caps = capsiter->second;
    }
    return true;
  }
  return extra_secrets->get_caps(name, type, caps_info);
}
#undef dout_prefix
#define dout_prefix *_dout << "cephx keyserver: "
// Construct a key server whose data falls back to `extra_secrets` for
// entities not stored locally.
KeyServer::KeyServer(CephContext *cct_, KeyRing *extra_secrets)
  : cct(cct_),
    data(extra_secrets),
    lock{ceph::make_mutex("KeyServer::lock")}
{
}
// Log the rotating secrets at startup (debug aid); always succeeds.
int KeyServer::start_server()
{
  std::scoped_lock l{lock};
  _dump_rotating_secrets();
  return 0;
}
// Debug dump of the rotating secrets.
// NOTE(review): unlike start_server(), no lock is taken here -- presumably
// callers hold it or accept racy debug output; confirm before relying on it.
void KeyServer::dump()
{
  _dump_rotating_secrets();
}
// Emit every rotating secret (per service, per id) at debug level 30.
// Caller is expected to hold `lock`.
void KeyServer::_dump_rotating_secrets()
{
  ldout(cct, 30) << "_dump_rotating_secrets" << dendl;
  for (auto iter = data.rotating_secrets.begin();
       iter != data.rotating_secrets.end();
       ++iter) {
    RotatingSecrets& key = iter->second;
    for (auto mapiter = key.secrets.begin();
         mapiter != key.secrets.end();
         ++mapiter)
      ldout(cct, 30) << "service " << ceph_entity_type_name(iter->first)
                     << " id " << mapiter->first
                     << " key " << mapiter->second << dendl;
  }
}
// Top up the rotating-secret chain for one service in `pending_data`,
// generating new keys until the chain covers the configured ttl horizon.
// Returns the number of keys added (0 if nothing was needed).
int KeyServer::_rotate_secret(uint32_t service_id, KeyServerData &pending_data)
{
  RotatingSecrets& r = pending_data.rotating_secrets[service_id];
  int added = 0;
  utime_t now = ceph_clock_now();
  // AUTH tickets use the (longer) mon ttl; everything else the service ttl.
  double ttl = service_id == CEPH_ENTITY_TYPE_AUTH ? cct->_conf->auth_mon_ticket_ttl : cct->_conf->auth_service_ticket_ttl;
  while (r.need_new_secrets(now)) {
    ExpiringCryptoKey ek;
    generate_secret(ek.key);
    if (r.empty()) {
      ek.expiration = now;
    } else {
      // Chain expirations: the new key starts where the newest one ends,
      // but never earlier than now + ttl.
      utime_t next_ttl = now;
      next_ttl += ttl;
      ek.expiration = std::max(next_ttl, r.next().expiration);
    }
    ek.expiration += ttl;
    uint64_t secret_id = r.add(ek);
    ldout(cct, 10) << "_rotate_secret adding " << ceph_entity_type_name(service_id) << dendl;
    ldout(cct, 30) << "_rotate_secret adding " << ceph_entity_type_name(service_id)
                   << " id " << secret_id << " " << ek
                   << dendl;
    added++;
  }
  return added;
}
bool KeyServer::get_secret(const EntityName& name, CryptoKey& secret) const
{
std::scoped_lock l{lock};
return data.get_secret(name, secret);
}
bool KeyServer::get_auth(const EntityName& name, EntityAuth& auth) const
{
std::scoped_lock l{lock};
return data.get_auth(name, auth);
}
bool KeyServer::get_caps(const EntityName& name, const string& type,
AuthCapsInfo& caps_info) const
{
std::scoped_lock l{lock};
return data.get_caps(cct, name, type, caps_info);
}
bool KeyServer::get_service_secret(uint32_t service_id, CryptoKey& secret,
                                   uint64_t& secret_id, double& ttl) const
{
  // Locked wrapper around the data layer's current-secret lookup.
  const std::scoped_lock guard(lock);
  return data.get_service_secret(cct, service_id, secret, secret_id, ttl);
}
bool KeyServer::get_service_secret(uint32_t service_id,
                                   uint64_t secret_id, CryptoKey& secret) const
{
  // Locked wrapper around the data layer's by-id secret lookup.
  const std::scoped_lock guard(lock);
  return data.get_service_secret(cct, service_id, secret_id, secret);
}
void KeyServer::note_used_pending_key(const EntityName& name, const CryptoKey& key)
{
std::scoped_lock l(lock);
used_pending_keys[name] = key;
}
void KeyServer::clear_used_pending_keys()
{
std::scoped_lock l(lock);
used_pending_keys.clear();
}
std::map<EntityName,CryptoKey> KeyServer::get_used_pending_keys()
{
std::map<EntityName,CryptoKey> ret;
std::scoped_lock l(lock);
ret.swap(used_pending_keys);
return ret;
}
bool KeyServer::generate_secret(CryptoKey& secret)
{
  // Mint a fresh random AES key stamped with the current time.
  bufferptr bp;
  CryptoHandler *handler = cct->get_crypto_handler(CEPH_CRYPTO_AES);
  if (handler == nullptr || handler->create(cct->random(), bp) < 0)
    return false;
  secret.set_secret(CEPH_CRYPTO_AES, bp, ceph_clock_now());
  return true;
}
bool KeyServer::generate_secret(EntityName& name, CryptoKey& secret)
{
  // Mint a new key and register it for the entity in one step.
  if (!generate_secret(secret))
    return false;
  EntityAuth auth;
  auth.key = secret;
  const std::scoped_lock guard(lock);
  data.add_auth(name, auth);
  return true;
}
bool KeyServer::contains(const EntityName& name) const
{
std::scoped_lock l{lock};
return data.contains(name);
}
// Dump all entities with their keys and caps.  Either output sink may be
// NULL: `f` produces structured (e.g. JSON) output, `ds` plain text.
// Returns -ENOENT when there are no secrets at all, else 0.
int KeyServer::encode_secrets(Formatter *f, stringstream *ds) const
{
  std::scoped_lock l{lock};
  auto mapiter = data.secrets_begin();
  if (mapiter == data.secrets_end())
    return -ENOENT;
  if (f)
    f->open_array_section("auth_dump");
  while (mapiter != data.secrets_end()) {
    const EntityName& name = mapiter->first;
    if (ds) {
      *ds << name.to_str() << std::endl;
      *ds << "\tkey: " << mapiter->second.key << std::endl;
    }
    if (f) {
      f->open_object_section("auth_entities");
      f->dump_string("entity", name.to_str());
      f->dump_stream("key") << mapiter->second.key;
      f->open_object_section("caps");
    }
    // Caps are stored as encoded bufferlists; decode each back to a string
    // for display.
    auto capsiter = mapiter->second.caps.begin();
    for (; capsiter != mapiter->second.caps.end(); ++capsiter) {
      // FIXME: need a const_iterator for bufferlist, but it doesn't exist yet.
      bufferlist *bl = const_cast<bufferlist*>(&capsiter->second);
      auto dataiter = bl->cbegin();
      string caps;
      using ceph::decode;
      decode(caps, dataiter);
      if (ds)
        *ds << "\tcaps: [" << capsiter->first << "] " << caps << std::endl;
      if (f)
        f->dump_string(capsiter->first.c_str(), caps);
    }
    if (f) {
      f->close_section(); // caps
      f->close_section(); // auth_entities
    }
    ++mapiter;
  }
  if (f)
    f->close_section(); // auth_dump
  return 0;
}
void KeyServer::encode_formatted(string label, Formatter *f, bufferlist &bl)
{
  // Wrap the secrets dump in a named object section and flush into `bl`.
  ceph_assert(f != NULL);
  f->open_object_section(label.c_str());
  encode_secrets(f, nullptr);
  f->close_section();
  f->flush(bl);
}
void KeyServer::encode_plaintext(bufferlist &bl)
{
  // Render the secrets as plain text and append the result to the buffer.
  stringstream out;
  encode_secrets(nullptr, &out);
  bl.append(out.str());
}
// Build the encoded payload of the next rotating-secrets version: clone the
// current chains, top up every service, and encode the result.  Returns
// false when no new keys were needed (no update to propose).
bool KeyServer::prepare_rotating_update(bufferlist& rotating_bl)
{
  std::scoped_lock l{lock};
  ldout(cct, 20) << __func__ << " before: data.rotating_ver=" << data.rotating_ver
                 << dendl;
  // Work on a scratch copy; `data` itself is only updated when the
  // encoded update is later applied.
  KeyServerData pending_data(nullptr);
  pending_data.rotating_ver = data.rotating_ver + 1;
  pending_data.rotating_secrets = data.rotating_secrets;
  int added = 0;
  added += _rotate_secret(CEPH_ENTITY_TYPE_AUTH, pending_data);
  added += _rotate_secret(CEPH_ENTITY_TYPE_MON, pending_data);
  added += _rotate_secret(CEPH_ENTITY_TYPE_OSD, pending_data);
  added += _rotate_secret(CEPH_ENTITY_TYPE_MDS, pending_data);
  added += _rotate_secret(CEPH_ENTITY_TYPE_MGR, pending_data);
  if (!added) {
    return false;
  }
  ldout(cct, 20) << __func__ << " after: pending_data.rotating_ver="
                 << pending_data.rotating_ver
                 << dendl;
  pending_data.encode_rotating(rotating_bl);
  return true;
}
bool KeyServer::get_rotating_encrypted(const EntityName& name,
bufferlist& enc_bl) const
{
std::scoped_lock l{lock};
auto mapiter = data.find_name(name);
if (mapiter == data.secrets_end())
return false;
const CryptoKey& specific_key = mapiter->second.key;
auto rotate_iter = data.rotating_secrets.find(name.get_type());
if (rotate_iter == data.rotating_secrets.end())
return false;
RotatingSecrets secrets = rotate_iter->second;
std::string error;
if (encode_encrypt(cct, secrets, specific_key, enc_bl, error))
return false;
return true;
}
bool KeyServer::_get_service_caps(const EntityName& name, uint32_t service_id,
AuthCapsInfo& caps_info) const
{
string s = ceph_entity_type_name(service_id);
return data.get_caps(cct, name, s, caps_info);
}
bool KeyServer::get_service_caps(const EntityName& name, uint32_t service_id,
AuthCapsInfo& caps_info) const
{
std::scoped_lock l{lock};
return _get_service_caps(name, service_id, caps_info);
}
// Fill `info` with a fresh session key and a ticket derived from
// `parent_ticket`, valid for `ttl` seconds.  Caller holds `lock` and has
// already set info.service_secret/secret_id.  Returns 0 or -EINVAL when
// the entity has no caps for the target service.
int KeyServer::_build_session_auth_info(uint32_t service_id,
                                        const AuthTicket& parent_ticket,
                                        CephXSessionAuthInfo& info,
                                        double ttl)
{
  info.service_id = service_id;
  info.ticket = parent_ticket;
  info.ticket.init_timestamps(ceph_clock_now(), ttl);
  info.validity.set_from_double(ttl);
  generate_secret(info.session_key);
  // mon keys are stored externally. and the caps are blank anyway.
  if (service_id != CEPH_ENTITY_TYPE_MON) {
    string s = ceph_entity_type_name(service_id);
    if (!data.get_caps(cct, info.ticket.name, s, info.ticket.caps)) {
      return -EINVAL;
    }
  }
  return 0;
}
int KeyServer::build_session_auth_info(uint32_t service_id,
                                       const AuthTicket& parent_ticket,
                                       CephXSessionAuthInfo& info)
{
  // Look up the current rotating secret for the service; its remaining
  // lifetime caps the ttl of the ticket we are about to issue.
  double ttl;
  if (!get_service_secret(service_id, info.service_secret, info.secret_id,
                          ttl))
    return -EACCES;
  const std::scoped_lock guard(lock);
  return _build_session_auth_info(service_id, parent_ticket, info, ttl);
}
int KeyServer::build_session_auth_info(uint32_t service_id,
                                       const AuthTicket& parent_ticket,
                                       const CryptoKey& service_secret,
                                       uint64_t secret_id,
                                       CephXSessionAuthInfo& info)
{
  // Variant for callers that already hold the service secret (e.g. mon
  // keys stored outside the key server); the ttl falls back to the
  // configured service-ticket ttl.
  info.service_secret = service_secret;
  info.secret_id = secret_id;
  const std::scoped_lock guard(lock);
  return _build_session_auth_info(service_id, parent_ticket, info,
                                  cct->_conf->auth_service_ticket_ttl);
}
| 13,116 | 26.327083 | 123 |
cc
|
null |
ceph-main/src/auth/cephx/CephxKeyServer.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_KEYSSERVER_H
#define CEPH_KEYSSERVER_H
#include "auth/KeyRing.h"
#include "CephxProtocol.h"
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
// Serializable backing store of the cephx key server: per-entity permanent
// secrets plus per-service rotating secrets, each with its own version
// counter for replication.
struct KeyServerData {
  version_t version;  // version of the entity-secrets map
  /* for each entity */
  std::map<EntityName, EntityAuth> secrets;
  KeyRing *extra_secrets;  // fallback keyring for entities not stored here
  /* for each service type */
  version_t rotating_ver;  // version of the rotating-secrets chains
  std::map<uint32_t, RotatingSecrets> rotating_secrets;
  explicit KeyServerData(KeyRing *extra)
    : version(0),
      extra_secrets(extra),
      rotating_ver(0) {}
  void encode(ceph::buffer::list& bl) const {
    __u8 struct_v = 1;
    using ceph::encode;
    encode(struct_v, bl);
    encode(version, bl);
    encode(rotating_ver, bl);
    encode(secrets, bl);
    encode(rotating_secrets, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(version, bl);
    decode(rotating_ver, bl);
    decode(secrets, bl);
    decode(rotating_secrets, bl);
  }
  // Encode only the rotating-secrets portion (used by SET_ROTATING
  // incrementals).
  void encode_rotating(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 1;
    encode(struct_v, bl);
    encode(rotating_ver, bl);
    encode(rotating_secrets, bl);
  }
  void decode_rotating(ceph::buffer::list& rotating_bl) {
    using ceph::decode;
    auto iter = rotating_bl.cbegin();
    __u8 struct_v;
    decode(struct_v, iter);
    decode(rotating_ver, iter);
    decode(rotating_secrets, iter);
  }
  // Membership test against locally stored entities only (no fallback to
  // extra_secrets).
  bool contains(const EntityName& name) const {
    return (secrets.find(name) != secrets.end());
  }
  void clear_secrets() {
    version = 0;
    secrets.clear();
    rotating_ver = 0;
    rotating_secrets.clear();
  }
  void add_auth(const EntityName& name, EntityAuth& auth) {
    secrets[name] = auth;
  }
  void remove_secret(const EntityName& name) {
    auto iter = secrets.find(name);
    if (iter == secrets.end())
      return;
    secrets.erase(iter);
  }
  bool get_service_secret(CephContext *cct, uint32_t service_id,
                          CryptoKey& secret, uint64_t& secret_id,
                          double& ttl) const;
  bool get_service_secret(CephContext *cct, uint32_t service_id,
                          uint64_t secret_id, CryptoKey& secret) const;
  bool get_auth(const EntityName& name, EntityAuth& auth) const;
  bool get_secret(const EntityName& name, CryptoKey& secret) const;
  bool get_caps(CephContext *cct, const EntityName& name,
                const std::string& type, AuthCapsInfo& caps) const;
  std::map<EntityName, EntityAuth>::iterator secrets_begin()
  { return secrets.begin(); }
  std::map<EntityName, EntityAuth>::const_iterator secrets_begin() const
  { return secrets.begin(); }
  std::map<EntityName, EntityAuth>::iterator secrets_end()
  { return secrets.end(); }
  std::map<EntityName, EntityAuth>::const_iterator secrets_end() const
  { return secrets.end(); }
  std::map<EntityName, EntityAuth>::iterator find_name(const EntityName& name)
  { return secrets.find(name); }
  std::map<EntityName, EntityAuth>::const_iterator find_name(const EntityName& name) const
  { return secrets.find(name); }
  // -- incremental updates --
  typedef enum {
    AUTH_INC_NOP,
    AUTH_INC_ADD,
    AUTH_INC_DEL,
    AUTH_INC_SET_ROTATING,
  } IncrementalOp;
  // One replicated mutation: add/remove an entity, or replace the whole
  // rotating-secrets payload.
  struct Incremental {
    IncrementalOp op;
    ceph::buffer::list rotating_bl;  // if SET_ROTATING. otherwise,
    EntityName name;
    EntityAuth auth;
    void encode(ceph::buffer::list& bl) const {
      using ceph::encode;
      __u8 struct_v = 1;
      encode(struct_v, bl);
      __u32 _op = (__u32)op;
      encode(_op, bl);
      if (op == AUTH_INC_SET_ROTATING) {
        encode(rotating_bl, bl);
      } else {
        encode(name, bl);
        encode(auth, bl);
      }
    }
    void decode(ceph::buffer::list::const_iterator& bl) {
      using ceph::decode;
      __u8 struct_v;
      decode(struct_v, bl);
      __u32 _op;
      decode(_op, bl);
      op = (IncrementalOp)_op;
      ceph_assert(op >= AUTH_INC_NOP && op <= AUTH_INC_SET_ROTATING);
      if (op == AUTH_INC_SET_ROTATING) {
        decode(rotating_bl, bl);
      } else {
        decode(name, bl);
        decode(auth, bl);
      }
    }
  };
  void apply_incremental(Incremental& inc) {
    switch (inc.op) {
    case AUTH_INC_ADD:
      add_auth(inc.name, inc.auth);
      break;
    case AUTH_INC_DEL:
      remove_secret(inc.name);
      break;
    case AUTH_INC_SET_ROTATING:
      decode_rotating(inc.rotating_bl);
      break;
    case AUTH_INC_NOP:
      break;
    default:
      ceph_abort();
    }
  }
};
WRITE_CLASS_ENCODER(KeyServerData)
WRITE_CLASS_ENCODER(KeyServerData::Incremental)
// Thread-safe cephx key server: wraps a KeyServerData store behind a mutex
// and issues session keys/tickets for services.
class KeyServer : public KeyStore {
  CephContext *cct;
  KeyServerData data;
  // Pending keys observed in use, keyed by entity (see note_used_pending_key).
  std::map<EntityName, CryptoKey> used_pending_keys;
  mutable ceph::mutex lock;  // guards data and used_pending_keys
  int _rotate_secret(uint32_t service_id, KeyServerData &pending_data);
  void _dump_rotating_secrets();
  int _build_session_auth_info(uint32_t service_id,
                               const AuthTicket& parent_ticket,
                               CephXSessionAuthInfo& info,
                               double ttl);
  bool _get_service_caps(const EntityName& name, uint32_t service_id,
                         AuthCapsInfo& caps) const;
public:
  KeyServer(CephContext *cct_, KeyRing *extra_secrets);
  bool generate_secret(CryptoKey& secret);
  bool get_secret(const EntityName& name, CryptoKey& secret) const override;
  bool get_auth(const EntityName& name, EntityAuth& auth) const;
  bool get_caps(const EntityName& name, const std::string& type, AuthCapsInfo& caps) const;
  bool get_active_rotating_secret(const EntityName& name, CryptoKey& secret) const;
  void note_used_pending_key(const EntityName& name, const CryptoKey& key);
  void clear_used_pending_keys();
  std::map<EntityName,CryptoKey> get_used_pending_keys();
  int start_server();
  void rotate_timeout(double timeout);
  void dump();
  int build_session_auth_info(uint32_t service_id,
                              const AuthTicket& parent_ticket,
                              CephXSessionAuthInfo& info);
  int build_session_auth_info(uint32_t service_id,
                              const AuthTicket& parent_ticket,
                              const CryptoKey& service_secret,
                              uint64_t secret_id,
                              CephXSessionAuthInfo& info);
  /* get current secret for specific service type */
  bool get_service_secret(uint32_t service_id, CryptoKey& secret,
                          uint64_t& secret_id, double& ttl) const;
  bool get_service_secret(uint32_t service_id, uint64_t secret_id,
                          CryptoKey& secret) const override;
  bool generate_secret(EntityName& name, CryptoKey& secret);
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    encode(data, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    std::scoped_lock l{lock};
    using ceph::decode;
    decode(data, bl);
  }
  bool contains(const EntityName& name) const;
  int encode_secrets(ceph::Formatter *f, std::stringstream *ds) const;
  void encode_formatted(std::string label, ceph::Formatter *f, ceph::buffer::list &bl);
  void encode_plaintext(ceph::buffer::list &bl);
  int list_secrets(std::stringstream& ds) const {
    return encode_secrets(NULL, &ds);
  }
  version_t get_ver() const {
    std::scoped_lock l{lock};
    return data.version;
  }
  void clear_secrets() {
    std::scoped_lock l{lock};
    data.clear_secrets();
  }
  void apply_data_incremental(KeyServerData::Incremental& inc) {
    std::scoped_lock l{lock};
    data.apply_incremental(inc);
  }
  void set_ver(version_t ver) {
    std::scoped_lock l{lock};
    data.version = ver;
  }
  void add_auth(const EntityName& name, EntityAuth& auth) {
    std::scoped_lock l{lock};
    data.add_auth(name, auth);
  }
  void remove_secret(const EntityName& name) {
    std::scoped_lock l{lock};
    data.remove_secret(name);
  }
  // NOTE(review): unlike the other accessors, no lock is taken here --
  // confirm callers tolerate a racy answer.
  bool has_secrets() {
    auto b = data.secrets_begin();
    return (b != data.secrets_end());
  }
  int get_num_secrets() {
    std::scoped_lock l{lock};
    return data.secrets.size();
  }
  void clone_to(KeyServerData& dst) const {
    std::scoped_lock l{lock};
    dst = data;
  }
  void export_keyring(KeyRing& keyring) {
    std::scoped_lock l{lock};
    for (auto p = data.secrets.begin(); p != data.secrets.end(); ++p) {
      keyring.add(p->first, p->second);
    }
  }
  bool prepare_rotating_update(ceph::buffer::list& rotating_bl);
  bool get_rotating_encrypted(const EntityName& name, ceph::buffer::list& enc_bl) const;
  ceph::mutex& get_lock() const { return lock; }
  bool get_service_caps(const EntityName& name, uint32_t service_id,
                        AuthCapsInfo& caps) const;
  std::map<EntityName, EntityAuth>::iterator secrets_begin()
  { return data.secrets_begin(); }
  std::map<EntityName, EntityAuth>::iterator secrets_end()
  { return data.secrets_end(); }
};
#endif
| 9,074 | 27.009259 | 91 |
h
|
null |
ceph-main/src/auth/cephx/CephxProtocol.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2009-2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "CephxProtocol.h"
#include "common/Clock.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include "common/debug.h"
#include "include/buffer.h"
#define dout_subsys ceph_subsys_auth
#undef dout_prefix
#define dout_prefix *_dout << "cephx: "
using std::dec;
using std::hex;
using std::vector;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
// Derive the 64-bit challenge response: encrypt the (server, client)
// challenge pair with the shared secret and XOR-fold the ciphertext into
// *key.  On encryption failure `error` is set and *key is left untouched.
void cephx_calc_client_server_challenge(CephContext *cct, CryptoKey& secret, uint64_t server_challenge,
                                        uint64_t client_challenge, uint64_t *key, std::string &error)
{
  CephXChallengeBlob b;
  b.server_challenge = server_challenge;
  b.client_challenge = client_challenge;
  bufferlist enc;
  if (encode_encrypt(cct, b, secret, enc, error))
    return;
  // Fold the ciphertext into 64 bits, 8 bytes at a time (little-endian);
  // any trailing partial word is ignored.
  uint64_t k = 0;
  const ceph_le64 *p = (const ceph_le64 *)enc.c_str();
  for (int pos = 0; pos + sizeof(k) <= enc.length(); pos+=sizeof(k), p++)
    k ^= *p;
  *key = k;
}
/*
* Authentication
*/
// Seal a service ticket: encrypt {session key, ticket} with the service's
// rotating secret and record which secret id was used, so the service can
// later decrypt it.  Returns false if the service secret is missing/invalid
// or encryption fails.
bool cephx_build_service_ticket_blob(CephContext *cct, CephXSessionAuthInfo& info,
                                     CephXTicketBlob& blob)
{
  CephXServiceTicketInfo ticket_info;
  ticket_info.session_key = info.session_key;
  ticket_info.ticket = info.ticket;
  ticket_info.ticket.caps = info.ticket.caps;
  ldout(cct, 10) << "build_service_ticket service "
                 << ceph_entity_type_name(info.service_id)
                 << " secret_id " << info.secret_id
                 << " ticket_info.ticket.name="
                 << ticket_info.ticket.name.to_str()
                 << " ticket.global_id " << info.ticket.global_id << dendl;
  blob.secret_id = info.secret_id;
  std::string error;
  if (!info.service_secret.get_secret().length())
    error = "invalid key";  // Bad key?
  else
    encode_encrypt_enc_bl(cct, ticket_info, info.service_secret, blob.blob, error);
  if (!error.empty()) {
    ldout(cct, -1) << "cephx_build_service_ticket_blob failed with error "
                   << error << dendl;
    return false;
  }
  return true;
}
/*
* AUTH SERVER: authenticate
*
* Authenticate principal, respond with AuthServiceTicketInfo
*
* {session key, validity}^principal_secret
* {principal_ticket, session key}^service_secret ... "enc_ticket"
*/
/*
 * AUTH SERVER: encode the reply to a ticket request.
 *
 * For each requested service: {session key, validity} encrypted with the
 * principal's secret, followed by the ticket blob -- optionally encrypted
 * a second time with `ticket_enc_key` (used when the reply travels over a
 * session that already has a key).  The wire layout is:
 *   u8 version, u32 count, then per ticket:
 *   service_id, u8 version, encrypted CephXServiceTicket,
 *   u8 encrypted-flag, ticket blob (possibly encrypted).
 */
bool cephx_build_service_ticket_reply(CephContext *cct,
                                      CryptoKey& principal_secret,
                                      vector<CephXSessionAuthInfo> ticket_info_vec,
                                      bool should_encrypt_ticket,
                                      CryptoKey& ticket_enc_key,
                                      bufferlist& reply)
{
  __u8 service_ticket_reply_v = 1;
  using ceph::encode;
  encode(service_ticket_reply_v, reply);
  uint32_t num = ticket_info_vec.size();
  encode(num, reply);
  ldout(cct, 10) << "build_service_ticket_reply encoding " << num
                 << " tickets with secret " << principal_secret << dendl;
  for (auto ticket_iter = ticket_info_vec.begin();
       ticket_iter != ticket_info_vec.end();
       ++ticket_iter) {
    CephXSessionAuthInfo& info = *ticket_iter;
    encode(info.service_id, reply);
    __u8 service_ticket_v = 1;
    encode(service_ticket_v, reply);
    // Part A: session key + validity, readable only by the principal.
    CephXServiceTicket msg_a;
    msg_a.session_key = info.session_key;
    msg_a.validity = info.validity;
    std::string error;
    if (encode_encrypt(cct, msg_a, principal_secret, reply, error)) {
      ldout(cct, -1) << "error encoding encrypted: " << error << dendl;
      return false;
    }
    // Part B: the opaque ticket blob the principal will later present to
    // the service.
    bufferlist service_ticket_bl;
    CephXTicketBlob blob;
    if (!cephx_build_service_ticket_blob(cct, info, blob)) {
      return false;
    }
    encode(blob, service_ticket_bl);
    ldout(cct, 30) << "service_ticket_blob is ";
    service_ticket_bl.hexdump(*_dout);
    *_dout << dendl;
    encode((__u8)should_encrypt_ticket, reply);
    if (should_encrypt_ticket) {
      if (encode_encrypt(cct, service_ticket_bl, ticket_enc_key, reply, error)) {
        ldout(cct, -1) << "error encoding encrypted ticket: " << error << dendl;
        return false;
      }
    } else {
      encode(service_ticket_bl, reply);
    }
  }
  return true;
}
/*
* PRINCIPAL: verify our attempt to authenticate succeeded. fill out
* this ServiceTicket with the result.
*/
bool CephXTicketHandler::verify_service_ticket_reply(
  CryptoKey& secret,
  bufferlist::const_iterator& indata)
{
  using ceph::decode;
  try {
    __u8 service_ticket_v;
    decode(service_ticket_v, indata);
    // part A: decrypt {session key, validity} with our secret
    CephXServiceTicket msg_a;
    std::string error;
    if (decode_decrypt(cct, msg_a, secret, indata, error)) {
      ldout(cct, 0) << __func__ << " failed decode_decrypt, error is: " << error
		    << dendl;
      return false;
    }
    // part B: the opaque ticket blob, possibly wrapped in our current
    // session key (when the server re-issued over an existing session)
    __u8 ticket_enc;
    decode(ticket_enc, indata);
    bufferlist service_ticket_bl;
    if (ticket_enc) {
      ldout(cct, 10) << __func__ << " got encrypted ticket" << dendl;
      std::string error;
      if (decode_decrypt(cct, service_ticket_bl, session_key, indata, error)) {
	ldout(cct, 10) << __func__ << " decode_decrypt failed "
		       << "with " << error << dendl;
	return false;
      }
    } else {
      decode(service_ticket_bl, indata);
    }
    auto iter = service_ticket_bl.cbegin();
    decode(ticket, iter);
    ldout(cct, 10) << __func__ << " ticket.secret_id=" << ticket.secret_id
		   << dendl;
    ldout(cct, 10) << __func__ << " service "
		   << ceph_entity_type_name(service_id)
		   << " secret_id " << ticket.secret_id
		   << " session_key " << msg_a.session_key
		   << " validity=" << msg_a.validity << dendl;
    session_key = msg_a.session_key;
    if (!msg_a.validity.is_zero()) {
      // schedule renewal at 3/4 of the ticket lifetime
      expires = ceph_clock_now();
      expires += msg_a.validity;
      renew_after = expires;
      renew_after -= ((double)msg_a.validity.sec() / 4);
      ldout(cct, 10) << __func__ << " ticket expires=" << expires
		     << " renew_after=" << renew_after << dendl;
    }
    have_key_flag = true;
    return true;
  } catch (ceph::buffer::error& e) {
    ldout(cct, 1) << __func__ << " decode error: " << e.what() << dendl;
    return false;
  }
}
// Return whether we currently hold a valid service key.  A key that has
// passed its expiry is demoted to "not held" as a side effect.
bool CephXTicketHandler::have_key()
{
  if (!have_key_flag) {
    return false;
  }
  have_key_flag = ceph_clock_now() < expires;
  return have_key_flag;
}
// A key is needed when we hold none, or when the one we hold carries an
// expiry and has reached its renewal deadline.
bool CephXTicketHandler::need_key() const
{
  if (!have_key_flag)
    return true;
  return !expires.is_zero() && ceph_clock_now() >= renew_after;
}
// Do we hold a valid key for the given service?
bool CephXTicketManager::have_key(uint32_t service_id)
{
  auto found = tickets_map.find(service_id);
  return found != tickets_map.end() && found->second.have_key();
}
// Is a (new) key required for the given service?  Services we have no
// handler for trivially need one.
bool CephXTicketManager::need_key(uint32_t service_id) const
{
  auto found = tickets_map.find(service_id);
  return found == tickets_map.end() || found->second.need_key();
}
// Update the 'have'/'need' service-id bitmasks for one service bit.
// 'service_id' is a single CEPH_ENTITY_TYPE_* bit; with no handler for it
// we clearly need a key and don't have one.
void CephXTicketManager::set_have_need_key(uint32_t service_id, uint32_t& have, uint32_t& need)
{
  auto iter = tickets_map.find(service_id);
  if (iter == tickets_map.end()) {
    have &= ~service_id;
    need |= service_id;
    ldout(cct, 10) << "set_have_need_key no handler for service "
		   << ceph_entity_type_name(service_id) << dendl;
    return;
  }
  //ldout(cct, 10) << "set_have_need_key service " << ceph_entity_type_name(service_id)
  //<< " (" << service_id << ")"
  //<< " need=" << iter->second.need_key() << " have=" << iter->second.have_key() << dendl;
  if (iter->second.need_key())
    need |= service_id;
  else
    need &= ~service_id;
  if (iter->second.have_key())
    have |= service_id;
  else
    have &= ~service_id;
}
// Drop any cached key for the given service so it will be re-requested.
void CephXTicketManager::invalidate_ticket(uint32_t service_id)
{
  if (auto found = tickets_map.find(service_id); found != tickets_map.end()) {
    found->second.invalidate_ticket();
  }
}
/*
* PRINCIPAL: verify our attempt to authenticate succeeded. fill out
* this ServiceTicket with the result.
*/
/*
 * Decode and verify a service-ticket reply containing 'num' tickets.
 * Each ticket is handed to (and stored in) the per-service handler.
 * Returns false on any decode or verification failure.
 *
 * Fix: the original logged decode failures but fell through and kept
 * going with an undecoded header / service type; we must bail out
 * instead of dispatching on garbage (e.g. get_handler(0)).
 */
bool CephXTicketManager::verify_service_ticket_reply(CryptoKey& secret,
						     bufferlist::const_iterator& indata)
{
  __u8 service_ticket_reply_v;
  uint32_t num = 0;
  try {
    decode(service_ticket_reply_v, indata);
    decode(num, indata);
  } catch (ceph::buffer::error& e) {
    ldout(cct, 10) << __func__ << " failed to decode ticket v or count: "
		   << e.what() << dendl;
    return false;
  }
  ldout(cct, 10) << "verify_service_ticket_reply got " << num << " keys" << dendl;
  for (int i=0; i<(int)num; i++) {
    uint32_t type = 0;
    try {
      decode(type, indata);
    } catch (ceph::buffer::error& e) {
      ldout(cct, 10) << __func__ << " failed to decode ticket type: " << e.what()
		     << dendl;
      return false;
    }
    ldout(cct, 10) << "got key for service_id " << ceph_entity_type_name(type) << dendl;
    CephXTicketHandler& handler = get_handler(type);
    if (!handler.verify_service_ticket_reply(secret, indata)) {
      return false;
    }
    handler.service_id = type;
  }
  return true;
}
/*
* PRINCIPAL: build authorizer to access the service.
*
* ticket, {timestamp}^session_key
*/
// Build an authorizer for this service: the plaintext header (version,
// global_id, service_id, ticket blob) followed by a CephXAuthorize message
// encrypted with the session key.  Returns a heap-allocated authorizer the
// caller owns, or 0 on encryption failure.
CephXAuthorizer *CephXTicketHandler::build_authorizer(uint64_t global_id) const
{
  CephXAuthorizer *a = new CephXAuthorizer(cct);
  a->session_key = session_key;
  cct->random()->get_bytes((char*)&a->nonce, sizeof(a->nonce));
  __u8 authorizer_v = 1; // see AUTH_MODE_* in Auth.h
  encode(authorizer_v, a->bl);
  encode(global_id, a->bl);
  encode(service_id, a->bl);
  encode(ticket, a->bl);
  // keep the unencrypted prefix so add_challenge() can rebuild bl later
  a->base_bl = a->bl;
  CephXAuthorize msg;
  msg.nonce = a->nonce;
  std::string error;
  if (encode_encrypt(cct, msg, session_key, a->bl, error)) {
    ldout(cct, 0) << "failed to encrypt authorizer: " << error << dendl;
    delete a;
    return 0;
  }
  return a;
}
/*
* PRINCIPAL: build authorizer to access the service.
*
* ticket, {timestamp}^session_key
*/
// Build an authorizer for the given service via its cached ticket handler;
// returns NULL (after logging) when no handler exists for that service.
CephXAuthorizer *CephXTicketManager::build_authorizer(uint32_t service_id) const
{
  auto found = tickets_map.find(service_id);
  if (found != tickets_map.end()) {
    return found->second.build_authorizer(global_id);
  }
  ldout(cct, 0) << "no TicketHandler for service "
		<< ceph_entity_type_name(service_id) << dendl;
  return NULL;
}
// Walk each service bit in 'mask' and recompute the have/need bitmasks.
// 'need' is rebuilt from scratch; bits of 'have' outside 'mask' are
// preserved (only the bits we visit are updated).
void CephXTicketManager::validate_tickets(uint32_t mask, uint32_t& have, uint32_t& need)
{
  uint32_t i;
  need = 0;
  for (i = 1; i<=mask; i<<=1) {
    if (mask & i) {
      set_have_need_key(i, have, need);
    }
  }
  ldout(cct, 10) << "validate_tickets want " << mask << " have " << have
		 << " need " << need << dendl;
}
// Decrypt an opaque ticket blob into 'ticket_info' using the service
// secret identified by the blob's secret_id.  A secret_id of -1 selects
// this daemon's own (general) key rather than a rotating service secret.
// Returns false if the blob is empty, the secret is unknown, or
// decryption fails.
bool cephx_decode_ticket(CephContext *cct, KeyStore *keys,
			 uint32_t service_id,
			 const CephXTicketBlob& ticket_blob,
			 CephXServiceTicketInfo& ticket_info)
{
  uint64_t secret_id = ticket_blob.secret_id;
  CryptoKey service_secret;
  if (!ticket_blob.blob.length()) {
    return false;
  }
  if (secret_id == (uint64_t)-1) {
    if (!keys->get_secret(cct->_conf->name, service_secret)) {
      ldout(cct, 0) << "ceph_decode_ticket could not get general service secret for service_id="
	      << ceph_entity_type_name(service_id) << " secret_id=" << secret_id << dendl;
      return false;
    }
  } else {
    if (!keys->get_service_secret(service_id, secret_id, service_secret)) {
      ldout(cct, 0) << "ceph_decode_ticket could not get service secret for service_id="
	      << ceph_entity_type_name(service_id) << " secret_id=" << secret_id << dendl;
      return false;
    }
  }
  std::string error;
  decode_decrypt_enc_bl(cct, ticket_info, service_secret, ticket_blob.blob, error);
  if (!error.empty()) {
    ldout(cct, 0) << "ceph_decode_ticket could not decrypt ticket info. error:"
	    << error << dendl;
    return false;
  }
  return true;
}
/*
* SERVICE: verify authorizer and generate reply authorizer
*
* {timestamp + 1}^session_key
*/
/*
 * SERVICE side: verify a client's authorizer.
 *
 * Decodes the plaintext header, decrypts the ticket blob with the proper
 * service secret, checks the declared global_id against the ticket, then
 * decrypts the CephXAuthorize payload with the session key.  If 'challenge'
 * is supplied and the client has not yet answered it, a fresh server
 * challenge is encrypted into *reply_bl and false is returned (the client
 * must retry with challenge+1).  On full success the reply authorizer
 * {nonce+1} — and optionally a freshly generated connection secret — is
 * encrypted into *reply_bl.
 */
bool cephx_verify_authorizer(CephContext *cct, const KeyStore& keys,
			     bufferlist::const_iterator& indata,
			     size_t connection_secret_required_len,
			     CephXServiceTicketInfo& ticket_info,
			     std::unique_ptr<AuthAuthorizerChallenge> *challenge,
			     std::string *connection_secret,
			     bufferlist *reply_bl)
{
  __u8 authorizer_v;
  uint32_t service_id;
  uint64_t global_id;
  CryptoKey service_secret;
  // ticket blob
  CephXTicketBlob ticket;
  try {
    decode(authorizer_v, indata);
    decode(global_id, indata);
    decode(service_id, indata);
    decode(ticket, indata);
  } catch (ceph::buffer::end_of_buffer &e) {
    // Unable to decode!
    return false;
  }
  ldout(cct, 10) << "verify_authorizer decrypted service "
	   << ceph_entity_type_name(service_id)
	   << " secret_id=" << ticket.secret_id << dendl;
  // secret_id == -1 means the client used this daemon's own key rather
  // than a rotating service secret
  if (ticket.secret_id == (uint64_t)-1) {
    EntityName name;
    name.set_type(service_id);
    if (!keys.get_secret(name, service_secret)) {
      ldout(cct, 0) << "verify_authorizer could not get general service secret for service "
	      << ceph_entity_type_name(service_id) << " secret_id=" << ticket.secret_id << dendl;
      return false;
    }
  } else {
    if (!keys.get_service_secret(service_id, ticket.secret_id, service_secret)) {
      ldout(cct, 0) << "verify_authorizer could not get service secret for service "
	      << ceph_entity_type_name(service_id) << " secret_id=" << ticket.secret_id << dendl;
      if (cct->_conf->auth_debug && ticket.secret_id == 0)
	ceph_abort_msg("got secret_id=0");
      return false;
    }
  }
  std::string error;
  if (!service_secret.get_secret().length())
    error = "invalid key";  // Bad key?
  else
    decode_decrypt_enc_bl(cct, ticket_info, service_secret, ticket.blob, error);
  if (!error.empty()) {
    ldout(cct, 0) << "verify_authorizer could not decrypt ticket info: error: "
      << error << dendl;
    return false;
  }
  // the declared global_id must match the one baked into the ticket
  if (ticket_info.ticket.global_id != global_id) {
    ldout(cct, 0) << "verify_authorizer global_id mismatch: declared id=" << global_id
	    << " ticket_id=" << ticket_info.ticket.global_id << dendl;
    return false;
  }
  ldout(cct, 10) << "verify_authorizer global_id=" << global_id << dendl;
  // CephXAuthorize
  CephXAuthorize auth_msg;
  if (decode_decrypt(cct, auth_msg, ticket_info.session_key, indata, error)) {
    ldout(cct, 0) << "verify_authorizercould not decrypt authorize request with error: "
	    << error << dendl;
    return false;
  }
  if (challenge) {
    auto *c = static_cast<CephXAuthorizeChallenge*>(challenge->get());
    if (!auth_msg.have_challenge || !c) {
      // first round: issue a new server challenge and fail this attempt;
      // the client is expected to retry with server_challenge + 1
      c = new CephXAuthorizeChallenge;
      challenge->reset(c);
      cct->random()->get_bytes((char*)&c->server_challenge, sizeof(c->server_challenge));
      ldout(cct,10) << __func__ << " adding server_challenge " << c->server_challenge
		    << dendl;
      encode_encrypt_enc_bl(cct, *c, ticket_info.session_key, *reply_bl, error);
      if (!error.empty()) {
	ldout(cct, 10) << "verify_authorizer: encode_encrypt error: " << error << dendl;
	return false;
      }
      return false;
    }
    ldout(cct, 10) << __func__ << " got server_challenge+1 "
		   << auth_msg.server_challenge_plus_one
		   << " expecting " << c->server_challenge + 1 << dendl;
    if (c->server_challenge + 1 != auth_msg.server_challenge_plus_one) {
      return false;
    }
  }
  /*
   * Reply authorizer:
   *  {timestamp + 1}^session_key
   */
  CephXAuthorizeReply reply;
  // reply.trans_id = auth_msg.trans_id;
  reply.nonce_plus_one = auth_msg.nonce + 1;
  if (connection_secret) {
    // generate a connection secret
    connection_secret->resize(connection_secret_required_len);
    if (connection_secret_required_len) {
#ifdef WITH_SEASTAR
      std::random_device rd;
      std::generate_n(connection_secret->data(),
		      connection_secret_required_len,
		      std::default_random_engine{rd()});
#else
      cct->random()->get_bytes(connection_secret->data(),
			       connection_secret_required_len);
#endif
    }
    reply.connection_secret = *connection_secret;
  }
  if (encode_encrypt(cct, reply, ticket_info.session_key, *reply_bl, error)) {
    ldout(cct, 10) << "verify_authorizer: encode_encrypt error: " << error << dendl;
    return false;
  }
  ldout(cct, 10) << "verify_authorizer ok nonce " << hex << auth_msg.nonce << dec
	   << " reply_bl.length()=" << reply_bl->length() << dendl;
  return true;
}
// CLIENT side: verify the service's reply authorizer.  Decrypts the reply
// with our session key and checks the service echoed nonce + 1, proving it
// holds the session key.  Copies out the connection secret if one was sent.
bool CephXAuthorizer::verify_reply(bufferlist::const_iterator& indata,
				   std::string *connection_secret)
{
  CephXAuthorizeReply reply;
  std::string error;
  if (decode_decrypt(cct, reply, session_key, indata, error)) {
      ldout(cct, 0) << "verify_reply couldn't decrypt with error: " << error << dendl;
      return false;
  }
  uint64_t expect = nonce + 1;
  if (expect != reply.nonce_plus_one) {
    ldout(cct, 0) << "verify_authorizer_reply bad nonce got " << reply.nonce_plus_one << " expected " << expect
	    << " sent " << nonce << dendl;
    return false;
  }
  if (connection_secret &&
      reply.connection_secret.size()) {
    *connection_secret = reply.connection_secret;
  }
  return true;
}
// Rebuild this authorizer's payload to answer a server challenge: restart
// from the unencrypted prefix (base_bl), decrypt the challenge with the
// session key, and re-encrypt a CephXAuthorize carrying
// server_challenge + 1.  An empty challenge just re-encodes the plain
// authorize message.
bool CephXAuthorizer::add_challenge(CephContext *cct,
				    const bufferlist& challenge)
{
  bl = base_bl;
  CephXAuthorize msg;
  msg.nonce = nonce;
  auto p = challenge.begin();
  if (!p.end()) {
    std::string error;
    CephXAuthorizeChallenge ch;
    decode_decrypt_enc_bl(cct, ch, session_key, challenge, error);
    if (!error.empty()) {
      ldout(cct, 0) << "failed to decrypt challenge (" << challenge.length() << " bytes): "
		    << error << dendl;
      return false;
    }
    msg.have_challenge = true;
    msg.server_challenge_plus_one = ch.server_challenge + 1;
  }
  std::string error;
  if (encode_encrypt(cct, msg, session_key, bl, error)) {
    ldout(cct, 0) << __func__ << " failed to encrypt authorizer: " << error << dendl;
    return false;
  }
  return true;
}
| 17,859 | 28.520661 | 111 |
cc
|
null |
ceph-main/src/auth/cephx/CephxProtocol.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CEPHXPROTOCOL_H
#define CEPH_CEPHXPROTOCOL_H
/*
Ceph X protocol
See doc/dev/cephx.rst
*/
/* authenticate requests */
#define CEPHX_GET_AUTH_SESSION_KEY 0x0100
#define CEPHX_GET_PRINCIPAL_SESSION_KEY 0x0200
#define CEPHX_GET_ROTATING_KEY 0x0400
#define CEPHX_REQUEST_TYPE_MASK 0x0F00
#define CEPHX_CRYPT_ERR 1
#include "auth/Auth.h"
#include <errno.h>
#include <sstream>
#include "include/common_fwd.h"
/*
* Authentication
*/
// initial server -> client challenge
// First message of the handshake: a random server challenge the client
// must fold into its response (see cephx_calc_client_server_challenge).
struct CephXServerChallenge {
  uint64_t server_challenge;  // random nonce chosen by the auth server
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 1;
    encode(struct_v, bl);
    encode(server_challenge, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(server_challenge, bl);
  }
};
WRITE_CLASS_ENCODER(CephXServerChallenge)
// request/reply headers, for subsequent exchanges.
// Header prefixed to every cephx request (CEPHX_GET_* request types).
struct CephXRequestHeader {
  __u16 request_type;  // one of the CEPHX_* request-type constants above
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    encode(request_type, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    decode(request_type, bl);
  }
};
WRITE_CLASS_ENCODER(CephXRequestHeader)
// Header prefixed to every cephx response: echoes the request type and
// carries the status code of the operation.
struct CephXResponseHeader {
  uint16_t request_type;
  int32_t status;
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    encode(request_type, bl);
    encode(status, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    decode(request_type, bl);
    decode(status, bl);
  }
};
WRITE_CLASS_ENCODER(CephXResponseHeader)
// Opaque encrypted ticket as seen by the principal: the service can
// decrypt 'blob' with the rotating secret identified by 'secret_id'.
struct CephXTicketBlob {
  uint64_t secret_id;       // which rotating service secret encrypted 'blob'
  ceph::buffer::list blob;  // encrypted CephXServiceTicketInfo
  CephXTicketBlob() : secret_id(0) {}
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 1;
    encode(struct_v, bl);
    encode(secret_id, bl);
    encode(blob, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(secret_id, bl);
    decode(blob, bl);
  }
};
WRITE_CLASS_ENCODER(CephXTicketBlob)
// client -> server response to challenge
// client -> server response to challenge
struct CephXAuthenticate {
  uint64_t client_challenge;   // client's random nonce
  uint64_t key;                // challenge-response key (see cephx_calc_client_server_challenge)
  CephXTicketBlob old_ticket;  // prior auth ticket, for global_id reclaim
  uint32_t other_keys = 0;  // replaces CephXServiceTicketRequest
  bool old_ticket_may_be_omitted;  // decode-only: true for pre-v3 encodings
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 3;
    encode(struct_v, bl);
    encode(client_challenge, bl);
    encode(key, bl);
    encode(old_ticket, bl);
    encode(other_keys, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(client_challenge, bl);
    decode(key, bl);
    decode(old_ticket, bl);
    if (struct_v >= 2) {
      decode(other_keys, bl);
    }
    // v2 and v3 encodings are the same, but:
    // - some clients that send v1 or v2 don't populate old_ticket
    //   on reconnects (but do on renewals)
    // - any client that sends v3 or later is expected to populate
    //   old_ticket both on reconnects and renewals
    old_ticket_may_be_omitted = struct_v < 3;
  }
};
WRITE_CLASS_ENCODER(CephXAuthenticate)
// The pair of challenges that gets encrypted to derive the
// challenge-response key (no struct_v on the wire — raw fields only).
struct CephXChallengeBlob {
  uint64_t server_challenge, client_challenge;
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    encode(server_challenge, bl);
    encode(client_challenge, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    decode(server_challenge, bl);
    decode(client_challenge, bl);
  }
};
WRITE_CLASS_ENCODER(CephXChallengeBlob)
void cephx_calc_client_server_challenge(CephContext *cct,
CryptoKey& secret, uint64_t server_challenge, uint64_t client_challenge,
uint64_t *key, std::string &error);
/*
* getting service tickets
*/
// Server-side bundle of everything needed to mint one service ticket.
struct CephXSessionAuthInfo {
  uint32_t service_id;        // CEPH_ENTITY_TYPE_* the ticket is for
  uint64_t secret_id;         // rotating secret id used to seal the ticket
  AuthTicket ticket;
  CryptoKey session_key;      // key shared between principal and service
  CryptoKey service_secret;   // the service's rotating secret
  utime_t validity;           // ticket lifetime
};
extern bool cephx_build_service_ticket_blob(CephContext *cct,
CephXSessionAuthInfo& ticket_info, CephXTicketBlob& blob);
extern void cephx_build_service_ticket_request(CephContext *cct,
uint32_t keys,
ceph::buffer::list& request);
extern bool cephx_build_service_ticket_reply(CephContext *cct,
CryptoKey& principal_secret,
std::vector<CephXSessionAuthInfo> ticket_info,
bool should_encrypt_ticket,
CryptoKey& ticket_enc_key,
ceph::buffer::list& reply);
// Legacy request for service tickets: a bitmask of wanted service types.
// Superseded by CephXAuthenticate::other_keys for newer clients.
struct CephXServiceTicketRequest {
  uint32_t keys;  // bitmask of CEPH_ENTITY_TYPE_* services
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 1;
    encode(struct_v, bl);
    encode(keys, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(keys, bl);
  }
};
WRITE_CLASS_ENCODER(CephXServiceTicketRequest)
/*
* Authorize
*/
// Service -> client authorizer reply: {nonce + 1} proves the service holds
// the session key; v2 additionally carries a per-connection secret.
// Fix: dropped a dead `struct_v = 2;` reassignment inside the encode()
// branch that was only reachable when struct_v was already 2.
struct CephXAuthorizeReply {
  uint64_t nonce_plus_one;        // client's nonce incremented by one
  std::string connection_secret;  // optional (v2) per-connection secret
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    // only bump to v2 when there is a connection secret to carry
    __u8 struct_v = 1;
    if (connection_secret.size()) {
      struct_v = 2;
    }
    encode(struct_v, bl);
    encode(nonce_plus_one, bl);
    if (struct_v >= 2) {
      encode(connection_secret, bl);
    }
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(nonce_plus_one, bl);
    if (struct_v >= 2) {
      decode(connection_secret, bl);
    }
  }
};
WRITE_CLASS_ENCODER(CephXAuthorizeReply)
// Client-side authorizer for a service connection; wraps the encoded
// ticket + encrypted nonce and can verify the service's reply or answer
// a server challenge.
struct CephXAuthorizer : public AuthAuthorizer {
private:
  CephContext *cct;
public:
  uint64_t nonce;              // random nonce; reply must echo nonce + 1
  ceph::buffer::list base_bl;  // unencrypted prefix, reused by add_challenge()
  explicit CephXAuthorizer(CephContext *cct_)
    : AuthAuthorizer(CEPH_AUTH_CEPHX), cct(cct_), nonce(0) {}
  bool build_authorizer();
  bool verify_reply(ceph::buffer::list::const_iterator& reply,
		    std::string *connection_secret) override;
  bool add_challenge(CephContext *cct, const ceph::buffer::list& challenge) override;
};
/*
* TicketHandler
*/
// Per-service ticket state on the client: session key, the opaque ticket
// blob, and expiry/renewal bookkeeping.
struct CephXTicketHandler {
  uint32_t service_id;
  CryptoKey session_key;
  CephXTicketBlob ticket;        // opaque to us
  utime_t renew_after, expires;  // renew_after = expires - validity/4
  bool have_key_flag;            // do we believe we hold a valid key?
  CephXTicketHandler(CephContext *cct_, uint32_t service_id_)
    : service_id(service_id_), have_key_flag(false), cct(cct_) { }
  // to build our ServiceTicket
  bool verify_service_ticket_reply(CryptoKey& principal_secret,
				 ceph::buffer::list::const_iterator& indata);
  // to access the service
  CephXAuthorizer *build_authorizer(uint64_t global_id) const;
  bool have_key();
  bool need_key() const;
  void invalidate_ticket() {
    have_key_flag = false;
  }
private:
  CephContext *cct;
};
// Collection of per-service ticket handlers for one authenticated entity,
// keyed by service id (CEPH_ENTITY_TYPE_*).
struct CephXTicketManager {
  typedef std::map<uint32_t, CephXTicketHandler> tickets_map_t;
  tickets_map_t tickets_map;
  uint64_t global_id;  // this entity's global id, baked into authorizers
  explicit CephXTicketManager(CephContext *cct_) : global_id(0), cct(cct_) {}
  bool verify_service_ticket_reply(CryptoKey& principal_secret,
				 ceph::buffer::list::const_iterator& indata);
  // Return the handler for 'type', creating an empty one on first use.
  CephXTicketHandler& get_handler(uint32_t type) {
    tickets_map_t::iterator i = tickets_map.find(type);
    if (i != tickets_map.end())
      return i->second;
    CephXTicketHandler newTicketHandler(cct, type);
    std::pair < tickets_map_t::iterator, bool > res =
	tickets_map.insert(std::make_pair(type, newTicketHandler));
    ceph_assert(res.second);
    return res.first->second;
  }
  CephXAuthorizer *build_authorizer(uint32_t service_id) const;
  bool have_key(uint32_t service_id);
  bool need_key(uint32_t service_id) const;
  void set_have_need_key(uint32_t service_id, uint32_t& have, uint32_t& need);
  void validate_tickets(uint32_t mask, uint32_t& have, uint32_t& need);
  void invalidate_ticket(uint32_t service_id);
private:
  CephContext *cct;
};
/* A */
/* A: the part of a ticket reply encrypted with the principal's secret —
 * the session key and how long the ticket is valid. */
struct CephXServiceTicket {
  CryptoKey session_key;
  utime_t validity;
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 1;
    encode(struct_v, bl);
    encode(session_key, bl);
    encode(validity, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(session_key, bl);
    decode(validity, bl);
  }
};
WRITE_CLASS_ENCODER(CephXServiceTicket)
/* B */
/* B: the content of the opaque ticket blob, encrypted with the service's
 * secret — the principal's ticket plus the shared session key. */
struct CephXServiceTicketInfo {
  AuthTicket ticket;
  CryptoKey session_key;
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 1;
    encode(struct_v, bl);
    encode(ticket, bl);
    encode(session_key, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(ticket, bl);
    decode(session_key, bl);
  }
};
WRITE_CLASS_ENCODER(CephXServiceTicketInfo)
// Server-issued challenge a client must answer (with challenge + 1) when
// re-submitting an authorizer; guards against authorizer replay.
struct CephXAuthorizeChallenge : public AuthAuthorizerChallenge {
  uint64_t server_challenge;
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 1;
    encode(struct_v, bl);
    encode(server_challenge, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(server_challenge, bl);
  }
};
WRITE_CLASS_ENCODER(CephXAuthorizeChallenge)
// Encrypted payload of a client authorizer: the client's nonce and, when
// answering a server challenge (v2), server_challenge + 1.
struct CephXAuthorize {
  uint64_t nonce;
  bool have_challenge = false;             // v2: are we answering a challenge?
  uint64_t server_challenge_plus_one = 0;  // v2: challenge response
  void encode(ceph::buffer::list& bl) const {
    using ceph::encode;
    __u8 struct_v = 2;
    encode(struct_v, bl);
    encode(nonce, bl);
    encode(have_challenge, bl);
    encode(server_challenge_plus_one, bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    using ceph::decode;
    __u8 struct_v;
    decode(struct_v, bl);
    decode(nonce, bl);
    if (struct_v >= 2) {
      decode(have_challenge, bl);
      decode(server_challenge_plus_one, bl);
    }
  }
};
WRITE_CLASS_ENCODER(CephXAuthorize)
/*
* Decode an extract ticket
*/
bool cephx_decode_ticket(CephContext *cct, KeyStore *keys,
uint32_t service_id,
const CephXTicketBlob& ticket_blob,
CephXServiceTicketInfo& ticket_info);
/*
* Verify authorizer and generate reply authorizer
*/
extern bool cephx_verify_authorizer(
CephContext *cct,
const KeyStore& keys,
ceph::buffer::list::const_iterator& indata,
size_t connection_secret_required_len,
CephXServiceTicketInfo& ticket_info,
std::unique_ptr<AuthAuthorizerChallenge> *challenge,
std::string *connection_secret,
ceph::buffer::list *reply_bl);
/*
* encode+encrypt macros
*/
static constexpr uint64_t AUTH_ENC_MAGIC = 0xff009cad8826aa55ull;
// Decrypt 'bl_enc' with 'key', verify the AUTH_ENC_MAGIC marker, and
// decode a T from the plaintext.  On any failure 'error' is set and 't'
// is left unmodified (or partially decoded on a late decode throw).
template <typename T>
void decode_decrypt_enc_bl(CephContext *cct, T& t, CryptoKey key,
			   const ceph::buffer::list& bl_enc,
			   std::string &error)
{
  uint64_t magic;
  ceph::buffer::list bl;
  if (key.decrypt(cct, bl_enc, bl, &error) < 0)
    return;
  auto iter2 = bl.cbegin();
  __u8 struct_v;
  using ceph::decode;
  decode(struct_v, iter2);
  decode(magic, iter2);
  if (magic != AUTH_ENC_MAGIC) {
    std::ostringstream oss;
    oss << "bad magic in decode_decrypt, " << magic << " != " << AUTH_ENC_MAGIC;
    error = oss.str();
    return;
  }
  decode(t, iter2);
}
// Encode {struct_v, AUTH_ENC_MAGIC, t} and encrypt the result with 'key'
// into 'out'; 'error' is set on encryption failure.
template <typename T>
void encode_encrypt_enc_bl(CephContext *cct, const T& t, const CryptoKey& key,
			   ceph::buffer::list& out, std::string &error)
{
  ceph::buffer::list bl;
  __u8 struct_v = 1;
  using ceph::encode;
  encode(struct_v, bl);
  uint64_t magic = AUTH_ENC_MAGIC;
  encode(magic, bl);
  encode(t, bl);
  key.encrypt(cct, bl, out, &error);
}
// Pop an encrypted bufferlist off 'iter' and decrypt+decode a T from it.
// Returns 0 on success, CEPHX_CRYPT_ERR (with 'error' set) on failure.
template <typename T>
int decode_decrypt(CephContext *cct, T& t, const CryptoKey& key,
		    ceph::buffer::list::const_iterator& iter, std::string &error)
{
  ceph::buffer::list bl_enc;
  using ceph::decode;
  try {
    decode(bl_enc, iter);
    decode_decrypt_enc_bl(cct, t, key, bl_enc, error);
  }
  catch (ceph::buffer::error &e) {
    error = "error decoding block for decryption";
  }
  if (!error.empty())
    return CEPHX_CRYPT_ERR;
  return 0;
}
// Encrypt a T with 'key' and append the resulting bufferlist to 'out'.
// Returns 0 on success, CEPHX_CRYPT_ERR (with 'error' set) on failure.
template <typename T>
int encode_encrypt(CephContext *cct, const T& t, const CryptoKey& key,
		    ceph::buffer::list& out, std::string &error)
{
  using ceph::encode;
  ceph::buffer::list bl_enc;
  encode_encrypt_enc_bl(cct, t, key, bl_enc, error);
  if (!error.empty()){
    return CEPHX_CRYPT_ERR;
  }
  encode(bl_enc, out);
  return 0;
}
#endif
| 13,084 | 23.782197 | 85 |
h
|
null |
ceph-main/src/auth/cephx/CephxServiceHandler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "CephxServiceHandler.h"
#include "CephxProtocol.h"
#include "CephxKeyServer.h"
#include <errno.h>
#include <sstream>
#include "include/random.h"
#include "common/config.h"
#include "common/debug.h"
#define dout_subsys ceph_subsys_auth
#undef dout_prefix
#define dout_prefix *_dout << "cephx server " << entity_name << ": "
using std::dec;
using std::hex;
using std::vector;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
// Begin a cephx session: record whether the global_id is new or being
// reclaimed, generate a non-zero random server challenge, and encode it
// into *result_bl for the client.  Always returns 0.
int CephxServiceHandler::do_start_session(
  bool is_new_global_id,
  bufferlist *result_bl,
  AuthCapsInfo *caps)
{
  global_id_status = is_new_global_id ? global_id_status_t::NEW_PENDING :
					global_id_status_t::RECLAIM_PENDING;
  uint64_t min = 1; // always non-zero
  uint64_t max = std::numeric_limits<uint64_t>::max();
  server_challenge = ceph::util::generate_random_number<uint64_t>(min, max);
  ldout(cct, 10) << "start_session server_challenge "
		 << hex << server_challenge << dec << dendl;
  CephXServerChallenge ch;
  ch.server_challenge = server_challenge;
  encode(ch, *result_bl);
  return 0;
}
// Validate the old auth ticket presented in a CephXAuthenticate request
// and settle global_id_status accordingly.
//
// New global_ids must NOT present a ticket; reclaims must present a valid
// ticket whose global_id matches, unless the client is a legacy one
// (old_ticket_may_be_omitted) and auth_allow_insecure_global_id_reclaim
// permits an insecure reclaim.  Sets should_enc_ticket=true only for a
// fully verified reclaim, so the new ticket gets encrypted with the old
// session key.  Returns 0 on success, -EINVAL/-EACCES on rejection.
int CephxServiceHandler::verify_old_ticket(
  const CephXAuthenticate& req,
  CephXServiceTicketInfo& old_ticket_info,
  bool& should_enc_ticket)
{
  ldout(cct, 20) << " checking old_ticket: secret_id="
		 << req.old_ticket.secret_id
		 << " len=" << req.old_ticket.blob.length()
		 << ", old_ticket_may_be_omitted="
		 << req.old_ticket_may_be_omitted << dendl;
  ceph_assert(global_id_status != global_id_status_t::NONE);
  if (global_id_status == global_id_status_t::NEW_PENDING) {
    // old ticket is not needed
    if (req.old_ticket.blob.length()) {
      ldout(cct, 0) << " superfluous ticket presented" << dendl;
      return -EINVAL;
    }
    if (req.old_ticket_may_be_omitted) {
      ldout(cct, 10) << " new global_id " << global_id
		     << " (unexposed legacy client)" << dendl;
      global_id_status = global_id_status_t::NEW_NOT_EXPOSED;
    } else {
      ldout(cct, 10) << " new global_id " << global_id << dendl;
      global_id_status = global_id_status_t::NEW_OK;
    }
    return 0;
  }
  if (!req.old_ticket.blob.length()) {
    // old ticket is needed but not presented
    if (cct->_conf->auth_allow_insecure_global_id_reclaim &&
	req.old_ticket_may_be_omitted) {
      ldout(cct, 10) << " allowing reclaim of global_id " << global_id
		     << " with no ticket presented (legacy client, auth_allow_insecure_global_id_reclaim=true)"
		     << dendl;
      global_id_status = global_id_status_t::RECLAIM_INSECURE;
      return 0;
    }
    ldout(cct, 0) << " attempt to reclaim global_id " << global_id
		  << " without presenting ticket" << dendl;
    return -EACCES;
  }
  if (!cephx_decode_ticket(cct, key_server, CEPH_ENTITY_TYPE_AUTH,
			   req.old_ticket, old_ticket_info)) {
    if (cct->_conf->auth_allow_insecure_global_id_reclaim &&
	req.old_ticket_may_be_omitted) {
      ldout(cct, 10) << " allowing reclaim of global_id " << global_id
		     << " using bad ticket (legacy client, auth_allow_insecure_global_id_reclaim=true)"
		     << dendl;
      global_id_status = global_id_status_t::RECLAIM_INSECURE;
      return 0;
    }
    ldout(cct, 0) << " attempt to reclaim global_id " << global_id
		  << " using bad ticket" << dendl;
    return -EACCES;
  }
  ldout(cct, 20) << " decoded old_ticket: global_id="
		 << old_ticket_info.ticket.global_id << dendl;
  if (global_id != old_ticket_info.ticket.global_id) {
    if (cct->_conf->auth_allow_insecure_global_id_reclaim &&
	req.old_ticket_may_be_omitted) {
      ldout(cct, 10) << " allowing reclaim of global_id " << global_id
		     << " using mismatching ticket (legacy client, auth_allow_insecure_global_id_reclaim=true)"
		     << dendl;
      global_id_status = global_id_status_t::RECLAIM_INSECURE;
      return 0;
    }
    ldout(cct, 0) << " attempt to reclaim global_id " << global_id
		  << " using mismatching ticket" << dendl;
    return -EACCES;
  }
  ldout(cct, 10) << " allowing reclaim of global_id " << global_id
		 << " (valid ticket presented, will encrypt new ticket)"
		 << dendl;
  global_id_status = global_id_status_t::RECLAIM_OK;
  should_enc_ticket = true;
  return 0;
}
int CephxServiceHandler::handle_request(
bufferlist::const_iterator& indata,
size_t connection_secret_required_len,
bufferlist *result_bl,
AuthCapsInfo *caps,
CryptoKey *psession_key,
std::string *pconnection_secret)
{
int ret = 0;
struct CephXRequestHeader cephx_header;
try {
decode(cephx_header, indata);
} catch (ceph::buffer::error& e) {
ldout(cct, 0) << __func__ << " failed to decode CephXRequestHeader: "
<< e.what() << dendl;
return -EPERM;
}
switch (cephx_header.request_type) {
case CEPHX_GET_AUTH_SESSION_KEY:
{
ldout(cct, 10) << "handle_request get_auth_session_key for "
<< entity_name << dendl;
CephXAuthenticate req;
try {
decode(req, indata);
} catch (ceph::buffer::error& e) {
ldout(cct, 0) << __func__ << " failed to decode CephXAuthenticate: "
<< e.what() << dendl;
ret = -EPERM;
break;
}
EntityAuth eauth;
if (!key_server->get_auth(entity_name, eauth)) {
ldout(cct, 0) << "couldn't find entity name: " << entity_name << dendl;
ret = -EACCES;
break;
}
if (!server_challenge) {
ret = -EACCES;
break;
}
uint64_t expected_key;
CryptoKey *used_key = &eauth.key;
std::string error;
cephx_calc_client_server_challenge(cct, eauth.key, server_challenge,
req.client_challenge, &expected_key, error);
if ((!error.empty() || req.key != expected_key) &&
!eauth.pending_key.empty()) {
ldout(cct, 10) << "normal key failed for " << entity_name
<< ", trying pending_key" << dendl;
// try pending_key instead
error.clear();
cephx_calc_client_server_challenge(cct, eauth.pending_key,
server_challenge,
req.client_challenge, &expected_key,
error);
if (error.empty()) {
used_key = &eauth.pending_key;
key_server->note_used_pending_key(entity_name, eauth.pending_key);
}
}
if (!error.empty()) {
ldout(cct, 0) << " cephx_calc_client_server_challenge error: " << error << dendl;
ret = -EACCES;
break;
}
ldout(cct, 20) << " checking key: req.key=" << hex << req.key
<< " expected_key=" << expected_key << dec << dendl;
if (req.key != expected_key) {
ldout(cct, 0) << " unexpected key: req.key=" << hex << req.key
<< " expected_key=" << expected_key << dec << dendl;
ret = -EACCES;
break;
}
CryptoKey session_key;
CephXSessionAuthInfo info;
bool should_enc_ticket = false;
CephXServiceTicketInfo old_ticket_info;
ret = verify_old_ticket(req, old_ticket_info, should_enc_ticket);
if (ret) {
ldout(cct, 0) << " could not verify old ticket" << dendl;
break;
}
double ttl;
if (!key_server->get_service_secret(CEPH_ENTITY_TYPE_AUTH,
info.service_secret, info.secret_id,
ttl)) {
ldout(cct, 0) << " could not get service secret for auth subsystem" << dendl;
ret = -EIO;
break;
}
info.service_id = CEPH_ENTITY_TYPE_AUTH;
info.ticket.name = entity_name;
info.ticket.global_id = global_id;
info.ticket.init_timestamps(ceph_clock_now(), ttl);
info.validity.set_from_double(ttl);
key_server->generate_secret(session_key);
info.session_key = session_key;
if (psession_key) {
*psession_key = session_key;
}
vector<CephXSessionAuthInfo> info_vec;
info_vec.push_back(info);
build_cephx_response_header(cephx_header.request_type, 0, *result_bl);
if (!cephx_build_service_ticket_reply(
cct, *used_key, info_vec, should_enc_ticket,
old_ticket_info.session_key, *result_bl)) {
ret = -EIO;
break;
}
if (!key_server->get_service_caps(entity_name, CEPH_ENTITY_TYPE_MON,
*caps)) {
ldout(cct, 0) << " could not get mon caps for " << entity_name << dendl;
ret = -EACCES;
break;
} else {
char *caps_str = caps->caps.c_str();
if (!caps_str || !caps_str[0]) {
ldout(cct,0) << "mon caps null for " << entity_name << dendl;
ret = -EACCES;
break;
}
if (req.other_keys) {
// nautilus+ client
// generate a connection_secret
bufferlist cbl;
if (pconnection_secret) {
pconnection_secret->resize(connection_secret_required_len);
if (connection_secret_required_len) {
cct->random()->get_bytes(pconnection_secret->data(),
connection_secret_required_len);
}
std::string err;
if (encode_encrypt(cct, *pconnection_secret, session_key, cbl,
err)) {
lderr(cct) << __func__ << " failed to encrypt connection secret, "
<< err << dendl;
ret = -EACCES;
break;
}
}
encode(cbl, *result_bl);
// provide requested service tickets at the same time
vector<CephXSessionAuthInfo> info_vec;
for (uint32_t service_id = 1; service_id <= req.other_keys;
service_id <<= 1) {
// skip CEPH_ENTITY_TYPE_AUTH: auth ticket is already encoded
// (possibly encrypted with the old session key)
if ((req.other_keys & service_id) &&
service_id != CEPH_ENTITY_TYPE_AUTH) {
ldout(cct, 10) << " adding key for service "
<< ceph_entity_type_name(service_id) << dendl;
CephXSessionAuthInfo svc_info;
key_server->build_session_auth_info(
service_id,
info.ticket,
svc_info);
info_vec.push_back(svc_info);
}
}
bufferlist extra;
if (!info_vec.empty()) {
CryptoKey no_key;
cephx_build_service_ticket_reply(
cct, session_key, info_vec, false, no_key, extra);
}
encode(extra, *result_bl);
}
// caller should try to finish authentication
ret = 1;
}
}
break;
case CEPHX_GET_PRINCIPAL_SESSION_KEY:
{
ldout(cct, 10) << "handle_request get_principal_session_key" << dendl;
bufferlist tmp_bl;
CephXServiceTicketInfo auth_ticket_info;
// note: no challenge here.
if (!cephx_verify_authorizer(
cct, *key_server, indata, 0, auth_ticket_info, nullptr,
nullptr,
&tmp_bl)) {
ret = -EACCES;
break;
}
CephXServiceTicketRequest ticket_req;
try {
decode(ticket_req, indata);
} catch (ceph::buffer::error& e) {
ldout(cct, 0) << __func__
<< " failed to decode CephXServiceTicketRequest: "
<< e.what() << dendl;
ret = -EPERM;
break;
}
ldout(cct, 10) << " ticket_req.keys = " << ticket_req.keys << dendl;
ret = 0;
vector<CephXSessionAuthInfo> info_vec;
int found_services = 0;
int service_err = 0;
for (uint32_t service_id = 1; service_id <= ticket_req.keys;
service_id <<= 1) {
// skip CEPH_ENTITY_TYPE_AUTH: auth ticket must be obtained with
// CEPHX_GET_AUTH_SESSION_KEY
if ((ticket_req.keys & service_id) &&
service_id != CEPH_ENTITY_TYPE_AUTH) {
ldout(cct, 10) << " adding key for service "
<< ceph_entity_type_name(service_id) << dendl;
CephXSessionAuthInfo info;
int r = key_server->build_session_auth_info(
service_id,
auth_ticket_info.ticket, // parent ticket (client's auth ticket)
info);
// tolerate missing MGR rotating key for the purposes of upgrades.
if (r < 0) {
ldout(cct, 10) << " missing key for service "
<< ceph_entity_type_name(service_id) << dendl;
service_err = r;
continue;
}
info_vec.push_back(info);
++found_services;
}
}
if (!found_services && service_err) {
ldout(cct, 10) << __func__ << " did not find any service keys" << dendl;
ret = service_err;
}
CryptoKey no_key;
build_cephx_response_header(cephx_header.request_type, ret, *result_bl);
cephx_build_service_ticket_reply(cct, auth_ticket_info.session_key,
info_vec, false, no_key, *result_bl);
}
break;
case CEPHX_GET_ROTATING_KEY:
{
ldout(cct, 10) << "handle_request getting rotating secret for "
<< entity_name << dendl;
build_cephx_response_header(cephx_header.request_type, 0, *result_bl);
if (!key_server->get_rotating_encrypted(entity_name, *result_bl)) {
ret = -EACCES;
break;
}
}
break;
default:
ldout(cct, 10) << "handle_request unknown op " << cephx_header.request_type << dendl;
return -EINVAL;
}
return ret;
}
void CephxServiceHandler::build_cephx_response_header(int request_type, int status, bufferlist& bl)
{
struct CephXResponseHeader header;
header.request_type = request_type;
header.status = status;
encode(header, bl);
}
| 13,167 | 30.203791 | 99 |
cc
|
null |
ceph-main/src/auth/cephx/CephxServiceHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CEPHXSERVICEHANDLER_H
#define CEPH_CEPHXSERVICEHANDLER_H
#include "auth/AuthServiceHandler.h"
#include "auth/Auth.h"
class KeyServer;
struct CephXAuthenticate;
struct CephXServiceTicketInfo;
// Server-side (monitor) handler for the cephx authentication protocol.
// Processes CEPHX_GET_AUTH_SESSION_KEY / CEPHX_GET_PRINCIPAL_SESSION_KEY /
// CEPHX_GET_ROTATING_KEY requests against the monitor's KeyServer.
class CephxServiceHandler : public AuthServiceHandler {
  KeyServer *key_server;      // entity/service secret store; not owned
  uint64_t server_challenge;  // challenge issued to the client; starts at 0
                              // and must be non-zero before a session-key
                              // request is accepted (see handle_request)
public:
  CephxServiceHandler(CephContext *cct_, KeyServer *ks)
    : AuthServiceHandler(cct_), key_server(ks), server_challenge(0) {}
  ~CephxServiceHandler() override {}

  // Decode and service one cephx request from 'indata', appending the
  // reply to *result_bl.  On success fills caps/session_key and, when
  // requested, *connection_secret.  Returns 0 or 1 on success paths,
  // negative errno on failure.
  int handle_request(
    ceph::buffer::list::const_iterator& indata,
    size_t connection_secret_required_length,
    ceph::buffer::list *result_bl,
    AuthCapsInfo *caps,
    CryptoKey *session_key,
    std::string *connection_secret) override;

private:
  int do_start_session(bool is_new_global_id,
                       ceph::buffer::list *result_bl,
                       AuthCapsInfo *caps) override;

  // Validate the old auth ticket presented with a renewal request.
  int verify_old_ticket(const CephXAuthenticate& req,
                        CephXServiceTicketInfo& old_ticket_info,
                        bool& should_enc_ticket);
  // Append a CephXResponseHeader to 'bl'.
  void build_cephx_response_header(int request_type, int status,
                                   ceph::buffer::list& bl);
};
#endif
| 1,544 | 27.090909 | 71 |
h
|
null |
ceph-main/src/auth/cephx/CephxSessionHandler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "CephxSessionHandler.h"
#include "CephxProtocol.h"
#include <errno.h>
#include <sstream>
#include "common/config.h"
#include "include/ceph_features.h"
#include "msg/Message.h"
#define dout_subsys ceph_subsys_auth
namespace {
#ifdef WITH_SEASTAR
// In crimson/seastar builds configuration is reactor-local, so the
// CephContext argument is ignored.
crimson::common::ConfigProxy& conf(CephContext*) {
  return crimson::common::local_conf();
}
#else
// Classic builds read configuration from the CephContext.
ConfigProxy& conf(CephContext* cct) {
  return cct->_conf;
}
#endif
}
// Compute the 64-bit cephx signature for message *m into *psig.
// Two wire formats exist, selected by the negotiated CEPHX_V2 feature:
//  - legacy (pre-mimic): encrypt a packed block of the header/footer
//    CRCs and take the first 8 bytes of ciphertext;
//  - CEPHX_V2 (mimic+): encrypt a packed block that also covers the
//    section lengths and the low 32 bits of the sequence number, then
//    XOR the four 64-bit words of the ciphertext together.
// Returns 0 on success, -1 if encryption fails.
int CephxSessionHandler::_calc_signature(Message *m, uint64_t *psig)
{
  const ceph_msg_header& header = m->get_header();
  const ceph_msg_footer& footer = m->get_footer();

  if (!HAVE_FEATURE(features, CEPHX_V2)) {
    // legacy pre-mimic behavior for compatibility
    // optimized signature calculation
    // - avoid temporary allocated buffers from encode_encrypt[_enc_bl]
    // - skip the leading 4 byte wrapper from encode_encrypt
    struct {
      __u8 v;
      ceph_le64 magic;
      ceph_le32 len;
      ceph_le32 header_crc;
      ceph_le32 front_crc;
      ceph_le32 middle_crc;
      ceph_le32 data_crc;
    } __attribute__ ((packed)) sigblock = {
      1, ceph_le64(AUTH_ENC_MAGIC), ceph_le32(4 * 4),
      ceph_le32(header.crc), ceph_le32(footer.front_crc),
      ceph_le32(footer.middle_crc), ceph_le32(footer.data_crc)
    };

    char exp_buf[CryptoKey::get_max_outbuf_size(sizeof(sigblock))];

    try {
      const CryptoKey::in_slice_t in {
        sizeof(sigblock),
        reinterpret_cast<const unsigned char*>(&sigblock)
      };
      const CryptoKey::out_slice_t out {
        sizeof(exp_buf),
        reinterpret_cast<unsigned char*>(&exp_buf)
      };
      key.encrypt(cct, in, out);
    } catch (std::exception& e) {
      lderr(cct) << __func__ << " failed to encrypt signature block" << dendl;
      return -1;
    }

    // legacy format: signature is the first 64 bits of ciphertext
    *psig = *reinterpret_cast<ceph_le64*>(exp_buf);
  } else {
    // newer mimic+ signatures
    struct {
      ceph_le32 header_crc;
      ceph_le32 front_crc;
      ceph_le32 front_len;
      ceph_le32 middle_crc;
      ceph_le32 middle_len;
      ceph_le32 data_crc;
      ceph_le32 data_len;
      ceph_le32 seq_lower_word;
    } __attribute__ ((packed)) sigblock = {
      ceph_le32(header.crc),
      ceph_le32(footer.front_crc),
      ceph_le32(header.front_len),
      ceph_le32(footer.middle_crc),
      ceph_le32(header.middle_len),
      ceph_le32(footer.data_crc),
      ceph_le32(header.data_len),
      ceph_le32(header.seq)
    };

    char exp_buf[CryptoKey::get_max_outbuf_size(sizeof(sigblock))];

    try {
      const CryptoKey::in_slice_t in {
        sizeof(sigblock),
        reinterpret_cast<const unsigned char*>(&sigblock)
      };
      const CryptoKey::out_slice_t out {
        sizeof(exp_buf),
        reinterpret_cast<unsigned char*>(&exp_buf)
      };
      key.encrypt(cct, in, out);
    } catch (std::exception& e) {
      lderr(cct) << __func__ << " failed to encrypt signature block" << dendl;
      return -1;
    }

    // CEPHX_V2: fold the ciphertext into 64 bits by XORing its four
    // 64-bit words
    struct enc {
      ceph_le64 a, b, c, d;
    } *penc = reinterpret_cast<enc*>(exp_buf);
    *psig = penc->a ^ penc->b ^ penc->c ^ penc->d;
  }

  ldout(cct, 10) << __func__ << " seq " << m->get_seq()
                 << " front_crc_ = " << footer.front_crc
                 << " middle_crc = " << footer.middle_crc
                 << " data_crc = " << footer.data_crc
                 << " sig = " << *psig
                 << dendl;
  return 0;
}
// Compute and attach a cephx signature to an outgoing message,
// setting CEPH_MSG_FOOTER_SIGNED in the footer flags.
// A no-op (returning 0) when message signing is disabled in config.
int CephxSessionHandler::sign_message(Message *m)
{
  // If runtime signing option is off, just return success without signing.
  if (!conf(cct)->cephx_sign_messages)
    return 0;

  uint64_t signature = 0;
  const int ret = _calc_signature(m, &signature);
  if (ret < 0) {
    return ret;
  }

  ceph_msg_footer& footer = m->get_footer();
  footer.sig = signature;
  footer.flags = (unsigned)footer.flags | CEPH_MSG_FOOTER_SIGNED;
  ldout(cct, 20) << "Putting signature in client message(seq # " << m->get_seq()
                 << "): sig = " << signature << dendl;
  return 0;
}
// Verify the cephx signature on an incoming message.  Returns 0 when
// the signature matches (or when checking is disabled / the feature was
// not negotiated), a negative error from _calc_signature(), or
// SESSION_SIGNATURE_FAILURE on mismatch.
int CephxSessionHandler::check_message_signature(Message *m)
{
  // If runtime signing option is off, just return success without checking signature.
  if (!conf(cct)->cephx_sign_messages)
    return 0;

  // it's fine, we didn't negotiate this feature.
  if ((features & CEPH_FEATURE_MSG_AUTH) == 0)
    return 0;

  uint64_t expected_sig = 0;
  const int ret = _calc_signature(m, &expected_sig);
  if (ret < 0) {
    return ret;
  }
  if (expected_sig == m->get_footer().sig) {
    return 0;
  }

  // Signature mismatch.  Note whether the sender even claimed to have
  // signed the message.  PLR
  if (!(m->get_footer().flags & CEPH_MSG_FOOTER_SIGNED)) {
    ldout(cct, 0) << "SIGN: MSG " << m->get_seq() << " Sender did not set CEPH_MSG_FOOTER_SIGNED." << dendl;
  }
  ldout(cct, 0) << "SIGN: MSG " << m->get_seq() << " Message signature does not match contents." << dendl;
  ldout(cct, 0) << "SIGN: MSG " << m->get_seq() << "Signature on message:" << dendl;
  ldout(cct, 0) << "SIGN: MSG " << m->get_seq() << " sig: " << m->get_footer().sig << dendl;
  ldout(cct, 0) << "SIGN: MSG " << m->get_seq() << "Locally calculated signature:" << dendl;
  ldout(cct, 0) << "SIGN: MSG " << m->get_seq() << " sig_check:" << expected_sig << dendl;

  // For the moment, printing an error message to the log and
  // returning failure is sufficient.  In the long term, we should
  // probably have code parsing the log looking for this kind of
  // security failure, particularly when there are large numbers of
  // them, since the latter is a potential sign of an attack.  PLR
  ldout(cct, 0) << "Signature failed." << dendl;
  return (SESSION_SIGNATURE_FAILURE);
}
| 5,829 | 28.897436 | 110 |
cc
|
null |
ceph-main/src/auth/cephx/CephxSessionHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "auth/AuthSessionHandler.h"
#include "auth/Auth.h"
#include "include/common_fwd.h"
class Message;
// Per-connection cephx session handler: signs outgoing messages and
// verifies signatures on incoming ones using the session key that was
// negotiated during authentication.
class CephxSessionHandler : public AuthSessionHandler {
  CephContext *cct;
  int protocol;       // always CEPH_AUTH_CEPHX for this handler
  CryptoKey key;      // per mon authentication
  uint64_t features;  // negotiated peer features; selects signature format

  // Compute the signature of *m into *psig (format depends on features).
  int _calc_signature(Message *m, uint64_t *psig);

public:
  CephxSessionHandler(CephContext *cct,
                      const CryptoKey& session_key,
                      const uint64_t features)
    : cct(cct),
      protocol(CEPH_AUTH_CEPHX),
      key(session_key),
      features(features) {
  }
  ~CephxSessionHandler() override = default;

  int sign_message(Message *m) override;
  int check_message_signature(Message *m) override ;
};
| 1,128 | 24.088889 | 70 |
h
|
null |
ceph-main/src/auth/krb/KrbAuthorizeHandler.cpp
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2018 SUSE LLC.
* Author: Daniel Oliveira <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "KrbAuthorizeHandler.hpp"
#include "common/debug.h"
#define dout_subsys ceph_subsys_auth
// Decode the client-supplied GSS authorizer blob: a one-byte version
// marker followed by the peer's EntityName and global_id, which are
// returned to the caller.  Returns false if decoding fails.
//
// NOTE(review): the blob is only decoded, not cryptographically
// verified here, and caps_info->allow_all is set unconditionally on
// success -- confirm this is intentional for the GSS auth flow (peer
// authentication happens in the GSSAPI handshake itself).
bool KrbAuthorizeHandler::verify_authorizer(
  CephContext* ceph_ctx,
  const KeyStore& keys,
  const bufferlist& authorizer_data,
  size_t connection_secret_required_len,
  bufferlist *authorizer_reply,
  EntityName *entity_name,
  uint64_t *global_id,
  AuthCapsInfo *caps_info,
  CryptoKey *session_key,
  std::string *connection_secret,
  std::unique_ptr<AuthAuthorizerChallenge>* challenge)
{
  auto itr(authorizer_data.cbegin());

  try {
    uint8_t value = (1);

    using ceph::decode;
    decode(value, itr);     // version byte; decoded value is not used
    decode(*entity_name, itr);
    decode(*global_id, itr);
  } catch (const buffer::error& err) {
    ldout(ceph_ctx, 0)
      << "Error: KrbAuthorizeHandler::verify_authorizer() failed!" << dendl;
    return false;
  }
  caps_info->allow_all = true;
  return true;
}
| 1,345 | 23.925926 | 78 |
cpp
|
null |
ceph-main/src/auth/krb/KrbAuthorizeHandler.hpp
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2018 SUSE LLC.
* Author: Daniel Oliveira <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef KRB_AUTHORIZE_HANDLER_HPP
#define KRB_AUTHORIZE_HANDLER_HPP
#include "auth/AuthAuthorizeHandler.h"
// Authorize-handler for the Kerberos/GSSAPI ("gss") auth protocol;
// implementation in KrbAuthorizeHandler.cpp.
class KrbAuthorizeHandler : public AuthAuthorizeHandler {
  // Decode the authorizer blob and return the peer identity; see the
  // .cpp for details of what is (and is not) verified.
  bool verify_authorizer(
    CephContext*,
    const KeyStore&,
    const bufferlist&,
    size_t,
    bufferlist *,
    EntityName *,
    uint64_t *,
    AuthCapsInfo *,
    CryptoKey *,
    std::string *connection_secret,
    std::unique_ptr<
      AuthAuthorizerChallenge>* = nullptr) override;

  // Session crypto mode advertised for this protocol.
  int authorizer_session_crypto() override {
    return SESSION_SYMMETRIC_AUTHENTICATE;
  };

  ~KrbAuthorizeHandler() override = default;
};
#endif //-- KRB_AUTHORIZE_HANDLER_HPP
| 1,104 | 22.510638 | 70 |
hpp
|
null |
ceph-main/src/auth/krb/KrbClientHandler.cpp
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2018 SUSE LLC.
* Author: Daniel Oliveira <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "KrbClientHandler.hpp"
#include <errno.h>
#include <string>
#include "KrbProtocol.hpp"
#include "auth/KeyRing.h"
#include "include/random.h"
#include "common/ceph_context.h"
#include "common/config.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_auth
#undef dout_prefix
#define dout_prefix *_dout << "krb5/gssapi client request: "
struct AuthAuthorizer;
// Build an authorizer for the given service, carrying this client's
// entity name and global id.  Always returns a valid (heap-allocated)
// authorizer; the caller takes ownership.
AuthAuthorizer*
KrbClientHandler::build_authorizer(uint32_t service_id) const
{
  ldout(cct, 20)
    << "KrbClientHandler::build_authorizer(): Service: "
    << ceph_entity_type_name(service_id) << dendl;

  // Plain 'new' either succeeds or throws std::bad_alloc -- it never
  // returns nullptr -- so the object can be used unconditionally (the
  // previous null check was dead code).
  KrbAuthorizer* krb_auth = new KrbAuthorizer();
  krb_auth->build_authorizer(cct->_conf->name, global_id);
  return krb_auth;
}
// Release every GSSAPI resource acquired during the handshake: the
// client and service names, the credentials, the security context and
// the last output token buffer.  The GSS release routines tolerate
// already-empty handles, so this is safe at any handshake stage.
KrbClientHandler::~KrbClientHandler()
{
  OM_uint32 gss_minor_status(0);

  gss_release_name(&gss_minor_status, &m_gss_client_name);
  gss_release_name(&gss_minor_status, &m_gss_service_name);
  gss_release_cred(&gss_minor_status, &m_gss_credentials);
  gss_delete_sec_context(&gss_minor_status, &m_gss_sec_ctx, GSS_C_NO_BUFFER);
  gss_release_buffer(&gss_minor_status,
                     static_cast<gss_buffer_t>(&m_gss_buffer_out));
}
// Serialize the next client->server GSS handshake message into
// buff_list: a KrbRequest header (GSS_TOKEN) followed -- when the last
// gss_init_sec_context() call produced output -- by the token blob.
// Always returns 0.
int KrbClientHandler::build_request(bufferlist& buff_list) const
{
  ldout(cct, 20)
    << "KrbClientHandler::build_request() " << dendl;

  KrbTokenBlob krb_token;
  KrbRequest krb_request;

  krb_request.m_request_type =
    static_cast<int>(GSSAuthenticationRequest::GSS_TOKEN);

  using ceph::encode;
  encode(krb_request, buff_list);

  if (m_gss_buffer_out.length != 0) {
    // Zero-copy wrap of the GSS output token: the bytes remain owned by
    // m_gss_buffer_out (released in handle_response()/destructor), so
    // the buffer must stay alive while buff_list references it.
    krb_token.m_token_blob.append(buffer::create_static(
                                    m_gss_buffer_out.length,
                                    reinterpret_cast<char*>
                                      (m_gss_buffer_out.value)));
    encode(krb_token, buff_list);
    ldout(cct, 20)
      << "KrbClientHandler::build_request() : Token Blob: " << "\n";
    krb_token.m_token_blob.hexdump(*_dout);
    *_dout << dendl;
  }
  return 0;
}
// Process one server response in the GSSAPI/SPNEGO handshake.
//
// First round (no credentials yet): import the client name, acquire
// client credentials and import the target service name.  Later rounds:
// decode the server's token blob and feed it into
// gss_init_sec_context().
//
// Returns 0 when the security context is established, -EAGAIN when
// another round-trip is required (GSS_S_CONTINUE_NEEDED), -EACCES on
// GSSAPI failure, or 'ret' unchanged if it was already negative.
int KrbClientHandler::handle_response(
  int ret,
  bufferlist::const_iterator& buff_list,
  CryptoKey *session_key,
  std::string *connection_secret)
{
  auto result(ret);
  gss_buffer_desc gss_buffer_in = {0, nullptr};
  gss_OID_set_desc gss_mechs_wanted = {0, nullptr};
  OM_uint32 gss_major_status(0);
  OM_uint32 gss_minor_status(0);
  OM_uint32 gss_wanted_flags(GSS_C_MUTUAL_FLAG |
                             GSS_C_INTEG_FLAG);
  OM_uint32 gss_result_flags(0);

  ldout(cct, 20)
    << "KrbClientHandler::handle_response() " << dendl;

  // Propagate an error handed down by the caller.
  if (result < 0) {
    return result;
  }

  // Negotiate through SPNEGO only.
  gss_mechs_wanted.elements = const_cast<gss_OID>(&GSS_API_SPNEGO_OID_PTR);
  gss_mechs_wanted.count = 1;

  KrbResponse krb_response;
  using ceph::decode;
  decode(krb_response, buff_list);
  if (m_gss_credentials == GSS_C_NO_CREDENTIAL) {
    // First round: establish our own identity and the target's.
    gss_OID krb_client_type = GSS_C_NT_USER_NAME;
    std::string krb_client_name(cct->_conf->name.to_str());

    gss_buffer_in.length = krb_client_name.length();
    gss_buffer_in.value = (const_cast<char*>(krb_client_name.c_str()));
    if (cct->_conf->name.get_type() == CEPH_ENTITY_TYPE_CLIENT) {
      gss_major_status = gss_import_name(&gss_minor_status,
                                         &gss_buffer_in,
                                         krb_client_type,
                                         &m_gss_client_name);
      if (gss_major_status != GSS_S_COMPLETE) {
        auto status_str(gss_auth_show_status(gss_major_status,
                                             gss_minor_status));
        ldout(cct, 0)
            << "ERROR: KrbClientHandler::handle_response() "
               "[gss_import_name(gss_client_name)] failed! "
            << gss_major_status << " "
            << gss_minor_status << " "
            << status_str
            << dendl;
      }
    }

    // Non-client entities pass GSS_C_NO_NAME here (default credentials).
    gss_major_status = gss_acquire_cred(&gss_minor_status,
                                        m_gss_client_name,
                                        0,
                                        &gss_mechs_wanted,
                                        GSS_C_INITIATE,
                                        &m_gss_credentials,
                                        nullptr,
                                        nullptr);
    if (gss_major_status != GSS_S_COMPLETE) {
      auto status_str(gss_auth_show_status(gss_major_status,
                                           gss_minor_status));
      ldout(cct, 20)
          << "ERROR: KrbClientHandler::handle_response() "
             "[gss_acquire_cred()] failed! "
          << gss_major_status << " "
          << gss_minor_status << " "
          << status_str
          << dendl;
      return (-EACCES);
    }

    // Import the service principal (config option gss_target_name).
    gss_buffer_desc krb_input_name_buff = {0, nullptr};
    gss_OID krb_input_type = GSS_C_NT_HOSTBASED_SERVICE;
    std::string gss_target_name(cct->_conf.get_val<std::string>
                                ("gss_target_name"));

    krb_input_name_buff.length = gss_target_name.length();
    krb_input_name_buff.value = (const_cast<char*>(gss_target_name.c_str()));
    gss_major_status = gss_import_name(&gss_minor_status,
                                       &krb_input_name_buff,
                                       krb_input_type,
                                       &m_gss_service_name);
    if (gss_major_status != GSS_S_COMPLETE) {
      auto status_str(gss_auth_show_status(gss_major_status,
                                           gss_minor_status));
      ldout(cct, 0)
          << "ERROR: KrbClientHandler::handle_response() "
             "[gss_import_name(gss_service_name)] failed! "
          << gss_major_status << " "
          << gss_minor_status << " "
          << status_str
          << dendl;
    }
  } else {
    // Subsequent round: the reply carries the server's token.
    KrbTokenBlob krb_token;

    using ceph::decode;
    decode(krb_token, buff_list);
    ldout(cct, 20)
      << "KrbClientHandler::handle_response() : Token Blob: " << "\n";
    krb_token.m_token_blob.hexdump(*_dout);
    *_dout << dendl;

    gss_buffer_in.length = krb_token.m_token_blob.length();
    gss_buffer_in.value = krb_token.m_token_blob.c_str();
  }

  const gss_OID gss_mech_type = gss_mechs_wanted.elements;
  if (m_gss_buffer_out.length != 0) {
    // Drop the previous output token before generating a new one.
    gss_release_buffer(&gss_minor_status,
                       static_cast<gss_buffer_t>(&m_gss_buffer_out));
  }

  gss_major_status = gss_init_sec_context(&gss_minor_status,
                                          m_gss_credentials,
                                          &m_gss_sec_ctx,
                                          m_gss_service_name,
                                          gss_mech_type,
                                          gss_wanted_flags,
                                          0,
                                          nullptr,
                                          &gss_buffer_in,
                                          nullptr,
                                          &m_gss_buffer_out,
                                          &gss_result_flags,
                                          nullptr);
  switch (gss_major_status) {
    case GSS_S_CONTINUE_NEEDED:
      // More round-trips needed; build_request() will send the token.
      ldout(cct, 20)
        << "KrbClientHandler::handle_response() : "
           "[gss_init_sec_context(GSS_S_CONTINUE_NEEDED)] " << dendl;
      result = (-EAGAIN);
      break;

    case GSS_S_COMPLETE:
      ldout(cct, 20)
        << "KrbClientHandler::handle_response() : "
           "[gss_init_sec_context(GSS_S_COMPLETE)] " << dendl;
      result = 0;
      break;

    default:
      auto status_str(gss_auth_show_status(gss_major_status,
                                           gss_minor_status));
      ldout(cct, 0)
          << "ERROR: KrbClientHandler::handle_response() "
             "[gss_init_sec_context()] failed! "
          << gss_major_status << " "
          << gss_minor_status << " "
          << status_str
          << dendl;
      result = (-EACCES);
      break;
  }

  return result;
}
| 8,553 | 32.677165 | 78 |
cpp
|
null |
ceph-main/src/auth/krb/KrbClientHandler.hpp
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2018 SUSE LLC.
* Author: Daniel Oliveira <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef KRB_CLIENT_HANDLER_HPP
#define KRB_CLIENT_HANDLER_HPP
#include "auth/AuthClientHandler.h"
#include "auth/RotatingKeyRing.h"
#include "include/common_fwd.h"
#include "KrbProtocol.hpp"
#include <gssapi.h>
#include <gssapi/gssapi_generic.h>
#include <gssapi/gssapi_krb5.h>
#include <gssapi/gssapi_ext.h>
class Keyring;
// Client-side handler for the Kerberos/GSSAPI ("gss") auth protocol.
// Owns the GSSAPI names, credentials, security context and output token
// used across the handshake rounds (released in the destructor).
class KrbClientHandler : public AuthClientHandler {

public:
  KrbClientHandler(CephContext* ceph_ctx = nullptr)
    : AuthClientHandler(ceph_ctx) {
    reset();
  }
  ~KrbClientHandler() override;

  KrbClientHandler* clone() const override {
    return new KrbClientHandler(*this);
  }

  int get_protocol() const override { return CEPH_AUTH_GSS; }
  // Reset all GSSAPI handles to their "empty" sentinels.  Note: this
  // does not release previously acquired resources.
  void reset() override {
    m_gss_client_name = GSS_C_NO_NAME;
    m_gss_service_name = GSS_C_NO_NAME;
    m_gss_credentials = GSS_C_NO_CREDENTIAL;
    m_gss_sec_ctx = GSS_C_NO_CONTEXT;
    m_gss_buffer_out = {0, 0};
  }

  void prepare_build_request() override { };
  // Serialize the next handshake token; see KrbClientHandler.cpp.
  int build_request(bufferlist& buff_list) const override;
  // Consume a server reply and advance the GSS handshake.
  int handle_response(int ret,
                      bufferlist::const_iterator& buff_list,
                      CryptoKey *session_key,
                      std::string *connection_secret) override;
  bool build_rotating_request(bufferlist& buff_list) const override {
    return false;
  }

  AuthAuthorizer* build_authorizer(uint32_t service_id) const override;

  bool need_tickets() override { return false; }

  void set_global_id(uint64_t guid) override { global_id = guid; }

private:
  gss_name_t m_gss_client_name;    // our principal
  gss_name_t m_gss_service_name;   // target service principal
  gss_cred_id_t m_gss_credentials; // acquired client credentials
  gss_ctx_id_t m_gss_sec_ctx;      // security context under negotiation
  gss_buffer_desc m_gss_buffer_out; // last token produced for the server

protected:
  void validate_tickets() override { }
};
#endif //-- KRB_CLIENT_HANDLER_HPP
| 2,259 | 25.588235 | 73 |
hpp
|
null |
ceph-main/src/auth/krb/KrbProtocol.cpp
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2018 SUSE LLC.
* Author: Daniel Oliveira <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "KrbProtocol.hpp"
#include "common/Clock.h"
#include "common/config.h"
#include "common/debug.h"
#include "include/buffer.h"
#define dout_subsys ceph_subsys_auth
#undef dout_prefix
#define dout_prefix *_dout << "krb5/gssapi protocol: "
// Render a human-readable string for a GSS-API major/minor status pair.
// Per RFC 2744, gss_display_status() is called repeatedly until it sets
// the message context back to zero; for GSS_S_FAILURE the minor
// (mechanism-specific, here krb5) status is decoded as well.
//
// Fix: the original loops relied solely on gss_display_status()
// zeroing the message context.  If the call itself fails, the context
// may never reach zero and the loop spins forever -- so we now break
// out as soon as a gss_display_status() call does not complete.
std::string gss_auth_show_status(const OM_uint32 gss_major_status,
                                 const OM_uint32 gss_minor_status)
{
  const std::string STR_DOT(".");
  const std::string STR_BLANK(" ");

  gss_buffer_desc gss_str_status = {0, nullptr};
  OM_uint32 gss_maj_status(0);
  OM_uint32 gss_min_status(0);
  OM_uint32 gss_ctx_message(-1);

  std::string str_status("");

  // Append the text just produced by gss_display_status(), a dot, and
  // a separating blank when further messages are pending.
  const auto gss_complete_status_str_format = [&](const uint32_t gss_status) {
    if (gss_status == GSS_S_COMPLETE) {
      std::string str_tmp("");
      str_tmp.append(reinterpret_cast<char*>(gss_str_status.value),
                     gss_str_status.length);
      str_tmp += STR_DOT;
      if (gss_ctx_message != 0) {
        str_tmp += STR_BLANK;
      }
      return str_tmp;
    }
    return STR_BLANK;
  };

  while (gss_ctx_message != 0) {
    gss_maj_status = gss_display_status(&gss_min_status,
                                        gss_major_status,
                                        GSS_C_GSS_CODE,
                                        GSS_C_NO_OID,
                                        &gss_ctx_message,
                                        &gss_str_status);
    if (gss_maj_status == GSS_S_COMPLETE) {
      str_status += gss_complete_status_str_format(gss_maj_status);
      gss_release_buffer(&gss_min_status, &gss_str_status);
    } else {
      // gss_display_status() failed; the message context may not have
      // been updated, so bail out instead of looping forever.
      break;
    }
  }

  if (gss_major_status == GSS_S_FAILURE) {
    gss_ctx_message = -1;
    while (gss_ctx_message != 0) {
      gss_maj_status = gss_display_status(&gss_min_status,
                                          gss_minor_status,
                                          GSS_C_MECH_CODE,
                                          const_cast<gss_OID>(&GSS_API_KRB5_OID_PTR),
                                          &gss_ctx_message,
                                          &gss_str_status);
      if (gss_maj_status == GSS_S_COMPLETE) {
        str_status += gss_complete_status_str_format(gss_maj_status);
        gss_release_buffer(&gss_min_status, &gss_str_status);
      } else {
        break;    // same infinite-loop guard as above
      }
    }
  }
  return str_status;
}
| 2,782 | 30.988506 | 85 |
cpp
|
null |
ceph-main/src/auth/krb/KrbProtocol.hpp
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2018 SUSE LLC.
* Author: Daniel Oliveira <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef KRB_PROTOCOL_HPP
#define KRB_PROTOCOL_HPP
#include "auth/Auth.h"
#include <errno.h>
#include <gssapi.h>
#include <gssapi/gssapi_generic.h>
#include <gssapi/gssapi_krb5.h>
#include <gssapi/gssapi_ext.h>
#include <map>
#include <sstream>
#include <string>
/*
Kerberos Version 5 GSS-API Mechanism
OID {1.2.840.113554.1.2.2}
RFC https://tools.ietf.org/html/rfc1964
*/
static const gss_OID_desc GSS_API_KRB5_OID_PTR =
{ 9, (void *)"\052\206\110\206\367\022\001\002\002" };
/*
Kerberos Version 5 GSS-API Mechanism
Simple and Protected GSS-API Negotiation Mechanism
OID {1.3.6.1.5.5.2}
RFC https://tools.ietf.org/html/rfc4178
*/
static const gss_OID_desc GSS_API_SPNEGO_OID_PTR =
{6, (void *)"\x2b\x06\x01\x05\x05\x02"};
static const std::string KRB_SERVICE_NAME("kerberos/gssapi");
static const std::string GSS_API_SPNEGO_OID("{1.3.6.1.5.5.2}");
static const std::string GSS_API_KRB5_OID("{1.2.840.113554.1.2.2}");
enum class GSSAuthenticationRequest {
GSS_CRYPTO_ERR = 1,
GSS_MUTUAL = 0x100,
GSS_TOKEN = 0x200,
GSS_REQUEST_MASK = 0x0F00
};
enum class GSSKeyExchange {
USERAUTH_GSSAPI_RESPONSE = 70,
USERAUTH_GSSAPI_TOKEN,
USERAUTH_GSSAPI_EXCHANGE_COMPLETE,
USERAUTH_GSSAPI_ERROR,
USERAUTH_GSSAPI_ERRTOK,
USERAUTH_GSSAPI_MIC,
};
static constexpr auto CEPH_GSS_OIDTYPE(0x07);
struct AuthAuthorizer;
// Minimal authorizer for the GSS auth protocol.  The payload is a
// version byte followed by the entity name and global id; peer
// authentication happens in the GSSAPI handshake itself, so
// verify_reply() and add_challenge() are unconditional no-ops.
class KrbAuthorizer : public AuthAuthorizer {

public:
  KrbAuthorizer() : AuthAuthorizer(CEPH_AUTH_GSS) { }
  ~KrbAuthorizer() = default;

  // Serialize (version, entity_name, guid) into this->bl.
  // NOTE(review): always returns false and callers in this file ignore
  // the return value -- confirm before relying on it.
  bool build_authorizer(const EntityName& entity_name,
                        const uint64_t guid) {
    uint8_t value = (1);

    using ceph::encode;
    encode(value, bl, 0);
    encode(entity_name, bl, 0);
    encode(guid, bl, 0);
    return false;
  }

  bool verify_reply(bufferlist::const_iterator& buff_list,
                    std::string *connection_secret) override {
    return true;
  }
  bool add_challenge(CephContext* ceph_ctx,
                     const bufferlist& buff_list) override {
    return true;
  }
};
// Wire struct: the request-type code sent by a client during the GSS
// handshake (values come from GSSAuthenticationRequest).
class KrbRequest {

public:
  void decode(bufferlist::const_iterator& buff_list) {
    using ceph::decode;
    decode(m_request_type, buff_list);
  }
  void encode(bufferlist& buff_list) const {
    using ceph::encode;
    encode(m_request_type, buff_list);
  }

  // GSSAuthenticationRequest value, stored in 16 bits on the wire.
  uint16_t m_request_type;
};
WRITE_CLASS_ENCODER(KrbRequest);
// Wire struct: the response-type code returned by the server during the
// GSS handshake (values come from GSSAuthenticationRequest).
class KrbResponse {

public:
  void decode(bufferlist::const_iterator& buff_list) {
    using ceph::decode;
    decode(m_response_type, buff_list);
  }

  void encode(bufferlist& buff_list) const {
    using ceph::encode;
    encode(m_response_type, buff_list);
  }

  // GSSAuthenticationRequest value, stored in 16 bits on the wire.
  uint16_t m_response_type;
};
WRITE_CLASS_ENCODER(KrbResponse);
// Wire struct: an opaque GSSAPI token preceded by a one-byte version
// marker.
class KrbTokenBlob {

public:
  void decode(bufferlist::const_iterator& buff_list) {
    uint8_t value = (0);

    using ceph::decode;
    decode(value, buff_list);       // version byte; read and discarded
    decode(m_token_blob, buff_list);
  }

  void encode(bufferlist& buff_list) const {
    uint8_t value = (1);

    using ceph::encode;
    encode(value, buff_list, 0);
    encode(m_token_blob, buff_list, 0);
  }

  // Raw GSS token bytes, passed verbatim to/from the GSSAPI library.
  bufferlist m_token_blob;
};
WRITE_CLASS_ENCODER(KrbTokenBlob);
std::string gss_auth_show_status(const OM_uint32 gss_major_status,
const OM_uint32 gss_minor_status);
#endif //-- KRB_PROTOCOL_HPP
| 3,909 | 23.285714 | 70 |
hpp
|
null |
ceph-main/src/auth/krb/KrbServiceHandler.cpp
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2018 SUSE LLC.
* Author: Daniel Oliveira <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "KrbServiceHandler.hpp"
#include "KrbProtocol.hpp"
#include <errno.h>
#include <sstream>
#include "common/config.h"
#include "common/debug.h"
#define dout_subsys ceph_subsys_auth
#undef dout_prefix
#define dout_prefix *_dout << "krb5/gssapi service: " << entity_name << " : "
// Service-side step of the GSS handshake: decode the client's token,
// feed it to gss_accept_sec_context(), and append any output token to
// *buff_list for the reply.  When the context completes, the caller's
// monitor caps are looked up from the key server into *caps.
// Returns 0 on success/continue, -EACCES on GSS failure or missing caps.
int KrbServiceHandler::handle_request(
  bufferlist::const_iterator& indata,
  size_t connection_secret_required_length,
  bufferlist *buff_list,
  AuthCapsInfo *caps,
  CryptoKey *session_key,
  std::string *connection_secret)
{
  auto result(0);
  gss_buffer_desc gss_buffer_in = {0, nullptr};
  gss_name_t gss_client_name = GSS_C_NO_NAME;
  gss_OID gss_object_id = {0};
  OM_uint32 gss_major_status(0);
  OM_uint32 gss_minor_status(0);
  OM_uint32 gss_result_flags(0);
  std::string status_str(" ");

  ldout(cct, 20)
    << "KrbServiceHandler::handle_request() " << dendl;

  // Unwrap the request header and the client's GSS token.
  KrbRequest krb_request;
  KrbTokenBlob krb_token;

  using ceph::decode;
  decode(krb_request, indata);
  decode(krb_token, indata);

  gss_buffer_in.length = krb_token.m_token_blob.length();
  gss_buffer_in.value = krb_token.m_token_blob.c_str();

  ldout(cct, 20)
    << "KrbClientHandler::handle_request() : Token Blob: "
    << "\n";
  krb_token.m_token_blob.hexdump(*_dout);
  *_dout << dendl;

  // Drop the previous output token before producing a new one.
  if (m_gss_buffer_out.length != 0) {
    gss_release_buffer(&gss_minor_status,
                       static_cast<gss_buffer_t>(&m_gss_buffer_out));
  }

  gss_major_status = gss_accept_sec_context(&gss_minor_status,
                                            &m_gss_sec_ctx,
                                            m_gss_credentials,
                                            &gss_buffer_in,
                                            GSS_C_NO_CHANNEL_BINDINGS,
                                            &gss_client_name,
                                            &gss_object_id,
                                            &m_gss_buffer_out,
                                            &gss_result_flags,
                                            nullptr,
                                            nullptr);
  switch (gss_major_status) {
    case GSS_S_CONTINUE_NEEDED:
      {
        // Handshake not finished yet; the output token (if any) is sent
        // below and we wait for the client's next request.
        ldout(cct, 20)
          << "KrbServiceHandler::handle_response() : "
             "[KrbServiceHandler(GSS_S_CONTINUE_NEEDED)] " << dendl;
        result = 0;
        break;
      }

    case GSS_S_COMPLETE:
      {
        result = 0;
        ldout(cct, 20)
          << "KrbServiceHandler::handle_response() : "
             "[KrbServiceHandler(GSS_S_COMPLETE)] " << dendl;
        // Context established: fetch the caller's monitor caps.
        if (!m_key_server->get_service_caps(entity_name,
                                            CEPH_ENTITY_TYPE_MON,
                                            *caps)) {
          result = (-EACCES);
          ldout(cct, 0)
            << "KrbServiceHandler::handle_response() : "
               "ERROR: Could not get MONITOR CAPS : " << entity_name << dendl;
        } else {
          if (!caps->caps.c_str()) {
            // Empty caps buffer is treated the same as missing caps.
            result = (-EACCES);
            ldout(cct, 0)
              << "KrbServiceHandler::handle_response() : "
                 "ERROR: MONITOR CAPS invalid : " << entity_name << dendl;
          }
        }
        break;
      }

    default:
      {
        status_str = gss_auth_show_status(gss_major_status,
                                          gss_minor_status);
        ldout(cct, 0)
          << "ERROR: KrbServiceHandler::handle_response() "
             "[gss_accept_sec_context()] failed! "
          << gss_major_status << " "
          << gss_minor_status << " "
          << status_str
          << dendl;
        result = (-EACCES);
        break;
      }
  }

  // If GSS produced an output token, wrap it (zero-copy; the bytes stay
  // owned by m_gss_buffer_out) and append it to the reply.
  if (m_gss_buffer_out.length != 0) {
    KrbResponse krb_response;
    KrbTokenBlob krb_token;

    krb_response.m_response_type =
      static_cast<int>(GSSAuthenticationRequest::GSS_TOKEN);

    using ceph::encode;
    encode(krb_response, *buff_list);
    krb_token.m_token_blob.append(buffer::create_static(
                                    m_gss_buffer_out.length,
                                    reinterpret_cast<char*>
                                      (m_gss_buffer_out.value)));
    encode(krb_token, *buff_list);
    ldout(cct, 20)
      << "KrbServiceHandler::handle_request() : Token Blob: " << "\n";
    krb_token.m_token_blob.hexdump(*_dout);
    *_dout << dendl;
  }
  gss_release_name(&gss_minor_status, &gss_client_name);
  return result;
}
// Begin a GSSAPI (Kerberos) session on the service side: import the
// configured target principal, acquire acceptor credentials for it and
// reply to the client with a GSS_MUTUAL response.
//
// Returns CEPH_AUTH_GSS on success, -EACCES when the service name cannot
// be imported or credentials cannot be acquired.
int KrbServiceHandler::do_start_session(
  bool is_new_global_id,
  bufferlist *buff_list,
  AuthCapsInfo *caps)
{
  gss_buffer_desc gss_buffer_in = {0, nullptr};
  gss_OID gss_object_id = GSS_C_NT_HOSTBASED_SERVICE;
  gss_OID_set gss_mechs_wanted = GSS_C_NO_OID_SET;
  OM_uint32 gss_major_status(0);
  OM_uint32 gss_minor_status(0);
  std::string gss_service_name(cct->_conf.get_val<std::string>
                               ("gss_target_name"));

  // Import the configured principal into a GSSAPI internal name.
  gss_buffer_in.length = gss_service_name.length();
  gss_buffer_in.value  = (const_cast<char*>(gss_service_name.c_str()));

  gss_major_status = gss_import_name(&gss_minor_status,
                                     &gss_buffer_in,
                                     gss_object_id,
                                     &m_gss_service_name);
  if (gss_major_status != GSS_S_COMPLETE) {
    auto status_str(gss_auth_show_status(gss_major_status,
                                         gss_minor_status));
    ldout(cct, 0)
        << "ERROR: KrbServiceHandler::start_session() "
           "[gss_import_name(gss_client_name)] failed! "
        << gss_major_status << " "
        << gss_minor_status << " "
        << status_str
        << dendl;
    // Fix: previously execution fell through to gss_acquire_cred() with an
    // invalid (GSS_C_NO_NAME) service name.  Fail the session instead,
    // mirroring the error handling of the acquire-credentials step below.
    return (-EACCES);
  }

  // Acquire acceptor credentials for the imported service name.
  gss_major_status = gss_acquire_cred(&gss_minor_status,
                                      m_gss_service_name,
                                      0,
                                      gss_mechs_wanted,
                                      GSS_C_ACCEPT,
                                      &m_gss_credentials,
                                      nullptr,
                                      nullptr);
  if (gss_major_status != GSS_S_COMPLETE) {
    auto status_str(gss_auth_show_status(gss_major_status,
                                         gss_minor_status));
    ldout(cct, 0)
        << "ERROR: KrbServiceHandler::start_session() "
           "[gss_acquire_cred()] failed! "
        << gss_major_status << " "
        << gss_minor_status << " "
        << status_str
        << dendl;
    return (-EACCES);
  } else {
    // Tell the client we expect mutual authentication.
    KrbResponse krb_response;
    krb_response.m_response_type =
        static_cast<int>(GSSAuthenticationRequest::GSS_MUTUAL);

    using ceph::encode;
    encode(krb_response, *buff_list);
    return (CEPH_AUTH_GSS);
  }
}
// Release all GSSAPI resources held by the handler: the imported service
// name, the acquired acceptor credentials, the established security
// context, and the last output token buffer.  Minor status codes from the
// release calls are intentionally ignored (best-effort cleanup).
KrbServiceHandler::~KrbServiceHandler()
{
  OM_uint32 gss_minor_status(0);

  gss_release_name(&gss_minor_status, &m_gss_service_name);
  gss_release_cred(&gss_minor_status, &m_gss_credentials);
  gss_delete_sec_context(&gss_minor_status, &m_gss_sec_ctx, GSS_C_NO_BUFFER);
  gss_release_buffer(&gss_minor_status, static_cast<gss_buffer_t>(&m_gss_buffer_out));
}
| 7,650 | 32.853982 | 87 |
cpp
|
null |
ceph-main/src/auth/krb/KrbServiceHandler.hpp
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2018 SUSE LLC.
* Author: Daniel Oliveira <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef KRB_SERVICE_HANDLER_HPP
#define KRB_SERVICE_HANDLER_HPP
#include "auth/AuthServiceHandler.h"
#include "auth/Auth.h"
#include "auth/cephx/CephxKeyServer.h"
#include <gssapi.h>
#include <gssapi/gssapi_generic.h>
#include <gssapi/gssapi_krb5.h>
#include <gssapi/gssapi_ext.h>
// Service-side (acceptor) handler for the Kerberos/GSSAPI auth protocol.
// Owns the GSSAPI security context, acceptor credentials, imported service
// name and the last output token; all are released in the destructor.
class KrbServiceHandler : public AuthServiceHandler {

public:
  explicit KrbServiceHandler(CephContext* ceph_ctx, KeyServer* kserver) :
    AuthServiceHandler(ceph_ctx),
    m_gss_buffer_out({0, nullptr}),
    m_gss_credentials(GSS_C_NO_CREDENTIAL),
    m_gss_sec_ctx(GSS_C_NO_CONTEXT),
    m_gss_service_name(GSS_C_NO_NAME),
    m_key_server(kserver) { }
  ~KrbServiceHandler();
  // Process one client token: runs gss_accept_sec_context() and, on
  // completion, looks up the entity's monitor caps in the key server.
  int handle_request(bufferlist::const_iterator& indata,
                     size_t connection_secret_required_length,
                     bufferlist *buff_list,
                     AuthCapsInfo *caps,
                     CryptoKey *session_key,
                     std::string *connection_secret) override;

private:
  // Import the target principal, acquire acceptor credentials and reply
  // with a GSS_MUTUAL response (see the .cc for details).
  int do_start_session(bool is_new_global_id,
                       ceph::buffer::list *buff_list,
                       AuthCapsInfo *caps) override;

  gss_buffer_desc m_gss_buffer_out;   // last token produced for the client
  gss_cred_id_t m_gss_credentials;    // acceptor credentials
  gss_ctx_id_t m_gss_sec_ctx;         // security context being established
  gss_name_t m_gss_service_name;      // imported service principal
  KeyServer* m_key_server;            // not owned; used for caps lookup
};
#endif //-- KRB_SERVICE_HANDLER_HPP
| 1,758 | 27.370968 | 76 |
hpp
|
null |
ceph-main/src/auth/krb/KrbSessionHandler.hpp
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2018 SUSE LLC.
* Author: Daniel Oliveira <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef KRB_SESSION_HANDLER_HPP
#define KRB_SESSION_HANDLER_HPP
#include "auth/AuthSessionHandler.h"
#include "auth/Auth.h"
#include "KrbProtocol.hpp"
#include <errno.h>
#include <sstream>
#include "common/config.h"
#include "include/ceph_features.h"
#include "msg/Message.h"
#define dout_subsys ceph_subsys_auth
// Per-message protection for Kerberos sessions is currently a no-op:
// inherit the dummy (pass-through) session handler behavior unchanged.
struct KrbSessionHandler : DummyAuthSessionHandler {
};
#endif //-- KRB_SESSION_HANDLER_HPP
| 867 | 21.842105 | 70 |
hpp
|
null |
ceph-main/src/auth/none/AuthNoneAuthorizeHandler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2009-2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "AuthNoneAuthorizeHandler.h"
#include "common/debug.h"
#define dout_subsys ceph_subsys_auth
// Verify an authorizer for the 'none' protocol.  There is no cryptography:
// we merely decode the entity name and global id from the payload and grant
// allow-all caps.  Returns false only when the payload cannot be decoded.
bool AuthNoneAuthorizeHandler::verify_authorizer(
  CephContext *cct,
  const KeyStore& keys,
  const ceph::buffer::list& authorizer_data,
  size_t connection_secret_required_len,
  ceph::buffer::list *authorizer_reply,
  EntityName *entity_name,
  uint64_t *global_id,
  AuthCapsInfo *caps_info,
  CryptoKey *session_key,
  std::string *connection_secret,
  std::unique_ptr<AuthAuthorizerChallenge> *challenge)
{
  using ceph::decode;
  auto iter = authorizer_data.cbegin();

  try {
    // payload layout matches AuthNoneAuthorizer::build_authorizer():
    // version byte, entity name, global id
    __u8 struct_v = 1;
    decode(struct_v, iter);
    decode(*entity_name, iter);
    decode(*global_id, iter);
  } catch (const ceph::buffer::error &err) {
    ldout(cct, 0) << "AuthNoneAuthorizeHandle::verify_authorizer() failed to decode" << dendl;
    return false;
  }

  // 'none' auth performs no capability checks
  caps_info->allow_all = true;
  return true;
}
// Return type of crypto used for this session's data; for none, no crypt used
int AuthNoneAuthorizeHandler::authorizer_session_crypto()
{
  // 'none' auth never signs or encrypts session traffic
  return SESSION_CRYPTO_NONE;
}
| 1,502 | 25.368421 | 94 |
cc
|
null |
ceph-main/src/auth/none/AuthNoneAuthorizeHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_AUTHNONEAUTHORIZEHANDLER_H
#define CEPH_AUTHNONEAUTHORIZEHANDLER_H
#include "auth/AuthAuthorizeHandler.h"
#include "include/common_fwd.h"
// Authorize handler for the 'none' protocol: decodes the entity identity
// from the authorizer payload and grants allow-all caps (no cryptography).
struct AuthNoneAuthorizeHandler : public AuthAuthorizeHandler {
  bool verify_authorizer(
    CephContext *cct,
    const KeyStore& keys,
    const ceph::buffer::list& authorizer_data,
    size_t connection_secret_required_len,
    ceph::buffer::list *authorizer_reply,
    EntityName *entity_name,
    uint64_t *global_id,
    AuthCapsInfo *caps_info,
    CryptoKey *session_key,
    std::string *connection_secret,
    std::unique_ptr<AuthAuthorizerChallenge> *challenge) override;
  // always SESSION_CRYPTO_NONE
  int authorizer_session_crypto() override;
};
#endif
| 1,123 | 27.1 | 71 |
h
|
null |
ceph-main/src/auth/none/AuthNoneClientHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_AUTHNONECLIENTHANDLER_H
#define CEPH_AUTHNONECLIENTHANDLER_H
#include "auth/AuthClientHandler.h"
#include "AuthNoneProtocol.h"
#include "common/ceph_context.h"
#include "common/config.h"
// Client-side handler for the 'none' auth protocol.  Nearly every hook is
// a no-op: there are no tickets, no rotating keys and no session secrets.
class AuthNoneClientHandler : public AuthClientHandler {
public:
  AuthNoneClientHandler(CephContext *cct_)
    : AuthClientHandler(cct_) {}

  AuthNoneClientHandler* clone() const override {
    return new AuthNoneClientHandler(*this);
  }

  void reset() override { }

  void prepare_build_request() override {}
  int build_request(ceph::buffer::list& bl) const override { return 0; }
  int handle_response(int ret, ceph::buffer::list::const_iterator& iter,
		      CryptoKey *session_key,
		      std::string *connection_secret) override { return 0; }
  bool build_rotating_request(ceph::buffer::list& bl) const override { return false; }

  int get_protocol() const override { return CEPH_AUTH_NONE; }

  // Build a trivial authorizer carrying only the entity name and global id.
  AuthAuthorizer *build_authorizer(uint32_t service_id) const override {
    AuthNoneAuthorizer *auth = new AuthNoneAuthorizer();
    // operator new throws std::bad_alloc on failure, so the previous
    // 'if (auth)' null check was dead code and has been removed.
    auth->build_authorizer(cct->_conf->name, global_id);
    return auth;
  }

  bool need_tickets() override { return false; }

  void set_global_id(uint64_t id) override {
    global_id = id;
  }
private:
  void validate_tickets() override {}
};
#endif
| 1,751 | 27.258065 | 86 |
h
|
null |
ceph-main/src/auth/none/AuthNoneProtocol.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_AUTHNONEPROTOCOL_H
#define CEPH_AUTHNONEPROTOCOL_H
#include "auth/Auth.h"
#include "include/common_fwd.h"
// Authorizer for the 'none' protocol: the payload is just a version byte,
// the entity name and the global id; verification always succeeds.
struct AuthNoneAuthorizer : public AuthAuthorizer {
  AuthNoneAuthorizer() : AuthAuthorizer(CEPH_AUTH_NONE) { }
  // Encode the authorizer payload into this->bl.  Always succeeds.
  bool build_authorizer(const EntityName &ename, uint64_t global_id) {
    __u8 struct_v = 1; // see AUTH_MODE_* in Auth.h
    using ceph::encode;
    encode(struct_v, bl);
    encode(ename, bl);
    encode(global_id, bl);
    // Fix: this bool function previously ended with 'return 0;' (i.e.
    // false) even though it unconditionally succeeds.
    return true;
  }
  bool verify_reply(ceph::buffer::list::const_iterator& reply,
		    std::string *connection_secret) override { return true; }
  bool add_challenge(CephContext *cct, const ceph::buffer::list& ch) override {
    return true;
  }
};
#endif
| 1,142 | 28.307692 | 79 |
h
|
null |
ceph-main/src/auth/none/AuthNoneServiceHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_AUTHNONESERVICEHANDLER_H
#define CEPH_AUTHNONESERVICEHANDLER_H
#include "auth/AuthServiceHandler.h"
#include "auth/Auth.h"
#include "include/common_fwd.h"
// Service-side handler for the 'none' auth protocol: requests are accepted
// without any exchange and every client gets allow-all caps.
class AuthNoneServiceHandler  : public AuthServiceHandler {
public:
  explicit AuthNoneServiceHandler(CephContext *cct_)
    : AuthServiceHandler(cct_) {}
  ~AuthNoneServiceHandler() override {}

  // nothing to do per-request; success
  int handle_request(ceph::buffer::list::const_iterator& indata,
		     size_t connection_secret_required_length,
		     ceph::buffer::list *result_bl,
		     AuthCapsInfo *caps,
		     CryptoKey *session_key,
		     std::string *connection_secret) override {
    return 0;
  }
private:
  // grants allow-all caps; returns 1 — presumably CEPH_AUTH_NONE as the
  // negotiated protocol id (verify against ceph_fs.h)
  int do_start_session(bool is_new_global_id,
		       ceph::buffer::list *result_bl,
		       AuthCapsInfo *caps) override {
    caps->allow_all = true;
    return 1;
  }
};
#endif
| 1,268 | 26 | 71 |
h
|
null |
ceph-main/src/auth/none/AuthNoneSessionHandler.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "auth/AuthSessionHandler.h"
// 'none' auth adds no per-message protection; pass-through session handler.
struct AuthNoneSessionHandler : DummyAuthSessionHandler {
};
| 529 | 25.5 | 71 |
h
|
null |
ceph-main/src/blk/BlockDevice.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <libgen.h>
#include <unistd.h>
#include "BlockDevice.h"
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
#include "kernel/KernelDevice.h"
#endif
#if defined(HAVE_SPDK)
#include "spdk/NVMEDevice.h"
#endif
#if defined(HAVE_BLUESTORE_PMEM)
#include "pmem/PMEMDevice.h"
#endif
#if defined(HAVE_LIBZBD)
#include "zoned/HMSMRDevice.h"
#endif
#include "common/debug.h"
#include "common/EventTrace.h"
#include "common/errno.h"
#include "include/compat.h"
#define dout_context cct
#define dout_subsys ceph_subsys_bdev
#undef dout_prefix
#define dout_prefix *_dout << "bdev "
using std::string;
// Map the legacy bool flag onto the access-mode enum.
blk_access_mode_t buffermode(bool buffered)
{
  if (buffered) {
    return blk_access_mode_t::BUFFERED;
  }
  return blk_access_mode_t::DIRECT;
}
// Pretty-print an access mode as "(buffered)" or "(direct)".
std::ostream& operator<<(std::ostream& os, const blk_access_mode_t buffered)
{
  const char *label =
    (buffered == blk_access_mode_t::BUFFERED) ? "(buffered)" : "(direct)";
  os << label;
  return os;
}
// Block until all running aios on this context have completed.  Pairs with
// try_aio_wake(), which decrements num_running under the same lock and
// notifies the condvar when the count hits zero.
void IOContext::aio_wait()
{
  std::unique_lock l(lock);
  // see _aio_thread for waker logic
  while (num_running.load() > 0) {
    dout(10) << __func__ << " " << this
	     << " waiting for " << num_running.load() << " aios to complete"
	     << dendl;
    cond.wait(l);
  }
  dout(20) << __func__ << " " << this << " done" << dendl;
}
// Count the "io" units represented by this context, used for cost/throttle
// accounting by callers.
uint64_t IOContext::get_num_ios() const
{
  // this is about the simplest model for transaction cost you can
  // imagine.  there is some fixed overhead cost by saying there is a
  // minimum of one "io".  and then we have some cost per "io" that is
  // a configurable (with different hdd and ssd defaults), and add
  // that to the bytes value.
  uint64_t ios = 0;
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
  // kernel aio path: one unit per queued aio
  ios += pending_aios.size();
#endif
#ifdef HAVE_SPDK
  // spdk path: one unit per scatter/gather segment
  ios += total_nseg;
#endif
  return ios;
}
// Drop all completed aio records (and the buffers they pin).  Must only be
// called once no aio is in flight.
void IOContext::release_running_aios()
{
  ceph_assert(!num_running);
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
  // release aio contexts (including pinned buffers).
  running_aios.clear();
#endif
}
// Probe 'path' against each compiled-in backend, most specific first
// (spdk, pmem, host-managed SMR), falling back to generic kernel aio.
BlockDevice::block_device_t
BlockDevice::detect_device_type(const std::string& path)
{
#if defined(HAVE_SPDK)
  if (NVMEDevice::support(path)) {
    return block_device_t::spdk;
  }
#endif
#if defined(HAVE_BLUESTORE_PMEM)
  if (PMEMDevice::support(path)) {
    return block_device_t::pmem;
  }
#endif
#if (defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)) && defined(HAVE_LIBZBD)
  if (HMSMRDevice::support(path)) {
    return block_device_t::hm_smr;
  }
#endif
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
  // default backend when no specialized driver claims the path
  return block_device_t::aio;
#else
  return block_device_t::unknown;
#endif
}
// Translate an explicit "bdev_type" config string into a backend id;
// names for backends that were not compiled in yield 'unknown'.
BlockDevice::block_device_t
BlockDevice::device_type_from_name(const std::string& blk_dev_name)
{
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
  if (blk_dev_name == "aio") {
    return block_device_t::aio;
  }
#endif
#if defined(HAVE_SPDK)
  if (blk_dev_name == "spdk") {
    return block_device_t::spdk;
  }
#endif
#if defined(HAVE_BLUESTORE_PMEM)
  if (blk_dev_name == "pmem") {
    return block_device_t::pmem;
  }
#endif
#if (defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)) && defined(HAVE_LIBZBD)
  if (blk_dev_name == "hm_smr") {
    return block_device_t::hm_smr;
  }
#endif
  return block_device_t::unknown;
}
// Instantiate the concrete BlockDevice for a backend id.  Aborts on an
// unknown/uncompiled backend, since callers have no way to recover.
BlockDevice* BlockDevice::create_with_type(block_device_t device_type,
  CephContext* cct, const std::string& path, aio_callback_t cb,
  void *cbpriv, aio_callback_t d_cb, void *d_cbpriv)
{

  switch (device_type) {
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
  case block_device_t::aio:
    return new KernelDevice(cct, cb, cbpriv, d_cb, d_cbpriv);
#endif
#if defined(HAVE_SPDK)
  case block_device_t::spdk:
    return new NVMEDevice(cct, cb, cbpriv);
#endif
#if defined(HAVE_BLUESTORE_PMEM)
  case block_device_t::pmem:
    return new PMEMDevice(cct, cb, cbpriv);
#endif
#if (defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)) && defined(HAVE_LIBZBD)
  case block_device_t::hm_smr:
    return new HMSMRDevice(cct, cb, cbpriv, d_cb, d_cbpriv);
#endif
  default:
    ceph_abort_msg("unsupported device");
    return nullptr;
  }
}
// Create the BlockDevice implementation for 'path'.  An explicit
// "bdev_type" config value wins; otherwise the backend is probed from the
// path itself.
BlockDevice *BlockDevice::create(
  CephContext* cct, const string& path, aio_callback_t cb,
  void *cbpriv, aio_callback_t d_cb, void *d_cbpriv)
{
  const string blk_dev_name = cct->_conf.get_val<string>("bdev_type");
  const block_device_t device_type = blk_dev_name.empty()
    ? detect_device_type(path)
    : device_type_from_name(blk_dev_name);
  return create_with_type(device_type, cct, path, cb, cbpriv, d_cb, d_cbpriv);
}
// Validate an (offset, length) pair for device I/O: both values must be
// block-aligned, the length non-zero, and the range fully inside the
// device.  Logs the offending values when the check fails.
bool BlockDevice::is_valid_io(uint64_t off, uint64_t len) const {
  const bool aligned = (off % block_size == 0) && (len % block_size == 0);
  const bool bounded = (len > 0) && (off < size) && (off + len <= size);
  if (aligned && bounded) {
    return true;
  }
  derr << __func__ << " " << std::hex
       << off << "~" << len
       << " block_size " << block_size
       << " size " << size
       << std::dec << dendl;
  return false;
}
| 5,393 | 24.443396 | 78 |
cc
|
null |
ceph-main/src/blk/BlockDevice.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BLK_BLOCKDEVICE_H
#define CEPH_BLK_BLOCKDEVICE_H
#include <atomic>
#include <condition_variable>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <string>
#include <vector>
#include "acconfig.h"
#include "common/ceph_mutex.h"
#include "include/common_fwd.h"
#include "extblkdev/ExtBlkDevInterface.h"
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
#include "aio/aio.h"
#endif
#include "include/ceph_assert.h"
#include "include/buffer.h"
#include "include/interval_set.h"
#define SPDK_PREFIX "spdk:"
#if defined(__linux__)
#if !defined(F_SET_FILE_RW_HINT)
#define F_LINUX_SPECIFIC_BASE 1024
#define F_SET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 14)
#endif
// These values match Linux definition
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/fcntl.h#n56
#define WRITE_LIFE_NOT_SET 0 // No hint information set
#define WRITE_LIFE_NONE 1 // No hints about write life time
#define WRITE_LIFE_SHORT 2 // Data written has a short life time
#define WRITE_LIFE_MEDIUM 3 // Data written has a medium life time
#define WRITE_LIFE_LONG 4 // Data written has a long life time
#define WRITE_LIFE_EXTREME 5 // Data written has an extremely long life time
#define WRITE_LIFE_MAX 6
#else
// On systems don't have WRITE_LIFE_* only use one FD
// And all files are created equal
#define WRITE_LIFE_NOT_SET 0 // No hint information set
#define WRITE_LIFE_NONE 0 // No hints about write life time
#define WRITE_LIFE_SHORT 0 // Data written has a short life time
#define WRITE_LIFE_MEDIUM 0 // Data written has a medium life time
#define WRITE_LIFE_LONG 0 // Data written has a long life time
#define WRITE_LIFE_EXTREME 0 // Data written has an extremely long life time
#define WRITE_LIFE_MAX 1
#endif
enum struct blk_access_mode_t {
DIRECT,
BUFFERED
};
blk_access_mode_t buffermode(bool buffered);
std::ostream& operator<<(std::ostream& os, const blk_access_mode_t buffered);
/// track in-flight io
struct IOContext {
enum {
FLAG_DONT_CACHE = 1
};
private:
ceph::mutex lock = ceph::make_mutex("IOContext::lock");
ceph::condition_variable cond;
int r = 0;
public:
CephContext* cct;
void *priv;
#ifdef HAVE_SPDK
void *nvme_task_first = nullptr;
void *nvme_task_last = nullptr;
std::atomic_int total_nseg = {0};
#endif
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
std::list<aio_t> pending_aios; ///< not yet submitted
std::list<aio_t> running_aios; ///< submitting or submitted
#endif
std::atomic_int num_pending = {0};
std::atomic_int num_running = {0};
bool allow_eio;
uint32_t flags = 0; // FLAG_*
explicit IOContext(CephContext* cct, void *p, bool allow_eio = false)
: cct(cct), priv(p), allow_eio(allow_eio)
{}
// no copying
IOContext(const IOContext& other) = delete;
IOContext &operator=(const IOContext& other) = delete;
bool has_pending_aios() {
return num_pending.load();
}
void release_running_aios();
void aio_wait();
uint64_t get_num_ios() const;
void try_aio_wake() {
assert(num_running >= 1);
std::lock_guard l(lock);
if (num_running.fetch_sub(1) == 1) {
// we might have some pending IOs submitted after the check
// as there is no lock protection for aio_submit.
// Hence we might have false conditional trigger.
// aio_wait has to handle that hence do not care here.
cond.notify_all();
}
}
void set_return_value(int _r) {
r = _r;
}
int get_return_value() const {
return r;
}
bool skip_cache() const {
return flags & FLAG_DONT_CACHE;
}
};
// Abstract interface to a raw block device backend (kernel aio, SPDK NVMe,
// PMEM, host-managed SMR).  Concrete implementations are selected by
// create()/create_with_type().  All reads/writes must satisfy
// is_valid_io(): block-aligned, non-empty and within the device.
class BlockDevice {
public:
  CephContext* cct;
  typedef void (*aio_callback_t)(void *handle, void *aio);
private:
  ceph::mutex ioc_reap_lock = ceph::make_mutex("BlockDevice::ioc_reap_lock");
  std::vector<IOContext*> ioc_reap_queue;
  std::atomic_int ioc_reap_count = {0};
  enum class block_device_t {
    unknown,
#if defined(HAVE_LIBAIO) || defined(HAVE_POSIXAIO)
    aio,
#if defined(HAVE_LIBZBD)
    hm_smr,
#endif
#endif
#if defined(HAVE_SPDK)
    spdk,
#endif
#if defined(HAVE_BLUESTORE_PMEM)
    pmem,
#endif
  };
  // probe a path for the most specific compiled-in backend
  static block_device_t detect_device_type(const std::string& path);
  // translate an explicit "bdev_type" config value into a backend id
  static block_device_t device_type_from_name(const std::string& blk_dev_name);
  static BlockDevice *create_with_type(block_device_t device_type,
    CephContext* cct, const std::string& path, aio_callback_t cb,
    void *cbpriv, aio_callback_t d_cb, void *d_cbpriv);

protected:
  uint64_t size = 0;
  uint64_t block_size = 0;
  uint64_t optimal_io_size = 0;
  bool support_discard = false;
  bool rotational = true;
  bool lock_exclusive = true;

  // HM-SMR specific properties.  In HM-SMR drives the LBA space is divided into
  // fixed-size zones.  Typically, the first few zones are randomly writable;
  // they form a conventional region of the drive.  The remaining zones must be
  // written sequentially and they must be reset before rewritten.  For example,
  // a 14 TB HGST HSH721414AL drive has 52156 zones each of size is 256 MiB.
  // The zones 0-523 are randomly writable and they form the conventional region
  // of the drive.  The zones 524-52155 are sequential zones.
  uint64_t conventional_region_size = 0;
  uint64_t zone_size = 0;

public:
  aio_callback_t aio_callback;
  void *aio_callback_priv;
  BlockDevice(CephContext* cct, aio_callback_t cb, void *cbpriv)
  : cct(cct),
    aio_callback(cb),
    aio_callback_priv(cbpriv)
 {}
  virtual ~BlockDevice() = default;

  // factory: config "bdev_type" wins, else the backend is probed from path
  static BlockDevice *create(
    CephContext* cct, const std::string& path, aio_callback_t cb, void *cbpriv, aio_callback_t d_cb, void *d_cbpriv);
  virtual bool supported_bdev_label() { return true; }
  virtual bool is_rotational() { return rotational; }

  // HM-SMR-specific calls
  virtual bool is_smr() const { return false; }
  virtual uint64_t get_zone_size() const {
    ceph_assert(is_smr());
    return zone_size;
  }
  virtual uint64_t get_conventional_region_size() const {
    ceph_assert(is_smr());
    return conventional_region_size;
  }
  virtual void reset_all_zones() {
    ceph_assert(is_smr());
  }
  virtual void reset_zone(uint64_t zone) {
    ceph_assert(is_smr());
  }
  virtual std::vector<uint64_t> get_zones() {
    ceph_assert(is_smr());
    return std::vector<uint64_t>();
  }

  virtual void aio_submit(IOContext *ioc) = 0;

  void set_no_exclusive_lock() {
    lock_exclusive = false;
  }

  uint64_t get_size() const { return size; }
  uint64_t get_block_size() const { return block_size; }
  uint64_t get_optimal_io_size() const { return optimal_io_size; }

  /// hook to provide utilization of thinly-provisioned device
  virtual int get_ebd_state(ExtBlkDevState &state) const {
    return -ENOENT;
  }

  virtual int collect_metadata(const std::string& prefix, std::map<std::string,std::string> *pm) const = 0;

  virtual int get_devname(std::string *out) const {
    return -ENOENT;
  }
  virtual int get_devices(std::set<std::string> *ls) const {
    std::string s;
    if (get_devname(&s) == 0) {
      ls->insert(s);
    }
    return 0;
  }
  virtual int get_numa_node(int *node) const {
    return -EOPNOTSUPP;
  }

  virtual int read(
    uint64_t off,
    uint64_t len,
    ceph::buffer::list *pbl,
    IOContext *ioc,
    bool buffered) = 0;
  virtual int read_random(
    uint64_t off,
    uint64_t len,
    char *buf,
    bool buffered) = 0;
  virtual int write(
    uint64_t off,
    ceph::buffer::list& bl,
    bool buffered,
    int write_hint = WRITE_LIFE_NOT_SET) = 0;

  virtual int aio_read(
    uint64_t off,
    uint64_t len,
    ceph::buffer::list *pbl,
    IOContext *ioc) = 0;
  virtual int aio_write(
    uint64_t off,
    ceph::buffer::list& bl,
    IOContext *ioc,
    bool buffered,
    int write_hint = WRITE_LIFE_NOT_SET) = 0;
  virtual int flush() = 0;
  virtual bool try_discard(interval_set<uint64_t> &to_release, bool async=true) { return false; }
  virtual void discard_drain() { return; }

  // for managing buffered readers/writers
  virtual int invalidate_cache(uint64_t off, uint64_t len) = 0;
  virtual int open(const std::string& path) = 0;
  virtual void close() = 0;

  struct hugepaged_raw_marker_t {};

protected:
  bool is_valid_io(uint64_t off, uint64_t len) const;
};
#endif //CEPH_BLK_BLOCKDEVICE_H
| 8,853 | 28.029508 | 117 |
h
|
null |
ceph-main/src/blk/aio/aio.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <algorithm>
#include "aio.h"
// Dump an aio's iovec list as "[idx] 0x<base>~<len>" entries (hex).
std::ostream& operator<<(std::ostream& os, const aio_t& aio)
{
  os << "aio: ";
  unsigned idx = 0;
  for (const auto& vec : aio.iov) {
    os << "\n [" << idx << "] 0x"
       << std::hex << vec.iov_base << "~" << vec.iov_len << std::dec;
    ++idx;
  }
  return os;
}
// Submit the aios in [begin, end) to the kernel, retrying EAGAIN with
// exponential backoff.  '*retries' is incremented per retry.  Returns the
// number of aios submitted, or a negative errno on unrecoverable failure.
int aio_queue_t::submit_batch(aio_iter begin, aio_iter end,
                              uint16_t aios_size, void *priv,
                              int *retries)
{
  // 2^16 * 125us = ~8 seconds, so max sleep is ~16 seconds
  int attempts = 16;
  int delay = 125;
  int r;

  // flatten the list into an array of pointers for io_submit()
  aio_iter cur = begin;
  struct aio_t *piocb[aios_size];
  int left = 0;
  while (cur != end) {
    cur->priv = priv;
    *(piocb+left) = &(*cur);
    ++left;
    ++cur;
  }
  ceph_assert(aios_size >= left);
  int done = 0;
  while (left > 0) {
#if defined(HAVE_LIBAIO)
    r = io_submit(ctx, std::min(left, max_iodepth), (struct iocb**)(piocb + done));
#elif defined(HAVE_POSIXAIO)
    if (piocb[done]->n_aiocb == 1) {
      // TODO: consider batching multiple reads together with lio_listio
      piocb[done]->aio.aiocb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
      piocb[done]->aio.aiocb.aio_sigevent.sigev_notify_kqueue = ctx;
      piocb[done]->aio.aiocb.aio_sigevent.sigev_value.sival_ptr = piocb[done];
      r = aio_read(&piocb[done]->aio.aiocb);
    } else {
      struct sigevent sev;
      sev.sigev_notify = SIGEV_KEVENT;
      sev.sigev_notify_kqueue = ctx;
      sev.sigev_value.sival_ptr = piocb[done];
      r = lio_listio(LIO_NOWAIT, &piocb[done]->aio.aiocbp, piocb[done]->n_aiocb, &sev);
    }
#endif
    if (r < 0) {
      if (r == -EAGAIN && attempts-- > 0) {
	// queue full; back off and retry
	usleep(delay);
	delay *= 2;
	(*retries)++;
	continue;
      }
      return r;
    }
    // NOTE(review): on the POSIXAIO path aio_read()/lio_listio() return 0
    // on success, which would trip this assert (it holds for libaio's
    // io_submit, which returns the count submitted) — confirm intended
    // semantics on FreeBSD builds.
    ceph_assert(r > 0);
    done += r;
    left -= r;
    attempts = 16;
    delay = 125;
  }
  return done;
}
// Reap up to 'max' completed aios, waiting at most timeout_ms.  Fills
// paio[] with the completed records (rval set per-aio) and returns how
// many were reaped, or a negative errno.
int aio_queue_t::get_next_completed(int timeout_ms, aio_t **paio, int max)
{
#if defined(HAVE_LIBAIO)
  io_event events[max];
#elif defined(HAVE_POSIXAIO)
  struct kevent events[max];
#endif
  struct timespec t = {
    timeout_ms / 1000,
    (timeout_ms % 1000) * 1000 * 1000
  };

  int r = 0;
  do {
#if defined(HAVE_LIBAIO)
    r = io_getevents(ctx, 1, max, events, &t);
#elif defined(HAVE_POSIXAIO)
    r = kevent(ctx, NULL, 0, events, max, &t);
    if (r < 0)
      r = -errno;
#endif
  } while (r == -EINTR);

  for (int i=0; i<r; ++i) {
#if defined(HAVE_LIBAIO)
    paio[i] = (aio_t *)events[i].obj;
    paio[i]->rval = events[i].res;
#else
    paio[i] = (aio_t*)events[i].udata;
    if (paio[i]->n_aiocb == 1) {
      paio[i]->rval = aio_return(&paio[i]->aio.aiocb);
    } else {
      // Emulate the return value of pwritev.  I can't find any documentation
      // for what the value of io_event.res is supposed to be.  I'm going to
      // assume that it's just like pwritev/preadv/pwrite/pread.
      paio[i]->rval = 0;
      for (int j = 0; j < paio[i]->n_aiocb; j++) {
	int res = aio_return(&paio[i]->aio.aiocbp[j]);
	if (res < 0) {
	  paio[i]->rval = res;
	  break;
	} else {
	  paio[i]->rval += res;
	}
      }
      // aiocbp array was calloc'ed in pwritev/preadv; free it here
      free(paio[i]->aio.aiocbp);
    }
#endif
  }
  return r;
}
| 3,200 | 24.608 | 87 |
cc
|
null |
ceph-main/src/blk/aio/aio.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "acconfig.h"
#if defined(HAVE_LIBAIO)
#include <libaio.h>
#elif defined(HAVE_POSIXAIO)
#include <aio.h>
#include <sys/event.h>
#endif
#include <boost/intrusive/list.hpp>
#include <boost/container/small_vector.hpp>
#include "include/buffer.h"
#include "include/types.h"
// One asynchronous I/O operation: the platform control block (libaio iocb
// or POSIX aiocb(s)), the iovec list, and the completion result.
struct aio_t {
#if defined(HAVE_LIBAIO)
  struct iocb iocb{};  // must be first element; see shenanigans in aio_queue_t
#elif defined(HAVE_POSIXAIO)
  //  static long aio_listio_max = -1;
  union {
    struct aiocb aiocb;
    struct aiocb *aiocbp;
  } aio;
  int n_aiocb;
#endif
  void *priv;
  int fd;
  boost::container::small_vector<iovec,4> iov;
  uint64_t offset, length;
  long rval;                 // completion result; -1000 until reaped
  ceph::buffer::list bl;  ///< write payload (so that it remains stable for duration)

  boost::intrusive::list_member_hook<> queue_item;

  aio_t(void *p, int f) : priv(p), fd(f), offset(0), length(0), rval(-1000) {
  }

  // Prepare a vectored write of 'len' bytes at '_offset' from iov.
  void pwritev(uint64_t _offset, uint64_t len) {
    offset = _offset;
    length = len;
#if defined(HAVE_LIBAIO)
    io_prep_pwritev(&iocb, fd, &iov[0], iov.size(), offset);
#elif defined(HAVE_POSIXAIO)
    n_aiocb = iov.size();
    aio.aiocbp = (struct aiocb*)calloc(iov.size(), sizeof(struct aiocb));
    // fix: use size_t like preadv below; 'int i' compared against the
    // unsigned iov.size() was a signed/unsigned mismatch
    for (size_t i = 0; i < iov.size(); i++) {
      aio.aiocbp[i].aio_fildes = fd;
      aio.aiocbp[i].aio_offset = offset;
      aio.aiocbp[i].aio_buf = iov[i].iov_base;
      aio.aiocbp[i].aio_nbytes = iov[i].iov_len;
      aio.aiocbp[i].aio_lio_opcode = LIO_WRITE;
      offset += iov[i].iov_len;
    }
#endif
  }

  // Prepare a vectored read of 'len' bytes at '_offset' into iov.
  void preadv(uint64_t _offset, uint64_t len) {
    offset = _offset;
    length = len;
#if defined(HAVE_LIBAIO)
    io_prep_preadv(&iocb, fd, &iov[0], iov.size(), offset);
#elif defined(HAVE_POSIXAIO)
    n_aiocb = iov.size();
    aio.aiocbp = (struct aiocb*)calloc(iov.size(), sizeof(struct aiocb));
    for (size_t i = 0; i < iov.size(); i++) {
      aio.aiocbp[i].aio_fildes = fd;
      aio.aiocbp[i].aio_buf = iov[i].iov_base;
      aio.aiocbp[i].aio_nbytes = iov[i].iov_len;
      aio.aiocbp[i].aio_offset = offset;
      aio.aiocbp[i].aio_lio_opcode = LIO_READ;
      offset += iov[i].iov_len;
    }
#endif
  }

  long get_return_value() {
    return rval;
  }
};
std::ostream& operator<<(std::ostream& os, const aio_t& aio);
typedef boost::intrusive::list<
aio_t,
boost::intrusive::member_hook<
aio_t,
boost::intrusive::list_member_hook<>,
&aio_t::queue_item> > aio_list_t;
// Abstract interface over an asynchronous-I/O submission/completion queue.
struct io_queue_t {
  typedef std::list<aio_t>::iterator aio_iter;

  virtual ~io_queue_t() {};

  // set up the queue; 'fds' lets zoned/uring backends register descriptors
  virtual int init(std::vector<int> &fds) = 0;
  virtual void shutdown() = 0;
  // submit [begin, end); returns count submitted or negative errno
  virtual int submit_batch(aio_iter begin, aio_iter end, uint16_t aios_size,
			   void *priv, int *retries) = 0;
  // reap up to 'max' completions; returns count reaped or negative errno
  virtual int get_next_completed(int timeout_ms, aio_t **paio, int max) = 0;
};
// io_queue_t backed by Linux libaio (io_setup/io_submit/io_getevents) or,
// on BSD, POSIX aio with a kqueue for completion notification.
struct aio_queue_t final : public io_queue_t {
  int max_iodepth;
#if defined(HAVE_LIBAIO)
  io_context_t ctx;
#elif defined(HAVE_POSIXAIO)
  int ctx;   // kqueue descriptor
#endif

  explicit aio_queue_t(unsigned max_iodepth)
    : max_iodepth(max_iodepth),
      ctx(0) {
  }
  ~aio_queue_t() final {
    // shutdown() must have been called before destruction
    ceph_assert(ctx == 0);
  }

  int init(std::vector<int> &fds) final {
    (void)fds;
    ceph_assert(ctx == 0);
#if defined(HAVE_LIBAIO)
    int r = io_setup(max_iodepth, &ctx);
    if (r < 0) {
      if (ctx) {
	io_destroy(ctx);
	ctx = 0;
      }
    }
    return r;
#elif defined(HAVE_POSIXAIO)
    ctx = kqueue();
    if (ctx < 0)
      return -errno;
    else
      return 0;
#endif
  }
  void shutdown() final {
    if (ctx) {
#if defined(HAVE_LIBAIO)
      int r = io_destroy(ctx);
#elif defined(HAVE_POSIXAIO)
      int r = close(ctx);
#endif
      ceph_assert(r == 0);
      ctx = 0;
    }
  }

  int submit_batch(aio_iter begin, aio_iter end, uint16_t aios_size,
		   void *priv, int *retries) final;
  int get_next_completed(int timeout_ms, aio_t **paio, int max) final;
};
| 3,965 | 23.7875 | 85 |
h
|
null |
ceph-main/src/blk/kernel/KernelDevice.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <limits>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <boost/container/flat_map.hpp>
#include <boost/lockfree/queue.hpp>
#include "KernelDevice.h"
#include "include/buffer_raw.h"
#include "include/intarith.h"
#include "include/types.h"
#include "include/compat.h"
#include "include/stringify.h"
#include "include/str_map.h"
#include "common/blkdev.h"
#include "common/buffer_instrumentation.h"
#include "common/errno.h"
#if defined(__FreeBSD__)
#include "bsm/audit_errno.h"
#endif
#include "common/debug.h"
#include "common/numa.h"
#include "global/global_context.h"
#include "io_uring.h"
#define dout_context cct
#define dout_subsys ceph_subsys_bdev
#undef dout_prefix
#define dout_prefix *_dout << "bdev(" << this << " " << path << ") "
using std::list;
using std::map;
using std::string;
using std::vector;
using ceph::bufferlist;
using ceph::bufferptr;
using ceph::make_timespan;
using ceph::mono_clock;
using ceph::operator <<;
// Construct a kernel block device wrapper.  `cb`/`cbpriv` fire on aio
// completion, `d_cb`/`d_cbpriv` on completed async discards.  Selects
// io_uring when enabled and supported, otherwise falls back to libaio.
KernelDevice::KernelDevice(CephContext* cct, aio_callback_t cb, void *cbpriv, aio_callback_t d_cb, void *d_cbpriv)
  : BlockDevice(cct, cb, cbpriv),
    aio(false), dio(false),
    discard_callback(d_cb),
    discard_callback_priv(d_cbpriv),
    aio_stop(false),
    discard_started(false),
    discard_stop(false),
    aio_thread(this),
    discard_thread(this),
    injecting_crash(0)
{
  // one direct and one buffered fd per WRITE_LIFE hint (populated in open())
  fd_directs.resize(WRITE_LIFE_MAX, -1);
  fd_buffereds.resize(WRITE_LIFE_MAX, -1);
  bool use_ioring = cct->_conf.get_val<bool>("bdev_ioring");
  unsigned int iodepth = cct->_conf->bdev_aio_max_queue_depth;
  if (use_ioring && ioring_queue_t::supported()) {
    bool use_ioring_hipri = cct->_conf.get_val<bool>("bdev_ioring_hipri");
    bool use_ioring_sqthread_poll = cct->_conf.get_val<bool>("bdev_ioring_sqthread_poll");
    io_queue = std::make_unique<ioring_queue_t>(iodepth, use_ioring_hipri, use_ioring_sqthread_poll);
  } else {
    // warn about the fallback only once per process
    static bool once;
    if (use_ioring && !once) {
      derr << "WARNING: io_uring API is not supported! Fallback to libaio!"
           << dendl;
      once = true;
    }
    io_queue = std::make_unique<aio_queue_t>(iodepth);
  }
}
// Take an exclusive advisory lock on the device, retrying while short-lived
// openers (e.g. systemd-udevd) hold it.  Returns 0 on success, -EAGAIN after
// bdev_flock_retry attempts, or another negative errno.
int KernelDevice::_lock()
{
  // When the block changes, systemd-udevd will open the block,
  // read some information and close it. Then a failure occurs here.
  // So we need to try again here.
  int fd = fd_directs[WRITE_LIFE_NOT_SET];
  dout(10) << __func__ << " fd=" << fd << dendl;
  uint64_t nr_tries = 0;
  for (;;) {
    struct flock fl = { .l_type = F_WRLCK,
                        .l_whence = SEEK_SET };
    int r = ::fcntl(fd, F_OFD_SETLK, &fl);
    if (r < 0) {
      if (errno == EINVAL) {
        // kernel without OFD-lock support: fall back to flock(2)
        r = ::flock(fd, LOCK_EX | LOCK_NB);
      }
    }
    if (r == 0) {
      return 0;
    }
    // anything other than "someone else holds it" is a hard failure
    if (errno != EAGAIN) {
      return -errno;
    }
    dout(1) << __func__ << " flock busy on " << path << dendl;
    if (const uint64_t max_retry =
        cct->_conf.get_val<uint64_t>("bdev_flock_retry");
        max_retry > 0 && nr_tries++ == max_retry) {
      return -EAGAIN;
    }
    double retry_interval =
      cct->_conf.get_val<double>("bdev_flock_retry_interval");
    std::this_thread::sleep_for(ceph::make_timespan(retry_interval));
  }
}
// Open the device/file at `p`: one O_DIRECT and one buffered fd per
// WRITE_LIFE hint, optional exclusive locking, geometry probing, and
// startup of the aio (and optionally discard) threads.
// Returns 0 or a negative error; on failure all opened fds are closed.
int KernelDevice::open(const string& p)
{
  path = p;
  int r = 0, i = 0;
  dout(1) << __func__ << " path " << path << dendl;
  struct stat statbuf;
  bool is_block;
  r = stat(path.c_str(), &statbuf);
  if (r != 0) {
    derr << __func__ << " stat got: " << cpp_strerror(r) << dendl;
    goto out_fail;
  }
  is_block = (statbuf.st_mode & S_IFMT) == S_IFBLK;
  for (i = 0; i < WRITE_LIFE_MAX; i++) {
    int flags = 0;
    if (lock_exclusive && is_block && (i == 0)) {
      // If opening block device use O_EXCL flag. It gives us best protection,
      // as no other process can overwrite the data for as long as we are running.
      // For block devices ::flock is not enough,
      // since 2 different inodes with same major/minor can be locked.
      // Exclusion by O_EXCL works in containers too.
      flags |= O_EXCL;
    }
    int fd = ::open(path.c_str(), O_RDWR | O_DIRECT | flags);
    if (fd < 0) {
      r = -errno;
      break;
    }
    fd_directs[i] = fd;
    fd = ::open(path.c_str(), O_RDWR | O_CLOEXEC);
    if (fd < 0) {
      r = -errno;
      break;
    }
    fd_buffereds[i] = fd;
  }
  // i < WRITE_LIFE_MAX means the loop above broke out on an open error
  if (i != WRITE_LIFE_MAX) {
    derr << __func__ << " open got: " << cpp_strerror(r) << dendl;
    goto out_fail;
  }
#if defined(F_SET_FILE_RW_HINT)
  for (i = WRITE_LIFE_NONE; i < WRITE_LIFE_MAX; i++) {
    if (fcntl(fd_directs[i], F_SET_FILE_RW_HINT, &i) < 0) {
      r = -errno;
      break;
    }
    if (fcntl(fd_buffereds[i], F_SET_FILE_RW_HINT, &i) < 0) {
      r = -errno;
      break;
    }
  }
  if (i != WRITE_LIFE_MAX) {
    // not fatal: choose_fd() will map everything to WRITE_LIFE_NOT_SET
    enable_wrt = false;
    dout(0) << "ioctl(F_SET_FILE_RW_HINT) on " << path << " failed: " << cpp_strerror(r) << dendl;
  }
#endif
  dio = true;
  aio = cct->_conf->bdev_aio;
  if (!aio) {
    ceph_abort_msg("non-aio not supported");
  }
  // disable readahead as it will wreak havoc on our mix of
  // directio/aio and buffered io.
  r = posix_fadvise(fd_buffereds[WRITE_LIFE_NOT_SET], 0, 0, POSIX_FADV_RANDOM);
  if (r) {
    r = -r;
    derr << __func__ << " posix_fadvise got: " << cpp_strerror(r) << dendl;
    goto out_fail;
  }
  if (lock_exclusive) {
    // We need to keep soft locking (via flock()) because O_EXCL does not work for regular files.
    // This is as good as we can get. Other processes can still overwrite the data,
    // but at least we are protected from mounting same device twice in ceph processes.
    // We also apply soft locking for block devices, as it populates /proc/locks. (see lslocks)
    r = _lock();
    if (r < 0) {
      derr << __func__ << " failed to lock " << path << ": " << cpp_strerror(r)
           << dendl;
      goto out_fail;
    }
  }
  struct stat st;
  r = ::fstat(fd_directs[WRITE_LIFE_NOT_SET], &st);
  if (r < 0) {
    r = -errno;
    derr << __func__ << " fstat got " << cpp_strerror(r) << dendl;
    goto out_fail;
  }
  // Operate as though the block size is 4 KB. The backing file
  // blksize doesn't strictly matter except that some file systems may
  // require a read/modify/write if we write something smaller than
  // it.
  block_size = cct->_conf->bdev_block_size;
  if (block_size != (unsigned)st.st_blksize) {
    dout(1) << __func__ << " backing device/file reports st_blksize "
            << st.st_blksize << ", using bdev_block_size "
            << block_size << " anyway" << dendl;
  }
  {
    BlkDev blkdev_direct(fd_directs[WRITE_LIFE_NOT_SET]);
    BlkDev blkdev_buffered(fd_buffereds[WRITE_LIFE_NOT_SET]);
    if (S_ISBLK(st.st_mode)) {
      int64_t s;
      r = blkdev_direct.get_size(&s);
      if (r < 0) {
        goto out_fail;
      }
      size = s;
    } else {
      size = st.st_size;
    }
    char partition[PATH_MAX], devname[PATH_MAX];
    if ((r = blkdev_buffered.partition(partition, PATH_MAX)) ||
        (r = blkdev_buffered.wholedisk(devname, PATH_MAX))) {
      derr << "unable to get device name for " << path << ": "
           << cpp_strerror(r) << dendl;
      // unknown topology: assume the conservative (rotational) case
      rotational = true;
    } else {
      dout(20) << __func__ << " devname " << devname << dendl;
      rotational = blkdev_buffered.is_rotational();
      support_discard = blkdev_buffered.support_discard();
      optimal_io_size = blkdev_buffered.get_optimal_io_size();
      this->devname = devname;
      // check if any extended block device plugin recognizes this device
      // detect_vdo has moved into the VDO plugin
      int rc = extblkdev::detect_device(cct, devname, ebd_impl);
      if (rc != 0) {
        dout(20) << __func__ << " no plugin volume maps to " << devname << dendl;
      }
    }
  }
  r = _post_open();
  if (r < 0) {
    goto out_fail;
  }
  r = _aio_start();
  if (r < 0) {
    goto out_fail;
  }
  if (support_discard && cct->_conf->bdev_enable_discard && cct->_conf->bdev_async_discard) {
    _discard_start();
  }
  // round size down to an even block
  size &= ~(block_size - 1);
  dout(1) << __func__
          << " size " << size
          << " (0x" << std::hex << size << std::dec << ", "
          << byte_u_t(size) << ")"
          << " block_size " << block_size
          << " (" << byte_u_t(block_size) << ")"
          << " " << (rotational ? "rotational device," : "non-rotational device,")
          << " discard " << (support_discard ? "supported" : "not supported")
          << dendl;
  return 0;
 out_fail:
  // close whatever got opened; the first -1 in either vector marks where the
  // open loop stopped, so breaking on it is safe
  for (i = 0; i < WRITE_LIFE_MAX; i++) {
    if (fd_directs[i] >= 0) {
      VOID_TEMP_FAILURE_RETRY(::close(fd_directs[i]));
      fd_directs[i] = -1;
    } else {
      break;
    }
    if (fd_buffereds[i] >= 0) {
      VOID_TEMP_FAILURE_RETRY(::close(fd_buffereds[i]));
      fd_buffereds[i] = -1;
    } else {
      break;
    }
  }
  return r;
}
int KernelDevice::get_devices(std::set<std::string> *ls) const
{
  // Resolve the underlying raw kernel device(s) for this bdev.  A device
  // whose kernel name is unknown simply contributes nothing; this call
  // always reports success.
  if (!devname.empty()) {
    get_raw_devices(devname, ls);
  }
  return 0;
}
void KernelDevice::close()
{
dout(1) << __func__ << dendl;
_aio_stop();
if (discard_thread.is_started()) {
_discard_stop();
}
_pre_close();
extblkdev::release_device(ebd_impl);
for (int i = 0; i < WRITE_LIFE_MAX; i++) {
assert(fd_directs[i] >= 0);
VOID_TEMP_FAILURE_RETRY(::close(fd_directs[i]));
fd_directs[i] = -1;
assert(fd_buffereds[i] >= 0);
VOID_TEMP_FAILURE_RETRY(::close(fd_buffereds[i]));
fd_buffereds[i] = -1;
}
path.clear();
}
// Record device metadata (geometry, topology, identity) into *pm under
// `prefix`.  Returns 0, or -errno if the buffered fd cannot be fstat'ed.
// FIX 1: "dev_node" used to be filled by a second blkdev.partition() call,
//        so it always duplicated "partition_path"; query the whole-disk
//        node instead (matching the wholedisk() lookup done in open()).
// FIX 2: removed the premature `if (!r) return 0;` which skipped the
//        model/dev/serial/numa fields whenever that lookup succeeded.
int KernelDevice::collect_metadata(const string& prefix, map<string,string> *pm) const
{
  (*pm)[prefix + "support_discard"] = stringify((int)(bool)support_discard);
  (*pm)[prefix + "rotational"] = stringify((int)(bool)rotational);
  (*pm)[prefix + "size"] = stringify(get_size());
  (*pm)[prefix + "block_size"] = stringify(get_block_size());
  (*pm)[prefix + "optimal_io_size"] = stringify(get_optimal_io_size());
  (*pm)[prefix + "driver"] = "KernelDevice";
  if (rotational) {
    (*pm)[prefix + "type"] = "hdd";
  } else {
    (*pm)[prefix + "type"] = "ssd";
  }
  // if compression device detected, collect meta data for device
  // VDO specific meta data has moved into VDO plugin
  if (ebd_impl) {
    ebd_impl->collect_metadata(prefix, pm);
  }

  {
    // comma-separated list of underlying raw devices, if resolvable
    string res_names;
    std::set<std::string> devnames;
    if (get_devices(&devnames) == 0) {
      for (auto& dev : devnames) {
        if (!res_names.empty()) {
          res_names += ",";
        }
        res_names += dev;
      }
      if (res_names.size()) {
        (*pm)[prefix + "devices"] = res_names;
      }
    }
  }

  struct stat st;
  int r = ::fstat(fd_buffereds[WRITE_LIFE_NOT_SET], &st);
  if (r < 0)
    return -errno;
  if (S_ISBLK(st.st_mode)) {
    (*pm)[prefix + "access_mode"] = "blk";

    char buffer[1024] = {0};
    BlkDev blkdev{fd_buffereds[WRITE_LIFE_NOT_SET]};
    if (r = blkdev.partition(buffer, sizeof(buffer)); r) {
      (*pm)[prefix + "partition_path"] = "unknown";
    } else {
      (*pm)[prefix + "partition_path"] = buffer;
    }
    buffer[0] = '\0';
    if (r = blkdev.wholedisk(buffer, sizeof(buffer)); r) {
      (*pm)[prefix + "dev_node"] = "unknown";
    } else {
      (*pm)[prefix + "dev_node"] = buffer;
    }
    buffer[0] = '\0';
    blkdev.model(buffer, sizeof(buffer));
    (*pm)[prefix + "model"] = buffer;

    buffer[0] = '\0';
    blkdev.dev(buffer, sizeof(buffer));
    (*pm)[prefix + "dev"] = buffer;

    // nvme exposes a serial number
    buffer[0] = '\0';
    blkdev.serial(buffer, sizeof(buffer));
    (*pm)[prefix + "serial"] = buffer;

    // numa
    int node;
    r = blkdev.get_numa_node(&node);
    if (r >= 0) {
      (*pm)[prefix + "numa_node"] = stringify(node);
    }
  } else {
    (*pm)[prefix + "access_mode"] = "file";
    (*pm)[prefix + "path"] = path;
  }
  return 0;
}
int KernelDevice::get_ebd_state(ExtBlkDevState &state) const
{
  // Physical size/availability is only known when an extended block device
  // plugin (e.g. VDO) claimed this device at open() time; otherwise report
  // -ENOENT.  (VDO-specific get_thin_utilization lives in the VDO plugin.)
  if (!ebd_impl) {
    return -ENOENT;
  }
  return ebd_impl->get_state(state);
}
// Pick the fd for a write: buffered vs direct, honoring the WRITE_LIFE
// hint only when the kernel accepted F_SET_FILE_RW_HINT at open() time;
// otherwise everything maps to the WRITE_LIFE_NOT_SET fd.
int KernelDevice::choose_fd(bool buffered, int write_hint) const
{
#if defined(F_SET_FILE_RW_HINT)
  if (!enable_wrt)
    write_hint = WRITE_LIFE_NOT_SET;
#else
  // Without WRITE_LIFE capabilities, only one file is used.
  // And rocksdb sets this value also to > 0, so we need to catch this here
  // instead of trusting rocksdb to set write_hint.
  write_hint = WRITE_LIFE_NOT_SET;
#endif
  return buffered ? fd_buffereds[write_hint] : fd_directs[write_hint];
}
// Make all previously completed ios durable via fdatasync.  Cheap no-op
// when nothing has completed since the last flush.  Returns 0 on success;
// aborts on fdatasync failure (the device is unusable at that point).
// FIX: removed a stray double semicolon after the final dout statement.
int KernelDevice::flush()
{
  // protect flush with a mutex.  note that we are not really protecting
  // data here.  instead, we're ensuring that if any flush() caller
  // sees that io_since_flush is true, they block any racing callers
  // until the flush is observed.  that allows racing threads to be
  // calling flush while still ensuring that *any* of them that got an
  // aio completion notification will not return before that aio is
  // stable on disk: whichever thread sees the flag first will block
  // followers until the aio is stable.
  std::lock_guard l(flush_mutex);

  bool expect = true;
  if (!io_since_flush.compare_exchange_strong(expect, false)) {
    dout(10) << __func__ << " no-op (no ios since last flush), flag is "
             << (int)io_since_flush.load() << dendl;
    return 0;
  }

  dout(10) << __func__ << " start" << dendl;
  if (cct->_conf->bdev_inject_crash) {
    ++injecting_crash;
    // sleep for a moment to give other threads a chance to submit or
    // wait on io that races with a flush.
    derr << __func__ << " injecting crash. first we sleep..." << dendl;
    sleep(cct->_conf->bdev_inject_crash_flush_delay);
    derr << __func__ << " and now we die" << dendl;
    cct->_log->flush();
    _exit(1);
  }
  utime_t start = ceph_clock_now();
  int r = ::fdatasync(fd_directs[WRITE_LIFE_NOT_SET]);
  utime_t end = ceph_clock_now();
  utime_t dur = end - start;
  if (r < 0) {
    r = -errno;
    derr << __func__ << " fdatasync got: " << cpp_strerror(r) << dendl;
    ceph_abort();
  }
  dout(5) << __func__ << " in " << dur << dendl;
  return r;
}
// Initialize the io queue against the direct fds and spawn the reaper
// thread.  No-op when aio is disabled.  Returns 0 or a negative error.
int KernelDevice::_aio_start()
{
  if (aio) {
    dout(10) << __func__ << dendl;
    int r = io_queue->init(fd_directs);
    if (r < 0) {
      if (r == -EAGAIN) {
        // libaio's io_setup fails with EAGAIN when the system-wide aio
        // context limit is exhausted
        derr << __func__ << " io_setup(2) failed with EAGAIN; "
             << "try increasing /proc/sys/fs/aio-max-nr" << dendl;
      } else {
        derr << __func__ << " io_setup(2) failed: " << cpp_strerror(r) << dendl;
      }
      return r;
    }
    aio_thread.create("bstore_aio");
  }
  return 0;
}
// Signal the reaper thread to exit, join it, then shut down the io queue.
void KernelDevice::_aio_stop()
{
  if (aio) {
    dout(10) << __func__ << dendl;
    aio_stop = true;
    aio_thread.join();
    aio_stop = false;
    io_queue->shutdown();
  }
}
// Launch the background discard thread (body: _discard_thread()).
void KernelDevice::_discard_start()
{
    discard_thread.create("bstore_discard");
}
// Stop the discard thread: wait until it has actually started (so the
// stop flag is not observed before the startup handshake), request the
// stop, then join and clear the flag for a possible future restart.
void KernelDevice::_discard_stop()
{
  dout(10) << __func__ << dendl;
  {
    std::unique_lock l(discard_lock);
    while (!discard_started) {
      discard_cond.wait(l);
    }
    discard_stop = true;
    discard_cond.notify_all();
  }
  discard_thread.join();
  {
    std::lock_guard l(discard_lock);
    discard_stop = false;
  }
  dout(10) << __func__ << " stopped" << dendl;
}
// Block until every queued discard has been picked up *and* the batch
// currently being processed (discard_running) has finished.
void KernelDevice::discard_drain()
{
  dout(10) << __func__ << dendl;
  std::unique_lock l(discard_lock);
  while (!discard_queued.empty() || discard_running) {
    discard_cond.wait(l);
  }
}
// True when `r` (a negative errno from a completed aio) is one of the
// error codes the block layer may legitimately return, per
// https://lxr.missinglinkelectronics.com/linux+v4.15/block/blk-core.c#L135
static bool is_expected_ioerr(const int r)
{
  switch (-r) {
  case EOPNOTSUPP:
  case ETIMEDOUT:
  case ENOSPC:
  case ENOLINK:
  case EREMOTEIO:
  case EAGAIN:
  case EIO:
  case ENODATA:
  case EILSEQ:
  case ENOMEM:
#if defined(__linux__)
  case EREMCHG:
  case EBADE:
#elif defined(__FreeBSD__)
  case BSM_ERRNO_EREMCHG:
  case BSM_ERRNO_EBADE:
#endif
    return true;
  default:
    return false;
  }
}
// Reaper loop: poll the io queue for completions, account them, and wake
// or call back whoever issued them.  Aborts on unexpected io errors and
// optionally self-destructs for crash-injection testing.
void KernelDevice::_aio_thread()
{
  dout(10) << __func__ << " start" << dendl;
  int inject_crash_count = 0;
  while (!aio_stop) {
    dout(40) << __func__ << " polling" << dendl;
    int max = cct->_conf->bdev_aio_reap_max;
    aio_t *aio[max];
    int r = io_queue->get_next_completed(cct->_conf->bdev_aio_poll_ms,
                                         aio, max);
    if (r < 0) {
      derr << __func__ << " got " << cpp_strerror(r) << dendl;
      ceph_abort_msg("got unexpected error from io_getevents");
    }
    if (r > 0) {
      dout(30) << __func__ << " got " << r << " completed aios" << dendl;
      for (int i = 0; i < r; ++i) {
        IOContext *ioc = static_cast<IOContext*>(aio[i]->priv);
        _aio_log_finish(ioc, aio[i]->offset, aio[i]->length);
        if (aio[i]->queue_item.is_linked()) {
          std::lock_guard l(debug_queue_lock);
          debug_aio_unlink(*aio[i]);
        }
        // set flag indicating new ios have completed. we do this *before*
        // any completion or notifications so that any user flush() that
        // follows the observed io completion will include this io. Note
        // that an earlier, racing flush() could observe and clear this
        // flag, but that also ensures that the IO will be stable before the
        // later flush() occurs.
        io_since_flush.store(true);
        // NB: this `r` intentionally shadows the outer completion count
        long r = aio[i]->get_return_value();
        if (r < 0) {
          derr << __func__ << " got r=" << r << " (" << cpp_strerror(r) << ")"
               << dendl;
          if (ioc->allow_eio && is_expected_ioerr(r)) {
            derr << __func__ << " translating the error to EIO for upper layer"
                 << dendl;
            ioc->set_return_value(-EIO);
          } else {
            if (is_expected_ioerr(r)) {
              note_io_error_event(
                devname.c_str(),
                path.c_str(),
                r,
#if defined(HAVE_POSIXAIO)
                aio[i]->aio.aiocb.aio_lio_opcode,
#else
                aio[i]->iocb.aio_lio_opcode,
#endif
                aio[i]->offset,
                aio[i]->length);
              ceph_abort_msg(
                "Unexpected IO error. "
                "This may suggest a hardware issue. "
                "Please check your kernel log!");
            }
            ceph_abort_msg(
              "Unexpected IO error. "
              "This may suggest HW issue. Please check your dmesg!");
          }
        } else if (aio[i]->length != (uint64_t)r) {
          derr << "aio to 0x" << std::hex << aio[i]->offset
               << "~" << aio[i]->length << std::dec
               << " but returned: " << r << dendl;
          ceph_abort_msg("unexpected aio return value: does not match length");
        }
        dout(10) << __func__ << " finished aio " << aio[i] << " r " << r
                 << " ioc " << ioc
                 << " with " << (ioc->num_running.load() - 1)
                 << " aios left" << dendl;
        // NOTE: once num_running and we either call the callback or
        // call aio_wake we cannot touch ioc or aio[] as the caller
        // may free it.
        if (ioc->priv) {
          if (--ioc->num_running == 0) {
            aio_callback(aio_callback_priv, ioc->priv);
          }
        } else {
          ioc->try_aio_wake();
        }
      }
    }
    if (cct->_conf->bdev_debug_aio) {
      utime_t now = ceph_clock_now();
      std::lock_guard l(debug_queue_lock);
      if (debug_oldest) {
        if (debug_stall_since == utime_t()) {
          debug_stall_since = now;
        } else {
          if (cct->_conf->bdev_debug_aio_suicide_timeout) {
            utime_t cutoff = now;
            cutoff -= cct->_conf->bdev_debug_aio_suicide_timeout;
            if (debug_stall_since < cutoff) {
              derr << __func__ << " stalled aio " << debug_oldest
                   << " since " << debug_stall_since << ", timeout is "
                   << cct->_conf->bdev_debug_aio_suicide_timeout
                   << "s, suicide" << dendl;
              ceph_abort_msg("stalled aio... buggy kernel or bad device?");
            }
          }
        }
      }
    }
    if (cct->_conf->bdev_inject_crash) {
      ++inject_crash_count;
      if (inject_crash_count * cct->_conf->bdev_aio_poll_ms / 1000 >
          cct->_conf->bdev_inject_crash + cct->_conf->bdev_inject_crash_flush_delay) {
        derr << __func__ << " bdev_inject_crash trigger from aio thread"
             << dendl;
        cct->_log->flush();
        _exit(1);
      }
    }
  }
  dout(10) << __func__ << " end" << dendl;
}
// Background discard loop: batches up discard_queued into discard_finishing,
// issues each extent synchronously with the lock dropped, then notifies the
// completion callback.  Exits when discard_stop is set and the queue drains.
void KernelDevice::_discard_thread()
{
  std::unique_lock l(discard_lock);
  ceph_assert(!discard_started);
  // handshake with _discard_stop(), which waits for discard_started
  discard_started = true;
  discard_cond.notify_all();
  while (true) {
    ceph_assert(discard_finishing.empty());
    if (discard_queued.empty()) {
      if (discard_stop)
        break;
      dout(20) << __func__ << " sleep" << dendl;
      discard_cond.notify_all(); // for the thread trying to drain...
      discard_cond.wait(l);
      dout(20) << __func__ << " wake" << dendl;
    } else {
      // swap the whole queue out and release the lock while trimming
      discard_finishing.swap(discard_queued);
      discard_running = true;
      l.unlock();
      dout(20) << __func__ << " finishing" << dendl;
      for (auto p = discard_finishing.begin();p != discard_finishing.end(); ++p) {
        _discard(p.get_start(), p.get_len());
      }
      discard_callback(discard_callback_priv, static_cast<void*>(&discard_finishing));
      discard_finishing.clear();
      l.lock();
      discard_running = false;
    }
  }
  dout(10) << __func__ << " finish" << dendl;
  discard_started = false;
}
int KernelDevice::_queue_discard(interval_set<uint64_t> &to_release)
{
  // Async discard needs the running discard thread; signal the caller to
  // fall back to synchronous discard otherwise.  (This can happen when
  // bdev_async_discard is flipped on at runtime.)
  if (!discard_thread.is_started()) {
    return -1;
  }
  if (to_release.empty()) {
    return 0;
  }
  {
    std::lock_guard l(discard_lock);
    discard_queued.insert(to_release);
    discard_cond.notify_all();
  }
  return 0;
}
// return true only if _queue_discard succeeded, so caller won't have to do alloc->release
// otherwise false
bool KernelDevice::try_discard(interval_set<uint64_t> &to_release, bool async)
{
  if (!support_discard || !cct->_conf->bdev_enable_discard)
    return false;
  if (async && discard_thread.is_started()) {
    return 0 == _queue_discard(to_release);
  } else {
    // synchronous fallback: trim inline, then still return false so the
    // caller releases the extents itself
    for (auto p = to_release.begin(); p != to_release.end(); ++p) {
      _discard(p.get_start(), p.get_len());
    }
  }
  return false;
}
// Debug-only bookkeeping: record [offset, offset+length) as in flight and
// abort if it overlaps an io that is already outstanding.
void KernelDevice::_aio_log_start(
  IOContext *ioc,
  uint64_t offset,
  uint64_t length)
{
  dout(20) << __func__ << " 0x" << std::hex << offset << "~" << length
           << std::dec << dendl;
  if (cct->_conf->bdev_debug_inflight_ios) {
    std::lock_guard l(debug_lock);
    if (debug_inflight.intersects(offset, length)) {
      derr << __func__ << " inflight overlap of 0x"
           << std::hex
           << offset << "~" << length << std::dec
           << " with " << debug_inflight << dendl;
      ceph_abort();
    }
    debug_inflight.insert(offset, length);
  }
}
// Append `aio` to the debug stall-tracking queue; the head of the queue is
// remembered as the oldest outstanding aio.  Callers hold debug_queue_lock
// (see aio_submit() and _aio_thread()).
void KernelDevice::debug_aio_link(aio_t& aio)
{
  if (debug_queue.empty()) {
    debug_oldest = &aio;
  }
  debug_queue.push_back(aio);
}
// Remove `aio` from the debug queue; when it was the oldest entry, log if it
// had been stalled longer than bdev_debug_aio_log_age and promote the next
// entry (if any) to oldest.  Callers hold debug_queue_lock.
void KernelDevice::debug_aio_unlink(aio_t& aio)
{
  if (aio.queue_item.is_linked()) {
    debug_queue.erase(debug_queue.iterator_to(aio));
    if (debug_oldest == &aio) {
      auto age = cct->_conf->bdev_debug_aio_log_age;
      if (age && debug_stall_since != utime_t()) {
        utime_t cutoff = ceph_clock_now();
        cutoff -= age;
        if (debug_stall_since < cutoff) {
          derr << __func__ << " stalled aio " << debug_oldest
               << " since " << debug_stall_since << ", timeout is "
               << age
               << "s" << dendl;
        }
      }
      if (debug_queue.empty()) {
        debug_oldest = nullptr;
      } else {
        debug_oldest = &debug_queue.front();
      }
      // reset the stall clock for the new oldest entry
      debug_stall_since = utime_t();
    }
  }
}
// Debug-only bookkeeping: drop [offset, offset+length) from the in-flight
// tracking set.
// NOTE(review): the dout below streams the member `aio` (the bool aio-mode
// flag initialized in the constructor), not the completed io — looks like
// it was meant to print something else; confirm intent.
void KernelDevice::_aio_log_finish(
  IOContext *ioc,
  uint64_t offset,
  uint64_t length)
{
  dout(20) << __func__ << " " << aio << " 0x"
           << std::hex << offset << "~" << length << std::dec << dendl;
  if (cct->_conf->bdev_debug_inflight_ios) {
    std::lock_guard l(debug_lock);
    debug_inflight.erase(offset, length);
  }
}
// Move the ioc's pending aios onto its running list and hand them to the
// io queue in one batch.  Only one thread may submit for a given ioc at a
// time (asserted below via num_pending draining to zero).
void KernelDevice::aio_submit(IOContext *ioc)
{
  dout(20) << __func__ << " ioc " << ioc
           << " pending " << ioc->num_pending.load()
           << " running " << ioc->num_running.load()
           << dendl;
  if (ioc->num_pending.load() == 0) {
    return;
  }
  // move these aside, and get our end iterator position now, as the
  // aios might complete as soon as they are submitted and queue more
  // wal aio's.
  list<aio_t>::iterator e = ioc->running_aios.begin();
  ioc->running_aios.splice(e, ioc->pending_aios);
  int pending = ioc->num_pending.load();
  ioc->num_running += pending;
  ioc->num_pending -= pending;
  ceph_assert(ioc->num_pending.load() == 0);  // we should be only thread doing this
  ceph_assert(ioc->pending_aios.size() == 0);
  if (cct->_conf->bdev_debug_aio) {
    list<aio_t>::iterator p = ioc->running_aios.begin();
    while (p != e) {
      dout(30) << __func__ << " " << *p << dendl;
      std::lock_guard l(debug_queue_lock);
      debug_aio_link(*p++);
    }
  }
  void *priv = static_cast<void*>(ioc);
  int r, retries = 0;
  // num of pending aios should not overflow when passed to submit_batch()
  assert(pending <= std::numeric_limits<uint16_t>::max());
  r = io_queue->submit_batch(ioc->running_aios.begin(), e,
                             pending, priv, &retries);
  if (retries)
    derr << __func__ << " retries " << retries << dendl;
  if (r < 0) {
    derr << " aio submit got " << cpp_strerror(r) << dendl;
    ceph_assert(r == 0);
  }
}
// Synchronous vectored write of `bl` at `off` via pwritev, resuming after
// short writes by advancing through the iov array.  For buffered mode the
// written range is pushed to disk with sync_file_range when available.
// Returns 0 or a negative errno.
int KernelDevice::_sync_write(uint64_t off, bufferlist &bl, bool buffered, int write_hint)
{
  uint64_t len = bl.length();
  dout(5) << __func__ << " 0x" << std::hex << off << "~" << len
          << std::dec << " " << buffermode(buffered) << dendl;
  if (cct->_conf->bdev_inject_crash &&
      rand() % cct->_conf->bdev_inject_crash == 0) {
    derr << __func__ << " bdev_inject_crash: dropping io 0x" << std::hex
         << off << "~" << len << std::dec << dendl;
    ++injecting_crash;
    return 0;
  }
  vector<iovec> iov;
  bl.prepare_iov(&iov);

  auto left = len;
  auto o = off;
  size_t idx = 0;
  do {
    auto r = ::pwritev(choose_fd(buffered, write_hint),
                       &iov[idx], iov.size() - idx, o);

    if (r < 0) {
      r = -errno;
      derr << __func__ << " pwritev error: " << cpp_strerror(r) << dendl;
      return r;
    }
    o += r;
    left -= r;
    if (left) {
      // short write: advance the iov cursor past what the kernel consumed
      // skip fully processed IOVs
      while (idx < iov.size() && (size_t)r >= iov[idx].iov_len) {
        r -= iov[idx++].iov_len;
      }
      // update partially processed one if any
      if (r) {
        ceph_assert(idx < iov.size());
        ceph_assert((size_t)r < iov[idx].iov_len);
        iov[idx].iov_base = static_cast<char*>(iov[idx].iov_base) + r;
        iov[idx].iov_len -= r;
        r = 0;
      }
      ceph_assert(r == 0);
    }
  } while (left);

#ifdef HAVE_SYNC_FILE_RANGE
  if (buffered) {
    // initiate IO and wait till it completes
    auto r = ::sync_file_range(fd_buffereds[WRITE_LIFE_NOT_SET], off, len, SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER|SYNC_FILE_RANGE_WAIT_BEFORE);
    if (r < 0) {
      r = -errno;
      derr << __func__ << " sync_file_range error: " << cpp_strerror(r) << dendl;
      return r;
    }
  }
#endif

  io_since_flush.store(true);

  return 0;
}
// Synchronous write entry point: validates the io, honors the blackhole
// test knob, realigns the bufferlist when required for direct io or when
// there are too many segments for one pwritev, then delegates to
// _sync_write().
int KernelDevice::write(
  uint64_t off,
  bufferlist &bl,
  bool buffered,
  int write_hint)
{
  uint64_t len = bl.length();
  dout(20) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
           << " " << buffermode(buffered)
           << dendl;
  ceph_assert(is_valid_io(off, len));
  if (cct->_conf->objectstore_blackhole) {
    lderr(cct) << __func__ << " objectstore_blackhole=true, throwing out IO"
               << dendl;
    return 0;
  }

  if ((!buffered || bl.get_num_buffers() >= IOV_MAX) &&
      bl.rebuild_aligned_size_and_memory(block_size, block_size, IOV_MAX)) {
    dout(20) << __func__ << " rebuilding buffer to be aligned" << dendl;
  }
  dout(40) << "data:\n";
  bl.hexdump(*_dout);
  *_dout << dendl;

  return _sync_write(off, bl, buffered, write_hint);
}
// Queue an asynchronous write on `ioc` (submitted later by aio_submit()).
// Direct unbuffered io goes through the aio queue, split into RW_IO_MAX
// chunks when huge; buffered io falls back to a synchronous write.  The
// crash-injection path queues a dummy read so aio_wait accounting stays
// consistent while the data is deliberately dropped.
int KernelDevice::aio_write(
  uint64_t off,
  bufferlist &bl,
  IOContext *ioc,
  bool buffered,
  int write_hint)
{
  uint64_t len = bl.length();
  dout(20) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
           << " " << buffermode(buffered)
           << dendl;
  ceph_assert(is_valid_io(off, len));
  if (cct->_conf->objectstore_blackhole) {
    lderr(cct) << __func__ << " objectstore_blackhole=true, throwing out IO"
               << dendl;
    return 0;
  }

  if ((!buffered || bl.get_num_buffers() >= IOV_MAX) &&
      bl.rebuild_aligned_size_and_memory(block_size, block_size, IOV_MAX)) {
    dout(20) << __func__ << " rebuilding buffer to be aligned" << dendl;
  }
  dout(40) << "data:\n";
  bl.hexdump(*_dout);
  *_dout << dendl;

  _aio_log_start(ioc, off, len);

#ifdef HAVE_LIBAIO
  if (aio && dio && !buffered) {
    if (cct->_conf->bdev_inject_crash &&
        rand() % cct->_conf->bdev_inject_crash == 0) {
      derr << __func__ << " bdev_inject_crash: dropping io 0x" << std::hex
           << off << "~" << len << std::dec
           << dendl;
      // generate a real io so that aio_wait behaves properly, but make it
      // a read instead of write, and toss the result.
      ioc->pending_aios.push_back(aio_t(ioc, choose_fd(false, write_hint)));
      ++ioc->num_pending;
      auto& aio = ioc->pending_aios.back();
      aio.bl.push_back(
        ceph::buffer::ptr_node::create(ceph::buffer::create_small_page_aligned(len)));
      aio.bl.prepare_iov(&aio.iov);
      aio.preadv(off, len);
      ++injecting_crash;
    } else {
      if (bl.length() <= RW_IO_MAX) {
        // fast path (non-huge write)
        ioc->pending_aios.push_back(aio_t(ioc, choose_fd(false, write_hint)));
        ++ioc->num_pending;
        auto& aio = ioc->pending_aios.back();
        bl.prepare_iov(&aio.iov);
        aio.bl.claim_append(bl);
        aio.pwritev(off, len);
        dout(30) << aio << dendl;
        dout(5) << __func__ << " 0x" << std::hex << off << "~" << len
                << std::dec << " aio " << &aio << dendl;
      } else {
        // write in RW_IO_MAX-sized chunks
        uint64_t prev_len = 0;
        while (prev_len < bl.length()) {
          bufferlist tmp;
          if (prev_len + RW_IO_MAX < bl.length()) {
            tmp.substr_of(bl, prev_len, RW_IO_MAX);
          } else {
            tmp.substr_of(bl, prev_len, bl.length() - prev_len);
          }
          auto len = tmp.length();
          ioc->pending_aios.push_back(aio_t(ioc, choose_fd(false, write_hint)));
          ++ioc->num_pending;
          auto& aio = ioc->pending_aios.back();
          tmp.prepare_iov(&aio.iov);
          aio.bl.claim_append(tmp);
          aio.pwritev(off + prev_len, len);
          dout(30) << aio << dendl;
          dout(5) << __func__ << " 0x" << std::hex << off + prev_len
                  << "~" << len
                  << std::dec << " aio " << &aio << " (piece)" << dendl;
          prev_len += len;
        }
      }
    }
  } else
#endif
  {
    // synchronous fallback (no libaio, aio disabled, or buffered io)
    int r = _sync_write(off, bl, buffered, write_hint);
    _aio_log_finish(ioc, off, len);
    if (r < 0)
      return r;
  }
  return 0;
}
int KernelDevice::_discard(uint64_t offset, uint64_t len)
{
  // Blackhole mode swallows trims just like it swallows writes.
  if (cct->_conf->objectstore_blackhole) {
    lderr(cct) << __func__ << " objectstore_blackhole=true, throwing out IO"
               << dendl;
    return 0;
  }
  dout(10) << __func__
           << " 0x" << std::hex << offset << "~" << len << std::dec
           << dendl;
  // Hand the range to the kernel through the direct fd.
  return BlkDev{fd_directs[WRITE_LIFE_NOT_SET]}.discard((int64_t)offset,
                                                        (int64_t)len);
}
// Fixed-size pool of explicitly mmap'ed huge-page buffers of one size.
// Buffers are recycled: when the wrapping raw is destroyed, its region is
// pushed back onto the lock-free queue instead of being unmapped.
struct ExplicitHugePagePool {
  using region_queue_t = boost::lockfree::queue<void*>;
  using instrumented_raw = ceph::buffer_instrumentation::instrumented_raw<
    BlockDevice::hugepaged_raw_marker_t>;
  struct mmaped_buffer_raw : public instrumented_raw {
    region_queue_t& region_q; // for recycling
    mmaped_buffer_raw(void* mmaped_region, ExplicitHugePagePool& parent)
      : instrumented_raw(static_cast<char*>(mmaped_region), parent.buffer_size),
        region_q(parent.region_q) {
      // the `mmaped_region` has been passed to `raw` as the buffer's `data`
    }
    ~mmaped_buffer_raw() override {
      // don't delete nor unmmap; recycle the region instead
      region_q.push(data);
    }
  };
  // Pre-map `buffers_in_pool` regions of `buffer_size` bytes each; aborts
  // if huge pages are unavailable (e.g. /proc/sys/vm/nr_hugepages too low).
  ExplicitHugePagePool(const size_t buffer_size, size_t buffers_in_pool)
    : buffer_size(buffer_size), region_q(buffers_in_pool) {
    while (buffers_in_pool--) {
      void* const mmaped_region = ::mmap(
        nullptr,
        buffer_size,
        PROT_READ | PROT_WRITE,
#if defined(__FreeBSD__)
        // FreeBSD doesn't have MAP_HUGETLB nor MAP_POPULATE but it has
        // a different, more automated / implicit mechanisms. However,
        // we want to mimic the Linux behavior as closely as possible
        // also in the matter of error handling which is the reason
        // behind MAP_ALIGNED_SUPER.
        // See: https://lists.freebsd.org/pipermail/freebsd-questions/2014-August/260578.html
        MAP_PRIVATE | MAP_ANONYMOUS | MAP_PREFAULT_READ | MAP_ALIGNED_SUPER,
#else
        MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | MAP_HUGETLB,
#endif // __FreeBSD__
        -1,
        0);
      if (mmaped_region == MAP_FAILED) {
        ceph_abort("can't allocate huge buffer;"
                   " /proc/sys/vm/nr_hugepages misconfigured?");
      } else {
        region_q.push(mmaped_region);
      }
    }
  }
  ~ExplicitHugePagePool() {
    void* mmaped_region;
    while (region_q.pop(mmaped_region)) {
      ::munmap(mmaped_region, buffer_size);
    }
  }
  // Pop a recycled region and wrap it; nullptr when the pool is exhausted.
  ceph::unique_leakable_ptr<buffer::raw> try_create() {
    if (void* mmaped_region; region_q.pop(mmaped_region)) {
      return ceph::unique_leakable_ptr<buffer::raw> {
        new mmaped_buffer_raw(mmaped_region, *this)
      };
    } else {
      // oops, empty queue.
      return nullptr;
    }
  }
  size_t get_buffer_size() const {
    return buffer_size;
  }
private:
  const size_t buffer_size;
  region_queue_t region_q;
};
// Collection of ExplicitHugePagePool instances, one per configured buffer
// size (configured via bdev_read_preallocated_huge_buffers; see from_desc).
struct HugePagePoolOfPools {
  // `conf` maps buffer_size -> buffers_in_pool; one pool is built per entry.
  HugePagePoolOfPools(const std::map<size_t, size_t> conf)
    : pools(conf.size(), [conf] (size_t index, auto emplacer) {
        ceph_assert(index < conf.size());
        // it could be replaced with a state-mutating lambda and
        // `conf::erase()` but performance is not a concern here.
        const auto [buffer_size, buffers_in_pool] =
          *std::next(std::begin(conf), index);
        emplacer.emplace(buffer_size, buffers_in_pool);
      }) {
  }
  // Return a huge buffer from the pool whose size matches `size` exactly,
  // or nullptr when no such pool exists or it is exhausted.
  ceph::unique_leakable_ptr<buffer::raw> try_create(const size_t size) {
    // thankfully to `conf` being a `std::map` we store the pools
    // sorted by buffer sizes. this would allow to clamp to log(n)
    // but I doubt admins want to have dozens of accelerated buffer
    // size. let's keep this simple for now.
    if (auto iter = std::find_if(std::begin(pools), std::end(pools),
                                 [size] (const auto& pool) {
                                   return size == pool.get_buffer_size();
                                 });
        iter != std::end(pools)) {
      return iter->try_create();
    }
    return nullptr;
  }
  static HugePagePoolOfPools from_desc(const std::string& conf);
private:
  // let's have some space inside (for 2 MB and 4 MB perhaps?)
  // NOTE: we need tiny_vector as the boost::lockfree queue inside
  // pool is not-movable.
  ceph::containers::tiny_vector<ExplicitHugePagePool, 2> pools;
};
// Build the pool set from the textual configurable. `desc` is a
// key=value list mapping buffer size (bytes) to how many buffers to
// preallocate; unparsable entries abort.
HugePagePoolOfPools HugePagePoolOfPools::from_desc(const std::string& desc) {
  std::map<std::string, std::string> raw_conf;
  get_str_map(desc, &raw_conf);

  std::map<size_t, size_t> parsed_conf; // buffer_size -> buffers_in_pool
  for (const auto& [size_str, count_str] : raw_conf) {
    size_t parsed_size;
    size_t parsed_count;
    if (sscanf(size_str.c_str(), "%zu", &parsed_size) != 1) {
      ceph_abort("can't parse a key in the configuration");
    }
    if (sscanf(count_str.c_str(), "%zu", &parsed_count) != 1) {
      ceph_abort("can't parse a value in the configuration");
    }
    parsed_conf[parsed_size] = parsed_count;
  }
  return HugePagePoolOfPools{std::move(parsed_conf)};
}
// Create a read buffer based on the user-configurable huge-page pools;
// it's intended to make our buffers THP/huge-page friendly.
ceph::unique_leakable_ptr<buffer::raw> KernelDevice::create_custom_aligned(
  const size_t len,
  IOContext* const ioc) const
{
  // just to preserve the logic of create_small_page_aligned().
  if (len < CEPH_PAGE_SIZE) {
    return ceph::buffer::create_small_page_aligned(len);
  } else {
    // the pools are built once, lazily, from the config at first use
    static HugePagePoolOfPools hp_pools = HugePagePoolOfPools::from_desc(
      cct->_conf.get_val<std::string>("bdev_read_preallocated_huge_buffers")
    );
    if (auto lucky_raw = hp_pools.try_create(len); lucky_raw) {
      dout(20) << __func__ << " allocated from huge pool"
               << " lucky_raw.data=" << (void*)lucky_raw->get_data()
               << " bdev_read_preallocated_huge_buffers="
               << cct->_conf.get_val<std::string>("bdev_read_preallocated_huge_buffers")
               << dendl;
      // presumably keeps pooled buffers out of the cache so they return
      // to the pool promptly -- confirm against FLAG_DONT_CACHE users
      ioc->flags |= IOContext::FLAG_DONT_CACHE;
      return lucky_raw;
    } else {
      // fallthrough due to empty buffer pool. this can happen also
      // when the configurable was explicitly set to 0.
      dout(20) << __func__ << " cannot allocate from huge pool"
               << dendl;
    }
  }
  // fallback: plain aligned allocation with the configured alignment
  const size_t custom_alignment = cct->_conf->bdev_read_buffer_alignment;
  dout(20) << __func__ << " with the custom alignment;"
           << " len=" << len
           << " custom_alignment=" << custom_alignment
           << dendl;
  return ceph::buffer::create_aligned(len, custom_alignment);
}
// Synchronous read of [off, off+len) into *pbl.
//
// Fixes vs. previous revision:
//  * errno is captured immediately after ::pread(); the stall-logging
//    path in between performs syscalls of its own and could clobber it.
//  * the error log used to print a truncated range ("0x<off>~" with a
//    stray std::left); it now includes len.
int KernelDevice::read(uint64_t off, uint64_t len, bufferlist *pbl,
                       IOContext *ioc,
                       bool buffered)
{
  dout(5) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
          << " " << buffermode(buffered)
          << dendl;
  ceph_assert(is_valid_io(off, len));

  _aio_log_start(ioc, off, len);

  auto start1 = mono_clock::now();

  auto p = ceph::buffer::ptr_node::create(create_custom_aligned(len, ioc));
  int r = ::pread(choose_fd(buffered, WRITE_LIFE_NOT_SET),
                  p->c_str(), len, off);
  // snapshot errno before any logging can overwrite it
  int err = (r < 0) ? errno : 0;
  auto age = cct->_conf->bdev_debug_aio_log_age;
  if (mono_clock::now() - start1 >= make_timespan(age)) {
    derr << __func__ << " stalled read "
         << " 0x" << std::hex << off << "~" << len << std::dec
         << " " << buffermode(buffered)
         << " since " << start1 << ", timeout is "
         << age
         << "s" << dendl;
  }
  if (r < 0) {
    if (ioc->allow_eio && is_expected_ioerr(-err)) {
      r = -EIO;
    } else {
      r = -err;
    }
    derr << __func__ << " 0x" << std::hex << off << "~" << len
         << std::dec << " error: " << cpp_strerror(r) << dendl;
    goto out;
  }
  ceph_assert((uint64_t)r == len);
  pbl->push_back(std::move(p));

  dout(40) << "data:\n";
  pbl->hexdump(*_dout);
  *_dout << dendl;

 out:
  _aio_log_finish(ioc, off, len);
  return r < 0 ? r : 0;
}
// Queue an asynchronous read on the IOContext. With libaio + O_DIRECT
// available the aio is only staged here; it is actually issued later by
// aio_submit(). Otherwise fall back to a synchronous direct read().
int KernelDevice::aio_read(
  uint64_t off,
  uint64_t len,
  bufferlist *pbl,
  IOContext *ioc)
{
  dout(5) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
          << dendl;

  int r = 0;
#ifdef HAVE_LIBAIO
  if (aio && dio) {
    ceph_assert(is_valid_io(off, len));
    _aio_log_start(ioc, off, len);
    ioc->pending_aios.push_back(aio_t(ioc, fd_directs[WRITE_LIFE_NOT_SET]));
    ++ioc->num_pending;
    aio_t& aio = ioc->pending_aios.back();
    // the destination buffer is shared between the aio and the caller's
    // bufferlist via the append() below
    aio.bl.push_back(
      ceph::buffer::ptr_node::create(create_custom_aligned(len, ioc)));
    aio.bl.prepare_iov(&aio.iov);
    aio.preadv(off, len);
    dout(30) << aio << dendl;
    pbl->append(aio.bl);
    dout(5) << __func__ << " 0x" << std::hex << off << "~" << len
            << std::dec << " aio " << &aio << dendl;
  } else
#endif
  {
    // no usable aio path: do a blocking direct read instead
    r = read(off, len, pbl, ioc, false);
  }

  return r;
}
// O_DIRECT reads require block-aligned offset/length/buffer. For an
// unaligned request, read the covering aligned range into a bounce
// buffer and copy the requested slice out.
//
// Fix vs. previous revision: errno is captured immediately after
// ::pread(); the stall-logging in between could clobber it.
int KernelDevice::direct_read_unaligned(uint64_t off, uint64_t len, char *buf)
{
  uint64_t aligned_off = p2align(off, block_size);
  uint64_t aligned_len = p2roundup(off + len, block_size) - aligned_off;
  bufferptr p = ceph::buffer::create_small_page_aligned(aligned_len);
  int r = 0;

  auto start1 = mono_clock::now();
  r = ::pread(fd_directs[WRITE_LIFE_NOT_SET], p.c_str(), aligned_len, aligned_off);
  // snapshot errno before any logging can overwrite it
  int err = (r < 0) ? errno : 0;
  auto age = cct->_conf->bdev_debug_aio_log_age;
  if (mono_clock::now() - start1 >= make_timespan(age)) {
    derr << __func__ << " stalled read "
         << " 0x" << std::hex << off << "~" << len << std::dec
         << " since " << start1 << ", timeout is "
         << age
         << "s" << dendl;
  }

  if (r < 0) {
    r = -err;
    derr << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
         << " error: " << cpp_strerror(r) << dendl;
    goto out;
  }
  ceph_assert((uint64_t)r == aligned_len);
  memcpy(buf, p.c_str() + (off - aligned_off), len);

  dout(40) << __func__ << " data:\n";
  bufferlist bl;
  bl.append(buf, len);
  bl.hexdump(*_dout);
  *_dout << dendl;

 out:
  return r < 0 ? r : 0;
}
int KernelDevice::read_random(uint64_t off, uint64_t len, char *buf,
bool buffered)
{
dout(5) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
<< "buffered " << buffered
<< dendl;
ceph_assert(len > 0);
ceph_assert(off < size);
ceph_assert(off + len <= size);
int r = 0;
auto age = cct->_conf->bdev_debug_aio_log_age;
//if it's direct io and unaligned, we have to use a internal buffer
if (!buffered && ((off % block_size != 0)
|| (len % block_size != 0)
|| (uintptr_t(buf) % CEPH_PAGE_SIZE != 0)))
return direct_read_unaligned(off, len, buf);
auto start1 = mono_clock::now();
if (buffered) {
//buffered read
auto off0 = off;
char *t = buf;
uint64_t left = len;
while (left > 0) {
r = ::pread(fd_buffereds[WRITE_LIFE_NOT_SET], t, left, off);
if (r < 0) {
r = -errno;
derr << __func__ << " 0x" << std::hex << off << "~" << left
<< std::dec << " error: " << cpp_strerror(r) << dendl;
goto out;
}
off += r;
t += r;
left -= r;
}
if (mono_clock::now() - start1 >= make_timespan(age)) {
derr << __func__ << " stalled read "
<< " 0x" << std::hex << off0 << "~" << len << std::dec
<< " (buffered) since " << start1 << ", timeout is "
<< age
<< "s" << dendl;
}
} else {
//direct and aligned read
r = ::pread(fd_directs[WRITE_LIFE_NOT_SET], buf, len, off);
if (mono_clock::now() - start1 >= make_timespan(age)) {
derr << __func__ << " stalled read "
<< " 0x" << std::hex << off << "~" << len << std::dec
<< " (direct) since " << start1 << ", timeout is "
<< age
<< "s" << dendl;
}
if (r < 0) {
r = -errno;
derr << __func__ << " direct_aligned_read" << " 0x" << std::hex
<< off << "~" << std::left << std::dec << " error: " << cpp_strerror(r)
<< dendl;
goto out;
}
ceph_assert((uint64_t)r == len);
}
dout(40) << __func__ << " data:\n";
bufferlist bl;
bl.append(buf, len);
bl.hexdump(*_dout);
*_dout << dendl;
out:
return r < 0 ? r : 0;
}
// Drop the page-cache pages backing [off, off+len) of the buffered fd.
// Both bounds must be block aligned. Returns 0 or a negative errno.
int KernelDevice::invalidate_cache(uint64_t off, uint64_t len)
{
  dout(5) << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
          << dendl;
  ceph_assert(off % block_size == 0);
  ceph_assert(len % block_size == 0);
  const int rc = posix_fadvise(fd_buffereds[WRITE_LIFE_NOT_SET], off, len,
                               POSIX_FADV_DONTNEED);
  if (rc == 0) {
    return 0;
  }
  // posix_fadvise returns a positive error code instead of setting errno
  int r = -rc;
  derr << __func__ << " 0x" << std::hex << off << "~" << len << std::dec
       << " error: " << cpp_strerror(r) << dendl;
  return r;
}
| 43,353 | 28.89931 | 153 |
cc
|
null |
ceph-main/src/blk/kernel/KernelDevice.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BLK_KERNELDEVICE_H
#define CEPH_BLK_KERNELDEVICE_H
#include <atomic>
#include "include/types.h"
#include "include/interval_set.h"
#include "common/Thread.h"
#include "include/utime.h"
#include "aio/aio.h"
#include "BlockDevice.h"
#include "extblkdev/ExtBlkDevPlugin.h"
#define RW_IO_MAX (INT_MAX & CEPH_PAGE_MASK)
/// BlockDevice implementation backed by a regular kernel block device
/// (or file). I/O uses a direct (O_DIRECT) fd plus a buffered fd per
/// write-lifetime hint; async I/O goes through an io_queue_t drained by
/// a dedicated completion thread, and discards are issued by a separate
/// background thread.
class KernelDevice : public BlockDevice {
protected:
  std::string path;
private:
  // one fd per write-lifetime hint; index WRITE_LIFE_NOT_SET is the
  // default when hinting is unused
  std::vector<int> fd_directs, fd_buffereds;
  // write-lifetime hinting toggle -- presumably cleared when the kernel
  // rejects hints; confirm in the .cc
  bool enable_wrt = true;
  bool aio, dio;   // whether libaio / O_DIRECT are in use

  ExtBlkDevInterfaceRef ebd_impl;  // structure for retrieving compression state from extended block device

  std::string devname;  ///< kernel dev name (/sys/block/$devname), if any

  // protects debug_inflight (aio range-overlap debugging)
  ceph::mutex debug_lock = ceph::make_mutex("KernelDevice::debug_lock");
  interval_set<uint64_t> debug_inflight;

  // set when I/O happened since the last flush -- presumably lets
  // flush() skip redundant syncs; confirm in the .cc
  std::atomic<bool> io_since_flush = {false};
  ceph::mutex flush_mutex = ceph::make_mutex("KernelDevice::flush_mutex");

  // backend queue (libaio or io_uring)
  std::unique_ptr<io_queue_t> io_queue;

  aio_callback_t discard_callback;
  void *discard_callback_priv;
  bool aio_stop;          // tells _aio_thread() to exit
  bool discard_started;
  bool discard_stop;      // tells _discard_thread() to exit

  // protects the discard state below
  ceph::mutex discard_lock = ceph::make_mutex("KernelDevice::discard_lock");
  ceph::condition_variable discard_cond;
  bool discard_running = false;
  interval_set<uint64_t> discard_queued;     // waiting to be discarded
  interval_set<uint64_t> discard_finishing;  // currently being discarded

  // reaps completions from io_queue and fires aio callbacks
  struct AioCompletionThread : public Thread {
    KernelDevice *bdev;
    explicit AioCompletionThread(KernelDevice *b) : bdev(b) {}
    void *entry() override {
      bdev->_aio_thread();
      return NULL;
    }
  } aio_thread;

  // issues queued discards in the background
  struct DiscardThread : public Thread {
    KernelDevice *bdev;
    explicit DiscardThread(KernelDevice *b) : bdev(b) {}
    void *entry() override {
      bdev->_discard_thread();
      return NULL;
    }
  } discard_thread;

  std::atomic_int injecting_crash;

  virtual int _post_open() { return 0; }  // hook for child implementations
  virtual void _pre_close() { }  // hook for child implementations

  void _aio_thread();
  void _discard_thread();
  int _queue_discard(interval_set<uint64_t> &to_release);
  bool try_discard(interval_set<uint64_t> &to_release, bool async = true) override;

  int _aio_start();
  void _aio_stop();

  void _discard_start();
  void _discard_stop();

  void _aio_log_start(IOContext *ioc, uint64_t offset, uint64_t length);
  void _aio_log_finish(IOContext *ioc, uint64_t offset, uint64_t length);

  int _sync_write(uint64_t off, ceph::buffer::list& bl, bool buffered, int write_hint);

  int _lock();

  int direct_read_unaligned(uint64_t off, uint64_t len, char *buf);

  // stalled aio debugging
  aio_list_t debug_queue;
  ceph::mutex debug_queue_lock = ceph::make_mutex("KernelDevice::debug_queue_lock");
  aio_t *debug_oldest = nullptr;
  utime_t debug_stall_since;
  void debug_aio_link(aio_t& aio);
  void debug_aio_unlink(aio_t& aio);

  // pick the direct or buffered fd for the given mode/hint
  int choose_fd(bool buffered, int write_hint) const;

  ceph::unique_leakable_ptr<buffer::raw> create_custom_aligned(size_t len, IOContext* ioc) const;

public:
  KernelDevice(CephContext* cct, aio_callback_t cb, void *cbpriv, aio_callback_t d_cb, void *d_cbpriv);

  void aio_submit(IOContext *ioc) override;
  void discard_drain() override;

  int collect_metadata(const std::string& prefix, std::map<std::string,std::string> *pm) const override;
  int get_devname(std::string *s) const override {
    if (devname.empty()) {
      return -ENOENT;
    }
    *s = devname;
    return 0;
  }
  int get_devices(std::set<std::string> *ls) const override;

  int get_ebd_state(ExtBlkDevState &state) const override;

  int read(uint64_t off, uint64_t len, ceph::buffer::list *pbl,
	   IOContext *ioc,
	   bool buffered) override;
  int aio_read(uint64_t off, uint64_t len, ceph::buffer::list *pbl,
	       IOContext *ioc) override;
  int read_random(uint64_t off, uint64_t len, char *buf, bool buffered) override;

  int write(uint64_t off, ceph::buffer::list& bl, bool buffered, int write_hint = WRITE_LIFE_NOT_SET) override;
  int aio_write(uint64_t off, ceph::buffer::list& bl,
		IOContext *ioc,
		bool buffered,
		int write_hint = WRITE_LIFE_NOT_SET) override;
  int flush() override;
  int _discard(uint64_t offset, uint64_t len);

  // for managing buffered readers/writers
  int invalidate_cache(uint64_t off, uint64_t len) override;
  int open(const std::string& path) override;
  void close() override;
};
#endif
| 4,767 | 29.369427 | 111 |
h
|
null |
ceph-main/src/blk/kernel/io_uring.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "io_uring.h"
#if defined(HAVE_LIBURING)
#include "liburing.h"
#include <sys/epoll.h>
using std::list;
using std::make_unique;
// Private per-queue state for the io_uring backend.
struct ioring_data {
  struct io_uring io_uring;       // the ring itself
  pthread_mutex_t cq_mutex;       // serializes completion reaping
  pthread_mutex_t sq_mutex;       // serializes submissions
  int epoll_fd = -1;              // waits for the ring fd to become readable
  std::map<int, int> fixed_fds_map;  // real fd -> registered-file index
};
// Drain up to `max` completions from the ring into `paio`, copying each
// CQE's result into its aio_t. Returns how many were harvested.
static int ioring_get_cqe(struct ioring_data *d, unsigned int max,
			  struct aio_t **paio)
{
  struct io_uring *ring = &d->io_uring;
  struct io_uring_cqe *cqe;
  unsigned head;
  unsigned harvested = 0;

  io_uring_for_each_cqe(ring, head, cqe) {
    struct aio_t *io = (struct aio_t *)(uintptr_t) io_uring_cqe_get_data(cqe);
    io->rval = cqe->res;
    paio[harvested++] = io;
    if (harvested == max)
      break;
  }
  io_uring_cq_advance(ring, harvested);

  return harvested;
}
// Translate a real fd to its registered-file index, or -1 if the fd was
// never registered with the ring.
static int find_fixed_fd(struct ioring_data *d, int real_fd)
{
  auto found = d->fixed_fds_map.find(real_fd);
  return found == d->fixed_fds_map.end() ? -1 : found->second;
}
// Fill one SQE from an aio_t. The aio must target a registered fd and
// must be a preadv/pwritev; anything else aborts.
static void init_sqe(struct ioring_data *d, struct io_uring_sqe *sqe,
		     struct aio_t *io)
{
  int fixed_fd = find_fixed_fd(d, io->fd);
  ceph_assert(fixed_fd != -1);

  if (io->iocb.aio_lio_opcode == IO_CMD_PWRITEV)
    io_uring_prep_writev(sqe, fixed_fd, &io->iov[0],
			 io->iov.size(), io->offset);
  else if (io->iocb.aio_lio_opcode == IO_CMD_PREADV)
    io_uring_prep_readv(sqe, fixed_fd, &io->iov[0],
			io->iov.size(), io->offset);
  else
    ceph_assert(0);

  // the aio_t pointer rides along and comes back via the CQE user data
  io_uring_sqe_set_data(sqe, io);
  // IOSQE_FIXED_FILE: fd field is an index into the registered files
  io_uring_sqe_set_flags(sqe, IOSQE_FIXED_FILE);
}
// Stage the aios in [beg, end) onto the SQ and submit them. If the SQ
// fills up before the first aio is staged, returns 0 so the caller can
// reap completions and retry; otherwise returns io_uring_submit()'s
// result. Note: when the SQ fills mid-batch, only the staged prefix is
// submitted.
static int ioring_queue(struct ioring_data *d, void *priv,
			list<aio_t>::iterator beg, list<aio_t>::iterator end)
{
  struct io_uring *ring = &d->io_uring;
  struct aio_t *io = nullptr;

  ceph_assert(beg != end);

  do {
    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
    if (!sqe)
      break;

    io = &*beg;
    io->priv = priv;

    init_sqe(d, sqe, io);

  } while (++beg != end);

  if (!io)
    /* Queue is full, go and reap something first */
    return 0;

  return io_uring_submit(ring);
}
// Record the registered-file index of each real fd; indices follow the
// order in which the fds were registered with the ring.
static void build_fixed_fds_map(struct ioring_data *d,
				std::vector<int> &fds)
{
  for (size_t i = 0; i < fds.size(); ++i) {
    d->fixed_fds_map[fds[i]] = static_cast<int>(i);
  }
}
// Construction only stores the configuration; the ring itself is set up
// in init().
ioring_queue_t::ioring_queue_t(unsigned iodepth_, bool hipri_, bool sq_thread_) :
  d(make_unique<ioring_data>()),
  iodepth(iodepth_),
  hipri(hipri_),        // IORING_SETUP_IOPOLL
  sq_thread(sq_thread_) // IORING_SETUP_SQPOLL
{
}
// Out-of-line so unique_ptr<ioring_data> can be destroyed where the
// type is complete; ring teardown happens in shutdown(), not here.
ioring_queue_t::~ioring_queue_t()
{
}
// Set up the ring: create the queue, register `fds` as fixed files,
// and create an epoll instance watching the ring fd so completions can
// be waited for with a timeout. Returns 0 or a negative error.
int ioring_queue_t::init(std::vector<int> &fds)
{
  unsigned flags = 0;

  pthread_mutex_init(&d->cq_mutex, NULL);
  pthread_mutex_init(&d->sq_mutex, NULL);

  if (hipri)
    flags |= IORING_SETUP_IOPOLL;
  if (sq_thread)
    flags |= IORING_SETUP_SQPOLL;

  int ret = io_uring_queue_init(iodepth, &d->io_uring, flags);
  if (ret < 0)
    return ret;

  ret = io_uring_register_files(&d->io_uring,
		  &fds[0], fds.size());
  if (ret < 0) {
    // NOTE(review): liburing returns a negative errno directly; errno
    // here may be stale -- confirm against the liburing version in use
    ret = -errno;
    goto close_ring_fd;
  }
  build_fixed_fds_map(d.get(), fds);

  d->epoll_fd = epoll_create1(0);
  if (d->epoll_fd < 0) {
    ret = -errno;
    goto close_ring_fd;
  }

  struct epoll_event ev;
  ev.events = EPOLLIN;
  ret = epoll_ctl(d->epoll_fd, EPOLL_CTL_ADD, d->io_uring.ring_fd, &ev);
  if (ret < 0) {
    ret = -errno;
    goto close_epoll_fd;
  }

  return 0;

close_epoll_fd:
  close(d->epoll_fd);
close_ring_fd:
  io_uring_queue_exit(&d->io_uring);
  return ret;
}
// Tear down what init() created: the fd map, the epoll instance, and
// the ring itself.
void ioring_queue_t::shutdown()
{
  d->fixed_fds_map.clear();
  close(d->epoll_fd);
  d->epoll_fd = -1;
  io_uring_queue_exit(&d->io_uring);
}
// Submit the aios in [beg, end) under the SQ lock. `aios_size` and
// `retries` are unused by the io_uring backend.
int ioring_queue_t::submit_batch(aio_iter beg, aio_iter end,
                                 uint16_t aios_size, void *priv,
                                 int *retries)
{
  (void)aios_size;
  (void)retries;

  pthread_mutex_lock(&d->sq_mutex);
  const int submitted = ioring_queue(d.get(), priv, beg, end);
  pthread_mutex_unlock(&d->sq_mutex);

  return submitted;
}
// Reap up to `max` completed aios into `paio`. If none are immediately
// available, wait up to `timeout_ms` for the ring fd to become readable
// and try again. Returns the number reaped, 0 on timeout, or a negative
// errno from epoll_wait.
int ioring_queue_t::get_next_completed(int timeout_ms, aio_t **paio, int max)
{
  while (true) {
    pthread_mutex_lock(&d->cq_mutex);
    int events = ioring_get_cqe(d.get(), max, paio);
    pthread_mutex_unlock(&d->cq_mutex);

    if (events != 0)
      return events;

    struct epoll_event ev;
    int ret = TEMP_FAILURE_RETRY(epoll_wait(d->epoll_fd, &ev, 1, timeout_ms));
    if (ret < 0)
      return -errno;
    if (ret == 0)
      return 0;  // timed out with nothing to reap
    // ring became readable: loop back and reap
  }
}
bool ioring_queue_t::supported()
{
struct io_uring ring;
int ret = io_uring_queue_init(16, &ring, 0);
if (ret) {
return false;
}
io_uring_queue_exit(&ring);
return true;
}
#else // #if defined(HAVE_LIBURING)

// Stubs compiled when liburing is not available. supported() reports
// false, so callers should never construct or use a queue; every other
// entry point aborts if reached.
struct ioring_data {};

ioring_queue_t::ioring_queue_t(unsigned iodepth_, bool hipri_, bool sq_thread_)
{
  ceph_assert(0);
}

ioring_queue_t::~ioring_queue_t()
{
  ceph_assert(0);
}

int ioring_queue_t::init(std::vector<int> &fds)
{
  ceph_assert(0);
}

void ioring_queue_t::shutdown()
{
  ceph_assert(0);
}

int ioring_queue_t::submit_batch(aio_iter beg, aio_iter end,
                                 uint16_t aios_size, void *priv,
                                 int *retries)
{
  ceph_assert(0);
}

int ioring_queue_t::get_next_completed(int timeout_ms, aio_t **paio, int max)
{
  ceph_assert(0);
}

bool ioring_queue_t::supported()
{
  return false;
}
#endif // #if defined(HAVE_LIBURING)
| 5,354 | 19.207547 | 81 |
cc
|
null |
ceph-main/src/blk/kernel/io_uring.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "acconfig.h"
#include "include/types.h"
#include "aio/aio.h"
struct ioring_data;
// io_queue_t backend built on Linux io_uring. One instance owns one
// ring plus an epoll fd used to wait for completions with a timeout.
struct ioring_queue_t final : public io_queue_t {
  std::unique_ptr<ioring_data> d;  // pimpl: ring, mutexes, fd map
  unsigned iodepth = 0;            // SQ/CQ depth requested at init
  bool hipri = false;              // use IORING_SETUP_IOPOLL
  bool sq_thread = false;          // use IORING_SETUP_SQPOLL

  typedef std::list<aio_t>::iterator aio_iter;

  // Returns true if arch is x86-64 and kernel supports io_uring
  static bool supported();

  ioring_queue_t(unsigned iodepth_, bool hipri_, bool sq_thread_);
  ~ioring_queue_t() final;

  int init(std::vector<int> &fds) final;
  void shutdown() final;

  int submit_batch(aio_iter begin, aio_iter end, uint16_t aios_size,
                   void *priv, int *retries) final;
  int get_next_completed(int timeout_ms, aio_t **paio, int max) final;
};
| 861 | 24.352941 | 70 |
h
|
null |
ceph-main/src/blk/pmem/PMEMDevice.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Intel <[email protected]>
*
* Author: Jianpeng Ma <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <filesystem>
#include <fstream>
#include <fmt/format.h>
#include "PMEMDevice.h"
#include "libpmem.h"
#include "include/types.h"
#include "include/compat.h"
#include "include/stringify.h"
#include "common/errno.h"
#include "common/debug.h"
#include "common/blkdev.h"
#if defined(HAVE_LIBDML)
#include <dml/dml.hpp>
using execution_path = dml::automatic;
#endif
#define dout_context cct
#define dout_subsys ceph_subsys_bdev
#undef dout_prefix
#define dout_prefix *_dout << "bdev-PMEM(" << path << ") "
// All pmem I/O is synchronous, so the aio callback fires directly from
// aio_submit(); no background threads are needed.
PMEMDevice::PMEMDevice(CephContext *cct, aio_callback_t cb, void *cbpriv)
  : BlockDevice(cct, cb, cbpriv),
    fd(-1), addr(0),
    injecting_crash(0)
{
}
// Take an exclusive advisory write lock on the whole device/file so
// two OSDs cannot open the same pmem backing store.
int PMEMDevice::_lock()
{
  struct flock l = {};  // zero-initialized: l_start = 0, l_len = 0 (whole file)
  l.l_type = F_WRLCK;
  l.l_whence = SEEK_SET;
  if (::fcntl(fd, F_SETLK, &l) < 0) {
    return -errno;
  }
  return 0;
}
// Decide whether `pmem_file` is a devdax character device. Returns 0 on
// a DAX device (filling *total_size from sysfs when requested), -EINVAL
// otherwise.
static int pmem_check_file_type(int fd, const char *pmem_file, uint64_t *total_size)
{
  namespace fs = std::filesystem;
  if (!fs::is_character_file(pmem_file)) {
    return -EINVAL;
  }
  struct stat st;
  if (::fstat(fd, &st)) {
    return -EINVAL;
  }
  const fs::path char_dir = fmt::format("/sys/dev/char/{}:{}",
                                        major(st.st_rdev),
                                        minor(st.st_rdev));
  // a DAX char device links its "subsystem" sysfs entry to .../dax
  if (fs::read_symlink(char_dir / "subsystem").filename().string() != "dax") {
    return -EINVAL;
  }
  if (total_size == nullptr) {
    return 0;  // caller only wanted the type check
  }
  std::ifstream size_file(char_dir / "size");
  if (!size_file) {
    return -EINVAL;
  }
  size_file >> *total_size;
  return size_file ? 0 : -EINVAL;
}
// Open and memory-map the pmem backing store (devdax char device or
// fsdax file). Sets fd, addr, size and block_size. Returns 0 or a
// negative errno.
//
// Fix vs. previous revision: on pmem_map_file() failure we used to jump
// to out_fail with r still 0 from the earlier successful calls, so
// open() reported success on a failed mapping. pmem_map_file() sets
// errno on failure; propagate it.
int PMEMDevice::open(const std::string& p)
{
  path = p;
  int r = 0;
  dout(1) << __func__ << " path " << path << dendl;

  fd = ::open(path.c_str(), O_RDWR | O_CLOEXEC);
  if (fd < 0) {
    r = -errno;
    derr << __func__ << " open got: " << cpp_strerror(r) << dendl;
    return r;
  }

  r = pmem_check_file_type(fd, path.c_str(), &size);
  if (!r) {
    dout(1) << __func__ << " This path " << path << " is a devdax dev " << dendl;
    devdax_device = true;
    // If using devdax char device, set it to not rotational device.
    rotational = false;
  }

  r = _lock();
  if (r < 0) {
    derr << __func__ << " failed to lock " << path << ": " << cpp_strerror(r)
	 << dendl;
    goto out_fail;
  }

  struct stat st;
  r = ::fstat(fd, &st);
  if (r < 0) {
    r = -errno;
    derr << __func__ << " fstat got " << cpp_strerror(r) << dendl;
    goto out_fail;
  }

  size_t map_len;
  // devdax devices cannot be mapped with PMEM_FILE_EXCL
  addr = (char *)pmem_map_file(path.c_str(), 0,
                               devdax_device ? 0 : PMEM_FILE_EXCL, O_RDWR,
                               &map_len, NULL);
  if (addr == NULL) {
    r = -errno;
    derr << __func__ << " pmem_map_file failed: " << pmem_errormsg() << dendl;
    goto out_fail;
  }
  size = map_len;

  // Operate as though the block size is 4 KB.  The backing file
  // blksize doesn't strictly matter except that some file systems may
  // require a read/modify/write if we write something smaller than
  // it.
  block_size = g_conf()->bdev_block_size;
  if (block_size != (unsigned)st.st_blksize) {
    dout(1) << __func__ << " backing device/file reports st_blksize "
	    << st.st_blksize << ", using bdev_block_size "
	    << block_size << " anyway" << dendl;
  }

  dout(1) << __func__
	  << " size " << size
	  << " (" << byte_u_t(size) << ")"
	  << " block_size " << block_size
	  << " (" << byte_u_t(block_size) << ")"
	  << dendl;
  return 0;

 out_fail:
  VOID_TEMP_FAILURE_RETRY(::close(fd));
  fd = -1;
  return r;
}
// Undo open(): unmap the region, release the fd, and reset state so the
// device can be reopened.
void PMEMDevice::close()
{
  dout(1) << __func__ << dendl;

  ceph_assert(addr != NULL);
  devdax_device = false;
  pmem_unmap(addr, size);

  ceph_assert(fd >= 0);
  VOID_TEMP_FAILURE_RETRY(::close(fd));
  fd = -1;

  path.clear();
}
// Populate *pm with device metadata under the given key prefix.
// Always reports type "ssd"; access_mode reflects whether the backing
// store is a block device, a (devdax) char device, or a plain file.
int PMEMDevice::collect_metadata(const std::string& prefix, std::map<std::string,std::string> *pm) const
{
  (*pm)[prefix + "rotational"] = stringify((int)(bool)rotational);
  (*pm)[prefix + "size"] = stringify(get_size());
  (*pm)[prefix + "block_size"] = stringify(get_block_size());
  (*pm)[prefix + "driver"] = "PMEMDevice";
  (*pm)[prefix + "type"] = "ssd";

  struct stat st;
  int r = ::fstat(fd, &st);
  if (r < 0)
    return -errno;
  if (S_ISBLK(st.st_mode)) {
    (*pm)[prefix + "access_mode"] = "blk";
    char buffer[1024] = {0};
    BlkDev blkdev(fd);

    blkdev.model(buffer, sizeof(buffer));
    (*pm)[prefix + "model"] = buffer;

    buffer[0] = '\0';
    blkdev.dev(buffer, sizeof(buffer));
    (*pm)[prefix + "dev"] = buffer;

    // nvme exposes a serial number
    buffer[0] = '\0';
    blkdev.serial(buffer, sizeof(buffer));
    (*pm)[prefix + "serial"] = buffer;
  } else if (S_ISCHR(st.st_mode)) {
    (*pm)[prefix + "access_mode"] = "chardevice";
    (*pm)[prefix + "path"] = path;
  } else {
    (*pm)[prefix + "access_mode"] = "file";
    (*pm)[prefix + "path"] = path;
  }
  return 0;
}
// Probe whether `path` can be driven by PMEMDevice: it must be mappable
// by libpmem and report as real persistent memory.
bool PMEMDevice::support(const std::string &path)
{
  int probe_fd = ::open(path.c_str(), O_RDWR);
  if (probe_fd < 0) {
    return false;
  }
  const int type_check = pmem_check_file_type(probe_fd, path.c_str(), NULL);
  VOID_TEMP_FAILURE_RETRY(::close(probe_fd));

  // devdax char devices must be mapped without PMEM_FILE_EXCL
  const int flags = (type_check == 0) ? 0 : PMEM_FILE_EXCL;

  size_t map_len = 0;
  int is_pmem = 0;
  void *probe_addr = pmem_map_file(path.c_str(), 0, flags, O_RDONLY,
                                   &map_len, &is_pmem);
  if (probe_addr == NULL) {
    return false;
  }
  pmem_unmap(probe_addr, map_len);
  return is_pmem != 0;
}
// No-op: write() persists data as it goes (pmem_memcpy_persist / DML),
// so there is never anything buffered to flush.
int PMEMDevice::flush()
{
  //Because all write is persist. So no need
  return 0;
}
// pmem "aio" completes synchronously inside aio_read()/aio_write(), so
// by submit time everything is already done: just deliver the
// completion -- via the callback when the context carries private
// state, otherwise by waking the waiter.
void PMEMDevice::aio_submit(IOContext *ioc)
{
  if (!ioc->priv) {
    ioc->try_aio_wake();
    return;
  }
  ceph_assert(ioc->num_running == 0);
  aio_callback(aio_callback_priv, ioc->priv);
}
// Synchronously copy bl to persistent memory at `off`. Each copy is
// made durable as it goes (pmem_memcpy_persist, or DML's mem_move which
// the comment below says handles persistency). `buffered` and
// `write_hint` are ignored on pmem.
int PMEMDevice::write(uint64_t off, bufferlist& bl, bool buffered, int write_hint)
{
  uint64_t len = bl.length();
  dout(20) << __func__ << " " << off << "~" << len << dendl;
  ceph_assert(is_valid_io(off, len));

  dout(40) << "data:\n";
  bl.hexdump(*_dout);
  *_dout << dendl;

  // debugging hook: pretend the write happened without doing it
  if (g_conf()->bdev_inject_crash &&
      rand() % g_conf()->bdev_inject_crash == 0) {
    derr << __func__ << " bdev_inject_crash: dropping io " << off << "~" << len
	 << dendl;
    ++injecting_crash;
    return 0;
  }

  // walk the bufferlist segment by segment; each segment may be a
  // different length
  bufferlist::iterator p = bl.begin();
  uint64_t off1 = off;
  while (len) {
    const char *data;
    uint32_t l = p.get_ptr_and_advance(len, &data);

#if defined(HAVE_LIBDML)
    // Take care of the persistency issue
    auto result = dml::execute<execution_path>(dml::mem_move, dml::make_view(data, l), dml::make_view(addr + off1, l));
    ceph_assert(result.status == dml::status_code::ok);
#else
    pmem_memcpy_persist(addr + off1, data, l);
#endif
    len -= l;
    off1 += l;
  }
  return 0;
}
// pmem writes complete synchronously, so "aio" write is just write();
// the IOContext needs no bookkeeping. Forward write_hint for
// consistency with the other BlockDevice backends (write() currently
// ignores it, so behavior is unchanged -- previously the hint was
// silently dropped here).
int PMEMDevice::aio_write(
  uint64_t off,
  bufferlist &bl,
  IOContext *ioc,
  bool buffered,
  int write_hint)
{
  return write(off, bl, buffered, write_hint);
}
// Synchronous read: memcpy (or DML mem_move) from the mapped region
// into a freshly allocated page-aligned buffer appended to *pbl.
// `ioc` and `buffered` are ignored on pmem.
int PMEMDevice::read(uint64_t off, uint64_t len, bufferlist *pbl,
		     IOContext *ioc,
		     bool buffered)
{
  dout(5) << __func__ << " " << off << "~" << len << dendl;
  ceph_assert(is_valid_io(off, len));

  bufferptr p = buffer::create_small_page_aligned(len);

#if defined(HAVE_LIBDML)
  // take the DML offload path when available
  auto result = dml::execute<execution_path>(dml::mem_move, dml::make_view(addr + off, len), dml::make_view(p.c_str(), len));
  ceph_assert(result.status == dml::status_code::ok);
#else
  memcpy(p.c_str(), addr + off, len);
#endif

  pbl->clear();
  pbl->push_back(std::move(p));

  dout(40) << "data:\n";
  pbl->hexdump(*_dout);
  *_dout << dendl;

  return 0;
}
// "Async" read on pmem is a synchronous read; the IOContext needs no
// bookkeeping because aio_submit() completes immediately.
int PMEMDevice::aio_read(uint64_t off, uint64_t len, bufferlist *pbl,
		      IOContext *ioc)
{
  return read(off, len, pbl, ioc, false);
}
// Read directly into a caller-supplied buffer; no alignment constraints
// since this is just a memory copy. `buffered` is ignored on pmem.
int PMEMDevice::read_random(uint64_t off, uint64_t len, char *buf, bool buffered)
{
  dout(5) << __func__ << " " << off << "~" << len << dendl;
  ceph_assert(is_valid_io(off, len));

#if defined(HAVE_LIBDML)
  // take the DML offload path when available
  auto result = dml::execute<execution_path>(dml::mem_move, dml::make_view(addr + off, len), dml::make_view(buf, len));
  ceph_assert(result.status == dml::status_code::ok);
#else
  memcpy(buf, addr + off, len);
#endif
  return 0;
}
// No-op: pmem reads/writes bypass the page cache, so there is nothing
// to invalidate.
int PMEMDevice::invalidate_cache(uint64_t off, uint64_t len)
{
  dout(5) << __func__ << " " << off << "~" << len << dendl;
  return 0;
}
| 9,091 | 22.989446 | 125 |
cc
|
null |
ceph-main/src/blk/pmem/PMEMDevice.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Intel <[email protected]>
*
* Author: Jianpeng Ma <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BLK_PMEMDEVICE_H
#define CEPH_BLK_PMEMDEVICE_H
#include <atomic>
#include <map>
#include <string>
#include "os/fs/FS.h"
#include "include/interval_set.h"
#include "aio/aio.h"
#include "BlockDevice.h"
// BlockDevice backed by persistent memory mapped with libpmem. All
// I/O is a synchronous memory copy; the aio entry points complete
// immediately.
class PMEMDevice : public BlockDevice {
  int fd;        // backing fd, held for locking/metadata
  char *addr;    //the address of mmap
  std::string path;
  bool devdax_device = false;  // true when path is a /dev/daxN.M char device

  ceph::mutex debug_lock = ceph::make_mutex("PMEMDevice::debug_lock");
  interval_set<uint64_t> debug_inflight;

  std::atomic_int injecting_crash;
  int _lock();

public:
  PMEMDevice(CephContext *cct, aio_callback_t cb, void *cbpriv);

  // devdax devices have no space for an on-disk bdev label
  bool supported_bdev_label() override { return !devdax_device; }

  void aio_submit(IOContext *ioc) override;

  int collect_metadata(const std::string& prefix, std::map<std::string,std::string> *pm) const override;

  // probe whether `path` is usable persistent memory
  static bool support(const std::string& path);

  int read(uint64_t off, uint64_t len, bufferlist *pbl,
	   IOContext *ioc,
	   bool buffered) override;
  int aio_read(uint64_t off, uint64_t len, bufferlist *pbl,
	       IOContext *ioc) override;

  int read_random(uint64_t off, uint64_t len, char *buf, bool buffered) override;
  int write(uint64_t off, bufferlist& bl, bool buffered, int write_hint = WRITE_LIFE_NOT_SET) override;
  int aio_write(uint64_t off, bufferlist& bl,
		IOContext *ioc,
		bool buffered,
		int write_hint = WRITE_LIFE_NOT_SET) override;
  int flush() override;

  // for managing buffered readers/writers
  int invalidate_cache(uint64_t off, uint64_t len) override;
  int open(const std::string &path) override;
  void close() override;

private:
  bool is_valid_io(uint64_t off, uint64_t len) const {
    return (len > 0 &&
	    off < size &&
	    off + len <= size);
  }
};
#endif
| 2,206 | 26.936709 | 104 |
h
|
null |
ceph-main/src/blk/spdk/NVMEDevice.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
//
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <fcntl.h>
#include <stdlib.h>
#include <strings.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <unistd.h>

#include <atomic>
#include <chrono>
#include <fstream>
#include <functional>
#include <map>
#include <thread>

#include <boost/intrusive/slist.hpp>
#include <spdk/nvme.h>

#include "include/intarith.h"
#include "include/stringify.h"
#include "include/types.h"
#include "include/compat.h"
#include "common/errno.h"
#include "common/debug.h"
#include "common/perf_counters.h"

#include "NVMEDevice.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_bdev
#undef dout_prefix
#define dout_prefix *_dout << "bdev(" << sn << ") "
using namespace std;
static constexpr uint16_t data_buffer_default_num = 1024;  // DMA buffers preallocated per queue
static constexpr uint32_t data_buffer_size = 8192;         // bytes per DMA buffer
static constexpr uint16_t inline_segment_num = 32;         // segments kept inline in IORequest

/* Default to 10 seconds for the keep alive value. This value is arbitrary. */
static constexpr uint32_t nvme_ctrlr_keep_alive_timeout_in_ms = 10000;

// SPDK completion callback; defined later in this file
static void io_complete(void *t, const struct spdk_nvme_cpl *completion);
// Scatter/gather bookkeeping for one in-flight I/O. Small transfers use
// the inline segment array; extra_segs is used for larger transfers --
// presumably when more than inline_segment_num segments are needed
// (confirm in alloc_buf_from_pool).
struct IORequest {
  uint16_t cur_seg_idx = 0;    // segment currently being filled/consumed
  uint16_t nseg;               // number of segments in use
  uint32_t cur_seg_left = 0;   // bytes remaining in the current segment
  void *inline_segs[inline_segment_num];
  void **extra_segs = nullptr; // heap-allocated segment array, or nullptr
};
namespace bi = boost::intrusive;
// A spdk_dma_zmalloc'ed data buffer, threaded onto a per-queue
// intrusive free list (SharedDriverQueueData::data_buf_list).
struct data_cache_buf : public bi::slist_base_hook<bi::link_mode<bi::normal_link>>
{};
struct Task;
// Per-controller state shared by every NVMEDevice bound to the same
// SPDK transport id. For non-PCIe (fabrics) transports a background
// thread services admin completions so keep-alives keep flowing.
//
// Fix vs. previous revision: the keep-alive loop ran forever
// (`while (true)`), so the destructor's join() could never return and
// teardown hung. The loop now exits when admin_stop is set.
class SharedDriverData {
  unsigned id;
  spdk_nvme_transport_id trid;
  spdk_nvme_ctrlr *ctrlr;
  spdk_nvme_ns *ns;
  uint32_t block_size = 0;
  uint64_t size = 0;
  std::atomic<bool> admin_stop = {false};  // tells admin_thread to exit
  std::thread admin_thread;

  public:
  std::vector<NVMEDevice*> registered_devices;
  friend class SharedDriverQueueData;
  SharedDriverData(unsigned id_, const spdk_nvme_transport_id& trid_,
                   spdk_nvme_ctrlr *c, spdk_nvme_ns *ns_)
      : id(id_),
        trid(trid_),
        ctrlr(c),
        ns(ns_) {
    block_size = spdk_nvme_ns_get_extended_sector_size(ns);
    size = spdk_nvme_ns_get_size(ns);
    if (trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
      return;
    }
    // For Non-PCIe transport, we need to send keep-alive periodically.
    admin_thread = std::thread(
      [this]() {
        while (!admin_stop.load(std::memory_order_relaxed)) {
          int rc = spdk_nvme_ctrlr_process_admin_completions(ctrlr);
          ceph_assert(rc >= 0);
          sleep(1);
        }
      }
    );
  }

  bool is_equal(const spdk_nvme_transport_id& trid2) const {
    return spdk_nvme_transport_id_compare(&trid, &trid2) == 0;
  }

  ~SharedDriverData() {
    if (admin_thread.joinable()) {
      // signal the keep-alive loop to stop before joining
      // (worst case ~1s delay due to the sleep(1) above)
      admin_stop.store(true, std::memory_order_relaxed);
      admin_thread.join();
    }
  }

  void register_device(NVMEDevice *device) {
    registered_devices.push_back(device);
  }

  void remove_device(NVMEDevice *device) {
    std::vector<NVMEDevice*> new_devices;
    for (auto &&it : registered_devices) {
      if (it != device)
        new_devices.push_back(it);
    }
    registered_devices.swap(new_devices);
  }

  uint32_t get_block_size() {
    return block_size;
  }
  uint64_t get_size() {
    return size;
  }
};
// Per-I/O-queue state: one SPDK qpair plus a pool of preallocated DMA
// buffers that tasks borrow for their scatter/gather segments.
class SharedDriverQueueData {
  NVMEDevice *bdev;
  SharedDriverData *driver;
  spdk_nvme_ctrlr *ctrlr;
  spdk_nvme_ns *ns;
  std::string sn;
  uint32_t block_size;
  uint32_t max_queue_depth;  // usable qpair depth (io_queue_size - 1)
  struct spdk_nvme_qpair *qpair;
  int alloc_buf_from_pool(Task *t, bool write);

  public:
  uint32_t current_queue_depth = 0;
  std::atomic_ulong completed_op_seq, queue_op_seq;
  // free list of spdk DMA buffers (see data_cache_buf)
  bi::slist<data_cache_buf, bi::constant_time_size<true>> data_buf_list;
  void _aio_handle(Task *t, IOContext *ioc);

  SharedDriverQueueData(NVMEDevice *bdev, SharedDriverData *driver)
    : bdev(bdev),
      driver(driver) {
    ctrlr = driver->ctrlr;
    ns = driver->ns;
    block_size = driver->block_size;

    struct spdk_nvme_io_qpair_opts opts = {};
    spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
    opts.qprio = SPDK_NVME_QPRIO_URGENT;
    // usable queue depth should minus 1 to avoid overflow.
    max_queue_depth = opts.io_queue_size - 1;
    qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
    ceph_assert(qpair != NULL);

    // allocate spdk dma memory
    for (uint16_t i = 0; i < data_buffer_default_num; i++) {
      void *b = spdk_dma_zmalloc(data_buffer_size, CEPH_PAGE_SIZE, NULL);
      if (!b) {
        derr << __func__ << " failed to create memory pool for nvme data buffer" << dendl;
        ceph_assert(b);
      }
      data_buf_list.push_front(*reinterpret_cast<data_cache_buf *>(b));
    }
  }

  ~SharedDriverQueueData() {
    if (qpair) {
      spdk_nvme_ctrlr_free_io_qpair(qpair);
    }

    // each list node *is* the DMA buffer, so dispose frees it directly
    data_buf_list.clear_and_dispose(spdk_dma_free);
  }
};
/**
 * One outstanding I/O command (read/write/flush).
 *
 * Large reads are split into subtasks that point back at a `primary`
 * task; the primary's `ref` counts live subtasks, and its `return_code`
 * is cleared only when the last subtask completes (see io_complete()).
 * The payload travels through DMA buffers tracked by `io_request`.
 */
struct Task {
  NVMEDevice *device;
  IOContext *ctx = nullptr;
  IOCommand command;
  uint64_t offset;
  uint64_t len;
  bufferlist bl;
  std::function<void()> fill_cb;   // copies read data out of DMA buffers
  Task *next = nullptr;
  int64_t return_code;
  Task *primary = nullptr;
  IORequest io_request = {};
  SharedDriverQueueData *queue = nullptr;
  // reference count by subtasks.
  int ref = 0;

  Task(NVMEDevice *dev, IOCommand c, uint64_t off, uint64_t l, int64_t rc = 0,
       Task *p = nullptr)
    : device(dev), command(c), offset(off), len(l),
      return_code(rc), primary(p) {
    if (primary) {
      primary->ref++;
      return_code = primary->return_code;
    }
  }

  ~Task() {
    if (primary)
      primary->ref--;
    // all DMA buffers must have been returned via release_segs()
    ceph_assert(!io_request.nseg);
  }

  // Return every DMA buffer of this task to the queue's free list and
  // drop the task's segment bookkeeping.
  void release_segs(SharedDriverQueueData *queue_data) {
    if (io_request.extra_segs) {
      for (uint16_t i = 0; i < io_request.nseg; i++) {
        auto buf = reinterpret_cast<data_cache_buf *>(io_request.extra_segs[i]);
        queue_data->data_buf_list.push_front(*buf);
      }
      // allocated with new void*[...] in alloc_buf_from_pool(), so the
      // array form of delete is required; null it so a second call is a
      // harmless no-op instead of a double free.
      delete[] io_request.extra_segs;
      io_request.extra_segs = nullptr;
    } else if (io_request.nseg) {
      for (uint16_t i = 0; i < io_request.nseg; i++) {
        auto buf = reinterpret_cast<data_cache_buf *>(io_request.inline_segs[i]);
        queue_data->data_buf_list.push_front(*buf);
      }
    }
    ctx->total_nseg -= io_request.nseg;
    io_request.nseg = 0;
  }

  // Copy `len` bytes of read payload starting at byte `off` of the
  // scattered DMA buffers into the caller's contiguous buffer `buf`.
  void copy_to_buf(char *buf, uint64_t off, uint64_t len) {
    uint64_t copied = 0;
    uint64_t left = len;
    void **segs = io_request.extra_segs ? io_request.extra_segs : io_request.inline_segs;
    uint16_t i = 0;
    while (left > 0) {
      char *src = static_cast<char*>(segs[i++]);
      // `off` only applies within the first segment touched
      uint64_t need_copy = std::min(left, data_buffer_size-off);
      memcpy(buf+copied, src+off, need_copy);
      off = 0;
      left -= need_copy;
      copied += need_copy;
    }
  }
};
// SPDK SGL reset callback: position the scatter-gather cursor at absolute
// payload byte offset |sgl_offset|.  Sets cur_seg_idx to the data buffer
// containing that offset and cur_seg_left to the bytes of that buffer
// remaining past the offset (clamped so we never run past t->len).
static void data_buf_reset_sgl(void *cb_arg, uint32_t sgl_offset)
{
  Task *t = static_cast<Task*>(cb_arg);
  uint32_t i = sgl_offset / data_buffer_size;   // segment holding sgl_offset
  uint32_t offset = i * data_buffer_size;       // start of that segment
  ceph_assert(i <= t->io_request.nseg);

  // advance `offset` to the end of the containing segment (or t->len,
  // whichever comes first)
  for (; i < t->io_request.nseg; i++) {
    offset += data_buffer_size;
    if (offset > sgl_offset) {
      if (offset > t->len)
        offset = t->len;
      break;
    }
  }

  t->io_request.cur_seg_idx = i;
  t->io_request.cur_seg_left = offset - sgl_offset;
  return ;
}
// SPDK SGL "next element" callback: hand the driver the next scatter
// element (address + length), honoring any mid-segment position left by
// data_buf_reset_sgl().  An empty element is reported past the last
// segment.
static int data_buf_next_sge(void *cb_arg, void **address, uint32_t *length)
{
  Task *t = static_cast<Task*>(cb_arg);
  auto &req = t->io_request;

  if (req.cur_seg_idx >= req.nseg) {
    *length = 0;
    *address = 0;
    return 0;
  }

  void **segs = req.extra_segs ? req.extra_segs : req.inline_segs;
  void *seg_addr = segs[req.cur_seg_idx];

  // every segment is a full buffer except possibly the last, which only
  // covers the tail of the transfer
  uint32_t seg_len = data_buffer_size;
  if (req.cur_seg_idx == req.nseg - 1) {
    if (uint64_t tail = t->len % data_buffer_size; tail) {
      seg_len = (uint32_t) tail;
    }
  }

  if (req.cur_seg_left) {
    // resuming mid-segment: expose only the remaining bytes
    *address = (void *)((uint64_t)seg_addr + seg_len - req.cur_seg_left);
    *length = req.cur_seg_left;
    req.cur_seg_left = 0;
  } else {
    *address = seg_addr;
    *length = seg_len;
  }

  req.cur_seg_idx++;
  return 0;
}
// Carve enough DMA buffers off this queue's free list to cover t->len
// bytes and attach them to the task.  Small transfers use the task's
// inline segment array; larger ones get a heap-allocated pointer array
// (released in Task::release_segs).  For writes, the payload is copied
// from t->bl into the buffers up front.
//
// Returns -ENOMEM (taking nothing) when the free list is too short; the
// caller retries after completions return buffers to the list.
int SharedDriverQueueData::alloc_buf_from_pool(Task *t, bool write)
{
  uint64_t count = t->len / data_buffer_size;
  if (t->len % data_buffer_size)
    ++count;
  void **segs;
  if (count > data_buf_list.size())
    return -ENOMEM;
  if (count <= inline_segment_num) {
    segs = t->io_request.inline_segs;
  } else {
    t->io_request.extra_segs = new void*[count];
    segs = t->io_request.extra_segs;
  }
  for (uint16_t i = 0; i < count; i++) {
    ceph_assert(!data_buf_list.empty());
    segs[i] = &data_buf_list.front();
    ceph_assert(segs[i] != nullptr);
    data_buf_list.pop_front();
  }
  t->io_request.nseg = count;
  t->ctx->total_nseg += count;
  if (write) {
    auto blp = t->bl.begin();
    uint32_t len = 0;
    uint16_t i = 0;
    // fill all full segments, then the (possibly partial) last one
    for (; i < count - 1; ++i) {
      blp.copy(data_buffer_size, static_cast<char*>(segs[i]));
      len += data_buffer_size;
    }
    blp.copy(t->bl.length() - len, static_cast<char*>(segs[i]));
  }
  return 0;
}
// Submit the chain of tasks starting at |t| to the hardware queue and
// poll completions until every task on |ioc| has finished
// (ioc->num_running reaches 0).  When the qpair is full or DMA buffers
// run out, we `goto again` to reap completions and retry.  Completion
// callbacks are delivered through io_complete().
void SharedDriverQueueData::_aio_handle(Task *t, IOContext *ioc)
{
  dout(20) << __func__ << " start" << dendl;

  int r = 0;
  uint64_t lba_off, lba_count;
  uint32_t max_io_completion = (uint32_t)g_conf().get_val<uint64_t>("bluestore_spdk_max_io_completion");
  uint64_t io_sleep_in_us = g_conf().get_val<uint64_t>("bluestore_spdk_io_sleep");

  while (ioc->num_running) {
 again:
    dout(40) << __func__ << " polling" << dendl;
    if (current_queue_depth) {
      r = spdk_nvme_qpair_process_completions(qpair, max_io_completion);
      if (r < 0) {
        ceph_abort();
      } else if (r == 0) {
        // nothing completed yet; back off briefly before re-polling
        usleep(io_sleep_in_us);
      }
    }

    for (; t; t = t->next) {
      if (current_queue_depth == max_queue_depth) {
        // no slots
        goto again;
      }
      t->queue = this;
      // byte offsets/lengths are block-aligned by the callers
      lba_off = t->offset / block_size;
      lba_count = t->len / block_size;
      switch (t->command) {
        case IOCommand::WRITE_COMMAND:
        {
          dout(20) << __func__ << " write command issued " << lba_off << "~" << lba_count << dendl;
          r = alloc_buf_from_pool(t, true);
          if (r < 0) {
            // free list exhausted: reap completions, then retry this task
            goto again;
          }

          r = spdk_nvme_ns_cmd_writev(
              ns, qpair, lba_off, lba_count, io_complete, t, 0,
              data_buf_reset_sgl, data_buf_next_sge);
          if (r < 0) {
            derr << __func__ << " failed to do write command: " << cpp_strerror(r) << dendl;
            t->ctx->nvme_task_first = t->ctx->nvme_task_last = nullptr;
            t->release_segs(this);
            delete t;
            ceph_abort();
          }
          break;
        }
        case IOCommand::READ_COMMAND:
        {
          dout(20) << __func__ << " read command issued " << lba_off << "~" << lba_count << dendl;
          r = alloc_buf_from_pool(t, false);
          if (r < 0) {
            goto again;
          }

          r = spdk_nvme_ns_cmd_readv(
              ns, qpair, lba_off, lba_count, io_complete, t, 0,
              data_buf_reset_sgl, data_buf_next_sge);
          if (r < 0) {
            derr << __func__ << " failed to read: " << cpp_strerror(r) << dendl;
            t->release_segs(this);
            delete t;
            ceph_abort();
          }
          break;
        }
        case IOCommand::FLUSH_COMMAND:
        {
          dout(20) << __func__ << " flush command issueed " << dendl;
          r = spdk_nvme_ns_cmd_flush(ns, qpair, io_complete, t);
          if (r < 0) {
            derr << __func__ << " failed to flush: " << cpp_strerror(r) << dendl;
            t->release_segs(this);
            delete t;
            ceph_abort();
          }
          break;
        }
      }
      current_queue_depth++;
    }
  }

  dout(20) << __func__ << " end" << dendl;
}
#define dout_subsys ceph_subsys_bdev
#undef dout_prefix
#define dout_prefix *_dout << "bdev "
// Process-wide singleton that owns SPDK/DPDK environment setup and maps
// transport IDs to SharedDriverData instances.  Probing runs on a
// dedicated dpdk_thread that consumes ProbeContext requests from a queue
// (see try_get()).
class NVMEManager {
 public:
  // One pending probe request: filled in by the requester, completed by
  // the dpdk_thread (driver set on success, done flipped either way).
  struct ProbeContext {
    spdk_nvme_transport_id trid;
    NVMEManager *manager;
    SharedDriverData *driver;
    bool done;
  };

 private:
  ceph::mutex lock = ceph::make_mutex("NVMEManager::lock");
  bool stopping = false;
  std::vector<SharedDriverData*> shared_driver_datas;
  std::thread dpdk_thread;
  ceph::mutex probe_queue_lock = ceph::make_mutex("NVMEManager::probe_queue_lock");
  ceph::condition_variable probe_queue_cond;
  std::list<ProbeContext*> probe_queue;

 public:
  NVMEManager() {}
  ~NVMEManager() {
    if (!dpdk_thread.joinable())
      return;
    // ask the probe thread to exit, then wait for it
    {
      std::lock_guard guard(probe_queue_lock);
      stopping = true;
      probe_queue_cond.notify_all();
    }
    dpdk_thread.join();
  }

  int try_get(const spdk_nvme_transport_id& trid, SharedDriverData **driver);

  // Called from attach_cb once SPDK has initialized a controller: pick
  // its first namespace and wrap it in a SharedDriverData.  Caller must
  // hold `lock`.
  void register_ctrlr(const spdk_nvme_transport_id& trid, spdk_nvme_ctrlr *c, SharedDriverData **driver) {
    ceph_assert(ceph_mutex_is_locked(lock));
    spdk_nvme_ns *ns;
    int num_ns = spdk_nvme_ctrlr_get_num_ns(c);
    ceph_assert(num_ns >= 1);
    if (num_ns > 1) {
      dout(0) << __func__ << " namespace count larger than 1, currently only use the first namespace" << dendl;
    }
    ns = spdk_nvme_ctrlr_get_ns(c, 1);
    if (!ns) {
      derr << __func__ << " failed to get namespace at 1" << dendl;
      ceph_abort();
    }
    dout(1) << __func__ << " successfully attach nvme device at" << trid.traddr << dendl;

    // only support one device per osd now!
    ceph_assert(shared_driver_datas.empty());
    // index 0 is occurred by master thread
    shared_driver_datas.push_back(new SharedDriverData(shared_driver_datas.size()+1, trid, c, ns));
    *driver = shared_driver_datas.back();
  }
};
static NVMEManager manager;
static bool probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, struct spdk_nvme_ctrlr_opts *opts)
{
NVMEManager::ProbeContext *ctx = static_cast<NVMEManager::ProbeContext*>(cb_ctx);
bool do_attach = false;
if (trid->trtype == SPDK_NVME_TRANSPORT_PCIE) {
do_attach = spdk_nvme_transport_id_compare(&ctx->trid, trid) == 0;
if (!do_attach) {
dout(0) << __func__ << " device traddr (" << ctx->trid.traddr
<< ") not match " << trid->traddr << dendl;
}
} else {
// for non-pcie devices, should always match the specified trid
assert(!spdk_nvme_transport_id_compare(&ctx->trid, trid));
do_attach = true;
}
if (do_attach) {
dout(0) << __func__ << " found device at: "
<< "trtype=" << spdk_nvme_transport_id_trtype_str(trid->trtype) << ", "
<< "traddr=" << trid->traddr << dendl;
opts->io_queue_size = UINT16_MAX;
opts->io_queue_requests = UINT16_MAX;
opts->keep_alive_timeout_ms = nvme_ctrlr_keep_alive_timeout_in_ms;
}
return do_attach;
}
static void attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
auto ctx = static_cast<NVMEManager::ProbeContext*>(cb_ctx);
ctx->manager->register_ctrlr(ctx->trid, ctrlr, &ctx->driver);
}
// Convert one hexadecimal digit to its numeric value (0-15).  The caller
// must pass a valid hex digit ([0-9A-Fa-f]); anything else produces a
// meaningless result.
static int hex2dec(unsigned char c)
{
  if (isdigit(c))
    return c - '0';
  return isupper(c) ? c - 'A' + 10 : c - 'a' + 10;
}
// Return the 1-based position of the least-significant set bit in the
// hex string |s| (optionally prefixed with "0x"/"0X"): e.g. "0x4" -> 3,
// "10" -> 5.  Returns 0 when no bit is set and -EINVAL on a non-hex
// character.
static int find_first_bitset(const string& s)
{
  auto e = s.rend();
  if (s.compare(0, 2, "0x") == 0 ||
      s.compare(0, 2, "0X") == 0) {
    // scanning from the least-significant end, stop before the prefix
    advance(e, -2);
  }
  auto p = s.rbegin();
  for (int pos = 0; p != e; ++p, pos += 4) {
    // cast: isxdigit() is undefined for negative plain-char values
    if (!isxdigit(static_cast<unsigned char>(*p))) {
      return -EINVAL;
    }
    if (int val = hex2dec(*p); val != 0) {
      return pos + ffs(val);
    }
  }
  return 0;
}
/**
 * Return (creating on first use) the SharedDriverData for the controller
 * addressed by |trid|.  The first call starts the long-lived dpdk_thread
 * that initializes the SPDK environment and then services probe requests
 * from probe_queue; every caller enqueues a ProbeContext and blocks until
 * the probe completes.
 *
 * Returns 0 with *driver set on success, -ENOENT for an unusable
 * bluestore_spdk_coremask, -1 when the probe failed to attach.
 */
int NVMEManager::try_get(const spdk_nvme_transport_id& trid, SharedDriverData **driver)
{
  std::lock_guard l(lock);
  for (auto &&it : shared_driver_datas) {
    if (it->is_equal(trid)) {
      *driver = it;
      return 0;
    }
  }

  auto coremask_arg = g_conf().get_val<std::string>("bluestore_spdk_coremask");
  int m_core_arg = find_first_bitset(coremask_arg);
  // at least one core is needed for using spdk
  if (m_core_arg <= 0) {
    derr << __func__ << " invalid bluestore_spdk_coremask, "
         << "at least one core is needed" << dendl;
    return -ENOENT;
  }
  m_core_arg -= 1;

  uint32_t mem_size_arg = (uint32_t)g_conf().get_val<Option::size_t>("bluestore_spdk_mem");

  if (!dpdk_thread.joinable()) {
    dpdk_thread = std::thread(
      [this, coremask_arg, m_core_arg, mem_size_arg, trid]() {
        struct spdk_env_opts opts;
        struct spdk_pci_addr addr;
        int r;
        bool local_pci_device = false;

        // spdk_env_opts_init() resets the entire structure, so it must
        // run before any field is customized; setting the PCI whitelist
        // first would silently discard it.
        spdk_env_opts_init(&opts);
        opts.name = "nvme-device-manager";
        opts.core_mask = coremask_arg.c_str();
        opts.master_core = m_core_arg;
        opts.mem_size = mem_size_arg;

        int rc = spdk_pci_addr_parse(&addr, trid.traddr);
        if (!rc) {
          // a parseable PCI address: restrict DPDK to just this device
          local_pci_device = true;
          opts.pci_whitelist = &addr;
          opts.num_pci_addr = 1;
        }

        spdk_env_init(&opts);
        // undo DPDK's CPU pinning of this thread
        spdk_unaffinitize_thread();

        // serve probe requests until the manager is destroyed
        std::unique_lock l(probe_queue_lock);
        while (!stopping) {
          if (!probe_queue.empty()) {
            ProbeContext* ctxt = probe_queue.front();
            probe_queue.pop_front();
            r = spdk_nvme_probe(local_pci_device ? NULL : &trid, ctxt, probe_cb, attach_cb, NULL);
            if (r < 0) {
              ceph_assert(!ctxt->driver);
              derr << __func__ << " device probe nvme failed" << dendl;
            }
            ctxt->done = true;
            probe_queue_cond.notify_all();
          } else {
            probe_queue_cond.wait(l);
          }
        }
        // unblock any requesters still waiting at shutdown
        for (auto p : probe_queue)
          p->done = true;
        probe_queue_cond.notify_all();
      }
    );
  }

  ProbeContext ctx{trid, this, nullptr, false};
  {
    std::unique_lock l(probe_queue_lock);
    probe_queue.push_back(&ctx);
    while (!ctx.done)
      probe_queue_cond.wait(l);
  }
  if (!ctx.driver)
    return -1;

  *driver = ctx.driver;
  return 0;
}
// SPDK completion callback for every command submitted by _aio_handle().
// Frees the task's DMA buffers, wakes/calls back the IOContext owner for
// async I/O, and implements the sync-read protocol: a sync read's Task is
// created with return_code != 0 and is flipped to 0 on success (for split
// reads, only after the last subtask completes — tracked via primary->ref,
// which ~Task decrements).
void io_complete(void *t, const struct spdk_nvme_cpl *completion)
{
  Task *task = static_cast<Task*>(t);
  IOContext *ctx = task->ctx;
  SharedDriverQueueData *queue = task->queue;

  ceph_assert(queue != NULL);
  ceph_assert(ctx != NULL);
  --queue->current_queue_depth;
  if (task->command == IOCommand::WRITE_COMMAND) {
    ceph_assert(!spdk_nvme_cpl_is_error(completion));
    dout(20) << __func__ << " write/zero op successfully, left "
             << queue->queue_op_seq - queue->completed_op_seq << dendl;
    // check waiting count before doing callback (which may
    // destroy this ioc).
    if (ctx->priv) {
      if (!--ctx->num_running) {
        task->device->aio_callback(task->device->aio_callback_priv, ctx->priv);
      }
    } else {
      ctx->try_aio_wake();
    }
    task->release_segs(queue);
    delete task;
  } else if (task->command == IOCommand::READ_COMMAND) {
    ceph_assert(!spdk_nvme_cpl_is_error(completion));
    dout(20) << __func__ << " read op successfully" << dendl;
    // copy the payload out of the DMA buffers before releasing them
    task->fill_cb();
    task->release_segs(queue);
    // read submitted by AIO
    if (!task->return_code) {
      if (ctx->priv) {
        if (!--ctx->num_running) {
          task->device->aio_callback(task->device->aio_callback_priv, ctx->priv);
        }
      } else {
        ctx->try_aio_wake();
      }
      delete task;
    } else {
      // sync read: signal success through return_code; for a split read
      // the primary's code clears only when its last subtask is gone
      // (~Task drops primary->ref)
      if (Task* primary = task->primary; primary != nullptr) {
        delete task;
        if (!primary->ref)
          primary->return_code = 0;
      } else {
        task->return_code = 0;
      }
      --ctx->num_running;
    }
  } else {
    ceph_assert(task->command == IOCommand::FLUSH_COMMAND);
    ceph_assert(!spdk_nvme_cpl_is_error(completion));
    dout(20) << __func__ << " flush op successfully" << dendl;
    task->return_code = 0;
  }
}
// ----------------
#undef dout_prefix
#define dout_prefix *_dout << "bdev(" << name << ") "
// The driver pointer is resolved later in open(); construction only wires
// up the completion callback via the BlockDevice base.
NVMEDevice::NVMEDevice(CephContext* cct, aio_callback_t cb, void *cbpriv)
  :   BlockDevice(cct, cb, cbpriv),
      driver(nullptr)
{
}
// A path is an SPDK device when it is a symlink whose target's basename
// begins with SPDK_PREFIX; anything else (including a plain file or a
// failed readlink) is not ours.
bool NVMEDevice::support(const std::string& path)
{
  char target[PATH_MAX + 1];
  int n = ::readlink(path.c_str(), target, sizeof(target) - 1);
  if (n < 0) {
    return false;
  }
  target[n] = '\0';
  const char *base = ::basename(target);
  return strncmp(base, SPDK_PREFIX, sizeof(SPDK_PREFIX) - 1) == 0;
}
// Open the device described by file |p|: its first line is an SPDK
// transport-id string.  Resolves (probing if necessary) the shared driver
// for that controller, registers this device with it, and caches the
// namespace geometry.
int NVMEDevice::open(const string& p)
{
  dout(1) << __func__ << " path " << p << dendl;

  std::ifstream ifs(p);
  if (!ifs) {
    derr << __func__ << " unable to open " << p << dendl;
    return -1;
  }
  string val;
  std::getline(ifs, val);
  spdk_nvme_transport_id trid;
  if (int r = spdk_nvme_transport_id_parse(&trid, val.c_str()); r) {
    derr << __func__ << " unable to read " << p << ": " << cpp_strerror(r)
         << dendl;
    return r;
  }
  if (int r = manager.try_get(trid, &driver); r < 0) {
    derr << __func__ << " failed to get nvme device with transport address "
         << trid.traddr << " type " << trid.trtype << dendl;
    return r;
  }

  driver->register_device(this);
  block_size = driver->get_block_size();
  size = driver->get_size();
  name = trid.traddr;

  //nvme is non-rotational device.
  rotational = false;

  // round size down to an even block
  size &= ~(block_size - 1);

  dout(1) << __func__ << " size " << size << " (" << byte_u_t(size) << ")"
          << " block_size " << block_size << " (" << byte_u_t(block_size)
          << ")" << dendl;

  return 0;
}
// Detach from the shared driver; the driver itself (and the controller)
// stays alive for other registered devices.
void NVMEDevice::close()
{
  dout(1) << __func__ << dendl;
  name.clear();
  driver->remove_device(this);

  dout(1) << __func__ << " end" << dendl;
}
int NVMEDevice::collect_metadata(const string& prefix, map<string,string> *pm) const
{
(*pm)[prefix + "rotational"] = "0";
(*pm)[prefix + "size"] = stringify(get_size());
(*pm)[prefix + "block_size"] = stringify(get_block_size());
(*pm)[prefix + "driver"] = "NVMEDevice";
(*pm)[prefix + "type"] = "nvme";
(*pm)[prefix + "access_mode"] = "spdk";
(*pm)[prefix + "nvme_serial_number"] = name;
return 0;
}
// No-op: writes complete through io_complete() before callers are woken,
// so there is nothing buffered to flush here.
int NVMEDevice::flush()
{
  return 0;
}
// Move this IOContext's pending task chain into the running state and
// drive it to completion on the calling thread's queue pair.  Each
// submitting thread lazily gets its own SharedDriverQueueData (qpair +
// DMA buffer pool) via thread_local; it lives until the thread exits.
void NVMEDevice::aio_submit(IOContext *ioc)
{
  dout(20) << __func__ << " ioc " << ioc << " pending "
           << ioc->num_pending.load() << " running "
           << ioc->num_running.load() << dendl;
  int pending = ioc->num_pending.load();
  Task *t = static_cast<Task*>(ioc->nvme_task_first);
  if (pending && t) {
    ioc->num_running += pending;
    ioc->num_pending -= pending;
    ceph_assert(ioc->num_pending.load() == 0);  // we should be only thread doing this
    // Only need to push the first entry
    ioc->nvme_task_first = ioc->nvme_task_last = nullptr;

    thread_local SharedDriverQueueData queue_t = SharedDriverQueueData(this, driver);
    queue_t._aio_handle(t, ioc);
  }
}
// Append |t| to the IOContext's pending-task singly linked list (chained
// through Task::next) and bump the pending counter.  Submission later
// only needs the head pointer.
static void ioc_append_task(IOContext *ioc, Task *t)
{
  Task *tail = static_cast<Task*>(ioc->nvme_task_last);
  if (tail)
    tail->next = t;
  if (!ioc->nvme_task_first)
    ioc->nvme_task_first = t;
  ioc->nvme_task_last = t;
  ++ioc->num_pending;
}
// Split a write into 128KB tasks and queue them on |ioc|.  The payload is
// spliced (moved, not copied) out of |bl| into each task's bufferlist.
static void write_split(
    NVMEDevice *dev,
    uint64_t off,
    bufferlist &bl,
    IOContext *ioc)
{
  // This value may need to be got from configuration later.
  const uint64_t split_size = 131072; // 128KB.
  uint64_t begin = 0;

  for (uint64_t remain_len = bl.length(); remain_len > 0; ) {
    const uint64_t write_size = std::min(remain_len, split_size);
    Task *t = new Task(dev, IOCommand::WRITE_COMMAND, off + begin, write_size);
    // TODO: if upper layer alloc memory with known physical address,
    // we can reduce this copy
    bl.splice(0, write_size, &t->bl);
    t->ctx = ioc;
    ioc_append_task(ioc, t);
    begin += write_size;
    remain_len -= write_size;
  }
}
// Queue read tasks covering the block-aligned region
// [aligned_off, aligned_off+aligned_len) in 128KB pieces, but arrange for
// only the caller's original bytes [orig_off, orig_off+orig_len) to be
// copied into |buf| on completion (tmp_off skips the alignment padding of
// the first piece).  When |primary| is given and a single piece suffices,
// the primary Task is used directly; otherwise subtasks reference it so
// the sync-read protocol in io_complete() can track the last completion.
static void make_read_tasks(
    NVMEDevice *dev,
    uint64_t aligned_off,
    IOContext *ioc, char *buf, uint64_t aligned_len, Task *primary,
    uint64_t orig_off, uint64_t orig_len)
{
  // This value may need to be got from configuration later.
  uint64_t split_size = 131072; // 128KB.
  uint64_t tmp_off = orig_off - aligned_off, remain_orig_len = orig_len;
  auto begin = aligned_off;
  const auto aligned_end = begin + aligned_len;

  for (; begin < aligned_end; begin += split_size) {
    auto read_size = std::min(aligned_end - begin, split_size);
    auto tmp_len = std::min(remain_orig_len, read_size - tmp_off);
    Task *t = nullptr;

    if (primary && (aligned_len <= split_size)) {
      t = primary;
    } else {
      t = new Task(dev, IOCommand::READ_COMMAND, begin, read_size, 0, primary);
    }

    t->ctx = ioc;

    // TODO: if upper layer alloc memory with known physical address,
    // we can reduce this copy
    t->fill_cb = [buf, t, tmp_off, tmp_len] {
      t->copy_to_buf(buf, tmp_off, tmp_len);
    };

    ioc_append_task(ioc, t);
    remain_orig_len -= tmp_len;
    buf += tmp_len;
    // only the first piece carries alignment padding
    tmp_off = 0;
  }
}
// Queue an asynchronous write: the bufferlist is split into 128KB tasks
// attached to |ioc|; nothing hits the hardware until aio_submit().
// `buffered` and `write_hint` are accepted for interface compatibility
// and not used here.
int NVMEDevice::aio_write(
    uint64_t off,
    bufferlist &bl,
    IOContext *ioc,
    bool buffered,
    int write_hint)
{
  const uint64_t len = bl.length();
  dout(20) << __func__ << " " << off << "~" << len << " ioc " << ioc
           << " buffered " << buffered << dendl;
  ceph_assert(is_valid_io(off, len));

  write_split(this, off, bl, ioc);
  dout(5) << __func__ << " " << off << "~" << len << dendl;

  return 0;
}
// Synchronous write: queues split write tasks on a private IOContext,
// submits them, and blocks until all complete.  Always returns 0; I/O
// errors abort in the completion path instead.
int NVMEDevice::write(uint64_t off, bufferlist &bl, bool buffered, int write_hint)
{
  uint64_t len = bl.length();
  dout(20) << __func__ << " " << off << "~" << len << " buffered "
           << buffered << dendl;
  // offset/length must be block-aligned, non-empty and inside the device
  ceph_assert(off % block_size == 0);
  ceph_assert(len % block_size == 0);
  ceph_assert(len > 0);
  ceph_assert(off < size);
  ceph_assert(off + len <= size);

  IOContext ioc(cct, NULL);
  write_split(this, off, bl, &ioc);
  dout(5) << __func__ << " " << off << "~" << len << dendl;
  aio_submit(&ioc);
  ioc.aio_wait();

  return 0;
}
// Synchronous read into a fresh page-aligned buffer appended to |pbl|.
// The stack Task is created with return_code = 1 (the non-zero marker of
// a sync read); io_complete() flips it to 0 once the read — or, for a
// split read, its last subtask — succeeds, so 0 here means success.
// The caller's |ioc| is not used for submission; a private context keeps
// the wait local.
int NVMEDevice::read(uint64_t off, uint64_t len, bufferlist *pbl,
                     IOContext *ioc,
                     bool buffered)
{
  dout(5) << __func__ << " " << off << "~" << len << " ioc " << ioc << dendl;
  ceph_assert(is_valid_io(off, len));

  Task t(this, IOCommand::READ_COMMAND, off, len, 1);
  bufferptr p = buffer::create_small_page_aligned(len);
  char *buf = p.c_str();

  // for sync read, need to control IOContext in itself
  IOContext read_ioc(cct, nullptr);
  make_read_tasks(this, off, &read_ioc, buf, len, &t, off, len);
  dout(5) << __func__ << " " << off << "~" << len << dendl;
  aio_submit(&read_ioc);

  pbl->push_back(std::move(p));
  return t.return_code;
}
// Queue an asynchronous read into a fresh page-aligned buffer that is
// appended to |pbl| up front; completion fills it in place via the tasks'
// fill callbacks.  Submission happens later through aio_submit().
int NVMEDevice::aio_read(
    uint64_t off,
    uint64_t len,
    bufferlist *pbl,
    IOContext *ioc)
{
  dout(20) << __func__ << " " << off << "~" << len << " ioc " << ioc << dendl;
  ceph_assert(is_valid_io(off, len));

  bufferptr p = buffer::create_small_page_aligned(len);
  pbl->append(p);
  char *dst = p.c_str();
  make_read_tasks(this, off, ioc, dst, len, NULL, off, len);

  dout(5) << __func__ << " " << off << "~" << len << dendl;
  return 0;
}
// Synchronous read at an arbitrary (possibly unaligned) offset: the
// request is widened to block alignment, read via the sync-read Task
// protocol (return_code starts at 1, cleared by io_complete() on
// success), and make_read_tasks() copies only the caller's original
// [off, off+len) bytes into |buf|.
int NVMEDevice::read_random(uint64_t off, uint64_t len, char *buf, bool buffered)
{
  ceph_assert(len > 0);
  ceph_assert(off < size);
  ceph_assert(off + len <= size);

  uint64_t aligned_off = p2align(off, block_size);
  uint64_t aligned_len = p2roundup(off+len, block_size) - aligned_off;
  dout(5) << __func__ << " " << off << "~" << len
          << " aligned " << aligned_off << "~" << aligned_len << dendl;
  IOContext ioc(g_ceph_context, nullptr);
  Task t(this, IOCommand::READ_COMMAND, aligned_off, aligned_len, 1);
  make_read_tasks(this, aligned_off, &ioc, buf, aligned_len, &t, off, len);
  aio_submit(&ioc);

  return t.return_code;
}
// No-op: this backend keeps no page cache to invalidate.
int NVMEDevice::invalidate_cache(uint64_t off, uint64_t len)
{
  dout(5) << __func__ << " " << off << "~" << len << dendl;
  return 0;
}
| 28,078 | 27.276939 | 143 |
cc
|
null |
ceph-main/src/blk/spdk/NVMEDevice.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BLK_NVMEDEVICE
#define CEPH_BLK_NVMEDEVICE
#include <queue>
#include <map>
#include <limits>
// since _Static_assert introduced in c11
#define _Static_assert static_assert
#include "include/interval_set.h"
#include "common/ceph_time.h"
#include "BlockDevice.h"
// Command type carried by a Task through the SPDK submission/completion
// paths.
enum class IOCommand {
  READ_COMMAND,
  WRITE_COMMAND,
  FLUSH_COMMAND
};
class SharedDriverData;
class SharedDriverQueueData;
// BlockDevice backend that talks to an NVMe controller in userspace via
// SPDK.  One SharedDriverData (controller + namespace) may back several
// NVMEDevice instances; per-thread queue pairs are created on demand at
// submission time.
class NVMEDevice : public BlockDevice {
  /**
   * points to pinned, physically contiguous memory region;
   * contains 4KB IDENTIFY structure for controller which is
   *  target for CONTROLLER IDENTIFY command during initialization
   */
  SharedDriverData *driver;
  std::string name;   // transport address of the controller (set in open())

 public:
  SharedDriverData *get_driver() { return driver; }

  NVMEDevice(CephContext* cct, aio_callback_t cb, void *cbpriv);

  // SPDK devices carry no on-disk bdev label
  bool supported_bdev_label() override { return false; }

  // true when |path| is a symlink naming an SPDK device
  static bool support(const std::string& path);

  void aio_submit(IOContext *ioc) override;

  // synchronous block-aligned read
  int read(uint64_t off, uint64_t len, bufferlist *pbl,
           IOContext *ioc,
           bool buffered) override;
  int aio_read(
    uint64_t off,
    uint64_t len,
    bufferlist *pbl,
    IOContext *ioc) override;
  int aio_write(uint64_t off, bufferlist& bl,
                IOContext *ioc,
                bool buffered,
                int write_hint = WRITE_LIFE_NOT_SET) override;
  int write(uint64_t off, bufferlist& bl, bool buffered, int write_hint = WRITE_LIFE_NOT_SET) override;
  int flush() override;
  // synchronous read at an arbitrary (unaligned) offset
  int read_random(uint64_t off, uint64_t len, char *buf, bool buffered) override;

  // for managing buffered readers/writers
  int invalidate_cache(uint64_t off, uint64_t len) override;
  int open(const std::string& path) override;
  void close() override;
  int collect_metadata(const std::string& prefix, std::map<std::string,std::string> *pm) const override;
};
#endif
| 2,323 | 26.341176 | 104 |
h
|
null |
ceph-main/src/blk/zoned/HMSMRDevice.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2020 Abutalib Aghayev
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "HMSMRDevice.h"
extern "C" {
#include <libzbd/zbd.h>
}
#include "common/debug.h"
#include "common/errno.h"
#define dout_context cct
#define dout_subsys ceph_subsys_bdev
#undef dout_prefix
#define dout_prefix *_dout << "smrbdev(" << this << " " << path << ") "
using namespace std;
// Thin pass-through to KernelDevice; all zoned-specific setup happens in
// _post_open() once the underlying fd exists.
HMSMRDevice::HMSMRDevice(CephContext* cct,
                         aio_callback_t cb,
                         void *cbpriv,
                         aio_callback_t d_cb,
                         void *d_cbpriv)
  : KernelDevice(cct, cb, cbpriv, d_cb, d_cbpriv)
{
}
// A path is handled by this backend iff libzbd reports it as a zoned
// block device (zbd_device_is_zoned returns 1 for zoned, 0/negative
// otherwise).
bool HMSMRDevice::support(const std::string& path)
{
  return zbd_device_is_zoned(path.c_str()) == 1;
}
// Open the device a second time through libzbd and learn its layout: the
// zone size and the extent of the leading conventional (non-write-pointer)
// region.  Returns 0 on success, a negative errno on failure.
int HMSMRDevice::_post_open()
{
  dout(10) << __func__ << dendl;

  zbd_fd = zbd_open(path.c_str(), O_RDWR | O_DIRECT | O_LARGEFILE, nullptr);
  int r;
  if (zbd_fd < 0) {
    r = errno;
    derr << __func__ << " zbd_open failed on " << path << ": "
         << cpp_strerror(r) << dendl;
    return -r;
  }

  // count only conventional zones (ZBD_RO_NOT_WP) — they form the region
  // before the first sequential-write zone
  unsigned int nr_zones = 0;
  std::vector<zbd_zone> zones;
  if (zbd_report_nr_zones(zbd_fd, 0, 0, ZBD_RO_NOT_WP, &nr_zones) != 0) {
    r = -errno;
    derr << __func__ << " zbd_report_nr_zones failed on " << path << ": "
         << cpp_strerror(r) << dendl;
    goto fail;
  }

  zones.resize(nr_zones);
  if (zbd_report_zones(zbd_fd, 0, 0, ZBD_RO_NOT_WP, zones.data(), &nr_zones) != 0) {
    r = -errno;
    derr << __func__ << " zbd_report_zones failed on " << path << dendl;
    goto fail;
  }

  // NOTE(review): assumes the device reports at least one conventional
  // zone; zones[0] would be out of bounds when nr_zones == 0 — confirm
  // against supported hardware.
  zone_size = zbd_zone_len(&zones[0]);
  conventional_region_size = nr_zones * zone_size;

  dout(10) << __func__ << " setting zone size to " << zone_size
           << " and conventional region size to " << conventional_region_size
           << dendl;

  return 0;

fail:
  zbd_close(zbd_fd);
  zbd_fd = -1;
  return r;
}
// Close the libzbd handle if it was opened; safe to call when _post_open
// failed or never ran.
void HMSMRDevice::_pre_close()
{
  if (zbd_fd < 0) {
    return;
  }
  zbd_close(zbd_fd);
  zbd_fd = -1;
}
// Reset every sequential zone (length 0 = to device end), starting past
// the conventional region, which cannot be reset.  The libzbd return
// value is ignored here — best effort — unlike the per-zone reset_zone(),
// which aborts on failure.
void HMSMRDevice::reset_all_zones()
{
  dout(10) << __func__ << dendl;
  zbd_reset_zones(zbd_fd, conventional_region_size, 0);
}
// Reset a single zone, identified by its index (converted here to a byte
// offset).  A failed reset is unrecoverable for the caller, so abort.
void HMSMRDevice::reset_zone(uint64_t zone)
{
  dout(10) << __func__ << " zone 0x" << std::hex << zone << std::dec << dendl;
  int rc = zbd_reset_zones(zbd_fd, zone * zone_size, zone_size);
  if (rc != 0) {
    derr << __func__ << " resetting zone failed for zone 0x" << std::hex
         << zone << std::dec << dendl;
    ceph_abort("zbd_reset_zones failed");
  }
}
// Report all zones and return just their write pointers, indexed by zone
// number.  Aborts on a libzbd failure since callers cannot proceed
// without the zone state.
std::vector<uint64_t> HMSMRDevice::get_zones()
{
  std::vector<zbd_zone> zones;
  unsigned int num_zones = size / zone_size;
  zones.resize(num_zones);

  int r = zbd_report_zones(zbd_fd, 0, 0, ZBD_RO_ALL, zones.data(), &num_zones);
  if (r != 0) {
    derr << __func__ << " zbd_report_zones failed on " << path << ": "
         << cpp_strerror(errno) << dendl;
    ceph_abort("zbd_report_zones failed");
  }

  std::vector<uint64_t> wp(num_zones);
  for (unsigned i = 0; i < num_zones; ++i) {
    wp[i] = zones[i].wp;
  }
  return wp;
}
| 3,243 | 23.575758 | 84 |
cc
|
null |
ceph-main/src/blk/zoned/HMSMRDevice.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Red Hat
* Copyright (C) 2020 Abutalib Aghayev
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BLK_HMSMRDEVICE_H
#define CEPH_BLK_HMSMRDEVICE_H
#include <atomic>
#include "include/types.h"
#include "include/interval_set.h"
#include "common/Thread.h"
#include "include/utime.h"
#include "aio/aio.h"
#include "BlockDevice.h"
#include "../kernel/KernelDevice.h"
// KernelDevice specialization for host-managed SMR (zoned) block devices,
// backed by libzbd for zone reporting and resets.  Regular I/O still goes
// through the KernelDevice paths.
class HMSMRDevice final : public KernelDevice {
  int zbd_fd = -1;	///< fd for the zoned block device

public:
  HMSMRDevice(CephContext* cct, aio_callback_t cb, void *cbpriv,
	      aio_callback_t d_cb, void *d_cbpriv);

  // true when the path is a zoned block device per libzbd
  static bool support(const std::string& path);

  // open/close hooks for libzbd
  int _post_open() override;
  void _pre_close() override;

  // smr-specific methods
  bool is_smr() const final { return true; }
  void reset_all_zones() override;
  void reset_zone(uint64_t zone) override;
  std::vector<uint64_t> get_zones() override;
};
#endif //CEPH_BLK_HMSMRDEVICE_H
| 1,323 | 23.981132 | 70 |
h
|
null |
ceph-main/src/ceph-volume/setup.py
|
from setuptools import setup, find_packages
import os
# Package metadata for ceph-volume; the local python-common tree supplies
# the `ceph` requirement via a file:// dependency link.
setup(
    name='ceph-volume',
    version='1.0.0',
    packages=find_packages(),

    author='',
    author_email='[email protected]',
    description='Deploy Ceph OSDs using different device technologies like lvm or physical disks',
    license='LGPLv2+',
    keywords='ceph volume disk devices lvm',
    url="https://github.com/ceph/ceph",
    zip_safe=False,
    install_requires='ceph',
    dependency_links=[
        'file://' + os.path.join(os.getcwd(), '../', 'python-common#egg=ceph-1.0.0'),
    ],
    tests_require=[
        'pytest >=2.1.3',
        'tox',
        'ceph',
    ],
    entry_points={
        'console_scripts': [
            'ceph-volume = ceph_volume.main:Volume',
            'ceph-volume-systemd = ceph_volume.systemd:main',
        ],
    },
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'Operating System :: POSIX :: Linux',
        'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
py
|
null |
ceph-main/src/ceph-volume/tox_install_command.sh
|
#!/usr/bin/env bash
python -m pip install --editable="file://`pwd`/../python-common"
python -m pip install $@
| 110 | 26.75 | 64 |
sh
|
null |
ceph-main/src/ceph-volume/ceph_volume/__init__.py
|
from collections import namedtuple
sys_info = namedtuple('sys_info', ['devices'])
sys_info.devices = dict()
class UnloadedConfig(object):
    """
    This class is used as the default value for conf.ceph so that if
    a configuration file is not successfully loaded then it will give
    a nice error message when values from the config are used.
    """
    def __getattr__(self, *a):
        # any attribute access means code tried to read config values
        # before load() succeeded
        raise RuntimeError("No valid ceph configuration file was loaded.")
conf = namedtuple('config', ['ceph', 'cluster', 'verbosity', 'path', 'log_path'])
conf.ceph = UnloadedConfig()
__version__ = "1.0.0"
__release__ = "reef"
| 623 | 26.130435 | 81 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/configuration.py
|
import contextlib
import logging
import os
import re
from ceph_volume import terminal, conf
from ceph_volume import exceptions
from sys import version_info as sys_version_info
if sys_version_info.major >= 3:
import configparser
conf_parentclass = configparser.ConfigParser
elif sys_version_info.major < 3:
import ConfigParser as configparser
conf_parentclass = configparser.SafeConfigParser
else:
raise RuntimeError('Not expecting python version > 3 yet.')
logger = logging.getLogger(__name__)
class _TrimIndentFile(object):
    """File-like wrapper that strips leading spaces/tabs from each line.

    Some ceph configuration files indent options with tabs, which breaks
    ConfigParser; trimming on read avoids that.
    """

    def __init__(self, fp):
        self.fp = fp

    def readline(self):
        return self.fp.readline().lstrip(' \t')

    def __iter__(self):
        # iterate line by line until readline() returns '' (EOF)
        return iter(self.readline, '')
def load_ceph_conf_path(cluster_name='ceph'):
    """Record the cluster config file location on the shared ``conf`` object.

    ``CEPH_CONF`` in the environment overrides the conventional
    ``/etc/ceph/<cluster>.conf`` path.
    """
    default_path = '/etc/ceph/%s.conf' % cluster_name
    conf.path = os.environ.get('CEPH_CONF', default_path)
    conf.cluster = cluster_name
def load(abspath=None):
    """Parse a ceph.conf file and publish it as ``conf.ceph``.

    Defaults to the path recorded by ``load_ceph_conf_path``.  Raises
    ``ConfigurationError`` when the file is missing and ``RuntimeError``
    when it cannot be parsed (the parse error is also logged and printed).
    """
    if abspath is None:
        abspath = conf.path

    if not os.path.exists(abspath):
        raise exceptions.ConfigurationError(abspath=abspath)

    parser = Conf()

    try:
        ceph_file = open(abspath)
        # wrap the file so leading tabs don't break ConfigParser
        trimmed_conf = _TrimIndentFile(ceph_file)
        with contextlib.closing(ceph_file):
            parser.read_conf(trimmed_conf)
            conf.ceph = parser
            return parser
    except configparser.ParsingError as error:
        logger.exception('Unable to parse INI-style file: %s' % abspath)
        terminal.error(str(error))
        raise RuntimeError('Unable to read configuration file: %s' % abspath)
class Conf(conf_parentclass):
"""
Subclasses from ConfigParser to give a few helpers for Ceph
configuration.
"""
    def read_path(self, path):
        # remember where the config came from (useful for error reporting),
        # then delegate to ConfigParser.read()
        self.path = path
        return self.read(path)
    def is_valid(self):
        """Ensure the config defines ``[global] fsid``.

        Raises ``ConfigurationKeyError`` when the section or key is
        missing; a config without an fsid is unusable.
        """
        try:
            self.get('global', 'fsid')
        except (configparser.NoSectionError, configparser.NoOptionError):
            raise exceptions.ConfigurationKeyError('global', 'fsid')
def optionxform(self, s):
s = s.replace('_', ' ')
s = '_'.join(s.split())
return s
def get_safe(self, section, key, default=None, check_valid=True):
"""
Attempt to get a configuration value from a certain section
in a ``cfg`` object but returning None if not found. Avoids the need
to be doing try/except {ConfigParser Exceptions} every time.
"""
if check_valid:
self.is_valid()
try:
return self.get(section, key)
except (configparser.NoSectionError, configparser.NoOptionError):
return default
def get_list(self, section, key, default=None, split=','):
"""
Assumes that the value for a given key is going to be a list separated
by commas. It gets rid of trailing comments. If just one item is
present it returns a list with a single item, if no key is found an
empty list is returned.
Optionally split on other characters besides ',' and return a fallback
value if no items are found.
"""
self.is_valid()
value = self.get_safe(section, key, [])
if value == []:
if default is not None:
return default
return value
# strip comments
value = re.split(r'\s+#', value)[0]
# split on commas
value = value.split(split)
# strip spaces
return [x.strip() for x in value]
# XXX Almost all of it lifted from the original ConfigParser._read method,
# except for the parsing of '#' in lines. This is only a problem in Python 2.7, and can be removed
# once tooling is Python3 only with `Conf(inline_comment_prefixes=('#',';'))`
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
The sections in setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname].append(value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == 'DEFAULT':
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise configparser.MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
optname = self.optionxform(optname.rstrip())
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
# XXX Added support for '#' inline comments
if vi in ('=', ':') and (';' in optval or '#' in optval):
# strip comments
optval = re.split(r'\s+(;|#)', optval)[0]
# if what is left is comment as a value, fallback to an empty string
# that is: `foo = ;` would mean `foo` is '', which brings parity with
# what ceph-conf tool does
if optval in [';','#']:
optval = ''
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = configparser.ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
# join the multi-line values collected while reading
all_sections = [self._defaults]
all_sections.extend(self._sections.values())
for options in all_sections:
for name, val in options.items():
if isinstance(val, list):
options[name] = '\n'.join(val)
def read_conf(self, conffile):
if sys_version_info.major >= 3:
self.read_file(conffile)
elif sys_version_info.major < 3:
self.readfp(conffile)
else:
raise RuntimeError('Not expecting python version > 3 yet.')
| 8,831 | 36.905579 | 102 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/decorators.py
|
import os
import sys
from ceph_volume import terminal, exceptions
from functools import wraps
def needs_root(func):
    """
    Decorator that checks for super user privileges before running the
    wrapped callable, raising ``SuperUserError`` otherwise. The check can be
    bypassed by setting the ``CEPH_VOLUME_SKIP_NEEDS_ROOT`` environment
    variable.
    """
    @wraps(func)
    def enforce_root(*args, **kwargs):
        skip_check = os.environ.get('CEPH_VOLUME_SKIP_NEEDS_ROOT', False)
        if os.getuid() != 0 and not skip_check:
            raise exceptions.SuperUserError()
        return func(*args, **kwargs)
    return enforce_root
def catches(catch=None, handler=None, exit=True):
    """
    Decorator that traps the given exception(s) — a single class or a tuple
    of classes — around the wrapped callable, logging the traceback and
    printing a readable message instead of a raw traceback.

    Instead of doing something like this::

        def bar():
            try:
                some_call()
                print("Success!")
            except TypeError, exc:
                print("Error while handling some call: %s" % exc)
                sys.exit(1)

    You would need to decorate it like this to have the same effect::

        @catches(TypeError)
        def bar():
            some_call()
            print("Success!")

    If multiple exceptions need to be caught they need to be provided as a
    tuple::

        @catches((TypeError, AttributeError))
        def bar():
            some_call()
            print("Success!")

    :param handler: optional callable invoked with the exception; its return
                    value is returned instead of exiting
    :param exit: when True (default) and no handler is given, exit(1) after
                 printing the message
    """
    catch = catch or Exception

    def decorate(f):
        @wraps(f)
        def newfunc(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except catch as error:
                import logging
                logging.getLogger('ceph_volume').exception('exception caught by decorator')
                # CEPH_VOLUME_DEBUG re-raises so the full traceback reaches
                # the caller unmodified
                if os.environ.get('CEPH_VOLUME_DEBUG'):
                    raise
                if handler:
                    return handler(error)
                sys.stderr.write(make_exception_message(error))
                if exit:
                    sys.exit(1)
        return newfunc
    return decorate
#
# Decorator helpers
#
def make_exception_message(exc):
    """
    Build a terminal-friendly one-line message for *exc*, prefixed with a
    red arrow: the exception class name, plus its text when it has any.
    """
    name = exc.__class__.__name__
    if str(exc):
        return '%s %s: %s\n' % (terminal.red_arrow, name, exc)
    return '%s %s\n' % (terminal.red_arrow, name)
| 2,535 | 26.868132 | 93 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/exceptions.py
|
import os
class ConfigurationError(Exception):
    """Raised when a cluster configuration file cannot be loaded."""

    def __init__(self, cluster_name='ceph', path='/etc/ceph', abspath=None):
        self.cluster_name = cluster_name
        self.path = path
        if not abspath:
            abspath = '%s.conf' % os.path.join(path, cluster_name)
        self.abspath = abspath

    def __str__(self):
        return 'Unable to load expected Ceph config at: %s' % self.abspath
class ConfigurationSectionError(Exception):
    """Raised when an expected section is missing from a config file."""

    def __init__(self, section):
        self.section = section

    def __str__(self):
        return 'Unable to find expected configuration section: "{}"'.format(self.section)
class ConfigurationKeyError(Exception):
    """Raised when an expected key is missing from a config section."""

    def __init__(self, section, key):
        self.section = section
        self.key = key

    def __str__(self):
        template = 'Unable to find expected configuration key: "{}" from section "{}"'
        return template.format(self.key, self.section)
class SuffixParsingError(Exception):
    """Raised when a systemd unit suffix cannot be parsed."""

    def __init__(self, suffix, part=None):
        self.suffix = suffix
        self.part = part

    def __str__(self):
        return 'Unable to parse the {} from systemd suffix: {}'.format(self.part, self.suffix)
class SuperUserError(Exception):
    """Raised when a command requiring root is run without privileges."""

    def __str__(self):
        return 'This command needs to be executed with sudo or as root'
class SizeAllocationError(Exception):
    """Raised when a requested size exceeds the available free space."""

    def __init__(self, requested, available):
        self.requested = requested
        self.available = available

    def __str__(self):
        template = 'Unable to allocate size ({}), not enough free space ({})'
        return template.format(self.requested, self.available)
| 1,616 | 24.265625 | 90 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/log.py
|
import logging
import os
from ceph_volume import terminal
from ceph_volume import conf
BASE_FORMAT = "[%(name)s][%(levelname)-6s] %(message)s"
FILE_FORMAT = "[%(asctime)s]" + BASE_FORMAT
def setup(name='ceph-volume.log', log_path=None, log_level=None):
    # Attach a FileHandler to the root logger, writing to
    # ``log_path``/``conf.log_path`` and falling back to /tmp when the
    # target is not writable.
    log_path = log_path or conf.log_path
    # if a non-root user calls help or other no-sudo-required command the
    # logger will fail to write to /var/lib/ceph/ so this /tmp/ path is used as
    # a fallback
    tmp_log_file = os.path.join('/tmp/', name)
    root_logger = logging.getLogger()
    # The default path is where all ceph log files are, and will get rotated by
    # Ceph's logrotate rules.
    log_level = log_level or "DEBUG"
    # translate the level name (e.g. "debug") into the logging constant
    log_level = getattr(logging, log_level.upper())
    root_logger.setLevel(log_level)
    try:
        fh = logging.FileHandler(log_path)
    except (OSError, IOError) as err:
        # permission/IO problems opening the configured path: warn and
        # switch both the handler and conf.log_path to the /tmp fallback
        terminal.warning("Falling back to /tmp/ for logging. Can't use %s" % log_path)
        terminal.warning(str(err))
        conf.log_path = tmp_log_file
        fh = logging.FileHandler(tmp_log_file)
    fh.setLevel(log_level)
    fh.setFormatter(logging.Formatter(FILE_FORMAT))
    root_logger.addHandler(fh)
def setup_console():
    """
    Attach a DEBUG-level StreamHandler to the 'terminal' logger so its
    records reach the console with a ``[terminal]`` prefix.
    """
    # TODO: At some point ceph-volume should stop using the custom logger
    # interface that exists in terminal.py and use the logging module to
    # produce output for the terminal
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter('[terminal] %(message)s'))
    # allow all levels at root_logger, handlers control individual levels
    logging.getLogger('terminal').addHandler(handler)
| 1,718 | 33.38 | 86 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/main.py
|
from __future__ import print_function
import argparse
import os
import pkg_resources
import sys
import logging
from ceph_volume.decorators import catches
from ceph_volume import log, devices, configuration, conf, exceptions, terminal, inventory, drive_group, activate
class Volume(object):
    # Top-level CLI entry point: maps subcommand names to handler classes,
    # loads plugins, parses global flags and dispatches the rest.
    _help = """
ceph-volume: Deploy Ceph OSDs using different device technologies like lvm or
physical disks.
Log Path: {log_path}
Ceph Conf: {ceph_path}
{sub_help}
{plugins}
{environ_vars}
{warning}
"""
    def __init__(self, argv=None, parse=True):
        # built-in subcommand dispatch table; plugins may extend it later
        self.mapper = {
            'lvm': devices.lvm.LVM,
            'simple': devices.simple.Simple,
            'raw': devices.raw.Raw,
            'inventory': inventory.Inventory,
            'activate': activate.Activate,
            'drive-group': drive_group.Deploy,
        }
        self.plugin_help = "No plugins found/loaded"
        if argv is None:
            self.argv = sys.argv
        else:
            self.argv = argv
        if parse:
            self.main(self.argv)
    def help(self, warning=False):
        # Render the _help template with current paths, subcommand help,
        # plugin help and CEPH_* environment variables
        warning = 'See "ceph-volume --help" for full list of options.' if warning else ''
        return self._help.format(
            warning=warning,
            log_path=conf.log_path,
            ceph_path=self.stat_ceph_conf(),
            plugins=self.plugin_help,
            sub_help=terminal.subhelp(self.mapper),
            environ_vars=self.get_environ_vars()
        )
    def get_environ_vars(self):
        # Collect CEPH_*-prefixed environment variables for the help output;
        # returns '' when none are set
        environ_vars = []
        for key, value in os.environ.items():
            if key.startswith('CEPH_'):
                environ_vars.append("%s=%s" % (key, value))
        if not environ_vars:
            return ''
        else:
            environ_vars.insert(0, '\nEnviron Variables:')
            return '\n'.join(environ_vars)
    def enable_plugins(self):
        """
        Load all plugins available, add them to the mapper and extend the help
        string with the information from each one
        """
        plugins = _load_library_extensions()
        for plugin in plugins:
            self.mapper[plugin._ceph_volume_name_] = plugin
        self.plugin_help = '\n'.join(['%-19s %s\n' % (
            plugin.name, getattr(plugin, 'help_menu', ''))
            for plugin in plugins])
        if self.plugin_help:
            self.plugin_help = '\nPlugins:\n' + self.plugin_help
    def load_log_path(self):
        # default log directory, overridable via CEPH_VOLUME_LOG_PATH
        conf.log_path = os.getenv('CEPH_VOLUME_LOG_PATH', '/var/log/ceph')
    def stat_ceph_conf(self):
        # Return the config path colored green when loadable, or the error
        # colored red when it is not
        try:
            configuration.load(conf.path)
            return terminal.green(conf.path)
        except exceptions.ConfigurationError as error:
            return terminal.red(error)
    def _get_split_args(self):
        # Split argv into (main args, subcommand args) at the first token
        # that matches a known subcommand
        subcommands = self.mapper.keys()
        slice_on_index = len(self.argv) + 1
        pruned_args = self.argv[1:]
        for count, arg in enumerate(pruned_args):
            if arg in subcommands:
                slice_on_index = count
                break
        return pruned_args[:slice_on_index], pruned_args[slice_on_index:]
    @catches()
    def main(self, argv):
        # these need to be available for the help, which gets parsed super
        # early
        configuration.load_ceph_conf_path()
        self.load_log_path()
        self.enable_plugins()
        main_args, subcommand_args = self._get_split_args()
        # no flags where passed in, return the help menu instead of waiting for
        # argparse which will end up complaning that there are no args
        if len(argv) <= 1:
            print(self.help(warning=True))
            raise SystemExit(0)
        parser = argparse.ArgumentParser(
            prog='ceph-volume',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=self.help(),
        )
        parser.add_argument(
            '--cluster',
            default='ceph',
            help='Cluster name (defaults to "ceph")',
        )
        parser.add_argument(
            '--log-level',
            default='debug',
            choices=['debug', 'info', 'warning', 'error', 'critical'],
            help='Change the file log level (defaults to debug)',
        )
        parser.add_argument(
            '--log-path',
            default='/var/log/ceph/',
            help='Change the log path (defaults to /var/log/ceph)',
        )
        args = parser.parse_args(main_args)
        conf.log_path = args.log_path
        # a directory path gets the default file name appended
        if os.path.isdir(conf.log_path):
            conf.log_path = os.path.join(args.log_path, 'ceph-volume.log')
        log.setup(log_level=args.log_level)
        log.setup_console()
        logger = logging.getLogger(__name__)
        logger.info("Running command: ceph-volume %s %s", " ".join(main_args), " ".join(subcommand_args))
        # set all variables from args and load everything needed according to
        # them
        configuration.load_ceph_conf_path(cluster_name=args.cluster)
        try:
            conf.ceph = configuration.load(conf.path)
        except exceptions.ConfigurationError as error:
            # we warn only here, because it is possible that the configuration
            # file is not needed, or that it will be loaded by some other means
            # (like reading from lvm tags)
            logger.warning('ignoring inability to load ceph.conf', exc_info=1)
            terminal.yellow(error)
        # dispatch to sub-commands
        terminal.dispatch(self.mapper, subcommand_args)
def _load_library_extensions():
    """
    Locate all setuptools entry points registered under the
    'ceph_volume_handlers' group and initialize them.

    Any third-party library may register an entry point by adding the
    following to their setup.py::

        entry_points = {
            'ceph_volume_handlers': [
                'plugin_name = mylib.mymodule:Handler_Class',
            ],
        },

    `plugin_name` will be used to load it as a sub command.
    """
    plugin_logger = logging.getLogger('ceph_volume.plugins')
    loaded = []
    for entry_point in pkg_resources.iter_entry_points(group='ceph_volume_handlers'):
        try:
            plugin_logger.debug('loading %s' % entry_point.name)
            handler = entry_point.load()
            # stash the registered name so the dispatcher can map it
            handler._ceph_volume_name_ = entry_point.name
            loaded.append(handler)
        except Exception as error:
            # a broken plugin must not take the whole CLI down
            plugin_logger.exception("Error initializing plugin %s: %s" % (entry_point, error))
    return loaded
| 6,471 | 34.173913 | 113 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/process.py
|
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK, read, path
import subprocess
from select import select
from ceph_volume import terminal
from ceph_volume.util import as_bytes
from ceph_volume.util.system import which, run_host_cmd, host_rootfs
import logging
logger = logging.getLogger(__name__)
def log_output(descriptor, message, terminal_logging, logfile_logging):
    """
    Send a subprocess output line to the file logger and, when
    ``terminal_logging`` is enabled, to the matching terminal writer
    (``terminal.stdout`` / ``terminal.stderr``). Empty messages are ignored.
    """
    if not message:
        return
    stripped = message.strip()
    if terminal_logging:
        getattr(terminal, descriptor)(stripped)
    if logfile_logging:
        logger.info('%s %s' % (descriptor, stripped))
def log_descriptors(reads, process, terminal_logging):
    """
    Helper to send output to the terminal while polling the subprocess

    :param reads: file descriptors (from ``select``) that are ready to read
    :param process: the ``subprocess.Popen`` object owning stdout/stderr
    :param terminal_logging: forwarded to ``log_output`` to also echo lines
                             to the terminal
    """
    # these fcntl are set to O_NONBLOCK for the filedescriptors coming from
    # subprocess so that the logging does not block. Without these a prompt in
    # a subprocess output would hang and nothing would get printed. Note how
    # these are just set when logging subprocess, not globally.
    stdout_flags = fcntl(process.stdout, F_GETFL) # get current p.stdout flags
    stderr_flags = fcntl(process.stderr, F_GETFL) # get current p.stderr flags
    fcntl(process.stdout, F_SETFL, stdout_flags | O_NONBLOCK)
    fcntl(process.stderr, F_SETFL, stderr_flags | O_NONBLOCK)
    # map raw fd numbers back to the 'stdout'/'stderr' labels log_output uses
    descriptor_names = {
        process.stdout.fileno(): 'stdout',
        process.stderr.fileno(): 'stderr'
    }
    for descriptor in reads:
        descriptor_name = descriptor_names[descriptor]
        try:
            message = read(descriptor, 1024)
            if not isinstance(message, str):
                message = message.decode('utf-8')
            # always log to the file (last arg True); terminal echo is optional
            log_output(descriptor_name, message, terminal_logging, True)
        except (IOError, OSError):
            # nothing else to log
            pass
def obfuscate(command_, on=None):
    """
    Build the "Running command: ..." log message for *command_*, masking one
    argument with '*' characters when it should not be logged (e.g. keyrings
    passed on the CLI when creating OSDs).

    :param on: A string (will match a flag) or an integer (will match an index)

    If matching on a flag (when ``on`` is a string) it will obfuscate on the
    value for that flag. That is a command like ['ls', '-l', '/'] that calls
    `obfuscate(command, on='-l')` will obfustace '/' which is the value for
    `-l`.

    The reason for `on` to allow either a string or an integer, altering
    behavior for both is because it is easier for ``run`` and ``call`` to just
    pop a value to obfuscate (vs. allowing an index or a flag)
    """
    # work on a copy so the caller's command list is never mutated
    command = list(command_)
    msg = "Running command: %s" % ' '.join(command)
    if on in (None, False):
        return msg
    if isinstance(on, int):
        index = on
    else:
        try:
            index = command.index(on) + 1
        except ValueError:
            # the flag isn't present at all, nothing to mask
            return msg
    try:
        command[index] = '*' * len(command[index])
    except IndexError:
        # the index was completely out of range
        return msg
    return "Running command: %s" % ' '.join(command)
def run(command, run_on_host=False, **kw):
    """
    A real-time-logging implementation of a remote subprocess.Popen call where
    a command is just executed on the remote end and no other handling is done.

    :param command: The command to pass in to the remote subprocess.Popen as a list
    :param stop_on_error: If a nonzero exit status is return, it raises a ``RuntimeError``
    :param fail_msg: If a nonzero exit status is returned this message will be included in the log
    """
    # resolve the executable path (optionally for running on the host)
    executable = which(command.pop(0), run_on_host)
    command.insert(0, executable)
    if run_on_host and path.isdir(host_rootfs):
        command = run_host_cmd + command
    stop_on_error = kw.pop('stop_on_error', True)
    # mask a sensitive argument in the logged message if requested
    command_msg = obfuscate(command, kw.pop('obfuscate', None))
    fail_msg = kw.pop('fail_msg', None)
    logger.info(command_msg)
    terminal.write(command_msg)
    terminal_logging = kw.pop('terminal_logging', True)
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True,
        **kw
    )
    # poll stdout/stderr with select() and stream output as it arrives
    while True:
        reads, _, _ = select(
            [process.stdout.fileno(), process.stderr.fileno()],
            [], []
        )
        log_descriptors(reads, process, terminal_logging)
        if process.poll() is not None:
            # ensure we do not have anything pending in stdout or stderr
            log_descriptors(reads, process, terminal_logging)
            break
    returncode = process.wait()
    if returncode != 0:
        msg = "command returned non-zero exit status: %s" % returncode
        if fail_msg:
            logger.warning(fail_msg)
            if terminal_logging:
                terminal.warning(fail_msg)
        if stop_on_error:
            raise RuntimeError(msg)
        else:
            # non-fatal failure: warn on terminal/log but keep going
            if terminal_logging:
                terminal.warning(msg)
            logger.warning(msg)
def call(command, run_on_host=False, **kw):
    """
    Similar to ``subprocess.Popen`` with the following changes:

    * returns stdout, stderr, and exit code (vs. just the exit code)
    * logs the full contents of stderr and stdout (separately) to the file log

    By default, no terminal output is given, not even the command that is going
    to run.

    Useful when system calls are needed to act on output, and that same output
    shouldn't get displayed on the terminal.

    Optionally, the command can be displayed on the terminal and the log file,
    and log file output can be turned off. This is useful to prevent sensitive
    output going to stderr/stdout and being captured on a log file.

    :param terminal_verbose: Log command output to terminal, defaults to False, and
                             it is forcefully set to True if a return code is non-zero
    :param logfile_verbose: Log stderr/stdout output to log file. Defaults to True
    :param verbose_on_failure: On a non-zero exit status, it will forcefully set logging ON for
                               the terminal. Defaults to True
    """
    # resolve the executable path (optionally for running on the host)
    executable = which(command.pop(0), run_on_host)
    command.insert(0, executable)
    if run_on_host and path.isdir(host_rootfs):
        command = run_host_cmd + command
    terminal_verbose = kw.pop('terminal_verbose', False)
    logfile_verbose = kw.pop('logfile_verbose', True)
    verbose_on_failure = kw.pop('verbose_on_failure', True)
    show_command = kw.pop('show_command', False)
    command_msg = "Running command: %s" % ' '.join(command)
    stdin = kw.pop('stdin', None)
    logger.info(command_msg)
    if show_command:
        terminal.write(command_msg)
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        close_fds=True,
        **kw
    )
    if stdin:
        # feed stdin (encoded to bytes) and collect both streams at once
        stdout_stream, stderr_stream = process.communicate(as_bytes(stdin))
    else:
        stdout_stream = process.stdout.read()
        stderr_stream = process.stderr.read()
    returncode = process.wait()
    if not isinstance(stdout_stream, str):
        stdout_stream = stdout_stream.decode('utf-8')
    if not isinstance(stderr_stream, str):
        stderr_stream = stderr_stream.decode('utf-8')
    stdout = stdout_stream.splitlines()
    stderr = stderr_stream.splitlines()

    if returncode != 0:
        # set to true so that we can log the stderr/stdout that callers would
        # do anyway as long as verbose_on_failure is set (defaults to True)
        if verbose_on_failure:
            terminal_verbose = True
        # logfiles aren't disruptive visually, unlike the terminal, so this
        # should always be on when there is a failure
        logfile_verbose = True

    # the following can get a messed up order in the log if the system call
    # returns output with both stderr and stdout intermingled. This separates
    # that.
    for line in stdout:
        log_output('stdout', line, terminal_verbose, logfile_verbose)
    for line in stderr:
        log_output('stderr', line, terminal_verbose, logfile_verbose)
    return stdout, stderr, returncode
| 8,463 | 35.8 | 98 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/terminal.py
|
import logging
import sys
terminal_logger = logging.getLogger('terminal')
class colorize(str):
    """
    A ``str`` subclass that exposes ANSI-colored variants of itself.

    Pretty simple to use::

        colorize.make('foo').bold
        colorize.make('foo').green
        colorize.make('foo').yellow
        colorize.make('foo').red
        colorize.make('foo').blue

    Otherwise you could go the long way (for example if you are
    testing this class)::

        string = colorize('foo')
        string._set_attributes()
        string.red
    """

    def __init__(self, string):
        self.appends = ''
        self.prepends = ''
        # colors are only emitted when stderr is attached to a terminal
        self.isatty = sys.__stderr__.isatty()

    def _set_attributes(self):
        """
        Create one attribute per color name, since the ``str`` constructor
        does not let us pass anything besides the string itself.
        """
        for name, code in self.__colors__.items():
            setattr(self, name, self.make_color(code))

    def make_color(self, color):
        # no tty: return the plain string unchanged
        if not self.isatty:
            return self
        return '%s%s\033[0m%s' % (color, self, self.appends)

    @property
    def __colors__(self):
        return {
            'blue': '\033[34m',
            'green': '\033[92m',
            'yellow': '\033[33m',
            'red': '\033[91m',
            'bold': '\033[1m',
            'ends': '\033[0m',
        }

    @classmethod
    def make(cls, string):
        """
        A helper method to return itself and workaround the fact that
        the str object doesn't allow extra arguments passed in to the
        constructor
        """
        colored = cls(string)
        colored._set_attributes()
        return colored
#
# Common string manipulations
#
# Thin wrappers over ``colorize.make``: each returns the input string wrapped
# in the matching ANSI color when stderr is a tty, or the plain string when not.
yellow = lambda x: colorize.make(x).yellow  # noqa
blue = lambda x: colorize.make(x).blue  # noqa
green = lambda x: colorize.make(x).green  # noqa
red = lambda x: colorize.make(x).red  # noqa
bold = lambda x: colorize.make(x).bold  # noqa
# Pre-built, colored '--> ' prefixes used when writing terminal messages
red_arrow = red('--> ')
blue_arrow = blue('--> ')
green_arrow = green('--> ')
yellow_arrow = yellow('--> ')
class _Write(object):
def __init__(self, _writer=None, prefix='', suffix='', flush=False):
# we can't set sys.stderr as the default for _writer. otherwise
# pytest's capturing gets confused
self._writer = _writer or sys.stderr
self.suffix = suffix
self.prefix = prefix
self.flush = flush
def bold(self, string):
self.write(bold(string))
def raw(self, string):
if not string.endswith('\n'):
string = '%s\n' % string
self.write(string)
def write(self, line):
entry = self.prefix + line + self.suffix
try:
self._writer.write(entry)
if self.flush:
self._writer.flush()
except (UnicodeDecodeError, UnicodeEncodeError):
try:
terminal_logger.info(entry.strip('\n'))
except (AttributeError, TypeError):
terminal_logger.info(entry)
# Module-level convenience writers: each sends ``msg`` to stderr through
# ``_Write.raw`` with a distinctive colored prefix.
def stdout(msg):
    # echo a subprocess stdout line with a blue ' stdout: ' prefix
    return _Write(prefix=blue(' stdout: ')).raw(msg)
def stderr(msg):
    # echo a subprocess stderr line with a yellow ' stderr: ' prefix
    return _Write(prefix=yellow(' stderr: ')).raw(msg)
def write(msg):
    # plain, unprefixed write
    return _Write().raw(msg)
def error(msg):
    return _Write(prefix=red_arrow).raw(msg)
def info(msg):
    return _Write(prefix=blue_arrow).raw(msg)
def debug(msg):
    # same prefix as info(); kept as a separate name for call-site clarity
    return _Write(prefix=blue_arrow).raw(msg)
def warning(msg):
    return _Write(prefix=yellow_arrow).raw(msg)
def success(msg):
    return _Write(prefix=green_arrow).raw(msg)
class MultiLogger(object):
    """
    Proxy class to be able to report on both logger instances and terminal
    messages avoiding the issue of having to call them both separately

    Initialize it in the same way a logger object::

        logger = terminal.MultiLogger(__name__)
    """

    def __init__(self, name):
        self.logger = logging.getLogger(name)

    def _make_record(self, msg, *args):
        """
        %-interpolate ``args`` into ``msg`` when args were passed, returning
        ``msg`` unchanged otherwise (or when interpolation fails).
        """
        # NOTE: the previous check was ``if len(str(args)):`` which is always
        # truthy (str(()) == '()'), so zero-arg messages were interpolated
        # too and a literal '%' in them raised an uncaught ValueError.
        if args:
            try:
                return msg % args
            except TypeError:
                self.logger.exception('unable to produce log record: %s' % msg)
        return msg

    def warning(self, msg, *args):
        record = self._make_record(msg, *args)
        warning(record)
        self.logger.warning(record)

    def debug(self, msg, *args):
        record = self._make_record(msg, *args)
        debug(record)
        self.logger.debug(record)

    def info(self, msg, *args):
        record = self._make_record(msg, *args)
        info(record)
        self.logger.info(record)

    def error(self, msg, *args):
        record = self._make_record(msg, *args)
        error(record)
        self.logger.error(record)
def dispatch(mapper, argv=None):
    """
    Scan ``argv`` for the first token that matches a subcommand in
    ``mapper``, instantiate that handler with the remaining arguments, call
    its ``main()`` when it has one, and exit with status 0.
    """
    argv = argv or sys.argv
    for position, token in enumerate(argv, 1):
        if token not in mapper:
            continue
        subcommand = mapper[token](argv[position:])
        if hasattr(subcommand, 'main'):
            subcommand.main()
        raise SystemExit(0)
def subhelp(mapper):
    """
    Look at every value of every key in the mapper and will output any
    ``class.help`` possible to return it as a string that will be sent to
    stderr.
    """
    entries = []
    for name, handler in mapper.items():
        try:
            help_text = handler.help
        except AttributeError:
            # handlers without a ``help`` attribute are simply skipped
            continue
        entries.append("%-24s %s" % (name, help_text))
    if not entries:
        return ''
    return "Available subcommands:\n\n%s" % '\n'.join(entries)
| 5,516 | 24.660465 | 79 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/activate/__init__.py
|
from .main import Activate # noqa
| 34 | 16.5 | 33 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/activate/main.py
|
# -*- coding: utf-8 -*-
import argparse
from ceph_volume import terminal
from ceph_volume.devices.lvm.activate import Activate as LVMActivate
from ceph_volume.devices.raw.activate import Activate as RAWActivate
from ceph_volume.devices.simple.activate import Activate as SimpleActivate
class Activate(object):
    # Technology-agnostic activation: tries raw, then lvm, then simple
    # activation in order, stopping at the first that succeeds.
    help = "Activate an OSD"
    def __init__(self, argv):
        self.argv = argv
    def main(self):
        parser = argparse.ArgumentParser(
            prog='ceph-volume activate',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=self.help,
        )
        parser.add_argument(
            '--osd-id',
            help='OSD ID to activate'
        )
        parser.add_argument(
            '--osd-uuid',
            help='OSD UUID to activate'
        )
        parser.add_argument(
            '--no-systemd',
            dest='no_systemd',
            action='store_true',
            help='Skip creating and enabling systemd units and starting OSD services'
        )
        parser.add_argument(
            '--no-tmpfs',
            action='store_true',
            help='Do not use a tmpfs mount for OSD data dir'
        )
        self.args = parser.parse_args(self.argv)

        # first try raw
        try:
            RAWActivate([]).activate(
                devs=None,
                start_osd_id=self.args.osd_id,
                start_osd_uuid=self.args.osd_uuid,
                tmpfs=not self.args.no_tmpfs,
                systemd=not self.args.no_systemd,
            )
            return
        except Exception as e:
            terminal.info(f'Failed to activate via raw: {e}')

        # then try lvm
        try:
            LVMActivate([]).activate(
                argparse.Namespace(
                    osd_id=self.args.osd_id,
                    osd_fsid=self.args.osd_uuid,
                    no_tmpfs=self.args.no_tmpfs,
                    no_systemd=self.args.no_systemd,
                )
            )
            return
        except Exception as e:
            terminal.info(f'Failed to activate via LVM: {e}')

        # then try simple
        try:
            SimpleActivate([]).activate(
                argparse.Namespace(
                    osd_id=self.args.osd_id,
                    osd_fsid=self.args.osd_uuid,
                    no_systemd=self.args.no_systemd,
                )
            )
            return
        except Exception as e:
            terminal.info(f'Failed to activate via simple: {e}')

        # all three strategies failed
        terminal.error('Failed to activate any OSD(s)')
| 2,588 | 29.104651 | 85 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/api/__init__.py
|
"""
Device API that can be shared among other implementations.
"""
| 67 | 16 | 58 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/api/lvm.py
|
"""
API for CRUD lvm tag operations. Follows the Ceph LVM tag naming convention
that prefixes tags with ``ceph.`` and uses ``=`` for assignment, and provides
set of utilities for interacting with LVM.
"""
import logging
import os
import uuid
from itertools import repeat
from math import floor
from ceph_volume import process, util, conf
from ceph_volume.exceptions import SizeAllocationError
logger = logging.getLogger(__name__)
def convert_filters_to_str(filters):
    """
    Convert a dict of filter args to the LVM CLI format -
    filter_name=filter_val,...

    Falsy input (None, empty dict) is returned unchanged.
    """
    if not filters:
        return filters
    return ','.join('%s=%s' % (name, value) for name, value in filters.items())
def convert_tags_to_str(tags):
    """
    Convert a dict of tags to the LVM CLI format -
    tags={tag_name=tag_val,...}

    Falsy input (None, empty dict) is returned unchanged.
    """
    if not tags:
        return tags
    inner = ','.join('%s=%s' % (name, value) for name, value in tags.items())
    return 'tags={%s}' % inner
def make_filters_lvmcmd_ready(filters, tags):
    """
    Combine filters and tags (both dicts) into the single -S argument LVM
    expects::

        filter_name=filter_val...,tags={tag_name=tag_val,...}

    so a call ends up looking like::

        lvs -S filter_name=filter_val...,tags={tag_name=tag_val,...}
    """
    filters = convert_filters_to_str(filters)
    tags = convert_tags_to_str(tags)
    if filters and tags:
        return '%s,%s' % (filters, tags)
    # only one (or neither) present: return whichever is set, or ''
    return filters or tags or ''
def _output_parser(output, fields):
"""
Newer versions of LVM allow ``--reportformat=json``, but older versions,
like the one included in Xenial do not. LVM has the ability to filter and
format its output so we assume the output will be in a format this parser
can handle (using ';' as a delimiter)
:param fields: A string, possibly using ',' to group many items, as it
would be used on the CLI
:param output: The CLI output from the LVM call
"""
field_items = fields.split(',')
report = []
for line in output:
# clear the leading/trailing whitespace
line = line.strip()
# remove the extra '"' in each field
line = line.replace('"', '')
# prevent moving forward with empty contents
if not line:
continue
# splitting on ';' because that is what the lvm call uses as
# '--separator'
output_items = [i.strip() for i in line.split(';')]
# map the output to the fields
report.append(
dict(zip(field_items, output_items))
)
return report
def _splitname_parser(line):
"""
Parses the output from ``dmsetup splitname``, that should contain prefixes
(--nameprefixes) and set the separator to ";"
Output for /dev/mapper/vg-lv will usually look like::
DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''
The ``VG_NAME`` will usually not be what other callers need (e.g. just 'vg'
in the example), so this utility will split ``/dev/mapper/`` out, so that
the actual volume group name is kept
:returns: dictionary with stripped prefixes
"""
parsed = {}
try:
parts = line[0].split(';')
except IndexError:
logger.exception('Unable to parse mapper device: %s', line)
return parsed
for part in parts:
part = part.replace("'", '')
key, value = part.split('=')
if 'DM_VG_NAME' in key:
value = value.split('/dev/mapper/')[-1]
key = key.split('DM_')[-1]
parsed[key] = value
return parsed
def sizing(device_size, parts=None, size=None):
    """
    Calculate proper sizing to fully utilize the volume group in the most
    efficient way possible. To prevent situations where LVM might accept
    a percentage that is beyond the vg's capabilities, it will refuse with
    an error when requesting a larger-than-possible parameter, in addition
    to rounding down calculations.

    Returns a dictionary with ``parts``, ``percentages`` and ``sizes`` (the
    last expressed in gigabytes, assuming ``device_size`` is in bytes) so
    callers can pick whatever they need to create logical volumes.

    :param device_size: size of the device, in bytes
    :param parts: split the device into this many equal parts
    :param size: desired size per part, in bytes; mutually exclusive with parts
    :raises ValueError: if both ``parts`` and ``size`` are given
    :raises SizeAllocationError: if ``size`` exceeds ``device_size``
    """
    if parts is not None and size is not None:
        raise ValueError(
            "Cannot process sizing with both parts (%s) and size (%s)" % (parts, size)
        )
    if size and size > device_size:
        raise SizeAllocationError(size, device_size)

    def pct_for(count):
        # integer percentage of the device each part receives
        return int(floor(100 / float(count)))

    if parts is not None:
        # never allow 0 parts; fall back to a single part (100% usage)
        parts = parts or 1
        percentages = pct_for(parts)
    if size:
        parts = int(device_size / size) or 1
        percentages = pct_for(parts)
    per_part = device_size / parts if parts else int(floor(device_size))
    return {
        'parts': parts,
        'percentages': percentages,
        'sizes': int(per_part / 1024 / 1024 / 1024),
    }
def parse_tags(lv_tags):
    """
    Turn the comma-separated tag string coming from the LVM API into a
    dictionary, keeping only tags namespaced with ``ceph.``.

    Input looks like::

        "ceph.osd_fsid=aaa-fff-bbbb,ceph.osd_id=0"

    For the above example, the expected return value would be::

        {
            "ceph.osd_fsid": "aaa-fff-bbbb",
            "ceph.osd_id": "0"
        }
    """
    if not lv_tags:
        return {}
    # only the first '=' separates key from value; values may contain '='
    pairs = (
        assignment.split('=', 1)
        for assignment in lv_tags.split(',')
        if assignment.startswith('ceph.')
    )
    return {key: value for key, value in pairs}
def _vdo_parents(devices):
    """
    It is possible we didn't get a logical volume, or a mapper path, but
    a device like /dev/sda2. To resolve this, look at the slaves of every
    device under /sys/block; any parent with a slave listed in ``devices``
    is VDO-related and is added (both as a bare name and as a /dev path).
    """
    parents = []
    for candidate in os.listdir('/sys/block'):
        slaves_dir = '/sys/block/%s/slaves' % candidate
        for slave in os.listdir(slaves_dir):
            if slave in devices:
                parents.append('/dev/%s' % candidate)
                parents.append(candidate)
    return parents
def _vdo_slaves(vdo_names):
"""
find all the slaves associated with each vdo name (from realpath) by going
into /sys/block/<realpath>/slaves
"""
devices = []
for vdo_name in vdo_names:
mapper_path = '/dev/mapper/%s' % vdo_name
if not os.path.exists(mapper_path):
continue
# resolve the realpath and realname of the vdo mapper
vdo_realpath = os.path.realpath(mapper_path)
vdo_realname = vdo_realpath.split('/')[-1]
slaves_path = '/sys/block/%s/slaves' % vdo_realname
if not os.path.exists(slaves_path):
continue
devices.append(vdo_realpath)
devices.append(mapper_path)
devices.append(vdo_realname)
for slave in os.listdir(slaves_path):
devices.append('/dev/%s' % slave)
devices.append(slave)
return devices
def _is_vdo(path):
    """
    A VDO device can be composed from many different devices; walk each of
    those devices and their slaves (if any), correlate them back to
    /dev/mapper and their realpaths, and check whether they appear as part of
    /sys/kvdo/<name>/statistics.

    From the realpath of a logical volume, determine if it is a VDO device
    or not, by correlating it to the presence of the name in
    /sys/kvdo/<name>/statistics and all the previously captured devices
    """
    if not os.path.isdir('/sys/kvdo'):
        # no VDO sysfs tree present, nothing can be a VDO device
        return False
    real_path = os.path.realpath(path)
    real_name = real_path.split('/')[-1]
    # every directory with a statistics subdir under /sys/kvdo is a vdo name
    names = {
        entry for entry in os.listdir('/sys/kvdo/')
        if os.path.isdir('/sys/kvdo/%s/statistics' % entry)
    }
    # find all the slaves associated with each vdo name (from realpath) by
    # going into /sys/block/<realpath>/slaves
    related = _vdo_slaves(names)
    # Find all possible parents, looking into slaves that are related to VDO
    related.extend(_vdo_parents(related))
    return path in related or real_path in related or real_name in related
def is_vdo(path):
    """
    Detect if a path is backed by VDO, proxying the actual call to _is_vdo so
    that we can prevent an exception breaking OSD creation. If an exception is
    raised, it will get captured and logged to file, while returning
    a ``False``.
    """
    try:
        return '1' if _is_vdo(path) else '0'
    except Exception:
        logger.exception('Unable to properly detect device as VDO: %s', path)
        return '0'
def dmsetup_splitname(dev):
    """
    Run ``dmsetup splitname`` and parse the results.

    .. warning:: This call does not ensure that the device is correct or that
    it exists. ``dmsetup`` will happily take a non existing path and still
    return a 0 exit status.
    """
    out, _err, _rc = process.call([
        'dmsetup', 'splitname', '--noheadings',
        "--separator=';'", '--nameprefixes', dev
    ])
    return _splitname_parser(out)
def is_ceph_device(lv):
    """
    Check whether an LV belongs to Ceph: it must carry a ``ceph.osd_id`` tag
    and that tag must hold a real value, not the ``'null'`` placeholder used
    for freshly created, unassigned LVs.
    """
    try:
        osd_id = lv.tags['ceph.osd_id']
    except (KeyError, AttributeError):
        logger.warning('device is not part of ceph: %s', lv)
        return False
    return osd_id != 'null'
####################################
#
# Code for LVM Physical Volumes
#
################################
# Fields requested from `pvs -o` when building PVolume objects below
PV_FIELDS = 'pv_name,pv_tags,pv_uuid,vg_name,lv_uuid'
class PVolume(object):
    """
    Represents a Physical Volume from LVM, with some top-level attributes like
    ``pv_name`` and parsed tags as a dictionary of key/value pairs.
    """

    def __init__(self, **kw):
        # expose every reported field (pv_name, pv_uuid, ...) as an attribute,
        # keeping the raw report around in ``pv_api``
        for k, v in kw.items():
            setattr(self, k, v)
        self.pv_api = kw
        self.name = kw['pv_name']
        self.tags = parse_tags(kw['pv_tags'])

    def __str__(self):
        return '<%s>' % self.pv_api['pv_name']

    def __repr__(self):
        return self.__str__()

    def set_tags(self, tags):
        """
        :param tags: A dictionary of tag names and values, like::

            {
                "ceph.osd_fsid": "aaa-fff-bbbb",
                "ceph.osd_id": "0"
            }

        At the end of all modifications, the tags are refreshed to reflect
        LVM's most current view.

        :raises RuntimeError: if the PV can no longer be found after tagging
        """
        for k, v in tags.items():
            self.set_tag(k, v)
        # after setting all the tags, refresh them for the current object, use the
        # pv_* identifiers to filter because those shouldn't change.
        # BUG FIX: this previously called ``self.get_single_pv(filter=...)``
        # which could never work: ``get_single_pv`` is a module-level function
        # (not a method of this class) and its keyword argument is ``filters``
        pv_object = get_single_pv(filters={'pv_name': self.pv_name,
                                           'pv_uuid': self.pv_uuid})
        if not pv_object:
            raise RuntimeError('No PV was found.')
        self.tags = pv_object.tags

    def set_tag(self, key, value):
        """
        Set the key/value pair as an LVM tag. Does not "refresh" the values of
        the current object for its tags. Meant to be a "fire and forget" type
        of modification.

        **warning**: Altering tags on a PV has to be done ensuring that the
        device is actually the one intended. ``pv_name`` is *not* a persistent
        value, only ``pv_uuid`` is. Using ``pv_uuid`` is the best way to make
        sure the device getting changed is the one needed.
        """
        # remove it first if it exists
        if self.tags.get(key):
            current_value = self.tags[key]
            tag = "%s=%s" % (key, current_value)
            process.call(['pvchange', '--deltag', tag, self.pv_name], run_on_host=True)
        process.call(
            [
                'pvchange',
                '--addtag', '%s=%s' % (key, value), self.pv_name
            ],
            run_on_host=True
        )
def create_pv(device):
    """
    Turn ``device`` into an LVM physical volume, useful when devices need to
    be later mapped to journals. Runs verbose, forced, and auto-confirmed.
    """
    command = [
        'pvcreate',
        '-v',      # verbose
        '-f',      # force it
        '--yes',   # answer yes to any prompts
        device,
    ]
    process.run(command, run_on_host=True)
def remove_pv(pv_name):
    """
    Removes a physical volume using a double `-f` to prevent prompts and fully
    remove anything related to LVM. This is tremendously destructive, but so is all other actions
    when zapping a device.

    In the case where multiple PVs are found, it will ignore that fact and
    continue with the removal, specifically in the case of messages like::

        WARNING: PV $UUID /dev/DEV-1 was already found on /dev/DEV-2

    These situations can be avoided with custom filtering rules, which this API
    cannot handle while accommodating custom user filters.
    """
    # BUG FIX: the failure message previously said "vg" — this removes a PV
    fail_msg = "Unable to remove pv %s" % pv_name
    process.run(
        [
            'pvremove',
            '-v',  # verbose
            '-f',  # force it
            '-f',  # force it
            pv_name
        ],
        run_on_host=True,
        fail_msg=fail_msg,
    )
def get_pvs(fields=PV_FIELDS, filters='', tags=None):
    """
    Return a list of PVs that are available on the system and match the
    filters and tags passed. Argument filters takes a dictionary containing
    arguments required by -S option of LVM. Passing a list of LVM tags can be
    quite tricky to pass as a dictionary within dictionary, therefore pass
    dictionary of tags via tags argument and tricky part will be taken care of
    by the helper methods.

    :param fields: string containing list of fields to be displayed by the
                   pvs command
    :param filters: dictionary containing LVM filters
    :param tags: dictionary containing LVM tags
    :returns: list of class PVolume object representing pvs on the system
    """
    lvm_filters = make_filters_lvmcmd_ready(filters, tags)
    command = ['pvs', '--noheadings', '--readonly', '--separator=";"', '-S',
               lvm_filters, '-o', fields]
    stdout, _stderr, _rc = process.call(command, run_on_host=True,
                                        verbose_on_failure=False)
    return [PVolume(**report) for report in _output_parser(stdout, fields)]
def get_single_pv(fields=PV_FIELDS, filters=None, tags=None):
    """
    Convenience wrapper around get_pvs() for callers expecting at most one
    match: returns None when nothing matched, the single PVolume otherwise,
    and raises when the filters were ambiguous.
    """
    matches = get_pvs(fields=fields, filters=filters, tags=tags)
    if not matches:
        return None
    if len(matches) > 1:
        raise RuntimeError('Filters {} matched more than 1 PV present on this host.'.format(str(filters)))
    return matches[0]
################################
#
# Code for LVM Volume Groups
#
#############################
# Fields requested from `vgs -o` when building VolumeGroup objects below
VG_FIELDS = 'vg_name,pv_count,lv_count,vg_attr,vg_extent_count,vg_free_count,vg_extent_size'
# Shared reporting options: byte units without suffix, ';' field separator
VG_CMD_OPTIONS = ['--noheadings', '--readonly', '--units=b', '--nosuffix', '--separator=";"']
class VolumeGroup(object):
    """
    Represents an LVM group, with some top-level attributes like ``vg_name``
    """

    def __init__(self, **kw):
        # expose every reported field (vg_name, vg_extent_size, ...) as an
        # attribute of the object
        for k, v in kw.items():
            setattr(self, k, v)
        self.name = kw['vg_name']
        if not self.name:
            raise ValueError('VolumeGroup must have a non-empty name')
        self.tags = parse_tags(kw.get('vg_tags', ''))

    def __str__(self):
        return '<%s>' % self.name

    def __repr__(self):
        return self.__str__()

    @property
    def free(self):
        """
        Return free space in VG in bytes
        """
        return int(self.vg_extent_size) * int(self.vg_free_count)

    @property
    def free_percent(self):
        """
        Return the fraction of free extents in the VG (0.0 - 1.0).

        NOTE(review): despite the name, this is a ratio, not a percentage.
        """
        return int(self.vg_free_count) / int(self.vg_extent_count)

    @property
    def size(self):
        """
        Returns VG size in bytes
        """
        return int(self.vg_extent_size) * int(self.vg_extent_count)

    def sizing(self, parts=None, size=None):
        """
        Calculate proper sizing to fully utilize the volume group in the most
        efficient way possible. To prevent situations where LVM might accept
        a percentage that is beyond the vg's capabilities, it will refuse with
        an error when requesting a larger-than-possible parameter, in addition
        to rounding down calculations.

        A dictionary with different sizing parameters is returned, to make it
        easier for others to choose what they need in order to create logical
        volumes::

            >>> data_vg.free
            1024
            >>> data_vg.sizing(parts=4)
            {'parts': 4, 'sizes': 256, 'percentages': 25}
            >>> data_vg.sizing(size=512)
            {'parts': 2, 'sizes': 512, 'percentages': 50}

        :param parts: Number of parts to create LVs from
        :param size: Size in gigabytes to divide the VG into
        :raises SizeAllocationError: When requested size cannot be allocated
                                     from the available free space
        :raises ValueError: If both ``parts`` and ``size`` are given
        """
        if parts is not None and size is not None:
            raise ValueError(
                "Cannot process sizing with both parts (%s) and size (%s)" % (parts, size)
            )
        # if size is given we need to map that to extents so that we avoid
        # issues when trying to get this right with a size in gigabytes find
        # the percentage first, cheating, because these values are thrown out
        vg_free_count = util.str_to_int(self.vg_free_count)
        if size:
            # convert gigabytes to bytes before computing extents
            size = size * 1024 * 1024 * 1024
            extents = int(size / int(self.vg_extent_size))
            disk_sizing = sizing(self.free, size=size, parts=parts)
        else:
            if parts is not None:
                # Prevent parts being 0, falling back to 1 (100% usage)
                parts = parts or 1
            size = int(self.free / parts)
            extents = size * vg_free_count / self.free
            disk_sizing = sizing(self.free, parts=parts)
        # re-derive the percentage from extents so rounding matches LVM
        extent_sizing = sizing(vg_free_count, size=extents)
        disk_sizing['extents'] = int(extents)
        disk_sizing['percentages'] = extent_sizing['percentages']
        return disk_sizing

    def bytes_to_extents(self, size):
        '''
        Return a how many free extents we can fit into a size in bytes. This has
        some uncertainty involved. If size/extent_size is within 1% of the
        actual free extents we will return the extent count, otherwise we'll
        throw an error.
        This accommodates for the size calculation in batch. We need to report
        the OSD layout but have not yet created any LVM structures. We use the
        disk size in batch if no VG is present and that will overshoot the
        actual free_extent count due to LVM overhead.
        '''
        b_to_ext = int(size / int(self.vg_extent_size))
        if b_to_ext < int(self.vg_free_count):
            # return bytes in extents if there is more space
            return b_to_ext
        elif b_to_ext / int(self.vg_free_count) - 1 < 0.01:
            # return vg_free_count if it's less than 1% off
            logger.info(
                'bytes_to_extents results in {} but only {} '
                'are available, adjusting the latter'.format(b_to_ext,
                                                             self.vg_free_count))
            return int(self.vg_free_count)
        # else raise an exception
        raise RuntimeError('Can\'t convert {} to free extents, only {} ({} '
                           'bytes) are free'.format(size, self.vg_free_count,
                                                    self.free))

    def slots_to_extents(self, slots):
        '''
        Return how many extents fit the VG slot times
        '''
        return int(int(self.vg_extent_count) / slots)
def create_vg(devices, name=None, name_prefix=None):
    """
    Create a Volume Group. Command looks like::

        vgcreate --force --yes group_name device

    Once created the volume group is returned as a ``VolumeGroup`` object

    :param devices: A list of devices to create a VG. Optionally, a single
                    device (as a string) can be used.
    :param name: Optionally set the name of the VG, defaults to 'ceph-{uuid}'
    :param name_prefix: Optionally prefix the name of the VG, which will get combined
                        with a UUID string
    """
    if isinstance(devices, set):
        devices = list(devices)
    if not isinstance(devices, list):
        devices = [devices]
    # a uuid suffix keeps generated names unique across invocations
    if name_prefix:
        name = "%s-%s" % (name_prefix, str(uuid.uuid4()))
    elif name is None:
        name = "ceph-%s" % str(uuid.uuid4())
    command = ['vgcreate', '--force', '--yes', name] + devices
    process.run(command, run_on_host=True)
    return get_single_vg(filters={'vg_name': name})
def extend_vg(vg, devices):
    """
    Extend a Volume Group. Command looks like::

        vgextend --force --yes group_name [device, ...]

    The extended volume group is re-queried and returned as a fresh
    ``VolumeGroup`` object.

    :param vg: A VolumeGroup object
    :param devices: A list of devices to extend the VG. Optionally, a single
                    device (as a string) can be used.
    """
    if not isinstance(devices, list):
        devices = [devices]
    command = ['vgextend', '--force', '--yes', vg.name] + devices
    process.run(command, run_on_host=True)
    return get_single_vg(filters={'vg_name': vg.name})
def reduce_vg(vg, devices):
    """
    Reduce a Volume Group. Command looks like::

        vgreduce --force --yes group_name [device, ...]

    :param vg: A VolumeGroup object
    :param devices: A list of devices to remove from the VG. Optionally, a
                    single device (as a string) can be used.
    :returns: the refreshed ``VolumeGroup``, or None if it no longer matches
    """
    if not isinstance(devices, list):
        devices = [devices]
    process.run([
        'vgreduce',
        '--force',
        '--yes',
        vg.name] + devices,
        run_on_host=True
    )
    # BUG FIX: this previously passed ``filter=`` which is not a valid
    # keyword for get_single_vg() (it is named ``filters``), raising a
    # TypeError whenever a VG was reduced
    return get_single_vg(filters={'vg_name': vg.name})
def remove_vg(vg_name):
    """
    Removes a volume group. A falsy name is treated as invalid and skipped
    with a warning instead of being handed to ``vgremove``.
    """
    if not vg_name:
        logger.warning('Skipping removal of invalid VG name: "%s"', vg_name)
        return
    command = [
        'vgremove',
        '-v',  # verbose
        '-f',  # force it
        vg_name,
    ]
    process.run(
        command,
        run_on_host=True,
        fail_msg="Unable to remove vg %s" % vg_name,
    )
def get_vgs(fields=VG_FIELDS, filters='', tags=None):
    """
    Return a list of VGs that are available on the system and match the
    filters and tags passed. Argument filters takes a dictionary containing
    arguments required by -S option of LVM. Passing a list of LVM tags can be
    quite tricky to pass as a dictionary within dictionary, therefore pass
    dictionary of tags via tags argument and tricky part will be taken care of
    by the helper methods.

    :param fields: string containing list of fields to be displayed by the
                   vgs command
    :param filters: dictionary containing LVM filters
    :param tags: dictionary containing LVM tags
    :returns: list of class VolumeGroup object representing vgs on the system
    """
    lvm_filters = make_filters_lvmcmd_ready(filters, tags)
    command = ['vgs'] + VG_CMD_OPTIONS + ['-S', lvm_filters, '-o', fields]
    stdout, _stderr, _rc = process.call(command, run_on_host=True,
                                        verbose_on_failure=False)
    return [VolumeGroup(**report) for report in _output_parser(stdout, fields)]
def get_single_vg(fields=VG_FIELDS, filters=None, tags=None):
    """
    Convenience wrapper around get_vgs() for callers expecting at most one
    match: returns None when nothing matched, the single VolumeGroup
    otherwise, and raises when the filters were ambiguous.
    """
    matches = get_vgs(fields=fields, filters=filters, tags=tags)
    if not matches:
        return None
    if len(matches) > 1:
        raise RuntimeError('Filters {} matched more than 1 VG present on this host.'.format(str(filters)))
    return matches[0]
def get_device_vgs(device, name_prefix=''):
    """
    Return VolumeGroup objects for any VG present on ``device`` whose name
    starts with ``name_prefix``; report entries without a vg_name are skipped.
    """
    stdout, _stderr, _rc = process.call(
        ['pvs'] + VG_CMD_OPTIONS + ['-o', VG_FIELDS, device],
        run_on_host=True,
        verbose_on_failure=False
    )
    return [
        VolumeGroup(**vg)
        for vg in _output_parser(stdout, VG_FIELDS)
        if vg['vg_name'] and vg['vg_name'].startswith(name_prefix)
    ]
def get_all_devices_vgs(name_prefix=''):
    """
    Report every (pv_name, VG) pairing present on the system as VolumeGroup
    objects, skipping report entries with no vg_name.

    NOTE(review): ``name_prefix`` is accepted but not applied as a filter —
    confirm whether callers rely on receiving all VGs.
    """
    report_fields = f'pv_name,{VG_FIELDS}'
    stdout, _stderr, _rc = process.call(
        ['pvs'] + VG_CMD_OPTIONS + ['-o', report_fields],
        run_on_host=True,
        verbose_on_failure=False
    )
    return [VolumeGroup(**vg)
            for vg in _output_parser(stdout, report_fields)
            if vg['vg_name']]
#################################
#
# Code for LVM Logical Volumes
#
###############################
# Fields requested from `lvs -o` when building Volume objects below
LV_FIELDS = 'lv_tags,lv_path,lv_name,vg_name,lv_uuid,lv_size'
# Shared reporting options ('-a' also lists internal/hidden LVs)
LV_CMD_OPTIONS = ['--noheadings', '--readonly', '--separator=";"', '-a',
                  '--units=b', '--nosuffix']
class Volume(object):
    """
    Represents a Logical Volume from LVM, with some top-level attributes like
    ``lv_name`` and parsed tags as a dictionary of key/value pairs.
    """

    def __init__(self, **kw):
        # expose every reported field (lv_name, lv_path, lv_size, ...) as an
        # attribute, keeping the raw report around in ``lv_api``
        for k, v in kw.items():
            setattr(self, k, v)
        self.lv_api = kw
        self.name = kw['lv_name']
        if not self.name:
            raise ValueError('Volume must have a non-empty name')
        self.tags = parse_tags(kw['lv_tags'])
        self.encrypted = self.tags.get('ceph.encrypted', '0') == '1'
        self.used_by_ceph = 'ceph.osd_id' in self.tags

    def __str__(self):
        return '<%s>' % self.lv_api['lv_path']

    def __repr__(self):
        return self.__str__()

    def as_dict(self):
        """
        Return the raw LVM report fields plus parsed ``tags``, ``name``,
        ``type`` and ``path``. Raises KeyError when no ``ceph.type`` tag
        is present.
        """
        obj = {}
        obj.update(self.lv_api)
        obj['tags'] = self.tags
        obj['name'] = self.name
        obj['type'] = self.tags['ceph.type']
        obj['path'] = self.lv_path
        return obj

    def report(self):
        """
        Build a reporting dictionary: a short stub for LVs not used by ceph,
        or the full ceph metadata (osd id, fsids, device uuid) otherwise.
        """
        if not self.used_by_ceph:
            return {
                'name': self.lv_name,
                'comment': 'not used by ceph'
            }
        else:
            type_ = self.tags['ceph.type']
            report = {
                'name': self.lv_name,
                'osd_id': self.tags['ceph.osd_id'],
                'cluster_name': self.tags.get('ceph.cluster_name', conf.cluster),
                'type': type_,
                'osd_fsid': self.tags['ceph.osd_fsid'],
                'cluster_fsid': self.tags['ceph.cluster_fsid'],
                'osdspec_affinity': self.tags.get('ceph.osdspec_affinity', ''),
            }
            # e.g. for type 'block', report 'block_uuid' from 'ceph.block_uuid'
            type_uuid = '{}_uuid'.format(type_)
            report[type_uuid] = self.tags['ceph.{}'.format(type_uuid)]
            return report

    def _format_tag_args(self, op, tags):
        # build a flat argv chunk: [op, 'k1=v1', op, 'k2=v2', ...] where
        # ``op`` is '--addtag' or '--deltag'
        tag_args = ['{}={}'.format(k, v) for k, v in tags.items()]
        # weird but efficient way of zipping two lists and getting a flat list
        return list(sum(zip(repeat(op), tag_args), ()))

    def clear_tags(self, keys=None):
        """
        Removes all or passed tags from the Logical Volume.
        """
        if not keys:
            keys = self.tags.keys()
        del_tags = {k: self.tags[k] for k in keys if k in self.tags}
        if not del_tags:
            # nothing to clear
            return
        del_tag_args = self._format_tag_args('--deltag', del_tags)
        # --deltag returns successful even if the to be deleted tag is not set
        process.call(['lvchange'] + del_tag_args + [self.lv_path], run_on_host=True)
        for k in del_tags.keys():
            del self.tags[k]

    def set_tags(self, tags):
        """
        :param tags: A dictionary of tag names and values, like::

            {
                "ceph.osd_fsid": "aaa-fff-bbbb",
                "ceph.osd_id": "0"
            }

        At the end of all modifications, the tags are refreshed to reflect
        LVM's most current view.
        """
        self.clear_tags(tags.keys())
        add_tag_args = self._format_tag_args('--addtag', tags)
        process.call(['lvchange'] + add_tag_args + [self.lv_path], run_on_host=True)
        for k, v in tags.items():
            self.tags[k] = v

    def clear_tag(self, key):
        """Remove a single LVM tag from the LV, if currently set."""
        if self.tags.get(key):
            current_value = self.tags[key]
            tag = "%s=%s" % (key, current_value)
            process.call(['lvchange', '--deltag', tag, self.lv_path], run_on_host=True)
            del self.tags[key]

    def set_tag(self, key, value):
        """
        Set the key/value pair as an LVM tag.
        """
        # remove it first if it exists
        self.clear_tag(key)
        process.call(
            [
                'lvchange',
                '--addtag', '%s=%s' % (key, value), self.lv_path
            ],
            run_on_host=True
        )
        self.tags[key] = value

    def deactivate(self):
        """
        Deactivate the LV by calling lvchange -an
        """
        process.call(['lvchange', '-an', self.lv_path], run_on_host=True)
def create_lv(name_prefix,
              uuid,
              vg=None,
              device=None,
              slots=None,
              extents=None,
              size=None,
              tags=None):
    """
    Create a Logical Volume in a Volume Group. Command looks like::

        lvcreate -L 50G -n gfslv vg0

    ``name_prefix`` is required. If ``size`` is provided its expected to be a
    byte count. Tags are an optional dictionary and is expected to
    conform to the convention of prefixing them with "ceph." like::

        {"ceph.block_device": "/dev/ceph/osd-1"}

    :param name_prefix: name prefix for the LV, typically something like ceph-osd-block
    :param uuid: UUID to ensure uniqueness; is combined with name_prefix to
                 form the LV name
    :param vg: optional, pass an existing VG to create LV
    :param device: optional, device to use. Either device or vg must be passed
    :param slots: optional, number of slots to divide vg up, LV will occupy
                  one slot if enough space is available
    :param extents: optional, how many lvm extents to use, supersedes slots
    :param size: optional, target LV size in bytes, supersedes extents,
                 resulting LV might be smaller depending on extent
                 size of the underlying VG
    :param tags: optional, a dict of lvm tags to set on the LV
    """
    name = '{}-{}'.format(name_prefix, uuid)
    if not vg:
        if not device:
            raise RuntimeError("Must either specify vg or device, none given")
        # check if a vgs starting with ceph already exists
        vgs = get_device_vgs(device, 'ceph')
        if vgs:
            vg = vgs[0]
        else:
            # create one if not
            vg = create_vg(device, name_prefix='ceph')
    assert(vg)
    # precedence: explicit size wins over slots, slots over explicit extents
    if size:
        extents = vg.bytes_to_extents(size)
        logger.debug('size was passed: {} -> {}'.format(size, extents))
    elif slots and not extents:
        extents = vg.slots_to_extents(slots)
        logger.debug('slots was passed: {} -> {}'.format(slots, extents))
    if extents:
        command = [
            'lvcreate',
            '--yes',
            '-l',
            '{}'.format(extents),
            '-n', name, vg.vg_name
        ]
    # create the lv with all the space available, this is needed because the
    # system call is different for LVM
    else:
        command = [
            'lvcreate',
            '--yes',
            '-l',
            '100%FREE',
            '-n', name, vg.vg_name
        ]
    process.run(command, run_on_host=True)
    lv = get_single_lv(filters={'lv_name': name, 'vg_name': vg.vg_name})
    if tags is None:
        # "null" tags mark the LV as created but not yet assigned to an OSD
        tags = {
            "ceph.osd_id": "null",
            "ceph.type": "null",
            "ceph.cluster_fsid": "null",
            "ceph.osd_fsid": "null",
        }
    # when creating a distinct type, the caller doesn't know what the path will
    # be so this function will set it after creation using the mapping
    # XXX add CEPH_VOLUME_LVM_DEBUG to enable -vvvv on lv operations
    type_path_tag = {
        'data': 'ceph.data_device',
        'block': 'ceph.block_device',
        'wal': 'ceph.wal_device',
        'db': 'ceph.db_device',
        'lockbox': 'ceph.lockbox_device',  # XXX might not ever need this lockbox sorcery
    }
    path_tag = type_path_tag.get(tags.get('ceph.type'))
    if path_tag:
        tags.update({path_tag: lv.lv_path})
    lv.set_tags(tags)
    return lv
def create_lvs(volume_group, parts=None, size=None, name_prefix='ceph-lv'):
    """
    Create multiple Logical Volumes from a Volume Group by calculating the
    proper extents from ``parts`` or ``size``. A custom prefix can be used
    (defaults to ``ceph-lv``), these names are always suffixed with a uuid.

    LV creation in ceph-volume will require tags, this is expected to be
    pre-computed by callers who know Ceph metadata like OSD IDs and FSIDs. It
    will probably not be the case when mass-creating LVs, so common/default
    tags will be set to ``"null"``.

    .. note:: LVs that are not in use can be detected by querying LVM for tags that are
              set to ``"null"``.

    :param volume_group: The volume group (vg) to use for LV creation
    :type volume_group: ``VolumeGroup()`` object
    :param parts: Number of LVs to create *instead of* ``size``.
    :type parts: int
    :param size: Size (in gigabytes) of LVs to create, e.g. "as many 10gb LVs as possible"
    :type size: int
    :returns: list of the created ``Volume`` objects
    """
    if parts is None and size is None:
        # fallback to just one part (using 100% of the vg)
        parts = 1
    # "null" tags mark these LVs as created but not yet assigned to an OSD
    tags = {
        "ceph.osd_id": "null",
        "ceph.type": "null",
        "ceph.cluster_fsid": "null",
        "ceph.osd_fsid": "null",
    }
    # use a distinct local name so the module-level sizing() helper is not
    # shadowed; the previous per-iteration ``size = sizing['sizes']`` was
    # dead code and has been removed
    vg_sizing = volume_group.sizing(parts=parts, size=size)
    extents = vg_sizing['extents']
    return [
        create_lv(name_prefix, uuid.uuid4(), vg=volume_group,
                  extents=extents, tags=tags)
        for _ in range(vg_sizing['parts'])
    ]
def remove_lv(lv):
    """
    Removes a logical volume given it's absolute path.
    Will return True if the lv is successfully removed or
    raises a RuntimeError if the removal fails.

    :param lv: A ``Volume`` object or the path for an LV
    """
    path = lv.lv_path if isinstance(lv, Volume) else lv
    _stdout, _stderr, returncode = process.call(
        [
            'lvremove',
            '-v',  # verbose
            '-f',  # force it
            path
        ],
        run_on_host=True,
        show_command=True,
        terminal_verbose=True,
    )
    if returncode != 0:
        raise RuntimeError("Unable to remove %s" % path)
    return True
def get_lvs(fields=LV_FIELDS, filters='', tags=None):
    """
    Return a list of LVs that are available on the system and match the
    filters and tags passed. Argument filters takes a dictionary containing
    arguments required by -S option of LVM. Passing a list of LVM tags can be
    quite tricky to pass as a dictionary within dictionary, therefore pass
    dictionary of tags via tags argument and tricky part will be taken care of
    by the helper methods.

    :param fields: string containing list of fields to be displayed by the
                   lvs command
    :param filters: dictionary containing LVM filters
    :param tags: dictionary containing LVM tags
    :returns: list of class Volume object representing LVs on the system
    """
    lvm_filters = make_filters_lvmcmd_ready(filters, tags)
    command = ['lvs'] + LV_CMD_OPTIONS + ['-S', lvm_filters, '-o', fields]
    stdout, _stderr, _rc = process.call(command, run_on_host=True,
                                        verbose_on_failure=False)
    return [Volume(**report) for report in _output_parser(stdout, fields)]
def get_single_lv(fields=LV_FIELDS, filters=None, tags=None):
    """
    Convenience wrapper around get_lvs() for callers expecting at most one
    match: returns None when nothing matched, the single Volume otherwise,
    and raises when the filters were ambiguous.
    """
    matches = get_lvs(fields=fields, filters=filters, tags=tags)
    if not matches:
        return None
    if len(matches) > 1:
        raise RuntimeError('Filters {} matched more than 1 LV present on this host.'.format(str(filters)))
    return matches[0]
def get_lvs_from_osd_id(osd_id):
    """Return all LVs tagged with ``ceph.osd_id`` equal to ``osd_id``."""
    return get_lvs(tags={'ceph.osd_id': osd_id})
def get_single_lv_from_osd_id(osd_id):
    """Return the single LV tagged with ``ceph.osd_id``, or None/raise via get_single_lv."""
    return get_single_lv(tags={'ceph.osd_id': osd_id})
def get_lv_by_name(name):
    """
    Return Volume objects for every LV whose lv_name matches ``name``
    (LV names are not unique across VGs, so multiple results are possible).
    """
    # BUG FIX: '--separator=";"' is required because _output_parser splits
    # fields on ';'; without it lvs emits whitespace-separated fields that
    # cannot be parsed. This matches every other lvs/pvs call in this module.
    stdout, stderr, returncode = process.call(
        ['lvs', '--noheadings', '--separator=";"', '-o', LV_FIELDS, '-S',
         'lv_name={}'.format(name)],
        run_on_host=True,
        verbose_on_failure=False
    )
    lvs = _output_parser(stdout, LV_FIELDS)
    return [Volume(**lv) for lv in lvs]
def get_lvs_by_tag(lv_tag):
    """
    Return Volume objects for every LV carrying ``lv_tag`` ('-a' also
    includes internal LVs).
    """
    command = ['lvs', '--noheadings', '--separator=";"', '-a', '-o',
               LV_FIELDS, '-S', 'lv_tags={{{}}}'.format(lv_tag)]
    stdout, _stderr, _rc = process.call(
        command,
        run_on_host=True,
        verbose_on_failure=False
    )
    return [Volume(**report) for report in _output_parser(stdout, LV_FIELDS)]
def get_device_lvs(device, name_prefix=''):
    """
    Return Volume objects for LVs present on ``device`` whose name starts
    with ``name_prefix``; nameless report entries are skipped.
    """
    stdout, _stderr, _rc = process.call(
        ['pvs'] + LV_CMD_OPTIONS + ['-o', LV_FIELDS, device],
        run_on_host=True,
        verbose_on_failure=False
    )
    return [
        Volume(**lv)
        for lv in _output_parser(stdout, LV_FIELDS)
        if lv['lv_name'] and lv['lv_name'].startswith(name_prefix)
    ]
def get_lvs_from_path(devpath):
    """
    Resolve ``devpath`` to LVs. Absolute paths are first treated as block
    devices; if that yields nothing, fall back to matching LV paths such as
    /dev/vg_name/lv_name or /dev/mapper/ entries. Relative paths yield [].
    """
    if not os.path.isabs(devpath):
        return []
    # we have a block device
    found = get_device_lvs(devpath)
    # maybe this was a LV path /dev/vg_name/lv_name or /dev/mapper/
    return found or get_lvs(filters={'path': devpath})
def get_lv_by_fullname(full_name):
    """
    returns LV by the specified LV's full name (formatted as vg_name/lv_name);
    returns None when the spec does not contain exactly one '/'
    """
    try:
        vg_name, lv_name = full_name.split('/')
        found = get_single_lv(filters={'lv_name': lv_name,
                                       'vg_name': vg_name})
    except ValueError:
        found = None
    return found
| 39,582 | 31.659241 | 106 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/__init__.py
|
from . import lvm, simple, raw # noqa
| 38 | 18.5 | 37 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/__init__.py
|
from .main import LVM # noqa
| 29 | 14 | 28 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/activate.py
|
from __future__ import print_function
import argparse
import logging
import os
from textwrap import dedent
from ceph_volume import process, conf, decorators, terminal, configuration
from ceph_volume.util import system, disk
from ceph_volume.util import prepare as prepare_utils
from ceph_volume.util import encryption as encryption_utils
from ceph_volume.systemd import systemctl
from ceph_volume.api import lvm as api
from .listing import direct_report
logger = logging.getLogger(__name__)
def get_osd_device_path(osd_lvs, device_type, dmcrypt_secret=None):
    """
    ``device_type`` can be one of ``db``, ``wal`` or ``block`` so that we can
    query LVs on system and fallback to querying the uuid if that is not
    present.

    Return a path if possible, failing to do that a ``None``, since some of
    these devices are optional.

    :param osd_lvs: Volume objects belonging to one OSD
    :param device_type: 'db', 'wal' or 'block'
    :param dmcrypt_secret: key used to luksOpen devices when the block LV is
                           tagged as encrypted
    :raises RuntimeError: when a uuid tag exists but no device can be found
    """
    osd_block_lv = None
    # the 'block' LV carries the tags that describe the whole OSD
    for lv in osd_lvs:
        if lv.tags.get('ceph.type') == 'block':
            osd_block_lv = lv
            break
    if osd_block_lv:
        is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
        logger.debug('Found block device (%s) with encryption: %s', osd_block_lv.name, is_encrypted)
    # NOTE(review): if no 'block' LV exists, the next line raises
    # AttributeError on None.tags — confirm callers always provide one
    uuid_tag = 'ceph.%s_uuid' % device_type
    device_uuid = osd_block_lv.tags.get(uuid_tag)
    if not device_uuid:
        # this device type was never configured for the OSD; it is optional
        return None
    device_lv = None
    for lv in osd_lvs:
        if lv.tags.get('ceph.type') == device_type:
            device_lv = lv
            break
    if device_lv:
        if is_encrypted:
            # open the LUKS mapping so the OSD can use the decrypted device
            encryption_utils.luks_open(dmcrypt_secret, device_lv.lv_path, device_uuid)
            return '/dev/mapper/%s' % device_uuid
        return device_lv.lv_path
    # this could be a regular device, so query it with blkid
    physical_device = disk.get_device_from_partuuid(device_uuid)
    if physical_device:
        if is_encrypted:
            encryption_utils.luks_open(dmcrypt_secret, physical_device, device_uuid)
            return '/dev/mapper/%s' % device_uuid
        return physical_device
    raise RuntimeError('could not find %s with uuid %s' % (device_type, device_uuid))
def activate_bluestore(osd_lvs, no_systemd=False, no_tmpfs=False):
    """
    Mount and start a bluestore OSD from its LVs.

    Primes the OSD directory with ``ceph-bluestore-tool``, (re)creates the
    block/block.db/block.wal symlinks, opens dmcrypt mappings when the OSD is
    encrypted and, unless ``no_systemd`` is set, enables and starts the
    systemd units for the OSD.

    :param osd_lvs: all LVs belonging to the OSD; one must be tagged
        ``ceph.type=block``
    :param no_systemd: skip enabling/starting systemd units
    :param no_tmpfs: mount the OSD dir as a plain directory instead of tmpfs
    :raises RuntimeError: when no block LV is found among ``osd_lvs``
    """
    for lv in osd_lvs:
        if lv.tags.get('ceph.type') == 'block':
            osd_block_lv = lv
            break
    else:
        raise RuntimeError('could not find a bluestore OSD to activate')

    is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
    dmcrypt_secret = None
    osd_id = osd_block_lv.tags['ceph.osd_id']
    conf.cluster = osd_block_lv.tags['ceph.cluster_name']
    osd_fsid = osd_block_lv.tags['ceph.osd_fsid']
    configuration.load_ceph_conf_path(osd_block_lv.tags['ceph.cluster_name'])
    configuration.load()

    # mount on tmpfs the osd directory
    osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    if not system.path_is_mounted(osd_path):
        # mkdir -p and mount as tmpfs
        prepare_utils.create_osd_path(osd_id, tmpfs=not no_tmpfs)
    # XXX This needs to be removed once ceph-bluestore-tool can deal with
    # symlinks that exist in the osd dir
    for link_name in ['block', 'block.db', 'block.wal']:
        link_path = os.path.join(osd_path, link_name)
        if os.path.exists(link_path):
            os.unlink(os.path.join(osd_path, link_name))
    # encryption is handled here, before priming the OSD dir
    if is_encrypted:
        osd_lv_path = '/dev/mapper/%s' % osd_block_lv.lv_uuid
        lockbox_secret = osd_block_lv.tags['ceph.cephx_lockbox_secret']
        encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
        dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
        encryption_utils.luks_open(dmcrypt_secret, osd_block_lv.lv_path, osd_block_lv.lv_uuid)
    else:
        osd_lv_path = osd_block_lv.lv_path

    # db/wal paths resolve to None when the OSD has no separate devices
    db_device_path = get_osd_device_path(osd_lvs, 'db', dmcrypt_secret=dmcrypt_secret)
    wal_device_path = get_osd_device_path(osd_lvs, 'wal', dmcrypt_secret=dmcrypt_secret)

    # Once symlinks are removed, the osd dir can be 'primed again. chown first,
    # regardless of what currently exists so that ``prime-osd-dir`` can succeed
    # even if permissions are somehow messed up
    system.chown(osd_path)
    prime_command = [
        'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
        'prime-osd-dir', '--dev', osd_lv_path,
        '--path', osd_path, '--no-mon-config']
    process.run(prime_command)
    # always re-do the symlink regardless if it exists, so that the block,
    # block.wal, and block.db devices that may have changed can be mapped
    # correctly every time
    process.run(['ln', '-snf', osd_lv_path, os.path.join(osd_path, 'block')])
    system.chown(os.path.join(osd_path, 'block'))
    system.chown(osd_path)
    if db_device_path:
        destination = os.path.join(osd_path, 'block.db')
        process.run(['ln', '-snf', db_device_path, destination])
        system.chown(db_device_path)
        system.chown(destination)
    if wal_device_path:
        destination = os.path.join(osd_path, 'block.wal')
        process.run(['ln', '-snf', wal_device_path, destination])
        system.chown(wal_device_path)
        system.chown(destination)

    if no_systemd is False:
        # enable the ceph-volume unit for this OSD
        systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
        # enable the OSD
        systemctl.enable_osd(osd_id)
        # start the OSD
        systemctl.start_osd(osd_id)
    terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
class Activate(object):
    """
    CLI entry point for ``ceph-volume lvm activate``.

    Discovers the LVs that belong to an OSD (by ID and FSID tags), mounts the
    OSD directory and starts the OSD, either for a single OSD or for all OSDs
    found on the system (``--all``).
    """

    help = 'Discover and mount the LVM device associated with an OSD ID and start the Ceph OSD'

    def __init__(self, argv):
        self.argv = argv

    @decorators.needs_root
    def activate_all(self, args):
        """
        Activate every OSD that ``ceph-volume lvm list`` can discover,
        skipping OSDs whose systemd unit is already active (unless
        ``--no-systemd`` was given).
        """
        listed_osds = direct_report()
        osds = {}
        for osd_id, devices in listed_osds.items():
            # the metadata for all devices in each OSD will contain
            # the FSID which is required for activation
            for device in devices:
                fsid = device.get('tags', {}).get('ceph.osd_fsid')
                if fsid:
                    osds[fsid] = osd_id
                    break
        if not osds:
            terminal.warning('Was unable to find any OSDs to activate')
            terminal.warning('Verify OSDs are present with "ceph-volume lvm list"')
            return
        for osd_fsid, osd_id in osds.items():
            if not args.no_systemd and systemctl.osd_is_active(osd_id):
                terminal.warning(
                    'OSD ID %s FSID %s process is active. Skipping activation' % (osd_id, osd_fsid)
                )
            else:
                terminal.info('Activating OSD ID %s FSID %s' % (osd_id, osd_fsid))
                self.activate(args, osd_id=osd_id, osd_fsid=osd_fsid)

    @decorators.needs_root
    def activate(self, args, osd_id=None, osd_fsid=None):
        """
        Activate one OSD identified by ID and/or FSID.

        :param args: The parsed arguments coming from the CLI
        :param osd_id: When activating all, this gets populated with an
                       existing OSD ID
        :param osd_fsid: When activating all, this gets populated with an
                         existing OSD FSID
        :raises RuntimeError: when the identifiers are incomplete or no
            matching LVs are found
        """
        osd_id = osd_id if osd_id else args.osd_id
        osd_fsid = osd_fsid if osd_fsid else args.osd_fsid
        # an FSID alone is enough to find the LVs; an ID alone is ambiguous
        if osd_id and osd_fsid:
            tags = {'ceph.osd_id': osd_id, 'ceph.osd_fsid': osd_fsid}
        elif not osd_id and osd_fsid:
            tags = {'ceph.osd_fsid': osd_fsid}
        elif osd_id and not osd_fsid:
            raise RuntimeError('could not activate osd.{}, please provide the '
                               'osd_fsid too'.format(osd_id))
        else:
            raise RuntimeError('Please provide both osd_id and osd_fsid')
        lvs = api.get_lvs(tags=tags)
        if not lvs:
            raise RuntimeError('could not find osd.%s with osd_fsid %s' %
                               (osd_id, osd_fsid))

        # This argument is only available when passed in directly or via
        # systemd, not when ``create`` is being used
        # placeholder when a new objectstore support will be added
        if getattr(args, 'auto_detect_objectstore', False):
            logger.info('auto detecting objectstore')
            # BUGFIX: forward --no-tmpfs here as well; previously this branch
            # dropped the flag while the explicit branches below honored it
            return activate_bluestore(lvs, args.no_systemd,
                                      getattr(args, 'no_tmpfs', False))

        # explicit 'objectstore' flags take precedence
        if getattr(args, 'bluestore', False):
            activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
        elif any('ceph.block_device' in lv.tags for lv in lvs):
            activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))

    def main(self):
        """Parse CLI arguments and dispatch to activate/activate_all."""
        sub_command_help = dedent("""
        Activate OSDs by discovering them with LVM and mounting them in their
        appropriate destination:

            ceph-volume lvm activate {ID} {FSID}

        The lvs associated with the OSD need to have been prepared previously,
        so that all needed tags and metadata exist.

        When migrating OSDs, or a multiple-osd activation is needed, the
        ``--all`` flag can be used instead of the individual ID and FSID:

            ceph-volume lvm activate --all

        """)
        parser = argparse.ArgumentParser(
            prog='ceph-volume lvm activate',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )

        parser.add_argument(
            'osd_id',
            metavar='ID',
            nargs='?',
            help='The ID of the OSD, usually an integer, like 0'
        )
        parser.add_argument(
            'osd_fsid',
            metavar='FSID',
            nargs='?',
            help='The FSID of the OSD, similar to a SHA1'
        )
        parser.add_argument(
            '--auto-detect-objectstore',
            action='store_true',
            help='Autodetect the objectstore by inspecting the OSD',
        )
        parser.add_argument(
            '--bluestore',
            action='store_true',
            help='force bluestore objectstore activation',
        )
        parser.add_argument(
            '--all',
            dest='activate_all',
            action='store_true',
            help='Activate all OSDs found in the system',
        )
        parser.add_argument(
            '--no-systemd',
            dest='no_systemd',
            action='store_true',
            help='Skip creating and enabling systemd units and starting OSD services',
        )
        parser.add_argument(
            '--no-tmpfs',
            action='store_true',
            help='Do not use a tmpfs mount for OSD data dir'
        )
        if len(self.argv) == 0:
            print(sub_command_help)
            return
        args = parser.parse_args(self.argv)
        if args.activate_all:
            self.activate_all(args)
        else:
            self.activate(args)
| 11,005 | 38.028369 | 100 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/batch.py
|
import argparse
from collections import namedtuple
import json
import logging
from textwrap import dedent
from ceph_volume import terminal, decorators
from ceph_volume.util import disk, prompt_bool, arg_validators, templates
from ceph_volume.util import prepare
from . import common
from .create import Create
from .prepare import Prepare
mlogger = terminal.MultiLogger(__name__)
logger = logging.getLogger(__name__)
device_list_template = """
* {path: <25} {size: <10} {state}"""
def device_formatter(devices):
    """Render ``(path, details)`` pairs using the module's device template.

    ``details`` must provide ``human_readable_size`` and ``rotational``
    (the string ``'0'`` marks a solid-state drive).
    """
    rendered = [
        device_list_template.format(
            path=dev_path,
            size=dev_info['human_readable_size'],
            state='solid' if dev_info['rotational'] == '0' else 'rotational',
        )
        for dev_path, dev_info in devices
    ]
    return ''.join(rendered)
def ensure_disjoint_device_lists(data, db=None, wal=None):
    """
    Verify that no device appears in more than one of the given lists.

    :param data: data device paths
    :param db: db device paths (optional)
    :param wal: wal device paths (optional)
    :raises Exception: when any two lists share a device

    Mutable default arguments (``db=[]``, ``wal=[]``) were replaced with
    ``None`` sentinels; behavior is unchanged since the lists were never
    mutated, but this avoids the shared-default pitfall.
    """
    db = db if db is not None else []
    wal = wal if wal is not None else []
    data_set, db_set, wal_set = set(data), set(db), set(wal)
    # check that all device lists are disjoint with each other
    if not (data_set.isdisjoint(db_set) and
            data_set.isdisjoint(wal_set) and
            db_set.isdisjoint(wal_set)):
        raise Exception('Device lists are not disjoint')
def separate_devices_from_lvs(devices):
    """Split ``devices`` into (physical devices, LVs), preserving order.

    Each entry's ``is_device`` attribute decides the bucket it lands in.
    """
    physical = []
    logical = []
    for device in devices:
        bucket = physical if device.is_device else logical
        bucket.append(device)
    return physical, logical
def get_physical_osds(devices, args):
    '''
    Goes through passed physical devices and assigns OSDs

    Returns a list of Batch.OSD plans, one per OSD slot that fits on an
    available device. ``args`` must carry ``osds_per_device``, ``data_slots``,
    ``data_allocate_fraction``, ``osd_ids`` and ``dmcrypt``.

    Side effect: consumes entries from ``args.osd_ids`` (pop) as IDs are
    assigned to planned OSDs.
    '''
    data_slots = args.osds_per_device
    if args.data_slots:
        # more slots than osds-per-device leaves the extra slots unoccupied
        data_slots = max(args.data_slots, args.osds_per_device)
    rel_data_size = args.data_allocate_fraction / data_slots
    mlogger.debug('relative data size: {}'.format(rel_data_size))
    ret = []
    for dev in devices:
        if dev.available_lvm:
            dev_size = dev.vg_size[0]
            abs_size = disk.Size(b=int(dev_size * rel_data_size))
            free_size = dev.vg_free[0]
            for _ in range(args.osds_per_device):
                # stop once the device cannot fit another OSD of this size
                if abs_size > free_size:
                    break
                free_size -= abs_size.b
                osd_id = None
                if args.osd_ids:
                    osd_id = args.osd_ids.pop()
                ret.append(Batch.OSD(dev.path,
                                     rel_data_size,
                                     abs_size,
                                     args.osds_per_device,
                                     osd_id,
                                     'dmcrypt' if args.dmcrypt else None,
                                     dev.symlink))
    return ret
def get_lvm_osds(lvs, args):
    '''
    Goes through passed LVs and assigns planned osds

    Pre-existing LVs are consumed whole (100% relative size, one slot).
    LVs already used by ceph are skipped. Like get_physical_osds, this pops
    IDs off ``args.osd_ids`` as it assigns them.
    '''
    ret = []
    for lv in lvs:
        if lv.used_by_ceph:
            continue
        osd_id = None
        if args.osd_ids:
            osd_id = args.osd_ids.pop()
        osd = Batch.OSD("{}/{}".format(lv.vg_name, lv.lv_name),
                        100.0,
                        disk.Size(b=int(lv.lvs[0].lv_size)),
                        1,
                        osd_id,
                        'dmcrypt' if args.dmcrypt else None)
        ret.append(osd)
    return ret
def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, args):
    """
    Plan db/wal allocations on fast physical devices.

    :param devices: candidate fast devices
    :param type_: 'block_db' or 'block_wal' (used to look up --<type>-slots
        and --<type>-size in ``args``, and get_<type>_size in ceph.conf)
    :param fast_slots_per_device: how many OSDs should share each device
    :param new_osds: total number of allocations needed
    :return: list of (path, relative_size, absolute_size, requested_slots)
    """
    requested_slots = getattr(args, '{}_slots'.format(type_))
    if not requested_slots or requested_slots < fast_slots_per_device:
        if requested_slots:
            mlogger.info('{}_slots argument is too small, ignoring'.format(type_))
        requested_slots = fast_slots_per_device

    requested_size = getattr(args, '{}_size'.format(type_), 0)
    if not requested_size or requested_size == 0:
        # no size argument was specified, check ceph.conf
        get_size_fct = getattr(prepare, 'get_{}_size'.format(type_))
        requested_size = get_size_fct(lv_format=False)

    ret = []
    vg_device_map = group_devices_by_vg(devices)
    for vg_name, vg_devices in vg_device_map.items():
        for dev in vg_devices:
            if not dev.available_lvm:
                continue
            # any LV present is considered a taken slot
            occupied_slots = len(dev.lvs)
            # prior to v15.2.8, db/wal deployments were grouping multiple fast devices into single VGs - we need to
            # multiply requested_slots (per device) by the number of devices in the VG in order to ensure that
            # abs_size is calculated correctly from vg_size
            if vg_name == 'unused_devices':
                slots_for_vg = requested_slots
            else:
                if len(vg_devices) > 1:
                    slots_for_vg = len(args.devices)
                else:
                    slots_for_vg = len(vg_devices) * requested_slots
            dev_size = dev.vg_size[0]
            # this only looks at the first vg on device, unsure if there is a better
            # way
            abs_size = disk.Size(b=int(dev_size / slots_for_vg))
            free_size = dev.vg_free[0]
            relative_size = int(abs_size) / dev_size
            if requested_size:
                # an explicit size wins, but must fit within a slot
                if requested_size <= abs_size:
                    abs_size = requested_size
                    relative_size = int(abs_size) / dev_size
                else:
                    mlogger.error(
                        '{} was requested for {}, but only {} can be fulfilled'.format(
                            requested_size,
                            '{}_size'.format(type_),
                            abs_size,
                        ))
                    exit(1)
            # fill slots on this device until it runs out of space, slots,
            # or all needed allocations are planned
            while abs_size <= free_size and len(ret) < new_osds and occupied_slots < fast_slots_per_device:
                free_size -= abs_size.b
                occupied_slots += 1
                ret.append((dev.path, relative_size, abs_size, requested_slots))
    return ret
def group_devices_by_vg(devices):
    """Group devices by the name of their first VG.

    Devices without any VG are collected under the 'unused_devices' key,
    which is always present (possibly empty).
    """
    grouped = {'unused_devices': []}
    for device in devices:
        if device.vgs:
            # only the first VG on the device determines the group
            grouped.setdefault(device.vgs[0].name, []).append(device)
        else:
            grouped['unused_devices'].append(device)
    return grouped
def get_lvm_fast_allocs(lvs):
    """Return (path, rel_size, abs_size, slots) tuples for pre-made fast LVs.

    LVs whose journal is already used by ceph are skipped; the remaining
    ones are consumed whole (100%, one slot).
    """
    allocations = []
    for candidate in lvs:
        if candidate.journal_used_by_ceph:
            continue
        lv_path = "{}/{}".format(candidate.vg_name, candidate.lv_name)
        lv_size = disk.Size(b=int(candidate.lvs[0].lv_size))
        allocations.append((lv_path, 100.0, lv_size, 1))
    return allocations
class Batch(object):

    help = 'Automatically size devices for multi-OSD provisioning with minimal interaction'

    _help = dedent("""
    Automatically size devices ready for OSD provisioning based on default strategies.

    Usage:

        ceph-volume lvm batch [DEVICE...]

    Devices can be physical block devices or LVs.
    Optional reporting on possible outcomes is enabled with --report

        ceph-volume lvm batch --report [DEVICE...]
    """)

    def __init__(self, argv):
        # Build the full CLI for `ceph-volume lvm batch` and parse argv
        # immediately; the parsed namespace is kept on self.args.
        parser = argparse.ArgumentParser(
            prog='ceph-volume lvm batch',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=self._help,
        )

        parser.add_argument(
            'devices',
            metavar='DEVICES',
            nargs='*',
            type=arg_validators.ValidBatchDataDevice(),
            default=[],
            help='Devices to provision OSDs',
        )
        parser.add_argument(
            '--db-devices',
            nargs='*',
            type=arg_validators.ValidBatchDevice(),
            default=[],
            help='Devices to provision OSDs db volumes',
        )
        parser.add_argument(
            '--wal-devices',
            nargs='*',
            type=arg_validators.ValidBatchDevice(),
            default=[],
            help='Devices to provision OSDs wal volumes',
        )
        parser.add_argument(
            '--auto',
            action='store_true',
            help=('deploy multi-device OSDs if rotational and non-rotational drives '
                  'are passed in DEVICES'),
            default=True
        )
        parser.add_argument(
            '--no-auto',
            action='store_false',
            dest='auto',
            help=('deploy standalone OSDs if rotational and non-rotational drives '
                  'are passed in DEVICES'),
        )
        parser.add_argument(
            '--bluestore',
            action='store_true',
            help='bluestore objectstore (default)',
        )
        parser.add_argument(
            '--report',
            action='store_true',
            help='Only report on OSD that would be created and exit',
        )
        parser.add_argument(
            '--yes',
            action='store_true',
            help='Avoid prompting for confirmation when provisioning',
        )
        parser.add_argument(
            '--format',
            help='output format, defaults to "pretty"',
            default='pretty',
            choices=['json', 'json-pretty', 'pretty'],
        )
        parser.add_argument(
            '--dmcrypt',
            action='store_true',
            help='Enable device encryption via dm-crypt',
        )
        parser.add_argument(
            '--crush-device-class',
            dest='crush_device_class',
            help='Crush device class to assign this OSD to',
            default=""
        )
        parser.add_argument(
            '--no-systemd',
            dest='no_systemd',
            action='store_true',
            help='Skip creating and enabling systemd units and starting OSD services',
        )
        parser.add_argument(
            '--osds-per-device',
            type=int,
            default=1,
            help='Provision more than 1 (the default) OSD per device',
        )
        parser.add_argument(
            '--data-slots',
            type=int,
            help=('Provision more than 1 (the default) OSD slot per device'
                  ' if more slots then osds-per-device are specified, slots'
                  'will stay unoccupied'),
        )
        parser.add_argument(
            '--data-allocate-fraction',
            type=arg_validators.ValidFraction(),
            help='Fraction to allocate from data device (0,1.0]',
            default=1.0
        )
        parser.add_argument(
            '--block-db-size',
            type=disk.Size.parse,
            help='Set (or override) the "bluestore_block_db_size" value, in bytes'
        )
        parser.add_argument(
            '--block-db-slots',
            type=int,
            help='Provision slots on DB device, can remain unoccupied'
        )
        parser.add_argument(
            '--block-wal-size',
            type=disk.Size.parse,
            help='Set (or override) the "bluestore_block_wal_size" value, in bytes'
        )
        parser.add_argument(
            '--block-wal-slots',
            type=int,
            help='Provision slots on WAL device, can remain unoccupied'
        )
        parser.add_argument(
            '--prepare',
            action='store_true',
            help='Only prepare all OSDs, do not activate',
        )
        parser.add_argument(
            '--osd-ids',
            nargs='*',
            default=[],
            help='Reuse existing OSD ids',
            type=arg_validators.valid_osd_id
        )
        self.args = parser.parse_args(argv)
        self.parser = parser
        # initialize self.usable, self.db_usable and self.wal_usable
        for dev_list in ['', 'db_', 'wal_']:
            setattr(self, '{}usable'.format(dev_list), [])
    def report(self, plan):
        """Print a report of the given OSD plan in the selected --format."""
        report = self._create_report(plan)
        print(report)

    def _create_report(self, plan):
        """Render ``plan`` (a list of Batch.OSD) as pretty text or JSON."""
        if self.args.format == 'pretty':
            report = ''
            report += templates.total_osds.format(total_osds=len(plan))
            report += templates.osd_component_titles
            for osd in plan:
                report += templates.osd_header
                report += osd.report()
            return report
        else:
            json_report = []
            for osd in plan:
                json_report.append(osd.report_json())
            if self.args.format == 'json':
                return json.dumps(json_report)
            elif self.args.format == 'json-pretty':
                return json.dumps(json_report, indent=4,
                                  sort_keys=True)
def _check_slot_args(self):
'''
checking if -slots args are consistent with other arguments
'''
if self.args.data_slots and self.args.osds_per_device:
if self.args.data_slots < self.args.osds_per_device:
raise ValueError('data_slots is smaller then osds_per_device')
    def _sort_rotational_disks(self):
        '''
        Helper for legacy auto behaviour.
        Sorts drives into rotating and non-rotating, the latter being used for
        db.
        '''
        mlogger.warning('DEPRECATION NOTICE')
        mlogger.warning('You are using the legacy automatic disk sorting behavior')
        mlogger.warning('The Pacific release will change the default to --no-auto')
        rotating = []
        ssd = []
        for d in self.args.devices:
            rotating.append(d) if d.rotational else ssd.append(d)
        if ssd and not rotating:
            # no need for additional sorting, we'll only deploy standalone on ssds
            return
        # rewrites the parsed args: HDDs carry data, SSDs become db devices
        self.args.devices = rotating
        self.args.db_devices = ssd

    @decorators.needs_root
    def main(self):
        """Entry point: validate args, build the plan, report or execute it."""
        if not self.args.devices:
            return self.parser.print_help()

        # Default to bluestore here since defaulting it in add_argument may
        # cause both to be True
        if not self.args.bluestore:
            self.args.bluestore = True

        if (self.args.auto and not self.args.db_devices and not
            self.args.wal_devices):
            self._sort_rotational_disks()

        self._check_slot_args()

        ensure_disjoint_device_lists(self.args.devices,
                                     self.args.db_devices,
                                     self.args.wal_devices)

        plan = self.get_plan(self.args)

        if self.args.report:
            self.report(plan)
            return 0

        if not self.args.yes:
            self.report(plan)
            terminal.info('The above OSDs would be created if the operation continues')
            if not prompt_bool('do you want to proceed? (yes/no)'):
                terminal.error('aborting OSD provisioning')
                raise SystemExit(0)

        self._execute(plan)
    def _execute(self, plan):
        """Run prepare (or create) for every planned OSD in ``plan``."""
        defaults = common.get_default_args()
        # these CLI flags apply to every OSD in the batch
        global_args = [
            'bluestore',
            'dmcrypt',
            'crush_device_class',
            'no_systemd',
        ]
        defaults.update({arg: getattr(self.args, arg) for arg in global_args})
        for osd in plan:
            args = osd.get_args(defaults)
            if self.args.prepare:
                p = Prepare([])
                p.safe_prepare(argparse.Namespace(**args))
            else:
                c = Create([])
                c.create(argparse.Namespace(**args))

    def get_plan(self, args):
        """Return the deployment plan (list of Batch.OSD) for ``args``."""
        # NOTE(review): ``plan`` is only assigned in the bluestore branch;
        # main() forces args.bluestore=True before calling, otherwise this
        # would raise NameError.
        if args.bluestore:
            plan = self.get_deployment_layout(args, args.devices, args.db_devices,
                                              args.wal_devices)
        return plan
    def get_deployment_layout(self, args, devices, fast_devices=[],
                              very_fast_devices=[]):
        '''
        The methods here are mostly just organization, error reporting and
        setting up of (default) args. The heavy lifting code for the deployment
        layout can be found in the static get_*_osds and get_*_fast_allocs
        functions.

        ``fast_devices`` hold db volumes, ``very_fast_devices`` wal volumes.
        Returns a list of Batch.OSD plans, or [] if nothing can be deployed.
        '''
        plan = []
        phys_devs, lvm_devs = separate_devices_from_lvs(devices)
        mlogger.debug(('passed data devices: {} physical,'
                       ' {} LVM').format(len(phys_devs), len(lvm_devs)))

        plan.extend(get_physical_osds(phys_devs, args))

        plan.extend(get_lvm_osds(lvm_devs, args))

        num_osds = len(plan)
        if num_osds == 0:
            mlogger.info('All data devices are unavailable')
            return plan
        requested_osds = args.osds_per_device * len(phys_devs) + len(lvm_devs)

        if args.bluestore:
            fast_type = 'block_db'
            fast_allocations = self.fast_allocations(fast_devices,
                                                     requested_osds,
                                                     num_osds,
                                                     fast_type)
            if fast_devices and not fast_allocations:
                mlogger.info('{} fast devices were passed, but none are available'.format(len(fast_devices)))
                return []
            # every planned OSD must get exactly one fast allocation
            if fast_devices and not len(fast_allocations) == num_osds:
                mlogger.error('{} fast allocations != {} num_osds'.format(
                    len(fast_allocations), num_osds))
                exit(1)

            very_fast_allocations = self.fast_allocations(very_fast_devices,
                                                          requested_osds,
                                                          num_osds,
                                                          'block_wal')
            if very_fast_devices and not very_fast_allocations:
                mlogger.info('{} very fast devices were passed, but none are available'.format(len(very_fast_devices)))
                return []
            if very_fast_devices and not len(very_fast_allocations) == num_osds:
                mlogger.error('{} very fast allocations != {} num_osds'.format(
                    len(very_fast_allocations), num_osds))
                exit(1)

        for osd in plan:
            if fast_devices:
                osd.add_fast_device(*fast_allocations.pop(),
                                    type_=fast_type)
            if very_fast_devices and args.bluestore:
                osd.add_very_fast_device(*very_fast_allocations.pop())
        return plan

    def fast_allocations(self, devices, requested_osds, new_osds, type_):
        """Plan db/wal allocations; see get_physical_fast_allocs for details."""
        ret = []
        if not devices:
            return ret
        phys_devs, lvm_devs = separate_devices_from_lvs(devices)
        mlogger.debug(('passed {} devices: {} physical,'
                       ' {} LVM').format(type_, len(phys_devs), len(lvm_devs)))

        ret.extend(get_lvm_fast_allocs(lvm_devs))

        # fill up uneven distributions across fast devices: 5 osds and 2 fast
        # devices? create 3 slots on each device rather then deploying
        # heterogeneous osds
        slot_divider = max(1, len(phys_devs))
        if (requested_osds - len(lvm_devs)) % slot_divider:
            fast_slots_per_device = int((requested_osds - len(lvm_devs)) / slot_divider) + 1
        else:
            fast_slots_per_device = int((requested_osds - len(lvm_devs)) / slot_divider)


        ret.extend(get_physical_fast_allocs(phys_devs,
                                            type_,
                                            fast_slots_per_device,
                                            new_osds,
                                            self.args))
        return ret
    class OSD(object):
        '''
        This class simply stores info about to-be-deployed OSDs and provides an
        easy way to retrieve the necessary create arguments.
        '''
        # (path, relative size, absolute size, slot count, volume type)
        VolSpec = namedtuple('VolSpec',
                             ['path',
                              'rel_size',
                              'abs_size',
                              'slots',
                              'type_'])

        def __init__(self,
                     data_path,
                     rel_size,
                     abs_size,
                     slots,
                     id_,
                     encryption,
                     symlink=None):
            # osd id to reuse, or None to let the cluster assign one
            self.id_ = id_
            self.data = self.VolSpec(path=data_path,
                                     rel_size=rel_size,
                                     abs_size=abs_size,
                                     slots=slots,
                                     type_='data')
            # db volume (set via add_fast_device), wal volume (add_very_fast_device)
            self.fast = None
            self.very_fast = None
            # 'dmcrypt' or None
            self.encryption = encryption
            self.symlink = symlink

        def add_fast_device(self, path, rel_size, abs_size, slots, type_):
            """Attach the db (fast) volume spec to this OSD plan."""
            self.fast = self.VolSpec(path=path,
                                     rel_size=rel_size,
                                     abs_size=abs_size,
                                     slots=slots,
                                     type_=type_)

        def add_very_fast_device(self, path, rel_size, abs_size, slots):
            """Attach the wal (very fast) volume spec to this OSD plan."""
            self.very_fast = self.VolSpec(path=path,
                                          rel_size=rel_size,
                                          abs_size=abs_size,
                                          slots=slots,
                                          type_='block_wal')

        def _get_osd_plan(self):
            """Return this plan as a dict of `ceph-volume lvm prepare` args."""
            plan = {
                'data': self.data.path,
                'data_size': self.data.abs_size,
                'encryption': self.encryption,
            }
            if self.fast:
                type_ = self.fast.type_.replace('.', '_')
                plan.update(
                    {
                        type_: self.fast.path,
                        '{}_size'.format(type_): self.fast.abs_size,
                    })
            if self.very_fast:
                plan.update(
                    {
                        'block_wal': self.very_fast.path,
                        'block_wal_size': self.very_fast.abs_size,
                    })
            if self.id_:
                plan.update({'osd_id': self.id_})
            return plan

        def get_args(self, defaults):
            """Merge this plan over ``defaults`` and return the result."""
            my_defaults = defaults.copy()
            my_defaults.update(self._get_osd_plan())
            return my_defaults

        def report(self):
            """Render a human-readable description of this OSD plan."""
            report = ''
            if self.id_:
                report += templates.osd_reused_id.format(
                    id_=self.id_)
            if self.encryption:
                report += templates.osd_encryption.format(
                    enc=self.encryption)
            path = self.data.path
            if self.symlink:
                path = f'{self.symlink} -> {self.data.path}'
            report += templates.osd_component.format(
                _type=self.data.type_,
                path=path,
                size=self.data.abs_size,
                percent=self.data.rel_size)
            if self.fast:
                report += templates.osd_component.format(
                    _type=self.fast.type_,
                    path=self.fast.path,
                    size=self.fast.abs_size,
                    percent=self.fast.rel_size)
            if self.very_fast:
                report += templates.osd_component.format(
                    _type=self.very_fast.type_,
                    path=self.very_fast.path,
                    size=self.very_fast.abs_size,
                    percent=self.very_fast.rel_size)
            return report

        def report_json(self):
            # cast all values to string so that the report can be dumped in to
            # json.dumps
            return {k: str(v) for k, v in self._get_osd_plan().items()}
| 23,228 | 35.754747 | 115 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/common.py
|
from ceph_volume.util import arg_validators, disk
from ceph_volume import process, conf
from ceph_volume import terminal
from ceph_volume.devices.lvm.zap import Zap
import argparse
def rollback_osd(args, osd_id=None):
    """
    When the process of creating or preparing fails, the OSD needs to be
    destroyed so that the ID can be reused. This prevents from leaving the ID
    around as "used" on the monitor, which can cause confusion if expecting
    sequential OSD IDs.

    The usage of `destroy-new` allows this to be done without requiring the
    admin keyring (otherwise needed for destroy and purge commands)

    :param args: parsed CLI args (carries the device paths Zap will use)
    :param osd_id: the OSD ID to purge; no-op when None
    """
    if not osd_id:
        # it means that it wasn't generated, so there is nothing to rollback here
        return

    # once here, this is an error condition that needs to be rolled back
    terminal.error('Was unable to complete a new OSD, will rollback changes')
    osd_name = 'osd.%s'
    # authenticate with the bootstrap-osd keyring, not the admin one
    bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
    cmd = [
        'ceph',
        '--cluster', conf.cluster,
        '--name', 'client.bootstrap-osd',
        '--keyring', bootstrap_keyring,
        'osd', 'purge-new', osd_name % osd_id,
        '--yes-i-really-mean-it',
    ]

    process.run(cmd)
    # wipe the device(s) so they can be reused for a new OSD
    Zap(['--destroy', '--osd-id', osd_id]).main()
# argparse add_argument() kwargs shared by `lvm prepare` and `lvm create`;
# keys are the CLI flags, values are passed through as **kwargs
common_args = {
    '--data': {
        'help': 'OSD data path. A physical device or logical volume',
        'required': True,
        'type': arg_validators.ValidDataDevice(as_string=True),
    },
    '--data-size': {
        'help': 'Size of data LV in case a device was passed in --data',
        'default': '0',
        'type': disk.Size.parse
    },
    '--data-slots': {
        'help': ('Intended number of slots on data device. The new OSD gets one'
                 'of those slots or 1/nth of the available capacity'),
        'type': int,
        'default': 1,
    },
    '--osd-id': {
        'help': 'Reuse an existing OSD id',
        'default': None,
        'type': arg_validators.valid_osd_id,
    },
    '--osd-fsid': {
        'help': 'Reuse an existing OSD fsid',
        'default': None,
    },
    '--cluster-fsid': {
        'help': 'Specify the cluster fsid, useful when no ceph.conf is available',
        'default': None,
    },
    '--crush-device-class': {
        'dest': 'crush_device_class',
        'help': 'Crush device class to assign this OSD to',
        'default': "",
    },
    '--dmcrypt': {
        'action': 'store_true',
        'help': 'Enable device encryption via dm-crypt',
    },
    '--no-systemd': {
        'dest': 'no_systemd',
        'action': 'store_true',
        'help': 'Skip creating and enabling systemd units and starting OSD services when activating',
    },
}

# flags specific to the bluestore objectstore; rendered in their own
# argparse group by common_parser()
bluestore_args = {
    '--bluestore': {
        'action': 'store_true',
        'help': 'Use the bluestore objectstore',
    },
    '--block.db': {
        'dest': 'block_db',
        'help': 'Path to bluestore block.db logical volume or device',
        'type': arg_validators.ValidDevice(as_string=True),
    },
    '--block.db-size': {
        'dest': 'block_db_size',
        'help': 'Size of block.db LV in case device was passed in --block.db',
        'default': '0',
        'type': disk.Size.parse
    },
    '--block.db-slots': {
        'dest': 'block_db_slots',
        'help': ('Intended number of slots on db device. The new OSD gets one'
                 'of those slots or 1/nth of the available capacity'),
        'type': int,
        'default': 1,
    },
    '--block.wal': {
        'dest': 'block_wal',
        'help': 'Path to bluestore block.wal logical volume or device',
        'type': arg_validators.ValidDevice(as_string=True),
    },
    '--block.wal-size': {
        'dest': 'block_wal_size',
        'help': 'Size of block.wal LV in case device was passed in --block.wal',
        'default': '0',
        'type': disk.Size.parse
    },
    '--block.wal-slots': {
        'dest': 'block_wal_slots',
        'help': ('Intended number of slots on wal device. The new OSD gets one'
                 'of those slots or 1/nth of the available capacity'),
        'type': int,
        'default': 1,
    },
}
def get_default_args():
    """Build a flat {dest_name: default} mapping from every known CLI flag.

    Flag names are normalized the way argparse derives dest names:
    leading dashes stripped, '-' and '.' replaced with '_'.
    """
    def _dest(flag):
        return flag.strip('-').replace('-', '_').replace('.', '_')

    defaults = {}
    for arg_group in (common_args, bluestore_args):
        for flag, spec in arg_group.items():
            defaults[_dest(flag)] = spec.get('default', None)
    return defaults
def common_parser(prog, description):
    """
    Both prepare and create share the same parser, those are defined here to
    avoid duplication

    :param prog: program name shown in usage/help output
    :param description: help text for the parser
    :return: an argparse.ArgumentParser with all common and bluestore flags
    """
    parser = argparse.ArgumentParser(
        prog=prog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=description,
    )

    bluestore_group = parser.add_argument_group('bluestore')

    for name, kwargs in common_args.items():
        parser.add_argument(name, **kwargs)

    for name, kwargs in bluestore_args.items():
        bluestore_group.add_argument(name, **kwargs)

    # Do not parse args, so that consumers can do something before the args get
    # parsed triggering argparse behavior
    return parser

# prepare and create expose identical CLIs; both aliases point at the
# shared builder above
create_parser = common_parser  # noqa
prepare_parser = common_parser  # noqa
| 5,294 | 31.090909 | 102 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/create.py
|
from __future__ import print_function
from textwrap import dedent
import logging
from ceph_volume.util import system
from ceph_volume.util.arg_validators import exclude_group_options
from ceph_volume import decorators, terminal
from .common import create_parser, rollback_osd
from .prepare import Prepare
from .activate import Activate
logger = logging.getLogger(__name__)
class Create(object):
    """CLI entry point for ``ceph-volume lvm create`` (prepare + activate)."""

    help = 'Create a new OSD from an LVM device'

    def __init__(self, argv):
        self.argv = argv

    @decorators.needs_root
    def create(self, args):
        """Prepare and activate an OSD; roll the new ID back on failure."""
        if not args.osd_fsid:
            args.osd_fsid = system.generate_uuid()
        prepare_step = Prepare([])
        prepare_step.safe_prepare(args)
        osd_id = prepare_step.osd_id
        try:
            # we try this for activate only when 'creating' an OSD, because a rollback should not
            # happen when doing normal activation. For example when starting an OSD, systemd will call
            # activate, which would never need to be rolled back.
            Activate([]).activate(args)
        except Exception:
            logger.exception('lvm activate was unable to complete, while creating the OSD')
            logger.info('will rollback OSD ID creation')
            rollback_osd(args, osd_id)
            raise
        terminal.success("ceph-volume lvm create successful for: %s" % args.data)

    def main(self):
        """Parse CLI arguments and run create()."""
        sub_command_help = dedent("""
        Create an OSD by assigning an ID and FSID, registering them with the
        cluster with an ID and FSID, formatting and mounting the volume, adding
        all the metadata to the logical volumes using LVM tags, and starting
        the OSD daemon. This is a convenience command that combines the prepare
        and activate steps.

        Encryption is supported via dmcrypt and the --dmcrypt flag.

        Existing logical volume (lv):

            ceph-volume lvm create --data {vg/lv}

        Existing block device (a logical volume will be created):

            ceph-volume lvm create --data /path/to/device

        Optionally, can consume db and wal block devices, partitions or logical
        volumes. A device will get a logical volume, partitions and existing
        logical volumes will be used as is:

            ceph-volume lvm create --data {vg/lv} --block.wal {partition} --block.db {/path/to/device}
        """)
        parser = create_parser(
            prog='ceph-volume lvm create',
            description=sub_command_help,
        )
        if len(self.argv) == 0:
            print(sub_command_help)
            return
        exclude_group_options(parser, groups=['bluestore'], argv=self.argv)
        args = parser.parse_args(self.argv)
        # Default to bluestore here since defaulting it in add_argument may
        # cause both to be True
        if not args.bluestore:
            args.bluestore = True
        self.create(args)
| 2,925 | 36.512821 | 102 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/deactivate.py
|
import argparse
import logging
import sys
from textwrap import dedent
from ceph_volume import conf
from ceph_volume.util import encryption, system
from ceph_volume.api.lvm import get_lvs_by_tag
logger = logging.getLogger(__name__)
def deactivate_osd(osd_id=None, osd_uuid=None):
    """
    Unmount an OSD's tmpfs dir and close any of its dmcrypt mappings.

    :param osd_id: the OSD ID; ignored when ``osd_uuid`` is given
    :param osd_uuid: the OSD FSID; takes precedence over ``osd_id``
    :raises StopIteration: when no matching data/block LV can be found
        (handled by the Deactivate caller)
    """
    lvs = []
    if osd_uuid is not None:
        lvs = get_lvs_by_tag('ceph.osd_fsid={}'.format(osd_uuid))
        # derive the id from the first matching LV's tags
        osd_id = next(lv.tags['ceph.osd_id'] for lv in lvs)
    else:
        lvs = get_lvs_by_tag('ceph.osd_id={}'.format(osd_id))

    data_lv = next(lv for lv in lvs if lv.tags['ceph.type'] in ['data', 'block'])

    conf.cluster = data_lv.tags['ceph.cluster_name']
    logger.debug('Found cluster name {}'.format(conf.cluster))

    tmpfs_path = '/var/lib/ceph/osd/{}-{}'.format(conf.cluster, osd_id)
    system.unmount_tmpfs(tmpfs_path)

    for lv in lvs:
        if lv.tags.get('ceph.encrypted', '0') == '1':
            encryption.dmcrypt_close(lv.lv_uuid)
class Deactivate(object):
    """CLI entry point for ``ceph-volume lvm deactivate``."""

    help = 'Deactivate OSDs'

    def deactivate(self, args=None):
        # NOTE(review): calling deactivate() without args relies on a prior
        # call having set self.args; main() always passes args.
        if args:
            self.args = args
        try:
            deactivate_osd(self.args.osd_id, self.args.osd_uuid)
        except StopIteration:
            logger.error(('No data or block LV found for OSD'
                          '{}').format(self.args.osd_id))
            sys.exit(1)

    def __init__(self, argv):
        self.argv = argv

    def main(self):
        """Parse CLI arguments and run deactivate()."""
        sub_command_help = dedent("""
        Deactivate unmounts and OSDs tmpfs and closes any crypt devices.

            ceph-volume lvm deactivate {ID} {FSID}

        """)
        parser = argparse.ArgumentParser(
            prog='ceph-volume lvm deactivate',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )

        parser.add_argument(
            'osd_id',
            nargs='?',
            help='The ID of the OSD'
        )
        parser.add_argument(
            'osd_uuid',
            nargs='?',
            help='The UUID of the OSD, similar to a SHA1, takes precedence over osd_id'
        )
        # parser.add_argument(
        #     '--all',
        #     action='store_true',
        #     help='Deactivate all OSD volumes found in the system',
        # )
        if len(self.argv) == 0:
            print(sub_command_help)
            return
        args = parser.parse_args(self.argv)
        # at least one identifier is required to find the OSD's LVs
        if not args.osd_id and not args.osd_uuid:
            raise ValueError(('Can not identify OSD, pass either all or'
                              'osd_id or osd_uuid'))
        self.deactivate(args)
| 2,721 | 29.58427 | 87 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/listing.py
|
from __future__ import print_function
import argparse
import json
import logging
from textwrap import dedent
from ceph_volume import decorators
from ceph_volume.api import lvm as api
logger = logging.getLogger(__name__)
osd_list_header_template = """\n
{osd_id:=^20}"""

osd_device_header_template = """
  {type: <13} {path}
"""

device_metadata_item_template = """
      {tag_name: <25} {value}"""


def readable_tag(tag):
    """Turn an LVM tag name like ``ceph.osd_fsid`` into ``osd fsid``."""
    short_name = tag.split('.')[-1]
    return short_name.replace('_', ' ')


def pretty_report(report):
    """Print a human readable rendering of ``report``.

    ``report`` maps OSD ids to lists of device dictionaries, as produced
    by ``List.full_report()`` / ``List.single_report()``.
    """
    lines = []
    for osd_id, devices in sorted(report.items()):
        lines.append(
            osd_list_header_template.format(osd_id=" osd.%s " % osd_id)
        )
        for device in devices:
            lines.append(
                osd_device_header_template.format(
                    type='[%s]' % device['type'],
                    path=device['path']
                )
            )
            for tag_name, value in sorted(device.get('tags', {}).items()):
                lines.append(
                    device_metadata_item_template.format(
                        tag_name=readable_tag(tag_name),
                        value=value
                    )
                )
            # the original used `continue`/`else` here; a plain guard is
            # equivalent since this is the last statement of the loop
            if device.get('devices'):
                lines.append(
                    device_metadata_item_template.format(
                        tag_name='devices',
                        value=','.join(device['devices'])
                    )
                )
    print(''.join(lines))
def direct_report():
    """Return the full LVM report without any CLI handling.

    Non-CLI consumers of listing information can call this to get the
    same data structure ``ceph-volume lvm list`` works with, bypassing
    argument parsing and the rest of the class interface entirely.
    """
    lister = List([])
    return lister.full_report()
# TODO: Perhaps, get rid of this class and simplify this module further?
class List(object):
    """``ceph-volume lvm list``: report Ceph-associated LVs and devices.

    Builds report dictionaries keyed by OSD id, each value being a list of
    device dictionaries (LV metadata plus any non-LV journal/wal/db
    partitions), rendered either as JSON or via ``pretty_report``.
    """

    help = 'list logical volumes and devices associated with Ceph'

    def __init__(self, argv):
        # raw CLI arguments, parsed later by main()
        self.argv = argv

    @decorators.needs_root
    def list(self, args):
        """Produce and print the report selected by ``args``.

        With ``args.device`` a single-device report is produced, otherwise
        the full system report. JSON output is always printed (possibly
        empty); pretty output exits non-zero on an empty report.
        """
        report = self.single_report(args.device) if args.device else \
                self.full_report()
        if args.format == 'json':
            # If the report is empty, we don't return a non-zero exit status
            # because it is assumed this is going to be consumed by automated
            # systems like ceph-ansible which would be forced to ignore the
            # non-zero exit status if all they need is the information in the
            # JSON object
            print(json.dumps(report, indent=4, sort_keys=True))
        else:
            if not report:
                raise SystemExit('No valid Ceph lvm devices found')
            pretty_report(report)

    def create_report(self, lvs):
        """
        Create a report for LVM dev(s) passed. Returns '{}' to denote failure.

        Non-Ceph LVs are skipped; for each Ceph LV the backing PV names are
        attached under 'devices', and any non-LV journal/wal/db partition is
        appended as its own entry for the same OSD id.
        """

        report = {}

        pvs = api.get_pvs()

        for lv in lvs:
            if not api.is_ceph_device(lv):
                continue

            osd_id = lv.tags['ceph.osd_id']
            report.setdefault(osd_id, [])
            lv_report = lv.as_dict()

            # resolve the physical devices backing this LV via PV uuid match
            lv_report['devices'] = [pv.name for pv in pvs if pv.lv_uuid == lv.lv_uuid] if pvs else []
            report[osd_id].append(lv_report)

            phys_devs = self.create_report_non_lv_device(lv)
            if phys_devs:
                report[osd_id].append(phys_devs)

        return report

    def create_report_non_lv_device(self, lv):
        """Return a report entry for a journal/wal/db that is a raw
        partition rather than an LV, or {} when there is none."""
        report = {}
        if lv.tags.get('ceph.type', '') in ['data', 'block']:
            for dev_type in ['journal', 'wal', 'db']:
                dev = lv.tags.get('ceph.{}_device'.format(dev_type), '')
                # counting / in the device name seems brittle but should work,
                # lvs will have 3
                if dev and dev.count('/') == 2:
                    device_uuid = lv.tags.get('ceph.{}_uuid'.format(dev_type))
                    report = {'tags': {'PARTUUID': device_uuid},
                              'type': dev_type,
                              'path': dev}
        return report

    def full_report(self):
        """
        Create a report of all Ceph LVs. Returns '{}' to denote failure.
        """
        return self.create_report(api.get_lvs())

    def single_report(self, arg):
        """
        Generate a report for a single device. This can be either a logical
        volume in the form of vg/lv, a device with an absolute path like
        /dev/sda1 or /dev/sda, or a list of devices under same OSD ID.

        Return value '{}' denotes failure.
        """
        if isinstance(arg, int) or arg.isdigit():
            lv = api.get_lvs_from_osd_id(arg)
        elif arg[0] == '/':
            lv = api.get_lvs_from_path(arg)
        else:
            # assumed to be a vg/lv path; take the lv portion
            lv = [api.get_single_lv(filters={'lv_name': arg.split('/')[1]})]

        report = self.create_report(lv)

        if not report:
            # check if device is a non-lvm journals or wal/db
            for dev_type in ['journal', 'wal', 'db']:
                lvs = api.get_lvs(tags={
                    'ceph.{}_device'.format(dev_type): arg})
                if lvs:
                    # just taking the first lv here should work
                    lv = lvs[0]
                    phys_dev = self.create_report_non_lv_device(lv)
                    osd_id = lv.tags.get('ceph.osd_id')
                    if osd_id:
                        report[osd_id] = [phys_dev]


        return report

    def main(self):
        sub_command_help = dedent("""
        List devices or logical volumes associated with Ceph. An association is
        determined if a device has information relating to an OSD. This is
        verified by querying LVM's metadata and correlating it with devices.
        The lvs associated with the OSD need to have been prepared previously,
        so that all needed tags and metadata exist.
        Full listing of all system devices associated with a cluster::
            ceph-volume lvm list
        List devices under same OSD ID::
            ceph-volume lvm list <OSD-ID>
        List a particular device, reporting all metadata about it::
            ceph-volume lvm list /dev/sda1
        List a logical volume, along with all its metadata (vg is a volume
        group, and lv the logical volume name)::
            ceph-volume lvm list {vg/lv}
        """)
        parser = argparse.ArgumentParser(
            prog='ceph-volume lvm list',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )

        parser.add_argument(
            'device',
            metavar='DEVICE',
            nargs='?',
            help='Path to an lv (as vg/lv) or to a device like /dev/sda1'
        )

        parser.add_argument(
            '--format',
            help='output format, defaults to "pretty"',
            default='pretty',
            choices=['json', 'pretty'],
        )

        args = parser.parse_args(self.argv)
        self.list(args)
| 7,185 | 31.080357 | 101 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/main.py
|
import argparse
from textwrap import dedent
from ceph_volume import terminal
from . import activate
from . import deactivate
from . import prepare
from . import create
from . import trigger
from . import listing
from . import zap
from . import batch
from . import migrate
class LVM(object):
    """Top level dispatcher for every ``ceph-volume lvm`` subcommand."""

    help = 'Use LVM and LVM-based technologies to deploy OSDs'

    _help = dedent("""
    Use LVM and LVM-based technologies to deploy OSDs
    {sub_help}
    """)

    # maps a subcommand name to the class implementing it; insertion order
    # is the order terminal.subhelp() renders the subcommands in
    mapper = {
        'activate': activate.Activate,
        'deactivate': deactivate.Deactivate,
        'batch': batch.Batch,
        'prepare': prepare.Prepare,
        'create': create.Create,
        'trigger': trigger.Trigger,
        'list': listing.List,
        'zap': zap.Zap,
        'migrate': migrate.Migrate,
        'new-wal': migrate.NewWAL,
        'new-db': migrate.NewDB,
    }

    def __init__(self, argv):
        self.argv = argv

    def print_help(self, sub_help):
        """Render the top level help text with the subcommand listing."""
        return self._help.format(sub_help=sub_help)

    def main(self):
        """Dispatch to a subcommand, or show the top level help."""
        # dispatch() only returns when argv did not name a known subcommand
        terminal.dispatch(self.mapper, self.argv)
        description = self.print_help(terminal.subhelp(self.mapper))
        top_parser = argparse.ArgumentParser(
            prog='ceph-volume lvm',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=description,
        )
        top_parser.parse_args(self.argv)
        if len(self.argv) <= 1:
            return top_parser.print_help()
| 1,409 | 24.636364 | 71 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/migrate.py
|
from __future__ import print_function
import argparse
import logging
import os
from textwrap import dedent
from ceph_volume.util import system, disk, merge_dict
from ceph_volume.util.device import Device
from ceph_volume.util.arg_validators import valid_osd_id
from ceph_volume import decorators, terminal, process
from ceph_volume.api import lvm as api
from ceph_volume.systemd import systemctl
logger = logging.getLogger(__name__)
mlogger = terminal.MultiLogger(__name__)
def get_cluster_name(osd_id, osd_fsid):
    """
    From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the
    system that match those tag values, then return cluster_name for the first
    one.
    """
    filters = {
        'ceph.osd_id': osd_id,
        'ceph.osd_fsid': osd_fsid,
    }
    lvs = api.get_lvs(tags=filters)
    if not lvs:
        mlogger.error(
            'Unable to find any LV for source OSD: id:{} fsid:{}'.format(
                osd_id, osd_fsid))
        raise SystemExit('Unexpected error, terminating')
    first_lv = next(iter(lvs))
    return first_lv.tags["ceph.cluster_name"]
def get_osd_path(osd_id, osd_fsid):
    """Return the OSD's mount point, e.g. ``/var/lib/ceph/osd/ceph-1``."""
    cluster = get_cluster_name(osd_id, osd_fsid)
    return '/var/lib/ceph/osd/{}-{}'.format(cluster, osd_id)
def find_associated_devices(osd_id, osd_fsid):
    """
    From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the
    system that match those tag values, further detect if any partitions are
    part of the OSD, and then return the set of LVs and partitions (if any).
    """
    lv_tags = {
        'ceph.osd_id': osd_id,
        'ceph.osd_fsid': osd_fsid,
    }
    lvs = api.get_lvs(tags=lv_tags)
    if not lvs:
        mlogger.error(
            'Unable to find any LV for source OSD: id:{} fsid:{}'.format(
                osd_id, osd_fsid))
        raise SystemExit('Unexpected error, terminating')

    # de-duplicate, then wrap every backing path in a Device object
    unique_devices = set(ensure_associated_lvs(lvs, lv_tags))
    return [(Device(path), dev_type)
            for path, dev_type in unique_devices if path]
def ensure_associated_lvs(lvs, lv_tags):
    """
    Go through each LV and ensure if backing devices (journal, wal, block)
    are LVs or partitions, so that they can be accurately reported.

    Returns a list of ``(path, type)`` tuples. NOTE(review): when ``lvs``
    contains more than one LV, the extend() below runs once per LV and can
    produce duplicate entries; the caller (find_associated_devices)
    de-duplicates with set() — confirm before relying on this list directly.
    """
    # look for many LVs for each backing type, because it is possible to
    # receive a filtering for osd.1, and have multiple failed deployments
    # leaving many journals with osd.1 - usually, only a single LV will be
    # returned
    block_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'block'}))
    db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
    wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
    backing_devices = [(block_lvs, 'block'), (db_lvs, 'db'),
                       (wal_lvs, 'wal')]
    verified_devices = []
    for lv in lvs:
        # go through each lv and append it, otherwise query `blkid` to find
        # a physical device. Do this for each type (journal,db,wal) regardless
        # if they have been processed in the previous LV, so that bad devices
        # with the same ID can be caught
        for ceph_lvs, type in backing_devices:
            if ceph_lvs:
                verified_devices.extend([(l.lv_path, type) for l in ceph_lvs])
                continue
            # must be a disk partition, by querying blkid by the uuid we are
            # ensuring that the device path is always correct
            try:
                device_uuid = lv.tags['ceph.{}_uuid'.format(type)]
            except KeyError:
                # Bluestore will not have ceph.journal_uuid, and Filestore
                # will not have ceph.db_uuid
                continue
            osd_device = disk.get_device_from_partuuid(device_uuid)
            if not osd_device:
                # if the osd_device is not found by the partuuid, then it is
                # not possible to ensure this device exists anymore, so skip it
                continue
            verified_devices.append((osd_device, type))
    return verified_devices
class VolumeTagTracker(object):
    """Snapshot and maintain the LVM tags of an OSD's volumes during a
    migration/attach operation.

    On construction the current tags of the target LV and of the OSD's
    data/db/wal devices are copied, so that ``undo()`` can restore them if
    the operation fails mid-way.
    """
    def __init__(self, devices, target_lv):
        # devices is a list of (Device, type) tuples; type is one of
        # 'block', 'db', 'wal'
        self.target_lv = target_lv
        self.data_device = self.db_device = self.wal_device = None
        for device, type in devices:
            if type == 'block':
                self.data_device = device
            elif type == 'db':
                self.db_device = device
            elif type == 'wal':
                self.wal_device = device
        if not self.data_device:
            mlogger.error('Data device not found')
            raise SystemExit(
                "Unexpected error, terminating")
        if not self.data_device.is_lv:
            mlogger.error('Data device isn\'t LVM')
            raise SystemExit(
                "Unexpected error, terminating")
        # snapshot every volume's tags so undo() can roll back
        self.old_target_tags = self.target_lv.tags.copy()
        self.old_data_tags = (
            self.data_device.lv_api.tags.copy()
            if self.data_device.is_lv else None)
        self.old_db_tags = (
            self.db_device.lv_api.tags.copy()
            if self.db_device and self.db_device.is_lv else None)
        self.old_wal_tags = (
            self.wal_device.lv_api.tags.copy()
            if self.wal_device and self.wal_device.is_lv else None)
    def update_tags_when_lv_create(self, create_type):
        """Tag all affected volumes after attaching target_lv as a brand new
        ``create_type`` ('db' or 'wal') volume: point the data device (and
        the complementary db/wal device, if any) at the new volume, and copy
        the data device's tags onto the target with the new ceph.type."""
        tags = {}
        if not self.data_device.is_lv:
            mlogger.warning(
                'Data device is not LVM, wouldn\'t update LVM tags')
        else:
            tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid
            tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path
            self.data_device.lv_api.set_tags(tags)
            # the target inherits the data device's tags, with its own type
            tags = self.data_device.lv_api.tags.copy()
            tags["ceph.type"] = create_type
            self.target_lv.set_tags(tags)
        # the "other" attached volume (wal when creating db, db when
        # creating wal) must also learn about the new device
        aux_dev = None
        if create_type == "db" and self.wal_device:
            aux_dev = self.wal_device
        elif create_type == "wal" and self.db_device:
            aux_dev = self.db_device
        else:
            return
        if not aux_dev.is_lv:
            mlogger.warning(
                '{} device is not LVM, wouldn\'t update LVM tags'.format(
                    create_type.upper()))
        else:
            tags = {}
            tags["ceph.{}_uuid".format(create_type)] = self.target_lv.lv_uuid
            tags["ceph.{}_device".format(create_type)] = self.target_lv.lv_path
            aux_dev.lv_api.set_tags(tags)
    def remove_lvs(self, source_devices, target_type):
        """Clear the tags of source volumes being removed (all but 'block'
        and the migration target type) and strip references to them from
        the remaining volumes' tags."""
        remaining_devices = [self.data_device, self.db_device, self.wal_device]

        outdated_tags = []
        for device, type in source_devices:
            if type == "block" or type == target_type:
                continue
            remaining_devices.remove(device)
            if device.is_lv:
                outdated_tags.append("ceph.{}_uuid".format(type))
                outdated_tags.append("ceph.{}_device".format(type))
                device.lv_api.clear_tags()
        if len(outdated_tags) > 0:
            for d in remaining_devices:
                if d and d.is_lv:
                    d.lv_api.clear_tags(outdated_tags)
    def replace_lvs(self, source_devices, target_type):
        """Clear the tags of all source volumes (except 'block') and point
        the remaining volumes — and the target itself — at target_lv as the
        new ``target_type`` volume."""
        remaining_devices = [self.data_device]
        if self.db_device:
            remaining_devices.append(self.db_device)
        if self.wal_device:
            remaining_devices.append(self.wal_device)

        outdated_tags = []
        for device, type in source_devices:
            if type == "block":
                continue
            remaining_devices.remove(device)
            if device.is_lv:
                outdated_tags.append("ceph.{}_uuid".format(type))
                outdated_tags.append("ceph.{}_device".format(type))
                device.lv_api.clear_tags()

        new_tags = {}
        new_tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid
        new_tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path

        for d in remaining_devices:
            if d and d.is_lv:
                if len(outdated_tags) > 0:
                    d.lv_api.clear_tags(outdated_tags)
                d.lv_api.set_tags(new_tags)

        if not self.data_device.is_lv:
            mlogger.warning(
                'Data device is not LVM, wouldn\'t properly update target LVM tags')
        else:
            # the target inherits the data device's tags, with its own
            # type/uuid/device entries
            tags = self.data_device.lv_api.tags.copy()

            tags["ceph.type"] = target_type
            tags["ceph.{}_uuid".format(target_type)] = self.target_lv.lv_uuid
            tags["ceph.{}_device".format(target_type)] = self.target_lv.lv_path
            self.target_lv.set_tags(tags)
    def undo(self):
        """Restore the tag snapshots taken in __init__ on every volume."""
        mlogger.info(
            'Undoing lv tag set')
        if self.data_device:
            if self.old_data_tags:
                self.data_device.lv_api.set_tags(self.old_data_tags)
            else:
                self.data_device.lv_api.clear_tags()
        if self.db_device:
            if self.old_db_tags:
                self.db_device.lv_api.set_tags(self.old_db_tags)
            else:
                self.db_device.lv_api.clear_tags()
        if self.wal_device:
            if self.old_wal_tags:
                self.wal_device.lv_api.set_tags(self.old_wal_tags)
            else:
                self.wal_device.lv_api.clear_tags()
        if self.old_target_tags:
            self.target_lv.set_tags(self.old_target_tags)
        else:
            self.target_lv.clear_tags()
class Migrate(object):
    """``ceph-volume lvm migrate``: move BlueFS data between an OSD's
    volumes, optionally attaching the target LV in place of a source."""

    help = 'Migrate BlueFS data to another LVM device'

    def __init__(self, argv):
        self.argv = argv
        self.osd_id = None

    def get_source_devices(self, devices, target_type=""):
        """Filter ``devices`` down to the types named in ``--from``.

        ``target_type`` (when given) is excluded so the migration target is
        never treated as a source. Raises SystemExit when nothing is left.
        """
        ret = []
        for device, type in devices:
            if type == target_type:
                continue
            if type == 'block':
                if 'data' not in self.args.from_:
                    continue
            elif type == 'db':
                if 'db' not in self.args.from_:
                    continue
            elif type == 'wal':
                if 'wal' not in self.args.from_:
                    continue
            ret.append([device, type])
        if ret == []:
            mlogger.error('Source device list is empty')
            raise SystemExit(
                'Unable to migrate to : {}'.format(self.args.target))
        return ret

    # ceph-bluestore-tool uses the following replacement rules
    # (in the order of precedence, stop on the first match):
    # - if source list has DB volume - target device replaces it.
    # - if source list has WAL volume - target device replaces it.
    # - if source list has slow volume only - operation isn't permitted,
    #   requires explicit allocation via new-db/new-wal command.
    def get_target_type_by_source(self, devices):
        """Return 'db' or 'wal' depending on which source volume the target
        will replace, or None when only the slow (block) device is listed."""
        ret = None
        for device, type in devices:
            if type == 'db':
                return 'db'
            elif type == 'wal':
                ret = 'wal'
        return ret

    def get_filename_by_type(self, type):
        """Map a volume type to its file name under the OSD directory
        ('block', 'block.db' or 'block.wal')."""
        filename = 'block'
        if type == 'db' or type == 'wal':
            filename += '.' + type
        return filename

    def get_source_args(self, osd_path, devices):
        """Build the repeated ``--devs-source`` arguments that are passed
        through to ceph-bluestore-tool."""
        ret = []
        for device, type in devices:
            ret = ret + ["--devs-source", os.path.join(
                osd_path, self.get_filename_by_type(type))]
        return ret

    @decorators.needs_root
    def migrate_to_new(self, osd_id, osd_fsid, devices, target_lv):
        """Migrate BlueFS data onto ``target_lv``, attaching it as a brand
        new db/wal volume in place of one of the source volumes."""
        source_devices = self.get_source_devices(devices)
        target_type = self.get_target_type_by_source(source_devices)
        if not target_type:
            mlogger.error(
                "Unable to determine new volume type,"
                " please use new-db or new-wal command before.")
            raise SystemExit(
                "Unable to migrate to : {}".format(self.args.target))

        target_path = target_lv.lv_path

        # create the tracker *before* the try block: if its constructor
        # fails there is nothing to undo yet, and referencing tag_tracker
        # in the except handler would otherwise raise UnboundLocalError
        # (this also matches migrate_to_existing)
        tag_tracker = VolumeTagTracker(devices, target_lv)
        try:
            # we need to update lvm tags for all the remaining volumes
            # and clear for ones which to be removed
            # ceph-bluestore-tool removes source volume(s) other than block one
            # and attaches target one after successful migration
            tag_tracker.replace_lvs(source_devices, target_type)

            osd_path = get_osd_path(osd_id, osd_fsid)
            source_args = self.get_source_args(osd_path, source_devices)
            mlogger.info("Migrate to new, Source: {} Target: {}".format(
                source_args, target_path))
            stdout, stderr, exit_code = process.call([
                'ceph-bluestore-tool',
                '--path',
                osd_path,
                '--dev-target',
                target_path,
                '--command',
                'bluefs-bdev-migrate'] +
                source_args)
            if exit_code != 0:
                mlogger.error(
                    'Failed to migrate device, error code:{}'.format(exit_code))
                raise SystemExit(
                    'Failed to migrate to : {}'.format(self.args.target))
            else:
                system.chown(os.path.join(osd_path, "block.{}".format(
                    target_type)))
                terminal.success('Migration successful.')
        except:
            # bare except on purpose: undo() must also run for SystemExit
            tag_tracker.undo()
            raise

        return

    @decorators.needs_root
    def migrate_to_existing(self, osd_id, osd_fsid, devices, target_lv):
        """Migrate BlueFS data onto ``target_lv``, which is already attached
        to the OSD as its db (migrating onto a WAL is not supported)."""
        target_type = target_lv.tags["ceph.type"]
        if target_type == "wal":
            mlogger.error("Migrate to WAL is not supported")
            raise SystemExit(
                "Unable to migrate to : {}".format(self.args.target))
        target_filename = self.get_filename_by_type(target_type)
        if (target_filename == ""):
            mlogger.error(
                "Target Logical Volume doesn't have proper volume type "
                "(ceph.type LVM tag): {}".format(target_type))
            raise SystemExit(
                "Unable to migrate to : {}".format(self.args.target))

        osd_path = get_osd_path(osd_id, osd_fsid)
        source_devices = self.get_source_devices(devices, target_type)
        target_path = os.path.join(osd_path, target_filename)
        tag_tracker = VolumeTagTracker(devices, target_lv)

        try:
            # ceph-bluestore-tool removes source volume(s) other than
            # block and target ones after successful migration
            tag_tracker.remove_lvs(source_devices, target_type)
            source_args = self.get_source_args(osd_path, source_devices)
            mlogger.info("Migrate to existing, Source: {} Target: {}".format(
                source_args, target_path))
            stdout, stderr, exit_code = process.call([
                'ceph-bluestore-tool',
                '--path',
                osd_path,
                '--dev-target',
                target_path,
                '--command',
                'bluefs-bdev-migrate'] +
                source_args)
            if exit_code != 0:
                mlogger.error(
                    'Failed to migrate device, error code:{}'.format(exit_code))
                raise SystemExit(
                    'Failed to migrate to : {}'.format(self.args.target))
            else:
                terminal.success('Migration successful.')
        except:
            # bare except on purpose: undo() must also run for SystemExit
            tag_tracker.undo()
            raise

        return

    @decorators.needs_root
    def migrate_osd(self):
        """Validate the CLI arguments, locate the OSD's devices and run
        either a migrate-to-new or a migrate-to-existing operation."""
        if self.args.osd_id and not self.args.no_systemd:
            osd_is_running = systemctl.osd_is_active(self.args.osd_id)
            if osd_is_running:
                mlogger.error('OSD is running, stop it with: '
                              'systemctl stop ceph-osd@{}'.format(
                                  self.args.osd_id))
                raise SystemExit(
                    'Unable to migrate devices associated with OSD ID: {}'
                    .format(self.args.osd_id))

        target_lv = api.get_lv_by_fullname(self.args.target)
        if not target_lv:
            mlogger.error(
                'Target path "{}" is not a Logical Volume'.format(
                    self.args.target))
            raise SystemExit(
                'Unable to migrate to : {}'.format(self.args.target))
        devices = find_associated_devices(self.args.osd_id, self.args.osd_fsid)
        if (not target_lv.used_by_ceph):
            self.migrate_to_new(self.args.osd_id, self.args.osd_fsid,
                                devices,
                                target_lv)
        else:
            # an already-used target must belong to this very OSD
            if (target_lv.tags['ceph.osd_id'] != self.args.osd_id or
                    target_lv.tags['ceph.osd_fsid'] != self.args.osd_fsid):
                mlogger.error(
                    'Target Logical Volume isn\'t used by the specified OSD: '
                    '{} FSID: {}'.format(self.args.osd_id,
                                         self.args.osd_fsid))
                raise SystemExit(
                    'Unable to migrate to : {}'.format(self.args.target))
            self.migrate_to_existing(self.args.osd_id, self.args.osd_fsid,
                                     devices,
                                     target_lv)

    def make_parser(self, prog, sub_command_help):
        """Build the argument parser shared by migrate's entry point."""
        parser = argparse.ArgumentParser(
            prog=prog,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )
        parser.add_argument(
            '--osd-id',
            required=True,
            help='Specify an OSD ID to detect associated devices for zapping',
            type=valid_osd_id
        )
        parser.add_argument(
            '--osd-fsid',
            required=True,
            help='Specify an OSD FSID to detect associated devices for zapping',
        )
        parser.add_argument(
            '--target',
            required=True,
            help='Specify target Logical Volume (LV) to migrate data to',
        )
        parser.add_argument(
            '--from',
            nargs='*',
            dest='from_',
            required=True,
            choices=['data', 'db', 'wal'],
            help='Copy BlueFS data from DB device',
        )
        parser.add_argument(
            '--no-systemd',
            dest='no_systemd',
            action='store_true',
            help='Skip checking OSD systemd unit',
        )
        return parser

    def main(self):
        sub_command_help = dedent("""
        Moves BlueFS data from source volume(s) to the target one, source
        volumes (except the main (i.e. data or block) one) are removed on
        success. LVM volumes are permitted for Target only, both already
        attached or new logical one. In the latter case it is attached to OSD
        replacing one of the source devices. Following replacement rules apply
        (in the order of precedence, stop on the first match):
        * if source list has DB volume - target device replaces it.
        * if source list has WAL volume - target device replace it.
        * if source list has slow volume only - operation is not permitted,
        requires explicit allocation via new-db/new-wal command.
        Example calls for supported scenarios:
        Moves BlueFS data from main device to LV already attached as DB:
        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/db
        Moves BlueFS data from shared main device to LV which will be attached
        as a new DB:
        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/new_db
        Moves BlueFS data from DB device to new LV, DB is replaced:
        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db --target vgname/new_db
        Moves BlueFS data from main and DB devices to new LV, DB is replaced:
        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db --target vgname/new_db
        Moves BlueFS data from main, DB and WAL devices to new LV, WAL is
        removed and DB is replaced:
        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db wal --target vgname/new_db
        Moves BlueFS data from main, DB and WAL devices to main device, WAL
        and DB are removed:
        ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db wal --target vgname/data
        """)

        parser = self.make_parser('ceph-volume lvm migrate', sub_command_help)

        if len(self.argv) == 0:
            print(sub_command_help)
            return

        self.args = parser.parse_args(self.argv)

        self.migrate_osd()
class NewVolume(object):
    """Shared implementation for ``new-db``/``new-wal``: attach a fresh
    LV to an existing OSD as its DB or WAL volume."""

    def __init__(self, create_type, argv):
        # create_type is 'db' or 'wal', chosen by the subclass
        self.create_type = create_type
        self.argv = argv

    def make_parser(self, prog, sub_command_help):
        """Build the argument parser shared by new-db and new-wal."""
        parser = argparse.ArgumentParser(
            prog=prog,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )

        parser.add_argument(
            '--osd-id',
            required=True,
            help='Specify an OSD ID to attach new volume to',
            type=valid_osd_id,
        )

        parser.add_argument(
            '--osd-fsid',
            required=True,
            # fixed: the help text previously read "FSIDto" (missing space)
            help='Specify an OSD FSID to attach new volume to',
        )
        parser.add_argument(
            '--target',
            required=True,
            help='Specify target Logical Volume (LV) to attach',
        )
        parser.add_argument(
            '--no-systemd',
            dest='no_systemd',
            action='store_true',
            help='Skip checking OSD systemd unit',
        )
        return parser

    @decorators.needs_root
    def make_new_volume(self, osd_id, osd_fsid, devices, target_lv):
        """Tag the OSD's volumes for the new device and run
        ceph-bluestore-tool to attach it; tags are rolled back on failure."""
        osd_path = get_osd_path(osd_id, osd_fsid)
        mlogger.info(
            'Making new volume at {} for OSD: {} ({})'.format(
                target_lv.lv_path, osd_id, osd_path))
        tag_tracker = VolumeTagTracker(devices, target_lv)

        try:
            tag_tracker.update_tags_when_lv_create(self.create_type)

            stdout, stderr, exit_code = process.call([
                'ceph-bluestore-tool',
                '--path',
                osd_path,
                '--dev-target',
                target_lv.lv_path,
                '--command',
                'bluefs-bdev-new-{}'.format(self.create_type)
            ])
            if exit_code != 0:
                mlogger.error(
                    'failed to attach new volume, error code:{}'.format(
                        exit_code))
                raise SystemExit(
                    "Failed to attach new volume: {}".format(
                        self.args.target))
            else:
                system.chown(os.path.join(osd_path, "block.{}".format(
                    self.create_type)))
                terminal.success('New volume attached.')
        except:
            # bare except on purpose: undo() must also run for SystemExit
            tag_tracker.undo()
            raise
        return

    @decorators.needs_root
    def new_volume(self):
        """Validate arguments and target LV, then attach the new volume."""
        if self.args.osd_id and not self.args.no_systemd:
            osd_is_running = systemctl.osd_is_active(self.args.osd_id)
            if osd_is_running:
                mlogger.error('OSD ID is running, stop it with:'
                              ' systemctl stop ceph-osd@{}'.format(self.args.osd_id))
                raise SystemExit(
                    'Unable to attach new volume for OSD: {}'.format(
                        self.args.osd_id))

        target_lv = api.get_lv_by_fullname(self.args.target)
        if not target_lv:
            mlogger.error(
                'Target path {} is not a Logical Volume'.format(
                    self.args.target))
            raise SystemExit(
                'Unable to attach new volume : {}'.format(self.args.target))
        if target_lv.used_by_ceph:
            mlogger.error(
                'Target Logical Volume is already used by ceph: {}'.format(
                    self.args.target))
            raise SystemExit(
                'Unable to attach new volume : {}'.format(self.args.target))
        else:
            devices = find_associated_devices(self.args.osd_id,
                                              self.args.osd_fsid)
            self.make_new_volume(
                self.args.osd_id,
                self.args.osd_fsid,
                devices,
                target_lv)
class NewWAL(NewVolume):
    """``ceph-volume lvm new-wal``: attach a new WAL volume to an OSD."""

    help = 'Allocate new WAL volume for OSD at specified Logical Volume'

    def __init__(self, argv):
        super(NewWAL, self).__init__("wal", argv)

    def main(self):
        # fixed copy-paste from new-db: new-wal fails when a WAL (not a DB)
        # is already attached
        sub_command_help = dedent("""
        Attaches the given logical volume to the given OSD as a WAL volume.
        Logical volume format is vg/lv. Fails if OSD has already got attached WAL.
        Example:
        Attach vgname/lvname as a WAL volume to OSD 1
        ceph-volume lvm new-wal --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_wal
        """)
        parser = self.make_parser('ceph-volume lvm new-wal', sub_command_help)

        if len(self.argv) == 0:
            print(sub_command_help)
            return

        self.args = parser.parse_args(self.argv)

        self.new_volume()
class NewDB(NewVolume):
    """``ceph-volume lvm new-db``: attach a new DB volume to an OSD."""

    help = 'Allocate new DB volume for OSD at specified Logical Volume'

    def __init__(self, argv):
        super(NewDB, self).__init__("db", argv)

    def main(self):
        sub_command_help = dedent("""
        Attaches the given logical volume to the given OSD as a DB volume.
        Logical volume format is vg/lv. Fails if OSD has already got attached DB.
        Example:
        Attach vgname/lvname as a DB volume to OSD 1
        ceph-volume lvm new-db --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D --target vgname/new_db
        """)
        # with no arguments at all, show the help text instead of parsing
        if len(self.argv) == 0:
            print(sub_command_help)
            return
        parser = self.make_parser('ceph-volume lvm new-db', sub_command_help)
        self.args = parser.parse_args(self.argv)
        self.new_volume()
| 26,204 | 36.759366 | 120 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
|
from __future__ import print_function
import json
import logging
from textwrap import dedent
from ceph_volume.util import prepare as prepare_utils
from ceph_volume.util import encryption as encryption_utils
from ceph_volume.util import system, disk
from ceph_volume.util.arg_validators import exclude_group_options
from ceph_volume import conf, decorators, terminal
from ceph_volume.api import lvm as api
from .common import prepare_parser, rollback_osd
logger = logging.getLogger(__name__)
def prepare_dmcrypt(key, device, device_type, tags):
    """
    Helper for devices that are encrypted. The operations needed for
    block, db, wal devices are all the same

    Returns the dm-crypt mapper path for ``device``, or '' when no device
    was given.
    """
    if not device:
        return ''
    # the backing device's uuid was stored as an LVM tag, e.g. ceph.block_uuid
    uuid = tags['ceph.%s_uuid' % device_type]
    # format the device, then open ('decrypt') it under the uuid name
    encryption_utils.luks_format(key, device)
    encryption_utils.luks_open(key, device, uuid)
    return '/dev/mapper/%s' % uuid
def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid):
    """
    Set up the OSD directory and create the bluestore filesystem.

    :param block: The name of the logical volume for the bluestore data
    :param wal: a regular/plain disk or logical volume, to be used for block.wal
    :param db: a regular/plain disk or logical volume, to be used for block.db
    :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
    :param tags: The LVM tags dict, used to look up device uuids for dmcrypt
    :param osd_id: The OSD id
    :param fsid: The OSD fsid, also known as the OSD UUID
    """
    cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
    # encryption-only operations
    if secrets.get('dmcrypt_key'):
        # If encrypted, there is no need to create the lockbox keyring file because
        # bluestore re-creates the files and does not have support for other files
        # like the custom lockbox one. This will need to be done on activation.
        # format and open ('decrypt' devices) and re-assign the device and journal
        # variables so that the rest of the process can use the mapper paths
        key = secrets['dmcrypt_key']
        block = prepare_dmcrypt(key, block, 'block', tags)
        wal = prepare_dmcrypt(key, wal, 'wal', tags)
        db = prepare_dmcrypt(key, db, 'db', tags)

    # create the directory
    prepare_utils.create_osd_path(osd_id, tmpfs=True)
    # symlink the block
    prepare_utils.link_block(block, osd_id)
    # get the latest monmap
    prepare_utils.get_monmap(osd_id)
    # write the OSD keyring if it doesn't exist already
    prepare_utils.write_keyring(osd_id, cephx_secret)
    # prepare the osd filesystem
    prepare_utils.osd_mkfs_bluestore(
        osd_id, fsid,
        keyring=cephx_secret,
        wal=wal,
        db=db
    )
class Prepare(object):
help = 'Format an LVM device and associate it with an OSD'
    def __init__(self, argv):
        # raw CLI arguments, parsed later by main()/safe_prepare()
        self.argv = argv
        # set once an OSD id has been allocated; used for rollback on failure
        self.osd_id = None
def get_ptuuid(self, argument):
uuid = disk.get_partuuid(argument)
if not uuid:
terminal.error('blkid could not detect a PARTUUID for device: %s' % argument)
raise RuntimeError('unable to use device')
return uuid
    def setup_device(self, device_type, device_name, tags, size, slots):
        """
        Check if ``device`` is an lv, if so, set the tags, making sure to
        update the tags with the lv_uuid and lv_path which the incoming tags
        will not have.

        If the device is not a logical volume, then retrieve the partition UUID
        by querying ``blkid``

        Returns a ``(path, uuid, tags)`` tuple; ``tags`` is mutated in place
        and also attached to the LV when one is involved.
        """
        if device_name is None:
            return '', '', tags

        tags['ceph.type'] = device_type
        tags['ceph.vdo'] = api.is_vdo(device_name)

        try:
            # a ValueError here means device_name is not vg/lv
            vg_name, lv_name = device_name.split('/')
            lv = api.get_single_lv(filters={'lv_name': lv_name,
                                            'vg_name': vg_name})
        except ValueError:
            lv = None

        if lv:
            lv_uuid = lv.lv_uuid
            path = lv.lv_path
            tags['ceph.%s_uuid' % device_type] = lv_uuid
            tags['ceph.%s_device' % device_type] = path
            lv.set_tags(tags)
        elif disk.is_device(device_name):
            # We got a disk, create an lv
            lv_type = "osd-{}".format(device_type)
            name_uuid = system.generate_uuid()
            kwargs = {
                'device': device_name,
                'tags': tags,
                'slots': slots
            }
            #TODO use get_block_db_size and co here to get configured size in
            #conf file
            if size != 0:
                kwargs['size'] = size
            lv = api.create_lv(
                lv_type,
                name_uuid,
                **kwargs)
            path = lv.lv_path
            tags['ceph.{}_device'.format(device_type)] = path
            tags['ceph.{}_uuid'.format(device_type)] = lv.lv_uuid
            lv_uuid = lv.lv_uuid
            lv.set_tags(tags)
        else:
            # otherwise assume this is a regular disk partition
            name_uuid = self.get_ptuuid(device_name)
            path = device_name
            tags['ceph.%s_uuid' % device_type] = name_uuid
            tags['ceph.%s_device' % device_type] = path
            lv_uuid = name_uuid
        return path, lv_uuid, tags
def prepare_data_device(self, device_type, osd_uuid):
"""
Check if ``arg`` is a device or partition to create an LV out of it
with a distinct volume group name, assigning LV tags on it and
ultimately, returning the logical volume object. Failing to detect
a device or partition will result in error.
:param arg: The value of ``--data`` when parsing args
:param device_type: Usually ``block``
:param osd_uuid: The OSD uuid
"""
device = self.args.data
if disk.is_partition(device) or disk.is_device(device):
# we must create a vg, and then a single lv
lv_name_prefix = "osd-{}".format(device_type)
kwargs = {'device': device,
'tags': {'ceph.type': device_type},
'slots': self.args.data_slots,
}
logger.debug('data device size: {}'.format(self.args.data_size))
if self.args.data_size != 0:
kwargs['size'] = self.args.data_size
return api.create_lv(
lv_name_prefix,
osd_uuid,
**kwargs)
else:
error = [
'Cannot use device ({}).'.format(device),
'A vg/lv path or an existing device is needed']
raise RuntimeError(' '.join(error))
raise RuntimeError('no data logical volume found with: {}'.format(device))
def safe_prepare(self, args=None):
"""
An intermediate step between `main()` and `prepare()` so that we can
capture the `self.osd_id` in case we need to rollback
:param args: Injected args, usually from `lvm create` which compounds
both `prepare` and `create`
"""
if args is not None:
self.args = args
try:
vgname, lvname = self.args.data.split('/')
lv = api.get_single_lv(filters={'lv_name': lvname,
'vg_name': vgname})
except ValueError:
lv = None
if api.is_ceph_device(lv):
logger.info("device {} is already used".format(self.args.data))
raise RuntimeError("skipping {}, it is already prepared".format(self.args.data))
try:
self.prepare()
except Exception:
logger.exception('lvm prepare was unable to complete')
logger.info('will rollback OSD ID creation')
rollback_osd(self.args, self.osd_id)
raise
terminal.success("ceph-volume lvm prepare successful for: %s" % self.args.data)
def get_cluster_fsid(self):
"""
Allows using --cluster-fsid as an argument, but can fallback to reading
from ceph.conf if that is unset (the default behavior).
"""
if self.args.cluster_fsid:
return self.args.cluster_fsid
else:
return conf.ceph.get('global', 'fsid')
@decorators.needs_root
def prepare(self):
# FIXME we don't allow re-using a keyring, we always generate one for the
# OSD, this needs to be fixed. This could either be a file (!) or a string
# (!!) or some flags that we would need to compound into a dict so that we
# can convert to JSON (!!!)
secrets = {'cephx_secret': prepare_utils.create_key()}
cephx_lockbox_secret = ''
encrypted = 1 if self.args.dmcrypt else 0
cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()
if encrypted:
secrets['dmcrypt_key'] = encryption_utils.create_dmcrypt_key()
secrets['cephx_lockbox_secret'] = cephx_lockbox_secret
cluster_fsid = self.get_cluster_fsid()
osd_fsid = self.args.osd_fsid or system.generate_uuid()
crush_device_class = self.args.crush_device_class
if crush_device_class:
secrets['crush_device_class'] = crush_device_class
# reuse a given ID if it exists, otherwise create a new ID
self.osd_id = prepare_utils.create_id(osd_fsid, json.dumps(secrets), osd_id=self.args.osd_id)
tags = {
'ceph.osd_fsid': osd_fsid,
'ceph.osd_id': self.osd_id,
'ceph.cluster_fsid': cluster_fsid,
'ceph.cluster_name': conf.cluster,
'ceph.crush_device_class': crush_device_class,
'ceph.osdspec_affinity': prepare_utils.get_osdspec_affinity()
}
if self.args.bluestore:
try:
vg_name, lv_name = self.args.data.split('/')
block_lv = api.get_single_lv(filters={'lv_name': lv_name,
'vg_name': vg_name})
except ValueError:
block_lv = None
if not block_lv:
block_lv = self.prepare_data_device('block', osd_fsid)
tags['ceph.block_device'] = block_lv.lv_path
tags['ceph.block_uuid'] = block_lv.lv_uuid
tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
tags['ceph.encrypted'] = encrypted
tags['ceph.vdo'] = api.is_vdo(block_lv.lv_path)
wal_device, wal_uuid, tags = self.setup_device(
'wal',
self.args.block_wal,
tags,
self.args.block_wal_size,
self.args.block_wal_slots)
db_device, db_uuid, tags = self.setup_device(
'db',
self.args.block_db,
tags,
self.args.block_db_size,
self.args.block_db_slots)
tags['ceph.type'] = 'block'
block_lv.set_tags(tags)
prepare_bluestore(
block_lv.lv_path,
wal_device,
db_device,
secrets,
tags,
self.osd_id,
osd_fsid,
)
def main(self):
sub_command_help = dedent("""
Prepare an OSD by assigning an ID and FSID, registering them with the
cluster with an ID and FSID, formatting and mounting the volume, and
finally by adding all the metadata to the logical volumes using LVM
tags, so that it can later be discovered.
Once the OSD is ready, an ad-hoc systemd unit will be enabled so that
it can later get activated and the OSD daemon can get started.
Encryption is supported via dmcrypt and the --dmcrypt flag.
Existing logical volume (lv):
ceph-volume lvm prepare --data {vg/lv}
Existing block device (a logical volume will be created):
ceph-volume lvm prepare --data /path/to/device
Optionally, can consume db and wal devices, partitions or logical
volumes. A device will get a logical volume, partitions and existing
logical volumes will be used as is:
ceph-volume lvm prepare --data {vg/lv} --block.wal {partition} --block.db {/path/to/device}
""")
parser = prepare_parser(
prog='ceph-volume lvm prepare',
description=sub_command_help,
)
if len(self.argv) == 0:
print(sub_command_help)
return
exclude_group_options(parser, argv=self.argv, groups=['bluestore'])
self.args = parser.parse_args(self.argv)
# Default to bluestore here since defaulting it in add_argument may
# cause both to be True
if not self.args.bluestore:
self.args.bluestore = True
self.safe_prepare()
| 12,925 | 37.017647 | 103 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/trigger.py
|
from __future__ import print_function
import argparse
from textwrap import dedent
from ceph_volume.exceptions import SuffixParsingError
from ceph_volume import decorators
from .activate import Activate
def parse_osd_id(string):
    """
    Extract the numeric OSD id from a systemd ``{id}-{uuid}`` string.

    Everything before the first dash must be a non-empty run of digits;
    anything else is a parsing failure.
    """
    candidate, _, _ = string.partition('-')
    if candidate and candidate.isdigit():
        return candidate
    raise SuffixParsingError('OSD id', string)
def parse_osd_uuid(string):
    """
    Return the uuid portion of a systemd ``{id}-{uuid}`` string.

    The id (with its trailing dash) is stripped first; an empty remainder
    means the suffix could not be parsed.
    """
    id_prefix = '%s-' % parse_osd_id(string)
    # drop the leading "{id}-" so only the uuid part remains
    remainder = string.split(id_prefix, 1)[-1]
    if not remainder:
        raise SuffixParsingError('OSD uuid', string)
    return remainder
class Trigger(object):
    """Proxy systemd-driven activation to ``ceph-volume lvm activate``."""
    help = 'systemd helper to activate an OSD'
    def __init__(self, argv):
        # raw sub-command argv, as handed over by the systemd unit
        self.argv = argv
    @decorators.needs_root
    def main(self):
        sub_command_help = dedent("""
        ** DO NOT USE DIRECTLY **
        This tool is meant to help the systemd unit that knows about OSDs.
        Proxy OSD activation to ``ceph-volume lvm activate`` by parsing the
        input from systemd, detecting the UUID and ID associated with an OSD::
            ceph-volume lvm trigger {SYSTEMD-DATA}
        The systemd "data" is expected to be in the format of::
            {OSD ID}-{OSD UUID}
        The lvs associated with the OSD need to have been prepared previously,
        so that all needed tags and metadata exist.
        """)
        # without arguments there is nothing to parse: show help and bail out
        if len(self.argv) == 0:
            print(sub_command_help)
            return
        parser = argparse.ArgumentParser(
            prog='ceph-volume lvm trigger',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )
        parser.add_argument(
            'systemd_data',
            metavar='SYSTEMD_DATA',
            nargs='?',
            help='Data from a systemd unit containing ID and UUID of the OSD, like asdf-lkjh-0'
        )
        parsed = parser.parse_args(self.argv)
        systemd_string = parsed.systemd_data
        # split the "{id}-{uuid}" payload and hand both parts to activate
        Activate([
            '--auto-detect-objectstore',
            parse_osd_id(systemd_string),
            parse_osd_uuid(systemd_string),
        ]).main()
| 2,174 | 29.633803 | 95 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/lvm/zap.py
|
import argparse
import os
import logging
import time
from textwrap import dedent
from ceph_volume import decorators, terminal, process
from ceph_volume.api import lvm as api
from ceph_volume.util import system, encryption, disk, arg_validators, str_to_int, merge_dict
from ceph_volume.util.device import Device
from ceph_volume.systemd import systemctl
logger = logging.getLogger(__name__)
mlogger = terminal.MultiLogger(__name__)
def wipefs(path):
    """
    Removes the filesystem from an lv or partition.

    Retries on failure, to work around a probable race condition where the
    device is still briefly held right after creation.

    Environment variables supported::

    * ``CEPH_VOLUME_WIPEFS_TRIES``: Defaults to 8
    * ``CEPH_VOLUME_WIPEFS_INTERVAL``: Defaults to 5

    :param path: absolute path to the lv or partition to wipe
    :raises RuntimeError: when every attempt fails
    """
    tries = str_to_int(
        os.environ.get('CEPH_VOLUME_WIPEFS_TRIES', 8)
    )
    interval = str_to_int(
        os.environ.get('CEPH_VOLUME_WIPEFS_INTERVAL', 5)
    )
    for attempt in range(tries):
        stdout, stderr, exit_code = process.call([
            'wipefs',
            '--all',
            path
        ])
        if exit_code == 0:
            return
        # this could narrow the retry by poking in the stderr of the output
        # to verify that 'probing initialization failed' appears, but
        # better to be broad in this retry to prevent missing on
        # a different message that needs to be retried as well
        terminal.warning(
            'failed to wipefs device, will try again to workaround probable race condition'
        )
        if attempt < tries - 1:
            # no point sleeping after the final attempt; fail fast instead
            time.sleep(interval)
    raise RuntimeError("could not complete wipefs on device: %s" % path)
def zap_data(path):
    """
    Overwrite the start of ``path`` with zeroes.  ``path`` should be an
    absolute path to an lv or partition.

    10M of zeroes are written so that no trace of a previous filesystem
    is left on the device.
    """
    command = [
        'dd',
        'if=/dev/zero',
        'of={path}'.format(path=path),
        'bs=1M',
        'count=10',
        'conv=fsync',
    ]
    process.run(command)
def find_associated_devices(osd_id=None, osd_fsid=None):
    """
    From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the
    system that match those tag values, further detect if any partitions are
    part of the OSD, and then return the set of LVs and partitions (if any).

    :param osd_id: OSD id, matched against the ``ceph.osd_id`` LV tag
    :param osd_fsid: OSD fsid, matched against the ``ceph.osd_fsid`` LV tag
    :raises RuntimeError: when no LV carries the requested tag(s)
    """
    lv_tags = {}
    if osd_id:
        lv_tags['ceph.osd_id'] = osd_id
    if osd_fsid:
        lv_tags['ceph.osd_fsid'] = osd_fsid
    lvs = api.get_lvs(tags=lv_tags)
    if not lvs:
        # parentheses matter: without them this parses as
        # ('%s' % osd_id) or osd_fsid, which never shows the fsid
        raise RuntimeError('Unable to find any LV for zapping OSD: '
                           '%s' % (osd_id or osd_fsid))
    devices_to_zap = ensure_associated_lvs(lvs, lv_tags)
    return [Device(path) for path in set(devices_to_zap) if path]
def ensure_associated_lvs(lvs, lv_tags=None):
    """
    Go through each LV and ensure if backing devices (journal, wal, block)
    are LVs or partitions, so that they can be accurately reported.

    :param lvs: LVs already matched for the OSD being zapped
    :param lv_tags: optional tag filter (e.g. ``{'ceph.osd_id': '1'}``) used
                    to narrow the lookup of backing db/wal LVs
    :return: de-duplicated list of device paths (LV paths and partitions)
    """
    # avoid a mutable default argument; an empty dict means "no filtering"
    lv_tags = lv_tags if lv_tags is not None else {}
    # look for many LVs for each backing type, because it is possible to
    # receive a filtering for osd.1, and have multiple failed deployments
    # leaving many journals with osd.1 - usually, only a single LV will be
    # returned
    db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
    wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
    backing_devices = [(db_lvs, 'db'),
                       (wal_lvs, 'wal')]
    verified_devices = []
    for lv in lvs:
        # go through each lv and append it, otherwise query `blkid` to find
        # a physical device. Do this for each type (journal,db,wal) regardless
        # if they have been processed in the previous LV, so that bad devices
        # with the same ID can be caught
        for ceph_lvs, _type in backing_devices:
            if ceph_lvs:
                verified_devices.extend([l.lv_path for l in ceph_lvs])
                continue
            # must be a disk partition, by querying blkid by the uuid we are
            # ensuring that the device path is always correct
            try:
                device_uuid = lv.tags['ceph.%s_uuid' % _type]
            except KeyError:
                # Bluestore will not have ceph.journal_uuid, and Filestore
                # will not have ceph.db_uuid
                continue
            osd_device = disk.get_device_from_partuuid(device_uuid)
            if not osd_device:
                # if the osd_device is not found by the partuuid, then it is
                # not possible to ensure this device exists anymore, so skip it
                continue
            verified_devices.append(osd_device)
        verified_devices.append(lv.lv_path)
    # reduce the list from all the duplicates that were added
    return list(set(verified_devices))
class Zap(object):
    """
    Implements ``ceph-volume lvm zap``: wipe filesystems and data from LVs,
    partitions or raw devices, optionally destroying the LVM objects
    (``--destroy``) or locating devices by OSD id/fsid.
    """
    help = 'Removes all data and filesystems from a logical volume or partition.'
    def __init__(self, argv):
        # raw sub-command argv; parsed in main()
        self.argv = argv
    def unmount_lv(self, lv):
        """Unmount the OSD directory backed by ``lv`` (if mounted) and close
        its dmcrypt mapping when the LV is encrypted."""
        # prefer the conventional OSD mount point when the tags identify one
        if lv.tags.get('ceph.cluster_name') and lv.tags.get('ceph.osd_id'):
            lv_path = "/var/lib/ceph/osd/{}-{}".format(lv.tags['ceph.cluster_name'], lv.tags['ceph.osd_id'])
        else:
            lv_path = lv.lv_path
        dmcrypt_uuid = lv.lv_uuid
        dmcrypt = lv.encrypted
        if system.path_is_mounted(lv_path):
            mlogger.info("Unmounting %s", lv_path)
            system.unmount(lv_path)
        if dmcrypt and dmcrypt_uuid:
            self.dmcrypt_close(dmcrypt_uuid)
    def zap_lv(self, device):
        """
        Device examples: vg-name/lv-name, /dev/vg-name/lv-name
        Requirements: Must be a logical volume (LV)
        """
        lv = api.get_single_lv(filters={'lv_name': device.lv_name, 'vg_name':
                                        device.vg_name})
        self.unmount_lv(lv)
        wipefs(device.path)
        zap_data(device.path)
        if self.args.destroy:
            lvs = api.get_lvs(filters={'vg_name': device.vg_name})
            if lvs == []:
                # NOTE(review): no %s placeholder for device.vg_name — the
                # argument is never rendered; likely meant '... %s'
                mlogger.info('No LVs left, exiting', device.vg_name)
                return
            elif len(lvs) <= 1:
                # last LV in the VG: take the whole VG and its PVs down too
                mlogger.info('Only 1 LV left in VG, will proceed to destroy '
                             'volume group %s', device.vg_name)
                pvs = api.get_pvs(filters={'lv_uuid': lv.lv_uuid})
                api.remove_vg(device.vg_name)
                for pv in pvs:
                    api.remove_pv(pv.pv_name)
            else:
                mlogger.info('More than 1 LV left in VG, will proceed to '
                             'destroy LV only')
            # NOTE(review): runs for both branches above; after remove_vg the
            # LV is already gone — confirm remove_lv tolerates that case
            mlogger.info('Removing LV because --destroy was given: %s',
                         device.path)
            api.remove_lv(device.path)
        elif lv:
            # just remove all lvm metadata, leaving the LV around
            lv.clear_tags()
    def zap_partition(self, device):
        """
        Device example: /dev/sda1
        Requirements: Must be a partition
        """
        if device.is_encrypted:
            # find the holder
            holders = [
                '/dev/%s' % holder for holder in device.sys_api.get('holders', [])
            ]
            # close any dm mapper that resolves to one of the holders
            for mapper_uuid in os.listdir('/dev/mapper'):
                mapper_path = os.path.join('/dev/mapper', mapper_uuid)
                if os.path.realpath(mapper_path) in holders:
                    self.dmcrypt_close(mapper_uuid)
        if system.device_is_mounted(device.path):
            mlogger.info("Unmounting %s", device.path)
            system.unmount(device.path)
        wipefs(device.path)
        zap_data(device.path)
        if self.args.destroy:
            mlogger.info("Destroying partition since --destroy was used: %s" % device.path)
            disk.remove_partition(device)
    def zap_lvm_member(self, device):
        """
        An LVM member may have more than one LV and or VG, for example if it is
        a raw device with multiple partitions each belonging to a different LV
        Device example: /dev/sda
        Requirements: An LV or VG present in the device, making it an LVM member
        """
        for lv in device.lvs:
            if lv.lv_name:
                mlogger.info('Zapping lvm member {}. lv_path is {}'.format(device.path, lv.lv_path))
                self.zap_lv(Device(lv.lv_path))
            else:
                # no LV name: the VG exists but holds no usable LV
                vg = api.get_single_vg(filters={'vg_name': lv.vg_name})
                if vg:
                    mlogger.info('Found empty VG {}, removing'.format(vg.vg_name))
                    api.remove_vg(vg.vg_name)
    def zap_raw_device(self, device):
        """
        Any whole (raw) device passed in as input will be processed here,
        checking for LVM membership and partitions (if any).
        Device example: /dev/sda
        Requirements: None
        """
        if not self.args.destroy:
            # the use of dd on a raw device causes the partition table to be
            # destroyed
            mlogger.warning(
                '--destroy was not specified, but zapping a whole device will remove the partition table'
            )
        # look for partitions and zap those
        for part_name in device.sys_api.get('partitions', {}).keys():
            self.zap_partition(Device('/dev/%s' % part_name))
        wipefs(device.path)
        zap_data(device.path)
    @decorators.needs_root
    def zap(self, devices=None):
        """Zap each given device by dispatching on its detected kind
        (LVM member, LV, partition, raw device); refuse mapper devices."""
        devices = devices or self.args.devices
        for device in devices:
            mlogger.info("Zapping: %s", device.path)
            if device.is_mapper and not device.is_mpath:
                terminal.error("Refusing to zap the mapper device: {}".format(device))
                raise SystemExit(1)
            if device.is_lvm_member:
                self.zap_lvm_member(device)
            if device.is_lv:
                self.zap_lv(device)
            if device.is_partition:
                self.zap_partition(device)
            if device.is_device:
                self.zap_raw_device(device)
        if self.args.devices:
            terminal.success(
                "Zapping successful for: %s" % ", ".join([str(d) for d in self.args.devices])
            )
        else:
            # devices were derived from --osd-id/--osd-fsid instead
            identifier = self.args.osd_id or self.args.osd_fsid
            terminal.success(
                "Zapping successful for OSD: %s" % identifier
            )
    @decorators.needs_root
    def zap_osd(self):
        """Resolve devices from --osd-id/--osd-fsid and zap them, refusing
        to act while the OSD service is still running (unless --no-systemd)."""
        if self.args.osd_id and not self.args.no_systemd:
            osd_is_running = systemctl.osd_is_active(self.args.osd_id)
            if osd_is_running:
                mlogger.error("OSD ID %s is running, stop it with:" % self.args.osd_id)
                mlogger.error("systemctl stop ceph-osd@%s" % self.args.osd_id)
                raise SystemExit("Unable to zap devices associated with OSD ID: %s" % self.args.osd_id)
        devices = find_associated_devices(self.args.osd_id, self.args.osd_fsid)
        self.zap(devices)
    def dmcrypt_close(self, dmcrypt_uuid):
        """Close the dm-crypt mapping identified by ``dmcrypt_uuid``."""
        dmcrypt_path = "/dev/mapper/{}".format(dmcrypt_uuid)
        mlogger.info("Closing encrypted path %s", dmcrypt_path)
        encryption.dmcrypt_close(dmcrypt_path)
    def main(self):
        """Parse argv, print help when called without arguments, then zap
        either the listed devices or the ones matching --osd-id/--osd-fsid."""
        sub_command_help = dedent("""
        Zaps the given logical volume(s), raw device(s) or partition(s) for reuse by ceph-volume.
        If given a path to a logical volume it must be in the format of vg/lv. Any
        filesystems present on the given device, vg/lv, or partition will be removed and
        all data will be purged.
        If the logical volume, raw device or partition is being used for any ceph related
        mount points they will be unmounted.
        However, the lv or partition will be kept intact.
        Example calls for supported scenarios:
          Zapping a logical volume:
              ceph-volume lvm zap {vg name/lv name}
          Zapping a partition:
              ceph-volume lvm zap /dev/sdc1
          Zapping many raw devices:
              ceph-volume lvm zap /dev/sda /dev/sdb /db/sdc
          Zapping devices associated with an OSD ID:
              ceph-volume lvm zap --osd-id 1
            Optionally include the OSD FSID
              ceph-volume lvm zap --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D
        If the --destroy flag is given and you are zapping a raw device or partition
        then all vgs and lvs that exist on that raw device or partition will be destroyed.
        This is especially useful if a raw device or partition was used by ceph-volume lvm create
        or ceph-volume lvm prepare commands previously and now you want to reuse that device.
        For example:
          ceph-volume lvm zap /dev/sda --destroy
        If the --destroy flag is given and you are zapping an lv then the lv is still
        kept intact for reuse.
        """)
        parser = argparse.ArgumentParser(
            prog='ceph-volume lvm zap',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=sub_command_help,
        )
        parser.add_argument(
            'devices',
            metavar='DEVICES',
            nargs='*',
            type=arg_validators.ValidZapDevice(gpt_ok=True),
            default=[],
            help='Path to one or many lv (as vg/lv), partition (as /dev/sda1) or device (as /dev/sda)'
        )
        parser.add_argument(
            '--destroy',
            action='store_true',
            default=False,
            help='Destroy all volume groups and logical volumes if you are zapping a raw device or partition',
        )
        parser.add_argument(
            '--osd-id',
            type=arg_validators.valid_osd_id,
            help='Specify an OSD ID to detect associated devices for zapping',
        )
        parser.add_argument(
            '--osd-fsid',
            help='Specify an OSD FSID to detect associated devices for zapping',
        )
        parser.add_argument(
            '--no-systemd',
            dest='no_systemd',
            action='store_true',
            help='Skip systemd unit checks',
        )
        if len(self.argv) == 0:
            print(sub_command_help)
            return
        self.args = parser.parse_args(self.argv)
        if self.args.osd_id or self.args.osd_fsid:
            self.zap_osd()
        else:
            self.zap()
| 14,382 | 34.339066 | 110 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/devices/raw/__init__.py
|
from .main import Raw # noqa
| 29 | 14 | 28 |
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.