ceph-main/qa/tasks/ceph_iscsi_client.py
"""
Set up ceph-iscsi client.
"""
import logging
import contextlib
from textwrap import dedent
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Set up ceph-iscsi client.
tasks:
ceph_iscsi_client:
clients: [client.1]
"""
log.info('Setting up ceph-iscsi client...')
for role in config['clients']:
(remote,) = (ctx.cluster.only(role).remotes.keys())
conf = dedent('''
InitiatorName=iqn.1994-05.com.redhat:client
''')
path = "/etc/iscsi/initiatorname.iscsi"
remote.sudo_write_file(path, conf, mkdir=True)
# the restart is needed after the above change is applied
remote.run(args=['sudo', 'systemctl', 'restart', 'iscsid'])
remote.run(args=['sudo', 'modprobe', 'dm_multipath'])
remote.run(args=['sudo', 'mpathconf', '--enable'])
conf = dedent('''
devices {
device {
vendor "LIO-ORG"
product "TCMU device"
hardware_handler "1 alua"
path_grouping_policy "failover"
path_selector "queue-length 0"
failback 60
path_checker tur
prio alua
prio_args exclusive_pref_bit
fast_io_fail_tmo 25
no_path_retry queue
}
}
''')
path = "/etc/multipath.conf"
remote.sudo_write_file(path, conf, append=True)
remote.run(args=['sudo', 'systemctl', 'start', 'multipathd'])
yield
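# A hedged usage sketch: this task is typically listed in a teuthology job
# yaml after the cluster is up; the role name below is illustrative.
#
#   tasks:
#   - ceph:
#   - ceph_iscsi_client:
#       clients: [client.1]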
ceph-main/qa/tasks/ceph_manager.py
"""
ceph manager -- Thrasher and CephManager objects
"""
from functools import wraps
import contextlib
import errno
import random
import signal
import time
import gevent
import base64
import json
import logging
import threading
import traceback
import os
import shlex
from io import BytesIO, StringIO
from subprocess import DEVNULL
from teuthology import misc as teuthology
from tasks.scrub import Scrubber
from tasks.util.rados import cmd_erasure_code_profile
from tasks.util import get_remote
from teuthology.contextutil import safe_while
from teuthology.orchestra.remote import Remote
from teuthology.orchestra import run
from teuthology.parallel import parallel
from teuthology.exceptions import CommandFailedError
from tasks.thrasher import Thrasher
DEFAULT_CONF_PATH = '/etc/ceph/ceph.conf'
log = logging.getLogger(__name__)
# this is for cephadm clusters
def shell(ctx, cluster_name, remote, args, name=None, **kwargs):
extra_args = []
if name:
extra_args = ['-n', name]
return remote.run(
args=[
'sudo',
ctx.cephadm,
'--image', ctx.ceph[cluster_name].image,
'shell',
] + extra_args + [
'--fsid', ctx.ceph[cluster_name].fsid,
'--',
] + args,
**kwargs
)
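# Hedged usage sketch for shell(): assuming ctx was populated by the cephadm
# task, the call below would run `ceph health` inside the cephadm shell
# container (the cluster name and remote are illustrative):
#
#   shell(ctx, 'ceph', remote, args=['ceph', 'health'])
#
# which composes roughly:
#   sudo <ctx.cephadm> --image <image> shell --fsid <fsid> -- ceph health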
# this is for rook clusters
def toolbox(ctx, cluster_name, args, **kwargs):
return ctx.rook[cluster_name].remote.run(
args=[
'kubectl',
'-n', 'rook-ceph',
'exec',
ctx.rook[cluster_name].toolbox,
'--',
] + args,
**kwargs
)
def write_conf(ctx, conf_path=DEFAULT_CONF_PATH, cluster='ceph'):
conf_fp = BytesIO()
ctx.ceph[cluster].conf.write(conf_fp)
conf_fp.seek(0)
writes = ctx.cluster.run(
args=[
'sudo', 'mkdir', '-p', '/etc/ceph', run.Raw('&&'),
'sudo', 'chmod', '0755', '/etc/ceph', run.Raw('&&'),
'sudo', 'tee', conf_path, run.Raw('&&'),
'sudo', 'chmod', '0644', conf_path,
run.Raw('>'), '/dev/null',
],
stdin=run.PIPE,
wait=False)
teuthology.feed_many_stdins_and_close(conf_fp, writes)
run.wait(writes)
def get_valgrind_args(testdir, name, preamble, v, exit_on_first_error=True, cd=True):
"""
Build a command line for running valgrind.
testdir - test results directory
    name - name of daemon (for naming the log file)
    preamble - stuff we should run before valgrind
    v - valgrind arguments
    exit_on_first_error - if True, ask valgrind to exit on the first error
    cd - if True, cd into testdir before running the command
"""
if v is None:
return preamble
if not isinstance(v, list):
v = [v]
# https://tracker.ceph.com/issues/44362
preamble.extend([
'env', 'OPENSSL_ia32cap=~0x1000000000000000',
])
val_path = '/var/log/ceph/valgrind'
if '--tool=memcheck' in v or '--tool=helgrind' in v:
extra_args = [
'valgrind',
'--trace-children=no',
'--child-silent-after-fork=yes',
'--soname-synonyms=somalloc=*tcmalloc*',
'--num-callers=50',
'--suppressions={tdir}/valgrind.supp'.format(tdir=testdir),
'--xml=yes',
'--xml-file={vdir}/{n}.log'.format(vdir=val_path, n=name),
'--time-stamp=yes',
'--vgdb=yes',
]
else:
extra_args = [
'valgrind',
'--trace-children=no',
'--child-silent-after-fork=yes',
'--soname-synonyms=somalloc=*tcmalloc*',
'--suppressions={tdir}/valgrind.supp'.format(tdir=testdir),
'--log-file={vdir}/{n}.log'.format(vdir=val_path, n=name),
'--time-stamp=yes',
'--vgdb=yes',
]
if exit_on_first_error:
extra_args.extend([
# at least Valgrind 3.14 is required
'--exit-on-first-error=yes',
'--error-exitcode=42',
])
args = []
if cd:
args += ['cd', testdir, run.Raw('&&')]
args += preamble + extra_args + v
log.debug('running %s under valgrind with args %s', name, args)
return args
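# Hedged usage sketch for get_valgrind_args(); the values are illustrative:
#
#   args = get_valgrind_args('/home/ubuntu/cephtest', 'osd.0',
#                            preamble=['sudo'], v='--tool=memcheck')
#
# args would then be ['cd', '/home/ubuntu/cephtest', '&&', 'sudo', 'env',
# 'OPENSSL_ia32cap=~0x1000000000000000', 'valgrind', ..., '--tool=memcheck'];
# the caller appends the actual daemon command afterwards.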
def mount_osd_data(ctx, remote, cluster, osd):
"""
Mount a remote OSD
:param ctx: Context
:param remote: Remote site
:param cluster: name of ceph cluster
:param osd: Osd name
"""
log.debug('Mounting data for osd.{o} on {r}'.format(o=osd, r=remote))
role = "{0}.osd.{1}".format(cluster, osd)
alt_role = role if cluster != 'ceph' else "osd.{0}".format(osd)
if remote in ctx.disk_config.remote_to_roles_to_dev:
if alt_role in ctx.disk_config.remote_to_roles_to_dev[remote]:
role = alt_role
if role not in ctx.disk_config.remote_to_roles_to_dev[remote]:
return
dev = ctx.disk_config.remote_to_roles_to_dev[remote][role]
mount_options = ctx.disk_config.\
remote_to_roles_to_dev_mount_options[remote][role]
fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][role]
mnt = os.path.join('/var/lib/ceph/osd', '{0}-{1}'.format(cluster, osd))
log.info('Mounting osd.{o}: dev: {n}, cluster: {c}'
'mountpoint: {p}, type: {t}, options: {v}'.format(
o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options,
c=cluster))
remote.run(
args=[
'sudo',
'mount',
'-t', fstype,
'-o', ','.join(mount_options),
dev,
mnt,
]
)
def log_exc(func):
@wraps(func)
def wrapper(self):
try:
return func(self)
except:
self.log(traceback.format_exc())
raise
return wrapper
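# log_exc decorates greenlet bodies (see do_sighup and friends below) so a
# traceback is logged via the instance's own logger before re-raising, e.g.:
#
#   @log_exc
#   def do_sighup(self):
#       ...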
class PoolType:
REPLICATED = 1
ERASURE_CODED = 3
class OSDThrasher(Thrasher):
"""
Object used to thrash Ceph
"""
def __init__(self, manager, config, name, logger):
super(OSDThrasher, self).__init__()
self.ceph_manager = manager
self.cluster = manager.cluster
self.ceph_manager.wait_for_clean()
osd_status = self.ceph_manager.get_osd_status()
self.in_osds = osd_status['in']
self.live_osds = osd_status['live']
self.out_osds = osd_status['out']
self.dead_osds = osd_status['dead']
self.stopping = False
self.logger = logger
self.config = config
self.name = name
self.revive_timeout = self.config.get("revive_timeout", 360)
self.pools_to_fix_pgp_num = set()
if self.config.get('powercycle'):
self.revive_timeout += 120
self.clean_wait = self.config.get('clean_wait', 0)
self.minin = self.config.get("min_in", 4)
self.chance_move_pg = self.config.get('chance_move_pg', 1.0)
self.sighup_delay = self.config.get('sighup_delay')
self.optrack_toggle_delay = self.config.get('optrack_toggle_delay')
self.dump_ops_enable = self.config.get('dump_ops_enable')
self.noscrub_toggle_delay = self.config.get('noscrub_toggle_delay')
self.chance_thrash_cluster_full = self.config.get('chance_thrash_cluster_full', .05)
self.chance_thrash_pg_upmap = self.config.get('chance_thrash_pg_upmap', 1.0)
        self.chance_thrash_pg_upmap_items = self.config.get('chance_thrash_pg_upmap_items', 1.0)
self.random_eio = self.config.get('random_eio')
self.chance_force_recovery = self.config.get('chance_force_recovery', 0.3)
num_osds = self.in_osds + self.out_osds
self.max_pgs = self.config.get("max_pgs_per_pool_osd", 1200) * len(num_osds)
self.min_pgs = self.config.get("min_pgs_per_pool_osd", 1) * len(num_osds)
if self.config is None:
self.config = dict()
# prevent monitor from auto-marking things out while thrasher runs
# try both old and new tell syntax, in case we are testing old code
self.saved_options = []
# assuming that the default settings do not vary from one daemon to
# another
first_mon = teuthology.get_first_mon(manager.ctx, self.config).split('.')
opts = [('mon', 'mon_osd_down_out_interval', 0)]
        # why do we disable marking an OSD out automatically? :/
for service, opt, new_value in opts:
old_value = manager.get_config(first_mon[0],
first_mon[1],
opt)
self.saved_options.append((service, opt, old_value))
manager.inject_args(service, '*', opt, new_value)
# initialize ceph_objectstore_tool property - must be done before
# do_thrash is spawned - http://tracker.ceph.com/issues/18799
if (self.config.get('powercycle') or
not self.cmd_exists_on_osds("ceph-objectstore-tool") or
self.config.get('disable_objectstore_tool_tests', False)):
self.ceph_objectstore_tool = False
if self.config.get('powercycle'):
self.log("Unable to test ceph-objectstore-tool, "
"powercycle testing")
else:
self.log("Unable to test ceph-objectstore-tool, "
"not available on all OSD nodes")
else:
self.ceph_objectstore_tool = \
self.config.get('ceph_objectstore_tool', True)
# spawn do_thrash
self.thread = gevent.spawn(self.do_thrash)
if self.sighup_delay:
self.sighup_thread = gevent.spawn(self.do_sighup)
if self.optrack_toggle_delay:
self.optrack_toggle_thread = gevent.spawn(self.do_optrack_toggle)
if self.dump_ops_enable == "true":
self.dump_ops_thread = gevent.spawn(self.do_dump_ops)
if self.noscrub_toggle_delay:
self.noscrub_toggle_thread = gevent.spawn(self.do_noscrub_toggle)
def log(self, msg, *args, **kwargs):
self.logger.info(msg, *args, **kwargs)
def cmd_exists_on_osds(self, cmd):
if self.ceph_manager.cephadm or self.ceph_manager.rook:
return True
allremotes = self.ceph_manager.ctx.cluster.only(\
teuthology.is_type('osd', self.cluster)).remotes.keys()
allremotes = list(set(allremotes))
for remote in allremotes:
proc = remote.run(args=['type', cmd], wait=True,
check_status=False, stdout=BytesIO(),
stderr=BytesIO())
if proc.exitstatus != 0:
                return False
        return True
def run_ceph_objectstore_tool(self, remote, osd, cmd):
if self.ceph_manager.cephadm:
return shell(
self.ceph_manager.ctx, self.ceph_manager.cluster, remote,
args=['ceph-objectstore-tool', '--err-to-stderr'] + cmd,
name=osd,
wait=True, check_status=False,
stdout=StringIO(),
stderr=StringIO())
elif self.ceph_manager.rook:
assert False, 'not implemented'
else:
return remote.run(
args=['sudo', 'adjust-ulimits', 'ceph-objectstore-tool', '--err-to-stderr'] + cmd,
wait=True, check_status=False,
stdout=StringIO(),
stderr=StringIO())
def run_ceph_bluestore_tool(self, remote, osd, cmd):
if self.ceph_manager.cephadm:
return shell(
self.ceph_manager.ctx, self.ceph_manager.cluster, remote,
args=['ceph-bluestore-tool', '--err-to-stderr'] + cmd,
name=osd,
wait=True, check_status=False,
stdout=StringIO(),
stderr=StringIO())
elif self.ceph_manager.rook:
assert False, 'not implemented'
else:
return remote.run(
args=['sudo', 'ceph-bluestore-tool', '--err-to-stderr'] + cmd,
wait=True, check_status=False,
stdout=StringIO(),
stderr=StringIO())
def kill_osd(self, osd=None, mark_down=False, mark_out=False):
"""
:param osd: Osd to be killed.
:mark_down: Mark down if true.
:mark_out: Mark out if true.
"""
if osd is None:
osd = random.choice(self.live_osds)
self.log("Killing osd %s, live_osds are %s" % (str(osd),
str(self.live_osds)))
self.live_osds.remove(osd)
self.dead_osds.append(osd)
self.ceph_manager.kill_osd(osd)
if mark_down:
self.ceph_manager.mark_down_osd(osd)
if mark_out and osd in self.in_osds:
self.out_osd(osd)
if self.ceph_objectstore_tool:
self.log("Testing ceph-objectstore-tool on down osd.%s" % osd)
remote = self.ceph_manager.find_remote('osd', osd)
FSPATH = self.ceph_manager.get_filepath()
JPATH = os.path.join(FSPATH, "journal")
exp_osd = imp_osd = osd
self.log('remote for osd %s is %s' % (osd, remote))
exp_remote = imp_remote = remote
# If an older osd is available we'll move a pg from there
if (len(self.dead_osds) > 1 and
random.random() < self.chance_move_pg):
exp_osd = random.choice(self.dead_osds[:-1])
exp_remote = self.ceph_manager.find_remote('osd', exp_osd)
self.log('remote for exp osd %s is %s' % (exp_osd, exp_remote))
prefix = [
'--no-mon-config',
'--log-file=/var/log/ceph/objectstore_tool.$pid.log',
]
if self.ceph_manager.rook:
assert False, 'not implemented'
if not self.ceph_manager.cephadm:
# ceph-objectstore-tool might be temporarily absent during an
# upgrade - see http://tracker.ceph.com/issues/18014
with safe_while(sleep=15, tries=40, action="type ceph-objectstore-tool") as proceed:
while proceed():
proc = exp_remote.run(args=['type', 'ceph-objectstore-tool'],
wait=True, check_status=False, stdout=BytesIO(),
stderr=BytesIO())
if proc.exitstatus == 0:
break
log.debug("ceph-objectstore-tool binary not present, trying again")
# ceph-objectstore-tool might bogusly fail with "OSD has the store locked"
# see http://tracker.ceph.com/issues/19556
with safe_while(sleep=15, tries=40, action="ceph-objectstore-tool --op list-pgs") as proceed:
while proceed():
proc = self.run_ceph_objectstore_tool(
exp_remote, 'osd.%s' % exp_osd,
prefix + [
'--data-path', FSPATH.format(id=exp_osd),
'--journal-path', JPATH.format(id=exp_osd),
'--op', 'list-pgs',
])
if proc.exitstatus == 0:
break
elif (proc.exitstatus == 1 and
proc.stderr.getvalue() == "OSD has the store locked"):
continue
else:
raise Exception("ceph-objectstore-tool: "
"exp list-pgs failure with status {ret}".
format(ret=proc.exitstatus))
pgs = proc.stdout.getvalue().split('\n')[:-1]
if len(pgs) == 0:
self.log("No PGs found for osd.{osd}".format(osd=exp_osd))
return
pg = random.choice(pgs)
#exp_path = teuthology.get_testdir(self.ceph_manager.ctx)
#exp_path = os.path.join(exp_path, '{0}.data'.format(self.cluster))
exp_path = os.path.join('/var/log/ceph', # available inside 'shell' container
"exp.{pg}.{id}".format(
pg=pg,
id=exp_osd))
if self.ceph_manager.cephadm:
exp_host_path = os.path.join(
'/var/log/ceph',
self.ceph_manager.ctx.ceph[self.ceph_manager.cluster].fsid,
"exp.{pg}.{id}".format(
pg=pg,
id=exp_osd))
else:
exp_host_path = exp_path
# export
# Can't use new export-remove op since this is part of upgrade testing
proc = self.run_ceph_objectstore_tool(
exp_remote, 'osd.%s' % exp_osd,
prefix + [
'--data-path', FSPATH.format(id=exp_osd),
'--journal-path', JPATH.format(id=exp_osd),
'--op', 'export',
'--pgid', pg,
'--file', exp_path,
])
if proc.exitstatus:
raise Exception("ceph-objectstore-tool: "
"export failure with status {ret}".
format(ret=proc.exitstatus))
# remove
proc = self.run_ceph_objectstore_tool(
exp_remote, 'osd.%s' % exp_osd,
prefix + [
'--data-path', FSPATH.format(id=exp_osd),
'--journal-path', JPATH.format(id=exp_osd),
'--force',
'--op', 'remove',
'--pgid', pg,
])
if proc.exitstatus:
raise Exception("ceph-objectstore-tool: "
"remove failure with status {ret}".
format(ret=proc.exitstatus))
# If there are at least 2 dead osds we might move the pg
if exp_osd != imp_osd:
# If pg isn't already on this osd, then we will move it there
proc = self.run_ceph_objectstore_tool(
imp_remote,
'osd.%s' % imp_osd,
prefix + [
'--data-path', FSPATH.format(id=imp_osd),
'--journal-path', JPATH.format(id=imp_osd),
'--op', 'list-pgs',
])
if proc.exitstatus:
raise Exception("ceph-objectstore-tool: "
"imp list-pgs failure with status {ret}".
format(ret=proc.exitstatus))
pgs = proc.stdout.getvalue().split('\n')[:-1]
if pg not in pgs:
self.log("Moving pg {pg} from osd.{fosd} to osd.{tosd}".
format(pg=pg, fosd=exp_osd, tosd=imp_osd))
if imp_remote != exp_remote:
# Copy export file to the other machine
self.log("Transfer export file from {srem} to {trem}".
format(srem=exp_remote, trem=imp_remote))
                        # just in case an upgrade makes /var/log/ceph unreadable by non-root users,
exp_remote.run(args=['sudo', 'chmod', '777',
'/var/log/ceph'])
imp_remote.run(args=['sudo', 'chmod', '777',
'/var/log/ceph'])
tmpexport = Remote.get_file(exp_remote, exp_host_path,
sudo=True)
if exp_host_path != exp_path:
# push to /var/log/ceph, then rename (we can't
# chmod 777 the /var/log/ceph/$fsid mountpoint)
Remote.put_file(imp_remote, tmpexport, exp_path)
imp_remote.run(args=[
'sudo', 'mv', exp_path, exp_host_path])
else:
Remote.put_file(imp_remote, tmpexport, exp_host_path)
os.remove(tmpexport)
else:
# Can't move the pg after all
imp_osd = exp_osd
imp_remote = exp_remote
# import
proc = self.run_ceph_objectstore_tool(
imp_remote, 'osd.%s' % imp_osd,
[
'--data-path', FSPATH.format(id=imp_osd),
'--journal-path', JPATH.format(id=imp_osd),
'--log-file=/var/log/ceph/objectstore_tool.$pid.log',
'--op', 'import',
'--file', exp_path,
])
if proc.exitstatus == 1:
bogosity = "The OSD you are using is older than the exported PG"
if bogosity in proc.stderr.getvalue():
self.log("OSD older than exported PG"
"...ignored")
elif proc.exitstatus == 10:
self.log("Pool went away before processing an import"
"...ignored")
elif proc.exitstatus == 11:
self.log("Attempt to import an incompatible export"
"...ignored")
elif proc.exitstatus == 12:
# this should be safe to ignore because we only ever move 1
# copy of the pg at a time, and merge is only initiated when
# all replicas are peered and happy. /me crosses fingers
self.log("PG merged on target"
"...ignored")
elif proc.exitstatus:
raise Exception("ceph-objectstore-tool: "
"import failure with status {ret}".
format(ret=proc.exitstatus))
cmd = "sudo rm -f {file}".format(file=exp_host_path)
exp_remote.run(args=cmd)
if imp_remote != exp_remote:
imp_remote.run(args=cmd)
def blackhole_kill_osd(self, osd=None):
"""
If all else fails, kill the osd.
:param osd: Osd to be killed.
"""
if osd is None:
osd = random.choice(self.live_osds)
self.log("Blackholing and then killing osd %s, live_osds are %s" %
(str(osd), str(self.live_osds)))
self.live_osds.remove(osd)
self.dead_osds.append(osd)
self.ceph_manager.blackhole_kill_osd(osd)
def revive_osd(self, osd=None, skip_admin_check=False):
"""
Revive the osd.
:param osd: Osd to be revived.
"""
if osd is None:
osd = random.choice(self.dead_osds)
self.log("Reviving osd %s" % (str(osd),))
self.ceph_manager.revive_osd(
osd,
self.revive_timeout,
skip_admin_check=skip_admin_check)
self.dead_osds.remove(osd)
self.live_osds.append(osd)
if self.random_eio > 0 and osd == self.rerrosd:
self.ceph_manager.set_config(self.rerrosd,
filestore_debug_random_read_err = self.random_eio)
self.ceph_manager.set_config(self.rerrosd,
bluestore_debug_random_read_err = self.random_eio)
def out_osd(self, osd=None):
"""
Mark the osd out
:param osd: Osd to be marked.
"""
if osd is None:
osd = random.choice(self.in_osds)
self.log("Removing osd %s, in_osds are: %s" %
(str(osd), str(self.in_osds)))
self.ceph_manager.mark_out_osd(osd)
self.in_osds.remove(osd)
self.out_osds.append(osd)
def in_osd(self, osd=None):
"""
        Mark the osd in
:param osd: Osd to be marked.
"""
if osd is None:
osd = random.choice(self.out_osds)
if osd in self.dead_osds:
return self.revive_osd(osd)
self.log("Adding osd %s" % (str(osd),))
self.out_osds.remove(osd)
self.in_osds.append(osd)
self.ceph_manager.mark_in_osd(osd)
self.log("Added osd %s" % (str(osd),))
def reweight_osd_or_by_util(self, osd=None):
"""
Reweight an osd that is in
:param osd: Osd to be marked.
"""
if osd is not None or random.choice([True, False]):
if osd is None:
osd = random.choice(self.in_osds)
val = random.uniform(.1, 1.0)
self.log("Reweighting osd %s to %s" % (str(osd), str(val)))
self.ceph_manager.raw_cluster_cmd('osd', 'reweight',
str(osd), str(val))
else:
# do it several times, the option space is large
for i in range(5):
options = {
'max_change': random.choice(['0.05', '1.0', '3.0']),
'overage': random.choice(['110', '1000']),
'type': random.choice([
'reweight-by-utilization',
'test-reweight-by-utilization']),
}
self.log("Reweighting by: %s"%(str(options),))
self.ceph_manager.raw_cluster_cmd(
'osd',
options['type'],
options['overage'],
options['max_change'])
def primary_affinity(self, osd=None):
self.log("primary_affinity")
if osd is None:
osd = random.choice(self.in_osds)
if random.random() >= .5:
pa = random.random()
elif random.random() >= .5:
pa = 1
else:
pa = 0
self.log('Setting osd %s primary_affinity to %f' % (str(osd), pa))
self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity',
str(osd), str(pa))
def thrash_cluster_full(self):
"""
Set and unset cluster full condition
"""
self.log('Setting full ratio to .001')
self.ceph_manager.raw_cluster_cmd('osd', 'set-full-ratio', '.001')
time.sleep(1)
self.log('Setting full ratio back to .95')
self.ceph_manager.raw_cluster_cmd('osd', 'set-full-ratio', '.95')
def thrash_pg_upmap(self):
"""
Install or remove random pg_upmap entries in OSDMap
"""
self.log("thrash_pg_upmap")
from random import shuffle
out = self.ceph_manager.raw_cluster_cmd('osd', 'dump', '-f', 'json-pretty')
j = json.loads(out)
self.log('j is %s' % j)
try:
if random.random() >= .3:
pgs = self.ceph_manager.get_pg_stats()
if not pgs:
self.log('No pgs; doing nothing')
return
pg = random.choice(pgs)
pgid = str(pg['pgid'])
poolid = int(pgid.split('.')[0])
sizes = [x['size'] for x in j['pools'] if x['pool'] == poolid]
if len(sizes) == 0:
self.log('No pools; doing nothing')
return
n = sizes[0]
osds = self.in_osds + self.out_osds
shuffle(osds)
osds = osds[0:n]
self.log('Setting %s to %s' % (pgid, osds))
cmd = ['osd', 'pg-upmap', pgid] + [str(x) for x in osds]
self.log('cmd %s' % cmd)
self.ceph_manager.raw_cluster_cmd(*cmd)
else:
m = j['pg_upmap']
if len(m) > 0:
shuffle(m)
pg = m[0]['pgid']
self.log('Clearing pg_upmap on %s' % pg)
self.ceph_manager.raw_cluster_cmd(
'osd',
'rm-pg-upmap',
pg)
else:
self.log('No pg_upmap entries; doing nothing')
except CommandFailedError:
self.log('Failed to rm-pg-upmap, ignoring')
def thrash_pg_upmap_items(self):
"""
Install or remove random pg_upmap_items entries in OSDMap
"""
self.log("thrash_pg_upmap_items")
from random import shuffle
out = self.ceph_manager.raw_cluster_cmd('osd', 'dump', '-f', 'json-pretty')
j = json.loads(out)
self.log('j is %s' % j)
try:
if random.random() >= .3:
pgs = self.ceph_manager.get_pg_stats()
if not pgs:
self.log('No pgs; doing nothing')
return
pg = random.choice(pgs)
pgid = str(pg['pgid'])
poolid = int(pgid.split('.')[0])
sizes = [x['size'] for x in j['pools'] if x['pool'] == poolid]
if len(sizes) == 0:
self.log('No pools; doing nothing')
return
n = sizes[0]
osds = self.in_osds + self.out_osds
shuffle(osds)
osds = osds[0:n*2]
self.log('Setting %s to %s' % (pgid, osds))
cmd = ['osd', 'pg-upmap-items', pgid] + [str(x) for x in osds]
self.log('cmd %s' % cmd)
self.ceph_manager.raw_cluster_cmd(*cmd)
else:
m = j['pg_upmap_items']
if len(m) > 0:
shuffle(m)
pg = m[0]['pgid']
                    self.log('Clearing pg_upmap_items on %s' % pg)
self.ceph_manager.raw_cluster_cmd(
'osd',
'rm-pg-upmap-items',
pg)
else:
                    self.log('No pg_upmap_items entries; doing nothing')
except CommandFailedError:
self.log('Failed to rm-pg-upmap-items, ignoring')
def force_recovery(self):
"""
        Force recovery or backfill on some PGs
"""
backfill = random.random() >= 0.5
j = self.ceph_manager.get_pgids_to_force(backfill)
if j:
try:
if backfill:
self.ceph_manager.raw_cluster_cmd('pg', 'force-backfill', *j)
else:
self.ceph_manager.raw_cluster_cmd('pg', 'force-recovery', *j)
except CommandFailedError:
self.log('Failed to force backfill|recovery, ignoring')
def cancel_force_recovery(self):
"""
        Cancel forced recovery or backfill on some PGs
"""
backfill = random.random() >= 0.5
j = self.ceph_manager.get_pgids_to_cancel_force(backfill)
if j:
try:
if backfill:
self.ceph_manager.raw_cluster_cmd('pg', 'cancel-force-backfill', *j)
else:
self.ceph_manager.raw_cluster_cmd('pg', 'cancel-force-recovery', *j)
except CommandFailedError:
                self.log('Failed to cancel forced backfill|recovery, ignoring')
def force_cancel_recovery(self):
"""
Force or cancel forcing recovery
"""
if random.random() >= 0.4:
self.force_recovery()
else:
self.cancel_force_recovery()
def all_up(self):
"""
Make sure all osds are up and not out.
"""
while len(self.dead_osds) > 0:
self.log("reviving osd")
self.revive_osd()
while len(self.out_osds) > 0:
self.log("inning osd")
self.in_osd()
def all_up_in(self):
"""
Make sure all osds are up and fully in.
"""
        self.all_up()
for osd in self.live_osds:
self.ceph_manager.raw_cluster_cmd('osd', 'reweight',
str(osd), str(1))
self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity',
str(osd), str(1))
def do_join(self):
"""
        Stop the thrasher and join all of its greenlets.
"""
self.stopping = True
self.thread.get()
if self.sighup_delay:
self.log("joining the do_sighup greenlet")
self.sighup_thread.get()
if self.optrack_toggle_delay:
self.log("joining the do_optrack_toggle greenlet")
self.optrack_toggle_thread.join()
if self.dump_ops_enable == "true":
self.log("joining the do_dump_ops greenlet")
self.dump_ops_thread.join()
if self.noscrub_toggle_delay:
self.log("joining the do_noscrub_toggle greenlet")
self.noscrub_toggle_thread.join()
def grow_pool(self):
"""
Increase the size of the pool
"""
pool = self.ceph_manager.get_pool()
if pool is None:
return
self.log("Growing pool %s" % (pool,))
if self.ceph_manager.expand_pool(pool,
self.config.get('pool_grow_by', 10),
self.max_pgs):
self.pools_to_fix_pgp_num.add(pool)
def shrink_pool(self):
"""
Decrease the size of the pool
"""
pool = self.ceph_manager.get_pool()
if pool is None:
return
_ = self.ceph_manager.get_pool_pg_num(pool)
self.log("Shrinking pool %s" % (pool,))
if self.ceph_manager.contract_pool(
pool,
self.config.get('pool_shrink_by', 10),
self.min_pgs):
self.pools_to_fix_pgp_num.add(pool)
def fix_pgp_num(self, pool=None):
"""
Fix number of pgs in pool.
"""
if pool is None:
pool = self.ceph_manager.get_pool()
if not pool:
return
force = False
else:
force = True
self.log("fixing pg num pool %s" % (pool,))
if self.ceph_manager.set_pool_pgpnum(pool, force):
self.pools_to_fix_pgp_num.discard(pool)
def test_pool_min_size(self):
"""
Loop to selectively push PGs below their min_size and test that recovery
still occurs.
"""
self.log("test_pool_min_size")
self.all_up()
time.sleep(60) # buffer time for recovery to start.
self.ceph_manager.wait_for_recovery(
timeout=self.config.get('timeout')
)
minout = int(self.config.get("min_out", 1))
minlive = int(self.config.get("min_live", 2))
mindead = int(self.config.get("min_dead", 1))
self.log("doing min_size thrashing")
self.ceph_manager.wait_for_clean(timeout=180)
assert self.ceph_manager.is_clean(), \
'not clean before minsize thrashing starts'
while not self.stopping:
# look up k and m from all the pools on each loop, in case it
# changes as the cluster runs
k = 0
m = 99
has_pools = False
pools_json = self.ceph_manager.get_osd_dump_json()['pools']
for pool_json in pools_json:
pool = pool_json['pool_name']
has_pools = True
pool_type = pool_json['type'] # 1 for rep, 3 for ec
min_size = pool_json['min_size']
self.log("pool {pool} min_size is {min_size}".format(pool=pool,min_size=min_size))
try:
ec_profile = self.ceph_manager.get_pool_property(pool, 'erasure_code_profile')
if pool_type != PoolType.ERASURE_CODED:
continue
ec_profile = pool_json['erasure_code_profile']
ec_profile_json = self.ceph_manager.raw_cluster_cmd(
'osd',
'erasure-code-profile',
'get',
ec_profile,
'--format=json')
ec_json = json.loads(ec_profile_json)
local_k = int(ec_json['k'])
local_m = int(ec_json['m'])
self.log("pool {pool} local_k={k} local_m={m}".format(pool=pool,
k=local_k, m=local_m))
if local_k > k:
self.log("setting k={local_k} from previous {k}".format(local_k=local_k, k=k))
k = local_k
if local_m < m:
self.log("setting m={local_m} from previous {m}".format(local_m=local_m, m=m))
m = local_m
except CommandFailedError:
self.log("failed to read erasure_code_profile. %s was likely removed", pool)
continue
            if has_pools:
self.log("using k={k}, m={m}".format(k=k,m=m))
else:
self.log("No pools yet, waiting")
time.sleep(5)
continue
if minout > len(self.out_osds): # kill OSDs and mark out
self.log("forced to out an osd")
self.kill_osd(mark_out=True)
continue
elif mindead > len(self.dead_osds): # kill OSDs but force timeout
self.log("forced to kill an osd")
self.kill_osd()
continue
else: # make mostly-random choice to kill or revive OSDs
minup = max(minlive, k)
rand_val = random.uniform(0, 1)
self.log("choosing based on number of live OSDs and rand val {rand}".\
format(rand=rand_val))
if len(self.live_osds) > minup+1 and rand_val < 0.5:
# chose to knock out as many OSDs as we can w/out downing PGs
most_killable = min(len(self.live_osds) - minup, m)
self.log("chose to kill {n} OSDs".format(n=most_killable))
for i in range(1, most_killable):
self.kill_osd(mark_out=True)
time.sleep(10)
# try a few times since there might be a concurrent pool
# creation or deletion
with safe_while(
sleep=25, tries=5,
action='check for active or peered') as proceed:
while proceed():
if self.ceph_manager.all_active_or_peered():
break
self.log('not all PGs are active or peered')
else: # chose to revive OSDs, bring up a random fraction of the dead ones
self.log("chose to revive osds")
for i in range(1, int(rand_val * len(self.dead_osds))):
self.revive_osd(i)
# let PGs repair themselves or our next knockout might kill one
self.ceph_manager.wait_for_clean(timeout=self.config.get('timeout'))
# / while not self.stopping
self.all_up_in()
self.ceph_manager.wait_for_recovery(
timeout=self.config.get('timeout')
)
def inject_pause(self, conf_key, duration, check_after, should_be_down):
"""
Pause injection testing. Check for osd being down when finished.
"""
the_one = random.choice(self.live_osds)
self.log("inject_pause on osd.{osd}".format(osd=the_one))
self.log(
"Testing {key} pause injection for duration {duration}".format(
key=conf_key,
duration=duration
))
self.log(
"Checking after {after}, should_be_down={shouldbedown}".format(
after=check_after,
shouldbedown=should_be_down
))
self.ceph_manager.set_config(the_one, **{conf_key: duration})
if not should_be_down:
return
time.sleep(check_after)
status = self.ceph_manager.get_osd_status()
assert the_one in status['down']
time.sleep(duration - check_after + 20)
status = self.ceph_manager.get_osd_status()
        assert the_one not in status['down']
def test_backfill_full(self):
"""
Test backfills stopping when the replica fills up.
        First, use the injectfull admin socket command to simulate a
        full osd by injecting 'backfillfull' on all of the OSDs.
Second, on a random subset, set
osd_debug_skip_full_check_in_backfill_reservation to force
the more complicated check in do_scan to be exercised.
Then, verify that all backfillings stop.
"""
self.log("injecting backfill full")
for i in self.live_osds:
self.ceph_manager.set_config(
i,
osd_debug_skip_full_check_in_backfill_reservation=
random.choice(['false', 'true']))
self.ceph_manager.osd_admin_socket(i, command=['injectfull', 'backfillfull'],
check_status=True, timeout=30, stdout=DEVNULL)
for i in range(30):
status = self.ceph_manager.compile_pg_status()
if 'backfilling' not in status.keys():
break
self.log(
"waiting for {still_going} backfillings".format(
still_going=status.get('backfilling')))
time.sleep(1)
        assert 'backfilling' not in self.ceph_manager.compile_pg_status().keys()
for i in self.live_osds:
self.ceph_manager.set_config(
i,
osd_debug_skip_full_check_in_backfill_reservation='false')
self.ceph_manager.osd_admin_socket(i, command=['injectfull', 'none'],
check_status=True, timeout=30, stdout=DEVNULL)
def generate_random_sharding(self):
prefixes = [
            'm', 'O', 'P', 'L'
]
new_sharding = ''
for prefix in prefixes:
choose = random.choice([False, True])
if not choose:
continue
if new_sharding != '':
new_sharding = new_sharding + ' '
columns = random.randint(1, 5)
do_hash = random.choice([False, True])
if do_hash:
low_hash = random.choice([0, 5, 8])
do_high_hash = random.choice([False, True])
if do_high_hash:
high_hash = random.choice([8, 16, 30]) + low_hash
new_sharding = new_sharding + prefix + '(' + str(columns) + ',' + str(low_hash) + '-' + str(high_hash) + ')'
else:
new_sharding = new_sharding + prefix + '(' + str(columns) + ',' + str(low_hash) + '-)'
else:
if columns == 1:
new_sharding = new_sharding + prefix
else:
new_sharding = new_sharding + prefix + '(' + str(columns) + ')'
return new_sharding
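    # Illustrative outputs of the generator above (actual results are
    # random): each chosen prefix becomes 'X', 'X(cols)', 'X(cols,low-high)'
    # or 'X(cols,low-)', joined by single spaces, e.g. 'm(2) O(4,5-13) L'.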
def test_bluestore_reshard_action(self):
"""
Test if resharding of bluestore works properly.
If bluestore is not used, or bluestore is in version that
does not support sharding, skip.
"""
osd = random.choice(self.dead_osds)
remote = self.ceph_manager.find_remote('osd', osd)
FSPATH = self.ceph_manager.get_filepath()
prefix = [
'--no-mon-config',
'--log-file=/var/log/ceph/bluestore_tool.$pid.log',
'--log-level=10',
'--path', FSPATH.format(id=osd)
]
# sanity check if bluestore-tool accessible
self.log('checking if target objectstore is bluestore on osd.%s' % osd)
cmd = prefix + [
'show-label'
]
proc = self.run_ceph_bluestore_tool(remote, 'osd.%s' % osd, cmd)
if proc.exitstatus != 0:
raise Exception("ceph-bluestore-tool access failed.")
# check if sharding is possible
self.log('checking if target bluestore supports sharding on osd.%s' % osd)
cmd = prefix + [
'show-sharding'
]
proc = self.run_ceph_bluestore_tool(remote, 'osd.%s' % osd, cmd)
if proc.exitstatus != 0:
self.log("Unable to test resharding, "
"ceph-bluestore-tool does not support it.")
return
# now go for reshard to something else
self.log('applying new sharding to bluestore on osd.%s' % osd)
new_sharding = self.config.get('bluestore_new_sharding','random')
if new_sharding == 'random':
self.log('generate random sharding')
new_sharding = self.generate_random_sharding()
self.log("applying new sharding: " + new_sharding)
cmd = prefix + [
'--sharding', new_sharding,
'reshard'
]
proc = self.run_ceph_bluestore_tool(remote, 'osd.%s' % osd, cmd)
if proc.exitstatus != 0:
raise Exception("ceph-bluestore-tool resharding failed.")
        # now run fsck to verify the new sharding
self.log('running fsck to verify new sharding on osd.%s' % osd)
cmd = prefix + [
'fsck'
]
proc = self.run_ceph_bluestore_tool(remote, 'osd.%s' % osd, cmd)
if proc.exitstatus != 0:
raise Exception("ceph-bluestore-tool fsck failed.")
self.log('resharding successfully completed')
def test_bluestore_reshard(self):
"""
1) kills an osd
2) reshards bluestore on killed osd
3) revives the osd
"""
self.log('test_bluestore_reshard started')
self.kill_osd(mark_down=True, mark_out=True)
self.test_bluestore_reshard_action()
self.revive_osd()
self.log('test_bluestore_reshard completed')
def test_map_discontinuity(self):
"""
1) Allows the osds to recover
2) kills an osd
3) allows the remaining osds to recover
4) waits for some time
5) revives the osd
        This sequence should cause the revived osd to have to handle
        a map gap, since the mons will have trimmed the intervening osdmaps.
"""
self.log("test_map_discontinuity")
while len(self.in_osds) < (self.minin + 1):
self.in_osd()
self.log("Waiting for recovery")
self.ceph_manager.wait_for_all_osds_up(
timeout=self.config.get('timeout')
)
# now we wait 20s for the pg status to change, if it takes longer,
# the test *should* fail!
time.sleep(20)
self.ceph_manager.wait_for_clean(
timeout=self.config.get('timeout')
)
# now we wait 20s for the backfill replicas to hear about the clean
time.sleep(20)
self.log("Recovered, killing an osd")
self.kill_osd(mark_down=True, mark_out=True)
self.log("Waiting for clean again")
self.ceph_manager.wait_for_clean(
timeout=self.config.get('timeout')
)
self.log("Waiting for trim")
time.sleep(int(self.config.get("map_discontinuity_sleep_time", 40)))
self.revive_osd()
def choose_action(self):
"""
Random action selector.
"""
chance_down = self.config.get('chance_down', 0.4)
_ = self.config.get('chance_test_min_size', 0)
chance_test_backfill_full = \
self.config.get('chance_test_backfill_full', 0)
if isinstance(chance_down, int):
chance_down = float(chance_down) / 100
minin = self.minin
minout = int(self.config.get("min_out", 0))
minlive = int(self.config.get("min_live", 2))
mindead = int(self.config.get("min_dead", 0))
self.log('choose_action: min_in %d min_out '
'%d min_live %d min_dead %d '
'chance_down %.2f' %
(minin, minout, minlive, mindead, chance_down))
actions = []
if len(self.in_osds) > minin:
actions.append((self.out_osd, 1.0,))
if len(self.live_osds) > minlive and chance_down > 0:
actions.append((self.kill_osd, chance_down,))
if len(self.out_osds) > minout:
actions.append((self.in_osd, 1.7,))
if len(self.dead_osds) > mindead:
actions.append((self.revive_osd, 1.0,))
if self.config.get('thrash_primary_affinity', True):
actions.append((self.primary_affinity, 1.0,))
actions.append((self.reweight_osd_or_by_util,
self.config.get('reweight_osd', .5),))
actions.append((self.grow_pool,
self.config.get('chance_pgnum_grow', 0),))
actions.append((self.shrink_pool,
self.config.get('chance_pgnum_shrink', 0),))
actions.append((self.fix_pgp_num,
self.config.get('chance_pgpnum_fix', 0),))
actions.append((self.test_pool_min_size,
self.config.get('chance_test_min_size', 0),))
actions.append((self.test_backfill_full,
chance_test_backfill_full,))
if self.chance_thrash_cluster_full > 0:
actions.append((self.thrash_cluster_full, self.chance_thrash_cluster_full,))
if self.chance_thrash_pg_upmap > 0:
actions.append((self.thrash_pg_upmap, self.chance_thrash_pg_upmap,))
if self.chance_thrash_pg_upmap_items > 0:
actions.append((self.thrash_pg_upmap_items, self.chance_thrash_pg_upmap_items,))
if self.chance_force_recovery > 0:
actions.append((self.force_cancel_recovery, self.chance_force_recovery))
for key in ['heartbeat_inject_failure', 'filestore_inject_stall']:
for scenario in [
(lambda:
self.inject_pause(key,
self.config.get('pause_short', 3),
0,
False),
self.config.get('chance_inject_pause_short', 1),),
(lambda:
self.inject_pause(key,
self.config.get('pause_long', 80),
self.config.get('pause_check_after', 70),
True),
self.config.get('chance_inject_pause_long', 0),)]:
actions.append(scenario)
# only consider resharding if objectstore is bluestore
cluster_name = self.ceph_manager.cluster
cluster = self.ceph_manager.ctx.ceph[cluster_name]
if cluster.conf.get('osd', {}).get('osd objectstore', 'bluestore') == 'bluestore':
actions.append((self.test_bluestore_reshard,
self.config.get('chance_bluestore_reshard', 0),))
total = sum([y for (x, y) in actions])
val = random.uniform(0, total)
for (action, prob) in actions:
if val < prob:
return action
val -= prob
return None
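    # The loop above is a standard weighted (roulette-wheel) selection.
    # A minimal standalone sketch of the same idea, with made-up weights:
    #
    #   actions = [(action_a, 1.0), (action_b, 0.5)]
    #   val = random.uniform(0, sum(w for _, w in actions))
    #   for action, w in actions:
    #       if val < w:
    #           return action
    #       val -= w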
def do_thrash(self):
"""
_do_thrash() wrapper.
"""
try:
self._do_thrash()
except Exception as e:
# See _run exception comment for MDSThrasher
self.set_thrasher_exception(e)
self.logger.exception("exception:")
# Allow successful completion so gevent doesn't see an exception.
# The DaemonWatchdog will observe the error and tear down the test.
@log_exc
def do_sighup(self):
"""
Loops and sends signal.SIGHUP to a random live osd.
Loop delay is controlled by the config value sighup_delay.
"""
delay = float(self.sighup_delay)
self.log("starting do_sighup with a delay of {0}".format(delay))
while not self.stopping:
osd = random.choice(self.live_osds)
self.ceph_manager.signal_osd(osd, signal.SIGHUP, silent=True)
time.sleep(delay)
@log_exc
def do_optrack_toggle(self):
"""
        Loops and toggles op tracking on all osds.
Loop delay is controlled by the config value optrack_toggle_delay.
"""
delay = float(self.optrack_toggle_delay)
osd_state = "true"
self.log("starting do_optrack_toggle with a delay of {0}".format(delay))
while not self.stopping:
if osd_state == "true":
osd_state = "false"
else:
osd_state = "true"
try:
self.ceph_manager.inject_args('osd', '*',
'osd_enable_op_tracker',
osd_state)
except CommandFailedError:
self.log('Failed to tell all osds, ignoring')
gevent.sleep(delay)
@log_exc
def do_dump_ops(self):
"""
Loops and does op dumps on all osds
"""
self.log("starting do_dump_ops")
while not self.stopping:
for osd in self.live_osds:
# Ignore errors because live_osds is in flux
self.ceph_manager.osd_admin_socket(osd, command=['dump_ops_in_flight'],
check_status=False, timeout=30, stdout=DEVNULL)
self.ceph_manager.osd_admin_socket(osd, command=['dump_blocked_ops'],
check_status=False, timeout=30, stdout=DEVNULL)
self.ceph_manager.osd_admin_socket(osd, command=['dump_historic_ops'],
check_status=False, timeout=30, stdout=DEVNULL)
gevent.sleep(0)
@log_exc
def do_noscrub_toggle(self):
"""
        Loops and toggles the noscrub and nodeep-scrub flags.
Loop delay is controlled by the config value noscrub_toggle_delay.
"""
delay = float(self.noscrub_toggle_delay)
scrub_state = "none"
self.log("starting do_noscrub_toggle with a delay of {0}".format(delay))
while not self.stopping:
if scrub_state == "none":
self.ceph_manager.raw_cluster_cmd('osd', 'set', 'noscrub')
scrub_state = "noscrub"
elif scrub_state == "noscrub":
self.ceph_manager.raw_cluster_cmd('osd', 'set', 'nodeep-scrub')
scrub_state = "both"
elif scrub_state == "both":
self.ceph_manager.raw_cluster_cmd('osd', 'unset', 'noscrub')
scrub_state = "nodeep-scrub"
else:
self.ceph_manager.raw_cluster_cmd('osd', 'unset', 'nodeep-scrub')
scrub_state = "none"
gevent.sleep(delay)
self.ceph_manager.raw_cluster_cmd('osd', 'unset', 'noscrub')
self.ceph_manager.raw_cluster_cmd('osd', 'unset', 'nodeep-scrub')
@log_exc
def _do_thrash(self):
"""
Loop to select random actions to thrash ceph manager with.
"""
cleanint = self.config.get("clean_interval", 60)
scrubint = self.config.get("scrub_interval", -1)
maxdead = self.config.get("max_dead", 0)
delay = self.config.get("op_delay", 5)
self.rerrosd = self.live_osds[0]
if self.random_eio > 0:
self.ceph_manager.inject_args('osd', self.rerrosd,
'filestore_debug_random_read_err',
self.random_eio)
self.ceph_manager.inject_args('osd', self.rerrosd,
'bluestore_debug_random_read_err',
self.random_eio)
self.log("starting do_thrash")
while not self.stopping:
to_log = [str(x) for x in ["in_osds: ", self.in_osds,
"out_osds: ", self.out_osds,
"dead_osds: ", self.dead_osds,
"live_osds: ", self.live_osds]]
self.log(" ".join(to_log))
if random.uniform(0, 1) < (float(delay) / cleanint):
while len(self.dead_osds) > maxdead:
self.revive_osd()
for osd in self.in_osds:
self.ceph_manager.raw_cluster_cmd('osd', 'reweight',
str(osd), str(1))
if random.uniform(0, 1) < float(
self.config.get('chance_test_map_discontinuity', 0)) \
and len(self.live_osds) > 5: # avoid m=2,k=2 stall, w/ some buffer for crush being picky
self.test_map_discontinuity()
else:
self.ceph_manager.wait_for_recovery(
timeout=self.config.get('timeout')
)
time.sleep(self.clean_wait)
if scrubint > 0:
if random.uniform(0, 1) < (float(delay) / scrubint):
self.log('Scrubbing while thrashing being performed')
Scrubber(self.ceph_manager, self.config)
self.choose_action()()
time.sleep(delay)
self.all_up()
if self.random_eio > 0:
self.ceph_manager.inject_args('osd', self.rerrosd,
'filestore_debug_random_read_err', '0.0')
self.ceph_manager.inject_args('osd', self.rerrosd,
'bluestore_debug_random_read_err', '0.0')
for pool in list(self.pools_to_fix_pgp_num):
if self.ceph_manager.get_pool_pg_num(pool) > 0:
self.fix_pgp_num(pool)
self.pools_to_fix_pgp_num.clear()
for service, opt, saved_value in self.saved_options:
self.ceph_manager.inject_args(service, '*', opt, saved_value)
self.saved_options = []
self.all_up_in()
class ObjectStoreTool:
def __init__(self, manager, pool, **kwargs):
self.manager = manager
self.pool = pool
self.osd = kwargs.get('osd', None)
self.object_name = kwargs.get('object_name', None)
self.do_revive = kwargs.get('do_revive', True)
if self.osd and self.pool and self.object_name:
if self.osd == "primary":
self.osd = self.manager.get_object_primary(self.pool,
self.object_name)
assert self.osd is not None
if self.object_name:
self.pgid = self.manager.get_object_pg_with_shard(self.pool,
self.object_name,
self.osd)
self.remote = next(iter(self.manager.ctx.\
cluster.only('osd.{o}'.format(o=self.osd)).remotes.keys()))
path = self.manager.get_filepath().format(id=self.osd)
self.paths = ("--data-path {path} --journal-path {path}/journal".
format(path=path))
def build_cmd(self, options, args, stdin):
lines = []
if self.object_name:
lines.append("object=$(sudo adjust-ulimits ceph-objectstore-tool "
"{paths} --pgid {pgid} --op list |"
"grep '\"oid\":\"{name}\"')".
format(paths=self.paths,
pgid=self.pgid,
name=self.object_name))
args = '"$object" ' + args
options += " --pgid {pgid}".format(pgid=self.pgid)
cmd = ("sudo adjust-ulimits ceph-objectstore-tool {paths} {options} {args}".
format(paths=self.paths,
args=args,
options=options))
if stdin:
cmd = ("echo {payload} | base64 --decode | {cmd}".
format(payload=base64.encode(stdin),
cmd=cmd))
lines.append(cmd)
return "\n".join(lines)
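    # Sketch of the shell snippet build_cmd() emits when object_name is set
    # (paths/pgid are illustrative): a first line greps the object's JSON
    # out of `--op list`, then a second line like
    #   sudo adjust-ulimits ceph-objectstore-tool --data-path <p> \
    #       --journal-path <p>/journal <options> --pgid <pgid> "$object" <args>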
def run(self, options, args):
self.manager.kill_osd(self.osd)
cmd = self.build_cmd(options, args, None)
self.manager.log(cmd)
try:
proc = self.remote.run(args=['bash', '-e', '-x', '-c', cmd],
check_status=False,
stdout=BytesIO(),
stderr=BytesIO())
proc.wait()
if proc.exitstatus != 0:
self.manager.log("failed with " + str(proc.exitstatus))
error = proc.stdout.getvalue().decode() + " " + \
proc.stderr.getvalue().decode()
raise Exception(error)
finally:
if self.do_revive:
self.manager.revive_osd(self.osd)
self.manager.wait_till_osd_is_up(self.osd, 300)
# XXX: this class has nothing to do with the Ceph daemon (ceph-mgr) of
# the same name.
class CephManager:
"""
Ceph manager object.
Contains several local functions that form a bulk of this module.
:param controller: the remote machine where the Ceph commands should be
executed
:param ctx: the cluster context
:param config: path to Ceph config file
:param logger: for logging messages
:param cluster: name of the Ceph cluster
"""
def __init__(self, controller, ctx=None, config=None, logger=None,
cluster='ceph', cephadm=False, rook=False) -> None:
self.lock = threading.RLock()
self.ctx = ctx
self.config = config
self.controller = controller
self.next_pool_id = 0
self.cluster = cluster
        if logger:
self.log = lambda x: logger.info(x)
else:
def tmp(x):
"""
implement log behavior.
"""
print(x)
self.log = tmp
if self.config is None:
self.config = dict()
        # NOTE: These variables are meant to be overridden by vstart_runner.py.
self.rook = rook
self.cephadm = cephadm
self.testdir = teuthology.get_testdir(self.ctx)
# prefix args for ceph cmds to be executed
pre = ['adjust-ulimits', 'ceph-coverage',
f'{self.testdir}/archive/coverage']
self.CEPH_CMD = ['sudo'] + pre + ['timeout', '120', 'ceph',
'--cluster', self.cluster]
self.RADOS_CMD = pre + ['rados', '--cluster', self.cluster]
self.run_ceph_w_prefix = ['sudo', 'daemon-helper', 'kill', 'ceph',
'--cluster', self.cluster]
pools = self.list_pools()
self.pools = {}
for pool in pools:
# we may race with a pool deletion; ignore failures here
try:
self.pools[pool] = self.get_pool_int_property(pool, 'pg_num')
except CommandFailedError:
self.log('Failed to get pg_num from pool %s, ignoring' % pool)
def ceph(self, cmd, **kwargs):
"""
Simple Ceph admin command wrapper around run_cluster_cmd.
"""
kwargs.pop('args', None)
args = shlex.split(cmd)
stdout = kwargs.pop('stdout', StringIO())
stderr = kwargs.pop('stderr', StringIO())
return self.run_cluster_cmd(args=args, stdout=stdout, stderr=stderr, **kwargs)
def run_cluster_cmd(self, **kwargs):
"""
Run a Ceph command and return the object representing the process
for the command.
Accepts arguments same as that of teuthology.orchestra.run.run()
"""
if isinstance(kwargs['args'], str):
kwargs['args'] = shlex.split(kwargs['args'])
elif isinstance(kwargs['args'], tuple):
kwargs['args'] = list(kwargs['args'])
prefixcmd = []
timeoutcmd = kwargs.pop('timeoutcmd', None)
if timeoutcmd is not None:
prefixcmd += ['timeout', str(timeoutcmd)]
if self.cephadm:
prefixcmd += ['ceph']
cmd = prefixcmd + list(kwargs['args'])
return shell(self.ctx, self.cluster, self.controller,
args=cmd,
stdout=StringIO(),
check_status=kwargs.get('check_status', True))
elif self.rook:
prefixcmd += ['ceph']
cmd = prefixcmd + list(kwargs['args'])
return toolbox(self.ctx, self.cluster,
args=cmd,
stdout=StringIO(),
check_status=kwargs.get('check_status', True))
else:
kwargs['args'] = prefixcmd + self.CEPH_CMD + kwargs['args']
return self.controller.run(**kwargs)
def raw_cluster_cmd(self, *args, **kwargs) -> str:
"""
        Run a ceph command against the cluster and return its stdout.
"""
if kwargs.get('args') is None and args:
kwargs['args'] = args
kwargs['stdout'] = kwargs.pop('stdout', StringIO())
return self.run_cluster_cmd(**kwargs).stdout.getvalue()
def raw_cluster_cmd_result(self, *args, **kwargs):
"""
        Run a ceph command against the cluster and return its exit status.
"""
if kwargs.get('args') is None and args:
kwargs['args'] = args
kwargs['check_status'] = False
return self.run_cluster_cmd(**kwargs).exitstatus
def get_keyring(self, client_id):
"""
Return keyring for the given client.
:param client_id: str
:return keyring: str
"""
if client_id.find('client.') != -1:
client_id = client_id.replace('client.', '')
keyring = self.run_cluster_cmd(args=f'auth get client.{client_id}',
stdout=StringIO()).stdout.getvalue()
assert isinstance(keyring, str) and keyring != ''
return keyring
def run_ceph_w(self, watch_channel=None):
"""
Execute "ceph -w" in the background with stdout connected to a BytesIO,
and return the RemoteProcess.
:param watch_channel: Specifies the channel to be watched. This can be
'cluster', 'audit', ...
:type watch_channel: str
"""
args = self.run_ceph_w_prefix + ['-w']
if watch_channel is not None:
args.append("--watch-channel")
args.append(watch_channel)
return self.controller.run(args=args, wait=False, stdout=StringIO(), stdin=run.PIPE)
def get_mon_socks(self):
"""
Get monitor sockets.
:return socks: tuple of strings; strings are individual sockets.
"""
from json import loads
        output = loads(self.raw_cluster_cmd('--format=json', 'mon', 'dump'))
socks = []
for mon in output['mons']:
for addrvec_mem in mon['public_addrs']['addrvec']:
socks.append(addrvec_mem['addr'])
return tuple(socks)
def get_msgrv1_mon_socks(self):
"""
Get monitor sockets that use msgrv1 to operate.
:return socks: tuple of strings; strings are individual sockets.
"""
from json import loads
output = loads(self.raw_cluster_cmd('--format=json', 'mon', 'dump'))
socks = []
for mon in output['mons']:
for addrvec_mem in mon['public_addrs']['addrvec']:
if addrvec_mem['type'] == 'v1':
socks.append(addrvec_mem['addr'])
return tuple(socks)
def get_msgrv2_mon_socks(self):
"""
Get monitor sockets that use msgrv2 to operate.
:return socks: tuple of strings; strings are individual sockets.
"""
from json import loads
output = loads(self.raw_cluster_cmd('--format=json', 'mon', 'dump'))
socks = []
for mon in output['mons']:
for addrvec_mem in mon['public_addrs']['addrvec']:
if addrvec_mem['type'] == 'v2':
socks.append(addrvec_mem['addr'])
return tuple(socks)
def flush_pg_stats(self, osds, no_wait=None, wait_for_mon=300):
"""
Flush pg stats from a list of OSD ids, ensuring they are reflected
all the way to the monitor. Luminous and later only.
:param osds: list of OSDs to flush
:param no_wait: list of OSDs not to wait for seq id. by default, we
wait for all specified osds, but some of them could be
moved out of osdmap, so we cannot get their updated
stat seq from monitor anymore. in that case, you need
to pass a blocklist.
:param wait_for_mon: wait for mon to be synced with mgr. 0 to disable
it. (5 min by default)
"""
if no_wait is None:
no_wait = []
def flush_one_osd(osd: int, wait_for_mon: int):
need = int(self.raw_cluster_cmd('tell', 'osd.%d' % osd, 'flush_pg_stats'))
if not wait_for_mon:
return
if osd in no_wait:
return
got = 0
while wait_for_mon > 0:
got = int(self.raw_cluster_cmd('osd', 'last-stat-seq', 'osd.%d' % osd))
self.log('need seq {need} got {got} for osd.{osd}'.format(
need=need, got=got, osd=osd))
if got >= need:
break
A_WHILE = 1
time.sleep(A_WHILE)
wait_for_mon -= A_WHILE
else:
raise Exception('timed out waiting for mon to be updated with '
'osd.{osd}: {got} < {need}'.
format(osd=osd, got=got, need=need))
with parallel() as p:
for osd in osds:
p.spawn(flush_one_osd, osd, wait_for_mon)
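    # Hedged usage sketch (osd ids are illustrative): flush osds 0-2 but do
    # not wait for osd.2's stat seq, e.g. because it left the osdmap:
    #
    #   manager.flush_pg_stats([0, 1, 2], no_wait=[2])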
def flush_all_pg_stats(self):
self.flush_pg_stats(range(len(self.get_osd_dump())))
def do_rados(self, cmd, pool=None, namespace=None, remote=None, **kwargs):
"""
Execute a remote rados command.
"""
if remote is None:
remote = self.controller
        pre = self.RADOS_CMD + []  # copy so we don't mutate RADOS_CMD
if pool is not None:
pre += ['--pool', pool]
if namespace is not None:
pre += ['--namespace', namespace]
pre.extend(cmd)
proc = remote.run(
args=pre,
wait=True,
**kwargs
)
return proc
def rados_write_objects(self, pool, num_objects, size,
timelimit, threads, cleanup=False):
"""
Write rados objects
Threads not used yet.
"""
args = [
'--num-objects', num_objects,
'-b', size,
'bench', timelimit,
'write'
]
if not cleanup:
args.append('--no-cleanup')
return self.do_rados(map(str, args), pool=pool)
def do_put(self, pool, obj, fname, namespace=None):
"""
Implement rados put operation
"""
args = ['put', obj, fname]
return self.do_rados(
args,
check_status=False,
pool=pool,
namespace=namespace
).exitstatus
def do_get(self, pool, obj, fname='/dev/null', namespace=None):
"""
Implement rados get operation
"""
args = ['get', obj, fname]
return self.do_rados(
args,
check_status=False,
pool=pool,
namespace=namespace,
).exitstatus
def do_rm(self, pool, obj, namespace=None):
"""
Implement rados rm operation
"""
args = ['rm', obj]
return self.do_rados(
args,
check_status=False,
pool=pool,
namespace=namespace
).exitstatus
def osd_admin_socket(self, osd_id, command, check_status=True, timeout=0, stdout=None):
if stdout is None:
stdout = StringIO()
return self.admin_socket('osd', osd_id, command, check_status, timeout, stdout)
def find_remote(self, service_type, service_id):
"""
Get the Remote for the host where a particular service runs.
:param service_type: 'mds', 'osd', 'client'
:param service_id: The second part of a role, e.g. '0' for
the role 'client.0'
:return: a Remote instance for the host where the
requested role is placed
"""
return get_remote(self.ctx, self.cluster,
service_type, service_id)
def admin_socket(self, service_type, service_id,
command, check_status=True, timeout=0, stdout=None):
"""
Remotely start up ceph specifying the admin socket
:param command: a list of words to use as the command
to the admin socket
"""
if stdout is None:
stdout = StringIO()
remote = self.find_remote(service_type, service_id)
if self.cephadm:
return shell(
self.ctx, self.cluster, remote,
args=[
'ceph', 'daemon', '%s.%s' % (service_type, service_id),
] + command,
stdout=stdout,
wait=True,
check_status=check_status,
)
if self.rook:
assert False, 'not implemented'
args = [
'sudo',
'adjust-ulimits',
'ceph-coverage',
f'{self.testdir}/archive/coverage',
'timeout',
str(timeout),
'ceph',
'--cluster',
self.cluster,
'--admin-daemon',
'/var/run/ceph/{cluster}-{type}.{id}.asok'.format(
cluster=self.cluster,
type=service_type,
id=service_id),
]
args.extend(command)
return remote.run(
args=args,
stdout=stdout,
wait=True,
check_status=check_status
)
def objectstore_tool(self, pool, options, args, **kwargs):
return ObjectStoreTool(self, pool, **kwargs).run(options, args)
def get_pgid(self, pool, pgnum):
"""
:param pool: pool name
:param pgnum: pg number
:returns: a string representing this pg.
"""
poolnum = self.get_pool_num(pool)
pg_str = "{poolnum}.{pgnum}".format(
poolnum=poolnum,
pgnum=pgnum)
return pg_str
def get_pg_replica(self, pool, pgnum):
"""
        get replica osd for pool, pgnum (e.g. (data, 0) -> 0)
"""
pg_str = self.get_pgid(pool, pgnum)
output = self.raw_cluster_cmd("pg", "map", pg_str, '--format=json')
j = json.loads('\n'.join(output.split('\n')[1:]))
return int(j['acting'][-1])
def wait_for_pg_stats(func):
        # both osd_mon_report_interval and mgr_stats_period are 5 seconds
        # by default; with fault injection taken into account, the backoff
        # delays below give the stats plenty of time to propagate
delays = [1, 1, 2, 3, 5, 8, 13, 0]
@wraps(func)
def wrapper(self, *args, **kwargs):
exc = None
for delay in delays:
try:
return func(self, *args, **kwargs)
except AssertionError as e:
time.sleep(delay)
exc = e
raise exc
return wrapper
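    # wait_for_pg_stats is applied as a method decorator: the wrapped call
    # is retried with the backoff above until its assertions pass, e.g.:
    #
    #   @wait_for_pg_stats
    #   def get_num_active_clean(self):
    #       ...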
def get_pg_primary(self, pool, pgnum):
"""
        get primary osd for pool, pgnum (e.g. (data, 0) -> 0)
"""
pg_str = self.get_pgid(pool, pgnum)
output = self.raw_cluster_cmd("pg", "map", pg_str, '--format=json')
j = json.loads('\n'.join(output.split('\n')[1:]))
return int(j['acting'][0])
def get_pool_num(self, pool):
"""
get number for pool (e.g., data -> 2)
"""
return int(self.get_pool_dump(pool)['pool'])
def list_pools(self):
"""
list all pool names
"""
osd_dump = self.get_osd_dump_json()
self.log(osd_dump['pools'])
return [str(i['pool_name']) for i in osd_dump['pools']]
def clear_pools(self):
"""
remove all pools
"""
[self.remove_pool(i) for i in self.list_pools()]
def kick_recovery_wq(self, osdnum):
"""
Run kick_recovery_wq on cluster.
"""
return self.raw_cluster_cmd(
'tell', "osd.%d" % (int(osdnum),),
'debug',
'kick_recovery_wq',
'0')
def wait_run_admin_socket(self, service_type,
service_id, args=['version'], timeout=75, stdout=None):
"""
        If the admin socket call succeeds, return. Otherwise wait
        five seconds and try again.
"""
if stdout is None:
stdout = StringIO()
tries = 0
while True:
proc = self.admin_socket(service_type, service_id,
args, check_status=False, stdout=stdout)
if proc.exitstatus == 0:
return proc
else:
tries += 1
if (tries * 5) > timeout:
raise Exception('timed out waiting for admin_socket '
'to appear after {type}.{id} restart'.
format(type=service_type,
id=service_id))
self.log("waiting on admin_socket for {type}-{id}, "
"{command}".format(type=service_type,
id=service_id,
command=args))
time.sleep(5)
def get_pool_dump(self, pool):
"""
get the osd dump part of a pool
"""
osd_dump = self.get_osd_dump_json()
for i in osd_dump['pools']:
if i['pool_name'] == pool:
return i
assert False
def get_config(self, service_type, service_id, name):
"""
:param node: like 'mon.a'
:param name: the option name
"""
proc = self.wait_run_admin_socket(service_type, service_id,
['config', 'show'])
j = json.loads(proc.stdout.getvalue())
return j[name]
def inject_args(self, service_type, service_id, name, value):
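        # Illustrative example: inject_args('osd', 0, 'bdev-inject-crash', 2)
        # runs `ceph tell osd.0 injectargs --bdev-inject-crash=2`; booleans
        # are rendered as 'true'/'false' first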
whom = '{0}.{1}'.format(service_type, service_id)
if isinstance(value, bool):
value = 'true' if value else 'false'
opt_arg = '--{name}={value}'.format(name=name, value=value)
self.raw_cluster_cmd('--', 'tell', whom, 'injectargs', opt_arg)
def set_config(self, osdnum, **argdict):
"""
:param osdnum: osd number
:param argdict: dictionary containing values to set.
"""
for k, v in argdict.items():
self.wait_run_admin_socket(
'osd', osdnum,
['config', 'set', str(k), str(v)])
def raw_cluster_status(self):
"""
Get status from cluster
"""
status = self.raw_cluster_cmd('status', '--format=json')
return json.loads(status)
def raw_osd_status(self):
"""
Get osd status from cluster
"""
return self.raw_cluster_cmd('osd', 'dump')
def get_osd_status(self):
"""
Get osd statuses sorted by states that the osds are in.
"""
osd_lines = list(filter(
lambda x: x.startswith('osd.') and (("up" in x) or ("down" in x)),
self.raw_osd_status().split('\n')))
self.log(osd_lines)
in_osds = [int(i[4:].split()[0])
for i in filter(lambda x: " in " in x, osd_lines)]
out_osds = [int(i[4:].split()[0])
for i in filter(lambda x: " out " in x, osd_lines)]
up_osds = [int(i[4:].split()[0])
for i in filter(lambda x: " up " in x, osd_lines)]
down_osds = [int(i[4:].split()[0])
for i in filter(lambda x: " down " in x, osd_lines)]
dead_osds = [int(x.id_)
for x in filter(lambda x:
not x.running(),
self.ctx.daemons.
iter_daemons_of_role('osd', self.cluster))]
live_osds = [int(x.id_) for x in
filter(lambda x:
x.running(),
self.ctx.daemons.iter_daemons_of_role('osd',
self.cluster))]
return {'in': in_osds, 'out': out_osds, 'up': up_osds,
'down': down_osds, 'dead': dead_osds, 'live': live_osds,
'raw': osd_lines}
def get_num_pgs(self):
"""
Check cluster status for the number of pgs
"""
status = self.raw_cluster_status()
self.log(status)
return status['pgmap']['num_pgs']
def create_erasure_code_profile(self, profile_name, profile):
"""
Create an erasure code profile name that can be used as a parameter
when creating an erasure coded pool.
"""
with self.lock:
args = cmd_erasure_code_profile(profile_name, profile)
self.raw_cluster_cmd(*args)
def create_pool_with_unique_name(self, pg_num=16,
erasure_code_profile_name=None,
min_size=None,
erasure_code_use_overwrites=False):
"""
Create a pool named unique_pool_X where X is unique.
"""
name = ""
with self.lock:
name = "unique_pool_%s" % (str(self.next_pool_id),)
self.next_pool_id += 1
self.create_pool(
name,
pg_num,
erasure_code_profile_name=erasure_code_profile_name,
min_size=min_size,
erasure_code_use_overwrites=erasure_code_use_overwrites)
return name
@contextlib.contextmanager
def pool(self, pool_name, pg_num=16, erasure_code_profile_name=None):
self.create_pool(pool_name, pg_num, erasure_code_profile_name)
yield
self.remove_pool(pool_name)
def create_pool(self, pool_name, pg_num=16,
erasure_code_profile_name=None,
min_size=None,
erasure_code_use_overwrites=False):
"""
Create a pool named from the pool_name parameter.
:param pool_name: name of the pool being created.
:param pg_num: initial number of pgs.
:param erasure_code_profile_name: if set and !None create an
erasure coded pool using the profile
:param erasure_code_use_overwrites: if true, allow overwrites
"""
with self.lock:
assert isinstance(pool_name, str)
assert isinstance(pg_num, int)
assert pool_name not in self.pools
self.log("creating pool_name %s" % (pool_name,))
if erasure_code_profile_name:
self.raw_cluster_cmd('osd', 'pool', 'create',
pool_name, str(pg_num), str(pg_num),
'erasure', erasure_code_profile_name)
else:
self.raw_cluster_cmd('osd', 'pool', 'create',
pool_name, str(pg_num))
if min_size is not None:
self.raw_cluster_cmd(
'osd', 'pool', 'set', pool_name,
'min_size',
str(min_size))
if erasure_code_use_overwrites:
self.raw_cluster_cmd(
'osd', 'pool', 'set', pool_name,
'allow_ec_overwrites',
'true')
self.raw_cluster_cmd(
'osd', 'pool', 'application', 'enable',
pool_name, 'rados', '--yes-i-really-mean-it',
run.Raw('||'), 'true')
self.pools[pool_name] = pg_num
time.sleep(1)
def add_pool_snap(self, pool_name, snap_name):
"""
Add pool snapshot
:param pool_name: name of pool to snapshot
:param snap_name: name of snapshot to take
"""
self.raw_cluster_cmd('osd', 'pool', 'mksnap',
str(pool_name), str(snap_name))
def remove_pool_snap(self, pool_name, snap_name):
"""
Remove pool snapshot
:param pool_name: name of pool to snapshot
:param snap_name: name of snapshot to remove
"""
self.raw_cluster_cmd('osd', 'pool', 'rmsnap',
str(pool_name), str(snap_name))
def remove_pool(self, pool_name):
"""
Remove the indicated pool
:param pool_name: Pool to be removed
"""
with self.lock:
assert isinstance(pool_name, str)
assert pool_name in self.pools
self.log("removing pool_name %s" % (pool_name,))
del self.pools[pool_name]
self.raw_cluster_cmd('osd', 'pool', 'rm', pool_name, pool_name,
"--yes-i-really-really-mean-it")
def get_pool(self):
"""
Pick a random pool
"""
with self.lock:
if self.pools:
                # dict views aren't sequences, so materialize before choosing
                return random.choice(list(self.pools.keys()))
def get_pool_pg_num(self, pool_name):
"""
Return the number of pgs in the pool specified.
"""
with self.lock:
assert isinstance(pool_name, str)
if pool_name in self.pools:
return self.pools[pool_name]
return 0
def get_pool_property(self, pool_name, prop):
"""
:param pool_name: pool
:param prop: property to be checked.
:returns: property as string
"""
with self.lock:
assert isinstance(pool_name, str)
assert isinstance(prop, str)
output = self.raw_cluster_cmd(
'osd',
'pool',
'get',
pool_name,
prop)
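            # `ceph osd pool get <pool> <prop>` prints e.g. "size: 3",
            # so the value is the second whitespace-separated token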
return output.split()[1]
def get_pool_int_property(self, pool_name, prop):
return int(self.get_pool_property(pool_name, prop))
def set_pool_property(self, pool_name, prop, val):
"""
:param pool_name: pool
:param prop: property to be set.
:param val: value to set.
This routine retries if set operation fails.
"""
with self.lock:
assert isinstance(pool_name, str)
assert isinstance(prop, str)
assert isinstance(val, int)
tries = 0
while True:
r = self.raw_cluster_cmd_result(
'osd',
'pool',
'set',
pool_name,
prop,
str(val))
if r != 11: # EAGAIN
break
tries += 1
if tries > 50:
raise Exception('timed out getting EAGAIN '
'when setting pool property %s %s = %s' %
(pool_name, prop, val))
self.log('got EAGAIN setting pool property, '
'waiting a few seconds...')
time.sleep(2)
def expand_pool(self, pool_name, by, max_pgs):
"""
Increase the number of pgs in a pool
"""
with self.lock:
assert isinstance(pool_name, str)
assert isinstance(by, int)
assert pool_name in self.pools
if self.get_num_creating() > 0:
return False
if (self.pools[pool_name] + by) > max_pgs:
return False
self.log("increase pool size by %d" % (by,))
new_pg_num = self.pools[pool_name] + by
self.set_pool_property(pool_name, "pg_num", new_pg_num)
self.pools[pool_name] = new_pg_num
return True
def contract_pool(self, pool_name, by, min_pgs):
"""
Decrease the number of pgs in a pool
"""
with self.lock:
self.log('contract_pool %s by %s min %s' % (
pool_name, str(by), str(min_pgs)))
assert isinstance(pool_name, str)
assert isinstance(by, int)
assert pool_name in self.pools
if self.get_num_creating() > 0:
self.log('too many creating')
return False
proj = self.pools[pool_name] - by
if proj < min_pgs:
                self.log('would drop below min_pgs, proj %d, currently %d' %
                         (proj, self.pools[pool_name]))
return False
self.log("decrease pool size by %d" % (by,))
new_pg_num = self.pools[pool_name] - by
self.set_pool_property(pool_name, "pg_num", new_pg_num)
self.pools[pool_name] = new_pg_num
return True
def stop_pg_num_changes(self):
"""
Reset all pg_num_targets back to pg_num, canceling splits and merges
"""
self.log('Canceling any pending splits or merges...')
osd_dump = self.get_osd_dump_json()
try:
for pool in osd_dump['pools']:
if pool['pg_num'] != pool['pg_num_target']:
self.log('Setting pool %s (%d) pg_num %d -> %d' %
(pool['pool_name'], pool['pool'],
pool['pg_num_target'],
pool['pg_num']))
self.raw_cluster_cmd('osd', 'pool', 'set', pool['pool_name'],
'pg_num', str(pool['pg_num']))
except KeyError:
# we don't support pg_num_target before nautilus
pass
def set_pool_pgpnum(self, pool_name, force):
"""
Set pgpnum property of pool_name pool.
"""
with self.lock:
assert isinstance(pool_name, str)
assert pool_name in self.pools
if not force and self.get_num_creating() > 0:
return False
self.set_pool_property(pool_name, 'pgp_num', self.pools[pool_name])
return True
def list_pg_unfound(self, pgid):
"""
return list of unfound pgs with the id specified
"""
r = None
offset = {}
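        # page through `pg <pgid> list_unfound`, feeding the last oid seen
        # back as the offset until the osd stops reporting 'more' results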
while True:
out = self.raw_cluster_cmd('--', 'pg', pgid, 'list_unfound',
json.dumps(offset))
j = json.loads(out)
if r is None:
r = j
else:
r['objects'].extend(j['objects'])
            if 'more' not in j:
break
if j['more'] == 0:
break
offset = j['objects'][-1]['oid']
if 'more' in r:
del r['more']
return r
def get_pg_stats(self):
"""
Dump the cluster and get pg stats
"""
out = self.raw_cluster_cmd('pg', 'dump', '--format=json')
j = json.loads('\n'.join(out.split('\n')[1:]))
try:
return j['pg_map']['pg_stats']
except KeyError:
return j['pg_stats']
def get_osd_df(self, osdid):
"""
Get the osd df stats
"""
out = self.raw_cluster_cmd('osd', 'df', 'name', 'osd.{}'.format(osdid),
'--format=json')
j = json.loads('\n'.join(out.split('\n')[1:]))
return j['nodes'][0]
def get_pool_df(self, name):
"""
Get the pool df stats
"""
out = self.raw_cluster_cmd('df', 'detail', '--format=json')
j = json.loads('\n'.join(out.split('\n')[1:]))
return next((p['stats'] for p in j['pools'] if p['name'] == name),
None)
def get_pgids_to_force(self, backfill):
"""
Return the randomized list of PGs that can have their recovery/backfill forced
"""
        j = self.get_pg_stats()
pgids = []
if backfill:
wanted = ['degraded', 'backfilling', 'backfill_wait']
else:
wanted = ['recovering', 'degraded', 'recovery_wait']
for pg in j:
status = pg['state'].split('+')
for t in wanted:
if random.random() > 0.5 and not ('forced_backfill' in status or 'forced_recovery' in status) and t in status:
pgids.append(pg['pgid'])
break
return pgids
def get_pgids_to_cancel_force(self, backfill):
"""
Return the randomized list of PGs whose recovery/backfill priority is forced
"""
        j = self.get_pg_stats()
pgids = []
if backfill:
wanted = 'forced_backfill'
else:
wanted = 'forced_recovery'
for pg in j:
status = pg['state'].split('+')
if wanted in status and random.random() > 0.5:
pgids.append(pg['pgid'])
return pgids
def compile_pg_status(self):
"""
Return a histogram of pg state values
"""
ret = {}
j = self.get_pg_stats()
for pg in j:
for status in pg['state'].split('+'):
if status not in ret:
ret[status] = 0
ret[status] += 1
return ret
@wait_for_pg_stats # type: ignore
def with_pg_state(self, pool, pgnum, check):
pgstr = self.get_pgid(pool, pgnum)
stats = self.get_single_pg_stats(pgstr)
assert(check(stats['state']))
@wait_for_pg_stats # type: ignore
def with_pg(self, pool, pgnum, check):
pgstr = self.get_pgid(pool, pgnum)
stats = self.get_single_pg_stats(pgstr)
return check(stats)
def get_last_scrub_stamp(self, pool, pgnum):
"""
Get the timestamp of the last scrub.
"""
stats = self.get_single_pg_stats(self.get_pgid(pool, pgnum))
return stats["last_scrub_stamp"]
def do_pg_scrub(self, pool, pgnum, stype):
"""
Scrub pg and wait for scrubbing to finish
"""
init = self.get_last_scrub_stamp(pool, pgnum)
RESEND_TIMEOUT = 120 # Must be a multiple of SLEEP_TIME
FATAL_TIMEOUT = RESEND_TIMEOUT * 3
SLEEP_TIME = 10
timer = 0
while init == self.get_last_scrub_stamp(pool, pgnum):
assert timer < FATAL_TIMEOUT, "fatal timeout trying to " + stype
self.log("waiting for scrub type %s" % (stype,))
if (timer % RESEND_TIMEOUT) == 0:
self.raw_cluster_cmd('pg', stype, self.get_pgid(pool, pgnum))
# The first time in this loop is the actual request
if timer != 0 and stype == "repair":
self.log("WARNING: Resubmitted a non-idempotent repair")
time.sleep(SLEEP_TIME)
timer += SLEEP_TIME
def wait_snap_trimming_complete(self, pool):
"""
Wait for snap trimming on pool to end
"""
POLL_PERIOD = 10
FATAL_TIMEOUT = 600
start = time.time()
poolnum = self.get_pool_num(pool)
poolnumstr = "%s." % (poolnum,)
while (True):
now = time.time()
if (now - start) > FATAL_TIMEOUT:
assert (now - start) < FATAL_TIMEOUT, \
'failed to complete snap trimming before timeout'
all_stats = self.get_pg_stats()
trimming = False
for pg in all_stats:
if (poolnumstr in pg['pgid']) and ('snaptrim' in pg['state']):
self.log("pg {pg} in trimming, state: {state}".format(
pg=pg['pgid'],
state=pg['state']))
trimming = True
if not trimming:
break
self.log("{pool} still trimming, waiting".format(pool=pool))
time.sleep(POLL_PERIOD)
def get_single_pg_stats(self, pgid):
"""
Return pg for the pgid specified.
"""
all_stats = self.get_pg_stats()
for pg in all_stats:
if pg['pgid'] == pgid:
return pg
return None
def get_object_pg_with_shard(self, pool, name, osdid):
"""
"""
pool_dump = self.get_pool_dump(pool)
object_map = self.get_object_map(pool, name)
if pool_dump["type"] == PoolType.ERASURE_CODED:
shard = object_map['acting'].index(osdid)
return "{pgid}s{shard}".format(pgid=object_map['pgid'],
shard=shard)
else:
return object_map['pgid']
def get_object_primary(self, pool, name):
"""
"""
object_map = self.get_object_map(pool, name)
return object_map['acting_primary']
def get_object_map(self, pool, name):
"""
osd map --format=json converted to a python object
:returns: the python object
"""
out = self.raw_cluster_cmd('--format=json', 'osd', 'map', pool, name)
return json.loads('\n'.join(out.split('\n')[1:]))
def get_osd_dump_json(self):
"""
osd dump --format=json converted to a python object
:returns: the python object
"""
out = self.raw_cluster_cmd('osd', 'dump', '--format=json')
return json.loads('\n'.join(out.split('\n')[1:]))
def get_osd_dump(self):
"""
Dump osds
:returns: all osds
"""
return self.get_osd_dump_json()['osds']
def get_osd_metadata(self):
"""
osd metadata --format=json converted to a python object
:returns: the python object containing osd metadata information
"""
out = self.raw_cluster_cmd('osd', 'metadata', '--format=json')
return json.loads('\n'.join(out.split('\n')[1:]))
def get_mgr_dump(self):
out = self.raw_cluster_cmd('mgr', 'dump', '--format=json')
return json.loads(out)
def get_stuck_pgs(self, type_, threshold):
"""
:returns: stuck pg information from the cluster
"""
out = self.raw_cluster_cmd('pg', 'dump_stuck', type_, str(threshold),
'--format=json')
return json.loads(out).get('stuck_pg_stats',[])
def get_num_unfound_objects(self):
"""
Check cluster status to get the number of unfound objects
"""
status = self.raw_cluster_status()
self.log(status)
return status['pgmap'].get('unfound_objects', 0)
def get_num_creating(self):
"""
Find the number of pgs in creating mode.
"""
pgs = self.get_pg_stats()
num = 0
for pg in pgs:
if 'creating' in pg['state']:
num += 1
return num
def get_num_active_clean(self):
"""
Find the number of active and clean pgs.
"""
pgs = self.get_pg_stats()
return self._get_num_active_clean(pgs)
def _get_num_active_clean(self, pgs):
num = 0
for pg in pgs:
if (pg['state'].count('active') and
pg['state'].count('clean') and
not pg['state'].count('stale')):
num += 1
return num
def get_num_active_recovered(self):
"""
Find the number of active and recovered pgs.
"""
pgs = self.get_pg_stats()
return self._get_num_active_recovered(pgs)
def _get_num_active_recovered(self, pgs):
num = 0
for pg in pgs:
if (pg['state'].count('active') and
not pg['state'].count('recover') and
not pg['state'].count('backfilling') and
not pg['state'].count('stale')):
num += 1
return num
def get_is_making_recovery_progress(self):
"""
Return whether there is recovery progress discernable in the
raw cluster status
"""
status = self.raw_cluster_status()
kps = status['pgmap'].get('recovering_keys_per_sec', 0)
bps = status['pgmap'].get('recovering_bytes_per_sec', 0)
ops = status['pgmap'].get('recovering_objects_per_sec', 0)
return kps > 0 or bps > 0 or ops > 0
def get_num_active(self):
"""
Find the number of active pgs.
"""
pgs = self.get_pg_stats()
return self._get_num_active(pgs)
def _get_num_active(self, pgs):
num = 0
for pg in pgs:
if pg['state'].count('active') and not pg['state'].count('stale'):
num += 1
return num
def get_num_down(self):
"""
Find the number of pgs that are down.
"""
pgs = self.get_pg_stats()
num = 0
for pg in pgs:
if ((pg['state'].count('down') and not
pg['state'].count('stale')) or
(pg['state'].count('incomplete') and not
pg['state'].count('stale'))):
num += 1
return num
def get_num_active_down(self):
"""
Find the number of pgs that are either active or down.
"""
pgs = self.get_pg_stats()
return self._get_num_active_down(pgs)
def _get_num_active_down(self, pgs):
num = 0
for pg in pgs:
if ((pg['state'].count('active') and not
pg['state'].count('stale')) or
(pg['state'].count('down') and not
pg['state'].count('stale')) or
(pg['state'].count('incomplete') and not
pg['state'].count('stale'))):
num += 1
return num
def get_num_peered(self):
"""
Find the number of PGs that are peered
"""
pgs = self.get_pg_stats()
return self._get_num_peered(pgs)
def _get_num_peered(self, pgs):
num = 0
for pg in pgs:
if pg['state'].count('peered') and not pg['state'].count('stale'):
num += 1
return num
def is_clean(self):
"""
True if all pgs are clean
"""
pgs = self.get_pg_stats()
if self._get_num_active_clean(pgs) == len(pgs):
return True
else:
self.dump_pgs_not_active_clean()
return False
def is_recovered(self):
"""
True if all pgs have recovered
"""
pgs = self.get_pg_stats()
return self._get_num_active_recovered(pgs) == len(pgs)
def is_active_or_down(self):
"""
True if all pgs are active or down
"""
pgs = self.get_pg_stats()
return self._get_num_active_down(pgs) == len(pgs)
def dump_pgs_not_active_clean(self):
"""
Dumps all pgs that are not active+clean
"""
pgs = self.get_pg_stats()
for pg in pgs:
if pg['state'] != 'active+clean':
self.log('PG %s is not active+clean' % pg['pgid'])
self.log(pg)
def dump_pgs_not_active_down(self):
"""
Dumps all pgs that are not active or down
"""
pgs = self.get_pg_stats()
for pg in pgs:
if 'active' not in pg['state'] and 'down' not in pg['state']:
self.log('PG %s is not active or down' % pg['pgid'])
self.log(pg)
def dump_pgs_not_active(self):
"""
Dumps all pgs that are not active
"""
pgs = self.get_pg_stats()
for pg in pgs:
if 'active' not in pg['state']:
self.log('PG %s is not active' % pg['pgid'])
self.log(pg)
def dump_pgs_not_active_peered(self, pgs):
for pg in pgs:
if (not pg['state'].count('active')) and (not pg['state'].count('peered')):
self.log('PG %s is not active or peered' % pg['pgid'])
self.log(pg)
def wait_for_clean(self, timeout=1200):
"""
Returns true when all pgs are clean.
"""
self.log("waiting for clean")
start = time.time()
num_active_clean = self.get_num_active_clean()
while not self.is_clean():
if timeout is not None:
if self.get_is_making_recovery_progress():
self.log("making progress, resetting timeout")
start = time.time()
else:
self.log("no progress seen, keeping timeout for now")
if time.time() - start >= timeout:
self.log('dumping pgs not clean')
self.dump_pgs_not_active_clean()
assert time.time() - start < timeout, \
'wait_for_clean: failed before timeout expired'
cur_active_clean = self.get_num_active_clean()
if cur_active_clean != num_active_clean:
start = time.time()
num_active_clean = cur_active_clean
time.sleep(3)
self.log("clean!")
def are_all_osds_up(self):
"""
Returns true if all osds are up.
"""
x = self.get_osd_dump()
return (len(x) == sum([(y['up'] > 0) for y in x]))
def wait_for_all_osds_up(self, timeout=None):
"""
When this exits, either the timeout has expired, or all
osds are up.
"""
self.log("waiting for all up")
start = time.time()
while not self.are_all_osds_up():
if timeout is not None:
assert time.time() - start < timeout, \
'timeout expired in wait_for_all_osds_up'
time.sleep(3)
self.log("all up!")
def pool_exists(self, pool):
if pool in self.list_pools():
return True
return False
def wait_for_pool(self, pool, timeout=300):
"""
Wait for a pool to exist
"""
self.log('waiting for pool %s to exist' % pool)
start = time.time()
while not self.pool_exists(pool):
if timeout is not None:
assert time.time() - start < timeout, \
'timeout expired in wait_for_pool'
time.sleep(3)
def wait_for_pools(self, pools):
for pool in pools:
self.wait_for_pool(pool)
def is_mgr_available(self):
x = self.get_mgr_dump()
return x.get('available', False)
def wait_for_mgr_available(self, timeout=None):
self.log("waiting for mgr available")
start = time.time()
while not self.is_mgr_available():
if timeout is not None:
assert time.time() - start < timeout, \
'timeout expired in wait_for_mgr_available'
time.sleep(3)
self.log("mgr available!")
def wait_for_recovery(self, timeout=None):
"""
        Check peering. When this exits, we have recovered.
"""
self.log("waiting for recovery to complete")
start = time.time()
num_active_recovered = self.get_num_active_recovered()
while not self.is_recovered():
now = time.time()
if timeout is not None:
if self.get_is_making_recovery_progress():
self.log("making progress, resetting timeout")
start = time.time()
else:
self.log("no progress seen, keeping timeout for now")
if now - start >= timeout:
if self.is_recovered():
break
self.log('dumping pgs not recovered yet')
self.dump_pgs_not_active_clean()
assert now - start < timeout, \
'wait_for_recovery: failed before timeout expired'
cur_active_recovered = self.get_num_active_recovered()
if cur_active_recovered != num_active_recovered:
start = time.time()
num_active_recovered = cur_active_recovered
time.sleep(3)
self.log("recovered!")
def wait_for_active(self, timeout=None):
"""
        Check peering. When this exits, we are definitely active
"""
self.log("waiting for peering to complete")
start = time.time()
num_active = self.get_num_active()
while not self.is_active():
if timeout is not None:
if time.time() - start >= timeout:
self.log('dumping pgs not active')
self.dump_pgs_not_active()
assert time.time() - start < timeout, \
'wait_for_active: failed before timeout expired'
cur_active = self.get_num_active()
if cur_active != num_active:
start = time.time()
num_active = cur_active
time.sleep(3)
self.log("active!")
def wait_for_active_or_down(self, timeout=None):
"""
        Check peering. When this exits, we are definitely either
        active or down
"""
self.log("waiting for peering to complete or become blocked")
start = time.time()
num_active_down = self.get_num_active_down()
while not self.is_active_or_down():
if timeout is not None:
if time.time() - start >= timeout:
self.log('dumping pgs not active or down')
self.dump_pgs_not_active_down()
assert time.time() - start < timeout, \
'wait_for_active_or_down: failed before timeout expired'
cur_active_down = self.get_num_active_down()
if cur_active_down != num_active_down:
start = time.time()
num_active_down = cur_active_down
time.sleep(3)
self.log("active or down!")
def osd_is_up(self, osd):
"""
Wrapper for osd check
"""
osds = self.get_osd_dump()
return osds[osd]['up'] > 0
def wait_till_osd_is_up(self, osd, timeout=None):
"""
Loop waiting for osd.
"""
self.log('waiting for osd.%d to be up' % osd)
start = time.time()
while not self.osd_is_up(osd):
if timeout is not None:
assert time.time() - start < timeout, \
'osd.%d failed to come up before timeout expired' % osd
time.sleep(3)
self.log('osd.%d is up' % osd)
def is_active(self):
"""
Wrapper to check if all pgs are active
"""
return self.get_num_active() == self.get_num_pgs()
def all_active_or_peered(self):
"""
Wrapper to check if all PGs are active or peered
"""
pgs = self.get_pg_stats()
if self._get_num_active(pgs) + self._get_num_peered(pgs) == len(pgs):
return True
else:
self.dump_pgs_not_active_peered(pgs)
return False
def wait_till_active(self, timeout=None):
"""
Wait until all pgs are active.
"""
self.log("waiting till active")
start = time.time()
while not self.is_active():
if timeout is not None:
if time.time() - start >= timeout:
self.log('dumping pgs not active')
self.dump_pgs_not_active()
assert time.time() - start < timeout, \
'wait_till_active: failed before timeout expired'
time.sleep(3)
self.log("active!")
def wait_till_pg_convergence(self, timeout=None):
start = time.time()
old_stats = None
active_osds = [osd['osd'] for osd in self.get_osd_dump()
if osd['in'] and osd['up']]
while True:
# strictly speaking, no need to wait for mon. but due to the
# "ms inject socket failures" setting, the osdmap could be delayed,
# so mgr is likely to ignore the pg-stat messages with pgs serving
# newly created pools which is not yet known by mgr. so, to make sure
# the mgr is updated with the latest pg-stats, waiting for mon/mgr is
# necessary.
self.flush_pg_stats(active_osds)
new_stats = dict((stat['pgid'], stat['state'])
for stat in self.get_pg_stats())
if old_stats == new_stats:
return old_stats
if timeout is not None:
assert time.time() - start < timeout, \
'failed to reach convergence before %d secs' % timeout
old_stats = new_stats
# longer than mgr_stats_period
time.sleep(5 + 1)
def mark_out_osd(self, osd):
"""
Wrapper to mark osd out.
"""
self.raw_cluster_cmd('osd', 'out', str(osd))
def kill_osd(self, osd):
"""
Kill osds by either power cycling (if indicated by the config)
or by stopping.
"""
if self.config.get('powercycle'):
remote = self.find_remote('osd', osd)
self.log('kill_osd on osd.{o} '
'doing powercycle of {s}'.format(o=osd, s=remote.name))
self._assert_ipmi(remote)
remote.console.power_off()
elif self.config.get('bdev_inject_crash') and self.config.get('bdev_inject_crash_probability'):
if random.uniform(0, 1) < self.config.get('bdev_inject_crash_probability', .5):
self.inject_args(
'osd', osd,
'bdev-inject-crash', self.config.get('bdev_inject_crash'))
                try:
                    self.ctx.daemons.get_daemon('osd', osd, self.cluster).wait()
                except Exception:
                    # wait() raising here means the osd really did crash
                    pass
                else:
                    raise RuntimeError('osd.%s did not fail' % osd)
else:
self.ctx.daemons.get_daemon('osd', osd, self.cluster).stop()
else:
self.ctx.daemons.get_daemon('osd', osd, self.cluster).stop()
@staticmethod
def _assert_ipmi(remote):
assert remote.console.has_ipmi_credentials, (
"powercycling requested but RemoteConsole is not "
"initialized. Check ipmi config.")
def blackhole_kill_osd(self, osd):
"""
Stop osd if nothing else works.
"""
self.inject_args('osd', osd,
'objectstore-blackhole', True)
time.sleep(2)
self.ctx.daemons.get_daemon('osd', osd, self.cluster).stop()
def revive_osd(self, osd, timeout=360, skip_admin_check=False):
"""
Revive osds by either power cycling (if indicated by the config)
or by restarting.
"""
if self.config.get('powercycle'):
remote = self.find_remote('osd', osd)
            self.log('revive_osd on osd.{o} doing powercycle of {s}'.
                     format(o=osd, s=remote.name))
self._assert_ipmi(remote)
remote.console.power_on()
if not remote.console.check_status(300):
raise Exception('Failed to revive osd.{o} via ipmi'.
format(o=osd))
teuthology.reconnect(self.ctx, 60, [remote])
mount_osd_data(self.ctx, remote, self.cluster, str(osd))
self.make_admin_daemon_dir(remote)
self.ctx.daemons.get_daemon('osd', osd, self.cluster).reset()
self.ctx.daemons.get_daemon('osd', osd, self.cluster).restart()
if not skip_admin_check:
# wait for dump_ops_in_flight; this command doesn't appear
# until after the signal handler is installed and it is safe
# to stop the osd again without making valgrind leak checks
# unhappy. see #5924.
self.wait_run_admin_socket('osd', osd,
args=['dump_ops_in_flight'],
timeout=timeout, stdout=DEVNULL)
def mark_down_osd(self, osd):
"""
Cluster command wrapper
"""
self.raw_cluster_cmd('osd', 'down', str(osd))
def mark_in_osd(self, osd):
"""
Cluster command wrapper
"""
self.raw_cluster_cmd('osd', 'in', str(osd))
def signal_osd(self, osd, sig, silent=False):
"""
Wrapper to local get_daemon call which sends the given
signal to the given osd.
"""
self.ctx.daemons.get_daemon('osd', osd,
self.cluster).signal(sig, silent=silent)
## monitors
def signal_mon(self, mon, sig, silent=False):
"""
Wrapper to local get_daemon call
"""
self.ctx.daemons.get_daemon('mon', mon,
self.cluster).signal(sig, silent=silent)
def kill_mon(self, mon):
"""
Kill the monitor by either power cycling (if the config says so),
or by doing a stop.
"""
if self.config.get('powercycle'):
remote = self.find_remote('mon', mon)
self.log('kill_mon on mon.{m} doing powercycle of {s}'.
format(m=mon, s=remote.name))
self._assert_ipmi(remote)
remote.console.power_off()
else:
self.ctx.daemons.get_daemon('mon', mon, self.cluster).stop()
def revive_mon(self, mon):
"""
Restart by either power cycling (if the config says so),
or by doing a normal restart.
"""
if self.config.get('powercycle'):
remote = self.find_remote('mon', mon)
self.log('revive_mon on mon.{m} doing powercycle of {s}'.
format(m=mon, s=remote.name))
self._assert_ipmi(remote)
remote.console.power_on()
self.make_admin_daemon_dir(remote)
self.ctx.daemons.get_daemon('mon', mon, self.cluster).restart()
def revive_mgr(self, mgr):
"""
Restart by either power cycling (if the config says so),
or by doing a normal restart.
"""
if self.config.get('powercycle'):
remote = self.find_remote('mgr', mgr)
self.log('revive_mgr on mgr.{m} doing powercycle of {s}'.
format(m=mgr, s=remote.name))
self._assert_ipmi(remote)
remote.console.power_on()
self.make_admin_daemon_dir(remote)
self.ctx.daemons.get_daemon('mgr', mgr, self.cluster).restart()
def get_mon_status(self, mon):
"""
Extract all the monitor status information from the cluster
"""
out = self.raw_cluster_cmd('tell', 'mon.%s' % mon, 'mon_status')
return json.loads(out)
def get_mon_quorum(self):
"""
Extract monitor quorum information from the cluster
"""
out = self.raw_cluster_cmd('quorum_status')
j = json.loads(out)
return j['quorum']
def wait_for_mon_quorum_size(self, size, timeout=300):
"""
Loop until quorum size is reached.
"""
self.log('waiting for quorum size %d' % size)
sleep = 3
with safe_while(sleep=sleep,
tries=timeout // sleep,
action=f'wait for quorum size {size}') as proceed:
while proceed():
try:
if len(self.get_mon_quorum()) == size:
break
except CommandFailedError as e:
                    # could fail instead of block if the rotating key of the
                    # connected monitor is not updated yet after the mons
                    # form the quorum
if e.exitstatus == errno.EACCES:
pass
else:
raise
self.log("quorum is size %d" % size)
def get_mon_health(self, debug=False, detail=False):
"""
Extract all the monitor health information.
"""
if detail:
out = self.raw_cluster_cmd('health', 'detail', '--format=json')
else:
out = self.raw_cluster_cmd('health', '--format=json')
if debug:
self.log('health:\n{h}'.format(h=out))
return json.loads(out)
def wait_until_healthy(self, timeout=None):
self.log("wait_until_healthy")
start = time.time()
while self.get_mon_health()['status'] != 'HEALTH_OK':
if timeout is not None:
assert time.time() - start < timeout, \
'timeout expired in wait_until_healthy'
time.sleep(3)
self.log("wait_until_healthy done")
def get_filepath(self):
"""
Return path to osd data with {id} needing to be replaced
"""
return '/var/lib/ceph/osd/' + self.cluster + '-{id}'
def make_admin_daemon_dir(self, remote):
"""
Create /var/run/ceph directory on remote site.
:param ctx: Context
:param remote: Remote site
"""
remote.run(args=['sudo',
'install', '-d', '-m0777', '--', '/var/run/ceph', ], )
def get_service_task_status(self, service, status_key):
"""
Return daemon task status for a given ceph service.
:param service: ceph service (mds, osd, etc...)
:param status_key: matching task status key
"""
task_status = {}
status = self.raw_cluster_status()
try:
for k,v in status['servicemap']['services'][service]['daemons'].items():
ts = dict(v).get('task_status', None)
if ts:
task_status[k] = ts[status_key]
except KeyError: # catches missing service and status key
return {}
self.log(task_status)
return task_status
def utility_task(name):
"""
Generate ceph_manager subtask corresponding to ceph_manager
method name
"""
def task(ctx, config):
if config is None:
config = {}
args = config.get('args', [])
kwargs = config.get('kwargs', {})
cluster = config.get('cluster', 'ceph')
fn = getattr(ctx.managers[cluster], name)
fn(*args, **kwargs)
return task
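# These wrappers let a teuthology job invoke CephManager methods directly;
# a hypothetical job-yaml fragment might read:
#   tasks:
#   - ceph_manager.wait_for_clean:
#       kwargs:
#         timeout: 2400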
revive_osd = utility_task("revive_osd")
revive_mon = utility_task("revive_mon")
kill_osd = utility_task("kill_osd")
kill_mon = utility_task("kill_mon")
create_pool = utility_task("create_pool")
remove_pool = utility_task("remove_pool")
wait_for_clean = utility_task("wait_for_clean")
flush_all_pg_stats = utility_task("flush_all_pg_stats")
set_pool_property = utility_task("set_pool_property")
do_pg_scrub = utility_task("do_pg_scrub")
wait_for_pool = utility_task("wait_for_pool")
wait_for_pools = utility_task("wait_for_pools")
ceph-main/qa/tasks/ceph_objectstore_tool.py
"""
ceph_objectstore_tool - Simple test of ceph-objectstore-tool utility
"""
from io import BytesIO
import contextlib
import json
import logging
import os
import sys
import tempfile
import time
from tasks import ceph_manager
from tasks.util.rados import (rados, create_replicated_pool, create_ec_pool)
from teuthology import misc as teuthology
from teuthology.orchestra import run
from teuthology.exceptions import CommandFailedError
# from util.rados import (rados, create_ec_pool,
# create_replicated_pool,
# create_cache_pool)
log = logging.getLogger(__name__)
# Should get cluster name "ceph" from somewhere
# and normal path from osd_data and osd_journal in conf
FSPATH = "/var/lib/ceph/osd/ceph-{id}"
JPATH = "/var/lib/ceph/osd/ceph-{id}/journal"
def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR,
BASE_NAME, DATALINECOUNT):
objects = range(1, NUM_OBJECTS + 1)
for i in objects:
NAME = BASE_NAME + "{num}".format(num=i)
LOCALNAME = os.path.join(DATADIR, NAME)
dataline = range(DATALINECOUNT)
        data = "This is the data for " + NAME + "\n"
        with open(LOCALNAME, "w") as fd:
            for _ in dataline:
                fd.write(data)
def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
BASE_NAME, DATALINECOUNT):
objects = range(1, NUM_OBJECTS + 1)
for i in objects:
NAME = BASE_NAME + "{num}".format(num=i)
DDNAME = os.path.join(DATADIR, NAME)
remote.run(args=['rm', '-f', DDNAME])
dataline = range(DATALINECOUNT)
data = "This is the data for " + NAME + "\n"
DATA = ""
for _ in dataline:
DATA += data
remote.write_file(DDNAME, DATA)
def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR,
BASE_NAME, DATALINECOUNT, POOL, db, ec):
ERRORS = 0
log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS))
objects = range(1, NUM_OBJECTS + 1)
for i in objects:
NAME = BASE_NAME + "{num}".format(num=i)
DDNAME = os.path.join(DATADIR, NAME)
proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME],
wait=False)
# proc = remote.run(args=['rados', '-p', POOL, 'put', NAME, DDNAME])
ret = proc.wait()
if ret != 0:
log.critical("Rados put failed with status {ret}".
format(ret=proc.exitstatus))
sys.exit(1)
db[NAME] = {}
keys = range(i)
db[NAME]["xattr"] = {}
for k in keys:
if k == 0:
continue
mykey = "key{i}-{k}".format(i=i, k=k)
myval = "val{i}-{k}".format(i=i, k=k)
proc = remote.run(args=['rados', '-p', POOL, 'setxattr',
NAME, mykey, myval])
ret = proc.wait()
if ret != 0:
log.error("setxattr failed with {ret}".format(ret=ret))
ERRORS += 1
db[NAME]["xattr"][mykey] = myval
# Erasure coded pools don't support omap
if ec:
continue
# Create omap header in all objects but REPobject1
if i != 1:
myhdr = "hdr{i}".format(i=i)
proc = remote.run(args=['rados', '-p', POOL, 'setomapheader',
NAME, myhdr])
ret = proc.wait()
if ret != 0:
log.critical("setomapheader failed with {ret}".format(ret=ret))
ERRORS += 1
db[NAME]["omapheader"] = myhdr
db[NAME]["omap"] = {}
for k in keys:
if k == 0:
continue
mykey = "okey{i}-{k}".format(i=i, k=k)
myval = "oval{i}-{k}".format(i=i, k=k)
proc = remote.run(args=['rados', '-p', POOL, 'setomapval',
NAME, mykey, myval])
ret = proc.wait()
            if ret != 0:
                log.critical("setomapval failed with {ret}".format(ret=ret))
                ERRORS += 1
            db[NAME]["omap"][mykey] = myval
return ERRORS
def get_lines(filename):
    """Read lines from filename until EOF or a blank line, then delete it."""
    lines = []
    with open(filename, "r") as tmpfd:
        for line in tmpfd:
            line = line.rstrip('\n')
            if not line:
                break
            lines.append(line)
    os.unlink(filename)
    return lines
@contextlib.contextmanager
def task(ctx, config):
"""
Run ceph_objectstore_tool test
The config should be as follows::
ceph_objectstore_tool:
objects: 20 # <number of objects>
pgnum: 12
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'ceph_objectstore_tool task only accepts a dict for configuration'
log.info('Beginning ceph_objectstore_tool...')
log.debug(config)
log.debug(ctx)
clients = ctx.cluster.only(teuthology.is_type('client'))
assert len(clients.remotes) > 0, 'Must specify at least 1 client'
(cli_remote, _) = clients.remotes.popitem()
log.debug(cli_remote)
# clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys()))
# client = clients.popitem()
# log.info(client)
osds = ctx.cluster.only(teuthology.is_type('osd'))
log.info("OSDS")
log.info(osds)
log.info(osds.remotes)
manager = ctx.managers['ceph']
while (len(manager.get_osd_status()['up']) !=
len(manager.get_osd_status()['raw'])):
time.sleep(10)
while (len(manager.get_osd_status()['in']) !=
len(manager.get_osd_status()['up'])):
time.sleep(10)
manager.raw_cluster_cmd('osd', 'set', 'noout')
manager.raw_cluster_cmd('osd', 'set', 'nodown')
PGNUM = config.get('pgnum', 12)
log.info("pgnum: {num}".format(num=PGNUM))
ERRORS = 0
REP_POOL = "rep_pool"
REP_NAME = "REPobject"
create_replicated_pool(cli_remote, REP_POOL, PGNUM)
ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME)
EC_POOL = "ec_pool"
EC_NAME = "ECobject"
create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
ERRORS += test_objectstore(ctx, config, cli_remote,
EC_POOL, EC_NAME, ec=True)
if ERRORS == 0:
log.info("TEST PASSED")
else:
log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))
assert ERRORS == 0
try:
yield
finally:
log.info('Ending ceph_objectstore_tool')
def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
manager = ctx.managers['ceph']
osds = ctx.cluster.only(teuthology.is_type('osd'))
TEUTHDIR = teuthology.get_testdir(ctx)
DATADIR = os.path.join(TEUTHDIR, "ceph.data")
DATALINECOUNT = 10000
ERRORS = 0
NUM_OBJECTS = config.get('objects', 10)
log.info("objects: {num}".format(num=NUM_OBJECTS))
pool_dump = manager.get_pool_dump(REP_POOL)
REPID = pool_dump['pool']
log.debug("repid={num}".format(num=REPID))
db = {}
LOCALDIR = tempfile.mkdtemp("cod")
cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR,
REP_NAME, DATALINECOUNT)
allremote = []
allremote.append(cli_remote)
allremote += list(osds.remotes.keys())
allremote = list(set(allremote))
for remote in allremote:
cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
REP_NAME, DATALINECOUNT)
ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR,
REP_NAME, DATALINECOUNT, REP_POOL, db, ec)
pgs = {}
for stats in manager.get_pg_stats():
if stats["pgid"].find(str(REPID) + ".") != 0:
continue
if pool_dump["type"] == ceph_manager.PoolType.REPLICATED:
for osd in stats["acting"]:
pgs.setdefault(osd, []).append(stats["pgid"])
elif pool_dump["type"] == ceph_manager.PoolType.ERASURE_CODED:
shard = 0
for osd in stats["acting"]:
pgs.setdefault(osd, []).append("{pgid}s{shard}".
format(pgid=stats["pgid"],
shard=shard))
shard += 1
else:
raise Exception("{pool} has an unexpected type {type}".
format(pool=REP_POOL, type=pool_dump["type"]))
log.info(pgs)
log.info(db)
for osd in manager.get_osd_status()['up']:
manager.kill_osd(osd)
time.sleep(5)
pgswithobjects = set()
objsinpg = {}
# Test --op list and generate json for all objects
log.info("Test --op list by generating json for all objects")
prefix = ("sudo ceph-objectstore-tool "
"--data-path {fpath} "
"--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH)
for remote in osds.remotes.keys():
log.debug(remote)
log.debug(osds.remotes[remote])
for role in osds.remotes[remote]:
if not role.startswith("osd."):
continue
osdid = int(role.split('.')[1])
log.info("process osd.{id} on {remote}".
format(id=osdid, remote=remote))
cmd = (prefix + "--op list").format(id=osdid)
try:
lines = remote.sh(cmd, check_status=False).splitlines()
for pgline in lines:
if not pgline:
continue
(pg, obj) = json.loads(pgline)
name = obj['oid']
if name in db:
pgswithobjects.add(pg)
objsinpg.setdefault(pg, []).append(name)
db[name].setdefault("pg2json",
{})[pg] = json.dumps(obj)
except CommandFailedError as e:
log.error("Bad exit status {ret} from --op list request".
format(ret=e.exitstatus))
ERRORS += 1
log.info(db)
log.info(pgswithobjects)
log.info(objsinpg)
if pool_dump["type"] == ceph_manager.PoolType.REPLICATED:
# Test get-bytes
log.info("Test get-bytes and set-bytes")
for basename in db.keys():
file = os.path.join(DATADIR, basename)
GETNAME = os.path.join(DATADIR, "get")
SETNAME = os.path.join(DATADIR, "set")
for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if not role.startswith("osd."):
continue
osdid = int(role.split('.')[1])
if osdid not in pgs:
continue
for pg, JSON in db[basename]["pg2json"].items():
if pg in pgs[osdid]:
cmd = ((prefix + "--pgid {pg}").
format(id=osdid, pg=pg).split())
cmd.append(run.Raw("'{json}'".format(json=JSON)))
cmd += ("get-bytes {fname}".
format(fname=GETNAME).split())
proc = remote.run(args=cmd, check_status=False)
if proc.exitstatus != 0:
remote.run(args="rm -f {getfile}".
format(getfile=GETNAME).split())
log.error("Bad exit status {ret}".
format(ret=proc.exitstatus))
ERRORS += 1
continue
cmd = ("diff -q {file} {getfile}".
format(file=file, getfile=GETNAME))
proc = remote.run(args=cmd.split())
if proc.exitstatus != 0:
log.error("Data from get-bytes differ")
# log.debug("Got:")
# cat_file(logging.DEBUG, GETNAME)
# log.debug("Expected:")
# cat_file(logging.DEBUG, file)
ERRORS += 1
remote.run(args="rm -f {getfile}".
format(getfile=GETNAME).split())
data = ("put-bytes going into {file}\n".
format(file=file))
remote.write_file(SETNAME, data)
cmd = ((prefix + "--pgid {pg}").
format(id=osdid, pg=pg).split())
cmd.append(run.Raw("'{json}'".format(json=JSON)))
cmd += ("set-bytes {fname}".
format(fname=SETNAME).split())
proc = remote.run(args=cmd, check_status=False)
proc.wait()
if proc.exitstatus != 0:
log.info("set-bytes failed for object {obj} "
"in pg {pg} osd.{id} ret={ret}".
format(obj=basename, pg=pg,
id=osdid, ret=proc.exitstatus))
ERRORS += 1
cmd = ((prefix + "--pgid {pg}").
format(id=osdid, pg=pg).split())
cmd.append(run.Raw("'{json}'".format(json=JSON)))
cmd += "get-bytes -".split()
try:
output = remote.sh(cmd, wait=True)
if data != output:
log.error("Data inconsistent after "
"set-bytes, got:")
log.error(output)
ERRORS += 1
except CommandFailedError as e:
log.error("get-bytes after "
"set-bytes ret={ret}".
format(ret=e.exitstatus))
ERRORS += 1
cmd = ((prefix + "--pgid {pg}").
format(id=osdid, pg=pg).split())
cmd.append(run.Raw("'{json}'".format(json=JSON)))
cmd += ("set-bytes {fname}".
format(fname=file).split())
proc = remote.run(args=cmd, check_status=False)
proc.wait()
if proc.exitstatus != 0:
log.info("set-bytes failed for object {obj} "
"in pg {pg} osd.{id} ret={ret}".
format(obj=basename, pg=pg,
id=osdid, ret=proc.exitstatus))
ERRORS += 1
log.info("Test list-attrs get-attr")
for basename in db.keys():
file = os.path.join(DATADIR, basename)
GETNAME = os.path.join(DATADIR, "get")
SETNAME = os.path.join(DATADIR, "set")
for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if not role.startswith("osd."):
continue
osdid = int(role.split('.')[1])
if osdid not in pgs:
continue
for pg, JSON in db[basename]["pg2json"].items():
if pg in pgs[osdid]:
cmd = ((prefix + "--pgid {pg}").
format(id=osdid, pg=pg).split())
cmd.append(run.Raw("'{json}'".format(json=JSON)))
cmd += ["list-attrs"]
try:
keys = remote.sh(cmd, wait=True, stderr=BytesIO()).split()
except CommandFailedError as e:
log.error("Bad exit status {ret}".
format(ret=e.exitstatus))
ERRORS += 1
continue
values = dict(db[basename]["xattr"])
for key in keys:
if (key == "_" or
key == "snapset" or
key == "hinfo_key"):
continue
key = key.strip("_")
if key not in values:
log.error("The key {key} should be present".
format(key=key))
ERRORS += 1
continue
exp = values.pop(key)
cmd = ((prefix + "--pgid {pg}").
format(id=osdid, pg=pg).split())
cmd.append(run.Raw("'{json}'".format(json=JSON)))
cmd += ("get-attr {key}".
format(key="_" + key).split())
try:
val = remote.sh(cmd, wait=True)
except CommandFailedError as e:
log.error("get-attr failed with {ret}".
format(ret=e.exitstatus))
ERRORS += 1
continue
if exp != val:
log.error("For key {key} got value {got} "
"instead of {expected}".
format(key=key, got=val,
expected=exp))
ERRORS += 1
if "hinfo_key" in keys:
cmd_prefix = prefix.format(id=osdid)
cmd = """
expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64)
echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} -
test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder
echo $expected | base64 --decode | \
{prefix} --pgid {pg} '{json}' set-attr {key} -
test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected
""".format(prefix=cmd_prefix, pg=pg, json=JSON,
key="hinfo_key")
log.debug(cmd)
proc = remote.run(args=['bash', '-e', '-x',
'-c', cmd],
check_status=False,
stdout=BytesIO(),
stderr=BytesIO())
proc.wait()
if proc.exitstatus != 0:
log.error("failed with " +
str(proc.exitstatus))
log.error(" ".join([
proc.stdout.getvalue().decode(),
proc.stderr.getvalue().decode(),
]))
ERRORS += 1
if len(values) != 0:
log.error("Not all keys found, remaining keys:")
log.error(values)
log.info("Test pg info")
for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if not role.startswith("osd."):
continue
osdid = int(role.split('.')[1])
if osdid not in pgs:
continue
for pg in pgs[osdid]:
cmd = ((prefix + "--op info --pgid {pg}").
format(id=osdid, pg=pg).split())
try:
info = remote.sh(cmd, wait=True)
except CommandFailedError as e:
log.error("Failure of --op info command with %s",
e.exitstatus)
ERRORS += 1
continue
                if str(pg) not in info:
log.error("Bad data from info: %s", info)
ERRORS += 1
log.info("Test pg logging")
for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if not role.startswith("osd."):
continue
osdid = int(role.split('.')[1])
if osdid not in pgs:
continue
for pg in pgs[osdid]:
cmd = ((prefix + "--op log --pgid {pg}").
format(id=osdid, pg=pg).split())
try:
output = remote.sh(cmd, wait=True)
except CommandFailedError as e:
log.error("Getting log failed for pg {pg} "
"from osd.{id} with {ret}".
format(pg=pg, id=osdid, ret=e.exitstatus))
ERRORS += 1
continue
HASOBJ = pg in pgswithobjects
MODOBJ = "modify" in output
if HASOBJ != MODOBJ:
log.error("Bad log for pg {pg} from osd.{id}".
format(pg=pg, id=osdid))
                    MSG = "" if HASOBJ else "NOT "
log.error("Log should {msg}have a modify entry".
format(msg=MSG))
ERRORS += 1
log.info("Test pg export")
EXP_ERRORS = 0
for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if not role.startswith("osd."):
continue
osdid = int(role.split('.')[1])
if osdid not in pgs:
continue
for pg in pgs[osdid]:
fpath = os.path.join(DATADIR, "osd{id}.{pg}".
format(id=osdid, pg=pg))
cmd = ((prefix + "--op export --pgid {pg} --file {file}").
format(id=osdid, pg=pg, file=fpath))
try:
remote.sh(cmd, wait=True)
except CommandFailedError as e:
log.error("Exporting failed for pg {pg} "
"on osd.{id} with {ret}".
format(pg=pg, id=osdid, ret=e.exitstatus))
EXP_ERRORS += 1
ERRORS += EXP_ERRORS
log.info("Test pg removal")
RM_ERRORS = 0
for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if not role.startswith("osd."):
continue
osdid = int(role.split('.')[1])
if osdid not in pgs:
continue
for pg in pgs[osdid]:
cmd = ((prefix + "--force --op remove --pgid {pg}").
format(pg=pg, id=osdid))
try:
remote.sh(cmd, wait=True)
except CommandFailedError as e:
log.error("Removing failed for pg {pg} "
"on osd.{id} with {ret}".
format(pg=pg, id=osdid, ret=e.exitstatus))
RM_ERRORS += 1
ERRORS += RM_ERRORS
IMP_ERRORS = 0
if EXP_ERRORS == 0 and RM_ERRORS == 0:
log.info("Test pg import")
for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if not role.startswith("osd."):
continue
osdid = int(role.split('.')[1])
if osdid not in pgs:
continue
for pg in pgs[osdid]:
fpath = os.path.join(DATADIR, "osd{id}.{pg}".
format(id=osdid, pg=pg))
cmd = ((prefix + "--op import --file {file}").
format(id=osdid, file=fpath))
try:
remote.sh(cmd, wait=True)
except CommandFailedError as e:
log.error("Import failed from {file} with {ret}".
format(file=fpath, ret=e.exitstatus))
IMP_ERRORS += 1
else:
log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
ERRORS += IMP_ERRORS
if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
log.info("Restarting OSDs....")
        # They still look to be up because nodown is set
for osd in manager.get_osd_status()['up']:
manager.revive_osd(osd)
# Wait for health?
time.sleep(5)
# Let scrub after test runs verify consistency of all copies
log.info("Verify replicated import data")
objects = range(1, NUM_OBJECTS + 1)
for i in objects:
NAME = REP_NAME + "{num}".format(num=i)
TESTNAME = os.path.join(DATADIR, "gettest")
REFNAME = os.path.join(DATADIR, NAME)
proc = rados(ctx, cli_remote,
['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False)
ret = proc.wait()
if ret != 0:
log.error("After import, rados get failed with {ret}".
format(ret=proc.exitstatus))
ERRORS += 1
continue
cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME,
ref=REFNAME)
proc = cli_remote.run(args=cmd, check_status=False)
proc.wait()
if proc.exitstatus != 0:
log.error("Data comparison failed for {obj}".format(obj=NAME))
ERRORS += 1
return ERRORS
ceph-main/qa/tasks/ceph_test_case.py
from typing import Optional, TYPE_CHECKING
import unittest
import time
import logging
from teuthology.exceptions import CommandFailedError
if TYPE_CHECKING:
from tasks.mgr.mgr_test_case import MgrCluster
log = logging.getLogger(__name__)
class TestTimeoutError(RuntimeError):
pass
class CephTestCase(unittest.TestCase):
"""
For test tasks that want to define a structured set of
tests implemented in python. Subclass this with appropriate
helpers for the subsystem you're testing.
"""
# Environment references
mounts = None
fs = None
recovery_fs = None
backup_fs = None
ceph_cluster = None
mds_cluster = None
mgr_cluster: Optional['MgrCluster'] = None
ctx = None
mon_manager = None
# Declarative test requirements: subclasses should override these to indicate
# their special needs. If not met, tests will be skipped.
REQUIRE_MEMSTORE = False
def setUp(self):
self._mon_configs_set = set()
self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
"Starting test {0}".format(self.id()))
if self.REQUIRE_MEMSTORE:
objectstore = self.ceph_cluster.get_config("osd_objectstore", "osd")
if objectstore != "memstore":
# You certainly *could* run this on a real OSD, but you don't want to sit
# here for hours waiting for the test to fill up a 1TB drive!
raise self.skipTest("Require `memstore` OSD backend (test " \
"would take too long on full sized OSDs")
def tearDown(self):
self.config_clear()
self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
"Ended test {0}".format(self.id()))
def config_clear(self):
for section, key in self._mon_configs_set:
self.config_rm(section, key)
self._mon_configs_set.clear()
def _fix_key(self, key):
return str(key).replace(' ', '_')
def config_get(self, section, key):
key = self._fix_key(key)
return self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "get", section, key).strip()
def config_show(self, entity, key):
key = self._fix_key(key)
return self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "show", entity, key).strip()
def config_minimal(self):
return self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "generate-minimal-conf").strip()
def config_rm(self, section, key):
key = self._fix_key(key)
self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "rm", section, key)
# simplification: skip removing from _mon_configs_set;
# let tearDown clear everything again
def config_set(self, section, key, value):
key = self._fix_key(key)
self._mon_configs_set.add((section, key))
self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "set", section, key, str(value))
def cluster_cmd(self, command: str):
assert self.ceph_cluster is not None
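        # e.g. self.cluster_cmd("osd set noout"); note the naive split(" "),
        # so individual arguments must not themselves contain spaces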
return self.ceph_cluster.mon_manager.raw_cluster_cmd(*(command.split(" ")))
def assert_cluster_log(self, expected_pattern, invert_match=False,
timeout=10, watch_channel=None, present=True):
"""
Context manager. Assert that during execution, or up to 5 seconds later,
the Ceph cluster log emits a message matching the expected pattern.
:param expected_pattern: A string that you expect to see in the log output
:type expected_pattern: str
:param watch_channel: Specifies the channel to be watched. This can be
'cluster', 'audit', ...
:type watch_channel: str
:param present: Assert the log entry is present (default: True) or not (False).
:type present: bool
"""
ceph_manager = self.ceph_cluster.mon_manager
class ContextManager(object):
def match(self):
found = expected_pattern in self.watcher_process.stdout.getvalue()
if invert_match:
return not found
return found
def __enter__(self):
self.watcher_process = ceph_manager.run_ceph_w(watch_channel)
def __exit__(self, exc_type, exc_val, exc_tb):
fail = False
if not self.watcher_process.finished:
# Check if we got an early match, wait a bit if we didn't
if present and self.match():
return
elif not present and self.match():
fail = True
else:
log.debug("No log hits yet, waiting...")
# Default monc tick interval is 10s, so wait that long and
# then some grace
time.sleep(5 + timeout)
self.watcher_process.stdin.close()
try:
self.watcher_process.wait()
except CommandFailedError:
pass
if present and not self.match():
log.error(f"Log output: \n{self.watcher_process.stdout.getvalue()}\n")
raise AssertionError(f"Expected log message found: '{expected_pattern}'")
elif fail or (not present and self.match()):
log.error(f"Log output: \n{self.watcher_process.stdout.getvalue()}\n")
raise AssertionError(f"Unexpected log message found: '{expected_pattern}'")
return ContextManager()
def wait_for_health(self, pattern, timeout, check_in_detail=None):
"""
Wait until 'ceph health' contains messages matching the pattern
Also check if @check_in_detail matches detailed health messages
only when @pattern is a code string.
"""
def seen_health_warning():
health = self.ceph_cluster.mon_manager.get_mon_health(debug=False, detail=bool(check_in_detail))
codes = [s for s in health['checks']]
summary_strings = [s[1]['summary']['message'] for s in health['checks'].items()]
if len(summary_strings) == 0:
log.debug("Not expected number of summary strings ({0})".format(summary_strings))
return False
else:
for ss in summary_strings:
if pattern in ss:
return True
if pattern in codes:
if not check_in_detail:
return True
# check if the string is in detail list if asked
detail_strings = [ss['message'] for ss in \
[s for s in health['checks'][pattern]['detail']]]
log.debug(f'detail_strings: {detail_strings}')
for ds in detail_strings:
if check_in_detail in ds:
return True
log.debug(f'detail string "{check_in_detail}" not found')
log.debug("Not found expected summary strings yet ({0})".format(summary_strings))
return False
log.info(f"waiting {timeout}s for health warning matching {pattern}")
self.wait_until_true(seen_health_warning, timeout)
def wait_for_health_clear(self, timeout):
"""
Wait until `ceph health` returns no messages
"""
def is_clear():
health = self.ceph_cluster.mon_manager.get_mon_health()
return len(health['checks']) == 0
self.wait_until_true(is_clear, timeout)
def wait_until_equal(self, get_fn, expect_val, timeout, reject_fn=None, period=5):
elapsed = 0
while True:
val = get_fn()
if val == expect_val:
return
elif reject_fn and reject_fn(val):
raise RuntimeError("wait_until_equal: forbidden value {0} seen".format(val))
else:
if elapsed >= timeout:
raise TestTimeoutError("Timed out after {0} seconds waiting for {1} (currently {2})".format(
elapsed, expect_val, val
))
else:
log.debug("wait_until_equal: {0} != {1}, waiting (timeout={2})...".format(val, expect_val, timeout))
time.sleep(period)
elapsed += period
log.debug("wait_until_equal: success")
@classmethod
def wait_until_true(cls, condition, timeout, check_fn=None, period=5):
elapsed = 0
retry_count = 0
while True:
if condition():
log.debug("wait_until_true: success in {0}s and {1} retries".format(elapsed, retry_count))
return
else:
if elapsed >= timeout:
if check_fn and check_fn() and retry_count < 5:
elapsed = 0
retry_count += 1
log.debug("wait_until_true: making progress, waiting (timeout={0} retry_count={1})...".format(timeout, retry_count))
else:
raise TestTimeoutError("Timed out after {0}s and {1} retries".format(elapsed, retry_count))
else:
log.debug("wait_until_true: waiting (timeout={0} retry_count={1})...".format(timeout, retry_count))
time.sleep(period)
elapsed += period
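    # Usage sketches (the condition callables are hypothetical):
    #   self.wait_until_equal(lambda: get_num_active_mds(), 2, timeout=30)
    #   self.wait_until_true(lambda: cluster_is_idle(), timeout=30)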
| 9,545 | 39.449153 | 140 |
py
|
null |
ceph-main/qa/tasks/cephadm.py
|
"""
Ceph cluster task, deployed via cephadm orchestrator
"""
import argparse
import configobj
import contextlib
import logging
import os
import json
import re
import uuid
import yaml
from copy import deepcopy
from io import BytesIO, StringIO
from tarfile import ReadError
from tasks.ceph_manager import CephManager
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology import packaging
from teuthology.orchestra import run
from teuthology.orchestra.daemon import DaemonGroup
from teuthology.config import config as teuth_config
from textwrap import dedent
from tasks.cephfs.filesystem import MDSCluster, Filesystem
from tasks.util import chacra
# these items we use from ceph.py should probably eventually move elsewhere
from tasks.ceph import get_mons, healthy
from tasks.vip import subst_vip
CEPH_ROLE_TYPES = ['mon', 'mgr', 'osd', 'mds', 'rgw', 'prometheus']
log = logging.getLogger(__name__)
def _shell(ctx, cluster_name, remote, args, extra_cephadm_args=[], **kwargs):
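    """
    Run a command inside ``cephadm shell`` on the given remote, using this
    cluster's container image, conf and admin keyring; ``args`` is the
    command line executed inside the shell container.
    """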
teuthology.get_testdir(ctx)
return remote.run(
args=[
'sudo',
ctx.cephadm,
'--image', ctx.ceph[cluster_name].image,
'shell',
'-c', '/etc/ceph/{}.conf'.format(cluster_name),
'-k', '/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
'--fsid', ctx.ceph[cluster_name].fsid,
] + extra_cephadm_args + [
'--',
] + args,
**kwargs
)
def build_initial_config(ctx, config):
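    """
    Load the seed config shipped alongside this task (cephadm.conf), stamp
    it with the cluster fsid, and apply any 'conf' overrides from the task
    config.
    """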
cluster_name = config['cluster']
path = os.path.join(os.path.dirname(__file__), 'cephadm.conf')
conf = configobj.ConfigObj(path, file_error=True)
conf.setdefault('global', {})
conf['global']['fsid'] = ctx.ceph[cluster_name].fsid
# overrides
for section, keys in config.get('conf',{}).items():
for key, value in keys.items():
log.info(" override: [%s] %s = %s" % (section, key, value))
if section not in conf:
conf[section] = {}
conf[section][key] = value
return conf
def distribute_iscsi_gateway_cfg(ctx, conf_data):
"""
Distribute common gateway config to get the IPs.
These will help in iscsi clients with finding trusted_ip_list.
"""
log.info('Distributing iscsi-gateway.cfg...')
for remote, roles in ctx.cluster.remotes.items():
remote.write_file(
path='/etc/ceph/iscsi-gateway.cfg',
data=conf_data,
sudo=True)
def update_archive_setting(ctx, key, value):
"""
Add logs directory to job's info log file
"""
if ctx.archive is None:
return
with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
info_yaml = yaml.safe_load(info_file)
info_file.seek(0)
if 'archive' in info_yaml:
info_yaml['archive'][key] = value
else:
info_yaml['archive'] = {key: value}
yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
@contextlib.contextmanager
def normalize_hostnames(ctx):
"""
Ensure we have short hostnames throughout, for consistency between
remote.shortname and socket.gethostname() in cephadm.
"""
log.info('Normalizing hostnames...')
cluster = ctx.cluster.filter(lambda r: '.' in r.hostname)
cluster.run(args=[
'sudo',
'hostname',
run.Raw('$(hostname -s)'),
])
try:
yield
finally:
pass
@contextlib.contextmanager
def download_cephadm(ctx, config, ref):
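    """
    Stage a cephadm binary on all remotes: copied from the installed RPM on
    redhat runs, fetched from git when cephadm_git_url and cephadm_branch
    are both set, and otherwise downloaded from chacra.
    """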
cluster_name = config['cluster']
if config.get('cephadm_mode') != 'cephadm-package':
if ctx.config.get('redhat'):
_fetch_cephadm_from_rpm(ctx)
# TODO: come up with a sensible way to detect if we need an "old, uncompiled"
# cephadm
elif 'cephadm_git_url' in config and 'cephadm_branch' in config:
_fetch_cephadm_from_github(ctx, config, ref)
else:
            _fetch_cephadm_from_chacra(ctx, config, cluster_name)
try:
yield
finally:
_rm_cluster(ctx, cluster_name)
if config.get('cephadm_mode') == 'root':
_rm_cephadm(ctx)
def _fetch_cephadm_from_rpm(ctx):
log.info("Copying cephadm installed from an RPM package")
# cephadm already installed from redhat.install task
ctx.cluster.run(
args=[
'cp',
run.Raw('$(which cephadm)'),
ctx.cephadm,
run.Raw('&&'),
'ls', '-l',
ctx.cephadm,
]
)
def _fetch_cephadm_from_github(ctx, config, ref):
ref = config.get('cephadm_branch', ref)
git_url = config.get('cephadm_git_url', teuth_config.get_ceph_git_url())
log.info('Downloading cephadm (repo %s ref %s)...' % (git_url, ref))
if git_url.startswith('https://github.com/'):
# git archive doesn't like https:// URLs, which we use with github.
rest = git_url.split('https://github.com/', 1)[1]
rest = re.sub(r'\.git/?$', '', rest).strip() # no .git suffix
ctx.cluster.run(
args=[
'curl', '--silent',
'https://raw.githubusercontent.com/' + rest + '/' + ref + '/src/cephadm/cephadm',
run.Raw('>'),
ctx.cephadm,
run.Raw('&&'),
'ls', '-l',
ctx.cephadm,
],
)
else:
ctx.cluster.run(
args=[
'git', 'clone', git_url, 'testrepo',
run.Raw('&&'),
'cd', 'testrepo',
run.Raw('&&'),
'git', 'show', f'{ref}:src/cephadm/cephadm',
run.Raw('>'),
ctx.cephadm,
run.Raw('&&'),
'ls', '-l', ctx.cephadm,
],
)
# sanity-check the resulting file and set executable bit
cephadm_file_size = '$(stat -c%s {})'.format(ctx.cephadm)
ctx.cluster.run(
args=[
'test', '-s', ctx.cephadm,
run.Raw('&&'),
'test', run.Raw(cephadm_file_size), "-gt", run.Raw('1000'),
run.Raw('&&'),
'chmod', '+x', ctx.cephadm,
],
)
def _fetch_cephadm_from_chacra(ctx, config, cluster_name):
    log.info('Downloading "compiled" cephadm from chacra')
bootstrap_remote = ctx.ceph[cluster_name].bootstrap_remote
bp = packaging.get_builder_project()(
config.get('project', 'ceph'),
config,
ctx=ctx,
remote=bootstrap_remote,
)
log.info('builder_project result: %s' % (bp._result.json()))
flavor = config.get('flavor', 'default')
branch = config.get('branch')
sha1 = config.get('sha1')
# pull the cephadm binary from chacra
url = chacra.get_binary_url(
'cephadm',
project=bp.project,
distro=bp.distro.split('/')[0],
release=bp.distro.split('/')[1],
arch=bp.arch,
flavor=flavor,
branch=branch,
sha1=sha1,
)
log.info("Discovered cachra url: %s", url)
ctx.cluster.run(
args=[
'curl', '--silent', '-L', url,
run.Raw('>'),
ctx.cephadm,
run.Raw('&&'),
'ls', '-l',
ctx.cephadm,
],
)
# sanity-check the resulting file and set executable bit
cephadm_file_size = '$(stat -c%s {})'.format(ctx.cephadm)
ctx.cluster.run(
args=[
'test', '-s', ctx.cephadm,
run.Raw('&&'),
'test', run.Raw(cephadm_file_size), "-gt", run.Raw('1000'),
run.Raw('&&'),
'chmod', '+x', ctx.cephadm,
],
)
def _rm_cluster(ctx, cluster_name):
log.info('Removing cluster...')
ctx.cluster.run(args=[
'sudo',
ctx.cephadm,
'rm-cluster',
'--fsid', ctx.ceph[cluster_name].fsid,
'--force',
])
def _rm_cephadm(ctx):
log.info('Removing cephadm ...')
ctx.cluster.run(
args=[
'rm',
'-rf',
ctx.cephadm,
],
)
@contextlib.contextmanager
def ceph_log(ctx, config):
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
update_archive_setting(ctx, 'log', '/var/log/ceph')
try:
yield
except Exception:
# we need to know this below
ctx.summary['success'] = False
raise
finally:
log.info('Checking cluster log for badness...')
def first_in_ceph_log(pattern, excludes):
"""
Find the first occurrence of the pattern specified in the Ceph log,
Returns None if none found.
:param pattern: Pattern scanned for.
:param excludes: Patterns to ignore.
:return: First line of text (or None if not found)
"""
args = [
'sudo',
'egrep', pattern,
'/var/log/ceph/{fsid}/ceph.log'.format(
fsid=fsid),
]
if excludes:
for exclude in excludes:
args.extend([run.Raw('|'), 'egrep', '-v', exclude])
args.extend([
run.Raw('|'), 'head', '-n', '1',
])
r = ctx.ceph[cluster_name].bootstrap_remote.run(
stdout=StringIO(),
args=args,
)
stdout = r.stdout.getvalue()
if stdout != '':
return stdout
return None
        if first_in_ceph_log(r'\[ERR\]|\[WRN\]|\[SEC\]',
config.get('log-ignorelist')) is not None:
log.warning('Found errors (ERR|WRN|SEC) in cluster log')
ctx.summary['success'] = False
# use the most severe problem as the failure reason
if 'failure_reason' not in ctx.summary:
                for pattern in [r'\[SEC\]', r'\[ERR\]', r'\[WRN\]']:
match = first_in_ceph_log(pattern, config['log-ignorelist'])
if match is not None:
ctx.summary['failure_reason'] = \
'"{match}" in cluster log'.format(
match=match.rstrip('\n'),
)
break
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and ctx.summary['success']):
# and logs
log.info('Compressing logs...')
run.wait(
ctx.cluster.run(
args=[
'time',
'sudo',
'find',
'/var/log/ceph', # all logs, not just for the cluster
'/var/log/rbd-target-api', # ceph-iscsi
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'--max-args=1',
'--max-procs=0',
'--verbose',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'-5',
'--verbose',
'--',
],
wait=False,
),
)
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
try:
os.makedirs(path)
except OSError:
pass
for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.shortname)
try:
os.makedirs(sub)
except OSError:
pass
try:
teuthology.pull_directory(remote, '/var/log/ceph', # everything
os.path.join(sub, 'log'))
except ReadError:
pass
@contextlib.contextmanager
def ceph_crash(ctx, config):
"""
Gather crash dumps from /var/lib/ceph/$fsid/crash
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
update_archive_setting(ctx, 'crash', '/var/lib/ceph/crash')
try:
yield
finally:
if ctx.archive is not None:
log.info('Archiving crash dumps...')
path = os.path.join(ctx.archive, 'remote')
try:
os.makedirs(path)
except OSError:
pass
for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.shortname)
try:
os.makedirs(sub)
except OSError:
pass
try:
teuthology.pull_directory(remote,
'/var/lib/ceph/%s/crash' % fsid,
os.path.join(sub, 'crash'))
except ReadError:
pass
@contextlib.contextmanager
def pull_image(ctx, config):
cluster_name = config['cluster']
log.info(f'Pulling image {ctx.ceph[cluster_name].image} on all hosts...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
ctx.cephadm,
'--image', ctx.ceph[cluster_name].image,
'pull',
],
wait=False,
)
)
try:
yield
finally:
pass
@contextlib.contextmanager
def ceph_bootstrap(ctx, config):
"""
Bootstrap ceph cluster.
:param ctx: the argparse.Namespace object
:param config: the config dict
"""
cluster_name = config['cluster']
testdir = teuthology.get_testdir(ctx)
fsid = ctx.ceph[cluster_name].fsid
bootstrap_remote = ctx.ceph[cluster_name].bootstrap_remote
first_mon = ctx.ceph[cluster_name].first_mon
first_mon_role = ctx.ceph[cluster_name].first_mon_role
mons = ctx.ceph[cluster_name].mons
ctx.cluster.run(args=[
'sudo', 'mkdir', '-p', '/etc/ceph',
    ])
ctx.cluster.run(args=[
'sudo', 'chmod', '777', '/etc/ceph',
    ])
try:
# write seed config
log.info('Writing seed config...')
conf_fp = BytesIO()
seed_config = build_initial_config(ctx, config)
seed_config.write(conf_fp)
bootstrap_remote.write_file(
path='{}/seed.{}.conf'.format(testdir, cluster_name),
data=conf_fp.getvalue())
log.debug('Final config:\n' + conf_fp.getvalue().decode())
ctx.ceph[cluster_name].conf = seed_config
# register initial daemons
ctx.daemons.register_daemon(
bootstrap_remote, 'mon', first_mon,
cluster=cluster_name,
fsid=fsid,
logger=log.getChild('mon.' + first_mon),
wait=False,
started=True,
)
if not ctx.ceph[cluster_name].roleless:
first_mgr = ctx.ceph[cluster_name].first_mgr
ctx.daemons.register_daemon(
bootstrap_remote, 'mgr', first_mgr,
cluster=cluster_name,
fsid=fsid,
logger=log.getChild('mgr.' + first_mgr),
wait=False,
started=True,
)
# bootstrap
log.info('Bootstrapping...')
cmd = [
'sudo',
ctx.cephadm,
'--image', ctx.ceph[cluster_name].image,
'-v',
'bootstrap',
'--fsid', fsid,
'--config', '{}/seed.{}.conf'.format(testdir, cluster_name),
'--output-config', '/etc/ceph/{}.conf'.format(cluster_name),
'--output-keyring',
'/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
'--output-pub-ssh-key', '{}/{}.pub'.format(testdir, cluster_name),
]
if config.get("no_cgroups_split") is True:
cmd.insert(cmd.index("bootstrap"), "--no-cgroups-split")
if config.get('registry-login'):
registry = config['registry-login']
cmd += [
"--registry-url", registry['url'],
"--registry-username", registry['username'],
"--registry-password", registry['password'],
]
if not ctx.ceph[cluster_name].roleless:
cmd += [
'--mon-id', first_mon,
'--mgr-id', first_mgr,
'--orphan-initial-daemons', # we will do it explicitly!
'--skip-monitoring-stack', # we'll provision these explicitly
]
if mons[first_mon_role].startswith('['):
cmd += ['--mon-addrv', mons[first_mon_role]]
else:
cmd += ['--mon-ip', mons[first_mon_role]]
if config.get('skip_dashboard'):
cmd += ['--skip-dashboard']
if config.get('skip_monitoring_stack'):
cmd += ['--skip-monitoring-stack']
if config.get('single_host_defaults'):
cmd += ['--single-host-defaults']
if not config.get('avoid_pacific_features', False):
cmd += ['--skip-admin-label']
# bootstrap makes the keyring root 0600, so +r it for our purposes
cmd += [
run.Raw('&&'),
'sudo', 'chmod', '+r',
'/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
]
bootstrap_remote.run(args=cmd)
# fetch keys and configs
log.info('Fetching config...')
ctx.ceph[cluster_name].config_file = \
bootstrap_remote.read_file(f'/etc/ceph/{cluster_name}.conf')
log.info('Fetching client.admin keyring...')
ctx.ceph[cluster_name].admin_keyring = \
bootstrap_remote.read_file(f'/etc/ceph/{cluster_name}.client.admin.keyring')
log.info('Fetching mon keyring...')
ctx.ceph[cluster_name].mon_keyring = \
bootstrap_remote.read_file(f'/var/lib/ceph/{fsid}/mon.{first_mon}/keyring', sudo=True)
# fetch ssh key, distribute to additional nodes
log.info('Fetching pub ssh key...')
ssh_pub_key = bootstrap_remote.read_file(
f'{testdir}/{cluster_name}.pub').decode('ascii').strip()
log.info('Installing pub ssh key for root users...')
ctx.cluster.run(args=[
'sudo', 'install', '-d', '-m', '0700', '/root/.ssh',
run.Raw('&&'),
'echo', ssh_pub_key,
run.Raw('|'),
'sudo', 'tee', '-a', '/root/.ssh/authorized_keys',
run.Raw('&&'),
'sudo', 'chmod', '0600', '/root/.ssh/authorized_keys',
])
# set options
if config.get('allow_ptrace', True):
_shell(ctx, cluster_name, bootstrap_remote,
['ceph', 'config', 'set', 'mgr', 'mgr/cephadm/allow_ptrace', 'true'])
if not config.get('avoid_pacific_features', False):
log.info('Distributing conf and client.admin keyring to all hosts + 0755')
_shell(ctx, cluster_name, bootstrap_remote,
['ceph', 'orch', 'client-keyring', 'set', 'client.admin',
'*', '--mode', '0755'],
check_status=False)
# add other hosts
for remote in ctx.cluster.remotes.keys():
if remote == bootstrap_remote:
continue
# note: this may be redundant (see above), but it avoids
# us having to wait for cephadm to do it.
log.info('Writing (initial) conf and keyring to %s' % remote.shortname)
remote.write_file(
path='/etc/ceph/{}.conf'.format(cluster_name),
data=ctx.ceph[cluster_name].config_file)
remote.write_file(
path='/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
data=ctx.ceph[cluster_name].admin_keyring)
log.info('Adding host %s to orchestrator...' % remote.shortname)
_shell(ctx, cluster_name, bootstrap_remote, [
'ceph', 'orch', 'host', 'add',
remote.shortname
])
r = _shell(ctx, cluster_name, bootstrap_remote,
['ceph', 'orch', 'host', 'ls', '--format=json'],
stdout=StringIO())
hosts = [node['hostname'] for node in json.loads(r.stdout.getvalue())]
assert remote.shortname in hosts
yield
finally:
log.info('Cleaning up testdir ceph.* files...')
ctx.cluster.run(args=[
'rm', '-f',
'{}/seed.{}.conf'.format(testdir, cluster_name),
'{}/{}.pub'.format(testdir, cluster_name),
])
log.info('Stopping all daemons...')
# this doesn't block until they are all stopped...
#ctx.cluster.run(args=['sudo', 'systemctl', 'stop', 'ceph.target'])
# stop the daemons we know
for role in ctx.daemons.resolve_role_list(None, CEPH_ROLE_TYPES, True):
cluster, type_, id_ = teuthology.split_role(role)
try:
ctx.daemons.get_daemon(type_, id_, cluster).stop()
except Exception:
log.exception(f'Failed to stop "{role}"')
raise
# tear down anything left (but leave the logs behind)
ctx.cluster.run(
args=[
'sudo',
ctx.cephadm,
'rm-cluster',
'--fsid', fsid,
'--force',
'--keep-logs',
],
check_status=False, # may fail if upgrading from old cephadm
)
# clean up /etc/ceph
ctx.cluster.run(args=[
'sudo', 'rm', '-f',
'/etc/ceph/{}.conf'.format(cluster_name),
'/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
])
@contextlib.contextmanager
def ceph_mons(ctx, config):
"""
Deploy any additional mons
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
try:
daemons = {}
if config.get('add_mons_via_daemon_add'):
# This is the old way of adding mons that works with the (early) octopus
# cephadm scheduler.
num_mons = 1
for remote, roles in ctx.cluster.remotes.items():
for mon in [r for r in roles
if teuthology.is_type('mon', cluster_name)(r)]:
c_, _, id_ = teuthology.split_role(mon)
if c_ == cluster_name and id_ == ctx.ceph[cluster_name].first_mon:
continue
log.info('Adding %s on %s' % (mon, remote.shortname))
num_mons += 1
_shell(ctx, cluster_name, remote, [
'ceph', 'orch', 'daemon', 'add', 'mon',
remote.shortname + ':' + ctx.ceph[cluster_name].mons[mon] + '=' + id_,
])
ctx.daemons.register_daemon(
remote, 'mon', id_,
cluster=cluster_name,
fsid=fsid,
logger=log.getChild(mon),
wait=False,
started=True,
)
daemons[mon] = (remote, id_)
with contextutil.safe_while(sleep=1, tries=180) as proceed:
while proceed():
log.info('Waiting for %d mons in monmap...' % (num_mons))
r = _shell(
ctx=ctx,
cluster_name=cluster_name,
remote=remote,
args=[
'ceph', 'mon', 'dump', '-f', 'json',
],
stdout=StringIO(),
)
j = json.loads(r.stdout.getvalue())
if len(j['mons']) == num_mons:
break
else:
nodes = []
for remote, roles in ctx.cluster.remotes.items():
for mon in [r for r in roles
if teuthology.is_type('mon', cluster_name)(r)]:
c_, _, id_ = teuthology.split_role(mon)
log.info('Adding %s on %s' % (mon, remote.shortname))
nodes.append(remote.shortname
+ ':' + ctx.ceph[cluster_name].mons[mon]
+ '=' + id_)
if c_ == cluster_name and id_ == ctx.ceph[cluster_name].first_mon:
continue
daemons[mon] = (remote, id_)
_shell(ctx, cluster_name, remote, [
'ceph', 'orch', 'apply', 'mon',
str(len(nodes)) + ';' + ';'.join(nodes)]
)
            for mon, i in daemons.items():
remote, id_ = i
ctx.daemons.register_daemon(
remote, 'mon', id_,
cluster=cluster_name,
fsid=fsid,
logger=log.getChild(mon),
wait=False,
started=True,
)
with contextutil.safe_while(sleep=1, tries=180) as proceed:
while proceed():
log.info('Waiting for %d mons in monmap...' % (len(nodes)))
r = _shell(
ctx=ctx,
cluster_name=cluster_name,
remote=remote,
args=[
'ceph', 'mon', 'dump', '-f', 'json',
],
stdout=StringIO(),
)
j = json.loads(r.stdout.getvalue())
if len(j['mons']) == len(nodes):
break
# refresh our (final) ceph.conf file
bootstrap_remote = ctx.ceph[cluster_name].bootstrap_remote
log.info('Generating final ceph.conf file...')
r = _shell(
ctx=ctx,
cluster_name=cluster_name,
remote=bootstrap_remote,
args=[
'ceph', 'config', 'generate-minimal-conf',
],
stdout=StringIO(),
)
ctx.ceph[cluster_name].config_file = r.stdout.getvalue()
yield
finally:
pass
@contextlib.contextmanager
def ceph_mgrs(ctx, config):
"""
Deploy any additional mgrs
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
try:
nodes = []
daemons = {}
for remote, roles in ctx.cluster.remotes.items():
for mgr in [r for r in roles
if teuthology.is_type('mgr', cluster_name)(r)]:
c_, _, id_ = teuthology.split_role(mgr)
log.info('Adding %s on %s' % (mgr, remote.shortname))
nodes.append(remote.shortname + '=' + id_)
if c_ == cluster_name and id_ == ctx.ceph[cluster_name].first_mgr:
continue
daemons[mgr] = (remote, id_)
if nodes:
_shell(ctx, cluster_name, remote, [
'ceph', 'orch', 'apply', 'mgr',
str(len(nodes)) + ';' + ';'.join(nodes)]
)
for mgr, i in daemons.items():
remote, id_ = i
ctx.daemons.register_daemon(
remote, 'mgr', id_,
cluster=cluster_name,
fsid=fsid,
logger=log.getChild(mgr),
wait=False,
started=True,
)
yield
finally:
pass
@contextlib.contextmanager
def ceph_osds(ctx, config):
"""
Deploy OSDs
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
try:
log.info('Deploying OSDs...')
# provision OSDs in numeric order
id_to_remote = {}
devs_by_remote = {}
for remote, roles in ctx.cluster.remotes.items():
devs_by_remote[remote] = teuthology.get_scratch_devices(remote)
for osd in [r for r in roles
if teuthology.is_type('osd', cluster_name)(r)]:
_, _, id_ = teuthology.split_role(osd)
id_to_remote[int(id_)] = (osd, remote)
cur = 0
for osd_id in sorted(id_to_remote.keys()):
osd, remote = id_to_remote[osd_id]
_, _, id_ = teuthology.split_role(osd)
assert int(id_) == cur
devs = devs_by_remote[remote]
assert devs ## FIXME ##
dev = devs.pop()
if all(_ in dev for _ in ('lv', 'vg')):
short_dev = dev.replace('/dev/', '')
else:
short_dev = dev
log.info('Deploying %s on %s with %s...' % (
osd, remote.shortname, dev))
_shell(ctx, cluster_name, remote, [
'ceph-volume', 'lvm', 'zap', dev])
add_osd_args = ['ceph', 'orch', 'daemon', 'add', 'osd',
remote.shortname + ':' + short_dev]
osd_method = config.get('osd_method')
if osd_method:
add_osd_args.append(osd_method)
_shell(ctx, cluster_name, remote, add_osd_args)
ctx.daemons.register_daemon(
remote, 'osd', id_,
cluster=cluster_name,
fsid=fsid,
logger=log.getChild(osd),
wait=False,
started=True,
)
cur += 1
if cur == 0:
_shell(ctx, cluster_name, remote, [
'ceph', 'orch', 'apply', 'osd', '--all-available-devices',
])
# expect the number of scratch devs
num_osds = sum(map(len, devs_by_remote.values()))
assert num_osds
else:
# expect the number of OSDs we created
num_osds = cur
log.info(f'Waiting for {num_osds} OSDs to come up...')
with contextutil.safe_while(sleep=1, tries=120) as proceed:
while proceed():
p = _shell(ctx, cluster_name, ctx.ceph[cluster_name].bootstrap_remote,
['ceph', 'osd', 'stat', '-f', 'json'], stdout=StringIO())
j = json.loads(p.stdout.getvalue())
if int(j.get('num_up_osds', 0)) == num_osds:
                    break
if not hasattr(ctx, 'managers'):
ctx.managers = {}
ctx.managers[cluster_name] = CephManager(
ctx.ceph[cluster_name].bootstrap_remote,
ctx=ctx,
logger=log.getChild('ceph_manager.' + cluster_name),
cluster=cluster_name,
cephadm=True,
)
yield
finally:
pass
@contextlib.contextmanager
def ceph_mdss(ctx, config):
"""
    Deploy MDSs
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
nodes = []
daemons = {}
for remote, roles in ctx.cluster.remotes.items():
for role in [r for r in roles
if teuthology.is_type('mds', cluster_name)(r)]:
c_, _, id_ = teuthology.split_role(role)
log.info('Adding %s on %s' % (role, remote.shortname))
nodes.append(remote.shortname + '=' + id_)
daemons[role] = (remote, id_)
if nodes:
_shell(ctx, cluster_name, remote, [
'ceph', 'orch', 'apply', 'mds',
'all',
str(len(nodes)) + ';' + ';'.join(nodes)]
)
for role, i in daemons.items():
remote, id_ = i
ctx.daemons.register_daemon(
remote, 'mds', id_,
cluster=cluster_name,
fsid=fsid,
logger=log.getChild(role),
wait=False,
started=True,
)
yield
@contextlib.contextmanager
def cephfs_setup(ctx, config):
mdss = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
# If there are any MDSs, then create a filesystem for them to use
# Do this last because requires mon cluster to be up and running
if len(mdss) > 0:
log.info('Setting up CephFS filesystem(s)...')
cephfs_config = config.get('cephfs', {})
fs_configs = cephfs_config.pop('fs', [{'name': 'cephfs'}])
set_allow_multifs = len(fs_configs) > 1
# wait for standbys to become available (slow due to valgrind, perhaps)
mdsc = MDSCluster(ctx)
with contextutil.safe_while(sleep=2,tries=150) as proceed:
while proceed():
if len(mdsc.get_standby_daemons()) >= len(mdss):
break
fss = []
for fs_config in fs_configs:
assert isinstance(fs_config, dict)
name = fs_config.pop('name')
temp = deepcopy(cephfs_config)
teuthology.deep_merge(temp, fs_config)
subvols = config.get('subvols', None)
if subvols:
teuthology.deep_merge(temp, {'subvols': subvols})
fs = Filesystem(ctx, fs_config=temp, name=name, create=True)
if set_allow_multifs:
fs.set_allow_multifs()
set_allow_multifs = False
fss.append(fs)
yield
for fs in fss:
fs.destroy()
else:
yield
@contextlib.contextmanager
def ceph_monitoring(daemon_type, ctx, config):
"""
Deploy prometheus, node-exporter, etc.
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
nodes = []
daemons = {}
for remote, roles in ctx.cluster.remotes.items():
for role in [r for r in roles
if teuthology.is_type(daemon_type, cluster_name)(r)]:
c_, _, id_ = teuthology.split_role(role)
log.info('Adding %s on %s' % (role, remote.shortname))
nodes.append(remote.shortname + '=' + id_)
daemons[role] = (remote, id_)
if nodes:
_shell(ctx, cluster_name, remote, [
'ceph', 'orch', 'apply', daemon_type,
str(len(nodes)) + ';' + ';'.join(nodes)]
)
for role, i in daemons.items():
remote, id_ = i
ctx.daemons.register_daemon(
remote, daemon_type, id_,
cluster=cluster_name,
fsid=fsid,
logger=log.getChild(role),
wait=False,
started=True,
)
yield
@contextlib.contextmanager
def ceph_rgw(ctx, config):
"""
Deploy rgw
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
nodes = {}
daemons = {}
for remote, roles in ctx.cluster.remotes.items():
for role in [r for r in roles
if teuthology.is_type('rgw', cluster_name)(r)]:
c_, _, id_ = teuthology.split_role(role)
log.info('Adding %s on %s' % (role, remote.shortname))
svc = '.'.join(id_.split('.')[0:2])
if svc not in nodes:
nodes[svc] = []
nodes[svc].append(remote.shortname + '=' + id_)
daemons[role] = (remote, id_)
    for svc, svc_nodes in nodes.items():
        _shell(ctx, cluster_name, remote, [
            'ceph', 'orch', 'apply', 'rgw', svc,
            '--placement',
            str(len(svc_nodes)) + ';' + ';'.join(svc_nodes)]
)
for role, i in daemons.items():
remote, id_ = i
ctx.daemons.register_daemon(
remote, 'rgw', id_,
cluster=cluster_name,
fsid=fsid,
logger=log.getChild(role),
wait=False,
started=True,
)
yield
@contextlib.contextmanager
def ceph_iscsi(ctx, config):
"""
Deploy iSCSIs
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
nodes = []
daemons = {}
ips = []
for remote, roles in ctx.cluster.remotes.items():
for role in [r for r in roles
if teuthology.is_type('iscsi', cluster_name)(r)]:
c_, _, id_ = teuthology.split_role(role)
log.info('Adding %s on %s' % (role, remote.shortname))
nodes.append(remote.shortname + '=' + id_)
daemons[role] = (remote, id_)
ips.append(remote.ip_address)
trusted_ip_list = ','.join(ips)
if nodes:
poolname = 'datapool'
# ceph osd pool create datapool 3 3 replicated
_shell(ctx, cluster_name, remote, [
'ceph', 'osd', 'pool', 'create',
poolname, '3', '3', 'replicated']
)
_shell(ctx, cluster_name, remote, [
'rbd', 'pool', 'init', poolname]
)
# ceph orch apply iscsi datapool (admin)user (admin)password
_shell(ctx, cluster_name, remote, [
'ceph', 'orch', 'apply', 'iscsi',
poolname, 'admin', 'admin',
'--trusted_ip_list', trusted_ip_list,
'--placement', str(len(nodes)) + ';' + ';'.join(nodes)]
)
# used by iscsi client to identify valid gateway ip's
conf_data = dedent(f"""
[config]
trusted_ip_list = {trusted_ip_list}
""")
distribute_iscsi_gateway_cfg(ctx, conf_data)
for role, i in daemons.items():
remote, id_ = i
ctx.daemons.register_daemon(
remote, 'iscsi', id_,
cluster=cluster_name,
fsid=fsid,
logger=log.getChild(role),
wait=False,
started=True,
)
yield
@contextlib.contextmanager
def ceph_clients(ctx, config):
cluster_name = config['cluster']
log.info('Setting up client nodes...')
clients = ctx.cluster.only(teuthology.is_type('client', cluster_name))
for remote, roles_for_host in clients.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'client',
cluster_name):
name = teuthology.ceph_role(role)
client_keyring = '/etc/ceph/{0}.{1}.keyring'.format(cluster_name,
name)
r = _shell(
ctx=ctx,
cluster_name=cluster_name,
remote=remote,
args=[
'ceph', 'auth',
'get-or-create', name,
'mon', 'allow *',
'osd', 'allow *',
'mds', 'allow *',
'mgr', 'allow *',
],
stdout=StringIO(),
)
keyring = r.stdout.getvalue()
remote.sudo_write_file(client_keyring, keyring, mode='0644')
yield
@contextlib.contextmanager
def ceph_initial():
try:
yield
finally:
log.info('Teardown complete')
## public methods
@contextlib.contextmanager
def stop(ctx, config):
"""
Stop ceph daemons
For example::
tasks:
- ceph.stop: [mds.*]
tasks:
- ceph.stop: [osd.0, osd.2]
tasks:
- ceph.stop:
daemons: [osd.0, osd.2]
"""
if config is None:
config = {}
elif isinstance(config, list):
config = {'daemons': config}
daemons = ctx.daemons.resolve_role_list(
config.get('daemons', None), CEPH_ROLE_TYPES, True)
clusters = set()
for role in daemons:
cluster, type_, id_ = teuthology.split_role(role)
ctx.daemons.get_daemon(type_, id_, cluster).stop()
clusters.add(cluster)
# for cluster in clusters:
# ctx.ceph[cluster].watchdog.stop()
# ctx.ceph[cluster].watchdog.join()
yield
def shell(ctx, config):
"""
Execute (shell) commands
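    A yaml sketch (host role names are illustrative)::
        tasks:
        - cephadm.shell:
            env: [sha1]
            host.a:
            - ceph orch status
            - ceph orch ls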
"""
cluster_name = config.get('cluster', 'ceph')
args = []
for k in config.pop('env', []):
args.extend(['-e', k + '=' + ctx.config.get(k, '')])
for k in config.pop('volumes', []):
args.extend(['-v', k])
if 'all-roles' in config and len(config) == 1:
a = config['all-roles']
roles = teuthology.all_roles(ctx.cluster)
config = dict((id_, a) for id_ in roles if not id_.startswith('host.'))
elif 'all-hosts' in config and len(config) == 1:
a = config['all-hosts']
roles = teuthology.all_roles(ctx.cluster)
config = dict((id_, a) for id_ in roles if id_.startswith('host.'))
for role, cmd in config.items():
(remote,) = ctx.cluster.only(role).remotes.keys()
log.info('Running commands on role %s host %s', role, remote.name)
if isinstance(cmd, list):
for c in cmd:
_shell(ctx, cluster_name, remote,
['bash', '-c', subst_vip(ctx, c)],
extra_cephadm_args=args)
else:
assert isinstance(cmd, str)
_shell(ctx, cluster_name, remote,
['bash', '-ex', '-c', subst_vip(ctx, cmd)],
extra_cephadm_args=args)
def apply(ctx, config):
"""
Apply spec
tasks:
- cephadm.apply:
specs:
- service_type: rgw
service_id: foo
spec:
rgw_frontend_port: 8000
- service_type: rgw
service_id: bar
spec:
rgw_frontend_port: 9000
zone: bar
realm: asdf
"""
cluster_name = config.get('cluster', 'ceph')
specs = config.get('specs', [])
y = subst_vip(ctx, yaml.dump_all(specs))
log.info(f'Applying spec(s):\n{y}')
_shell(
ctx, cluster_name, ctx.ceph[cluster_name].bootstrap_remote,
['ceph', 'orch', 'apply', '-i', '-'],
stdin=y,
)
def wait_for_service(ctx, config):
"""
Wait for a service to be fully started
tasks:
- cephadm.wait_for_service:
service: rgw.foo
timeout: 60 # defaults to 300
"""
cluster_name = config.get('cluster', 'ceph')
timeout = config.get('timeout', 300)
service = config.get('service')
assert service
log.info(
f'Waiting for {cluster_name} service {service} to start (timeout {timeout})...'
)
with contextutil.safe_while(sleep=1, tries=timeout) as proceed:
while proceed():
r = _shell(
ctx=ctx,
cluster_name=cluster_name,
remote=ctx.ceph[cluster_name].bootstrap_remote,
args=[
'ceph', 'orch', 'ls', '-f', 'json',
],
stdout=StringIO(),
)
j = json.loads(r.stdout.getvalue())
svc = None
for s in j:
if s['service_name'] == service:
svc = s
break
if svc:
                log.info(
                    f"{service} has {svc['status']['running']}/{svc['status']['size']}"
                )
                if svc['status']['running'] == svc['status']['size']:
                    break
@contextlib.contextmanager
def tweaked_option(ctx, config):
"""
set an option, and then restore it with its original value
Note, due to the way how tasks are executed/nested, it's not suggested to
use this method as a standalone task. otherwise, it's likely that it will
restore the tweaked option at the /end/ of 'tasks' block.
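    A minimal sketch of the intended nested use (hypothetical caller)::
        with tweaked_option(ctx, {'mon-health-to-clog': False}):
            ...  # e.g. restart daemons while the option is overridden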
"""
saved_options = {}
# we can complicate this when necessary
options = ['mon-health-to-clog']
type_, id_ = 'mon', '*'
cluster = config.get('cluster', 'ceph')
manager = ctx.managers[cluster]
if id_ == '*':
get_from = next(teuthology.all_roles_of_type(ctx.cluster, type_))
else:
get_from = id_
for option in options:
if option not in config:
continue
value = 'true' if config[option] else 'false'
option = option.replace('-', '_')
old_value = manager.get_config(type_, get_from, option)
if value != old_value:
saved_options[option] = old_value
manager.inject_args(type_, id_, option, value)
yield
for option, value in saved_options.items():
manager.inject_args(type_, id_, option, value)
@contextlib.contextmanager
def restart(ctx, config):
"""
restart ceph daemons
For example::
tasks:
- ceph.restart: [all]
For example::
tasks:
- ceph.restart: [osd.0, mon.1, mds.*]
or::
tasks:
- ceph.restart:
daemons: [osd.0, mon.1]
wait-for-healthy: false
wait-for-osds-up: true
:param ctx: Context
:param config: Configuration
"""
if config is None:
config = {}
elif isinstance(config, list):
config = {'daemons': config}
daemons = ctx.daemons.resolve_role_list(
config.get('daemons', None), CEPH_ROLE_TYPES, True)
clusters = set()
log.info('daemons %s' % daemons)
with tweaked_option(ctx, config):
for role in daemons:
cluster, type_, id_ = teuthology.split_role(role)
d = ctx.daemons.get_daemon(type_, id_, cluster)
assert d, 'daemon %s does not exist' % role
d.stop()
if type_ == 'osd':
ctx.managers[cluster].mark_down_osd(id_)
d.restart()
clusters.add(cluster)
if config.get('wait-for-healthy', True):
for cluster in clusters:
healthy(ctx=ctx, config=dict(cluster=cluster))
if config.get('wait-for-osds-up', False):
for cluster in clusters:
ctx.managers[cluster].wait_for_all_osds_up()
yield
@contextlib.contextmanager
def distribute_config_and_admin_keyring(ctx, config):
"""
Distribute a sufficient config and keyring for clients
"""
cluster_name = config['cluster']
log.info('Distributing (final) config and client.admin keyring...')
for remote, roles in ctx.cluster.remotes.items():
remote.write_file(
'/etc/ceph/{}.conf'.format(cluster_name),
ctx.ceph[cluster_name].config_file,
sudo=True)
remote.write_file(
path='/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
data=ctx.ceph[cluster_name].admin_keyring,
sudo=True)
try:
yield
finally:
ctx.cluster.run(args=[
'sudo', 'rm', '-f',
'/etc/ceph/{}.conf'.format(cluster_name),
'/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
])
@contextlib.contextmanager
def crush_setup(ctx, config):
cluster_name = config['cluster']
profile = config.get('crush_tunables', 'default')
log.info('Setting crush tunables to %s', profile)
_shell(ctx, cluster_name, ctx.ceph[cluster_name].bootstrap_remote,
args=['ceph', 'osd', 'crush', 'tunables', profile])
yield
@contextlib.contextmanager
def create_rbd_pool(ctx, config):
if config.get('create_rbd_pool', False):
cluster_name = config['cluster']
log.info('Waiting for OSDs to come up')
teuthology.wait_until_osds_up(
ctx,
cluster=ctx.cluster,
remote=ctx.ceph[cluster_name].bootstrap_remote,
ceph_cluster=cluster_name,
)
log.info('Creating RBD pool')
_shell(ctx, cluster_name, ctx.ceph[cluster_name].bootstrap_remote,
args=['sudo', 'ceph', '--cluster', cluster_name,
'osd', 'pool', 'create', 'rbd', '8'])
_shell(ctx, cluster_name, ctx.ceph[cluster_name].bootstrap_remote,
args=['sudo', 'ceph', '--cluster', cluster_name,
'osd', 'pool', 'application', 'enable',
'rbd', 'rbd', '--yes-i-really-mean-it'
])
yield
@contextlib.contextmanager
def _bypass():
yield
@contextlib.contextmanager
def initialize_config(ctx, config):
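    """
    Generate a cluster fsid, choose monitor IPs/ports, and determine the
    bootstrap remote plus first mon/mgr (fabricating mon roles per host
    when running roleless).
    """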
cluster_name = config['cluster']
testdir = teuthology.get_testdir(ctx)
ctx.ceph[cluster_name].thrashers = []
# fixme: setup watchdog, ala ceph.py
ctx.ceph[cluster_name].roleless = False # see below
first_ceph_cluster = False
if not hasattr(ctx, 'daemons'):
first_ceph_cluster = True
# cephadm mode?
if 'cephadm_mode' not in config:
config['cephadm_mode'] = 'root'
assert config['cephadm_mode'] in ['root', 'cephadm-package']
if config['cephadm_mode'] == 'root':
ctx.cephadm = testdir + '/cephadm'
else:
ctx.cephadm = 'cephadm' # in the path
if first_ceph_cluster:
# FIXME: this is global for all clusters
ctx.daemons = DaemonGroup(
use_cephadm=ctx.cephadm)
# uuid
fsid = str(uuid.uuid1())
log.info('Cluster fsid is %s' % fsid)
ctx.ceph[cluster_name].fsid = fsid
# mon ips
log.info('Choosing monitor IPs and ports...')
remotes_and_roles = ctx.cluster.remotes.items()
ips = [host for (host, port) in
(remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)]
if config.get('roleless', False):
# mons will be named after hosts
first_mon = None
max_mons = config.get('max_mons', 5)
for remote, _ in remotes_and_roles:
ctx.cluster.remotes[remote].append('mon.' + remote.shortname)
if not first_mon:
first_mon = remote.shortname
bootstrap_remote = remote
max_mons -= 1
if not max_mons:
break
log.info('No mon roles; fabricating mons')
roles = [role_list for (remote, role_list) in ctx.cluster.remotes.items()]
ctx.ceph[cluster_name].mons = get_mons(
roles, ips, cluster_name,
mon_bind_msgr2=config.get('mon_bind_msgr2', True),
mon_bind_addrvec=config.get('mon_bind_addrvec', True),
)
log.info('Monitor IPs: %s' % ctx.ceph[cluster_name].mons)
if config.get('roleless', False):
ctx.ceph[cluster_name].roleless = True
ctx.ceph[cluster_name].bootstrap_remote = bootstrap_remote
ctx.ceph[cluster_name].first_mon = first_mon
ctx.ceph[cluster_name].first_mon_role = 'mon.' + first_mon
else:
first_mon_role = sorted(ctx.ceph[cluster_name].mons.keys())[0]
_, _, first_mon = teuthology.split_role(first_mon_role)
(bootstrap_remote,) = ctx.cluster.only(first_mon_role).remotes.keys()
log.info('First mon is mon.%s on %s' % (first_mon,
bootstrap_remote.shortname))
ctx.ceph[cluster_name].bootstrap_remote = bootstrap_remote
ctx.ceph[cluster_name].first_mon = first_mon
ctx.ceph[cluster_name].first_mon_role = first_mon_role
others = ctx.cluster.remotes[bootstrap_remote]
mgrs = sorted([r for r in others
if teuthology.is_type('mgr', cluster_name)(r)])
if not mgrs:
raise RuntimeError('no mgrs on the same host as first mon %s' % first_mon)
_, _, first_mgr = teuthology.split_role(mgrs[0])
log.info('First mgr is %s' % (first_mgr))
ctx.ceph[cluster_name].first_mgr = first_mgr
yield
@contextlib.contextmanager
def task(ctx, config):
"""
Deploy ceph cluster using cephadm
For example, teuthology.yaml can contain the 'defaults' section:
defaults:
cephadm:
containers:
image: 'quay.io/ceph-ci/ceph'
Using overrides makes it possible to customize it per run.
The equivalent 'overrides' section looks like:
overrides:
cephadm:
containers:
image: 'quay.io/ceph-ci/ceph'
registry-login:
url: registry-url
username: registry-user
password: registry-password
:param ctx: the argparse.Namespace object
:param config: the config dict
"""
if config is None:
config = {}
assert isinstance(config, dict), \
"task only supports a dictionary for configuration"
overrides = ctx.config.get('overrides', {})
teuthology.deep_merge(config, overrides.get('ceph', {}))
teuthology.deep_merge(config, overrides.get('cephadm', {}))
log.info('Config: ' + str(config))
# set up cluster context
if not hasattr(ctx, 'ceph'):
ctx.ceph = {}
if 'cluster' not in config:
config['cluster'] = 'ceph'
cluster_name = config['cluster']
if cluster_name not in ctx.ceph:
ctx.ceph[cluster_name] = argparse.Namespace()
ctx.ceph[cluster_name].bootstrapped = False
# image
teuth_defaults = teuth_config.get('defaults', {})
cephadm_defaults = teuth_defaults.get('cephadm', {})
containers_defaults = cephadm_defaults.get('containers', {})
container_image_name = containers_defaults.get('image', None)
containers = config.get('containers', {})
container_image_name = containers.get('image', container_image_name)
if not hasattr(ctx.ceph[cluster_name], 'image'):
ctx.ceph[cluster_name].image = config.get('image')
ref = ctx.config.get("branch", "main")
if not ctx.ceph[cluster_name].image:
if not container_image_name:
raise Exception("Configuration error occurred. "
"The 'image' value is undefined for 'cephadm' task. "
"Please provide corresponding options in the task's "
"config, task 'overrides', or teuthology 'defaults' "
"section.")
sha1 = config.get('sha1')
flavor = config.get('flavor', 'default')
if sha1:
if flavor == "crimson":
ctx.ceph[cluster_name].image = container_image_name + ':' + sha1 + '-' + flavor
else:
ctx.ceph[cluster_name].image = container_image_name + ':' + sha1
ref = sha1
else:
# fall back to using the branch value
ctx.ceph[cluster_name].image = container_image_name + ':' + ref
log.info('Cluster image is %s' % ctx.ceph[cluster_name].image)
with contextutil.nested(
#if the cluster is already bootstrapped bypass corresponding methods
lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
else initialize_config(ctx=ctx, config=config),
lambda: ceph_initial(),
lambda: normalize_hostnames(ctx=ctx),
lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
else download_cephadm(ctx=ctx, config=config, ref=ref),
lambda: ceph_log(ctx=ctx, config=config),
lambda: ceph_crash(ctx=ctx, config=config),
lambda: pull_image(ctx=ctx, config=config),
lambda: _bypass() if (ctx.ceph[cluster_name].bootstrapped)\
else ceph_bootstrap(ctx, config),
lambda: crush_setup(ctx=ctx, config=config),
lambda: ceph_mons(ctx=ctx, config=config),
lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config),
lambda: ceph_mgrs(ctx=ctx, config=config),
lambda: ceph_osds(ctx=ctx, config=config),
lambda: ceph_mdss(ctx=ctx, config=config),
lambda: cephfs_setup(ctx=ctx, config=config),
lambda: ceph_rgw(ctx=ctx, config=config),
lambda: ceph_iscsi(ctx=ctx, config=config),
lambda: ceph_monitoring('prometheus', ctx=ctx, config=config),
lambda: ceph_monitoring('node-exporter', ctx=ctx, config=config),
lambda: ceph_monitoring('alertmanager', ctx=ctx, config=config),
lambda: ceph_monitoring('grafana', ctx=ctx, config=config),
lambda: ceph_clients(ctx=ctx, config=config),
lambda: create_rbd_pool(ctx=ctx, config=config),
):
try:
if config.get('wait-for-healthy', True):
healthy(ctx=ctx, config=config)
log.info('Setup complete, yielding')
yield
finally:
log.info('Teardown begin')
| 56,456 | 32.565398 | 99 |
py
|
null |
ceph-main/qa/tasks/cephfs_mirror.py
|
"""
Task for running cephfs mirror daemons
"""
import logging
from teuthology.orchestra import run
from teuthology import misc
from teuthology.exceptions import ConfigError
from teuthology.task import Task
from tasks.ceph_manager import get_valgrind_args
from tasks.util import get_remote_for_role
log = logging.getLogger(__name__)
class CephFSMirror(Task):
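    """
    Run a cephfs-mirror daemon for a client role. A yaml sketch (task and
    role names are illustrative)::
        tasks:
        - cephfs-mirror:
            client: client.mirror
    """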
def __init__(self, ctx, config):
super(CephFSMirror, self).__init__(ctx, config)
self.log = log
def setup(self):
super(CephFSMirror, self).setup()
try:
self.client = self.config['client']
except KeyError:
raise ConfigError('cephfs-mirror requires a client to connect')
self.cluster_name, type_, self.client_id = misc.split_role(self.client)
if not type_ == 'client':
raise ConfigError(f'client role {self.client} must be a client')
self.remote = get_remote_for_role(self.ctx, self.client)
def begin(self):
super(CephFSMirror, self).begin()
testdir = misc.get_testdir(self.ctx)
args = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'daemon-helper',
'term',
]
if 'valgrind' in self.config:
args = get_valgrind_args(
testdir, 'cephfs-mirror-{id}'.format(id=self.client),
args, self.config.get('valgrind'))
args.extend([
'cephfs-mirror',
'--cluster',
self.cluster_name,
'--id',
self.client_id,
])
if 'run_in_foreground' in self.config:
args.extend(['--foreground'])
self.ctx.daemons.add_daemon(
self.remote, 'cephfs-mirror', self.client,
args=args,
logger=self.log.getChild(self.client),
stdin=run.PIPE,
wait=False,
)
def end(self):
mirror_daemon = self.ctx.daemons.get_daemon('cephfs-mirror', self.client)
mirror_daemon.stop()
super(CephFSMirror, self).end()
task = CephFSMirror
| 2,149 | 28.054054 | 81 |
py
|
null |
ceph-main/qa/tasks/cephfs_mirror_thrash.py
|
"""
Task for thrashing cephfs-mirror daemons
"""
import contextlib
import logging
import random
import signal
import socket
import time
from gevent import sleep
from gevent.greenlet import Greenlet
from gevent.event import Event
from teuthology.exceptions import CommandFailedError
from teuthology.orchestra import run
from tasks.thrasher import Thrasher
log = logging.getLogger(__name__)
class CephFSMirrorThrasher(Thrasher, Greenlet):
"""
CephFSMirrorThrasher::
The CephFSMirrorThrasher thrashes cephfs-mirror daemons during execution of other
tasks (workunits, etc).
The config is optional. Many of the config parameters are a maximum value
to use when selecting a random value from a range. The config is a dict
containing some or all of:
cluster: [default: ceph] cluster to thrash
max_thrash: [default: 1] the maximum number of active cephfs-mirror daemons per
cluster will be thrashed at any given time.
min_thrash_delay: [default: 60] minimum number of seconds to delay before
thrashing again.
max_thrash_delay: [default: 120] maximum number of seconds to delay before
thrashing again.
max_revive_delay: [default: 10] maximum number of seconds to delay before
bringing back a thrashed cephfs-mirror daemon.
randomize: [default: true] enables randomization and use the max/min values
seed: [no default] seed the random number generator
Examples::
The following example disables randomization, and uses the max delay
values:
tasks:
- ceph:
- cephfs_mirror_thrash:
randomize: False
max_thrash_delay: 10
"""
def __init__(self, ctx, config, cluster, daemons):
super(CephFSMirrorThrasher, self).__init__()
self.ctx = ctx
self.config = config
self.cluster = cluster
self.daemons = daemons
self.logger = log
self.name = 'thrasher.cephfs_mirror.[{cluster}]'.format(cluster = cluster)
self.stopping = Event()
self.randomize = bool(self.config.get('randomize', True))
self.max_thrash = int(self.config.get('max_thrash', 1))
self.min_thrash_delay = float(self.config.get('min_thrash_delay', 5.0))
self.max_thrash_delay = float(self.config.get('max_thrash_delay', 10))
self.max_revive_delay = float(self.config.get('max_revive_delay', 15.0))
def _run(self):
try:
self.do_thrash()
except Exception as e:
# See _run exception comment for MDSThrasher
self.set_thrasher_exception(e)
self.logger.exception("exception:")
# Allow successful completion so gevent doesn't see an exception.
# The DaemonWatchdog will observe the error and tear down the test.
def log(self, x):
"""Write data to logger assigned to this CephFSMirrorThrasher"""
self.logger.info(x)
def stop(self):
self.stopping.set()
def do_thrash(self):
"""
Perform the random thrashing action
"""
self.log('starting thrash for cluster {cluster}'.format(cluster=self.cluster))
stats = {
"kill": 0,
}
while not self.stopping.is_set():
delay = self.max_thrash_delay
if self.randomize:
delay = random.randrange(self.min_thrash_delay, self.max_thrash_delay)
if delay > 0.0:
self.log('waiting for {delay} secs before thrashing'.format(delay=delay))
self.stopping.wait(delay)
if self.stopping.is_set():
continue
killed_daemons = []
weight = 1.0 / len(self.daemons)
count = 0
for daemon in self.daemons:
skip = random.uniform(0.0, 1.0)
if weight <= skip:
self.log('skipping daemon {label} with skip ({skip}) > weight ({weight})'.format(
label=daemon.id_, skip=skip, weight=weight))
continue
self.log('kill {label}'.format(label=daemon.id_))
try:
daemon.signal(signal.SIGTERM)
except Exception as e:
self.log(f'exception when stopping mirror daemon: {e}')
else:
killed_daemons.append(daemon)
stats['kill'] += 1
# if we've reached max_thrash, we're done
count += 1
if count >= self.max_thrash:
break
if killed_daemons:
# wait for a while before restarting
delay = self.max_revive_delay
if self.randomize:
                    delay = random.uniform(0.0, self.max_revive_delay)
self.log('waiting for {delay} secs before reviving daemons'.format(delay=delay))
sleep(delay)
for daemon in killed_daemons:
self.log('waiting for {label}'.format(label=daemon.id_))
try:
run.wait([daemon.proc], timeout=600)
except CommandFailedError:
pass
except:
self.log('Failed to stop {label}'.format(label=daemon.id_))
try:
# try to capture a core dump
daemon.signal(signal.SIGABRT)
except socket.error:
pass
raise
finally:
daemon.reset()
for daemon in killed_daemons:
self.log('reviving {label}'.format(label=daemon.id_))
daemon.start()
for stat in stats:
self.log("stat['{key}'] = {value}".format(key = stat, value = stats[stat]))
@contextlib.contextmanager
def task(ctx, config):
"""
Stress test the cephfs-mirror by thrashing while another task/workunit
is running.
Please refer to CephFSMirrorThrasher class for further information on the
available options.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'cephfs_mirror_thrash task only accepts a dict for configuration'
cluster = config.get('cluster', 'ceph')
daemons = list(ctx.daemons.iter_daemons_of_role('cephfs-mirror', cluster))
assert len(daemons) > 0, \
'cephfs_mirror_thrash task requires at least 1 cephfs-mirror daemon'
# choose random seed
if 'seed' in config:
seed = int(config['seed'])
else:
seed = int(time.time())
log.info('cephfs_mirror_thrash using random seed: {seed}'.format(seed=seed))
random.seed(seed)
thrasher = CephFSMirrorThrasher(ctx, config, cluster, daemons)
thrasher.start()
ctx.ceph[cluster].thrashers.append(thrasher)
try:
log.debug('Yielding')
yield
finally:
log.info('joining cephfs_mirror_thrash')
thrasher.stop()
if thrasher.exception is not None:
raise RuntimeError('error during thrashing')
thrasher.join()
log.info('done joining')
| 7,264 | 32.022727 | 101 |
py
|
null |
ceph-main/qa/tasks/cephfs_test_runner.py
|
import contextlib
import logging
import os
import unittest
from unittest import suite, loader, case
from teuthology.task import interactive
from teuthology import misc
from tasks.cephfs.filesystem import Filesystem, MDSCluster, CephCluster
from tasks.mgr.mgr_test_case import MgrCluster
log = logging.getLogger(__name__)
class DecoratingLoader(loader.TestLoader):
"""
A specialization of TestLoader that tags some extra attributes
onto test classes as they are loaded.
"""
def __init__(self, params):
self._params = params
super(DecoratingLoader, self).__init__()
def _apply_params(self, obj):
for k, v in self._params.items():
if obj.__class__ is type:
cls = obj
else:
cls = obj.__class__
setattr(cls, k, v)
def loadTestsFromTestCase(self, testCaseClass):
self._apply_params(testCaseClass)
return super(DecoratingLoader, self).loadTestsFromTestCase(testCaseClass)
def loadTestsFromName(self, name, module=None):
result = super(DecoratingLoader, self).loadTestsFromName(name, module)
# Special case for when we were called with the name of a method, we get
# a suite with one TestCase
tests_in_result = list(result)
if len(tests_in_result) == 1 and isinstance(tests_in_result[0], case.TestCase):
self._apply_params(tests_in_result[0])
return result
class LogStream(object):
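    """
    File-like object that forwards complete lines to the teuthology log,
    buffering any trailing partial line until it is terminated.
    """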
def __init__(self):
self.buffer = ""
def write(self, data):
self.buffer += data
if "\n" in self.buffer:
lines = self.buffer.split("\n")
for line in lines[:-1]:
log.info(line)
self.buffer = lines[-1]
def flush(self):
pass
class InteractiveFailureResult(unittest.TextTestResult):
"""
Specialization that implements interactive-on-error style
behavior.
"""
ctx = None
def addFailure(self, test, err):
log.error(self._exc_info_to_string(err, test))
log.error("Failure in test '{0}', going interactive".format(
self.getDescription(test)
))
interactive.task(ctx=self.ctx, config=None)
def addError(self, test, err):
log.error(self._exc_info_to_string(err, test))
log.error("Error in test '{0}', going interactive".format(
self.getDescription(test)
))
interactive.task(ctx=self.ctx, config=None)
@contextlib.contextmanager
def task(ctx, config):
"""
Run the CephFS test cases.
Run everything in tasks/cephfs/test_*.py:
::
tasks:
- install:
- ceph:
- ceph-fuse:
- cephfs_test_runner:
`modules` argument allows running only some specific modules:
::
tasks:
...
- cephfs_test_runner:
modules:
- tasks.cephfs.test_sessionmap
- tasks.cephfs.test_auto_repair
By default, any cases that can't be run on the current cluster configuration
will generate a failure. When the optional `fail_on_skip` argument is set
to false, any tests that can't be run on the current configuration will
simply be skipped:
::
tasks:
...
- cephfs_test_runner:
fail_on_skip: false
"""
ceph_cluster = CephCluster(ctx)
if len(list(misc.all_roles_of_type(ctx.cluster, 'mds'))):
mds_cluster = MDSCluster(ctx)
fs = Filesystem(ctx)
else:
mds_cluster = None
fs = None
if len(list(misc.all_roles_of_type(ctx.cluster, 'mgr'))):
mgr_cluster = MgrCluster(ctx)
else:
mgr_cluster = None
# Mount objects, sorted by ID
if hasattr(ctx, 'mounts'):
mounts = [v for k, v in sorted(ctx.mounts.items(), key=lambda mount: mount[0])]
else:
# The test configuration has a filesystem but no fuse/kclient mounts
mounts = []
decorating_loader = DecoratingLoader({
"ctx": ctx,
"mounts": mounts,
"fs": fs,
"ceph_cluster": ceph_cluster,
"mds_cluster": mds_cluster,
"mgr_cluster": mgr_cluster,
})
fail_on_skip = config.get('fail_on_skip', True)
# Put useful things onto ctx for interactive debugging
ctx.fs = fs
ctx.mds_cluster = mds_cluster
ctx.mgr_cluster = mgr_cluster
    # Depending on config, either load specific modules, or scan for modules
if config and 'modules' in config and config['modules']:
module_suites = []
for mod_name in config['modules']:
# Test names like cephfs.test_auto_repair
module_suites.append(decorating_loader.loadTestsFromName(mod_name))
overall_suite = suite.TestSuite(module_suites)
else:
# Default, run all tests
overall_suite = decorating_loader.discover(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"cephfs/"
)
)
if ctx.config.get("interactive-on-error", False):
InteractiveFailureResult.ctx = ctx
result_class = InteractiveFailureResult
else:
result_class = unittest.TextTestResult
class LoggingResult(result_class):
def startTest(self, test):
log.info("Starting test: {0}".format(self.getDescription(test)))
return super(LoggingResult, self).startTest(test)
def addSkip(self, test, reason):
if fail_on_skip:
# Don't just call addFailure because that requires a traceback
self.failures.append((test, reason))
else:
super(LoggingResult, self).addSkip(test, reason)
# Execute!
result = unittest.TextTestRunner(
stream=LogStream(),
resultclass=LoggingResult,
verbosity=2,
failfast=True).run(overall_suite)
if not result.wasSuccessful():
result.printErrors() # duplicate output at end for convenience
bad_tests = []
for test, error in result.errors:
bad_tests.append(str(test))
for test, failure in result.failures:
bad_tests.append(str(test))
raise RuntimeError("Test failure: {0}".format(", ".join(bad_tests)))
yield
| 6,319 | 28.53271 | 87 |
py
|
null |
ceph-main/qa/tasks/cephfs_upgrade_snap.py
|
"""
Upgrade cluster snap format.
"""
import logging
import time
from tasks.cephfs.filesystem import Filesystem
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Upgrade CephFS file system snap format.
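    A minimal yaml sketch (assuming the task is referenced by module name)::
        tasks:
        - cephfs_upgrade_snap: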
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'snap-upgrade task only accepts a dict for configuration'
fs = Filesystem(ctx)
mds_map = fs.get_mds_map()
assert(mds_map['max_mds'] == 1)
json = fs.run_scrub(["start", "/", "force", "recursive", "repair"])
    if json and json['return_code'] == 0:
assert(fs.wait_until_scrub_complete(tag=json["scrub_tag"]) == True)
log.info("scrub / completed")
else:
log.info("scrub / failed: {}".format(json))
json = fs.run_scrub(["start", "~mdsdir", "force", "recursive", "repair"])
    if json and json['return_code'] == 0:
assert(fs.wait_until_scrub_complete(tag=json["scrub_tag"]) == True)
log.info("scrub ~mdsdir completed")
else:
log.info("scrub / failed: {}".format(json))
for i in range(0, 10):
mds_map = fs.get_mds_map()
if (mds_map['flags'] & (1<<1)) != 0 and (mds_map['flags'] & (1<<4)) != 0:
break
time.sleep(10)
assert((mds_map['flags'] & (1<<1)) != 0) # Test CEPH_MDSMAP_ALLOW_SNAPS
assert((mds_map['flags'] & (1<<4)) != 0) # Test CEPH_MDSMAP_ALLOW_MULTIMDS_SNAPS
| 1,422 | 28.645833 | 84 |
py
|
null |
ceph-main/qa/tasks/check_counter.py
|
import logging
import json
from teuthology.task import Task
from teuthology import misc
from tasks import ceph_manager
log = logging.getLogger(__name__)
class CheckCounter(Task):
"""
Use this task to validate that some daemon perf counters were
incremented by the nested tasks.
Config:
'cluster_name': optional, specify which cluster
'target': dictionary of daemon type to list of performance counters.
'dry_run': just log the value of the counters, don't fail if they
aren't nonzero.
Success condition is that for all of the named counters, at least
one of the daemons of that type has the counter nonzero.
Example to check cephfs dirfrag splits are happening:
- install:
- ceph:
- ceph-fuse:
- check-counter:
counters:
mds:
- "mds.dir_split"
-
name: "mds.dir_update"
min: 3
- workunit: ...
"""
@property
def admin_remote(self):
first_mon = misc.get_first_mon(self.ctx, None)
(result,) = self.ctx.cluster.only(first_mon).remotes.keys()
return result
def start(self):
log.info("START")
def end(self):
overrides = self.ctx.config.get('overrides', {})
misc.deep_merge(self.config, overrides.get('check-counter', {}))
cluster_name = self.config.get('cluster_name', None)
dry_run = self.config.get('dry_run', False)
targets = self.config.get('counters', {})
if cluster_name is None:
cluster_name = next(iter(self.ctx.managers.keys()))
mon_manager = ceph_manager.CephManager(self.admin_remote, ctx=self.ctx, logger=log.getChild('ceph_manager'))
active_mgr = json.loads(mon_manager.raw_cluster_cmd("mgr", "dump", "--format=json-pretty"))["active_name"]
for daemon_type, counters in targets.items():
# List of 'a', 'b', 'c'...
daemon_ids = list(misc.all_roles_of_type(self.ctx.cluster, daemon_type))
daemons = dict([(daemon_id,
self.ctx.daemons.get_daemon(daemon_type, daemon_id))
for daemon_id in daemon_ids])
expected = set()
seen = set()
for daemon_id, daemon in daemons.items():
if not daemon.running():
log.info("Ignoring daemon {0}, it isn't running".format(daemon_id))
continue
elif daemon_type == 'mgr' and daemon_id != active_mgr:
continue
else:
log.debug("Getting stats from {0}".format(daemon_id))
manager = self.ctx.managers[cluster_name]
proc = manager.admin_socket(daemon_type, daemon_id, ["perf", "dump"])
response_data = proc.stdout.getvalue().strip()
if response_data:
perf_dump = json.loads(response_data)
else:
log.warning("No admin socket response from {0}, skipping".format(daemon_id))
continue
                for counter in counters:
                    # Reset per counter so values don't leak between entries.
                    minval = ''
                    expected_val = ''
                    if isinstance(counter, dict):
                        name = counter['name']
                        if 'min' in counter:
                            minval = counter['min']
                        if 'expected_val' in counter:
                            expected_val = counter['expected_val']
                    else:
                        name = counter
                        minval = 1
expected.add(name)
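                    # Walk the dotted counter path through the nested perf
                    # dump, e.g. "mds.dir_split" -> perf_dump["mds"]["dir_split"].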
val = perf_dump
for key in name.split('.'):
if key not in val:
log.warning(f"Counter '{name}' not found on daemon {daemon_type}.{daemon_id}")
val = None
break
val = val[key]
if val is not None:
log.info(f"Daemon {daemon_type}.{daemon_id} {name}={val}")
if isinstance(minval, int) and val >= minval:
seen.add(name)
elif isinstance(expected_val, int) and val == expected_val:
seen.add(name)
if not dry_run:
unseen = set(expected) - set(seen)
if unseen:
raise RuntimeError("The following counters failed to be set "
"on {0} daemons: {1}".format(
daemon_type, unseen
))
task = CheckCounter
| 4,748 | 35.251908 | 116 |
py
|
null |
ceph-main/qa/tasks/cifs_mount.py
|
"""
Mount cifs clients. Unmount when finished.
"""
import contextlib
import logging
import os
import time
from teuthology import misc as teuthology
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Mount/unmount a cifs client.
The config is optional and defaults to mounting on all clients. If
a config is given, it is expected to be a list of clients to do
this operation on.
Example that starts smbd and mounts cifs on all nodes::
tasks:
- ceph:
- samba:
- cifs-mount:
- interactive:
Example that splits smbd and cifs:
tasks:
- ceph:
- samba: [samba.0]
- cifs-mount: [client.0]
- ceph-fuse: [client.1]
- interactive:
Example that specifies the share name:
tasks:
- ceph:
- ceph-fuse:
- samba:
samba.0:
cephfuse: "{testdir}/mnt.0"
- cifs-mount:
client.0:
share: cephfuse
:param ctx: Context
:param config: Configuration
"""
log.info('Mounting cifs clients...')
if config is None:
config = dict(('client.{id}'.format(id=id_), None)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
elif isinstance(config, list):
config = dict((name, None) for name in config)
clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
from .samba import get_sambas
samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')]
sambas = list(get_sambas(ctx=ctx, roles=samba_roles))
(ip, _) = sambas[0][1].ssh.get_transport().getpeername()
log.info('samba ip: {ip}'.format(ip=ip))
for id_, remote in clients:
mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format(
id=id_, remote=remote,mnt=mnt))
remote.run(
args=[
'mkdir',
'--',
mnt,
],
)
rolestr = 'client.{id_}'.format(id_=id_)
unc = "ceph"
log.info("config: {c}".format(c=config))
if config[rolestr] is not None and 'share' in config[rolestr]:
unc = config[rolestr]['share']
remote.run(
args=[
'sudo',
'mount',
'-t',
'cifs',
'//{sambaip}/{unc}'.format(sambaip=ip, unc=unc),
'-o',
'username=ubuntu,password=ubuntu',
mnt,
],
)
remote.run(
args=[
'sudo',
'chown',
'ubuntu:ubuntu',
'{m}/'.format(m=mnt),
],
)
try:
yield
finally:
log.info('Unmounting cifs clients...')
for id_, remote in clients:
remote.run(
args=[
'sudo',
'umount',
mnt,
],
)
for id_, remote in clients:
while True:
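                # Retry rmdir until the mount point is free: the piped grep
                # exits 0 (so we sleep and loop) only while "Device or
                # resource busy" is still being reported.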
try:
remote.run(
args=[
'rmdir', '--', mnt,
run.Raw('2>&1'),
run.Raw('|'),
'grep', 'Device or resource busy',
],
)
                    time.sleep(1)
except Exception:
break
| 3,758 | 26.23913 | 111 |
py
|
null |
ceph-main/qa/tasks/cram.py
|
"""
Cram tests
"""
import logging
import os
from tasks.util.workunit import get_refspec_after_overrides
from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.orchestra import run
from teuthology.config import config as teuth_config
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Run all cram tests from the specified paths on the specified
    clients. Each client runs tests in parallel by default; you can
    disable that by adding the "parallel: False" option.
Limitations:
Tests must have a .t suffix. Tests with duplicate names will
overwrite each other, so only the last one will run.
For example::
tasks:
- ceph:
- cram:
clients:
client.0:
- qa/test.t
            - qa/test2.t
client.1: [qa/test.t]
branch: foo
parallel: False
You can also run a list of cram tests on all clients::
tasks:
- ceph:
- cram:
clients:
all: [qa/test.t]
:param ctx: Context
:param config: Configuration
"""
assert isinstance(config, dict)
assert 'clients' in config and isinstance(config['clients'], dict), \
'configuration must contain a dictionary of clients'
clients = teuthology.replace_all_with_clients(ctx.cluster,
config['clients'])
testdir = teuthology.get_testdir(ctx)
overrides = ctx.config.get('overrides', {})
refspec = get_refspec_after_overrides(config, overrides)
_parallel = config.get('parallel', True)
git_url = teuth_config.get_ceph_qa_suite_git_url()
log.info('Pulling tests from %s ref %s', git_url, refspec)
try:
for client, tests in clients.items():
(remote,) = (ctx.cluster.only(client).remotes.keys())
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
remote.run(
args=[
'mkdir', '--', client_dir,
run.Raw('&&'),
'python3', '-m', 'venv', '{tdir}/virtualenv'.format(tdir=testdir),
run.Raw('&&'),
'{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
'install', 'cram==0.6',
],
)
clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
remote.run(args=refspec.clone(git_url, clone_dir))
for test in tests:
assert test.endswith('.t'), 'tests must end in .t'
remote.run(
args=[
'cp', '--', os.path.join(clone_dir, test), client_dir,
],
)
if _parallel:
with parallel() as p:
for role in clients.keys():
p.spawn(_run_tests, ctx, role)
else:
for role in clients.keys():
_run_tests(ctx, role)
finally:
for client, tests in clients.items():
(remote,) = (ctx.cluster.only(client).remotes.keys())
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
test_files = set([test.rsplit('/', 1)[1] for test in tests])
# remove test files unless they failed
for test_file in test_files:
abs_file = os.path.join(client_dir, test_file)
remote.run(
args=[
'test', '-f', abs_file + '.err',
run.Raw('||'),
'rm', '-f', '--', abs_file,
],
)
# ignore failure since more than one client may
# be run on a host, and the client dir should be
# non-empty if the test failed
clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
remote.run(
args=[
'rm', '-rf', '--',
'{tdir}/virtualenv'.format(tdir=testdir),
clone_dir,
run.Raw(';'),
'rmdir', '--ignore-fail-on-non-empty', client_dir,
],
)
def _run_tests(ctx, role):
"""
    For each role, check to make sure it's a client, then run the cram tests on that client
    :param ctx: Context
    :param role: Role name, e.g. client.0
"""
assert isinstance(role, str)
PREFIX = 'client.'
if role.startswith(PREFIX):
id_ = role[len(PREFIX):]
else:
id_ = role
(remote,) = (ctx.cluster.only(role).remotes.keys())
ceph_ref = ctx.summary.get('ceph-sha1', 'master')
testdir = teuthology.get_testdir(ctx)
log.info('Running tests for %s...', role)
remote.run(
args=[
run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
run.Raw('CEPH_ID="{id}"'.format(id=id_)),
run.Raw('PATH=$PATH:/usr/sbin'),
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'{tdir}/virtualenv/bin/cram'.format(tdir=testdir),
'-v', '--',
run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)),
],
logger=log.getChild(role),
)
| 5,402 | 32.559006 | 87 |
py
|
null |
ceph-main/qa/tasks/create_verify_lfn_objects.py
|
"""
Create and verify rados objects with long file names (LFN)
"""
import contextlib
import logging
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
For each combination of namespace and name_length, create
<num_objects> objects with name length <name_length>
on entry. On exit, verify that the objects still exist, can
be deleted, and then don't exist.
Usage::
create_verify_lfn_objects.py:
pool: <pool_name> default: 'data'
prefix: <prefix> default: ''
        namespace: [<namespace>] default: [None]
num_objects: [<num_objects>] default: 10
name_length: [<name_length>] default: [400]
"""
pool = config.get('pool', 'data')
num_objects = config.get('num_objects', 10)
name_length = config.get('name_length', [400])
namespace = config.get('namespace', [None])
    prefix = config.get('prefix', '')
manager = ctx.managers['ceph']
objects = []
for l in name_length:
for ns in namespace:
            def object_name(i):
                nslength = 0
                if ns:
                    nslength = len(ns)
                numstr = str(i)
                fillerlen = l - nslength - len(prefix) - len(numstr)
                assert fillerlen >= 0
                return prefix + ('a'*fillerlen) + numstr
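            # e.g. prefix='p', ns=None, l=8, i=42 -> 'paaaaa42' (illustration
            # only): the name is padded with 'a' to hit the target length.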
objects += [(ns, object_name(i)) for i in range(num_objects)]
for ns, name in objects:
err = manager.do_put(
pool,
name,
'/etc/resolv.conf',
namespace=ns)
log.info("err is " + str(err))
assert err == 0
try:
yield
finally:
log.info('ceph_verify_lfn_objects verifying...')
for ns, name in objects:
err = manager.do_get(
pool,
name,
namespace=ns)
log.info("err is " + str(err))
assert err == 0
log.info('ceph_verify_lfn_objects deleting...')
for ns, name in objects:
err = manager.do_rm(
pool,
name,
namespace=ns)
log.info("err is " + str(err))
assert err == 0
log.info('ceph_verify_lfn_objects verifying absent...')
for ns, name in objects:
err = manager.do_get(
pool,
name,
namespace=ns)
log.info("err is " + str(err))
assert err != 0
| 2,499 | 28.761905 | 74 |
py
|
null |
ceph-main/qa/tasks/d4ntests.py
|
import logging
from teuthology import misc as teuthology
from teuthology.task import Task
from teuthology.orchestra import run
from teuthology.packaging import remove_package
log = logging.getLogger(__name__)
def get_toxvenv_dir(ctx):
return ctx.tox.venv_path
def toxvenv_sh(ctx, remote, args, **kwargs):
activate = get_toxvenv_dir(ctx) + '/bin/activate'
return remote.sh(['source', activate, run.Raw('&&')] + args, **kwargs)
display_name='Foo'
email='[email protected]'
access_key='test3'
secret_key='test3'
class D4NTests(Task):
def __init__(self, ctx, config):
super(D4NTests, self).__init__(ctx, config)
self.log = log
log.info('D4N Tests: __INIT__ ')
clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(self.ctx.cluster, 'client')]
self.all_clients = []
for client in clients:
if client in self.config:
self.all_clients.extend([client])
        if not self.all_clients:
            self.all_clients = ['client.0']
self.user = {'s3main': 'tester'}
def setup(self):
super(D4NTests, self).setup()
log.info('D4N Tests: SETUP')
def begin(self):
super(D4NTests, self).begin()
log.info('D4N Tests: BEGIN')
for (host, roles) in self.ctx.cluster.remotes.items():
log.debug('D4N Tests: Cluster config is: {cfg}'.format(cfg=roles))
log.debug('D4N Tests: Host is: {host}'.format(host=host))
self.create_user()
self.redis_startup()
def end(self):
super(D4NTests, self).end()
log.info('D4N Tests: END')
self.redis_shutdown()
for client in self.all_clients:
self.remove_packages(client)
self.delete_user(client)
def create_user(self):
log.info("D4N Tests: Creating S3 user...")
testdir = teuthology.get_testdir(self.ctx)
for client in self.all_clients:
for user in list(self.user.items()):
s3_user_id = 's3main'
log.debug(
'D4N Tests: Creating user {s3_user_id}'.format(s3_user_id=s3_user_id))
cluster_name, daemon_type, client_id = teuthology.split_role(
client)
client_with_id = daemon_type + '.' + client_id
self.ctx.cluster.only(client).run(
args=[
'sudo',
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client_with_id,
'user', 'create',
'--uid', s3_user_id,
'--display-name', display_name,
'--access-key', access_key,
'--secret', secret_key,
'--email', email,
'--cluster', cluster_name,
],
)
def redis_startup(self):
try:
for client in self.all_clients:
self.ctx.cluster.only(client).run(
args=[
'sudo',
'redis-server',
'--daemonize',
'yes'
],
)
except Exception as err:
log.debug('D4N Tests: Error starting up a Redis server')
log.debug(err)
def redis_shutdown(self):
try:
for client in self.all_clients:
self.ctx.cluster.only(client).run(
args=[
'sudo',
'redis-cli',
'shutdown',
],
)
except Exception as err:
log.debug('D4N Tests: Error shutting down a Redis server')
log.debug(err)
def remove_packages(self, client):
(remote,) = self.ctx.cluster.only(client).remotes.keys()
remove_package('s3cmd', remote)
remove_package('redis', remote)
def delete_user(self, client):
log.info("D4N Tests: Deleting S3 user...")
testdir = teuthology.get_testdir(self.ctx)
for user in self.user.items():
s3_user_id = 's3main'
self.ctx.cluster.only(client).run(
args=[
'sudo',
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client,
'user', 'rm',
'--uid', s3_user_id,
'--purge-data',
'--cluster', 'ceph',
],
)
task = D4NTests
| 4,963 | 31.444444 | 90 |
py
|
null |
ceph-main/qa/tasks/daemonwatchdog.py
|
import logging
import signal
import time
from gevent import sleep
from gevent.greenlet import Greenlet
from gevent.event import Event
log = logging.getLogger(__name__)
class DaemonWatchdog(Greenlet):
"""
DaemonWatchdog::
Watch Ceph daemons for failures. If an extended failure is detected (i.e.
not intentional), then the watchdog will unmount file systems and send
SIGTERM to all daemons. The duration of an extended failure is configurable
with watchdog_daemon_timeout.
ceph:
watchdog:
          daemon_restart [default: no]: set to "normal" to restart a daemon
                                        after a clean exit (status==0).
daemon_timeout [default: 300]: number of seconds a daemon
is allowed to be failed before the
watchdog will bark.
"""
def __init__(self, ctx, config, thrashers):
super(DaemonWatchdog, self).__init__()
self.config = ctx.config.get('watchdog', {})
self.ctx = ctx
self.e = None
self.logger = log.getChild('daemon_watchdog')
self.cluster = config.get('cluster', 'ceph')
self.name = 'watchdog'
self.stopping = Event()
self.thrashers = thrashers
def _run(self):
try:
self.watch()
except Exception as e:
# See _run exception comment for MDSThrasher
self.e = e
self.logger.exception("exception:")
# allow successful completion so gevent doesn't see an exception...
def log(self, x):
"""Write data to logger"""
self.logger.info(x)
def stop(self):
self.stopping.set()
def bark(self):
self.log("BARK! unmounting mounts and killing all daemons")
if hasattr(self.ctx, 'mounts'):
for mount in self.ctx.mounts.values():
try:
mount.umount_wait(force=True)
except:
self.logger.exception("ignoring exception:")
daemons = []
daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('osd', cluster=self.cluster)))
daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('mds', cluster=self.cluster)))
daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('mon', cluster=self.cluster)))
daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('rgw', cluster=self.cluster)))
daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('mgr', cluster=self.cluster)))
for daemon in daemons:
try:
daemon.signal(signal.SIGTERM)
except:
self.logger.exception("ignoring exception:")
def watch(self):
self.log("watchdog starting")
daemon_timeout = int(self.config.get('daemon_timeout', 300))
daemon_restart = self.config.get('daemon_restart', False)
daemon_failure_time = {}
while not self.stopping.is_set():
bark = False
now = time.time()
osds = self.ctx.daemons.iter_daemons_of_role('osd', cluster=self.cluster)
mons = self.ctx.daemons.iter_daemons_of_role('mon', cluster=self.cluster)
mdss = self.ctx.daemons.iter_daemons_of_role('mds', cluster=self.cluster)
rgws = self.ctx.daemons.iter_daemons_of_role('rgw', cluster=self.cluster)
mgrs = self.ctx.daemons.iter_daemons_of_role('mgr', cluster=self.cluster)
daemon_failures = []
daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, osds))
daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, mons))
daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, mdss))
daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, rgws))
daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, mgrs))
for daemon in daemon_failures:
name = daemon.role + '.' + daemon.id_
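                # Remember when this daemon was first seen failed; the grace
                # period is measured from that first observation, not from
                # each poll.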
dt = daemon_failure_time.setdefault(name, (daemon, now))
assert dt[0] is daemon
delta = now-dt[1]
self.log("daemon {name} is failed for ~{t:.0f}s".format(name=name, t=delta))
if delta > daemon_timeout:
bark = True
if daemon_restart == 'normal' and daemon.proc.exitstatus == 0:
self.log(f"attempting to restart daemon {name}")
daemon.restart()
# If a daemon is no longer failed, remove it from tracking:
for name in list(daemon_failure_time.keys()):
if name not in [d.role + '.' + d.id_ for d in daemon_failures]:
self.log("daemon {name} has been restored".format(name=name))
del daemon_failure_time[name]
for thrasher in self.thrashers:
if thrasher.exception is not None:
self.log("{name} failed".format(name=thrasher.name))
bark = True
if bark:
self.bark()
return
sleep(5)
self.log("watchdog finished")
| 5,606 | 42.465116 | 160 |
py
|
null |
ceph-main/qa/tasks/deduplication.py
|
"""
Run ceph-dedup-tool
"""
import contextlib
import logging
import gevent
from teuthology import misc as teuthology
import json
import time
from io import StringIO
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Run ceph-dedup-tool.
The config should be as follows::
ceph-dedup-tool:
clients: [client list]
op: <operation name>
pool: <pool name>
chunk_pool: <chunk pool name>
chunk_size: <chunk size>
chunk_algorithm: <chunk algorithm, fixed|fastcdc>
fingerprint_algorithm: <fingerprint algorithm, sha1|sha256|sha512>
        chunk_dedup_threshold: <the number of duplicate chunks to trigger chunk dedup>
max_thread: <the number of threads>
wakeup_period: <duration>
For example::
tasks:
- exec:
client.0:
- sudo ceph osd pool create low_tier 4
- deduplication:
clients: [client.0]
op: 'sample-dedup'
pool: 'default.rgw.buckets.data'
chunk_pool: 'low_tier'
chunk_size: 131072
chunk_algorithm: 'fastcdc'
fingerprint_algorithm: 'sha1'
chunk_dedup_threshold: 5
max_thread: 2
wakeup_period: 20
sampling_ratio: 100
"""
log.info('Beginning deduplication...')
assert isinstance(config, dict), \
"please list clients to run on"
args = [
'ceph-dedup-tool']
if config.get('op', None):
args.extend(['--op', config.get('op', None)])
if config.get('chunk_pool', None):
args.extend(['--chunk-pool', config.get('chunk_pool', None)])
if config.get('chunk_size', False):
args.extend(['--chunk-size', str(config.get('chunk_size', 131072))])
if config.get('chunk_algorithm', False):
args.extend(['--chunk-algorithm', config.get('chunk_algorithm', None)] )
if config.get('fingerprint_algorithm', False):
args.extend(['--fingerprint-algorithm', config.get('fingerprint_algorithm', None)] )
if config.get('chunk_dedup_threshold', False):
args.extend(['--chunk-dedup-threshold', str(config.get('chunk_dedup_threshold', 1))])
if config.get('max_thread', False):
args.extend(['--max-thread', str(config.get('max_thread', 2))])
if config.get('sampling_ratio', False):
args.extend(['--sampling-ratio', str(config.get('sampling_ratio', 100))])
if config.get('wakeup_period', False):
args.extend(['--wakeup-period', str(config.get('wakeup_period', 20))])
if config.get('pool', False):
args.extend(['--pool', config.get('pool', None)])
args.extend([
'--debug',
'--daemon',
'--loop'])
def thread():
run_remote(args, False, 0)
def run_remote(args, need_wait, client_num):
clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
log.info('clients are %s' % clients)
role = 'client.{id}'.format(id=client_num)
if role not in clients:
raise Exception('wrong client {c}'.format(c=role))
assert isinstance(role, str)
PREFIX = 'client.'
assert role.startswith(PREFIX)
testdir = teuthology.get_testdir(ctx)
cmd_args = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir)]
cmd_args.extend(args)
log.info("cmd: %s", cmd_args)
tries = 0
while True:
(remote,) = ctx.cluster.only(role).remotes.keys()
proc = remote.run(
args=cmd_args,
wait=need_wait, check_status=False,
stdout=StringIO(),
)
log.info('exitstatus {r}'.format(r=proc.exitstatus))
            if proc.exitstatus == 0 or not need_wait:
                log.info('proc stdout %s', proc.stdout.getvalue())
return proc.stdout.getvalue().strip()
tries += 1
if tries > 30:
raise Exception('timed out getting correct exitstatus')
time.sleep(30)
def get_chunk_objs(chunk_pool):
chunk_obj_list = run_remote(('rados ls -p ' + chunk_pool).split(), True, 1).split()
        if not chunk_obj_list:
            return None
        else:
            return chunk_obj_list
def get_ref_list(chunk_pool, chunk_obj):
# get reference list of chunk object
dump_str = run_remote(
('ceph-dedup-tool --op dump-chunk-refs --chunk-pool '
+ chunk_pool + ' --object ' + chunk_obj).split(),
True, 1
)
# fail in case that reference object is not written
assert len(dump_str) > 0
log.info('{0} obj has {1} refs'
.format(chunk_obj, json.loads(dump_str)['count']))
# check if chunk object's reference object exists in base-tier
ref_list = json.loads(dump_str)['refs']
return ref_list
# To validate whether the sample-dedup operation works well, this function checks if
# 1. sample-dedup has been started and
# 2. reference of chunk objects' exists in correct base pool
def validate():
log.info('start validating sample-dedup')
base_pool = config.get('pool', None)
chunk_pool = config.get('chunk_pool', None)
max_validation_cnt = 15
retry_cnt = 0
# chunk objs for re-validation after chunk-repair
retry_chunk_objs = list()
# check whether sample-dedup has been started
chunk_obj_list = get_chunk_objs(chunk_pool)
        while not chunk_obj_list and retry_cnt < max_validation_cnt:
# retry getting # chunk objs after 30 secs of sleep
time.sleep(30)
chunk_obj_list = get_chunk_objs(chunk_pool)
retry_cnt += 1
            log.info('chunk pool empty. retry %d', retry_cnt)
assert retry_cnt < max_validation_cnt
log.info('sample-dedup started successfully')
retry_cnt = 0
max_validation_cnt = 5
# validate chunk pool for max_validation_cnt times
while retry_cnt < max_validation_cnt:
for chunk_obj in chunk_obj_list:
ref_list = get_ref_list(chunk_pool, chunk_obj)
for ref in ref_list:
ret = run_remote(
('rados -p ' + base_pool + ' stat ' + ref['oid'])
.split(), True, 1
)
# check if ref exists in base pool
                    if not ret:
# if ref not exists in base pool, try repair in order to avoid
# false-positive inconsistent reference
ret = run_remote(('ceph osd pool stats ' + base_pool).split(), True, 1)
assert len(ret) > 0
base_pool_id = ret.split()[3]
ret = run_remote(
('ceph-dedup-tool --op chunk-repair --chunk-pool '
+ chunk_pool + ' --object ' + chunk_obj + ' --target-ref '
+ ref['oid'] + ' --target-ref-pool-id ' + base_pool_id)
.split(), True, 1
)
retry_chunk_objs.append(chunk_obj)
log.info('{0} obj exists in {1}'.format(ref['oid'], base_pool))
# retry validation for repaired objects
for chunk_obj in retry_chunk_objs:
ref_list = get_ref_list(chunk_pool, chunk_obj)
for ref in ref_list:
ret = run_remote(
('rados -p ' + base_pool + ' stat ' + ref['oid'])
.split(), True, 1
)
assert len(ret) > 0
log.info(
'{0} obj exists in {1} after repair'.format(ref['oid'],
base_pool)
)
retry_chunk_objs = list()
# get chunk objects for the next loop
chunk_obj_list = get_chunk_objs(chunk_pool)
retry_cnt += 1
time.sleep(30)
return True
running = gevent.spawn(thread)
checker = gevent.spawn(validate)
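    # The dedup daemon and the validator run as concurrent greenlets; .get()
    # below joins each one and re-raises any exception it hit.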
try:
yield
finally:
log.info('joining ceph-dedup-tool')
running.get()
checker.get()
| 8,576 | 37.809955 | 111 |
py
|
null |
ceph-main/qa/tasks/devstack.py
|
#!/usr/bin/env python
import contextlib
import logging
import textwrap
import time
from configparser import ConfigParser
from io import BytesIO, StringIO
from teuthology.orchestra import run
from teuthology import misc
from teuthology.contextutil import nested
log = logging.getLogger(__name__)
DEVSTACK_GIT_REPO = 'https://github.com/openstack-dev/devstack.git'
DS_STABLE_BRANCHES = ("havana", "grizzly")
is_devstack_node = lambda role: role.startswith('devstack')
is_osd_node = lambda role: role.startswith('osd')
@contextlib.contextmanager
def task(ctx, config):
if config is None:
config = {}
if not isinstance(config, dict):
raise TypeError("config must be a dict")
with nested(lambda: install(ctx=ctx, config=config),
lambda: smoke(ctx=ctx, config=config),
):
yield
@contextlib.contextmanager
def install(ctx, config):
"""
Install OpenStack DevStack and configure it to use a Ceph cluster for
Glance and Cinder.
Requires one node with a role 'devstack'
Since devstack runs rampant on the system it's used on, typically you will
want to reprovision that machine after using devstack on it.
Also, the default 2GB of RAM that is given to vps nodes is insufficient. I
recommend 4GB. Downburst can be instructed to give 4GB to a vps node by
adding this to the yaml:
downburst:
ram: 4G
This was created using documentation found here:
https://github.com/openstack-dev/devstack/blob/master/README.md
http://docs.ceph.com/en/latest/rbd/rbd-openstack/
"""
if config is None:
config = {}
if not isinstance(config, dict):
raise TypeError("config must be a dict")
devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))
devstack_branch = config.get("branch", "master")
install_devstack(devstack_node, devstack_branch)
try:
configure_devstack_and_ceph(ctx, config, devstack_node, an_osd_node)
yield
finally:
pass
def install_devstack(devstack_node, branch="master"):
log.info("Cloning DevStack repo...")
args = ['git', 'clone', DEVSTACK_GIT_REPO]
devstack_node.run(args=args)
if branch != "master":
if branch in DS_STABLE_BRANCHES and not branch.startswith("stable"):
branch = "stable/" + branch
log.info("Checking out {branch} branch...".format(branch=branch))
cmd = "cd devstack && git checkout " + branch
devstack_node.run(args=cmd)
log.info("Installing DevStack...")
args = ['cd', 'devstack', run.Raw('&&'), './stack.sh']
devstack_node.run(args=args)
def configure_devstack_and_ceph(ctx, config, devstack_node, ceph_node):
pool_size = config.get('pool_size', '128')
create_pools(ceph_node, pool_size)
distribute_ceph_conf(devstack_node, ceph_node)
# This is where we would install python-ceph and ceph-common but it appears
# the ceph task does that for us.
generate_ceph_keys(ceph_node)
distribute_ceph_keys(devstack_node, ceph_node)
secret_uuid = set_libvirt_secret(devstack_node, ceph_node)
update_devstack_config_files(devstack_node, secret_uuid)
set_apache_servername(devstack_node)
# Rebooting is the most-often-used method of restarting devstack services
misc.reboot(devstack_node)
start_devstack(devstack_node)
restart_apache(devstack_node)
def create_pools(ceph_node, pool_size):
log.info("Creating pools on Ceph cluster...")
for pool_name in ['volumes', 'images', 'backups']:
args = ['sudo', 'ceph', 'osd', 'pool', 'create', pool_name, pool_size]
ceph_node.run(args=args)
def distribute_ceph_conf(devstack_node, ceph_node):
log.info("Copying ceph.conf to DevStack node...")
ceph_conf_path = '/etc/ceph/ceph.conf'
ceph_conf = ceph_node.read_file(ceph_conf_path, sudo=True)
devstack_node.write_file(ceph_conf_path, ceph_conf, sudo=True)
def generate_ceph_keys(ceph_node):
log.info("Generating Ceph keys...")
ceph_auth_cmds = [
['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'], # noqa
['sudo', 'ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'], # noqa
['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'], # noqa
]
for cmd in ceph_auth_cmds:
ceph_node.run(args=cmd)
def distribute_ceph_keys(devstack_node, ceph_node):
log.info("Copying Ceph keys to DevStack node...")
def copy_key(from_remote, key_name, to_remote, dest_path, owner):
key_stringio = BytesIO()
from_remote.run(
args=['sudo', 'ceph', 'auth', 'get-or-create', key_name],
stdout=key_stringio)
key_stringio.seek(0)
to_remote.write_file(dest_path, key_stringio, owner=owner, sudo=True)
keys = [
dict(name='client.glance',
path='/etc/ceph/ceph.client.glance.keyring',
# devstack appears to just want root:root
#owner='glance:glance',
),
dict(name='client.cinder',
path='/etc/ceph/ceph.client.cinder.keyring',
# devstack appears to just want root:root
#owner='cinder:cinder',
),
dict(name='client.cinder-backup',
path='/etc/ceph/ceph.client.cinder-backup.keyring',
# devstack appears to just want root:root
#owner='cinder:cinder',
),
]
for key_dict in keys:
copy_key(ceph_node, key_dict['name'], devstack_node,
key_dict['path'], key_dict.get('owner'))
def set_libvirt_secret(devstack_node, ceph_node):
log.info("Setting libvirt secret...")
cinder_key = ceph_node.sh('sudo ceph auth get-key client.cinder').strip()
uuid = devstack_node.sh('uuidgen').strip()
secret_path = '/tmp/secret.xml'
secret_template = textwrap.dedent("""
<secret ephemeral='no' private='no'>
<uuid>{uuid}</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>""")
secret_data = secret_template.format(uuid=uuid)
devstack_node.write_file(secret_path, secret_data)
devstack_node.run(args=['sudo', 'virsh', 'secret-define', '--file',
secret_path])
devstack_node.run(args=['sudo', 'virsh', 'secret-set-value', '--secret',
uuid, '--base64', cinder_key])
return uuid
def update_devstack_config_files(devstack_node, secret_uuid):
log.info("Updating DevStack config files to use Ceph...")
def backup_config(node, file_name, backup_ext='.orig.teuth'):
node.run(args=['cp', '-f', file_name, file_name + backup_ext])
def update_config(config_name, config_stream, update_dict,
section='DEFAULT'):
parser = ConfigParser()
parser.read_file(config_stream)
for (key, value) in update_dict.items():
parser.set(section, key, value)
out_stream = StringIO()
parser.write(out_stream)
out_stream.seek(0)
return out_stream
updates = [
dict(name='/etc/glance/glance-api.conf', options=dict(
default_store='rbd',
rbd_store_user='glance',
rbd_store_pool='images',
show_image_direct_url='True',)),
dict(name='/etc/cinder/cinder.conf', options=dict(
volume_driver='cinder.volume.drivers.rbd.RBDDriver',
rbd_pool='volumes',
rbd_ceph_conf='/etc/ceph/ceph.conf',
rbd_flatten_volume_from_snapshot='false',
rbd_max_clone_depth='5',
glance_api_version='2',
rbd_user='cinder',
rbd_secret_uuid=secret_uuid,
backup_driver='cinder.backup.drivers.ceph',
backup_ceph_conf='/etc/ceph/ceph.conf',
backup_ceph_user='cinder-backup',
backup_ceph_chunk_size='134217728',
backup_ceph_pool='backups',
backup_ceph_stripe_unit='0',
backup_ceph_stripe_count='0',
restore_discard_excess_bytes='true',
)),
dict(name='/etc/nova/nova.conf', options=dict(
libvirt_images_type='rbd',
libvirt_images_rbd_pool='volumes',
libvirt_images_rbd_ceph_conf='/etc/ceph/ceph.conf',
rbd_user='cinder',
rbd_secret_uuid=secret_uuid,
libvirt_inject_password='false',
libvirt_inject_key='false',
libvirt_inject_partition='-2',
)),
]
for update in updates:
file_name = update['name']
options = update['options']
config_data = devstack_node.read_file(file_name, sudo=True)
config_stream = StringIO(config_data)
backup_config(devstack_node, file_name)
new_config_stream = update_config(file_name, config_stream, options)
devstack_node.write_file(file_name, new_config_stream, sudo=True)
def set_apache_servername(node):
# Apache complains: "Could not reliably determine the server's fully
# qualified domain name, using 127.0.0.1 for ServerName"
# So, let's make sure it knows its name.
log.info("Setting Apache ServerName...")
hostname = node.hostname
config_file = '/etc/apache2/conf.d/servername'
config_data = "ServerName {name}".format(name=hostname)
node.write_file(config_file, config_data, sudo=True)
def start_devstack(devstack_node):
log.info("Patching devstack start script...")
# This causes screen to start headless - otherwise rejoin-stack.sh fails
# because there is no terminal attached.
cmd = "cd devstack && sed -ie 's/screen -c/screen -dm -c/' rejoin-stack.sh"
devstack_node.run(args=cmd)
log.info("Starting devstack...")
cmd = "cd devstack && ./rejoin-stack.sh"
devstack_node.run(args=cmd)
# This was added because I was getting timeouts on Cinder requests - which
# were trying to access Keystone on port 5000. A more robust way to handle
# this would be to introduce a wait-loop on devstack_node that checks to
# see if a service is listening on port 5000.
log.info("Waiting 30s for devstack to start...")
time.sleep(30)
def restart_apache(node):
node.run(args=['sudo', '/etc/init.d/apache2', 'restart'], wait=True)
@contextlib.contextmanager
def exercise(ctx, config):
log.info("Running devstack exercises...")
if config is None:
config = {}
if not isinstance(config, dict):
raise TypeError("config must be a dict")
devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
# TODO: save the log *and* preserve failures
#devstack_archive_dir = create_devstack_archive(ctx, devstack_node)
try:
#cmd = "cd devstack && ./exercise.sh 2>&1 | tee {dir}/exercise.log".format( # noqa
# dir=devstack_archive_dir)
cmd = "cd devstack && ./exercise.sh"
devstack_node.run(args=cmd, wait=True)
yield
finally:
pass
def create_devstack_archive(ctx, devstack_node):
test_dir = misc.get_testdir(ctx)
devstack_archive_dir = "{test_dir}/archive/devstack".format(
test_dir=test_dir)
devstack_node.run(args="mkdir -p " + devstack_archive_dir)
return devstack_archive_dir
@contextlib.contextmanager
def smoke(ctx, config):
log.info("Running a basic smoketest...")
devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))
try:
create_volume(devstack_node, an_osd_node, 'smoke0', 1)
yield
finally:
pass
def create_volume(devstack_node, ceph_node, vol_name, size):
"""
:param size: The size of the volume, in GB
"""
size = str(size)
log.info("Creating a {size}GB volume named {name}...".format(
name=vol_name,
size=size))
args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create',
'--display-name', vol_name, size]
cinder_create = devstack_node.sh(args, wait=True)
vol_info = parse_os_table(cinder_create)
log.debug("Volume info: %s", str(vol_info))
try:
rbd_output = ceph_node.sh("rbd --id cinder ls -l volumes", wait=True)
except run.CommandFailedError:
log.debug("Original rbd call failed; retrying without '--id cinder'")
rbd_output = ceph_node.sh("rbd ls -l volumes", wait=True)
assert vol_info['id'] in rbd_output, \
"Volume not found on Ceph cluster"
assert vol_info['size'] == size, \
"Volume size on Ceph cluster is different than specified"
return vol_info['id']
def parse_os_table(table_str):
out_dict = dict()
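    # A CLI table row like "| id | 42 |" splits into
    # ['|', 'id', '|', '42', '|'], so items[1] is the key and items[3]
    # is the value.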
for line in table_str.split('\n'):
if line.startswith('|'):
items = line.split()
out_dict[items[1]] = items[3]
return out_dict
| 13,327 | 34.827957 | 131 |
py
|
null |
ceph-main/qa/tasks/die_on_err.py
|
"""
Raise exceptions on osd coredumps or test err directories
"""
import contextlib
import logging
import time
from teuthology.orchestra import run
from tasks import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Die if {testdir}/err exists or if an OSD dumps core
"""
if config is None:
config = {}
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while len(manager.get_osd_status()['up']) < num_osds:
time.sleep(10)
testdir = teuthology.get_testdir(ctx)
while True:
for i in range(num_osds):
(osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.keys()
p = osd_remote.run(
args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ],
wait=True,
check_status=False,
)
exit_status = p.exitstatus
if exit_status == 0:
log.info("osd %d has an error" % i)
raise Exception("osd %d error" % i)
log_path = '/var/log/ceph/osd.%d.log' % (i)
p = osd_remote.run(
args = [
'tail', '-1', log_path,
run.Raw('|'),
'grep', '-q', 'end dump'
],
wait=True,
check_status=False,
)
exit_status = p.exitstatus
if exit_status == 0:
log.info("osd %d dumped core" % i)
raise Exception("osd %d dumped core" % i)
time.sleep(5)
| 1,944 | 26.394366 | 75 |
py
|
null |
ceph-main/qa/tasks/divergent_priors.py
|
"""
Special case divergence test
"""
import logging
import time
from teuthology import misc as teuthology
from tasks.util.rados import rados
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Test handling of divergent entries with prior_version
prior to log_tail
overrides:
ceph:
conf:
osd:
debug osd: 5
Requires 3 osds on a single test node.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'divergent_priors task only accepts a dict for configuration'
manager = ctx.managers['ceph']
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
manager.flush_pg_stats([0, 1, 2])
manager.raw_cluster_cmd('osd', 'set', 'noout')
manager.raw_cluster_cmd('osd', 'set', 'noin')
manager.raw_cluster_cmd('osd', 'set', 'nodown')
manager.wait_for_clean()
# something that is always there
dummyfile = '/etc/fstab'
dummyfile2 = '/etc/resolv.conf'
# create 1 pg pool
log.info('creating foo')
manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1')
osds = [0, 1, 2]
for i in osds:
manager.set_config(i, osd_min_pg_log_entries=10)
manager.set_config(i, osd_max_pg_log_entries=10)
manager.set_config(i, osd_pg_log_trim_min=5)
# determine primary
divergent = manager.get_pg_primary('foo', 0)
log.info("primary and soon to be divergent is %d", divergent)
non_divergent = list(osds)
non_divergent.remove(divergent)
log.info('writing initial objects')
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
# write 100 objects
for i in range(100):
rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
manager.wait_for_clean()
# blackhole non_divergent
log.info("blackholing osds %s", str(non_divergent))
for i in non_divergent:
manager.set_config(i, objectstore_blackhole=1)
DIVERGENT_WRITE = 5
DIVERGENT_REMOVE = 5
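    # With the other replicas blackholed, these writes and removes land only
    # in the primary's PG log and will later show up as divergent entries.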
# Write some soon to be divergent
log.info('writing divergent objects')
for i in range(DIVERGENT_WRITE):
rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i,
dummyfile2], wait=False)
# Remove some soon to be divergent
log.info('remove divergent objects')
for i in range(DIVERGENT_REMOVE):
rados(ctx, mon, ['-p', 'foo', 'rm',
'existing_%d' % (i + DIVERGENT_WRITE)], wait=False)
time.sleep(10)
mon.run(
args=['killall', '-9', 'rados'],
wait=True,
check_status=False)
# kill all the osds but leave divergent in
log.info('killing all the osds')
for i in osds:
manager.kill_osd(i)
for i in osds:
manager.mark_down_osd(i)
for i in non_divergent:
manager.mark_out_osd(i)
# bring up non-divergent
log.info("bringing up non_divergent %s", str(non_divergent))
for i in non_divergent:
manager.revive_osd(i)
for i in non_divergent:
manager.mark_in_osd(i)
# write 1 non-divergent object (ensure that old divergent one is divergent)
objname = "existing_%d" % (DIVERGENT_WRITE + DIVERGENT_REMOVE)
log.info('writing non-divergent object ' + objname)
rados(ctx, mon, ['-p', 'foo', 'put', objname, dummyfile2])
manager.wait_for_recovery()
# ensure no recovery of up osds first
log.info('delay recovery')
for i in non_divergent:
manager.wait_run_admin_socket(
'osd', i, ['set_recovery_delay', '100000'])
# bring in our divergent friend
log.info("revive divergent %d", divergent)
manager.raw_cluster_cmd('osd', 'set', 'noup')
manager.revive_osd(divergent)
log.info('delay recovery divergent')
manager.wait_run_admin_socket(
'osd', divergent, ['set_recovery_delay', '100000'])
manager.raw_cluster_cmd('osd', 'unset', 'noup')
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
log.info('wait for peering')
rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
# At this point the divergent_priors should have been detected
log.info("killing divergent %d", divergent)
manager.kill_osd(divergent)
log.info("reviving divergent %d", divergent)
manager.revive_osd(divergent)
time.sleep(20)
log.info('allowing recovery')
# Set osd_recovery_delay_start back to 0 and kick the queue
for i in osds:
manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'debug',
'kick_recovery_wq', ' 0')
log.info('reading divergent objects')
for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
'/tmp/existing'])
assert exit_status == 0
log.info("success")
| 4,925 | 29.596273 | 79 |
py
|
null |
ceph-main/qa/tasks/divergent_priors2.py
|
"""
Special case divergence test with ceph-objectstore-tool export/remove/import
"""
import logging
import time
from teuthology.exceptions import CommandFailedError
from teuthology import misc as teuthology
from tasks.util.rados import rados
import os
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Test handling of divergent entries with prior_version
prior to log_tail and a ceph-objectstore-tool export/import
overrides:
ceph:
conf:
osd:
debug osd: 5
Requires 3 osds on a single test node.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'divergent_priors task only accepts a dict for configuration'
manager = ctx.managers['ceph']
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
manager.flush_pg_stats([0, 1, 2])
manager.raw_cluster_cmd('osd', 'set', 'noout')
manager.raw_cluster_cmd('osd', 'set', 'noin')
manager.raw_cluster_cmd('osd', 'set', 'nodown')
manager.wait_for_clean()
# something that is always there
dummyfile = '/etc/fstab'
dummyfile2 = '/etc/resolv.conf'
testdir = teuthology.get_testdir(ctx)
# create 1 pg pool
log.info('creating foo')
manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1')
osds = [0, 1, 2]
for i in osds:
manager.set_config(i, osd_min_pg_log_entries=10)
manager.set_config(i, osd_max_pg_log_entries=10)
manager.set_config(i, osd_pg_log_trim_min=5)
# determine primary
divergent = manager.get_pg_primary('foo', 0)
log.info("primary and soon to be divergent is %d", divergent)
non_divergent = list(osds)
non_divergent.remove(divergent)
log.info('writing initial objects')
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
# write 100 objects
for i in range(100):
rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
manager.wait_for_clean()
# blackhole non_divergent
log.info("blackholing osds %s", str(non_divergent))
for i in non_divergent:
manager.set_config(i, objectstore_blackhole=1)
DIVERGENT_WRITE = 5
DIVERGENT_REMOVE = 5
# Write some soon to be divergent
log.info('writing divergent objects')
for i in range(DIVERGENT_WRITE):
rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i,
dummyfile2], wait=False)
# Remove some soon to be divergent
log.info('remove divergent objects')
for i in range(DIVERGENT_REMOVE):
rados(ctx, mon, ['-p', 'foo', 'rm',
'existing_%d' % (i + DIVERGENT_WRITE)], wait=False)
time.sleep(10)
mon.run(
args=['killall', '-9', 'rados'],
wait=True,
check_status=False)
# kill all the osds but leave divergent in
log.info('killing all the osds')
for i in osds:
manager.kill_osd(i)
for i in osds:
manager.mark_down_osd(i)
for i in non_divergent:
manager.mark_out_osd(i)
# bring up non-divergent
log.info("bringing up non_divergent %s", str(non_divergent))
for i in non_divergent:
manager.revive_osd(i)
for i in non_divergent:
manager.mark_in_osd(i)
# write 1 non-divergent object (ensure that old divergent one is divergent)
objname = "existing_%d" % (DIVERGENT_WRITE + DIVERGENT_REMOVE)
log.info('writing non-divergent object ' + objname)
rados(ctx, mon, ['-p', 'foo', 'put', objname, dummyfile2])
manager.wait_for_recovery()
# ensure no recovery of up osds first
log.info('delay recovery')
for i in non_divergent:
manager.wait_run_admin_socket(
'osd', i, ['set_recovery_delay', '100000'])
# bring in our divergent friend
log.info("revive divergent %d", divergent)
manager.raw_cluster_cmd('osd', 'set', 'noup')
manager.revive_osd(divergent)
log.info('delay recovery divergent')
manager.wait_run_admin_socket(
'osd', divergent, ['set_recovery_delay', '100000'])
manager.raw_cluster_cmd('osd', 'unset', 'noup')
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
log.info('wait for peering')
rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
# At this point the divergent_priors should have been detected
log.info("killing divergent %d", divergent)
manager.kill_osd(divergent)
# Export a pg
(exp_remote,) = ctx.\
cluster.only('osd.{o}'.format(o=divergent)).remotes.keys()
FSPATH = manager.get_filepath()
JPATH = os.path.join(FSPATH, "journal")
prefix = ("sudo adjust-ulimits ceph-objectstore-tool "
"--data-path {fpath} --journal-path {jpath} "
"--log-file="
"/var/log/ceph/objectstore_tool.$$.log ".
format(fpath=FSPATH, jpath=JPATH))
pid = os.getpid()
expfile = os.path.join(testdir, "exp.{pid}.out".format(pid=pid))
cmd = ((prefix + "--op export-remove --pgid 2.0 --file {file}").
format(id=divergent, file=expfile))
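    # Round-trip the PG through ceph-objectstore-tool while the OSD is down:
    # export-remove dumps pg 2.0 to a file and deletes it from the OSD, then
    # import restores it from that dump.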
try:
exp_remote.sh(cmd, wait=True)
except CommandFailedError as e:
assert e.exitstatus == 0
cmd = ((prefix + "--op import --file {file}").
format(id=divergent, file=expfile))
try:
exp_remote.sh(cmd, wait=True)
except CommandFailedError as e:
assert e.exitstatus == 0
log.info("reviving divergent %d", divergent)
manager.revive_osd(divergent)
manager.wait_run_admin_socket('osd', divergent, ['dump_ops_in_flight'])
    time.sleep(20)
log.info('allowing recovery')
# Set osd_recovery_delay_start back to 0 and kick the queue
for i in osds:
manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'debug',
'kick_recovery_wq', ' 0')
log.info('reading divergent objects')
for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
'/tmp/existing'])
assert exit_status == 0
cmd = 'rm {file}'.format(file=expfile)
exp_remote.run(args=cmd, wait=True)
log.info("success")
| 6,262 | 31.450777 | 79 |
py
|
null |
ceph-main/qa/tasks/dnsmasq.py
|
"""
Task for dnsmasq configuration
"""
import contextlib
import logging
from teuthology import misc
from teuthology.exceptions import ConfigError
from teuthology import contextutil
from teuthology import packaging
from tasks.util import get_remote_for_role
log = logging.getLogger(__name__)
@contextlib.contextmanager
def install_dnsmasq(remote):
"""
If dnsmasq is not installed, install it for the duration of the task.
"""
try:
existing = packaging.get_package_version(remote, 'dnsmasq')
except:
existing = None
if existing is None:
packaging.install_package('dnsmasq', remote)
try:
yield
finally:
if existing is None:
packaging.remove_package('dnsmasq', remote)
@contextlib.contextmanager
def backup_resolv(remote, path):
"""
Store a backup of resolv.conf in the testdir and restore it after the task.
"""
remote.run(args=['cp', '/etc/resolv.conf', path])
try:
yield
finally:
# restore with 'cp' to avoid overwriting its security context
remote.run(args=['sudo', 'cp', path, '/etc/resolv.conf'])
remote.run(args=['rm', path])
@contextlib.contextmanager
def replace_resolv(remote, path):
"""
Update resolv.conf to point the nameserver at localhost.
"""
remote.write_file(path, "nameserver 127.0.0.1\n")
try:
# install it
if remote.os.package_type == "rpm":
# for centos ovh resolv.conf has immutable attribute set
remote.run(args=['sudo', 'chattr', '-i', '/etc/resolv.conf'], check_status=False)
remote.run(args=['sudo', 'cp', path, '/etc/resolv.conf'])
yield
finally:
remote.run(args=['rm', path])
@contextlib.contextmanager
def setup_dnsmasq(remote, testdir, cnames):
""" configure dnsmasq on the given remote, adding each cname given """
log.info('Configuring dnsmasq on remote %s..', remote.name)
# add address entries for each cname
dnsmasq = "server=8.8.8.8\nserver=8.8.4.4\n"
address_template = "address=/{cname}/{ip_address}\n"
for cname, ip_address in cnames.items():
dnsmasq += address_template.format(cname=cname, ip_address=ip_address)
# write to temporary dnsmasq file
dnsmasq_tmp = '/'.join((testdir, 'ceph.tmp'))
remote.write_file(dnsmasq_tmp, dnsmasq)
# move into /etc/dnsmasq.d/
dnsmasq_path = '/etc/dnsmasq.d/ceph'
remote.run(args=['sudo', 'mv', dnsmasq_tmp, dnsmasq_path])
# restore selinux context if necessary
remote.run(args=['sudo', 'restorecon', dnsmasq_path], check_status=False)
# restart dnsmasq
remote.run(args=['sudo', 'systemctl', 'restart', 'dnsmasq'])
# verify dns name is set
remote.run(args=['ping', '-c', '4', next(iter(cnames.keys()))])
try:
yield
finally:
log.info('Removing dnsmasq configuration from remote %s..', remote.name)
# remove /etc/dnsmasq.d/ceph
remote.run(args=['sudo', 'rm', dnsmasq_path])
# restart dnsmasq
remote.run(args=['sudo', 'systemctl', 'restart', 'dnsmasq'])
@contextlib.contextmanager
def task(ctx, config):
"""
Configures dnsmasq to add cnames for teuthology remotes. The task expects a
dictionary, where each key is a role. If all cnames for that role use the
same address as that role, the cnames can be given as a list. For example,
this entry configures dnsmasq on the remote associated with client.0, adding
two cnames for the ip address associated with client.0:
- dnsmasq:
client.0:
- client0.example.com
- c0.example.com
If the addresses do not all match the given role, a dictionary can be given
to specify the ip address by its target role. For example:
- dnsmasq:
client.0:
client.0.example.com: client.0
client.1.example.com: client.1
Cnames that end with a . are treated as prefix for the existing hostname.
For example, if the remote for client.0 has a hostname of 'example.com',
this task will add cnames for dev.example.com and test.example.com:
- dnsmasq:
client.0: [dev., test.]
"""
# apply overrides
overrides = config.get('overrides', {})
misc.deep_merge(config, overrides.get('dnsmasq', {}))
# multiple roles may map to the same remote, so collect names by remote
remote_names = {}
for role, cnames in config.items():
remote = get_remote_for_role(ctx, role)
if remote is None:
raise ConfigError('no remote for role %s' % role)
names = remote_names.get(remote, {})
if isinstance(cnames, list):
# when given a list of cnames, point to local ip
for cname in cnames:
if cname.endswith('.'):
cname += remote.hostname
names[cname] = remote.ip_address
elif isinstance(cnames, dict):
# when given a dict, look up the remote ip for each
for cname, client in cnames.items():
r = get_remote_for_role(ctx, client)
if r is None:
raise ConfigError('no remote for role %s' % client)
if cname.endswith('.'):
cname += r.hostname
names[cname] = r.ip_address
remote_names[remote] = names
testdir = misc.get_testdir(ctx)
resolv_bak = '/'.join((testdir, 'resolv.bak'))
resolv_tmp = '/'.join((testdir, 'resolv.tmp'))
# run subtasks for each unique remote
subtasks = []
for remote, cnames in remote_names.items():
subtasks.extend([ lambda r=remote: install_dnsmasq(r) ])
subtasks.extend([ lambda r=remote: backup_resolv(r, resolv_bak) ])
subtasks.extend([ lambda r=remote: replace_resolv(r, resolv_tmp) ])
subtasks.extend([ lambda r=remote, cn=cnames: setup_dnsmasq(r, testdir, cn) ])
with contextutil.nested(*subtasks):
yield
| 5,995 | 34.064327 | 93 |
py
|
null |
ceph-main/qa/tasks/dump_stuck.py
|
"""
Dump_stuck command
"""
import logging
import time
from tasks import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def check_stuck(manager, num_inactive, num_unclean, num_stale, timeout=10):
"""
    Verify that get_stuck_pgs returns the expected number of PGs in each
    stuck state, comparing the results against the values passed in. This
    passes if all asserts pass.
    :param manager: Ceph manager
    :param num_inactive: number of inactive PGs that are stuck
    :param num_unclean: number of unclean PGs that are stuck
    :param num_stale: number of stale PGs that are stuck
:param timeout: timeout value for get_stuck_pgs calls
"""
inactive = manager.get_stuck_pgs('inactive', timeout)
unclean = manager.get_stuck_pgs('unclean', timeout)
stale = manager.get_stuck_pgs('stale', timeout)
log.info('inactive %s / %d, unclean %s / %d, stale %s / %d',
len(inactive), num_inactive,
len(unclean), num_unclean,
len(stale), num_stale)
assert len(inactive) == num_inactive
assert len(unclean) == num_unclean
assert len(stale) == num_stale
def task(ctx, config):
"""
Test the dump_stuck command.
:param ctx: Context
:param config: Configuration
"""
assert config is None, \
'dump_stuck requires no configuration'
assert teuthology.num_instances_of_type(ctx.cluster, 'osd') == 2, \
'dump_stuck requires exactly 2 osds'
timeout = 60
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
manager.flush_pg_stats([0, 1])
manager.wait_for_clean(timeout)
manager.raw_cluster_cmd('tell', 'mon.a', 'injectargs', '--',
# '--mon-osd-report-timeout 90',
'--mon-pg-stuck-threshold 10')
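    # Lowering mon_pg_stuck_threshold to 10s makes PGs report as stuck
    # quickly, keeping the wait loops below short.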
# all active+clean
check_stuck(
manager,
num_inactive=0,
num_unclean=0,
num_stale=0,
)
num_pgs = manager.get_num_pgs()
manager.mark_out_osd(0)
time.sleep(timeout)
manager.flush_pg_stats([1])
manager.wait_for_recovery(timeout)
# all active+clean+remapped
check_stuck(
manager,
num_inactive=0,
num_unclean=0,
num_stale=0,
)
manager.mark_in_osd(0)
manager.flush_pg_stats([0, 1])
manager.wait_for_clean(timeout)
# all active+clean
check_stuck(
manager,
num_inactive=0,
num_unclean=0,
num_stale=0,
)
log.info('stopping first osd')
manager.kill_osd(0)
manager.mark_down_osd(0)
manager.wait_for_active(timeout)
log.info('waiting for all to be unclean')
starttime = time.time()
done = False
while not done:
try:
check_stuck(
manager,
num_inactive=0,
num_unclean=num_pgs,
num_stale=0,
)
done = True
except AssertionError:
# wait up to 15 minutes to become stale
if time.time() - starttime > 900:
raise
log.info('stopping second osd')
manager.kill_osd(1)
manager.mark_down_osd(1)
log.info('waiting for all to be stale')
starttime = time.time()
done = False
while not done:
try:
check_stuck(
manager,
num_inactive=0,
num_unclean=num_pgs,
num_stale=num_pgs,
)
done = True
except AssertionError:
# wait up to 15 minutes to become stale
if time.time() - starttime > 900:
raise
log.info('reviving')
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'):
manager.revive_osd(id_)
manager.mark_in_osd(id_)
while True:
try:
manager.flush_pg_stats([0, 1])
break
except Exception:
log.exception('osds must not be started yet, waiting...')
time.sleep(1)
manager.wait_for_clean(timeout)
check_stuck(
manager,
num_inactive=0,
num_unclean=0,
num_stale=0,
)
| 4,415 | 26.259259 | 84 |
py
|
null |
ceph-main/qa/tasks/ec_inconsistent_hinfo.py
|
"""
Inconsistent_hinfo
"""
import logging
import time
from dateutil.parser import parse
from tasks import ceph_manager
from tasks.util.rados import rados
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def wait_for_deep_scrub_complete(manager, pgid, check_time_now, inconsistent):
log.debug("waiting for pg %s deep-scrub complete (check_time_now=%s)" %
(pgid, check_time_now))
for i in range(300):
time.sleep(5)
manager.flush_pg_stats([0, 1, 2, 3])
pgs = manager.get_pg_stats()
pg = next((pg for pg in pgs if pg['pgid'] == pgid), None)
        log.debug('pg=%s' % pg)
assert pg
last_deep_scrub_time = parse(pg['last_deep_scrub_stamp']).strftime('%s')
if last_deep_scrub_time < check_time_now:
log.debug('not scrubbed')
continue
status = pg['state'].split('+')
if inconsistent:
assert 'inconsistent' in status
else:
assert 'inconsistent' not in status
return
assert False, 'not scrubbed'
def wait_for_backfilling_complete(manager, pgid, from_osd, to_osd):
log.debug("waiting for pg %s backfill from osd.%s to osd.%s complete" %
(pgid, from_osd, to_osd))
for i in range(300):
time.sleep(5)
manager.flush_pg_stats([0, 1, 2, 3])
pgs = manager.get_pg_stats()
pg = next((pg for pg in pgs if pg['pgid'] == pgid), None)
        log.info('pg=%s' % pg)
assert pg
status = pg['state'].split('+')
if 'active' not in status:
log.debug('not active')
continue
if 'backfilling' in status:
assert from_osd in pg['acting'] and to_osd in pg['up']
log.debug('backfilling')
continue
if to_osd not in pg['up']:
log.debug('backfill not started yet')
continue
log.debug('backfilled!')
break
def task(ctx, config):
"""
Test handling of objects with inconsistent hash info during backfill and deep-scrub.
A pretty rigid cluster is brought up and tested by this task
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'ec_inconsistent_hinfo task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
profile = config.get('erasure_code_profile', {
'k': '2',
'm': '1',
'crush-failure-domain': 'osd'
})
profile_name = profile.get('name', 'backfill_unfound')
manager.create_erasure_code_profile(profile_name, profile)
pool = manager.create_pool_with_unique_name(
pg_num=1,
erasure_code_profile_name=profile_name,
min_size=2)
manager.raw_cluster_cmd('osd', 'pool', 'set', pool,
'pg_autoscale_mode', 'off')
manager.flush_pg_stats([0, 1, 2, 3])
manager.wait_for_clean()
pool_id = manager.get_pool_num(pool)
pgid = '%d.0' % pool_id
pgs = manager.get_pg_stats()
acting = next((pg['acting'] for pg in pgs if pg['pgid'] == pgid), None)
log.info("acting=%s" % acting)
assert acting
primary = acting[0]
# something that is always there, readable and never empty
dummyfile = '/etc/group'
# kludge to make sure they get a map
rados(ctx, mon, ['-p', pool, 'put', 'dummy', dummyfile])
manager.flush_pg_stats([0, 1])
manager.wait_for_recovery()
log.debug("create test object")
obj = 'test'
rados(ctx, mon, ['-p', pool, 'put', obj, dummyfile])
victim = acting[1]
log.info("remove test object hash info from osd.%s shard and test deep-scrub and repair"
% victim)
manager.objectstore_tool(pool, options='', args='rm-attr hinfo_key',
object_name=obj, osd=victim)
check_time_now = time.strftime('%s')
manager.raw_cluster_cmd('pg', 'deep-scrub', pgid)
wait_for_deep_scrub_complete(manager, pgid, check_time_now, True)
check_time_now = time.strftime('%s')
manager.raw_cluster_cmd('pg', 'repair', pgid)
wait_for_deep_scrub_complete(manager, pgid, check_time_now, False)
log.info("remove test object hash info from primary osd.%s shard and test backfill"
% primary)
log.debug("write some data")
rados(ctx, mon, ['-p', pool, 'bench', '30', 'write', '-b', '4096',
'--no-cleanup'])
manager.objectstore_tool(pool, options='', args='rm-attr hinfo_key',
object_name=obj, osd=primary)
# mark the osd out to trigger a rebalance/backfill
source = acting[1]
target = [x for x in [0, 1, 2, 3] if x not in acting][0]
manager.mark_out_osd(source)
# wait for everything to peer, backfill and recover
wait_for_backfilling_complete(manager, pgid, source, target)
manager.wait_for_clean()
manager.flush_pg_stats([0, 1, 2, 3])
pgs = manager.get_pg_stats()
pg = next((pg for pg in pgs if pg['pgid'] == pgid), None)
log.debug('pg=%s' % pg)
assert pg
assert 'clean' in pg['state'].split('+')
assert 'inconsistent' not in pg['state'].split('+')
unfound = manager.get_num_unfound_objects()
log.debug("there are %d unfound objects" % unfound)
assert unfound == 0
source, target = target, source
log.info("remove test object hash info from non-primary osd.%s shard and test backfill"
% source)
manager.objectstore_tool(pool, options='', args='rm-attr hinfo_key',
object_name=obj, osd=source)
# mark the osd in to trigger a rebalance/backfill
manager.mark_in_osd(target)
# wait for everything to peer, backfill and recover
wait_for_backfilling_complete(manager, pgid, source, target)
manager.wait_for_clean()
manager.flush_pg_stats([0, 1, 2, 3])
pgs = manager.get_pg_stats()
pg = next((pg for pg in pgs if pg['pgid'] == pgid), None)
log.debug('pg=%s' % pg)
assert pg
assert 'clean' in pg['state'].split('+')
assert 'inconsistent' not in pg['state'].split('+')
unfound = manager.get_num_unfound_objects()
log.debug("there are %d unfound objects" % unfound)
assert unfound == 0
log.info("remove hash info from two shards and test backfill")
source = acting[2]
target = [x for x in [0, 1, 2, 3] if x not in acting][0]
manager.objectstore_tool(pool, options='', args='rm-attr hinfo_key',
object_name=obj, osd=primary)
manager.objectstore_tool(pool, options='', args='rm-attr hinfo_key',
object_name=obj, osd=source)
# mark the osd out to trigger a rebalance/backfill
manager.mark_out_osd(source)
# wait for everything to peer, backfill and detect unfound object
wait_for_backfilling_complete(manager, pgid, source, target)
# verify that there is unfound object
manager.flush_pg_stats([0, 1, 2, 3])
pgs = manager.get_pg_stats()
pg = next((pg for pg in pgs if pg['pgid'] == pgid), None)
log.debug('pg=%s' % pg)
assert pg
assert 'backfill_unfound' in pg['state'].split('+')
unfound = manager.get_num_unfound_objects()
log.debug("there are %d unfound objects" % unfound)
assert unfound == 1
m = manager.list_pg_unfound(pgid)
log.debug('list_pg_unfound=%s' % m)
assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
# mark stuff lost
pgs = manager.get_pg_stats()
manager.raw_cluster_cmd('pg', pgid, 'mark_unfound_lost', 'delete')
# wait for everything to peer and be happy...
manager.flush_pg_stats([0, 1, 2, 3])
manager.wait_for_recovery()
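
# Illustrative teuthology snippet (an assumption, not in the original file):
# the only config the task reads is an optional erasure_code_profile dict,
# so a job could override the defaults used above like this:
#
#   tasks:
#   - ec_inconsistent_hinfo:
#       erasure_code_profile:
#         k: '2'
#         m: '1'
#         crush-failure-domain: osd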
ceph-main/qa/tasks/ec_lost_unfound.py
"""
Lost_unfound
"""
import logging
import time
from tasks import ceph_manager
from tasks.util.rados import rados
from teuthology import misc as teuthology
from teuthology.orchestra import run
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Test handling of lost objects on an ec pool.
A pretty rigid cluster is brought up and tested by this task
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'lost_unfound task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
manager.wait_for_clean()
profile = config.get('erasure_code_profile', {
'k': '2',
'm': '2',
'crush-failure-domain': 'osd'
})
profile_name = profile.get('name', 'lost_unfound')
manager.create_erasure_code_profile(profile_name, profile)
pool = manager.create_pool_with_unique_name(
erasure_code_profile_name=profile_name,
min_size=2)
# something that is always there, readable and never empty
dummyfile = '/etc/group'
# kludge to make sure they get a map
rados(ctx, mon, ['-p', pool, 'put', 'dummy', dummyfile])
manager.flush_pg_stats([0, 1])
manager.wait_for_recovery()
# create old objects
for f in range(1, 10):
rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile])
rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile])
rados(ctx, mon, ['-p', pool, 'rm', 'existed_%d' % f])
# delay recovery, and make the pg log very long (to prevent backfill)
manager.raw_cluster_cmd(
'tell', 'osd.1',
'injectargs',
'--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
)
manager.kill_osd(0)
manager.mark_down_osd(0)
manager.kill_osd(3)
manager.mark_down_osd(3)
for f in range(1, 10):
rados(ctx, mon, ['-p', pool, 'put', 'new_%d' % f, dummyfile])
rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile])
rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile])
# take out osd.1 and a necessary shard of those objects.
manager.kill_osd(1)
manager.mark_down_osd(1)
manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
manager.revive_osd(0)
manager.wait_till_osd_is_up(0)
manager.revive_osd(3)
manager.wait_till_osd_is_up(3)
manager.flush_pg_stats([0, 2, 3])
manager.wait_till_active()
manager.flush_pg_stats([0, 2, 3])
# verify that there are unfound objects
unfound = manager.get_num_unfound_objects()
log.info("there are %d unfound objects" % unfound)
assert unfound
testdir = teuthology.get_testdir(ctx)
procs = []
if config.get('parallel_bench', True):
procs.append(mon.run(
args=[
"/bin/sh", "-c",
" ".join(['adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage',
'rados',
'--no-log-to-stderr',
'--name', 'client.admin',
'-b', str(4<<10),
'-p' , pool,
'-t', '20',
'bench', '240', 'write',
]).format(tdir=testdir),
],
logger=log.getChild('radosbench.{id}'.format(id='client.admin')),
stdin=run.PIPE,
wait=False
))
time.sleep(10)
# mark stuff lost
pgs = manager.get_pg_stats()
for pg in pgs:
if pg['stat_sum']['num_objects_unfound'] > 0:
# verify that i can list them direct from the osd
            log.info('listing missing/lost in %s state %s', pg['pgid'],
                     pg['state'])
m = manager.list_pg_unfound(pg['pgid'])
log.info('%s' % m)
assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
log.info("reverting unfound in %s", pg['pgid'])
manager.raw_cluster_cmd('pg', pg['pgid'],
'mark_unfound_lost', 'delete')
else:
log.info("no unfound in %s", pg['pgid'])
manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
manager.raw_cluster_cmd('tell', 'osd.3', 'debug', 'kick_recovery_wq', '5')
manager.flush_pg_stats([0, 2, 3])
manager.wait_for_recovery()
if not config.get('parallel_bench', True):
time.sleep(20)
# verify result
for f in range(1, 10):
err = rados(ctx, mon, ['-p', pool, 'get', 'new_%d' % f, '-'])
assert err
err = rados(ctx, mon, ['-p', pool, 'get', 'existed_%d' % f, '-'])
assert err
err = rados(ctx, mon, ['-p', pool, 'get', 'existing_%d' % f, '-'])
assert err
# see if osd.1 can cope
manager.revive_osd(1)
manager.wait_till_osd_is_up(1)
manager.wait_for_clean()
run.wait(procs)
manager.wait_for_clean()
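
# Illustrative usage (an assumption, not in the original file): the only
# config knob read above is parallel_bench, e.g.
#
#   tasks:
#   - ec_lost_unfound:
#       parallel_bench: false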
ceph-main/qa/tasks/exec_on_cleanup.py
"""
Execute custom commands during unwind/cleanup
"""
import logging
import contextlib
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Execute commands on a given role
tasks:
- ceph:
- kclient: [client.a]
- exec:
client.a:
- "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"
- "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"
- interactive:
    It stops and fails with the first command that does not succeed. That
    means if the first command fails, the second won't run at all.
To avoid confusion it is recommended to explicitly enclose the commands in
double quotes. For instance if the command is false (without double quotes) it will
be interpreted as a boolean by the YAML parser.
:param ctx: Context
:param config: Configuration
"""
try:
yield
finally:
log.info('Executing custom commands...')
assert isinstance(config, dict), "task exec got invalid config"
testdir = teuthology.get_testdir(ctx)
if 'all' in config and len(config) == 1:
a = config['all']
roles = teuthology.all_roles(ctx.cluster)
config = dict((id_, a) for id_ in roles)
for role, ls in config.items():
(remote,) = ctx.cluster.only(role).remotes.keys()
log.info('Running commands on role %s host %s', role, remote.name)
for c in ls:
                c = c.replace('$TESTDIR', testdir)
remote.run(
args=[
'sudo',
'TESTDIR={tdir}'.format(tdir=testdir),
'bash',
'-c',
c],
)
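
# Illustrative note (an assumption, not in the original): each command may
# reference $TESTDIR, which is substituted with the teuthology test directory
# before running, e.g. (assuming the task is referenced by its module name):
#
#   tasks:
#   - exec_on_cleanup:
#       client.0:
#       - "ls $TESTDIR/archive"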
ceph-main/qa/tasks/fs.py
"""
CephFS sub-tasks.
"""
import logging
import re
from tasks.cephfs.filesystem import Filesystem, MDSCluster
log = logging.getLogger(__name__)
# Everything up to CEPH_MDSMAP_ALLOW_STANDBY_REPLAY
CEPH_MDSMAP_ALLOW_STANDBY_REPLAY = (1<<5)
CEPH_MDSMAP_NOT_JOINABLE = (1 << 0)
CEPH_MDSMAP_LAST = CEPH_MDSMAP_ALLOW_STANDBY_REPLAY
UPGRADE_FLAGS_MASK = ((CEPH_MDSMAP_LAST<<1) - 1)
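# Sanity check of the arithmetic (illustrative comment, not in the original):
# CEPH_MDSMAP_LAST is 1<<5, so UPGRADE_FLAGS_MASK == (1<<6) - 1 == 0b111111,
# i.e. the mask keeps flag bits 0..5 and discards anything newer.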
def pre_upgrade_save(ctx, config):
"""
That the upgrade procedure doesn't clobber state: save state.
"""
mdsc = MDSCluster(ctx)
status = mdsc.status()
state = {}
ctx['mds-upgrade-state'] = state
for fs in list(status.get_filesystems()):
fscid = fs['id']
mdsmap = fs['mdsmap']
fs_state = {}
fs_state['epoch'] = mdsmap['epoch']
fs_state['max_mds'] = mdsmap['max_mds']
fs_state['flags'] = mdsmap['flags'] & UPGRADE_FLAGS_MASK
state[fscid] = fs_state
log.debug(f"fs fscid={fscid},name={mdsmap['fs_name']} state = {fs_state}")
def post_upgrade_checks(ctx, config):
"""
That the upgrade procedure doesn't clobber state.
"""
state = ctx['mds-upgrade-state']
mdsc = MDSCluster(ctx)
status = mdsc.status()
for fs in list(status.get_filesystems()):
fscid = fs['id']
mdsmap = fs['mdsmap']
fs_state = state[fscid]
log.debug(f"checking fs fscid={fscid},name={mdsmap['fs_name']} state = {fs_state}")
# check state was restored to previous values
assert fs_state['max_mds'] == mdsmap['max_mds']
assert fs_state['flags'] == (mdsmap['flags'] & UPGRADE_FLAGS_MASK)
# now confirm that the upgrade procedure was followed
epoch = mdsmap['epoch']
pre_upgrade_epoch = fs_state['epoch']
assert pre_upgrade_epoch < epoch
multiple_max_mds = fs_state['max_mds'] > 1
did_decrease_max_mds = False
should_disable_allow_standby_replay = fs_state['flags'] & CEPH_MDSMAP_ALLOW_STANDBY_REPLAY
did_disable_allow_standby_replay = False
did_fail_fs = False
for i in range(pre_upgrade_epoch+1, mdsmap['epoch']):
old_status = mdsc.status(epoch=i)
old_fs = old_status.get_fsmap(fscid)
old_mdsmap = old_fs['mdsmap']
if not multiple_max_mds \
and (old_mdsmap['flags'] & CEPH_MDSMAP_NOT_JOINABLE):
raise RuntimeError('mgr is failing fs when there is only one '
f'rank in epoch {i}.')
if multiple_max_mds \
and (old_mdsmap['flags'] & CEPH_MDSMAP_NOT_JOINABLE) \
and old_mdsmap['max_mds'] == 1:
                raise RuntimeError('mgr is failing fs as well as reducing '
                                   f'max_mds in epoch {i}')
if old_mdsmap['flags'] & CEPH_MDSMAP_NOT_JOINABLE:
log.debug(f"max_mds not reduced in epoch {i} as fs was failed "
"for carrying out rapid multi-rank mds upgrade")
did_fail_fs = True
if multiple_max_mds and old_mdsmap['max_mds'] == 1:
log.debug(f"max_mds reduced in epoch {i}")
did_decrease_max_mds = True
if should_disable_allow_standby_replay and not (old_mdsmap['flags'] & CEPH_MDSMAP_ALLOW_STANDBY_REPLAY):
log.debug(f"allow_standby_replay disabled in epoch {i}")
did_disable_allow_standby_replay = True
assert not multiple_max_mds or did_fail_fs or did_decrease_max_mds
assert not should_disable_allow_standby_replay or did_disable_allow_standby_replay
def ready(ctx, config):
"""
That the file system is ready for clients.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
timeout = config.get('timeout', 300)
mdsc = MDSCluster(ctx)
status = mdsc.status()
for filesystem in status.get_filesystems():
fs = Filesystem(ctx, fscid=filesystem['id'])
fs.wait_for_daemons(timeout=timeout, status=status)
def clients_evicted(ctx, config):
"""
Check clients are evicted, unmount (cleanup) if so.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
clients = config.get('clients')
if clients is None:
clients = {("client."+client_id): True for client_id in ctx.mounts}
log.info("clients is {}".format(str(clients)))
fs = Filesystem(ctx)
status = fs.status()
has_session = set()
mounts = {}
for client in clients:
        client_id = re.match(r"^client\.([0-9]+)$", client).group(1)
mounts[client] = ctx.mounts.get(client_id)
for rank in fs.get_ranks(status=status):
ls = fs.rank_asok(['session', 'ls'], rank=rank['rank'], status=status)
for session in ls:
for client, evicted in clients.items():
mount = mounts.get(client)
if mount is not None:
global_id = mount.get_global_id()
if session['id'] == global_id:
if evicted:
raise RuntimeError("client still has session: {}".format(str(session)))
else:
log.info("client {} has a session with MDS {}.{}".format(client, fs.id, rank['rank']))
has_session.add(client)
no_session = set(clients) - has_session
should_assert = False
for client, evicted in clients.items():
mount = mounts.get(client)
if mount is not None:
if evicted:
log.info("confirming client {} is blocklisted".format(client))
assert fs.is_addr_blocklisted(mount.get_global_addr())
elif client in no_session:
log.info("client {} should not be evicted but has no session with an MDS".format(client))
fs.is_addr_blocklisted(mount.get_global_addr()) # for debugging
should_assert = True
if should_assert:
raise RuntimeError("some clients which should not be evicted have no session with an MDS?")
ceph-main/qa/tasks/fwd_scrub.py
"""
Thrash mds by simulating failures
"""
import logging
import contextlib
from gevent import sleep, GreenletExit
from gevent.greenlet import Greenlet
from gevent.event import Event
from teuthology import misc as teuthology
from tasks import ceph_manager
from tasks.cephfs.filesystem import MDSCluster, Filesystem
from tasks.thrasher import Thrasher
log = logging.getLogger(__name__)
class ForwardScrubber(Thrasher, Greenlet):
"""
ForwardScrubber::
The ForwardScrubber does forward scrubbing of file-systems during execution
of other tasks (workunits, etc).
"""
def __init__(self, fs, scrub_timeout=300, sleep_between_iterations=1):
super(ForwardScrubber, self).__init__()
self.logger = log.getChild('fs.[{f}]'.format(f=fs.name))
self.fs = fs
self.name = 'thrasher.fs.[{f}]'.format(f=fs.name)
self.stopping = Event()
self.scrub_timeout = scrub_timeout
self.sleep_between_iterations = sleep_between_iterations
def _run(self):
try:
self.do_scrub()
except Exception as e:
self.set_thrasher_exception(e)
self.logger.exception("exception:")
# allow successful completion so gevent doesn't see an exception...
def stop(self):
self.stopping.set()
def do_scrub(self):
"""
Perform the file-system scrubbing
"""
self.logger.info(f'start scrubbing fs: {self.fs.name}')
try:
while not self.stopping.is_set():
self._scrub()
sleep(self.sleep_between_iterations)
except GreenletExit:
pass
self.logger.info(f'end scrubbing fs: {self.fs.name}')
def _scrub(self, path="/", recursive=True):
self.logger.info(f"scrubbing fs: {self.fs.name}")
scrubopts = ["force"]
if recursive:
scrubopts.append("recursive")
out_json = self.fs.run_scrub(["start", path, ",".join(scrubopts)])
assert out_json is not None
tag = out_json['scrub_tag']
assert tag is not None
assert out_json['return_code'] == 0
assert out_json['mode'] == 'asynchronous'
done = self.fs.wait_until_scrub_complete(tag=tag, sleep=30, timeout=self.scrub_timeout)
if not done:
raise RuntimeError('scrub timeout')
self._check_damage()
def _check_damage(self):
rdmg = self.fs.get_damage()
types = set()
for rank, dmg in rdmg.items():
if dmg:
for d in dmg:
types.add(d['damage_type'])
log.error(f"rank {rank} damaged:\n{dmg}")
if types:
raise RuntimeError(f"rank damage found: {types}")
def stop_all_fwd_scrubbers(thrashers):
for thrasher in thrashers:
if not isinstance(thrasher, ForwardScrubber):
continue
thrasher.stop()
thrasher.join()
if thrasher.exception is not None:
raise RuntimeError(f"error during scrub thrashing: {thrasher.exception}")
@contextlib.contextmanager
def task(ctx, config):
"""
Stress test the mds by running scrub iterations while another task/workunit
is running.
Example config:
- fwd_scrub:
scrub_timeout: 300
sleep_between_iterations: 1
"""
mds_cluster = MDSCluster(ctx)
if config is None:
config = {}
assert isinstance(config, dict), \
'fwd_scrub task only accepts a dict for configuration'
mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
assert len(mdslist) > 0, \
'fwd_scrub task requires at least 1 metadata server'
(first,) = ctx.cluster.only(f'mds.{mdslist[0]}').remotes.keys()
manager = ceph_manager.CephManager(
first, ctx=ctx, logger=log.getChild('ceph_manager'),
)
# make sure everyone is in active, standby, or standby-replay
log.info('Wait for all MDSs to reach steady state...')
status = mds_cluster.status()
while True:
steady = True
for info in status.get_all():
state = info['state']
if state not in ('up:active', 'up:standby', 'up:standby-replay'):
steady = False
break
if steady:
break
sleep(2)
status = mds_cluster.status()
log.info('Ready to start scrub thrashing')
manager.wait_for_clean()
assert manager.is_clean()
if 'cluster' not in config:
config['cluster'] = 'ceph'
for fs in status.get_filesystems():
fwd_scrubber = ForwardScrubber(Filesystem(ctx, fscid=fs['id']),
config['scrub_timeout'],
config['sleep_between_iterations'])
fwd_scrubber.start()
ctx.ceph[config['cluster']].thrashers.append(fwd_scrubber)
try:
log.debug('Yielding')
yield
finally:
log.info('joining ForwardScrubbers')
stop_all_fwd_scrubbers(ctx.ceph[config['cluster']].thrashers)
log.info('done joining')
ceph-main/qa/tasks/immutable_object_cache.py
"""
immutable object cache task
"""
import contextlib
import logging
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def immutable_object_cache(ctx, config):
"""
setup and cleanup immutable object cache
"""
log.info("start immutable object cache daemon")
for client, client_config in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
        # make sure there is only one immutable object cache daemon on the node.
remote.run(
args=[
'sudo', 'killall', '-s', '9', 'ceph-immutable-object-cache', run.Raw('||'), 'true',
]
)
remote.run(
args=[
'ceph-immutable-object-cache', '-b',
]
)
try:
yield
finally:
log.info("check and cleanup immutable object cache")
for client, client_config in config.items():
client_config = client_config if client_config is not None else dict()
(remote,) = ctx.cluster.only(client).remotes.keys()
cache_path = client_config.get('immutable object cache path', '/tmp/ceph-immutable-object-cache')
ls_command = '"$(ls {} )"'.format(cache_path)
remote.run(
args=[
'test', '-n', run.Raw(ls_command),
]
)
remote.run(
args=[
'sudo', 'killall', '-s', '9', 'ceph-immutable-object-cache', run.Raw('||'), 'true',
]
)
remote.run(
args=[
'sudo', 'rm', '-rf', cache_path, run.Raw('||'), 'true',
]
)
@contextlib.contextmanager
def task(ctx, config):
"""
    Task to start the immutable object cache daemon.
"""
assert isinstance(config, dict), \
"task immutable_object_cache only supports a dictionary for configuration"
managers = []
config = teuthology.replace_all_with_clients(ctx.cluster, config)
managers.append(
lambda: immutable_object_cache(ctx=ctx, config=config)
)
with contextutil.nested(*managers):
yield
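
# Illustrative usage (an assumption, not in the original; the path key matches
# the one read during cleanup above):
#
#   tasks:
#   - immutable_object_cache:
#       client.0:
#         immutable object cache path: /tmp/ceph-immutable-object-cache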
ceph-main/qa/tasks/immutable_object_cache_thrash.py
"""
immutable object cache thrash task
"""
import contextlib
import logging
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
DEFAULT_KILL_DAEMON_TIME = 2
DEFAULT_DEAD_TIME = 30
DEFAULT_LIVE_TIME = 120
log = logging.getLogger(__name__)
@contextlib.contextmanager
def thrashes_immutable_object_cache_daemon(ctx, config):
"""
thrashes immutable object cache daemon.
    It exercises librbd's reconnection to the read-only cache daemon after the
    daemon crashes.
TODO : replace sleep with better method.
"""
log.info("thrashes immutable object cache daemon")
# just thrash one rbd client.
client, client_config = list(config.items())[0]
(remote,) = ctx.cluster.only(client).remotes.keys()
client_config = client_config if client_config is not None else dict()
kill_daemon_time = client_config.get('kill_daemon_time', DEFAULT_KILL_DAEMON_TIME)
dead_time = client_config.get('dead_time', DEFAULT_DEAD_TIME)
live_time = client_config.get('live_time', DEFAULT_LIVE_TIME)
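    # despite the name, kill_daemon_time is the number of kill/restart cycles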
for i in range(kill_daemon_time):
log.info("ceph-immutable-object-cache crash....")
remote.run(
args=[
'sudo', 'killall', '-s', '9', 'ceph-immutable-object-cache', run.Raw('||'), 'true',
]
)
        # librbd should keep working while ceph-immutable-object-cache is down
remote.run(
args=[
'sleep', '{dead_time}'.format(dead_time=dead_time),
]
)
# librbd should reconnect daemon
log.info("startup ceph-immutable-object-cache")
remote.run(
args=[
'ceph-immutable-object-cache', '-b',
]
)
remote.run(
args=[
'sleep', '{live_time}'.format(live_time=live_time),
]
)
try:
yield
finally:
log.info("cleanup")
@contextlib.contextmanager
def task(ctx, config):
"""
This is task for testing immutable_object_cache thrash.
"""
assert isinstance(config, dict), \
"task immutable_object_cache_thrash only supports a dictionary for configuration"
managers = []
config = teuthology.replace_all_with_clients(ctx.cluster, config)
managers.append(
lambda: thrashes_immutable_object_cache_daemon(ctx=ctx, config=config)
)
with contextutil.nested(*managers):
yield
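
# Illustrative usage (an assumption; the keys are the ones read above, shown
# with their default values):
#
#   tasks:
#   - immutable_object_cache_thrash:
#       client.0:
#         kill_daemon_time: 2
#         dead_time: 30
#         live_time: 120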
ceph-main/qa/tasks/kafka.py
"""
Deploy and configure Kafka for Teuthology
"""
import contextlib
import logging
import time
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
log = logging.getLogger(__name__)
def get_kafka_version(config):
for client, client_config in config.items():
if 'kafka_version' in client_config:
kafka_version = client_config.get('kafka_version')
return kafka_version
def get_kafka_dir(ctx, config):
kafka_version = get_kafka_version(config)
current_version = 'kafka-' + kafka_version + '-src'
return '{tdir}/{ver}'.format(tdir=teuthology.get_testdir(ctx),ver=current_version)
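# Worked example (illustrative): with kafka_version '2.6.0' and a test dir of
# /home/ubuntu/cephtest, get_kafka_dir() returns
# /home/ubuntu/cephtest/kafka-2.6.0-src.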
@contextlib.contextmanager
def install_kafka(ctx, config):
"""
Downloading the kafka tar file.
"""
assert isinstance(config, dict)
log.info('Installing Kafka...')
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
test_dir=teuthology.get_testdir(ctx)
current_version = get_kafka_version(config)
link1 = 'https://archive.apache.org/dist/kafka/' + current_version + '/kafka-' + current_version + '-src.tgz'
ctx.cluster.only(client).run(
args=['cd', '{tdir}'.format(tdir=test_dir), run.Raw('&&'), 'wget', link1],
)
file1 = 'kafka-' + current_version + '-src.tgz'
ctx.cluster.only(client).run(
args=['cd', '{tdir}'.format(tdir=test_dir), run.Raw('&&'), 'tar', '-xvzf', file1],
)
try:
yield
finally:
log.info('Removing packaged dependencies of Kafka...')
test_dir=get_kafka_dir(ctx, config)
current_version = get_kafka_version(config)
for (client,_) in config.items():
ctx.cluster.only(client).run(
args=['rm', '-rf', '{tdir}/logs'.format(tdir=test_dir)],
)
ctx.cluster.only(client).run(
args=['rm', '-rf', test_dir],
)
rmfile1 = 'kafka-' + current_version + '-src.tgz'
ctx.cluster.only(client).run(
args=['rm', '-rf', '{tdir}/{doc}'.format(tdir=teuthology.get_testdir(ctx),doc=rmfile1)],
)
@contextlib.contextmanager
def run_kafka(ctx,config):
"""
This includes two parts:
1. Starting Zookeeper service
2. Starting Kafka service
"""
assert isinstance(config, dict)
log.info('Bringing up Zookeeper and Kafka services...')
for (client,_) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
ctx.cluster.only(client).run(
args=['cd', '{tdir}'.format(tdir=get_kafka_dir(ctx, config)), run.Raw('&&'),
'./gradlew', 'jar',
'-PscalaVersion=2.13.2'
],
)
ctx.cluster.only(client).run(
args=['cd', '{tdir}/bin'.format(tdir=get_kafka_dir(ctx, config)), run.Raw('&&'),
'./zookeeper-server-start.sh',
'{tir}/config/zookeeper.properties'.format(tir=get_kafka_dir(ctx, config)),
run.Raw('&'), 'exit'
],
)
ctx.cluster.only(client).run(
args=['cd', '{tdir}/bin'.format(tdir=get_kafka_dir(ctx, config)), run.Raw('&&'),
'./kafka-server-start.sh',
'{tir}/config/server.properties'.format(tir=get_kafka_dir(ctx, config)),
run.Raw('&'), 'exit'
],
)
try:
yield
finally:
log.info('Stopping Zookeeper and Kafka Services...')
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
ctx.cluster.only(client).run(
args=['cd', '{tdir}/bin'.format(tdir=get_kafka_dir(ctx, config)), run.Raw('&&'),
'./kafka-server-stop.sh',
'{tir}/config/kafka.properties'.format(tir=get_kafka_dir(ctx, config)),
],
)
time.sleep(5)
ctx.cluster.only(client).run(
args=['cd', '{tdir}/bin'.format(tdir=get_kafka_dir(ctx, config)), run.Raw('&&'),
'./zookeeper-server-stop.sh',
'{tir}/config/zookeeper.properties'.format(tir=get_kafka_dir(ctx, config)),
],
)
time.sleep(5)
ctx.cluster.only(client).run(args=['killall', '-9', 'java'])
@contextlib.contextmanager
def run_admin_cmds(ctx,config):
"""
    Running Kafka admin commands in order to check that the producer and
    consumer work and that topic creation succeeds.
"""
assert isinstance(config, dict)
log.info('Checking kafka server through producer/consumer commands...')
for (client,_) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
ctx.cluster.only(client).run(
args=[
'cd', '{tdir}/bin'.format(tdir=get_kafka_dir(ctx, config)), run.Raw('&&'),
'./kafka-topics.sh', '--create', '--topic', 'quickstart-events',
'--bootstrap-server', 'localhost:9092'
],
)
ctx.cluster.only(client).run(
args=[
'cd', '{tdir}/bin'.format(tdir=get_kafka_dir(ctx, config)), run.Raw('&&'),
'echo', "First", run.Raw('|'),
'./kafka-console-producer.sh', '--topic', 'quickstart-events',
'--bootstrap-server', 'localhost:9092'
],
)
ctx.cluster.only(client).run(
args=[
'cd', '{tdir}/bin'.format(tdir=get_kafka_dir(ctx, config)), run.Raw('&&'),
'./kafka-console-consumer.sh', '--topic', 'quickstart-events',
'--from-beginning',
'--bootstrap-server', 'localhost:9092',
run.Raw('&'), 'exit'
],
)
try:
yield
finally:
pass
@contextlib.contextmanager
def task(ctx,config):
"""
    The following shows how to run kafka::
tasks:
- kafka:
client.0:
kafka_version: 2.6.0
"""
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task kafka only supports a list or dictionary for configuration"
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
log.debug('Kafka config is %s', config)
with contextutil.nested(
lambda: install_kafka(ctx=ctx, config=config),
lambda: run_kafka(ctx=ctx, config=config),
lambda: run_admin_cmds(ctx=ctx, config=config),
):
yield
ceph-main/qa/tasks/kclient.py
"""
Mount/unmount a ``kernel`` client.
"""
import contextlib
import logging
from teuthology.misc import deep_merge
from teuthology.exceptions import CommandFailedError
from teuthology import misc
from teuthology.contextutil import MaxWhileTries
from tasks.cephfs.kernel_mount import KernelMount
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Mount/unmount a ``kernel`` client.
The config is optional and defaults to mounting on all clients. If
a config is given, it is expected to be a list of clients to do
this operation on. This lets you e.g. set up one client with
``ceph-fuse`` and another with ``kclient``.
    ``brxnet`` should be a private IPv4 address range; the default range is
    [192.168.0.0/16].
Example that mounts all clients::
tasks:
- ceph:
- kclient:
- interactive:
- brxnet: [192.168.0.0/16]
    Example that uses both ``kclient`` and ``ceph-fuse``::
tasks:
- ceph:
- ceph-fuse: [client.0]
- kclient: [client.1]
- interactive:
    Pass a dictionary instead of lists to specify per-client config::

        tasks:
        - kclient:
client.0:
debug: true
mntopts: ["nowsync"]
:param ctx: Context
:param config: Configuration
"""
log.info('Mounting kernel clients...')
if config is None:
ids = misc.all_roles_of_type(ctx.cluster, 'client')
client_roles = [f'client.{id_}' for id_ in ids]
config = dict([r, dict()] for r in client_roles)
elif isinstance(config, list):
client_roles = config
config = dict([r, dict()] for r in client_roles)
elif isinstance(config, dict):
client_roles = filter(lambda x: 'client.' in x, config.keys())
else:
raise ValueError(f"Invalid config object: {config} ({config.__class__})")
log.info(f"config is {config}")
clients = list(misc.get_clients(ctx=ctx, roles=client_roles))
test_dir = misc.get_testdir(ctx)
for id_, remote in clients:
KernelMount.cleanup_stale_netnses_and_bridge(remote)
mounts = {}
overrides = ctx.config.get('overrides', {}).get('kclient', {})
top_overrides = dict(filter(lambda x: 'client.' not in x[0], overrides.items()))
for id_, remote in clients:
entity = f"client.{id_}"
client_config = config.get(entity)
if client_config is None:
client_config = {}
# top level overrides
deep_merge(client_config, top_overrides)
# mount specific overrides
client_config_overrides = overrides.get(entity)
deep_merge(client_config, client_config_overrides)
log.info(f"{entity} config is {client_config}")
cephfs_name = client_config.get("cephfs_name")
if config.get("disabled", False) or not client_config.get('mounted', True):
continue
kernel_mount = KernelMount(
ctx=ctx,
test_dir=test_dir,
client_id=id_,
client_remote=remote,
brxnet=ctx.teuthology_config.get('brxnet', None),
client_config=client_config,
cephfs_name=cephfs_name)
mounts[id_] = kernel_mount
if client_config.get('debug', False):
remote.run(args=["sudo", "bash", "-c", "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"])
remote.run(args=["sudo", "bash", "-c", "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"])
kernel_mount.mount(mntopts=client_config.get('mntopts', []))
def umount_all():
log.info('Unmounting kernel clients...')
forced = False
for mount in mounts.values():
if mount.is_mounted():
try:
mount.umount()
except (CommandFailedError, MaxWhileTries):
log.warning("Ordinary umount failed, forcing...")
forced = True
mount.umount_wait(force=True)
for id_, remote in clients:
KernelMount.cleanup_stale_netnses_and_bridge(remote)
return forced
ctx.mounts = mounts
try:
yield mounts
    except:
        umount_all() # ignore forced retval, we are already in error handling
        raise
finally:
forced = umount_all()
if forced:
# The context managers within the kclient manager worked (i.e.
# the test workload passed) but for some reason we couldn't
# umount, so turn this into a test failure.
raise RuntimeError("Kernel mounts did not umount cleanly")
ceph-main/qa/tasks/keycloak.py
"""
Deploy and configure Keycloak for Teuthology
"""
import contextlib
import logging
import os
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
from teuthology.exceptions import ConfigError
log = logging.getLogger(__name__)
def get_keycloak_version(config):
for client, client_config in config.items():
if 'keycloak_version' in client_config:
keycloak_version = client_config.get('keycloak_version')
return keycloak_version
def get_keycloak_dir(ctx, config):
keycloak_version = get_keycloak_version(config)
current_version = 'keycloak-'+keycloak_version
return '{tdir}/{ver}'.format(tdir=teuthology.get_testdir(ctx),ver=current_version)
def run_in_keycloak_dir(ctx, client, config, args, **kwargs):
return ctx.cluster.only(client).run(
args=[ 'cd', get_keycloak_dir(ctx,config), run.Raw('&&'), ] + args,
**kwargs
)
def get_toxvenv_dir(ctx):
return ctx.tox.venv_path
def toxvenv_sh(ctx, remote, args, **kwargs):
activate = get_toxvenv_dir(ctx) + '/bin/activate'
return remote.sh(['source', activate, run.Raw('&&')] + args, **kwargs)
@contextlib.contextmanager
def install_packages(ctx, config):
"""
Downloading the two required tar files
1. Keycloak
2. Wildfly (Application Server)
"""
assert isinstance(config, dict)
log.info('Installing packages for Keycloak...')
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
test_dir=teuthology.get_testdir(ctx)
current_version = get_keycloak_version(config)
link1 = 'https://downloads.jboss.org/keycloak/'+current_version+'/keycloak-'+current_version+'.tar.gz'
toxvenv_sh(ctx, remote, ['wget', link1])
file1 = 'keycloak-'+current_version+'.tar.gz'
toxvenv_sh(ctx, remote, ['tar', '-C', test_dir, '-xvzf', file1])
link2 ='https://downloads.jboss.org/keycloak/'+current_version+'/adapters/keycloak-oidc/keycloak-wildfly-adapter-dist-'+current_version+'.tar.gz'
toxvenv_sh(ctx, remote, ['cd', '{tdir}'.format(tdir=get_keycloak_dir(ctx,config)), run.Raw('&&'), 'wget', link2])
file2 = 'keycloak-wildfly-adapter-dist-'+current_version+'.tar.gz'
toxvenv_sh(ctx, remote, ['tar', '-C', '{tdir}'.format(tdir=get_keycloak_dir(ctx,config)), '-xvzf', '{tdr}/{file}'.format(tdr=get_keycloak_dir(ctx,config),file=file2)])
try:
yield
finally:
log.info('Removing packaged dependencies of Keycloak...')
for client in config:
current_version = get_keycloak_version(config)
ctx.cluster.only(client).run(
args=['cd', '{tdir}'.format(tdir=get_keycloak_dir(ctx,config)), run.Raw('&&'), 'rm', '-rf', 'keycloak-wildfly-adapter-dist-' + current_version + '.tar.gz'],
)
ctx.cluster.only(client).run(
args=['rm', '-rf', '{tdir}'.format(tdir=get_keycloak_dir(ctx,config))],
)
@contextlib.contextmanager
def download_conf(ctx, config):
"""
Downloads confi.py used in run_admin_cmds
"""
assert isinstance(config, dict)
log.info('Downloading conf...')
testdir = teuthology.get_testdir(ctx)
conf_branch = 'main'
conf_repo = 'https://github.com/TRYTOBE8TME/scripts.git'
for (client, _) in config.items():
ctx.cluster.only(client).run(
args=[
'git', 'clone',
'-b', conf_branch,
conf_repo,
'{tdir}/scripts'.format(tdir=testdir),
],
)
try:
yield
finally:
log.info('Removing conf...')
testdir = teuthology.get_testdir(ctx)
for client in config:
ctx.cluster.only(client).run(
args=[
'rm',
'-rf',
'{tdir}/scripts'.format(tdir=testdir),
],
)
@contextlib.contextmanager
def build(ctx,config):
"""
Build process which needs to be done before starting a server.
"""
assert isinstance(config, dict)
log.info('Building Keycloak...')
for (client,_) in config.items():
run_in_keycloak_dir(ctx, client, config,['cd', 'bin', run.Raw('&&'), './jboss-cli.sh', '--file=adapter-elytron-install-offline.cli'])
try:
yield
finally:
pass
@contextlib.contextmanager
def run_keycloak(ctx,config):
"""
    This includes two parts:
    1. Adding an admin user to keycloak, which is used to log in once the
       server is up (and can be checked in a browser).
    2. Starting the server.
"""
assert isinstance(config, dict)
log.info('Bringing up Keycloak...')
for (client,_) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
ctx.cluster.only(client).run(
args=[
'{tdir}/bin/add-user-keycloak.sh'.format(tdir=get_keycloak_dir(ctx,config)),
'-r', 'master',
'-u', 'admin',
'-p', 'admin',
],
)
toxvenv_sh(ctx, remote, ['cd', '{tdir}/bin'.format(tdir=get_keycloak_dir(ctx,config)), run.Raw('&&'), './standalone.sh', run.Raw('&'), 'exit'])
try:
yield
finally:
log.info('Stopping Keycloak Server...')
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
toxvenv_sh(ctx, remote, ['cd', '{tdir}/bin'.format(tdir=get_keycloak_dir(ctx,config)), run.Raw('&&'), './jboss-cli.sh', '--connect', 'command=:shutdown'])
@contextlib.contextmanager
def run_admin_cmds(ctx,config):
"""
Running Keycloak Admin commands(kcadm commands) in order to get the token, aud value, thumbprint and realm name.
"""
assert isinstance(config, dict)
log.info('Running admin commands...')
for (client,_) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
'{tdir}/bin/kcadm.sh'.format(tdir=get_keycloak_dir(ctx,config)),
'config', 'credentials',
'--server', 'http://localhost:8080/auth',
'--realm', 'master',
'--user', 'admin',
'--password', 'admin',
'--client', 'admin-cli',
],
)
realm_name='demorealm'
realm='realm={}'.format(realm_name)
remote.run(
args=[
'{tdir}/bin/kcadm.sh'.format(tdir=get_keycloak_dir(ctx,config)),
'create', 'realms',
'-s', realm,
'-s', 'enabled=true',
'-s', 'accessTokenLifespan=1800',
'-o',
],
)
client_name='my_client'
client='clientId={}'.format(client_name)
remote.run(
args=[
'{tdir}/bin/kcadm.sh'.format(tdir=get_keycloak_dir(ctx,config)),
'create', 'clients',
'-r', realm_name,
'-s', client,
'-s', 'directAccessGrantsEnabled=true',
'-s', 'redirectUris=["http://localhost:8080/myapp/*"]',
],
)
ans1= toxvenv_sh(ctx, remote,
[
'cd', '{tdir}/bin'.format(tdir=get_keycloak_dir(ctx,config)), run.Raw('&&'),
'./kcadm.sh', 'get', 'clients',
'-r', realm_name,
'-F', 'id,clientId', run.Raw('|'),
'jq', '-r', '.[] | select (.clientId == "my_client") | .id'
])
pre0=ans1.rstrip()
pre1="clients/{}".format(pre0)
remote.run(
args=[
'{tdir}/bin/kcadm.sh'.format(tdir=get_keycloak_dir(ctx,config)),
'update', pre1,
'-r', realm_name,
'-s', 'enabled=true',
'-s', 'serviceAccountsEnabled=true',
'-s', 'redirectUris=["http://localhost:8080/myapp/*"]',
],
)
ans2= pre1+'/client-secret'
out2= toxvenv_sh(ctx, remote,
[
'cd', '{tdir}/bin'.format(tdir=get_keycloak_dir(ctx,config)), run.Raw('&&'),
'./kcadm.sh', 'get', ans2,
'-r', realm_name,
'-F', 'value'
])
ans0= '{client}:{secret}'.format(client=client_name,secret=out2[15:51])
ans3= 'client_secret={}'.format(out2[15:51])
clientid='client_id={}'.format(client_name)
proto_map = pre1+"/protocol-mappers/models"
uname = "username=testuser"
upass = "password=testuser"
remote.run(
args=[
'{tdir}/bin/kcadm.sh'.format(tdir=get_keycloak_dir(ctx,config)),
'create', 'users',
'-s', uname,
'-s', 'enabled=true',
'-s', 'attributes.\"https://aws.amazon.com/tags\"=\"{"principal_tags":{"Department":["Engineering", "Marketing"]}}\"',
'-r', realm_name,
],
)
sample = 'testuser'
remote.run(
args=[
'{tdir}/bin/kcadm.sh'.format(tdir=get_keycloak_dir(ctx,config)),
'set-password',
'-r', realm_name,
'--username', sample,
'--new-password', sample,
],
)
file_path = '{tdir}/scripts/confi.py'.format(tdir=teuthology.get_testdir(ctx))
remote.run(
args=[
'{tdir}/bin/kcadm.sh'.format(tdir=get_keycloak_dir(ctx,config)),
'create', proto_map,
'-r', realm_name,
'-f', file_path,
],
)
remote.run(
args=[
'{tdir}/bin/kcadm.sh'.format(tdir=get_keycloak_dir(ctx,config)),
'config', 'credentials',
'--server', 'http://localhost:8080/auth',
'--realm', realm_name,
'--user', sample,
'--password', sample,
'--client', 'admin-cli',
],
)
out9= toxvenv_sh(ctx, remote,
[
'curl', '-k', '-v',
'-X', 'POST',
'-H', 'Content-Type:application/x-www-form-urlencoded',
'-d', 'scope=openid',
'-d', 'grant_type=password',
'-d', clientid,
'-d', ans3,
'-d', uname,
'-d', upass,
'http://localhost:8080/auth/realms/'+realm_name+'/protocol/openid-connect/token', run.Raw('|'),
'jq', '-r', '.access_token'
])
user_token_pre = out9.rstrip()
user_token = '{}'.format(user_token_pre)
out3= toxvenv_sh(ctx, remote,
[
'curl', '-k', '-v',
'-X', 'POST',
'-H', 'Content-Type:application/x-www-form-urlencoded',
'-d', 'scope=openid',
'-d', 'grant_type=client_credentials',
'-d', clientid,
'-d', ans3,
'http://localhost:8080/auth/realms/'+realm_name+'/protocol/openid-connect/token', run.Raw('|'),
'jq', '-r', '.access_token'
])
pre2=out3.rstrip()
acc_token= 'token={}'.format(pre2)
ans4= '{}'.format(pre2)
out4= toxvenv_sh(ctx, remote,
[
'curl', '-k', '-v',
'-X', 'GET',
'-H', 'Content-Type:application/x-www-form-urlencoded',
'http://localhost:8080/auth/realms/'+realm_name+'/protocol/openid-connect/certs', run.Raw('|'),
'jq', '-r', '.keys[].x5c[]'
])
pre3=out4.rstrip()
cert_value='{}'.format(pre3)
start_value= "-----BEGIN CERTIFICATE-----\n"
end_value= "\n-----END CERTIFICATE-----"
user_data=""
user_data+=start_value
user_data+=cert_value
user_data+=end_value
remote.write_file(
path='{tdir}/bin/certificate.crt'.format(tdir=get_keycloak_dir(ctx,config)),
data=user_data
)
out5= toxvenv_sh(ctx, remote,
[
'openssl', 'x509',
'-in', '{tdir}/bin/certificate.crt'.format(tdir=get_keycloak_dir(ctx,config)),
'--fingerprint', '--noout', '-sha1'
])
pre_ans= '{}'.format(out5[17:76])
ans5=""
for character in pre_ans:
if(character!=':'):
ans5+=character
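        # Illustrative note (not in the original): the openssl call above
        # prints a line like "SHA1 Fingerprint=AB:CD:...", so out5[17:76]
        # grabs the 59-character hex-with-colons fingerprint and this loop
        # strips the colons, leaving the 40-hex-digit thumbprint.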
str1 = 'curl'
str2 = '-k'
str3 = '-v'
str4 = '-X'
str5 = 'POST'
str6 = '-u'
str7 = '-d'
str8 = 'http://localhost:8080/auth/realms/'+realm_name+'/protocol/openid-connect/token/introspect'
out6= toxvenv_sh(ctx, remote,
[
str1, str2, str3, str4, str5, str6, ans0, str7, acc_token, str8, run.Raw('|'), 'jq', '-r', '.aud'
])
out7= toxvenv_sh(ctx, remote,
[
str1, str2, str3, str4, str5, str6, ans0, str7, acc_token, str8, run.Raw('|'), 'jq', '-r', '.sub'
])
out8= toxvenv_sh(ctx, remote,
[
str1, str2, str3, str4, str5, str6, ans0, str7, acc_token, str8, run.Raw('|'), 'jq', '-r', '.azp'
])
ans6=out6.rstrip()
ans7=out7.rstrip()
ans8=out8.rstrip()
os.environ['TOKEN']=ans4
os.environ['THUMBPRINT']=ans5
os.environ['AUD']=ans6
os.environ['SUB']=ans7
os.environ['AZP']=ans8
os.environ['USER_TOKEN']=user_token
os.environ['KC_REALM']=realm_name
try:
yield
finally:
log.info('Removing certificate.crt file...')
for (client,_) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=['rm', '-f',
'{tdir}/bin/certificate.crt'.format(tdir=get_keycloak_dir(ctx,config)),
],
)
remote.run(
args=['rm', '-f',
'{tdir}/confi.py'.format(tdir=teuthology.get_testdir(ctx)),
],
)
@contextlib.contextmanager
def task(ctx,config):
"""
    Running keycloak requires the tox task as a prerequisite. The following
    shows how to run tox and then keycloak::
tasks:
- tox: [ client.0 ]
- keycloak:
client.0:
keycloak_version: 11.0.0
To pass extra arguments to nose (e.g. to run a certain test)::
tasks:
- tox: [ client.0 ]
- keycloak:
client.0:
keycloak_version: 11.0.0
- s3tests:
client.0:
extra_attrs: ['webidentity_test']
"""
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task keycloak only supports a list or dictionary for configuration"
if not hasattr(ctx, 'tox'):
raise ConfigError('keycloak must run after the tox task')
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
log.debug('Keycloak config is %s', config)
with contextutil.nested(
lambda: install_packages(ctx=ctx, config=config),
lambda: build(ctx=ctx, config=config),
lambda: run_keycloak(ctx=ctx, config=config),
lambda: download_conf(ctx=ctx, config=config),
lambda: run_admin_cmds(ctx=ctx, config=config),
):
yield
ceph-main/qa/tasks/keystone.py
"""
Deploy and configure Keystone for Teuthology
"""
import argparse
import contextlib
import logging
# still need this for python3.6
from collections import OrderedDict
from itertools import chain
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
from teuthology.packaging import install_package
from teuthology.packaging import remove_package
from teuthology.exceptions import ConfigError
log = logging.getLogger(__name__)
def get_keystone_dir(ctx):
return '{tdir}/keystone'.format(tdir=teuthology.get_testdir(ctx))
def run_in_keystone_dir(ctx, client, args, **kwargs):
return ctx.cluster.only(client).run(
args=[ 'cd', get_keystone_dir(ctx), run.Raw('&&'), ] + args,
**kwargs
)
def get_toxvenv_dir(ctx):
return ctx.tox.venv_path
def toxvenv_sh(ctx, remote, args, **kwargs):
activate = get_toxvenv_dir(ctx) + '/bin/activate'
return remote.sh(['source', activate, run.Raw('&&')] + args, **kwargs)
def run_in_keystone_venv(ctx, client, args):
run_in_keystone_dir(ctx, client,
[ 'source',
'.tox/venv/bin/activate',
run.Raw('&&')
] + args)
def get_keystone_venved_cmd(ctx, cmd, args, env=[]):
kbindir = get_keystone_dir(ctx) + '/.tox/venv/bin/'
return env + [ kbindir + 'python', kbindir + cmd ] + args
@contextlib.contextmanager
def download(ctx, config):
"""
Download the Keystone from github.
Remove downloaded file upon exit.
The context passed in should be identical to the context
passed in to the main task.
"""
assert isinstance(config, dict)
log.info('Downloading keystone...')
keystonedir = get_keystone_dir(ctx)
for (client, cconf) in config.items():
ctx.cluster.only(client).run(
args=[
'git', 'clone',
'-b', cconf.get('force-branch', 'master'),
'https://github.com/openstack/keystone.git',
keystonedir,
],
)
sha1 = cconf.get('sha1')
if sha1 is not None:
run_in_keystone_dir(ctx, client, [
'git', 'reset', '--hard', sha1,
],
)
# hax for http://tracker.ceph.com/issues/23659
run_in_keystone_dir(ctx, client, [
'sed', '-i',
's/pysaml2<4.0.3,>=2.4.0/pysaml2>=4.5.0/',
'requirements.txt'
],
)
try:
yield
finally:
log.info('Removing keystone...')
for client in config:
ctx.cluster.only(client).run(
args=[ 'rm', '-rf', keystonedir ],
)
patch_bindep_template = """\
import fileinput
import sys
import os
fixed=False
os.chdir("{keystone_dir}")
for line in fileinput.input("bindep.txt", inplace=True):
if line == "python34-devel [platform:centos]\\n":
line="python34-devel [platform:centos-7]\\npython36-devel [platform:centos-8]\\n"
fixed=True
print(line,end="")
print("Fixed line" if fixed else "No fix necessary", file=sys.stderr)
exit(0)
"""
@contextlib.contextmanager
def install_packages(ctx, config):
"""
Download the packaged dependencies of Keystone.
Remove install packages upon exit.
The context passed in should be identical to the context
passed in to the main task.
"""
assert isinstance(config, dict)
log.info('Installing packages for Keystone...')
patch_bindep = patch_bindep_template \
.replace("{keystone_dir}", get_keystone_dir(ctx))
packages = {}
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
toxvenv_sh(ctx, remote, ['python'], stdin=patch_bindep)
# use bindep to read which dependencies we need from keystone/bindep.txt
toxvenv_sh(ctx, remote, ['pip', 'install', 'bindep'])
packages[client] = toxvenv_sh(ctx, remote,
['bindep', '--brief', '--file', '{}/bindep.txt'.format(get_keystone_dir(ctx))],
check_status=False).splitlines() # returns 1 on success?
for dep in packages[client]:
install_package(dep, remote)
try:
yield
finally:
log.info('Removing packaged dependencies of Keystone...')
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
for dep in packages[client]:
remove_package(dep, remote)
def run_mysql_query(ctx, remote, query):
query_arg = '--execute="{}"'.format(query)
args = ['sudo', 'mysql', run.Raw(query_arg)]
remote.run(args=args)
@contextlib.contextmanager
def setup_database(ctx, config):
"""
Setup database for Keystone.
"""
assert isinstance(config, dict)
log.info('Setting up database for keystone...')
for (client, cconf) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
# MariaDB on RHEL/CentOS needs service started after package install
# while Ubuntu starts service by default.
if remote.os.name == 'rhel' or remote.os.name == 'centos':
remote.run(args=['sudo', 'systemctl', 'restart', 'mariadb'])
run_mysql_query(ctx, remote, "CREATE USER 'keystone'@'localhost' IDENTIFIED BY 'SECRET';")
run_mysql_query(ctx, remote, "CREATE DATABASE keystone;")
run_mysql_query(ctx, remote, "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost';")
run_mysql_query(ctx, remote, "FLUSH PRIVILEGES;")
try:
yield
finally:
pass
@contextlib.contextmanager
def setup_venv(ctx, config):
"""
Setup the virtualenv for Keystone using tox.
"""
assert isinstance(config, dict)
log.info('Setting up virtualenv for keystone...')
for (client, _) in config.items():
run_in_keystone_dir(ctx, client,
['sed', '-i', 's/usedevelop.*/usedevelop=false/g', 'tox.ini'])
run_in_keystone_dir(ctx, client,
[ 'source',
'{tvdir}/bin/activate'.format(tvdir=get_toxvenv_dir(ctx)),
run.Raw('&&'),
'tox', '-e', 'venv', '--notest'
])
run_in_keystone_venv(ctx, client,
[ 'pip', 'install',
'python-openstackclient==5.2.1',
'osc-lib==2.0.0'
])
try:
yield
finally:
pass
@contextlib.contextmanager
def configure_instance(ctx, config):
assert isinstance(config, dict)
log.info('Configuring keystone...')
kdir = get_keystone_dir(ctx)
keyrepo_dir = '{kdir}/etc/fernet-keys'.format(kdir=kdir)
for (client, _) in config.items():
# prepare the config file
run_in_keystone_dir(ctx, client,
[
'source',
f'{get_toxvenv_dir(ctx)}/bin/activate',
run.Raw('&&'),
'tox', '-e', 'genconfig'
])
run_in_keystone_dir(ctx, client,
[
'cp', '-f',
'etc/keystone.conf.sample',
'etc/keystone.conf'
])
run_in_keystone_dir(ctx, client,
[
'sed',
'-e', 's^#key_repository =.*^key_repository = {kr}^'.format(kr = keyrepo_dir),
'-i', 'etc/keystone.conf'
])
run_in_keystone_dir(ctx, client,
[
'sed',
'-e', 's^#connection =.*^connection = mysql+pymysql://keystone:SECRET@localhost/keystone^',
'-i', 'etc/keystone.conf'
])
# log to a file that gets archived
log_file = '{p}/archive/keystone.{c}.log'.format(p=teuthology.get_testdir(ctx), c=client)
run_in_keystone_dir(ctx, client,
[
'sed',
'-e', 's^#log_file =.*^log_file = {}^'.format(log_file),
'-i', 'etc/keystone.conf'
])
# copy the config to archive
run_in_keystone_dir(ctx, client, [
'cp', 'etc/keystone.conf',
'{}/archive/keystone.{}.conf'.format(teuthology.get_testdir(ctx), client)
])
conf_file = '{kdir}/etc/keystone.conf'.format(kdir=get_keystone_dir(ctx))
        # prepare key repository for Fernet token authenticator
run_in_keystone_dir(ctx, client, [ 'mkdir', '-p', keyrepo_dir ])
run_in_keystone_venv(ctx, client, [ 'keystone-manage', '--config-file', conf_file, 'fernet_setup' ])
# sync database
run_in_keystone_venv(ctx, client, [ 'keystone-manage', '--config-file', conf_file, 'db_sync' ])
yield
@contextlib.contextmanager
def run_keystone(ctx, config):
assert isinstance(config, dict)
log.info('Configuring keystone...')
conf_file = '{kdir}/etc/keystone.conf'.format(kdir=get_keystone_dir(ctx))
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
cluster_name, _, client_id = teuthology.split_role(client)
# start the public endpoint
client_public_with_id = 'keystone.public' + '.' + client_id
public_host, public_port = ctx.keystone.public_endpoints[client]
run_cmd = get_keystone_venved_cmd(ctx, 'keystone-wsgi-public',
[ '--host', public_host, '--port', str(public_port),
# Let's put the Keystone in background, wait for EOF
# and after receiving it, send SIGTERM to the daemon.
# This crazy hack is because Keystone, in contrast to
# our other daemons, doesn't quit on stdin.close().
# Teuthology relies on this behaviour.
run.Raw('& { read; kill %1; }')
],
[
run.Raw('OS_KEYSTONE_CONFIG_FILES={}'.format(conf_file)),
],
)
ctx.daemons.add_daemon(
remote, 'keystone', client_public_with_id,
cluster=cluster_name,
args=run_cmd,
logger=log.getChild(client),
stdin=run.PIPE,
wait=False,
check_status=False,
)
# sleep driven synchronization
run_in_keystone_venv(ctx, client, [ 'sleep', '15' ])
try:
yield
finally:
log.info('Stopping Keystone public instance')
ctx.daemons.get_daemon('keystone', client_public_with_id,
cluster_name).stop()
def dict_to_args(specials, items):
"""
Transform
[(key1, val1), (special, val_special), (key3, val3) ]
into:
[ '--key1', 'val1', '--key3', 'val3', 'val_special' ]
"""
args = []
special_vals = OrderedDict((k, '') for k in specials.split(','))
for (k, v) in items:
if k in special_vals:
special_vals[k] = v
else:
args.append('--{k}'.format(k=k))
args.append(v)
args.extend(arg for arg in special_vals.values() if arg)
return args
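
# Worked example (illustrative, not in the original): with
# specials='service,interface,url' and items
# [('description', 'Swift Service'), ('service', 'swift'),
#  ('interface', 'public'), ('url', 'http://x')],
# dict_to_args() returns
# ['--description', 'Swift Service', 'swift', 'public', 'http://x'];
# non-special keys become --flags and special values trail in declared order.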
def run_section_cmds(ctx, cclient, section_cmd, specials,
section_config_list):
public_host, public_port = ctx.keystone.public_endpoints[cclient]
auth_section = [
( 'os-username', 'admin' ),
( 'os-password', 'ADMIN' ),
( 'os-user-domain-id', 'default' ),
( 'os-project-name', 'admin' ),
( 'os-project-domain-id', 'default' ),
( 'os-identity-api-version', '3' ),
( 'os-auth-url', 'http://{host}:{port}/v3'.format(host=public_host,
port=public_port) ),
]
for section_item in section_config_list:
run_in_keystone_venv(ctx, cclient,
[ 'openstack' ] + section_cmd.split() +
dict_to_args(specials, auth_section + list(section_item.items())) +
[ '--debug' ])
def create_endpoint(ctx, cclient, service, url, adminurl=None):
endpoint_sections = [
{'service': service, 'interface': 'public', 'url': url},
]
if adminurl:
endpoint_sections.append(
{'service': service, 'interface': 'admin', 'url': adminurl}
)
run_section_cmds(ctx, cclient, 'endpoint create',
'service,interface,url',
endpoint_sections)
@contextlib.contextmanager
def fill_keystone(ctx, config):
assert isinstance(config, dict)
for (cclient, cconfig) in config.items():
public_host, public_port = ctx.keystone.public_endpoints[cclient]
url = 'http://{host}:{port}/v3'.format(host=public_host,
port=public_port)
opts = {'password': 'ADMIN',
'region-id': 'RegionOne',
'internal-url': url,
'admin-url': url,
'public-url': url}
bootstrap_args = chain.from_iterable(('--bootstrap-{}'.format(k), v)
for k, v in opts.items())
conf_file = '{kdir}/etc/keystone.conf'.format(kdir=get_keystone_dir(ctx))
run_in_keystone_venv(ctx, cclient,
['keystone-manage', '--config-file', conf_file, 'bootstrap'] +
list(bootstrap_args))
# configure tenants/projects
run_section_cmds(ctx, cclient, 'domain create --or-show', 'name',
cconfig.get('domains', []))
run_section_cmds(ctx, cclient, 'project create --or-show', 'name',
cconfig.get('projects', []))
run_section_cmds(ctx, cclient, 'user create --or-show', 'name',
cconfig.get('users', []))
run_section_cmds(ctx, cclient, 'role create --or-show', 'name',
cconfig.get('roles', []))
run_section_cmds(ctx, cclient, 'role add', 'name',
cconfig.get('role-mappings', []))
run_section_cmds(ctx, cclient, 'service create', 'type',
cconfig.get('services', []))
# for the deferred endpoint creation; currently it's used in rgw.py
ctx.keystone.create_endpoint = create_endpoint
# sleep driven synchronization -- just in case
run_in_keystone_venv(ctx, cclient, [ 'sleep', '3' ])
try:
yield
finally:
pass
def assign_ports(ctx, config, initial_port):
"""
Assign port numbers starting from @initial_port
"""
port = initial_port
role_endpoints = {}
for remote, roles_for_host in ctx.cluster.remotes.items():
for role in roles_for_host:
if role in config:
role_endpoints[role] = (remote.name.split('@')[1], port)
port += 1
return role_endpoints
@contextlib.contextmanager
def task(ctx, config):
"""
Deploy and configure Keystone
Example of configuration:
- install:
- ceph:
- tox: [ client.0 ]
- keystone:
client.0:
force-branch: master
domains:
- name: custom
description: Custom domain
projects:
- name: custom
description: Custom project
users:
- name: custom
password: SECRET
project: custom
roles: [ name: custom ]
role-mappings:
- name: custom
user: custom
project: custom
services:
- name: swift
type: object-store
description: Swift Service
"""
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task keystone only supports a list or dictionary for configuration"
if not hasattr(ctx, 'tox'):
raise ConfigError('keystone must run after the tox task')
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
log.debug('Keystone config is %s', config)
ctx.keystone = argparse.Namespace()
ctx.keystone.public_endpoints = assign_ports(ctx, config, 5000)
with contextutil.nested(
lambda: download(ctx=ctx, config=config),
lambda: install_packages(ctx=ctx, config=config),
lambda: setup_database(ctx=ctx, config=config),
lambda: setup_venv(ctx=ctx, config=config),
lambda: configure_instance(ctx=ctx, config=config),
lambda: run_keystone(ctx=ctx, config=config),
lambda: fill_keystone(ctx=ctx, config=config),
):
yield
| 16,727 | 33.705394 | 108 |
py
|
null |
ceph-main/qa/tasks/kubeadm.py
|
"""
Kubernetes cluster task, deployed via kubeadm
"""
import argparse
import contextlib
import ipaddress
import json
import logging
import random
import yaml
from io import BytesIO
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
log = logging.getLogger(__name__)
def _kubectl(ctx, config, args, **kwargs):
cluster_name = config['cluster']
ctx.kubeadm[cluster_name].bootstrap_remote.run(
args=['kubectl'] + args,
**kwargs,
)
def kubectl(ctx, config):
if isinstance(config, str):
config = [config]
assert isinstance(config, list)
for c in config:
# _kubectl expects a dict carrying the cluster name; plain string or
# list configs don't have one, so fall back to the default cluster
# name ('kubeadm') that task() assigns.
if isinstance(c, str):
_kubectl(ctx, {'cluster': 'kubeadm'}, c.split(' '))
else:
_kubectl(ctx, {'cluster': 'kubeadm'}, c)
@contextlib.contextmanager
def preflight(ctx, config):
run.wait(
ctx.cluster.run(
args=[
'sudo', 'modprobe', 'br_netfilter',
run.Raw('&&'),
'sudo', 'sysctl', 'net.bridge.bridge-nf-call-ip6tables=1',
run.Raw('&&'),
'sudo', 'sysctl', 'net.bridge.bridge-nf-call-iptables=1',
run.Raw('&&'),
'sudo', 'sysctl', 'net.ipv4.ip_forward=1',
run.Raw('&&'),
'sudo', 'swapoff', '-a',
],
wait=False,
)
)
# set docker cgroup driver = systemd
# see https://kubernetes.io/docs/setup/production-environment/container-runtimes/#docker
# see https://github.com/kubernetes/kubeadm/issues/2066
for remote in ctx.cluster.remotes.keys():
try:
orig = remote.read_file('/etc/docker/daemon.json', sudo=True)
j = json.loads(orig)
except Exception as e:
log.info(f'Failed to pull old daemon.json: {e}')
j = {}
j["exec-opts"] = ["native.cgroupdriver=systemd"]
j["log-driver"] = "json-file"
j["log-opts"] = {"max-size": "100m"}
j["storage-driver"] = "overlay2"
remote.write_file('/etc/docker/daemon.json', json.dumps(j), sudo=True)
run.wait(
ctx.cluster.run(
args=[
'sudo', 'systemctl', 'restart', 'docker',
run.Raw('||'),
'true',
],
wait=False,
)
)
yield
@contextlib.contextmanager
def kubeadm_install(ctx, config):
version = config.get('version', '1.21')
os_type = teuthology.get_distro(ctx)
os_version = teuthology.get_distro_version(ctx)
try:
if os_type in ['centos', 'rhel']:
os = f"CentOS_{os_version.split('.')[0]}"
log.info('Installing cri-o')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'curl', '-L', '-o',
'/etc/yum.repos.d/devel:kubic:libcontainers:stable.repo',
f'https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/{os}/devel:kubic:libcontainers:stable.repo',
run.Raw('&&'),
'sudo',
'curl', '-L', '-o',
f'/etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:{version}.repo',
f'https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/{version}/{os}/devel:kubic:libcontainers:stable:cri-o:{version}.repo',
run.Raw('&&'),
'sudo', 'dnf', 'install', '-y', 'cri-o',
],
wait=False,
)
)
log.info('Installing kube{adm,ctl,let}')
repo = """[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
"""
for remote in ctx.cluster.remotes.keys():
remote.write_file(
'/etc/yum.repos.d/kubernetes.repo',
repo,
sudo=True,
)
run.wait(
ctx.cluster.run(
args=[
'sudo', 'dnf', 'install', '-y',
'kubelet', 'kubeadm', 'kubectl',
'iproute-tc', 'bridge-utils',
],
wait=False,
)
)
# fix cni config
for remote in ctx.cluster.remotes.keys():
conf = """# from https://github.com/cri-o/cri-o/blob/master/tutorials/kubernetes.md#flannel-network
{
"name": "crio",
"type": "flannel"
}
"""
remote.write_file('/etc/cni/net.d/10-crio-flannel.conf', conf, sudo=True)
remote.run(args=[
'sudo', 'rm', '-f',
'/etc/cni/net.d/87-podman-bridge.conflist',
'/etc/cni/net.d/100-crio-bridge.conf',
])
# start crio
run.wait(
ctx.cluster.run(
args=[
'sudo', 'systemctl', 'daemon-reload',
run.Raw('&&'),
'sudo', 'systemctl', 'enable', 'crio', '--now',
],
wait=False,
)
)
elif os_type == 'ubuntu':
os = f"xUbuntu_{os_version}"
log.info('Installing kube{adm,ctl,let}')
run.wait(
ctx.cluster.run(
args=[
'sudo', 'apt', 'update',
run.Raw('&&'),
'sudo', 'apt', 'install', '-y',
'apt-transport-https', 'ca-certificates', 'curl',
run.Raw('&&'),
'sudo', 'curl', '-fsSLo',
'/usr/share/keyrings/kubernetes-archive-keyring.gpg',
'https://packages.cloud.google.com/apt/doc/apt-key.gpg',
run.Raw('&&'),
'echo', 'deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main',
run.Raw('|'),
'sudo', 'tee', '/etc/apt/sources.list.d/kubernetes.list',
run.Raw('&&'),
'sudo', 'apt', 'update',
run.Raw('&&'),
'sudo', 'apt', 'install', '-y',
'kubelet', 'kubeadm', 'kubectl',
'bridge-utils',
],
wait=False,
)
)
else:
raise RuntimeError(f'unsupported distro {os_type} for cri-o')
run.wait(
ctx.cluster.run(
args=[
'sudo', 'systemctl', 'enable', '--now', 'kubelet',
run.Raw('&&'),
'sudo', 'kubeadm', 'config', 'images', 'pull',
],
wait=False,
)
)
yield
finally:
if config.get('uninstall', True):
log.info('Uninstalling kube{adm,let,ctl}')
if os_type in ['centos', 'rhel']:
run.wait(
ctx.cluster.run(
args=[
'sudo', 'rm', '-f',
'/etc/yum.repos.d/kubernetes.repo',
run.Raw('&&'),
'sudo', 'dnf', 'remove', '-y',
'kubeadm', 'kubelet', 'kubectl', 'cri-o',
],
wait=False
)
)
elif os_type == 'ubuntu' and False:  # uninstall is intentionally disabled on ubuntu
run.wait(
ctx.cluster.run(
args=[
'sudo', 'rm', '-f',
'/etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list',
f'/etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:{version}.list',
'/etc/apt/trusted.gpg.d/libcontainers-cri-o.gpg',
run.Raw('&&'),
'sudo', 'apt', 'remove', '-y',
'kubeadm', 'kubelet', 'kubectl', 'cri-o', 'cri-o-runc',
],
wait=False,
)
)
@contextlib.contextmanager
def kubeadm_init_join(ctx, config):
cluster_name = config['cluster']
bootstrap_remote = None
remotes = {} # remote -> ip
for remote, roles in ctx.cluster.remotes.items():
for role in roles:
if role.startswith('host.'):
if not bootstrap_remote:
bootstrap_remote = remote
if remote not in remotes:
remotes[remote] = remote.ssh.get_transport().getpeername()[0]
if not bootstrap_remote:
raise RuntimeError('must define at least one host.something role')
ctx.kubeadm[cluster_name].bootstrap_remote = bootstrap_remote
ctx.kubeadm[cluster_name].remotes = remotes
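# kubeadm bootstrap tokens must match [a-z0-9]{6}.[a-z0-9]{16}; keep a
# fixed 'abcdef' token id and randomize only the 16-character secret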
ctx.kubeadm[cluster_name].token = 'abcdef.' + ''.join([
random.choice('0123456789abcdefghijklmnopqrstuvwxyz') for _ in range(16)
])
log.info(f'Token: {ctx.kubeadm[cluster_name].token}')
log.info(f'Remotes: {ctx.kubeadm[cluster_name].remotes}')
try:
# init
cmd = [
'sudo', 'kubeadm', 'init',
'--node-name', ctx.kubeadm[cluster_name].bootstrap_remote.shortname,
'--token', ctx.kubeadm[cluster_name].token,
'--pod-network-cidr', str(ctx.kubeadm[cluster_name].pod_subnet),
]
bootstrap_remote.run(args=cmd)
# join additional nodes
joins = []
for remote, ip in ctx.kubeadm[cluster_name].remotes.items():
if remote == bootstrap_remote:
continue
cmd = [
'sudo', 'kubeadm', 'join',
ctx.kubeadm[cluster_name].remotes[ctx.kubeadm[cluster_name].bootstrap_remote] + ':6443',
'--node-name', remote.shortname,
'--token', ctx.kubeadm[cluster_name].token,
'--discovery-token-unsafe-skip-ca-verification',
]
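# CA pinning is deliberately skipped; these are throwaway test clusters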
joins.append(remote.run(args=cmd, wait=False))
run.wait(joins)
yield
except Exception as e:
log.exception(e)
raise
finally:
log.info('Cleaning up node')
run.wait(
ctx.cluster.run(
args=['sudo', 'kubeadm', 'reset', 'cleanup-node', '-f'],
wait=False,
)
)
@contextlib.contextmanager
def kubectl_config(ctx, config):
cluster_name = config['cluster']
bootstrap_remote = ctx.kubeadm[cluster_name].bootstrap_remote
ctx.kubeadm[cluster_name].admin_conf = \
bootstrap_remote.read_file('/etc/kubernetes/admin.conf', sudo=True)
log.info('Setting up kubectl')
try:
ctx.cluster.run(args=[
'mkdir', '-p', '.kube',
run.Raw('&&'),
'sudo', 'mkdir', '-p', '/root/.kube',
])
for remote in ctx.kubeadm[cluster_name].remotes.keys():
remote.write_file('.kube/config', ctx.kubeadm[cluster_name].admin_conf)
remote.sudo_write_file('/root/.kube/config',
ctx.kubeadm[cluster_name].admin_conf)
yield
except Exception as e:
log.exception(e)
raise
finally:
log.info('Deconfiguring kubectl')
ctx.cluster.run(args=[
'rm', '-rf', '.kube',
run.Raw('&&'),
'sudo', 'rm', '-rf', '/root/.kube',
])
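# Worked example for map_vnet below (hypothetical subnets): with
# machine_subnet 172.21.0.0/20 and virtual_subnet 10.0.0.0/8, a machine
# at 172.21.0.5 is host #4 of its subnet (hosts() starts at .1), so it
# maps to the 5th /20-sized block of 10.0.0.0/8, i.e. 10.0.64.0/20.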
def map_vnet(mip):
for mapping in teuth_config.get('vnet', []):
mnet = ipaddress.ip_network(mapping['machine_subnet'])
vnet = ipaddress.ip_network(mapping['virtual_subnet'])
if vnet.prefixlen >= mnet.prefixlen:
log.error(f"virtual_subnet {vnet} prefix >= machine_subnet {mnet} prefix")
return None
if mip in mnet:
pos = list(mnet.hosts()).index(mip)
log.info(f"{mip} is in {mnet} at pos {pos}")
sub = list(vnet.subnets(32 - mnet.prefixlen))[pos]
return sub
return None
@contextlib.contextmanager
def allocate_pod_subnet(ctx, config):
"""
Allocate a private subnet that will not collide with other test machines/clusters
"""
cluster_name = config['cluster']
assert cluster_name == 'kubeadm', 'multiple subnets not yet implemented'
log.info('Identifying pod subnet')
remote = list(ctx.cluster.remotes.keys())[0]
ip = remote.ssh.get_transport().getpeername()[0]
mip = ipaddress.ip_address(ip)
vnet = map_vnet(mip)
assert vnet
log.info(f'Pod subnet: {vnet}')
ctx.kubeadm[cluster_name].pod_subnet = vnet
yield
@contextlib.contextmanager
def pod_network(ctx, config):
cluster_name = config['cluster']
pnet = config.get('pod_network', 'calico')
if pnet == 'flannel':
r = ctx.kubeadm[cluster_name].bootstrap_remote.run(
args=[
'curl',
'https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml',
],
stdout=BytesIO(),
)
assert r.exitstatus == 0
flannel = list(yaml.load_all(r.stdout.getvalue(), Loader=yaml.FullLoader))
for o in flannel:
if o.get('data', {}).get('net-conf.json'):
log.info(f'Updating {o}')
o['data']['net-conf.json'] = o['data']['net-conf.json'].replace(
'10.244.0.0/16',
str(ctx.kubeadm[cluster_name].pod_subnet)
)
log.info(f'Now {o}')
flannel_yaml = yaml.dump_all(flannel)
log.debug(f'Flannel:\n{flannel_yaml}')
_kubectl(ctx, config, ['apply', '-f', '-'], stdin=flannel_yaml)
elif pnet == 'calico':
_kubectl(ctx, config, [
'create', '-f',
'https://docs.projectcalico.org/manifests/tigera-operator.yaml'
])
cr = {
'apiVersion': 'operator.tigera.io/v1',
'kind': 'Installation',
'metadata': {'name': 'default'},
'spec': {
'calicoNetwork': {
'ipPools': [
{
'blockSize': 26,
'cidr': str(ctx.kubeadm[cluster_name].pod_subnet),
'encapsulation': 'IPIPCrossSubnet',
'natOutgoing': 'Enabled',
'nodeSelector': 'all()',
}
]
}
}
}
_kubectl(ctx, config, ['create', '-f', '-'], stdin=yaml.dump(cr))
else:
raise RuntimeError(f'unrecognized pod_network {pnet}')
try:
yield
finally:
if pnet == 'flannel':
_kubectl(ctx, config, [
'delete', '-f',
'https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml',
])
elif pnet == 'calico':
_kubectl(ctx, config, ['delete', 'installation', 'default'])
_kubectl(ctx, config, [
'delete', '-f',
'https://docs.projectcalico.org/manifests/tigera-operator.yaml'
])
@contextlib.contextmanager
def setup_pvs(ctx, config):
"""
Create PVs for all scratch LVs and set up a trivial provisioner
"""
log.info('Scanning for scratch devices')
crs = []
for remote in ctx.cluster.remotes.keys():
ls = remote.read_file('/scratch_devs').decode('utf-8').strip().splitlines()
log.info(f'Scratch devices on {remote.shortname}: {ls}')
for dev in ls:
devname = dev.split('/')[-1].replace("_", "-")
crs.append({
'apiVersion': 'v1',
'kind': 'PersistentVolume',
'metadata': {'name': f'{remote.shortname}-{devname}'},
'spec': {
'volumeMode': 'Block',
'accessModes': ['ReadWriteOnce'],
'capacity': {'storage': '100Gi'}, # doesn't matter?
'persistentVolumeReclaimPolicy': 'Retain',
'storageClassName': 'scratch',
'local': {'path': dev},
'nodeAffinity': {
'required': {
'nodeSelectorTerms': [
{
'matchExpressions': [
{
'key': 'kubernetes.io/hostname',
'operator': 'In',
'values': [remote.shortname]
}
]
}
]
}
}
}
})
# overwriting first few MB is enough to make k8s happy
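# (presumably this wipes leftover filesystem/LVM signatures from
# earlier runs so the raw block PV binds cleanly)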
remote.run(args=[
'sudo', 'dd', 'if=/dev/zero', f'of={dev}', 'bs=1M', 'count=10'
])
crs.append({
'kind': 'StorageClass',
'apiVersion': 'storage.k8s.io/v1',
'metadata': {'name': 'scratch'},
'provisioner': 'kubernetes.io/no-provisioner',
'volumeBindingMode': 'WaitForFirstConsumer',
})
y = yaml.dump_all(crs)
log.info('Creating PVs + StorageClass')
log.debug(y)
_kubectl(ctx, config, ['create', '-f', '-'], stdin=y)
yield
@contextlib.contextmanager
def final(ctx, config):
cluster_name = config['cluster']
# remove master node taint
_kubectl(ctx, config, [
'taint', 'node',
ctx.kubeadm[cluster_name].bootstrap_remote.shortname,
'node-role.kubernetes.io/master-',
run.Raw('||'),
'true',
])
yield
@contextlib.contextmanager
def task(ctx, config):
if not config:
config = {}
assert isinstance(config, dict), \
"task only supports a dictionary for configuration"
log.info('Kubeadm start')
overrides = ctx.config.get('overrides', {})
teuthology.deep_merge(config, overrides.get('kubeadm', {}))
log.info('Config: ' + str(config))
# set up cluster context
if not hasattr(ctx, 'kubeadm'):
ctx.kubeadm = {}
if 'cluster' not in config:
config['cluster'] = 'kubeadm'
cluster_name = config['cluster']
if cluster_name not in ctx.kubeadm:
ctx.kubeadm[cluster_name] = argparse.Namespace()
with contextutil.nested(
lambda: preflight(ctx, config),
lambda: allocate_pod_subnet(ctx, config),
lambda: kubeadm_install(ctx, config),
lambda: kubeadm_init_join(ctx, config),
lambda: kubectl_config(ctx, config),
lambda: pod_network(ctx, config),
lambda: setup_pvs(ctx, config),
lambda: final(ctx, config),
):
try:
log.info('Kubeadm complete, yielding')
yield
finally:
log.info('Tearing down kubeadm')
| 19,665 | 33.868794 | 183 |
py
|
null |
ceph-main/qa/tasks/locktest.py
|
"""
locktests
"""
import logging
from teuthology.orchestra import run
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Run locktests, from the xfstests suite, on the given
clients. Whether the clients are ceph-fuse or kernel does not
matter, and the two clients can refer to the same mount.
The config is a list of two clients to run the locktest on. The
first client will be the host.
For example:
tasks:
- ceph:
- ceph-fuse: [client.0, client.1]
- locktest:
[client.0, client.1]
This task does not yield; there would be little point.
:param ctx: Context
:param config: Configuration
"""
assert isinstance(config, list)
log.info('fetching and building locktests...')
(host,) = ctx.cluster.only(config[0]).remotes
(client,) = ctx.cluster.only(config[1]).remotes
( _, _, host_id) = config[0].partition('.')
( _, _, client_id) = config[1].partition('.')
testdir = teuthology.get_testdir(ctx)
hostmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=host_id)
clientmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=client_id)
try:
for client_name in config:
log.info('building on {client_}'.format(client_=client_name))
ctx.cluster.only(client_name).run(
args=[
# explicitly does not support multiple autotest tasks
# in a single run; the result archival would conflict
'mkdir', '{tdir}/archive/locktest'.format(tdir=testdir),
run.Raw('&&'),
'mkdir', '{tdir}/locktest'.format(tdir=testdir),
run.Raw('&&'),
'wget',
'-nv',
'https://raw.github.com/gregsfortytwo/xfstests-ceph/master/src/locktest.c',
'-O', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
run.Raw('&&'),
'g++', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
'-o', '{tdir}/locktest/locktest'.format(tdir=testdir)
],
logger=log.getChild('locktest_client.{id}'.format(id=client_name)),
)
log.info('built locktest on each client')
host.run(args=['sudo', 'touch',
'{mnt}/locktestfile'.format(mnt=hostmnt),
run.Raw('&&'),
'sudo', 'chown', 'ubuntu.ubuntu',
'{mnt}/locktestfile'.format(mnt=hostmnt)
]
)
log.info('starting on host')
hostproc = host.run(
args=[
'{tdir}/locktest/locktest'.format(tdir=testdir),
'-p', '6788',
'-d',
'{mnt}/locktestfile'.format(mnt=hostmnt),
],
wait=False,
logger=log.getChild('locktest.host'),
)
log.info('starting on client')
(_,_,hostaddr) = host.name.partition('@')
clientproc = client.run(
args=[
'{tdir}/locktest/locktest'.format(tdir=testdir),
'-p', '6788',
'-d',
'-h', hostaddr,
'{mnt}/locktestfile'.format(mnt=clientmnt),
],
logger=log.getChild('locktest.client'),
wait=False
)
hostresult = hostproc.wait()
clientresult = clientproc.wait()
if (hostresult != 0) or (clientresult != 0):
raise Exception("Did not pass locking test!")
log.info('finished locktest executable with results {r} and {s}'. \
format(r=hostresult, s=clientresult))
finally:
log.info('cleaning up host dir')
host.run(
args=[
'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir),
run.Raw('&&'),
'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
run.Raw('&&'),
'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir),
run.Raw('&&'),
'rmdir', '{tdir}/locktest'.format(tdir=testdir)
],
logger=log.getChild('.{id}'.format(id=config[0])),
)
log.info('cleaning up client dir')
client.run(
args=[
'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir),
run.Raw('&&'),
'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
run.Raw('&&'),
'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir),
run.Raw('&&'),
'rmdir', '{tdir}/locktest'.format(tdir=testdir)
],
logger=log.getChild('.{id}'.format(\
id=config[1])),
)
| 4,936 | 35.57037 | 95 |
py
|
null |
ceph-main/qa/tasks/lost_unfound.py
|
"""
Lost_unfound
"""
import logging
import time
from tasks import ceph_manager
from tasks.util.rados import rados
from teuthology import misc as teuthology
from teuthology.orchestra import run
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Test handling of lost objects.
A pretty rigid cluster is brought up and tested by this task
"""
POOL = 'unfound_pool'
if config is None:
config = {}
assert isinstance(config, dict), \
'lost_unfound task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
manager.wait_for_clean()
manager.create_pool(POOL)
# something that is always there
dummyfile = '/etc/fstab'
# take an osd out until the very end
manager.kill_osd(2)
manager.mark_down_osd(2)
manager.mark_out_osd(2)
# kludge to make sure they get a map
rados(ctx, mon, ['-p', POOL, 'put', 'dummy', dummyfile])
manager.flush_pg_stats([0, 1])
manager.wait_for_recovery()
# create old objects
for f in range(1, 10):
rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile])
rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile])
rados(ctx, mon, ['-p', POOL, 'rm', 'existed_%d' % f])
# delay recovery, and make the pg log very long (to prevent backfill)
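# (a huge min_pg_log_entries keeps recovery log-based; backfill would
# copy objects wholesale and mask the unfound-object scenario)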
manager.raw_cluster_cmd(
'tell', 'osd.1',
'injectargs',
'--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
)
manager.kill_osd(0)
manager.mark_down_osd(0)
for f in range(1, 10):
rados(ctx, mon, ['-p', POOL, 'put', 'new_%d' % f, dummyfile])
rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile])
rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile])
# bring osd.0 back up, let it peer, but don't replicate the new
# objects...
log.info('osd.0 command_args is %s' % ctx.daemons.get_daemon('osd', 0).command_args)
ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([
'--osd-recovery-delay-start', '1000'
])
manager.revive_osd(0)
manager.mark_in_osd(0)
manager.wait_till_osd_is_up(0)
manager.flush_pg_stats([1, 0])
manager.wait_till_active()
# take out osd.1 and the only copy of those objects.
manager.kill_osd(1)
manager.mark_down_osd(1)
manager.mark_out_osd(1)
manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
# bring up osd.2 so that things would otherwise, in theory, recovery fully
manager.revive_osd(2)
manager.mark_in_osd(2)
manager.wait_till_osd_is_up(2)
manager.flush_pg_stats([0, 2])
manager.wait_till_active()
manager.flush_pg_stats([0, 2])
# verify that there are unfound objects
unfound = manager.get_num_unfound_objects()
log.info("there are %d unfound objects" % unfound)
assert unfound
testdir = teuthology.get_testdir(ctx)
procs = []
if config.get('parallel_bench', True):
procs.append(mon.run(
args=[
"/bin/sh", "-c",
" ".join(['adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage',
'rados',
'--no-log-to-stderr',
'--name', 'client.admin',
'-b', str(4<<10),
'-p' , POOL,
'-t', '20',
'bench', '240', 'write',
]).format(tdir=testdir),
],
logger=log.getChild('radosbench.{id}'.format(id='client.admin')),
stdin=run.PIPE,
wait=False
))
time.sleep(10)
# mark stuff lost
pgs = manager.get_pg_stats()
for pg in pgs:
if pg['stat_sum']['num_objects_unfound'] > 0:
primary = 'osd.%d' % pg['acting'][0]
# verify that i can list them direct from the osd
log.info('listing missing/lost in %s state %s', pg['pgid'],
pg['state'])
m = manager.list_pg_unfound(pg['pgid'])
#log.info('%s' % m)
assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
assert m['available_might_have_unfound'] == True
assert m['might_have_unfound'][0]['osd'] == "1"
assert m['might_have_unfound'][0]['status'] == "osd is down"
num_unfound=0
for o in m['objects']:
if len(o['locations']) == 0:
num_unfound += 1
assert m['num_unfound'] == num_unfound
log.info("reverting unfound in %s on %s", pg['pgid'], primary)
manager.raw_cluster_cmd('pg', pg['pgid'],
'mark_unfound_lost', 'revert')
else:
log.info("no unfound in %s", pg['pgid'])
manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
manager.flush_pg_stats([0, 2])
manager.wait_for_recovery()
# verify result
for f in range(1, 10):
err = rados(ctx, mon, ['-p', POOL, 'get', 'new_%d' % f, '-'])
assert err
err = rados(ctx, mon, ['-p', POOL, 'get', 'existed_%d' % f, '-'])
assert err
err = rados(ctx, mon, ['-p', POOL, 'get', 'existing_%d' % f, '-'])
assert not err
# see if osd.1 can cope
manager.mark_in_osd(1)
manager.revive_osd(1)
manager.wait_till_osd_is_up(1)
manager.wait_for_clean()
run.wait(procs)
manager.wait_for_clean()
| 5,993 | 32.116022 | 80 |
py
|
null |
ceph-main/qa/tasks/manypools.py
|
"""
Force pg creation on all osds
"""
from teuthology import misc as teuthology
from teuthology.orchestra import run
import logging
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Create the specified number of pools and write 16 objects to them (thereby forcing
the PG creation on each OSD). This task creates pools from all the clients,
in parallel. It is easy to add other daemon types which have the appropriate
permissions, but I don't think anything else does.
The config is just the number of pools to create. I recommend setting
"mon create pg interval" to a very low value in your ceph config to speed
this up.
You probably want to do this to look at memory consumption, and
maybe to test how performance changes with the number of PGs. For example:
tasks:
- ceph:
config:
mon:
mon create pg interval: 1
- manypools: 3000
- radosbench:
clients: [client.0]
time: 360
"""
log.info('creating {n} pools'.format(n=config))
poolnum = int(config)
creator_remotes = []
client_roles = teuthology.all_roles_of_type(ctx.cluster, 'client')
log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles))
for role in client_roles:
log.info('role={role_}'.format(role_=role))
(creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.keys()
creator_remotes.append((creator_remote, 'client.{id}'.format(id=role)))
remaining_pools = poolnum
poolprocs = dict()
while (remaining_pools > 0):
log.info('{n} pools remaining to create'.format(n=remaining_pools))
for remote, role_ in creator_remotes:
poolnum = remaining_pools
remaining_pools -= 1
if remaining_pools < 0:
continue
log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_))
proc = remote.run(
args=[
'ceph',
'--name', role_,
'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8',
run.Raw('&&'),
'rados',
'--name', role_,
'--pool', 'pool{num}'.format(num=poolnum),
'bench', '0', 'write', '-t', '16', '--block-size', '1'
],
wait = False
)
log.info('waiting for pool and object creates')
poolprocs[remote] = proc
run.wait(poolprocs.values())
log.info('created all {n} pools and wrote 16 objects to each'.format(n=int(config)))
| 2,680 | 35.22973 | 91 |
py
|
null |
ceph-main/qa/tasks/mds_creation_failure.py
|
# FIXME: this file has many undefined vars which are accessed!
# flake8: noqa
import logging
import contextlib
import time
from tasks import ceph_manager
from teuthology import misc
from teuthology.exceptions import CommandFailedError
from teuthology.orchestra.run import Raw
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Go through filesystem creation with a synthetic failure in an MDS
in its 'up:creating' state, to exercise the retry behaviour.
"""
# Grab handles to the teuthology objects of interest
mdslist = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
if len(mdslist) != 1:
# Require exactly one MDS, the code path for creation failure when
# a standby is available is different
raise RuntimeError("This task requires exactly one MDS")
mds_id = mdslist[0]
(mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.keys()
manager = ceph_manager.CephManager(
mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'),
)
# Stop MDS
self.fs.set_max_mds(0)
self.fs.mds_stop(mds_id)
self.fs.mds_fail(mds_id)
# Reset the filesystem so that next start will go into CREATING
manager.raw_cluster_cmd('fs', 'rm', "default", "--yes-i-really-mean-it")
manager.raw_cluster_cmd('fs', 'new', "default", "metadata", "data")
# Start the MDS with mds_kill_create_at set, it will crash during creation
mds.restart_with_args(["--mds_kill_create_at=1"])
try:
mds.wait_for_exit()
except CommandFailedError as e:
if e.exitstatus == 1:
log.info("MDS creation killed as expected")
else:
log.error("Unexpected status code %s" % e.exitstatus)
raise
# Since I have intentionally caused a crash, I will clean up the resulting core
# file to avoid task.internal.coredump seeing it as a failure.
log.info("Removing core file from synthetic MDS failure")
mds_remote.run(args=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive=misc.get_archive_dir(ctx)))])
# It should have left the MDS map state still in CREATING
status = self.fs.status().get_mds(mds_id)
assert status['state'] == 'up:creating'
# Start the MDS again without the kill flag set, it should proceed with creation successfully
mds.restart()
# Wait for state ACTIVE
self.fs.wait_for_state("up:active", timeout=120, mds_id=mds_id)
# The system should be back up in a happy healthy state, go ahead and run any further tasks
# inside this context.
yield
| 2,606 | 35.71831 | 113 |
py
|
null |
ceph-main/qa/tasks/mds_pre_upgrade.py
|
"""
Prepare MDS cluster for upgrade.
"""
import logging
from tasks.cephfs.filesystem import Filesystem
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Prepare MDS cluster for upgrade.
This task reduces ranks to 1 and stops all standbys.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'mds_pre_upgrade task only accepts a dict for configuration'
fs = Filesystem(ctx)
fs.getinfo() # load name
fs.set_allow_standby_replay(False)
fs.set_max_mds(1)
fs.reach_max_mds()
| 563 | 19.142857 | 65 |
py
|
null |
ceph-main/qa/tasks/mds_thrash.py
|
"""
Thrash mds by simulating failures
"""
import logging
import contextlib
import itertools
import random
import time
from gevent import sleep
from gevent.greenlet import Greenlet
from gevent.event import Event
from teuthology import misc as teuthology
from tasks import ceph_manager
from tasks.cephfs.filesystem import MDSCluster, Filesystem, FSMissing
from tasks.thrasher import Thrasher
log = logging.getLogger(__name__)
class MDSThrasher(Thrasher, Greenlet):
"""
MDSThrasher::
The MDSThrasher thrashes MDSs during execution of other tasks (workunits, etc).
The config is optional. Many of the config parameters are a maximum value
to use when selecting a random value from a range. To always use the maximum
value, set no_random to true. The config is a dict containing some or all of:
max_thrash: [default: 1] the maximum number of active MDSs per FS that will be thrashed at
any given time.
max_thrash_delay: [default: 120] maximum number of seconds to delay before
thrashing again (the config key is 'thrash_delay').
max_replay_thrash_delay: [default: 4] maximum number of seconds to delay while in
the replay state before thrashing.
max_revive_delay: [default: 10] maximum number of seconds to delay before
bringing back a thrashed MDS.
randomize: [default: true] enables randomization and use the max/min values
seed: [no default] seed the random number generator
thrash_in_replay: [default: 0.0] likelihood that the MDS will be thrashed
during replay. Value should be between 0.0 and 1.0.
thrash_max_mds: [default: 0.05] likelihood that the max_mds of the mds
cluster will be modified to a value [1, current) or (current, starting
max_mds]. Value should be between 0.0 and 1.0.
thrash_while_stopping: [default: false] thrash an MDS while there
are MDS in up:stopping (because max_mds was changed and some
MDS were deactivated).
thrash_weights: allows specific MDSs to be thrashed more/less frequently.
This option overrides anything specified by max_thrash. This option is a
dict containing mds.x: weight pairs. For example, [mds.a: 0.7, mds.b:
0.3, mds.c: 0.0]. Each weight is a value from 0.0 to 1.0. Any MDSs not
specified will be automatically given a weight of 0.0 (not thrashed).
For a given MDS, by default the thrasher delays for up to
max_thrash_delay, thrashes, waits for the MDS to recover, and iterates.
If a non-zero weight is specified for an MDS, for each iteration the
thrasher chooses whether to thrash during that iteration based on a
random value [0-1] not exceeding the weight of that MDS.
Examples::
The following example sets the likelihood that mds.a will be thrashed
to 80%, mds.b to 20%, and other MDSs will not be thrashed. It also sets the
likelihood that an MDS will be thrashed in replay to 40%.
Thrash weights do not have to sum to 1.
tasks:
- ceph:
- mds_thrash:
thrash_weights:
- mds.a: 0.8
- mds.b: 0.2
thrash_in_replay: 0.4
- ceph-fuse:
- workunit:
clients:
all: [suites/fsx.sh]
The following example disables randomization, and uses the max delay values:
tasks:
- ceph:
- mds_thrash:
max_thrash_delay: 10
max_revive_delay: 1
max_replay_thrash_delay: 4
"""
def __init__(self, ctx, manager, config, fs, max_mds):
super(MDSThrasher, self).__init__()
self.config = config
self.ctx = ctx
self.logger = log.getChild('fs.[{f}]'.format(f = fs.name))
self.fs = fs
self.manager = manager
self.max_mds = max_mds
self.name = 'thrasher.fs.[{f}]'.format(f = fs.name)
self.stopping = Event()
self.randomize = bool(self.config.get('randomize', True))
self.thrash_max_mds = float(self.config.get('thrash_max_mds', 0.05))
self.max_thrash = int(self.config.get('max_thrash', 1))
self.max_thrash_delay = float(self.config.get('thrash_delay', 120.0))
self.thrash_in_replay = float(self.config.get('thrash_in_replay', 0.0))
assert self.thrash_in_replay >= 0.0 and self.thrash_in_replay <= 1.0, 'thrash_in_replay ({v}) must be between [0.0, 1.0]'.format(
v=self.thrash_in_replay)
self.max_replay_thrash_delay = float(self.config.get('max_replay_thrash_delay', 4.0))
self.max_revive_delay = float(self.config.get('max_revive_delay', 10.0))
def _run(self):
try:
self.do_thrash()
except FSMissing:
pass
except Exception as e:
# Log exceptions here so we get the full backtrace (gevent loses them).
# Also allow successful completion as gevent exception handling is a broken mess:
#
# 2017-02-03T14:34:01.259 CRITICAL:root: File "gevent.libev.corecext.pyx", line 367, in gevent.libev.corecext.loop.handle_error (src/gevent/libev/gevent.corecext.c:5051)
# File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/gevent/hub.py", line 558, in handle_error
# self.print_exception(context, type, value, tb)
# File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/gevent/hub.py", line 605, in print_exception
# traceback.print_exception(type, value, tb, file=errstream)
# File "/usr/lib/python2.7/traceback.py", line 124, in print_exception
# _print(file, 'Traceback (most recent call last):')
# File "/usr/lib/python2.7/traceback.py", line 13, in _print
# file.write(str+terminator)
# 2017-02-03T14:34:01.261 CRITICAL:root:IOError
self.set_thrasher_exception(e)
self.logger.exception("exception:")
# allow successful completion so gevent doesn't see an exception...
def log(self, x):
"""Write data to the logger assigned to MDSThrasher"""
self.logger.info(x)
def stop(self):
self.stopping.set()
def kill_mds(self, mds):
if self.config.get('powercycle'):
(remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)).
remotes.keys())
self.log('kill_mds on mds.{m} doing powercycle of {s}'.
format(m=mds, s=remote.name))
self._assert_ipmi(remote)
remote.console.power_off()
else:
self.ctx.daemons.get_daemon('mds', mds).stop()
@staticmethod
def _assert_ipmi(remote):
assert remote.console.has_ipmi_credentials, (
"powercycling requested but RemoteConsole is not "
"initialized. Check ipmi config.")
def revive_mds(self, mds):
"""
Revive mds -- do an ipmpi powercycle (if indicated by the config)
and then restart.
"""
if self.config.get('powercycle'):
(remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)).
remotes.keys())
self.log('revive_mds on mds.{m} doing powercycle of {s}'.
format(m=mds, s=remote.name))
self._assert_ipmi(remote)
remote.console.power_on()
self.manager.make_admin_daemon_dir(self.ctx, remote)
args = []
self.ctx.daemons.get_daemon('mds', mds).restart(*args)
def wait_for_stable(self, rank = None, gid = None):
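# Poll the FSMap until one of: the given rank is re-acquired by a new
# gid; no standby remains to take over the rank; or, when no rank is
# given, the active count reaches max_mds. Raises after ~5 minutes.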
self.log('waiting for mds cluster to stabilize...')
for itercount in itertools.count():
status = self.fs.status()
max_mds = status.get_fsmap(self.fs.id)['mdsmap']['max_mds']
ranks = list(status.get_ranks(self.fs.id))
stopping = sum(1 for _ in ranks if "up:stopping" == _['state'])
actives = sum(1 for _ in ranks
if "up:active" == _['state'] and "laggy_since" not in _)
if not bool(self.config.get('thrash_while_stopping', False)) and stopping > 0:
if itercount % 5 == 0:
self.log('cluster is considered unstable while MDS are in up:stopping (!thrash_while_stopping)')
else:
if rank is not None:
try:
info = status.get_rank(self.fs.id, rank)
if info['gid'] != gid and "up:active" == info['state']:
self.log('mds.{name} has gained rank={rank}, replacing gid={gid}'.format(name = info['name'], rank = rank, gid = gid))
return status
except Exception:
pass # no rank present
if actives >= max_mds:
# no replacement can occur!
self.log("cluster has {actives} actives (max_mds is {max_mds}), no MDS can replace rank {rank}".format(
actives=actives, max_mds=max_mds, rank=rank))
return status
else:
if actives == max_mds:
self.log('mds cluster has {count} alive and active, now stable!'.format(count = actives))
return status
if itercount > 300/2: # 5 minutes
raise RuntimeError('timeout waiting for cluster to stabilize')
elif itercount % 5 == 0:
self.log('mds map: {status}'.format(status=status))
else:
self.log('no change')
sleep(2)
def do_thrash(self):
"""
Perform the random thrashing action
"""
self.log('starting mds_do_thrash for fs {fs}'.format(fs = self.fs.name))
stats = {
"max_mds": 0,
"deactivate": 0,
"kill": 0,
}
while not self.stopping.is_set():
delay = self.max_thrash_delay
if self.randomize:
delay = random.uniform(0.0, self.max_thrash_delay)
if delay > 0.0:
self.log('waiting for {delay} secs before thrashing'.format(delay=delay))
self.stopping.wait(delay)
if self.stopping.is_set():
continue
status = self.fs.status()
if random.random() <= self.thrash_max_mds:
max_mds = status.get_fsmap(self.fs.id)['mdsmap']['max_mds']
options = [i for i in range(1, self.max_mds + 1) if i != max_mds]
if len(options) > 0:
new_max_mds = random.choice(options)
self.log('thrashing max_mds: %d -> %d' % (max_mds, new_max_mds))
self.fs.set_max_mds(new_max_mds)
stats['max_mds'] += 1
self.wait_for_stable()
count = 0
for info in status.get_ranks(self.fs.id):
name = info['name']
label = 'mds.' + name
rank = info['rank']
gid = info['gid']
# if thrash_weights isn't specified and we've reached max_thrash,
# we're done
count = count + 1
if 'thrash_weights' not in self.config and count > self.max_thrash:
break
weight = 1.0
if 'thrash_weights' in self.config:
weight = float(self.config['thrash_weights'].get(label, 0.0))
skip = random.random()
if weight <= skip:
self.log('skipping thrash iteration with skip ({skip}) > weight ({weight})'.format(skip=skip, weight=weight))
continue
self.log('kill {label} (rank={rank})'.format(label=label, rank=rank))
self.kill_mds(name)
stats['kill'] += 1
# wait for mon to report killed mds as crashed
last_laggy_since = None
itercount = 0
while True:
status = self.fs.status()
info = status.get_mds(name)
if not info:
break
if 'laggy_since' in info:
last_laggy_since = info['laggy_since']
break
if any([(f == name) for f in status.get_fsmap(self.fs.id)['mdsmap']['failed']]):
break
self.log(
'waiting till mds map indicates {label} is laggy/crashed, in failed state, or {label} is removed from mdsmap'.format(
label=label))
itercount = itercount + 1
if itercount > 10:
self.log('mds map: {status}'.format(status=status))
sleep(2)
if last_laggy_since:
self.log(
'{label} reported laggy/crashed since: {since}'.format(label=label, since=last_laggy_since))
else:
self.log('{label} down, removed from mdsmap'.format(label=label))
# wait for a standby mds to takeover and become active
status = self.wait_for_stable(rank, gid)
# wait for a while before restarting old active to become new
# standby
delay = self.max_revive_delay
if self.randomize:
delay = random.uniform(0.0, self.max_revive_delay)
self.log('waiting for {delay} secs before reviving {label}'.format(
delay=delay, label=label))
sleep(delay)
self.log('reviving {label}'.format(label=label))
self.revive_mds(name)
for itercount in itertools.count():
if itercount > 300/2: # 5 minutes
raise RuntimeError('timeout waiting for MDS to revive')
status = self.fs.status()
info = status.get_mds(name)
if info and info['state'] in ('up:standby', 'up:standby-replay', 'up:active'):
self.log('{label} reported in {state} state'.format(label=label, state=info['state']))
break
self.log(
'waiting till mds map indicates {label} is in active, standby or standby-replay'.format(label=label))
sleep(2)
for stat in stats:
self.log("stat['{key}'] = {value}".format(key = stat, value = stats[stat]))
# don't do replay thrashing right now
# for info in status.get_replays(self.fs.id):
# # this might race with replay -> active transition...
# if status['state'] == 'up:replay' and random.randrange(0.0, 1.0) < self.thrash_in_replay:
# delay = self.max_replay_thrash_delay
# if self.randomize:
# delay = random.randrange(0.0, self.max_replay_thrash_delay)
# sleep(delay)
# self.log('kill replaying mds.{id}'.format(id=self.to_kill))
# self.kill_mds(self.to_kill)
#
# delay = self.max_revive_delay
# if self.randomize:
# delay = random.randrange(0.0, self.max_revive_delay)
#
# self.log('waiting for {delay} secs before reviving mds.{id}'.format(
# delay=delay, id=self.to_kill))
# sleep(delay)
#
# self.log('revive mds.{id}'.format(id=self.to_kill))
# self.revive_mds(self.to_kill)
@contextlib.contextmanager
def task(ctx, config):
"""
Stress test the mds by thrashing while another task/workunit
is running.
Please refer to MDSThrasher class for further information on the
available options.
"""
mds_cluster = MDSCluster(ctx)
if config is None:
config = {}
assert isinstance(config, dict), \
'mds_thrash task only accepts a dict for configuration'
mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
assert len(mdslist) > 1, \
'mds_thrash task requires at least 2 metadata servers'
# choose random seed
if 'seed' in config:
seed = int(config['seed'])
else:
seed = int(time.time())
log.info('mds thrasher using random seed: {seed}'.format(seed=seed))
random.seed(seed)
(first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.keys()
manager = ceph_manager.CephManager(
first, ctx=ctx, logger=log.getChild('ceph_manager'),
)
# make sure everyone is in active, standby, or standby-replay
log.info('Wait for all MDSs to reach steady state...')
status = mds_cluster.status()
while True:
steady = True
for info in status.get_all():
state = info['state']
if state not in ('up:active', 'up:standby', 'up:standby-replay'):
steady = False
break
if steady:
break
sleep(2)
status = mds_cluster.status()
log.info('Ready to start thrashing')
manager.wait_for_clean()
assert manager.is_clean()
if 'cluster' not in config:
config['cluster'] = 'ceph'
for fs in status.get_filesystems():
thrasher = MDSThrasher(ctx, manager, config, Filesystem(ctx, fscid=fs['id']), fs['mdsmap']['max_mds'])
thrasher.start()
ctx.ceph[config['cluster']].thrashers.append(thrasher)
try:
log.debug('Yielding')
yield
finally:
log.info('joining mds_thrasher')
thrasher.stop()
if thrasher.exception is not None:
raise RuntimeError('error during thrashing')
thrasher.join()
log.info('done joining')
| 17,999 | 40.37931 | 182 |
py
|
null |
ceph-main/qa/tasks/metadata.yaml
|
instance-id: test
local-hostname: test
| 39 | 12.333333 | 20 |
yaml
|
null |
ceph-main/qa/tasks/mon_clock_skew_check.py
|
"""
Handle clock skews in monitors.
"""
import logging
import time
from tasks import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
class ClockSkewCheck:
"""
Check if there are any clock skews among the monitors in the
quorum.
This task accepts the following options:
interval amount of seconds to wait before check. (default: 30.0)
expect-skew 'true' or 'false', to indicate whether to expect a skew during
the run or not. If 'true', the test will fail if no skew is
found, and succeed if a skew is indeed found; if 'false', it's
the other way around. (default: false)
For example::

- mon_clock_skew_check:
expect-skew: true
"""
def __init__(self, ctx, manager, config, logger):
self.ctx = ctx
self.manager = manager
self.stopping = False
self.logger = logger
self.config = config
if self.config is None:
self.config = dict()
def task(ctx, config):
if config is None:
config = {}
assert isinstance(config, dict), \
'mon_clock_skew_check task only accepts a dict for configuration'
interval = float(config.get('interval', 30.0))
expect_skew = config.get('expect-skew', False)
log.info('Beginning mon_clock_skew_check...')
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
quorum_size = len(teuthology.get_mon_names(ctx))
manager.wait_for_mon_quorum_size(quorum_size)
# wait a bit
log.info('sleeping for {s} seconds'.format(
s=interval))
time.sleep(interval)
health = manager.get_mon_health(True)
log.info('got health %s' % health)
if expect_skew:
if 'MON_CLOCK_SKEW' not in health['checks']:
raise RuntimeError('expected MON_CLOCK_SKEW but got none')
else:
if 'MON_CLOCK_SKEW' in health['checks']:
raise RuntimeError('got MON_CLOCK_SKEW but expected none')
| 2,165 | 28.27027 | 79 |
py
|
null |
ceph-main/qa/tasks/mon_recovery.py
|
"""
Monitor recovery
"""
import logging
from tasks import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Test monitor recovery.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
mons = [f.split('.')[1] for f in teuthology.get_mon_names(ctx)]
log.info("mon ids = %s" % mons)
manager.wait_for_mon_quorum_size(len(mons))
log.info('verifying all monitors are in the quorum')
for m in mons:
s = manager.get_mon_status(m)
assert s['state'] == 'leader' or s['state'] == 'peon'
assert len(s['quorum']) == len(mons)
log.info('restarting each monitor in turn')
for m in mons:
# stop a monitor
manager.kill_mon(m)
manager.wait_for_mon_quorum_size(len(mons) - 1)
# restart
manager.revive_mon(m)
manager.wait_for_mon_quorum_size(len(mons))
# in forward and reverse order,
rmons = mons[::-1]
for mons in (mons, rmons):
log.info('stopping all monitors')
for m in mons:
manager.kill_mon(m)
log.info('forming a minimal quorum for %s, then adding monitors' % mons)
qnum = (len(mons) // 2) + 1
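# majority quorum, e.g. 3 mons -> 2, 5 mons -> 3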
num = 0
for m in mons:
manager.revive_mon(m)
num += 1
if num >= qnum:
manager.wait_for_mon_quorum_size(num)
# on both leader and non-leader ranks...
for rank in [0, 1]:
# take one out
log.info('removing mon %s' % mons[rank])
manager.kill_mon(mons[rank])
manager.wait_for_mon_quorum_size(len(mons) - 1)
log.info('causing some monitor log activity')
m = 30
for n in range(1, m):
manager.raw_cluster_cmd('log', '%d of %d' % (n, m))
log.info('adding mon %s back in' % mons[rank])
manager.revive_mon(mons[rank])
manager.wait_for_mon_quorum_size(len(mons))
| 2,281 | 27.17284 | 80 |
py
|
null |
ceph-main/qa/tasks/mon_thrash.py
|
"""
Monitor thrash
"""
import logging
import contextlib
import random
import time
import gevent
import json
import math
from teuthology import misc as teuthology
from teuthology.contextutil import safe_while
from tasks import ceph_manager
from tasks.cephfs.filesystem import MDSCluster
from tasks.thrasher import Thrasher
log = logging.getLogger(__name__)
def _get_mons(ctx):
"""
Get monitor names from the context value.
"""
mons = [f[len('mon.'):] for f in teuthology.get_mon_names(ctx)]
return mons
class MonitorThrasher(Thrasher):
"""
How it works::
- pick a monitor
- kill it
- wait for quorum to be formed
- sleep for 'revive_delay' seconds
- revive monitor
- wait for quorum to be formed
- sleep for 'thrash_delay' seconds
Options::
seed Seed to use on the RNG to reproduce a previous
behaviour (default: None; i.e., not set)
revive_delay Number of seconds to wait before reviving
the monitor (default: 10)
thrash_delay Number of seconds to wait in-between
test iterations (default: 0)
store_thrash Thrash monitor store before killing the monitor being thrashed (default: False)
store_thrash_probability Probability of thrashing a monitor's store
(default: 50)
thrash_many Thrash multiple monitors instead of just one. If
'maintain_quorum' is set to False, then we will
thrash up to as many monitors as there are
available. (default: False)
maintain_quorum Always maintain quorum, taking care on how many
monitors we kill during the thrashing. If we
happen to only have one or two monitors configured,
if this option is set to True, then we won't run
this task as we cannot guarantee maintenance of
quorum. Setting it to false however would allow the
task to run with as many as just one single monitor.
(default: True)
freeze_mon_probability: how often to freeze the mon instead of killing it,
in % (default: 10)
freeze_mon_duration: how many seconds to freeze the mon (default: 15)
scrub Scrub after each iteration (default: True)
check_mds_failover Check if mds failover happened (default: False)
Note: if 'store_thrash' is set to True, then 'maintain_quorum' must also
be set to True.
For example::
tasks:
- ceph:
- mon_thrash:
revive_delay: 20
thrash_delay: 1
store_thrash: true
store_thrash_probability: 40
seed: 31337
maintain_quorum: true
thrash_many: true
check_mds_failover: True
- ceph-fuse:
- workunit:
clients:
all:
- mon/workloadgen.sh
"""
def __init__(self, ctx, manager, config, name, logger):
super(MonitorThrasher, self).__init__()
self.ctx = ctx
self.manager = manager
self.manager.wait_for_clean()
self.stopping = False
self.logger = logger
self.config = config
self.name = name
if self.config is None:
self.config = dict()
""" Test reproducibility """
self.random_seed = self.config.get('seed', None)
if self.random_seed is None:
self.random_seed = int(time.time())
self.rng = random.Random()
self.rng.seed(int(self.random_seed))
""" Monitor thrashing """
self.revive_delay = float(self.config.get('revive_delay', 10.0))
self.thrash_delay = float(self.config.get('thrash_delay', 0.0))
self.thrash_many = self.config.get('thrash_many', False)
self.maintain_quorum = self.config.get('maintain_quorum', True)
self.scrub = self.config.get('scrub', True)
self.freeze_mon_probability = float(self.config.get('freeze_mon_probability', 10))
self.freeze_mon_duration = float(self.config.get('freeze_mon_duration', 15.0))
assert self.max_killable() > 0, \
'Unable to kill at least one monitor with the current config.'
""" Store thrashing """
self.store_thrash = self.config.get('store_thrash', False)
self.store_thrash_probability = int(
self.config.get('store_thrash_probability', 50))
if self.store_thrash:
assert self.store_thrash_probability > 0, \
'store_thrash is set, probability must be > 0'
assert self.maintain_quorum, \
'store_thrash = true must imply maintain_quorum = true'
#MDS failover
self.mds_failover = self.config.get('check_mds_failover', False)
if self.mds_failover:
self.mds_cluster = MDSCluster(ctx)
self.thread = gevent.spawn(self.do_thrash)
def log(self, x):
"""
locally log info messages
"""
self.logger.info(x)
def do_join(self):
"""
Break out of this processes thrashing loop.
"""
self.stopping = True
self.thread.get()
def should_thrash_store(self):
"""
If allowed, indicate that we should thrash a certain percentage of
the time as determined by the store_thrash_probability value.
"""
if not self.store_thrash:
return False
return self.rng.randrange(0, 101) < self.store_thrash_probability
def thrash_store(self, mon):
"""
Thrash the monitor specified.
:param mon: monitor to thrash
"""
self.log('thrashing mon.{id} store'.format(id=mon))
out = self.manager.raw_cluster_cmd(
'tell', 'mon.%s' % mon, 'sync_force',
'--yes-i-really-mean-it')
j = json.loads(out)
assert j['ret'] == 0, \
'error forcing store sync on mon.{id}:\n{ret}'.format(
id=mon,ret=out)
def should_freeze_mon(self):
"""
Indicate that we should freeze a certain percentage of the time
as determined by the freeze_mon_probability value.
"""
return self.rng.randrange(0, 101) < self.freeze_mon_probability
def freeze_mon(self, mon):
"""
Send STOP signal to freeze the monitor.
"""
log.info('Sending STOP to mon %s', mon)
self.manager.signal_mon(mon, 19) # STOP
def unfreeze_mon(self, mon):
"""
Send CONT signal to unfreeze the monitor.
"""
log.info('Sending CONT to mon %s', mon)
self.manager.signal_mon(mon, 18) # CONT
def kill_mon(self, mon):
"""
Kill the monitor specified
"""
self.log('killing mon.{id}'.format(id=mon))
self.manager.kill_mon(mon)
def revive_mon(self, mon):
"""
Revive the monitor specified
"""
self.log('reviving mon.{id}'.format(id=mon))
self.manager.revive_mon(mon)
def max_killable(self):
"""
Return the maximum number of monitors we can kill.
"""
m = len(_get_mons(self.ctx))
if self.maintain_quorum:
return max(math.ceil(m/2.0)-1, 0)
else:
return m
def _wait_until_quorum(self, mon, size, timeout=300):
"""
Wait until the monitor specified is in the quorum.
"""
self.log('waiting for quorum size %d for mon %s' % (size, mon))
s = {}
with safe_while(sleep=3,
tries=timeout // 3,
action=f'wait for quorum size {size} on mon {mon}') as proceed:
while proceed():
s = self.manager.get_mon_status(mon)
if len(s['quorum']) == size:
break
self.log("quorum is size %d" % len(s['quorum']))
self.log("final quorum is size %d" % len(s['quorum']))
return s
def do_thrash(self):
"""
_do_thrash() wrapper.
"""
try:
self._do_thrash()
except Exception as e:
# See _run exception comment for MDSThrasher
self.set_thrasher_exception(e)
self.logger.exception("exception:")
# Allow successful completion so gevent doesn't see an exception.
# The DaemonWatchdog will observe the error and tear down the test.
def _do_thrash(self):
"""
Continuously loop and thrash the monitors.
"""
#status before mon thrashing
if self.mds_failover:
oldstatus = self.mds_cluster.status()
self.log('start thrashing')
self.log('seed: {s}, revive delay: {r}, thrash delay: {t} '\
'thrash many: {tm}, maintain quorum: {mq} '\
'store thrash: {st}, probability: {stp} '\
'freeze mon: prob {fp} duration {fd}'.format(
s=self.random_seed,r=self.revive_delay,t=self.thrash_delay,
tm=self.thrash_many, mq=self.maintain_quorum,
st=self.store_thrash,stp=self.store_thrash_probability,
fp=self.freeze_mon_probability,fd=self.freeze_mon_duration,
))
while not self.stopping:
mons = _get_mons(self.ctx)
self.manager.wait_for_mon_quorum_size(len(mons))
self.log('making sure all monitors are in the quorum')
for m in mons:
try:
s = self._wait_until_quorum(m, len(mons), timeout=30)
except Exception as e:
self.log('mon.{m} failed to reach the expected quorum size, exception: {e}'.format(m=m,e=e))
self.log('mon_status: {s}'.format(s=s))
assert s['state'] == 'leader' or s['state'] == 'peon'
assert len(s['quorum']) == len(mons)
kill_up_to = self.rng.randrange(1, self.max_killable()+1)
mons_to_kill = self.rng.sample(mons, kill_up_to)
self.log('monitors to thrash: {m}'.format(m=mons_to_kill))
mons_to_freeze = []
for mon in mons:
if mon in mons_to_kill:
continue
if self.should_freeze_mon():
mons_to_freeze.append(mon)
self.log('monitors to freeze: {m}'.format(m=mons_to_freeze))
for mon in mons_to_kill:
self.log('thrashing mon.{m}'.format(m=mon))
""" we only thrash stores if we are maintaining quorum """
if self.should_thrash_store() and self.maintain_quorum:
self.thrash_store(mon)
self.kill_mon(mon)
if mons_to_freeze:
for mon in mons_to_freeze:
self.freeze_mon(mon)
self.log('waiting for {delay} secs to unfreeze mons'.format(
delay=self.freeze_mon_duration))
time.sleep(self.freeze_mon_duration)
for mon in mons_to_freeze:
self.unfreeze_mon(mon)
if self.maintain_quorum:
self.manager.wait_for_mon_quorum_size(len(mons)-len(mons_to_kill))
for m in mons:
if m in mons_to_kill:
continue
try:
s = self._wait_until_quorum(m, len(mons)-len(mons_to_kill), timeout=30)
except Exception as e:
self.log('mon.{m} failed to reach the expected quorum size, exception: {e}'.format(m=m,e=e))
self.log('mon_status: {s}'.format(s=s))
assert s['state'] == 'leader' or s['state'] == 'peon'
assert len(s['quorum']) == len(mons)-len(mons_to_kill)
self.log('waiting for {delay} secs before reviving monitors'.format(
delay=self.revive_delay))
time.sleep(self.revive_delay)
for mon in mons_to_kill:
self.revive_mon(mon)
# do more freezes
if mons_to_freeze:
for mon in mons_to_freeze:
self.freeze_mon(mon)
self.log('waiting for {delay} secs to unfreeze mons'.format(
delay=self.freeze_mon_duration))
time.sleep(self.freeze_mon_duration)
for mon in mons_to_freeze:
self.unfreeze_mon(mon)
self.manager.wait_for_mon_quorum_size(len(mons))
for m in mons:
try:
s = self._wait_until_quorum(m, len(mons), timeout=30)
except Exception as e:
self.log('mon.{m} is not in quorum size, exception: {e}'.format(m=m,e=e))
self.log('mon_status: {s}'.format(s=s))
assert s['state'] == 'leader' or s['state'] == 'peon'
assert len(s['quorum']) == len(mons)
if self.scrub:
self.log('triggering scrub')
try:
self.manager.raw_cluster_cmd('mon', 'scrub')
except Exception as e:
log.warning("Ignoring exception while triggering scrub: %s", e)
if self.thrash_delay > 0.0:
self.log('waiting for {delay} secs before continuing thrashing'.format(
delay=self.thrash_delay))
time.sleep(self.thrash_delay)
        # status after thrashing
if self.mds_failover:
status = self.mds_cluster.status()
assert not oldstatus.hadfailover(status), \
'MDS Failover'
@contextlib.contextmanager
def task(ctx, config):
"""
Stress test the monitor by thrashing them while another task/workunit
is running.
Please refer to MonitorThrasher class for further information on the
available options.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'mon_thrash task only accepts a dict for configuration'
assert len(_get_mons(ctx)) > 2, \
'mon_thrash task requires at least 3 monitors'
if 'cluster' not in config:
config['cluster'] = 'ceph'
log.info('Beginning mon_thrash...')
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
thrash_proc = MonitorThrasher(ctx,
manager, config, "MonitorThrasher",
logger=log.getChild('mon_thrasher'))
ctx.ceph[config['cluster']].thrashers.append(thrash_proc)
try:
log.debug('Yielding')
yield
finally:
log.info('joining mon_thrasher')
thrash_proc.do_join()
mons = _get_mons(ctx)
manager.wait_for_mon_quorum_size(len(mons))
ceph-main/qa/tasks/multibench.py
"""
Multibench testing
"""
import contextlib
import logging
import time
import copy
import gevent
from tasks import radosbench
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Run multibench
The config should be as follows:
multibench:
time: <seconds to run total>
segments: <number of concurrent benches>
radosbench: <config for radosbench>
example:
tasks:
- ceph:
- multibench:
clients: [client.0]
time: 360
- interactive:
"""
log.info('Beginning multibench...')
assert isinstance(config, dict), \
"please list clients to run on"
def run_one(num):
"""Run test spawn from gevent"""
start = time.time()
if not config.get('radosbench'):
benchcontext = {}
else:
benchcontext = copy.copy(config.get('radosbench'))
iterations = 0
while time.time() - start < int(config.get('time', 600)):
log.info("Starting iteration %s of segment %s"%(iterations, num))
benchcontext['pool'] = str(num) + "-" + str(iterations)
with radosbench.task(ctx, benchcontext):
                time.sleep(1)
iterations += 1
log.info("Starting %s threads"%(str(config.get('segments', 3)),))
segments = [
gevent.spawn(run_one, i)
for i in range(0, int(config.get('segments', 3)))]
try:
yield
finally:
[i.get() for i in segments]
ceph-main/qa/tasks/netem.py
"""
Task to run tests with network delay between two remotes using tc and netem.
Reference: https://wiki.linuxfoundation.org/networking/netem.
"""
import logging
import contextlib
from paramiko import SSHException
import socket
import time
import gevent
import argparse
log = logging.getLogger(__name__)
def set_priority(interface):
# create a priority queueing discipline
return ['sudo', 'tc', 'qdisc', 'add', 'dev', interface, 'root', 'handle', '1:', 'prio']
def show_tc(interface):
# shows tc device present
return ['sudo', 'tc', 'qdisc', 'show', 'dev', interface]
def del_tc(interface):
return ['sudo', 'tc', 'qdisc', 'del', 'dev', interface, 'root']
def cmd_prefix(interface):
# prepare command to set delay
cmd1 = ['sudo', 'tc', 'qdisc', 'add', 'dev', interface, 'parent',
'1:1', 'handle', '2:', 'netem', 'delay']
# prepare command to change delay
cmd2 = ['sudo', 'tc', 'qdisc', 'replace', 'dev', interface, 'root', 'netem', 'delay']
# prepare command to apply filter to the matched ip/host
cmd3 = ['sudo', 'tc', 'filter', 'add', 'dev', interface,
'parent', '1:0', 'protocol', 'ip', 'pref', '55',
'handle', '::55', 'u32', 'match', 'ip', 'dst']
return cmd1, cmd2, cmd3
def static_delay(remote, host, interface, delay):
""" Sets a constant delay between two hosts to emulate network delays using tc qdisc and netem"""
set_delay, change_delay, set_ip = cmd_prefix(interface)
ip = socket.gethostbyname(host.hostname)
tc = remote.sh(show_tc(interface))
if tc.strip().find('refcnt') == -1:
# call set_priority() func to create priority queue
# if not already created(indicated by -1)
log.info('Create priority queue')
remote.run(args=set_priority(interface))
# set static delay, with +/- 5ms jitter with normal distribution as default
log.info('Setting delay to %s' % delay)
set_delay.extend(['%s' % delay, '5ms', 'distribution', 'normal'])
remote.run(args=set_delay)
# set delay to a particular remote node via ip
log.info('Delay set on %s' % remote)
set_ip.extend(['%s' % ip, 'flowid', '2:1'])
remote.run(args=set_ip)
else:
# if the device is already created, only change the delay
log.info('Setting delay to %s' % delay)
change_delay.extend(['%s' % delay, '5ms', 'distribution', 'normal'])
remote.run(args=change_delay)
def variable_delay(remote, host, interface, delay_range=[]):
""" Vary delay between two values"""
set_delay, change_delay, set_ip = cmd_prefix(interface)
ip = socket.gethostbyname(host.hostname)
# delay1 has to be lower than delay2
delay1 = delay_range[0]
delay2 = delay_range[1]
tc = remote.sh(show_tc(interface))
if tc.strip().find('refcnt') == -1:
# call set_priority() func to create priority queue
# if not already created(indicated by -1)
remote.run(args=set_priority(interface))
# set variable delay
log.info('Setting varying delay')
set_delay.extend(['%s' % delay1, '%s' % delay2])
remote.run(args=set_delay)
# set delay to a particular remote node via ip
log.info('Delay set on %s' % remote)
set_ip.extend(['%s' % ip, 'flowid', '2:1'])
remote.run(args=set_ip)
else:
# if the device is already created, only change the delay
log.info('Setting varying delay')
change_delay.extend(['%s' % delay1, '%s' % delay2])
remote.run(args=change_delay)
def delete_dev(remote, interface):
""" Delete the qdisc if present"""
log.info('Delete tc')
tc = remote.sh(show_tc(interface))
if tc.strip().find('refcnt') != -1:
remote.run(args=del_tc(interface))
class Toggle:
stop_event = gevent.event.Event()
def __init__(self, ctx, remote, host, interface, interval):
self.ctx = ctx
self.remote = remote
self.host = host
self.interval = interval
self.interface = interface
self.ip = socket.gethostbyname(self.host.hostname)
def packet_drop(self):
""" Drop packets to the remote ip specified"""
_, _, set_ip = cmd_prefix(self.interface)
tc = self.remote.sh(show_tc(self.interface))
if tc.strip().find('refcnt') == -1:
self.remote.run(args=set_priority(self.interface))
# packet drop to specific ip
log.info('Drop all packets to %s' % self.host)
set_ip.extend(['%s' % self.ip, 'action', 'drop'])
self.remote.run(args=set_ip)
def link_toggle(self):
"""
For toggling packet drop and recovery in regular interval.
If interval is 5s, link is up for 5s and link is down for 5s
"""
while not self.stop_event.is_set():
self.stop_event.wait(timeout=self.interval)
# simulate link down
try:
self.packet_drop()
log.info('link down')
except SSHException:
log.debug('Failed to run command')
self.stop_event.wait(timeout=self.interval)
# if qdisc exist,delete it.
try:
delete_dev(self.remote, self.interface)
log.info('link up')
except SSHException:
log.debug('Failed to run command')
def begin(self, gname):
self.thread = gevent.spawn(self.link_toggle)
self.ctx.netem.names[gname] = self.thread
def end(self, gname):
self.stop_event.set()
log.info('gname is {}'.format(self.ctx.netem.names[gname]))
self.ctx.netem.names[gname].get()
def cleanup(self):
"""
Invoked during unwinding if the test fails or exits before executing task 'link_recover'
"""
log.info('Clean up')
self.stop_event.set()
self.thread.get()
@contextlib.contextmanager
def task(ctx, config):
"""
- netem:
clients: [c1.rgw.0]
iface: eno1
dst_client: [c2.rgw.1]
delay: 10ms
- netem:
clients: [c1.rgw.0]
iface: eno1
dst_client: [c2.rgw.1]
delay_range: [10ms, 20ms] # (min, max)
- netem:
clients: [rgw.1, mon.0]
iface: eno1
gname: t1
dst_client: [c2.rgw.1]
link_toggle_interval: 10 # no unit mentioned. By default takes seconds.
- netem:
clients: [rgw.1, mon.0]
iface: eno1
link_recover: [t1, t2]
"""
log.info('config %s' % config)
assert isinstance(config, dict), \
"please list clients to run on"
if not hasattr(ctx, 'netem'):
ctx.netem = argparse.Namespace()
ctx.netem.names = {}
if config.get('dst_client') is not None:
dst = config.get('dst_client')
(host,) = ctx.cluster.only(dst).remotes.keys()
for role in config.get('clients', None):
(remote,) = ctx.cluster.only(role).remotes.keys()
ctx.netem.remote = remote
if config.get('delay', False):
static_delay(remote, host, config.get('iface'), config.get('delay'))
if config.get('delay_range', False):
variable_delay(remote, host, config.get('iface'), config.get('delay_range'))
if config.get('link_toggle_interval', False):
log.info('Toggling link for %s' % config.get('link_toggle_interval'))
global toggle
toggle = Toggle(ctx, remote, host, config.get('iface'), config.get('link_toggle_interval'))
toggle.begin(config.get('gname'))
if config.get('link_recover', False):
log.info('Recovering link')
for gname in config.get('link_recover'):
toggle.end(gname)
log.info('sleeping')
time.sleep(config.get('link_toggle_interval'))
delete_dev(ctx.netem.remote, config.get('iface'))
del ctx.netem.names[gname]
try:
yield
finally:
if ctx.netem.names:
toggle.cleanup()
for role in config.get('clients'):
(remote,) = ctx.cluster.only(role).remotes.keys()
delete_dev(remote, config.get('iface'))
ceph-main/qa/tasks/netsplit.py
"""
Functions to netsplit test machines.
At present, you must specify monitors to disconnect, and it
drops those IP pairs. This means OSDs etc on the hosts which use
the same IP will also be blocked! If you are using multiple IPs on the
same host within the cluster, daemons on those other IPs will get
through.
"""
import logging
import re
log = logging.getLogger(__name__)
def get_ip_and_ports(ctx, daemon):
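    # Parses a mon addrvec such as (illustrative)
    # '[v2:172.21.15.1:3300,v1:172.21.15.1:6789]' into
    # ('172.21.15.1', [':3300', ':6789']).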
assert daemon.startswith('mon.')
addr = ctx.ceph['ceph'].mons['{a}'.format(a=daemon)]
    ips = re.findall(r"[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+[:[0-9]*]*", addr)
assert len(ips) > 0
    plain_ip = re.match(r"[0-9\.]*", ips[0]).group()
assert plain_ip is not None
port_list = []
for ip in ips:
        ip_str, port_str = re.match(r"([0-9\.]*)([:[0-9]*]*)", ip).groups()
assert ip_str == plain_ip
if len(port_str) > 0:
port_list.append(port_str)
return (plain_ip, port_list)
def disconnect(ctx, config):
assert len(config) == 2 # we can only disconnect pairs right now
# and we can only disconnect mons right now
assert config[0].startswith('mon.')
assert config[1].startswith('mon.')
(ip1, _) = get_ip_and_ports(ctx, config[0])
(ip2, _) = get_ip_and_ports(ctx, config[1])
(host1,) = ctx.cluster.only(config[0]).remotes.keys()
(host2,) = ctx.cluster.only(config[1]).remotes.keys()
assert host1 is not None
assert host2 is not None
host1.run(
args = ["sudo", "iptables", "-A", "INPUT", "-p", "tcp", "-s",
ip2, "-j", "DROP"]
)
host2.run(
args = ["sudo", "iptables", "-A", "INPUT", "-p", "tcp", "-s",
ip1, "-j", "DROP"]
)
def reconnect(ctx, config):
assert len(config) == 2 # we can only disconnect pairs right now
# and we can only disconnect mons right now
assert config[0].startswith('mon.')
assert config[1].startswith('mon.')
(ip1, _) = get_ip_and_ports(ctx, config[0])
(ip2, _) = get_ip_and_ports(ctx, config[1])
(host1,) = ctx.cluster.only(config[0]).remotes.keys()
(host2,) = ctx.cluster.only(config[1]).remotes.keys()
assert host1 is not None
assert host2 is not None
host1.run(
args = ["sudo", "iptables", "-D", "INPUT", "-p", "tcp", "-s",
ip2, "-j", "DROP"]
)
host2.run(
args = ["sudo", "iptables", "-D", "INPUT", "-p", "tcp", "-s",
ip1, "-j", "DROP"]
)
ceph-main/qa/tasks/notification_tests.py
"""
Run a set of bucket notification tests on rgw.
"""
from io import BytesIO
from configobj import ConfigObj
import base64
import contextlib
import logging
import os
import random
import string
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def download(ctx, config):
assert isinstance(config, dict)
log.info('Downloading bucket-notifications-tests...')
testdir = teuthology.get_testdir(ctx)
branch = ctx.config.get('suite_branch')
repo = ctx.config.get('suite_repo')
log.info('Using branch %s from %s for bucket notifications tests', branch, repo)
for (client, client_config) in config.items():
ctx.cluster.only(client).run(
args=['git', 'clone', '-b', branch, repo, '{tdir}/ceph'.format(tdir=testdir)],
)
sha1 = client_config.get('sha1')
if sha1 is not None:
ctx.cluster.only(client).run(
args=[
'cd', '{tdir}/ceph'.format(tdir=testdir),
run.Raw('&&'),
'git', 'reset', '--hard', sha1,
],
)
try:
yield
finally:
log.info('Removing bucket-notifications-tests...')
testdir = teuthology.get_testdir(ctx)
for client in config:
ctx.cluster.only(client).run(
args=[
'rm',
'-rf',
'{tdir}/ceph'.format(tdir=testdir),
],
)
def _config_user(bntests_conf, section, user):
"""
Configure users for this section by stashing away keys, ids, and
email addresses.
"""
bntests_conf[section].setdefault('user_id', user)
    bntests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
bntests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
bntests_conf[section].setdefault('access_key',
''.join(random.choice(string.ascii_uppercase) for i in range(20)))
bntests_conf[section].setdefault('secret_key',
base64.b64encode(os.urandom(40)).decode())
@contextlib.contextmanager
def pre_process(ctx, config):
"""
This function creates a directory which is required to run some AMQP tests.
"""
assert isinstance(config, dict)
log.info('Pre-processing...')
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
test_dir=teuthology.get_testdir(ctx)
ctx.cluster.only(client).run(
args=[
'mkdir', '-p', '/home/ubuntu/.aws/models/s3/2006-03-01/',
],
)
ctx.cluster.only(client).run(
args=[
'cd', '/home/ubuntu/.aws/models/s3/2006-03-01/', run.Raw('&&'), 'cp', '{tdir}/ceph/examples/rgw/boto3/service-2.sdk-extras.json'.format(tdir=test_dir), 'service-2.sdk-extras.json'
],
)
try:
yield
finally:
log.info('Pre-processing completed...')
test_dir = teuthology.get_testdir(ctx)
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
ctx.cluster.only(client).run(
args=[
'rm', '-rf', '/home/ubuntu/.aws/models/s3/2006-03-01/service-2.sdk-extras.json',
],
)
ctx.cluster.only(client).run(
args=[
'cd', '/home/ubuntu/', run.Raw('&&'), 'rmdir', '-p', '.aws/models/s3/2006-03-01/',
],
)
@contextlib.contextmanager
def create_users(ctx, config):
"""
Create a main and an alternate s3 user.
"""
assert isinstance(config, dict)
log.info('Creating rgw user...')
testdir = teuthology.get_testdir(ctx)
users = {'s3 main': 'foo'}
for client in config['clients']:
bntests_conf = config['bntests_conf'][client]
for section, user in users.items():
_config_user(bntests_conf, section, '{user}.{client}'.format(user=user, client=client))
log.debug('Creating user {user} on {host}'.format(user=bntests_conf[section]['user_id'], host=client))
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_id = daemon_type + '.' + client_id
ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client_with_id,
'user', 'create',
'--uid', bntests_conf[section]['user_id'],
'--display-name', bntests_conf[section]['display_name'],
'--access-key', bntests_conf[section]['access_key'],
'--secret', bntests_conf[section]['secret_key'],
'--cluster', cluster_name,
],
)
try:
yield
finally:
for client in config['clients']:
for user in users.values():
uid = '{user}.{client}'.format(user=user, client=client)
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_id = daemon_type + '.' + client_id
ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client_with_id,
'user', 'rm',
'--uid', uid,
'--purge-data',
'--cluster', cluster_name,
],
)
@contextlib.contextmanager
def configure(ctx, config):
assert isinstance(config, dict)
log.info('Configuring bucket-notifications-tests...')
testdir = teuthology.get_testdir(ctx)
for client, properties in config['clients'].items():
(remote,) = ctx.cluster.only(client).remotes.keys()
bntests_conf = config['bntests_conf'][client]
conf_fp = BytesIO()
bntests_conf.write(conf_fp)
remote.write_file(
path='{tdir}/ceph/src/test/rgw/bucket_notification/bn-tests.{client}.conf'.format(tdir=testdir, client=client),
data=conf_fp.getvalue(),
)
remote.run(
args=[
'cd',
'{tdir}/ceph/src/test/rgw/bucket_notification'.format(tdir=testdir),
run.Raw('&&'),
'./bootstrap',
],
)
try:
yield
finally:
log.info('Removing bn-tests.conf file...')
testdir = teuthology.get_testdir(ctx)
for client, properties in config['clients'].items():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=['rm', '-f',
'{tdir}/ceph/src/test/rgw/bucket_notification/bn-tests.{client}.conf'.format(tdir=testdir,client=client),
],
)
@contextlib.contextmanager
def run_tests(ctx, config):
"""
Run the bucket notifications tests after everything is set up.
:param ctx: Context passed to task
:param config: specific configuration information
"""
assert isinstance(config, dict)
log.info('Running bucket-notifications-tests...')
testdir = teuthology.get_testdir(ctx)
for client, client_config in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
attr = ["!kafka_test", "!amqp_test", "!amqp_ssl_test", "!kafka_security_test", "!modification_required", "!manual_test"]
if 'extra_attr' in client_config:
attr = client_config.get('extra_attr')
args = [
'BNTESTS_CONF={tdir}/ceph/src/test/rgw/bucket_notification/bn-tests.{client}.conf'.format(tdir=testdir, client=client),
'{tdir}/ceph/src/test/rgw/bucket_notification/virtualenv/bin/python'.format(tdir=testdir),
'-m', 'nose',
'-s',
'{tdir}/ceph/src/test/rgw/bucket_notification/test_bn.py'.format(tdir=testdir),
'-v',
'-a', ','.join(attr),
]
remote.run(
args=args,
label="bucket notification tests against different endpoints"
)
yield
@contextlib.contextmanager
def task(ctx,config):
"""
To run bucket notification tests under Kafka endpoint the prerequisite is to run the kafka server. Also you need to pass the
'extra_attr' to the notification tests. Following is the way how to run kafka and finally bucket notification tests::
tasks:
- kafka:
client.0:
kafka_version: 2.6.0
- notification_tests:
client.0:
extra_attr: ["kafka_test"]
To run bucket notification tests under AMQP endpoint the prerequisite is to run the rabbitmq server. Also you need to pass the
'extra_attr' to the notification tests. Following is the way how to run rabbitmq and finally bucket notification tests::
tasks:
- rabbitmq:
client.0:
- notification_tests:
client.0:
extra_attr: ["amqp_test"]
If you want to run the tests against your changes pushed to your remote repo you can provide 'suite_branch' and 'suite_repo'
parameters in your teuthology-suite command. Example command for this is as follows::
teuthology-suite --ceph-repo https://github.com/ceph/ceph-ci.git -s rgw:notifications --ceph your_ceph_branch_name -m smithi --suite-repo https://github.com/your_name/ceph.git --suite-branch your_branch_name
"""
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task kafka only supports a list or dictionary for configuration"
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
clients=config.keys()
log.debug('Notifications config is %s', config)
bntests_conf = {}
for client in clients:
endpoint = ctx.rgw.role_endpoints.get(client)
assert endpoint, 'bntests: no rgw endpoint for {}'.format(client)
bntests_conf[client] = ConfigObj(
indent_type='',
infile={
'DEFAULT':
{
'port':endpoint.port,
'host':endpoint.dns_name,
},
's3 main':{}
}
)
with contextutil.nested(
lambda: download(ctx=ctx, config=config),
lambda: pre_process(ctx=ctx, config=config),
lambda: create_users(ctx=ctx, config=dict(
clients=clients,
bntests_conf=bntests_conf,
)),
lambda: configure(ctx=ctx, config=dict(
clients=config,
bntests_conf=bntests_conf,
)),
lambda: run_tests(ctx=ctx, config=config),
):
pass
yield
ceph-main/qa/tasks/nvme_loop.py
import contextlib
import logging
from io import StringIO
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
log = logging.getLogger(__name__)
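# This task exports every scratch device back to its host over the kernel
# nvmet loop transport.  The configfs tree built below looks roughly like:
#   /sys/kernel/config/nvmet/hosts/hostnqn
#   /sys/kernel/config/nvmet/ports/1/addr_trtype        <- 'loop'
#   /sys/kernel/config/nvmet/subsystems/<dev>/namespaces/1/device_path
# after which 'nvme connect -t loop' surfaces each device as /dev/nvme*.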
@contextlib.contextmanager
def task(ctx, config):
log.info('Setting up nvme_loop on scratch devices...')
host = 'hostnqn'
port = '1'
devs_by_remote = {}
old_scratch_by_remote = {}
for remote, roles in ctx.cluster.remotes.items():
if remote.is_container:
continue
devs = teuthology.get_scratch_devices(remote)
devs_by_remote[remote] = devs
base = '/sys/kernel/config/nvmet'
remote.run(
args=[
'grep', '^nvme_loop', '/proc/modules', run.Raw('||'),
'sudo', 'modprobe', 'nvme_loop',
run.Raw('&&'),
'sudo', 'mkdir', '-p', f'{base}/hosts/{host}',
run.Raw('&&'),
'sudo', 'mkdir', '-p', f'{base}/ports/{port}',
run.Raw('&&'),
'echo', 'loop', run.Raw('|'),
'sudo', 'tee', f'{base}/ports/{port}/addr_trtype',
]
)
for dev in devs:
short = dev.split('/')[-1]
log.info(f'Connecting nvme_loop {remote.shortname}:{dev}...')
remote.run(
args=[
'sudo', 'mkdir', '-p', f'{base}/subsystems/{short}',
run.Raw('&&'),
'echo', '1', run.Raw('|'),
'sudo', 'tee', f'{base}/subsystems/{short}/attr_allow_any_host',
run.Raw('&&'),
'sudo', 'mkdir', '-p', f'{base}/subsystems/{short}/namespaces/1',
run.Raw('&&'),
'echo', '-n', dev, run.Raw('|'),
'sudo', 'tee', f'{base}/subsystems/{short}/namespaces/1/device_path',
run.Raw('&&'),
'echo', '1', run.Raw('|'),
'sudo', 'tee', f'{base}/subsystems/{short}/namespaces/1/enable',
run.Raw('&&'),
'sudo', 'ln', '-s', f'{base}/subsystems/{short}',
f'{base}/ports/{port}/subsystems/{short}',
run.Raw('&&'),
'sudo', 'nvme', 'connect', '-t', 'loop', '-n', short, '-q', host,
]
)
# identify nvme_loops devices
old_scratch_by_remote[remote] = remote.read_file('/scratch_devs')
with contextutil.safe_while(sleep=1, tries=15) as proceed:
while proceed():
p = remote.run(args=['sudo', 'nvme', 'list'], stdout=StringIO())
new_devs = []
for line in p.stdout.getvalue().splitlines():
dev, _, vendor = line.split()[0:3]
if dev.startswith('/dev/') and vendor == 'Linux':
new_devs.append(dev)
log.info(f'new_devs {new_devs}')
assert len(new_devs) <= len(devs)
if len(new_devs) == len(devs):
break
remote.write_file(
path='/scratch_devs',
data='\n'.join(new_devs) + '\n',
sudo=True
)
try:
yield
finally:
for remote, devs in devs_by_remote.items():
if remote.is_container:
continue
for dev in devs:
short = dev.split('/')[-1]
log.info(f'Disconnecting nvme_loop {remote.shortname}:{dev}...')
remote.run(
args=[
'sudo', 'nvme', 'disconnect', '-n', short
],
check_status=False,
)
remote.write_file(
path='/scratch_devs',
data=old_scratch_by_remote[remote],
sudo=True
)
ceph-main/qa/tasks/object_source_down.py
"""
Test Object locations going down
"""
import logging
import time
from teuthology import misc as teuthology
from tasks import ceph_manager
from tasks.util.rados import rados
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Test handling of object location going down
"""
if config is None:
config = {}
assert isinstance(config, dict), \
        'object_source_down task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
manager.wait_for_clean()
# something that is always there
dummyfile = '/etc/fstab'
# take 0, 1 out
manager.mark_out_osd(0)
manager.mark_out_osd(1)
manager.wait_for_clean()
    # delay recovery on all four osds, and make the pg log very long
    # (to prevent backfill)
    for osd_id in range(4):
        manager.raw_cluster_cmd(
            'tell', 'osd.%d' % osd_id,
            'injectargs',
            '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
            )
# kludge to make sure they get a map
rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
# create old objects
for f in range(1, 10):
rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
manager.mark_out_osd(3)
manager.wait_till_active()
manager.mark_in_osd(0)
manager.wait_till_active()
manager.flush_pg_stats([2, 0])
manager.mark_out_osd(2)
manager.wait_till_active()
# bring up 1
manager.mark_in_osd(1)
manager.wait_till_active()
manager.flush_pg_stats([0, 1])
log.info("Getting unfound objects")
unfound = manager.get_num_unfound_objects()
assert not unfound
manager.kill_osd(2)
manager.mark_down_osd(2)
manager.kill_osd(3)
manager.mark_down_osd(3)
manager.flush_pg_stats([0, 1])
log.info("Getting unfound objects")
unfound = manager.get_num_unfound_objects()
assert unfound
ceph-main/qa/tasks/omapbench.py
"""
Run omapbench executable within teuthology
"""
import contextlib
import logging
from teuthology.orchestra import run
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Run omapbench
The config should be as follows::
omapbench:
clients: [client list]
threads: <threads at once>
objects: <number of objects to write>
entries: <number of entries per object map>
keysize: <number of characters per object map key>
valsize: <number of characters per object map val>
increment: <interval to show in histogram (in ms)>
omaptype: <how the omaps should be generated>
example::
tasks:
- ceph:
- omapbench:
clients: [client.0]
threads: 30
objects: 1000
entries: 10
keysize: 10
valsize: 100
increment: 100
omaptype: uniform
- interactive:
"""
log.info('Beginning omapbench...')
assert isinstance(config, dict), \
"please list clients to run on"
omapbench = {}
testdir = teuthology.get_testdir(ctx)
    log.debug('increment: %s', str(config.get('increment', -1)))
for role in config.get('clients', ['client.0']):
assert isinstance(role, str)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
(remote,) = ctx.cluster.only(role).remotes.keys()
proc = remote.run(
args=[
"/bin/sh", "-c",
" ".join(['adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage',
'omapbench',
'--name', role[len(PREFIX):],
'-t', str(config.get('threads', 30)),
'-o', str(config.get('objects', 1000)),
'--entries', str(config.get('entries',10)),
'--keysize', str(config.get('keysize',10)),
'--valsize', str(config.get('valsize',1000)),
'--inc', str(config.get('increment',10)),
'--omaptype', str(config.get('omaptype','uniform'))
]).format(tdir=testdir),
],
logger=log.getChild('omapbench.{id}'.format(id=id_)),
stdin=run.PIPE,
wait=False
)
omapbench[id_] = proc
try:
yield
finally:
log.info('joining omapbench')
run.wait(omapbench.values())
ceph-main/qa/tasks/openssl_keys.py
"""
Generates and installs a signed SSL certificate.
"""
import argparse
import logging
import os
from teuthology import misc
from teuthology.exceptions import ConfigError
from teuthology.orchestra import run
from teuthology.task import Task
log = logging.getLogger(__name__)
class OpenSSLKeys(Task):
name = 'openssl_keys'
"""
Generates and installs a signed SSL certificate.
To create a self-signed certificate:
- openssl_keys:
# certificate name
root: # results in root.key and root.crt
# [required] make the private key and certificate available in this client's test directory
client: client.0
# common name, defaults to `hostname`. chained certificates must not share a common name
cn: teuthology
# private key type for -newkey, defaults to rsa:2048
key-type: rsa:4096
# install the certificate as trusted on these clients:
install: [client.0, client.1]
To create a certificate signed by a ca certificate:
- openssl_keys:
root: (self-signed certificate as above)
...
cert-for-client1:
client: client.1
# use another ssl certificate (by 'name') as the certificate authority
ca: root # --CAkey=root.key -CA=root.crt
# embed the private key in the certificate file
embed-key: true
"""
def __init__(self, ctx, config):
super(OpenSSLKeys, self).__init__(ctx, config)
self.certs = []
self.installed = []
def setup(self):
# global dictionary allows other tasks to look up certificate paths
if not hasattr(self.ctx, 'ssl_certificates'):
self.ctx.ssl_certificates = {}
# use testdir/ca as a working directory
self.cadir = '/'.join((misc.get_testdir(self.ctx), 'ca'))
# make sure self-signed certs get added first, they don't have 'ca' field
configs = sorted(self.config.items(), key=lambda x: 'ca' in x[1])
for name, config in configs:
# names must be unique to avoid clobbering each others files
if name in self.ctx.ssl_certificates:
raise ConfigError('ssl: duplicate certificate name {}'.format(name))
# create the key and certificate
cert = self.create_cert(name, config)
self.ctx.ssl_certificates[name] = cert
self.certs.append(cert)
# install as trusted on the requested clients
for client in config.get('install', []):
installed = self.install_cert(cert, client)
self.installed.append(installed)
def teardown(self):
"""
Clean up any created/installed certificate files.
"""
for cert in self.certs:
self.remove_cert(cert)
for installed in self.installed:
self.uninstall_cert(installed)
def create_cert(self, name, config):
"""
Create a certificate with the given configuration.
"""
cert = argparse.Namespace()
cert.name = name
cert.key_type = config.get('key-type', 'rsa:2048')
cert.client = config.get('client', None)
if not cert.client:
raise ConfigError('ssl: missing required field "client"')
(cert.remote,) = self.ctx.cluster.only(cert.client).remotes.keys()
cert.remote.run(args=['mkdir', '-p', self.cadir])
cert.key = f'{self.cadir}/{cert.name}.key'
cert.certificate = f'{self.cadir}/{cert.name}.crt'
san_ext = []
add_san_default = False
cn = config.get('cn', '')
if cn == '':
cn = cert.remote.hostname
add_san_default = True
if config.get('add-san', add_san_default):
ext = f'{self.cadir}/{cert.name}.ext'
san_ext = ['-extfile', ext]
# provide the common name in -subj to avoid the openssl command prompts
subject = f'/CN={cn}'
# if a ca certificate is provided, use it to sign the new certificate
ca = config.get('ca', None)
if ca:
# the ca certificate must have been created by a prior ssl task
ca_cert = self.ctx.ssl_certificates.get(ca, None)
if not ca_cert:
raise ConfigError(f'ssl: ca {ca} not found for certificate {cert.name}')
csr = f'{self.cadir}/{cert.name}.csr'
srl = f'{self.cadir}/{ca_cert.name}.srl'
remove_files = ['rm', '-f', csr, srl]
# these commands are run on the ca certificate's client because
# they need access to its private key and cert
# generate a private key and signing request
ca_cert.remote.run(args=['openssl', 'req', '-nodes',
'-newkey', cert.key_type, '-keyout', cert.key,
'-out', csr, '-subj', subject])
if san_ext:
remove_files.append(ext)
ca_cert.remote.write_file(path=ext,
data='subjectAltName = DNS:{},IP:{}'.format(
cn,
config.get('ip', cert.remote.ip_address)))
# create the signed certificate
ca_cert.remote.run(args=['openssl', 'x509', '-req', '-in', csr,
'-CA', ca_cert.certificate, '-CAkey', ca_cert.key, '-CAcreateserial',
'-out', cert.certificate, '-days', '365', '-sha256'] + san_ext)
ca_cert.remote.run(args=remove_files) # clean up the signing request and serial
# verify the new certificate against its ca cert
ca_cert.remote.run(args=['openssl', 'verify',
'-CAfile', ca_cert.certificate, cert.certificate])
if cert.remote != ca_cert.remote:
# copy to remote client
self.remote_copy_file(ca_cert.remote, cert.certificate, cert.remote, cert.certificate)
self.remote_copy_file(ca_cert.remote, cert.key, cert.remote, cert.key)
# clean up the local copies
ca_cert.remote.run(args=['rm', cert.certificate, cert.key])
# verify the remote certificate (requires ca to be in its trusted ca certificate store)
cert.remote.run(args=['openssl', 'verify', cert.certificate])
else:
# otherwise, generate a private key and use it to self-sign a new certificate
cert.remote.run(args=['openssl', 'req', '-x509', '-nodes',
'-newkey', cert.key_type, '-keyout', cert.key,
'-days', '365', '-out', cert.certificate, '-subj', subject])
if config.get('embed-key', False):
# append the private key to the certificate file
cert.remote.run(args=['cat', cert.key, run.Raw('>>'), cert.certificate])
return cert
def remove_cert(self, cert):
"""
Delete all of the files associated with the given certificate.
"""
# remove the private key and certificate
cert.remote.run(args=['rm', '-f', cert.certificate, cert.key])
# remove ca subdirectory if it's empty
cert.remote.run(args=['rmdir', '--ignore-fail-on-non-empty', self.cadir])
def install_cert(self, cert, client):
"""
Install as a trusted ca certificate on the given client.
"""
(remote,) = self.ctx.cluster.only(client).remotes.keys()
installed = argparse.Namespace()
installed.remote = remote
if remote.os.package_type == 'deb':
installed.path = '/usr/local/share/ca-certificates/{}.crt'.format(cert.name)
installed.command = ['sudo', 'update-ca-certificates']
else:
installed.path = '/usr/share/pki/ca-trust-source/anchors/{}.crt'.format(cert.name)
installed.command = ['sudo', 'update-ca-trust']
cp_or_mv = 'cp'
if remote != cert.remote:
# copy into remote cadir (with mkdir if necessary)
remote.run(args=['mkdir', '-p', self.cadir])
self.remote_copy_file(cert.remote, cert.certificate, remote, cert.certificate)
cp_or_mv = 'mv' # move this remote copy into the certificate store
# install into certificate store as root
remote.run(args=['sudo', cp_or_mv, cert.certificate, installed.path])
remote.run(args=installed.command)
return installed
def uninstall_cert(self, installed):
"""
Uninstall a certificate from the trusted certificate store.
"""
installed.remote.run(args=['sudo', 'rm', installed.path])
installed.remote.run(args=installed.command)
def remote_copy_file(self, from_remote, from_path, to_remote, to_path):
"""
Copies a file from one remote to another.
The remotes don't have public-key auth for 'scp' or misc.copy_file(),
so this copies through an intermediate local tmp file.
"""
log.info('copying from {}:{} to {}:{}...'.format(from_remote, from_path, to_remote, to_path))
local_path = from_remote.get_file(from_path)
try:
to_remote.put_file(local_path, to_path)
finally:
os.remove(local_path)
task = OpenSSLKeys
ceph-main/qa/tasks/osd_backfill.py
"""
Osd backfill test
"""
import logging
import time
from tasks import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def rados_start(ctx, remote, cmd):
"""
Run a remote rados command (currently used to only write data)
"""
log.info("rados %s" % ' '.join(cmd))
testdir = teuthology.get_testdir(ctx)
pre = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rados',
];
pre.extend(cmd)
proc = remote.run(
args=pre,
wait=False,
)
return proc
def task(ctx, config):
"""
Test backfill
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'thrashosds task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
assert num_osds == 3
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
manager.flush_pg_stats([0, 1, 2])
manager.wait_for_clean()
# write some data
p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096',
'--no-cleanup'])
err = p.wait()
log.info('err is %d' % err)
# mark osd.0 out to trigger a rebalance/backfill
manager.mark_out_osd(0)
    # also mark it down so it won't be included in pg_temps
manager.kill_osd(0)
manager.mark_down_osd(0)
# wait for everything to peer and be happy...
manager.flush_pg_stats([1, 2])
manager.wait_for_recovery()
# write some new data
p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '30', 'write', '-b', '4096',
'--no-cleanup'])
time.sleep(15)
# blackhole + restart osd.1
# this triggers a divergent backfill target
manager.blackhole_kill_osd(1)
time.sleep(2)
manager.revive_osd(1)
# wait for our writes to complete + succeed
err = p.wait()
log.info('err is %d' % err)
# wait for osd.1 and osd.2 to be up
manager.wait_till_osd_is_up(1)
manager.wait_till_osd_is_up(2)
# cluster must recover
manager.flush_pg_stats([1, 2])
manager.wait_for_recovery()
# re-add osd.0
manager.revive_osd(0)
manager.flush_pg_stats([1, 2])
manager.wait_for_clean()
ceph-main/qa/tasks/osd_failsafe_enospc.py
"""
Handle osdfailsafe configuration settings (nearfull ratio and full ratio)
"""
from io import StringIO
import logging
import time
from teuthology.orchestra import run
from tasks.util.rados import rados
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Test handling of osd_failsafe_nearfull_ratio and osd_failsafe_full_ratio
configuration settings
In order for test to pass must use log-ignorelist as follows
tasks:
- chef:
- install:
- ceph:
log-ignorelist: ['OSD near full', 'OSD full dropping all updates']
- osd_failsafe_enospc:
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'osd_failsafe_enospc task only accepts a dict for configuration'
# Give 2 seconds for injectargs + osd_op_complaint_time (30) + 2 * osd_heartbeat_interval (6) + 6 padding
sleep_time = 50
# something that is always there
dummyfile = '/etc/fstab'
dummyfile2 = '/etc/resolv.conf'
manager = ctx.managers['ceph']
# create 1 pg pool with 1 rep which can only be on osd.0
osds = manager.get_osd_dump()
for osd in osds:
if osd['osd'] != 0:
manager.mark_out_osd(osd['osd'])
log.info('creating pool foo')
manager.create_pool("foo")
manager.raw_cluster_cmd('osd', 'pool', 'set', 'foo', 'size', '1')
# State NONE -> NEAR
log.info('1. Verify warning messages when exceeding nearfull_ratio')
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
proc = mon.run(
args=[
'sudo',
'daemon-helper',
'kill',
'ceph', '-w'
],
stdin=run.PIPE,
stdout=StringIO(),
wait=False,
)
manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .00001')
time.sleep(sleep_time)
proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w
proc.wait()
lines = proc.stdout.getvalue().split('\n')
    count = len([line for line in lines if '[WRN] OSD near full' in line])
    assert count == 2, 'Incorrect number of warning messages expected 2 got %d' % count
    count = len([line for line in lines if '[ERR] OSD full dropping all updates' in line])
assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
# State NEAR -> FULL
log.info('2. Verify error messages when exceeding full_ratio')
proc = mon.run(
args=[
'sudo',
'daemon-helper',
'kill',
'ceph', '-w'
],
stdin=run.PIPE,
stdout=StringIO(),
wait=False,
)
manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001')
time.sleep(sleep_time)
proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w
proc.wait()
lines = proc.stdout.getvalue().split('\n')
    count = len([line for line in lines if '[ERR] OSD full dropping all updates' in line])
assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count
log.info('3. Verify write failure when exceeding full_ratio')
# Write data should fail
ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile1', dummyfile])
assert ret != 0, 'Expected write failure but it succeeded with exit status 0'
# Put back default
manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97')
time.sleep(10)
# State FULL -> NEAR
log.info('4. Verify write success when NOT exceeding full_ratio')
# Write should succeed
ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile2', dummyfile2])
assert ret == 0, 'Expected write to succeed, but got exit status %d' % ret
log.info('5. Verify warning messages again when exceeding nearfull_ratio')
proc = mon.run(
args=[
'sudo',
'daemon-helper',
'kill',
'ceph', '-w'
],
stdin=run.PIPE,
stdout=StringIO(),
wait=False,
)
time.sleep(sleep_time)
proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w
proc.wait()
lines = proc.stdout.getvalue().split('\n')
    count = len([line for line in lines if '[WRN] OSD near full' in line])
    assert count == 1 or count == 2, 'Incorrect number of warning messages expected 1 or 2 got %d' % count
    count = len([line for line in lines if '[ERR] OSD full dropping all updates' in line])
assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .90')
time.sleep(10)
# State NONE -> FULL
log.info('6. Verify error messages again when exceeding full_ratio')
proc = mon.run(
args=[
'sudo',
'daemon-helper',
'kill',
'ceph', '-w'
],
stdin=run.PIPE,
stdout=StringIO(),
wait=False,
)
manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001')
time.sleep(sleep_time)
proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w
proc.wait()
lines = proc.stdout.getvalue().split('\n')
    count = len([line for line in lines if '[WRN] OSD near full' in line])
    assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count
    count = len([line for line in lines if '[ERR] OSD full dropping all updates' in line])
assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count
# State FULL -> NONE
log.info('7. Verify no messages settings back to default')
manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97')
time.sleep(10)
proc = mon.run(
args=[
'sudo',
'daemon-helper',
'kill',
'ceph', '-w'
],
stdin=run.PIPE,
stdout=StringIO(),
wait=False,
)
time.sleep(sleep_time)
proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w
proc.wait()
lines = proc.stdout.getvalue().split('\n')
    count = len([line for line in lines if '[WRN] OSD near full' in line])
    assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count
    count = len([line for line in lines if '[ERR] OSD full dropping all updates' in line])
assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
log.info('Test Passed')
# Bring all OSDs back in
manager.remove_pool("foo")
for osd in osds:
if osd['osd'] != 0:
manager.mark_in_osd(osd['osd'])
ceph-main/qa/tasks/osd_max_pg_per_osd.py
import logging
import random
log = logging.getLogger(__name__)
def pg_num_in_all_states(pgs, *states):
return sum(1 for state in pgs.values()
if all(s in state for s in states))
def pg_num_in_any_state(pgs, *states):
return sum(1 for state in pgs.values()
if any(s in state for s in states))
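# Both helpers do substring matching on the pg state strings, e.g. with
# pgs = {'1.0': 'active+clean', '1.1': 'creating'} (an illustrative map):
#   pg_num_in_all_states(pgs, 'active', 'clean')    -> 1
#   pg_num_in_any_state(pgs, 'unknown', 'creating') -> 1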
def test_create_from_mon(ctx, config):
"""
    osd should stop creating new pgs if the number of pgs it serves
    exceeds the max-pg-per-osd setting, and it should resume the previously
    suspended pg creations once its pg number drops below the setting.
    How it works::
    1. set the hard limit of pg-per-osd to "2"
    2. create pool.a with pg_num=2
       # all pgs should be active+clean
    3. create pool.b with pg_num=2
       # new pgs belonging to this pool should be unknown (the primary osd
       reaches the limit) or creating (replica osd reaches the limit)
    4. remove pool.a
    5. all pgs belonging to pool.b should be active+clean
"""
pg_num = config.get('pg_num', 2)
manager = ctx.managers['ceph']
log.info('1. creating pool.a')
pool_a = manager.create_pool_with_unique_name(pg_num)
pg_states = manager.wait_till_pg_convergence(300)
pg_created = pg_num_in_all_states(pg_states, 'active', 'clean')
assert pg_created == pg_num
log.info('2. creating pool.b')
pool_b = manager.create_pool_with_unique_name(pg_num)
pg_states = manager.wait_till_pg_convergence(300)
pg_created = pg_num_in_all_states(pg_states, 'active', 'clean')
assert pg_created == pg_num
pg_pending = pg_num_in_any_state(pg_states, 'unknown', 'creating')
assert pg_pending == pg_num
log.info('3. removing pool.a')
manager.remove_pool(pool_a)
pg_states = manager.wait_till_pg_convergence(300)
assert len(pg_states) == pg_num
pg_created = pg_num_in_all_states(pg_states, 'active', 'clean')
assert pg_created == pg_num
# cleanup
manager.remove_pool(pool_b)
def test_create_from_peer(ctx, config):
"""
    osd should stop creating new pgs if the number of pgs it serves
    exceeds the max-pg-per-osd setting, and it should resume the previously
    suspended pg creations once its pg number drops below the setting.
How it works::
0. create 4 OSDs.
1. create pool.a with pg_num=1, size=2
pg will be mapped to osd.0, and osd.1, and it should be active+clean
2. create pool.b with pg_num=1, size=2.
       if the pgs get stuck in creating, delete the pool and try
again, eventually we'll get the pool to land on the other 2 osds that
aren't occupied by pool.a. (this will also verify that pgs for deleted
pools get cleaned out of the creating wait list.)
3. mark an osd out. verify that some pgs get stuck stale or peering.
4. delete a pool, verify pgs go active.
"""
pg_num = config.get('pg_num', 1)
from_primary = config.get('from_primary', True)
manager = ctx.managers['ceph']
log.info('1. creating pool.a')
pool_a = manager.create_pool_with_unique_name(pg_num)
pg_states = manager.wait_till_pg_convergence(300)
pg_created = pg_num_in_all_states(pg_states, 'active', 'clean')
assert pg_created == pg_num
log.info('2. creating pool.b')
while True:
pool_b = manager.create_pool_with_unique_name(pg_num)
pg_states = manager.wait_till_pg_convergence(300)
pg_created = pg_num_in_all_states(pg_states, 'active', 'clean')
assert pg_created >= pg_num
pg_pending = pg_num_in_any_state(pg_states, 'unknown', 'creating')
assert pg_pending == pg_num * 2 - pg_created
if pg_created == pg_num * 2:
break
manager.remove_pool(pool_b)
log.info('3. mark an osd out')
pg_stats = manager.get_pg_stats()
pg = random.choice(pg_stats)
if from_primary:
victim = pg['acting'][-1]
else:
victim = pg['acting'][0]
manager.mark_out_osd(victim)
pg_states = manager.wait_till_pg_convergence(300)
pg_stuck = pg_num_in_any_state(pg_states, 'activating', 'stale', 'peering')
assert pg_stuck > 0
log.info('4. removing pool.b')
manager.remove_pool(pool_b)
manager.wait_for_clean(30)
# cleanup
manager.remove_pool(pool_a)
def task(ctx, config):
assert isinstance(config, dict), \
'osd_max_pg_per_osd task only accepts a dict for config'
if config.get('test_create_from_mon', True):
test_create_from_mon(ctx, config)
else:
test_create_from_peer(ctx, config)
ceph-main/qa/tasks/osd_recovery.py
"""
osd recovery
"""
import logging
import time
from tasks import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def rados_start(testdir, remote, cmd):
"""
Run a remote rados command (currently used to only write data)
"""
log.info("rados %s" % ' '.join(cmd))
pre = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rados',
];
pre.extend(cmd)
proc = remote.run(
args=pre,
wait=False,
)
return proc
def task(ctx, config):
"""
Test (non-backfill) recovery
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
testdir = teuthology.get_testdir(ctx)
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
assert num_osds == 3
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
manager.flush_pg_stats([0, 1, 2])
manager.wait_for_clean()
# test some osdmap flags
manager.raw_cluster_cmd('osd', 'set', 'noin')
manager.raw_cluster_cmd('osd', 'set', 'noout')
manager.raw_cluster_cmd('osd', 'set', 'noup')
manager.raw_cluster_cmd('osd', 'set', 'nodown')
manager.raw_cluster_cmd('osd', 'unset', 'noin')
manager.raw_cluster_cmd('osd', 'unset', 'noout')
manager.raw_cluster_cmd('osd', 'unset', 'noup')
manager.raw_cluster_cmd('osd', 'unset', 'nodown')
# write some new data
p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '20', 'write', '-b', '4096',
'--no-cleanup'])
time.sleep(15)
# trigger a divergent target:
# blackhole + restart osd.1 (shorter log)
manager.blackhole_kill_osd(1)
# kill osd.2 (longer log... we'll make it divergent below)
manager.kill_osd(2)
time.sleep(2)
manager.revive_osd(1)
# wait for our writes to complete + succeed
err = p.wait()
log.info('err is %d' % err)
# cluster must repeer
manager.flush_pg_stats([0, 1])
manager.wait_for_active_or_down()
# write some more (make sure osd.2 really is divergent)
p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096'])
p.wait()
# revive divergent osd
manager.revive_osd(2)
while len(manager.get_osd_status()['up']) < 3:
log.info('waiting a bit...')
time.sleep(2)
log.info('3 are up!')
# cluster must recover
manager.flush_pg_stats([0, 1, 2])
manager.wait_for_clean()
def test_incomplete_pgs(ctx, config):
"""
Test handling of incomplete pgs. Requires 4 osds.
"""
testdir = teuthology.get_testdir(ctx)
if config is None:
config = {}
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
assert num_osds == 4
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while len(manager.get_osd_status()['up']) < 4:
time.sleep(10)
manager.flush_pg_stats([0, 1, 2, 3])
manager.wait_for_clean()
log.info('Testing incomplete pgs...')
for i in range(4):
manager.set_config(
i,
osd_recovery_delay_start=1000)
# move data off of osd.0, osd.1
manager.raw_cluster_cmd('osd', 'out', '0', '1')
manager.flush_pg_stats([0, 1, 2, 3], [0, 1])
manager.wait_for_clean()
# lots of objects in rbd (no pg log, will backfill)
p = rados_start(testdir, mon,
['-p', 'rbd', 'bench', '20', 'write', '-b', '1',
'--no-cleanup'])
p.wait()
# few objects in rbd pool (with pg log, normal recovery)
for f in range(1, 20):
p = rados_start(testdir, mon, ['-p', 'rbd', 'put',
'foo.%d' % f, '/etc/passwd'])
p.wait()
# move it back
manager.raw_cluster_cmd('osd', 'in', '0', '1')
manager.raw_cluster_cmd('osd', 'out', '2', '3')
time.sleep(10)
manager.flush_pg_stats([0, 1, 2, 3], [2, 3])
time.sleep(10)
manager.wait_for_active()
assert not manager.is_clean()
assert not manager.is_recovered()
# kill 2 + 3
log.info('stopping 2,3')
manager.kill_osd(2)
manager.kill_osd(3)
log.info('...')
manager.raw_cluster_cmd('osd', 'down', '2', '3')
manager.flush_pg_stats([0, 1])
manager.wait_for_active_or_down()
assert manager.get_num_down() > 0
# revive 2 + 3
manager.revive_osd(2)
manager.revive_osd(3)
while len(manager.get_osd_status()['up']) < 4:
log.info('waiting a bit...')
time.sleep(2)
log.info('all are up!')
for i in range(4):
manager.kick_recovery_wq(i)
# cluster must recover
manager.wait_for_clean()
ceph-main/qa/tasks/peer.py
"""
Peer test (Single test, not much configurable here)
"""
import logging
import json
import time
from tasks import ceph_manager
from tasks.util.rados import rados
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Test peering.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'peer task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
manager.flush_pg_stats([0, 1, 2])
manager.wait_for_clean()
for i in range(3):
manager.set_config(
i,
osd_recovery_delay_start=120)
    # take one osd down
manager.kill_osd(2)
manager.mark_down_osd(2)
# kludge to make sure they get a map
rados(ctx, mon, ['-p', 'data', 'get', 'dummy', '-'])
manager.flush_pg_stats([0, 1])
manager.wait_for_recovery()
# kill another and revive 2, so that some pgs can't peer.
manager.kill_osd(1)
manager.mark_down_osd(1)
manager.revive_osd(2)
manager.wait_till_osd_is_up(2)
manager.flush_pg_stats([0, 2])
manager.wait_for_active_or_down()
manager.flush_pg_stats([0, 2])
# look for down pgs
num_down_pgs = 0
pgs = manager.get_pg_stats()
for pg in pgs:
out = manager.raw_cluster_cmd('pg', pg['pgid'], 'query')
log.debug("out string %s",out)
j = json.loads(out)
log.info("pg is %s, query json is %s", pg, j)
if pg['state'].count('down'):
num_down_pgs += 1
# verify that it is blocked on osd.1
rs = j['recovery_state']
assert len(rs) >= 2
assert rs[0]['name'] == 'Started/Primary/Peering/Down'
assert rs[1]['name'] == 'Started/Primary/Peering'
assert rs[1]['blocked']
assert rs[1]['down_osds_we_would_probe'] == [1]
assert len(rs[1]['peering_blocked_by']) == 1
assert rs[1]['peering_blocked_by'][0]['osd'] == 1
assert num_down_pgs > 0
# bring it all back
manager.revive_osd(1)
manager.wait_till_osd_is_up(1)
manager.flush_pg_stats([0, 1, 2])
manager.wait_for_clean()
ceph-main/qa/tasks/peering_speed_test.py
"""
Remotely run peering tests.
"""
import logging
import time
from teuthology.task.args import argify
log = logging.getLogger(__name__)
POOLNAME = "POOLNAME"
ARGS = [
('num_pgs', 'number of pgs to create', 256, int),
('max_time', 'seconds to complete peering', 0, int),
('runs', 'trials to run', 10, int),
('num_objects', 'objects to create', 256 * 1024, int),
('object_size', 'size in bytes for objects', 64, int),
('creation_time_limit', 'time limit for pool population', 60*60, int),
('create_threads', 'concurrent writes for create', 256, int)
]
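# A minimal sketch of a suite fragment driving this task (values are
# illustrative; defaults come from ARGS above):
#
#   tasks:
#   - ceph:
#   - peering_speed_test:
#       num_pgs: 128
#       runs: 5
#       max_time: 300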
def setup(ctx, config):
"""
Setup peering test on remotes.
"""
manager = ctx.managers['ceph']
manager.clear_pools()
manager.create_pool(POOLNAME, config.num_pgs)
log.info("populating pool")
manager.rados_write_objects(
POOLNAME,
config.num_objects,
config.object_size,
config.creation_time_limit,
config.create_threads)
log.info("done populating pool")
def do_run(ctx, config):
"""
Perform the test.
"""
start = time.time()
# mark in osd
manager = ctx.managers['ceph']
manager.mark_in_osd(0)
log.info("writing out objects")
manager.rados_write_objects(
POOLNAME,
config.num_pgs, # write 1 object per pg or so
1,
config.creation_time_limit,
config.num_pgs, # lots of concurrency
cleanup = True)
peering_end = time.time()
log.info("peering done, waiting on recovery")
manager.wait_for_clean()
log.info("recovery done")
recovery_end = time.time()
if config.max_time:
assert(peering_end - start < config.max_time)
manager.mark_out_osd(0)
manager.wait_for_clean()
return {
'time_to_active': peering_end - start,
'time_to_clean': recovery_end - start
}
@argify("peering_speed_test", ARGS)
def task(ctx, config):
"""
Peering speed test
"""
setup(ctx, config)
manager = ctx.managers['ceph']
manager.mark_out_osd(0)
manager.wait_for_clean()
ret = []
for i in range(config.runs):
log.info("Run {i}".format(i = i))
ret.append(do_run(ctx, config))
manager.mark_in_osd(0)
ctx.summary['recovery_times'] = {
'runs': ret
}
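
# A small helper sketch (illustrative, unused by the task) showing how
# the per-run dicts returned by do_run() could be averaged before
# reading them out of ctx.summary['recovery_times']:
def _summarize(runs):
    n = max(len(runs), 1)
    return {
        'avg_time_to_active': sum(r['time_to_active'] for r in runs) / n,
        'avg_time_to_clean': sum(r['time_to_clean'] for r in runs) / n,
    }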
ceph-main/qa/tasks/populate_rbd_pool.py
"""
Populate rbd pools
"""
import contextlib
import logging
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Populate <num_pools> pools with prefix <pool_prefix> with <num_images>
rbd images at <num_snaps> snaps
The config could be as follows::
populate_rbd_pool:
client: <client>
pool_prefix: foo
num_pools: 5
num_images: 10
num_snaps: 3
image_size: 10737418240
"""
if config is None:
config = {}
client = config.get("client", "client.0")
pool_prefix = config.get("pool_prefix", "foo")
num_pools = config.get("num_pools", 2)
num_images = config.get("num_images", 20)
num_snaps = config.get("num_snaps", 4)
image_size = config.get("image_size", 100)
write_size = config.get("write_size", 1024*1024)
write_threads = config.get("write_threads", 10)
write_total_per_snap = config.get("write_total_per_snap", 1024*1024*30)
(remote,) = ctx.cluster.only(client).remotes.keys()
for poolid in range(num_pools):
poolname = "%s-%s" % (pool_prefix, str(poolid))
log.info("Creating pool %s" % (poolname,))
ctx.managers['ceph'].create_pool(poolname)
for imageid in range(num_images):
imagename = "rbd-%s" % (str(imageid),)
log.info("Creating imagename %s" % (imagename,))
remote.run(
args = [
"rbd",
"create",
imagename,
"--image-format", "1",
"--size", str(image_size),
"--pool", str(poolname)])
def bench_run():
remote.run(
args = [
"rbd",
"bench-write",
imagename,
"--pool", poolname,
"--io-size", str(write_size),
"--io-threads", str(write_threads),
"--io-total", str(write_total_per_snap),
"--io-pattern", "rand"])
log.info("imagename %s first bench" % (imagename,))
bench_run()
for snapid in range(num_snaps):
snapname = "snap-%s" % (str(snapid),)
log.info("imagename %s creating snap %s" % (imagename, snapname))
remote.run(
args = [
"rbd", "snap", "create",
"--pool", poolname,
"--snap", snapname,
imagename
])
bench_run()
try:
yield
finally:
log.info('done')
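
# For reference, with the defaults above the task shells out to
# commands of the following shape for each image (pool and image names
# come from the prefix scheme in the loops):
#
#   rbd create rbd-0 --image-format 1 --size 100 --pool foo-0
#   rbd bench-write rbd-0 --pool foo-0 --io-size 1048576 \
#       --io-threads 10 --io-total 31457280 --io-pattern rand
#   rbd snap create --pool foo-0 --snap snap-0 rbd-0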
ceph-main/qa/tasks/pykmip.py
"""
Deploy and configure PyKMIP for Teuthology
"""
import argparse
import contextlib
import logging
import time
import tempfile
import json
import os
from io import BytesIO
from teuthology.orchestra.daemon import DaemonGroup
from teuthology.orchestra.remote import Remote
import pprint
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
from teuthology.packaging import install_package
from teuthology.packaging import remove_package
from teuthology.exceptions import ConfigError
from tasks.util import get_remote_for_role
log = logging.getLogger(__name__)
def get_pykmip_dir(ctx):
return '{tdir}/pykmip'.format(tdir=teuthology.get_testdir(ctx))
def run_in_pykmip_dir(ctx, client, args, **kwargs):
(remote,) = [client] if isinstance(client,Remote) else ctx.cluster.only(client).remotes.keys()
return remote.run(
args=['cd', get_pykmip_dir(ctx), run.Raw('&&'), ] + args,
**kwargs
)
def run_in_pykmip_venv(ctx, client, args, **kwargs):
return run_in_pykmip_dir(ctx, client,
args = ['.', '.pykmipenv/bin/activate',
run.Raw('&&')
] + args, **kwargs)
@contextlib.contextmanager
def download(ctx, config):
"""
Download PyKMIP from github.
Remove downloaded file upon exit.
The context passed in should be identical to the context
passed in to the main task.
"""
assert isinstance(config, dict)
log.info('Downloading pykmip...')
pykmipdir = get_pykmip_dir(ctx)
for (client, cconf) in config.items():
branch = cconf.get('force-branch', 'master')
repo = cconf.get('force-repo', 'https://github.com/OpenKMIP/PyKMIP')
sha1 = cconf.get('sha1')
log.info("Using branch '%s' for pykmip", branch)
log.info('sha1=%s', sha1)
ctx.cluster.only(client).run(
args=[
'git', 'clone', '-b', branch, repo,
pykmipdir,
],
)
if sha1 is not None:
run_in_pykmip_dir(ctx, client, [
'git', 'reset', '--hard', sha1,
],
)
try:
yield
finally:
log.info('Removing pykmip...')
for client in config:
ctx.cluster.only(client).run(
args=[ 'rm', '-rf', pykmipdir ],
)
_bindep_txt = """# should be part of PyKMIP
libffi-dev [platform:dpkg]
libffi-devel [platform:rpm]
libssl-dev [platform:dpkg]
openssl-devel [platform:redhat]
libopenssl-devel [platform:suse]
libsqlite3-dev [platform:dpkg]
sqlite-devel [platform:rpm]
python-dev [platform:dpkg]
python-devel [(platform:redhat platform:base-py2)]
python3-dev [platform:dpkg]
python3-devel [(platform:redhat platform:base-py3) platform:suse]
python3 [platform:suse]
"""
@contextlib.contextmanager
def install_packages(ctx, config):
"""
    Install the packaged dependencies of PyKMIP.
    Remove the installed packages upon exit.
The context passed in should be identical to the context
passed in to the main task.
"""
assert isinstance(config, dict)
    log.info('Installing system dependencies for PyKMIP...')
packages = {}
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
# use bindep to read which dependencies we need from temp/bindep.txt
fd, local_temp_path = tempfile.mkstemp(suffix='.txt',
prefix='bindep-')
os.write(fd, _bindep_txt.encode())
os.close(fd)
fd, remote_temp_path = tempfile.mkstemp(suffix='.txt',
prefix='bindep-')
os.close(fd)
remote.put_file(local_temp_path, remote_temp_path)
os.remove(local_temp_path)
run_in_pykmip_venv(ctx, remote, ['pip', 'install', 'bindep'])
r = run_in_pykmip_venv(ctx, remote,
['bindep', '--brief', '--file', remote_temp_path],
stdout=BytesIO(),
check_status=False) # returns 1 on success?
packages[client] = r.stdout.getvalue().decode().splitlines()
for dep in packages[client]:
install_package(dep, remote)
try:
yield
finally:
log.info('Removing system dependencies of PyKMIP...')
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
for dep in packages[client]:
remove_package(dep, remote)
@contextlib.contextmanager
def setup_venv(ctx, config):
"""
Setup the virtualenv for PyKMIP using pip.
"""
assert isinstance(config, dict)
log.info('Setting up virtualenv for pykmip...')
for (client, _) in config.items():
run_in_pykmip_dir(ctx, client, ['python3', '-m', 'venv', '.pykmipenv'])
run_in_pykmip_venv(ctx, client, ['pip', 'install', '--upgrade', 'pip'])
run_in_pykmip_venv(ctx, client, ['pip', 'install', 'pytz', '-e', get_pykmip_dir(ctx)])
yield
def assign_ports(ctx, config, initial_port):
"""
Assign port numbers starting from @initial_port
"""
port = initial_port
role_endpoints = {}
for remote, roles_for_host in ctx.cluster.remotes.items():
for role in roles_for_host:
if role in config:
r = get_remote_for_role(ctx, role)
role_endpoints[role] = r.ip_address, port, r.hostname
port += 1
return role_endpoints
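
# Illustrative only (hypothetical host values): for a config holding a
# single 'client.0' role and initial_port=5696, assign_ports() returns
# something like {'client.0': ('10.0.0.1', 5696, 'smithi001')}.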
def copy_policy_json(ctx, cclient, cconfig):
run_in_pykmip_dir(ctx, cclient,
['cp',
get_pykmip_dir(ctx)+'/examples/policy.json',
get_pykmip_dir(ctx)])
_pykmip_configuration = """# configuration for pykmip
[server]
hostname={ipaddr}
port={port}
certificate_path={servercert}
key_path={serverkey}
ca_path={clientca}
auth_suite=TLS1.2
policy_path={confdir}
enable_tls_client_auth=False
tls_cipher_suites=
TLS_RSA_WITH_AES_128_CBC_SHA256
TLS_RSA_WITH_AES_256_CBC_SHA256
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
logging_level=DEBUG
database_path={confdir}/pykmip.sqlite
[client]
host={hostname}
port=5696
certfile={clientcert}
keyfile={clientkey}
ca_certs={clientca}
ssl_version=PROTOCOL_TLSv1_2
"""
def create_pykmip_conf(ctx, cclient, cconfig):
log.info('#0 cclient={} cconfig={}'.format(pprint.pformat(cclient),pprint.pformat(cconfig)))
(remote,) = ctx.cluster.only(cclient).remotes.keys()
pykmip_ipaddr, pykmip_port, pykmip_hostname = ctx.pykmip.endpoints[cclient]
log.info('#1 ip,p,h {} {} {}'.format(pykmip_ipaddr, pykmip_port, pykmip_hostname))
clientca = cconfig.get('clientca', None)
log.info('#2 clientca {}'.format(clientca))
serverkey = None
servercert = cconfig.get('servercert', None)
log.info('#3 servercert {}'.format(servercert))
servercert = ctx.ssl_certificates.get(servercert)
log.info('#4 servercert {}'.format(servercert))
clientkey = None
clientcert = cconfig.get('clientcert', None)
log.info('#3 clientcert {}'.format(clientcert))
clientcert = ctx.ssl_certificates.get(clientcert)
log.info('#4 clientcert {}'.format(clientcert))
clientca = ctx.ssl_certificates.get(clientca)
log.info('#5 clientca {}'.format(clientca))
if servercert != None:
serverkey = servercert.key
servercert = servercert.certificate
log.info('#6 serverkey {} servercert {}'.format(serverkey, servercert))
if clientcert != None:
clientkey = clientcert.key
clientcert = clientcert.certificate
log.info('#6 clientkey {} clientcert {}'.format(clientkey, clientcert))
if clientca != None:
clientca = clientca.certificate
log.info('#7 clientca {}'.format(clientca))
if servercert == None or clientca == None or serverkey == None:
log.info('#8 clientca {} serverkey {} servercert {}'.format(clientca, serverkey, servercert))
raise ConfigError('pykmip: Missing/bad servercert or clientca')
pykmipdir = get_pykmip_dir(ctx)
kmip_conf = _pykmip_configuration.format(
ipaddr=pykmip_ipaddr,
port=pykmip_port,
confdir=pykmipdir,
hostname=pykmip_hostname,
clientca=clientca,
clientkey=clientkey,
clientcert=clientcert,
serverkey=serverkey,
servercert=servercert
)
fd, local_temp_path = tempfile.mkstemp(suffix='.conf',
prefix='pykmip')
os.write(fd, kmip_conf.encode())
os.close(fd)
remote.put_file(local_temp_path, pykmipdir+'/pykmip.conf')
os.remove(local_temp_path)
@contextlib.contextmanager
def configure_pykmip(ctx, config):
"""
    Configure the PyKMIP server: copy the example policy.json and
    generate pykmip.conf.
"""
assert isinstance(config, dict)
(cclient, cconfig) = next(iter(config.items()))
copy_policy_json(ctx, cclient, cconfig)
create_pykmip_conf(ctx, cclient, cconfig)
try:
yield
finally:
pass
def has_ceph_task(tasks):
for task in tasks:
for name, conf in task.items():
if name == 'ceph':
return True
return False
@contextlib.contextmanager
def run_pykmip(ctx, config):
assert isinstance(config, dict)
if hasattr(ctx, 'daemons'):
pass
elif has_ceph_task(ctx.config['tasks']):
log.info('Delay start pykmip so ceph can do once-only daemon logic')
try:
yield
finally:
pass
else:
ctx.daemons = DaemonGroup()
log.info('Running pykmip...')
pykmipdir = get_pykmip_dir(ctx)
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
cluster_name, _, client_id = teuthology.split_role(client)
# start the public endpoint
client_public_with_id = 'pykmip.public' + '.' + client_id
run_cmd = 'cd ' + pykmipdir + ' && ' + \
'. .pykmipenv/bin/activate && ' + \
'HOME={}'.format(pykmipdir) + ' && ' + \
'exec pykmip-server -f pykmip.conf -l ' + \
pykmipdir + '/pykmip.log & { read; kill %1; }'
ctx.daemons.add_daemon(
remote, 'pykmip', client_public_with_id,
cluster=cluster_name,
args=['bash', '-c', run_cmd],
logger=log.getChild(client),
stdin=run.PIPE,
cwd=pykmipdir,
wait=False,
check_status=False,
)
# sleep driven synchronization
time.sleep(10)
try:
yield
finally:
log.info('Stopping PyKMIP instance')
ctx.daemons.get_daemon('pykmip', client_public_with_id,
cluster_name).stop()
make_keys_template = """
from kmip.pie import client
from kmip import enums
import ssl
import sys
import json
from io import BytesIO
c = client.ProxyKmipClient(config_file="{replace-with-config-file-path}")
rl=[]
for kwargs in {replace-with-secrets}:
with c:
key_id = c.create(
enums.CryptographicAlgorithm.AES,
256,
operation_policy_name='default',
cryptographic_usage_mask=[
enums.CryptographicUsageMask.ENCRYPT,
enums.CryptographicUsageMask.DECRYPT
],
**kwargs
)
c.activate(key_id)
attrs = c.get_attributes(uid=key_id)
r = {}
for a in attrs[1]:
r[str(a.attribute_name)] = str(a.attribute_value)
rl.append(r)
print(json.dumps(rl))
"""
@contextlib.contextmanager
def create_secrets(ctx, config):
"""
Create and activate any requested keys in kmip
"""
assert isinstance(config, dict)
pykmipdir = get_pykmip_dir(ctx)
pykmip_conf_path = pykmipdir + '/pykmip.conf'
my_output = BytesIO()
for (client,cconf) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
secrets=cconf.get('secrets')
if secrets:
secrets_json = json.dumps(cconf['secrets'])
make_keys = make_keys_template \
.replace("{replace-with-secrets}",secrets_json) \
.replace("{replace-with-config-file-path}",pykmip_conf_path)
my_output.truncate()
remote.run(args=[run.Raw('. cephtest/pykmip/.pykmipenv/bin/activate;' \
+ 'python')], stdin=make_keys, stdout = my_output)
ctx.pykmip.keys[client] = json.loads(my_output.getvalue().decode())
try:
yield
finally:
pass
@contextlib.contextmanager
def task(ctx, config):
"""
Deploy and configure PyKMIP
Example of configuration:
tasks:
- install:
- ceph:
conf:
client:
rgw crypt s3 kms backend: kmip
rgw crypt kmip ca path: /home/ubuntu/cephtest/ca/kmiproot.crt
rgw crypt kmip client cert: /home/ubuntu/cephtest/ca/kmip-client.crt
rgw crypt kmip client key: /home/ubuntu/cephtest/ca/kmip-client.key
rgw crypt kmip kms key template: pykmip-$keyid
- openssl_keys:
kmiproot:
client: client.0
cn: kmiproot
key-type: rsa:4096
- openssl_keys:
kmip-server:
client: client.0
ca: kmiproot
kmip-client:
client: client.0
ca: kmiproot
cn: rgw-client
- pykmip:
client.0:
force-branch: master
clientca: kmiproot
servercert: kmip-server
clientcert: kmip-client
secrets:
- name: pykmip-key-1
- name: pykmip-key-2
- rgw:
client.0:
use-pykmip-role: client.0
- s3tests:
client.0:
force-branch: master
"""
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task pykmip only supports a list or dictionary for configuration"
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
for client in config.keys():
if not config[client]:
config[client] = {}
teuthology.deep_merge(config[client], overrides.get('pykmip', {}))
log.debug('PyKMIP config is %s', config)
if not hasattr(ctx, 'ssl_certificates'):
raise ConfigError('pykmip must run after the openssl_keys task')
ctx.pykmip = argparse.Namespace()
ctx.pykmip.endpoints = assign_ports(ctx, config, 5696)
ctx.pykmip.keys = {}
with contextutil.nested(
lambda: download(ctx=ctx, config=config),
lambda: setup_venv(ctx=ctx, config=config),
lambda: install_packages(ctx=ctx, config=config),
lambda: configure_pykmip(ctx=ctx, config=config),
lambda: run_pykmip(ctx=ctx, config=config),
lambda: create_secrets(ctx=ctx, config=config),
):
yield
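
# A hedged sketch (not used by the task) of reading a created secret
# back out of the server with the same pie client API that
# make_keys_template uses; 'uid' would come from the attributes saved
# in ctx.pykmip.keys, and the kmip package must be importable (e.g.
# from the .pykmipenv virtualenv set up above).
def _fetch_secret(conf_path, uid):
    from kmip.pie import client
    with client.ProxyKmipClient(config_file=conf_path) as c:
        return c.get(uid).value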
ceph-main/qa/tasks/python.py
import logging
from teuthology import misc as teuthology
from tasks.vip import subst_vip
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Execute some python code.
tasks:
- python:
host.a: |
import boto3
c = boto3.resource(...)
The provided dict is normally indexed by role. You can also include a
'sudo: false' key to run the code without sudo.
tasks:
- python:
sudo: false
host.b: |
import boto3
c = boto3.resource(...)
"""
assert isinstance(config, dict), "task python got invalid config"
testdir = teuthology.get_testdir(ctx)
sudo = config.pop('sudo', True)
for role, code in config.items():
(remote,) = ctx.cluster.only(role).remotes.keys()
log.info('Running python on role %s host %s', role, remote.name)
log.info(code)
args=[
'TESTDIR={tdir}'.format(tdir=testdir),
'python3',
]
if sudo:
args = ['sudo'] + args
remote.run(args=args, stdin=subst_vip(ctx, code))
ceph-main/qa/tasks/qemu.py
"""
Qemu task
"""
import contextlib
import logging
import os
import yaml
import time
from tasks import rbd
from tasks.util.workunit import get_refspec_after_overrides
from teuthology import contextutil
from teuthology import misc as teuthology
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
from teuthology.packaging import install_package, remove_package
log = logging.getLogger(__name__)
DEFAULT_NUM_DISKS = 2
DEFAULT_IMAGE_URL = 'http://download.ceph.com/qa/ubuntu-12.04.qcow2'
DEFAULT_IMAGE_SIZE = 10240 # in megabytes
ENCRYPTION_HEADER_SIZE = 16 # in megabytes
DEFAULT_CPUS = 1
DEFAULT_MEM = 4096 # in megabytes
def normalize_disks(config):
# normalize the 'disks' parameter into a list of dictionaries
for client, client_config in config.items():
clone = client_config.get('clone', False)
image_url = client_config.get('image_url', DEFAULT_IMAGE_URL)
device_type = client_config.get('type', 'filesystem')
encryption_format = client_config.get('encryption_format', 'none')
parent_encryption_format = client_config.get(
'parent_encryption_format', 'none')
disks = client_config.get('disks', DEFAULT_NUM_DISKS)
if not isinstance(disks, list):
disks = [{'image_name': '{client}.{num}'.format(client=client,
num=i)}
for i in range(int(disks))]
client_config['disks'] = disks
for i, disk in enumerate(disks):
if 'action' not in disk:
disk['action'] = 'create'
assert disk['action'] in ['none', 'create', 'clone'], 'invalid disk action'
assert disk['action'] != 'clone' or 'parent_name' in disk, 'parent_name required for clone'
if 'image_size' not in disk:
disk['image_size'] = DEFAULT_IMAGE_SIZE
disk['image_size'] = int(disk['image_size'])
if 'image_url' not in disk and i == 0:
disk['image_url'] = image_url
if 'device_type' not in disk:
disk['device_type'] = device_type
disk['device_letter'] = chr(ord('a') + i)
if 'encryption_format' not in disk:
if clone:
disk['encryption_format'] = parent_encryption_format
else:
disk['encryption_format'] = encryption_format
assert disk['encryption_format'] in ['none', 'luks1', 'luks2'], 'invalid encryption format'
assert disks, 'at least one rbd device must be used'
if clone:
for disk in disks:
if disk['action'] != 'create':
continue
clone = dict(disk)
clone['action'] = 'clone'
clone['parent_name'] = clone['image_name']
clone['image_name'] += '-clone'
del disk['device_letter']
clone['encryption_format'] = encryption_format
assert clone['encryption_format'] in ['none', 'luks1', 'luks2'], 'invalid encryption format'
clone['parent_encryption_format'] = parent_encryption_format
assert clone['parent_encryption_format'] in ['none', 'luks1', 'luks2'], 'invalid encryption format'
disks.append(clone)
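
# Illustrative only (never called by the task): the shorthand form
# 'disks: 2' normalizes into two 'create' actions with device letters
# 'a' and 'b', unencrypted and of the default image size.
def _normalize_disks_example():
    cfg = {'client.0': {'test': 't.sh', 'disks': 2}}
    normalize_disks(cfg)
    assert [d['device_letter'] for d in cfg['client.0']['disks']] == ['a', 'b']
    return cfg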
def create_images(ctx, config, managers):
for client, client_config in config.items():
disks = client_config['disks']
for disk in disks:
if disk.get('action') != 'create' or (
'image_url' in disk and
disk['encryption_format'] == 'none'):
continue
image_size = disk['image_size']
if disk['encryption_format'] != 'none':
image_size += ENCRYPTION_HEADER_SIZE
create_config = {
client: {
'image_name': disk['image_name'],
'image_format': 2,
'image_size': image_size,
'encryption_format': disk['encryption_format'],
}
}
managers.append(
lambda create_config=create_config:
rbd.create_image(ctx=ctx, config=create_config)
)
def create_clones(ctx, config, managers):
for client, client_config in config.items():
disks = client_config['disks']
for disk in disks:
if disk['action'] != 'clone':
continue
create_config = {
client: {
'image_name': disk['image_name'],
'parent_name': disk['parent_name'],
'encryption_format': disk['encryption_format'],
}
}
managers.append(
lambda create_config=create_config:
rbd.clone_image(ctx=ctx, config=create_config)
)
def create_encrypted_devices(ctx, config, managers):
for client, client_config in config.items():
disks = client_config['disks']
for disk in disks:
if (disk['encryption_format'] == 'none' and
disk.get('parent_encryption_format', 'none') == 'none') or \
'device_letter' not in disk:
continue
dev_config = {client: disk}
managers.append(
lambda dev_config=dev_config:
rbd.dev_create(ctx=ctx, config=dev_config)
)
@contextlib.contextmanager
def create_dirs(ctx, config):
"""
Handle directory creation and cleanup
"""
testdir = teuthology.get_testdir(ctx)
for client, client_config in config.items():
assert 'test' in client_config, 'You must specify a test to run'
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
'install', '-d', '-m0755', '--',
'{tdir}/qemu'.format(tdir=testdir),
'{tdir}/archive/qemu'.format(tdir=testdir),
]
)
try:
yield
finally:
for client, client_config in config.items():
assert 'test' in client_config, 'You must specify a test to run'
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
'rmdir', '{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true',
]
)
@contextlib.contextmanager
def install_block_rbd_driver(ctx, config):
"""
Make sure qemu rbd block driver (block-rbd.so) is installed
"""
packages = {}
for client, _ in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
if remote.os.package_type == 'rpm':
packages[client] = ['qemu-kvm-block-rbd']
else:
packages[client] = ['qemu-block-extra', 'qemu-utils']
for pkg in packages[client]:
install_package(pkg, remote)
try:
yield
finally:
for client, _ in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
for pkg in packages[client]:
remove_package(pkg, remote)
@contextlib.contextmanager
def generate_iso(ctx, config):
"""Execute system commands to generate iso"""
log.info('generating iso...')
testdir = teuthology.get_testdir(ctx)
# use ctx.config instead of config, because config has been
# through teuthology.replace_all_with_clients()
refspec = get_refspec_after_overrides(ctx.config, {})
git_url = teuth_config.get_ceph_qa_suite_git_url()
log.info('Pulling tests from %s ref %s', git_url, refspec)
for client, client_config in config.items():
assert 'test' in client_config, 'You must specify a test to run'
test = client_config['test']
(remote,) = ctx.cluster.only(client).remotes.keys()
clone_dir = '{tdir}/qemu_clone.{role}'.format(tdir=testdir, role=client)
remote.run(args=refspec.clone(git_url, clone_dir))
src_dir = os.path.dirname(__file__)
userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)
with open(os.path.join(src_dir, 'userdata_setup.yaml')) as f:
test_setup = ''.join(f.readlines())
# configuring the commands to setup the nfs mount
mnt_dir = "/export/{client}".format(client=client)
test_setup = test_setup.format(
mnt_dir=mnt_dir
)
with open(os.path.join(src_dir, 'userdata_teardown.yaml')) as f:
test_teardown = ''.join(f.readlines())
user_data = test_setup
disks = client_config['disks']
for disk in disks:
if disk['device_type'] != 'filesystem' or \
'device_letter' not in disk or \
'image_url' in disk:
continue
if disk['encryption_format'] == 'none' and \
disk.get('parent_encryption_format', 'none') == 'none':
dev_name = 'vd' + disk['device_letter']
else:
# encrypted disks use if=ide interface, instead of if=virtio
dev_name = 'sd' + disk['device_letter']
user_data += """
- |
#!/bin/bash
mkdir /mnt/test_{dev_name}
mkfs -t xfs /dev/{dev_name}
mount -t xfs /dev/{dev_name} /mnt/test_{dev_name}
""".format(dev_name=dev_name)
user_data += """
- |
#!/bin/bash
test -d /etc/ceph || mkdir /etc/ceph
cp /mnt/cdrom/ceph.* /etc/ceph/
"""
cloud_config_archive = client_config.get('cloud_config_archive', [])
if cloud_config_archive:
user_data += yaml.safe_dump(cloud_config_archive, default_style='|',
default_flow_style=False)
# this may change later to pass the directories as args to the
# script or something. xfstests needs that.
user_data += """
- |
#!/bin/bash
test -d /mnt/test_b && cd /mnt/test_b
/mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
""" + test_teardown
user_data = user_data.format(
ceph_branch=ctx.config.get('branch'),
ceph_sha1=ctx.config.get('sha1'))
remote.write_file(userdata_path, user_data)
with open(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
remote.write_file(metadata_path, f)
test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)
log.info('fetching test %s for %s', test, client)
remote.run(
args=[
'cp', '--', os.path.join(clone_dir, test), test_file,
run.Raw('&&'),
'chmod', '755', test_file,
],
)
remote.run(
args=[
'genisoimage', '-quiet', '-input-charset', 'utf-8',
'-volid', 'cidata', '-joliet', '-rock',
'-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
'-graft-points',
'user-data={userdata}'.format(userdata=userdata_path),
'meta-data={metadata}'.format(metadata=metadata_path),
'ceph.conf=/etc/ceph/ceph.conf',
'ceph.keyring=/etc/ceph/ceph.keyring',
'test.sh={file}'.format(file=test_file),
],
)
try:
yield
finally:
for client in config.keys():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
'rm', '-rf',
'{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
os.path.join(testdir, 'qemu', 'userdata.' + client),
os.path.join(testdir, 'qemu', 'metadata.' + client),
'{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
'{tdir}/qemu_clone.{client}'.format(tdir=testdir, client=client),
],
)
@contextlib.contextmanager
def download_image(ctx, config):
"""Downland base image, remove image file when done"""
log.info('downloading base image')
testdir = teuthology.get_testdir(ctx)
client_base_files = {}
for client, client_config in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
client_base_files[client] = []
disks = client_config['disks']
for disk in disks:
if disk['action'] != 'create' or 'image_url' not in disk:
continue
base_file = '{tdir}/qemu/base.{name}.qcow2'.format(tdir=testdir,
name=disk['image_name'])
client_base_files[client].append(base_file)
remote.run(
args=[
'wget', '-nv', '-O', base_file, disk['image_url'],
]
)
if disk['encryption_format'] == 'none':
remote.run(
args=[
'qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
base_file, 'rbd:rbd/{image_name}'.format(image_name=disk['image_name'])
]
)
else:
dev_config = {client: {'image_name': disk['image_name'],
'encryption_format': disk['encryption_format']}}
raw_file = '{tdir}/qemu/base.{name}.raw'.format(
tdir=testdir, name=disk['image_name'])
client_base_files[client].append(raw_file)
remote.run(
args=[
'qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
base_file, raw_file
]
)
with rbd.dev_create(ctx, dev_config):
remote.run(
args=[
'dd', 'if={name}'.format(name=raw_file),
'of={name}'.format(name=dev_config[client]['device_path']),
'bs=4M', 'conv=fdatasync'
]
)
for disk in disks:
if disk['action'] == 'clone' or \
disk['encryption_format'] != 'none' or \
(disk['action'] == 'create' and 'image_url' not in disk):
continue
remote.run(
args=[
'rbd', 'resize',
'--size={image_size}M'.format(image_size=disk['image_size']),
disk['image_name'], run.Raw('||'), 'true'
]
)
try:
yield
finally:
log.debug('cleaning up base image files')
for client, base_files in client_base_files.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
for base_file in base_files:
remote.run(
args=[
'rm', '-f', base_file,
],
)
def _setup_nfs_mount(remote, client, service_name, mount_dir):
"""
Sets up an nfs mount on the remote that the guest can use to
store logs. This nfs mount is also used to touch a file
at the end of the test to indicate if the test was successful
or not.
"""
export_dir = "/export/{client}".format(client=client)
log.info("Creating the nfs export directory...")
remote.run(args=[
'sudo', 'mkdir', '-p', export_dir,
])
log.info("Mounting the test directory...")
remote.run(args=[
'sudo', 'mount', '--bind', mount_dir, export_dir,
])
log.info("Adding mount to /etc/exports...")
export = "{dir} *(rw,no_root_squash,no_subtree_check,insecure)".format(
dir=export_dir
)
log.info("Deleting export from /etc/exports...")
remote.run(args=[
'sudo', 'sed', '-i', "\|{export_dir}|d".format(export_dir=export_dir),
'/etc/exports'
])
remote.run(args=[
'echo', export, run.Raw("|"),
'sudo', 'tee', '-a', "/etc/exports",
])
log.info("Restarting NFS...")
if remote.os.package_type == "deb":
remote.run(args=['sudo', 'service', 'nfs-kernel-server', 'restart'])
else:
remote.run(args=['sudo', 'systemctl', 'restart', service_name])
def _teardown_nfs_mount(remote, client, service_name):
"""
Tears down the nfs mount on the remote used for logging and reporting the
status of the tests being ran in the guest.
"""
log.info("Tearing down the nfs mount for {remote}".format(remote=remote))
export_dir = "/export/{client}".format(client=client)
log.info("Stopping NFS...")
if remote.os.package_type == "deb":
remote.run(args=[
'sudo', 'service', 'nfs-kernel-server', 'stop'
])
else:
remote.run(args=[
'sudo', 'systemctl', 'stop', service_name
])
log.info("Unmounting exported directory...")
remote.run(args=[
'sudo', 'umount', export_dir
])
log.info("Deleting export from /etc/exports...")
remote.run(args=[
'sudo', 'sed', '-i', "\|{export_dir}|d".format(export_dir=export_dir),
'/etc/exports'
])
log.info("Starting NFS...")
if remote.os.package_type == "deb":
remote.run(args=[
'sudo', 'service', 'nfs-kernel-server', 'start'
])
else:
remote.run(args=[
'sudo', 'systemctl', 'start', service_name
])
@contextlib.contextmanager
def run_qemu(ctx, config):
"""Setup kvm environment and start qemu"""
procs = []
testdir = teuthology.get_testdir(ctx)
for client, client_config in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
remote.run(
args=[
'mkdir', log_dir, run.Raw('&&'),
'sudo', 'modprobe', 'kvm',
]
)
nfs_service_name = 'nfs'
if remote.os.name in ['rhel', 'centos'] and float(remote.os.version) >= 8:
nfs_service_name = 'nfs-server'
# make an nfs mount to use for logging and to
# allow to test to tell teuthology the tests outcome
_setup_nfs_mount(remote, client, nfs_service_name, log_dir)
# Hack to make sure /dev/kvm permissions are set correctly
# See http://tracker.ceph.com/issues/17977 and
# https://bugzilla.redhat.com/show_bug.cgi?id=1333159
remote.run(args='sudo udevadm control --reload')
remote.run(args='sudo udevadm trigger /dev/kvm')
remote.run(args='ls -l /dev/kvm')
qemu_cmd = 'qemu-system-x86_64'
if remote.os.package_type == "rpm":
qemu_cmd = "/usr/libexec/qemu-kvm"
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'daemon-helper',
'term',
qemu_cmd, '-enable-kvm', '-nographic', '-cpu', 'host',
'-smp', str(client_config.get('cpus', DEFAULT_CPUS)),
'-m', str(client_config.get('memory', DEFAULT_MEM)),
# cd holding metadata for cloud-init
'-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
]
cachemode = 'none'
ceph_config = ctx.ceph['ceph'].conf.get('global', {})
ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
if ceph_config.get('rbd cache', True):
if ceph_config.get('rbd cache max dirty', 1) > 0:
cachemode = 'writeback'
else:
cachemode = 'writethrough'
disks = client_config['disks']
for disk in disks:
if 'device_letter' not in disk:
continue
if disk['encryption_format'] == 'none' and \
disk.get('parent_encryption_format', 'none') == 'none':
interface = 'virtio'
disk_spec = 'rbd:rbd/{img}:id={id}'.format(
img=disk['image_name'],
id=client[len('client.'):]
)
else:
# encrypted disks use ide as a temporary workaround for
# a bug in qemu when using virtio over nbd
# TODO: use librbd encryption directly via qemu (not via nbd)
interface = 'ide'
disk_spec = disk['device_path']
args.extend([
'-drive',
'file={disk_spec},format=raw,if={interface},cache={cachemode}'.format(
disk_spec=disk_spec,
interface=interface,
cachemode=cachemode,
),
])
time_wait = client_config.get('time_wait', 0)
log.info('starting qemu...')
procs.append(
remote.run(
args=args,
logger=log.getChild(client),
stdin=run.PIPE,
wait=False,
)
)
try:
yield
finally:
log.info('waiting for qemu tests to finish...')
run.wait(procs)
if time_wait > 0:
log.debug('waiting {time_wait} sec for workloads detect finish...'.format(
time_wait=time_wait));
time.sleep(time_wait)
log.debug('checking that qemu tests succeeded...')
for client in config.keys():
(remote,) = ctx.cluster.only(client).remotes.keys()
# ensure we have permissions to all the logs
log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir,
client=client)
remote.run(
args=[
'sudo', 'chmod', 'a+rw', '-R', log_dir
]
)
# teardown nfs mount
_teardown_nfs_mount(remote, client, nfs_service_name)
# check for test status
remote.run(
args=[
'test', '-f',
'{tdir}/archive/qemu/{client}/success'.format(
tdir=testdir,
client=client
),
],
)
log.info("Deleting exported directory...")
for client in config.keys():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(args=[
'sudo', 'rm', '-r', '/export'
])
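
# A sketch (mirrors, but is not used by, the selection in run_qemu
# above) of how the qemu drive cache mode follows the rbd cache
# settings from the ceph conf:
def _qemu_cachemode(rbd_cache, rbd_cache_max_dirty):
    if not rbd_cache:
        return 'none'
    return 'writeback' if rbd_cache_max_dirty > 0 else 'writethrough'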
@contextlib.contextmanager
def task(ctx, config):
"""
Run a test inside of QEMU on top of rbd. Only one test
is supported per client.
For example, you can specify which clients to run on::
tasks:
- ceph:
- qemu:
client.0:
test: http://download.ceph.com/qa/test.sh
client.1:
test: http://download.ceph.com/qa/test2.sh
Or use the same settings on all clients:
tasks:
- ceph:
- qemu:
all:
test: http://download.ceph.com/qa/test.sh
For tests that want to explicitly describe the RBD images to connect:
tasks:
- ceph:
- qemu:
client.0:
test: http://download.ceph.com/qa/test.sh
clone: True/False (optionally clone all created disks),
image_url: <URL> (optional default image URL)
type: filesystem / block (optional default device type)
disks: [
{
action: create / clone / none (optional, defaults to create)
image_name: <image name> (optional)
parent_name: <parent_name> (if action == clone),
type: filesystem / block (optional, defaults to filesystem)
image_url: <URL> (optional),
image_size: <MiB> (optional)
encryption_format: luks1 / luks2 / none (optional, defaults to none)
}, ...
]
You can set the amount of CPUs and memory the VM has (default is 1 CPU and
4096 MB)::
tasks:
- ceph:
- qemu:
client.0:
test: http://download.ceph.com/qa/test.sh
cpus: 4
memory: 512 # megabytes
If you need to configure additional cloud-config options, set cloud_config
to the required data set::
tasks:
- ceph
- qemu:
client.0:
test: http://ceph.com/qa/test.sh
cloud_config_archive:
- |
#/bin/bash
touch foo1
- content: |
test data
type: text/plain
filename: /tmp/data
"""
assert isinstance(config, dict), \
"task qemu only supports a dictionary for configuration"
config = teuthology.replace_all_with_clients(ctx.cluster, config)
normalize_disks(config)
managers = []
create_images(ctx=ctx, config=config, managers=managers)
managers.extend([
lambda: create_dirs(ctx=ctx, config=config),
lambda: install_block_rbd_driver(ctx=ctx, config=config),
lambda: generate_iso(ctx=ctx, config=config),
lambda: download_image(ctx=ctx, config=config),
])
create_clones(ctx=ctx, config=config, managers=managers)
create_encrypted_devices(ctx=ctx, config=config, managers=managers)
managers.append(
lambda: run_qemu(ctx=ctx, config=config),
)
with contextutil.nested(*managers):
yield
ceph-main/qa/tasks/rabbitmq.py
"""
Deploy and configure RabbitMQ for Teuthology
"""
import contextlib
import logging
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def install_rabbitmq(ctx, config):
"""
    Download and install RabbitMQ and its Erlang dependency.
"""
assert isinstance(config, dict)
log.info('Installing RabbitMQ...')
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
ctx.cluster.only(client).run(args=[
'sudo', 'yum', '-y', 'install', 'epel-release'
])
link1 = 'https://packagecloud.io/install/repositories/rabbitmq/erlang/script.rpm.sh'
ctx.cluster.only(client).run(args=[
'curl', '-s', link1, run.Raw('|'), 'sudo', 'bash'
])
ctx.cluster.only(client).run(args=[
'sudo', 'yum', '-y', 'install', 'erlang'
])
link2 = 'https://packagecloud.io/install/repositories/rabbitmq/rabbitmq-server/script.rpm.sh'
ctx.cluster.only(client).run(args=[
'curl', '-s', link2, run.Raw('|'), 'sudo', 'bash'
])
ctx.cluster.only(client).run(args=[
'sudo', 'yum', '-y', 'install', 'rabbitmq-server'
])
try:
yield
finally:
log.info('Removing packaged dependencies of RabbitMQ...')
for (client, _) in config.items():
ctx.cluster.only(client).run(args=[
'sudo', 'yum', '-y', 'remove', 'rabbitmq-server.noarch'
])
@contextlib.contextmanager
def run_rabbitmq(ctx, config):
"""
This includes two parts:
1. Starting Daemon
2. Starting RabbitMQ service
"""
assert isinstance(config, dict)
log.info('Bringing up Daemon and RabbitMQ service...')
for (client,_) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
ctx.cluster.only(client).run(args=[
'sudo', 'chkconfig', 'rabbitmq-server', 'on'
],
)
ctx.cluster.only(client).run(args=[
'sudo', '/sbin/service', 'rabbitmq-server', 'start'
],
)
'''
# To check whether rabbitmq-server is running or not
ctx.cluster.only(client).run(args=[
'sudo', '/sbin/service', 'rabbitmq-server', 'status'
],
)
'''
try:
yield
finally:
log.info('Stopping RabbitMQ Service...')
for (client, _) in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
ctx.cluster.only(client).run(args=[
'sudo', '/sbin/service', 'rabbitmq-server', 'stop'
],
)
@contextlib.contextmanager
def task(ctx,config):
"""
To run rabbitmq the prerequisite is to run the tox task. Following is the way how to run
tox and then rabbitmq::
tasks:
- rabbitmq:
client.0:
"""
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task rabbitmq only supports a list or dictionary for configuration"
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
log.debug('RabbitMQ config is %s', config)
with contextutil.nested(
lambda: install_rabbitmq(ctx=ctx, config=config),
lambda: run_rabbitmq(ctx=ctx, config=config),
):
yield
ceph-main/qa/tasks/rados.py
"""
Rados model-based integration tests
"""
import contextlib
import logging
import gevent
from teuthology import misc as teuthology
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Run RadosModel-based integration tests.
The config should be as follows::
rados:
clients: [client list]
ops: <number of ops>
objects: <number of objects to use>
max_in_flight: <max number of operations in flight>
object_size: <size of objects in bytes>
min_stride_size: <minimum write stride size in bytes>
max_stride_size: <maximum write stride size in bytes>
op_weights: <dictionary mapping operation type to integer weight>
runs: <number of times to run> - the pool is remade between runs
ec_pool: use an ec pool
erasure_code_profile: profile to use with the erasure coded pool
fast_read: enable ec_pool's fast_read
min_size: set the min_size of created pool
pool_snaps: use pool snapshots instead of selfmanaged snapshots
write_fadvise_dontneed: write behavior like with LIBRADOS_OP_FLAG_FADVISE_DONTNEED.
                              This means the data won't be accessed again in the near future,
                              so the OSD backend need not keep it in cache.
For example::
tasks:
- ceph:
- rados:
clients: [client.0]
ops: 1000
max_seconds: 0 # 0 for no limit
objects: 25
max_in_flight: 16
object_size: 4000000
min_stride_size: 1024
max_stride_size: 4096
op_weights:
read: 20
write: 10
delete: 2
snap_create: 3
rollback: 2
snap_remove: 0
ec_pool: create an ec pool, defaults to False
erasure_code_use_overwrites: test overwrites, default false
erasure_code_profile:
name: teuthologyprofile
k: 2
m: 1
crush-failure-domain: osd
pool_snaps: true
write_fadvise_dontneed: true
runs: 10
- interactive:
Optionally, you can provide the pool name to run against:
tasks:
- ceph:
- exec:
client.0:
- ceph osd pool create foo
- rados:
clients: [client.0]
pools: [foo]
...
Alternatively, you can provide a pool prefix:
tasks:
- ceph:
- exec:
client.0:
- ceph osd pool create foo.client.0
- rados:
clients: [client.0]
pool_prefix: foo
...
The tests are run asynchronously, they are not complete when the task
returns. For instance:
- rados:
clients: [client.0]
pools: [ecbase]
ops: 4000
objects: 500
op_weights:
read: 100
write: 100
delete: 50
copy_from: 50
- print: "**** done rados ec-cache-agent (part 2)"
will run the print task immediately after the rados tasks begins but
not after it completes. To make the rados task a blocking / sequential
task, use:
- sequential:
- rados:
clients: [client.0]
pools: [ecbase]
ops: 4000
objects: 500
op_weights:
read: 100
write: 100
delete: 50
copy_from: 50
- print: "**** done rados ec-cache-agent (part 2)"
"""
log.info('Beginning rados...')
assert isinstance(config, dict), \
"please list clients to run on"
object_size = int(config.get('object_size', 4000000))
op_weights = config.get('op_weights', {})
testdir = teuthology.get_testdir(ctx)
args = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'ceph_test_rados']
if config.get('ec_pool', False):
args.extend(['--no-omap'])
if not config.get('erasure_code_use_overwrites', False):
args.extend(['--ec-pool'])
if config.get('write_fadvise_dontneed', False):
args.extend(['--write-fadvise-dontneed'])
if config.get('set_redirect', False):
args.extend(['--set_redirect'])
if config.get('set_chunk', False):
args.extend(['--set_chunk'])
if config.get('enable_dedup', False):
args.extend(['--enable_dedup'])
if config.get('low_tier_pool', None):
args.extend(['--low_tier_pool', config.get('low_tier_pool', None)])
if config.get('dedup_chunk_size', False):
args.extend(['--dedup_chunk_size', config.get('dedup_chunk_size', None)] )
if config.get('dedup_chunk_algo', False):
args.extend(['--dedup_chunk_algo', config.get('dedup_chunk_algo', None)])
if config.get('pool_snaps', False):
args.extend(['--pool-snaps'])
if config.get('balance_reads', False):
args.extend(['--balance-reads'])
if config.get('localize_reads', False):
args.extend(['--localize-reads'])
args.extend([
'--max-ops', str(config.get('ops', 10000)),
'--objects', str(config.get('objects', 500)),
'--max-in-flight', str(config.get('max_in_flight', 16)),
'--size', str(object_size),
'--min-stride-size', str(config.get('min_stride_size', object_size // 10)),
'--max-stride-size', str(config.get('max_stride_size', object_size // 5)),
'--max-seconds', str(config.get('max_seconds', 0))
])
weights = {}
weights['read'] = 100
weights['write'] = 100
weights['delete'] = 10
# Parallel of the op_types in test/osd/TestRados.cc
for field in [
# read handled above
# write handled above
# delete handled above
"snap_create",
"snap_remove",
"rollback",
"setattr",
"rmattr",
"watch",
"copy_from",
"hit_set_list",
"is_dirty",
"undirty",
"cache_flush",
"cache_try_flush",
"cache_evict",
"append",
"write",
"read",
"delete",
"set_chunk",
"tier_promote",
"tier_evict",
"tier_promote",
"tier_flush"
]:
if field in op_weights:
weights[field] = op_weights[field]
if config.get('write_append_excl', True):
if 'write' in weights:
weights['write'] = weights['write'] // 2
weights['write_excl'] = weights['write']
if 'append' in weights:
weights['append'] = weights['append'] // 2
weights['append_excl'] = weights['append']
for op, weight in weights.items():
args.extend([
'--op', op, str(weight)
])
def thread():
"""Thread spawned by gevent"""
clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
log.info('clients are %s' % clients)
manager = ctx.managers['ceph']
if config.get('ec_pool', False):
profile = config.get('erasure_code_profile', {})
profile_name = profile.get('name', 'teuthologyprofile')
manager.create_erasure_code_profile(profile_name, profile)
else:
profile_name = None
for i in range(int(config.get('runs', '1'))):
log.info("starting run %s out of %s", str(i), config.get('runs', '1'))
tests = {}
existing_pools = config.get('pools', [])
created_pools = []
for role in config.get('clients', clients):
assert isinstance(role, str)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
pool = config.get('pool', None)
if not pool and existing_pools:
pool = existing_pools.pop()
else:
pool = manager.create_pool_with_unique_name(
erasure_code_profile_name=profile_name,
erasure_code_use_overwrites=
config.get('erasure_code_use_overwrites', False)
)
created_pools.append(pool)
if config.get('fast_read', False):
manager.raw_cluster_cmd(
'osd', 'pool', 'set', pool, 'fast_read', 'true')
min_size = config.get('min_size', None);
if min_size is not None:
manager.raw_cluster_cmd(
'osd', 'pool', 'set', pool, 'min_size', str(min_size))
(remote,) = ctx.cluster.only(role).remotes.keys()
proc = remote.run(
args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
["--pool", pool],
logger=log.getChild("rados.{id}".format(id=id_)),
stdin=run.PIPE,
wait=False
)
tests[id_] = proc
run.wait(tests.values())
for pool in created_pools:
manager.wait_snap_trimming_complete(pool);
manager.remove_pool(pool)
running = gevent.spawn(thread)
try:
yield
finally:
log.info('joining rados')
running.get()
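
# An illustrative sketch (not used above) of the write_append_excl
# behaviour: the 'write' and 'append' weights are halved, with the
# other half going to their exclusive-create variants.
def _split_excl(weights):
    w = dict(weights)
    for op in ('write', 'append'):
        if op in w:
            w[op] //= 2
            w[op + '_excl'] = w[op]
    return w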
ceph-main/qa/tasks/radosbench.py
"""
Rados benchmarking
"""
import contextlib
import logging
from teuthology.orchestra import run
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Run radosbench
The config should be as follows:
radosbench:
clients: [client list]
time: <seconds to run>
pool: <pool to use>
size: write size to use
concurrency: max number of outstanding writes (16)
objectsize: object size to use
unique_pool: use a unique pool, defaults to False
ec_pool: create an ec pool, defaults to False
create_pool: create pool, defaults to True
erasure_code_profile:
name: teuthologyprofile
k: 2
m: 1
crush-failure-domain: osd
cleanup: false (defaults to true)
type: <write|seq|rand> (defaults to write)
example:
tasks:
- ceph:
- radosbench:
clients: [client.0]
time: 360
- interactive:
"""
log.info('Beginning radosbench...')
assert isinstance(config, dict), \
"please list clients to run on"
radosbench = {}
testdir = teuthology.get_testdir(ctx)
manager = ctx.managers['ceph']
runtype = config.get('type', 'write')
create_pool = config.get('create_pool', True)
for role in config.get(
'clients',
list(map(lambda x: 'client.' + x,
teuthology.all_roles_of_type(ctx.cluster, 'client')))):
assert isinstance(role, str)
(_, id_) = role.split('.', 1)
(remote,) = ctx.cluster.only(role).remotes.keys()
if config.get('ec_pool', False):
profile = config.get('erasure_code_profile', {})
profile_name = profile.get('name', 'teuthologyprofile')
manager.create_erasure_code_profile(profile_name, profile)
else:
profile_name = None
cleanup = []
if not config.get('cleanup', True):
cleanup = ['--no-cleanup']
write_to_omap = []
if config.get('write-omap', False):
write_to_omap = ['--write-omap']
log.info('omap writes')
pool = config.get('pool', 'data')
if create_pool:
if pool != 'data':
manager.create_pool(pool, erasure_code_profile_name=profile_name)
else:
pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)
concurrency = config.get('concurrency', 16)
osize = config.get('objectsize', 65536)
if osize == 0:
objectsize = []
else:
objectsize = ['--object-size', str(osize)]
size = ['-b', str(config.get('size', 65536))]
# If doing a reading run then populate data
if runtype != "write":
proc = remote.run(
args=[
"/bin/sh", "-c",
" ".join(['adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage',
'rados',
'--no-log-to-stderr',
'--name', role] +
['-t', str(concurrency)]
+ size + objectsize +
['-p' , pool,
'bench', str(60), "write", "--no-cleanup"
]).format(tdir=testdir),
],
logger=log.getChild('radosbench.{id}'.format(id=id_)),
wait=True
)
size = []
objectsize = []
proc = remote.run(
args=[
"/bin/sh", "-c",
" ".join(['adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage',
'rados',
'--no-log-to-stderr',
'--name', role]
+ size + objectsize +
['-p' , pool,
'bench', str(config.get('time', 360)), runtype,
] + write_to_omap + cleanup).format(tdir=testdir),
],
logger=log.getChild('radosbench.{id}'.format(id=id_)),
stdin=run.PIPE,
wait=False
)
radosbench[id_] = proc
try:
yield
finally:
timeout = config.get('time', 360) * 30 + 300
log.info('joining radosbench (timing out after %ss)', timeout)
run.wait(radosbench.values(), timeout=timeout)
if pool != 'data' and create_pool:
manager.remove_pool(pool)
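
# For reference, a sketch of the join logic above: the wait timeout
# scales with the configured bench time, so the default 360s run is
# given 360 * 30 + 300 = 11100 seconds to wind down.
def _join_timeout(bench_seconds=360):
    return bench_seconds * 30 + 300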
ceph-main/qa/tasks/radosbenchsweep.py
"""
Rados benchmarking sweep
"""
import contextlib
import logging
import re
from io import BytesIO
from itertools import product
from teuthology.orchestra import run
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Execute a radosbench parameter sweep
Puts radosbench in a loop, taking values from the given config at each
iteration. If given, the min and max values below create a range, e.g.
min_replicas=1 and max_replicas=3 implies executing with 1-3 replicas.
Parameters:
clients: [client list]
time: seconds to run (default=120)
sizes: [list of object sizes] (default=[4M])
mode: <write|read|seq> (default=write)
repetitions: execute the same configuration multiple times (default=1)
min_num_replicas: minimum number of replicas to use (default = 3)
max_num_replicas: maximum number of replicas to use (default = 3)
min_num_osds: the minimum number of OSDs in a pool (default=all)
max_num_osds: the maximum number of OSDs in a pool (default=all)
file: name of CSV-formatted output file (default='radosbench.csv')
columns: columns to include (default=all)
- rep: execution number (takes values from 'repetitions')
- num_osd: number of osds for pool
- num_replica: number of replicas
- avg_throughput: throughput
- avg_latency: latency
- stdev_throughput:
- stdev_latency:
Example:
    - radosbenchsweep:
columns: [rep, num_osd, num_replica, avg_throughput, stdev_throughput]
"""
log.info('Beginning radosbenchsweep...')
assert isinstance(config, dict), 'expecting dictionary for configuration'
# get and validate config values
# {
# only one client supported for now
if len(config.get('clients', [])) != 1:
raise Exception("Only one client can be specified")
# only write mode
if config.get('mode', 'write') != 'write':
raise Exception("Only 'write' mode supported for now.")
# OSDs
total_osds_in_cluster = teuthology.num_instances_of_type(ctx.cluster, 'osd')
min_num_osds = config.get('min_num_osds', total_osds_in_cluster)
max_num_osds = config.get('max_num_osds', total_osds_in_cluster)
if max_num_osds > total_osds_in_cluster:
raise Exception('max_num_osds cannot be greater than total in cluster')
if min_num_osds < 1:
raise Exception('min_num_osds cannot be less than 1')
if min_num_osds > max_num_osds:
raise Exception('min_num_osds cannot be greater than max_num_osd')
osds = range(0, (total_osds_in_cluster + 1))
# replicas
min_num_replicas = config.get('min_num_replicas', 3)
max_num_replicas = config.get('max_num_replicas', 3)
if min_num_replicas < 1:
raise Exception('min_num_replicas cannot be less than 1')
if min_num_replicas > max_num_replicas:
raise Exception('min_num_replicas cannot be greater than max_replicas')
if max_num_replicas > max_num_osds:
raise Exception('max_num_replicas cannot be greater than max_num_osds')
replicas = range(min_num_replicas, (max_num_replicas + 1))
# object size
sizes = config.get('size', [4 << 20])
# repetitions
reps = range(config.get('repetitions', 1))
# file
fname = config.get('file', 'radosbench.csv')
f = open('{}/{}'.format(ctx.archive, fname), 'w')
f.write(get_csv_header(config) + '\n')
# }
# set default pools size=1 to avoid 'unhealthy' issues
ctx.manager.set_pool_property('data', 'size', 1)
ctx.manager.set_pool_property('metadata', 'size', 1)
ctx.manager.set_pool_property('rbd', 'size', 1)
current_osds_out = 0
# sweep through all parameters
for osds_out, size, replica, rep in product(osds, sizes, replicas, reps):
osds_in = total_osds_in_cluster - osds_out
if osds_in == 0:
# we're done
break
if current_osds_out != osds_out:
# take an osd out
ctx.manager.raw_cluster_cmd(
'osd', 'reweight', str(osds_out-1), '0.0')
wait_until_healthy(ctx, config)
current_osds_out = osds_out
if osds_in not in range(min_num_osds, (max_num_osds + 1)):
# no need to execute with a number of osds that wasn't requested
continue
if osds_in < replica:
# cannot execute with more replicas than available osds
continue
run_radosbench(ctx, config, f, osds_in, size, replica, rep)
f.close()
yield
def get_csv_header(conf):
all_columns = [
'rep', 'num_osd', 'num_replica', 'avg_throughput',
'avg_latency', 'stdev_throughput', 'stdev_latency'
]
given_columns = conf.get('columns', None)
if given_columns and len(given_columns) != 0:
for column in given_columns:
if column not in all_columns:
raise Exception('Unknown column ' + column)
return ','.join(conf['columns'])
else:
conf['columns'] = all_columns
return ','.join(all_columns)
def run_radosbench(ctx, config, f, num_osds, size, replica, rep):
pool = ctx.manager.create_pool_with_unique_name()
ctx.manager.set_pool_property(pool, 'size', replica)
wait_until_healthy(ctx, config)
log.info('Executing with parameters: ')
log.info(' num_osd =' + str(num_osds))
log.info(' size =' + str(size))
log.info(' num_replicas =' + str(replica))
log.info(' repetition =' + str(rep))
for role in config.get('clients', ['client.0']):
assert isinstance(role, str)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
(remote,) = ctx.cluster.only(role).remotes.keys()
proc = remote.run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{}/archive/coverage'.format(teuthology.get_testdir(ctx)),
'rados',
'--no-log-to-stderr',
'--name', role,
'-b', str(size),
'-p', pool,
'bench', str(config.get('time', 120)), 'write',
],
logger=log.getChild('radosbench.{id}'.format(id=id_)),
stdin=run.PIPE,
stdout=BytesIO(),
wait=False
)
# parse output to get summary and format it as CSV
proc.wait()
out = proc.stdout.getvalue()
all_values = {
'stdev_throughput': re.sub(r'Stddev Bandwidth: ', '', re.search(
r'Stddev Bandwidth:.*', out).group(0)),
'stdev_latency': re.sub(r'Stddev Latency: ', '', re.search(
r'Stddev Latency:.*', out).group(0)),
'avg_throughput': re.sub(r'Bandwidth \(MB/sec\): ', '', re.search(
r'Bandwidth \(MB/sec\):.*', out).group(0)),
'avg_latency': re.sub(r'Average Latency: ', '', re.search(
r'Average Latency:.*', out).group(0)),
'rep': str(rep),
'num_osd': str(num_osds),
'num_replica': str(replica)
}
values_to_write = []
for column in config['columns']:
values_to_write.extend([all_values[column]])
f.write(','.join(values_to_write) + '\n')
ctx.manager.remove_pool(pool)
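
# A hedged helper sketch (unused above) generalizing the regex
# scraping in run_radosbench(): pull the value printed after 'label:'
# in 'rados bench' summary output, assuming the label occurs once.
def _parse_bench_field(out, label):
    m = re.search(re.escape(label) + r':\s*(\S+)', out)
    return m.group(1) if m else None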
def wait_until_healthy(ctx, config):
first_mon = teuthology.get_first_mon(ctx, config)
(mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
teuthology.wait_until_healthy(ctx, mon_remote)
ceph-main/qa/tasks/radosgw_admin.py
"""
Rgw admin testing against a running instance
"""
# The test cases in this file have been annotated for inventory.
# To extract the inventory (in csv format) use the command:
#
# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
#
# to run this standalone:
# python qa/tasks/radosgw_admin.py [--user=uid] --host=host --port=port
#
import json
import logging
import time
import datetime
import sys
from io import StringIO
from queue import Queue
import boto.exception
import boto.s3.connection
import boto.s3.acl
import httplib2
#import pdb
import tasks.vstart_runner
from tasks.rgw import RGWEndpoint
from tasks.util.rgw import rgwadmin as tasks_util_rgw_rgwadmin
from tasks.util.rgw import get_user_summary, get_user_successful_ops
log = logging.getLogger(__name__)
def rgwadmin(*args, **kwargs):
ctx = args[0]
# Is this a local runner?
omit_sudo = hasattr(ctx.rgw, 'omit_sudo') and ctx.rgw.omit_sudo == True
omit_tdir = hasattr(ctx.rgw, 'omit_tdir') and ctx.rgw.omit_tdir == True
return tasks_util_rgw_rgwadmin(*args, **kwargs, omit_sudo=omit_sudo, omit_tdir=omit_tdir)
def usage_acc_findentry2(entries, user, add=True):
for e in entries:
if e['user'] == user:
return e
if not add:
return None
e = {'user': user, 'buckets': []}
entries.append(e)
return e
def usage_acc_findsum2(summaries, user, add=True):
for e in summaries:
if e['user'] == user:
return e
if not add:
return None
e = {'user': user, 'categories': [],
'total': {'bytes_received': 0,
'bytes_sent': 0, 'ops': 0, 'successful_ops': 0 }}
summaries.append(e)
return e
def usage_acc_update2(x, out, b_in, err):
x['bytes_sent'] += b_in
x['bytes_received'] += out
x['ops'] += 1
if not err:
x['successful_ops'] += 1
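
# Illustrative sketch (hypothetical values, not used by the tests):
# one successful op that sent 10 bytes and received 20 updates an
# accumulator like so -- note 'out' counts bytes received:
#
#   x = {'bytes_sent': 0, 'bytes_received': 0, 'ops': 0, 'successful_ops': 0}
#   usage_acc_update2(x, 20, 10, None)
#   # -> {'bytes_sent': 10, 'bytes_received': 20, 'ops': 1, 'successful_ops': 1}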
def usage_acc_validate_fields(r, x, x2, what):
q=[]
for field in ['bytes_sent', 'bytes_received', 'ops', 'successful_ops']:
try:
if x2[field] < x[field]:
q.append("field %s: %d < %d" % (field, x2[field], x[field]))
except Exception as ex:
r.append( "missing/bad field " + field + " in " + what + " " + str(ex))
return
if len(q) > 0:
r.append("incomplete counts in " + what + ": " + ", ".join(q))
class usage_acc:
def __init__(self):
self.results = {'entries': [], 'summary': []}
def findentry(self, user):
return usage_acc_findentry2(self.results['entries'], user)
def findsum(self, user):
return usage_acc_findsum2(self.results['summary'], user)
def e2b(self, e, bucket, add=True):
for b in e['buckets']:
if b['bucket'] == bucket:
return b
if not add:
return None
b = {'bucket': bucket, 'categories': []}
e['buckets'].append(b)
return b
def c2x(self, c, cat, add=True):
for x in c:
if x['category'] == cat:
return x
if not add:
return None
x = {'bytes_received': 0, 'category': cat,
'bytes_sent': 0, 'ops': 0, 'successful_ops': 0 }
c.append(x)
return x
def update(self, c, cat, user, out, b_in, err):
x = self.c2x(c, cat)
usage_acc_update2(x, out, b_in, err)
if not err and cat == 'create_bucket' and 'owner' not in x:
x['owner'] = user
def make_entry(self, cat, bucket, user, out, b_in, err):
if cat == 'create_bucket' and err:
return
e = self.findentry(user)
b = self.e2b(e, bucket)
self.update(b['categories'], cat, user, out, b_in, err)
s = self.findsum(user)
x = self.c2x(s['categories'], cat)
usage_acc_update2(x, out, b_in, err)
x = s['total']
usage_acc_update2(x, out, b_in, err)
def generate_make_entry(self):
return lambda cat,bucket,user,out,b_in,err: self.make_entry(cat, bucket, user, out, b_in, err)
def get_usage(self):
return self.results
def compare_results(self, results):
if 'entries' not in results or 'summary' not in results:
return ['Missing entries or summary']
r = []
for e in self.results['entries']:
try:
e2 = usage_acc_findentry2(results['entries'], e['user'], False)
except Exception as ex:
r.append("malformed entry looking for user "
+ e['user'] + " " + str(ex))
break
if e2 == None:
r.append("missing entry for user " + e['user'])
continue
for b in e['buckets']:
c = b['categories']
try:
b2 = self.e2b(e2, b['bucket'], False)
if b2 != None:
c2 = b2['categories']
except Exception as ex:
r.append("malformed entry looking for bucket "
+ b['bucket'] + " in user " + e['user'] + " " + str(ex))
break
if b2 == None:
r.append("can't find bucket " + b['bucket']
+ " in user " + e['user'])
continue
for x in c:
try:
x2 = self.c2x(c2, x['category'], False)
except Exception as ex:
r.append("malformed entry looking for "
+ x['category'] + " in bucket " + b['bucket']
+ " user " + e['user'] + " " + str(ex))
break
usage_acc_validate_fields(r, x, x2, "entry: category "
+ x['category'] + " bucket " + b['bucket']
+ " in user " + e['user'])
for s in self.results['summary']:
c = s['categories']
try:
s2 = usage_acc_findsum2(results['summary'], s['user'], False)
except Exception as ex:
r.append("malformed summary looking for user " + e['user']
+ " " + str(ex))
break
if s2 == None:
r.append("missing summary for user " + e['user'] + " " + str(ex))
continue
try:
c2 = s2['categories']
except Exception as ex:
r.append("malformed summary missing categories for user "
+ e['user'] + " " + str(ex))
break
for x in c:
try:
x2 = self.c2x(c2, x['category'], False)
except Exception as ex:
r.append("malformed summary looking for "
+ x['category'] + " user " + e['user'] + " " + str(ex))
break
usage_acc_validate_fields(r, x, x2, "summary: category "
+ x['category'] + " in user " + e['user'])
x = s['total']
try:
x2 = s2['total']
except Exception as ex:
r.append("malformed summary looking for totals for user "
+ e['user'] + " " + str(ex))
break
usage_acc_validate_fields(r, x, x2, "summary: totals for user" + e['user'])
return r
def ignore_this_entry(cat, bucket, user, out, b_in, err):
pass
class requestlog_queue():
def __init__(self, add):
self.q = Queue(1000)
self.adder = add
def handle_request_data(self, request, response, error=False):
now = datetime.datetime.now()
if error:
pass
elif response.status < 200 or response.status >= 400:
error = True
self.q.put({'t': now, 'o': request, 'i': response, 'e': error})
def clear(self):
with self.q.mutex:
self.q.queue.clear()
def log_and_clear(self, cat, bucket, user, add_entry = None):
while not self.q.empty():
j = self.q.get()
bytes_out = 0
if 'Content-Length' in j['o'].headers:
bytes_out = int(j['o'].headers['Content-Length'])
bytes_in = 0
msg = j['i'].msg
            if 'content-length' in msg:
bytes_in = int(msg['content-length'])
log.info('RL: %s %s %s bytes_out=%d bytes_in=%d failed=%r'
% (cat, bucket, user, bytes_out, bytes_in, j['e']))
if add_entry == None:
add_entry = self.adder
add_entry(cat, bucket, user, bytes_out, bytes_in, j['e'])
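
# Illustrative sketch (not executed here) of how the two helpers above are
# wired together; `conn` stands for a boto S3Connection and the bucket/user
# names are placeholders. task() below does the real wiring.
#
#   acc = usage_acc()
#   rl = requestlog_queue(acc.generate_make_entry())
#   conn.set_request_hook(rl)
#   # ... drive S3 traffic through conn ...
#   rl.log_and_clear("put_obj", "mybucket", "foo")
#   mismatches = acc.compare_results(usage_reported_by_rgw)
#   assert len(mismatches) == 0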
def create_presigned_url(conn, method, bucket_name, key_name, expiration):
return conn.generate_url(expires_in=expiration,
method=method,
bucket=bucket_name,
key=key_name,
query_auth=True,
)
def send_raw_http_request(conn, method, bucket_name, key_name, follow_redirects = False):
url = create_presigned_url(conn, method, bucket_name, key_name, 3600)
print(url)
h = httplib2.Http()
h.follow_redirects = follow_redirects
return h.request(url, method)
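
# Example (illustrative): fetching an object anonymously via a presigned URL;
# `conn` and the bucket/key names are placeholders.
#
#   (response, body) = send_raw_http_request(conn, 'GET', 'mybucket', 'mykey',
#                                            follow_redirects=False)
#   assert response.status == 200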
def get_acl(key):
"""
Helper function to get the xml acl from a key, ensuring that the xml
version tag is removed from the acl response
"""
raw_acl = key.get_xml_acl().decode()
def remove_version(string):
return string.split(
'<?xml version="1.0" encoding="UTF-8"?>'
)[-1]
def remove_newlines(string):
return string.strip('\n')
return remove_version(
remove_newlines(raw_acl)
)
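
# Example (illustrative): the cleaned ACL can be compared byte-for-byte with
# the XML that `radosgw-admin policy` prints, as task() does below;
# `admin_policy_xml` is a placeholder for that output.
#
#   acl = get_acl(key)
#   assert acl == admin_policy_xml.strip('\n')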
def cleanup(ctx, client):
# remove objects and buckets
(err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True)
try:
for bucket in out:
(err, out) = rgwadmin(ctx, client, [
'bucket', 'rm', '--bucket', bucket, '--purge-objects'],
check_status=True)
    except Exception:
pass
# remove test user(s)
users = ['foo', 'fud', 'bar', 'bud']
users.reverse()
for user in users:
try:
(err, out) = rgwadmin(ctx, client, [
'user', 'rm', '--uid', user],
check_status=True)
        except Exception:
pass
# remove custom placement
try:
zonecmd = ['zone', 'placement', 'rm', '--rgw-zone', 'default',
'--placement-id', 'new-placement']
(err, out) = rgwadmin(ctx, client, zonecmd, check_status=True)
    except Exception:
pass
def task(ctx, config):
"""
Test radosgw-admin functionality against a running rgw instance.
"""
global log
assert ctx.rgw.config, \
"radosgw_admin task needs a config passed from the rgw task"
config = ctx.rgw.config
log.debug('config is: %r', config)
clients_from_config = config.keys()
# choose first client as default
client = next(iter(clients_from_config))
# once the client is chosen, pull the host name and assigned port out of
# the role_endpoints that were assigned by the rgw task
endpoint = ctx.rgw.role_endpoints[client]
cleanup(ctx, client)
##
user1='foo'
user2='fud'
user3='bar'
user4='bud'
subuser1='foo:foo1'
subuser2='foo:foo2'
display_name1='Foo'
display_name2='Fud'
display_name3='Bar'
email='[email protected]'
access_key='9te6NH5mcdcq0Tc5i8i1'
secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
access_key2='p5YnriCv1nAtykxBrupQ'
secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
access_key3='NX5QOQKC6BH2IDN8HC7A'
secret_key3='LnEsqNNqZIpkzauboDcLXLcYaWwLQ3Kop0zAnKIn'
swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
bucket_name='myfoo'
bucket_name2='mybar'
# connect to rgw
connection = boto.s3.connection.S3Connection(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
is_secure=False,
port=endpoint.port,
host=endpoint.hostname,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
connection.auth_region_name='us-east-1'
connection2 = boto.s3.connection.S3Connection(
aws_access_key_id=access_key2,
aws_secret_access_key=secret_key2,
is_secure=False,
port=endpoint.port,
host=endpoint.hostname,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
connection2.auth_region_name='us-east-1'
connection3 = boto.s3.connection.S3Connection(
aws_access_key_id=access_key3,
aws_secret_access_key=secret_key3,
is_secure=False,
port=endpoint.port,
host=endpoint.hostname,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
connection3.auth_region_name='us-east-1'
acc = usage_acc()
rl = requestlog_queue(acc.generate_make_entry())
connection.set_request_hook(rl)
connection2.set_request_hook(rl)
connection3.set_request_hook(rl)
# legend (test cases can be easily grep-ed out)
# TESTCASE 'testname','object','method','operation','assertion'
# TESTCASE 'usage-show0' 'usage' 'show' 'all usage' 'succeeds'
(err, summary0) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True)
# TESTCASE 'info-nosuch','user','info','non-existent user','fails'
(err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
assert err
# TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
(err, out) = rgwadmin(ctx, client, [
'user', 'create',
'--uid', user1,
'--display-name', display_name1,
'--email', email,
'--access-key', access_key,
'--secret', secret_key,
'--max-buckets', '4'
],
check_status=True)
# TESTCASE 'duplicate email','user','create','existing user email','fails'
(err, out) = rgwadmin(ctx, client, [
'user', 'create',
'--uid', user2,
'--display-name', display_name2,
'--email', email,
])
assert err
# TESTCASE 'info-existing','user','info','existing user','returns correct info'
(err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
assert out['user_id'] == user1
assert out['email'] == email
assert out['display_name'] == display_name1
assert len(out['keys']) == 1
assert out['keys'][0]['access_key'] == access_key
assert out['keys'][0]['secret_key'] == secret_key
assert not out['suspended']
# TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
(err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
check_status=True)
# TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
(err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
assert out['suspended']
# TESTCASE 're-enable','user','enable','suspended user','succeeds'
(err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], check_status=True)
# TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
(err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
assert not out['suspended']
# TESTCASE 'add-keys','key','create','w/valid info','succeeds'
(err, out) = rgwadmin(ctx, client, [
'key', 'create', '--uid', user1,
'--access-key', access_key2, '--secret', secret_key2,
], check_status=True)
# TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
(err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1],
check_status=True)
assert len(out['keys']) == 2
assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
# TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
(err, out) = rgwadmin(ctx, client, [
'key', 'rm', '--uid', user1,
'--access-key', access_key2,
], check_status=True)
assert len(out['keys']) == 1
assert out['keys'][0]['access_key'] == access_key
assert out['keys'][0]['secret_key'] == secret_key
# TESTCASE 'add-swift-key','key','create','swift key','succeeds'
subuser_access = 'full'
subuser_perm = 'full-control'
(err, out) = rgwadmin(ctx, client, [
'subuser', 'create', '--subuser', subuser1,
'--access', subuser_access
], check_status=True)
# TESTCASE 'add-swift-key','key','create','swift key','succeeds'
(err, out) = rgwadmin(ctx, client, [
'subuser', 'modify', '--subuser', subuser1,
'--secret', swift_secret1,
'--key-type', 'swift',
], check_status=True)
# TESTCASE 'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds'
(err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
assert out['subusers'][0]['permissions'] == subuser_perm
# TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
(err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
assert len(out['swift_keys']) == 1
assert out['swift_keys'][0]['user'] == subuser1
assert out['swift_keys'][0]['secret_key'] == swift_secret1
# TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
(err, out) = rgwadmin(ctx, client, [
'subuser', 'create', '--subuser', subuser2,
'--secret', swift_secret2,
'--key-type', 'swift',
], check_status=True)
# TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
(err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
assert len(out['swift_keys']) == 2
assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
# TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
(err, out) = rgwadmin(ctx, client, [
'key', 'rm', '--subuser', subuser1,
'--key-type', 'swift',
], check_status=True)
assert len(out['swift_keys']) == 1
# TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
(err, out) = rgwadmin(ctx, client, [
'subuser', 'rm', '--subuser', subuser1,
], check_status=True)
assert len(out['subusers']) == 1
    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and its key are removed'
(err, out) = rgwadmin(ctx, client, [
'subuser', 'rm', '--subuser', subuser2,
'--key-type', 'swift', '--purge-keys',
], check_status=True)
assert len(out['swift_keys']) == 0
assert len(out['subusers']) == 0
# TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list'
(err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1],
check_status=True)
assert len(out) == 0
# TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
(err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
assert len(out) == 0
# create a first bucket
bucket = connection.create_bucket(bucket_name)
rl.log_and_clear("create_bucket", bucket_name, user1)
# TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
(err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
assert len(out) == 1
assert out[0] == bucket_name
bucket_list = connection.get_all_buckets()
assert len(bucket_list) == 1
assert bucket_list[0].name == bucket_name
rl.log_and_clear("list_buckets", '', user1)
# TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list'
(err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True)
assert len(out) >= 1
    assert bucket_name in out
    # TESTCASE 'max-bucket-limit','bucket','create','4 buckets','5th bucket fails due to max buckets == 4'
bucket2 = connection.create_bucket(bucket_name + '2')
rl.log_and_clear("create_bucket", bucket_name + '2', user1)
bucket3 = connection.create_bucket(bucket_name + '3')
rl.log_and_clear("create_bucket", bucket_name + '3', user1)
bucket4 = connection.create_bucket(bucket_name + '4')
rl.log_and_clear("create_bucket", bucket_name + '4', user1)
# the 5th should fail.
failed = False
try:
connection.create_bucket(bucket_name + '5')
except Exception:
failed = True
assert failed
rl.log_and_clear("create_bucket", bucket_name + '5', user1)
# delete the buckets
bucket2.delete()
rl.log_and_clear("delete_bucket", bucket_name + '2', user1)
bucket3.delete()
rl.log_and_clear("delete_bucket", bucket_name + '3', user1)
bucket4.delete()
rl.log_and_clear("delete_bucket", bucket_name + '4', user1)
# TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
(err, out) = rgwadmin(ctx, client, [
'bucket', 'stats', '--bucket', bucket_name], check_status=True)
assert out['owner'] == user1
bucket_id = out['id']
# TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
(err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], check_status=True)
assert len(out) == 1
assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
# use some space
key = boto.s3.key.Key(bucket)
key.set_contents_from_string('one')
rl.log_and_clear("put_obj", bucket_name, user1)
# TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
(err, out) = rgwadmin(ctx, client, [
'bucket', 'stats', '--bucket', bucket_name], check_status=True)
assert out['id'] == bucket_id
assert out['usage']['rgw.main']['num_objects'] == 1
assert out['usage']['rgw.main']['size_kb'] > 0
    # validate that user stats are now positive
(err, out) = rgwadmin(ctx, client,
['user', 'stats','--uid', user1, '--sync-stats'],
check_status=True)
assert out['stats']['size'] > 0
# reclaim it
key.delete()
rl.log_and_clear("delete_obj", bucket_name, user1)
    # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
(err, out) = rgwadmin(ctx, client,
['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name],
check_status=True)
# create a second user to link the bucket to
(err, out) = rgwadmin(ctx, client, [
'user', 'create',
'--uid', user2,
'--display-name', display_name2,
'--access-key', access_key2,
'--secret', secret_key2,
'--max-buckets', '1',
],
check_status=True)
# try creating an object with the first user before the bucket is relinked
denied = False
key = boto.s3.key.Key(bucket)
try:
key.set_contents_from_string('two')
except boto.exception.S3ResponseError:
denied = True
assert not denied
rl.log_and_clear("put_obj", bucket_name, user1)
# delete the object
key.delete()
rl.log_and_clear("delete_obj", bucket_name, user1)
# link the bucket to another user
(err, out) = rgwadmin(ctx, client, ['metadata', 'get', 'bucket:{n}'.format(n=bucket_name)],
check_status=True)
bucket_data = out['data']
assert bucket_data['bucket']['name'] == bucket_name
bucket_id = bucket_data['bucket']['bucket_id']
# link the bucket to another user
(err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name, '--bucket-id', bucket_id],
check_status=True)
# try to remove user, should fail (has a linked bucket)
(err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
assert err
# TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
(err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name],
check_status=True)
# relink the bucket to the first user and delete the second user
(err, out) = rgwadmin(ctx, client,
['bucket', 'link', '--uid', user1, '--bucket', bucket_name, '--bucket-id', bucket_id],
check_status=True)
(err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2],
check_status=True)
#TESTCASE 'bucket link', 'bucket', 'tenanted user', 'succeeds'
tenant_name = "testx"
# create a tenanted user to link the bucket to
(err, out) = rgwadmin(ctx, client, [
'user', 'create',
'--tenant', tenant_name,
'--uid', 'tenanteduser',
'--display-name', 'tenanted-user',
'--access-key', access_key2,
'--secret', secret_key2,
'--max-buckets', '1',
],
check_status=True)
# link the bucket to a tenanted user
(err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--bucket', '/' + bucket_name, '--tenant', tenant_name, '--uid', 'tenanteduser'],
check_status=True)
# check if the bucket name has tenant/ prefix
(err, out) = rgwadmin(ctx, client, ['metadata', 'get', 'bucket:{n}'.format(n= tenant_name + '/' + bucket_name)],
check_status=True)
bucket_data = out['data']
assert bucket_data['bucket']['name'] == bucket_name
assert bucket_data['bucket']['tenant'] == tenant_name
# relink the bucket to the first user and delete the tenanted user
(err, out) = rgwadmin(ctx, client,
['bucket', 'link', '--bucket', tenant_name + '/' + bucket_name, '--uid', user1],
check_status=True)
(err, out) = rgwadmin(ctx, client, ['user', 'rm', '--tenant', tenant_name, '--uid', 'tenanteduser'],
check_status=True)
# TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
# upload an object
object_name = 'four'
key = boto.s3.key.Key(bucket, object_name)
key.set_contents_from_string(object_name)
rl.log_and_clear("put_obj", bucket_name, user1)
# fetch it too (for usage stats presently)
s = key.get_contents_as_string(encoding='ascii')
rl.log_and_clear("get_obj", bucket_name, user1)
assert s == object_name
# list bucket too (for usage stats presently)
keys = list(bucket.list())
rl.log_and_clear("list_bucket", bucket_name, user1)
assert len(keys) == 1
assert keys[0].name == object_name
# now delete it
(err, out) = rgwadmin(ctx, client,
['object', 'rm', '--bucket', bucket_name, '--object', object_name],
check_status=True)
    # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, shows zero objects'
(err, out) = rgwadmin(ctx, client, [
'bucket', 'stats', '--bucket', bucket_name],
check_status=True)
assert out['id'] == bucket_id
assert out['usage']['rgw.main']['num_objects'] == 0
# list log objects
    # TESTCASE 'log-list','log','list','after activity','succeeds, lists log objects'
(err, out) = rgwadmin(ctx, client, ['log', 'list'], check_status=True)
assert len(out) > 0
for obj in out:
# TESTCASE 'log-show','log','show','after activity','returns expected info'
if obj[:4] == 'meta' or obj[:4] == 'data' or obj[:18] == 'obj_delete_at_hint':
continue
(err, rgwlog) = rgwadmin(ctx, client, ['log', 'show', '--object', obj],
check_status=True)
assert len(rgwlog) > 0
# skip any entry for which there is no bucket name--e.g., list_buckets,
# since that is valid but cannot pass the following checks
entry_bucket_name = rgwlog['bucket']
if entry_bucket_name.strip() != "":
# exempt bucket_name2 from checking as it was only used for multi-region tests
assert rgwlog['bucket'].find(bucket_name) == 0 or rgwlog['bucket'].find(bucket_name2) == 0
assert rgwlog['bucket'] != bucket_name or rgwlog['bucket_id'] == bucket_id
assert rgwlog['bucket_owner'] == user1 or rgwlog['bucket'] == bucket_name + '5' or rgwlog['bucket'] == bucket_name2
for entry in rgwlog['log_entries']:
                log.debug('checking log entry: %s', entry)
assert entry['bucket'] == rgwlog['bucket']
possible_buckets = [bucket_name + '5', bucket_name2]
user = entry['user']
assert user == user1 or user.endswith('system-user') or \
rgwlog['bucket'] in possible_buckets
# TESTCASE 'log-rm','log','rm','delete log objects','succeeds'
(err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj],
check_status=True)
# TODO: show log by bucket+date
# TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
(err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
check_status=True)
# TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
denied = False
try:
key = boto.s3.key.Key(bucket)
key.set_contents_from_string('five')
except boto.exception.S3ResponseError as e:
denied = True
assert e.status == 403
assert denied
rl.log_and_clear("put_obj", bucket_name, user1)
# TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
(err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1],
check_status=True)
# TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
key = boto.s3.key.Key(bucket)
key.set_contents_from_string('six')
rl.log_and_clear("put_obj", bucket_name, user1)
# TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection'
# create an object large enough to be split into multiple parts
test_string = 'foo'*10000000
big_key = boto.s3.key.Key(bucket)
big_key.set_contents_from_string(test_string)
rl.log_and_clear("put_obj", bucket_name, user1)
# now delete the head
big_key.delete()
rl.log_and_clear("delete_obj", bucket_name, user1)
# wait a bit to give the garbage collector time to cycle
time.sleep(15)
(err, out) = rgwadmin(ctx, client, ['gc', 'list', '--include-all'])
assert len(out) > 0
# TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage'
(err, out) = rgwadmin(ctx, client, ['gc', 'process'], check_status=True)
#confirm
(err, out) = rgwadmin(ctx, client, ['gc', 'list', '--include-all'])
# don't assume rgw_gc_obj_min_wait has been overridden
omit_tdir = hasattr(ctx.rgw, 'omit_tdir') and ctx.rgw.omit_tdir == True
    if not omit_tdir:
assert len(out) == 0
# TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
(err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
assert err
# delete should fail because ``key`` still exists
try:
bucket.delete()
except boto.exception.S3ResponseError as e:
assert e.status == 409
rl.log_and_clear("delete_bucket", bucket_name, user1)
key.delete()
rl.log_and_clear("delete_obj", bucket_name, user1)
bucket.delete()
rl.log_and_clear("delete_bucket", bucket_name, user1)
# TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
bucket = connection.create_bucket(bucket_name)
rl.log_and_clear("create_bucket", bucket_name, user1)
# create an object
key = boto.s3.key.Key(bucket)
key.set_contents_from_string('seven')
rl.log_and_clear("put_obj", bucket_name, user1)
# should be private already but guarantee it
key.set_acl('private')
rl.log_and_clear("put_acls", bucket_name, user1)
(err, out) = rgwadmin(ctx, client,
['policy', '--bucket', bucket.name, '--object', key.key.decode()],
check_status=True, format='xml')
acl = get_acl(key)
rl.log_and_clear("get_acls", bucket_name, user1)
assert acl == out.strip('\n')
# add another grantee by making the object public read
key.set_acl('public-read')
rl.log_and_clear("put_acls", bucket_name, user1)
(err, out) = rgwadmin(ctx, client,
['policy', '--bucket', bucket.name, '--object', key.key.decode()],
check_status=True, format='xml')
acl = get_acl(key)
rl.log_and_clear("get_acls", bucket_name, user1)
assert acl == out.strip('\n')
# TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
bucket = connection.create_bucket(bucket_name)
rl.log_and_clear("create_bucket", bucket_name, user1)
key_name = ['eight', 'nine', 'ten', 'eleven']
for i in range(4):
key = boto.s3.key.Key(bucket)
key.set_contents_from_string(key_name[i])
rl.log_and_clear("put_obj", bucket_name, user1)
(err, out) = rgwadmin(ctx, client,
['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'],
check_status=True)
# TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
caps='user=read'
(err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps])
assert out['caps'][0]['perm'] == 'read'
# TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
(err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps])
assert not out['caps']
# TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
bucket = connection.create_bucket(bucket_name)
rl.log_and_clear("create_bucket", bucket_name, user1)
key = boto.s3.key.Key(bucket)
(err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
assert err
# TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
bucket = connection.create_bucket(bucket_name)
rl.log_and_clear("create_bucket", bucket_name, user1)
key = boto.s3.key.Key(bucket)
key.set_contents_from_string('twelve')
rl.log_and_clear("put_obj", bucket_name, user1)
time.sleep(35)
# need to wait for all usage data to get flushed, should take up to 30 seconds
timestamp = time.time()
    while time.time() - timestamp <= (20 * 60):  # wait up to 20 minutes
(err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj']) # one of the operations we did is delete_obj, should be present.
if get_user_successful_ops(out, user1) > 0:
break
time.sleep(1)
assert time.time() - timestamp <= (20 * 60)
# TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
(err, out) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True)
assert len(out['entries']) > 0
assert len(out['summary']) > 0
r = acc.compare_results(out)
if len(r) != 0:
sys.stderr.write(("\n".join(r))+"\n")
assert(len(r) == 0)
user_summary = get_user_summary(out, user1)
total = user_summary['total']
assert total['successful_ops'] > 0
# TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
(err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
check_status=True)
assert len(out['entries']) > 0
assert len(out['summary']) > 0
user_summary = out['summary'][0]
for entry in user_summary['categories']:
assert entry['successful_ops'] > 0
assert user_summary['user'] == user1
# TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
for cat in test_categories:
(err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat],
check_status=True)
assert len(out['summary']) > 0
user_summary = out['summary'][0]
assert user_summary['user'] == user1
assert len(user_summary['categories']) == 1
entry = user_summary['categories'][0]
assert entry['category'] == cat
assert entry['successful_ops'] > 0
# TESTCASE 'user-rename', 'user', 'rename', 'existing user', 'new user', 'succeeds'
# create a new user user3
(err, out) = rgwadmin(ctx, client, [
'user', 'create',
'--uid', user3,
'--display-name', display_name3,
'--access-key', access_key3,
'--secret', secret_key3,
'--max-buckets', '4'
],
check_status=True)
# create a bucket
bucket = connection3.create_bucket(bucket_name + '6')
rl.log_and_clear("create_bucket", bucket_name + '6', user3)
# create object
object_name1 = 'thirteen'
key1 = boto.s3.key.Key(bucket, object_name1)
key1.set_contents_from_string(object_name1)
rl.log_and_clear("put_obj", bucket_name + '6', user3)
# rename user3
(err, out) = rgwadmin(ctx, client, ['user', 'rename', '--uid', user3, '--new-uid', user4], check_status=True)
assert out['user_id'] == user4
assert out['keys'][0]['access_key'] == access_key3
assert out['keys'][0]['secret_key'] == secret_key3
time.sleep(5)
# get bucket and object to test if user keys are preserved
bucket = connection3.get_bucket(bucket_name + '6')
s = key1.get_contents_as_string(encoding='ascii')
rl.log_and_clear("get_obj", bucket_name + '6', user4)
assert s == object_name1
# TESTCASE 'user-rename', 'user', 'rename', 'existing user', 'another existing user', 'fails'
# create a new user user2
(err, out) = rgwadmin(ctx, client, [
'user', 'create',
'--uid', user2,
'--display-name', display_name2,
'--access-key', access_key2,
'--secret', secret_key2,
'--max-buckets', '4'
],
check_status=True)
# create a bucket
bucket = connection2.create_bucket(bucket_name + '7')
rl.log_and_clear("create_bucket", bucket_name + '7', user2)
# create object
object_name2 = 'fourteen'
key2 = boto.s3.key.Key(bucket, object_name2)
key2.set_contents_from_string(object_name2)
rl.log_and_clear("put_obj", bucket_name + '7', user2)
(err, out) = rgwadmin(ctx, client, ['user', 'rename', '--uid', user4, '--new-uid', user2])
assert err
# test if user 2 and user4 can still access their bucket and objects after rename fails
bucket = connection3.get_bucket(bucket_name + '6')
s = key1.get_contents_as_string(encoding='ascii')
rl.log_and_clear("get_obj", bucket_name + '6', user4)
assert s == object_name1
bucket = connection2.get_bucket(bucket_name + '7')
s = key2.get_contents_as_string(encoding='ascii')
rl.log_and_clear("get_obj", bucket_name + '7', user2)
assert s == object_name2
(err, out) = rgwadmin(ctx, client,
['user', 'rm', '--uid', user4, '--purge-data' ],
check_status=True)
(err, out) = rgwadmin(ctx, client,
['user', 'rm', '--uid', user2, '--purge-data' ],
check_status=True)
time.sleep(5)
# should be all through with connection. (anything using connection
# should be BEFORE the usage stuff above.)
rl.log_and_clear("(before-close)", '-', '-', ignore_this_entry)
connection.close()
connection = None
    # the usage flush interval is 30 seconds; wait that long and then some
    # to make sure everything has been flushed
time.sleep(35)
# TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
(err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1],
check_status=True)
(err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
check_status=True)
assert len(out['entries']) == 0
assert len(out['summary']) == 0
(err, out) = rgwadmin(ctx, client,
['user', 'rm', '--uid', user1, '--purge-data' ],
check_status=True)
# TESTCASE 'rm-user3','user','rm','deleted user','fails'
(err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
assert err
# TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule'
(err, out) = rgwadmin(ctx, client, ['zone', 'get','--rgw-zone','default'])
orig_placement_pools = len(out['placement_pools'])
    # this test was removed: it is not correct to assume that the zone has a
    # default placement rule, since that depends on how the zone was set up
#
# assert len(out) > 0
# assert len(out['placement_pools']) == 1
# default_rule = out['placement_pools'][0]
# assert default_rule['key'] == 'default-placement'
rule={'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}}
out['placement_pools'].append(rule)
(err, out) = rgwadmin(ctx, client, ['zone', 'set'],
stdin=StringIO(json.dumps(out)),
check_status=True)
(err, out) = rgwadmin(ctx, client, ['zone', 'get'])
assert len(out) > 0
assert len(out['placement_pools']) == orig_placement_pools + 1
zonecmd = ['zone', 'placement', 'rm',
'--rgw-zone', 'default',
'--placement-id', 'new-placement']
(err, out) = rgwadmin(ctx, client, zonecmd, check_status=True)
# TESTCASE 'zonegroup-info', 'zonegroup', 'get', 'get zonegroup info', 'succeeds'
(err, out) = rgwadmin(ctx, client, ['zonegroup', 'get'], check_status=True)
from teuthology.config import config
from teuthology.orchestra import cluster
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--uid')
parser.add_argument('--host', required=True)
parser.add_argument('--port', type=int)
args = parser.parse_args()
host = args.host
if args.port:
port = args.port
else:
port = 80
client0 = tasks.vstart_runner.LocalRemote()
ctx = config
ctx.cluster=cluster.Cluster(remotes=[(client0,
[ 'ceph.client.rgw.%s' % (port), ]),])
ctx.rgw = argparse.Namespace()
endpoints = {}
endpoints['ceph.client.rgw.%s' % port] = RGWEndpoint(
hostname=host,
port=port)
ctx.rgw.role_endpoints = endpoints
ctx.rgw.realm = None
ctx.rgw.regions = {'region0': { 'api name': 'api1',
'is master': True, 'master zone': 'r0z0',
'zones': ['r0z0', 'r0z1'] }}
ctx.rgw.omit_sudo = True
ctx.rgw.omit_tdir = True
ctx.rgw.config = {'ceph.client.rgw.%s' % port: {'system user': {'name': '%s-system-user' % port}}}
    task(ctx, None)
    sys.exit()
if __name__ == '__main__':
main()
| 43,078 | 36.492602 | 157 |
py
|
null |
ceph-main/qa/tasks/radosgw_admin_rest.py
|
"""
Run a series of rgw admin commands through the rest interface.
The test cases in this file have been annotated for inventory.
To extract the inventory (in csv format) use the command:
grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
"""
import logging
import boto.exception
import boto.s3.connection
import boto.s3.acl
import requests
import time
from boto.connection import AWSAuthConnection
from teuthology import misc as teuthology
from tasks.util.rgw import get_user_summary, get_user_successful_ops, rgwadmin
log = logging.getLogger(__name__)
def rgwadmin_rest(connection, cmd, params=None, headers=None, raw=False):
"""
perform a rest command
"""
log.info('radosgw-admin-rest: %s %s' % (cmd, params))
put_cmds = ['create', 'link', 'add']
post_cmds = ['unlink', 'modify']
delete_cmds = ['trim', 'rm', 'process']
get_cmds = ['check', 'info', 'show', 'list', '']
bucket_sub_resources = ['object', 'policy', 'index']
user_sub_resources = ['subuser', 'key', 'caps']
zone_sub_resources = ['pool', 'log', 'garbage']
def get_cmd_method_and_handler(cmd):
"""
Get the rest command and handler from information in cmd and
from the imported requests object.
"""
if cmd[1] in put_cmds:
return 'PUT', requests.put
elif cmd[1] in delete_cmds:
return 'DELETE', requests.delete
elif cmd[1] in post_cmds:
return 'POST', requests.post
elif cmd[1] in get_cmds:
return 'GET', requests.get
def get_resource(cmd):
"""
Get the name of the resource from information in cmd.
"""
if cmd[0] == 'bucket' or cmd[0] in bucket_sub_resources:
if cmd[0] == 'bucket':
return 'bucket', ''
else:
return 'bucket', cmd[0]
elif cmd[0] == 'user' or cmd[0] in user_sub_resources:
if cmd[0] == 'user':
return 'user', ''
else:
return 'user', cmd[0]
elif cmd[0] == 'usage':
return 'usage', ''
elif cmd[0] == 'info':
return 'info', ''
elif cmd[0] == 'ratelimit':
return 'ratelimit', ''
elif cmd[0] == 'zone' or cmd[0] in zone_sub_resources:
if cmd[0] == 'zone':
return 'zone', ''
else:
return 'zone', cmd[0]
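
    # For example (illustrative), cmd == ['user', 'info'] resolves to a GET
    # against the 'user' resource, i.e. an HTTP request on /admin/user.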
def build_admin_request(conn, method, resource = '', headers=None, data='',
query_args=None, params=None):
"""
Build an administative request adapted from the build_request()
method of boto.connection
"""
path = conn.calling_format.build_path_base('admin', resource)
auth_path = conn.calling_format.build_auth_path('admin', resource)
host = conn.calling_format.build_host(conn.server_name(), 'admin')
if query_args:
path += '?' + query_args
boto.log.debug('path=%s' % path)
auth_path += '?' + query_args
boto.log.debug('auth_path=%s' % auth_path)
return AWSAuthConnection.build_base_http_request(conn, method, path,
auth_path, params, headers, data, host)
method, handler = get_cmd_method_and_handler(cmd)
resource, query_args = get_resource(cmd)
request = build_admin_request(connection, method, resource,
query_args=query_args, headers=headers)
url = '{protocol}://{host}{path}'.format(protocol=request.protocol,
host=request.host, path=request.path)
request.authorize(connection=connection)
result = handler(url, params=params, headers=request.headers)
if raw:
log.info(' text result: %s' % result.text)
return result.status_code, result.text
elif len(result.content) == 0:
# many admin requests return no body, so json() throws a JSONDecodeError
log.info(' empty result')
return result.status_code, None
else:
log.info(' json result: %s' % result.json())
return result.status_code, result.json()
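
# Example (illustrative): querying user metadata through the admin REST API;
# `admin_conn` stands for a boto S3Connection whose credentials carry admin
# caps, as set up in task() below.
#
#   (status, body) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': 'foo'})
#   assert status == 200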
def task(ctx, config):
"""
Test radosgw-admin functionality through the RESTful interface
"""
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task s3tests only supports a list or dictionary for configuration"
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
clients = config.keys()
# just use the first client...
client = next(iter(clients))
##
admin_user = 'ada'
admin_display_name = 'Ms. Admin User'
admin_access_key = 'MH1WC2XQ1S8UISFDZC8W'
admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG'
admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write; info=read;ratelimit=read, write'
user1 = 'foo'
user2 = 'fud'
ratelimit_user = 'ratelimit_user'
subuser1 = 'foo:foo1'
subuser2 = 'foo:foo2'
display_name1 = 'Foo'
display_name2 = 'Fud'
email = '[email protected]'
access_key = '9te6NH5mcdcq0Tc5i8i1'
secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
access_key2 = 'p5YnriCv1nAtykxBrupQ'
secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
bucket_name = 'myfoo'
# legend (test cases can be easily grep-ed out)
# TESTCASE 'testname','object','method','operation','assertion'
# TESTCASE 'create-admin-user','user','create','administrative user','succeeds'
(err, out) = rgwadmin(ctx, client, [
'user', 'create',
'--uid', admin_user,
'--display-name', admin_display_name,
'--access-key', admin_access_key,
'--secret', admin_secret_key,
'--max-buckets', '0',
'--caps', admin_caps
])
logging.error(out)
logging.error(err)
assert not err
assert hasattr(ctx, 'rgw'), 'radosgw-admin-rest must run after the rgw task'
endpoint = ctx.rgw.role_endpoints.get(client)
assert endpoint, 'no rgw endpoint for {}'.format(client)
admin_conn = boto.s3.connection.S3Connection(
aws_access_key_id=admin_access_key,
aws_secret_access_key=admin_secret_key,
is_secure=True if endpoint.cert else False,
port=endpoint.port,
host=endpoint.hostname,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
# TESTCASE 'info-nosuch','user','info','non-existent user','fails'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1})
assert ret == 404
# TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
(ret, out) = rgwadmin_rest(admin_conn,
['user', 'create'],
{'uid' : user1,
'display-name' : display_name1,
'email' : email,
'access-key' : access_key,
'secret-key' : secret_key,
'max-buckets' : '4'
})
assert ret == 200
# TESTCASE 'list-no-user','user','list','list user keys','user list object'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'list' : '', 'max-entries' : 0})
assert ret == 200
assert out['count'] == 0
assert out['truncated'] == True
assert len(out['keys']) == 0
assert len(out['marker']) > 0
# TESTCASE 'list-user-without-marker','user','list','list user keys','user list object'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'list' : '', 'max-entries' : 1})
assert ret == 200
assert out['count'] == 1
assert out['truncated'] == True
assert len(out['keys']) == 1
assert len(out['marker']) > 0
marker = out['marker']
# TESTCASE 'list-user-with-marker','user','list','list user keys','user list object'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'list' : '', 'max-entries' : 1, 'marker': marker})
assert ret == 200
assert out['count'] == 1
assert out['truncated'] == False
assert len(out['keys']) == 1
# TESTCASE 'info-existing','user','info','existing user','returns correct info'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert out['user_id'] == user1
assert out['email'] == email
assert out['display_name'] == display_name1
assert len(out['keys']) == 1
assert out['keys'][0]['access_key'] == access_key
assert out['keys'][0]['secret_key'] == secret_key
assert not out['suspended']
assert out['tenant'] == ''
assert out['max_buckets'] == 4
assert out['caps'] == []
assert out['op_mask'] == 'read, write, delete'
assert out['default_placement'] == ''
assert out['default_storage_class'] == ''
assert out['placement_tags'] == []
assert not out['bucket_quota']['enabled']
assert not out['bucket_quota']['check_on_raw']
assert out['bucket_quota']['max_size'] == -1
assert out['bucket_quota']['max_size_kb'] == 0
assert out['bucket_quota']['max_objects'] == -1
assert not out['user_quota']['enabled']
assert not out['user_quota']['check_on_raw']
assert out['user_quota']['max_size'] == -1
assert out['user_quota']['max_size_kb'] == 0
assert out['user_quota']['max_objects'] == -1
assert out['temp_url_keys'] == []
assert out['type'] == 'rgw'
assert out['mfa_ids'] == []
# TESTCASE 'info-existing','user','info','existing user query with wrong uid but correct access key','returns correct info'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'access-key' : access_key, 'uid': 'uid_not_exist'})
assert out['user_id'] == user1
assert out['email'] == email
assert out['display_name'] == display_name1
assert len(out['keys']) == 1
assert out['keys'][0]['access_key'] == access_key
assert out['keys'][0]['secret_key'] == secret_key
assert not out['suspended']
assert out['tenant'] == ''
assert out['max_buckets'] == 4
assert out['caps'] == []
assert out['op_mask'] == "read, write, delete"
assert out['default_placement'] == ''
assert out['default_storage_class'] == ''
assert out['placement_tags'] == []
assert not out['bucket_quota']['enabled']
assert not out['bucket_quota']['check_on_raw']
    assert out['bucket_quota']['max_size'] == -1
    assert out['bucket_quota']['max_size_kb'] == 0
    assert out['bucket_quota']['max_objects'] == -1
assert not out['user_quota']['enabled']
assert not out['user_quota']['check_on_raw']
assert out['user_quota']['max_size'] == -1
assert out['user_quota']['max_size_kb'] == 0
assert out['user_quota']['max_objects'] == -1
assert out['temp_url_keys'] == []
assert out['type'] == 'rgw'
assert out['mfa_ids'] == []
# TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
assert ret == 200
# TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert ret == 200
assert out['suspended']
assert out['email'] == email
# TESTCASE 're-enable','user','enable','suspended user','succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
    assert ret == 200
# TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert ret == 200
assert not out['suspended']
# TESTCASE 'add-keys','key','create','w/valid info','succeeds'
(ret, out) = rgwadmin_rest(admin_conn,
['key', 'create'],
{'uid' : user1,
'access-key' : access_key2,
'secret-key' : secret_key2
})
assert ret == 200
# TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert ret == 200
assert len(out['keys']) == 2
assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
# TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
(ret, out) = rgwadmin_rest(admin_conn,
['key', 'rm'],
{'uid' : user1,
'access-key' : access_key2
})
assert ret == 200
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert len(out['keys']) == 1
assert out['keys'][0]['access_key'] == access_key
assert out['keys'][0]['secret_key'] == secret_key
# TESTCASE 'add-swift-key','key','create','swift key','succeeds'
(ret, out) = rgwadmin_rest(admin_conn,
['subuser', 'create'],
{'subuser' : subuser1,
'secret-key' : swift_secret1,
'key-type' : 'swift'
})
assert ret == 200
# TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert ret == 200
assert len(out['swift_keys']) == 1
assert out['swift_keys'][0]['user'] == subuser1
assert out['swift_keys'][0]['secret_key'] == swift_secret1
# TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
(ret, out) = rgwadmin_rest(admin_conn,
['subuser', 'create'],
{'subuser' : subuser2,
'secret-key' : swift_secret2,
'key-type' : 'swift'
})
assert ret == 200
# TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert ret == 200
assert len(out['swift_keys']) == 2
assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
# TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
(ret, out) = rgwadmin_rest(admin_conn,
['key', 'rm'],
{'subuser' : subuser1,
'key-type' :'swift'
})
assert ret == 200
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert len(out['swift_keys']) == 1
# TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
(ret, out) = rgwadmin_rest(admin_conn,
['subuser', 'rm'],
{'subuser' : subuser1
})
assert ret == 200
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert len(out['subusers']) == 1
    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and its key are removed'
(ret, out) = rgwadmin_rest(admin_conn,
['subuser', 'rm'],
{'subuser' : subuser2,
'key-type' : 'swift',
             'purge-keys' : True
})
assert ret == 200
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert len(out['swift_keys']) == 0
assert len(out['subusers']) == 0
# TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list'
(ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
assert ret == 200
assert len(out) == 0
# connect to rgw
connection = boto.s3.connection.S3Connection(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
is_secure=True if endpoint.cert else False,
port=endpoint.port,
host=endpoint.hostname,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
# TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
(ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
assert ret == 200
assert len(out) == 0
# create a first bucket
bucket = connection.create_bucket(bucket_name)
# TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
(ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
assert ret == 200
assert len(out) == 1
assert out[0] == bucket_name
# TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
(ret, out) = rgwadmin_rest(admin_conn,
['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
assert ret == 200
assert out['owner'] == user1
assert out['tenant'] == ''
bucket_id = out['id']
# TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
(ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
assert ret == 200
assert len(out) == 1
assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
# use some space
key = boto.s3.key.Key(bucket)
key.set_contents_from_string('one')
# TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
(ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
assert ret == 200
assert out['id'] == bucket_id
assert out['usage']['rgw.main']['num_objects'] == 1
assert out['usage']['rgw.main']['size_kb'] > 0
    # TESTCASE 'bucket-stats6', 'bucket', 'stats', 'non-existent bucket', 'fails', 'bucket not found error'
(ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : 'doesnotexist'})
assert ret == 404
assert out['Code'] == 'NoSuchBucket'
# reclaim it
key.delete()
    # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
(ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {'uid' : user1, 'bucket' : bucket_name})
assert ret == 200
# create a second user to link the bucket to
(ret, out) = rgwadmin_rest(admin_conn,
['user', 'create'],
{'uid' : user2,
'display-name' : display_name2,
'access-key' : access_key2,
'secret-key' : secret_key2,
'max-buckets' : '1',
})
assert ret == 200
# try creating an object with the first user before the bucket is relinked
denied = False
key = boto.s3.key.Key(bucket)
try:
key.set_contents_from_string('two')
except boto.exception.S3ResponseError:
denied = True
assert not denied
# delete the object
key.delete()
# link the bucket to another user
(ret, out) = rgwadmin_rest(admin_conn,
['bucket', 'link'],
{'uid' : user2,
'bucket' : bucket_name,
'bucket-id' : bucket_id,
})
assert ret == 200
# try creating an object with the first user which should cause an error
key = boto.s3.key.Key(bucket)
try:
key.set_contents_from_string('three')
except boto.exception.S3ResponseError:
denied = True
assert denied
# relink the bucket to the first user and delete the second user
(ret, out) = rgwadmin_rest(admin_conn,
['bucket', 'link'],
{'uid' : user1,
'bucket' : bucket_name,
'bucket-id' : bucket_id,
})
assert ret == 200
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user2})
assert ret == 200
# TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
# upload an object
object_name = 'four'
key = boto.s3.key.Key(bucket, object_name)
key.set_contents_from_string(object_name)
# now delete it
(ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {'bucket' : bucket_name, 'object' : object_name})
assert ret == 200
    # TESTCASE 'bucket-stats7','bucket','stats','after deleting key','succeeds, shows zero objects'
(ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
assert ret == 200
assert out['id'] == bucket_id
assert out['usage']['rgw.main']['num_objects'] == 0
# create a bucket for deletion stats
useless_bucket = connection.create_bucket('useless-bucket')
useless_key = useless_bucket.new_key('useless_key')
useless_key.set_contents_from_string('useless string')
# delete it
useless_key.delete()
useless_bucket.delete()
# wait for the statistics to flush
time.sleep(60)
# need to wait for all usage data to get flushed, should take up to 30 seconds
timestamp = time.time()
while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
(ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'categories' : 'delete_obj'}) # last operation we did is delete obj, wait for it to flush
if get_user_successful_ops(out, user1) > 0:
break
time.sleep(1)
assert time.time() - timestamp <= (20 * 60)
# TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'])
assert ret == 200
assert len(out['entries']) > 0
assert len(out['summary']) > 0
user_summary = get_user_summary(out, user1)
total = user_summary['total']
assert total['successful_ops'] > 0
# TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
assert ret == 200
assert len(out['entries']) > 0
assert len(out['summary']) > 0
user_summary = out['summary'][0]
for entry in user_summary['categories']:
assert entry['successful_ops'] > 0
assert user_summary['user'] == user1
# TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
for cat in test_categories:
(ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1, 'categories' : cat})
assert ret == 200
assert len(out['summary']) > 0
user_summary = out['summary'][0]
assert user_summary['user'] == user1
assert len(user_summary['categories']) == 1
entry = user_summary['categories'][0]
assert entry['category'] == cat
assert entry['successful_ops'] > 0
# TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
(ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid' : user1})
assert ret == 200
(ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
assert ret == 200
assert len(out['entries']) == 0
assert len(out['summary']) == 0
# TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
assert ret == 200
# TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
try:
key = boto.s3.key.Key(bucket)
key.set_contents_from_string('five')
except boto.exception.S3ResponseError as e:
assert e.status == 403
# TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
assert ret == 200
# TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
key = boto.s3.key.Key(bucket)
key.set_contents_from_string('six')
# TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection'
# create an object large enough to be split into multiple parts
test_string = 'foo'*10000000
big_key = boto.s3.key.Key(bucket)
big_key.set_contents_from_string(test_string)
# now delete the head
big_key.delete()
# TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
assert ret == 409
# delete should fail because ``key`` still exists
try:
bucket.delete()
except boto.exception.S3ResponseError as e:
assert e.status == 409
key.delete()
bucket.delete()
# TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
bucket = connection.create_bucket(bucket_name)
# create an object
key = boto.s3.key.Key(bucket)
key.set_contents_from_string('seven')
# should be private already but guarantee it
key.set_acl('private')
(ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
assert ret == 200
assert len(out['acl']['grant_map']) == 1
# add another grantee by making the object public read
key.set_acl('public-read')
(ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
assert ret == 200
assert len(out['acl']['grant_map']) == 2
# TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
bucket = connection.create_bucket(bucket_name)
key_name = ['eight', 'nine', 'ten', 'eleven']
for i in range(4):
    key = boto.s3.key.Key(bucket)
    key.set_contents_from_string(key_name[i])
(ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {'bucket' : bucket_name, 'purge-objects' : True})
assert ret == 200
# TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
caps = 'usage=read'
(ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {'uid' : user1, 'user-caps' : caps})
assert ret == 200
assert out[0]['perm'] == 'read'
# TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {'uid' : user1, 'user-caps' : caps})
assert ret == 200
assert not out
# TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
bucket = connection.create_bucket(bucket_name)
key = boto.s3.key.Key(bucket)
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
assert ret == 409
# TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
bucket = connection.create_bucket(bucket_name)
key = boto.s3.key.Key(bucket)
key.set_contents_from_string('twelve')
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1, 'purge-data' : True})
assert ret == 200
# TESTCASE 'rm-user3','user','info','deleted user','fails'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert ret == 404
# TESTCASE 'info' 'display info' 'succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['info', ''])
assert ret == 200
info = out['info']
backends = info['storage_backends']
name = backends[0]['name']
fsid = backends[0]['cluster_id']
# name is always "rados" at time of writing, but zipper would allow
# other backends, at some point
assert len(name) > 0
# fsid is a uuid, but I'm not going to try to parse it
assert len(fsid) > 0
# TESTCASE 'ratelimit' 'user' 'info' 'succeeds'
(ret, out) = rgwadmin_rest(admin_conn,
                           ['user', 'create'],
                           {'uid' : ratelimit_user,
                            'display-name' : display_name1,
                            'email' : email,
                            'access-key' : access_key,
                            'secret-key' : secret_key,
                            'max-buckets' : '1000'
                           })
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'info'], {'ratelimit-scope' : 'user', 'uid' : ratelimit_user})
assert ret == 200
# TESTCASE 'ratelimit' 'user' 'info' 'not existing user' 'fails'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'info'], {'ratelimit-scope' : 'user', 'uid' : ratelimit_user + 'string'})
assert ret == 404
# TESTCASE 'ratelimit' 'user' 'info' 'uid not specified' 'fails'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'info'], {'ratelimit-scope' : 'user'})
assert ret == 400
# TESTCASE 'ratelimit' 'bucket' 'info' 'succeeds'
ratelimit_bucket = 'ratelimitbucket'
connection.create_bucket(ratelimit_bucket)
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'info'], {'ratelimit-scope' : 'bucket', 'bucket' : ratelimit_bucket})
assert ret == 200
# TESTCASE 'ratelimit' 'bucket' 'info' 'not existing bucket' 'fails'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'info'], {'ratelimit-scope' : 'bucket', 'bucket' : ratelimit_bucket + 'string'})
assert ret == 404
# TESTCASE 'ratelimit' 'bucket' 'info' 'bucket not specified' 'fails'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'info'], {'ratelimit-scope' : 'bucket'})
assert ret == 400
# TESTCASE 'ratelimit' 'global' 'info' 'succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'info'], {'global' : 'true'})
assert ret == 200
# TESTCASE 'ratelimit' 'user' 'modify' 'not existing user' 'fails'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'modify'], {'ratelimit-scope' : 'user', 'uid' : ratelimit_user + 'string', 'enabled' : 'true'})
assert ret == 404
# TESTCASE 'ratelimit' 'user' 'modify' 'uid not specified' 'fails'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'modify'], {'ratelimit-scope' : 'user'})
assert ret == 400
# TESTCASE 'ratelimit' 'bucket' 'modify' 'not existing bucket' 'fails'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'modify'], {'ratelimit-scope' : 'bucket', 'bucket' : ratelimit_bucket + 'string', 'enabled' : 'true'})
assert ret == 404
# TESTCASE 'ratelimit' 'bucket' 'modify' 'bucket not specified' 'fails'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'modify'], {'ratelimit-scope' : 'bucket', 'enabled' : 'true'})
assert ret == 400
# TESTCASE 'ratelimit' 'user' 'modify' 'enabled' 'max-read-bytes = 2' 'succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'modify'], {'ratelimit-scope' : 'user', 'uid' : ratelimit_user, 'enabled' : 'true', 'max-read-bytes' : '2'})
assert ret == 200
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'info'], {'ratelimit-scope' : 'user', 'uid' : ratelimit_user})
assert ret == 200
user_ratelimit = out['user_ratelimit']
assert user_ratelimit['enabled'] == True
assert user_ratelimit['max_read_bytes'] == 2
# TESTCASE 'ratelimit' 'bucket' 'modify' 'enabled' 'max-write-bytes = 2' 'succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'modify'], {'ratelimit-scope' : 'bucket', 'bucket' : ratelimit_bucket, 'enabled' : 'true', 'max-write-bytes' : '2'})
assert ret == 200
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'info'], {'ratelimit-scope' : 'bucket', 'bucket' : ratelimit_bucket})
assert ret == 200
bucket_ratelimit = out['bucket_ratelimit']
assert bucket_ratelimit['enabled'] == True
assert bucket_ratelimit['max_write_bytes'] == 2
# TESTCASE 'ratelimit' 'global' 'modify' 'anonymous' 'enabled' 'succeeds'
(ret, out) = rgwadmin_rest(admin_conn, ['ratelimit', 'modify'], {'ratelimit-scope' : 'bucket', 'global': 'true', 'enabled' : 'true'})
assert ret == 200
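# Illustrative sketch: the calls above all follow one pattern, where
# rgwadmin_rest(conn, [section, action], params) returns an
# (http_status, decoded_json_body) pair. A tiny hypothetical helper like
# this would collapse the repeated status asserts (the helper itself is
# not part of the admin REST API):
def _expect(status, ret_out):
    """Assert the HTTP status of an (http_status, body) pair; return body."""
    ret, out = ret_out
    assert ret == status, 'expected HTTP {0}, got {1}'.format(status, ret)
    return out
# usage sketch:
#   out = _expect(200, rgwadmin_rest(admin_conn, ['ratelimit', 'info'],
#                                    {'global': 'true'}))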
ceph-main/qa/tasks/ragweed.py
"""
Run a set of s3 tests on rgw.
"""
from io import BytesIO
from configobj import ConfigObj
import base64
import contextlib
import logging
import os
import random
import string
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
log = logging.getLogger(__name__)
def get_ragweed_branches(config, client_conf):
"""
figure out the ragweed branch according to the per-client settings
use force-branch is specified, and fall back to the ones deduced using ceph
branch under testing
"""
force_branch = client_conf.get('force-branch', None)
if force_branch:
return [force_branch]
else:
S3_BRANCHES = ['master', 'nautilus', 'mimic',
'luminous', 'kraken', 'jewel']
ceph_branch = config.get('branch')
suite_branch = config.get('suite_branch', ceph_branch)
if suite_branch in S3_BRANCHES:
branch = client_conf.get('branch', 'ceph-' + suite_branch)
else:
branch = client_conf.get('branch', suite_branch)
default_branch = client_conf.get('default-branch', None)
if default_branch:
return [branch, default_branch]
else:
return [branch]
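# A sketch of how the resolution above plays out (branch names illustrative):
#   force-branch: wip-foo                         -> ['wip-foo']
#   suite_branch 'nautilus' (in S3_BRANCHES)      -> ['ceph-nautilus']
#   suite_branch 'main' + default-branch 'master' -> ['main', 'master']
# The first branch in the list that clones successfully wins (see download()).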
def get_ragweed_dir(testdir, client):
return '{}/ragweed.{}'.format(testdir, client)
@contextlib.contextmanager
def download(ctx, config):
"""
    Download ragweed from its git repository.
    Remove the downloaded ragweed directory upon exit.
The context passed in should be identical to the context
passed in to the main task.
"""
assert isinstance(config, dict)
log.info('Downloading ragweed...')
testdir = teuthology.get_testdir(ctx)
for (client, cconf) in config.items():
ragweed_dir = get_ragweed_dir(testdir, client)
ragweed_repo = ctx.config.get('ragweed_repo',
teuth_config.ceph_git_base_url + 'ragweed.git')
for branch in get_ragweed_branches(ctx.config, cconf):
log.info("Using branch '%s' for ragweed", branch)
try:
ctx.cluster.only(client).sh(
script=f'git clone -b {branch} {ragweed_repo} {ragweed_dir}')
break
except Exception as e:
exc = e
else:
raise exc
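        # note: the for/else above relies on Python's loop-else semantics;
        # the else clause runs only when no branch cloned successfully (no
        # break was hit), in which case the last clone failure is re-raised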
sha1 = cconf.get('sha1')
if sha1 is not None:
ctx.cluster.only(client).run(
args=[
'cd', ragweed_dir,
run.Raw('&&'),
'git', 'reset', '--hard', sha1,
],
)
try:
yield
finally:
log.info('Removing ragweed...')
for client in config:
ragweed_dir = get_ragweed_dir(testdir, client)
ctx.cluster.only(client).run(
args=['rm', '-rf', ragweed_dir]
)
def _config_user(ragweed_conf, section, user):
"""
Configure users for this section by stashing away keys, ids, and
email addresses.
"""
ragweed_conf[section].setdefault('user_id', user)
    ragweed_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
ragweed_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
ragweed_conf[section].setdefault('access_key', ''.join(random.choice(string.ascii_uppercase) for i in range(20)))
ragweed_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)).decode('ascii'))
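# For illustration (values hypothetical), a 'user regular' section for
# client.0 ends up roughly as:
#   user_id      = ragweed.client.0
#   email        = ragweed.client.0+test@test.test
#   display_name = Mr. ragweed.client.0
#   access_key   = <20 random uppercase letters>
#   secret_key   = <40 random bytes, base64-encoded>
# setdefault() only fills gaps, so values already present in the config win.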
@contextlib.contextmanager
def create_users(ctx, config, run_stages):
"""
Create a main and an alternate s3 user.
"""
assert isinstance(config, dict)
for client, properties in config['config'].items():
run_stages[client] = properties.get('stages', 'prepare,check').split(',')
log.info('Creating rgw users...')
testdir = teuthology.get_testdir(ctx)
users = {'user regular': 'ragweed', 'user system': 'sysuser'}
for client in config['clients']:
if not 'prepare' in run_stages[client]:
# should have been prepared in a previous run
continue
ragweed_conf = config['ragweed_conf'][client]
ragweed_conf.setdefault('fixtures', {})
ragweed_conf['rgw'].setdefault('bucket_prefix', 'test-' + client)
for section, user in users.items():
_config_user(ragweed_conf, section, '{user}.{client}'.format(user=user, client=client))
log.debug('Creating user {user} on {host}'.format(user=ragweed_conf[section]['user_id'], host=client))
if user == 'sysuser':
sys_str = 'true'
else:
sys_str = 'false'
ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client,
'user', 'create',
'--uid', ragweed_conf[section]['user_id'],
'--display-name', ragweed_conf[section]['display_name'],
'--access-key', ragweed_conf[section]['access_key'],
'--secret', ragweed_conf[section]['secret_key'],
'--email', ragweed_conf[section]['email'],
'--system', sys_str,
],
)
try:
yield
finally:
for client in config['clients']:
if not 'check' in run_stages[client]:
# only remove user if went through the check stage
continue
for user in users.values():
uid = '{user}.{client}'.format(user=user, client=client)
ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client,
'user', 'rm',
'--uid', uid,
'--purge-data',
],
)
@contextlib.contextmanager
def configure(ctx, config, run_stages):
"""
Configure the local config files.
"""
assert isinstance(config, dict)
log.info('Configuring ragweed...')
testdir = teuthology.get_testdir(ctx)
for client, properties in config['clients'].items():
(remote,) = ctx.cluster.only(client).remotes.keys()
preparing = 'prepare' in run_stages[client]
if not preparing:
# should have been prepared in a previous run
continue
ragweed_conf = config['ragweed_conf'][client]
if properties is not None and 'slow_backend' in properties:
ragweed_conf['fixtures']['slow backend'] = properties['slow_backend']
conf_fp = BytesIO()
ragweed_conf.write(conf_fp)
remote.write_file(
path='{tdir}/archive/ragweed.{client}.conf'.format(tdir=testdir, client=client),
data=conf_fp.getvalue(),
)
log.info('Configuring boto...')
boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
for client, properties in config['clients'].items():
with open(boto_src, 'r') as f:
(remote,) = ctx.cluster.only(client).remotes.keys()
conf = f.read().format(
idle_timeout=config.get('idle_timeout', 30)
)
remote.write_file('{tdir}/boto.cfg'.format(tdir=testdir), conf)
try:
yield
finally:
log.info('Cleaning up boto...')
for client, properties in config['clients'].items():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
'rm', '-f',
'{tdir}/boto.cfg'.format(tdir=testdir),
],
)
def get_toxvenv_dir(ctx):
return ctx.tox.venv_path
def toxvenv_sh(ctx, remote, args, **kwargs):
activate = get_toxvenv_dir(ctx) + '/bin/activate'
return remote.sh(['source', activate, run.Raw('&&')] + args, **kwargs)
@contextlib.contextmanager
def run_tests(ctx, config, run_stages):
"""
Run the ragweed after everything is set up.
:param ctx: Context passed to task
:param config: specific configuration information
"""
assert isinstance(config, dict)
testdir = teuthology.get_testdir(ctx)
attrs = ["not fails_on_rgw"]
for client, client_config in config.items():
ragweed_dir = get_ragweed_dir(testdir, client)
stages = ','.join(run_stages[client])
args = [
'cd', ragweed_dir, run.Raw('&&'),
'RAGWEED_CONF={tdir}/archive/ragweed.{client}.conf'.format(tdir=testdir, client=client),
'RAGWEED_STAGES={stages}'.format(stages=stages),
'BOTO_CONFIG={tdir}/boto.cfg'.format(tdir=testdir),
'tox',
'--sitepackages',
'--',
'-v',
'-m', ' and '.join(attrs),
]
if client_config is not None and 'extra_args' in client_config:
args.extend(client_config['extra_args'])
(remote,) = ctx.cluster.only(client).remotes.keys()
toxvenv_sh(ctx, remote, args, label="ragweed tests against rgw")
yield
@contextlib.contextmanager
def task(ctx, config):
"""
Run the ragweed suite against rgw.
To run all tests on all clients::
tasks:
- ceph:
- rgw:
- ragweed:
To restrict testing to particular clients::
tasks:
- ceph:
- rgw: [client.0]
- ragweed: [client.0]
To run against a server on client.1 and increase the boto timeout to 10m::
tasks:
- ceph:
- rgw: [client.1]
- ragweed:
client.0:
rgw_server: client.1
idle_timeout: 600
stages: prepare,check
    To pass extra arguments to the test runner (e.g. to run a certain test)::
tasks:
- ceph:
- rgw: [client.0]
- ragweed:
client.0:
extra_args: ['test_s3:test_object_acl_grand_public_read']
client.1:
extra_args: ['--exclude', 'test_100_continue']
"""
assert hasattr(ctx, 'rgw'), 'ragweed must run after the rgw task'
assert hasattr(ctx, 'tox'), 'ragweed must run after the tox task'
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task ragweed only supports a list or dictionary for configuration"
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
clients = config.keys()
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
for client in config.keys():
if not config[client]:
config[client] = {}
teuthology.deep_merge(config[client], overrides.get('ragweed', {}))
log.debug('ragweed config is %s', config)
ragweed_conf = {}
for client in clients:
# use rgw_server endpoint if given, or default to same client
target = config[client].get('rgw_server', client)
endpoint = ctx.rgw.role_endpoints.get(target)
assert endpoint, 'ragweed: no rgw endpoint for {}'.format(target)
ragweed_conf[client] = ConfigObj(
indent_type='',
infile={
'rgw':
{
'host' : endpoint.dns_name,
'port' : endpoint.port,
'is_secure' : endpoint.cert is not None,
},
'fixtures' : {},
'user system' : {},
'user regular' : {},
'rados':
{
'ceph_conf' : '/etc/ceph/ceph.conf',
},
}
)
run_stages = {}
with contextutil.nested(
lambda: download(ctx=ctx, config=config),
lambda: create_users(ctx=ctx, config=dict(
clients=clients,
ragweed_conf=ragweed_conf,
config=config,
),
run_stages=run_stages),
lambda: configure(ctx=ctx, config=dict(
clients=config,
ragweed_conf=ragweed_conf,
),
run_stages=run_stages),
lambda: run_tests(ctx=ctx, config=config, run_stages=run_stages),
):
pass
yield
| 12,875 | 33.520107 | 117 |
py
|
null |
ceph-main/qa/tasks/rbd.py
|
"""
Rbd testing task
"""
import contextlib
import logging
import os
import tempfile
import sys
from io import StringIO
from teuthology.orchestra import run
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.parallel import parallel
from teuthology.task.common_fs_utils import generic_mkfs
from teuthology.task.common_fs_utils import generic_mount
from teuthology.task.common_fs_utils import default_image_name
#V1 image unsupported but required for testing purposes
os.environ["RBD_FORCE_ALLOW_V1"] = "1"
log = logging.getLogger(__name__)
ENCRYPTION_PASSPHRASE = "password"
CLONE_ENCRYPTION_PASSPHRASE = "password2"
@contextlib.contextmanager
def create_image(ctx, config):
"""
Create an rbd image.
For example::
tasks:
- ceph:
- rbd.create_image:
client.0:
image_name: testimage
image_size: 100
image_format: 1
encryption_format: luks2
client.1:
Image size is expressed as a number of megabytes; default value
is 10240.
Image format value must be either 1 or 2; default value is 1.
"""
assert isinstance(config, dict) or isinstance(config, list), \
"task create_image only supports a list or dictionary for configuration"
if isinstance(config, dict):
images = config.items()
else:
images = [(role, None) for role in config]
testdir = teuthology.get_testdir(ctx)
passphrase_file = '{tdir}/passphrase'.format(tdir=testdir)
for role, properties in images:
if properties is None:
properties = {}
name = properties.get('image_name', default_image_name(role))
size = properties.get('image_size', 10240)
fmt = properties.get('image_format', 1)
encryption_format = properties.get('encryption_format', 'none')
(remote,) = ctx.cluster.only(role).remotes.keys()
log.info('Creating image {name} with size {size}'.format(name=name,
size=size))
args = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rbd',
'-p', 'rbd',
'create',
'--size', str(size),
name,
]
# omit format option if using the default (format 1)
        # since old versions of rbd don't support it
if int(fmt) != 1:
args += ['--image-format', str(fmt)]
remote.run(args=args)
if encryption_format != 'none':
remote.run(
args=[
'echo',
ENCRYPTION_PASSPHRASE,
run.Raw('>'),
passphrase_file
]
)
remote.run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rbd',
'encryption',
'format',
name,
encryption_format,
passphrase_file,
'-p',
'rbd'
]
)
try:
yield
finally:
log.info('Deleting rbd images...')
remote.run(args=['rm', '-f', passphrase_file])
for role, properties in images:
if properties is None:
properties = {}
name = properties.get('image_name', default_image_name(role))
(remote,) = ctx.cluster.only(role).remotes.keys()
remote.run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rbd',
'-p', 'rbd',
'rm',
name,
],
)
@contextlib.contextmanager
def clone_image(ctx, config):
"""
    Clones a parent image.
For example::
tasks:
- ceph:
- rbd.clone_image:
client.0:
parent_name: testimage
image_name: cloneimage
encryption_format: luks2
"""
assert isinstance(config, dict) or isinstance(config, list), \
"task clone_image only supports a list or dictionary for configuration"
if isinstance(config, dict):
images = config.items()
else:
images = [(role, None) for role in config]
testdir = teuthology.get_testdir(ctx)
clone_passphrase_file = '{tdir}/clone-passphrase'.format(tdir=testdir)
for role, properties in images:
if properties is None:
properties = {}
name = properties.get('image_name', default_image_name(role))
parent_name = properties.get('parent_name')
assert parent_name is not None, \
"parent_name is required"
parent_spec = '{name}@{snap}'.format(name=parent_name, snap=name)
(remote,) = ctx.cluster.only(role).remotes.keys()
log.info('Clone image {parent} to {child}'.format(parent=parent_name,
child=name))
commands = [('snap', 'create', parent_spec),
('snap', 'protect', parent_spec),
('clone', parent_spec, name)
]
encryption_format = properties.get('encryption_format', 'none')
if encryption_format != 'none':
remote.run(
args=[
'echo',
CLONE_ENCRYPTION_PASSPHRASE,
run.Raw('>'),
clone_passphrase_file
]
)
commands.append(
('encryption', 'format', name, encryption_format,
clone_passphrase_file)
)
for cmd in commands:
args = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rbd', '-p', 'rbd'
]
args.extend(cmd)
remote.run(args=args)
try:
yield
finally:
log.info('Deleting rbd clones...')
remote.run(args=['rm', '-f', clone_passphrase_file])
for role, properties in images:
if properties is None:
properties = {}
name = properties.get('image_name', default_image_name(role))
parent_name = properties.get('parent_name')
parent_spec = '{name}@{snap}'.format(name=parent_name, snap=name)
(remote,) = ctx.cluster.only(role).remotes.keys()
for cmd in [('rm', name),
('snap', 'unprotect', parent_spec),
('snap', 'rm', parent_spec)]:
args = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rbd', '-p', 'rbd'
]
args.extend(cmd)
remote.run(args=args)
@contextlib.contextmanager
def modprobe(ctx, config):
"""
    Load the rbd kernel module.
For example::
tasks:
- ceph:
- rbd.create_image: [client.0]
- rbd.modprobe: [client.0]
"""
log.info('Loading rbd kernel module...')
for role in config:
(remote,) = ctx.cluster.only(role).remotes.keys()
remote.run(
args=[
'sudo',
'modprobe',
'rbd',
],
)
try:
yield
finally:
log.info('Unloading rbd kernel module...')
for role in config:
(remote,) = ctx.cluster.only(role).remotes.keys()
remote.run(
args=[
'sudo',
'modprobe',
'-r',
'rbd',
# force errors to be ignored; necessary if more
# than one device was created, which may mean
# the module isn't quite ready to go the first
# time through.
run.Raw('||'),
'true',
],
)
@contextlib.contextmanager
def dev_create(ctx, config):
"""
Map block devices to rbd images.
For example::
tasks:
- ceph:
- rbd.create_image: [client.0]
- rbd.modprobe: [client.0]
- rbd.dev_create:
client.0:
image_name: testimage.client.0
encryption_format: luks2
parent_encryption_format: luks1
"""
assert isinstance(config, dict) or isinstance(config, list), \
"task dev_create only supports a list or dictionary for configuration"
if isinstance(config, dict):
images = config.items()
else:
images = [(role, None) for role in config]
log.info('Creating rbd block devices...')
testdir = teuthology.get_testdir(ctx)
passphrase_file = '{tdir}/passphrase'.format(tdir=testdir)
clone_passphrase_file = '{tdir}/clone-passphrase'.format(tdir=testdir)
device_path = {}
for role, properties in images:
if properties is None:
properties = {}
name = properties.get('image_name', default_image_name(role))
parent_encryption_format = properties.get('parent_encryption_format',
'none')
encryption_format = properties.get('encryption_format',
parent_encryption_format)
(remote,) = ctx.cluster.only(role).remotes.keys()
if encryption_format == 'none' and parent_encryption_format == 'none':
device_path[role] = '/dev/rbd/rbd/{image}'.format(image=name)
device_specific_args = []
else:
device_specific_args = ['-t', 'nbd', '-o']
is_cloned = properties.get('parent_name') is not None
encryption_args = ""
if is_cloned and properties.get('encryption_format') != 'none':
remote.run(
args=[
'echo',
CLONE_ENCRYPTION_PASSPHRASE,
run.Raw('>'),
clone_passphrase_file
]
)
encryption_args = \
'encryption-format=%s,encryption-passphrase-file=%s' % (
encryption_format, clone_passphrase_file)
if not is_cloned or parent_encryption_format != 'none':
remote.run(
args=[
'echo',
ENCRYPTION_PASSPHRASE,
run.Raw('>'),
passphrase_file
]
)
if is_cloned and properties.get('encryption_format') != 'none':
encryption_args += ","
if parent_encryption_format != 'none':
encryption_args += \
'encryption-format=%s,encryption-passphrase-file=%s' % (
parent_encryption_format, passphrase_file)
else:
encryption_args += \
'encryption-format=%s,encryption-passphrase-file=%s' % (
encryption_format, passphrase_file)
device_specific_args.append(encryption_args)
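            # the assembled options end up looking like, e.g. (illustrative):
            #   -t nbd -o encryption-format=luks2,encryption-passphrase-file=<file>
            # and for an encrypted clone of an encrypted parent, two
            # format/passphrase pairs are chained child-first, parent-second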
map_fp = StringIO()
remote.run(
args=[
'sudo',
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rbd',
'--id', role.rsplit('.')[-1],
'-p', 'rbd',
'map',
name] + device_specific_args,
stdout=map_fp,
)
if encryption_format != 'none' or parent_encryption_format != 'none':
device_path[role] = map_fp.getvalue().rstrip()
properties['device_path'] = device_path[role]
remote.run(args=['sudo', 'chmod', '666', device_path[role]])
try:
yield
finally:
log.info('Unmapping rbd devices...')
remote.run(args=['rm', '-f', passphrase_file, clone_passphrase_file])
for role, properties in images:
if not device_path.get(role):
continue
if properties is None:
properties = {}
encryption_format = properties.get('encryption_format', 'none')
parent_encryption_format = properties.get(
'parent_encryption_format', 'none')
(remote,) = ctx.cluster.only(role).remotes.keys()
if encryption_format == 'none' and \
parent_encryption_format == 'none':
device_specific_args = []
else:
device_specific_args = ['-t', 'nbd']
remote.run(
args=[
'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'sudo',
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rbd',
'-p', 'rbd',
'unmap',
device_path[role],
] + device_specific_args,
)
def rbd_devname_rtn(ctx, image):
return '/dev/rbd/rbd/{image}'.format(image=image)
def canonical_path(ctx, role, path):
"""
Determine the canonical path for a given path on the host
representing the given role. A canonical path contains no
. or .. components, and includes no symbolic links.
"""
version_fp = StringIO()
ctx.cluster.only(role).run(
args=[ 'readlink', '-f', path ],
stdout=version_fp,
)
canonical_path = version_fp.getvalue().rstrip('\n')
version_fp.close()
return canonical_path
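# usage sketch (paths illustrative): canonical_path(ctx, 'client.0',
# '/dev/rbd/rbd/testimage') resolves the udev symlink to the real node,
# e.g. '/dev/rbd0', matching the name the kernel reports.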
@contextlib.contextmanager
def run_xfstests(ctx, config):
"""
Run xfstests over specified devices.
Warning: both the test and scratch devices specified will be
overwritten. Normally xfstests modifies (but does not destroy)
the test device, but for now the run script used here re-makes
both filesystems.
Note: Only one instance of xfstests can run on a single host at
a time, although this is not enforced.
This task in its current form needs some improvement. For
example, it assumes all roles provided in the config are
clients, and that the config provided is a list of key/value
pairs. For now please use the xfstests() interface, below.
For example::
tasks:
- ceph:
- rbd.run_xfstests:
client.0:
count: 2
test_dev: 'test_dev'
scratch_dev: 'scratch_dev'
fs_type: 'xfs'
tests: 'generic/100 xfs/003 xfs/005 xfs/006 generic/015'
exclude:
- generic/42
randomize: true
"""
with parallel() as p:
for role, properties in config.items():
p.spawn(run_xfstests_one_client, ctx, role, properties)
exc = None
while True:
try:
p.next()
except StopIteration:
break
except:
exc = sys.exc_info()[1]
if exc is not None:
raise exc
yield
def run_xfstests_one_client(ctx, role, properties):
"""
Spawned routine to handle xfs tests for a single client
"""
testdir = teuthology.get_testdir(ctx)
try:
count = properties.get('count')
test_dev = properties.get('test_dev')
assert test_dev is not None, \
"task run_xfstests requires test_dev to be defined"
test_dev = canonical_path(ctx, role, test_dev)
scratch_dev = properties.get('scratch_dev')
assert scratch_dev is not None, \
"task run_xfstests requires scratch_dev to be defined"
scratch_dev = canonical_path(ctx, role, scratch_dev)
fs_type = properties.get('fs_type')
tests = properties.get('tests')
exclude_list = properties.get('exclude')
randomize = properties.get('randomize')
(remote,) = ctx.cluster.only(role).remotes.keys()
# Fetch the test script
test_root = teuthology.get_testdir(ctx)
test_script = 'run_xfstests.sh'
test_path = os.path.join(test_root, test_script)
xfstests_url = properties.get('xfstests_url')
assert xfstests_url is not None, \
"task run_xfstests requires xfstests_url to be defined"
xfstests_krbd_url = xfstests_url + '/' + test_script
log.info('Fetching {script} for {role} from {url}'.format(
script=test_script,
role=role,
url=xfstests_krbd_url))
args = [ 'wget', '-O', test_path, '--', xfstests_krbd_url ]
remote.run(args=args)
log.info('Running xfstests on {role}:'.format(role=role))
log.info(' iteration count: {count}:'.format(count=count))
log.info(' test device: {dev}'.format(dev=test_dev))
log.info(' scratch device: {dev}'.format(dev=scratch_dev))
log.info(' using fs_type: {fs_type}'.format(fs_type=fs_type))
log.info(' tests to run: {tests}'.format(tests=tests))
log.info(' exclude list: {}'.format(' '.join(exclude_list)))
log.info(' randomize: {randomize}'.format(randomize=randomize))
if exclude_list:
with tempfile.NamedTemporaryFile(mode='w', prefix='exclude') as exclude_file:
for test in exclude_list:
exclude_file.write("{}\n".format(test))
exclude_file.flush()
remote.put_file(exclude_file.name, exclude_file.name)
# Note that the device paths are interpreted using
# readlink -f <path> in order to get their canonical
# pathname (so it matches what the kernel remembers).
args = [
'/usr/bin/sudo',
'TESTDIR={tdir}'.format(tdir=testdir),
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'/bin/bash',
test_path,
'-c', str(count),
'-f', fs_type,
'-t', test_dev,
'-s', scratch_dev,
]
if exclude_list:
args.extend(['-x', exclude_file.name])
if randomize:
args.append('-r')
if tests:
args.extend(['--', tests])
remote.run(args=args, logger=log.getChild(role))
finally:
log.info('Removing {script} on {role}'.format(script=test_script,
role=role))
remote.run(args=['rm', '-f', test_path])
@contextlib.contextmanager
def xfstests(ctx, config):
"""
Run xfstests over rbd devices. This interface sets up all
required configuration automatically if not otherwise specified.
Note that only one instance of xfstests can run on a single host
at a time. By default, the set of tests specified is run once.
If a (non-zero) count value is supplied, the complete set of
tests will be run that number of times.
For example::
tasks:
- ceph:
# Image sizes are in MB
- rbd.xfstests:
client.0:
count: 3
test_image: 'test_image'
test_size: 250
test_format: 2
scratch_image: 'scratch_image'
scratch_size: 250
scratch_format: 1
fs_type: 'xfs'
tests: 'generic/100 xfs/003 xfs/005 xfs/006 generic/015'
exclude:
- generic/42
randomize: true
xfstests_url: 'https://raw.github.com/ceph/ceph-ci/wip-55555/qa'
"""
if config is None:
config = { 'all': None }
assert isinstance(config, dict) or isinstance(config, list), \
"task xfstests only supports a list or dictionary for configuration"
if isinstance(config, dict):
config = teuthology.replace_all_with_clients(ctx.cluster, config)
runs = config.items()
else:
runs = [(role, None) for role in config]
running_xfstests = {}
for role, properties in runs:
assert role.startswith('client.'), \
"task xfstests can only run on client nodes"
for host, roles_for_host in ctx.cluster.remotes.items():
if role in roles_for_host:
assert host not in running_xfstests, \
"task xfstests allows only one instance at a time per host"
running_xfstests[host] = True
images_config = {}
scratch_config = {}
modprobe_config = {}
image_map_config = {}
scratch_map_config = {}
xfstests_config = {}
for role, properties in runs:
if properties is None:
properties = {}
test_image = properties.get('test_image', 'test_image.{role}'.format(role=role))
test_size = properties.get('test_size', 10000) # 10G
test_fmt = properties.get('test_format', 1)
scratch_image = properties.get('scratch_image', 'scratch_image.{role}'.format(role=role))
scratch_size = properties.get('scratch_size', 10000) # 10G
scratch_fmt = properties.get('scratch_format', 1)
images_config[role] = dict(
image_name=test_image,
image_size=test_size,
image_format=test_fmt,
)
scratch_config[role] = dict(
image_name=scratch_image,
image_size=scratch_size,
image_format=scratch_fmt,
)
xfstests_branch = properties.get('xfstests_branch', 'master')
xfstests_url = properties.get('xfstests_url', 'https://raw.github.com/ceph/ceph/{branch}/qa'.format(branch=xfstests_branch))
xfstests_config[role] = dict(
count=properties.get('count', 1),
test_dev='/dev/rbd/rbd/{image}'.format(image=test_image),
scratch_dev='/dev/rbd/rbd/{image}'.format(image=scratch_image),
fs_type=properties.get('fs_type', 'xfs'),
randomize=properties.get('randomize', False),
tests=properties.get('tests'),
exclude=properties.get('exclude', []),
xfstests_url=xfstests_url,
)
log.info('Setting up xfstests using RBD images:')
log.info(' test ({size} MB): {image}'.format(size=test_size,
image=test_image))
log.info(' scratch ({size} MB): {image}'.format(size=scratch_size,
image=scratch_image))
modprobe_config[role] = None
image_map_config[role] = {'image_name': test_image}
scratch_map_config[role] = {'image_name': scratch_image}
with contextutil.nested(
lambda: create_image(ctx=ctx, config=images_config),
lambda: create_image(ctx=ctx, config=scratch_config),
lambda: modprobe(ctx=ctx, config=modprobe_config),
lambda: dev_create(ctx=ctx, config=image_map_config),
lambda: dev_create(ctx=ctx, config=scratch_map_config),
lambda: run_xfstests(ctx=ctx, config=xfstests_config),
):
yield
@contextlib.contextmanager
def task(ctx, config):
"""
Create and mount an rbd image.
For example, you can specify which clients to run on::
tasks:
- ceph:
- rbd: [client.0, client.1]
There are a few image options::
tasks:
- ceph:
- rbd:
client.0: # uses defaults
client.1:
image_name: foo
image_size: 2048
image_format: 2
fs_type: xfs
To use default options on all clients::
tasks:
- ceph:
- rbd:
all:
To create 20GiB images and format them with xfs on all clients::
tasks:
- ceph:
- rbd:
all:
image_size: 20480
fs_type: xfs
"""
if config is None:
config = { 'all': None }
norm_config = config
if isinstance(config, dict):
norm_config = teuthology.replace_all_with_clients(ctx.cluster, config)
if isinstance(norm_config, dict):
role_images = {}
for role, properties in norm_config.items():
if properties is None:
properties = {}
role_images[role] = properties.get('image_name')
else:
role_images = norm_config
log.debug('rbd config is: %s', norm_config)
with contextutil.nested(
lambda: create_image(ctx=ctx, config=norm_config),
lambda: modprobe(ctx=ctx, config=norm_config),
lambda: dev_create(ctx=ctx, config=norm_config),
lambda: generic_mkfs(ctx=ctx, config=norm_config,
devname_rtn=rbd_devname_rtn),
lambda: generic_mount(ctx=ctx, config=role_images,
devname_rtn=rbd_devname_rtn),
):
yield
| 25,715 | 33.379679 | 132 |
py
|
null |
ceph-main/qa/tasks/rbd_fio.py
|
"""
Long running fio tests on rbd mapped devices for format/features provided in config
Many fio parameters can be configured so that this task can be used along with thrash/power-cut tests
and exercise IO on full disk for all format/features
- This test should not be run on VM due to heavy use of resource
"""
import contextlib
import json
import logging
import os
from teuthology.parallel import parallel
from teuthology import misc as teuthology
from tempfile import NamedTemporaryFile
from teuthology.orchestra import run
from teuthology.packaging import install_package, remove_package
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
client.0:
fio-io-size: 100g or 80% or 100m
fio-version: 2.2.9
formats: [2]
features: [[layering],[striping],[layering,exclusive-lock,object-map]]
test-clone-io: 1 #remove this option to not run create rbd clone and not run io on clone
io-engine: "sync or rbd or any io-engine"
rw: randrw
client.1:
fio-io-size: 100g
fio-version: 2.2.9
rw: read
image-size:20480
or
all:
fio-io-size: 400g
rw: randrw
formats: [2]
features: [[layering],[striping]]
io-engine: libaio
Create rbd image + device and exercise IO for format/features provided in config file
Config can be per client or one config can be used for all clients, fio jobs are run in parallel for client provided
"""
if config.get('all'):
client_config = config['all']
clients = ctx.cluster.only(teuthology.is_type('client'))
rbd_test_dir = teuthology.get_testdir(ctx) + "/rbd_fio_test"
for remote,role in clients.remotes.items():
if 'client_config' in locals():
with parallel() as p:
p.spawn(run_fio, remote, client_config, rbd_test_dir)
else:
for client_config in config:
if client_config in role:
with parallel() as p:
p.spawn(run_fio, remote, config[client_config], rbd_test_dir)
yield
def get_ioengine_package_name(ioengine, remote):
system_type = teuthology.get_system_type(remote)
if ioengine == 'rbd':
return 'librbd1-devel' if system_type == 'rpm' else 'librbd-dev'
elif ioengine == 'libaio':
return 'libaio-devel' if system_type == 'rpm' else 'libaio-dev'
else:
return None
def run_rbd_map(remote, image, iodepth):
iodepth = max(iodepth, 128) # RBD_QUEUE_DEPTH_DEFAULT
dev = remote.sh(['sudo', 'rbd', 'device', 'map', '-o',
'queue_depth={}'.format(iodepth), image]).rstrip('\n')
remote.sudo_write_file(
'/sys/block/{}/queue/nr_requests'.format(os.path.basename(dev)),
str(iodepth))
return dev
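# run_rbd_map pins the block queue's nr_requests to the same value as the
# map-time queue_depth, so the device can actually sustain the requested fio
# iodepth. Usage sketch (image name illustrative):
#   dev = run_rbd_map(remote, 'testimage', 128)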
def run_fio(remote, config, rbd_test_dir):
"""
create fio config file with options based on above config
get the fio from GitHub, generate binary, and use it to run on
the generated fio config file
"""
fio_config=NamedTemporaryFile(mode='w', prefix='fio_rbd_', dir='/tmp/', delete=False)
fio_config.write('[global]\n')
if config.get('io-engine'):
ioengine=config['io-engine']
fio_config.write('ioengine={ioe}\n'.format(ioe=ioengine))
else:
fio_config.write('ioengine=sync\n')
if config.get('bs'):
bs=config['bs']
fio_config.write('bs={bs}\n'.format(bs=bs))
else:
fio_config.write('bs=4k\n')
iodepth = config.get('io-depth', 2)
fio_config.write('iodepth={iod}\n'.format(iod=iodepth))
if config.get('fio-io-size'):
size=config['fio-io-size']
fio_config.write('size={size}\n'.format(size=size))
else:
fio_config.write('size=100m\n')
fio_config.write('time_based\n')
if config.get('runtime'):
runtime=config['runtime']
fio_config.write('runtime={runtime}\n'.format(runtime=runtime))
else:
fio_config.write('runtime=1800\n')
fio_config.write('allow_file_create=0\n')
image_size=10240
if config.get('image_size'):
image_size=config['image_size']
formats=[1,2]
features=[['layering'],['striping'],['exclusive-lock','object-map']]
fio_version='3.32'
if config.get('formats'):
formats=config['formats']
if config.get('features'):
features=config['features']
if config.get('fio-version'):
fio_version=config['fio-version']
# handle package required for ioengine, if any
sn=remote.shortname
ioengine_pkg = get_ioengine_package_name(ioengine, remote)
if ioengine_pkg:
install_package(ioengine_pkg, remote)
fio_config.write('norandommap\n')
if ioengine == 'rbd':
fio_config.write('clientname=admin\n')
fio_config.write('pool=rbd\n')
fio_config.write('invalidate=0\n')
elif ioengine == 'libaio':
fio_config.write('direct=1\n')
for frmt in formats:
for feature in features:
log.info("Creating rbd images on {sn}".format(sn=sn))
feature_name = '-'.join(feature)
rbd_name = 'i{i}f{f}{sn}'.format(i=frmt,f=feature_name,sn=sn)
rbd_snap_name = 'i{i}f{f}{sn}@i{i}f{f}{sn}Snap'.format(i=frmt,f=feature_name,sn=sn)
rbd_clone_name = 'i{i}f{f}{sn}Clone'.format(i=frmt,f=feature_name,sn=sn)
create_args=['rbd', 'create',
'--size', '{size}'.format(size=image_size),
'--image', rbd_name,
'--image-format', '{f}'.format(f=frmt)]
            # --image-feature must be repeated once per feature; a bare map()
            # is lazy in Python 3 and its side effects would never run
            for image_feature in feature:
                create_args.extend(['--image-feature', image_feature])
if config.get('thick-provision'):
create_args.append('--thick-provision')
remote.run(args=create_args)
remote.run(args=['rbd', 'info', rbd_name])
if ioengine != 'rbd':
rbd_dev = run_rbd_map(remote, rbd_name, iodepth)
if config.get('test-clone-io'):
log.info("Testing clones using fio")
remote.run(args=['rbd', 'snap', 'create', rbd_snap_name])
remote.run(args=['rbd', 'snap', 'protect', rbd_snap_name])
remote.run(args=['rbd', 'clone', rbd_snap_name, rbd_clone_name])
rbd_clone_dev = run_rbd_map(remote, rbd_clone_name, iodepth)
fio_config.write('[{rbd_dev}]\n'.format(rbd_dev=rbd_dev))
if config.get('rw'):
rw=config['rw']
fio_config.write('rw={rw}\n'.format(rw=rw))
else:
                    fio_config.write('rw=randrw\n')
fio_config.write('filename={rbd_dev}\n'.format(rbd_dev=rbd_dev))
if config.get('test-clone-io'):
fio_config.write('[{rbd_clone_dev}]\n'.format(rbd_clone_dev=rbd_clone_dev))
fio_config.write('rw={rw}\n'.format(rw=rw))
fio_config.write('filename={rbd_clone_dev}\n'.format(rbd_clone_dev=rbd_clone_dev))
else:
if config.get('test-clone-io'):
log.info("Testing clones using fio")
remote.run(args=['rbd', 'snap', 'create', rbd_snap_name])
remote.run(args=['rbd', 'snap', 'protect', rbd_snap_name])
remote.run(args=['rbd', 'clone', rbd_snap_name, rbd_clone_name])
fio_config.write('[{img_name}]\n'.format(img_name=rbd_name))
if config.get('rw'):
rw=config['rw']
fio_config.write('rw={rw}\n'.format(rw=rw))
else:
fio_config.write('rw=randrw\n')
fio_config.write('rbdname={img_name}\n'.format(img_name=rbd_name))
if config.get('test-clone-io'):
fio_config.write('[{clone_img_name}]\n'.format(clone_img_name=rbd_clone_name))
fio_config.write('rw={rw}\n'.format(rw=rw))
fio_config.write('rbdname={clone_img_name}\n'.format(clone_img_name=rbd_clone_name))
fio_config.close()
remote.put_file(fio_config.name,fio_config.name)
try:
log.info("Running rbd feature - fio test on {sn}".format(sn=sn))
fio = "https://github.com/axboe/fio/archive/fio-" + fio_version + ".tar.gz"
remote.run(args=['mkdir', run.Raw(rbd_test_dir),])
remote.run(args=['cd' , run.Raw(rbd_test_dir),
run.Raw(';'), 'wget', fio, run.Raw(';'), run.Raw('tar -xvf fio*tar.gz'), run.Raw(';'),
run.Raw('cd fio-fio*'), run.Raw(';'), './configure', run.Raw(';'), 'make'])
remote.run(args=['ceph', '-s'])
remote.run(args=[run.Raw('{tdir}/fio-fio-{v}/fio --showcmd {f}'.format(tdir=rbd_test_dir,v=fio_version,f=fio_config.name))])
remote.run(args=['sudo', run.Raw('{tdir}/fio-fio-{v}/fio {f}'.format(tdir=rbd_test_dir,v=fio_version,f=fio_config.name))])
remote.run(args=['ceph', '-s'])
finally:
out = remote.sh('rbd device list --format=json')
mapped_images = json.loads(out)
if mapped_images:
log.info("Unmapping rbd images on {sn}".format(sn=sn))
for image in mapped_images:
remote.run(args=['sudo', 'rbd', 'device', 'unmap',
str(image['device'])])
log.info("Cleaning up fio install")
remote.run(args=['rm','-rf', run.Raw(rbd_test_dir)])
if ioengine_pkg:
remove_package(ioengine_pkg, remote)
| 9,543 | 41.230088 | 132 |
py
|
null |
ceph-main/qa/tasks/rbd_fsx.py
|
"""
Run fsx on an rbd image
"""
import contextlib
import logging
from teuthology.exceptions import ConfigError
from teuthology.parallel import parallel
from teuthology import misc as teuthology
from tasks.ceph_manager import get_valgrind_args
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Run fsx on an rbd image.
Currently this requires running as client.admin
to create a pool.
Specify which clients to run on as a list::
tasks:
ceph:
rbd_fsx:
clients: [client.0, client.1]
You can optionally change some properties of fsx:
tasks:
ceph:
rbd_fsx:
clients: <list of clients>
seed: <random seed number, or 0 to use the time>
ops: <number of operations to do>
size: <maximum image size in bytes>
valgrind: [--tool=<valgrind tool>]
"""
log.info('starting rbd_fsx...')
with parallel() as p:
for role in config['clients']:
p.spawn(_run_one_client, ctx, config, role)
yield
def _run_one_client(ctx, config, role):
"""Spawned task that runs the client"""
krbd = config.get('krbd', False)
nbd = config.get('nbd', False)
testdir = teuthology.get_testdir(ctx)
(remote,) = ctx.cluster.only(role).remotes.keys()
args = []
if krbd or nbd:
args.append('sudo') # rbd(-nbd) map/unmap need privileges
args.extend([
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir)
])
overrides = ctx.config.get('overrides', {})
teuthology.deep_merge(config, overrides.get('rbd_fsx', {}))
if config.get('valgrind'):
args = get_valgrind_args(
testdir,
'fsx_{id}'.format(id=role),
args,
config.get('valgrind')
)
cluster_name, type_, client_id = teuthology.split_role(role)
if type_ != 'client':
msg = 'client role ({0}) must be a client'.format(role)
raise ConfigError(msg)
args.extend([
'ceph_test_librbd_fsx',
'--cluster', cluster_name,
'--id', client_id,
'-d', # debug output for all operations
'-W', '-R', # mmap doesn't work with rbd
'-p', str(config.get('progress_interval', 100)), # show progress
'-P', '{tdir}/archive'.format(tdir=testdir),
'-r', str(config.get('readbdy',1)),
'-w', str(config.get('writebdy',1)),
'-t', str(config.get('truncbdy',1)),
'-h', str(config.get('holebdy',1)),
'-l', str(config.get('size', 250000000)),
'-S', str(config.get('seed', 0)),
'-N', str(config.get('ops', 1000)),
])
if krbd:
args.append('-K') # -K enables krbd mode
if nbd:
args.append('-M') # -M enables nbd mode
if config.get('direct_io', False):
args.append('-Z') # -Z use direct IO
if not config.get('randomized_striping', True):
args.append('-U') # -U disables randomized striping
if not config.get('punch_holes', True):
args.append('-H') # -H disables discard ops
if config.get('deep_copy', False):
args.append('-g') # -g deep copy instead of clone
if config.get('journal_replay', False):
args.append('-j') # -j replay all IO events from journal
if config.get('keep_images', False):
args.append('-k') # -k keep images on success
args.extend([
config.get('pool_name', 'pool_{pool}'.format(pool=role)),
'image_{image}'.format(image=role),
])
remote.run(args=args)
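# For reference, a typical assembled command looks like (ids and paths
# illustrative):
#   ceph_test_librbd_fsx --cluster ceph --id 0 -d -W -R -p 100 \
#       -P {testdir}/archive -r 1 -w 1 -t 1 -h 1 -l 250000000 -S 0 -N 1000 \
#       pool_client.0 image_client.0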
ceph-main/qa/tasks/rbd_mirror.py
"""
Task for running rbd mirroring daemons and configuring mirroring
"""
import logging
from teuthology.orchestra import run
from teuthology import misc
from teuthology.exceptions import ConfigError
from teuthology.task import Task
from tasks.ceph_manager import get_valgrind_args
from tasks.util import get_remote_for_role
log = logging.getLogger(__name__)
class RBDMirror(Task):
"""
Run an rbd-mirror daemon to sync rbd images between clusters.
This requires two clients (one from each cluster) on the same host
to connect with. The pool configuration should be adjusted by later
test scripts to include the remote client and cluster name. This task
just needs to know how to connect to the local cluster.
For example:
roles:
- [primary.mon.a, primary.osd.0, primary.osd.1, primary.osd.2]
- [secondary.mon.a, secondary.osd.0, secondary.osd.1, secondary.osd.2]
- [primary.client.mirror, secondary.client.mirror]
tasks:
- ceph:
cluster: primary
- ceph:
cluster: secondary
- rbd-mirror:
client: primary.client.mirror
To mirror back to the primary cluster as well, add another
rbd_mirror instance:
- rbd-mirror:
client: secondary.client.mirror
Possible options for this task are:
client: role - ceph client to connect as
valgrind: [--tool=<valgrind tool>] - none by default
coverage: bool - whether this run may be collecting coverage data
thrash: bool - whether this run may be thrashed
"""
def __init__(self, ctx, config):
super(RBDMirror, self).__init__(ctx, config)
self.log = log
def setup(self):
super(RBDMirror, self).setup()
try:
self.client = self.config['client']
except KeyError:
raise ConfigError('rbd-mirror requires a client to connect with')
self.cluster_name, type_, self.client_id = misc.split_role(self.client)
if type_ != 'client':
msg = 'client role ({0}) must be a client'.format(self.client)
raise ConfigError(msg)
self.remote = get_remote_for_role(self.ctx, self.client)
def begin(self):
super(RBDMirror, self).begin()
testdir = misc.get_testdir(self.ctx)
daemon_signal = 'kill'
if 'coverage' in self.config or 'valgrind' in self.config or \
self.config.get('thrash', False):
daemon_signal = 'term'
args = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'daemon-helper',
daemon_signal,
]
if 'valgrind' in self.config:
args = get_valgrind_args(
testdir,
'rbd-mirror-{id}'.format(id=self.client),
args,
self.config.get('valgrind')
)
args.extend([
'rbd-mirror', '--foreground',
'--cluster',
self.cluster_name,
'--id',
self.client_id,
])
self.ctx.daemons.add_daemon(
self.remote, 'rbd-mirror', self.client,
cluster=self.cluster_name,
args=args,
logger=self.log.getChild(self.client),
stdin=run.PIPE,
wait=False,
)
def end(self):
mirror_daemon = self.ctx.daemons.get_daemon('rbd-mirror',
self.client,
self.cluster_name)
mirror_daemon.stop()
super(RBDMirror, self).end()
task = RBDMirror
ceph-main/qa/tasks/rbd_mirror_thrash.py
"""
Task for thrashing rbd-mirror daemons
"""
import contextlib
import logging
import random
import signal
import socket
import time
from gevent import sleep
from gevent.greenlet import Greenlet
from gevent.event import Event
from teuthology.exceptions import CommandFailedError
from teuthology.orchestra import run
from tasks.thrasher import Thrasher
log = logging.getLogger(__name__)
class RBDMirrorThrasher(Thrasher, Greenlet):
"""
RBDMirrorThrasher::
The RBDMirrorThrasher thrashes rbd-mirror daemons during execution of other
tasks (workunits, etc).
The config is optional. Many of the config parameters are a maximum value
to use when selecting a random value from a range. The config is a dict
containing some or all of:
cluster: [default: ceph] cluster to thrash
    max_thrash: [default: 1] the maximum number of active rbd-mirror daemons per
      cluster that will be thrashed at any given time.
min_thrash_delay: [default: 60] minimum number of seconds to delay before
thrashing again.
max_thrash_delay: [default: 120] maximum number of seconds to delay before
thrashing again.
max_revive_delay: [default: 10] maximum number of seconds to delay before
bringing back a thrashed rbd-mirror daemon.
    randomize: [default: true] enables randomization and uses the max/min values
seed: [no default] seed the random number generator
Examples::
The following example disables randomization, and uses the max delay
values:
tasks:
- ceph:
- rbd_mirror_thrash:
randomize: False
max_thrash_delay: 10
"""
def __init__(self, ctx, config, cluster, daemons):
super(RBDMirrorThrasher, self).__init__()
self.ctx = ctx
self.config = config
self.cluster = cluster
self.daemons = daemons
self.logger = log
self.name = 'thrasher.rbd_mirror.[{cluster}]'.format(cluster = cluster)
self.stopping = Event()
self.randomize = bool(self.config.get('randomize', True))
self.max_thrash = int(self.config.get('max_thrash', 1))
self.min_thrash_delay = float(self.config.get('min_thrash_delay', 60.0))
self.max_thrash_delay = float(self.config.get('max_thrash_delay', 120.0))
self.max_revive_delay = float(self.config.get('max_revive_delay', 10.0))
def _run(self):
try:
self.do_thrash()
except Exception as e:
# See _run exception comment for MDSThrasher
self.set_thrasher_exception(e)
self.logger.exception("exception:")
# Allow successful completion so gevent doesn't see an exception.
# The DaemonWatchdog will observe the error and tear down the test.
def log(self, x):
"""Write data to logger assigned to this RBDMirrorThrasher"""
self.logger.info(x)
def stop(self):
self.stopping.set()
def do_thrash(self):
"""
Perform the random thrashing action
"""
self.log('starting thrash for cluster {cluster}'.format(cluster=self.cluster))
stats = {
"kill": 0,
}
while not self.stopping.is_set():
delay = self.max_thrash_delay
if self.randomize:
                # uniform() accepts the float bounds; randrange() needs ints
                delay = random.uniform(self.min_thrash_delay, self.max_thrash_delay)
if delay > 0.0:
self.log('waiting for {delay} secs before thrashing'.format(delay=delay))
self.stopping.wait(delay)
if self.stopping.is_set():
continue
killed_daemons = []
weight = 1.0 / len(self.daemons)
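            # each daemon is killed with probability 1/len(daemons), so on
            # average one daemon is thrashed per iteration; max_thrash caps
            # the worst case via the count check below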
count = 0
for daemon in self.daemons:
skip = random.uniform(0.0, 1.0)
if weight <= skip:
self.log('skipping daemon {label} with skip ({skip}) > weight ({weight})'.format(
label=daemon.id_, skip=skip, weight=weight))
continue
self.log('kill {label}'.format(label=daemon.id_))
try:
daemon.signal(signal.SIGTERM)
except socket.error:
pass
killed_daemons.append(daemon)
stats['kill'] += 1
# if we've reached max_thrash, we're done
count += 1
if count >= self.max_thrash:
break
if killed_daemons:
# wait for a while before restarting
delay = self.max_revive_delay
if self.randomize:
                    delay = random.uniform(0.0, self.max_revive_delay)
self.log('waiting for {delay} secs before reviving daemons'.format(delay=delay))
sleep(delay)
for daemon in killed_daemons:
self.log('waiting for {label}'.format(label=daemon.id_))
try:
run.wait([daemon.proc], timeout=600)
except CommandFailedError:
pass
except:
self.log('Failed to stop {label}'.format(label=daemon.id_))
try:
# try to capture a core dump
daemon.signal(signal.SIGABRT)
except socket.error:
pass
raise
finally:
daemon.reset()
for daemon in killed_daemons:
self.log('reviving {label}'.format(label=daemon.id_))
daemon.start()
for stat in stats:
self.log("stat['{key}'] = {value}".format(key = stat, value = stats[stat]))
@contextlib.contextmanager
def task(ctx, config):
"""
Stress test the rbd-mirror by thrashing while another task/workunit
is running.
Please refer to RBDMirrorThrasher class for further information on the
available options.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'rbd_mirror_thrash task only accepts a dict for configuration'
cluster = config.get('cluster', 'ceph')
daemons = list(ctx.daemons.iter_daemons_of_role('rbd-mirror', cluster))
assert len(daemons) > 0, \
'rbd_mirror_thrash task requires at least 1 rbd-mirror daemon'
# choose random seed
if 'seed' in config:
seed = int(config['seed'])
else:
seed = int(time.time())
log.info('rbd_mirror_thrash using random seed: {seed}'.format(seed=seed))
random.seed(seed)
thrasher = RBDMirrorThrasher(ctx, config, cluster, daemons)
thrasher.start()
ctx.ceph[cluster].thrashers.append(thrasher)
try:
log.debug('Yielding')
yield
finally:
log.info('joining rbd_mirror_thrash')
thrasher.stop()
if thrasher.exception is not None:
raise RuntimeError('error during thrashing')
thrasher.join()
log.info('done joining')
ceph-main/qa/tasks/rbd_pwl_cache_recovery.py
"""
persistent write log cache recovery task
"""
import contextlib
import logging
import random
import json
import time
from teuthology import misc as teuthology
from teuthology import contextutil
DEFAULT_NUM_ITERATIONS = 20
IO_PATTERNS = ("full-seq", "rand")
IO_SIZES = ('4K', '16K', '128K', '1024K')
log = logging.getLogger(__name__)
@contextlib.contextmanager
def thrashes_rbd_bench_on_persistent_cache(ctx, config):
"""
thrashes rbd bench on persistent write log cache.
    It exercises the recovery feature of the persistent write log cache.
"""
log.info("thrashes rbd bench on persistent write log cache")
client, client_config = list(config.items())[0]
(remote,) = ctx.cluster.only(client).remotes.keys()
client_config = client_config if client_config is not None else dict()
image_name = client_config.get('image_name', 'testimage')
num_iterations = client_config.get('num_iterations', DEFAULT_NUM_ITERATIONS)
for i in range(num_iterations):
log.info("start rbd bench")
# rbd bench could not specify the run time so set a large enough test size.
remote.run(
args=[
'rbd', 'bench',
'--io-type', 'write',
'--io-pattern', random.choice(IO_PATTERNS),
'--io-size', random.choice(IO_SIZES),
'--io-total', '100G',
image_name,
],
wait=False,
)
# Wait a few seconds for the rbd bench process to run
# and complete the pwl cache initialization
time.sleep(10)
log.info("dump cache state when rbd bench running.")
remote.sh(['rbd', 'status', image_name, '--format=json'])
log.info("sleep...")
time.sleep(random.randint(10, 60))
log.info("rbd bench crash.")
remote.run(
args=[
'killall', '-9', 'rbd',
],
check_status=False,
)
log.info("wait for watch timeout.")
time.sleep(40)
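        # 40s comfortably exceeds the default 30s OSD client watch timeout,
        # so the killed rbd bench's watch is gone before the status check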
log.info("check cache state after crash.")
out = remote.sh(['rbd', 'status', image_name, '--format=json'])
rbd_status = json.loads(out)
assert len(rbd_status['watchers']) == 0
assert rbd_status['persistent_cache']['present'] == True
assert rbd_status['persistent_cache']['empty'] == False
assert rbd_status['persistent_cache']['clean'] == False
log.info("check dirty cache file.")
remote.run(
args=[
'test', '-e', rbd_status['persistent_cache']['path'],
]
)
try:
yield
finally:
log.info("cleanup")
@contextlib.contextmanager
def task(ctx, config):
"""
This is task for testing persistent write log cache recovery.
"""
assert isinstance(config, dict), \
"task rbd_pwl_cache_recovery only supports a dictionary for configuration"
managers = []
config = teuthology.replace_all_with_clients(ctx.cluster, config)
managers.append(
lambda: thrashes_rbd_bench_on_persistent_cache(ctx=ctx, config=config)
)
with contextutil.nested(*managers):
yield
| 3,188 | 31.876289 | 86 |
py
|
null |
ceph-main/qa/tasks/rebuild_mondb.py
|
"""
Test that we can rebuild the mon RocksDB from OSD data if all mon DBs are
corrupted or lost
"""
import logging
import os.path
import shutil
import tempfile
from tasks import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def _push_directory(path, remote, remote_dir):
"""
local_temp_path=`mktemp`
tar czf $local_temp_path $path
ssh remote mkdir -p remote_dir
remote_temp_path=`mktemp`
scp $local_temp_path $remote_temp_path
rm $local_temp_path
tar xzf $remote_temp_path -C $remote_dir
ssh remote:$remote_temp_path
"""
fd, local_temp_path = tempfile.mkstemp(suffix='.tgz',
prefix='rebuild_mondb-')
os.close(fd)
cmd = ' '.join(['tar', 'cz',
'-f', local_temp_path,
'-C', path,
'--', '.'])
teuthology.sh(cmd)
_, fname = os.path.split(local_temp_path)
fd, remote_temp_path = tempfile.mkstemp(suffix='.tgz',
prefix='rebuild_mondb-')
os.close(fd)
remote.put_file(local_temp_path, remote_temp_path)
os.remove(local_temp_path)
remote.run(args=['sudo',
'tar', 'xz',
'-C', remote_dir,
'-f', remote_temp_path])
remote.run(args=['sudo', 'rm', '-fr', remote_temp_path])
def _nuke_mons(manager, mons, mon_id):
assert mons
is_mon = teuthology.is_type('mon')
for remote, roles in mons.remotes.items():
for role in roles:
if not is_mon(role):
continue
cluster, _, m = teuthology.split_role(role)
log.info('killing {cluster}:mon.{mon}'.format(
cluster=cluster,
mon=m))
manager.kill_mon(m)
mon_data = os.path.join('/var/lib/ceph/mon/',
'{0}-{1}'.format(cluster, m))
if m == mon_id:
                # so we will only need to recreate the store.db for the
                # first mon; that is easier than running mkfs on it and then
                # replacing its store.db with the recovered one
store_dir = os.path.join(mon_data, 'store.db')
remote.run(args=['sudo', 'rm', '-r', store_dir])
# we need to remove the external_log_to file too, since it
# references a version number inside store.db
remote.run(args=['sudo', 'rm', '-r', os.path.join(mon_data,
'external_log_to')])
else:
remote.run(args=['sudo', 'rm', '-r', mon_data])
def _rebuild_db(ctx, manager, cluster_name, mon, mon_id, keyring_path):
local_mstore = tempfile.mkdtemp()
# collect the maps from all OSDs
is_osd = teuthology.is_type('osd')
osds = ctx.cluster.only(is_osd)
assert osds
for osd, roles in osds.remotes.items():
for role in roles:
if not is_osd(role):
continue
cluster, _, osd_id = teuthology.split_role(role)
assert cluster_name == cluster
log.info('collecting maps from {cluster}:osd.{osd}'.format(
cluster=cluster,
osd=osd_id))
# push RocksDB to OSD
osd_mstore = os.path.join(teuthology.get_testdir(ctx), 'mon-store')
osd.run(args=['sudo', 'mkdir', '-m', 'o+x', '-p', osd_mstore])
_push_directory(local_mstore, osd, osd_mstore)
log.info('rm -rf {0}'.format(local_mstore))
shutil.rmtree(local_mstore)
# update RocksDB with OSD data
options = '--no-mon-config --op update-mon-db --mon-store-path {0}'
            log.info('running ceph-objectstore-tool with mon-store-path {0}'.format(osd_mstore))
manager.objectstore_tool(pool=None,
options=options.format(osd_mstore),
args='',
osd=osd_id,
do_revive=False)
# pull the updated mon db
log.info('pull dir {0} -> {1}'.format(osd_mstore, local_mstore))
local_mstore = tempfile.mkdtemp()
teuthology.pull_directory(osd, osd_mstore, local_mstore)
log.info('rm -rf osd:{0}'.format(osd_mstore))
osd.run(args=['sudo', 'rm', '-fr', osd_mstore])
# recover the first_mon with re-built mon db
# pull from recovered RocksDB from client
mon_store_dir = os.path.join('/var/lib/ceph/mon',
'{0}-{1}'.format(cluster_name, mon_id))
_push_directory(local_mstore, mon, mon_store_dir)
mon.run(args=['sudo', 'chown', '-R', 'ceph:ceph', mon_store_dir])
shutil.rmtree(local_mstore)
# fill up the caps in the keyring file
mon.run(args=['sudo',
'ceph-authtool', keyring_path,
'-n', 'mon.',
'--cap', 'mon', 'allow *'])
mon.run(args=['sudo',
'ceph-authtool', keyring_path,
'-n', 'client.admin',
'--cap', 'mon', 'allow *',
'--cap', 'osd', 'allow *',
'--cap', 'mds', 'allow *',
'--cap', 'mgr', 'allow *'])
mon.run(args=['sudo', '-u', 'ceph',
'CEPH_ARGS=--no-mon-config',
'ceph-monstore-tool', mon_store_dir,
'rebuild', '--',
'--keyring', keyring_path,
'--monmap', '/tmp/monmap',
])
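# For reference, _rebuild_db automates roughly the manual mon recovery flow;
# a sketch with illustrative paths (not the exact commands issued above):
#
#   for osd in /var/lib/ceph/osd/ceph-*; do
#     ceph-objectstore-tool --data-path $osd --no-mon-config \
#       --op update-mon-db --mon-store-path /tmp/mon-store
#   done
#   ceph-monstore-tool /tmp/mon-store rebuild -- \
#     --keyring /path/to/admin.keyring --monmap /tmp/monmap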
def _revive_mons(manager, mons, recovered, keyring_path):
# revive monitors
# the initial monmap is in the ceph.conf, so we are good.
n_mons = 0
is_mon = teuthology.is_type('mon')
for remote, roles in mons.remotes.items():
for role in roles:
if not is_mon(role):
continue
cluster, _, m = teuthology.split_role(role)
if recovered != m:
log.info('running mkfs on {cluster}:mon.{mon}'.format(
cluster=cluster,
mon=m))
remote.run(
args=[
'sudo',
'ceph-mon',
'--cluster', cluster,
'--mkfs',
'-i', m,
'--keyring', keyring_path,
'--monmap', '/tmp/monmap'])
log.info('reviving mon.{0}'.format(m))
manager.revive_mon(m)
n_mons += 1
manager.wait_for_mon_quorum_size(n_mons, timeout=30)
def _revive_mgrs(ctx, manager):
is_mgr = teuthology.is_type('mgr')
mgrs = ctx.cluster.only(is_mgr)
for _, roles in mgrs.remotes.items():
for role in roles:
if not is_mgr(role):
continue
_, _, mgr_id = teuthology.split_role(role)
log.info('reviving mgr.{0}'.format(mgr_id))
manager.revive_mgr(mgr_id)
def _revive_osds(ctx, manager):
is_osd = teuthology.is_type('osd')
osds = ctx.cluster.only(is_osd)
for _, roles in osds.remotes.items():
for role in roles:
if not is_osd(role):
continue
_, _, osd_id = teuthology.split_role(role)
log.info('reviving osd.{0}'.format(osd_id))
manager.revive_osd(osd_id)
def task(ctx, config):
"""
Test monitor recovery from OSD
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
# stash a monmap for later
mon.run(args=['ceph', 'mon', 'getmap', '-o', '/tmp/monmap'])
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'))
mons = ctx.cluster.only(teuthology.is_type('mon'))
# note down the first cluster_name and mon_id
# we will recover it later on
cluster_name, _, mon_id = teuthology.split_role(first_mon)
_nuke_mons(manager, mons, mon_id)
default_keyring = '/etc/ceph/{cluster}.keyring'.format(
cluster=cluster_name)
keyring_path = config.get('keyring_path', default_keyring)
_rebuild_db(ctx, manager, cluster_name, mon, mon_id, keyring_path)
_revive_mons(manager, mons, mon_id, keyring_path)
_revive_mgrs(ctx, manager)
_revive_osds(ctx, manager)
| 8,550 | 36.340611 | 86 |
py
|
null |
ceph-main/qa/tasks/reg11184.py
|
"""
Special regression test for tracker #11184
Synopsis: osd/SnapMapper.cc: 282: FAILED assert(check(oid))
This is accomplished by moving a pg that wasn't part of the split and still
includes divergent priors.
"""
import logging
import time
from teuthology.exceptions import CommandFailedError
from teuthology.orchestra import run
from teuthology import misc as teuthology
from tasks.util.rados import rados
import os
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Test handling of divergent entries during export / import
to regression test tracker #11184
overrides:
ceph:
conf:
osd:
debug osd: 5
Requires 3 osds on a single test node.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'divergent_priors task only accepts a dict for configuration'
manager = ctx.managers['ceph']
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
osds = [0, 1, 2]
manager.flush_pg_stats(osds)
manager.raw_cluster_cmd('osd', 'set', 'noout')
manager.raw_cluster_cmd('osd', 'set', 'noin')
manager.raw_cluster_cmd('osd', 'set', 'nodown')
manager.wait_for_clean()
# something that is always there
dummyfile = '/etc/fstab'
dummyfile2 = '/etc/resolv.conf'
testdir = teuthology.get_testdir(ctx)
# create 1 pg pool
log.info('creating foo')
manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1')
manager.raw_cluster_cmd(
'osd', 'pool', 'application', 'enable',
'foo', 'rados', run.Raw('||'), 'true')
    # Remove extra pool to simplify log output
manager.raw_cluster_cmd('osd', 'pool', 'delete', 'rbd', 'rbd', '--yes-i-really-really-mean-it')
for i in osds:
manager.set_config(i, osd_min_pg_log_entries=10)
manager.set_config(i, osd_max_pg_log_entries=10)
manager.set_config(i, osd_pg_log_trim_min=5)
# determine primary
divergent = manager.get_pg_primary('foo', 0)
log.info("primary and soon to be divergent is %d", divergent)
non_divergent = list(osds)
non_divergent.remove(divergent)
log.info('writing initial objects')
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
# write 100 objects
for i in range(100):
rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
manager.wait_for_clean()
# blackhole non_divergent
log.info("blackholing osds %s", str(non_divergent))
for i in non_divergent:
manager.set_config(i, objectstore_blackhole=1)
DIVERGENT_WRITE = 5
DIVERGENT_REMOVE = 5
# Write some soon to be divergent
log.info('writing divergent objects')
for i in range(DIVERGENT_WRITE):
rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i,
dummyfile2], wait=False)
# Remove some soon to be divergent
log.info('remove divergent objects')
for i in range(DIVERGENT_REMOVE):
rados(ctx, mon, ['-p', 'foo', 'rm',
'existing_%d' % (i + DIVERGENT_WRITE)], wait=False)
time.sleep(10)
mon.run(
args=['killall', '-9', 'rados'],
wait=True,
check_status=False)
# kill all the osds but leave divergent in
log.info('killing all the osds')
for i in osds:
manager.kill_osd(i)
for i in osds:
manager.mark_down_osd(i)
for i in non_divergent:
manager.mark_out_osd(i)
# bring up non-divergent
log.info("bringing up non_divergent %s", str(non_divergent))
for i in non_divergent:
manager.revive_osd(i)
for i in non_divergent:
manager.mark_in_osd(i)
# write 1 non-divergent object (ensure that old divergent one is divergent)
objname = "existing_%d" % (DIVERGENT_WRITE + DIVERGENT_REMOVE)
log.info('writing non-divergent object ' + objname)
rados(ctx, mon, ['-p', 'foo', 'put', objname, dummyfile2])
manager.wait_for_recovery()
# ensure no recovery of up osds first
log.info('delay recovery')
for i in non_divergent:
manager.wait_run_admin_socket(
'osd', i, ['set_recovery_delay', '100000'])
# bring in our divergent friend
log.info("revive divergent %d", divergent)
manager.raw_cluster_cmd('osd', 'set', 'noup')
manager.revive_osd(divergent)
log.info('delay recovery divergent')
manager.wait_run_admin_socket(
'osd', divergent, ['set_recovery_delay', '100000'])
manager.raw_cluster_cmd('osd', 'unset', 'noup')
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
log.info('wait for peering')
rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
# At this point the divergent_priors should have been detected
log.info("killing divergent %d", divergent)
manager.kill_osd(divergent)
# Split pgs for pool foo
manager.raw_cluster_cmd('osd', 'pool', 'set', 'foo', 'pg_num', '2')
time.sleep(5)
manager.raw_cluster_cmd('pg','dump')
# Export a pg
(exp_remote,) = ctx.\
cluster.only('osd.{o}'.format(o=divergent)).remotes.keys()
FSPATH = manager.get_filepath()
JPATH = os.path.join(FSPATH, "journal")
prefix = ("sudo adjust-ulimits ceph-objectstore-tool "
"--data-path {fpath} --journal-path {jpath} "
"--log-file="
"/var/log/ceph/objectstore_tool.$$.log ".
format(fpath=FSPATH, jpath=JPATH))
pid = os.getpid()
expfile = os.path.join(testdir, "exp.{pid}.out".format(pid=pid))
cmd = ((prefix + "--op export-remove --pgid 2.0 --file {file}").
format(id=divergent, file=expfile))
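    # The assembled command is roughly (paths depend on the test run):
    #   sudo adjust-ulimits ceph-objectstore-tool --data-path <FSPATH> \
    #     --journal-path <JPATH> --log-file ... \
    #     --op export-remove --pgid 2.0 --file <testdir>/exp.<pid>.out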
try:
exp_remote.sh(cmd, wait=True)
except CommandFailedError as e:
assert e.exitstatus == 0
# Kill one of non-divergent OSDs
log.info('killing osd.%d' % non_divergent[0])
manager.kill_osd(non_divergent[0])
manager.mark_down_osd(non_divergent[0])
# manager.mark_out_osd(non_divergent[0])
# An empty collection for pg 2.0 might need to be cleaned up
cmd = ((prefix + "--force --op remove --pgid 2.0").
format(id=non_divergent[0]))
exp_remote.sh(cmd, wait=True, check_status=False)
cmd = ((prefix + "--op import --file {file}").
format(id=non_divergent[0], file=expfile))
try:
exp_remote.sh(cmd, wait=True)
except CommandFailedError as e:
assert e.exitstatus == 0
# bring in our divergent friend and other node
log.info("revive divergent %d", divergent)
manager.revive_osd(divergent)
manager.mark_in_osd(divergent)
log.info("revive %d", non_divergent[0])
manager.revive_osd(non_divergent[0])
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
log.info('delay recovery divergent')
manager.set_config(divergent, osd_recovery_delay_start=100000)
log.info('mark divergent in')
manager.mark_in_osd(divergent)
log.info('wait for peering')
rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
log.info("killing divergent %d", divergent)
manager.kill_osd(divergent)
log.info("reviving divergent %d", divergent)
manager.revive_osd(divergent)
time.sleep(3)
log.info('allowing recovery')
# Set osd_recovery_delay_start back to 0 and kick the queue
for i in osds:
manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'debug',
'kick_recovery_wq', ' 0')
log.info('reading divergent objects')
for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
'/tmp/existing'])
assert exit_status == 0
(remote,) = ctx.\
cluster.only('osd.{o}'.format(o=divergent)).remotes.keys()
cmd = 'rm {file}'.format(file=expfile)
remote.run(args=cmd, wait=True)
log.info("success")
| 7,953 | 31.73251 | 99 |
py
|
null |
ceph-main/qa/tasks/rep_lost_unfound_delete.py
|
"""
Lost_unfound
"""
import logging
import time
from tasks import ceph_manager
from tasks.util.rados import rados
from teuthology import misc as teuthology
from teuthology.orchestra import run
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Test handling of lost objects.
    A small cluster with a fixed osd layout is brought up and exercised by
    this task.
"""
POOL = 'unfounddel_pool'
if config is None:
config = {}
assert isinstance(config, dict), \
'lost_unfound task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
manager.flush_pg_stats([0, 1, 2])
manager.wait_for_clean()
manager.create_pool(POOL)
# something that is always there
dummyfile = '/etc/fstab'
# take an osd out until the very end
manager.kill_osd(2)
manager.mark_down_osd(2)
manager.mark_out_osd(2)
# kludge to make sure they get a map
rados(ctx, mon, ['-p', POOL, 'put', 'dummy', dummyfile])
manager.flush_pg_stats([0, 1])
manager.wait_for_recovery()
# create old objects
for f in range(1, 10):
rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile])
rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile])
rados(ctx, mon, ['-p', POOL, 'rm', 'existed_%d' % f])
# delay recovery, and make the pg log very long (to prevent backfill)
manager.raw_cluster_cmd(
'tell', 'osd.1',
'injectargs',
'--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
)
manager.kill_osd(0)
manager.mark_down_osd(0)
for f in range(1, 10):
rados(ctx, mon, ['-p', POOL, 'put', 'new_%d' % f, dummyfile])
rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile])
rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile])
# bring osd.0 back up, let it peer, but don't replicate the new
# objects...
    log.info('osd.0 command_args is %s',
             ctx.daemons.get_daemon('osd', 0).command_args)
ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([
'--osd-recovery-delay-start', '1000'
])
manager.revive_osd(0)
manager.mark_in_osd(0)
manager.wait_till_osd_is_up(0)
manager.flush_pg_stats([0, 1])
manager.wait_till_active()
# take out osd.1 and the only copy of those objects.
manager.kill_osd(1)
manager.mark_down_osd(1)
manager.mark_out_osd(1)
manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
    # bring up osd.2 so that things would otherwise, in theory, recover fully
manager.revive_osd(2)
manager.mark_in_osd(2)
manager.wait_till_osd_is_up(2)
manager.flush_pg_stats([0, 2])
manager.wait_till_active()
manager.flush_pg_stats([0, 2])
# verify that there are unfound objects
unfound = manager.get_num_unfound_objects()
log.info("there are %d unfound objects" % unfound)
assert unfound
testdir = teuthology.get_testdir(ctx)
procs = []
if config.get('parallel_bench', True):
procs.append(mon.run(
args=[
"/bin/sh", "-c",
" ".join(['adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage',
'rados',
'--no-log-to-stderr',
'--name', 'client.admin',
'-b', str(4<<10),
'-p' , POOL,
'-t', '20',
'bench', '240', 'write',
]).format(tdir=testdir),
],
logger=log.getChild('radosbench.{id}'.format(id='client.admin')),
stdin=run.PIPE,
wait=False
))
time.sleep(10)
# mark stuff lost
pgs = manager.get_pg_stats()
for pg in pgs:
if pg['stat_sum']['num_objects_unfound'] > 0:
primary = 'osd.%d' % pg['acting'][0]
            # verify that we can list them directly from the osd
log.info('listing missing/lost in %s state %s', pg['pgid'],
pg['state']);
m = manager.list_pg_unfound(pg['pgid'])
#log.info('%s' % m)
assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
num_unfound=0
for o in m['objects']:
if len(o['locations']) == 0:
num_unfound += 1
assert m['num_unfound'] == num_unfound
log.info("reverting unfound in %s on %s", pg['pgid'], primary)
manager.raw_cluster_cmd('pg', pg['pgid'],
'mark_unfound_lost', 'delete')
else:
log.info("no unfound in %s", pg['pgid'])
manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
manager.flush_pg_stats([0, 2])
manager.wait_for_recovery()
# verify result
for f in range(1, 10):
err = rados(ctx, mon, ['-p', POOL, 'get', 'new_%d' % f, '-'])
assert err
err = rados(ctx, mon, ['-p', POOL, 'get', 'existed_%d' % f, '-'])
assert err
err = rados(ctx, mon, ['-p', POOL, 'get', 'existing_%d' % f, '-'])
assert err
# see if osd.1 can cope
manager.mark_in_osd(1)
manager.revive_osd(1)
manager.wait_till_osd_is_up(1)
manager.wait_for_clean()
run.wait(procs)
manager.wait_for_clean()
| 5,837 | 31.433333 | 80 |
py
|
null |
ceph-main/qa/tasks/repair_test.py
|
"""
Test pool repairing after objects are damaged.
"""
import logging
import time
log = logging.getLogger(__name__)
def choose_primary(manager, pool, num):
"""
Return primary to test on.
"""
log.info("Choosing primary")
return manager.get_pg_primary(pool, num)
def choose_replica(manager, pool, num):
"""
Return replica to test on.
"""
log.info("Choosing replica")
return manager.get_pg_replica(pool, num)
def trunc(manager, osd, pool, obj):
"""
truncate an object
"""
log.info("truncating object")
return manager.osd_admin_socket(
osd,
['truncobj', pool, obj, '1'])
def dataerr(manager, osd, pool, obj):
"""
cause an error in the data
"""
log.info("injecting data err on object")
return manager.osd_admin_socket(
osd,
['injectdataerr', pool, obj])
def mdataerr(manager, osd, pool, obj):
"""
cause an error in the mdata
"""
log.info("injecting mdata err on object")
return manager.osd_admin_socket(
osd,
['injectmdataerr', pool, obj])
def omaperr(manager, osd, pool, obj):
"""
Cause an omap error.
"""
log.info("injecting omap err on object")
return manager.osd_admin_socket(osd, ['setomapval', pool, obj,
'badkey', 'badval'])
def repair_test_1(manager, corrupter, chooser, scrub_type):
"""
Creates an object in the pool, corrupts it,
scrubs it, and verifies that the pool is inconsistent. It then repairs
the pool, rescrubs it, and verifies that the pool is consistent
:param corrupter: error generating function (truncate, data-error, or
meta-data error, for example).
:param chooser: osd type chooser (primary or replica)
:param scrub_type: regular scrub or deep-scrub
"""
pool = "repair_pool_1"
manager.wait_for_clean()
with manager.pool(pool, 1):
log.info("starting repair test type 1")
victim_osd = chooser(manager, pool, 0)
# create object
log.info("doing put")
manager.do_put(pool, 'repair_test_obj', '/etc/hosts')
# corrupt object
log.info("corrupting object")
corrupter(manager, victim_osd, pool, 'repair_test_obj')
# verify inconsistent
log.info("scrubbing")
manager.do_pg_scrub(pool, 0, scrub_type)
manager.with_pg_state(pool, 0, lambda s: 'inconsistent' in s)
# repair
log.info("repairing")
manager.do_pg_scrub(pool, 0, "repair")
log.info("re-scrubbing")
manager.do_pg_scrub(pool, 0, scrub_type)
# verify consistent
manager.with_pg_state(pool, 0, lambda s: 'inconsistent' not in s)
log.info("done")
def repair_test_2(ctx, manager, config, chooser):
"""
First creates a set of objects and
sets the omap value. It then corrupts an object, does both a scrub
and a deep-scrub, and then corrupts more objects. After that, it
repairs the pool and makes sure that the pool is consistent some
time after a deep-scrub.
:param chooser: primary or replica selection routine.
"""
pool = "repair_pool_2"
manager.wait_for_clean()
with manager.pool(pool, 1):
log.info("starting repair test type 2")
victim_osd = chooser(manager, pool, 0)
# create object
log.info("doing put and setomapval")
manager.do_put(pool, 'file1', '/etc/hosts')
manager.do_rados(['setomapval', 'file1', 'key', 'val'], pool=pool)
manager.do_put(pool, 'file2', '/etc/hosts')
manager.do_put(pool, 'file3', '/etc/hosts')
manager.do_put(pool, 'file4', '/etc/hosts')
manager.do_put(pool, 'file5', '/etc/hosts')
manager.do_rados(['setomapval', 'file5', 'key', 'val'], pool=pool)
manager.do_put(pool, 'file6', '/etc/hosts')
# corrupt object
log.info("corrupting object")
omaperr(manager, victim_osd, pool, 'file1')
# verify inconsistent
log.info("scrubbing")
manager.do_pg_scrub(pool, 0, 'deep-scrub')
manager.with_pg_state(pool, 0, lambda s: 'inconsistent' in s)
# Regression test for bug #4778, should still
# be inconsistent after scrub
manager.do_pg_scrub(pool, 0, 'scrub')
manager.with_pg_state(pool, 0, lambda s: 'inconsistent' in s)
# Additional corruptions including 2 types for file1
log.info("corrupting more objects")
dataerr(manager, victim_osd, pool, 'file1')
mdataerr(manager, victim_osd, pool, 'file2')
trunc(manager, victim_osd, pool, 'file3')
omaperr(manager, victim_osd, pool, 'file6')
# see still inconsistent
log.info("scrubbing")
manager.do_pg_scrub(pool, 0, 'deep-scrub')
manager.with_pg_state(pool, 0, lambda s: 'inconsistent' in s)
# repair
log.info("repairing")
manager.do_pg_scrub(pool, 0, "repair")
# Let repair clear inconsistent flag
time.sleep(10)
# verify consistent
manager.with_pg_state(pool, 0, lambda s: 'inconsistent' not in s)
# In the future repair might determine state of
# inconsistency itself, verify with a deep-scrub
log.info("scrubbing")
manager.do_pg_scrub(pool, 0, 'deep-scrub')
# verify consistent
manager.with_pg_state(pool, 0, lambda s: 'inconsistent' not in s)
log.info("done")
def hinfoerr(manager, victim, pool, obj):
"""
cause an error in the hinfo_key
"""
log.info("remove the hinfo_key")
manager.objectstore_tool(pool,
options='',
args='rm-attr hinfo_key',
object_name=obj,
osd=victim)
def repair_test_erasure_code(manager, corrupter, victim, scrub_type):
"""
Creates an object in the pool, corrupts it,
scrubs it, and verifies that the pool is inconsistent. It then repairs
the pool, rescrubs it, and verifies that the pool is consistent
:param corrupter: error generating function.
    :param victim: osd to corrupt (e.g. 'primary')
:param scrub_type: regular scrub or deep-scrub
"""
pool = "repair_pool_3"
manager.wait_for_clean()
with manager.pool(pool_name=pool, pg_num=1,
erasure_code_profile_name='default'):
log.info("starting repair test for erasure code")
# create object
log.info("doing put")
manager.do_put(pool, 'repair_test_obj', '/etc/hosts')
# corrupt object
log.info("corrupting object")
corrupter(manager, victim, pool, 'repair_test_obj')
# verify inconsistent
log.info("scrubbing")
manager.do_pg_scrub(pool, 0, scrub_type)
manager.with_pg_state(pool, 0, lambda s: 'inconsistent' in s)
# repair
log.info("repairing")
manager.do_pg_scrub(pool, 0, "repair")
log.info("re-scrubbing")
manager.do_pg_scrub(pool, 0, scrub_type)
# verify consistent
manager.with_pg_state(pool, 0, lambda s: 'inconsistent' not in s)
log.info("done")
def task(ctx, config):
"""
Test [deep] repair in several situations:
Repair [Truncate, Data EIO, MData EIO] on [Primary|Replica]
The config should be as follows:
Must include the log-ignorelist below
Must enable filestore_debug_inject_read_err config
example:
tasks:
- chef:
- install:
- ceph:
log-ignorelist:
- 'candidate had a stat error'
- 'candidate had a read error'
- 'deep-scrub 0 missing, 1 inconsistent objects'
- 'deep-scrub 0 missing, 4 inconsistent objects'
- 'deep-scrub [0-9]+ errors'
- '!= omap_digest'
- '!= data_digest'
- 'repair 0 missing, 1 inconsistent objects'
- 'repair 0 missing, 4 inconsistent objects'
- 'repair [0-9]+ errors, [0-9]+ fixed'
- 'scrub 0 missing, 1 inconsistent objects'
- 'scrub [0-9]+ errors'
- 'size 1 != size'
- 'attr name mismatch'
- 'Regular scrub request, deep-scrub details will be lost'
- 'candidate size [0-9]+ info size [0-9]+ mismatch'
conf:
osd:
filestore debug inject read err: true
- repair_test:
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'repair_test task only accepts a dict for config'
manager = ctx.managers['ceph']
manager.wait_for_all_osds_up()
manager.raw_cluster_cmd('osd', 'set', 'noscrub')
manager.raw_cluster_cmd('osd', 'set', 'nodeep-scrub')
repair_test_1(manager, mdataerr, choose_primary, "scrub")
repair_test_1(manager, mdataerr, choose_replica, "scrub")
repair_test_1(manager, dataerr, choose_primary, "deep-scrub")
repair_test_1(manager, dataerr, choose_replica, "deep-scrub")
repair_test_1(manager, trunc, choose_primary, "scrub")
repair_test_1(manager, trunc, choose_replica, "scrub")
repair_test_2(ctx, manager, config, choose_primary)
repair_test_2(ctx, manager, config, choose_replica)
repair_test_erasure_code(manager, hinfoerr, 'primary', "deep-scrub")
manager.raw_cluster_cmd('osd', 'unset', 'noscrub')
manager.raw_cluster_cmd('osd', 'unset', 'nodeep-scrub')
| 9,443 | 30.065789 | 75 |
py
|
null |
ceph-main/qa/tasks/resolve_stuck_peering.py
|
"""
Resolve stuck peering
"""
import logging
import time
from teuthology import misc as teuthology
from tasks.util.rados import rados
log = logging.getLogger(__name__)
def task(ctx, config):
"""
    Test that stuck peering resolves once the dead primary is marked lost.
    Requires 3 osds on a single test node.
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'Resolve stuck peering only accepts a dict for config'
manager = ctx.managers['ceph']
while len(manager.get_osd_status()['up']) < 3:
time.sleep(10)
manager.wait_for_clean()
dummyfile = '/etc/fstab'
dummyfile1 = '/etc/resolv.conf'
#create 1 PG pool
pool='foo'
log.info('creating pool foo')
manager.raw_cluster_cmd('osd', 'pool', 'create', '%s' % pool, '1')
#set min_size of the pool to 1
#so that we can continue with I/O
#when 2 osds are down
manager.set_pool_property(pool, "min_size", 1)
osds = [0, 1, 2]
primary = manager.get_pg_primary('foo', 0)
log.info("primary osd is %d", primary)
others = list(osds)
others.remove(primary)
log.info('writing initial objects')
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
#create few objects
for i in range(100):
rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
manager.wait_for_clean()
#kill other osds except primary
log.info('killing other osds except primary')
for i in others:
manager.kill_osd(i)
for i in others:
manager.mark_down_osd(i)
for i in range(100):
rados(ctx, mon, ['-p', 'foo', 'put', 'new_%d' % i, dummyfile1])
#kill primary osd
manager.kill_osd(primary)
manager.mark_down_osd(primary)
#revive other 2 osds
for i in others:
manager.revive_osd(i)
#make sure that pg is down
#Assuming pg number for single pg pool will start from 0
pgnum=0
pgstr = manager.get_pgid(pool, pgnum)
stats = manager.get_single_pg_stats(pgstr)
print(stats['state'])
timeout=60
start=time.time()
while 'down' not in stats['state']:
assert time.time() - start < timeout, \
'failed to reach down state before timeout expired'
stats = manager.get_single_pg_stats(pgstr)
#mark primary as lost
manager.raw_cluster_cmd('osd', 'lost', '%d' % primary,\
'--yes-i-really-mean-it')
#expect the pg status to be active+undersized+degraded
#pg should recover and become active+clean within timeout
stats = manager.get_single_pg_stats(pgstr)
print(stats['state'])
timeout=10
start=time.time()
while manager.get_num_down():
assert time.time() - start < timeout, \
'failed to recover before timeout expired'
manager.revive_osd(primary)
| 2,881 | 24.504425 | 75 |
py
|
null |
ceph-main/qa/tasks/rgw.py
|
"""
rgw routines
"""
import argparse
import contextlib
import logging
from teuthology.orchestra import run
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.exceptions import ConfigError
from tasks.ceph_manager import get_valgrind_args
from tasks.util import get_remote_for_role
from tasks.util.rgw import rgwadmin, wait_for_radosgw
from tasks.util.rados import (create_ec_pool,
create_replicated_pool,
create_cache_pool)
log = logging.getLogger(__name__)
class RGWEndpoint:
def __init__(self, hostname=None, port=None, cert=None, dns_name=None, website_dns_name=None):
self.hostname = hostname
self.port = port
self.cert = cert
self.dns_name = dns_name
self.website_dns_name = website_dns_name
def url(self):
proto = 'https' if self.cert else 'http'
return '{proto}://{hostname}:{port}/'.format(proto=proto, hostname=self.hostname, port=self.port)
@contextlib.contextmanager
def start_rgw(ctx, config, clients):
"""
Start rgw on remote sites.
"""
log.info('Starting rgw...')
testdir = teuthology.get_testdir(ctx)
for client in clients:
(remote,) = ctx.cluster.only(client).remotes.keys()
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_id = daemon_type + '.' + client_id
client_with_cluster = cluster_name + '.' + client_with_id
client_config = config.get(client)
if client_config is None:
client_config = {}
log.info("rgw %s config is %s", client, client_config)
cmd_prefix = [
'sudo',
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'daemon-helper',
'term',
]
rgw_cmd = ['radosgw']
log.info("Using %s as radosgw frontend", ctx.rgw.frontend)
endpoint = ctx.rgw.role_endpoints[client]
frontends = ctx.rgw.frontend
frontend_prefix = client_config.get('frontend_prefix', None)
if frontend_prefix:
frontends += ' prefix={pfx}'.format(pfx=frontend_prefix)
if endpoint.cert:
# add the ssl certificate path
frontends += ' ssl_certificate={}'.format(endpoint.cert.certificate)
frontends += ' ssl_port={}'.format(endpoint.port)
else:
frontends += ' port={}'.format(endpoint.port)
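        # e.g. 'beast port=80', or with a certificate something like
        # 'beast ssl_certificate=<cert path> ssl_port=443'
        # (paths here are illustrative)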
rgw_cmd.extend([
'--rgw-frontends', frontends,
'-n', client_with_id,
'--cluster', cluster_name,
'-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster),
'--log-file',
'/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster),
'--rgw_ops_log_socket_path',
'{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,
client_with_cluster=client_with_cluster),
])
keystone_role = client_config.get('use-keystone-role', None)
if keystone_role is not None:
if not ctx.keystone:
raise ConfigError('rgw must run after the keystone task')
url = 'http://{host}:{port}/v1/KEY_$(tenant_id)s'.format(host=endpoint.hostname,
port=endpoint.port)
ctx.keystone.create_endpoint(ctx, keystone_role, 'swift', url)
keystone_host, keystone_port = \
ctx.keystone.public_endpoints[keystone_role]
rgw_cmd.extend([
'--rgw_keystone_url',
'http://{khost}:{kport}'.format(khost=keystone_host,
kport=keystone_port),
])
if client_config.get('dns-name') is not None:
rgw_cmd.extend(['--rgw-dns-name', endpoint.dns_name])
if client_config.get('dns-s3website-name') is not None:
rgw_cmd.extend(['--rgw-dns-s3website-name', endpoint.website_dns_name])
vault_role = client_config.get('use-vault-role', None)
barbican_role = client_config.get('use-barbican-role', None)
pykmip_role = client_config.get('use-pykmip-role', None)
token_path = '/etc/ceph/vault-root-token'
if barbican_role is not None:
if not hasattr(ctx, 'barbican'):
raise ConfigError('rgw must run after the barbican task')
barbican_host, barbican_port = \
ctx.barbican.endpoints[barbican_role]
log.info("Use barbican url=%s:%s", barbican_host, barbican_port)
rgw_cmd.extend([
'--rgw_barbican_url',
'http://{bhost}:{bport}'.format(bhost=barbican_host,
bport=barbican_port),
])
elif vault_role is not None:
if not ctx.vault.root_token:
raise ConfigError('vault: no "root_token" specified')
# create token on file
ctx.rgw.vault_role = vault_role
ctx.cluster.only(client).run(args=['sudo', 'echo', '-n', ctx.vault.root_token, run.Raw('|'), 'sudo', 'tee', token_path])
log.info("Token file content")
ctx.cluster.only(client).run(args=['cat', token_path])
log.info("Restrict access to token file")
ctx.cluster.only(client).run(args=['sudo', 'chmod', '600', token_path])
ctx.cluster.only(client).run(args=['sudo', 'chown', 'ceph', token_path])
vault_addr = "{}:{}".format(*ctx.vault.endpoints[vault_role])
rgw_cmd.extend([
'--rgw_crypt_vault_addr', vault_addr,
'--rgw_crypt_vault_token_file', token_path,
'--rgw_crypt_sse_s3_vault_addr', vault_addr,
'--rgw_crypt_sse_s3_vault_token_file', token_path,
])
elif pykmip_role is not None:
if not hasattr(ctx, 'pykmip'):
raise ConfigError('rgw must run after the pykmip task')
ctx.rgw.pykmip_role = pykmip_role
rgw_cmd.extend([
'--rgw_crypt_kmip_addr', "{}:{}".format(*ctx.pykmip.endpoints[pykmip_role]),
])
clientcert = ctx.ssl_certificates.get('kmip-client')
servercert = ctx.ssl_certificates.get('kmip-server')
clientca = ctx.ssl_certificates.get('kmiproot')
clientkey = clientcert.key
clientcert = clientcert.certificate
serverkey = servercert.key
servercert = servercert.certificate
rootkey = clientca.key
rootcert = clientca.certificate
cert_path = '/etc/ceph/'
ctx.cluster.only(client).run(args=['sudo', 'cp', clientcert, cert_path])
ctx.cluster.only(client).run(args=['sudo', 'cp', clientkey, cert_path])
ctx.cluster.only(client).run(args=['sudo', 'cp', servercert, cert_path])
ctx.cluster.only(client).run(args=['sudo', 'cp', serverkey, cert_path])
ctx.cluster.only(client).run(args=['sudo', 'cp', rootkey, cert_path])
ctx.cluster.only(client).run(args=['sudo', 'cp', rootcert, cert_path])
clientcert = cert_path + 'kmip-client.crt'
clientkey = cert_path + 'kmip-client.key'
servercert = cert_path + 'kmip-server.crt'
serverkey = cert_path + 'kmip-server.key'
rootkey = cert_path + 'kmiproot.key'
rootcert = cert_path + 'kmiproot.crt'
ctx.cluster.only(client).run(args=['sudo', 'chmod', '600', clientcert, clientkey, servercert, serverkey, rootkey, rootcert])
ctx.cluster.only(client).run(args=['sudo', 'chown', 'ceph', clientcert, clientkey, servercert, serverkey, rootkey, rootcert])
rgw_cmd.extend([
'--foreground',
run.Raw('|'),
'sudo',
'tee',
'/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(client_with_cluster=client_with_cluster),
run.Raw('2>&1'),
])
if client_config.get('valgrind'):
cmd_prefix = get_valgrind_args(
testdir,
client_with_cluster,
cmd_prefix,
client_config.get('valgrind'),
# see https://github.com/ceph/teuthology/pull/1600
exit_on_first_error=False
)
run_cmd = list(cmd_prefix)
run_cmd.extend(rgw_cmd)
ctx.daemons.add_daemon(
remote, 'rgw', client_with_id,
cluster=cluster_name,
fsid=ctx.ceph[cluster_name].fsid,
args=run_cmd,
logger=log.getChild(client),
stdin=run.PIPE,
wait=False,
)
# XXX: add_daemon() doesn't let us wait until radosgw finishes startup
for client in clients:
endpoint = ctx.rgw.role_endpoints[client]
url = endpoint.url()
log.info('Polling {client} until it starts accepting connections on {url}'.format(client=client, url=url))
(remote,) = ctx.cluster.only(client).remotes.keys()
wait_for_radosgw(url, remote)
try:
yield
finally:
for client in clients:
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_id = daemon_type + '.' + client_id
client_with_cluster = cluster_name + '.' + client_with_id
ctx.daemons.get_daemon('rgw', client_with_id, cluster_name).stop()
ctx.cluster.only(client).run(
args=[
'rm',
'-f',
'{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
client=client_with_cluster),
],
)
ctx.cluster.only(client).run(args=['sudo', 'rm', '-f', token_path])
ctx.cluster.only(client).run(args=['radosgw-admin', 'gc', 'process', '--include-all'])
def assign_endpoints(ctx, config, default_cert):
role_endpoints = {}
for role, client_config in config.items():
client_config = client_config or {}
remote = get_remote_for_role(ctx, role)
cert = client_config.get('ssl certificate', default_cert)
if cert:
# find the certificate created by the ssl task
if not hasattr(ctx, 'ssl_certificates'):
raise ConfigError('rgw: no ssl task found for option "ssl certificate"')
ssl_certificate = ctx.ssl_certificates.get(cert, None)
if not ssl_certificate:
raise ConfigError('rgw: missing ssl certificate "{}"'.format(cert))
else:
ssl_certificate = None
port = client_config.get('port', 443 if ssl_certificate else 80)
# if dns-name is given, use it as the hostname (or as a prefix)
dns_name = client_config.get('dns-name', '')
if len(dns_name) == 0 or dns_name.endswith('.'):
dns_name += remote.hostname
website_dns_name = client_config.get('dns-s3website-name')
if website_dns_name is not None and (len(website_dns_name) == 0 or website_dns_name.endswith('.')):
website_dns_name += remote.hostname
role_endpoints[role] = RGWEndpoint(remote.hostname, port, ssl_certificate, dns_name, website_dns_name)
return role_endpoints
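# A sketch of the per-role config assign_endpoints() consumes (all values
# illustrative; 'ssl certificate' must name a cert created by the ssl task):
#
#   client.0:
#     port: 8000
#     dns-name: 'rgw.'            # trailing dot: remote's hostname appended
#     dns-s3website-name: 'rgw-website.'
#     ssl certificate: rgw-cert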
@contextlib.contextmanager
def create_pools(ctx, clients):
"""Create replicated or erasure coded data pools for rgw."""
log.info('Creating data pools')
for client in clients:
log.debug("Obtaining remote for client {}".format(client))
(remote,) = ctx.cluster.only(client).remotes.keys()
data_pool = 'default.rgw.buckets.data'
cluster_name, daemon_type, client_id = teuthology.split_role(client)
if ctx.rgw.ec_data_pool:
create_ec_pool(remote, data_pool, client, ctx.rgw.data_pool_pg_size,
ctx.rgw.erasure_code_profile, cluster_name, 'rgw')
else:
create_replicated_pool(remote, data_pool, ctx.rgw.data_pool_pg_size, cluster_name, 'rgw')
index_pool = 'default.rgw.buckets.index'
create_replicated_pool(remote, index_pool, ctx.rgw.index_pool_pg_size, cluster_name, 'rgw')
if ctx.rgw.cache_pools:
create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
64*1024*1024, cluster_name)
log.debug('Pools created')
yield
@contextlib.contextmanager
def configure_compression(ctx, clients, compression):
""" set a compression type in the default zone placement """
log.info('Configuring compression type = %s', compression)
for client in clients:
# XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete().
# issue a 'radosgw-admin user list' command to trigger this
rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)
rgwadmin(ctx, client,
cmd=['zone', 'placement', 'modify', '--rgw-zone', 'default',
'--placement-id', 'default-placement',
'--compression', compression],
check_status=True)
yield
@contextlib.contextmanager
def disable_inline_data(ctx, clients):
for client in clients:
# XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete().
# issue a 'radosgw-admin user list' command to trigger this
rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)
rgwadmin(ctx, client,
cmd=['zone', 'placement', 'modify', '--rgw-zone', 'default',
'--placement-id', 'default-placement',
'--placement-inline-data', 'false'],
check_status=True)
yield
@contextlib.contextmanager
def configure_datacache(ctx, clients, datacache_path):
""" create directory for rgw datacache """
log.info('Preparing directory for rgw datacache at %s', datacache_path)
for client in clients:
        if datacache_path is not None:
ctx.cluster.only(client).run(args=['mkdir', '-p', datacache_path])
ctx.cluster.only(client).run(args=['sudo', 'chmod', 'a+rwx', datacache_path])
else:
log.info('path for datacache was not provided')
yield
@contextlib.contextmanager
def configure_storage_classes(ctx, clients, storage_classes):
""" set a compression type in the default zone placement """
sc = [s.strip() for s in storage_classes.split(',')]
for client in clients:
# XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete().
# issue a 'radosgw-admin user list' command to trigger this
rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True)
for storage_class in sc:
log.info('Configuring storage class type = %s', storage_class)
rgwadmin(ctx, client,
cmd=['zonegroup', 'placement', 'add',
'--rgw-zone', 'default',
'--placement-id', 'default-placement',
'--storage-class', storage_class],
check_status=True)
rgwadmin(ctx, client,
cmd=['zone', 'placement', 'add',
'--rgw-zone', 'default',
'--placement-id', 'default-placement',
'--storage-class', storage_class,
'--data-pool', 'default.rgw.buckets.data.' + storage_class.lower()],
check_status=True)
yield
@contextlib.contextmanager
def task(ctx, config):
"""
For example, to run rgw on all clients::
tasks:
- ceph:
- rgw:
To only run on certain clients::
tasks:
- ceph:
- rgw: [client.0, client.3]
or
tasks:
- ceph:
- rgw:
client.0:
client.3:
To run radosgw through valgrind:
tasks:
- ceph:
- rgw:
client.0:
valgrind: [--tool=memcheck]
client.3:
valgrind: [--tool=memcheck]
To configure data or index pool pg_size:
overrides:
rgw:
data_pool_pg_size: 256
index_pool_pg_size: 128
"""
if config is None:
config = dict(('client.{id}'.format(id=id_), None)
for id_ in teuthology.all_roles_of_type(
ctx.cluster, 'client'))
elif isinstance(config, list):
config = dict((name, None) for name in config)
clients = config.keys() # http://tracker.ceph.com/issues/20417
overrides = ctx.config.get('overrides', {})
teuthology.deep_merge(config, overrides.get('rgw', {}))
ctx.rgw = argparse.Namespace()
ctx.rgw_cloudtier = None
ctx.rgw.ec_data_pool = bool(config.pop('ec-data-pool', False))
ctx.rgw.erasure_code_profile = config.pop('erasure_code_profile', {})
ctx.rgw.cache_pools = bool(config.pop('cache-pools', False))
ctx.rgw.frontend = config.pop('frontend', 'beast')
ctx.rgw.compression_type = config.pop('compression type', None)
ctx.rgw.inline_data = config.pop('inline data', True)
ctx.rgw.storage_classes = config.pop('storage classes', None)
default_cert = config.pop('ssl certificate', None)
ctx.rgw.data_pool_pg_size = config.pop('data_pool_pg_size', 64)
ctx.rgw.index_pool_pg_size = config.pop('index_pool_pg_size', 64)
ctx.rgw.datacache = bool(config.pop('datacache', False))
ctx.rgw.datacache_path = config.pop('datacache_path', None)
ctx.rgw.config = config
log.debug("config is {}".format(config))
log.debug("client list is {}".format(clients))
ctx.rgw.role_endpoints = assign_endpoints(ctx, config, default_cert)
subtasks = [
lambda: create_pools(ctx=ctx, clients=clients),
]
if ctx.rgw.compression_type:
subtasks.extend([
lambda: configure_compression(ctx=ctx, clients=clients,
compression=ctx.rgw.compression_type),
])
if not ctx.rgw.inline_data:
subtasks.extend([
lambda: disable_inline_data(ctx=ctx, clients=clients),
])
if ctx.rgw.datacache:
subtasks.extend([
lambda: configure_datacache(ctx=ctx, clients=clients,
datacache_path=ctx.rgw.datacache_path),
])
if ctx.rgw.storage_classes:
subtasks.extend([
lambda: configure_storage_classes(ctx=ctx, clients=clients,
storage_classes=ctx.rgw.storage_classes),
])
subtasks.extend([
lambda: start_rgw(ctx=ctx, config=config, clients=clients),
])
with contextutil.nested(*subtasks):
yield
| 19,140 | 39.381857 | 137 |
py
|
null |
ceph-main/qa/tasks/rgw_cloudtier.py
|
"""
rgw_cloudtier configuration routines
"""
import argparse
import logging
from teuthology import misc as teuthology
from teuthology.exceptions import ConfigError
from tasks.util.rgw import rgwadmin, wait_for_radosgw
from teuthology.task import Task
log = logging.getLogger(__name__)
class RGWCloudTier(Task):
"""
Configure CloudTier storage class.
To configure cloudtiering on any client::
tasks:
- ceph:
- rgw:
- rgw-cloudtier:
client.0:
cloud_storage_class:
cloud_client:
cloud_regular_storage_class:
cloud_target_storage_class:
cloud_retain_head_object:
cloud_target_path:
cloudtier_user:
cloud_secret:
cloud_access_key:
"""
def __init__(self, ctx, config):
super(RGWCloudTier, self).__init__(ctx, config)
def setup(self):
super(RGWCloudTier, self).setup()
overrides = self.ctx.config.get('overrides', {})
teuthology.deep_merge(self.config, overrides.get('rgw-cloudtier', {}))
if not self.ctx.rgw:
raise ConfigError('rgw-cloudtier must run after the rgw task')
self.ctx.rgw_cloudtier = argparse.Namespace()
self.ctx.rgw_cloudtier.config = self.config
log.info('Configuring rgw cloudtier ...')
clients = self.config.keys() # http://tracker.ceph.com/issues/20417
for client in clients:
client_config = self.config.get(client)
if client_config is None:
client_config = {}
if client_config is not None:
                log.info('client %s cloudtier config is %s', client, client_config)
# configuring cloudtier
cloud_client = client_config.get('cloud_client')
cloud_storage_class = client_config.get('cloud_storage_class')
cloud_target_path = client_config.get('cloud_target_path')
cloud_target_storage_class = client_config.get('cloud_target_storage_class')
cloud_retain_head_object = client_config.get('cloud_retain_head_object')
cloudtier_user = client_config.get('cloudtier_user')
cloud_access_key = cloudtier_user.get('cloud_access_key')
cloud_secret = cloudtier_user.get('cloud_secret')
# XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete().
# issue a 'radosgw-admin user list' command to trigger this
rgwadmin(self.ctx, client, cmd=['user', 'list'], check_status=True)
endpoint = self.ctx.rgw.role_endpoints[cloud_client]
# create cloudtier storage class
tier_config_params = "endpoint=" + endpoint.url() + \
",access_key=" + cloud_access_key + \
",secret=" + cloud_secret + \
",retain_head_object=" + cloud_retain_head_object
                if cloud_target_path is not None:
tier_config_params += ",target_path=" + cloud_target_path
                if cloud_target_storage_class is not None:
tier_config_params += ",target_storage_class=" + cloud_target_storage_class
log.info('Configuring cloud-s3 tier storage class type = %s', cloud_storage_class)
rgwadmin(self.ctx, client,
cmd=['zonegroup', 'placement', 'add',
'--rgw-zone', 'default',
'--placement-id', 'default-placement',
'--storage-class', cloud_storage_class,
'--tier-type', 'cloud-s3',
'--tier-config', tier_config_params],
check_status=True)
## create cloudtier user with the access keys given on the cloud client
cloud_tier_user_id = "cloud-tier-user-" + cloud_client
cloud_tier_user_name = "CLOUD TIER USER - " + cloud_client
rgwadmin(self.ctx, cloud_client,
cmd=['user', 'create', '--uid', cloud_tier_user_id,
'--display-name', cloud_tier_user_name,
'--access-key', cloud_access_key,
'--secret', cloud_secret,
'--caps', 'user-policy=*'],
check_status=True)
log.info('Finished Configuring rgw cloudtier ...')
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_id = daemon_type + '.' + client_id
self.ctx.daemons.get_daemon('rgw', client_with_id, cluster_name).restart()
log.info('restarted rgw daemon ...')
(remote,) = self.ctx.cluster.only(client).remotes.keys()
wait_for_radosgw(endpoint.url(), remote)
task = RGWCloudTier
| 5,081 | 40.317073 | 110 |
py
|
null |
ceph-main/qa/tasks/rgw_logsocket.py
|
"""
rgw s3tests logging wrappers
"""
from io import BytesIO
from configobj import ConfigObj
import contextlib
import logging
from tasks import s3tests
from teuthology import misc as teuthology
from teuthology import contextutil
log = logging.getLogger(__name__)
@contextlib.contextmanager
def download(ctx, config):
"""
Run s3tests download function
"""
return s3tests.download(ctx, config)
def _config_user(s3tests_conf, section, user):
"""
Run s3tests user config function
"""
return s3tests._config_user(s3tests_conf, section, user)
@contextlib.contextmanager
def create_users(ctx, config):
"""
Run s3tests user create function
"""
return s3tests.create_users(ctx, config)
@contextlib.contextmanager
def configure(ctx, config):
"""
Run s3tests user configure function
"""
return s3tests.configure(ctx, config)
@contextlib.contextmanager
def run_tests(ctx, config):
"""
Run remote netcat tests
"""
assert isinstance(config, dict)
testdir = teuthology.get_testdir(ctx)
for client, client_config in config.items():
client_config['extra_args'] = [
's3tests.functional.test_s3:test_bucket_list_return_data',
]
# args = [
# 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
# '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
# '-w',
# '{tdir}/s3-tests'.format(tdir=testdir),
# '-v',
# 's3tests.functional.test_s3:test_bucket_list_return_data',
# ]
# if client_config is not None and 'extra_args' in client_config:
# args.extend(client_config['extra_args'])
#
# ctx.cluster.only(client).run(
# args=args,
# )
s3tests.run_tests(ctx, config)
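    # After the s3 tests run, read back the rgw ops log from its unix
    # socket; a healthy gateway should have logged a non-trivial amount
    # (the assert below requires more than 100 bytes).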
netcat_out = BytesIO()
for client, client_config in config.items():
ctx.cluster.only(client).run(
args = [
'netcat',
'-w', '5',
'-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
],
stdout = netcat_out,
)
out = netcat_out.getvalue()
assert len(out) > 100
    log.info('Received %s', out)
yield
@contextlib.contextmanager
def task(ctx, config):
"""
Run some s3-tests suite against rgw, verify opslog socket returns data
Must restrict testing to a particular client::
tasks:
- ceph:
- rgw: [client.0]
- s3tests: [client.0]
To pass extra arguments to nose (e.g. to run a certain test)::
tasks:
- ceph:
- rgw: [client.0]
- s3tests:
client.0:
extra_args: ['test_s3:test_object_acl_grand_public_read']
client.1:
extra_args: ['--exclude', 'test_100_continue']
"""
assert hasattr(ctx, 'rgw'), 'rgw-logsocket must run after the rgw task'
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task rgw-logsocket only supports a list or dictionary for configuration"
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
clients = config.keys()
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
for (client, cconf) in config.items():
teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))
log.debug('config is %s', config)
s3tests_conf = {}
for client in clients:
endpoint = ctx.rgw.role_endpoints.get(client)
assert endpoint, 'rgw-logsocket: no rgw endpoint for {}'.format(client)
s3tests_conf[client] = ConfigObj(
indent_type='',
infile={
'DEFAULT':
{
'port' : endpoint.port,
'is_secure' : endpoint.cert is not None,
},
'fixtures' : {},
's3 main' : {},
's3 alt' : {},
}
)
with contextutil.nested(
lambda: download(ctx=ctx, config=config),
lambda: create_users(ctx=ctx, config=dict(
clients=clients,
s3tests_conf=s3tests_conf,
)),
lambda: configure(ctx=ctx, config=dict(
clients=config,
s3tests_conf=s3tests_conf,
)),
lambda: run_tests(ctx=ctx, config=config),
):
yield
| 4,733 | 27.518072 | 105 |
py
|
null |
ceph-main/qa/tasks/rgw_module.py
|
import logging
import yaml
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def _shell(ctx, cluster_name, remote, args, extra_cephadm_args=[], **kwargs):
teuthology.get_testdir(ctx)
return remote.run(
args=[
'sudo',
ctx.cephadm,
'--image', ctx.ceph[cluster_name].image,
'shell',
'-c', '/etc/ceph/{}.conf'.format(cluster_name),
'-k', '/etc/ceph/{}.client.admin.keyring'.format(cluster_name),
'--fsid', ctx.ceph[cluster_name].fsid,
] + extra_cephadm_args + [
'--',
] + args,
**kwargs
)
def apply(ctx, config):
"""
Apply spec
tasks:
- rgw_module.apply:
specs:
- rgw_realm: myrealm1
rgw_zonegroup: myzonegroup1
rgw_zone: myzone1
placement:
hosts:
- ceph-node-0
- ceph-node-1
spec:
rgw_frontend_port: 5500
"""
cluster_name = config.get('cluster', 'ceph')
specs = config.get('specs', [])
y = yaml.dump_all(specs)
log.info(f'Applying spec(s):\n{y}')
_shell(
ctx, cluster_name, ctx.ceph[cluster_name].bootstrap_remote,
['ceph', 'rgw', 'realm', 'bootstrap', '-i', '-'],
stdin=y,
)
| 1,375 | 24.481481 | 77 |
py
|
null |
ceph-main/qa/tasks/rgw_multisite.py
|
"""
rgw multisite configuration routines
"""
import argparse
import logging
import random
import string
from copy import deepcopy
from tasks.util.rgw import rgwadmin, wait_for_radosgw
from tasks.util.rados import create_ec_pool, create_replicated_pool
from tasks.rgw_multi import multisite
from tasks.rgw_multi.zone_rados import RadosZone as RadosZone
from teuthology.orchestra import run
from teuthology import misc
from teuthology.exceptions import ConfigError
from teuthology.task import Task
log = logging.getLogger(__name__)
class RGWMultisite(Task):
"""
Performs rgw multisite configuration to match the given realm definition.
- rgw-multisite:
realm:
name: test-realm
is_default: true
List one or more zonegroup definitions. These are provided as json
input to `radosgw-admin zonegroup set`, with the exception of these keys:
* 'is_master' is passed on the command line as --master
* 'is_default' is passed on the command line as --default
* 'endpoints' given as client names are replaced with actual endpoints
zonegroups:
- name: test-zonegroup
api_name: test-api
is_master: true
is_default: true
endpoints: [c1.client.0]
List each of the zones to be created in this zonegroup.
zones:
- name: test-zone1
is_master: true
is_default: true
endpoints: [c1.client.0]
- name: test-zone2
is_default: true
endpoints: [c2.client.0]
A complete example:
tasks:
- install:
- ceph: {cluster: c1}
- ceph: {cluster: c2}
- rgw:
c1.client.0:
c2.client.0:
- rgw-multisite:
realm:
name: test-realm
is_default: true
zonegroups:
- name: test-zonegroup
is_master: true
is_default: true
zones:
- name: test-zone1
is_master: true
is_default: true
endpoints: [c1.client.0]
- name: test-zone2
is_default: true
endpoints: [c2.client.0]
"""
def __init__(self, ctx, config):
super(RGWMultisite, self).__init__(ctx, config)
def setup(self):
super(RGWMultisite, self).setup()
overrides = self.ctx.config.get('overrides', {})
misc.deep_merge(self.config, overrides.get('rgw-multisite', {}))
if not self.ctx.rgw:
raise ConfigError('rgw-multisite must run after the rgw task')
role_endpoints = self.ctx.rgw.role_endpoints
# construct Clusters and Gateways for each client in the rgw task
clusters, gateways = extract_clusters_and_gateways(self.ctx,
role_endpoints)
# get the master zone and zonegroup configuration
mz, mzg = extract_master_zone_zonegroup(self.config['zonegroups'])
cluster1 = cluster_for_zone(clusters, mz)
# create the realm and period on the master zone's cluster
log.info('creating realm..')
realm = create_realm(cluster1, self.config['realm'])
period = realm.current_period
creds = gen_credentials()
# create the master zonegroup and its master zone
log.info('creating master zonegroup..')
master_zonegroup = create_zonegroup(cluster1, gateways, period,
deepcopy(mzg))
period.master_zonegroup = master_zonegroup
log.info('creating master zone..')
master_zone = create_zone(self.ctx, cluster1, gateways, creds,
master_zonegroup, deepcopy(mz))
master_zonegroup.master_zone = master_zone
period.update(master_zone, commit=True)
restart_zone_gateways(master_zone) # restart with --rgw-zone
# create the admin user on the master zone
log.info('creating admin user..')
user_args = ['--display-name', 'Realm Admin', '--system']
user_args += creds.credential_args()
admin_user = multisite.User('realm-admin')
admin_user.create(master_zone, user_args)
# process 'zonegroups'
for zg_config in self.config['zonegroups']:
zones_config = zg_config.pop('zones')
zonegroup = None
for zone_config in zones_config:
# get the cluster for this zone
cluster = cluster_for_zone(clusters, zone_config)
if cluster != cluster1: # already created on master cluster
log.info('pulling realm configuration to %s', cluster.name)
realm.pull(cluster, master_zone.gateways[0], creds)
# use the first zone's cluster to create the zonegroup
if not zonegroup:
if zg_config['name'] == master_zonegroup.name:
zonegroup = master_zonegroup
else:
log.info('creating zonegroup..')
zonegroup = create_zonegroup(cluster, gateways,
period, zg_config)
if zone_config['name'] == master_zone.name:
# master zone was already created
zone = master_zone
else:
# create the zone and commit the period
log.info('creating zone..')
zone = create_zone(self.ctx, cluster, gateways, creds,
zonegroup, zone_config)
period.update(zone, commit=True)
restart_zone_gateways(zone) # restart with --rgw-zone
# attach configuration to the ctx for other tasks
self.ctx.rgw_multisite = argparse.Namespace()
self.ctx.rgw_multisite.clusters = clusters
self.ctx.rgw_multisite.gateways = gateways
self.ctx.rgw_multisite.realm = realm
self.ctx.rgw_multisite.admin_user = admin_user
log.info('rgw multisite configuration completed')
def end(self):
del self.ctx.rgw_multisite
class Cluster(multisite.Cluster):
""" Issues 'radosgw-admin' commands with the rgwadmin() helper """
def __init__(self, ctx, name, client):
super(Cluster, self).__init__()
self.ctx = ctx
self.name = name
self.client = client
def admin(self, args = None, **kwargs):
""" radosgw-admin command """
args = args or []
args += ['--cluster', self.name]
args += ['--debug-rgw', str(kwargs.pop('debug_rgw', 0))]
args += ['--debug-ms', str(kwargs.pop('debug_ms', 0))]
if kwargs.pop('read_only', False):
args += ['--rgw-cache-enabled', 'false']
kwargs['decode'] = False
check_retcode = kwargs.pop('check_retcode', True)
r, s = rgwadmin(self.ctx, self.client, args, **kwargs)
if check_retcode:
assert r == 0
return s, r
class Gateway(multisite.Gateway):
""" Controls a radosgw instance using its daemon """
def __init__(self, role, remote, daemon, *args, **kwargs):
super(Gateway, self).__init__(*args, **kwargs)
self.role = role
self.remote = remote
self.daemon = daemon
def set_zone(self, zone):
""" set the zone and add its args to the daemon's command line """
assert self.zone is None, 'zone can only be set once'
self.zone = zone
        # daemon.restart_with_args() would be perfect for this, except that
        # the radosgw args likely include a pipe and redirect, so zone
        # arguments appended at the end would never actually reach radosgw
args = self.daemon.command_kwargs.get('args', [])
try:
# insert zone args before the first |
pipe = args.index(run.Raw('|'))
args = args[0:pipe] + zone.zone_args() + args[pipe:]
except ValueError:
args += zone.zone_args()
self.daemon.command_kwargs['args'] = args
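    # Illustration of the splice above (the command line is hypothetical):
    # args like ['radosgw', '-f', run.Raw('|'), 'tee', '/path/rgw.log'] become
    # ['radosgw', '-f', *zone_args, run.Raw('|'), 'tee', '/path/rgw.log'],
    # so the zone flags reach radosgw rather than the tee process.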
def start(self, args = None):
""" (re)start the daemon """
self.daemon.restart()
# wait until startup completes
wait_for_radosgw(self.endpoint(), self.remote)
def stop(self):
""" stop the daemon """
self.daemon.stop()
def extract_clusters_and_gateways(ctx, role_endpoints):
""" create cluster and gateway instances for all of the radosgw roles """
clusters = {}
gateways = {}
for role, endpoint in role_endpoints.items():
cluster_name, daemon_type, client_id = misc.split_role(role)
# find or create the cluster by name
cluster = clusters.get(cluster_name)
if not cluster:
clusters[cluster_name] = cluster = Cluster(ctx, cluster_name, role)
# create a gateway for this daemon
client_with_id = daemon_type + '.' + client_id # match format from rgw.py
daemon = ctx.daemons.get_daemon('rgw', client_with_id, cluster_name)
if not daemon:
raise ConfigError('no daemon for role=%s cluster=%s type=rgw id=%s' % \
(role, cluster_name, client_id))
(remote,) = ctx.cluster.only(role).remotes.keys()
gateways[role] = Gateway(role, remote, daemon, endpoint.hostname,
endpoint.port, cluster)
return clusters, gateways
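# Shape sketch of the extraction above (the role name is illustrative):
# misc.split_role('c1.client.0') yields ('c1', 'client', '0'), so each
# radosgw role becomes one Gateway and is grouped under a Cluster by name.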
def create_realm(cluster, config):
""" create a realm from configuration and initialize its first period """
realm = multisite.Realm(config['name'])
args = []
if config.get('is_default', False):
args += ['--default']
realm.create(cluster, args)
realm.current_period = multisite.Period(realm)
return realm
def extract_user_credentials(config):
""" extract keys from configuration """
return multisite.Credentials(config['access_key'], config['secret_key'])
def extract_master_zone(zonegroup_config):
""" find and return the master zone definition """
master = None
for zone in zonegroup_config['zones']:
if not zone.get('is_master', False):
continue
if master:
raise ConfigError('zones %s and %s cannot both set \'is_master\'' % \
(master['name'], zone['name']))
master = zone
# continue the loop so we can detect duplicates
if not master:
raise ConfigError('one zone must set \'is_master\' in zonegroup %s' % \
zonegroup_config['name'])
return master
def extract_master_zone_zonegroup(zonegroups_config):
""" find and return the master zone and zonegroup definitions """
master_zone, master_zonegroup = (None, None)
for zonegroup in zonegroups_config:
# verify that all zonegroups have a master zone set, even if they
# aren't in the master zonegroup
zone = extract_master_zone(zonegroup)
if not zonegroup.get('is_master', False):
continue
if master_zonegroup:
raise ConfigError('zonegroups %s and %s cannot both set \'is_master\'' % \
(master_zonegroup['name'], zonegroup['name']))
master_zonegroup = zonegroup
master_zone = zone
# continue the loop so we can detect duplicates
if not master_zonegroup:
raise ConfigError('one zonegroup must set \'is_master\'')
return master_zone, master_zonegroup
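# A minimal sketch of the 'zonegroups' configuration these helpers walk
# (all names are made up; real suites usually set more fields):
#
#   zonegroups:
#   - name: zg1
#     is_master: true
#     zones:
#     - name: z1
#       is_master: true
#       endpoints: [c1.client.0]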
def extract_zone_cluster_name(zone_config):
""" return the cluster (must be common to all zone endpoints) """
cluster_name = None
endpoints = zone_config.get('endpoints')
if not endpoints:
raise ConfigError('zone %s missing \'endpoints\' list' % \
zone_config['name'])
for role in endpoints:
name, _, _ = misc.split_role(role)
if not cluster_name:
cluster_name = name
elif cluster_name != name:
raise ConfigError('all zone %s endpoints must be in the same cluster' % \
zone_config['name'])
return cluster_name
def cluster_for_zone(clusters, zone_config):
""" return the cluster entry for the given zone """
name = extract_zone_cluster_name(zone_config)
try:
return clusters[name]
except KeyError:
raise ConfigError('no cluster %s found' % name)
def gen_access_key():
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
def gen_secret():
return ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(32))
def gen_credentials():
return multisite.Credentials(gen_access_key(), gen_secret())
def extract_gateway_endpoints(gateways, endpoints_config):
""" return a list of gateway endpoints associated with the given roles """
endpoints = []
for role in endpoints_config:
try:
# replace role names with their gateway's endpoint
endpoints.append(gateways[role].endpoint())
except KeyError:
raise ConfigError('no radosgw endpoint found for role %s' % role)
return endpoints
def is_default_arg(config):
return ['--default'] if config.pop('is_default', False) else []
def is_master_arg(config):
return ['--master'] if config.pop('is_master', False) else []
def create_zonegroup(cluster, gateways, period, config):
""" pass the zonegroup configuration to `zonegroup set` """
config.pop('zones', None) # remove 'zones' from input to `zonegroup set`
endpoints = config.get('endpoints')
if endpoints:
# replace client names with their gateway endpoints
config['endpoints'] = extract_gateway_endpoints(gateways, endpoints)
zonegroup = multisite.ZoneGroup(config['name'], period)
# `zonegroup set` needs --default on command line, and 'is_master' in json
args = is_default_arg(config)
zonegroup.set(cluster, config, args)
period.zonegroups.append(zonegroup)
return zonegroup
def create_zone(ctx, cluster, gateways, creds, zonegroup, config):
""" create a zone with the given configuration """
    zone = RadosZone(config['name'], zonegroup, cluster)
# collect Gateways for the zone's endpoints
endpoints = config.get('endpoints')
if not endpoints:
raise ConfigError('no \'endpoints\' for zone %s' % config['name'])
zone.gateways = [gateways[role] for role in endpoints]
for gateway in zone.gateways:
gateway.set_zone(zone)
# format the gateway endpoints
endpoints = [g.endpoint() for g in zone.gateways]
args = is_default_arg(config)
args += is_master_arg(config)
args += creds.credential_args()
if len(endpoints):
args += ['--endpoints', ','.join(endpoints)]
zone.create(cluster, args)
zonegroup.zones.append(zone)
create_zone_pools(ctx, zone)
if ctx.rgw.compression_type:
configure_zone_compression(zone, ctx.rgw.compression_type)
zonegroup.zones_by_type.setdefault(zone.tier_type(), []).append(zone)
if zone.is_read_only():
zonegroup.ro_zones.append(zone)
else:
zonegroup.rw_zones.append(zone)
return zone
def create_zone_pools(ctx, zone):
""" Create the data_pool for each placement type """
gateway = zone.gateways[0]
cluster = zone.cluster
for pool_config in zone.data.get('placement_pools', []):
pool_name = pool_config['val']['storage_classes']['STANDARD']['data_pool']
if ctx.rgw.ec_data_pool:
create_ec_pool(gateway.remote, pool_name, zone.name, 64,
ctx.rgw.erasure_code_profile, cluster.name, 'rgw')
else:
create_replicated_pool(gateway.remote, pool_name, 64, cluster.name, 'rgw')
def configure_zone_compression(zone, compression):
""" Set compression type in the zone's default-placement """
zone.json_command(zone.cluster, 'placement', ['modify',
'--placement-id', 'default-placement',
'--compression', compression
])
def restart_zone_gateways(zone):
zone.stop()
zone.start()
task = RGWMultisite
| 16,450 | 37.347319 | 117 |
py
|
null |
ceph-main/qa/tasks/rgw_multisite_tests.py
|
"""
rgw multisite testing
"""
import importlib.util
import logging
import nose.core
import nose.config
import sys
from nose.plugins.manager import DefaultPluginManager
from teuthology.config import config as teuth_config
from teuthology.exceptions import ConfigError
from teuthology.repo_utils import fetch_repo
from teuthology.task import Task
from teuthology import misc
log = logging.getLogger(__name__)
class RGWMultisiteTests(Task):
"""
Runs the rgw_multi tests against a multisite configuration created by the
rgw-multisite task. Tests are run with nose, using any additional 'args'
provided. Overrides for tests.Config can be set in 'config'. The 'branch'
and 'repo' can be overridden to clone the rgw_multi tests from another
release.
- rgw-multisite-tests:
args:
- tests.py:test_object_sync
config:
reconfigure_delay: 60
branch: octopus
repo: https://github.com/ceph/ceph.git
"""
def __init__(self, ctx, config):
super(RGWMultisiteTests, self).__init__(ctx, config)
def setup(self):
super(RGWMultisiteTests, self).setup()
overrides = self.ctx.config.get('overrides', {})
misc.deep_merge(self.config, overrides.get('rgw-multisite-tests', {}))
        if not hasattr(self.ctx, 'rgw_multisite'):
            raise ConfigError('rgw-multisite-tests must run after the rgw-multisite task')
realm = self.ctx.rgw_multisite.realm
master_zone = realm.meta_master_zone()
branch = self.config.get('branch')
if not branch:
# run from suite_path
suite_path = self.ctx.config.get('suite_path')
self.module_path = suite_path + '/../src/test/rgw/rgw_multi'
else:
# clone the qa branch
repo = self.config.get('repo', teuth_config.get_ceph_qa_suite_git_url())
log.info("cloning suite branch %s from %s...", branch, repo)
clonedir = fetch_repo(repo, branch)
# import its version of rgw_multi
self.module_path = clonedir + '/src/test/rgw/rgw_multi'
log.info("importing tests from %s", self.module_path)
spec = importlib.util.spec_from_file_location('rgw_multi', self.module_path + '/__init__.py')
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
from rgw_multi import multisite, tests
# create the test user
log.info('creating test user..')
user = multisite.User('rgw-multisite-test-user')
user.create(master_zone, ['--display-name', 'Multisite Test User',
'--gen-access-key', '--gen-secret', '--caps', 'roles=*'])
config = self.config.get('config', {})
tests.init_multi(realm, user, tests.Config(**config))
tests.realm_meta_checkpoint(realm)
def begin(self):
# extra arguments for nose can be passed as a string or list
extra_args = self.config.get('args', [])
if not isinstance(extra_args, list):
extra_args = [extra_args]
argv = [__name__] + extra_args
log.info("running rgw multisite tests on '%s' with args=%r",
self.module_path, extra_args)
# run nose tests in the module path
conf = nose.config.Config(stream=get_log_stream(), verbosity=2, workingDir=self.module_path)
conf.plugins = DefaultPluginManager() # overrides default = NoPlugins()
assert nose.run(argv=argv, config=conf), 'rgw multisite test failures'
def get_log_stream():
""" return a log stream for nose output """
# XXX: this is a workaround for IOErrors when nose writes to stderr,
# copied from vstart_runner.py
class LogStream(object):
def __init__(self):
self.buffer = ""
def write(self, data):
self.buffer += data
if "\n" in self.buffer:
lines = self.buffer.split("\n")
for line in lines[:-1]:
log.info(line)
self.buffer = lines[-1]
def flush(self):
pass
return LogStream()
task = RGWMultisiteTests
ceph-main/qa/tasks/rook.py
"""
Rook cluster task
"""
import argparse
import configobj
import contextlib
import json
import logging
import os
import yaml
from io import BytesIO
from tarfile import ReadError
from tasks.ceph_manager import CephManager
from teuthology import misc as teuthology
from teuthology.config import config as teuth_config
from teuthology.contextutil import safe_while
from teuthology.orchestra import run
from teuthology import contextutil
from tasks.ceph import healthy
from tasks.cephadm import update_archive_setting
log = logging.getLogger(__name__)
def path_to_examples(ctx, cluster_name : str) -> str:
for p in ['rook/deploy/examples/', 'rook/cluster/examples/kubernetes/ceph/']:
try:
ctx.rook[cluster_name].remote.get_file(p + 'operator.yaml')
return p
        except Exception:
            pass
assert False, 'Path to examples not found'
def _kubectl(ctx, config, args, **kwargs):
cluster_name = config.get('cluster', 'ceph')
return ctx.rook[cluster_name].remote.run(
args=['kubectl'] + args,
**kwargs
)
def shell(ctx, config):
"""
Run command(s) inside the rook tools container.
tasks:
- kubeadm:
- rook:
- rook.shell:
- ceph -s
or
tasks:
- kubeadm:
- rook:
- rook.shell:
commands:
- ceph -s
"""
if isinstance(config, list):
config = {'commands': config}
for cmd in config.get('commands', []):
if isinstance(cmd, str):
_shell(ctx, config, cmd.split(' '))
else:
_shell(ctx, config, cmd)
def _shell(ctx, config, args, **kwargs):
cluster_name = config.get('cluster', 'ceph')
return _kubectl(
ctx, config,
[
'-n', 'rook-ceph',
'exec',
ctx.rook[cluster_name].toolbox, '--'
] + args,
**kwargs
)
@contextlib.contextmanager
def rook_operator(ctx, config):
cluster_name = config['cluster']
rook_branch = config.get('rook_branch', 'master')
rook_git_url = config.get('rook_git_url', 'https://github.com/rook/rook')
log.info(f'Cloning {rook_git_url} branch {rook_branch}')
ctx.rook[cluster_name].remote.run(
args=[
'rm', '-rf', 'rook',
run.Raw('&&'),
'git',
'clone',
'--single-branch',
'--branch', rook_branch,
rook_git_url,
'rook',
]
)
# operator.yaml
log.info(os.path.abspath(os.getcwd()))
object_methods = [method_name for method_name in dir(ctx.rook[cluster_name].remote)
if callable(getattr(ctx.rook[cluster_name].remote, method_name))]
log.info(object_methods)
operator_yaml = ctx.rook[cluster_name].remote.read_file(
(path_to_examples(ctx, cluster_name) + 'operator.yaml')
)
rook_image = config.get('rook_image')
if rook_image:
log.info(f'Patching operator to use image {rook_image}')
crs = list(yaml.load_all(operator_yaml, Loader=yaml.FullLoader))
assert len(crs) == 2
crs[1]['spec']['template']['spec']['containers'][0]['image'] = rook_image
operator_yaml = yaml.dump_all(crs)
ctx.rook[cluster_name].remote.write_file('operator.yaml', operator_yaml)
op_job = None
try:
log.info('Deploying operator')
_kubectl(ctx, config, [
'create',
'-f', (path_to_examples(ctx, cluster_name) + 'crds.yaml'),
'-f', (path_to_examples(ctx, cluster_name) + 'common.yaml'),
'-f', 'operator.yaml',
])
# on centos:
if teuthology.get_distro(ctx) == 'centos':
_kubectl(ctx, config, [
'-n', 'rook-ceph',
'set', 'env', 'deploy/rook-ceph-operator',
'ROOK_HOSTPATH_REQUIRES_PRIVILEGED=true'
])
# wait for operator
op_name = None
with safe_while(sleep=10, tries=90, action="wait for operator") as proceed:
while not op_name and proceed():
p = _kubectl(
ctx, config,
['-n', 'rook-ceph', 'get', 'pods', '-l', 'app=rook-ceph-operator'],
stdout=BytesIO(),
)
for line in p.stdout.getvalue().decode('utf-8').strip().splitlines():
name, ready, status, _ = line.split(None, 3)
if status == 'Running':
op_name = name
break
# log operator output
op_job = _kubectl(
ctx,
config,
['-n', 'rook-ceph', 'logs', '-f', op_name],
wait=False,
logger=log.getChild('operator'),
)
yield
except Exception as e:
log.exception(e)
raise
finally:
log.info('Cleaning up rook operator')
_kubectl(ctx, config, [
'delete',
'-f', 'operator.yaml',
])
if False:
# don't bother since we'll tear down k8s anyway (and this mysteriously
# fails sometimes when deleting some of the CRDs... not sure why!)
_kubectl(ctx, config, [
'delete',
                '-f', (path_to_examples(ctx, cluster_name) + 'common.yaml'),
])
_kubectl(ctx, config, [
'delete',
                '-f', (path_to_examples(ctx, cluster_name) + 'crds.yaml'),
])
ctx.rook[cluster_name].remote.run(args=['rm', '-rf', 'rook', 'operator.yaml'])
if op_job:
op_job.wait()
run.wait(
ctx.cluster.run(
args=[
'sudo', 'rm', '-rf', '/var/lib/rook'
]
)
)
@contextlib.contextmanager
def ceph_log(ctx, config):
cluster_name = config['cluster']
log_dir = '/var/lib/rook/rook-ceph/log'
update_archive_setting(ctx, 'log', log_dir)
try:
yield
except Exception:
# we need to know this below
ctx.summary['success'] = False
raise
finally:
log.info('Checking cluster log for badness...')
def first_in_ceph_log(pattern, excludes):
"""
            Find the first occurrence of the pattern specified in the Ceph log.
            Returns None if none found.
:param pattern: Pattern scanned for.
:param excludes: Patterns to ignore.
:return: First line of text (or None if not found)
"""
args = [
'sudo',
'egrep', pattern,
f'{log_dir}/ceph.log',
]
if excludes:
for exclude in excludes:
args.extend([run.Raw('|'), 'egrep', '-v', exclude])
args.extend([
run.Raw('|'), 'head', '-n', '1',
])
r = ctx.rook[cluster_name].remote.run(
stdout=BytesIO(),
args=args,
)
stdout = r.stdout.getvalue().decode()
if stdout:
return stdout
return None
        if first_in_ceph_log(r'\[ERR\]|\[WRN\]|\[SEC\]',
                             config.get('log-ignorelist')) is not None:
log.warning('Found errors (ERR|WRN|SEC) in cluster log')
ctx.summary['success'] = False
# use the most severe problem as the failure reason
if 'failure_reason' not in ctx.summary:
                for pattern in [r'\[SEC\]', r'\[ERR\]', r'\[WRN\]']:
                    match = first_in_ceph_log(pattern, config.get('log-ignorelist'))
if match is not None:
ctx.summary['failure_reason'] = \
'"{match}" in cluster log'.format(
match=match.rstrip('\n'),
)
break
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and ctx.summary['success']):
# and logs
log.info('Compressing logs...')
run.wait(
ctx.cluster.run(
args=[
'time',
'sudo',
'find',
log_dir,
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'--max-args=1',
'--max-procs=0',
'--verbose',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'-5',
'--verbose',
'--',
],
wait=False,
),
)
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
try:
os.makedirs(path)
except OSError:
pass
for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.name)
try:
os.makedirs(sub)
except OSError:
pass
try:
teuthology.pull_directory(remote, log_dir,
os.path.join(sub, 'log'))
except ReadError:
pass
def build_initial_config(ctx, config):
path = os.path.join(os.path.dirname(__file__), 'rook-ceph.conf')
conf = configobj.ConfigObj(path, file_error=True)
# overrides
for section, keys in config.get('conf',{}).items():
for key, value in keys.items():
log.info(" override: [%s] %s = %s" % (section, key, value))
if section not in conf:
conf[section] = {}
conf[section][key] = value
return conf
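# A hypothetical 'conf' override block as it could appear in the task yaml
# (section and option names are illustrative only); each value is layered on
# top of rook-ceph.conf before the result is stored in the
# rook-config-override ConfigMap below:
#
#   rook:
#     conf:
#       global:
#         osd pool default size: 1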
@contextlib.contextmanager
def rook_cluster(ctx, config):
cluster_name = config['cluster']
# count how many OSDs we'll create
num_devs = 0
num_hosts = 0
for remote in ctx.cluster.remotes.keys():
ls = remote.read_file('/scratch_devs').decode('utf-8').strip().splitlines()
num_devs += len(ls)
num_hosts += 1
ctx.rook[cluster_name].num_osds = num_devs
# config
ceph_conf = build_initial_config(ctx, config)
ceph_conf_fp = BytesIO()
ceph_conf.write(ceph_conf_fp)
log.info(f'Config:\n{ceph_conf_fp.getvalue()}')
    _kubectl(ctx, config, ['create', '-f', '-'], stdin=yaml.dump({
'apiVersion': 'v1',
'kind': 'ConfigMap',
'metadata': {
'name': 'rook-config-override',
'namespace': 'rook-ceph'},
'data': {
            'config': ceph_conf_fp.getvalue().decode('utf-8')
}
}))
# cluster
cluster = {
'apiVersion': 'ceph.rook.io/v1',
'kind': 'CephCluster',
'metadata': {'name': 'rook-ceph', 'namespace': 'rook-ceph'},
'spec': {
'cephVersion': {
'image': ctx.rook[cluster_name].image,
'allowUnsupported': True,
},
'dataDirHostPath': '/var/lib/rook',
'skipUpgradeChecks': True,
'mgr': {
'count': 1,
'modules': [
{ 'name': 'rook', 'enabled': True },
],
},
'mon': {
'count': num_hosts,
'allowMultiplePerNode': True,
},
'storage': {
'storageClassDeviceSets': [
{
'name': 'scratch',
'count': num_devs,
'portable': False,
'volumeClaimTemplates': [
{
'metadata': {'name': 'data'},
'spec': {
'resources': {
'requests': {
                                        'storage': '10Gi'  # must be <= the actual PV size
}
},
'storageClassName': 'scratch',
'volumeMode': 'Block',
'accessModes': ['ReadWriteOnce'],
},
},
],
}
],
},
}
}
teuthology.deep_merge(cluster['spec'], config.get('spec', {}))
cluster_yaml = yaml.dump(cluster)
log.info(f'Cluster:\n{cluster_yaml}')
try:
ctx.rook[cluster_name].remote.write_file('cluster.yaml', cluster_yaml)
_kubectl(ctx, config, ['create', '-f', 'cluster.yaml'])
yield
except Exception as e:
log.exception(e)
raise
finally:
_kubectl(ctx, config, ['delete', '-f', 'cluster.yaml'], check_status=False)
# wait for cluster to shut down
log.info('Waiting for cluster to stop')
running = True
with safe_while(sleep=5, tries=100, action="wait for teardown") as proceed:
while running and proceed():
p = _kubectl(
ctx, config,
['-n', 'rook-ceph', 'get', 'pods'],
stdout=BytesIO(),
)
running = False
for line in p.stdout.getvalue().decode('utf-8').strip().splitlines():
name, ready, status, _ = line.split(None, 3)
if (
name != 'NAME'
and not name.startswith('csi-')
and not name.startswith('rook-ceph-operator-')
and not name.startswith('rook-ceph-tools-')
):
running = True
break
_kubectl(
ctx, config,
['-n', 'rook-ceph', 'delete', 'configmap', 'rook-config-override'],
check_status=False,
)
ctx.rook[cluster_name].remote.run(args=['rm', '-f', 'cluster.yaml'])
@contextlib.contextmanager
def rook_toolbox(ctx, config):
cluster_name = config['cluster']
try:
_kubectl(ctx, config, [
'create',
'-f', (path_to_examples(ctx, cluster_name) + 'toolbox.yaml'),
])
log.info('Waiting for tools container to start')
toolbox = None
with safe_while(sleep=5, tries=100, action="wait for toolbox") as proceed:
while not toolbox and proceed():
p = _kubectl(
ctx, config,
['-n', 'rook-ceph', 'get', 'pods', '-l', 'app=rook-ceph-tools'],
stdout=BytesIO(),
)
_kubectl(
ctx, config,
['-n', 'rook-ceph', 'get', 'pods'],
stdout=BytesIO(),
)
for line in p.stdout.getvalue().decode('utf-8').strip().splitlines():
name, ready, status, _ = line.split(None, 3)
if status == 'Running':
toolbox = name
break
ctx.rook[cluster_name].toolbox = toolbox
yield
except Exception as e:
log.exception(e)
raise
finally:
_kubectl(ctx, config, [
'delete',
'-f', (path_to_examples(ctx, cluster_name) + 'toolbox.yaml'),
], check_status=False)
@contextlib.contextmanager
def wait_for_osds(ctx, config):
cluster_name = config.get('cluster', 'ceph')
want = ctx.rook[cluster_name].num_osds
log.info(f'Waiting for {want} OSDs')
with safe_while(sleep=10, tries=90, action="check osd count") as proceed:
while proceed():
p = _shell(ctx, config, ['ceph', 'osd', 'stat', '-f', 'json'],
stdout=BytesIO(),
check_status=False)
if p.exitstatus == 0:
r = json.loads(p.stdout.getvalue().decode('utf-8'))
have = r.get('num_up_osds', 0)
if have == want:
break
log.info(f' have {have}/{want} OSDs')
yield
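# For reference, a trimmed sketch of the `ceph osd stat -f json` output the
# loop above inspects (numbers are illustrative):
#
#   {"epoch": 12, "num_osds": 3, "num_up_osds": 3, "num_in_osds": 3}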
@contextlib.contextmanager
def ceph_config_keyring(ctx, config):
# get config and push to hosts
log.info('Distributing ceph config and client.admin keyring')
p = _shell(ctx, config, ['cat', '/etc/ceph/ceph.conf'], stdout=BytesIO())
conf = p.stdout.getvalue()
p = _shell(ctx, config, ['cat', '/etc/ceph/keyring'], stdout=BytesIO())
keyring = p.stdout.getvalue()
ctx.cluster.run(args=['sudo', 'mkdir', '-p', '/etc/ceph'])
for remote in ctx.cluster.remotes.keys():
remote.write_file(
'/etc/ceph/ceph.conf',
conf,
sudo=True,
)
remote.write_file(
'/etc/ceph/keyring',
keyring,
sudo=True,
)
try:
yield
except Exception as e:
log.exception(e)
raise
finally:
log.info('Cleaning up config and client.admin keyring')
ctx.cluster.run(args=[
'sudo', 'rm', '-f',
'/etc/ceph/ceph.conf',
'/etc/ceph/ceph.client.admin.keyring'
])
@contextlib.contextmanager
def ceph_clients(ctx, config):
cluster_name = config['cluster']
log.info('Setting up client nodes...')
clients = ctx.cluster.only(teuthology.is_type('client', cluster_name))
for remote, roles_for_host in clients.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'client',
cluster_name):
name = teuthology.ceph_role(role)
client_keyring = '/etc/ceph/{0}.{1}.keyring'.format(cluster_name,
name)
r = _shell(ctx, config,
args=[
'ceph', 'auth',
'get-or-create', name,
'mon', 'allow *',
'osd', 'allow *',
'mds', 'allow *',
'mgr', 'allow *',
],
stdout=BytesIO(),
)
keyring = r.stdout.getvalue()
remote.write_file(client_keyring, keyring, sudo=True, mode='0644')
yield
@contextlib.contextmanager
def task(ctx, config):
"""
Deploy rook-ceph cluster
tasks:
- kubeadm:
- rook:
branch: wip-foo
spec:
mon:
count: 1
The spec item is deep-merged against the cluster.yaml. The branch, sha1, or
image items are used to determine the Ceph container image.
"""
if not config:
config = {}
assert isinstance(config, dict), \
"task only supports a dictionary for configuration"
log.info('Rook start')
overrides = ctx.config.get('overrides', {})
teuthology.deep_merge(config, overrides.get('ceph', {}))
teuthology.deep_merge(config, overrides.get('rook', {}))
log.info('Config: ' + str(config))
# set up cluster context
if not hasattr(ctx, 'rook'):
ctx.rook = {}
if 'cluster' not in config:
config['cluster'] = 'ceph'
cluster_name = config['cluster']
if cluster_name not in ctx.rook:
ctx.rook[cluster_name] = argparse.Namespace()
ctx.rook[cluster_name].remote = list(ctx.cluster.remotes.keys())[0]
# image
teuth_defaults = teuth_config.get('defaults', {})
cephadm_defaults = teuth_defaults.get('cephadm', {})
containers_defaults = cephadm_defaults.get('containers', {})
container_image_name = containers_defaults.get('image', None)
if 'image' in config:
ctx.rook[cluster_name].image = config.get('image')
else:
sha1 = config.get('sha1')
flavor = config.get('flavor', 'default')
if sha1:
if flavor == "crimson":
ctx.rook[cluster_name].image = container_image_name + ':' + sha1 + '-' + flavor
else:
ctx.rook[cluster_name].image = container_image_name + ':' + sha1
else:
# hmm, fall back to branch?
branch = config.get('branch', 'master')
ctx.rook[cluster_name].image = container_image_name + ':' + branch
log.info('Ceph image is %s' % ctx.rook[cluster_name].image)
with contextutil.nested(
lambda: rook_operator(ctx, config),
lambda: ceph_log(ctx, config),
lambda: rook_cluster(ctx, config),
lambda: rook_toolbox(ctx, config),
lambda: wait_for_osds(ctx, config),
lambda: ceph_config_keyring(ctx, config),
lambda: ceph_clients(ctx, config),
):
if not hasattr(ctx, 'managers'):
ctx.managers = {}
ctx.managers[cluster_name] = CephManager(
ctx.rook[cluster_name].remote,
ctx=ctx,
logger=log.getChild('ceph_manager.' + cluster_name),
cluster=cluster_name,
rook=True,
)
try:
if config.get('wait-for-healthy', True):
healthy(ctx=ctx, config=config)
log.info('Rook complete, yielding')
yield
finally:
log.info('Tearing down rook')
ceph-main/qa/tasks/s3a_hadoop.py
import contextlib
import logging
from teuthology import misc
from teuthology.orchestra import run
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Run Hadoop S3A tests using Ceph
usage:
    tasks:
    - ceph-ansible:
    - s3a-hadoop:
          maven-version: '3.6.3' (default)
          hadoop-version: '2.9.2'
          bucket-name: 's3atest' (default)
          access-key: 'anykey' (uses a default value)
          secret-key: 'secretkey' (uses a default value)
role: client.0
"""
if config is None:
config = {}
assert isinstance(config, dict), \
"task only supports a dictionary for configuration"
assert hasattr(ctx, 'rgw'), 's3a-hadoop must run after the rgw task'
overrides = ctx.config.get('overrides', {})
misc.deep_merge(config, overrides.get('s3a-hadoop', {}))
testdir = misc.get_testdir(ctx)
role = config.get('role')
(remote,) = ctx.cluster.only(role).remotes.keys()
endpoint = ctx.rgw.role_endpoints.get(role)
assert endpoint, 's3tests: no rgw endpoint for {}'.format(role)
# get versions
maven_major = config.get('maven-major', 'maven-3')
maven_version = config.get('maven-version', '3.6.3')
hadoop_ver = config.get('hadoop-version', '2.9.2')
bucket_name = config.get('bucket-name', 's3atest')
access_key = config.get('access-key', 'EGAQRD2ULOIFKFSKCT4F')
secret_key = config.get(
'secret-key',
'zi816w1vZKfaSM85Cl0BxXTwSLyN7zB4RbTswrGb')
# set versions for cloning the repo
apache_maven = 'apache-maven-{maven_version}-bin.tar.gz'.format(
maven_version=maven_version)
maven_link = 'http://archive.apache.org/dist/maven/' + \
'{maven_major}/{maven_version}/binaries/'.format(maven_major=maven_major, maven_version=maven_version) + apache_maven
hadoop_git = 'https://github.com/apache/hadoop'
hadoop_rel = 'hadoop-{ver} rel/release-{ver}'.format(ver=hadoop_ver)
if hadoop_ver == 'trunk':
# just checkout a new branch out of trunk
hadoop_rel = 'hadoop-ceph-trunk'
install_prereq(remote)
remote.run(
args=[
'cd',
testdir,
run.Raw('&&'),
'wget',
maven_link,
run.Raw('&&'),
'tar',
'-xvf',
apache_maven,
run.Raw('&&'),
'git',
'clone',
run.Raw(hadoop_git),
run.Raw('&&'),
'cd',
'hadoop',
run.Raw('&&'),
'git',
'checkout',
'-b',
run.Raw(hadoop_rel)
]
)
configure_s3a(remote, endpoint.dns_name, access_key, secret_key, bucket_name, testdir)
setup_user_bucket(remote, endpoint.dns_name, access_key, secret_key, bucket_name, testdir)
if hadoop_ver.startswith('2.8'):
# test all ITtests but skip AWS test using public bucket landsat-pds
# which is not available from within this test
test_options = '-Dit.test=ITestS3A* -Dparallel-tests -Dscale \
-Dfs.s3a.scale.test.timeout=1200 \
-Dfs.s3a.scale.test.huge.filesize=256M verify'
else:
test_options = 'test -Dtest=S3a*,TestS3A*'
try:
run_s3atest(remote, maven_version, testdir, test_options)
yield
finally:
log.info("Done s3a testing, Cleaning up")
for fil in ['apache*', 'hadoop*', 'venv*', 'create*']:
remote.run(args=['rm', run.Raw('-rf'), run.Raw('{tdir}/{file}'.format(tdir=testdir, file=fil))])
def install_prereq(client):
"""
    Install prerequisites for RHEL and CentOS
TBD: Ubuntu
"""
if client.os.name == 'rhel' or client.os.name == 'centos':
client.run(
args=[
'sudo',
'yum',
'install',
'-y',
'protobuf-c.x86_64',
'java',
'java-1.8.0-openjdk-devel',
'dnsmasq'
]
)
def setup_user_bucket(client, dns_name, access_key, secret_key, bucket_name, testdir):
"""
    Create a user with the access_key and secret_key that will be
    used for the s3a tests
"""
client.run(
args=[
'sudo',
'radosgw-admin',
'user',
'create',
run.Raw('--uid'),
's3a',
run.Raw('--display-name="s3a cephtests"'),
run.Raw('--access-key={access_key}'.format(access_key=access_key)),
run.Raw('--secret-key={secret_key}'.format(secret_key=secret_key)),
run.Raw('[email protected]'),
]
)
client.run(
args=[
'python3',
'-m',
'venv',
'{testdir}/venv'.format(testdir=testdir),
run.Raw('&&'),
run.Raw('{testdir}/venv/bin/pip'.format(testdir=testdir)),
'install',
'boto'
]
)
create_bucket = """
#!/usr/bin/env python
import boto
import boto.s3.connection
access_key = '{access_key}'
secret_key = '{secret_key}'
conn = boto.connect_s3(
aws_access_key_id = access_key,
aws_secret_access_key = secret_key,
host = '{dns_name}',
is_secure=False,
calling_format = boto.s3.connection.OrdinaryCallingFormat(),
)
bucket = conn.create_bucket('{bucket_name}')
for bucket in conn.get_all_buckets():
print(bucket.name + "\t" + bucket.creation_date)
""".format(access_key=access_key, secret_key=secret_key, dns_name=dns_name, bucket_name=bucket_name)
py_bucket_file = '{testdir}/create_bucket.py'.format(testdir=testdir)
client.sudo_write_file(py_bucket_file, create_bucket, mode='0744')
client.run(
args=[
'cat',
'{testdir}/create_bucket.py'.format(testdir=testdir),
]
)
client.run(
args=[
'{testdir}/venv/bin/python'.format(testdir=testdir),
'{testdir}/create_bucket.py'.format(testdir=testdir),
]
)
def run_s3atest(client, maven_version, testdir, test_options):
"""
Finally run the s3a test
"""
aws_testdir = '{testdir}/hadoop/hadoop-tools/hadoop-aws/'.format(testdir=testdir)
run_test = '{testdir}/apache-maven-{maven_version}/bin/mvn'.format(testdir=testdir, maven_version=maven_version)
# Remove AWS CredentialsProvider tests as it hits public bucket from AWS
# better solution is to create the public bucket on local server and test
rm_test = 'rm src/test/java/org/apache/hadoop/fs/s3a/ITestS3AAWSCredentialsProvider.java'
client.run(
args=[
'cd',
run.Raw(aws_testdir),
run.Raw('&&'),
run.Raw(rm_test),
run.Raw('&&'),
run.Raw(run_test),
run.Raw(test_options)
]
)
def configure_s3a(client, dns_name, access_key, secret_key, bucket_name, testdir):
"""
Use the template to configure s3a test, Fill in access_key, secret_key
and other details required for test.
"""
config_template = """<configuration>
<property>
<name>fs.s3a.endpoint</name>
<value>{name}</value>
</property>
<property>
<name>fs.contract.test.fs.s3a</name>
<value>s3a://{bucket_name}/</value>
</property>
<property>
<name>fs.s3a.connection.ssl.enabled</name>
<value>false</value>
</property>
<property>
<name>test.fs.s3n.name</name>
<value>s3n://{bucket_name}/</value>
</property>
<property>
<name>test.fs.s3a.name</name>
<value>s3a://{bucket_name}/</value>
</property>
<property>
<name>test.fs.s3.name</name>
<value>s3://{bucket_name}/</value>
</property>
<property>
<name>fs.s3.awsAccessKeyId</name>
<value>{access_key}</value>
</property>
<property>
<name>fs.s3.awsSecretAccessKey</name>
<value>{secret_key}</value>
</property>
<property>
<name>fs.s3n.awsAccessKeyId</name>
<value>{access_key}</value>
</property>
<property>
<name>fs.s3n.awsSecretAccessKey</name>
<value>{secret_key}</value>
</property>
<property>
<name>fs.s3a.access.key</name>
<description>AWS access key ID. Omit for Role-based authentication.</description>
<value>{access_key}</value>
</property>
<property>
<name>fs.s3a.secret.key</name>
<description>AWS secret key. Omit for Role-based authentication.</description>
<value>{secret_key}</value>
</property>
</configuration>
""".format(name=dns_name, bucket_name=bucket_name, access_key=access_key, secret_key=secret_key)
config_path = testdir + '/hadoop/hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml'
client.write_file(config_path, config_template)
# output for debug
client.run(args=['cat', config_path])
ceph-main/qa/tasks/s3tests.py
"""
Run a set of s3 tests on rgw.
"""
from io import BytesIO
from configobj import ConfigObj
import base64
import contextlib
import logging
import os
import random
import string
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
from teuthology.exceptions import ConfigError
log = logging.getLogger(__name__)
@contextlib.contextmanager
def download(ctx, config):
"""
    Download the s3-tests suite from git.
    Remove the downloaded files upon exit.
The context passed in should be identical to the context
passed in to the main task.
"""
assert isinstance(config, dict)
log.info('Downloading s3-tests...')
testdir = teuthology.get_testdir(ctx)
for (client, client_config) in config.items():
s3tests_branch = client_config.get('force-branch', None)
if not s3tests_branch:
raise ValueError(
"Could not determine what branch to use for s3-tests. Please add 'force-branch: {s3-tests branch name}' to the .yaml config for this s3tests task.")
log.info("Using branch '%s' for s3tests", s3tests_branch)
sha1 = client_config.get('sha1')
git_remote = client_config.get('git_remote', teuth_config.ceph_git_base_url)
ctx.cluster.only(client).run(
args=[
'git', 'clone',
'-b', s3tests_branch,
git_remote + 's3-tests.git',
'{tdir}/s3-tests-{client}'.format(tdir=testdir, client=client),
],
)
if sha1 is not None:
ctx.cluster.only(client).run(
args=[
'cd', '{tdir}/s3-tests-{client}'.format(tdir=testdir, client=client),
run.Raw('&&'),
'git', 'reset', '--hard', sha1,
],
)
try:
yield
finally:
log.info('Removing s3-tests...')
testdir = teuthology.get_testdir(ctx)
for client in config:
ctx.cluster.only(client).run(
args=[
'rm',
'-rf',
'{tdir}/s3-tests-{client}'.format(tdir=testdir, client=client),
],
)
def _config_user(s3tests_conf, section, user):
"""
Configure users for this section by stashing away keys, ids, and
email addresses.
"""
s3tests_conf[section].setdefault('user_id', user)
s3tests_conf[section].setdefault('email', '{user}[email protected]'.format(user=user))
s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
s3tests_conf[section].setdefault('access_key',
''.join(random.choice(string.ascii_uppercase) for i in range(20)))
s3tests_conf[section].setdefault('secret_key',
base64.b64encode(os.urandom(40)).decode())
s3tests_conf[section].setdefault('totp_serial',
''.join(random.choice(string.digits) for i in range(10)))
s3tests_conf[section].setdefault('totp_seed',
base64.b32encode(os.urandom(40)).decode())
s3tests_conf[section].setdefault('totp_seconds', '5')
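# After _config_user() runs, a section carries entries along these lines
# (every value below is a fabricated placeholder, not a real credential):
#
#   [s3 main]
#   user_id = foo.client.0
#   display_name = Mr. foo.client.0
#   access_key = <20 random uppercase letters>
#   secret_key = <40 random bytes, base64-encoded>
#   totp_serial = <10 random digits>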
@contextlib.contextmanager
def create_users(ctx, config):
"""
    Create the s3 test users (main, alt, tenant) and an iam user.
"""
assert isinstance(config, dict)
log.info('Creating rgw users...')
testdir = teuthology.get_testdir(ctx)
users = {'s3 main': 'foo', 's3 alt': 'bar', 's3 tenant': 'testx$tenanteduser', 'iam': 'foobar'}
for client in config['clients']:
s3tests_conf = config['s3tests_conf'][client]
s3tests_conf.setdefault('fixtures', {})
s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-')
for section, user in users.items():
_config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_id = daemon_type + '.' + client_id
# create user
ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client_with_id,
'user', 'create',
'--uid', s3tests_conf[section]['user_id'],
'--display-name', s3tests_conf[section]['display_name'],
'--email', s3tests_conf[section]['email'],
'--caps', 'user-policy=*',
'--access-key', s3tests_conf[section]['access_key'],
'--secret', s3tests_conf[section]['secret_key'],
'--cluster', cluster_name,
],
)
if not ctx.dbstore_variable:
ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client_with_id,
'mfa', 'create',
'--uid', s3tests_conf[section]['user_id'],
'--totp-serial', s3tests_conf[section]['totp_serial'],
'--totp-seed', s3tests_conf[section]['totp_seed'],
'--totp-seconds', s3tests_conf[section]['totp_seconds'],
'--totp-window', '8',
'--totp-seed-type', 'base32',
'--cluster', cluster_name,
],
)
# add/configure caps for iam user
if section=='iam':
ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client_with_id,
'caps', 'add',
'--uid', s3tests_conf[section]['user_id'],
'--caps', 'roles=*',
'--cluster', cluster_name,
],
)
ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client_with_id,
'caps', 'add',
'--uid', s3tests_conf[section]['user_id'],
'--caps', 'oidc-provider=*',
'--cluster', cluster_name,
],
)
if "TOKEN" in os.environ:
s3tests_conf.setdefault('webidentity', {})
s3tests_conf['webidentity'].setdefault('token',os.environ['TOKEN'])
s3tests_conf['webidentity'].setdefault('aud',os.environ['AUD'])
s3tests_conf['webidentity'].setdefault('sub',os.environ['SUB'])
s3tests_conf['webidentity'].setdefault('azp',os.environ['AZP'])
s3tests_conf['webidentity'].setdefault('user_token',os.environ['USER_TOKEN'])
s3tests_conf['webidentity'].setdefault('thumbprint',os.environ['THUMBPRINT'])
s3tests_conf['webidentity'].setdefault('KC_REALM',os.environ['KC_REALM'])
try:
yield
finally:
for client in config['clients']:
for user in users.values():
uid = '{user}.{client}'.format(user=user, client=client)
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_id = daemon_type + '.' + client_id
ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client_with_id,
'user', 'rm',
'--uid', uid,
'--purge-data',
'--cluster', cluster_name,
],
)
@contextlib.contextmanager
def configure(ctx, config):
"""
    Create the config files for s3tests and boto.
"""
assert isinstance(config, dict)
log.info('Configuring s3-tests...')
testdir = teuthology.get_testdir(ctx)
for client, properties in config['clients'].items():
properties = properties or {}
s3tests_conf = config['s3tests_conf'][client]
s3tests_conf['DEFAULT']['calling_format'] = properties.get('calling-format', 'ordinary')
# use rgw_server if given, or default to local client
role = properties.get('rgw_server', client)
endpoint = ctx.rgw.role_endpoints.get(role)
assert endpoint, 's3tests: no rgw endpoint for {}'.format(role)
s3tests_conf['DEFAULT']['host'] = endpoint.dns_name
website_role = properties.get('rgw_website_server')
if website_role:
website_endpoint = ctx.rgw.role_endpoints.get(website_role)
assert website_endpoint, \
's3tests: no rgw endpoint for rgw_website_server {}'.format(website_role)
assert website_endpoint.website_dns_name, \
's3tests: no dns-s3website-name for rgw_website_server {}'.format(website_role)
s3tests_conf['DEFAULT']['s3website_domain'] = website_endpoint.website_dns_name
if hasattr(ctx, 'barbican'):
properties = properties['barbican']
if properties is not None and 'kms_key' in properties:
if not (properties['kms_key'] in ctx.barbican.keys):
raise ConfigError('Key '+properties['kms_key']+' not defined')
if not (properties['kms_key2'] in ctx.barbican.keys):
raise ConfigError('Key '+properties['kms_key2']+' not defined')
key = ctx.barbican.keys[properties['kms_key']]
s3tests_conf['DEFAULT']['kms_keyid'] = key['id']
key = ctx.barbican.keys[properties['kms_key2']]
s3tests_conf['DEFAULT']['kms_keyid2'] = key['id']
elif hasattr(ctx, 'vault'):
engine_or_flavor = vars(ctx.vault).get('flavor',ctx.vault.engine)
keys=[]
for name in (x['Path'] for x in vars(ctx.vault).get('keys', {}).get(ctx.rgw.vault_role)):
keys.append(name)
keys.extend(['testkey-1','testkey-2'])
if engine_or_flavor == "old":
keys=[keys[i] + "/1" for i in range(len(keys))]
properties = properties.get('vault_%s' % engine_or_flavor, {})
s3tests_conf['DEFAULT']['kms_keyid'] = properties.get('key_path', keys[0])
s3tests_conf['DEFAULT']['kms_keyid2'] = properties.get('key_path2', keys[1])
elif hasattr(ctx.rgw, 'pykmip_role'):
keys=[]
for name in (x['Name'] for x in ctx.pykmip.keys[ctx.rgw.pykmip_role]):
p=name.partition('-')
keys.append(p[2] if p[2] else p[0])
keys.extend(['testkey-1', 'testkey-2'])
s3tests_conf['DEFAULT']['kms_keyid'] = properties.get('kms_key', keys[0])
s3tests_conf['DEFAULT']['kms_keyid2'] = properties.get('kms_key2', keys[1])
else:
# Fallback scenario where it's the local (ceph.conf) kms being tested
s3tests_conf['DEFAULT']['kms_keyid'] = 'testkey-1'
s3tests_conf['DEFAULT']['kms_keyid2'] = 'testkey-2'
slow_backend = properties.get('slow_backend')
if slow_backend:
s3tests_conf['fixtures']['slow backend'] = slow_backend
storage_classes = properties.get('storage classes')
if storage_classes:
s3tests_conf['s3 main']['storage_classes'] = storage_classes
lc_debug_interval = properties.get('lc_debug_interval')
if lc_debug_interval:
s3tests_conf['s3 main']['lc_debug_interval'] = lc_debug_interval
        if getattr(ctx, 'rgw_cloudtier', None) is not None:
log.info(' ctx.rgw_cloudtier config is %s ...', ctx.rgw_cloudtier.config)
client_rgw_config = ctx.rgw_cloudtier.config.get(client)
if client_rgw_config:
log.info(' ctx.rgw_cloudtier config is %s ...', client_rgw_config)
cloudtier_user = client_rgw_config.get('cloudtier_user')
cloud_client = client_rgw_config.get('cloud_client')
endpoint = ctx.rgw.role_endpoints.get(cloud_client)
s3tests_conf['s3 cloud']['host'] = endpoint.dns_name
s3tests_conf['s3 cloud']['port'] = endpoint.port
s3tests_conf['s3 cloud']['access_key'] = cloudtier_user.get('cloud_access_key')
s3tests_conf['s3 cloud']['secret_key'] = cloudtier_user.get('cloud_secret')
s3tests_conf['s3 cloud']['cloud_storage_class'] = client_rgw_config.get('cloud_storage_class')
s3tests_conf['s3 cloud']['storage_class'] = client_rgw_config.get('cloud_regular_storage_class')
s3tests_conf['s3 cloud']['retain_head_object'] = client_rgw_config.get('cloud_retain_head_object')
cloud_target_path = client_rgw_config.get('cloud_target_path')
cloud_target_storage_class = client_rgw_config.get('cloud_target_storage_class')
if (cloud_target_path != None):
s3tests_conf['s3 cloud']['target_path'] = cloud_target_path
if (cloud_target_storage_class != None):
s3tests_conf['s3 cloud']['target_storage_class'] = cloud_target_storage_class
(remote,) = ctx.cluster.only(client).remotes.keys()
conf_fp = BytesIO()
s3tests_conf.write(conf_fp)
remote.write_file(
path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
data=conf_fp.getvalue(),
)
log.info('Configuring boto...')
boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
for client, properties in config['clients'].items():
with open(boto_src) as f:
(remote,) = ctx.cluster.only(client).remotes.keys()
conf = f.read().format(
idle_timeout=config.get('idle_timeout', 30)
)
remote.write_file('{tdir}/boto-{client}.cfg'.format(tdir=testdir, client=client), conf)
try:
yield
finally:
log.info('Cleaning up boto...')
for client, properties in config['clients'].items():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
'rm',
'{tdir}/boto-{client}.cfg'.format(tdir=testdir, client=client),
],
)
def get_toxvenv_dir(ctx):
return ctx.tox.venv_path
def toxvenv_sh(ctx, remote, args, **kwargs):
activate = get_toxvenv_dir(ctx) + '/bin/activate'
return remote.sh(['source', activate, run.Raw('&&')] + args, **kwargs)
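# Hypothetical usage, mirroring how run_tests() below drives the suite
# (tests_dir and attrs are placeholder names):
#
#   toxvenv_sh(ctx, remote,
#              ['cd', tests_dir, run.Raw('&&'), 'tox', '--', '-v', '-m', attrs])
#
# i.e. the command runs with the tox virtualenv's bin/activate sourced first.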
@contextlib.contextmanager
def run_tests(ctx, config):
"""
Run the s3tests after everything is set up.
:param ctx: Context passed to task
:param config: specific configuration information
"""
assert isinstance(config, dict)
testdir = teuthology.get_testdir(ctx)
for client, client_config in config.items():
client_config = client_config or {}
(remote,) = ctx.cluster.only(client).remotes.keys()
args = [
'cd', '{tdir}/s3-tests-{client}'.format(tdir=testdir, client=client), run.Raw('&&'),
'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
'BOTO_CONFIG={tdir}/boto-{client}.cfg'.format(tdir=testdir, client=client)
]
# the 'requests' library comes with its own ca bundle to verify ssl
# certificates - override that to use the system's ca bundle, which
# is where the ssl task installed this certificate
if remote.os.package_type == 'deb':
args += ['REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt']
else:
args += ['REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt']
attrs = ["not fails_on_rgw", "not lifecycle_expiration"]
if not client_config.get('sts_tests', False):
attrs += ["not test_of_sts"]
if not client_config.get('webidentity_tests', False):
attrs += ["not webidentity_test"]
if client_config.get('calling-format') != 'ordinary':
attrs += ['not fails_with_subdomain']
if not client_config.get('with-sse-s3'):
attrs += ['not sse_s3']
attrs += client_config.get('extra_attrs', [])
args += ['tox', '--', '-v', '-m', ' and '.join(attrs)]
args += client_config.get('extra_args', [])
toxvenv_sh(ctx, remote, args, label="s3 tests against rgw")
yield
@contextlib.contextmanager
def scan_for_leaked_encryption_keys(ctx, config):
"""
Scan radosgw logs for the encryption keys used by s3tests to
verify that we're not leaking secrets.
:param ctx: Context passed to task
:param config: specific configuration information
"""
assert isinstance(config, dict)
try:
yield
finally:
# x-amz-server-side-encryption-customer-key
s3test_customer_key = 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs='
log.debug('Scanning radosgw logs for leaked encryption keys...')
procs = list()
for client, client_config in config.items():
if not client_config.get('scan_for_encryption_keys', True):
continue
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_cluster = '.'.join((cluster_name, daemon_type, client_id))
(remote,) = ctx.cluster.only(client).remotes.keys()
proc = remote.run(
args=[
'grep',
'--binary-files=text',
s3test_customer_key,
'/var/log/ceph/rgw.{client}.log'.format(client=client_with_cluster),
],
wait=False,
check_status=False,
)
procs.append(proc)
for proc in procs:
proc.wait()
if proc.returncode == 1: # 1 means no matches
continue
log.error('radosgw log is leaking encryption keys!')
raise Exception('radosgw log is leaking encryption keys')
@contextlib.contextmanager
def task(ctx, config):
"""
Run the s3-tests suite against rgw.
To run all tests on all clients::
tasks:
- ceph:
- rgw:
- s3tests:
To restrict testing to particular clients::
tasks:
- ceph:
- rgw: [client.0]
- s3tests: [client.0]
To run against a server on client.1 and increase the boto timeout to 10m::
tasks:
- ceph:
- rgw: [client.1]
- s3tests:
client.0:
rgw_server: client.1
idle_timeout: 600
To pass extra arguments to pytest (e.g. to run a certain test)::
tasks:
- ceph:
- rgw: [client.0]
- s3tests:
client.0:
extra_args: ['test_s3:test_object_acl_grand_public_read']
client.1:
extra_args: ['--exclude', 'test_100_continue']
To run any sts-tests don't forget to set a config variable named 'sts_tests' to 'True' as follows::
tasks:
- ceph:
- rgw: [client.0]
- s3tests:
client.0:
sts_tests: True
rgw_server: client.0
To run any cloud-transition tests don't forget to set a config variable named 'cloudtier_tests' to 'True' as follows::
tasks:
- ceph:
- rgw: [client.0 client.1]
- s3tests:
client.0:
cloudtier_tests: True
rgw_server: client.0
"""
assert hasattr(ctx, 'rgw'), 's3tests must run after the rgw task'
assert hasattr(ctx, 'tox'), 's3tests must run after the tox task'
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
"task s3tests only supports a list or dictionary for configuration"
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
clients = config.keys()
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
for client in config.keys():
if not config[client]:
config[client] = {}
teuthology.deep_merge(config[client], overrides.get('s3tests', {}))
log.debug('s3tests config is %s', config)
s3tests_conf = {}
for client, client_config in config.items():
if 'sts_tests' in client_config:
ctx.sts_variable = True
else:
ctx.sts_variable = False
if 'cloudtier_tests' in client_config:
ctx.cloudtier_variable = True
else:
ctx.cloudtier_variable = False
if 'dbstore_tests' in client_config:
ctx.dbstore_variable = True
else:
ctx.dbstore_variable = False
#This will be the structure of config file when you want to run webidentity_test (sts-test)
if ctx.sts_variable and "TOKEN" in os.environ:
for client in clients:
endpoint = ctx.rgw.role_endpoints.get(client)
assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)
s3tests_conf[client] = ConfigObj(
indent_type='',
infile={
'DEFAULT':
{
'port' : endpoint.port,
'is_secure' : endpoint.cert is not None,
'api_name' : 'default',
},
'fixtures' : {},
's3 main' : {},
's3 alt' : {},
's3 tenant' : {},
'iam' : {},
'webidentity': {},
}
)
elif ctx.sts_variable:
#This will be the structure of config file when you want to run assume_role_test and get_session_token_test (sts-test) or iam-tests
for client in clients:
endpoint = ctx.rgw.role_endpoints.get(client)
assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)
s3tests_conf[client] = ConfigObj(
indent_type='',
infile={
'DEFAULT':
{
'port' : endpoint.port,
'is_secure' : endpoint.cert is not None,
'api_name' : 'default',
},
'fixtures' : {},
's3 main' : {},
's3 alt' : {},
'iam' : {},
's3 tenant' : {},
}
)
elif ctx.cloudtier_variable:
        #This will be the structure of config file when you want to run cloud-transition tests
for client in clients:
endpoint = ctx.rgw.role_endpoints.get(client)
assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)
s3tests_conf[client] = ConfigObj(
indent_type='',
infile={
'DEFAULT':
{
'port' : endpoint.port,
'is_secure' : endpoint.cert is not None,
'api_name' : 'default',
},
'fixtures' : {},
's3 main' : {},
's3 alt' : {},
's3 tenant' : {},
's3 cloud' : {},
'iam' : {},
}
)
else:
#This will be the structure of config file when you want to run normal s3-tests
for client in clients:
endpoint = ctx.rgw.role_endpoints.get(client)
assert endpoint, 's3tests: no rgw endpoint for {}'.format(client)
s3tests_conf[client] = ConfigObj(
indent_type='',
infile={
'DEFAULT':
{
'port' : endpoint.port,
'is_secure' : endpoint.cert is not None,
'api_name' : 'default',
},
'fixtures' : {},
's3 main' : {},
's3 alt' : {},
's3 tenant' : {},
'iam' : {},
}
)
with contextutil.nested(
lambda: download(ctx=ctx, config=config),
lambda: create_users(ctx=ctx, config=dict(
clients=clients,
s3tests_conf=s3tests_conf,
)),
lambda: configure(ctx=ctx, config=dict(
clients=config,
s3tests_conf=s3tests_conf,
)),
lambda: run_tests(ctx=ctx, config=config),
lambda: scan_for_leaked_encryption_keys(ctx=ctx, config=config),
):
pass
yield
ceph-main/qa/tasks/s3tests_java.py
"""
Task for running RGW S3 tests with the AWS Java SDK
"""
from io import BytesIO
import logging
import base64
import os
import random
import string
import yaml
import getpass
from teuthology import misc as teuthology
from teuthology.task import Task
from teuthology.orchestra import run
log = logging.getLogger(__name__)
"""
Task for running RGW S3 tests with the AWS Java SDK
Tests run only on clients specified in the s3tests-java config section.
If no client is given, the default 'client.0' is chosen.
If it does not match the rgw client, the task will fail.
tasks:
- ceph:
- rgw: [client.0]
- s3tests-java:
client.0:
Extra arguments can be passed by adding options to the corresponding client
section under the s3tests-java task (e.g. to run a certain test,
specify a different repository and branch for the test suite,
run in info/debug mode (for the java suite) or forward the gradle output to a log file):
tasks:
- ceph:
- rgw: [client.0]
- s3tests-java:
client.0:
force-branch: wip
force-repo: 'https://github.com/adamyanova/java_s3tests.git'
log-fwd: '../s3tests-java.log'
log-level: info
extra-args: ['--tests', 'ObjectTest.testEncryptionKeySSECInvalidMd5']
To run a specific test, provide its name to the extra-args section e.g.:
- s3tests-java:
client.0:
extra-args: ['--tests', 'ObjectTest.testEncryptionKeySSECInvalidMd5']
"""
class S3tests_java(Task):
"""
Download and install S3 tests in Java
This will require openjdk and gradle
"""
def __init__(self, ctx, config):
super(S3tests_java, self).__init__(ctx, config)
self.log = log
log.debug('S3 Tests Java: __INIT__ ')
assert hasattr(ctx, 'rgw'), 'S3tests_java must run after the rgw task'
clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(self.ctx.cluster, 'client')]
self.all_clients = []
for client in clients:
if client in self.config:
self.all_clients.extend([client])
        if not self.all_clients:
            self.all_clients = ['client.0']
self.users = {'s3main': 'tester',
's3alt': 'johndoe', 'tenanted': 'testx$tenanteduser'}
def setup(self):
super(S3tests_java, self).setup()
log.debug('S3 Tests Java: SETUP')
for client in self.all_clients:
self.download_test_suite(client)
self.install_required_packages(client)
def begin(self):
super(S3tests_java, self).begin()
log.debug('S3 Tests Java: BEGIN')
for (host, roles) in self.ctx.cluster.remotes.items():
log.debug(
'S3 Tests Java: Cluster config is: {cfg}'.format(cfg=roles))
log.debug('S3 Tests Java: Host is: {host}'.format(host=host))
self.create_users()
self.run_tests()
def end(self):
super(S3tests_java, self).end()
log.debug('S3 Tests Java: END')
for client in self.all_clients:
self.remove_tests(client)
self.delete_users(client)
def download_test_suite(self, client):
log.info("S3 Tests Java: Downloading test suite...")
testdir = teuthology.get_testdir(self.ctx)
branch = 'master'
repo = 'https://github.com/ceph/java_s3tests.git'
if client in self.config and self.config[client] is not None:
if 'force-branch' in self.config[client] and self.config[client]['force-branch'] is not None:
branch = self.config[client]['force-branch']
if 'force-repo' in self.config[client] and self.config[client]['force-repo'] is not None:
repo = self.config[client]['force-repo']
self.ctx.cluster.only(client).run(
args=[
'git', 'clone',
'-b', branch,
repo,
'{tdir}/s3-tests-java'.format(tdir=testdir),
],
stdout=BytesIO()
)
if client in self.config and self.config[client] is not None:
if 'sha1' in self.config[client] and self.config[client]['sha1'] is not None:
self.ctx.cluster.only(client).run(
args=[
'cd', '{tdir}/s3-tests-java'.format(tdir=testdir),
run.Raw('&&'),
'git', 'reset', '--hard', self.config[client]['sha1'],
],
)
if 'log-level' in self.config[client]:
if self.config[client]['log-level'] == 'info':
self.ctx.cluster.only(client).run(
args=[
'sed', '-i', '\'s/log4j.rootLogger=WARN/log4j.rootLogger=INFO/g\'',
'{tdir}/s3-tests-java/src/main/resources/log4j.properties'.format(
tdir=testdir)
]
)
if self.config[client]['log-level'] == 'debug':
self.ctx.cluster.only(client).run(
args=[
'sed', '-i', '\'s/log4j.rootLogger=WARN/log4j.rootLogger=DEBUG/g\'',
'{tdir}/s3-tests-java/src/main/resources/log4j.properties'.format(
tdir=testdir)
]
)
def install_required_packages(self, client):
"""
Run bootstrap script to install openjdk and gradle.
Add certificates to java keystore
"""
log.info("S3 Tests Java: Installing required packages...")
testdir = teuthology.get_testdir(self.ctx)
self.ctx.cluster.only(client).run(
args=['{tdir}/s3-tests-java/bootstrap.sh'.format(tdir=testdir)],
stdout=BytesIO()
)
endpoint = self.ctx.rgw.role_endpoints[client]
if endpoint.cert:
path = 'lib/security/cacerts'
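            # Locate the JRE's cacerts keystore relative to the keytool binary
            # and import the endpoint's certificate, so the Java test suite
            # trusts the RGW TLS endpoint.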
self.ctx.cluster.only(client).run(
args=['sudo',
'keytool',
'-import', '-alias', '{alias}'.format(
alias=endpoint.hostname),
'-keystore',
run.Raw(
'$(readlink -e $(dirname $(readlink -e $(which keytool)))/../{path})'.format(path=path)),
'-file', endpoint.cert.certificate,
'-storepass', 'changeit',
],
stdout=BytesIO()
)
def create_users(self):
"""
Create a main and an alternative s3 user.
        Configuration is read from a skeleton config file
s3tests.teuth.config.yaml in the java-s3tests repository
and missing information is added from the task.
        Existing values are NOT overridden unless they are empty!
"""
log.info("S3 Tests Java: Creating S3 users...")
testdir = teuthology.get_testdir(self.ctx)
for client in self.all_clients:
endpoint = self.ctx.rgw.role_endpoints.get(client)
local_user = getpass.getuser()
remote_user = teuthology.get_test_user()
os.system("scp {remote}@{host}:{tdir}/s3-tests-java/s3tests.teuth.config.yaml /home/{local}/".format(
host=endpoint.hostname, tdir=testdir, remote=remote_user, local=local_user))
s3tests_conf = teuthology.config_file(
'/home/{local}/s3tests.teuth.config.yaml'.format(local=local_user))
log.debug("S3 Tests Java: s3tests_conf is {s3cfg}".format(
s3cfg=s3tests_conf))
for section, user in list(self.users.items()):
if section in s3tests_conf:
s3_user_id = '{user}.{client}'.format(
user=user, client=client)
log.debug(
'S3 Tests Java: Creating user {s3_user_id}'.format(s3_user_id=s3_user_id))
self._config_user(s3tests_conf=s3tests_conf,
section=section, user=s3_user_id, client=client)
cluster_name, daemon_type, client_id = teuthology.split_role(
client)
client_with_id = daemon_type + '.' + client_id
args = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client_with_id,
'user', 'create',
'--uid', s3tests_conf[section]['user_id'],
'--display-name', s3tests_conf[section]['display_name'],
'--access-key', s3tests_conf[section]['access_key'],
'--secret', s3tests_conf[section]['access_secret'],
'--email', s3tests_conf[section]['email'],
'--cluster', cluster_name,
]
log.info('{args}'.format(args=args))
self.ctx.cluster.only(client).run(
args=args,
stdout=BytesIO()
)
else:
self.users.pop(section)
self._write_cfg_file(s3tests_conf, client)
os.system(
"rm -rf /home/{local}/s3tests.teuth.config.yaml".format(local=local_user))
def _config_user(self, s3tests_conf, section, user, client):
"""
        Generate missing user data for this section by stashing away keys, ids, and
email addresses.
"""
access_key = ''.join(random.choice(string.ascii_uppercase)
for i in range(20))
access_secret = base64.b64encode(os.urandom(40)).decode('ascii')
endpoint = self.ctx.rgw.role_endpoints.get(client)
self._set_cfg_entry(
s3tests_conf[section], 'user_id', '{user}'.format(user=user))
self._set_cfg_entry(
s3tests_conf[section], 'email', '{user}[email protected]'.format(user=user))
self._set_cfg_entry(
s3tests_conf[section], 'display_name', 'Ms. {user}'.format(user=user))
self._set_cfg_entry(
s3tests_conf[section], 'access_key', '{ak}'.format(ak=access_key))
self._set_cfg_entry(
s3tests_conf[section], 'access_secret', '{asc}'.format(asc=access_secret))
self._set_cfg_entry(
s3tests_conf[section], 'region', 'us-east-1')
self._set_cfg_entry(
s3tests_conf[section], 'endpoint', '{ip}:{port}'.format(
ip=endpoint.hostname, port=endpoint.port))
self._set_cfg_entry(
s3tests_conf[section], 'host', endpoint.hostname)
self._set_cfg_entry(
s3tests_conf[section], 'port', endpoint.port)
self._set_cfg_entry(
s3tests_conf[section], 'is_secure', True if endpoint.cert else False)
log.debug("S3 Tests Java: s3tests_conf[{sect}] is {s3cfg}".format(
sect=section, s3cfg=s3tests_conf[section]))
        log.debug('S3 Tests Java: Section, User = {sect}, {user}'.format(
sect=section, user=user))
def _write_cfg_file(self, cfg_dict, client):
"""
Write s3 tests java config file on the remote node.
"""
testdir = teuthology.get_testdir(self.ctx)
(remote,) = self.ctx.cluster.only(client).remotes.keys()
data = yaml.safe_dump(cfg_dict, default_flow_style=False)
path = testdir + '/archive/s3-tests-java.' + client + '.conf'
remote.write_file(path, data)
def _set_cfg_entry(self, cfg_dict, key, value):
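        # Fill-if-empty semantics: an existing non-None value wins; only a
        # missing key or a None value is overwritten with the supplied default.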
        if key not in cfg_dict or cfg_dict[key] is None:
            cfg_dict[key] = value
def run_tests(self):
log.info("S3 Tests Java: Running tests...")
testdir = teuthology.get_testdir(self.ctx)
for client in self.all_clients:
self.ctx.cluster.only(client).run(
args=['cp',
'{tdir}/archive/s3-tests-java.{client}.conf'.format(
tdir=testdir, client=client),
'{tdir}/s3-tests-java/config.properties'.format(
tdir=testdir)
],
stdout=BytesIO()
)
args = ['cd',
'{tdir}/s3-tests-java'.format(tdir=testdir),
run.Raw('&&'),
'/opt/gradle/gradle/bin/gradle', 'clean', 'test',
'--rerun-tasks', '--no-build-cache',
]
extra_args = []
suppress_groups = False
self.log_fwd = False
self.log_name = ''
if client in self.config and self.config[client] is not None:
if 'extra-args' in self.config[client]:
extra_args.extend(self.config[client]['extra-args'])
suppress_groups = True
if 'log-level' in self.config[client] and self.config[client]['log-level'] == 'debug':
extra_args += ['--debug']
if 'log-fwd' in self.config[client]:
self.log_fwd = True
self.log_name = '{tdir}/s3tests_log.txt'.format(
tdir=testdir)
if self.config[client]['log-fwd'] is not None:
self.log_name = self.config[client]['log-fwd']
extra_args += [run.Raw('>>'),
self.log_name]
if not suppress_groups:
test_groups = ['AWS4Test', 'BucketTest', 'ObjectTest']
else:
test_groups = ['All']
for gr in test_groups:
for i in range(2):
self.ctx.cluster.only(client).run(
args=['radosgw-admin', 'gc',
'process', '--include-all'],
stdout=BytesIO()
)
if gr != 'All':
self.ctx.cluster.only(client).run(
args=args + ['--tests'] + [gr] + extra_args,
stdout=BytesIO()
)
else:
self.ctx.cluster.only(client).run(
args=args + extra_args,
stdout=BytesIO()
)
for i in range(2):
self.ctx.cluster.only(client).run(
args=['radosgw-admin', 'gc',
'process', '--include-all'],
stdout=BytesIO()
)
def remove_tests(self, client):
log.info('S3 Tests Java: Cleaning up s3-tests-java...')
testdir = teuthology.get_testdir(self.ctx)
if self.log_fwd:
self.ctx.cluster.only(client).run(
args=['cd',
'{tdir}/s3-tests-java'.format(tdir=testdir),
run.Raw('&&'),
'cat', self.log_name,
run.Raw('&&'),
'rm', self.log_name],
stdout=BytesIO()
)
self.ctx.cluster.only(client).run(
args=[
'rm',
'-rf',
'{tdir}/s3-tests-java'.format(tdir=testdir),
],
stdout=BytesIO()
)
def delete_users(self, client):
log.info("S3 Tests Java: Deleting S3 users...")
testdir = teuthology.get_testdir(self.ctx)
for section, user in self.users.items():
s3_user_id = '{user}.{client}'.format(user=user, client=client)
self.ctx.cluster.only(client).run(
args=[
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'radosgw-admin',
'-n', client,
'user', 'rm',
'--uid', s3_user_id,
'--purge-data',
'--cluster', 'ceph',
],
stdout=BytesIO()
)
task = S3tests_java
| 16,628 | 40.263027 | 115 |
py
|
null |
ceph-main/qa/tasks/samba.py
|
"""
Samba
"""
import contextlib
import logging
import time
from teuthology import misc as teuthology
from teuthology.orchestra import run
from teuthology.orchestra.daemon import DaemonGroup
log = logging.getLogger(__name__)
def get_sambas(ctx, roles):
"""
    Scan for roles that are samba. Yield the id of the samba role
(samba.0, samba.1...) and the associated remote site
:param ctx: Context
:param roles: roles for this test (extracted from yaml files)
"""
for role in roles:
assert isinstance(role, str)
PREFIX = 'samba.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
(remote,) = ctx.cluster.only(role).remotes.keys()
yield (id_, remote)
@contextlib.contextmanager
def task(ctx, config):
"""
Setup samba smbd with ceph vfs module. This task assumes the samba
package has already been installed via the install task.
The config is optional and defaults to starting samba on all nodes.
If a config is given, it is expected to be a list of
samba nodes to start smbd servers on.
Example that starts smbd on all samba nodes::
tasks:
- install:
- install:
project: samba
extra_packages: ['samba']
- ceph:
- samba:
- interactive:
Example that starts smbd on just one of the samba nodes and cifs on the other::
tasks:
- samba: [samba.0]
- cifs: [samba.1]
An optional backend can be specified, and requires a path which smbd will
use as the backend storage location:
roles:
- [osd.0, osd.1, osd.2, mon.0, mon.1, mon.2, mds.a]
- [client.0, samba.0]
tasks:
- ceph:
- ceph-fuse: [client.0]
- samba:
samba.0:
cephfuse: "{testdir}/mnt.0"
This mounts ceph to {testdir}/mnt.0 using fuse, and starts smbd with
a UNC of //localhost/cephfuse. Access through that UNC will be on
the ceph fuse mount point.
If no arguments are specified in the samba
role, the default behavior is to enable the ceph UNC //localhost/ceph
and use the ceph vfs module as the smbd backend.
:param ctx: Context
:param config: Configuration
"""
log.info("Setting up smbd with ceph vfs...")
assert config is None or isinstance(config, list) or isinstance(config, dict), \
"task samba got invalid config"
if config is None:
config = dict(('samba.{id}'.format(id=id_), None)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba'))
elif isinstance(config, list):
config = dict((name, None) for name in config)
samba_servers = list(get_sambas(ctx=ctx, roles=config.keys()))
testdir = teuthology.get_testdir(ctx)
if not hasattr(ctx, 'daemons'):
ctx.daemons = DaemonGroup()
for id_, remote in samba_servers:
rolestr = "samba.{id_}".format(id_=id_)
confextras = """vfs objects = ceph
ceph:config_file = /etc/ceph/ceph.conf"""
unc = "ceph"
backend = "/"
if config[rolestr] is not None:
# verify that there's just one parameter in role
if len(config[rolestr]) != 1:
log.error("samba config for role samba.{id_} must have only one parameter".format(id_=id_))
raise Exception('invalid config')
confextras = ""
            (unc, backendstr) = list(config[rolestr].items())[0]
backend = backendstr.format(testdir=testdir)
# on first samba role, set ownership and permissions of ceph root
# so that samba tests succeed
if config[rolestr] is None and id_ == samba_servers[0][0]:
remote.run(
args=[
'mkdir', '-p', '/tmp/cmnt', run.Raw('&&'),
'sudo', 'ceph-fuse', '/tmp/cmnt', run.Raw('&&'),
'sudo', 'chown', 'ubuntu:ubuntu', '/tmp/cmnt/', run.Raw('&&'),
'sudo', 'chmod', '1777', '/tmp/cmnt/', run.Raw('&&'),
'sudo', 'umount', '/tmp/cmnt/', run.Raw('&&'),
'rm', '-rf', '/tmp/cmnt',
],
)
else:
remote.run(
args=[
'sudo', 'chown', 'ubuntu:ubuntu', backend, run.Raw('&&'),
'sudo', 'chmod', '1777', backend,
],
)
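        # Render a minimal smb.conf: a single share (named by unc) backed
        # either by the ceph vfs module (the default) or by a plain path such
        # as a ceph-fuse mountpoint.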
remote.sudo_write_file("/usr/local/samba/etc/smb.conf", """
[global]
workgroup = WORKGROUP
netbios name = DOMAIN
[{unc}]
path = {backend}
{extras}
writeable = yes
valid users = ubuntu
""".format(extras=confextras, unc=unc, backend=backend))
        # enable the ubuntu samba account, creating it if necessary
remote.run(
args=[
'sudo', '/usr/local/samba/bin/smbpasswd', '-e', 'ubuntu',
run.Raw('||'),
'printf', run.Raw('"ubuntu\nubuntu\n"'),
run.Raw('|'),
'sudo', '/usr/local/samba/bin/smbpasswd', '-s', '-a', 'ubuntu'
])
smbd_cmd = [
'sudo',
'daemon-helper',
'term',
'nostdin',
'/usr/local/samba/sbin/smbd',
'-F',
]
ctx.daemons.add_daemon(remote, 'smbd', id_,
args=smbd_cmd,
logger=log.getChild("smbd.{id_}".format(id_=id_)),
stdin=run.PIPE,
wait=False,
)
# let smbd initialize, probably a better way...
seconds_to_sleep = 100
log.info('Sleeping for %s seconds...' % seconds_to_sleep)
time.sleep(seconds_to_sleep)
log.info('Sleeping stopped...')
try:
yield
finally:
log.info('Stopping smbd processes...')
exc = None
for d in ctx.daemons.iter_daemons_of_role('smbd'):
try:
d.stop()
except (run.CommandFailedError,
run.CommandCrashedError,
run.ConnectionLostError) as e:
exc = e
log.exception('Saw exception from %s.%s', d.role, d.id_)
if exc is not None:
raise exc
for id_, remote in samba_servers:
remote.run(
args=[
'sudo',
'rm', '-rf',
'/usr/local/samba/etc/smb.conf',
'/usr/local/samba/private/*',
'/usr/local/samba/var/run/',
'/usr/local/samba/var/locks',
'/usr/local/samba/var/lock',
],
)
# make sure daemons are gone
try:
remote.run(
args=[
'while',
'sudo', 'killall', '-9', 'smbd',
run.Raw(';'),
'do', 'sleep', '1',
run.Raw(';'),
'done',
],
)
remote.run(
args=[
'sudo',
'lsof',
backend,
],
check_status=False
)
remote.run(
args=[
'sudo',
'fuser',
'-M',
backend,
],
check_status=False
)
except Exception:
log.exception("Saw exception")
| 7,830 | 30.963265 | 107 |
py
|
null |
ceph-main/qa/tasks/scrub.py
|
"""
Scrub osds
"""
import contextlib
import gevent
import logging
import random
import time
from tasks import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
Run scrub periodically. Randomly chooses an OSD to scrub.
The config should be as follows:
scrub:
frequency: <seconds between scrubs>
deep: <bool for deepness>
example:
tasks:
- ceph:
- scrub:
frequency: 30
deep: 0
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'scrub task only accepts a dict for configuration'
log.info('Beginning scrub...')
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
while len(manager.get_osd_status()['up']) < num_osds:
time.sleep(10)
scrub_proc = Scrubber(
manager,
config,
)
try:
yield
finally:
log.info('joining scrub')
scrub_proc.do_join()
class Scrubber:
"""
Scrubbing is actually performed during initialization
"""
def __init__(self, manager, config):
"""
Spawn scrubbing thread upon completion.
"""
self.ceph_manager = manager
self.ceph_manager.wait_for_clean()
osd_status = self.ceph_manager.get_osd_status()
self.osds = osd_status['up']
        self.config = config
        if self.config is None:
            self.config = dict()
        def tmp(x):
            """Local display"""
            print(x)
        self.log = tmp
self.stopping = False
log.info("spawning thread")
self.thread = gevent.spawn(self.do_scrub)
def do_join(self):
"""Scrubbing thread finished"""
self.stopping = True
self.thread.get()
def do_scrub(self):
"""Perform the scrub operation"""
frequency = self.config.get("frequency", 30)
deep = self.config.get("deep", 0)
log.info("stopping %s" % self.stopping)
while not self.stopping:
osd = str(random.choice(self.osds))
if deep:
cmd = 'deep-scrub'
else:
cmd = 'scrub'
log.info('%sbing %s' % (cmd, osd))
self.ceph_manager.raw_cluster_cmd('osd', cmd, osd)
time.sleep(frequency)
| 2,662 | 21.567797 | 67 |
py
|
null |
ceph-main/qa/tasks/scrub_test.py
|
"""Scrub testing"""
import contextlib
import json
import logging
import os
import time
import tempfile
from tasks import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def wait_for_victim_pg(manager, poolid):
"""Return a PG with some data and its acting set"""
# wait for some PG to have data that we can mess with
victim = None
while victim is None:
stats = manager.get_pg_stats()
for pg in stats:
pgid = str(pg['pgid'])
pgpool = int(pgid.split('.')[0])
if poolid != pgpool:
continue
size = pg['stat_sum']['num_bytes']
if size > 0:
victim = pg['pgid']
acting = pg['acting']
return victim, acting
time.sleep(3)
def find_victim_object(ctx, pg, osd):
"""Return a file to be fuzzed"""
(osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.keys()
data_path = os.path.join(
'/var/lib/ceph/osd',
'ceph-{id}'.format(id=osd),
'fuse',
'{pg}_head'.format(pg=pg),
'all',
)
# fuzz time
ls_out = osd_remote.sh('sudo ls %s' % data_path)
# find an object file we can mess with (and not the pg info object)
    osdfilename = next((line for line in ls_out.split('\n')
                        if not line.endswith('::::head#')), None)
    assert osdfilename is not None
# Get actual object name from osd stored filename
objname = osdfilename.split(':')[4]
return osd_remote, os.path.join(data_path, osdfilename), objname
def corrupt_file(osd_remote, path):
# put a single \0 at the beginning of the file
osd_remote.run(
args=['sudo', 'dd',
'if=/dev/zero',
'of=%s/data' % path,
'bs=1', 'count=1', 'conv=notrunc']
)
def get_pgnum(pgid):
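    # e.g. get_pgnum('2.1a') == '1a' -- the part of the pgid after the pool id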
pos = pgid.find('.')
assert pos != -1
return pgid[pos+1:]
def deep_scrub(manager, victim, pool):
# scrub, verify inconsistent
pgnum = get_pgnum(victim)
manager.do_pg_scrub(pool, pgnum, 'deep-scrub')
stats = manager.get_single_pg_stats(victim)
inconsistent = stats['state'].find('+inconsistent') != -1
assert inconsistent
def repair(manager, victim, pool):
# repair, verify no longer inconsistent
pgnum = get_pgnum(victim)
manager.do_pg_scrub(pool, pgnum, 'repair')
stats = manager.get_single_pg_stats(victim)
inconsistent = stats['state'].find('+inconsistent') != -1
assert not inconsistent
def test_repair_corrupted_obj(ctx, manager, pg, osd_remote, obj_path, pool):
corrupt_file(osd_remote, obj_path)
deep_scrub(manager, pg, pool)
repair(manager, pg, pool)
def test_repair_bad_omap(ctx, manager, pg, osd, objname):
# Test deep-scrub with various omap modifications
# Modify omap on specific osd
log.info('fuzzing omap of %s' % objname)
manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'key'])
manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname,
'badkey', 'badval'])
manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'badhdr'])
deep_scrub(manager, pg, 'rbd')
    # Please note, the repair here is erroneous: it rewrites the correct omap
    # digest and data digest on the replicas with the corresponding digests
    # from the primary osd which is hosting the victim object, see
    # find_victim_object().
    # So we need to either put this test at the end of this task, or undo the
    # mess-up manually before the "repair()" that just ensures the cleanup is
    # sane; otherwise the succeeding tests will fail if they try to set
    # "badkey" in hope of getting an "inconsistent" pg with a deep-scrub.
manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'hdr'])
manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'badkey'])
manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname,
'key', 'val'])
repair(manager, pg, 'rbd')
class MessUp:
def __init__(self, manager, osd_remote, pool, osd_id,
obj_name, obj_path, omap_key, omap_val):
self.manager = manager
self.osd = osd_remote
self.pool = pool
self.osd_id = osd_id
self.obj = obj_name
self.path = obj_path
self.omap_key = omap_key
self.omap_val = omap_val
@contextlib.contextmanager
def _test_with_file(self, messup_cmd, *checks):
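        # Contract: back up the object's data file, apply the corrupting
        # command, yield the scrub errors the caller should expect, then
        # restore the backup so subsequent tests start from a clean object.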
temp = tempfile.mktemp()
backup_cmd = ['sudo', 'cp', os.path.join(self.path, 'data'), temp]
self.osd.run(args=backup_cmd)
self.osd.run(args=messup_cmd.split())
yield checks
create_cmd = ['sudo', 'mkdir', self.path]
self.osd.run(args=create_cmd, check_status=False)
restore_cmd = ['sudo', 'cp', temp, os.path.join(self.path, 'data')]
self.osd.run(args=restore_cmd)
def remove(self):
cmd = 'sudo rmdir {path}'.format(path=self.path)
return self._test_with_file(cmd, 'missing')
def append(self):
cmd = 'sudo dd if=/dev/zero of={path}/data bs=1 count=1 ' \
'conv=notrunc oflag=append'.format(path=self.path)
return self._test_with_file(cmd,
'data_digest_mismatch',
'size_mismatch')
def truncate(self):
cmd = 'sudo dd if=/dev/null of={path}/data'.format(path=self.path)
return self._test_with_file(cmd,
'data_digest_mismatch',
'size_mismatch')
def change_obj(self):
cmd = 'sudo dd if=/dev/zero of={path}/data bs=1 count=1 ' \
'conv=notrunc'.format(path=self.path)
return self._test_with_file(cmd,
'data_digest_mismatch')
@contextlib.contextmanager
def rm_omap(self):
cmd = ['rmomapkey', self.pool, self.obj, self.omap_key]
self.manager.osd_admin_socket(self.osd_id, cmd)
yield ('omap_digest_mismatch',)
cmd = ['setomapval', self.pool, self.obj,
self.omap_key, self.omap_val]
self.manager.osd_admin_socket(self.osd_id, cmd)
@contextlib.contextmanager
def add_omap(self):
cmd = ['setomapval', self.pool, self.obj, 'badkey', 'badval']
self.manager.osd_admin_socket(self.osd_id, cmd)
yield ('omap_digest_mismatch',)
cmd = ['rmomapkey', self.pool, self.obj, 'badkey']
self.manager.osd_admin_socket(self.osd_id, cmd)
@contextlib.contextmanager
def change_omap(self):
cmd = ['setomapval', self.pool, self.obj, self.omap_key, 'badval']
self.manager.osd_admin_socket(self.osd_id, cmd)
yield ('omap_digest_mismatch',)
cmd = ['setomapval', self.pool, self.obj, self.omap_key, self.omap_val]
self.manager.osd_admin_socket(self.osd_id, cmd)
class InconsistentObjChecker:
"""Check the returned inconsistents/inconsistent info"""
def __init__(self, osd, acting, obj_name):
self.osd = osd
self.acting = acting
self.obj = obj_name
assert self.osd in self.acting
def basic_checks(self, inc):
assert inc['object']['name'] == self.obj
assert inc['object']['snap'] == "head"
assert len(inc['shards']) == len(self.acting), \
"the number of returned shard does not match with the acting set"
def run(self, check, inc):
func = getattr(self, check)
func(inc)
def _check_errors(self, inc, err_name):
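        # Expect exactly one shard (the one on self.osd) to carry err_name,
        # and every other shard in the acting set to be free of it.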
bad_found = False
good_found = False
for shard in inc['shards']:
log.info('shard = %r' % shard)
log.info('err = %s' % err_name)
assert 'osd' in shard
osd = shard['osd']
err = err_name in shard['errors']
if osd == self.osd:
assert bad_found is False, \
"multiple entries found for the given OSD"
assert err is True, \
"Didn't find '{err}' in errors".format(err=err_name)
bad_found = True
else:
assert osd in self.acting, "shard not in acting set"
assert err is False, \
"Expected '{err}' in errors".format(err=err_name)
good_found = True
assert bad_found is True, \
"Shard for osd.{osd} not found".format(osd=self.osd)
assert good_found is True, \
"No other acting shards found"
def _check_attrs(self, inc, attr_name):
bad_attr = None
good_attr = None
for shard in inc['shards']:
log.info('shard = %r' % shard)
log.info('attr = %s' % attr_name)
assert 'osd' in shard
osd = shard['osd']
attr = shard.get(attr_name, False)
if osd == self.osd:
assert bad_attr is None, \
"multiple entries found for the given OSD"
bad_attr = attr
else:
assert osd in self.acting, "shard not in acting set"
assert good_attr is None or good_attr == attr, \
"multiple good attrs found"
good_attr = attr
assert bad_attr is not None, \
"bad {attr} not found".format(attr=attr_name)
assert good_attr is not None, \
"good {attr} not found".format(attr=attr_name)
assert good_attr != bad_attr, \
"bad attr is identical to the good ones: " \
"{0} == {1}".format(good_attr, bad_attr)
def data_digest_mismatch(self, inc):
assert 'data_digest_mismatch' in inc['errors']
self._check_attrs(inc, 'data_digest')
def missing(self, inc):
assert 'missing' in inc['union_shard_errors']
self._check_errors(inc, 'missing')
def size_mismatch(self, inc):
assert 'size_mismatch' in inc['errors']
self._check_attrs(inc, 'size')
def omap_digest_mismatch(self, inc):
assert 'omap_digest_mismatch' in inc['errors']
self._check_attrs(inc, 'omap_digest')
def test_list_inconsistent_obj(ctx, manager, osd_remote, pg, acting, osd_id,
obj_name, obj_path):
mon = manager.controller
pool = 'rbd'
omap_key = 'key'
omap_val = 'val'
manager.do_rados(['setomapval', obj_name, omap_key, omap_val], pool=pool)
# Update missing digests, requires "osd deep scrub update digest min age: 0"
pgnum = get_pgnum(pg)
manager.do_pg_scrub(pool, pgnum, 'deep-scrub')
messup = MessUp(manager, osd_remote, pool, osd_id, obj_name, obj_path,
omap_key, omap_val)
for test in [messup.rm_omap, messup.add_omap, messup.change_omap,
messup.append, messup.truncate, messup.change_obj,
messup.remove]:
with test() as checks:
deep_scrub(manager, pg, pool)
cmd = 'rados list-inconsistent-pg {pool} ' \
'--format=json'.format(pool=pool)
pgs = json.loads(mon.sh(cmd))
assert pgs == [pg]
cmd = 'rados list-inconsistent-obj {pg} ' \
'--format=json'.format(pg=pg)
objs = json.loads(mon.sh(cmd))
assert len(objs['inconsistents']) == 1
checker = InconsistentObjChecker(osd_id, acting, obj_name)
inc_obj = objs['inconsistents'][0]
log.info('inc = %r', inc_obj)
checker.basic_checks(inc_obj)
for check in checks:
checker.run(check, inc_obj)
def task(ctx, config):
"""
Test [deep] scrub
tasks:
- chef:
- install:
- ceph:
log-ignorelist:
- '!= data_digest'
- '!= omap_digest'
- '!= size'
- deep-scrub 0 missing, 1 inconsistent objects
- deep-scrub [0-9]+ errors
- repair 0 missing, 1 inconsistent objects
- repair [0-9]+ errors, [0-9]+ fixed
- shard [0-9]+ .* : missing
- deep-scrub 1 missing, 1 inconsistent objects
- does not match object info size
- attr name mistmatch
- deep-scrub 1 missing, 0 inconsistent objects
- failed to pick suitable auth object
- candidate size [0-9]+ info size [0-9]+ mismatch
conf:
osd:
osd deep scrub update digest min age: 0
- scrub_test:
"""
if config is None:
config = {}
assert isinstance(config, dict), \
'scrub_test task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.keys()
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
logger=log.getChild('ceph_manager'),
)
while len(manager.get_osd_status()['up']) < num_osds:
time.sleep(10)
for i in range(num_osds):
manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'injectargs',
'--', '--osd-objectstore-fuse')
manager.flush_pg_stats(range(num_osds))
manager.wait_for_clean()
osd_dump = manager.get_osd_dump_json()
poolid = -1
for p in osd_dump['pools']:
if p['pool_name'] == 'rbd':
poolid = p['pool']
break
assert poolid != -1
# write some data
p = manager.do_rados(['bench', '--no-cleanup', '1', 'write', '-b', '4096'], pool='rbd')
log.info('err is %d' % p.exitstatus)
# wait for some PG to have data that we can mess with
pg, acting = wait_for_victim_pg(manager, poolid)
osd = acting[0]
osd_remote, obj_path, obj_name = find_victim_object(ctx, pg, osd)
    p = manager.do_rados(['setomapval', obj_name, 'key', 'val'], pool='rbd')
    log.info('err is %d' % p.exitstatus)
    p = manager.do_rados(['setomapheader', obj_name, 'hdr'], pool='rbd')
    log.info('err is %d' % p.exitstatus)
# Update missing digests, requires "osd deep scrub update digest min age: 0"
pgnum = get_pgnum(pg)
manager.do_pg_scrub('rbd', pgnum, 'deep-scrub')
log.info('messing with PG %s on osd %d' % (pg, osd))
test_repair_corrupted_obj(ctx, manager, pg, osd_remote, obj_path, 'rbd')
test_repair_bad_omap(ctx, manager, pg, osd, obj_name)
test_list_inconsistent_obj(ctx, manager, osd_remote, pg, acting, osd,
obj_name, obj_path)
log.info('test successful!')
# shut down fuse mount
for i in range(num_osds):
manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'injectargs',
'--', '--no-osd-objectstore-fuse')
time.sleep(5)
log.info('done')
| 14,841 | 34.850242 | 91 |
py
|
null |
ceph-main/qa/tasks/systemd.py
|
"""
Systemd test
"""
import contextlib
import logging
import re
import time
from teuthology.orchestra import run
from teuthology.misc import reconnect, get_first_mon, wait_until_healthy
log = logging.getLogger(__name__)
def _remote_service_status(remote, service):
status = remote.sh('sudo systemctl status %s' % service,
check_status=False)
return status
@contextlib.contextmanager
def task(ctx, config):
"""
- tasks:
ceph-deploy:
systemd:
Test ceph systemd services can start, stop and restart and
check for any failed services and report back errors
"""
for remote, roles in ctx.cluster.remotes.items():
remote.run(args=['sudo', 'ps', '-eaf', run.Raw('|'),
'grep', 'ceph'])
units = remote.sh('sudo systemctl list-units | grep ceph',
check_status=False)
log.info(units)
        if 'failed' in units:
log.info("Ceph services in failed state")
# test overall service stop and start using ceph.target
# ceph.target tests are meant for ceph systemd tests
# and not actual process testing using 'ps'
log.info("Stopping all Ceph services")
remote.run(args=['sudo', 'systemctl', 'stop', 'ceph.target'])
status = _remote_service_status(remote, 'ceph.target')
log.info(status)
log.info("Checking process status")
ps_eaf = remote.sh('sudo ps -eaf | grep ceph')
        if 'Active: inactive' in status:
log.info("Successfully stopped all ceph services")
else:
log.info("Failed to stop ceph services")
log.info("Starting all Ceph services")
remote.run(args=['sudo', 'systemctl', 'start', 'ceph.target'])
status = _remote_service_status(remote, 'ceph.target')
log.info(status)
        if 'Active: active' in status:
log.info("Successfully started all Ceph services")
else:
log.info("info", "Failed to start Ceph services")
ps_eaf = remote.sh('sudo ps -eaf | grep ceph')
log.info(ps_eaf)
time.sleep(4)
# test individual services start stop
name = remote.shortname
mon_name = 'ceph-mon@' + name + '.service'
mds_name = 'ceph-mds@' + name + '.service'
mgr_name = 'ceph-mgr@' + name + '.service'
mon_role_name = 'mon.' + name
mds_role_name = 'mds.' + name
mgr_role_name = 'mgr.' + name
        m_osd = re.search(r'--id (\d+) --setuser ceph', ps_eaf)
if m_osd:
osd_service = 'ceph-osd@{m}.service'.format(m=m_osd.group(1))
remote.run(args=['sudo', 'systemctl', 'status',
osd_service])
remote.run(args=['sudo', 'systemctl', 'stop',
osd_service])
time.sleep(4) # immediate check will result in deactivating state
status = _remote_service_status(remote, osd_service)
log.info(status)
            if 'Active: inactive' in status:
log.info("Successfully stopped single osd ceph service")
else:
log.info("Failed to stop ceph osd services")
remote.sh(['sudo', 'systemctl', 'start', osd_service])
time.sleep(4)
if mon_role_name in roles:
remote.run(args=['sudo', 'systemctl', 'status', mon_name])
remote.run(args=['sudo', 'systemctl', 'stop', mon_name])
time.sleep(4) # immediate check will result in deactivating state
status = _remote_service_status(remote, mon_name)
            if 'Active: inactive' in status:
log.info("Successfully stopped single mon ceph service")
else:
log.info("Failed to stop ceph mon service")
remote.run(args=['sudo', 'systemctl', 'start', mon_name])
time.sleep(4)
if mgr_role_name in roles:
remote.run(args=['sudo', 'systemctl', 'status', mgr_name])
remote.run(args=['sudo', 'systemctl', 'stop', mgr_name])
time.sleep(4) # immediate check will result in deactivating state
status = _remote_service_status(remote, mgr_name)
            if 'Active: inactive' in status:
log.info("Successfully stopped single ceph mgr service")
else:
log.info("Failed to stop ceph mgr service")
remote.run(args=['sudo', 'systemctl', 'start', mgr_name])
time.sleep(4)
if mds_role_name in roles:
remote.run(args=['sudo', 'systemctl', 'status', mds_name])
remote.run(args=['sudo', 'systemctl', 'stop', mds_name])
time.sleep(4) # immediate check will result in deactivating state
status = _remote_service_status(remote, mds_name)
            if 'Active: inactive' in status:
log.info("Successfully stopped single ceph mds service")
else:
log.info("Failed to stop ceph mds service")
remote.run(args=['sudo', 'systemctl', 'start', mds_name])
time.sleep(4)
    # reboot all nodes and verify the systemd units restart;
    # a workunit that runs next would fail if any systemd unit doesn't start
ctx.cluster.run(args='sudo reboot', wait=False, check_status=False)
# avoid immediate reconnect
time.sleep(120)
reconnect(ctx, 480) # reconnect all nodes
# for debug info
ctx.cluster.run(args=['sudo', 'ps', '-eaf', run.Raw('|'),
'grep', 'ceph'])
# wait for HEALTH_OK
mon = get_first_mon(ctx, config)
(mon_remote,) = ctx.cluster.only(mon).remotes.keys()
wait_until_healthy(ctx, mon_remote, use_sudo=True)
yield
| 5,784 | 41.536765 | 78 |
py
|
null |
ceph-main/qa/tasks/tempest.py
|
"""
Deploy and configure Tempest for Teuthology
"""
import configparser
import contextlib
import logging
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.exceptions import ConfigError
from teuthology.orchestra import run
log = logging.getLogger(__name__)
def get_tempest_dir(ctx):
return '{tdir}/tempest'.format(tdir=teuthology.get_testdir(ctx))
def run_in_tempest_dir(ctx, client, cmdargs, **kwargs):
ctx.cluster.only(client).run(
args=[ 'cd', get_tempest_dir(ctx), run.Raw('&&'), ] + cmdargs,
**kwargs
)
def run_in_tempest_rgw_dir(ctx, client, cmdargs, **kwargs):
ctx.cluster.only(client).run(
args=[ 'cd', get_tempest_dir(ctx) + '/rgw', run.Raw('&&'), ] + cmdargs,
**kwargs
)
def run_in_tempest_venv(ctx, client, cmdargs, **kwargs):
run_in_tempest_dir(ctx, client,
[ 'source',
'.tox/venv/bin/activate',
run.Raw('&&')
] + cmdargs, **kwargs)
@contextlib.contextmanager
def download(ctx, config):
"""
Download the Tempest from github.
Remove downloaded file upon exit.
The context passed in should be identical to the context
passed in to the main task.
"""
assert isinstance(config, dict)
log.info('Downloading Tempest...')
for (client, cconf) in config.items():
ctx.cluster.only(client).run(
args=[
'git', 'clone',
'-b', cconf.get('force-branch', 'master'),
'https://github.com/openstack/tempest.git',
get_tempest_dir(ctx)
],
)
sha1 = cconf.get('sha1')
if sha1 is not None:
run_in_tempest_dir(ctx, client, [ 'git', 'reset', '--hard', sha1 ])
try:
yield
finally:
log.info('Removing Tempest...')
for client in config:
ctx.cluster.only(client).run(
args=[ 'rm', '-rf', get_tempest_dir(ctx) ],
)
def get_toxvenv_dir(ctx):
return ctx.tox.venv_path
@contextlib.contextmanager
def setup_venv(ctx, config):
"""
Setup the virtualenv for Tempest using tox.
"""
assert isinstance(config, dict)
log.info('Setting up virtualenv for Tempest')
for (client, _) in config.items():
run_in_tempest_dir(ctx, client,
[ '{tvdir}/bin/tox'.format(tvdir=get_toxvenv_dir(ctx)),
'-e', 'venv', '--notest'
])
yield
def setup_logging(ctx, cpar):
cpar.set('DEFAULT', 'log_dir', teuthology.get_archive_dir(ctx))
cpar.set('DEFAULT', 'log_file', 'tempest.log')
def to_config(config, params, section, cpar):
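    # Render values into ConfigParser-friendly strings: booleans become
    # 'true'/'false', and string values are expanded against the params dict
    # (e.g. '{keystone_public_host}').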
for (k, v) in config[section].items():
if isinstance(v, str):
v = v.format(**params)
elif isinstance(v, bool):
v = 'true' if v else 'false'
else:
v = str(v)
cpar.set(section, k, v)
@contextlib.contextmanager
def configure_instance(ctx, config):
assert isinstance(config, dict)
log.info('Configuring Tempest')
for (client, cconfig) in config.items():
run_in_tempest_venv(ctx, client,
[
'tempest',
'init',
'--workspace-path',
get_tempest_dir(ctx) + '/workspace.yaml',
'rgw'
])
# prepare the config file
tetcdir = '{tdir}/rgw/etc'.format(tdir=get_tempest_dir(ctx))
(remote,) = ctx.cluster.only(client).remotes.keys()
local_conf = remote.get_file(tetcdir + '/tempest.conf.sample')
# fill the params dictionary which allows to use templatized configs
keystone_role = cconfig.get('use-keystone-role', None)
if keystone_role is None \
or keystone_role not in ctx.keystone.public_endpoints:
raise ConfigError('the use-keystone-role is misconfigured')
public_host, public_port = ctx.keystone.public_endpoints[keystone_role]
params = {
'keystone_public_host': public_host,
'keystone_public_port': str(public_port),
}
cpar = configparser.ConfigParser()
cpar.read(local_conf)
setup_logging(ctx, cpar)
to_config(cconfig, params, 'auth', cpar)
to_config(cconfig, params, 'identity', cpar)
to_config(cconfig, params, 'object-storage', cpar)
to_config(cconfig, params, 'object-storage-feature-enabled', cpar)
        with open(local_conf, 'w+') as f:
            cpar.write(f)
remote.put_file(local_conf, tetcdir + '/tempest.conf')
yield
@contextlib.contextmanager
def run_tempest(ctx, config):
assert isinstance(config, dict)
    log.info('Running Tempest...')
for (client, cconf) in config.items():
blocklist = cconf.get('blocklist', [])
assert isinstance(blocklist, list)
run_in_tempest_venv(ctx, client,
[
'tempest',
'run',
'--workspace-path',
get_tempest_dir(ctx) + '/workspace.yaml',
'--workspace',
'rgw',
'--regex', '^tempest.api.object_storage',
'--black-regex', '|'.join(blocklist)
])
    yield
@contextlib.contextmanager
def task(ctx, config):
"""
Deploy and run Tempest's object storage campaign
Example of configuration:
overrides:
ceph:
conf:
client:
rgw keystone api version: 3
rgw keystone accepted roles: admin,member
rgw keystone implicit tenants: true
rgw keystone accepted admin roles: admin
rgw swift enforce content length: true
rgw swift account in url: true
rgw swift versioning enabled: true
rgw keystone admin domain: Default
rgw keystone admin user: admin
rgw keystone admin password: ADMIN
rgw keystone admin project: admin
tasks:
# typically, the task should be preceded with install, ceph, tox,
# keystone and rgw. Tox and Keystone are specific requirements
# of tempest.py.
- rgw:
# it's important to match the prefix with the endpoint's URL
# in Keystone. Additionally, if we want to test /info and its
# accompanying stuff, the whole Swift API must be put in root
# of the whole URL hierarchy (read: frontend_prefix == /swift).
frontend_prefix: /swift
client.0:
use-keystone-role: client.0
- tempest:
client.0:
force-branch: master
use-keystone-role: client.0
auth:
admin_username: admin
admin_project_name: admin
admin_password: ADMIN
admin_domain_name: Default
identity:
uri: http://{keystone_public_host}:{keystone_public_port}/v2.0/
uri_v3: http://{keystone_public_host}:{keystone_public_port}/v3/
admin_role: admin
object-storage:
reseller_admin_role: admin
object-storage-feature-enabled:
container_sync: false
discoverability: false
blocklist:
# please strip half of these items after merging PRs #15369
# and #12704
- .*test_list_containers_reverse_order.*
- .*test_list_container_contents_with_end_marker.*
- .*test_delete_non_empty_container.*
- .*test_container_synchronization.*
- .*test_get_object_after_expiration_time.*
- .*test_create_object_with_transfer_encoding.*
"""
assert config is None or isinstance(config, list) \
or isinstance(config, dict), \
'task tempest only supports a list or dictionary for configuration'
if not ctx.tox:
raise ConfigError('tempest must run after the tox task')
if not ctx.keystone:
raise ConfigError('tempest must run after the keystone task')
all_clients = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
if config is None:
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
for client in config.keys():
if not config[client]:
config[client] = {}
teuthology.deep_merge(config[client], overrides.get('keystone', {}))
log.debug('Tempest config is %s', config)
with contextutil.nested(
lambda: download(ctx=ctx, config=config),
lambda: setup_venv(ctx=ctx, config=config),
lambda: configure_instance(ctx=ctx, config=config),
lambda: run_tempest(ctx=ctx, config=config),
):
yield
| 8,991 | 33.060606 | 82 |
py
|
null |
ceph-main/qa/tasks/teuthology_integration.py
|
import logging
from teuthology import misc
from teuthology.task import Task
log = logging.getLogger(__name__)
class TeuthologyIntegration(Task):
def begin(self):
misc.sh("""
set -x
pip install tox
tox
# tox -e py27-integration
tox -e openstack-integration
""")
task = TeuthologyIntegration
| 355 | 16.8 | 36 |
py
|
null |
ceph-main/qa/tasks/tgt.py
|
"""
Task to handle tgt
Assumptions made:
    The ceph-extras tgt package may need to be installed.
    The open-iscsi package needs to be installed.
"""
import logging
import contextlib
from teuthology import misc as teuthology
from teuthology import contextutil
log = logging.getLogger(__name__)
@contextlib.contextmanager
def start_tgt_remotes(ctx, start_tgtd):
"""
This subtask starts up a tgtd on the clients specified
"""
remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
tgtd_list = []
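    # For each requested client: create a backing RBD image, define iscsi
    # target 1, attach the image as LUN 1 using the rbd backing-store type,
    # and bind the target to all initiators.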
for rem, roles in remotes.items():
for _id in roles:
if _id in start_tgtd:
                if rem not in tgtd_list:
tgtd_list.append(rem)
size = ctx.config.get('image_size', 10240)
rem.run(
args=[
'rbd',
'create',
'iscsi-image',
'--size',
str(size),
])
rem.run(
args=[
'sudo',
'tgtadm',
'--lld',
'iscsi',
'--mode',
'target',
'--op',
'new',
'--tid',
'1',
'--targetname',
'rbd',
])
rem.run(
args=[
'sudo',
'tgtadm',
'--lld',
'iscsi',
'--mode',
'logicalunit',
'--op',
'new',
'--tid',
'1',
'--lun',
'1',
'--backing-store',
'iscsi-image',
'--bstype',
'rbd',
])
rem.run(
args=[
'sudo',
'tgtadm',
'--lld',
'iscsi',
'--op',
'bind',
'--mode',
'target',
'--tid',
'1',
'-I',
'ALL',
])
try:
yield
finally:
for rem in tgtd_list:
rem.run(
args=[
'sudo',
'tgtadm',
'--lld',
'iscsi',
'--mode',
'target',
'--op',
'delete',
'--force',
'--tid',
'1',
])
rem.run(
args=[
'rbd',
'snap',
'purge',
'iscsi-image',
])
rem.run(
args=[
'sudo',
'rbd',
'rm',
'iscsi-image',
])
@contextlib.contextmanager
def task(ctx, config):
"""
Start up tgt.
    To start on all clients::
tasks:
- ceph:
- tgt:
To start on certain clients::
tasks:
- ceph:
- tgt: [client.0, client.3]
or
tasks:
- ceph:
- tgt:
client.0:
client.3:
    An image size can also be specified::
        tasks:
        - ceph:
        - tgt:
            image_size: 20480
The general flow of things here is:
1. Find clients on which tgt is supposed to run (start_tgtd)
2. Remotely start up tgt daemon
On cleanup:
3. Stop tgt daemon
The iscsi administration is handled by the iscsi task.
"""
if config:
config = {key : val for key, val in config.items()
if key.startswith('client')}
# config at this point should only contain keys starting with 'client'
start_tgtd = []
remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
log.info(remotes)
if not config:
start_tgtd = ['client.{id}'.format(id=id_)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
else:
start_tgtd = config
log.info(start_tgtd)
with contextutil.nested(
lambda: start_tgt_remotes(ctx=ctx, start_tgtd=start_tgtd),):
yield
| 5,001 | 27.101124 | 75 |
py
|
null |
ceph-main/qa/tasks/thrash_pool_snaps.py
|
"""
Thrash -- Simulate random osd failures.
"""
import contextlib
import logging
import gevent
import time
import random
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
"Thrash" snap creation and removal on the listed pools
Example:
thrash_pool_snaps:
pools: [.rgw.buckets, .rgw.buckets.index]
max_snaps: 10
min_snaps: 5
period: 10
"""
stopping = False
def do_thrash():
pools = config.get('pools', [])
max_snaps = config.get('max_snaps', 10)
min_snaps = config.get('min_snaps', 5)
period = config.get('period', 30)
snaps = []
manager = ctx.managers['ceph']
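        # Keep the live snapshot count oscillating between min_snaps and
        # max_snaps: add below the floor, remove above the ceiling, and
        # otherwise pick one of the two at random.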
def remove_snap():
assert len(snaps) > 0
snap = random.choice(snaps)
log.info("Removing snap %s" % (snap,))
for pool in pools:
manager.remove_pool_snap(pool, str(snap))
snaps.remove(snap)
def add_snap(snap):
log.info("Adding snap %s" % (snap,))
for pool in pools:
manager.add_pool_snap(pool, str(snap))
snaps.append(snap)
index = 0
while not stopping:
index += 1
time.sleep(period)
if len(snaps) <= min_snaps:
add_snap(index)
elif len(snaps) >= max_snaps:
remove_snap()
else:
random.choice([lambda: add_snap(index), remove_snap])()
log.info("Stopping")
thread = gevent.spawn(do_thrash)
yield
stopping = True
thread.join()
| 1,617 | 25.096774 | 71 |
py
|
null |
ceph-main/qa/tasks/thrasher.py
|
"""
Thrasher base class
"""
class Thrasher(object):
def __init__(self):
super(Thrasher, self).__init__()
self._exception = None
@property
def exception(self):
return self._exception
def set_thrasher_exception(self, e):
self._exception = e
| 290 | 17.1875 | 40 |
py
|
null |
ceph-main/qa/tasks/thrashosds-health.yaml
|
overrides:
ceph:
conf:
osd:
osd max markdown count: 1000
osd blocked scrub grace period: 3600
log-ignorelist:
- overall HEALTH_
- \(OSDMAP_FLAGS\)
- \(OSD_
- \(PG_
- \(POOL_
- \(CACHE_POOL_
- \(SMALLER_PGP_NUM\)
- \(OBJECT_
- \(SLOW_OPS\)
- \(REQUEST_SLOW\)
- \(TOO_FEW_PGS\)
- slow request
- timeout on replica
- late reservation from
| 452 | 19.590909 | 44 |
yaml
|