ceph-main/src/ceph-volume/ceph_volume/devices/raw/activate.py
from __future__ import print_function
import argparse
import logging
import os
from textwrap import dedent
from ceph_volume import process, conf, decorators, terminal
from ceph_volume.util import system
from ceph_volume.util import prepare as prepare_utils
from .list import direct_report
logger = logging.getLogger(__name__)
def activate_bluestore(meta, tmpfs, systemd):
# find the osd
osd_id = meta['osd_id']
osd_uuid = meta['osd_uuid']
# mount on tmpfs the osd directory
osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
if not system.path_is_mounted(osd_path):
# mkdir -p and mount as tmpfs
prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs)
# XXX This needs to be removed once ceph-bluestore-tool can deal with
# symlinks that exist in the osd dir
for link_name in ['block', 'block.db', 'block.wal']:
link_path = os.path.join(osd_path, link_name)
if os.path.exists(link_path):
os.unlink(os.path.join(osd_path, link_name))
# Once symlinks are removed, the osd dir can be 'primed' again. chown first,
# regardless of what currently exists, so that ``prime-osd-dir`` can succeed
# even if permissions are somehow messed up
system.chown(osd_path)
prime_command = [
'ceph-bluestore-tool',
'prime-osd-dir',
'--path', osd_path,
'--no-mon-config',
'--dev', meta['device'],
]
process.run(prime_command)
# always re-do the symlink regardless if it exists, so that the block,
# block.wal, and block.db devices that may have changed can be mapped
# correctly every time
prepare_utils.link_block(meta['device'], osd_id)
if 'device_db' in meta:
prepare_utils.link_db(meta['device_db'], osd_id, osd_uuid)
if 'device_wal' in meta:
prepare_utils.link_wal(meta['device_wal'], osd_id, osd_uuid)
system.chown(osd_path)
terminal.success("ceph-volume raw activate successful for osd ID: %s" % osd_id)
class Activate(object):
help = 'Discover and prepare a data directory for a (BlueStore) OSD on a raw device'
def __init__(self, argv):
self.argv = argv
self.args = None
@decorators.needs_root
def activate(self, devs, start_osd_id, start_osd_uuid,
tmpfs, systemd):
"""
:param devs: list of device paths to inspect
:param start_osd_id: optional OSD ID; when given, only the matching OSD is activated
:param start_osd_uuid: optional OSD UUID; when given, only the matching OSD is activated
:param tmpfs: whether to mount the OSD directory on tmpfs
:param systemd: whether to enable systemd units (not yet supported here)
"""
assert devs or start_osd_id or start_osd_uuid
found = direct_report(devs)
activated_any = False
for osd_uuid, meta in found.items():
osd_id = meta['osd_id']
if start_osd_id is not None and str(osd_id) != str(start_osd_id):
continue
if start_osd_uuid is not None and osd_uuid != start_osd_uuid:
continue
logger.info('Activating osd.%s uuid %s cluster %s' % (
osd_id, osd_uuid, meta['ceph_fsid']))
activate_bluestore(meta,
tmpfs=tmpfs,
systemd=systemd)
activated_any = True
if not activated_any:
raise RuntimeError('did not find any matching OSD to activate')
def main(self):
sub_command_help = dedent("""
Activate a (BlueStore) OSD on one or more raw block devices, based on the
device label (normally the first block of the device).
ceph-volume raw activate [/dev/sdb2 ...]
or
ceph-volume raw activate --osd-id NUM --osd-uuid UUID
The device(s) associated with the OSD need to have been prepared
previously, so that all needed tags and metadata exist.
""")
parser = argparse.ArgumentParser(
prog='ceph-volume raw activate',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=sub_command_help,
)
parser.add_argument(
'--device',
help='The device for the OSD to start'
)
parser.add_argument(
'--osd-id',
help='OSD ID to activate'
)
parser.add_argument(
'--osd-uuid',
help='OSD UUID to activate'
)
parser.add_argument(
'--no-systemd',
dest='no_systemd',
action='store_true',
help='Skip creating and enabling systemd units and starting OSD services'
)
parser.add_argument(
'--block.db',
dest='block_db',
help='Path to bluestore block.db block device'
)
parser.add_argument(
'--block.wal',
dest='block_wal',
help='Path to bluestore block.wal block device'
)
parser.add_argument(
'--no-tmpfs',
action='store_true',
help='Do not use a tmpfs mount for OSD data dir'
)
if not self.argv:
print(sub_command_help)
return
args = parser.parse_args(self.argv)
self.args = args
if not args.no_systemd:
terminal.error('systemd support not yet implemented')
raise SystemExit(1)
devs = [args.device]
if args.block_wal:
devs.append(args.block_wal)
if args.block_db:
devs.append(args.block_db)
self.activate(devs=devs,
start_osd_id=args.osd_id,
start_osd_uuid=args.osd_uuid,
tmpfs=not args.no_tmpfs,
systemd=not self.args.no_systemd)
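A minimal usage sketch for the subcommand above, using only names defined in
this file; the device path and UUID are placeholders, the caller must be root
(``needs_root``), and ``main()`` currently exits unless --no-systemd is
passed, since systemd support is not implemented here:

    # equivalent CLI: ceph-volume raw activate --device /dev/sdb \
    #                 --osd-id 0 --osd-uuid OSD-UUID-PLACEHOLDER --no-systemd
    Activate(['--device', '/dev/sdb',
              '--osd-id', '0',
              '--osd-uuid', 'OSD-UUID-PLACEHOLDER',
              '--no-systemd']).main()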
ceph-main/src/ceph-volume/ceph_volume/devices/raw/common.py
import argparse
from ceph_volume.util import arg_validators
def create_parser(prog, description):
"""
Both prepare and create share the same parser, those are defined here to
avoid duplication
"""
parser = argparse.ArgumentParser(
prog=prog,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description,
)
parser.add_argument(
'--data',
required=True,
type=arg_validators.ValidRawDevice(as_string=True),
help='a raw device to use for the OSD',
)
parser.add_argument(
'--bluestore',
action='store_true',
help='Use BlueStore backend')
parser.add_argument(
'--crush-device-class',
dest='crush_device_class',
help='Crush device class to assign this OSD to',
default=""
)
parser.add_argument(
'--no-tmpfs',
action='store_true',
help='Do not use a tmpfs mount for OSD data dir'
)
parser.add_argument(
'--block.db',
dest='block_db',
help='Path to bluestore block.db block device',
type=arg_validators.ValidRawDevice(as_string=True)
)
parser.add_argument(
'--block.wal',
dest='block_wal',
help='Path to bluestore block.wal block device',
type=arg_validators.ValidRawDevice(as_string=True)
)
parser.add_argument(
'--dmcrypt',
action='store_true',
help='Enable device encryption via dm-crypt',
)
parser.add_argument(
'--osd-id',
help='Reuse an existing OSD id',
default=None,
type=arg_validators.valid_osd_id,
)
return parser
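As a hedged illustration, a caller such as ``prepare`` builds and uses the
shared parser like this; note that --data runs the ValidRawDevice validator,
so parsing only succeeds against a real raw device (the path and description
below are placeholders):

    parser = create_parser(
        prog='ceph-volume raw prepare',
        description='example description',  # placeholder text
    )
    args = parser.parse_args(['--data', '/dev/sdb', '--bluestore'])
    # args.data == '/dev/sdb', args.bluestore is True, args.osd_id is None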
ceph-main/src/ceph-volume/ceph_volume/devices/raw/list.py
from __future__ import print_function
import argparse
import json
import logging
from textwrap import dedent
from ceph_volume import decorators, process
from ceph_volume.util import disk
logger = logging.getLogger(__name__)
def direct_report(devices):
"""
Other non-cli consumers of listing information will want to consume the
report without the need to parse arguments or other flags. This helper
bypasses the need to deal with the class interface which is meant for cli
handling.
"""
_list = List([])
return _list.generate(devices)
def _get_bluestore_info(dev):
out, err, rc = process.call([
'ceph-bluestore-tool', 'show-label',
'--dev', dev], verbose_on_failure=False)
if rc:
# ceph-bluestore-tool returns an error (below) if device is not bluestore OSD
# > unable to read label for <device>: (2) No such file or directory
# but it's possible the error could be for a different reason (like if the disk fails)
logger.debug('assuming device {} is not BlueStore; ceph-bluestore-tool failed to get info from device: {}\n{}'.format(dev, out, err))
return None
oj = json.loads(''.join(out))
if dev not in oj:
# should be impossible, so warn
logger.warning('skipping device {} because it is not reported in ceph-bluestore-tool output: {}'.format(dev, out))
return None
try:
r = {
'osd_uuid': oj[dev]['osd_uuid'],
}
if oj[dev]['description'] == 'main':
whoami = oj[dev]['whoami']
r.update({
'type': 'bluestore',
'osd_id': int(whoami),
'ceph_fsid': oj[dev]['ceph_fsid'],
'device': dev,
})
elif oj[dev]['description'] == 'bluefs db':
r['device_db'] = dev
elif oj[dev]['description'] == 'bluefs wal':
r['device_wal'] = dev
return r
except KeyError as e:
# this will appear for devices that have a bluestore header but aren't valid OSDs
# for example, due to incomplete rollback of OSDs: https://tracker.ceph.com/issues/51869
logger.error('device {} does not have all BlueStore data needed to be a valid OSD: {}\n{}'.format(dev, out, e))
return None
class List(object):
help = 'list BlueStore OSDs on raw devices'
def __init__(self, argv):
self.argv = argv
def generate(self, devs=None):
logger.debug('Listing block devices via lsblk...')
info_devices = disk.lsblk_all(abspath=True)
if devs is None or devs == []:
# If no devs are given initially, we want to list ALL devices including children and
# parents. Parent disks with child partitions may be the appropriate device to return if
# the parent disk has a bluestore header, but children may be the most appropriate
# devices to return if the parent disk does not have a bluestore header.
devs = [device['NAME'] for device in info_devices if device.get('NAME')]
result = {}
logger.debug('inspecting devices: {}'.format(devs))
for dev in devs:
# Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret
# bluestore's on-disk format as an Atari partition table. These false Atari partitions
# can be interpreted as real OSDs if a bluestore OSD was previously created on the false
# partition. See https://tracker.ceph.com/issues/52060 for more info. If a device has a
# parent, it is a child. If the parent is a valid bluestore OSD, the child will only
# exist if it is a phantom Atari partition, and the child should be ignored. If the
# parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to
# determine whether a parent is bluestore, we should err on the side of not reporting
# the child so as not to give a false negative.
for info_device in info_devices:
if 'PKNAME' in info_device and info_device['PKNAME'] != "":
parent = info_device['PKNAME']
try:
if disk.has_bluestore_label(parent):
logger.warning('ignoring child device {} whose parent {} is a BlueStore OSD. '
'device is likely a phantom Atari partition. device info: {}'.format(dev, parent, info_device))
continue
except OSError as e:
logger.error('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions. '
'failed to determine if parent device {} is BlueStore. err: {}'.format(dev, parent, e))
continue
bs_info = _get_bluestore_info(dev)
if bs_info is None:
# None is also returned in the rare event that there is an issue reading info from
# a BlueStore disk, so be sure to log our assumption that it isn't bluestore
logger.info('device {} does not have BlueStore information'.format(dev))
continue
uuid = bs_info['osd_uuid']
if uuid not in result:
result[uuid] = {}
result[uuid].update(bs_info)
return result
@decorators.needs_root
def list(self, args):
report = self.generate(args.device)
if args.format == 'json':
print(json.dumps(report, indent=4, sort_keys=True))
else:
if not report:
raise SystemExit('No valid Ceph devices found')
raise RuntimeError('not implemented yet')
def main(self):
sub_command_help = dedent("""
List OSDs on raw devices with raw device labels (usually the first
block of the device).
Full listing of all identifiable (currently, BlueStore) OSDs
on raw devices:
ceph-volume raw list
List a particular device, reporting all metadata about it::
ceph-volume raw list /dev/sda1
""")
parser = argparse.ArgumentParser(
prog='ceph-volume raw list',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=sub_command_help,
)
parser.add_argument(
'device',
metavar='DEVICE',
nargs='*',
help='Path to a device like /dev/sda1'
)
parser.add_argument(
'--format',
help='output format, defaults to "pretty"',
default='json',
choices=['json', 'pretty'],
)
args = parser.parse_args(self.argv)
self.list(args)
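For orientation, a sketch of the data shapes involved, with placeholder
values: ``ceph-bluestore-tool show-label`` emits JSON keyed by device path,
and ``generate()`` folds the per-device records into one entry per OSD UUID.
The keys shown are exactly those read by ``_get_bluestore_info()``:

    # show-label output for a 'main' device (illustrative):
    # {"/dev/sdb": {"osd_uuid": "<uuid>", "description": "main",
    #               "whoami": "0", "ceph_fsid": "<fsid>"}}
    report = List([]).generate(['/dev/sdb'])
    # report (illustrative):
    # {"<uuid>": {"osd_uuid": "<uuid>", "type": "bluestore", "osd_id": 0,
    #             "ceph_fsid": "<fsid>", "device": "/dev/sdb"}}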
ceph-main/src/ceph-volume/ceph_volume/devices/raw/main.py
import argparse
from textwrap import dedent
from ceph_volume import terminal
from . import list
from . import prepare
from . import activate
class Raw(object):
help = 'Manage single-device OSDs on raw block devices'
_help = dedent("""
Manage a single-device OSD on a raw block device. Rely on
the existing device labels to store any needed metadata.
{sub_help}
""")
mapper = {
'list': list.List,
'prepare': prepare.Prepare,
'activate': activate.Activate,
}
def __init__(self, argv):
self.argv = argv
def print_help(self, sub_help):
return self._help.format(sub_help=sub_help)
def main(self):
terminal.dispatch(self.mapper, self.argv)
parser = argparse.ArgumentParser(
prog='ceph-volume raw',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=self.print_help(terminal.subhelp(self.mapper)),
)
parser.parse_args(self.argv)
if len(self.argv) <= 1:
return parser.print_help()
ceph-main/src/ceph-volume/ceph_volume/devices/raw/prepare.py
from __future__ import print_function
import json
import logging
import os
from textwrap import dedent
from ceph_volume.util import prepare as prepare_utils
from ceph_volume.util import encryption as encryption_utils
from ceph_volume.util import disk
from ceph_volume.util import system
from ceph_volume import decorators, terminal
from ceph_volume.devices.lvm.common import rollback_osd
from .common import create_parser
logger = logging.getLogger(__name__)
def prepare_dmcrypt(key, device, device_type, fsid):
"""
Helper for devices that are encrypted. The operations needed for
block, db, wal, devices are all the same
"""
if not device:
return ''
kname = disk.lsblk(device)['KNAME']
mapping = 'ceph-{}-{}-{}-dmcrypt'.format(fsid, kname, device_type)
# format data device
encryption_utils.luks_format(
key,
device
)
encryption_utils.luks_open(
key,
device,
mapping
)
return '/dev/mapper/{}'.format(mapping)
def prepare_bluestore(block, wal, db, secrets, osd_id, fsid, tmpfs):
"""
:param block: The name of the logical volume for the bluestore data
:param wal: a regular/plain disk or logical volume, to be used for block.wal
:param db: a regular/plain disk or logical volume, to be used for block.db
:param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
:param osd_id: The OSD id
:param fsid: The OSD fsid, also known as the OSD UUID
:param tmpfs: whether to mount the OSD data directory on tmpfs
"""
cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
if secrets.get('dmcrypt_key'):
key = secrets['dmcrypt_key']
block = prepare_dmcrypt(key, block, 'block', fsid)
wal = prepare_dmcrypt(key, wal, 'wal', fsid)
db = prepare_dmcrypt(key, db, 'db', fsid)
# create the directory
prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs)
# symlink the block
prepare_utils.link_block(block, osd_id)
# get the latest monmap
prepare_utils.get_monmap(osd_id)
# write the OSD keyring if it doesn't exist already
prepare_utils.write_keyring(osd_id, cephx_secret)
# prepare the osd filesystem
prepare_utils.osd_mkfs_bluestore(
osd_id, fsid,
keyring=cephx_secret,
wal=wal,
db=db
)
class Prepare(object):
help = 'Format a raw device and associate it with a (BlueStore) OSD'
def __init__(self, argv):
self.argv = argv
self.osd_id = None
def safe_prepare(self, args=None):
"""
An intermediate step between `main()` and `prepare()` so that we can
capture the `self.osd_id` in case we need to rollback
:param args: Injected args, usually from `raw create` which compounds
both `prepare` and `create`
"""
if args is not None:
self.args = args
try:
self.prepare()
except Exception:
logger.exception('raw prepare was unable to complete')
logger.info('will rollback OSD ID creation')
rollback_osd(self.args, self.osd_id)
raise
dmcrypt_log = 'dmcrypt' if self.args.dmcrypt else 'clear'  # use self.args: the ``args`` parameter may be None
terminal.success("ceph-volume raw {} prepare successful for: {}".format(dmcrypt_log, self.args.data))
@decorators.needs_root
def prepare(self):
secrets = {'cephx_secret': prepare_utils.create_key()}
encrypted = 1 if self.args.dmcrypt else 0
cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()
if encrypted:
secrets['dmcrypt_key'] = os.getenv('CEPH_VOLUME_DMCRYPT_SECRET')
secrets['cephx_lockbox_secret'] = cephx_lockbox_secret # dummy value so `ceph osd new` doesn't complain
osd_fsid = system.generate_uuid()
crush_device_class = self.args.crush_device_class
if crush_device_class:
secrets['crush_device_class'] = crush_device_class
tmpfs = not self.args.no_tmpfs
wal = ""
db = ""
if self.args.block_wal:
wal = self.args.block_wal
if self.args.block_db:
db = self.args.block_db
# reuse a given ID if it exists, otherwise create a new ID
self.osd_id = prepare_utils.create_id(
osd_fsid,
json.dumps(secrets),
osd_id=self.args.osd_id)
prepare_bluestore(
self.args.data,
wal,
db,
secrets,
self.osd_id,
osd_fsid,
tmpfs,
)
def main(self):
sub_command_help = dedent("""
Prepare an OSD by assigning it an ID and FSID, registering them with the
cluster, and formatting the volume.
Once the OSD is ready, an ad-hoc systemd unit will be enabled so that
it can later get activated and the OSD daemon can get started.
ceph-volume raw prepare --bluestore --data {device}
DB and WAL devices are supported.
ceph-volume raw prepare --bluestore --data {device} --block.db {device} --block.wal {device}
""")
parser = create_parser(
prog='ceph-volume raw prepare',
description=sub_command_help,
)
if not self.argv:
print(sub_command_help)
return
self.args = parser.parse_args(self.argv)
if not self.args.bluestore:
terminal.error('must specify --bluestore (currently the only supported backend)')
raise SystemExit(1)
if self.args.dmcrypt and not os.getenv('CEPH_VOLUME_DMCRYPT_SECRET'):
terminal.error('encryption was requested (--dmcrypt) but environment variable ' \
'CEPH_VOLUME_DMCRYPT_SECRET is not set, you must set ' \
'this variable to provide a dmcrypt secret.')
raise SystemExit(1)
self.safe_prepare(self.args)
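A hedged end-to-end sketch of the flow above (paths are placeholders, the
caller must be root, and the environment variable is only required when
--dmcrypt is used, as enforced in ``main()``):

    # export CEPH_VOLUME_DMCRYPT_SECRET=<secret> beforehand
    Prepare(['--bluestore', '--data', '/dev/sdb', '--dmcrypt']).main()
    # prepare_dmcrypt() then maps the data device to a name of the form:
    #   /dev/mapper/ceph-<fsid>-<kname>-block-dmcrypt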
ceph-main/src/ceph-volume/ceph_volume/devices/simple/__init__.py
from .main import Simple # noqa
ceph-main/src/ceph-volume/ceph_volume/devices/simple/activate.py
from __future__ import print_function
import argparse
import base64
import glob
import json
import logging
import os
from textwrap import dedent
from ceph_volume import process, decorators, terminal, conf
from ceph_volume.util import system, disk
from ceph_volume.util import encryption as encryption_utils
from ceph_volume.systemd import systemctl
logger = logging.getLogger(__name__)
mlogger = terminal.MultiLogger(__name__)
class Activate(object):
help = 'Enable systemd units to mount configured devices and start a Ceph OSD'
def __init__(self, argv, from_trigger=False):
self.argv = argv
self.from_trigger = from_trigger
self.skip_systemd = False
def validate_devices(self, json_config):
"""
``json_config`` is the loaded dictionary coming from the JSON file. It is usually mixed with
other non-device items, but for the sake of comparison it doesn't really matter. This method
just makes sure that the needed keys exist
"""
devices = json_config.keys()
try:
objectstore = json_config['type']
except KeyError:
logger.warning(
'"type" key not found, assuming "bluestore" since journal key is not present'
)
objectstore = 'bluestore'
# Go through all the device combinations that are absolutely required,
# raise an error describing what was expected and what was found
# otherwise.
if objectstore == 'bluestore':
# This is a bit tricky: with newer bluestore we don't need 'data', but older
# implementations (e.g. ceph-disk) do. ceph-volume just uses a tmpfs that doesn't require data.
if {'block', 'data'}.issubset(set(devices)):
return True
else:
bluestore_devices = ['block.db', 'block.wal', 'block', 'data']
found = [i for i in devices if i in bluestore_devices]
mlogger.error("Required devices (block and data) not present for bluestore")
mlogger.error('bluestore devices found: %s', found)
raise RuntimeError('Unable to activate bluestore OSD due to missing devices')
def get_device(self, uuid):
"""
If a device is encrypted, it will decrypt/open and return the mapper
path, if it isn't encrypted it will just return the device found that
is mapped to the uuid. This will make it easier for the caller to
avoid if/else to check if devices need decrypting
:param uuid: The partition uuid of the device (PARTUUID)
"""
device = disk.get_device_from_partuuid(uuid)
# If device is not found, it is fine to return an empty string from the
# helper that finds `device`. If it finds anything and it is not
# encrypted, just return what was found
if not self.is_encrypted or not device:
return device
if self.encryption_type == 'luks':
encryption_utils.luks_open(self.dmcrypt_secret, device, uuid)
else:
encryption_utils.plain_open(self.dmcrypt_secret, device, uuid)
return '/dev/mapper/%s' % uuid
def enable_systemd_units(self, osd_id, osd_fsid):
"""
* disables the ceph-disk systemd units to prevent them from running when
a UDEV event matches Ceph rules
* creates the ``simple`` systemd units to handle the activation and
startup of the OSD with ``osd_id`` and ``osd_fsid``
* enables the OSD systemd unit and finally starts the OSD.
"""
if not self.from_trigger and not self.skip_systemd:
# means it was scanned and now activated directly, so ensure that
# ceph-disk units are disabled, and that the `simple` systemd unit
# is created and enabled
# enable the ceph-volume unit for this OSD
systemctl.enable_volume(osd_id, osd_fsid, 'simple')
# disable any/all ceph-disk units
systemctl.mask_ceph_disk()
terminal.warning(
('All ceph-disk systemd units have been disabled to '
'prevent OSDs getting triggered by UDEV events')
)
else:
terminal.info('Skipping enabling of `simple` systemd unit')
terminal.info('Skipping masking of ceph-disk systemd units')
if not self.skip_systemd:
# enable the OSD
systemctl.enable_osd(osd_id)
# start the OSD
systemctl.start_osd(osd_id)
else:
terminal.info(
'Skipping enabling and starting OSD simple systemd unit because --no-systemd was used'
)
@decorators.needs_root
def activate(self, args):
with open(args.json_config, 'r') as fp:
osd_metadata = json.load(fp)
# Make sure that required devices are configured
self.validate_devices(osd_metadata)
osd_id = osd_metadata.get('whoami', args.osd_id)
osd_fsid = osd_metadata.get('fsid', args.osd_fsid)
data_uuid = osd_metadata.get('data', {}).get('uuid')
conf.cluster = osd_metadata.get('cluster_name', 'ceph')
if not data_uuid:
raise RuntimeError(
'Unable to activate OSD %s - no "uuid" key found for data' % args.osd_id
)
# Encryption detection, and capturing of the keys to decrypt
self.is_encrypted = osd_metadata.get('encrypted', False)
self.encryption_type = osd_metadata.get('encryption_type')
if self.is_encrypted:
lockbox_secret = osd_metadata.get('lockbox.keyring')
# write the keyring always so that we can unlock
encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
# Store the secret around so that the decrypt method can reuse
raw_dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
# Note how both these calls need b64decode. For some reason, the
# way ceph-disk creates these keys, it stores them in the monitor
# *undecoded*, requiring this decode call again. The lvm side of
# encryption doesn't need it, so we are assuming here that anything
# that `simple` scans, will come from ceph-disk and will need this
# extra decode call here
self.dmcrypt_secret = base64.b64decode(raw_dmcrypt_secret)
cluster_name = osd_metadata.get('cluster_name', 'ceph')
osd_dir = '/var/lib/ceph/osd/%s-%s' % (cluster_name, osd_id)
# XXX there is no support for LVM here
data_device = self.get_device(data_uuid)
if not data_device:
raise RuntimeError("osd fsid {} doesn't exist, this file will "
"be skipped, consider cleaning legacy "
"json file {}".format(osd_metadata['fsid'], args.json_config))
block_device = self.get_device(osd_metadata.get('block', {}).get('uuid'))
block_db_device = self.get_device(osd_metadata.get('block.db', {}).get('uuid'))
block_wal_device = self.get_device(osd_metadata.get('block.wal', {}).get('uuid'))
if not system.device_is_mounted(data_device, destination=osd_dir):
process.run(['mount', '-v', data_device, osd_dir])
device_map = {
'block': block_device,
'block.db': block_db_device,
'block.wal': block_wal_device
}
for name, device in device_map.items():
if not device:
continue
# always re-do the symlink regardless if it exists, so that the journal
# device path that may have changed can be mapped correctly every time
destination = os.path.join(osd_dir, name)
process.run(['ln', '-snf', device, destination])
# make sure that the journal has proper permissions
system.chown(device)
self.enable_systemd_units(osd_id, osd_fsid)
terminal.success('Successfully activated OSD %s with FSID %s' % (osd_id, osd_fsid))
def main(self):
sub_command_help = dedent("""
Activate OSDs by mounting devices previously configured to their
appropriate destination::
ceph-volume simple activate {ID} {FSID}
Or using a JSON file directly::
ceph-volume simple activate --file /etc/ceph/osd/{ID}-{FSID}.json
The OSD must have been "scanned" previously (see ``ceph-volume simple
scan``), so that all needed OSD device information and metadata exist.
A previously scanned OSD would exist like::
/etc/ceph/osd/{ID}-{FSID}.json
Environment variables supported:
CEPH_VOLUME_SIMPLE_JSON_DIR: Directory location for scanned OSD JSON configs
""")
parser = argparse.ArgumentParser(
prog='ceph-volume simple activate',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=sub_command_help,
)
parser.add_argument(
'osd_id',
metavar='ID',
nargs='?',
help='The ID of the OSD, usually an integer, like 0'
)
parser.add_argument(
'osd_fsid',
metavar='FSID',
nargs='?',
help='The FSID of the OSD, similar to a SHA1'
)
parser.add_argument(
'--all',
help='Activate all OSDs with an OSD JSON config',
action='store_true',
default=False,
)
parser.add_argument(
'--file',
help='The path to a JSON file, from a scanned OSD'
)
parser.add_argument(
'--no-systemd',
dest='skip_systemd',
action='store_true',
help='Skip creating and enabling systemd units and starting OSD services',
)
if len(self.argv) == 0:
print(sub_command_help)
return
args = parser.parse_args(self.argv)
if not args.file and not args.all:
if not args.osd_id and not args.osd_fsid:
terminal.error('ID and FSID are required to find the right OSD to activate')
terminal.error('from a scanned OSD location in /etc/ceph/osd/')
raise RuntimeError('Unable to activate without both ID and FSID')
# don't allow a CLI flag to specify the JSON dir, because that might
# implicitly indicate that it would be possible to activate a json file
# at a non-default location which would not work at boot time if the
# custom location is not exposed through an ENV var
self.skip_systemd = args.skip_systemd
json_dir = os.environ.get('CEPH_VOLUME_SIMPLE_JSON_DIR', '/etc/ceph/osd/')
if args.all:
if args.file or args.osd_id:
mlogger.warn('--all was passed, ignoring --file and ID/FSID arguments')
json_configs = glob.glob('{}/*.json'.format(json_dir))
for json_config in json_configs:
mlogger.info('activating OSD specified in {}'.format(json_config))
args.json_config = json_config
try:
self.activate(args)
except RuntimeError as e:
terminal.warning(e.message)
else:
if args.file:
json_config = args.file
else:
json_config = os.path.join(json_dir, '%s-%s.json' % (args.osd_id, args.osd_fsid))
if not os.path.exists(json_config):
raise RuntimeError('Expected JSON config path not found: %s' % json_config)
args.json_config = json_config
self.activate(args)
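For orientation, a minimal sketch (as a Python literal) of the scanned JSON
that ``activate()`` consumes; the keys mirror the lookups above, the UUIDs
are placeholders, and a real file produced by ``ceph-volume simple scan``
carries more metadata:

    osd_metadata = {
        'type': 'bluestore',
        'whoami': '0',
        'fsid': '<osd-fsid>',
        'cluster_name': 'ceph',
        'data': {'path': '/dev/sda1', 'uuid': '<partuuid>'},
        'block': {'path': '/dev/sda2', 'uuid': '<partuuid>'},
    }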
ceph-main/src/ceph-volume/ceph_volume/devices/simple/main.py
import argparse
from textwrap import dedent
from ceph_volume import terminal
from . import scan
from . import activate
from . import trigger
class Simple(object):
help = 'Manage already deployed OSDs with ceph-volume'
_help = dedent("""
Take over a deployed OSD, persisting its metadata in /etc/ceph/osd/ so that it can be managed
with ceph-volume directly. Avoids UDEV and ceph-disk handling.
{sub_help}
""")
mapper = {
'scan': scan.Scan,
'activate': activate.Activate,
'trigger': trigger.Trigger,
}
def __init__(self, argv):
self.argv = argv
def print_help(self, sub_help):
return self._help.format(sub_help=sub_help)
def main(self):
terminal.dispatch(self.mapper, self.argv)
parser = argparse.ArgumentParser(
prog='ceph-volume simple',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=self.print_help(terminal.subhelp(self.mapper)),
)
parser.parse_args(self.argv)
if len(self.argv) <= 1:
return parser.print_help()
ceph-main/src/ceph-volume/ceph_volume/devices/simple/scan.py
from __future__ import print_function
import argparse
import base64
import json
import logging
import os
from textwrap import dedent
from ceph_volume import decorators, terminal, conf
from ceph_volume.api import lvm
from ceph_volume.systemd import systemctl
from ceph_volume.util import arg_validators, system, disk, encryption
from ceph_volume.util.device import Device
logger = logging.getLogger(__name__)
def parse_keyring(file_contents):
"""
Extract the actual key from a string. Usually from a keyring file, where
the keyring will be in a client section. In the case of a lockbox, it is
something like::
[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]\n\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==\n
From the above case, it would return::
AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==
"""
# remove newlines that might be trailing
keyring = file_contents.strip('\n')
# Now split on spaces
keyring = keyring.split(' ')[-1]
# Split on newlines
keyring = keyring.split('\n')[-1]
return keyring.strip()
class Scan(object):
help = 'Capture metadata from all running ceph-disk OSDs, or from an OSD data partition or directory'
def __init__(self, argv):
self.argv = argv
self._etc_path = '/etc/ceph/osd/'
@property
def etc_path(self):
if os.path.isdir(self._etc_path):
return self._etc_path
if not os.path.exists(self._etc_path):
os.mkdir(self._etc_path)
return self._etc_path
error = "OSD Configuration path (%s) needs to be a directory" % self._etc_path
raise RuntimeError(error)
def get_contents(self, path):
with open(path, 'r') as fp:
contents = fp.readlines()
if len(contents) > 1:
return ''.join(contents)
return ''.join(contents).strip().strip('\n')
def scan_device(self, path):
device_metadata = {'path': None, 'uuid': None}
if not path:
return device_metadata
if self.is_encrypted:
encryption_metadata = encryption.legacy_encrypted(path)
device_metadata['path'] = encryption_metadata['device']
device_metadata['uuid'] = disk.get_partuuid(encryption_metadata['device'])
return device_metadata
# cannot read the symlink if this is tmpfs
if os.path.islink(path):
device = os.readlink(path)
else:
device = path
lvm_device = lvm.get_single_lv(filters={'lv_path': device})
if lvm_device:
device_uuid = lvm_device.lv_uuid
else:
device_uuid = disk.get_partuuid(device)
device_metadata['uuid'] = device_uuid
device_metadata['path'] = device
return device_metadata
def scan_directory(self, path):
osd_metadata = {'cluster_name': conf.cluster}
directory_files = os.listdir(path)
if 'keyring' not in directory_files:
raise RuntimeError(
'OSD files not found, required "keyring" file is not present at: %s' % path
)
for file_ in os.listdir(path):
file_path = os.path.join(path, file_)
file_json_key = file_
if file_.endswith('_dmcrypt'):
# strip the literal '_dmcrypt' suffix; rstrip() removes a character set, not a suffix
file_json_key = file_[:-len('_dmcrypt')]
logger.info(
'reading file {}, stripping _dmcrypt suffix'.format(file_)
)
if os.path.islink(file_path):
if os.path.exists(file_path):
osd_metadata[file_json_key] = self.scan_device(file_path)
else:
msg = 'broken symlink found %s -> %s' % (file_path, os.path.realpath(file_path))
terminal.warning(msg)
logger.warning(msg)
if os.path.isdir(file_path):
continue
# the check for a binary file needs to happen before reading contents, to
# avoid capturing data from binary files but still be able to capture
# contents from actual files later
try:
if system.is_binary(file_path):
logger.info('skipping binary file: %s' % file_path)
continue
except IOError:
logger.exception('skipping due to IOError on file: %s' % file_path)
continue
if os.path.isfile(file_path):
content = self.get_contents(file_path)
if 'keyring' in file_path:
content = parse_keyring(content)
try:
osd_metadata[file_json_key] = int(content)
except ValueError:
osd_metadata[file_json_key] = content
# we must scan the paths again because this might be a temporary mount
path_mounts = system.Mounts(paths=True)
device = path_mounts.get_mounts().get(path)
# it is possible to have more than one device mounted at this path; pick the
# first one, although more than one of them could plausibly be 'data'
if not device:
terminal.error('Unable to detect device mounted for path: %s' % path)
raise RuntimeError('Cannot activate OSD')
osd_metadata['data'] = self.scan_device(device[0] if len(device) else None)
return osd_metadata
def scan_encrypted(self, directory=None):
device = self.encryption_metadata['device']
lockbox = self.encryption_metadata['lockbox']
encryption_type = self.encryption_metadata['type']
osd_metadata = {}
# Get the PARTUUID of the device to make sure we have the right one and
# that it maps to the data device
device_uuid = disk.get_partuuid(device)
dm_path = '/dev/mapper/%s' % device_uuid
# check if this partition is already mapped
device_status = encryption.status(device_uuid)
# capture all the information from the lockbox first, reusing the
# directory scan method
if self.device_mounts.get(lockbox):
lockbox_path = self.device_mounts.get(lockbox)[0]
lockbox_metadata = self.scan_directory(lockbox_path)
# ceph-disk stores the fsid as osd-uuid in the lockbox, thanks ceph-disk
dmcrypt_secret = encryption.get_dmcrypt_key(
None, # There is no ID stored in the lockbox
lockbox_metadata['osd-uuid'],
os.path.join(lockbox_path, 'keyring')
)
else:
with system.tmp_mount(lockbox) as lockbox_path:
lockbox_metadata = self.scan_directory(lockbox_path)
# ceph-disk stores the fsid as osd-uuid in the lockbox, thanks ceph-disk
dmcrypt_secret = encryption.get_dmcrypt_key(
None, # There is no ID stored in the lockbox
lockbox_metadata['osd-uuid'],
os.path.join(lockbox_path, 'keyring')
)
if not device_status:
# Note how both these calls need b64decode. For some reason, the
# way ceph-disk creates these keys, it stores them in the monitor
# *undecoded*, requiring this decode call again. The lvm side of
# encryption doesn't need it, so we are assuming here that anything
# that `simple` scans, will come from ceph-disk and will need this
# extra decode call here
dmcrypt_secret = base64.b64decode(dmcrypt_secret)
if encryption_type == 'luks':
encryption.luks_open(dmcrypt_secret, device, device_uuid)
else:
encryption.plain_open(dmcrypt_secret, device, device_uuid)
# If we have a directory, use that instead of checking for mounts
if directory:
osd_metadata = self.scan_directory(directory)
else:
# Now check if that mapper is mounted already, to avoid remounting and
# decrypting the device
dm_path_mount = self.device_mounts.get(dm_path)
if dm_path_mount:
osd_metadata = self.scan_directory(dm_path_mount[0])
else:
with system.tmp_mount(dm_path, encrypted=True) as device_path:
osd_metadata = self.scan_directory(device_path)
osd_metadata['encrypted'] = True
osd_metadata['encryption_type'] = encryption_type
osd_metadata['lockbox.keyring'] = parse_keyring(lockbox_metadata['keyring'])
return osd_metadata
@decorators.needs_root
def scan(self, args):
osd_metadata = {'cluster_name': conf.cluster}
osd_path = None
logger.info('detecting if argument is a device or a directory: %s', args.osd_path)
if os.path.isdir(args.osd_path):
logger.info('will scan directly, path is a directory')
osd_path = args.osd_path
else:
# assume this is a device, check if it is mounted and use that path
logger.info('path is not a directory, will check if mounted')
if system.device_is_mounted(args.osd_path):
logger.info('argument is a device, which is mounted')
mounted_osd_paths = self.device_mounts.get(args.osd_path)
osd_path = mounted_osd_paths[0] if len(mounted_osd_paths) else None
# argument is not a directory, and it is not a device that is mounted
# somewhere so temporarily mount it to poke inside, otherwise, scan
# directly
if not osd_path:
# check if we have an encrypted device first, so that we can poke at
# the lockbox instead
if self.is_encrypted:
if not self.encryption_metadata.get('lockbox'):
raise RuntimeError(
'Lockbox partition was not found for device: %s' % args.osd_path
)
osd_metadata = self.scan_encrypted()
else:
logger.info('device is not mounted, will mount it temporarily to scan')
with system.tmp_mount(args.osd_path) as osd_path:
osd_metadata = self.scan_directory(osd_path)
else:
if self.is_encrypted:
logger.info('will scan encrypted OSD directory at path: %s', osd_path)
osd_metadata = self.scan_encrypted(osd_path)
else:
logger.info('will scan OSD directory at path: %s', osd_path)
osd_metadata = self.scan_directory(osd_path)
osd_id = osd_metadata['whoami']
osd_fsid = osd_metadata['fsid']
filename = '%s-%s.json' % (osd_id, osd_fsid)
json_path = os.path.join(self.etc_path, filename)
if os.path.exists(json_path) and not args.stdout:
if not args.force:
raise RuntimeError(
'--force was not used and OSD metadata file exists: %s' % json_path
)
if args.stdout:
print(json.dumps(osd_metadata, indent=4, sort_keys=True, ensure_ascii=False))
else:
with open(json_path, 'w') as fp:
json.dump(osd_metadata, fp, indent=4, sort_keys=True, ensure_ascii=False)
fp.write(os.linesep)
terminal.success(
'OSD %s got scanned and metadata persisted to file: %s' % (
osd_id,
json_path
)
)
terminal.success(
'To take over management of this scanned OSD, and disable ceph-disk and udev, run:'
)
terminal.success(' ceph-volume simple activate %s %s' % (osd_id, osd_fsid))
if not osd_metadata.get('data'):
msg = 'Unable to determine device mounted on %s' % args.osd_path
logger.warning(msg)
terminal.warning(msg)
terminal.warning('OSD will not be able to start without this information:')
terminal.warning(' "data": "/path/to/device",')
logger.warning('Unable to determine device mounted on %s' % args.osd_path)
def main(self):
sub_command_help = dedent("""
Scan running OSDs, or an OSD directory (or data device), for the files and
configurations that allow taking over management of the OSD.
Scanned OSDs will get their configurations stored in
/etc/ceph/osd/<id>-<fsid>.json
For an OSD ID of 0 with fsid of ``a9d50838-e823-43d6-b01f-2f8d0a77afc2``
that could mean a scan command that looks like::
ceph-volume simple scan /var/lib/ceph/osd/ceph-0
Which would store the metadata in a JSON file at::
/etc/ceph/osd/0-a9d50838-e823-43d6-b01f-2f8d0a77afc2.json
To scan all running OSDs:
ceph-volume simple scan
To scan a specific running OSD:
ceph-volume simple scan /var/lib/ceph/osd/{cluster}-{osd id}
And to scan a device (mounted or unmounted) that has OSD data in it, for example /dev/sda1
ceph-volume simple scan /dev/sda1
A device or directory that belongs to an OSD not created by ceph-disk will be ignored.
""")
parser = argparse.ArgumentParser(
prog='ceph-volume simple scan',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=sub_command_help,
)
parser.add_argument(
'-f', '--force',
action='store_true',
help='If OSD has already been scanned, the JSON file will be overwritten'
)
parser.add_argument(
'--stdout',
action='store_true',
help='Do not save to a file, output metadata to stdout'
)
parser.add_argument(
'osd_path',
metavar='OSD_PATH',
type=arg_validators.OSDPath(),
nargs='?',
default=None,
help='Path to an existing OSD directory or OSD data partition'
)
args = parser.parse_args(self.argv)
paths = []
if args.osd_path:
paths.append(args.osd_path)
else:
osd_ids = systemctl.get_running_osd_ids()
for osd_id in osd_ids:
paths.append("/var/lib/ceph/osd/{}-{}".format(
conf.cluster,
osd_id,
))
# Capture some environment status, so that it can be reused all over
self.device_mounts = system.Mounts(devices=True).get_mounts()
self.path_mounts = system.Mounts(paths=True).get_mounts()
for path in paths:
args.osd_path = path
device = Device(args.osd_path)
if device.is_partition:
if device.ceph_disk.type != 'data':
label = device.ceph_disk.partlabel
msg = 'Device must be the ceph data partition, but PARTLABEL reported: "%s"' % label
raise RuntimeError(msg)
self.encryption_metadata = encryption.legacy_encrypted(args.osd_path)
self.is_encrypted = self.encryption_metadata['encrypted']
if self.encryption_metadata['device'] != "tmpfs":
device = Device(self.encryption_metadata['device'])
if not device.is_ceph_disk_member:
terminal.warning("Ignoring %s because it's not a ceph-disk created osd." % path)
else:
self.scan(args)
else:
terminal.warning("Ignoring %s because it's not a ceph-disk created osd." % path)
ceph-main/src/ceph-volume/ceph_volume/devices/simple/trigger.py
from __future__ import print_function
import argparse
from textwrap import dedent
from ceph_volume.exceptions import SuffixParsingError
from ceph_volume import decorators
from .activate import Activate
def parse_osd_id(string):
osd_id = string.split('-', 1)[0]
if not osd_id:
raise SuffixParsingError('OSD id', string)
if osd_id.isdigit():
return osd_id
raise SuffixParsingError('OSD id', string)
def parse_osd_uuid(string):
osd_id = '%s-' % parse_osd_id(string)
# remove the id first
osd_uuid = string.split(osd_id, 1)[-1]
if not osd_uuid:
raise SuffixParsingError('OSD uuid', string)
return osd_uuid
class Trigger(object):
help = 'systemd helper to activate an OSD'
def __init__(self, argv):
self.argv = argv
@decorators.needs_root
def main(self):
sub_command_help = dedent("""
** DO NOT USE DIRECTLY **
This tool is meant to help the systemd unit that knows about OSDs.
Proxy OSD activation to ``ceph-volume simple activate`` by parsing the
input from systemd, detecting the UUID and ID associated with an OSD::
ceph-volume simple trigger {SYSTEMD-DATA}
The systemd "data" is expected to be in the format of::
{OSD ID}-{OSD UUID}
The devices associated with the OSD need to have been scanned previously,
so that all needed metadata can be used for starting the OSD process.
""")
parser = argparse.ArgumentParser(
prog='ceph-volume simple trigger',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=sub_command_help,
)
parser.add_argument(
'systemd_data',
metavar='SYSTEMD_DATA',
nargs='?',
help='Data from a systemd unit containing ID and UUID of the OSD, like 0-asdf-lkjh'
)
if len(self.argv) == 0:
print(sub_command_help)
return
args = parser.parse_args(self.argv)
osd_id = parse_osd_id(args.systemd_data)
osd_uuid = parse_osd_uuid(args.systemd_data)
Activate([osd_id, osd_uuid], from_trigger=True).main()
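A worked example of the suffix parsing above (the UUID is taken from the
ceph-volume-systemd docstring example elsewhere in this tree):

    systemd_data = '0-8715BEB4-15C5-49DE-BA6F-401086EC7B41'
    parse_osd_id(systemd_data)    # -> '0'
    parse_osd_uuid(systemd_data)  # -> '8715BEB4-15C5-49DE-BA6F-401086EC7B41'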
ceph-main/src/ceph-volume/ceph_volume/drive_group/__init__.py
from .main import Deploy # noqa
ceph-main/src/ceph-volume/ceph_volume/drive_group/main.py
# -*- coding: utf-8 -*-
import argparse
import json
import logging
import sys
from ceph.deployment.drive_group import DriveGroupSpec
from ceph.deployment.drive_selection.selector import DriveSelection
from ceph.deployment.translate import to_ceph_volume
from ceph.deployment.inventory import Device
from ceph_volume.inventory import Inventory
from ceph_volume.devices.lvm.batch import Batch
logger = logging.getLogger(__name__)
class Deploy(object):
help = '''
Deploy OSDs according to a drive groups specification.
The DriveGroup specification must be passed as JSON.
It can either be (preference in this order)
- in a file, path passed as a positional argument
- read from stdin, pass "-" as a positional argument
- a json string passed via the --spec argument
Either the path positional argument or --spec must be specified.
'''
def __init__(self, argv):
self.argv = argv
def main(self):
parser = argparse.ArgumentParser(
prog='ceph-volume drive-group',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=self.help,
)
parser.add_argument(
'path',
nargs='?',
default=None,
help=('Path to file containing drive group spec or "-" to read from stdin'),
)
parser.add_argument(
'--spec',
default='',
nargs='?',
help=('drive-group json string')
)
parser.add_argument(
'--dry-run',
default=False,
action='store_true',
help=('dry run, only print the batch command that would be run'),
)
self.args = parser.parse_args(self.argv)
if self.args.path:
if self.args.path == "-":
commands = self.from_json(sys.stdin)
else:
with open(self.args.path, 'r') as f:
commands = self.from_json(f)
elif self.args.spec:
dg = json.loads(self.args.spec)
commands = self.get_dg_spec(dg)
else:
# either --spec or path arg must be specified
parser.print_help(sys.stderr)
sys.exit(0)
cmd = commands.run()
if not cmd:
logger.error('DriveGroup didn\'t produce any commands')
return
if self.args.dry_run:
logger.info('Returning ceph-volume command (--dry-run was passed): {}'.format(cmd))
print(cmd)
else:
logger.info('Running ceph-volume command: {}'.format(cmd))
batch_args = cmd[0].split(' ')[2:]
b = Batch(batch_args)
b.main()
def from_json(self, file_):
dg = {}
dg = json.load(file_)
return self.get_dg_spec(dg)
def get_dg_spec(self, dg):
dg_spec = DriveGroupSpec._from_json_impl(dg)
dg_spec.validate()
i = Inventory(['--filter-for-batch'])
i.main()
inventory = i.get_report()
devices = [Device.from_json(i) for i in inventory]
selection = DriveSelection(dg_spec, devices)
return to_ceph_volume(selection)
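A hedged sketch of driving this subcommand; the spec fields come from
``ceph.deployment.drive_group`` and the exact set shown here is illustrative,
not authoritative:

    spec = '''{
        "service_type": "osd",
        "service_id": "example",
        "placement": {"host_pattern": "*"},
        "data_devices": {"all": true}
    }'''
    Deploy(['--spec', spec, '--dry-run']).main()
    # with --dry-run this prints the generated `ceph-volume lvm batch`
    # command instead of executing it via Batch()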
ceph-main/src/ceph-volume/ceph_volume/inventory/__init__.py
from .main import Inventory # noqa
ceph-main/src/ceph-volume/ceph_volume/inventory/main.py
# -*- coding: utf-8 -*-
import argparse
import json
from ceph_volume.util.device import Devices, Device
class Inventory(object):
help = "Get this nodes available disk inventory"
def __init__(self, argv):
self.argv = argv
def main(self):
parser = argparse.ArgumentParser(
prog='ceph-volume inventory',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=self.help,
)
parser.add_argument(
'path',
nargs='?',
default=None,
help=('Report on specific disk'),
)
parser.add_argument(
'--format',
choices=['plain', 'json', 'json-pretty'],
default='plain',
help='Output format',
)
parser.add_argument(
'--filter-for-batch',
action='store_true',
help=('Filter devices unsuitable to pass to an OSD service spec, '
'no effect when <path> is passed'),
default=False,
)
parser.add_argument(
'--with-lsm',
action='store_true',
help=('Attempt to retrieve additional health and metadata through '
'libstoragemgmt'),
default=False,
)
self.args = parser.parse_args(self.argv)
if self.args.path:
self.format_report(Device(self.args.path, with_lsm=self.args.with_lsm))
else:
self.format_report(Devices(filter_for_batch=self.args.filter_for_batch,
with_lsm=self.args.with_lsm))
def get_report(self):
if self.args.path:
return Device(self.args.path, with_lsm=self.args.with_lsm).json_report()
else:
return Devices(filter_for_batch=self.args.filter_for_batch, with_lsm=self.args.with_lsm).json_report()
def format_report(self, inventory):
if self.args.format == 'json':
print(json.dumps(inventory.json_report()))
elif self.args.format == 'json-pretty':
print(json.dumps(inventory.json_report(), indent=4, sort_keys=True))
else:
print(inventory.pretty_report())
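Two hedged usage sketches; note that ``get_report()`` relies on ``main()``
having parsed ``self.args`` first, which is exactly how drive_group/main.py
consumes this class (the device path is a placeholder):

    Inventory(['/dev/sda', '--format', 'json-pretty']).main()  # report one disk

    i = Inventory(['--filter-for-batch'])
    i.main()                 # parses args and prints the plain report
    report = i.get_report()  # same data as a list of per-device dicts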
ceph-main/src/ceph-volume/ceph_volume/systemd/__init__.py
from .main import main # noqa
ceph-main/src/ceph-volume/ceph_volume/systemd/main.py
"""
This file is used only by systemd units that are passing their instance suffix
as arguments to this script so that it can parse the suffix into arguments that
``ceph-volume <sub command>`` can consume
"""
import os
import sys
import time
import logging
from ceph_volume import log, process
from ceph_volume.exceptions import SuffixParsingError
def parse_subcommand(string):
subcommand = string.split('-', 1)[0]
if not subcommand:
raise SuffixParsingError('subcommand', string)
return subcommand
def parse_extra_data(string):
# get the subcommand to split on that
sub_command = parse_subcommand(string)
# the split will leave data with a dash, so remove that
data = string.split(sub_command)[-1]
if not data:
raise SuffixParsingError('data', string)
return data.lstrip('-')
def parse_osd_id(string):
osd_id = string.split('-', 1)[0]
if not osd_id:
raise SuffixParsingError('OSD id', string)
if osd_id.isdigit():
return osd_id
raise SuffixParsingError('OSD id', string)
def parse_osd_uuid(string):
osd_id = '%s-' % parse_osd_id(string)
osd_subcommand = '-%s' % parse_subcommand(string)
# remove the id first
trimmed_suffix = string.split(osd_id)[-1]
# now remove the sub command
osd_uuid = trimmed_suffix.split(osd_subcommand)[0]
if not osd_uuid:
raise SuffixParsingError('OSD uuid', string)
return osd_uuid
def main(args=None):
"""
Main entry point for the ``ceph-volume-systemd`` executable. ``args`` are
optional for easier testing of arguments.
Expected input is similar to::
['/path/to/ceph-volume-systemd', '<type>-<extra metadata>']
For example::
[
'/usr/bin/ceph-volume-systemd',
'lvm-0-8715BEB4-15C5-49DE-BA6F-401086EC7B41'
]
The first part of the argument is the only interesting bit, which contains
the metadata needed to proxy the call to ``ceph-volume`` itself.
Reusing the example, the proxy call to ``ceph-volume`` would look like::
ceph-volume lvm trigger 0-8715BEB4-15C5-49DE-BA6F-401086EC7B41
That means that ``lvm`` is used as the subcommand and it is **expected**
that a ``trigger`` sub-command will be present to make sense of the extra
piece of the string.
"""
log.setup(name='ceph-volume-systemd.log', log_path='/var/log/ceph/ceph-volume-systemd.log')
logger = logging.getLogger('systemd')
args = args if args is not None else sys.argv
try:
suffix = args[-1]
except IndexError:
raise RuntimeError('no arguments supplied')
sub_command = parse_subcommand(suffix)
extra_data = parse_extra_data(suffix)
logger.info('raw systemd input received: %s', suffix)
logger.info('parsed sub-command: %s, extra data: %s', sub_command, extra_data)
command = ['ceph-volume', sub_command, 'trigger', extra_data]
tries = int(os.environ.get('CEPH_VOLUME_SYSTEMD_TRIES', 30))
interval = int(os.environ.get('CEPH_VOLUME_SYSTEMD_INTERVAL', 5))
while tries > 0:
try:
# don't log any output to the terminal, just rely on stderr/stdout
# going to logging
process.run(command, terminal_logging=False)
logger.info('successfully triggered activation for: %s', extra_data)
break
except RuntimeError as error:
logger.warning(error)
logger.warning('failed activating OSD, retries left: %s', tries)
tries -= 1
time.sleep(interval)
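Following the module docstring, a worked example of the suffix parsing (UUID
from the docstring example above):

    suffix = 'lvm-0-8715BEB4-15C5-49DE-BA6F-401086EC7B41'
    parse_subcommand(suffix)  # -> 'lvm'
    parse_extra_data(suffix)  # -> '0-8715BEB4-15C5-49DE-BA6F-401086EC7B41'
    # main() then proxies to:
    #   ceph-volume lvm trigger 0-8715BEB4-15C5-49DE-BA6F-401086EC7B41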
ceph-main/src/ceph-volume/ceph_volume/systemd/systemctl.py
"""
Utilities to control systemd units
"""
import logging
from ceph_volume import process
logger = logging.getLogger(__name__)
def start(unit):
process.run(['systemctl', 'start', unit])
def stop(unit):
process.run(['systemctl', 'stop', unit])
def enable(unit, runtime=False):
if runtime:
process.run(['systemctl', 'enable', '--runtime', unit])
else:
process.run(['systemctl', 'enable', unit])
def disable(unit):
process.run(['systemctl', 'disable', unit])
def mask(unit):
process.run(['systemctl', 'mask', unit])
def is_active(unit):
out, err, rc = process.call(
['systemctl', 'is-active', unit],
verbose_on_failure=False
)
return rc == 0
def get_running_osd_ids():
out, err, rc = process.call([
'systemctl',
'show',
'--no-pager',
'--property=Id',
'--state=running',
'ceph-osd@*',
])
osd_ids = []
if rc == 0:
for line in out:
if line:
# example line looks like: ceph-osd@1.service
try:
osd_id = line.split("@")[1].split(".service")[0]
osd_ids.append(osd_id)
except (IndexError, TypeError):
logger.warning("Failed to parse output from systemctl: %s", line)
return osd_ids
def start_osd(id_):
return start(osd_unit % id_)
def stop_osd(id_):
return stop(osd_unit % id_)
def enable_osd(id_):
return enable(osd_unit % id_, runtime=True)
def disable_osd(id_):
return disable(osd_unit % id_)
def osd_is_active(id_):
return is_active(osd_unit % id_)
def enable_volume(id_, fsid, device_type='lvm'):
return enable(volume_unit % (device_type, id_, fsid))
def mask_ceph_disk():
# systemctl allows using a glob like '*' for masking, but there was a bug
# in that it wouldn't allow this for service templates. This means that
# masking ceph-disk@* will not work, so we must link the service directly.
# /etc/systemd takes precedence regardless of the location of the unit
process.run(
['ln', '-sf', '/dev/null', '/etc/systemd/system/[email protected]']
)
#
# templates
#
osd_unit = "ceph-osd@%s"
ceph_disk_unit = "ceph-disk@%s"
volume_unit = "ceph-volume@%s-%s-%s"
ceph-main/src/ceph-volume/ceph_volume/tests/__init__.py
ceph-main/src/ceph-volume/ceph_volume/tests/conftest.py
import os
import pytest
from mock.mock import patch, PropertyMock, create_autospec
from ceph_volume.api import lvm
from ceph_volume.util import disk
from ceph_volume.util import device
from ceph_volume.util.constants import ceph_disk_guids
from ceph_volume import conf, configuration
class Capture(object):
def __init__(self, *a, **kw):
self.a = a
self.kw = kw
self.calls = []
self.return_values = kw.get('return_values', False)
self.always_returns = kw.get('always_returns', False)
def __call__(self, *a, **kw):
self.calls.append({'args': a, 'kwargs': kw})
if self.always_returns:
return self.always_returns
if self.return_values:
return self.return_values.pop()
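# A short illustration of how Capture doubles as a process.run/process.call
# stub in the fixtures below (values are hypothetical):
#
#     fake = Capture(always_returns=([], [], 0))
#     fake(['systemctl', 'start', 'ceph-osd@0'])  # records the call, returns ([], [], 0)
#     fake.calls[0]['args']                       # -> (['systemctl', 'start', 'ceph-osd@0'],)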
class Factory(object):
def __init__(self, **kw):
for k, v in kw.items():
setattr(self, k, v)
@pytest.fixture
def factory():
return Factory
@pytest.fixture
def capture():
return Capture()
@pytest.fixture
def mock_lv_device_generator():
def mock_lv():
size = 21474836480
dev = create_autospec(device.Device)
dev.lv_name = 'lv'
dev.vg_name = 'vg'
dev.path = '{}/{}'.format(dev.vg_name, dev.lv_name)
dev.used_by_ceph = False
dev.vg_size = [size]
dev.vg_free = dev.vg_size
dev.available_lvm = True
dev.is_device = False
dev.lvs = [lvm.Volume(vg_name=dev.vg_name, lv_name=dev.lv_name, lv_size=size, lv_tags='')]
return dev
return mock_lv
def mock_device():
dev = create_autospec(device.Device)
dev.path = '/dev/foo'
dev.vg_name = 'vg_foo'
dev.lv_name = 'lv_foo'
dev.symlink = None
dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)]
dev.available_lvm = True
dev.vg_size = [21474836480]
dev.vg_free = dev.vg_size
dev.lvs = []
return dev
@pytest.fixture(params=range(1,4))
def mock_devices_available(request):
ret = []
for n in range(request.param):
dev = mock_device()
# after v15.2.8, a single VG is created for each PV
dev.vg_name = f'vg_foo_{n}'
dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)]
ret.append(dev)
return ret
@pytest.fixture
def mock_device_generator():
return mock_device
@pytest.fixture(params=range(1,11))
def osds_per_device(request):
return request.param
@pytest.fixture
def fake_run(monkeypatch):
fake_run = Capture()
monkeypatch.setattr('ceph_volume.process.run', fake_run)
return fake_run
@pytest.fixture
def fake_call(monkeypatch):
fake_call = Capture(always_returns=([], [], 0))
monkeypatch.setattr('ceph_volume.process.call', fake_call)
return fake_call
@pytest.fixture
def fakedevice(factory):
def apply(**kw):
params = dict(
path='/dev/sda',
abspath='/dev/sda',
lv_api=None,
pvs_api=[],
disk_api={},
sys_api={},
exists=True,
is_lvm_member=True,
)
params.update(dict(kw))
params['lvm_size'] = disk.Size(b=params['sys_api'].get("size", 0))
return factory(**params)
return apply
@pytest.fixture
def stub_call(monkeypatch):
"""
Monkeypatches process.call, so that a caller can add behavior to the response
"""
def apply(return_values):
if isinstance(return_values, tuple):
return_values = [return_values]
stubbed_call = Capture(return_values=return_values)
monkeypatch.setattr('ceph_volume.process.call', stubbed_call)
return stubbed_call
return apply
@pytest.fixture(autouse=True)
def reset_cluster_name(request, monkeypatch):
"""
The globally available ``ceph_volume.conf.cluster`` might get mangled in
tests; make sure that after every test it gets reset, preventing pollution
from leaking into other tests later.
"""
def fin():
conf.cluster = None
try:
os.environ.pop('CEPH_CONF')
except KeyError:
pass
request.addfinalizer(fin)
@pytest.fixture
def conf_ceph(monkeypatch):
"""
Monkeypatches ceph_volume.conf.ceph, which is meant to parse/read
a ceph.conf. The patching is naive, it allows one to set return values for
specific method calls.
"""
def apply(**kw):
stub = Factory(**kw)
monkeypatch.setattr(conf, 'ceph', stub)
return stub
return apply
@pytest.fixture
def conf_ceph_stub(monkeypatch, tmpfile):
"""
Monkeypatches ceph_volume.conf.ceph with contents from a string that are
written to a temporary file and then is fed through the same ceph.conf
loading mechanisms for testing. Unlike ``conf_ceph`` which is just a fake,
we are actually loading values as seen on a ceph.conf file
This is useful when more complex ceph.conf's are needed. In the case of
just trying to validate a key/value behavior ``conf_ceph`` is better
suited.
"""
def apply(contents):
conf_path = tmpfile(contents=contents)
parser = configuration.load(conf_path)
monkeypatch.setattr(conf, 'ceph', parser)
return parser
return apply
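# A minimal usage sketch (illustrative, not part of the suite):
#
#   def test_example(conf_ceph_stub):
#       conf_ceph_stub('[global]\nfsid = 1234-lkjh')
#       assert conf.ceph.get('global', 'fsid') == '1234-lkjh'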
@pytest.fixture
def is_root(monkeypatch):
"""
Patch ``os.getuid()`` so that ceph-volume's decorators that ensure a user
is root (or is sudoing to superuser) can continue as-is
"""
monkeypatch.setattr('os.getuid', lambda: 0)
@pytest.fixture
def tmpfile(tmpdir):
"""
    Return a helper that creates a temporary file, optionally filled with
    contents, and returns the absolute path to that file when called
"""
def generate_file(name='file', contents='', directory=None):
directory = directory or str(tmpdir)
path = os.path.join(directory, name)
with open(path, 'w') as fp:
fp.write(contents)
return path
return generate_file
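# A minimal usage sketch (illustrative, not part of the suite):
#
#   def test_example(tmpfile):
#       path = tmpfile(name='ceph.conf', contents='[global]\n')
#       with open(path) as fp:
#           assert fp.read() == '[global]\n'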
@pytest.fixture
def disable_kernel_queries(monkeypatch):
'''
This speeds up calls to Device and Disk
'''
monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda device='': {})
monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: {})
@pytest.fixture(params=[
'', 'ceph data', 'ceph journal', 'ceph block',
'ceph block.wal', 'ceph block.db', 'ceph lockbox'])
def ceph_partlabel(request):
return request.param
@pytest.fixture(params=list(ceph_disk_guids.keys()))
def ceph_parttype(request):
return request.param
@pytest.fixture
def lsblk_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
lambda path: {'TYPE': 'disk',
'NAME': 'sda',
'PARTLABEL': ceph_partlabel,
'PARTTYPE': ceph_parttype})
monkeypatch.setattr("ceph_volume.util.device.disk.lsblk_all",
lambda: [{'TYPE': 'disk',
'NAME': 'sda',
'PARTLABEL': ceph_partlabel,
'PARTTYPE': ceph_parttype}])
@pytest.fixture
def blkid_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
lambda path: {'TYPE': 'disk',
'PARTLABEL': ceph_partlabel,
'PARTTYPE': ceph_parttype})
@pytest.fixture(params=[
('gluster partition', 'gluster partition'),
# falls back to blkid
('', 'gluster partition'),
('gluster partition', ''),
])
def device_info_not_ceph_disk_member(monkeypatch, request):
monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
lambda path: {'TYPE': 'disk',
'NAME': 'sda',
'PARTLABEL': request.param[0]})
monkeypatch.setattr("ceph_volume.util.device.disk.lsblk_all",
lambda: [{'TYPE': 'disk',
'NAME': 'sda',
'PARTLABEL': request.param[0]}])
monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
lambda path: {'TYPE': 'disk',
'PARTLABEL': request.param[1]})
@pytest.fixture
def patched_get_block_devs_sysfs():
with patch('ceph_volume.util.disk.get_block_devs_sysfs') as p:
yield p
@pytest.fixture
def patch_bluestore_label():
with patch('ceph_volume.util.device.Device.has_bluestore_label',
new_callable=PropertyMock) as p:
p.return_value = False
yield p
@pytest.fixture
def device_info(monkeypatch, patch_bluestore_label):
def apply(devices=None, lsblk=None, lv=None, blkid=None, udevadm=None,
has_bluestore_label=False):
if devices:
for dev in devices.keys():
devices[dev]['device_nodes'] = os.path.basename(dev)
else:
devices = {}
lsblk = lsblk if lsblk else {}
blkid = blkid if blkid else {}
udevadm = udevadm if udevadm else {}
lv = Factory(**lv) if lv else None
monkeypatch.setattr("ceph_volume.sys_info.devices", {})
monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda device='': devices)
if not devices:
monkeypatch.setattr("ceph_volume.util.device.lvm.get_single_lv", lambda filters: lv)
else:
monkeypatch.setattr("ceph_volume.util.device.lvm.get_device_lvs",
lambda path: [lv])
monkeypatch.setattr("ceph_volume.util.device.disk.lsblk", lambda path: lsblk)
monkeypatch.setattr("ceph_volume.util.device.disk.blkid", lambda path: blkid)
monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: udevadm)
return apply
@pytest.fixture(params=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.999, 1.0])
def data_allocate_fraction(request):
return request.param
@pytest.fixture
def fake_filesystem(fs):
fs.create_dir('/sys/block/sda/slaves')
fs.create_dir('/sys/block/sda/queue')
fs.create_dir('/sys/block/rbd0')
yield fs
| 10,342 | 30.726994 | 98 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/test_configuration.py
|
import os
try:
from cStringIO import StringIO
except ImportError: # pragma: no cover
from io import StringIO # pragma: no cover
from textwrap import dedent
import pytest
from ceph_volume import configuration, exceptions
tabbed_conf = """
[global]
default = 0
other_h = 1 # comment
other_c = 1 ; comment
colon = ;
hash = #
"""
class TestConf(object):
def setup_method(self):
self.conf_file = StringIO(dedent("""
[foo]
default = 0
"""))
def test_get_non_existing_list(self):
cfg = configuration.Conf()
cfg.is_valid = lambda: True
cfg.read_conf(self.conf_file)
assert cfg.get_list('global', 'key') == []
def test_get_non_existing_list_get_default(self):
cfg = configuration.Conf()
cfg.is_valid = lambda: True
cfg.read_conf(self.conf_file)
assert cfg.get_list('global', 'key', ['a']) == ['a']
def test_get_rid_of_comments(self):
cfg = configuration.Conf()
cfg.is_valid = lambda: True
conf_file = StringIO(dedent("""
[foo]
default = 0 # this is a comment
"""))
cfg.read_conf(conf_file)
assert cfg.get_list('foo', 'default') == ['0']
def test_gets_split_on_commas(self):
cfg = configuration.Conf()
cfg.is_valid = lambda: True
conf_file = StringIO(dedent("""
[foo]
default = 0,1,2,3 # this is a comment
"""))
cfg.read_conf(conf_file)
assert cfg.get_list('foo', 'default') == ['0', '1', '2', '3']
def test_spaces_and_tabs_are_ignored(self):
cfg = configuration.Conf()
cfg.is_valid = lambda: True
conf_file = StringIO(dedent("""
[foo]
default = 0, 1, 2 ,3 # this is a comment
"""))
cfg.read_conf(conf_file)
assert cfg.get_list('foo', 'default') == ['0', '1', '2', '3']
class TestLoad(object):
def test_load_from_path(self, tmpdir):
conf_path = os.path.join(str(tmpdir), 'ceph.conf')
with open(conf_path, 'w') as conf:
conf.write(tabbed_conf)
result = configuration.load(conf_path)
assert result.get('global', 'default') == '0'
def test_load_with_colon_comments(self, tmpdir):
conf_path = os.path.join(str(tmpdir), 'ceph.conf')
with open(conf_path, 'w') as conf:
conf.write(tabbed_conf)
result = configuration.load(conf_path)
assert result.get('global', 'other_c') == '1'
def test_load_with_hash_comments(self, tmpdir):
conf_path = os.path.join(str(tmpdir), 'ceph.conf')
with open(conf_path, 'w') as conf:
conf.write(tabbed_conf)
result = configuration.load(conf_path)
assert result.get('global', 'other_h') == '1'
def test_path_does_not_exist(self):
with pytest.raises(exceptions.ConfigurationError):
conf = configuration.load('/path/does/not/exist/ceph.con')
conf.is_valid()
def test_unable_to_read_configuration(self, tmpdir, capsys):
ceph_conf = os.path.join(str(tmpdir), 'ceph.conf')
with open(ceph_conf, 'w') as config:
config.write(']broken] config\n[[')
with pytest.raises(RuntimeError):
configuration.load(ceph_conf)
stdout, stderr = capsys.readouterr()
assert 'File contains no section headers' in stderr
@pytest.mark.parametrize('commented', ['colon','hash'])
    def test_comment_as_a_value(self, tmpdir, commented):
conf_path = os.path.join(str(tmpdir), 'ceph.conf')
with open(conf_path, 'w') as conf:
conf.write(tabbed_conf)
result = configuration.load(conf_path)
assert result.get('global', commented) == ''
| 3,819 | 31.372881 | 70 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/test_decorators.py
|
import os
import pytest
from ceph_volume import exceptions, decorators, terminal
class TestNeedsRoot(object):
def test_is_root(self, monkeypatch):
def func():
return True
monkeypatch.setattr(decorators.os, 'getuid', lambda: 0)
assert decorators.needs_root(func)() is True
def test_is_not_root_env_var_skip_needs_root(self, monkeypatch):
def func():
return True
monkeypatch.setattr(decorators.os, 'getuid', lambda: 123)
monkeypatch.setattr(decorators.os, 'environ', {'CEPH_VOLUME_SKIP_NEEDS_ROOT': '1'})
assert decorators.needs_root(func)() is True
def test_is_not_root(self, monkeypatch):
def func():
return True # pragma: no cover
monkeypatch.setattr(decorators.os, 'getuid', lambda: 20)
with pytest.raises(exceptions.SuperUserError) as error:
decorators.needs_root(func)()
msg = 'This command needs to be executed with sudo or as root'
assert str(error.value) == msg
class TestExceptionMessage(object):
def test_has_str_method(self):
result = decorators.make_exception_message(RuntimeError('an error'))
expected = "%s %s\n" % (terminal.red_arrow, 'RuntimeError: an error')
assert result == expected
def test_has_no_str_method(self):
class Error(Exception):
pass
result = decorators.make_exception_message(Error())
expected = "%s %s\n" % (terminal.red_arrow, 'Error')
assert result == expected
class TestCatches(object):
def teardown_method(self):
try:
del(os.environ['CEPH_VOLUME_DEBUG'])
except KeyError:
pass
def test_ceph_volume_debug_enabled(self):
os.environ['CEPH_VOLUME_DEBUG'] = '1'
@decorators.catches() # noqa
def func():
raise RuntimeError()
with pytest.raises(RuntimeError):
func()
def test_ceph_volume_debug_disabled_no_exit(self, capsys):
@decorators.catches(exit=False)
def func():
raise RuntimeError()
func()
stdout, stderr = capsys.readouterr()
assert 'RuntimeError\n' in stderr
def test_ceph_volume_debug_exits(self, capsys):
@decorators.catches()
def func():
raise RuntimeError()
with pytest.raises(SystemExit):
func()
stdout, stderr = capsys.readouterr()
assert 'RuntimeError\n' in stderr
| 2,487 | 30.493671 | 91 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/test_inventory.py
|
# -*- coding: utf-8 -*-
import pytest
from ceph_volume.util.device import Devices
from ceph_volume.util.lsmdisk import LSMDisk
from mock.mock import patch
import ceph_volume.util.lsmdisk as lsmdisk
@pytest.fixture
@patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
def device_report_keys(device_info):
device_info(devices={
# example output of disk.get_devices()
'/dev/sdb': {'human_readable_size': '1.82 TB',
'locked': 0,
'model': 'PERC H700',
'nr_requests': '128',
'partitions': {},
'path': '/dev/sdb',
'removable': '0',
'rev': '2.10',
'ro': '0',
'rotational': '1',
'sas_address': '',
'sas_device_handle': '',
'scheduler_mode': 'cfq',
'sectors': 0,
'sectorsize': '512',
'size': 1999844147200.0,
'support_discard': '',
'vendor': 'DELL',
'device_id': 'Vendor-Model-Serial',
'device_nodes': 'sdb'}
}
)
report = Devices().json_report()[0]
return list(report.keys())
@pytest.fixture
@patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
def device_sys_api_keys(device_info):
device_info(devices={
# example output of disk.get_devices()
'/dev/sdb': {'human_readable_size': '1.82 TB',
'locked': 0,
'model': 'PERC H700',
'nr_requests': '128',
'partitions': {},
'path': '/dev/sdb',
'removable': '0',
'rev': '2.10',
'ro': '0',
'rotational': '1',
'sas_address': '',
'sas_device_handle': '',
'scheduler_mode': 'cfq',
'sectors': 0,
'sectorsize': '512',
'size': 1999844147200.0,
'support_discard': '',
'vendor': 'DELL',
'device_nodes': 'sdb'}
}
)
report = Devices().json_report()[0]
return list(report['sys_api'].keys())
@pytest.fixture
@patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
def device_data(device_info):
device_info(
devices={
# example output of disk.get_devices()
'/dev/sdb': {
'human_readable_size': '1.82 TB',
'locked': 0,
'model': 'PERC H700',
'nr_requests': '128',
'partitions': {},
'path': '/dev/sdb',
'removable': '0',
'rev': '2.10',
'ro': '0',
'rotational': '1',
'sas_address': '',
'sas_device_handle': '',
'scheduler_mode': 'cfq',
'sectors': 0,
'sectorsize': '512',
'size': 1999844147200.0,
'support_discard': '',
'vendor': 'DELL',
'device_nodes': 'sdb'
}
}
)
dev = Devices().devices[0]
dev.lsm_data = {
"serialNum": 'S2X9NX0H935283',
"transport": 'SAS',
"mediaType": 'HDD',
"rpm": 10000,
"linkSpeed": 6000,
"health": 'Good',
"ledSupport": {
"IDENTsupport": 'Supported',
"IDENTstatus": 'Off',
"FAILsupport": 'Supported',
"FAILstatus": 'Off',
},
"errors": [],
}
return dev.json_report()
class TestInventory(object):
expected_keys = [
'ceph_device',
'path',
'rejected_reasons',
'sys_api',
'available',
'lvs',
'device_id',
'lsm_data',
]
expected_sys_api_keys = [
'human_readable_size',
'locked',
'model',
'nr_requests',
'partitions',
'path',
'removable',
'rev',
'ro',
'rotational',
'sas_address',
'sas_device_handle',
'scheduler_mode',
'sectors',
'sectorsize',
'size',
'support_discard',
'vendor',
'device_nodes'
]
expected_lsm_keys = [
'serialNum',
'transport',
'mediaType',
'rpm',
'linkSpeed',
'health',
'ledSupport',
'errors',
]
def test_json_inventory_keys_unexpected(self, fake_call, device_report_keys):
for k in device_report_keys:
assert k in self.expected_keys, "unexpected key {} in report".format(k)
def test_json_inventory_keys_missing(self, fake_call, device_report_keys):
for k in self.expected_keys:
assert k in device_report_keys, "expected key {} in report".format(k)
def test_sys_api_keys_unexpected(self, fake_call, device_sys_api_keys):
for k in device_sys_api_keys:
assert k in self.expected_sys_api_keys, "unexpected key {} in sys_api field".format(k)
def test_sys_api_keys_missing(self, fake_call, device_sys_api_keys):
for k in self.expected_sys_api_keys:
assert k in device_sys_api_keys, "expected key {} in sys_api field".format(k)
def test_lsm_data_type_unexpected(self, fake_call, device_data):
assert isinstance(device_data['lsm_data'], dict), "lsm_data field must be of type dict"
def test_lsm_data_keys_unexpected(self, fake_call, device_data):
for k in device_data['lsm_data'].keys():
assert k in self.expected_lsm_keys, "unexpected key {} in lsm_data field".format(k)
def test_lsm_data_keys_missing(self, fake_call, device_data):
lsm_keys = device_data['lsm_data'].keys()
assert lsm_keys
for k in self.expected_lsm_keys:
assert k in lsm_keys, "expected key {} in lsm_data field".format(k)
@pytest.fixture
def lsm_info(monkeypatch):
def mock_query_lsm(_, func, path):
query_map = {
'serial_num_get': "S2X9NX0H935283",
'link_type_get': 6,
'rpm_get': 0,
'link_speed_get': 6000,
'health_status_get': 2,
'led_status_get': 36,
}
return query_map.get(func, 'Unknown')
# mocked states and settings taken from the libstoragemgmt code base
# c_binding/include/libstoragemgmt/libstoragemgmt_types.h at
# https://github.com/libstorage/libstoragemgmt/
mock_health_map = {
-1: "Unknown",
0: "Fail",
1: "Warn",
2: "Good",
}
mock_transport_map = {
-1: "Unavailable",
0: "Fibre Channel",
2: "IBM SSA",
3: "Serial Bus",
4: "SCSI RDMA",
5: "iSCSI",
6: "SAS",
7: "ADT (Tape)",
8: "ATA/SATA",
9: "USB",
10: "SCSI over PCI-E",
11: "PCI-E",
}
class MockLEDStates():
LED_STATUS_UNKNOWN = 1
LED_STATUS_IDENT_ON = 2
LED_STATUS_IDENT_OFF = 4
LED_STATUS_IDENT_UNKNOWN = 8
LED_STATUS_FAULT_ON = 16
LED_STATUS_FAULT_OFF = 32
LED_STATUS_FAULT_UNKNOWN = 64
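    # the mocked led_status_get value of 36 decodes against these flags as
    # LED_STATUS_IDENT_OFF (4) | LED_STATUS_FAULT_OFF (32), which is why the
    # tests below expect both LED states to report 'Off'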
monkeypatch.setattr(LSMDisk, '_query_lsm', mock_query_lsm)
monkeypatch.setattr(lsmdisk, 'health_map', mock_health_map)
monkeypatch.setattr(lsmdisk, 'transport_map', mock_transport_map)
monkeypatch.setattr(lsmdisk, 'lsm_Disk', MockLEDStates)
return LSMDisk('/dev/sda')
class TestLSM(object):
def test_lsmdisk_health(self, lsm_info):
assert lsm_info.health == "Good"
def test_lsmdisk_transport(self, lsm_info):
assert lsm_info.transport == 'SAS'
def test_lsmdisk_mediatype(self, lsm_info):
assert lsm_info.media_type == 'Flash'
def test_lsmdisk_led_ident_support(self, lsm_info):
assert lsm_info.led_ident_support == 'Supported'
def test_lsmdisk_led_ident(self, lsm_info):
assert lsm_info.led_ident_state == 'Off'
def test_lsmdisk_led_fault_support(self, lsm_info):
assert lsm_info.led_fault_support == 'Supported'
def test_lsmdisk_led_fault(self, lsm_info):
assert lsm_info.led_fault_state == 'Off'
def test_lsmdisk_report(self, lsm_info):
assert isinstance(lsm_info.json_report(), dict)
| 8,545 | 31.371212 | 98 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/test_main.py
|
import os
import pytest
from ceph_volume import main
class TestVolume(object):
def test_main_spits_help_with_no_arguments(self, capsys):
with pytest.raises(SystemExit):
main.Volume(argv=[])
stdout, stderr = capsys.readouterr()
assert 'Log Path' in stdout
def test_warn_about_using_help_for_full_options(self, capsys):
with pytest.raises(SystemExit):
main.Volume(argv=[])
stdout, stderr = capsys.readouterr()
assert 'See "ceph-volume --help" for full list' in stdout
def test_environ_vars_show_up(self, capsys):
os.environ['CEPH_CONF'] = '/opt/ceph.conf'
with pytest.raises(SystemExit):
main.Volume(argv=[])
stdout, stderr = capsys.readouterr()
assert 'CEPH_CONF' in stdout
assert '/opt/ceph.conf' in stdout
def test_flags_are_parsed_with_help(self, capsys):
with pytest.raises(SystemExit):
main.Volume(argv=['ceph-volume', '--help'])
stdout, stderr = capsys.readouterr()
assert '--cluster' in stdout
assert '--log-path' in stdout
def test_log_ignoring_missing_ceph_conf(self, caplog):
with pytest.raises(SystemExit) as error:
main.Volume(argv=['ceph-volume', '--cluster', 'barnacle', 'lvm', '--help'])
# make sure we aren't causing an actual error
assert error.value.code == 0
log = caplog.records[-1]
assert log.message == 'ignoring inability to load ceph.conf'
assert log.levelname == 'WARNING'
def test_logs_current_command(self, caplog):
with pytest.raises(SystemExit) as error:
main.Volume(argv=['ceph-volume', '--cluster', 'barnacle', 'lvm', '--help'])
# make sure we aren't causing an actual error
assert error.value.code == 0
log = caplog.records[-2]
assert log.message == 'Running command: ceph-volume --cluster barnacle lvm --help'
assert log.levelname == 'INFO'
def test_logs_set_level_warning(self, caplog):
with pytest.raises(SystemExit) as error:
main.Volume(argv=['ceph-volume', '--log-level', 'warning', '--cluster', 'barnacle', 'lvm', '--help'])
# make sure we aren't causing an actual error
assert error.value.code == 0
assert caplog.records
# only log levels of 'WARNING'
for log in caplog.records:
assert log.levelname == 'WARNING'
def test_logs_incorrect_log_level(self, capsys):
with pytest.raises(SystemExit) as error:
main.Volume(argv=['ceph-volume', '--log-level', 'foo', '--cluster', 'barnacle', 'lvm', '--help'])
# make sure this is an error
assert error.value.code != 0
stdout, stderr = capsys.readouterr()
assert "invalid choice" in stderr
| 2,823 | 39.342857 | 113 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/test_process.py
|
import pytest
import logging
from ceph_volume.tests.conftest import Factory
from ceph_volume import process
@pytest.fixture
def mock_call(monkeypatch):
"""
Monkeypatches process.call, so that a caller can add behavior to the response
"""
def apply(stdout=None, stderr=None, returncode=0):
stdout_stream = Factory(read=lambda: stdout)
stderr_stream = Factory(read=lambda: stderr)
return_value = Factory(
stdout=stdout_stream,
stderr=stderr_stream,
wait=lambda: returncode,
communicate=lambda x: (stdout, stderr, returncode)
)
monkeypatch.setattr(
'ceph_volume.process.subprocess.Popen',
lambda *a, **kw: return_value)
return apply
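# A minimal usage sketch (illustrative, not part of the suite): once applied,
# process.call reads stdout/stderr and the return code from the faked Popen:
#
#   def test_example(mock_call):
#       mock_call(stdout='out\n', stderr='', returncode=0)
#       process.call(['ls'])  # logs the faked 'out' instead of running ls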
class TestCall(object):
def test_stderr_terminal_and_logfile(self, mock_call, caplog, capsys):
caplog.set_level(logging.INFO)
mock_call(stdout='stdout\n', stderr='some stderr message\n')
process.call(['ls'], terminal_verbose=True)
out, err = capsys.readouterr()
log_lines = [line[-1] for line in caplog.record_tuples]
assert 'Running command: ' in log_lines[0]
assert 'ls' in log_lines[0]
assert 'stderr some stderr message' in log_lines[-1]
assert 'some stderr message' in err
def test_stderr_terminal_and_logfile_off(self, mock_call, caplog, capsys):
caplog.set_level(logging.INFO)
mock_call(stdout='stdout\n', stderr='some stderr message\n')
process.call(['ls'], terminal_verbose=False)
out, err = capsys.readouterr()
log_lines = [line[-1] for line in caplog.record_tuples]
assert 'Running command: ' in log_lines[0]
assert 'ls' in log_lines[0]
assert 'stderr some stderr message' in log_lines[-1]
assert out == ''
def test_verbose_on_failure(self, mock_call, caplog, capsys):
caplog.set_level(logging.INFO)
mock_call(stdout='stdout\n', stderr='stderr\n', returncode=1)
process.call(['ls'], terminal_verbose=False, logfile_verbose=False)
out, err = capsys.readouterr()
log_lines = '\n'.join([line[-1] for line in caplog.record_tuples])
assert 'Running command: ' in log_lines
assert 'ls' in log_lines
assert 'stderr' in log_lines
assert 'stdout: stdout' in err
assert out == ''
def test_silent_verbose_on_failure(self, mock_call, caplog, capsys):
caplog.set_level(logging.INFO)
mock_call(stdout='stdout\n', stderr='stderr\n', returncode=1)
process.call(['ls'], verbose_on_failure=False)
out, err = capsys.readouterr()
log_lines = '\n'.join([line[-1] for line in caplog.record_tuples])
assert 'Running command: ' in log_lines
assert 'ls' in log_lines
assert 'stderr' in log_lines
assert out == ''
class TestFunctionalCall(object):
def test_stdin(self):
process.call(['xargs', 'ls'], stdin="echo '/'")
def test_unicode_encoding(self):
process.call(['echo', u'\xd0'])
def test_unicode_encoding_stdin(self):
process.call(['echo'], stdin=u'\xd0'.encode('utf-8'))
class TestFunctionalRun(object):
def test_log_descriptors(self):
process.run(['ls', '-l'])
| 3,292 | 34.408602 | 81 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/test_terminal.py
|
# -*- mode:python; tab-width:4; indent-tabs-mode:nil; coding:utf-8 -*-
import codecs
import io
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import pytest
import sys
from ceph_volume import terminal
from ceph_volume.log import setup_console
class SubCommand(object):
help = "this is the subcommand help"
def __init__(self, argv):
self.argv = argv
def main(self):
pass
class BadSubCommand(object):
def __init__(self, argv):
self.argv = argv
def main(self):
raise SystemExit(100)
class TestSubhelp(object):
def test_no_sub_command_help(self):
assert terminal.subhelp({}) == ''
def test_single_level_help(self):
result = terminal.subhelp({'sub': SubCommand})
assert 'this is the subcommand help' in result
def test_has_title_header(self):
result = terminal.subhelp({'sub': SubCommand})
assert 'Available subcommands:' in result
def test_command_with_no_help(self):
class SubCommandNoHelp(object):
pass
result = terminal.subhelp({'sub': SubCommandNoHelp})
assert result == ''
class TestDispatch(object):
def test_no_subcommand_found(self):
result = terminal.dispatch({'sub': SubCommand}, argv=[])
assert result is None
def test_no_main_found(self):
class NoMain(object):
def __init__(self, argv):
pass
result = terminal.dispatch({'sub': NoMain}, argv=['sub'])
assert result is None
def test_subcommand_found_and_dispatched(self):
with pytest.raises(SystemExit) as error:
terminal.dispatch({'sub': SubCommand}, argv=['sub'])
assert str(error.value) == '0'
def test_subcommand_found_and_dispatched_with_errors(self):
with pytest.raises(SystemExit) as error:
terminal.dispatch({'sub': BadSubCommand}, argv=['sub'])
assert str(error.value) == '100'
@pytest.fixture
def stream():
def make_stream(buffer, encoding):
# mock a stdout with given encoding
if sys.version_info >= (3, 0):
stderr = sys.stderr
stream = io.TextIOWrapper(buffer,
encoding=encoding,
errors=stderr.errors,
newline=stderr.newlines,
line_buffering=stderr.line_buffering)
else:
stream = codecs.getwriter(encoding)(buffer)
            # StreamWriter does not have an encoding attached to it; it will
            # ask the inner buffer for the "encoding" attribute in this case
stream.encoding = encoding
return stream
return make_stream
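# A minimal usage sketch (illustrative, not part of the suite): wrap a
# BytesIO buffer so writes go through a real encoder, then inspect the bytes:
#
#   writer = stream(io.BytesIO(), 'ascii')  # 'stream' being the fixture value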
class TestWriteUnicode(object):
def setup_method(self):
        self.octopus_and_squid_en = u'octopus and squid'
        self.octopus_and_squid_zh = u'章鱼和鱿鱼'
        self.message = self.octopus_and_squid_en + self.octopus_and_squid_zh
setup_console()
def test_stdout_writer(self, capsys):
# should work with whatever stdout is
terminal.stdout(self.message)
_, err = capsys.readouterr()
        assert self.octopus_and_squid_en in err
        assert self.octopus_and_squid_zh in err
@pytest.mark.parametrize('encoding', ['ascii', 'utf8'])
def test_writer_log(self, stream, encoding, monkeypatch, caplog):
writer = StringIO()
terminal._Write(_writer=writer).raw(self.message)
writer.flush()
writer.seek(0)
output = writer.readlines()[0]
        assert self.octopus_and_squid_en in output
@pytest.mark.parametrize('encoding', ['utf8'])
def test_writer(self, encoding, stream, monkeypatch, capsys, caplog):
buffer = io.BytesIO()
writer = stream(buffer, encoding)
terminal._Write(_writer=writer).raw(self.message)
writer.flush()
writer.seek(0)
val = buffer.getvalue()
        assert self.octopus_and_squid_en.encode(encoding) in val
def test_writer_uses_log_on_unicodeerror(self, stream, monkeypatch, capture):
if sys.version_info > (3,):
pytest.skip("Something breaks inside of pytest's capsys")
monkeypatch.setattr(terminal.terminal_logger, 'info', capture)
buffer = io.BytesIO()
writer = stream(buffer, 'ascii')
terminal._Write(_writer=writer).raw(self.message)
        assert self.octopus_and_squid_en in capture.calls[0]['args'][0]
| 4,509 | 30.319444 | 81 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/api/test_lvm.py
|
import os
import pytest
from mock.mock import patch
from ceph_volume import process, exceptions
from ceph_volume.api import lvm as api
class TestParseTags(object):
def test_no_tags_means_empty_dict(self):
result = api.parse_tags('')
assert result == {}
def test_single_tag_gets_parsed(self):
result = api.parse_tags('ceph.osd_something=1')
assert result == {'ceph.osd_something': '1'}
def test_non_ceph_tags_are_skipped(self):
result = api.parse_tags('foo')
assert result == {}
def test_mixed_non_ceph_tags(self):
result = api.parse_tags('foo,ceph.bar=1')
assert result == {'ceph.bar': '1'}
def test_multiple_csv_expands_in_dict(self):
result = api.parse_tags('ceph.osd_something=1,ceph.foo=2,ceph.fsid=0000')
        # assert them piecemeal to avoid the unordered dict nature
assert result['ceph.osd_something'] == '1'
assert result['ceph.foo'] == '2'
assert result['ceph.fsid'] == '0000'
class TestVolume(object):
def test_is_ceph_device(self):
lv_tags = "ceph.type=data,ceph.osd_id=0"
osd = api.Volume(lv_name='osd/volume', lv_tags=lv_tags)
assert api.is_ceph_device(osd)
@pytest.mark.parametrize('dev',[
'/dev/sdb',
api.VolumeGroup(vg_name='foo'),
api.Volume(lv_name='vg/no_osd', lv_tags='', lv_path='lv/path'),
api.Volume(lv_name='vg/no_osd', lv_tags='ceph.osd_id=null', lv_path='lv/path'),
None,
])
def test_is_not_ceph_device(self, dev):
assert not api.is_ceph_device(dev)
def test_no_empty_lv_name(self):
with pytest.raises(ValueError):
api.Volume(lv_name='', lv_tags='')
class TestVolumeGroup(object):
def test_volume_group_no_empty_name(self):
with pytest.raises(ValueError):
api.VolumeGroup(vg_name='')
class TestVolumeGroupFree(object):
def test_integer_gets_produced(self):
vg = api.VolumeGroup(vg_name='nosize', vg_free_count=100, vg_extent_size=4194304)
assert vg.free == 100 * 4194304
class TestCreateLVs(object):
def setup_method(self):
self.vg = api.VolumeGroup(vg_name='ceph',
vg_extent_size=1073741824,
vg_extent_count=99999999,
vg_free_count=999)
def test_creates_correct_lv_number_from_parts(self, monkeypatch):
monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
lvs = api.create_lvs(self.vg, parts=4)
assert len(lvs) == 4
def test_suffixes_the_size_arg(self, monkeypatch):
monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
lvs = api.create_lvs(self.vg, parts=4)
assert lvs[0][1]['extents'] == 249
def test_only_uses_free_size(self, monkeypatch):
monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
vg = api.VolumeGroup(vg_name='ceph',
vg_extent_size=1073741824,
vg_extent_count=99999999,
vg_free_count=1000)
lvs = api.create_lvs(vg, parts=4)
assert lvs[0][1]['extents'] == 250
def test_null_tags_are_set_by_default(self, monkeypatch):
monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
kwargs = api.create_lvs(self.vg, parts=4)[0][1]
assert list(kwargs['tags'].values()) == ['null', 'null', 'null', 'null']
def test_fallback_to_one_part(self, monkeypatch):
monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
lvs = api.create_lvs(self.vg)
assert len(lvs) == 1
class TestVolumeGroupSizing(object):
def setup_method(self):
self.vg = api.VolumeGroup(vg_name='ceph',
vg_extent_size=1073741824,
vg_free_count=1024)
def test_parts_and_size_errors(self):
with pytest.raises(ValueError) as error:
self.vg.sizing(parts=4, size=10)
assert "Cannot process sizing" in str(error.value)
def test_zero_parts_produces_100_percent(self):
result = self.vg.sizing(parts=0)
assert result['percentages'] == 100
def test_two_parts_produces_50_percent(self):
result = self.vg.sizing(parts=2)
assert result['percentages'] == 50
def test_two_parts_produces_half_size(self):
result = self.vg.sizing(parts=2)
assert result['sizes'] == 512
def test_half_size_produces_round_sizes(self):
result = self.vg.sizing(size=512)
assert result['sizes'] == 512
assert result['percentages'] == 50
assert result['parts'] == 2
def test_bit_more_than_half_size_allocates_full_size(self):
        # 513 can't allocate more than 1 part, so it just falls back to using
        # the whole device
result = self.vg.sizing(size=513)
assert result['sizes'] == 1024
assert result['percentages'] == 100
assert result['parts'] == 1
def test_extents_are_halfed_rounded_down(self):
result = self.vg.sizing(size=512)
assert result['extents'] == 512
def test_bit_less_size_rounds_down(self):
result = self.vg.sizing(size=129)
assert result['sizes'] == 146
assert result['percentages'] == 14
assert result['parts'] == 7
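    # a worked sketch of the rounding above, assuming ``sizing`` floors at
    # each step: parts = 1024 // 129 = 7, sizes = 1024 // 7 = 146, and
    # percentages = 100 // 7 = 14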
def test_unable_to_allocate_past_free_size(self):
with pytest.raises(exceptions.SizeAllocationError):
self.vg.sizing(size=2048)
class TestRemoveLV(object):
def test_removes_lv(self, monkeypatch):
def mock_call(cmd, **kw):
return ('', '', 0)
monkeypatch.setattr(process, 'call', mock_call)
assert api.remove_lv("vg/lv")
def test_removes_lv_object(self, fake_call):
foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
api.remove_lv(foo_volume)
# last argument from the list passed to process.call
assert fake_call.calls[0]['args'][0][-1] == '/path'
def test_fails_to_remove_lv(self, monkeypatch):
def mock_call(cmd, **kw):
return ('', '', 1)
monkeypatch.setattr(process, 'call', mock_call)
with pytest.raises(RuntimeError):
api.remove_lv("vg/lv")
class TestCreateLV(object):
def setup_method(self):
self.foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
self.foo_group = api.VolumeGroup(vg_name='foo_group',
vg_extent_size="4194304",
vg_extent_count="100",
vg_free_count="100")
@patch('ceph_volume.api.lvm.process.run')
@patch('ceph_volume.api.lvm.process.call')
@patch('ceph_volume.api.lvm.get_single_lv')
def test_uses_size(self, m_get_single_lv, m_call, m_run, monkeypatch):
m_get_single_lv.return_value = self.foo_volume
api.create_lv('foo', 0, vg=self.foo_group, size=419430400, tags={'ceph.type': 'data'})
expected = (['lvcreate', '--yes', '-l', '100', '-n', 'foo-0', 'foo_group'])
m_run.assert_called_with(expected, run_on_host=True)
@patch('ceph_volume.api.lvm.process.run')
@patch('ceph_volume.api.lvm.process.call')
@patch('ceph_volume.api.lvm.get_single_lv')
def test_uses_size_adjust_if_1percent_over(self, m_get_single_lv, m_call, m_run, monkeypatch):
foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
foo_group = api.VolumeGroup(vg_name='foo_group',
vg_extent_size="4194304",
vg_extent_count="1000",
vg_free_count="1000")
m_get_single_lv.return_value = foo_volume
        # 4232052736 is just under 1% over the available size of 4194304000
        # bytes (1000 extents of 4194304), so create_lv adjusts down to the
        # full 1000 extents
api.create_lv('foo', 0, vg=foo_group, size=4232052736, tags={'ceph.type': 'data'})
expected = ['lvcreate', '--yes', '-l', '1000', '-n', 'foo-0', 'foo_group']
m_run.assert_called_with(expected, run_on_host=True)
@patch('ceph_volume.api.lvm.process.run')
@patch('ceph_volume.api.lvm.process.call')
@patch('ceph_volume.api.lvm.get_single_lv')
def test_uses_size_too_large(self, m_get_single_lv, m_call, m_run, monkeypatch):
m_get_single_lv.return_value = self.foo_volume
with pytest.raises(RuntimeError):
api.create_lv('foo', 0, vg=self.foo_group, size=5368709120, tags={'ceph.type': 'data'})
@patch('ceph_volume.api.lvm.process.run')
@patch('ceph_volume.api.lvm.process.call')
@patch('ceph_volume.api.lvm.get_single_lv')
def test_uses_extents(self, m_get_single_lv, m_call, m_run, monkeypatch):
m_get_single_lv.return_value = self.foo_volume
api.create_lv('foo', 0, vg=self.foo_group, extents='50', tags={'ceph.type': 'data'})
expected = ['lvcreate', '--yes', '-l', '50', '-n', 'foo-0', 'foo_group']
m_run.assert_called_with(expected, run_on_host=True)
@pytest.mark.parametrize("test_input,expected",
[(2, 50),
(3, 33),])
@patch('ceph_volume.api.lvm.process.run')
@patch('ceph_volume.api.lvm.process.call')
@patch('ceph_volume.api.lvm.get_single_lv')
def test_uses_slots(self, m_get_single_lv, m_call, m_run, monkeypatch, test_input, expected):
m_get_single_lv.return_value = self.foo_volume
api.create_lv('foo', 0, vg=self.foo_group, slots=test_input, tags={'ceph.type': 'data'})
expected = ['lvcreate', '--yes', '-l', str(expected), '-n', 'foo-0', 'foo_group']
m_run.assert_called_with(expected, run_on_host=True)
@patch('ceph_volume.api.lvm.process.run')
@patch('ceph_volume.api.lvm.process.call')
@patch('ceph_volume.api.lvm.get_single_lv')
def test_uses_all(self, m_get_single_lv, m_call, m_run, monkeypatch):
m_get_single_lv.return_value = self.foo_volume
api.create_lv('foo', 0, vg=self.foo_group, tags={'ceph.type': 'data'})
expected = ['lvcreate', '--yes', '-l', '100%FREE', '-n', 'foo-0', 'foo_group']
m_run.assert_called_with(expected, run_on_host=True)
@patch('ceph_volume.api.lvm.process.run')
@patch('ceph_volume.api.lvm.process.call')
@patch('ceph_volume.api.lvm.Volume.set_tags')
@patch('ceph_volume.api.lvm.get_single_lv')
def test_calls_to_set_tags_default(self, m_get_single_lv, m_set_tags, m_call, m_run, monkeypatch):
m_get_single_lv.return_value = self.foo_volume
api.create_lv('foo', 0, vg=self.foo_group)
tags = {
"ceph.osd_id": "null",
"ceph.type": "null",
"ceph.cluster_fsid": "null",
"ceph.osd_fsid": "null",
}
m_set_tags.assert_called_with(tags)
@patch('ceph_volume.api.lvm.process.run')
@patch('ceph_volume.api.lvm.process.call')
@patch('ceph_volume.api.lvm.Volume.set_tags')
@patch('ceph_volume.api.lvm.get_single_lv')
def test_calls_to_set_tags_arg(self, m_get_single_lv, m_set_tags, m_call, m_run, monkeypatch):
m_get_single_lv.return_value = self.foo_volume
api.create_lv('foo', 0, vg=self.foo_group, tags={'ceph.type': 'data'})
tags = {
"ceph.type": "data",
"ceph.data_device": "/path"
}
m_set_tags.assert_called_with(tags)
@patch('ceph_volume.api.lvm.process.run')
@patch('ceph_volume.api.lvm.process.call')
@patch('ceph_volume.api.lvm.get_device_vgs')
@patch('ceph_volume.api.lvm.create_vg')
@patch('ceph_volume.api.lvm.get_single_lv')
def test_create_vg(self, m_get_single_lv, m_create_vg, m_get_device_vgs, m_call,
m_run, monkeypatch):
m_get_single_lv.return_value = self.foo_volume
m_get_device_vgs.return_value = []
api.create_lv('foo', 0, device='dev/foo', size='5G', tags={'ceph.type': 'data'})
m_create_vg.assert_called_with('dev/foo', name_prefix='ceph')
class TestTags(object):
def setup_method(self):
self.foo_volume_clean = api.Volume(lv_name='foo_clean', lv_path='/pathclean',
vg_name='foo_group',
lv_tags='')
self.foo_volume = api.Volume(lv_name='foo', lv_path='/path',
vg_name='foo_group',
lv_tags='ceph.foo0=bar0,ceph.foo1=bar1,ceph.foo2=bar2')
def test_set_tag(self, monkeypatch, capture):
monkeypatch.setattr(process, 'run', capture)
monkeypatch.setattr(process, 'call', capture)
self.foo_volume_clean.set_tag('foo', 'bar')
expected = ['lvchange', '--addtag', 'foo=bar', '/pathclean']
assert capture.calls[0]['args'][0] == expected
assert self.foo_volume_clean.tags == {'foo': 'bar'}
def test_set_clear_tag(self, monkeypatch, capture):
monkeypatch.setattr(process, 'run', capture)
monkeypatch.setattr(process, 'call', capture)
self.foo_volume_clean.set_tag('foo', 'bar')
assert self.foo_volume_clean.tags == {'foo': 'bar'}
self.foo_volume_clean.clear_tag('foo')
expected = ['lvchange', '--deltag', 'foo=bar', '/pathclean']
assert self.foo_volume_clean.tags == {}
assert capture.calls[1]['args'][0] == expected
def test_set_tags(self, monkeypatch, capture):
monkeypatch.setattr(process, 'run', capture)
monkeypatch.setattr(process, 'call', capture)
tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'}
assert self.foo_volume.tags == tags
tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'baz1', 'ceph.foo2': 'baz2'}
self.foo_volume.set_tags(tags)
assert self.foo_volume.tags == tags
self.foo_volume.set_tag('ceph.foo1', 'other1')
tags['ceph.foo1'] = 'other1'
assert self.foo_volume.tags == tags
expected = [
sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag',
'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2', '/path']),
sorted(['lvchange', '--deltag', 'ceph.foo1=baz1', '/path']),
sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag',
'ceph.foo1=baz1', '--addtag', 'ceph.foo2=baz2', '/path']),
sorted(['lvchange', '--addtag', 'ceph.foo1=other1', '/path']),
]
        # The order isn't guaranteed
for call in capture.calls:
assert sorted(call['args'][0]) in expected
assert len(capture.calls) == len(expected)
def test_clear_tags(self, monkeypatch, capture):
monkeypatch.setattr(process, 'run', capture)
monkeypatch.setattr(process, 'call', capture)
tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'}
self.foo_volume_clean.set_tags(tags)
assert self.foo_volume_clean.tags == tags
self.foo_volume_clean.clear_tags()
assert self.foo_volume_clean.tags == {}
expected = [
sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag',
'ceph.foo1=bar1', '--addtag', 'ceph.foo2=bar2',
'/pathclean']),
sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag',
'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2',
'/pathclean']),
]
        # The order isn't guaranteed
for call in capture.calls:
assert sorted(call['args'][0]) in expected
assert len(capture.calls) == len(expected)
class TestExtendVG(object):
def setup_method(self):
self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
def test_uses_single_device_in_list(self, monkeypatch, fake_run):
monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
api.extend_vg(self.foo_volume, ['/dev/sda'])
expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda']
assert fake_run.calls[0]['args'][0] == expected
def test_uses_single_device(self, monkeypatch, fake_run):
monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
api.extend_vg(self.foo_volume, '/dev/sda')
expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda']
assert fake_run.calls[0]['args'][0] == expected
def test_uses_multiple_devices(self, monkeypatch, fake_run):
monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
api.extend_vg(self.foo_volume, ['/dev/sda', '/dev/sdb'])
expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb']
assert fake_run.calls[0]['args'][0] == expected
class TestReduceVG(object):
def setup_method(self):
self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
def test_uses_single_device_in_list(self, monkeypatch, fake_run):
monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
api.reduce_vg(self.foo_volume, ['/dev/sda'])
expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda']
assert fake_run.calls[0]['args'][0] == expected
def test_uses_single_device(self, monkeypatch, fake_run):
monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
api.reduce_vg(self.foo_volume, '/dev/sda')
expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda']
assert fake_run.calls[0]['args'][0] == expected
def test_uses_multiple_devices(self, monkeypatch, fake_run):
monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
api.reduce_vg(self.foo_volume, ['/dev/sda', '/dev/sdb'])
expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb']
assert fake_run.calls[0]['args'][0] == expected
class TestCreateVG(object):
def setup_method(self):
self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
def test_no_name(self, monkeypatch, fake_run):
monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
api.create_vg('/dev/sda')
result = fake_run.calls[0]['args'][0]
assert '/dev/sda' in result
assert result[-2].startswith('ceph-')
def test_devices_list(self, monkeypatch, fake_run):
monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
api.create_vg(['/dev/sda', '/dev/sdb'], name='ceph')
result = fake_run.calls[0]['args'][0]
expected = ['vgcreate', '--force', '--yes', 'ceph', '/dev/sda', '/dev/sdb']
assert result == expected
def test_name_prefix(self, monkeypatch, fake_run):
monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
api.create_vg('/dev/sda', name_prefix='master')
result = fake_run.calls[0]['args'][0]
assert '/dev/sda' in result
assert result[-2].startswith('master-')
def test_specific_name(self, monkeypatch, fake_run):
monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
api.create_vg('/dev/sda', name='master')
result = fake_run.calls[0]['args'][0]
assert '/dev/sda' in result
assert result[-2] == 'master'
#
# The following tests are pretty gnarly. VDO detection is very convoluted and
# involves correlating information from device mappers, realpaths, slaves of
# those mappers, and parents or related mappers. This makes it very hard to
# patch nicely or keep tests short and readable. These tests are trying to
# ensure correctness; the better approach would be to do some functional
# testing with VDO.
#
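# As a rough sketch, the sysfs layout these fixtures fake looks like:
#
#   /sys/kvdo/                      # presence signals the kvdo module
#   /sys/block/vdo0/slaves/dm-4     # slaves of a vdo mapper
#   /sys/block/dm-4/slaves/dm-3     # walked to find vdo parents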
@pytest.fixture
def disable_kvdo_path(monkeypatch):
monkeypatch.setattr('os.path.isdir', lambda x, **kw: False)
@pytest.fixture
def enable_kvdo_path(monkeypatch):
monkeypatch.setattr('os.path.isdir', lambda x, **kw: True)
# Stub for os.listdir
class ListDir(object):
def __init__(self, paths):
self.paths = paths
self._normalize_paths()
self.listdir = os.listdir
def _normalize_paths(self):
for k, v in self.paths.items():
self.paths[k.rstrip('/')] = v.rstrip('/')
def add(self, original, fake):
self.paths[original.rstrip('/')] = fake.rstrip('/')
def __call__(self, path):
return self.listdir(self.paths[path.rstrip('/')])
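# A minimal usage sketch (illustrative, not part of the suite): listings of a
# real path get redirected to a fake directory, trailing slashes ignored:
#
#   stub = ListDir({'/sys/kvdo': '/tmp/fake/sys/kvdo'})
#   stub('/sys/kvdo/')  # actually lists /tmp/fake/sys/kvdo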
@pytest.fixture(scope='function')
def listdir(monkeypatch):
def apply(paths=None, stub=None):
if not stub:
stub = ListDir(paths)
if paths:
for original, fake in paths.items():
stub.add(original, fake)
monkeypatch.setattr('os.listdir', stub)
return apply
@pytest.fixture(scope='function')
def makedirs(tmpdir):
def create(directory):
path = os.path.join(str(tmpdir), directory)
os.makedirs(path)
return path
create.base = str(tmpdir)
return create
class TestIsVdo(object):
def test_no_vdo_dir(self, disable_kvdo_path):
assert api._is_vdo('/path') is False
def test_exceptions_return_false(self, monkeypatch):
def throw():
raise Exception()
monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', throw)
assert api.is_vdo('/path') == '0'
def test_is_vdo_returns_a_string(self, monkeypatch):
monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', lambda x, **kw: True)
assert api.is_vdo('/path') == '1'
def test_kvdo_dir_no_devices(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
kvdo_path = makedirs('sys/kvdo')
listdir(paths={'/sys/kvdo': kvdo_path})
monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: [])
monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: [])
assert api._is_vdo('/dev/mapper/vdo0') is False
def test_vdo_slaves_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
kvdo_path = makedirs('sys/kvdo')
listdir(paths={'/sys/kvdo': kvdo_path})
monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: ['/dev/dm-3'])
monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: [])
assert api._is_vdo('/dev/dm-3') is True
def test_vdo_parents_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
kvdo_path = makedirs('sys/kvdo')
listdir(paths={'/sys/kvdo': kvdo_path})
monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: [])
monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: ['/dev/dm-4'])
assert api._is_vdo('/dev/dm-4') is True
class TestVdoSlaves(object):
def test_slaves_are_not_found(self, makedirs, listdir, monkeypatch):
slaves_path = makedirs('sys/block/vdo0/slaves')
listdir(paths={'/sys/block/vdo0/slaves': slaves_path})
monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True)
result = sorted(api._vdo_slaves(['vdo0']))
assert '/dev/mapper/vdo0' in result
assert 'vdo0' in result
def test_slaves_are_found(self, makedirs, listdir, monkeypatch):
slaves_path = makedirs('sys/block/vdo0/slaves')
makedirs('sys/block/vdo0/slaves/dm-4')
makedirs('dev/mapper/vdo0')
listdir(paths={'/sys/block/vdo0/slaves': slaves_path})
monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True)
result = sorted(api._vdo_slaves(['vdo0']))
assert '/dev/dm-4' in result
assert 'dm-4' in result
class TestVDOParents(object):
def test_parents_are_found(self, makedirs, listdir):
block_path = makedirs('sys/block')
slaves_path = makedirs('sys/block/dm-4/slaves')
makedirs('sys/block/dm-4/slaves/dm-3')
listdir(paths={
'/sys/block/dm-4/slaves': slaves_path,
'/sys/block': block_path})
result = api._vdo_parents(['dm-3'])
assert '/dev/dm-4' in result
assert 'dm-4' in result
def test_parents_are_not_found(self, makedirs, listdir):
block_path = makedirs('sys/block')
slaves_path = makedirs('sys/block/dm-4/slaves')
makedirs('sys/block/dm-4/slaves/dm-5')
listdir(paths={
'/sys/block/dm-4/slaves': slaves_path,
'/sys/block': block_path})
result = api._vdo_parents(['dm-3'])
assert result == []
class TestSplitNameParser(object):
def test_keys_are_parsed_without_prefix(self):
line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"]
result = api._splitname_parser(line)
assert result['VG_NAME'] == 'vg'
assert result['LV_NAME'] == 'lv'
assert result['LV_LAYER'] == ''
def test_vg_name_sans_mapper(self):
line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"]
result = api._splitname_parser(line)
assert '/dev/mapper' not in result['VG_NAME']
class TestGetDeviceVgs(object):
@patch('ceph_volume.process.call')
@patch('ceph_volume.api.lvm._output_parser')
def test_get_device_vgs_with_empty_pv(self, patched_output_parser, pcall):
patched_output_parser.return_value = [{'vg_name': ''}]
pcall.return_value = ('', '', '')
vgs = api.get_device_vgs('/dev/foo')
assert vgs == []
class TestGetDeviceLvs(object):
@patch('ceph_volume.process.call')
@patch('ceph_volume.api.lvm._output_parser')
def test_get_device_lvs_with_empty_vg(self, patched_output_parser, pcall):
patched_output_parser.return_value = [{'lv_name': ''}]
pcall.return_value = ('', '', '')
vgs = api.get_device_lvs('/dev/foo')
assert vgs == []
# NOTE: api.convert_filters_to_str() and api.convert_tags_to_str() should get
# tested automatically while testing api.make_filters_lvmcmd_ready()
class TestMakeFiltersLVMCMDReady(object):
def test_with_no_filters_and_no_tags(self):
retval = api.make_filters_lvmcmd_ready(None, None)
assert isinstance(retval, str)
assert retval == ''
def test_with_filters_and_no_tags(self):
filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'}
retval = api.make_filters_lvmcmd_ready(filters, None)
assert isinstance(retval, str)
for k, v in filters.items():
assert k in retval
assert v in retval
def test_with_no_filters_and_with_tags(self):
tags = {'ceph.type': 'data', 'ceph.osd_id': '0'}
retval = api.make_filters_lvmcmd_ready(None, tags)
assert isinstance(retval, str)
assert 'tags' in retval
for k, v in tags.items():
assert k in retval
assert v in retval
assert retval.find('tags') < retval.find(k) < retval.find(v)
def test_with_filters_and_tags(self):
filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'}
tags = {'ceph.type': 'data', 'ceph.osd_id': '0'}
retval = api.make_filters_lvmcmd_ready(filters, tags)
assert isinstance(retval, str)
for f, t in zip(filters.items(), tags.items()):
assert f[0] in retval
assert f[1] in retval
assert t[0] in retval
assert t[1] in retval
assert retval.find(f[0]) < retval.find(f[1]) < \
retval.find('tags') < retval.find(t[0]) < retval.find(t[1])
class TestGetPVs(object):
def test_get_pvs(self, monkeypatch):
pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
vg_name='vg1')
pv2 = api.PVolume(pv_name='/dev/sdb', pv_uuid='0001', pv_tags={},
vg_name='vg2')
pvs = [pv1, pv2]
stdout = ['{};{};{};{};;'.format(pv1.pv_name, pv1.pv_tags, pv1.pv_uuid, pv1.vg_name),
'{};{};{};{};;'.format(pv2.pv_name, pv2.pv_tags, pv2.pv_uuid, pv2.vg_name)]
monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
pvs_ = api.get_pvs()
assert len(pvs_) == len(pvs)
for pv, pv_ in zip(pvs, pvs_):
assert pv_.pv_name == pv.pv_name
def test_get_pvs_single_pv(self, monkeypatch):
pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
vg_name='vg1')
pvs = [pv1]
stdout = ['{};;;;;;'.format(pv1.pv_name)]
monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
pvs_ = api.get_pvs()
assert len(pvs_) == 1
assert pvs_[0].pv_name == pvs[0].pv_name
def test_get_pvs_empty(self, monkeypatch):
monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
assert api.get_pvs() == []
class TestGetVGs(object):
def test_get_vgs(self, monkeypatch):
vg1 = api.VolumeGroup(vg_name='vg1')
vg2 = api.VolumeGroup(vg_name='vg2')
vgs = [vg1, vg2]
stdout = ['{};;;;;;'.format(vg1.vg_name),
'{};;;;;;'.format(vg2.vg_name)]
monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
vgs_ = api.get_vgs()
assert len(vgs_) == len(vgs)
for vg, vg_ in zip(vgs, vgs_):
assert vg_.vg_name == vg.vg_name
def test_get_vgs_single_vg(self, monkeypatch):
        vg1 = api.VolumeGroup(vg_name='vg')
        vgs = [vg1]
stdout = ['{};;;;;;'.format(vg1.vg_name)]
monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
vgs_ = api.get_vgs()
assert len(vgs_) == 1
assert vgs_[0].vg_name == vgs[0].vg_name
def test_get_vgs_empty(self, monkeypatch):
monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
assert api.get_vgs() == []
class TestGetLVs(object):
def test_get_lvs(self, monkeypatch):
lv1 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg1/lv1',
lv_name='lv1', vg_name='vg1')
lv2 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg2/lv2',
lv_name='lv2', vg_name='vg2')
lvs = [lv1, lv2]
stdout = ['{};{};{};{}'.format(lv1.lv_tags, lv1.lv_path, lv1.lv_name,
lv1.vg_name),
'{};{};{};{}'.format(lv2.lv_tags, lv2.lv_path, lv2.lv_name,
lv2.vg_name)]
monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
lvs_ = api.get_lvs()
assert len(lvs_) == len(lvs)
for lv, lv_ in zip(lvs, lvs_):
assert lv.__dict__ == lv_.__dict__
def test_get_lvs_single_lv(self, monkeypatch):
stdout = ['ceph.type=data;/dev/vg/lv;lv;vg']
monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
lvs = []
lvs.append((api.Volume(lv_tags='ceph.type=data',
lv_path='/dev/vg/lv',
lv_name='lv', vg_name='vg')))
lvs_ = api.get_lvs()
assert len(lvs_) == len(lvs)
assert lvs[0].__dict__ == lvs_[0].__dict__
def test_get_lvs_empty(self, monkeypatch):
monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
assert api.get_lvs() == []
class TestGetSinglePV(object):
@patch('ceph_volume.devices.lvm.prepare.api.get_pvs')
def test_get_single_pv_multiple_matches_raises_runtimeerror(self, m_get_pvs):
fake_pvs = []
fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={}))
fake_pvs.append(api.PVolume(pv_name='/dev/sdb', pv_tags={}))
m_get_pvs.return_value = fake_pvs
with pytest.raises(RuntimeError) as e:
api.get_single_pv()
assert "matched more than 1 PV present on this host." in str(e.value)
@patch('ceph_volume.devices.lvm.prepare.api.get_pvs')
def test_get_single_pv_no_match_returns_none(self, m_get_pvs):
m_get_pvs.return_value = []
pv = api.get_single_pv()
        assert pv is None
@patch('ceph_volume.devices.lvm.prepare.api.get_pvs')
def test_get_single_pv_one_match(self, m_get_pvs):
fake_pvs = []
fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={}))
m_get_pvs.return_value = fake_pvs
pv = api.get_single_pv()
assert isinstance(pv, api.PVolume)
assert pv.name == '/dev/sda'
class TestGetSingleVG(object):
@patch('ceph_volume.devices.lvm.prepare.api.get_vgs')
def test_get_single_vg_multiple_matches_raises_runtimeerror(self, m_get_vgs):
fake_vgs = []
fake_vgs.append(api.VolumeGroup(vg_name='vg1'))
fake_vgs.append(api.VolumeGroup(vg_name='vg2'))
m_get_vgs.return_value = fake_vgs
with pytest.raises(RuntimeError) as e:
api.get_single_vg()
assert "matched more than 1 VG present on this host." in str(e.value)
@patch('ceph_volume.devices.lvm.prepare.api.get_vgs')
def test_get_single_vg_no_match_returns_none(self, m_get_vgs):
m_get_vgs.return_value = []
vg = api.get_single_vg()
        assert vg is None
@patch('ceph_volume.devices.lvm.prepare.api.get_vgs')
def test_get_single_vg_one_match(self, m_get_vgs):
fake_vgs = []
fake_vgs.append(api.VolumeGroup(vg_name='vg1'))
m_get_vgs.return_value = fake_vgs
vg = api.get_single_vg()
assert isinstance(vg, api.VolumeGroup)
assert vg.name == 'vg1'
class TestGetSingleLV(object):
@patch('ceph_volume.devices.lvm.prepare.api.get_lvs')
def test_get_single_lv_multiple_matches_raises_runtimeerror(self, m_get_lvs):
fake_lvs = []
fake_lvs.append(api.Volume(lv_name='lv1',
lv_path='/dev/vg1/lv1',
vg_name='vg1',
lv_tags='',
lv_uuid='fake-uuid'))
fake_lvs.append(api.Volume(lv_name='lv1',
lv_path='/dev/vg2/lv1',
vg_name='vg2',
lv_tags='',
lv_uuid='fake-uuid'))
m_get_lvs.return_value = fake_lvs
with pytest.raises(RuntimeError) as e:
api.get_single_lv()
assert "matched more than 1 LV present on this host" in str(e.value)
@patch('ceph_volume.devices.lvm.prepare.api.get_lvs')
def test_get_single_lv_no_match_returns_none(self, m_get_lvs):
m_get_lvs.return_value = []
lv = api.get_single_lv()
        assert lv is None
@patch('ceph_volume.devices.lvm.prepare.api.get_lvs')
def test_get_single_lv_one_match(self, m_get_lvs):
fake_lvs = []
fake_lvs.append(api.Volume(lv_name='lv1', lv_path='/dev/vg1/lv1', vg_name='vg1', lv_tags='', lv_uuid='fake-uuid'))
m_get_lvs.return_value = fake_lvs
lv_ = api.get_single_lv()
assert isinstance(lv_, api.Volume)
assert lv_.name == 'lv1'
| 35,099 | 38.616253 | 122 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/__init__.py
| 0 | 0 | 0 |
py
|
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/test_zap.py
|
import pytest
from ceph_volume.devices import lvm
from mock.mock import patch, MagicMock
class TestZap(object):
def test_main_spits_help_with_no_arguments(self, capsys):
lvm.zap.Zap([]).main()
stdout, stderr = capsys.readouterr()
assert 'Zaps the given logical volume(s), raw device(s) or partition(s)' in stdout
def test_main_shows_full_help(self, capsys):
with pytest.raises(SystemExit):
lvm.zap.Zap(argv=['--help']).main()
stdout, stderr = capsys.readouterr()
assert 'optional arguments' in stdout
@pytest.mark.parametrize('device_name', [
'/dev/mapper/foo',
'/dev/dm-0',
])
@patch('ceph_volume.util.arg_validators.Device')
def test_can_not_zap_mapper_device(self, mocked_device, monkeypatch, device_info, capsys, is_root, device_name):
monkeypatch.setattr('os.path.exists', lambda x: True)
mocked_device.return_value = MagicMock(
is_mapper=True,
is_mpath=False,
used_by_ceph=True,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=False
)
with pytest.raises(SystemExit):
lvm.zap.Zap(argv=[device_name]).main()
stdout, stderr = capsys.readouterr()
assert 'Refusing to zap' in stderr
| 1,350 | 33.641026 | 116 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py
| 0 | 0 | 0 |
py
|
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
|
import pytest
from copy import deepcopy
from ceph_volume.devices.lvm import activate
from ceph_volume.api import lvm as api
from ceph_volume.tests.conftest import Capture
class Args(object):
def __init__(self, **kw):
# default flags
self.bluestore = False
self.no_systemd = False
self.auto_detect_objectstore = None
for k, v in kw.items():
setattr(self, k, v)
class TestActivate(object):
    # these tests are very functional, hence the heavy patching; it is hard to
    # test the negative side effect with an actual functional run, so we must
    # set up a perfect scenario for these tests to check they can really work
    # with/without an osd_id
def test_no_osd_id_matches_fsid_bluestore(self, is_root, monkeypatch, capture):
FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
lv_tags="ceph.osd_fsid=1234")
volumes = []
volumes.append(FooVolume)
monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
monkeypatch.setattr(activate, 'activate_bluestore', capture)
args = Args(osd_id=None, osd_fsid='1234', bluestore=True)
activate.Activate([]).activate(args)
assert capture.calls[0]['args'][0] == [FooVolume]
def test_osd_id_no_osd_fsid(self, is_root):
args = Args(osd_id=42, osd_fsid=None)
with pytest.raises(RuntimeError) as result:
activate.Activate([]).activate(args)
assert result.value.args[0] == 'could not activate osd.42, please provide the osd_fsid too'
def test_no_osd_id_no_osd_fsid(self, is_root):
args = Args(osd_id=None, osd_fsid=None)
with pytest.raises(RuntimeError) as result:
activate.Activate([]).activate(args)
assert result.value.args[0] == 'Please provide both osd_id and osd_fsid'
def test_bluestore_no_systemd(self, is_root, monkeypatch, capture):
monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
fake_enable = Capture()
fake_start_osd = Capture()
monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
"ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
volumes = []
volumes.append(DataVolume)
monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, bluestore=True)
activate.Activate([]).activate(args)
assert fake_enable.calls == []
assert fake_start_osd.calls == []
def test_bluestore_systemd(self, is_root, monkeypatch, capture):
monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
fake_enable = Capture()
fake_start_osd = Capture()
monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
"ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
volumes = []
volumes.append(DataVolume)
monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
bluestore=True)
activate.Activate([]).activate(args)
assert fake_enable.calls != []
assert fake_start_osd.calls != []
def test_bluestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
fake_enable = Capture()
fake_start_osd = Capture()
monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
lv_tags="ceph.cluster_name=ceph,,ceph.block_uuid=000," + \
"ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
volumes = []
volumes.append(DataVolume)
monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
bluestore=True, auto_detect_objectstore=True)
activate.Activate([]).activate(args)
assert fake_enable.calls == []
assert fake_start_osd.calls == []
def test_bluestore_systemd_autodetect(self, is_root, monkeypatch, capture):
monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
fake_enable = Capture()
fake_start_osd = Capture()
monkeypatch.setattr('ceph_volume.util.system.path_is_mounted',
lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw:
True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
"ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
volumes = []
volumes.append(DataVolume)
monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
bluestore=True, auto_detect_objectstore=False)
activate.Activate([]).activate(args)
assert fake_enable.calls != []
assert fake_start_osd.calls != []
class TestActivateFlags(object):
def test_default_objectstore(self, capture):
args = ['0', 'asdf-ljh-asdf']
activation = activate.Activate(args)
activation.activate = capture
activation.main()
parsed_args = capture.calls[0]['args'][0]
assert parsed_args.bluestore is False
def test_uses_bluestore(self, capture):
args = ['--bluestore', '0', 'asdf-ljh-asdf']
activation = activate.Activate(args)
activation.activate = capture
activation.main()
parsed_args = capture.calls[0]['args'][0]
assert parsed_args.bluestore is True
class TestActivateAll(object):
def test_does_not_detect_osds(self, capsys, is_root, capture, monkeypatch):
monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: {})
args = ['--all']
activation = activate.Activate(args)
activation.main()
out, err = capsys.readouterr()
assert 'Was unable to find any OSDs to activate' in err
assert 'Verify OSDs are present with ' in err
def test_detects_running_osds(self, capsys, is_root, capture, monkeypatch):
monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: True)
args = ['--all']
activation = activate.Activate(args)
activation.main()
out, err = capsys.readouterr()
assert 'a8789a96ce8b process is active. Skipping activation' in err
assert 'b8218eaa1634 process is active. Skipping activation' in err
def test_detects_osds_to_activate_systemd(self, is_root, capture, monkeypatch):
monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: False)
args = ['--all']
activation = activate.Activate(args)
activation.activate = capture
activation.main()
calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id'])
assert calls[0]['kwargs']['osd_id'] == '0'
assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634'
assert calls[1]['kwargs']['osd_id'] == '1'
assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b'
def test_detects_osds_to_activate_no_systemd(self, is_root, capture, monkeypatch):
monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
args = ['--all', '--no-systemd']
activation = activate.Activate(args)
activation.activate = capture
activation.main()
calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id'])
assert calls[0]['kwargs']['osd_id'] == '0'
assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634'
assert calls[1]['kwargs']['osd_id'] == '1'
assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b'
#
# Activate All fixture
#
direct_report = {
"0": [
{
"lv_name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
"lv_path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
"lv_tags": "ceph.block_device=/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634,ceph.block_uuid=6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=957d22b7-24ce-466a-9883-b8218eaa1634,ceph.osd_id=0,ceph.type=block",
"lv_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4",
"name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
"path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
"tags": {
"ceph.block_device": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
"ceph.block_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4",
"ceph.cephx_lockbox_secret": "",
"ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44",
"ceph.cluster_name": "ceph",
"ceph.crush_device_class": "",
"ceph.encrypted": "0",
"ceph.osd_fsid": "957d22b7-24ce-466a-9883-b8218eaa1634",
"ceph.osd_id": "0",
"ceph.type": "block"
},
"type": "block",
"vg_name": "ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44"
}
],
"1": [
{
"lv_name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
"lv_path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
"lv_tags": "ceph.block_device=/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.block_uuid=1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.osd_id=1,ceph.type=block",
"lv_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW",
"name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
"path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
"tags": {
"ceph.block_device": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
"ceph.block_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW",
"ceph.cephx_lockbox_secret": "",
"ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44",
"ceph.cluster_name": "ceph",
"ceph.crush_device_class": "",
"ceph.encrypted": "0",
"ceph.osd_fsid": "d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
"ceph.osd_id": "1",
"ceph.type": "block"
},
"type": "block",
"vg_name": "ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532"
}
]
}
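# Illustrative helper (an addition, not used by the tests above) showing the
# fixture's shape: direct_report maps an osd id to a list of LV records whose
# 'tags' dict carries the ceph.* metadata that activation reads back.
def _fixture_osd_fsids(report=direct_report):
    # e.g. {'0': ['957d22b7-24ce-466a-9883-b8218eaa1634'], '1': [...]}
    return {osd_id: [lv['tags']['ceph.osd_fsid'] for lv in lvs]
            for osd_id, lvs in report.items()}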
| 13,393 | 49.734848 | 420 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
|
import pytest
import json
import random
from argparse import ArgumentError
from mock import MagicMock, patch
from ceph_volume.api import lvm
from ceph_volume.devices.lvm import batch
from ceph_volume.util import arg_validators
class TestBatch(object):
def test_batch_instance(self, is_root):
b = batch.Batch([])
b.main()
def test_invalid_osd_ids_passed(self):
with pytest.raises(SystemExit):
batch.Batch(argv=['--osd-ids', '1', 'foo']).main()
def test_disjoint_device_lists(self, factory):
device1 = factory(used_by_ceph=False, available=True, abspath="/dev/sda")
device2 = factory(used_by_ceph=False, available=True, abspath="/dev/sdb")
devices = [device1, device2]
db_devices = [device2]
with pytest.raises(Exception) as disjoint_ex:
batch.ensure_disjoint_device_lists(devices, db_devices)
assert 'Device lists are not disjoint' in str(disjoint_ex.value)
@patch('ceph_volume.util.arg_validators.Device')
def test_reject_partition(self, mocked_device):
mocked_device.return_value = MagicMock(
is_partition=True,
has_fs=False,
is_lvm_member=False,
has_gpt_headers=False,
has_partitions=False,
)
with pytest.raises(ArgumentError):
arg_validators.ValidBatchDevice()('foo')
@pytest.mark.parametrize('format_', ['pretty', 'json', 'json-pretty'])
def test_report(self, format_, factory, conf_ceph_stub, mock_device_generator):
# just ensure reporting works
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
devs = [mock_device_generator() for _ in range(5)]
args = factory(data_slots=1,
osds_per_device=1,
osd_ids=[],
report=True,
format=format_,
devices=devs,
db_devices=[],
wal_devices=[],
bluestore=True,
block_db_size="1G",
dmcrypt=True,
data_allocate_fraction=1.0,
)
b = batch.Batch([])
plan = b.get_plan(args)
b.args = args
b.report(plan)
@pytest.mark.parametrize('format_', ['json', 'json-pretty'])
def test_json_report_valid_empty(self, format_, factory, conf_ceph_stub, mock_device_generator):
# ensure json reports are valid when empty
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
devs = []
args = factory(data_slots=1,
osds_per_device=1,
osd_ids=[],
report=True,
format=format_,
devices=devs,
db_devices=[],
wal_devices=[],
bluestore=True,
block_db_size="1G",
dmcrypt=True,
data_allocate_fraction=1.0,
)
b = batch.Batch([])
plan = b.get_plan(args)
b.args = args
report = b._create_report(plan)
json.loads(report)
@pytest.mark.parametrize('format_', ['json', 'json-pretty'])
def test_json_report_valid_empty_unavailable_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
# ensure json reports are valid when empty
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
devs = [mock_device_generator() for _ in range(5)]
fast_devs = [mock_device_generator()]
fast_devs[0].available_lvm = False
args = factory(data_slots=1,
osds_per_device=1,
osd_ids=[],
report=True,
format=format_,
devices=devs,
db_devices=fast_devs,
wal_devices=[],
bluestore=True,
block_db_size="1G",
dmcrypt=True,
data_allocate_fraction=1.0,
)
b = batch.Batch([])
plan = b.get_plan(args)
b.args = args
report = b._create_report(plan)
json.loads(report)
@pytest.mark.parametrize('format_', ['json', 'json-pretty'])
def test_json_report_valid_empty_unavailable_very_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
# ensure json reports are valid when empty
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
devs = [mock_device_generator() for _ in range(5)]
fast_devs = [mock_device_generator()]
very_fast_devs = [mock_device_generator()]
very_fast_devs[0].available_lvm = False
args = factory(data_slots=1,
osds_per_device=1,
osd_ids=[],
report=True,
format=format_,
devices=devs,
db_devices=fast_devs,
wal_devices=very_fast_devs,
bluestore=True,
block_db_size="1G",
dmcrypt=True,
data_allocate_fraction=1.0,
)
b = batch.Batch([])
plan = b.get_plan(args)
b.args = args
report = b._create_report(plan)
json.loads(report)
@pytest.mark.parametrize('rota', [0, 1])
def test_batch_sort_full(self, factory, rota):
device1 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sda")
device2 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdb")
device3 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdc")
devices = [device1, device2, device3]
args = factory(report=True,
devices=devices,
)
b = batch.Batch([])
b.args = args
b._sort_rotational_disks()
assert len(b.args.devices) == 3
@pytest.mark.parametrize('objectstore', ['bluestore'])
def test_batch_sort_mixed(self, factory, objectstore):
device1 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sda")
device2 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sdb")
device3 = factory(used_by_ceph=False, available=True, rotational=0, abspath="/dev/sdc")
devices = [device1, device2, device3]
args = factory(report=True,
devices=devices,
)
b = batch.Batch([])
b.args = args
b._sort_rotational_disks()
assert len(b.args.devices) == 2
assert len(b.args.db_devices) == 1
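        # with one SSD (rotational=0) among two HDDs, _sort_rotational_disks
        # moves the SSD into db_devices, which is what the asserts above check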
def test_get_physical_osds_return_len(self, factory,
mock_devices_available,
conf_ceph_stub,
osds_per_device):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
args = factory(data_slots=1, osds_per_device=osds_per_device,
osd_ids=[], dmcrypt=False,
data_allocate_fraction=1.0)
osds = batch.get_physical_osds(mock_devices_available, args)
assert len(osds) == len(mock_devices_available) * osds_per_device
def test_get_physical_osds_rel_size(self, factory,
mock_devices_available,
conf_ceph_stub,
osds_per_device,
data_allocate_fraction):
args = factory(data_slots=1, osds_per_device=osds_per_device,
osd_ids=[], dmcrypt=False,
data_allocate_fraction=data_allocate_fraction)
osds = batch.get_physical_osds(mock_devices_available, args)
for osd in osds:
assert osd.data[1] == data_allocate_fraction / osds_per_device
def test_get_physical_osds_abs_size(self, factory,
mock_devices_available,
conf_ceph_stub,
osds_per_device,
data_allocate_fraction):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
args = factory(data_slots=1, osds_per_device=osds_per_device,
osd_ids=[], dmcrypt=False,
data_allocate_fraction=data_allocate_fraction)
osds = batch.get_physical_osds(mock_devices_available, args)
for osd, dev in zip(osds, mock_devices_available):
assert osd.data[2] == int(dev.vg_size[0] * (data_allocate_fraction / osds_per_device))
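            # worked example with illustrative numbers: a 10 GiB VG,
            # osds_per_device=2 and data_allocate_fraction=1.0 give each OSD
            # int(10737418240 * (1.0 / 2)) == 5368709120 bytes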
def test_get_physical_osds_osd_ids(self, factory,
mock_devices_available,
osds_per_device):
pass
def test_get_physical_fast_allocs_length(self, factory,
conf_ceph_stub,
mock_devices_available):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
args = factory(block_db_slots=None, get_block_db_size=None)
fast = batch.get_physical_fast_allocs(mock_devices_available,
'block_db', 2, 2, args)
assert len(fast) == 2
def test_get_physical_fast_allocs_abs_size(self, factory,
conf_ceph_stub,
mock_devices_available):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
args = factory(block_db_slots=None, get_block_db_size=None)
fasts = batch.get_physical_fast_allocs(mock_devices_available,
'block_db', 2, 2, args)
for fast, dev in zip(fasts, mock_devices_available):
assert fast[2] == int(dev.vg_size[0] / 2)
def test_get_physical_fast_allocs_abs_size_unused_devs(self, factory,
conf_ceph_stub,
mock_devices_available):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
args = factory(block_db_slots=None, get_block_db_size=None)
dev_size = 21474836480
vg_size = dev_size
for dev in mock_devices_available:
dev.vg_name = None
dev.vg_size = [vg_size]
dev.vg_free = dev.vg_size
dev.vgs = []
slots_per_device = 2
fasts = batch.get_physical_fast_allocs(mock_devices_available,
'block_db', slots_per_device, 2, args)
expected_slot_size = int(dev_size / slots_per_device)
for (_, _, slot_size, _) in fasts:
assert slot_size == expected_slot_size
def test_get_physical_fast_allocs_abs_size_multi_pvs_per_vg(self, factory,
conf_ceph_stub,
mock_devices_available):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
args = factory(block_db_slots=None, get_block_db_size=None)
dev_size = 21474836480
num_devices = len(mock_devices_available)
vg_size = dev_size * num_devices
vg_name = 'vg_foo'
for dev in mock_devices_available:
dev.vg_name = vg_name
dev.vg_size = [vg_size]
dev.vg_free = dev.vg_size
dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)]
slots_per_device = 2
slots_per_vg = slots_per_device * num_devices
fasts = batch.get_physical_fast_allocs(mock_devices_available,
'block_db', slots_per_device, 2, args)
expected_slot_size = int(vg_size / slots_per_vg)
for (_, _, slot_size, _) in fasts:
assert slot_size == expected_slot_size
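        # worked example with the sizes above: assuming 3 devices share the VG,
        # it holds 3 * 20 GiB = 64424509440 bytes across 3 * 2 = 6 slots, so
        # each slot gets int(64424509440 / 6) == 10737418240 bytes (10 GiB)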
def test_batch_fast_allocations_one_block_db_length(self, factory, conf_ceph_stub,
mock_lv_device_generator):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
b = batch.Batch([])
db_lv_devices = [mock_lv_device_generator()]
fast = b.fast_allocations(db_lv_devices, 1, 0, 'block_db')
assert len(fast) == 1
@pytest.mark.parametrize('occupied_prior', range(7))
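    # the expression below enumerates every (slots, num_devs) pair with
    # 1 <= num_devs <= slots <= 6, e.g. (1, 1), (2, 1), (2, 2), (3, 1), ...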
@pytest.mark.parametrize('slots,num_devs',
[l for sub in [list(zip([x]*x, range(1, x + 1))) for x in range(1,7)] for l in sub])
def test_get_physical_fast_allocs_length_existing(self,
num_devs,
slots,
occupied_prior,
factory,
conf_ceph_stub,
mock_device_generator):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
occupied_prior = min(occupied_prior, slots)
devs = [mock_device_generator() for _ in range(num_devs)]
already_assigned = 0
while already_assigned < occupied_prior:
dev_i = random.randint(0, num_devs - 1)
dev = devs[dev_i]
if len(dev.lvs) < occupied_prior:
dev.lvs.append('foo')
dev.path = '/dev/bar'
already_assigned = sum([len(d.lvs) for d in devs])
args = factory(block_db_slots=None, get_block_db_size=None)
expected_num_osds = max(len(devs) * slots - occupied_prior, 0)
fast = batch.get_physical_fast_allocs(devs,
'block_db', slots,
expected_num_osds, args)
assert len(fast) == expected_num_osds
expected_assignment_on_used_devices = sum([slots - len(d.lvs) for d in devs if len(d.lvs) > 0])
assert len([f for f in fast if f[0] == '/dev/bar']) == expected_assignment_on_used_devices
assert len([f for f in fast if f[0] != '/dev/bar']) == expected_num_osds - expected_assignment_on_used_devices
def test_get_lvm_osds_return_len(self, factory,
mock_lv_device_generator,
conf_ceph_stub,
osds_per_device):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
args = factory(data_slots=1, osds_per_device=osds_per_device,
osd_ids=[], dmcrypt=False)
mock_lvs = [mock_lv_device_generator()]
osds = batch.get_lvm_osds(mock_lvs, args)
assert len(osds) == 1
class TestBatchOsd(object):
def test_osd_class_ctor(self):
osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
assert osd.data == batch.Batch.OSD.VolSpec('/dev/data',
1,
'5G',
1,
'data')
def test_add_fast(self):
osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
osd.add_fast_device('/dev/db', 1, '5G', 1, 'block_db')
assert osd.fast == batch.Batch.OSD.VolSpec('/dev/db',
1,
'5G',
1,
'block_db')
def test_add_very_fast(self):
osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
osd.add_very_fast_device('/dev/wal', 1, '5G', 1)
assert osd.very_fast == batch.Batch.OSD.VolSpec('/dev/wal',
1,
'5G',
1,
'block_wal')
| 16,322 | 45.240793 | 122 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py
|
from ceph_volume.devices.lvm import common
class TestCommon(object):
def test_get_default_args_smoke(self):
default_args = common.get_default_args()
assert default_args
| 192 | 20.444444 | 48 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py
|
import pytest
from ceph_volume.devices import lvm
class TestCreate(object):
def test_main_spits_help_with_no_arguments(self, capsys):
lvm.create.Create([]).main()
stdout, stderr = capsys.readouterr()
assert 'Create an OSD by assigning an ID and FSID' in stdout
def test_main_shows_full_help(self, capsys):
with pytest.raises(SystemExit):
lvm.create.Create(argv=['--help']).main()
stdout, stderr = capsys.readouterr()
assert 'Use the bluestore objectstore' in stdout
assert 'A physical device or logical' in stdout
| 595 | 30.368421 | 68 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py
|
import pytest
from mock.mock import patch
from ceph_volume.api import lvm
from ceph_volume.devices.lvm import deactivate
class TestDeactivate(object):
@patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
def test_no_osd(self, p_get_lvs):
p_get_lvs.return_value = []
with pytest.raises(StopIteration):
deactivate.deactivate_osd(0)
@patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
@patch("ceph_volume.util.system.unmount_tmpfs")
def test_unmount_tmpfs_called_osd_id(self, p_u_tmpfs, p_get_lvs):
FooVolume = lvm.Volume(
lv_name='foo', lv_path='/dev/vg/foo',
lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data")
p_get_lvs.return_value = [FooVolume]
deactivate.deactivate_osd(0)
p_u_tmpfs.assert_called_with(
'/var/lib/ceph/osd/{}-{}'.format('foo', 0))
@patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
@patch("ceph_volume.util.system.unmount_tmpfs")
def test_unmount_tmpfs_called_osd_uuid(self, p_u_tmpfs, p_get_lvs):
FooVolume = lvm.Volume(
lv_name='foo', lv_path='/dev/vg/foo',
lv_tags="ceph.osd_fsid=0,ceph.osd_id=1,ceph.cluster_name=foo,ceph.type=data")
p_get_lvs.return_value = [FooVolume]
deactivate.deactivate_osd(None, 0)
p_u_tmpfs.assert_called_with(
'/var/lib/ceph/osd/{}-{}'.format('foo', 1))
@patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
@patch("ceph_volume.util.system.unmount_tmpfs")
@patch("ceph_volume.util.encryption.dmcrypt_close")
def test_no_crypt_no_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs):
FooVolume = lvm.Volume(
lv_name='foo', lv_path='/dev/vg/foo',
lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data")
p_get_lvs.return_value = [FooVolume]
deactivate.deactivate_osd(0)
@patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
@patch("ceph_volume.util.system.unmount_tmpfs")
@patch("ceph_volume.util.encryption.dmcrypt_close")
def test_crypt_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs):
FooVolume = lvm.Volume(
lv_name='foo', lv_path='/dev/vg/foo', lv_uuid='123',
lv_tags="ceph.osd_id=0,ceph.encrypted=1,ceph.cluster_name=foo,ceph.type=data")
p_get_lvs.return_value = [FooVolume]
deactivate.deactivate_osd(0)
p_dm_close.assert_called_with('123')
| 2,482 | 40.383333 | 90 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py
|
import pytest
from ceph_volume.devices import lvm
from ceph_volume.api import lvm as api
# TODO: add tests for following commands -
# ceph-volume list
# ceph-volume list <path-to-pv>
# ceph-volume list <path-to-vg>
# ceph-volume list <path-to-lv>
class TestReadableTag(object):
def test_dots_get_replaced(self):
result = lvm.listing.readable_tag('ceph.foo')
assert result == 'foo'
def test_underscores_are_replaced_with_spaces(self):
result = lvm.listing.readable_tag('ceph.long_tag')
assert result == 'long tag'
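    # combining the two behaviours asserted above, readable_tag('ceph.osd_fsid')
    # would render as 'osd fsid' (illustrative)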
class TestPrettyReport(object):
def test_is_empty(self, capsys):
lvm.listing.pretty_report({})
stdout, stderr = capsys.readouterr()
assert stdout == '\n'
def test_type_and_path_are_reported(self, capsys):
lvm.listing.pretty_report({0: [
{'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']}
]})
stdout, stderr = capsys.readouterr()
assert '[data] /dev/sda1' in stdout
def test_osd_id_header_is_reported(self, capsys):
lvm.listing.pretty_report({0: [
{'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']}
]})
stdout, stderr = capsys.readouterr()
assert '====== osd.0 =======' in stdout
def test_tags_are_included(self, capsys):
lvm.listing.pretty_report(
{0: [{
'type': 'data',
'path': '/dev/sda1',
'tags': {'ceph.osd_id': '0'},
'devices': ['/dev/sda'],
}]}
)
stdout, stderr = capsys.readouterr()
assert 'osd id' in stdout
def test_devices_are_comma_separated(self, capsys):
lvm.listing.pretty_report({0: [
{'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda', '/dev/sdb1']}
]})
stdout, stderr = capsys.readouterr()
assert '/dev/sda,/dev/sdb1' in stdout
class TestList(object):
def test_empty_full_json_zero_exit_status(self, fake_call, is_root, factory, capsys):
args = factory(format='json', device=None)
lvm.listing.List([]).list(args)
stdout, stderr = capsys.readouterr()
assert stdout == '{}\n'
    def test_empty_device_json_zero_exit_status(self, is_root, factory, capsys):
args = factory(format='json', device='/dev/sda1')
lvm.listing.List([]).list(args)
stdout, stderr = capsys.readouterr()
assert stdout == '{}\n'
def test_empty_full_zero_exit_status(self, fake_call, is_root, factory):
args = factory(format='pretty', device=None)
with pytest.raises(SystemExit):
lvm.listing.List([]).list(args)
def test_empty_device_zero_exit_status(self, is_root, factory):
args = factory(format='pretty', device='/dev/sda1')
with pytest.raises(SystemExit):
lvm.listing.List([]).list(args)
class TestFullReport(object):
def test_no_ceph_lvs(self, monkeypatch):
# ceph lvs are detected by looking into its tags
osd = api.Volume(lv_name='volume1', lv_path='/dev/VolGroup/lv',
lv_tags={})
volumes = []
volumes.append(osd)
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
volumes)
result = lvm.listing.List([]).full_report()
assert result == {}
def test_ceph_data_lv_reported(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000",
vg_name='VolGroup', lv_uuid="aaaa")
osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
volumes = []
volumes.append(osd)
monkeypatch.setattr(lvm.listing.api, 'get_single_pv', lambda **kwargs: pv)
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
volumes)
result = lvm.listing.List([]).full_report()
assert result['0'][0]['name'] == 'volume1'
def test_ceph_journal_lv_reported(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
journal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=journal'
pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000",
vg_name="VolGroup", lv_uuid="aaaa")
osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
journal = api.Volume(
lv_name='journal', lv_uuid='x', lv_tags=journal_tags,
lv_path='/dev/VolGroup/journal', vg_name='VolGroup')
volumes = []
volumes.append(osd)
volumes.append(journal)
        monkeypatch.setattr(lvm.listing.api, 'get_single_pv', lambda **kwargs: pv)
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
volumes)
result = lvm.listing.List([]).full_report()
assert result['0'][0]['name'] == 'volume1'
assert result['0'][1]['name'] == 'journal'
def test_ceph_wal_lv_reported(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=data'
wal_tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=wal'
osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
wal = api.Volume(lv_name='wal', lv_uuid='x', lv_tags=wal_tags,
lv_path='/dev/VolGroup/wal', vg_name='VolGroup')
volumes = []
volumes.append(osd)
volumes.append(wal)
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
volumes)
result = lvm.listing.List([]).full_report()
assert result['0'][0]['name'] == 'volume1'
assert result['0'][1]['name'] == 'wal'
@pytest.mark.parametrize('type_', ['journal', 'db', 'wal'])
def test_physical_2nd_device_gets_reported(self, type_, monkeypatch):
tags = ('ceph.osd_id=0,ceph.{t}_uuid=x,ceph.type=data,'
'ceph.{t}_device=/dev/sda1').format(t=type_)
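        # for type_ == 'db' the template above expands to:
        #   ceph.osd_id=0,ceph.db_uuid=x,ceph.type=data,ceph.db_device=/dev/sda1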
osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
vg_name='VolGroup', lv_path='/dev/VolGroup/lv')
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
[osd])
result = lvm.listing.List([]).full_report()
assert result['0'][1]['path'] == '/dev/sda1'
assert result['0'][1]['tags'] == {'PARTUUID': 'x'}
assert result['0'][1]['type'] == type_
class TestSingleReport(object):
def test_not_a_ceph_lv(self, monkeypatch):
# ceph lvs are detected by looking into its tags
lv = api.Volume(lv_name='lv', lv_tags={}, lv_path='/dev/VolGroup/lv',
vg_name='VolGroup')
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
[lv])
result = lvm.listing.List([]).single_report('VolGroup/lv')
assert result == {}
def test_report_a_ceph_lv(self, monkeypatch):
# ceph lvs are detected by looking into its tags
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
lv_path='/dev/VolGroup/lv', lv_tags=tags)
volumes = []
volumes.append(lv)
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
volumes)
result = lvm.listing.List([]).single_report('VolGroup/lv')
assert result['0'][0]['name'] == 'lv'
assert result['0'][0]['lv_tags'] == tags
assert result['0'][0]['path'] == '/dev/VolGroup/lv'
assert result['0'][0]['devices'] == []
def test_report_a_ceph_journal_device(self, monkeypatch):
# ceph lvs are detected by looking into its tags
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,' + \
'ceph.journal_device=/dev/sda1'
lv = api.Volume(lv_name='lv', lv_uuid='aaa', lv_tags=tags,
lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
[lv] if 'tags' in kwargs else [])
result = lvm.listing.List([]).single_report('/dev/sda1')
assert result['0'][0]['tags'] == {'PARTUUID': 'x'}
assert result['0'][0]['type'] == 'journal'
assert result['0'][0]['path'] == '/dev/sda1'
def test_report_a_ceph_lv_with_devices(self, monkeypatch):
pvolumes = []
tags = 'ceph.osd_id=0,ceph.type=data'
pv1 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sda1',
pv_uuid='', pv_tags={}, lv_uuid="aaaa")
pv2 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sdb1',
pv_uuid='', pv_tags={}, lv_uuid="aaaa")
pvolumes.append(pv1)
pvolumes.append(pv2)
volumes = []
lv = api.Volume(lv_name='lv', vg_name='VolGroup',lv_uuid='aaaa',
lv_path='/dev/VolGroup/lv', lv_tags=tags)
volumes.append(lv)
monkeypatch.setattr(lvm.listing.api, 'get_pvs', lambda **kwargs:
pvolumes)
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
volumes)
listing = lvm.listing.List([])
listing._pvs = [
{'lv_uuid': 'aaaa', 'pv_name': '/dev/sda1', 'pv_tags': '', 'pv_uuid': ''},
{'lv_uuid': 'aaaa', 'pv_name': '/dev/sdb1', 'pv_tags': '', 'pv_uuid': ''},
]
result = listing.single_report('VolGroup/lv')
assert result['0'][0]['name'] == 'lv'
assert result['0'][0]['lv_tags'] == tags
assert result['0'][0]['path'] == '/dev/VolGroup/lv'
assert result['0'][0]['devices'] == ['/dev/sda1', '/dev/sdb1']
def test_report_by_osd_id_for_just_block_dev(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=block'
lvs = [ api.Volume(lv_name='lv1', lv_tags=tags, lv_path='/dev/vg/lv1',
lv_uuid='aaaa', vg_name='vg')
]
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
listing = lvm.listing.List([])
result = listing.single_report(0)
assert result['0'][0]['name'] == 'lv1'
assert result['0'][0]['lv_tags'] == tags
assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
assert result['0'][0]['vg_name'] == 'vg'
def test_report_by_osd_id_for_just_data_dev(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
lvs = [ api.Volume(lv_name='lv1', lv_tags=tags, lv_path='/dev/vg/lv1',
lv_uuid='bbbb', vg_name='vg'),
]
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
listing = lvm.listing.List([])
result = listing.single_report(0)
assert result['0'][0]['name'] == 'lv1'
assert result['0'][0]['lv_tags'] == tags
assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
assert result['0'][0]['vg_name'] == 'vg'
def test_report_by_osd_id_for_just_block_wal_and_db_dev(self, monkeypatch):
tags1 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=block'
tags2 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal'
tags3 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=db'
lvs = [ api.Volume(lv_name='lv1', lv_tags=tags1, lv_path='/dev/vg/lv1',
lv_uuid='aaaa', vg_name='vg'),
api.Volume(lv_name='lv2', lv_tags=tags2, lv_path='/dev/vg/lv2',
lv_uuid='bbbb', vg_name='vg'),
api.Volume(lv_name='lv3', lv_tags=tags3, lv_path='/dev/vg/lv3',
lv_uuid='cccc', vg_name='vg'),
]
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
listing = lvm.listing.List([])
result = listing.single_report(0)
assert result['0'][0]['name'] == 'lv1'
assert result['0'][0]['lv_tags'] == tags1
assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
assert result['0'][0]['vg_name'] == 'vg'
assert result['0'][1]['name'] == 'lv2'
assert result['0'][1]['lv_tags'] == tags2
assert result['0'][1]['lv_path'] == '/dev/vg/lv2'
assert result['0'][1]['vg_name'] == 'vg'
assert result['0'][2]['name'] == 'lv3'
assert result['0'][2]['lv_tags'] == tags3
assert result['0'][2]['lv_path'] == '/dev/vg/lv3'
assert result['0'][2]['vg_name'] == 'vg'
def test_report_by_osd_id_for_data_and_journal_dev(self, monkeypatch):
tags1 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
tags2 = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=journal'
lvs = [ api.Volume(lv_name='lv1', lv_tags=tags1, lv_path='/dev/vg/lv1',
lv_uuid='aaaa', vg_name='vg'),
api.Volume(lv_name='lv2', lv_tags=tags2, lv_path='/dev/vg/lv2',
lv_uuid='bbbb', vg_name='vg'),
]
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: lvs)
listing = lvm.listing.List([])
result = listing.single_report(0)
assert result['0'][0]['name'] == 'lv1'
assert result['0'][0]['lv_tags'] == tags1
assert result['0'][0]['lv_path'] == '/dev/vg/lv1'
assert result['0'][0]['vg_name'] == 'vg'
assert result['0'][1]['name'] == 'lv2'
assert result['0'][1]['lv_tags'] == tags2
assert result['0'][1]['lv_path'] == '/dev/vg/lv2'
assert result['0'][1]['vg_name'] == 'vg'
def test_report_by_nonexistent_osd_id(self, monkeypatch):
lv = api.Volume(lv_name='lv', lv_tags={}, lv_path='/dev/VolGroup/lv',
vg_name='VolGroup')
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
[lv])
result = lvm.listing.List([]).single_report('1')
assert result == {}
def test_report_a_ceph_lv_with_no_matching_devices(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.type=data'
lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
lv_path='/dev/VolGroup/lv', lv_tags=tags)
volumes = []
volumes.append(lv)
monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
volumes)
listing = lvm.listing.List([])
listing._pvs = [
{'lv_uuid': 'ffff', 'pv_name': '/dev/sda1', 'pv_tags': '',
'pv_uuid': ''},
{'lv_uuid': 'ffff', 'pv_name': '/dev/sdb1', 'pv_tags': '',
'pv_uuid': ''}]
result = listing.single_report('VolGroup/lv')
assert result['0'][0]['name'] == 'lv'
assert result['0'][0]['lv_tags'] == tags
assert result['0'][0]['path'] == '/dev/VolGroup/lv'
assert result['0'][0]['devices'] == []
| 15,261 | 42.235127 | 89 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py
|
import pytest
from mock.mock import patch
from ceph_volume import process
from ceph_volume.api import lvm as api
from ceph_volume.devices.lvm import migrate
from ceph_volume.util.device import Device
from ceph_volume.util import system
class TestGetClusterName(object):
mock_volumes = []
def mock_get_lvs(self, *args, **kwargs):
return self.mock_volumes.pop(0)
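    # pop(0) makes mock_volumes a FIFO queue: each get_lvs() call consumes the
    # next scripted return value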
def test_cluster_found(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234,ceph.cluster_name=name_of_the_cluster'
vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
lv_path='/dev/VolGroup/lv1', lv_tags=tags)
self.mock_volumes = []
self.mock_volumes.append([vol])
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
result = migrate.get_cluster_name(osd_id='0', osd_fsid='1234')
assert "name_of_the_cluster" == result
def test_cluster_not_found(self, monkeypatch, capsys):
self.mock_volumes = []
self.mock_volumes.append([])
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
with pytest.raises(SystemExit) as error:
migrate.get_cluster_name(osd_id='0', osd_fsid='1234')
stdout, stderr = capsys.readouterr()
expected = 'Unexpected error, terminating'
assert expected in str(error.value)
expected = 'Unable to find any LV for source OSD: id:0 fsid:1234'
assert expected in stderr
class TestFindAssociatedDevices(object):
mock_volumes = []
def mock_get_lvs(self, *args, **kwargs):
return self.mock_volumes.pop(0)
mock_single_volumes = {}
def mock_get_single_lv(self, *args, **kwargs):
p = kwargs['filters']['lv_path']
return self.mock_single_volumes[p]
def test_lv_is_matched_id(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
lv_path='/dev/VolGroup/lv1', lv_tags=tags)
self.mock_volumes = []
self.mock_volumes.append([vol])
self.mock_volumes.append([vol])
self.mock_volumes.append([])
self.mock_volumes.append([])
self.mock_single_volumes = {'/dev/VolGroup/lv1': vol}
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234')
assert len(result) == 1
assert result[0][0].path == '/dev/VolGroup/lv1'
assert result[0][0].lvs == [vol]
assert result[0][1] == 'block'
def test_lv_is_matched_id2(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv1', lv_tags=tags)
tags2 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=wal,ceph.osd_fsid=1234'
vol2 = api.Volume(lv_name='volume2', lv_uuid='z', vg_name='vg',
lv_path='/dev/VolGroup/lv2', lv_tags=tags2)
self.mock_volumes = []
self.mock_volumes.append([vol])
self.mock_volumes.append([vol])
self.mock_volumes.append([])
self.mock_volumes.append([vol2])
self.mock_single_volumes = {'/dev/VolGroup/lv1': vol, '/dev/VolGroup/lv2': vol2}
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234')
assert len(result) == 2
for d in result:
if d[1] == 'block':
assert d[0].path == '/dev/VolGroup/lv1'
assert d[0].lvs == [vol]
elif d[1] == 'wal':
assert d[0].path == '/dev/VolGroup/lv2'
assert d[0].lvs == [vol2]
else:
assert False
def test_lv_is_matched_id3(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv1', lv_tags=tags)
tags2 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=wal,ceph.osd_fsid=1234'
vol2 = api.Volume(lv_name='volume2', lv_uuid='z', vg_name='vg',
lv_path='/dev/VolGroup/lv2', lv_tags=tags2)
tags3 = 'ceph.osd_id=0,ceph.journal_uuid=xx,ceph.type=db,ceph.osd_fsid=1234'
vol3 = api.Volume(lv_name='volume3', lv_uuid='z', vg_name='vg',
lv_path='/dev/VolGroup/lv3', lv_tags=tags3)
self.mock_volumes = []
self.mock_volumes.append([vol])
self.mock_volumes.append([vol])
self.mock_volumes.append([vol3])
self.mock_volumes.append([vol2])
self.mock_single_volumes = {'/dev/VolGroup/lv1': vol,
'/dev/VolGroup/lv2': vol2,
'/dev/VolGroup/lv3': vol3}
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
result = migrate.find_associated_devices(osd_id='0', osd_fsid='1234')
assert len(result) == 3
for d in result:
if d[1] == 'block':
assert d[0].path == '/dev/VolGroup/lv1'
assert d[0].lvs == [vol]
elif d[1] == 'wal':
assert d[0].path == '/dev/VolGroup/lv2'
assert d[0].lvs == [vol2]
elif d[1] == 'db':
assert d[0].path == '/dev/VolGroup/lv3'
assert d[0].lvs == [vol3]
else:
assert False
def test_lv_is_not_matched(self, monkeypatch, capsys):
self.mock_volumes = [None]
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
with pytest.raises(SystemExit) as error:
migrate.find_associated_devices(osd_id='1', osd_fsid='1234')
stdout, stderr = capsys.readouterr()
expected = 'Unexpected error, terminating'
assert expected in str(error.value)
expected = 'Unable to find any LV for source OSD: id:1 fsid:1234'
assert expected in stderr
class TestVolumeTagTracker(object):
mock_single_volumes = {}
def mock_get_single_lv(self, *args, **kwargs):
p = kwargs['filters']['lv_path']
return self.mock_single_volumes[p]
mock_process_input = []
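    # captures every command line handed to process.call so the tests can
    # assert on the exact lvchange invocations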
def mock_process(self, *args, **kwargs):
        self.mock_process_input.append(args[0])
return ('', '', 0)
def test_init(self, monkeypatch):
source_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
source_db_tags = 'ceph.osd_id=0,journal_uuid=x,ceph.type=db, osd_fsid=1234'
source_wal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal'
target_tags="ceph.a=1,ceph.b=2,c=3,ceph.d=4" # 'c' to be bypassed
devices=[]
data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol}
monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
data_device = Device(path = '/dev/VolGroup/lv1')
db_device = Device(path = '/dev/VolGroup/lv2')
wal_device = Device(path = '/dev/VolGroup/lv3')
devices.append([data_device, 'block'])
devices.append([db_device, 'db'])
devices.append([wal_device, 'wal'])
target = api.Volume(lv_name='target_name', lv_tags=target_tags,
lv_path='/dev/VolGroup/lv_target')
        t = migrate.VolumeTagTracker(devices, target)
assert 3 == len(t.old_target_tags)
assert data_device == t.data_device
assert 4 == len(t.old_data_tags)
assert 'data' == t.old_data_tags['ceph.type']
assert db_device == t.db_device
assert 2 == len(t.old_db_tags)
assert 'db' == t.old_db_tags['ceph.type']
assert wal_device == t.wal_device
assert 3 == len(t.old_wal_tags)
assert 'wal' == t.old_wal_tags['ceph.type']
def test_update_tags_when_lv_create(self, monkeypatch):
source_tags = \
'ceph.osd_id=0,ceph.journal_uuid=x,' \
'ceph.type=data,ceph.osd_fsid=1234'
source_db_tags = \
'ceph.osd_id=0,journal_uuid=x,ceph.type=db,' \
'osd_fsid=1234'
devices=[]
data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol}
monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
data_device = Device(path = '/dev/VolGroup/lv1')
db_device = Device(path = '/dev/VolGroup/lv2')
devices.append([data_device, 'block'])
devices.append([db_device, 'db'])
target = api.Volume(lv_name='target_name', lv_tags='',
lv_uuid='wal_uuid',
lv_path='/dev/VolGroup/lv_target')
        t = migrate.VolumeTagTracker(devices, target)
self.mock_process_input = []
t.update_tags_when_lv_create('wal')
assert 3 == len(self.mock_process_input)
assert ['lvchange',
'--addtag', 'ceph.wal_uuid=wal_uuid',
'--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target',
'/dev/VolGroup/lv1'] == self.mock_process_input[0]
        assert sorted(self.mock_process_input[1]) == sorted([
            'lvchange',
            '--addtag', 'ceph.osd_id=0',
            '--addtag', 'ceph.journal_uuid=x',
            '--addtag', 'ceph.type=wal',
            '--addtag', 'ceph.osd_fsid=1234',
            '--addtag', 'ceph.wal_uuid=wal_uuid',
            '--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target',
            '/dev/VolGroup/lv_target'])
assert ['lvchange',
'--addtag', 'ceph.wal_uuid=wal_uuid',
'--addtag', 'ceph.wal_device=/dev/VolGroup/lv_target',
'/dev/VolGroup/lv2'] == self.mock_process_input[2]
def test_remove_lvs(self, monkeypatch):
source_tags = \
'ceph.osd_id=0,ceph.journal_uuid=x,' \
'ceph.type=data,ceph.osd_fsid=1234,ceph.wal_uuid=aaaaa'
source_db_tags = \
'ceph.osd_id=0,journal_uuid=x,ceph.type=db,' \
'osd_fsid=1234,ceph.wal_device=aaaaa'
source_wal_tags = \
'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
'ceph.osd_id=0,ceph.type=wal'
devices=[]
data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol}
monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
data_device = Device(path = '/dev/VolGroup/lv1')
db_device = Device(path = '/dev/VolGroup/lv2')
wal_device = Device(path = '/dev/VolGroup/lv3')
devices.append([data_device, 'block'])
devices.append([db_device, 'db'])
devices.append([wal_device, 'wal'])
target = api.Volume(lv_name='target_name', lv_tags='',
lv_path='/dev/VolGroup/lv_target')
        t = migrate.VolumeTagTracker(devices, target)
device_to_remove = devices.copy()
self.mock_process_input = []
t.remove_lvs(device_to_remove, 'db')
assert 3 == len(self.mock_process_input)
assert ['lvchange',
'--deltag', 'ceph.wal_uuid=uuid',
'--deltag', 'ceph.wal_device=device',
'--deltag', 'ceph.osd_id=0',
'--deltag', 'ceph.type=wal',
'/dev/VolGroup/lv3'] == self.mock_process_input[0]
assert ['lvchange',
'--deltag', 'ceph.wal_uuid=aaaaa',
'/dev/VolGroup/lv1'] == self.mock_process_input[1]
assert ['lvchange',
'--deltag', 'ceph.wal_device=aaaaa',
'/dev/VolGroup/lv2'] == self.mock_process_input[2]
def test_replace_lvs(self, monkeypatch):
source_tags = \
'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
source_db_tags = \
'ceph.osd_id=0,ceph.type=db,ceph.osd_fsid=1234'
source_wal_tags = \
'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
'ceph.osd_id=0,ceph.type=wal'
devices=[]
data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2', lv_uuid='dbuuid', vg_name='vg',
lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3', lv_uuid='waluuid', vg_name='vg',
lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol}
monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
data_device = Device(path = '/dev/VolGroup/lv1')
db_device = Device(path = '/dev/VolGroup/lv2')
wal_device = Device(path = '/dev/VolGroup/lv3')
devices.append([data_device, 'block'])
devices.append([db_device, 'db'])
devices.append([wal_device, 'wal'])
target = api.Volume(lv_name='target_name',
lv_uuid='ttt',
lv_tags='ceph.tag_to_remove=aaa',
lv_path='/dev/VolGroup/lv_target')
        t = migrate.VolumeTagTracker(devices, target)
self.mock_process_input = []
t.replace_lvs(devices, 'db')
assert 5 == len(self.mock_process_input)
assert ['lvchange',
'--deltag', 'ceph.osd_id=0',
'--deltag', 'ceph.type=db',
'--deltag', 'ceph.osd_fsid=1234',
'/dev/VolGroup/lv2'] == self.mock_process_input[0]
assert ['lvchange',
'--deltag', 'ceph.wal_uuid=uuid',
'--deltag', 'ceph.wal_device=device',
'--deltag', 'ceph.osd_id=0',
'--deltag', 'ceph.type=wal',
'/dev/VolGroup/lv3'] == self.mock_process_input[1]
assert ['lvchange',
'--deltag', 'ceph.db_device=/dbdevice',
'--deltag', 'ceph.wal_uuid=wal_uuid',
'/dev/VolGroup/lv1'] == self.mock_process_input[2]
assert ['lvchange',
'--addtag', 'ceph.db_uuid=ttt',
'--addtag', 'ceph.db_device=/dev/VolGroup/lv_target',
'/dev/VolGroup/lv1'] == self.mock_process_input[3]
        assert sorted(self.mock_process_input[4]) == sorted([
            'lvchange',
            '--addtag', 'ceph.osd_id=0',
            '--addtag', 'ceph.osd_fsid=1234',
            '--addtag', 'ceph.type=db',
            '--addtag', 'ceph.db_uuid=ttt',
            '--addtag', 'ceph.db_device=/dev/VolGroup/lv_target',
            '/dev/VolGroup/lv_target'])
def test_undo(self, monkeypatch):
source_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,ceph.osd_fsid=1234'
source_db_tags = 'ceph.osd_id=0,journal_uuid=x,ceph.type=db, osd_fsid=1234'
source_wal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=wal'
target_tags=""
devices=[]
data_vol = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv2', lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/lv3', lv_tags=source_wal_tags)
self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol}
monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
data_device = Device(path = '/dev/VolGroup/lv1')
db_device = Device(path = '/dev/VolGroup/lv2')
wal_device = Device(path = '/dev/VolGroup/lv3')
devices.append([data_device, 'block'])
devices.append([db_device, 'db'])
devices.append([wal_device, 'wal'])
target = api.Volume(lv_name='target_name', lv_tags=target_tags,
lv_path='/dev/VolGroup/lv_target')
        t = migrate.VolumeTagTracker(devices, target)
        target.tags['ceph.a'] = 'aa'
        target.tags['ceph.b'] = 'bb'
        data_vol.tags['ceph.journal_uuid'] = 'z'
db_vol.tags.pop('ceph.type')
wal_vol.tags.clear()
assert 2 == len(target.tags)
assert 4 == len(data_vol.tags)
assert 1 == len(db_vol.tags)
self.mock_process_input = []
t.undo()
assert 0 == len(target.tags)
assert 4 == len(data_vol.tags)
assert 'x' == data_vol.tags['ceph.journal_uuid']
assert 2 == len(db_vol.tags)
assert 'db' == db_vol.tags['ceph.type']
assert 3 == len(wal_vol.tags)
assert 'wal' == wal_vol.tags['ceph.type']
assert 6 == len(self.mock_process_input)
assert 'lvchange' in self.mock_process_input[0]
assert '--deltag' in self.mock_process_input[0]
assert 'ceph.journal_uuid=z' in self.mock_process_input[0]
assert '/dev/VolGroup/lv1' in self.mock_process_input[0]
assert 'lvchange' in self.mock_process_input[1]
assert '--addtag' in self.mock_process_input[1]
assert 'ceph.journal_uuid=x' in self.mock_process_input[1]
assert '/dev/VolGroup/lv1' in self.mock_process_input[1]
assert 'lvchange' in self.mock_process_input[2]
assert '--deltag' in self.mock_process_input[2]
assert 'ceph.osd_id=0' in self.mock_process_input[2]
assert '/dev/VolGroup/lv2' in self.mock_process_input[2]
assert 'lvchange' in self.mock_process_input[3]
assert '--addtag' in self.mock_process_input[3]
assert 'ceph.type=db' in self.mock_process_input[3]
assert '/dev/VolGroup/lv2' in self.mock_process_input[3]
assert 'lvchange' in self.mock_process_input[4]
assert '--addtag' in self.mock_process_input[4]
assert 'ceph.type=wal' in self.mock_process_input[4]
assert '/dev/VolGroup/lv3' in self.mock_process_input[4]
assert 'lvchange' in self.mock_process_input[5]
assert '--deltag' in self.mock_process_input[5]
assert 'ceph.a=aa' in self.mock_process_input[5]
assert 'ceph.b=bb' in self.mock_process_input[5]
assert '/dev/VolGroup/lv_target' in self.mock_process_input[5]
class TestNew(object):
mock_volume = None
def mock_get_lv_by_fullname(self, *args, **kwargs):
return self.mock_volume
mock_process_input = []
def mock_process(self, *args, **kwargs):
        self.mock_process_input.append(args[0])
return ('', '', 0)
mock_single_volumes = {}
def mock_get_single_lv(self, *args, **kwargs):
p = kwargs['filters']['lv_path']
return self.mock_single_volumes[p]
mock_volumes = []
def mock_get_lvs(self, *args, **kwargs):
return self.mock_volumes.pop(0)
def test_newdb_non_root(self):
with pytest.raises(Exception) as error:
migrate.NewDB(argv=[
'--osd-id', '1',
'--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
'--target', 'vgname/new_db']).main()
expected = 'This command needs to be executed with sudo or as root'
assert expected in str(error.value)
@patch('os.getuid')
def test_newdb_not_target_lvm(self, m_getuid, capsys):
m_getuid.return_value = 0
with pytest.raises(SystemExit) as error:
migrate.NewDB(argv=[
'--osd-id', '1',
'--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
'--target', 'vgname/new_db']).main()
stdout, stderr = capsys.readouterr()
expected = 'Unable to attach new volume : vgname/new_db'
assert expected in str(error.value)
expected = 'Target path vgname/new_db is not a Logical Volume'
assert expected in stderr
@patch('os.getuid')
def test_newdb_already_in_use(self, m_getuid, monkeypatch, capsys):
m_getuid.return_value = 0
self.mock_volume = api.Volume(lv_name='volume1',
lv_uuid='y',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
                                      lv_tags='ceph.osd_id=5')  # this results in used_by_ceph being set
monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
with pytest.raises(SystemExit) as error:
migrate.NewDB(argv=[
'--osd-id', '1',
'--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
'--target', 'vgname/new_db']).main()
stdout, stderr = capsys.readouterr()
expected = 'Unable to attach new volume : vgname/new_db'
assert expected in str(error.value)
expected = 'Target Logical Volume is already used by ceph: vgname/new_db'
assert expected in stderr
@patch('os.getuid')
def test_newdb(self, m_getuid, monkeypatch, capsys):
m_getuid.return_value = 0
source_tags = \
'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
source_wal_tags = \
'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
'ceph.osd_id=0,ceph.type=wal'
data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='waluuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv3': wal_vol}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y',
vg_name='vg',
lv_path='/dev/VolGroup/target_volume',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: False)
# find_associated_devices will call get_lvs() 4 times,
# so the mocked results need to be arranged accordingly
self.mock_volumes = []
self.mock_volumes.append([data_vol, wal_vol])
self.mock_volumes.append([data_vol])
self.mock_volumes.append([])
self.mock_volumes.append([wal_vol])
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph_cluster')
monkeypatch.setattr(system, 'chown', lambda path: 0)
migrate.NewDB(argv=[
'--osd-id', '1',
'--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
'--target', 'vgname/new_db']).main()
n = len(self.mock_process_input)
assert n >= 5
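# expected command tail: drop the stale db_device tag from the data LV, add
# the new DB pointers to the data, target and WAL LVs, then attach the new
# DB via ceph-bluestore-tool bluefs-bdev-new-db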
assert self.mock_process_input[n - 5] == [
'lvchange',
'--deltag', 'ceph.db_device=/dbdevice',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n - 4] == [
'lvchange',
'--addtag', 'ceph.db_uuid=y',
'--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
'/dev/VolGroup/lv1']
# compare sorted copies: list.sort() sorts in place and returns None, so
# comparing the results of two .sort() calls would always pass vacuously
assert sorted(self.mock_process_input[n - 3]) == sorted([
'lvchange',
'--addtag', 'ceph.wal_uuid=uuid',
'--addtag', 'ceph.osd_id=0',
'--addtag', 'ceph.type=db',
'--addtag', 'ceph.osd_fsid=1234',
'--addtag', 'ceph.db_uuid=y',
'--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
'/dev/VolGroup/target_volume'])
assert self.mock_process_input[n - 2] == [
'lvchange',
'--addtag', 'ceph.db_uuid=y',
'--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
'/dev/VolGroup/lv3']
assert self.mock_process_input[n - 1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/ceph_cluster-1',
'--dev-target', '/dev/VolGroup/target_volume',
'--command', 'bluefs-bdev-new-db']
def test_newdb_active_systemd(self, is_root, monkeypatch, capsys):
source_tags = \
'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
source_wal_tags = \
'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
'ceph.osd_id=0,ceph.type=wal'
data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='waluuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv3': wal_vol}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y',
vg_name='vg',
lv_path='/dev/VolGroup/target_volume',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: True)
# find_associated_devices will call get_lvs() 4 times,
# so the mocked results need to be arranged accordingly
self.mock_volumes = []
self.mock_volumes.append([data_vol, wal_vol])
self.mock_volumes.append([data_vol])
self.mock_volumes.append([])
self.mock_volumes.append([wal_vol])
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph_cluster')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.NewDB(argv=[
'--osd-id', '1',
'--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
'--target', 'vgname/new_db'])
with pytest.raises(SystemExit) as error:
m.main()
stdout, stderr = capsys.readouterr()
assert 'Unable to attach new volume for OSD: 1' == str(error.value)
assert '--> OSD ID is running, stop it with: systemctl stop ceph-osd@1' == stderr.rstrip()
assert not stdout
def test_newdb_no_systemd(self, is_root, monkeypatch):
source_tags = \
'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
source_wal_tags = \
'ceph.wal_uuid=uuid,ceph.wal_device=device,' \
'ceph.osd_id=0,ceph.type=wal'
data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='waluuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv3': wal_vol}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y',
vg_name='vg',
lv_path='/dev/VolGroup/target_volume',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
# find_associated_devices will call get_lvs() 4 times,
# so the mocked results need to be arranged accordingly
self.mock_volumes = []
self.mock_volumes.append([data_vol, wal_vol])
self.mock_volumes.append([data_vol])
self.mock_volumes.append([])
self.mock_volumes.append([wal_vol])
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph_cluster')
monkeypatch.setattr(system, 'chown', lambda path: 0)
migrate.NewDB(argv=[
'--osd-id', '1',
'--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
'--target', 'vgname/new_db',
'--no-systemd']).main()
n = len(self.mock_process_input)
assert n >= 5
assert self.mock_process_input[n - 5] == [
'lvchange',
'--deltag', 'ceph.db_device=/dbdevice',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n - 4] == [
'lvchange',
'--addtag', 'ceph.db_uuid=y',
'--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
'/dev/VolGroup/lv1']
assert sorted(self.mock_process_input[n - 3]) == sorted([
'lvchange',
'--addtag', 'ceph.wal_uuid=uuid',
'--addtag', 'ceph.osd_id=0',
'--addtag', 'ceph.type=db',
'--addtag', 'ceph.osd_fsid=1234',
'--addtag', 'ceph.db_uuid=y',
'--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
'/dev/VolGroup/target_volume'])
assert self.mock_process_input[n - 2] == [
'lvchange',
'--addtag', 'ceph.db_uuid=y',
'--addtag', 'ceph.db_device=/dev/VolGroup/target_volume',
'/dev/VolGroup/lv3']
assert self.mock_process_input[n - 1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/ceph_cluster-1',
'--dev-target', '/dev/VolGroup/target_volume',
'--command', 'bluefs-bdev-new-db']
@patch('os.getuid')
def test_newwal(self, m_getuid, monkeypatch, capsys):
m_getuid.return_value = 0
source_tags = \
'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234'
data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol}
monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/target_volume',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", lambda id: False)
# find_associated_devices will call get_lvs() 4 times,
# so the mocked results need to be arranged accordingly
self.mock_volumes = []
self.mock_volumes.append([data_vol])
self.mock_volumes.append([data_vol])
self.mock_volumes.append([])
self.mock_volumes.append([])
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster')
monkeypatch.setattr(system, 'chown', lambda path: 0)
migrate.NewWAL(argv=[
'--osd-id', '2',
'--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
'--target', 'vgname/new_wal']).main()
n = len(self.mock_process_input)
assert n >= 3
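# expected command tail: point the data LV at the new WAL, tag the target
# LV itself, then attach it via ceph-bluestore-tool bluefs-bdev-new-wal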
assert self.mock_process_input[n - 3] == [
'lvchange',
'--addtag', 'ceph.wal_uuid=y',
'--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
'/dev/VolGroup/lv1']
assert sorted(self.mock_process_input[n - 2]) == sorted([
'lvchange',
'--addtag', 'ceph.osd_id=0',
'--addtag', 'ceph.type=wal',
'--addtag', 'ceph.osd_fsid=1234',
'--addtag', 'ceph.wal_uuid=y',
'--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
'/dev/VolGroup/target_volume'])
assert self.mock_process_input[n - 1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/cluster-2',
'--dev-target', '/dev/VolGroup/target_volume',
'--command', 'bluefs-bdev-new-wal']
def test_newwal_active_systemd(self, is_root, monkeypatch, capsys):
source_tags = \
'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234'
data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol}
monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/target_volume',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active", lambda id: True)
# find_associated_devices will call get_lvs() 4 times,
# so the mocked results need to be arranged accordingly
self.mock_volumes = []
self.mock_volumes.append([data_vol])
self.mock_volumes.append([data_vol])
self.mock_volumes.append([])
self.mock_volumes.append([])
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.NewWAL(argv=[
'--osd-id', '2',
'--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
'--target', 'vgname/new_wal'])
with pytest.raises(SystemExit) as error:
m.main()
stdout, stderr = capsys.readouterr()
assert 'Unable to attach new volume for OSD: 2' == str(error.value)
assert '--> OSD ID is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
assert not stdout
def test_newwal_no_systemd(self, is_root, monkeypatch):
source_tags = \
'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234'
data_vol = api.Volume(lv_name='volume1', lv_uuid='datauuid', vg_name='vg',
lv_path='/dev/VolGroup/lv1', lv_tags=source_tags)
self.mock_single_volumes = {'/dev/VolGroup/lv1': data_vol}
monkeypatch.setattr(migrate.api, 'get_single_lv', self.mock_get_single_lv)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
self.mock_volume = api.Volume(lv_name='target_volume1', lv_uuid='y', vg_name='vg',
lv_path='/dev/VolGroup/target_volume',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname', self.mock_get_lv_by_fullname)
# find_associated_devices will call get_lvs() 4 times,
# so the mocked results need to be arranged accordingly
self.mock_volumes = []
self.mock_volumes.append([data_vol])
self.mock_volumes.append([data_vol])
self.mock_volumes.append([])
self.mock_volumes.append([])
monkeypatch.setattr(migrate.api, 'get_lvs', self.mock_get_lvs)
monkeypatch.setattr(migrate, 'get_cluster_name', lambda osd_id, osd_fsid: 'cluster')
monkeypatch.setattr(system, 'chown', lambda path: 0)
migrate.NewWAL(argv=[
'--osd-id', '2',
'--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
'--target', 'vgname/new_wal',
'--no-systemd']).main()
n = len(self.mock_process_input)
assert n >= 3
assert self.mock_process_input[n - 3] == [
'lvchange',
'--addtag', 'ceph.wal_uuid=y',
'--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
'/dev/VolGroup/lv1']
assert sorted(self.mock_process_input[n - 2]) == sorted([
'lvchange',
'--addtag', 'ceph.osd_id=0',
'--addtag', 'ceph.type=wal',
'--addtag', 'ceph.osd_fsid=1234',
'--addtag', 'ceph.wal_uuid=y',
'--addtag', 'ceph.wal_device=/dev/VolGroup/target_volume',
'/dev/VolGroup/target_volume'])
assert self.mock_process_input[n - 1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/cluster-2',
'--dev-target', '/dev/VolGroup/target_volume',
'--command', 'bluefs-bdev-new-wal']
class TestMigrate(object):
def test_invalid_osd_id_passed(self, is_root):
with pytest.raises(SystemExit):
migrate.Migrate(argv=['--osd-fsid', '123', '--from', 'data', '--target', 'foo', '--osd-id', 'foo']).main()
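# Shared scaffolding, mirroring TestNew: canned volumes are served for LVM
# lookups and every process.call invocation is recorded for later asserts.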
mock_volume = None
def mock_get_lv_by_fullname(self, *args, **kwargs):
return self.mock_volume
mock_process_input = []
def mock_process(self, *args, **kwargs):
self.mock_process_input.append(args[0])
return ('', '', 0)
mock_single_volumes = {}
def mock_get_single_lv(self, *args, **kwargs):
p = kwargs['filters']['lv_path']
return self.mock_single_volumes[p]
mock_volumes = []
def mock_get_lvs(self, *args, **kwargs):
return self.mock_volumes.pop(0)
def test_get_source_devices(self, monkeypatch):
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234'
source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = api.Volume(lv_name='volume2', lv_uuid='y',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags='ceph.osd_id=5,ceph.osd_type=db')
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
argv = [
'--osd-id', '2',
'--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
'--from', 'data', 'wal',
'--target', 'vgname/new_wal'
]
m = migrate.Migrate(argv=argv)
m.args = m.make_parser('ceph-volume lvm migration', 'help').parse_args(argv)
res_devices = m.get_source_devices(devices)
assert 2 == len(res_devices)
assert devices[0] == res_devices[0]
assert devices[2] == res_devices[1]
argv = [
'--osd-id', '2',
'--osd-fsid', '55BD4219-16A7-4037-BC20-0F158EFCC83D',
'--from', 'db', 'wal', 'data',
'--target', 'vgname/new_wal'
]
m = migrate.Migrate(argv=argv)
m.args = m.make_parser('ceph-volume lvm migration', 'help').parse_args(argv)
res_devices = m.get_source_devices(devices)
assert 3 == len(res_devices)
assert devices[0] == res_devices[0]
assert devices[1] == res_devices[1]
assert devices[2] == res_devices[2]
def test_migrate_without_args(self, capsys):
help_msg = """
Moves BlueFS data from source volume(s) to the target one, source
volumes (except the main (i.e. data or block) one) are removed on
success. LVM volumes are permitted for Target only, both already
attached or new logical one. In the latter case it is attached to OSD
replacing one of the source devices. Following replacement rules apply
(in the order of precedence, stop on the first match):
* if source list has DB volume - target device replaces it.
* if source list has WAL volume - target device replace it.
* if source list has slow volume only - operation is not permitted,
requires explicit allocation via new-db/new-wal command.
Example calls for supported scenarios:
Moves BlueFS data from main device to LV already attached as DB:
ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/db
Moves BlueFS data from shared main device to LV which will be attached
as a new DB:
ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data --target vgname/new_db
Moves BlueFS data from DB device to new LV, DB is replaced:
ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db --target vgname/new_db
Moves BlueFS data from main and DB devices to new LV, DB is replaced:
ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db --target vgname/new_db
Moves BlueFS data from main, DB and WAL devices to new LV, WAL is
removed and DB is replaced:
ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from data db wal --target vgname/new_db
Moves BlueFS data from main, DB and WAL devices to main device, WAL
and DB are removed:
ceph-volume lvm migrate --osd-id 1 --osd-fsid <uuid> --from db wal --target vgname/data
"""
m = migrate.Migrate(argv=[])
m.main()
stdout, stderr = capsys.readouterr()
assert help_msg in stdout
assert not stderr
@patch('os.getuid')
def test_migrate_data_db_to_new_db(self, m_getuid, monkeypatch):
m_getuid.return_value = 0
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2_new',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: False)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'data', 'db', 'wal',
'--target', 'vgname/new_wal'])
m.main()
n = len(self.mock_process_input)
assert n >= 5
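# expected command tail: untag the source DB LV completely, repoint the data
# LV at the new DB LV, tag the new LV, then bluefs-bdev-migrate with block
# and block.db as sources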
assert self.mock_process_input[n-5] == [
'lvchange',
'--deltag', 'ceph.osd_id=2',
'--deltag', 'ceph.type=db',
'--deltag', 'ceph.osd_fsid=1234',
'--deltag', 'ceph.cluster_name=ceph',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'/dev/VolGroup/lv2']
assert self.mock_process_input[n-4] == [
'lvchange',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n-3] == [
'lvchange',
'--addtag', 'ceph.db_uuid=new-db-uuid',
'--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n-2] == [
'lvchange',
'--addtag', 'ceph.osd_id=2',
'--addtag', 'ceph.type=db',
'--addtag', 'ceph.osd_fsid=1234',
'--addtag', 'ceph.cluster_name=ceph',
'--addtag', 'ceph.db_uuid=new-db-uuid',
'--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
'/dev/VolGroup/lv2_new']
assert self.mock_process_input[n-1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/ceph-2',
'--dev-target', '/dev/VolGroup/lv2_new',
'--command', 'bluefs-bdev-migrate',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block.db']
def test_migrate_data_db_to_new_db_active_systemd(self, is_root, monkeypatch, capsys):
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2_new',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: True)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'data', 'db', 'wal',
'--target', 'vgname/new_wal'])
with pytest.raises(SystemExit) as error:
m.main()
stdout, stderr = capsys.readouterr()
assert 'Unable to migrate devices associated with OSD ID: 2' == str(error.value)
assert '--> OSD is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
assert not stdout
def test_migrate_data_db_to_new_db_no_systemd(self, is_root, monkeypatch):
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2_new',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'data', 'db', 'wal',
'--target', 'vgname/new_wal',
'--no-systemd'])
m.main()
n = len(self.mock_process_input)
assert n >= 5
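# same expected command tail as the systemd-enabled variant above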
assert self.mock_process_input[n-5] == [
'lvchange',
'--deltag', 'ceph.osd_id=2',
'--deltag', 'ceph.type=db',
'--deltag', 'ceph.osd_fsid=1234',
'--deltag', 'ceph.cluster_name=ceph',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'/dev/VolGroup/lv2']
assert self.mock_process_input[n-4] == [
'lvchange',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n-3] == [
'lvchange',
'--addtag', 'ceph.db_uuid=new-db-uuid',
'--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n-2] == [
'lvchange',
'--addtag', 'ceph.osd_id=2',
'--addtag', 'ceph.type=db',
'--addtag', 'ceph.osd_fsid=1234',
'--addtag', 'ceph.cluster_name=ceph',
'--addtag', 'ceph.db_uuid=new-db-uuid',
'--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
'/dev/VolGroup/lv2_new']
assert self.mock_process_input[n-1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/ceph-2',
'--dev-target', '/dev/VolGroup/lv2_new',
'--command', 'bluefs-bdev-migrate',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block.db']
@patch('os.getuid')
def test_migrate_data_db_to_new_db_skip_wal(self, m_getuid, monkeypatch):
m_getuid.return_value = 0
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2_new',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: False)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'data', 'db',
'--target', 'vgname/new_wal'])
m.main()
n = len(self.mock_process_input)
assert n >= 7
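# WAL stays attached here (--from lists only data and db), so the WAL LV
# merely gets its DB pointer tags swapped and block.wal is not a source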
assert self.mock_process_input[n-7] == [
'lvchange',
'--deltag', 'ceph.osd_id=2',
'--deltag', 'ceph.type=db',
'--deltag', 'ceph.osd_fsid=1234',
'--deltag', 'ceph.cluster_name=ceph',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'/dev/VolGroup/lv2']
assert self.mock_process_input[n-6] == [
'lvchange',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n-5] == [
'lvchange',
'--addtag', 'ceph.db_uuid=new-db-uuid',
'--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n-4] == [
'lvchange',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'/dev/VolGroup/lv3']
assert self.mock_process_input[n-3] == [
'lvchange',
'--addtag', 'ceph.db_uuid=new-db-uuid',
'--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
'/dev/VolGroup/lv3']
assert self.mock_process_input[n-2] == [
'lvchange',
'--addtag', 'ceph.osd_id=2',
'--addtag', 'ceph.type=db',
'--addtag', 'ceph.osd_fsid=1234',
'--addtag', 'ceph.cluster_name=ceph',
'--addtag', 'ceph.db_uuid=new-db-uuid',
'--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
'/dev/VolGroup/lv2_new']
assert self.mock_process_input[n-1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/ceph-2',
'--dev-target', '/dev/VolGroup/lv2_new',
'--command', 'bluefs-bdev-migrate',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block.db']
@patch('os.getuid')
def test_migrate_data_db_wal_to_new_db(self, m_getuid, monkeypatch):
m_getuid.return_value = 0
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_wal_tags = 'ceph.osd_id=0,ceph.type=wal,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='waluuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2_new',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: False)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'data', 'db', 'wal',
'--target', 'vgname/new_wal'])
m.main()
n = len(self.mock_process_input)
assert n >= 6
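# WAL is among the sources, so its LV is untagged entirely and block.wal is
# migrated alongside block and block.db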
assert self.mock_process_input[n-6] == [
'lvchange',
'--deltag', 'ceph.osd_id=2',
'--deltag', 'ceph.type=db',
'--deltag', 'ceph.osd_fsid=1234',
'--deltag', 'ceph.cluster_name=ceph',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'/dev/VolGroup/lv2']
assert self.mock_process_input[n-5] == [
'lvchange',
'--deltag', 'ceph.osd_id=0',
'--deltag', 'ceph.type=wal',
'--deltag', 'ceph.osd_fsid=1234',
'--deltag', 'ceph.cluster_name=ceph',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'--deltag', 'ceph.wal_uuid=waluuid',
'--deltag', 'ceph.wal_device=wal_dev',
'/dev/VolGroup/lv3']
assert self.mock_process_input[n-4] == [
'lvchange',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'--deltag', 'ceph.wal_uuid=waluuid',
'--deltag', 'ceph.wal_device=wal_dev',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n-3] == [
'lvchange',
'--addtag', 'ceph.db_uuid=new-db-uuid',
'--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n-2] == [
'lvchange',
'--addtag', 'ceph.osd_id=2',
'--addtag', 'ceph.type=db',
'--addtag', 'ceph.osd_fsid=1234',
'--addtag', 'ceph.cluster_name=ceph',
'--addtag', 'ceph.db_uuid=new-db-uuid',
'--addtag', 'ceph.db_device=/dev/VolGroup/lv2_new',
'/dev/VolGroup/lv2_new']
assert self.mock_process_input[n-1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/ceph-2',
'--dev-target', '/dev/VolGroup/lv2_new',
'--command', 'bluefs-bdev-migrate',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block.db',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal']
@patch('os.getuid')
def test_dont_migrate_data_db_wal_to_new_data(self,
m_getuid,
monkeypatch,
capsys):
m_getuid.return_value = 0
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = api.Volume(lv_name='volume2_new', lv_uuid='new-db-uuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2_new',
lv_tags='')
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: False)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'data',
'--target', 'vgname/new_data'])
with pytest.raises(SystemExit) as error:
m.main()
stdout, stderr = capsys.readouterr()
expected = 'Unable to migrate to : vgname/new_data'
assert expected in str(error.value)
expected = 'Unable to determine new volume type,' \
' please use new-db or new-wal command before.'
assert expected in stderr
@patch('os.getuid')
def test_dont_migrate_db_to_wal(self,
m_getuid,
monkeypatch,
capsys):
m_getuid.return_value = 0
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='waluuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = wal_vol
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: False)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'db',
'--target', 'vgname/wal'])
with pytest.raises(SystemExit) as error:
m.main()
stdout, stderr = capsys.readouterr()
expected = 'Unable to migrate to : vgname/wal'
assert expected in str(error.value)
expected = 'Migrate to WAL is not supported'
assert expected in stderr
@patch('os.getuid')
def test_migrate_data_db_to_db(self,
m_getuid,
monkeypatch,
capsys):
m_getuid.return_value = 0
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='waluuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = db_vol
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: False)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'db', 'data',
'--target', 'vgname/db'])
m.main()
n = len(self.mock_process_input)
assert n >= 1
for s in self.mock_process_input:
print(s)
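# when the target DB is already attached, the final recorded command is the
# bluefs-bdev-migrate call with the main device as the single source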
assert self.mock_process_input[n-1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/ceph-2',
'--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
'--command', 'bluefs-bdev-migrate',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block']
def test_migrate_data_db_to_db_active_systemd(self, is_root, monkeypatch, capsys):
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='waluuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = db_vol
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: True)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'db', 'data',
'--target', 'vgname/db'])
with pytest.raises(SystemExit) as error:
m.main()
stdout, stderr = capsys.readouterr()
assert 'Unable to migrate devices associated with OSD ID: 2' == str(error.value)
assert '--> OSD is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
assert not stdout
def test_migrate_data_db_to_db_no_systemd(self, is_root, monkeypatch):
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='waluuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = db_vol
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'db', 'data',
'--target', 'vgname/db',
'--no-systemd'])
m.main()
n = len(self.mock_process_input)
assert n >= 1
for s in self.mock_process_input:
print(s)
assert self.mock_process_input[n-1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/ceph-2',
'--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
'--command', 'bluefs-bdev-migrate',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block']
@patch('os.getuid')
def test_migrate_data_wal_to_db(self,
m_getuid,
monkeypatch,
capsys):
m_getuid.return_value = 0
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='waluuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = db_vol
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: False)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'db', 'data', 'wal',
'--target', 'vgname/db'])
m.main()
n = len(self.mock_process_input)
assert n >= 1
for s in self.mock_process_input:
print(s)
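# the WAL is removed: its LV is untagged, wal pointers are dropped from the
# data and DB LVs, and block plus block.wal are migrated into the DB volume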
assert self.mock_process_input[n-4] == [
'lvchange',
'--deltag', 'ceph.osd_id=2',
'--deltag', 'ceph.type=wal',
'--deltag', 'ceph.osd_fsid=1234',
'--deltag', 'ceph.cluster_name=ceph',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'--deltag', 'ceph.wal_uuid=waluuid',
'--deltag', 'ceph.wal_device=wal_dev',
'/dev/VolGroup/lv3']
assert self.mock_process_input[n-3] == [
'lvchange',
'--deltag', 'ceph.wal_uuid=waluuid',
'--deltag', 'ceph.wal_device=wal_dev',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n-2] == [
'lvchange',
'--deltag', 'ceph.wal_uuid=waluuid',
'--deltag', 'ceph.wal_device=wal_dev',
'/dev/VolGroup/lv2']
assert self.mock_process_input[n-1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/ceph-2',
'--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
'--command', 'bluefs-bdev-migrate',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal']
def test_migrate_data_wal_to_db_active_systemd(self, is_root, monkeypatch, capsys):
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='waluuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = db_vol
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr("ceph_volume.systemd.systemctl.osd_is_active",
lambda id: True)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'db', 'data', 'wal',
'--target', 'vgname/db'])
with pytest.raises(SystemExit) as error:
m.main()
stdout, stderr = capsys.readouterr()
assert 'Unable to migrate devices associated with OSD ID: 2' == str(error.value)
assert '--> OSD is running, stop it with: systemctl stop ceph-osd@2' == stderr.rstrip()
assert not stdout
def test_migrate_data_wal_to_db_no_systemd(self, is_root, monkeypatch):
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
source_wal_tags = 'ceph.osd_id=2,ceph.type=wal,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
data_vol = api.Volume(lv_name='volume1',
lv_uuid='datauuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv1',
lv_tags=source_tags)
db_vol = api.Volume(lv_name='volume2',
lv_uuid='dbuuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv2',
lv_tags=source_db_tags)
wal_vol = api.Volume(lv_name='volume3',
lv_uuid='waluuid',
vg_name='vg',
lv_path='/dev/VolGroup/lv3',
lv_tags=source_wal_tags)
self.mock_single_volumes = {
'/dev/VolGroup/lv1': data_vol,
'/dev/VolGroup/lv2': db_vol,
'/dev/VolGroup/lv3': wal_vol,
}
monkeypatch.setattr(migrate.api, 'get_single_lv',
self.mock_get_single_lv)
self.mock_volume = db_vol
monkeypatch.setattr(api, 'get_lv_by_fullname',
self.mock_get_lv_by_fullname)
self.mock_process_input = []
monkeypatch.setattr(process, 'call', self.mock_process)
devices = []
devices.append([Device('/dev/VolGroup/lv1'), 'block'])
devices.append([Device('/dev/VolGroup/lv2'), 'db'])
devices.append([Device('/dev/VolGroup/lv3'), 'wal'])
monkeypatch.setattr(migrate, 'find_associated_devices',
lambda osd_id, osd_fsid: devices)
monkeypatch.setattr(migrate, 'get_cluster_name',
lambda osd_id, osd_fsid: 'ceph')
monkeypatch.setattr(system, 'chown', lambda path: 0)
m = migrate.Migrate(argv=[
'--osd-id', '2',
'--osd-fsid', '1234',
'--from', 'db', 'data', 'wal',
'--target', 'vgname/db',
'--no-systemd'])
m.main()
n = len(self.mock_process_input)
assert n >= 1
for s in self.mock_process_input:
print(s)
assert self.mock_process_input[n-4] == [
'lvchange',
'--deltag', 'ceph.osd_id=2',
'--deltag', 'ceph.type=wal',
'--deltag', 'ceph.osd_fsid=1234',
'--deltag', 'ceph.cluster_name=ceph',
'--deltag', 'ceph.db_uuid=dbuuid',
'--deltag', 'ceph.db_device=db_dev',
'--deltag', 'ceph.wal_uuid=waluuid',
'--deltag', 'ceph.wal_device=wal_dev',
'/dev/VolGroup/lv3']
assert self.mock_process_input[n-3] == [
'lvchange',
'--deltag', 'ceph.wal_uuid=waluuid',
'--deltag', 'ceph.wal_device=wal_dev',
'/dev/VolGroup/lv1']
assert self.mock_process_input[n-2] == [
'lvchange',
'--deltag', 'ceph.wal_uuid=waluuid',
'--deltag', 'ceph.wal_device=wal_dev',
'/dev/VolGroup/lv2']
assert self.mock_process_input[n-1] == [
'ceph-bluestore-tool',
'--path', '/var/lib/ceph/osd/ceph-2',
'--dev-target', '/var/lib/ceph/osd/ceph-2/block.db',
'--command', 'bluefs-bdev-migrate',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block.wal']
# ceph-main/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
import pytest
from ceph_volume.devices import lvm
from ceph_volume.api import lvm as api
from mock.mock import patch, Mock
class TestLVM(object):
def test_main_spits_help_with_no_arguments(self, capsys):
lvm.main.LVM([]).main()
stdout, stderr = capsys.readouterr()
assert 'Use LVM and LVM-based technologies to deploy' in stdout
def test_main_shows_activate_subcommands(self, capsys):
lvm.main.LVM([]).main()
stdout, stderr = capsys.readouterr()
assert 'activate ' in stdout
assert 'Discover and mount' in stdout
def test_main_shows_prepare_subcommands(self, capsys):
lvm.main.LVM([]).main()
stdout, stderr = capsys.readouterr()
assert 'prepare ' in stdout
assert 'Format an LVM device' in stdout
class TestPrepareDevice(object):
def test_cannot_use_device(self, factory):
args = factory(data='/dev/var/foo')
with pytest.raises(RuntimeError) as error:
p = lvm.prepare.Prepare([])
p.args = args
p.prepare_data_device('data', '0')
assert 'Cannot use device (/dev/var/foo)' in str(error.value)
assert 'A vg/lv path or an existing device is needed' in str(error.value)
class TestGetClusterFsid(object):
def test_fsid_is_passed_in(self, factory):
args = factory(cluster_fsid='aaaa-1111')
prepare_obj = lvm.prepare.Prepare([])
prepare_obj.args = args
assert prepare_obj.get_cluster_fsid() == 'aaaa-1111'
def test_fsid_is_read_from_ceph_conf(self, factory, conf_ceph_stub):
conf_ceph_stub('[global]\nfsid = bbbb-2222')
prepare_obj = lvm.prepare.Prepare([])
prepare_obj.args = factory(cluster_fsid=None)
assert prepare_obj.get_cluster_fsid() == 'bbbb-2222'
class TestPrepare(object):
def test_main_spits_help_with_no_arguments(self, capsys):
lvm.prepare.Prepare([]).main()
stdout, stderr = capsys.readouterr()
assert 'Prepare an OSD by assigning an ID and FSID' in stdout
def test_main_shows_full_help(self, capsys):
with pytest.raises(SystemExit):
lvm.prepare.Prepare(argv=['--help']).main()
stdout, stderr = capsys.readouterr()
assert 'Use the bluestore objectstore' in stdout
assert 'A physical device or logical' in stdout
@patch('ceph_volume.devices.lvm.prepare.api.is_ceph_device')
def test_safe_prepare_osd_already_created(self, m_is_ceph_device):
m_is_ceph_device.return_value = True
with pytest.raises(RuntimeError) as error:
prepare = lvm.prepare.Prepare(argv=[])
prepare.args = Mock()
prepare.args.data = '/dev/sdfoo'
prepare.get_lv = Mock()
prepare.safe_prepare()
expected = 'skipping {}, it is already prepared'.format('/dev/sdfoo')
assert expected in str(error.value)
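# setup_device() resolves a device spec (LV name, raw device or partition)
# into a (path, uuid, tags) triple; the next tests cover each input form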
def test_setup_device_device_name_is_none(self):
result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name=None, tags={'ceph.type': 'data'}, size=0, slots=None)
assert result == ('', '', {'ceph.type': 'data'})
@patch('ceph_volume.api.lvm.Volume.set_tags')
@patch('ceph_volume.devices.lvm.prepare.api.get_single_lv')
def test_setup_device_lv_passed(self, m_get_single_lv, m_set_tags):
fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
m_get_single_lv.return_value = fake_volume
result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='vg_foo/lv_foo', tags={'ceph.type': 'data'}, size=0, slots=None)
assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
'ceph.vdo': '0',
'ceph.data_uuid': 'fake-uuid',
'ceph.data_device': '/fake-path'})
@patch('ceph_volume.devices.lvm.prepare.api.create_lv')
@patch('ceph_volume.api.lvm.Volume.set_tags')
@patch('ceph_volume.util.disk.is_device')
def test_setup_device_device_passed(self, m_is_device, m_set_tags, m_create_lv):
fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
m_is_device.return_value = True
m_create_lv.return_value = fake_volume
result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
'ceph.vdo': '0',
'ceph.data_uuid': 'fake-uuid',
'ceph.data_device': '/fake-path'})
@patch('ceph_volume.devices.lvm.prepare.Prepare.get_ptuuid')
@patch('ceph_volume.devices.lvm.prepare.api.get_single_lv')
def test_setup_device_partition_passed(self, m_get_single_lv, m_get_ptuuid):
m_get_single_lv.side_effect = ValueError()
m_get_ptuuid.return_value = 'fake-uuid'
result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
assert result == ('/dev/sdx', 'fake-uuid', {'ceph.type': 'data',
'ceph.vdo': '0',
'ceph.data_uuid': 'fake-uuid',
'ceph.data_device': '/dev/sdx'})
def test_invalid_osd_id_passed(self):
with pytest.raises(SystemExit):
lvm.prepare.Prepare(argv=['--osd-id', 'foo']).main()
class TestActivate(object):
def test_main_spits_help_with_no_arguments(self, capsys):
lvm.activate.Activate([]).main()
stdout, stderr = capsys.readouterr()
assert 'Activate OSDs by discovering them with' in stdout
def test_main_shows_full_help(self, capsys):
with pytest.raises(SystemExit):
lvm.activate.Activate(argv=['--help']).main()
stdout, stderr = capsys.readouterr()
assert 'optional arguments' in stdout
assert 'positional arguments' in stdout
# ceph-main/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py
import pytest
from ceph_volume import exceptions
from ceph_volume.devices.lvm import trigger
class TestParseOSDid(object):
def test_no_id_found_if_no_digit(self):
with pytest.raises(exceptions.SuffixParsingError):
trigger.parse_osd_id('asdlj-ljahsdfaslkjhdfa')
def test_no_id_found(self):
with pytest.raises(exceptions.SuffixParsingError):
trigger.parse_osd_id('ljahsdfaslkjhdfa')
def test_id_found(self):
result = trigger.parse_osd_id('1-ljahsdfaslkjhdfa')
assert result == '1'
class TestParseOSDUUID(object):
def test_uuid_is_parsed(self):
result = trigger.parse_osd_uuid('1-asdf-ljkh-asdf-ljkh-asdf')
assert result == 'asdf-ljkh-asdf-ljkh-asdf'
def test_uuid_is_parsed_longer_sha1(self):
result = trigger.parse_osd_uuid('1-foo-bar-asdf-ljkh-asdf-ljkh-asdf')
assert result == 'foo-bar-asdf-ljkh-asdf-ljkh-asdf'
def test_uuid_is_not_found(self):
with pytest.raises(exceptions.SuffixParsingError):
trigger.parse_osd_uuid('ljahsdfaslkjhdfa')
def test_uuid_is_not_found_missing_id(self):
with pytest.raises(exceptions.SuffixParsingError):
trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo')
def test_robust_double_id_in_uuid(self):
        # the OSD id may also appear inside the SHA1/UUID portion; parsing
        # should still extract the full UUID
result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed")
assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed'
| 1,543 | 32.565217 | 81 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
|
import os
import pytest
from copy import deepcopy
from mock.mock import patch, call
from ceph_volume import process
from ceph_volume.api import lvm as api
from ceph_volume.devices.lvm import zap
class TestZap(object):
def test_invalid_osd_id_passed(self):
with pytest.raises(SystemExit):
zap.Zap(argv=['--osd-id', 'foo']).main()
class TestFindAssociatedDevices(object):
def test_no_lvs_found_that_match_id(self, monkeypatch, device_info):
tags = 'ceph.osd_id=9,ceph.journal_uuid=x,ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
lv_tags=tags, lv_path='/dev/VolGroup/lv')
volumes = []
volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
with pytest.raises(RuntimeError):
zap.find_associated_devices(osd_id=10)
def test_no_lvs_found_that_match_fsid(self, monkeypatch, device_info):
tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
'ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
vg_name='vg', lv_path='/dev/VolGroup/lv')
volumes = []
volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
with pytest.raises(RuntimeError):
zap.find_associated_devices(osd_fsid='aaaa-lkjh')
def test_no_lvs_found_that_match_id_fsid(self, monkeypatch, device_info):
tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
'ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
lv_tags=tags, lv_path='/dev/VolGroup/lv')
volumes = []
volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
with pytest.raises(RuntimeError):
zap.find_associated_devices(osd_id='9', osd_fsid='aaaa-lkjh')
def test_no_ceph_lvs_found(self, monkeypatch):
osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags='',
lv_path='/dev/VolGroup/lv')
volumes = []
volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
with pytest.raises(RuntimeError):
zap.find_associated_devices(osd_id=100)
def test_lv_is_matched_id(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
lv_path='/dev/VolGroup/lv', lv_tags=tags)
volumes = []
volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
result = zap.find_associated_devices(osd_id='0')
assert result[0].path == '/dev/VolGroup/lv'
def test_lv_is_matched_fsid(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
'ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
lv_path='/dev/VolGroup/lv', lv_tags=tags)
volumes = []
volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: deepcopy(volumes))
monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
result = zap.find_associated_devices(osd_fsid='asdf-lkjh')
assert result[0].path == '/dev/VolGroup/lv'
def test_lv_is_matched_id_fsid(self, monkeypatch):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
'ceph.type=data'
osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
lv_path='/dev/VolGroup/lv', lv_tags=tags)
volumes = []
volumes.append(osd)
monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
result = zap.find_associated_devices(osd_id='0', osd_fsid='asdf-lkjh')
assert result[0].path == '/dev/VolGroup/lv'
class TestEnsureAssociatedLVs(object):
def test_nothing_is_found(self):
volumes = []
result = zap.ensure_associated_lvs(volumes)
assert result == []
def test_data_is_found(self, fake_call):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=data'
osd = api.Volume(
lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/data', lv_tags=tags)
volumes = []
volumes.append(osd)
result = zap.ensure_associated_lvs(volumes)
assert result == ['/dev/VolGroup/data']
def test_block_is_found(self, fake_call):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
osd = api.Volume(
lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
volumes = []
volumes.append(osd)
result = zap.ensure_associated_lvs(volumes)
assert result == ['/dev/VolGroup/block']
def test_success_message_for_fsid(self, factory, is_root, capsys):
cli_zap = zap.Zap([])
args = factory(devices=[], osd_id=None, osd_fsid='asdf-lkjh')
cli_zap.args = args
cli_zap.zap()
out, err = capsys.readouterr()
assert "Zapping successful for OSD: asdf-lkjh" in err
def test_success_message_for_id(self, factory, is_root, capsys):
cli_zap = zap.Zap([])
args = factory(devices=[], osd_id='1', osd_fsid=None)
cli_zap.args = args
cli_zap.zap()
out, err = capsys.readouterr()
assert "Zapping successful for OSD: 1" in err
def test_block_and_partition_are_found(self, monkeypatch):
monkeypatch.setattr(zap.disk, 'get_device_from_partuuid', lambda x: '/dev/sdb1')
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
osd = api.Volume(
lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
volumes = []
volumes.append(osd)
result = zap.ensure_associated_lvs(volumes)
assert '/dev/sdb1' in result
assert '/dev/VolGroup/block' in result
def test_journal_is_found(self, fake_call):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
osd = api.Volume(
lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv', lv_tags=tags)
volumes = []
volumes.append(osd)
result = zap.ensure_associated_lvs(volumes)
assert result == ['/dev/VolGroup/lv']
def test_multiple_journals_are_found(self):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
volumes = []
for i in range(3):
osd = api.Volume(
lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
volumes.append(osd)
result = zap.ensure_associated_lvs(volumes)
assert '/dev/VolGroup/lv0' in result
assert '/dev/VolGroup/lv1' in result
assert '/dev/VolGroup/lv2' in result
def test_multiple_dbs_are_found(self):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=db'
volumes = []
for i in range(3):
osd = api.Volume(
lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
volumes.append(osd)
result = zap.ensure_associated_lvs(volumes)
assert '/dev/VolGroup/lv0' in result
assert '/dev/VolGroup/lv1' in result
assert '/dev/VolGroup/lv2' in result
def test_multiple_wals_are_found(self):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=wal'
volumes = []
for i in range(3):
osd = api.Volume(
lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
volumes.append(osd)
result = zap.ensure_associated_lvs(volumes)
assert '/dev/VolGroup/lv0' in result
assert '/dev/VolGroup/lv1' in result
assert '/dev/VolGroup/lv2' in result
def test_multiple_backing_devs_are_found(self):
volumes = []
for _type in ['journal', 'db', 'wal']:
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=%s' % _type
osd = api.Volume(
lv_name='volume%s' % _type, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % _type, lv_tags=tags)
volumes.append(osd)
result = zap.ensure_associated_lvs(volumes)
assert '/dev/VolGroup/lvjournal' in result
assert '/dev/VolGroup/lvwal' in result
assert '/dev/VolGroup/lvdb' in result
@patch('ceph_volume.devices.lvm.zap.api.get_lvs')
def test_ensure_associated_lvs(self, m_get_lvs):
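        # when lv_tags are passed, ensure_associated_lvs must query LVs by tag
        # for each supporting device type (journal, db, wal)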
zap.ensure_associated_lvs([], lv_tags={'ceph.osd_id': '1'})
calls = [
call(tags={'ceph.type': 'journal', 'ceph.osd_id': '1'}),
call(tags={'ceph.type': 'db', 'ceph.osd_id': '1'}),
call(tags={'ceph.type': 'wal', 'ceph.osd_id': '1'})
]
m_get_lvs.assert_has_calls(calls, any_order=True)
class TestWipeFs(object):
def setup_method(self):
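        # CEPH_VOLUME_WIPEFS_INTERVAL controls the sleep between wipefs
        # retries; zero it out so these tests do not wait between attempts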
os.environ['CEPH_VOLUME_WIPEFS_INTERVAL'] = '0'
def test_works_on_second_try(self, stub_call):
os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
stub_call([('wiping /dev/sda', '', 1), ('', '', 0)])
result = zap.wipefs('/dev/sda')
assert result is None
def test_does_not_work_after_several_tries(self, stub_call):
os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
stub_call([('wiping /dev/sda', '', 1), ('', '', 1)])
with pytest.raises(RuntimeError):
zap.wipefs('/dev/sda')
def test_does_not_work_default_tries(self, stub_call):
stub_call([('wiping /dev/sda', '', 1)]*8)
with pytest.raises(RuntimeError):
zap.wipefs('/dev/sda')
| 10,275 | 41.46281 | 120 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py
| 0 | 0 | 0 |
py
|
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
|
import pytest
from mock.mock import patch
from ceph_volume.devices import raw
# The sample lsblk output below outlines the test scenario (--json output for reader clarity).
# - sda and all its children are used for the OS
# - sdb is a bluestore OSD with phantom Atari partitions
# - sdc is an empty disk
# - sdd has 2 LVM device children
# > lsblk --paths --json
# {
# "blockdevices": [
# {"name": "/dev/sda", "maj:min": "8:0", "rm": "0", "size": "128G", "ro": "0", "type": "disk", "mountpoint": null,
# "children": [
# {"name": "/dev/sda1", "maj:min": "8:1", "rm": "0", "size": "487M", "ro": "0", "type": "part", "mountpoint": null},
# {"name": "/dev/sda2", "maj:min": "8:2", "rm": "0", "size": "1.9G", "ro": "0", "type": "part", "mountpoint": null},
# {"name": "/dev/sda3", "maj:min": "8:3", "rm": "0", "size": "125.6G", "ro": "0", "type": "part", "mountpoint": "/etc/hosts"}
# ]
# },
# {"name": "/dev/sdb", "maj:min": "8:16", "rm": "0", "size": "1T", "ro": "0", "type": "disk", "mountpoint": null,
# "children": [
# {"name": "/dev/sdb2", "maj:min": "8:18", "rm": "0", "size": "48G", "ro": "0", "type": "part", "mountpoint": null},
# {"name": "/dev/sdb3", "maj:min": "8:19", "rm": "0", "size": "6M", "ro": "0", "type": "part", "mountpoint": null}
# ]
# },
# {"name": "/dev/sdc", "maj:min": "8:32", "rm": "0", "size": "1T", "ro": "0", "type": "disk", "mountpoint": null},
# {"name": "/dev/sdd", "maj:min": "8:48", "rm": "0", "size": "1T", "ro": "0", "type": "disk", "mountpoint": null,
# "children": [
# {"name": "/dev/mapper/ceph--osd--block--1", "maj:min": "253:0", "rm": "0", "size": "512G", "ro": "0", "type": "lvm", "mountpoint": null},
# {"name": "/dev/mapper/ceph--osd--block--2", "maj:min": "253:1", "rm": "0", "size": "512G", "ro": "0", "type": "lvm", "mountpoint": null}
# ]
# }
# ]
# }
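# The helpers below fake the get_devices, lsblk, and ceph-bluestore-tool
# outputs needed to reproduce the scenario above.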
def _devices_side_effect():
return {
"/dev/sda": {},
"/dev/sda1": {},
"/dev/sda2": {},
"/dev/sda3": {},
"/dev/sdb": {},
"/dev/sdb2": {},
"/dev/sdb3": {},
"/dev/sdc": {},
"/dev/sdd": {},
"/dev/mapper/ceph--osd--block--1": {},
"/dev/mapper/ceph--osd--block--2": {},
}
def _lsblk_all_devices(abspath=True):
return [
{"NAME": "/dev/sda", "KNAME": "/dev/sda", "PKNAME": ""},
{"NAME": "/dev/sda1", "KNAME": "/dev/sda1", "PKNAME": "/dev/sda"},
{"NAME": "/dev/sda2", "KNAME": "/dev/sda2", "PKNAME": "/dev/sda"},
{"NAME": "/dev/sda3", "KNAME": "/dev/sda3", "PKNAME": "/dev/sda"},
{"NAME": "/dev/sdb", "KNAME": "/dev/sdb", "PKNAME": ""},
{"NAME": "/dev/sdb2", "KNAME": "/dev/sdb2", "PKNAME": "/dev/sdb"},
{"NAME": "/dev/sdb3", "KNAME": "/dev/sdb3", "PKNAME": "/dev/sdb"},
{"NAME": "/dev/sdc", "KNAME": "/dev/sdc", "PKNAME": ""},
{"NAME": "/dev/sdd", "KNAME": "/dev/sdd", "PKNAME": ""},
{"NAME": "/dev/mapper/ceph--osd--block--1", "KNAME": "/dev/mapper/ceph--osd--block--1", "PKNAME": "/dev/sdd"},
{"NAME": "/dev/mapper/ceph--osd--block--2", "KNAME": "/dev/mapper/ceph--osd--block--2", "PKNAME": "/dev/sdd"},
]
# dummy lsblk output for a single device, with optional parent information
def _lsblk_output(dev, parent=None):
if parent is None:
parent = ""
ret = 'NAME="{}" KNAME="{}" PKNAME="{}"'.format(dev, dev, parent)
return [ret] # needs to be in a list form
def _bluestore_tool_label_output_sdb():
return '''{
"/dev/sdb": {
"osd_uuid": "sdb-uuid",
"size": 1099511627776,
"btime": "2021-07-23T16:02:22.809186+0000",
"description": "main",
"bfm_blocks": "268435456",
"bfm_blocks_per_key": "128",
"bfm_bytes_per_block": "4096",
"bfm_size": "1099511627776",
"bluefs": "1",
"ceph_fsid": "sdb-fsid",
"kv_backend": "rocksdb",
"magic": "ceph osd volume v026",
"mkfs_done": "yes",
"osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
"ready": "ready",
"require_osd_release": "16",
"whoami": "0"
}
}'''
def _bluestore_tool_label_output_sdb2():
return '''{
"/dev/sdb2": {
"osd_uuid": "sdb2-uuid",
"size": 1099511627776,
"btime": "2021-07-23T16:02:22.809186+0000",
"description": "main",
"bfm_blocks": "268435456",
"bfm_blocks_per_key": "128",
"bfm_bytes_per_block": "4096",
"bfm_size": "1099511627776",
"bluefs": "1",
"ceph_fsid": "sdb2-fsid",
"kv_backend": "rocksdb",
"magic": "ceph osd volume v026",
"mkfs_done": "yes",
"osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
"ready": "ready",
"require_osd_release": "16",
"whoami": "2"
}
}'''
def _bluestore_tool_label_output_dm_okay():
return '''{
"/dev/mapper/ceph--osd--block--1": {
"osd_uuid": "lvm-1-uuid",
"size": 549751619584,
"btime": "2021-07-23T16:04:37.881060+0000",
"description": "main",
"bfm_blocks": "134216704",
"bfm_blocks_per_key": "128",
"bfm_bytes_per_block": "4096",
"bfm_size": "549751619584",
"bluefs": "1",
"ceph_fsid": "lvm-1-fsid",
"kv_backend": "rocksdb",
"magic": "ceph osd volume v026",
"mkfs_done": "yes",
"osd_key": "AQCU6Ppgz+UcIRAAh6IUjtPjiXBlEXfwO8ixzw==",
"ready": "ready",
"require_osd_release": "16",
"whoami": "2"
}
}'''
def _process_call_side_effect(command, **kw):
if "lsblk" in command:
if "/dev/" in command[-1]:
dev = command[-1]
if dev == "/dev/sda1" or dev == "/dev/sda2" or dev == "/dev/sda3":
return _lsblk_output(dev, parent="/dev/sda"), '', 0
if dev == "/dev/sdb2" or dev == "/dev/sdb3":
return _lsblk_output(dev, parent="/dev/sdb"), '', 0
if dev == "/dev/sda" or dev == "/dev/sdb" or dev == "/dev/sdc" or dev == "/dev/sdd":
return _lsblk_output(dev), '', 0
if "mapper" in dev:
return _lsblk_output(dev, parent="/dev/sdd"), '', 0
pytest.fail('dev {} needs behavior specified for it'.format(dev))
if "/dev/" not in command:
return _lsblk_all_devices(), '', 0
pytest.fail('command {} needs behavior specified for it'.format(command))
if "ceph-bluestore-tool" in command:
if "/dev/sdb" in command:
# sdb is a bluestore OSD
return _bluestore_tool_label_output_sdb(), '', 0
if "/dev/sdb2" in command:
# sdb2 is a phantom atari partition that appears to have some valid bluestore info
return _bluestore_tool_label_output_sdb2(), '', 0
if "/dev/mapper/ceph--osd--block--1" in command:
# dm device 1 is a valid bluestore OSD (the other is corrupted/invalid)
return _bluestore_tool_label_output_dm_okay(), '', 0
# sda and children, sdb's children, sdc, sdd, dm device 2 all do NOT have bluestore OSD data
return [], 'fake No such file or directory error', 1
pytest.fail('command {} needs behavior specified for it'.format(command))
def _has_bluestore_label_side_effect(disk_path):
if "/dev/sda" in disk_path:
return False # disk and all children are for the OS
if disk_path == "/dev/sdb":
return True # sdb is a valid bluestore OSD
if disk_path == "/dev/sdb2":
return True # sdb2 appears to be a valid bluestore OSD even though it should not be
if disk_path == "/dev/sdc":
return False # empty disk
if disk_path == "/dev/sdd":
return False # has LVM subdevices
if disk_path == "/dev/mapper/ceph--osd--block--1":
return True # good OSD
if disk_path == "/dev/mapper/ceph--osd--block--2":
return False # corrupted
pytest.fail('device {} needs behavior specified for it'.format(disk_path))
class TestList(object):
@patch('ceph_volume.util.device.disk.get_devices')
@patch('ceph_volume.util.disk.has_bluestore_label')
@patch('ceph_volume.process.call')
@patch('ceph_volume.util.disk.lsblk_all')
def test_raw_list(self, patched_disk_lsblk, patched_call, patched_bluestore_label, patched_get_devices):
raw.list.logger.setLevel("DEBUG")
patched_call.side_effect = _process_call_side_effect
patched_disk_lsblk.side_effect = _lsblk_all_devices
patched_bluestore_label.side_effect = _has_bluestore_label_side_effect
patched_get_devices.side_effect = _devices_side_effect
result = raw.list.List([]).generate()
assert len(result) == 3
sdb = result['sdb-uuid']
assert sdb['osd_uuid'] == 'sdb-uuid'
assert sdb['osd_id'] == 0
assert sdb['device'] == '/dev/sdb'
assert sdb['ceph_fsid'] == 'sdb-fsid'
assert sdb['type'] == 'bluestore'
lvm1 = result['lvm-1-uuid']
assert lvm1['osd_uuid'] == 'lvm-1-uuid'
assert lvm1['osd_id'] == 2
assert lvm1['device'] == '/dev/mapper/ceph--osd--block--1'
assert lvm1['ceph_fsid'] == 'lvm-1-fsid'
assert lvm1['type'] == 'bluestore'
@patch('ceph_volume.util.device.disk.get_devices')
@patch('ceph_volume.util.disk.has_bluestore_label')
@patch('ceph_volume.process.call')
@patch('ceph_volume.util.disk.lsblk_all')
def test_raw_list_with_OSError(self, patched_disk_lsblk, patched_call, patched_bluestore_label, patched_get_devices):
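        # an OSError raised while probing one device (/dev/sdd here) must not
        # abort listing; the remaining OSDs should still be reported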
def _has_bluestore_label_side_effect_with_OSError(device_path):
if device_path == "/dev/sdd":
raise OSError('fake OSError')
return _has_bluestore_label_side_effect(device_path)
raw.list.logger.setLevel("DEBUG")
patched_disk_lsblk.side_effect = _lsblk_all_devices
patched_call.side_effect = _process_call_side_effect
patched_bluestore_label.side_effect = _has_bluestore_label_side_effect_with_OSError
patched_get_devices.side_effect = _devices_side_effect
result = raw.list.List([]).generate()
assert len(result) == 3
assert 'sdb-uuid' in result
| 10,354 | 42.32636 | 153 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py
|
import pytest
from ceph_volume.devices import raw
from mock.mock import patch
class TestRaw(object):
def test_main_spits_help_with_no_arguments(self, capsys):
raw.main.Raw([]).main()
stdout, stderr = capsys.readouterr()
assert 'Manage a single-device OSD on a raw block device.' in stdout
def test_main_shows_activate_subcommands(self, capsys):
raw.main.Raw([]).main()
stdout, stderr = capsys.readouterr()
assert 'activate ' in stdout
assert 'Discover and prepare' in stdout
def test_main_shows_prepare_subcommands(self, capsys):
raw.main.Raw([]).main()
stdout, stderr = capsys.readouterr()
assert 'prepare ' in stdout
assert 'Format a raw device' in stdout
class TestPrepare(object):
def test_main_spits_help_with_no_arguments(self, capsys):
raw.prepare.Prepare([]).main()
stdout, stderr = capsys.readouterr()
assert 'Prepare an OSD by assigning an ID and FSID' in stdout
def test_main_shows_full_help(self, capsys):
with pytest.raises(SystemExit):
raw.prepare.Prepare(argv=['--help']).main()
stdout, stderr = capsys.readouterr()
assert 'a raw device to use for the OSD' in stdout
assert 'Crush device class to assign this OSD to' in stdout
assert 'Use BlueStore backend' in stdout
assert 'Path to bluestore block.db block device' in stdout
assert 'Path to bluestore block.wal block device' in stdout
assert 'Enable device encryption via dm-crypt' in stdout
@patch('ceph_volume.util.arg_validators.ValidRawDevice.__call__')
def test_prepare_dmcrypt_no_secret_passed(self, m_valid_device, capsys):
m_valid_device.return_value = '/dev/foo'
with pytest.raises(SystemExit):
raw.prepare.Prepare(argv=['--bluestore', '--data', '/dev/foo', '--dmcrypt']).main()
stdout, stderr = capsys.readouterr()
assert 'CEPH_VOLUME_DMCRYPT_SECRET is not set, you must set' in stderr
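    # the dm-crypt mapper name is expected to follow
    # ceph-<fsid>-<kname>-<device type>-dmcrypt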
@patch('ceph_volume.util.encryption.luks_open')
@patch('ceph_volume.util.encryption.luks_format')
@patch('ceph_volume.util.disk.lsblk')
def test_prepare_dmcrypt_block(self, m_lsblk, m_luks_format, m_luks_open):
m_lsblk.return_value = {'KNAME': 'foo'}
m_luks_format.return_value = True
m_luks_open.return_value = True
result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'block', '123')
m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-block-dmcrypt')
m_luks_format.assert_called_with('foo', '/dev/foo')
assert result == '/dev/mapper/ceph-123-foo-block-dmcrypt'
@patch('ceph_volume.util.encryption.luks_open')
@patch('ceph_volume.util.encryption.luks_format')
@patch('ceph_volume.util.disk.lsblk')
def test_prepare_dmcrypt_db(self, m_lsblk, m_luks_format, m_luks_open):
m_lsblk.return_value = {'KNAME': 'foo'}
m_luks_format.return_value = True
m_luks_open.return_value = True
result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'db', '123')
m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-db-dmcrypt')
m_luks_format.assert_called_with('foo', '/dev/foo')
assert result == '/dev/mapper/ceph-123-foo-db-dmcrypt'
@patch('ceph_volume.util.encryption.luks_open')
@patch('ceph_volume.util.encryption.luks_format')
@patch('ceph_volume.util.disk.lsblk')
def test_prepare_dmcrypt_wal(self, m_lsblk, m_luks_format, m_luks_open):
m_lsblk.return_value = {'KNAME': 'foo'}
m_luks_format.return_value = True
m_luks_open.return_value = True
result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'wal', '123')
m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-wal-dmcrypt')
m_luks_format.assert_called_with('foo', '/dev/foo')
assert result == '/dev/mapper/ceph-123-foo-wal-dmcrypt'
@patch('ceph_volume.devices.raw.prepare.rollback_osd')
@patch('ceph_volume.devices.raw.prepare.Prepare.prepare')
@patch('ceph_volume.util.arg_validators.ValidRawDevice.__call__')
def test_safe_prepare_exception_raised(self, m_valid_device, m_prepare, m_rollback_osd):
m_valid_device.return_value = '/dev/foo'
        m_prepare.side_effect = Exception('foo')
m_rollback_osd.return_value = 'foobar'
with pytest.raises(Exception):
raw.prepare.Prepare(argv=['--bluestore', '--data', '/dev/foo']).main()
m_rollback_osd.assert_called()
| 4,558 | 45.520408 | 95 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py
|
import os
import pytest
from ceph_volume.devices.simple import activate
class TestActivate(object):
def test_no_data_uuid(self, factory, is_root, monkeypatch, capture, fake_filesystem):
fake_filesystem.create_file('/tmp/json-config', contents='{}')
args = factory(osd_id='0', osd_fsid='1234', json_config='/tmp/json-config')
with pytest.raises(RuntimeError):
activate.Activate([]).activate(args)
def test_invalid_json_path(self):
os.environ['CEPH_VOLUME_SIMPLE_JSON_DIR'] = '/non/existing/path'
with pytest.raises(RuntimeError) as error:
activate.Activate(['1', 'asdf']).main()
assert 'Expected JSON config path not found' in str(error.value)
def test_main_spits_help_with_no_arguments(self, capsys):
activate.Activate([]).main()
stdout, stderr = capsys.readouterr()
assert 'Activate OSDs by mounting devices previously configured' in stdout
def test_activate_all(self, is_root, monkeypatch):
'''
make sure Activate calls activate for each file returned by glob
'''
mocked_glob = []
def mock_glob(glob):
path = os.path.dirname(glob)
mocked_glob.extend(['{}/{}.json'.format(path, file_) for file_ in
['1', '2', '3']])
return mocked_glob
activate_files = []
def mock_activate(self, args):
activate_files.append(args.json_config)
monkeypatch.setattr('glob.glob', mock_glob)
monkeypatch.setattr(activate.Activate, 'activate', mock_activate)
activate.Activate(['--all']).main()
assert activate_files == mocked_glob
class TestEnableSystemdUnits(object):
def test_nothing_is_activated(self, is_root, capsys, fake_filesystem):
fake_filesystem.create_file('/tmp/json-config', contents='{}')
activation = activate.Activate(['--no-systemd', '--file', '/tmp/json-config', '0', '1234'], from_trigger=True)
activation.activate = lambda x: True
activation.main()
activation.enable_systemd_units('0', '1234')
stdout, stderr = capsys.readouterr()
assert 'Skipping enabling of `simple`' in stderr
assert 'Skipping masking of ceph-disk' in stderr
assert 'Skipping enabling and starting OSD simple' in stderr
def test_no_systemd_flag_is_true(self, is_root, fake_filesystem):
fake_filesystem.create_file('/tmp/json-config', contents='{}')
activation = activate.Activate(['--no-systemd', '--file', '/tmp/json-config', '0', '1234'], from_trigger=True)
activation.activate = lambda x: True
activation.main()
assert activation.skip_systemd is True
def test_no_systemd_flag_is_false(self, is_root, fake_filesystem):
fake_filesystem.create_file('/tmp/json-config', contents='{}')
activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=True)
activation.activate = lambda x: True
activation.main()
assert activation.skip_systemd is False
def test_masks_ceph_disk(self, is_root, monkeypatch, capture, fake_filesystem):
monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', capture)
monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
fake_filesystem.create_file('/tmp/json-config', contents='{}')
activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
activation.activate = lambda x: True
activation.main()
activation.enable_systemd_units('0', '1234')
assert len(capture.calls) == 1
def test_enables_simple_unit(self, is_root, monkeypatch, capture, fake_filesystem):
monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', capture)
monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
fake_filesystem.create_file('/tmp/json-config', contents='{}')
activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
activation.activate = lambda x: True
activation.main()
activation.enable_systemd_units('0', '1234')
assert len(capture.calls) == 1
assert capture.calls[0]['args'] == ('0', '1234', 'simple')
def test_enables_osd_unit(self, is_root, monkeypatch, capture, fake_filesystem):
monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', capture)
monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
fake_filesystem.create_file('/tmp/json-config', contents='{}')
activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
activation.activate = lambda x: True
activation.main()
activation.enable_systemd_units('0', '1234')
assert len(capture.calls) == 1
assert capture.calls[0]['args'] == ('0',)
def test_starts_osd_unit(self, is_root, monkeypatch, capture, fake_filesystem):
monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', capture)
fake_filesystem.create_file('/tmp/json-config', contents='{}')
activation = activate.Activate(['--file', '/tmp/json-config', '0', '1234'], from_trigger=False)
activation.activate = lambda x: True
activation.main()
activation.enable_systemd_units('0', '1234')
assert len(capture.calls) == 1
assert capture.calls[0]['args'] == ('0',)
class TestValidateDevices(object):
def test_bluestore_with_all_devices(self):
activation = activate.Activate([])
result = activation.validate_devices({'type': 'bluestore', 'data': {}, 'block': {}})
assert result is True
def test_bluestore_without_type(self):
activation = activate.Activate([])
result = activation.validate_devices({'data': {}, 'block': {}})
assert result is True
def test_bluestore_is_default(self):
activation = activate.Activate([])
result = activation.validate_devices({'data': {}, 'block': {}})
assert result is True
def test_bluestore_data_device_found(self, capsys):
activation = activate.Activate([])
with pytest.raises(RuntimeError):
activation.validate_devices({'data': {}})
stdout, stderr = capsys.readouterr()
assert "devices found: ['data']" in stderr
def test_bluestore_missing_data(self):
activation = activate.Activate([])
with pytest.raises(RuntimeError) as error:
activation.validate_devices({'type': 'bluestore', 'block': {}})
assert 'Unable to activate bluestore OSD due to missing devices' in str(error.value)
def test_bluestore_block_device_found(self, capsys):
activation = activate.Activate([])
with pytest.raises(RuntimeError):
activation.validate_devices({'block': {}})
stdout, stderr = capsys.readouterr()
assert "devices found: ['block']" in stderr
| 7,905 | 46.915152 | 118 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py
|
import os
import pytest
from ceph_volume.devices.simple import scan
class TestGetContents(object):
def setup_method(self):
self.magic_file_name = '/tmp/magic-file'
def test_multiple_lines_are_left_as_is(self, fake_filesystem):
magic_file = fake_filesystem.create_file(self.magic_file_name, contents='first\nsecond\n')
scanner = scan.Scan([])
assert scanner.get_contents(magic_file.path) == 'first\nsecond\n'
def test_extra_whitespace_gets_removed(self, fake_filesystem):
magic_file = fake_filesystem.create_file(self.magic_file_name, contents='first ')
scanner = scan.Scan([])
assert scanner.get_contents(magic_file.path) == 'first'
def test_single_newline_values_are_trimmed(self, fake_filesystem):
magic_file = fake_filesystem.create_file(self.magic_file_name, contents='first\n')
scanner = scan.Scan([])
assert scanner.get_contents(magic_file.path) == 'first'
class TestEtcPath(object):
def test_directory_is_valid(self, tmpdir):
path = str(tmpdir)
scanner = scan.Scan([])
scanner._etc_path = path
assert scanner.etc_path == path
def test_directory_does_not_exist_gets_created(self, tmpdir):
path = os.path.join(str(tmpdir), 'subdir')
scanner = scan.Scan([])
scanner._etc_path = path
assert scanner.etc_path == path
assert os.path.isdir(path)
def test_complains_when_file(self, fake_filesystem):
etc_dir = fake_filesystem.create_file('/etc/ceph/osd')
scanner = scan.Scan([])
scanner._etc_path = etc_dir.path
with pytest.raises(RuntimeError):
scanner.etc_path
class TestParseKeyring(object):
def test_newlines_are_removed(self):
contents = [
'[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
'\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
assert '\n' not in scan.parse_keyring('\n'.join(contents))
def test_key_has_spaces_removed(self):
contents = [
'[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
'\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
result = scan.parse_keyring('\n'.join(contents))
assert result.startswith(' ') is False
assert result.endswith(' ') is False
def test_actual_key_is_extracted(self):
contents = [
'[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
'\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
result = scan.parse_keyring('\n'.join(contents))
assert result == 'AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA=='
| 2,691 | 36.388889 | 98 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py
|
import pytest
from ceph_volume import exceptions
from ceph_volume.devices.simple import trigger
class TestParseOSDid(object):
def test_no_id_found_if_no_digit(self):
with pytest.raises(exceptions.SuffixParsingError):
trigger.parse_osd_id('asdlj-ljahsdfaslkjhdfa')
def test_no_id_found(self):
with pytest.raises(exceptions.SuffixParsingError):
trigger.parse_osd_id('ljahsdfaslkjhdfa')
def test_id_found(self):
result = trigger.parse_osd_id('1-ljahsdfaslkjhdfa')
assert result == '1'
class TestParseOSDUUID(object):
def test_uuid_is_parsed(self):
result = trigger.parse_osd_uuid('1-asdf-ljkh-asdf-ljkh-asdf')
assert result == 'asdf-ljkh-asdf-ljkh-asdf'
def test_uuid_is_parsed_longer_sha1(self):
result = trigger.parse_osd_uuid('1-foo-bar-asdf-ljkh-asdf-ljkh-asdf')
assert result == 'foo-bar-asdf-ljkh-asdf-ljkh-asdf'
def test_uuid_is_not_found(self):
with pytest.raises(exceptions.SuffixParsingError):
trigger.parse_osd_uuid('ljahsdfaslkjhdfa')
def test_uuid_is_not_found_missing_id(self):
with pytest.raises(exceptions.SuffixParsingError):
trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo')
def test_robust_double_id_in_uuid(self):
        # the OSD id may also appear inside the SHA1/UUID portion; parsing
        # should still extract the full UUID
result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed")
assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed'
| 1,546 | 32.630435 | 81 |
py
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/README.md
|
# ceph-volume functional test suite
This test suite is based on vagrant and is normally run via Jenkins on GitHub
PRs. With a functioning Vagrant installation these tests can also be run locally
(tested with vagrant's libvirt provider).
## Vagrant with libvirt
By default the tests make assumptions about the network segments to use (public
and cluster network), as well as the libvirt storage pool and uri. In an unused
vagrant setup these defaults should be fine.
If you prefer to explicitly configure the storage pool and libvirt
uri, create a file
`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/global_vagrant_variables.yml`
with content as follows:
``` yaml
libvirt_uri: qemu:///system
libvirt_storage_pool: 'vagrant-ceph-nvme'
```
Adjust the values as needed.
After this, descend into a test directory (e.g.
`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/lvm`) and run `tox -vre
centos7-bluestore-create -- --provider=libvirt` to execute the tests in
`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/`.
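For example, assuming the ceph checkout lives at `$ceph_repo`:
``` console
cd $ceph_repo/src/ceph-volume/ceph_volume/tests/functional/lvm
tox -vre centos7-bluestore-create -- --provider=libvirt
```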
| 1,061 | 41.48 | 87 |
md
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/vagrant_variables.yml
|
---
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.3
cluster_subnet: 192.168.4
# MEMORY
# set 1024 for CentOS
memory: 1024
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/stream8
# vagrant_box_url: https://cloud.centos.org/centos/8/x86_64/images/CentOS-8-Vagrant-8.1.1911-20200113.3.x86_64.vagrant-libvirt.box
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
| 2,344 | 39.431034 | 172 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/setup.yml
|
../../../playbooks/setup_mixed_type.yml
| 39 | 39 | 39 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test.yml
|
../../../playbooks/test_explicit.yml
| 36 | 36 | 36 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml
|
../../../playbooks/test_zap.yml
| 31 | 31 | 31 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
|
../../../../vagrant_variables.yml
| 33 | 33 | 33 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/setup.yml
|
../../../playbooks/setup_mixed_type.yml
| 39 | 39 | 39 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test.yml
|
../../../playbooks/test.yml
| 27 | 27 | 27 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test_zap.yml
|
../../../playbooks/test_zap.yml
| 31 | 31 | 31 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/vagrant_variables.yml
|
../../../../vagrant_variables.yml
| 33 | 33 | 33 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/setup.yml
|
../../../playbooks/setup_mixed_type.yml
| 39 | 39 | 39 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test.yml
|
../../../playbooks/test_explicit.yml
| 36 | 36 | 36 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test_zap.yml
|
../../../playbooks/test_zap.yml
| 31 | 31 | 31 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/vagrant_variables.yml
|
../../../../vagrant_variables.yml
| 33 | 33 | 33 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/setup.yml
|
../../../playbooks/setup_mixed_type.yml
| 39 | 39 | 39 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test.yml
|
../../../playbooks/test.yml
| 27 | 27 | 27 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test_zap.yml
|
../../../playbooks/test_zap.yml
| 31 | 31 | 31 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/vagrant_variables.yml
|
../../../../vagrant_variables.yml
| 33 | 33 | 33 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/setup.yml
|
../../../playbooks/noop.yml
| 27 | 27 | 27 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test.yml
|
../../../playbooks/test.yml
| 27 | 27 | 27 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test_zap.yml
|
../../../playbooks/test_zap.yml
| 31 | 31 | 31 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/vagrant_variables.yml
|
../../../../vagrant_variables.yml
| 33 | 33 | 33 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/setup.yml
|
../../../playbooks/noop.yml
| 27 | 27 | 27 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test.yml
|
../../../playbooks/test.yml
| 27 | 27 | 27 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test_zap.yml
|
../../../playbooks/test_zap.yml
| 31 | 31 | 31 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/vagrant_variables.yml
|
../../../../vagrant_variables.yml
| 33 | 33 | 33 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml
|
---
# Allows functional tests to always include a 'setup.yml' file, while doing
# actual work only in the scenarios that need it
- hosts: all
gather_facts: no
tasks:
- debug:
msg: "This is an empty setup playbook. The current scenario didn't need any work done"
| 277 | 20.384615 | 94 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml
|
---
- hosts: osds
become: yes
tasks:
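    # Emulate NVMe devices: sparse files on /dev/vdd1 and /dev/vde1 are
    # attached to loop devices, exported as nvmet loop targets via nvmetcli,
    # and connected back as /dev/nvme* devices with nvme-cli.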
- name: install lvm2
package:
name: lvm2
state: present
- name: tell lvm to ignore loop devices
lineinfile:
path: /etc/lvm/lvm.conf
line: "\tfilter = [ 'r|loop.*|' ]"
insertafter: 'devices {'
    - name: allow lvm changes despite duplicate PVIDs
lineinfile:
path: /etc/lvm/lvm.conf
line: ' allow_changes_with_duplicate_pvs = 1'
insertafter: '^devices {'
regexp: 'allow_changes_with_duplicate_pvs = 0'
- name: create mount points
command: "mkdir /opt/{{ item }}"
loop:
- vdd
- vde
ignore_errors: yes
- name: add part
shell: echo "type=83" | sfdisk /dev/{{ item }}
loop:
- vdd
- vde
- name: add fs
command: "mkfs.ext4 /dev/{{ item }}1"
loop:
- vdd
- vde
- name: mount additional drives
command: "mount /dev/{{ item }}1 /opt/{{ item }}"
loop:
- vdd
- vde
- name: create the nvme image systemd unit
copy:
content: |
[Unit]
Description=NVMe loop device
After=local-fs.target
Wants=local-fs.target
[Service]
Type=simple
ExecStart=/bin/bash /opt/ceph-nvme.sh
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
dest: "/etc/systemd/system/ceph-nvme.service"
- name: create the ceph-nvme startup script
copy:
content: |
set -x
set -e
mount /dev/vdd1 /opt/vdd
mount /dev/vde1 /opt/vde
modprobe nvmet
modprobe nvme_loop
modprobe nvme_fabrics
modprobe loop
losetup -v /dev/loop0 /opt/vdd/loop0_nvme0
losetup -v /dev/loop1 /opt/vde/loop1_nvme1
losetup -l
nvmetcli restore /opt/loop.json
nvme connect -t loop -n testnqn1 -q hostnqn
nvme connect -t loop -n testnqn2 -q hostnqn
nvme list
dest: "/opt/ceph-nvme.sh"
- name: ensure ceph-nvme is enabled
service:
name: ceph-nvme
state: stopped
enabled: yes
- name: install nvme dependencies
package:
name: "{{ item }}"
state: present
with_items:
- nvme-cli
- nvmetcli
- name: enable NVME kernel modules
modprobe:
name: "{{ item }}"
state: present
with_items:
- nvmet
- nvme_loop
- nvme_fabrics
- name: detach nvme files from loop devices
command: "losetup -d /dev/{{ item }}"
failed_when: false
loop:
- loop0
- loop1
- name: remove previous nvme files
file:
path: "{{ item }}"
state: absent
loop:
- /opt/vdd/loop0_nvme0
- /opt/vde/loop1_nvme1
- name: create 20GB sparse files for NVMe
command: "fallocate -l 20G {{ item }}"
loop:
- /opt/vdd/loop0_nvme0
- /opt/vde/loop1_nvme1
- name: setup loop devices with sparse files 0
command: "losetup /dev/loop0 /opt/vdd/loop0_nvme0"
failed_when: false
- name: setup loop devices with sparse files 1
command: "losetup /dev/loop1 /opt/vde/loop1_nvme1"
failed_when: false
- name: create the loop.json file for nvmetcli
copy:
content: |
{
"hosts": [
{
"nqn": "hostnqn"
}
],
"ports": [
{
"addr": {
"adrfam": "",
"traddr": "",
"treq": "not specified",
"trsvcid": "",
"trtype": "loop"
},
"portid": 1,
"referrals": [],
"subsystems": [
"testnqn1",
"testnqn2"
]
}
],
"subsystems": [
{
"allowed_hosts": [
"hostnqn"
],
"attr": {
"allow_any_host": "0"
},
"namespaces": [
{
"device": {
"nguid": "ef90689c-6c46-d44c-89c1-4067801309a8",
"path": "/dev/loop0"
},
"enable": 1,
"nsid": 1
}
],
"nqn": "testnqn1"
},
{
"allowed_hosts": [
"hostnqn"
],
"attr": {
"allow_any_host": "0"
},
"namespaces": [
{
"device": {
"nguid": "ef90689c-6c46-d44c-89c1-4067801309a7",
"path": "/dev/loop1"
},
"enable": 1,
"nsid": 2
}
],
"nqn": "testnqn2"
}
]
}
dest: "/opt/loop.json"
- name: setup the /dev/loop0 target with nvmetcli
command: nvmetcli restore /opt/loop.json
- name: connect the new target as an nvme device
command: "nvme connect -t loop -n testnqn{{ item }} -q hostnqn"
loop:
- 1
- 2
- name: debug output for nvme list
command: nvme list
| 5,678 | 25.291667 | 72 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml
|
- hosts: osds
become: yes
tasks:
- name: stop ceph-osd daemons
service:
name: "ceph-osd@{{ item }}"
state: stopped
with_items: "{{ osd_ids }}"
- hosts: mons
become: yes
tasks:
- name: mark osds down
command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
with_items: "{{ osd_ids }}"
- name: purge osds
command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
with_items: "{{ osd_ids }}"
- hosts: osds
become: yes
tasks:
- name: zap devices used for OSDs
command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy"
with_items: "{{ devices }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: batch create devices again
command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: ensure batch create is idempotent
command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
register: batch_cmd
failed_when: false
environment:
CEPH_VOLUME_DEBUG: 1
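    # a non-zero exit is acceptable here only when batch reports that the
    # strategy changed (see the check below)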
- name: check batch idempotency
fail:
msg: "lvm batch failed idempotency check"
when:
- batch_cmd.rc != 0
- "'strategy changed' not in batch_cmd.stderr"
- name: run batch --report to see if devices get filtered
command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
register: report_cmd
failed_when: false
environment:
CEPH_VOLUME_DEBUG: 1
- name: check batch --report idempotency
fail:
msg: "lvm batch --report failed idempotency check"
when:
- report_cmd.rc != 0
- "'strategy changed' not in report_cmd.stderr"
| 2,145 | 32.015385 | 208 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml
|
- hosts: osds
become: yes
tasks:
- name: stop ceph-osd daemons
service:
name: "ceph-osd@{{ item }}"
state: stopped
with_items: "{{ osd_ids }}"
- hosts: mons
become: yes
tasks:
- name: mark osds down
command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
with_items: "{{ osd_ids }}"
- name: purge osds
command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
with_items: "{{ osd_ids }}"
- hosts: osds
become: yes
vars:
external_devices: "{{ '--db-devices' if osd_objectstore == 'bluestore' else '--journal-devices' }}"
tasks:
- name: zap devices used for OSDs
command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy"
with_items: "{{ devices }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: batch create devices again
command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: ensure batch create is idempotent when all data devices are filtered
command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
register: batch_cmd
failed_when: false
environment:
CEPH_VOLUME_DEBUG: 1
- name: check batch idempotency
fail:
msg: "lvm batch failed idempotency check"
when:
- batch_cmd.rc != 0
- name: run batch --report to see if devices get filtered
command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
register: report_cmd
failed_when: false
environment:
CEPH_VOLUME_DEBUG: 1
- name: check batch --report idempotency
fail:
msg: "lvm batch --report failed idempotency check"
when:
- report_cmd.rc != 0
| 2,350 | 35.169231 | 265 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml
|
- hosts: osds
become: yes
tasks:
- name: stop ceph-osd daemons
service:
name: "ceph-osd@{{ item }}"
state: stopped
with_items: "{{ osd_ids }}"
- hosts: mons
become: yes
tasks:
- name: mark osds down
command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
with_items: "{{ osd_ids }}"
- name: purge osds
command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
with_items: "{{ osd_ids }}"
- hosts: osds
become: yes
tasks:
- name: zap devices used for OSDs
command: "ceph-volume --cluster {{ cluster }} lvm zap --osd-id {{ item }} --destroy"
with_items: "{{ osd_ids }}"
environment:
CEPH_VOLUME_DEBUG: 1
| 755 | 20.6 | 93 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml
|
../../../playbooks/setup_partitions.yml
| 39 | 39 | 39 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml
|
../../../playbooks/test_bluestore.yml
| 37 | 37 | 37 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml
|
../../../../vagrant_variables.yml
| 33 | 33 | 33 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml
|
../../../playbooks/setup_partitions.yml
| 39 | 39 | 39 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml
|
- hosts: osds
become: yes
tasks:
- name: stop ceph-osd@2 daemon
service:
name: ceph-osd@2
state: stopped
- name: stop ceph-osd@0 daemon
service:
name: ceph-osd@0
state: stopped
- hosts: mons
become: yes
tasks:
- name: mark osds down
command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
with_items:
- 0
- 2
- name: destroy osd.2
command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
until: result is succeeded
- name: destroy osd.0
command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
until: result is succeeded
- hosts: osds
become: yes
tasks:
# osd.2 device
- name: zap /dev/vdd1
command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1
    # partitions have been completely removed, so re-create them
- name: re-create partition /dev/vdd for lvm data usage
parted:
device: /dev/vdd
number: 1
part_start: 0%
part_end: 50%
unit: '%'
label: gpt
state: present
- name: redeploy osd.2 using /dev/vdd1
command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
environment:
CEPH_VOLUME_DEBUG: 1
# osd.0 lv
- name: zap test_group/data-lv1
command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1
- name: redeploy osd.0 using test_group/data-lv1
command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1
- name: stop ceph-osd@0 daemon
service:
name: ceph-osd@0
state: stopped
- hosts: mons
become: yes
tasks:
- name: mark osds down
command: "ceph --cluster {{ cluster }} osd down osd.0"
- name: destroy osd.0
command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
until: result is succeeded
- hosts: osds
become: yes
tasks:
- name: zap test_group/data-lv1
command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1
- name: prepare osd.0 using test_group/data-lv1
command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1
- name: activate all to start the previously prepared osd.0
command: "ceph-volume lvm activate --all"
environment:
CEPH_VOLUME_DEBUG: 1
- name: node inventory
command: "ceph-volume inventory"
environment:
CEPH_VOLUME_DEBUG: 1
- name: list all OSDs
command: "ceph-volume lvm list"
environment:
CEPH_VOLUME_DEBUG: 1
| 3,158 | 24.475806 | 114 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml
|
../../../../vagrant_variables.yml
| 33 | 33 | 33 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml
|
---
- hosts: osds
gather_facts: false
become: yes
tasks:
- name: partition /dev/vdd for lvm data usage
parted:
device: /dev/vdd
number: 1
part_start: 0%
part_end: 50%
unit: '%'
label: gpt
state: present
- name: partition /dev/vdd lvm journals
parted:
device: /dev/vdd
number: 2
part_start: 50%
part_end: 100%
unit: '%'
state: present
label: gpt
| 487 | 16.428571 | 49 |
yml
|
null |
ceph-main/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
|
- hosts: osds
become: yes
tasks:
- name: stop ceph-osd@2 daemon
service:
name: ceph-osd@2
state: stopped
- name: stop ceph-osd@0 daemon
service:
name: ceph-osd@0
state: stopped
- hosts: mons
become: yes
tasks:
- name: mark osds down
command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
with_items:
- 0
- 2
- name: destroy osd.2
command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
until: result is succeeded
- name: destroy osd.0
command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
until: result is succeeded
- hosts: osds
become: yes
tasks:
# osd.2 device
- name: zap /dev/vdd1
command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1
    # partitions have been completely removed, so re-create them
- name: re-create partition /dev/vdd for lvm data usage
parted:
device: /dev/vdd
number: 1
part_start: 0%
part_end: 50%
unit: '%'
label: gpt
state: present
- name: redeploy osd.2 using /dev/vdd1
command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
environment:
CEPH_VOLUME_DEBUG: 1
    # osd.0 device (zap without --destroy, which would remove the LV)
- name: zap test_group/data-lv1
command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1
- name: prepare osd.0 again using test_group/data-lv1
command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1
- name: find all OSD directories
find:
paths: /var/lib/ceph/osd
recurse: no
file_type: directory
register: osd_directories
- name: find all OSD symlinks
find:
paths: /var/lib/ceph/osd
recurse: yes
depth: 2
file_type: link
register: osd_symlinks
# set the OSD dir and the block/block.db links to root:root permissions, to
# ensure that the OSD will be able to activate regardless
- file:
path: "{{ item.path }}"
owner: root
group: root
with_items:
- "{{ osd_directories.files }}"
- file:
path: "{{ item.path }}"
owner: root
group: root
with_items:
- "{{ osd_symlinks.files }}"
- name: activate all to start the previously prepared osd.0
command: "ceph-volume lvm activate --all"
environment:
CEPH_VOLUME_DEBUG: 1
- name: node inventory
command: "ceph-volume inventory"
environment:
CEPH_VOLUME_DEBUG: 1
- name: list all OSDs
command: "ceph-volume lvm list"
environment:
CEPH_VOLUME_DEBUG: 1
- name: create temporary directory
tempfile:
state: directory
suffix: sparse
register: tmpdir
- name: create a 1GB sparse file
command: fallocate -l 1G {{ tmpdir.path }}/sparse.file
- name: find an empty loop device
command: losetup -f
register: losetup_list
- name: setup loop device with sparse file
command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
- name: create volume group
command: vgcreate test_zap {{ losetup_list.stdout }}
failed_when: false
- name: create logical volume 1
command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
failed_when: false
- name: create logical volume 2
command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
failed_when: false
# zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
- name: zap test_zap/data-lv1
command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1
- name: zap test_zap/data-lv2
command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
environment:
CEPH_VOLUME_DEBUG: 1
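The playbook above exercises both zap modes, with and without --destroy. A rough sketch of the difference, using the LV name from the playbook:

# zap without --destroy: wipes labels and data on the LV but keeps the LV itself
ceph-volume lvm zap test_group/data-lv1
# zap with --destroy: additionally removes the LV, and the VG/PV once they are empty
ceph-volume lvm zap --destroy test_group/data-lv1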
ceph-main/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml
---
# Defines deployment design and assigns role to server groups
- hosts:
- mons
- osds
- mgrs
gather_facts: false
any_errors_fatal: true
become: true
tags:
- always
vars:
delegate_facts_host: True
dashboard_enabled: False
environment:
DEBIAN_FRONTEND: noninteractive
pre_tasks:
    # If we can't get python2 installed before any module is used we will fail,
    # so just try what we can to get it installed
- name: check for python2
stat:
path: /usr/bin/python
ignore_errors: yes
register: systempython2
- name: install python2 for debian based systems
raw: sudo apt-get -y install python-simplejson
ignore_errors: yes
when:
- systempython2.stat is undefined or systempython2.stat.exists == false
    # Ansible will try to auto-install python-apt; on some systems this might be
    # python3-apt or python-apt, and it has caused whole runs to fail because
    # it tries to do an interactive prompt
- name: install python-apt and aptitude in debian based systems
raw: sudo apt-get -y install "{{ item }}"
ignore_errors: yes
with_items:
- python3-apt
- python-apt
- aptitude
- name: install python2 for fedora
raw: sudo dnf -y install python creates=/usr/bin/python
ignore_errors: yes
when:
- systempython2.stat is undefined or systempython2.stat.exists == false
- name: install python2 for opensuse
raw: sudo zypper -n install python-base creates=/usr/bin/python2.7
ignore_errors: yes
when:
- systempython2.stat is undefined or systempython2.stat.exists == false
- name: gather facts
setup:
when:
- not delegate_facts_host | bool
- name: gather and delegate facts
setup:
delegate_to: "{{ item }}"
delegate_facts: True
with_items: "{{ groups['all'] }}"
run_once: true
when:
- delegate_facts_host | bool
- name: install required packages for fedora > 23
raw: sudo dnf -y install python2-dnf libselinux-python ntp
when:
- ansible_facts['distribution'] == 'Fedora'
- ansible_facts['distribution_major_version']|int >= 23
- name: check if it is atomic host
stat:
path: /run/ostree-booted
register: stat_ostree
- name: set_fact is_atomic
set_fact:
is_atomic: '{{ stat_ostree.stat.exists }}'
- name: force rpm pkg upgrade
package:
name: rpm
state: latest
when: not is_atomic | bool
- name: update the system
command: dnf update -y
changed_when: false
when: not is_atomic | bool
tasks:
- import_role:
name: ceph-defaults
- import_role:
name: ceph-facts
- import_role:
name: ceph-validate
- hosts:
- mons
- osds
- mgrs
gather_facts: false
become: True
any_errors_fatal: true
vars:
dashboard_enabled: False
tasks:
- import_role:
name: ceph-defaults
- import_role:
name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
name: ceph-common
- name: rsync ceph-volume to test nodes on centos
synchronize:
src: "{{ toxinidir }}/../../../../ceph_volume"
dest: "/usr/lib/python3.6/site-packages"
use_ssh_args: true
when:
- ansible_facts['os_family'] == "RedHat"
- inventory_hostname in groups.get(osd_group_name, [])
- name: rsync ceph-volume to test nodes on ubuntu
synchronize:
src: "{{ toxinidir }}/../../../../ceph_volume"
dest: "/usr/lib/python2.7/dist-packages"
use_ssh_args: true
when:
- ansible_facts['os_family'] == "Debian"
- inventory_hostname in groups.get(osd_group_name, [])
- name: run ceph-config role
import_role:
name: ceph-config
- name: run ceph-mon role
import_role:
name: ceph-mon
when:
- inventory_hostname in groups.get(mon_group_name, [])
- name: run ceph-mgr role
import_role:
name: ceph-mgr
when:
- inventory_hostname in groups.get(mgr_group_name, [])
- name: run ceph-osd role
import_role:
name: ceph-osd
when:
- inventory_hostname in groups.get(osd_group_name, [])
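Tox normally drives this playbook; run by hand it would look roughly like the following (the inventory path and extra-vars are assumptions, not fixed by the playbook):

# hypothetical manual invocation; tox supplies toxinidir in the real runs
ansible-playbook -vv -i hosts deploy.yml --extra-vars "toxinidir=$(pwd)"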
ceph-main/src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh
#!/bin/bash
# Generate a custom ssh config from Vagrant so that it can then be used by
# ansible.cfg
path=$1
if [ $# -eq 0 ]
then
echo "A path to the scenario is required as an argument and it wasn't provided"
exit 1
fi
cd "$path"
vagrant ssh-config > vagrant_ssh_config
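A usage sketch (the scenario path is illustrative), along with the kind of ansible.cfg stanza that would consume the generated file:

# generate the ssh config for one scenario, then point ansible at it
bash generate_ssh_config.sh functional/lvm/centos8/bluestore/dmcrypt
# in ansible.cfg:
# [ssh_connection]
# ssh_args = -F vagrant_ssh_config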
ceph-main/src/ceph-volume/ceph_volume/tests/functional/scripts/output.py
import os
from ceph_volume import terminal
# print a possibly non-ASCII character taken from the environment;
# test_unicode.sh checks that this does not blow up and that the character
# survives the trip through ceph_volume's terminal module
char = os.environ.get('INVALID')
terminal.stdout(char)
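This helper is driven by test_unicode.sh below; by hand the equivalent is roughly:

# INVALID is read from the environment; the arrow is just one non-ASCII example
INVALID="→" python3 output.py 2> stderr.log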
ceph-main/src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh
#!/bin/bash
# Not entirely sure why these executables don't seem to be available in the
# $PATH when running from tox. Calling out to `which` seems to fix it, at the
# expense of making the script a bit obtuse
mktemp=$(which mktemp)
cat=$(which cat)
grep=$(which grep)
PYTHON_EXECUTABLE=$(which python3)
STDERR_FILE=$($mktemp)
INVALID="→"
echo "stderr file created: $STDERR_FILE"
INVALID="$INVALID" $PYTHON_EXECUTABLE $1 2> ${STDERR_FILE}
retVal=$?
if [ $retVal -ne 0 ]; then
echo "Failed test: Unexpected failure from running Python script"
echo "Below is output of stderr captured:"
$cat "${STDERR_FILE}"
exit $retVal
fi
$grep --quiet "$INVALID" ${STDERR_FILE}
retVal=$?
if [ $retVal -ne 0 ]; then
echo "Failed test: expected to find \"${INVALID}\" character in tmpfile: \"${STDERR_FILE}\""
echo "Below is output of stderr captured:"
$cat "${STDERR_FILE}"
fi
exit $retVal
ceph-main/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh
#!/bin/bash
# vagrant-libvirt has a common behavior where it times out when "reloading" vms. Instead
# of calling `vagrant reload` attempt to halt everything, and then start everything, which gives
# this script the ability to try the `vagrant up` again in case of failure
#
vagrant halt
# This should not really be needed, but in case of a possible race condition between halt
# and up, it might improve things
sleep 5
retries=0
until [ $retries -ge 5 ]
do
echo "Attempting to start VMs. Attempts: $retries"
timeout 10m vagrant up "$@" && break
  retries=$((retries+1))
sleep 5
done
ceph-main/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh
#!/bin/bash
set -e
retries=0
until [ $retries -ge 5 ]
do
echo "Attempting to start VMs. Attempts: $retries"
timeout 10m vagrant up "$@" && break
  retries=$((retries+1))
sleep 5
done
sleep 10
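Both scripts forward any extra arguments to vagrant through "$@", so the provider can be chosen at call time, for example:

# example invocation; libvirt is one possible provider
bash vagrant_up.sh --provider=libvirt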
ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml
---
- hosts: osds
become: yes
tasks:
- name: list all OSD directories
find:
paths: /var/lib/ceph/osd
file_type: directory
register: osd_paths
- name: scan all OSD directories
command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
environment:
CEPH_VOLUME_DEBUG: 1
with_items:
- "{{ osd_paths.files }}"
- name: list all OSD JSON files
find:
paths: /etc/ceph/osd
file_type: file
register: osd_configs
- name: activate all scanned OSDs
command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
environment:
CEPH_VOLUME_DEBUG: 1
with_items:
- "{{ osd_configs.files }}"
ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml
---
# DEPLOY CONTAINERIZED DAEMONS
docker: false
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 2
mds_vms: 0
rgw_vms: 0
nfs_vms: 0
rbd_mirror_vms: 0
client_vms: 0
iscsi_gw_vms: 0
mgr_vms: 0
# INSTALL SOURCE OF CEPH
# valid values are 'stable' and 'dev'
ceph_install_source: stable
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.1
cluster_subnet: 192.168.2
# MEMORY
# set 1024 for CentOS
memory: 512
# Ethernet interface name
# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
disks: "[ '/dev/sdb', '/dev/sdc' ]"
# VAGRANT BOX
# Ceph boxes are *strongly* suggested. They are under better control and will
# not get updated frequently unless required for build systems. These are (for
# now):
#
# * ceph/ubuntu-xenial
#
# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
# libvirt CentOS: centos/7
# parallels Ubuntu: parallels/ubuntu-14.04
# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
# For more boxes have a look at:
# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
vagrant_box: centos/7
#ssh_private_key_path: "~/.ssh/id_rsa"
# The sync directory changes based on vagrant box
# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
#vagrant_sync_dir: /home/vagrant/sync
#vagrant_sync_dir: /
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
# VAGRANT URL
# This is a URL to download an image from an alternate location. vagrant_box
# above should be set to the filename of the image.
# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
os_tuning_params:
- { name: kernel.pid_max, value: 4194303 }
- { name: fs.file-max, value: 26234859 }
ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml
---
devices:
- '/dev/sdb'
dedicated_devices:
- '/dev/sdc'
osd_scenario: "non-collocated"
ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml
---
devices:
- '/dev/sdb'
- '/dev/sdc'
osd_scenario: "collocated"
ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml
---
- hosts: osds
become: yes
tasks:
- name: scan all running OSDs
command: "ceph-volume --cluster={{ cluster }} simple scan"
environment:
CEPH_VOLUME_DEBUG: 1
- name: activate all scanned OSDs
command: "ceph-volume --cluster={{ cluster }} simple activate --all"
environment:
CEPH_VOLUME_DEBUG: 1
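Unlike the activate scenario above, this variant relies on scan's no-argument mode, which discovers all running OSDs on its own; by hand the equivalent is simply:

# scan everything that is running, then activate everything that was scanned
ceph-volume --cluster ceph simple scan
ceph-volume --cluster ceph simple activate --all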
ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml
(identical to simple/centos7/bluestore/activate/vagrant_variables.yml above)
ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml
(identical to simple/centos7/bluestore/activate/host_vars/osd0.yml above)
ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml
(identical to simple/centos7/bluestore/activate/host_vars/osd1.yml above)
ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml
(identical to simple/centos7/bluestore/activate/test.yml above)
ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml
(identical to simple/centos7/bluestore/activate/vagrant_variables.yml above)