ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml
---
devices:
  - '/dev/sdb'
dedicated_devices:
  - '/dev/sdc'
osd_scenario: "non-collocated"
ceph-main/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml
---
devices:
  - '/dev/sdb'
  - '/dev/sdc'
osd_scenario: "collocated"
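The conftest.py further below derives each node's expected OSD count from these host_vars. A minimal worked sketch for osd1 above, assuming the default of one OSD per device (the variable names mirror conftest.py):

# How conftest.py turns osd1's host_vars into an OSD count.
ansible_vars = {'devices': ['/dev/sdb', '/dev/sdc'], 'osd_scenario': 'collocated'}
num_osds = len(ansible_vars.get('devices', []))     # 2 devices
num_osds *= ansible_vars.get('osds_per_device', 1)  # default 1 -> still 2 OSDs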
ceph-main/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py
(empty file)
ceph-main/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py
import pytest
import os


@pytest.fixture()
def node(host, request):
    """
    This fixture represents a single node in the ceph cluster. Using the
    host.ansible fixture provided by testinfra it can access all the ansible
    variables provided to it by the specific test scenario being run.

    You must include this fixture on any tests that operate on a specific
    type of node because it contains the logic to manage which tests a node
    should run.
    """
    ansible_vars = host.ansible.get_variables()
    # tox/jenkins/user will pass in this environment variable. we need to do it this way
    # because testinfra does not collect and provide ansible config passed in
    # from using --extra-vars
    ceph_dev_branch = os.environ.get("CEPH_DEV_BRANCH", "master")
    group_names = ansible_vars["group_names"]
    num_osd_ports = 4
    if 'mimic' in ceph_dev_branch or 'luminous' in ceph_dev_branch:
        num_osd_ports = 2

    # capture the initial/default state
    test_is_applicable = False
    for marker in request.node.iter_markers():
        if marker.name in group_names or marker.name == 'all':
            test_is_applicable = True
            break
    # Check if any markers on the test method exist in the nodes group_names.
    # If they do not, this test is not valid for the node being tested.
    if not test_is_applicable:
        reason = "%s: Not a valid test for node type: %s" % (
            request.function, group_names)
        pytest.skip(reason)

    osd_ids = []
    osds = []
    cluster_address = ""
    # I can assume eth1 because I know all the vagrant
    # boxes we test with use that interface
    address = host.interface("eth1").addresses[0]

    subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
    num_mons = len(ansible_vars["groups"]["mons"])
    num_osds = len(ansible_vars.get("devices", []))
    if not num_osds:
        num_osds = len(ansible_vars.get("lvm_volumes", []))
    osds_per_device = ansible_vars.get("osds_per_device", 1)
    num_osds = num_osds * osds_per_device

    # If number of devices doesn't map to number of OSDs, allow tests to define
    # that custom number, defaulting it to ``num_devices``
    num_osds = ansible_vars.get('num_osds', num_osds)
    cluster_name = ansible_vars.get("cluster", "ceph")
    conf_path = "/etc/ceph/{}.conf".format(cluster_name)
    if "osds" in group_names:
        # I can assume eth2 because I know all the vagrant
        # boxes we test with use that interface. OSDs are the only
        # nodes that have this interface.
        cluster_address = host.interface("eth2").addresses[0]
        cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
        if cmd.rc == 0:
            osd_ids = cmd.stdout.rstrip("\n").split("\n")
            osds = osd_ids

    data = dict(
        address=address,
        subnet=subnet,
        vars=ansible_vars,
        osd_ids=osd_ids,
        num_mons=num_mons,
        num_osds=num_osds,
        num_osd_ports=num_osd_ports,
        cluster_name=cluster_name,
        conf_path=conf_path,
        cluster_address=cluster_address,
        osds=osds,
    )
    return data


def pytest_collection_modifyitems(session, config, items):
    for item in items:
        test_path = item.location[0]
        if "mon" in test_path:
            item.add_marker(pytest.mark.mons)
        elif "osd" in test_path:
            item.add_marker(pytest.mark.osds)
        elif "mds" in test_path:
            item.add_marker(pytest.mark.mdss)
        elif "mgr" in test_path:
            item.add_marker(pytest.mark.mgrs)
        elif "rbd-mirror" in test_path:
            item.add_marker(pytest.mark.rbdmirrors)
        elif "rgw" in test_path:
            item.add_marker(pytest.mark.rgws)
        elif "nfs" in test_path:
            item.add_marker(pytest.mark.nfss)
        elif "iscsi" in test_path:
            item.add_marker(pytest.mark.iscsigws)
        else:
            item.add_marker(pytest.mark.all)
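To make the marker gating concrete, here is a hedged sketch of a consumer: a hypothetical tests/mon/test_mons.py (this file is illustrative, not part of the suite shown here). Because its path contains "mon", pytest_collection_modifyitems above tags it with pytest.mark.mons, and the node fixture skips it on any host whose group_names lack "mons".

# Hypothetical tests/mon/test_mons.py, for illustration only.
class TestMons(object):

    def test_ceph_mon_package_is_installed(self, node, host):
        # Collected with the "mons" marker (its path contains "mon"), so the
        # node fixture only lets it run on hosts grouped as monitors.
        assert host.package("ceph-mon").is_installed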
ceph-main/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py
(empty file)
ceph-main/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py
import json


class TestOSDs(object):

    def test_ceph_osd_package_is_installed(self, node, host):
        assert host.package("ceph-osd").is_installed

    def test_osds_listen_on_public_network(self, node, host):
        # TODO: figure out way to parametrize this test
        nb_port = (node["num_osds"] * node["num_osd_ports"])
        assert host.check_output(
            "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["address"])) == str(nb_port)  # noqa E501

    def test_osds_listen_on_cluster_network(self, node, host):
        # TODO: figure out way to parametrize this test
        nb_port = (node["num_osds"] * node["num_osd_ports"])
        assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" %  # noqa E501
                                 (node["cluster_address"])) == str(nb_port)

    def test_osd_services_are_running(self, node, host):
        # TODO: figure out way to parametrize node['osds'] for this test
        for osd in node["osds"]:
            assert host.service("ceph-osd@%s" % osd).is_running

    def test_osd_are_mounted(self, node, host):
        # TODO: figure out way to parametrize node['osd_ids'] for this test
        for osd_id in node["osd_ids"]:
            osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format(
                cluster=node["cluster_name"],
                osd_id=osd_id,
            )
            assert host.mount_point(osd_path).exists

    def test_ceph_volume_is_installed(self, node, host):
        assert host.exists('ceph-volume')

    def test_ceph_volume_systemd_is_installed(self, node, host):
        assert host.exists('ceph-volume-systemd')

    def _get_osd_id_from_host(self, node, osd_tree):
        children = []
        for n in osd_tree['nodes']:
            if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host':  # noqa E501
                children = n['children']
        return children

    def _get_nb_up_osds_from_ids(self, node, osd_tree):
        nb_up = 0
        ids = self._get_osd_id_from_host(node, osd_tree)
        for n in osd_tree['nodes']:
            if n['id'] in ids and n['status'] == 'up':
                nb_up += 1
        return nb_up

    def test_all_osds_are_up_and_in(self, node, host):
        cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(  # noqa E501
            cluster=node["cluster_name"])
        output = json.loads(host.check_output(cmd))
        assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
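The two listen tests above reduce to simple port arithmetic. A worked example, assuming a post-luminous branch (where conftest.py sets num_osd_ports to 4) and two OSDs on the node:

# Expected ceph-osd listening-socket count per network (public and cluster):
num_osds = 2
num_osd_ports = 4
nb_port = num_osds * num_osd_ports  # the netstat pipelines must count 8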
ceph-main/src/ceph-volume/ceph_volume/tests/systemd/test_main.py
import pytest
from ceph_volume import exceptions, conf
from ceph_volume.systemd.main import parse_subcommand, main, process


class TestParseSubcommand(object):

    def test_no_subcommand_found(self):
        with pytest.raises(exceptions.SuffixParsingError):
            parse_subcommand('')

    def test_sub_command_is_found(self):
        result = parse_subcommand('lvm-1-sha-1-something-0')
        assert result == 'lvm'


class Capture(object):

    def __init__(self, *a, **kw):
        self.a = a
        self.kw = kw
        self.calls = []

    def __call__(self, *a, **kw):
        self.calls.append(a)
        self.calls.append(kw)


class TestMain(object):

    def setup_method(self):
        conf.log_path = '/tmp/'

    def test_no_arguments_parsing_error(self):
        with pytest.raises(RuntimeError):
            main(args=[])

    def test_parsing_suffix_error(self):
        with pytest.raises(exceptions.SuffixParsingError):
            main(args=['asdf'])

    def test_correct_command(self, monkeypatch):
        run = Capture()
        monkeypatch.setattr(process, 'run', run)
        main(args=['ceph-volume-systemd',
                   'lvm-8715BEB4-15C5-49DE-BA6F-401086EC7B41-0'])
        command = run.calls[0][0]
        assert command == [
            'ceph-volume',
            'lvm', 'trigger',
            '8715BEB4-15C5-49DE-BA6F-401086EC7B41-0'
        ]
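A hedged reconstruction of the prefix parsing these tests pin down, inferred from the assertions rather than copied from ceph_volume.systemd.main: the systemd unit suffix 'lvm-<uuid>-<id>' splits into a subcommand ('lvm') and the remainder, which main passes on as the trigger argument.

class SuffixParsingError(Exception):
    """Stand-in for ceph_volume.exceptions.SuffixParsingError (sketch only)."""


def parse_subcommand_sketch(string):
    # 'lvm-8715BEB4-...-0' -> 'lvm'; the remainder ('8715BEB4-...-0') is what
    # main forwards to `ceph-volume lvm trigger`. An empty string has no
    # parsable subcommand and raises, matching test_no_subcommand_found.
    subcommand = string.split('-', 1)[0]
    if not subcommand:
        raise SuffixParsingError('could not parse subcommand from %s' % string)
    return subcommand


assert parse_subcommand_sketch('lvm-1-sha-1-something-0') == 'lvm'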
ceph-main/src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py
import pytest
from ceph_volume.systemd import systemctl


class TestSystemctl(object):

    @pytest.mark.parametrize("stdout,expected", [
        (['Id=ceph-osd@1.service', '', 'Id=ceph-osd@2.service'], ['1', '2']),
        (['Id=ceph-osd1.service'], []),
        (['Id=ceph-osd@1'], ['1']),
        ([], []),
    ])
    def test_get_running_osd_ids(self, stub_call, stdout, expected):
        stub_call((stdout, [], 0))
        osd_ids = systemctl.get_running_osd_ids()
        assert osd_ids == expected

    def test_returns_empty_list_on_nonzero_return_code(self, stub_call):
        stdout = ['Id=ceph-osd@1.service', '', 'Id=ceph-osd@2.service']
        stub_call((stdout, [], 1))
        osd_ids = systemctl.get_running_osd_ids()
        assert osd_ids == []
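Read together, the cases above fully specify the parsing contract. The following is an illustrative reconstruction of that behavior, inferred from the assertions rather than taken from ceph_volume.systemd.systemctl itself:

def get_running_osd_ids_sketch(stdout, returncode):
    # Nonzero return code -> empty list, per the last test above.
    if returncode != 0:
        return []
    osd_ids = []
    for line in stdout:
        # 'Id=ceph-osd@1.service' -> '1'; 'Id=ceph-osd@1' -> '1';
        # empty lines and lines without 'ceph-osd@' are ignored.
        if 'ceph-osd@' in line:
            osd_ids.append(line.split('@')[1].split('.service')[0])
    return osd_ids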
ceph-main/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py
import argparse
import pytest
import os
from ceph_volume import exceptions, process
from ceph_volume.util import arg_validators
from mock.mock import patch, MagicMock


class TestOSDPath(object):

    def setup_method(self):
        self.validator = arg_validators.OSDPath()

    def test_is_not_root(self, monkeypatch):
        monkeypatch.setattr(os, 'getuid', lambda: 100)
        with pytest.raises(exceptions.SuperUserError):
            self.validator('')

    def test_path_is_not_a_directory(self, is_root, monkeypatch, fake_filesystem):
        fake_file = fake_filesystem.create_file('/tmp/foo')
        monkeypatch.setattr(arg_validators.disk, 'is_partition', lambda x: False)
        validator = arg_validators.OSDPath()
        with pytest.raises(argparse.ArgumentError):
            validator(fake_file.path)

    def test_files_are_missing(self, is_root, tmpdir, monkeypatch):
        tmppath = str(tmpdir)
        monkeypatch.setattr(arg_validators.disk, 'is_partition', lambda x: False)
        validator = arg_validators.OSDPath()
        with pytest.raises(argparse.ArgumentError) as error:
            validator(tmppath)
        assert 'Required file (ceph_fsid) was not found in OSD' in str(error.value)


class TestExcludeGroupOptions(object):

    def setup_method(self):
        self.parser = argparse.ArgumentParser()

    def test_flags_in_one_group(self):
        argv = ['<prog>', '--filestore', '--bar']
        filestore_group = self.parser.add_argument_group('filestore')
        bluestore_group = self.parser.add_argument_group('bluestore')
        filestore_group.add_argument('--filestore')
        bluestore_group.add_argument('--bluestore')
        result = arg_validators.exclude_group_options(
            self.parser,
            ['filestore', 'bluestore'],
            argv=argv
        )
        assert result is None

    def test_flags_in_no_group(self):
        argv = ['<prog>', '--foo', '--bar']
        filestore_group = self.parser.add_argument_group('filestore')
        bluestore_group = self.parser.add_argument_group('bluestore')
        filestore_group.add_argument('--filestore')
        bluestore_group.add_argument('--bluestore')
        result = arg_validators.exclude_group_options(
            self.parser,
            ['filestore', 'bluestore'],
            argv=argv
        )
        assert result is None

    def test_flags_conflict(self, capsys):
        argv = ['<prog>', '--filestore', '--bluestore']
        filestore_group = self.parser.add_argument_group('filestore')
        bluestore_group = self.parser.add_argument_group('bluestore')
        filestore_group.add_argument('--filestore')
        bluestore_group.add_argument('--bluestore')
        arg_validators.exclude_group_options(
            self.parser, ['filestore', 'bluestore'], argv=argv
        )
        stdout, stderr = capsys.readouterr()
        assert 'Cannot use --filestore (filestore) with --bluestore (bluestore)' in stderr


class TestValidDevice(object):

    def setup_method(self, fake_filesystem):
        self.validator = arg_validators.ValidDevice()

    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    def test_path_is_valid(self, m_has_bs_label, fake_call, patch_bluestore_label, device_info, monkeypatch):
        monkeypatch.setattr('ceph_volume.util.device.Device.exists', lambda: True)
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(lsblk=lsblk)
        result = self.validator('/dev/sda')
        assert result.path == '/dev/sda'

    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    def test_path_is_invalid(self, m_has_bs_label, fake_call, patch_bluestore_label, device_info):
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(lsblk=lsblk)
        with pytest.raises(argparse.ArgumentError):
            self.validator('/device/does/not/exist')

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_dev_has_partitions(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
        mocked_device.return_value = MagicMock(
            exists=True,
            has_partitions=True,
        )
        with pytest.raises(RuntimeError):
            self.validator('/dev/foo')


class TestValidZapDevice(object):

    def setup_method(self):
        self.validator = arg_validators.ValidZapDevice()

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_has_partition(self, m_get_single_lv, m_has_bs_label, mocked_device):
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=True,
            has_gpt_headers=False,
            has_fs=False
        )
        self.validator.zap = False
        with pytest.raises(RuntimeError):
            assert self.validator('/dev/foo')

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_has_no_partition(self, m_get_single_lv, m_has_bs_label, mocked_device):
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=False,
            has_gpt_headers=False,
            has_fs=False
        )
        self.validator.zap = False
        assert self.validator('/dev/foo')


class TestValidDataDevice(object):

    def setup_method(self):
        self.validator = arg_validators.ValidDataDevice()

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_used_by_ceph(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
        mocked_device.return_value = MagicMock(
            used_by_ceph=True,
            exists=True,
            has_partitions=False,
            has_gpt_headers=False
        )
        with pytest.raises(SystemExit):
            self.validator.zap = False
            self.validator('/dev/foo')

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_has_fs(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=False,
            has_gpt_headers=False,
            has_fs=True
        )
        with pytest.raises(RuntimeError):
            self.validator.zap = False
            self.validator('/dev/foo')

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=True)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_has_bs_signature(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=False,
            has_gpt_headers=False,
            has_fs=False
        )
        with pytest.raises(RuntimeError):
            self.validator.zap = False
            self.validator('/dev/foo')


class TestValidRawDevice(object):

    def setup_method(self):
        self.validator = arg_validators.ValidRawDevice()

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.util.arg_validators.disk.blkid')
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_dmcrypt_device_already_prepared(self, m_get_single_lv, m_blkid, m_has_bs_label, mocked_device, fake_call, monkeypatch):
        def mock_call(cmd, **kw):
            return ('', '', 1)
        monkeypatch.setattr(process, 'call', mock_call)
        m_blkid.return_value = {'UUID': '8fd92779-ad78-437c-a06f-275f7170fa74', 'TYPE': 'crypto_LUKS'}
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=False,
            has_gpt_headers=False,
            has_fs=False
        )
        with pytest.raises(SystemExit):
            self.validator.zap = False
            self.validator('/dev/foo')

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_already_prepared(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=False,
            has_gpt_headers=False,
            has_fs=False
        )
        with pytest.raises(SystemExit):
            self.validator.zap = False
            self.validator('/dev/foo')

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_not_prepared(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call, monkeypatch):
        def mock_call(cmd, **kw):
            return ('', '', 1)
        monkeypatch.setattr(process, 'call', mock_call)
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=False,
            has_gpt_headers=False,
            has_fs=False
        )
        self.validator.zap = False
        assert self.validator('/dev/foo')

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_has_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call, monkeypatch):
        def mock_call(cmd, **kw):
            return ('', '', 1)
        monkeypatch.setattr(process, 'call', mock_call)
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=True,
            has_gpt_headers=False,
            has_fs=False
        )
        self.validator.zap = False
        with pytest.raises(RuntimeError):
            assert self.validator('/dev/foo')


class TestValidBatchDevice(object):

    def setup_method(self):
        self.validator = arg_validators.ValidBatchDevice()

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_is_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=False,
            has_gpt_headers=False,
            has_fs=False,
            is_partition=True
        )
        with pytest.raises(argparse.ArgumentError):
            self.validator.zap = False
            self.validator('/dev/foo')

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_is_not_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=False,
            has_gpt_headers=False,
            has_fs=False,
            is_partition=False
        )
        self.validator.zap = False
        assert self.validator('/dev/foo')


class TestValidBatchDataDevice(object):

    def setup_method(self):
        self.validator = arg_validators.ValidBatchDataDevice()

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_is_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=False,
            has_gpt_headers=False,
            has_fs=False,
            is_partition=True
        )
        with pytest.raises(argparse.ArgumentError):
            self.validator.zap = False
            assert self.validator('/dev/foo')

    @patch('ceph_volume.util.arg_validators.Device')
    @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
    @patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
    def test_device_is_not_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
        mocked_device.return_value = MagicMock(
            used_by_ceph=False,
            exists=True,
            has_partitions=False,
            has_gpt_headers=False,
            has_fs=False,
            is_partition=False
        )
        self.validator.zap = False
        assert self.validator('/dev/foo')


class TestValidFraction(object):

    def setup_method(self):
        self.validator = arg_validators.ValidFraction()

    def test_fraction_is_valid(self, fake_call):
        result = self.validator('0.8')
        assert result == 0.8

    def test_fraction_not_float(self, fake_call):
        with pytest.raises(ValueError):
            self.validator('xyz')

    def test_fraction_is_nan(self, fake_call):
        with pytest.raises(argparse.ArgumentError):
            self.validator('NaN')

    def test_fraction_is_negative(self, fake_call):
        with pytest.raises(argparse.ArgumentError):
            self.validator('-1.0')

    def test_fraction_is_zero(self, fake_call):
        with pytest.raises(argparse.ArgumentError):
            self.validator('0.0')

    def test_fraction_is_greater_one(self, fake_call):
        with pytest.raises(argparse.ArgumentError):
            self.validator('1.1')
ceph-main/src/ceph-volume/ceph_volume/tests/util/test_device.py
import os
import pytest
from copy import deepcopy
from ceph_volume.util import device
from ceph_volume.api import lvm as api
from mock.mock import patch, mock_open


class TestDevice(object):

    def test_sys_api(self, monkeypatch, device_info):
        volume = api.Volume(lv_name='lv', lv_uuid='y', vg_name='vg',
                            lv_tags={}, lv_path='/dev/VolGroup/lv')
        volumes = []
        volumes.append(volume)
        monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
        data = {"/dev/sda": {"foo": "bar"}}
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk.sys_api
        assert "foo" in disk.sys_api

    def test_lvm_size(self, monkeypatch, device_info):
        volume = api.Volume(lv_name='lv', lv_uuid='y', vg_name='vg',
                            lv_tags={}, lv_path='/dev/VolGroup/lv')
        volumes = []
        volumes.append(volume)
        monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
        # 5GB in size
        data = {"/dev/sda": {"size": "5368709120"}}
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk.lvm_size.gb == 4

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_lvm_size_rounds_down(self, fake_call, device_info):
        # 5.5GB in size
        data = {"/dev/sda": {"size": "5905580032"}}
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk.lvm_size.gb == 4

    def test_is_lv(self, fake_call, device_info):
        data = {"lv_path": "vg/lv", "vg_name": "vg", "name": "lv"}
        lsblk = {"TYPE": "lvm", "NAME": "vg-lv"}
        device_info(lv=data, lsblk=lsblk)
        disk = device.Device("vg/lv")
        assert disk.is_lv

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_vgs_is_empty(self, fake_call, device_info, monkeypatch):
        BarPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
                                 pv_tags={})
        pvolumes = []
        pvolumes.append(BarPVolume)
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(lsblk=lsblk)
        monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: {})
        disk = device.Device("/dev/nvme0n1")
        assert disk.vgs == []

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_vgs_is_not_empty(self, fake_call, device_info, monkeypatch):
        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar',
                             vg_free_count=6, vg_extent_size=1073741824)
        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda: [vg])
        lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
        device_info(lsblk=lsblk)
        disk = device.Device("/dev/nvme0n1")
        assert len(disk.vgs) == 1

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_device_is_device(self, fake_call, device_info):
        data = {"/dev/sda": {"foo": "bar"}}
        lsblk = {"TYPE": "device", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk.is_device is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_loop_device_is_not_device(self, fake_call, device_info):
        data = {"/dev/loop0": {"foo": "bar"}}
        lsblk = {"TYPE": "loop"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/loop0")
        assert disk.is_device is False

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_loop_device_is_device(self, fake_call, device_info):
        data = {"/dev/loop0": {"foo": "bar"}}
        lsblk = {"TYPE": "loop"}
        os.environ["CEPH_VOLUME_ALLOW_LOOP_DEVICES"] = "1"
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/loop0")
        assert disk.is_device is True
        del os.environ["CEPH_VOLUME_ALLOW_LOOP_DEVICES"]

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_device_is_rotational(self, fake_call, device_info):
        data = {"/dev/sda": {"rotational": "1"}}
        lsblk = {"TYPE": "device", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk.rotational

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_device_is_not_rotational(self, fake_call, device_info):
        data = {"/dev/sda": {"rotational": "0"}}
        lsblk = {"TYPE": "device", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert not disk.rotational

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_device_is_rotational_lsblk(self, fake_call, device_info):
        data = {"/dev/sda": {"foo": "bar"}}
        lsblk = {"TYPE": "device", "ROTA": "1", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk.rotational

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_device_is_not_rotational_lsblk(self, fake_call, device_info):
        data = {"/dev/sda": {"rotational": "0"}}
        lsblk = {"TYPE": "device", "ROTA": "0", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert not disk.rotational

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_device_is_rotational_defaults_true(self, fake_call, device_info):
        # rotational will default true if no info from sys_api or lsblk is found
        data = {"/dev/sda": {"foo": "bar"}}
        lsblk = {"TYPE": "device", "foo": "bar", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk.rotational

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_disk_is_device(self, fake_call, device_info):
        data = {"/dev/sda": {"foo": "bar"}}
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk.is_device is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_partition(self, fake_call, device_info):
        data = {"/dev/sda1": {"foo": "bar"}}
        lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda1")
        assert disk.is_partition

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_mpath_device_is_device(self, fake_call, device_info):
        data = {"/dev/foo": {"foo": "bar"}}
        lsblk = {"TYPE": "mpath", "NAME": "foo"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/foo")
        assert disk.is_device is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_not_lvm_member(self, fake_call, device_info):
        data = {"/dev/sda1": {"foo": "bar"}}
        lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda1")
        assert not disk.is_lvm_member

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_lvm_member(self, fake_call, device_info):
        data = {"/dev/sda1": {"foo": "bar"}}
        lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda1")
        assert not disk.is_lvm_member

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_mapper_device(self, fake_call, device_info):
        lsblk = {"TYPE": "lvm", "NAME": "foo"}
        device_info(lsblk=lsblk)
        disk = device.Device("/dev/mapper/foo")
        assert disk.is_mapper

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_dm_is_mapper_device(self, fake_call, device_info):
        lsblk = {"TYPE": "lvm", "NAME": "dm-4"}
        device_info(lsblk=lsblk)
        disk = device.Device("/dev/dm-4")
        assert disk.is_mapper

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_not_mapper_device(self, fake_call, device_info):
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert not disk.is_mapper

    @pytest.mark.usefixtures("lsblk_ceph_disk_member",
                             "disable_kernel_queries")
    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_ceph_disk_lsblk(self, fake_call, monkeypatch, patch_bluestore_label):
        disk = device.Device("/dev/sda")
        assert disk.is_ceph_disk_member

    @pytest.mark.usefixtures("blkid_ceph_disk_member",
                             "lsblk_ceph_disk_member",
                             "disable_kernel_queries")
    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_ceph_disk_blkid(self, fake_call, monkeypatch, patch_bluestore_label):
        disk = device.Device("/dev/sda")
        assert disk.is_ceph_disk_member

    @pytest.mark.usefixtures("lsblk_ceph_disk_member",
                             "disable_kernel_queries")
    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_ceph_disk_member_not_available_lsblk(self, fake_call, monkeypatch, patch_bluestore_label):
        disk = device.Device("/dev/sda")
        assert disk.is_ceph_disk_member
        assert not disk.available
        assert "Used by ceph-disk" in disk.rejected_reasons

    @pytest.mark.usefixtures("blkid_ceph_disk_member",
                             "lsblk_ceph_disk_member",
                             "disable_kernel_queries")
    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_ceph_disk_member_not_available_blkid(self, fake_call, monkeypatch, patch_bluestore_label):
        disk = device.Device("/dev/sda")
        assert disk.is_ceph_disk_member
        assert not disk.available
        assert "Used by ceph-disk" in disk.rejected_reasons

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_reject_removable_device(self, fake_call, device_info):
        data = {"/dev/sdb": {"removable": 1}}
        lsblk = {"TYPE": "disk", "NAME": "sdb"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sdb")
        assert not disk.available

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_reject_device_with_gpt_headers(self, fake_call, device_info):
        data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
        lsblk = {"TYPE": "disk", "NAME": "sdb"}
        blkid = {"PTTYPE": "gpt"}
        device_info(
            devices=data,
            blkid=blkid,
            lsblk=lsblk,
        )
        disk = device.Device("/dev/sdb")
        assert not disk.available

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_accept_non_removable_device(self, fake_call, device_info):
        data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
        lsblk = {"TYPE": "disk", "NAME": "sdb"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sdb")
        assert disk.available

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_reject_not_acceptable_device(self, fake_call, device_info):
        data = {"/dev/dm-0": {"foo": "bar"}}
        lsblk = {"TYPE": "mpath", "NAME": "dm-0"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/dm-0")
        assert not disk.available

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    @patch('ceph_volume.util.device.os.path.realpath')
    @patch('ceph_volume.util.device.os.path.islink')
    def test_accept_symlink_to_device(self, m_os_path_islink, m_os_path_realpath, device_info, fake_call):
        m_os_path_islink.return_value = True
        m_os_path_realpath.return_value = '/dev/sdb'
        data = {"/dev/sdb": {"ro": 0, "size": 5368709120}}
        lsblk = {"TYPE": "disk"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/test_symlink")
        print(disk)
        print(disk.sys_api)
        assert disk.available

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    @patch('ceph_volume.util.device.os.readlink')
    @patch('ceph_volume.util.device.os.path.islink')
    def test_reject_symlink_to_device_mapper(self, m_os_path_islink, m_os_readlink, device_info, fake_call):
        m_os_path_islink.return_value = True
        m_os_readlink.return_value = '/dev/dm-0'
        data = {"/dev/mapper/mpatha": {"ro": 0, "size": 5368709120}}
        lsblk = {"TYPE": "disk"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/mapper/mpatha")
        assert disk.available

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_reject_readonly_device(self, fake_call, device_info):
        data = {"/dev/cdrom": {"ro": 1}}
        lsblk = {"TYPE": "disk", "NAME": "cdrom"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/cdrom")
        assert not disk.available

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_reject_smaller_than_5gb(self, fake_call, device_info):
        data = {"/dev/sda": {"size": 5368709119}}
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert not disk.available, 'too small device is available'

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_accept_non_readonly_device(self, fake_call, device_info):
        data = {"/dev/sda": {"ro": 0, "size": 5368709120}}
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(devices=data, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk.available

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_reject_bluestore_device(self, fake_call, monkeypatch, patch_bluestore_label, device_info):
        patch_bluestore_label.return_value = True
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert not disk.available
        assert "Has BlueStore device label" in disk.rejected_reasons

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_reject_device_with_oserror(self, fake_call, monkeypatch, patch_bluestore_label, device_info):
        patch_bluestore_label.side_effect = OSError('test failure')
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert not disk.available
        assert "Failed to determine if device is BlueStore" in disk.rejected_reasons

    @pytest.mark.usefixtures("lsblk_ceph_disk_member",
                             "device_info_not_ceph_disk_member",
                             "disable_kernel_queries")
    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_not_ceph_disk_member_lsblk(self, fake_call, patch_bluestore_label):
        disk = device.Device("/dev/sda")
        assert disk.is_ceph_disk_member is False

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_existing_vg_available(self, fake_call, monkeypatch, device_info):
        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar',
                             vg_free_count=1536, vg_extent_size=4194304)
        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda: [vg])
        lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
        data = {"/dev/nvme0n1": {"size": "6442450944"}}
        lv = {"tags": {"ceph.osd_id": "1"}}
        device_info(devices=data, lsblk=lsblk, lv=lv)
        disk = device.Device("/dev/nvme0n1")
        assert disk.available_lvm
        assert not disk.available
        assert not disk.available_raw

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_existing_vg_too_small(self, fake_call, monkeypatch, device_info):
        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar',
                             vg_free_count=4, vg_extent_size=1073741824)
        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda: [vg])
        lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
        data = {"/dev/nvme0n1": {"size": "6442450944"}}
        lv = {"tags": {"ceph.osd_id": "1"}}
        device_info(devices=data, lsblk=lsblk, lv=lv)
        disk = device.Device("/dev/nvme0n1")
        assert not disk.available_lvm
        assert not disk.available
        assert not disk.available_raw

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_multiple_existing_vgs(self, fake_call, monkeypatch, device_info):
        vg1 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar',
                              vg_free_count=1000, vg_extent_size=4194304)
        vg2 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar',
                              vg_free_count=536, vg_extent_size=4194304)
        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda: [vg1, vg2])
        lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
        data = {"/dev/nvme0n1": {"size": "6442450944"}}
        lv = {"tags": {"ceph.osd_id": "1"}}
        device_info(devices=data, lsblk=lsblk, lv=lv)
        disk = device.Device("/dev/nvme0n1")
        assert disk.available_lvm
        assert not disk.available
        assert not disk.available_raw

    @pytest.mark.parametrize("ceph_type", ["data", "block"])
    def test_used_by_ceph(self, fake_call, device_info, monkeypatch, ceph_type):
        data = {"/dev/sda": {"foo": "bar"}}
        lsblk = {"TYPE": "part", "NAME": "sda", "PKNAME": "sda"}
        FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
                                 lv_uuid="0000", pv_tags={}, vg_name="vg")
        pvolumes = []
        pvolumes.append(FooPVolume)
        lv_data = {"lv_name": "lv", "lv_path": "vg/lv", "vg_name": "vg",
                   "lv_uuid": "0000",
                   "lv_tags": "ceph.osd_id=0,ceph.type=" + ceph_type}
        volumes = []
        lv = api.Volume(**lv_data)
        volumes.append(lv)
        monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes)
        monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
        device_info(devices=data, lsblk=lsblk, lv=lv_data)
        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
                             vg_extent_size=1073741824)
        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
        disk = device.Device("/dev/sda")
        assert disk.used_by_ceph

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_not_used_by_ceph(self, fake_call, device_info, monkeypatch):
        FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
                                 lv_uuid="0000", pv_tags={}, vg_name="vg")
        pvolumes = []
        pvolumes.append(FooPVolume)
        data = {"/dev/sda": {"foo": "bar"}}
        lsblk = {"TYPE": "part", "NAME": "sda", "PKNAME": "sda"}
        lv_data = {"lv_path": "vg/lv", "vg_name": "vg", "lv_uuid": "0000",
                   "tags": {"ceph.osd_id": 0, "ceph.type": "journal"}}
        monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes)
        device_info(devices=data, lsblk=lsblk, lv=lv_data)
        disk = device.Device("/dev/sda")
        assert not disk.used_by_ceph

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_get_device_id(self, fake_call, device_info):
        udev = {k: k for k in ['ID_VENDOR', 'ID_MODEL', 'ID_SCSI_SERIAL']}
        lsblk = {"TYPE": "disk", "NAME": "sda"}
        device_info(udevadm=udev, lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk._get_device_id() == 'ID_VENDOR_ID_MODEL_ID_SCSI_SERIAL'

    def test_has_bluestore_label(self):
        # patch device.Device __init__ function to do nothing since we want to only test the
        # low-level behavior of has_bluestore_label
        with patch.object(device.Device, "__init__",
                          lambda self, path, with_lsm=False: None):
            disk = device.Device("/dev/sda")
            disk.path = "/dev/sda"
            with patch('builtins.open', mock_open(read_data=b'bluestore block device\n')):
                assert disk.has_bluestore_label
            with patch('builtins.open', mock_open(read_data=b'not a bluestore block device\n')):
                assert not disk.has_bluestore_label


class TestDeviceEncryption(object):

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_partition_is_not_encrypted_lsblk(self, fake_call, device_info):
        lsblk = {'TYPE': 'part', 'FSTYPE': 'xfs', 'NAME': 'sda', 'PKNAME': 'sda'}
        device_info(lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk.is_encrypted is False

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_partition_is_encrypted_lsblk(self, fake_call, device_info):
        lsblk = {'TYPE': 'part', 'FSTYPE': 'crypto_LUKS', 'NAME': 'sda', 'PKNAME': 'sda'}
        device_info(lsblk=lsblk)
        disk = device.Device("/dev/sda")
        assert disk.is_encrypted is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_partition_is_not_encrypted_blkid(self, fake_call, device_info):
        lsblk = {'TYPE': 'part', 'NAME': 'sda', 'PKNAME': 'sda'}
        blkid = {'TYPE': 'ceph data'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/sda")
        assert disk.is_encrypted is False

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_partition_is_encrypted_blkid(self, fake_call, device_info):
        lsblk = {'TYPE': 'part', 'NAME': 'sda', 'PKNAME': 'sda'}
        blkid = {'TYPE': 'crypto_LUKS'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/sda")
        assert disk.is_encrypted is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_mapper_is_encrypted_luks1(self, fake_call, device_info, monkeypatch):
        status = {'type': 'LUKS1'}
        monkeypatch.setattr(device, 'encryption_status', lambda x: status)
        lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'}
        blkid = {'TYPE': 'mapper'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/mapper/uuid")
        assert disk.is_encrypted is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_mapper_is_encrypted_luks2(self, fake_call, device_info, monkeypatch):
        status = {'type': 'LUKS2'}
        monkeypatch.setattr(device, 'encryption_status', lambda x: status)
        lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'}
        blkid = {'TYPE': 'mapper'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/mapper/uuid")
        assert disk.is_encrypted is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_mapper_is_encrypted_plain(self, fake_call, device_info, monkeypatch):
        status = {'type': 'PLAIN'}
        monkeypatch.setattr(device, 'encryption_status', lambda x: status)
        lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'}
        blkid = {'TYPE': 'mapper'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/mapper/uuid")
        assert disk.is_encrypted is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_mapper_is_not_encrypted_plain(self, fake_call, device_info, monkeypatch):
        monkeypatch.setattr(device, 'encryption_status', lambda x: {})
        lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'}
        blkid = {'TYPE': 'mapper'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/mapper/uuid")
        assert disk.is_encrypted is False

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_lv_is_encrypted_blkid(self, fake_call, device_info):
        lsblk = {'TYPE': 'lvm', 'NAME': 'sda'}
        blkid = {'TYPE': 'crypto_LUKS'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/sda")
        disk.lv_api = {}
        assert disk.is_encrypted is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_lv_is_not_encrypted_blkid(self, fake_call, factory, device_info):
        lsblk = {'TYPE': 'lvm', 'NAME': 'sda'}
        blkid = {'TYPE': 'xfs'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/sda")
        disk.lv_api = factory(encrypted=None)
        assert disk.is_encrypted is False

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_lv_is_encrypted_lsblk(self, fake_call, device_info):
        lsblk = {'FSTYPE': 'crypto_LUKS', 'NAME': 'sda', 'TYPE': 'lvm'}
        blkid = {'TYPE': 'mapper'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/sda")
        disk.lv_api = {}
        assert disk.is_encrypted is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_lv_is_not_encrypted_lsblk(self, fake_call, factory, device_info):
        lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'}
        blkid = {'TYPE': 'mapper'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/sda")
        disk.lv_api = factory(encrypted=None)
        assert disk.is_encrypted is False

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_lv_is_encrypted_lvm_api(self, fake_call, factory, device_info):
        lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'}
        blkid = {'TYPE': 'mapper'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/sda")
        disk.lv_api = factory(encrypted=True)
        assert disk.is_encrypted is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_lv_is_not_encrypted_lvm_api(self, fake_call, factory, device_info):
        lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'}
        blkid = {'TYPE': 'mapper'}
        device_info(lsblk=lsblk, blkid=blkid)
        disk = device.Device("/dev/sda")
        disk.lv_api = factory(encrypted=False)
        assert disk.is_encrypted is False


class TestDeviceOrdering(object):

    def setup_method(self):
        self.data = {
            "/dev/sda": {"removable": 0},
            "/dev/sdb": {"removable": 1},  # invalid
            "/dev/sdc": {"removable": 0},
            "/dev/sdd": {"removable": 1},  # invalid
        }

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_valid_before_invalid(self, fake_call, device_info):
        lsblk_sda = {"NAME": "sda", "TYPE": "disk"}
        lsblk_sdb = {"NAME": "sdb", "TYPE": "disk"}
        device_info(devices=self.data, lsblk=lsblk_sda)
        sda = device.Device("/dev/sda")
        device_info(devices=self.data, lsblk=lsblk_sdb)
        sdb = device.Device("/dev/sdb")
        assert sda < sdb
        assert sdb > sda

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_valid_alphabetical_ordering(self, fake_call, device_info):
        lsblk_sda = {"NAME": "sda", "TYPE": "disk"}
        lsblk_sdc = {"NAME": "sdc", "TYPE": "disk"}
        device_info(devices=self.data, lsblk=lsblk_sda)
        sda = device.Device("/dev/sda")
        device_info(devices=self.data, lsblk=lsblk_sdc)
        sdc = device.Device("/dev/sdc")
        assert sda < sdc
        assert sdc > sda

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_invalid_alphabetical_ordering(self, fake_call, device_info):
        lsblk_sdb = {"NAME": "sdb", "TYPE": "disk"}
        lsblk_sdd = {"NAME": "sdd", "TYPE": "disk"}
        device_info(devices=self.data, lsblk=lsblk_sdb)
        sdb = device.Device("/dev/sdb")
        device_info(devices=self.data, lsblk=lsblk_sdd)
        sdd = device.Device("/dev/sdd")
        assert sdb < sdd
        assert sdd > sdb


class TestCephDiskDevice(object):

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_partlabel_lsblk(self, fake_call, device_info):
        lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": ""}
        device_info(lsblk=lsblk)
        disk = device.CephDiskDevice(device.Device("/dev/sda"))
        assert disk.partlabel == ''

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_partlabel_blkid(self, fake_call, device_info):
        lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": "ceph data"}
        blkid = {"TYPE": "disk", "PARTLABEL": "ceph data"}
        device_info(blkid=blkid, lsblk=lsblk)
        disk = device.CephDiskDevice(device.Device("/dev/sda"))
        assert disk.partlabel == 'ceph data'

    @pytest.mark.usefixtures("lsblk_ceph_disk_member",
                             "blkid_ceph_disk_member",
                             "disable_kernel_queries")
    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_member_blkid(self, fake_call, monkeypatch):
        disk = device.CephDiskDevice(device.Device("/dev/sda"))
        assert disk.is_member is True

    @pytest.mark.usefixtures("lsblk_ceph_disk_member",
                             "disable_kernel_queries")
    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_is_member_lsblk(self, fake_call, patch_bluestore_label, device_info):
        lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": "ceph"}
        device_info(lsblk=lsblk)
        disk = device.CephDiskDevice(device.Device("/dev/sda"))
        assert disk.is_member is True

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_unknown_type(self, fake_call, device_info):
        lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": "gluster"}
        device_info(lsblk=lsblk)
        disk = device.CephDiskDevice(device.Device("/dev/sda"))
        assert disk.type == 'unknown'

    ceph_types = ['data', 'wal', 'db', 'lockbox', 'journal', 'block']

    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    @pytest.mark.usefixtures("lsblk_ceph_disk_member",
                             "blkid_ceph_disk_member",
                             "disable_kernel_queries")
    def test_type_blkid(self, monkeypatch, fake_call, device_info, ceph_partlabel):
        disk = device.CephDiskDevice(device.Device("/dev/sda"))
        assert disk.type in self.ceph_types

    @pytest.mark.usefixtures("blkid_ceph_disk_member",
                             "lsblk_ceph_disk_member",
                             "disable_kernel_queries")
    @patch("ceph_volume.util.disk.has_bluestore_label", lambda x: False)
    def test_type_lsblk(self, fake_call, device_info, ceph_partlabel):
        disk = device.CephDiskDevice(device.Device("/dev/sda"))
        assert disk.type in self.ceph_types
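Outside the test harness, the Device facade exercised above is consulted the same way: build it from a path, then inspect availability and the accumulated rejection reasons. A minimal usage sketch, assuming a host that actually has a /dev/sda:

from ceph_volume.util import device

disk = device.Device('/dev/sda')
if not disk.available:
    # rejected_reasons collects strings such as "Used by ceph-disk" or
    # "Has BlueStore device label", as asserted in the tests above.
    print(disk.rejected_reasons)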
ceph-main/src/ceph-volume/ceph_volume/tests/util/test_disk.py
import os
import pytest
from ceph_volume.util import disk
from mock.mock import patch


class TestLsblkParser(object):

    def test_parses_whitespace_values(self):
        output = 'NAME="sdaa5" PARTLABEL="ceph data" RM="0" SIZE="10M" RO="0" TYPE="part"'
        result = disk._lsblk_parser(output)
        assert result['PARTLABEL'] == 'ceph data'

    def test_ignores_bogus_pairs(self):
        output = 'NAME="sdaa5" PARTLABEL RM="0" SIZE="10M" RO="0" TYPE="part" MOUNTPOINT=""'
        result = disk._lsblk_parser(output)
        assert result['SIZE'] == '10M'


class TestBlkidParser(object):

    def test_parses_whitespace_values(self):
        output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"'''  # noqa
        result = disk._blkid_parser(output)
        assert result['PARTLABEL'] == 'ceph data'

    def test_ignores_unmapped(self):
        output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"'''  # noqa
        result = disk._blkid_parser(output)
        assert len(result.keys()) == 4

    def test_translates_to_partuuid(self):
        output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"'''  # noqa
        result = disk._blkid_parser(output)
        assert result['PARTUUID'] == 'b89c03bc-bf58-4338-a8f8-a2f484852b4f'


class TestBlkid(object):

    def test_parses_translated(self, stub_call):
        output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"'''  # noqa
        stub_call((output.split(), [], 0))
        result = disk.blkid('/dev/sdb1')
        assert result['PARTUUID'] == 'b89c03bc-bf58-4338-a8f8-a2f484852b4f'
        assert result['PARTLABEL'] == 'ceph data'
        assert result['UUID'] == '62416664-cbaf-40bd-9689-10bd337379c3'
        assert result['TYPE'] == 'xfs'


class TestUdevadmProperty(object):

    def test_good_output(self, stub_call):
        output = """ID_MODEL=SK_hynix_SC311_SATA_512GB
ID_PART_TABLE_TYPE=gpt
ID_SERIAL_SHORT=MS83N71801150416A""".split()
        stub_call((output, [], 0))
        result = disk.udevadm_property('dev/sda')
        assert result['ID_MODEL'] == 'SK_hynix_SC311_SATA_512GB'
        assert result['ID_PART_TABLE_TYPE'] == 'gpt'
        assert result['ID_SERIAL_SHORT'] == 'MS83N71801150416A'

    def test_property_filter(self, stub_call):
        output = """ID_MODEL=SK_hynix_SC311_SATA_512GB
ID_PART_TABLE_TYPE=gpt
ID_SERIAL_SHORT=MS83N71801150416A""".split()
        stub_call((output, [], 0))
        result = disk.udevadm_property('dev/sda', ['ID_MODEL', 'ID_SERIAL_SHORT'])
        assert result['ID_MODEL'] == 'SK_hynix_SC311_SATA_512GB'
        assert 'ID_PART_TABLE_TYPE' not in result

    def test_fail_on_broken_output(self, stub_call):
        output = ["ID_MODEL:SK_hynix_SC311_SATA_512GB"]
        stub_call((output, [], 0))
        with pytest.raises(ValueError):
            disk.udevadm_property('dev/sda')


class TestDeviceFamily(object):

    def test_groups_multiple_devices(self, stub_call):
        out = [
            'NAME="sdaa5" PARLABEL="ceph lockbox"',
            'NAME="sdaa" RO="0"',
            'NAME="sdaa1" PARLABEL="ceph data"',
            'NAME="sdaa2" PARLABEL="ceph journal"',
        ]
        stub_call((out, '', 0))
        result = disk.device_family('sdaa5')
        assert len(result) == 4

    def test_parses_output_correctly(self, stub_call):
        names = ['sdaa', 'sdaa5', 'sdaa1', 'sdaa2']
        out = [
            'NAME="sdaa5" PARLABEL="ceph lockbox"',
            'NAME="sdaa" RO="0"',
            'NAME="sdaa1" PARLABEL="ceph data"',
            'NAME="sdaa2" PARLABEL="ceph journal"',
        ]
        stub_call((out, '', 0))
        result = disk.device_family('sdaa5')
        for parsed in result:
            assert parsed['NAME'] in names


class TestHumanReadableSize(object):

    def test_bytes(self):
        result = disk.human_readable_size(800)
        assert result == '800.00 B'

    def test_kilobytes(self):
        result = disk.human_readable_size(800*1024)
        assert result == '800.00 KB'

    def test_megabytes(self):
        result = disk.human_readable_size(800*1024*1024)
        assert result == '800.00 MB'

    def test_gigabytes(self):
        result = disk.human_readable_size(8.19*1024*1024*1024)
        assert result == '8.19 GB'

    def test_terabytes(self):
        result = disk.human_readable_size(81.2*1024*1024*1024*1024)
        assert result == '81.20 TB'

    def test_petabytes(self):
        result = disk.human_readable_size(9.23*1024*1024*1024*1024*1024)
        assert result == '9.23 PB'


class TestSizeFromHumanReadable(object):

    def test_bytes(self):
        result = disk.size_from_human_readable('2')
        assert result == disk.Size(b=2)

    def test_kilobytes(self):
        result = disk.size_from_human_readable('2 K')
        assert result == disk.Size(kb=2)

    def test_megabytes(self):
        result = disk.size_from_human_readable('2 M')
        assert result == disk.Size(mb=2)

    def test_gigabytes(self):
        result = disk.size_from_human_readable('2 G')
        assert result == disk.Size(gb=2)

    def test_terabytes(self):
        result = disk.size_from_human_readable('2 T')
        assert result == disk.Size(tb=2)

    def test_petabytes(self):
        result = disk.size_from_human_readable('2 P')
        assert result == disk.Size(pb=2)

    def test_case(self):
        result = disk.size_from_human_readable('2 t')
        assert result == disk.Size(tb=2)

    def test_space(self):
        result = disk.size_from_human_readable('2T')
        assert result == disk.Size(tb=2)

    def test_float(self):
        result = disk.size_from_human_readable('2.0')
        assert result == disk.Size(b=2)
        result = disk.size_from_human_readable('2.0T')
        assert result == disk.Size(tb=2)
        result = disk.size_from_human_readable('1.8T')
        assert result == disk.Size(tb=1.8)


class TestSizeParse(object):

    def test_bytes(self):
        result = disk.Size.parse('2')
        assert result == disk.Size(b=2)

    def test_kilobytes(self):
        result = disk.Size.parse('2K')
        assert result == disk.Size(kb=2)

    def test_megabytes(self):
        result = disk.Size.parse('2M')
        assert result == disk.Size(mb=2)

    def test_gigabytes(self):
        result = disk.Size.parse('2G')
        assert result == disk.Size(gb=2)

    def test_terabytes(self):
        result = disk.Size.parse('2T')
        assert result == disk.Size(tb=2)

    def test_petabytes(self):
        result = disk.Size.parse('2P')
        assert result == disk.Size(pb=2)

    def test_tb(self):
        result = disk.Size.parse('2Tb')
        assert result == disk.Size(tb=2)

    def test_case(self):
        result = disk.Size.parse('2t')
        assert result == disk.Size(tb=2)

    def test_space(self):
        result = disk.Size.parse('2T')
        assert result == disk.Size(tb=2)

    def test_float(self):
        result = disk.Size.parse('2.0')
        assert result == disk.Size(b=2)
        result = disk.Size.parse('2.0T')
        assert result == disk.Size(tb=2)
        result = disk.Size.parse('1.8T')
        assert result == disk.Size(tb=1.8)


class TestGetDevices(object):

    def test_no_devices_are_found(self, tmpdir, patched_get_block_devs_sysfs):
        patched_get_block_devs_sysfs.return_value = []
        result = disk.get_devices(_sys_block_path=str(tmpdir))
        assert result == {}

    @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
    def test_sda_block_is_found(self, patched_get_block_devs_sysfs, fake_filesystem):
        sda_path = '/dev/sda'
        patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
        result = disk.get_devices()
        assert len(result.keys()) == 1
        assert result[sda_path]['human_readable_size'] == '0.00 B'
        assert result[sda_path]['model'] == ''
        assert result[sda_path]['partitions'] == {}

    @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
    def test_sda_size(self, patched_get_block_devs_sysfs, fake_filesystem):
        sda_path = '/dev/sda'
        patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
        fake_filesystem.create_file('/sys/block/sda/size', contents='1024')
        result = disk.get_devices()
        assert list(result.keys()) == [sda_path]
        assert result[sda_path]['human_readable_size'] == '512.00 KB'

    @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
    def test_sda_sectorsize_fallsback(self, patched_get_block_devs_sysfs, fake_filesystem):
        # if no sectorsize, it will use queue/hw_sector_size
        sda_path = '/dev/sda'
        patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
        fake_filesystem.create_file('/sys/block/sda/queue/hw_sector_size', contents='1024')
        result = disk.get_devices()
        assert list(result.keys()) == [sda_path]
        assert result[sda_path]['sectorsize'] == '1024'

    @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
    def test_sda_sectorsize_from_logical_block(self, patched_get_block_devs_sysfs, fake_filesystem):
        sda_path = '/dev/sda'
        patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
        fake_filesystem.create_file('/sys/block/sda/queue/logical_block_size', contents='99')
        result = disk.get_devices()
        assert result[sda_path]['sectorsize'] == '99'

    @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
    def test_sda_sectorsize_does_not_fallback(self, patched_get_block_devs_sysfs, fake_filesystem):
        sda_path = '/dev/sda'
        patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
        fake_filesystem.create_file('/sys/block/sda/queue/logical_block_size', contents='99')
        fake_filesystem.create_file('/sys/block/sda/queue/hw_sector_size', contents='1024')
        result = disk.get_devices()
        assert result[sda_path]['sectorsize'] == '99'

    @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
    def test_is_rotational(self, patched_get_block_devs_sysfs, fake_filesystem):
        sda_path = '/dev/sda'
        patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
        fake_filesystem.create_file('/sys/block/sda/queue/rotational', contents='1')
        result = disk.get_devices()
        assert result[sda_path]['rotational'] == '1'

    @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
    def test_is_ceph_rbd(self, patched_get_block_devs_sysfs, fake_filesystem):
        rbd_path = '/dev/rbd0'
        patched_get_block_devs_sysfs.return_value = [[rbd_path, rbd_path, 'disk']]
        result = disk.get_devices()
        assert rbd_path not in result

    @patch('ceph_volume.util.disk.is_locked_raw_device', lambda x: False)
    def test_actuator_device(self, patched_get_block_devs_sysfs, fake_filesystem):
        sda_path = '/dev/sda'
        fake_actuator_nb = 2
        patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
        for actuator in range(0, fake_actuator_nb):
            fake_filesystem.create_dir(f'/sys/block/sda/queue/independent_access_ranges/{actuator}')
        result = disk.get_devices()
        assert result[sda_path]['actuators'] == fake_actuator_nb


class TestSizeCalculations(object):

    @pytest.mark.parametrize('aliases', [
        ('b', 'bytes'),
        ('kb', 'kilobytes'),
        ('mb', 'megabytes'),
        ('gb', 'gigabytes'),
        ('tb', 'terabytes'),
    ])
    def test_aliases(self, aliases):
        short_alias, long_alias = aliases
        s = disk.Size(b=1)
        short_alias = getattr(s, short_alias)
        long_alias = getattr(s, long_alias)
        assert short_alias == long_alias

    @pytest.mark.parametrize('values', [
        ('b', 857619069665.28),
        ('kb', 837518622.72),
        ('mb', 817889.28),
        ('gb', 798.72),
        ('tb', 0.78),
    ])
    def test_terabytes(self, values):
        # regardless of the input value, all the other values correlate to each
        # other the same, every time
        unit, value = values
        s = disk.Size(**{unit: value})
        assert s.b == 857619069665.28
        assert s.kb == 837518622.72
        assert s.mb == 817889.28
        assert s.gb == 798.72
        assert s.tb == 0.78


class TestSizeOperators(object):

    @pytest.mark.parametrize('larger', [1025, 1024.1, 1024.001])
    def test_gigabytes_is_smaller(self, larger):
        assert disk.Size(gb=1) < disk.Size(mb=larger)

    @pytest.mark.parametrize('smaller', [1023, 1023.9, 1023.001])
    def test_gigabytes_is_larger(self, smaller):
        assert disk.Size(gb=1) > disk.Size(mb=smaller)

    @pytest.mark.parametrize('larger', [1025, 1024.1, 1024.001, 1024])
    def test_gigabytes_is_smaller_or_equal(self, larger):
        assert disk.Size(gb=1) <= disk.Size(mb=larger)

    @pytest.mark.parametrize('smaller', [1023, 1023.9, 1023.001, 1024])
    def test_gigabytes_is_larger_or_equal(self, smaller):
        assert disk.Size(gb=1) >= disk.Size(mb=smaller)

    @pytest.mark.parametrize('values', [
        ('b', 857619069665.28),
        ('kb', 837518622.72),
        ('mb', 817889.28),
        ('gb', 798.72),
        ('tb', 0.78),
    ])
    def test_equality(self, values):
        unit, value = values
        s = disk.Size(**{unit: value})
        # both tb and b, since b is always calculated regardless, and is useful
        # when testing tb
        assert disk.Size(tb=0.78) == s
        assert disk.Size(b=857619069665.28) == s

    @pytest.mark.parametrize('values', [
        ('b', 857619069665.28),
        ('kb', 837518622.72),
        ('mb', 817889.28),
        ('gb', 798.72),
        ('tb', 0.78),
    ])
    def test_inequality(self, values):
        unit, value = values
        s = disk.Size(**{unit: value})
        # both tb and b, since b is always calculated regardless, and is useful
        # when testing tb
        assert disk.Size(tb=1) != s
        assert disk.Size(b=100) != s


class TestSizeOperations(object):

    def test_assignment_addition_with_size_objects(self):
        result = disk.Size(mb=256) + disk.Size(gb=1)
        assert result.gb == 1.25
        assert result.gb.as_int() == 1
        assert result.gb.as_float() == 1.25

    def test_self_addition_with_size_objects(self):
        base = disk.Size(mb=256)
        base += disk.Size(gb=1)
        assert base.gb == 1.25

    def test_self_addition_does_not_alter_state(self):
        base = disk.Size(mb=256)
        base + disk.Size(gb=1)
        assert base.mb == 256

    def test_addition_with_non_size_objects(self):
        with pytest.raises(TypeError):
            disk.Size(mb=100) + 4

    def test_assignment_subtraction_with_size_objects(self):
        base = disk.Size(gb=1)
        base -= disk.Size(mb=256)
        assert base.mb == 768

    def test_self_subtraction_does_not_alter_state(self):
        base = disk.Size(gb=1)
        base - disk.Size(mb=256)
        assert base.gb == 1

    def test_subtraction_with_size_objects(self):
        result = disk.Size(gb=1) - disk.Size(mb=256)
        assert result.mb == 768

    def test_subtraction_with_non_size_objects(self):
        with pytest.raises(TypeError):
            disk.Size(mb=100) - 4

    def test_multiplication_with_size_objects(self):
        with pytest.raises(TypeError):
            disk.Size(mb=100) * disk.Size(mb=1)

    def test_multiplication_with_non_size_objects(self):
        base = disk.Size(gb=1)
        result = base * 2
        assert result.gb == 2
        assert result.gb.as_int() == 2

    def test_division_with_size_objects(self):
        result = disk.Size(gb=1) / disk.Size(mb=1)
        assert int(result) == 1024

    def test_division_with_non_size_objects(self):
        base = disk.Size(gb=1)
        result = base / 2
        assert result.mb == 512
        assert result.mb.as_int() == 512

    def test_division_with_non_size_objects_without_state(self):
        base = disk.Size(gb=1)
        base / 2
        assert base.gb == 1
        assert base.gb.as_int() == 1


class TestSizeAttributes(object):

    def test_attribute_does_not_exist(self):
        with pytest.raises(AttributeError):
            disk.Size(mb=1).exabytes


class TestSizeFormatting(object):

    def test_default_formatting_tb_to_b(self):
        size = disk.Size(tb=0.0000000001)
        result = "%s" % size
        assert result == "109.95 B"

    def test_default_formatting_tb_to_kb(self):
        size = disk.Size(tb=0.00000001)
        result = "%s" % size
        assert result == "10.74 KB"

    def test_default_formatting_tb_to_mb(self):
        size = disk.Size(tb=0.000001)
        result = "%s" % size
        assert result == "1.05 MB"

    def test_default_formatting_tb_to_gb(self):
        size = disk.Size(tb=0.001)
        result = "%s" % size
        assert result == "1.02 GB"

    def test_default_formatting_tb_to_tb(self):
        size = disk.Size(tb=10)
        result = "%s" % size
        assert result == "10.00 TB"


class TestSizeSpecificFormatting(object):

    def test_formatting_b(self):
        size = disk.Size(b=2048)
        result = "%s" % size.b
        assert "%s" % size.b == "%s" % size.bytes
        assert result == "2048.00 B"

    def test_formatting_kb(self):
        size = disk.Size(kb=5700)
        result = "%s" % size.kb
        assert "%s" % size.kb == "%s" % size.kilobytes
        assert result == "5700.00 KB"

    def test_formatting_mb(self):
        size = disk.Size(mb=4000)
        result = "%s" % size.mb
        assert "%s" % size.mb == "%s" % size.megabytes
        assert result == "4000.00 MB"

    def test_formatting_gb(self):
        size = disk.Size(gb=77777)
        result = "%s" % size.gb
        assert "%s" % size.gb == "%s" % size.gigabytes
        assert result == "77777.00 GB"

    def test_formatting_tb(self):
        size = disk.Size(tb=1027)
        result = "%s" % size.tb
        assert "%s" % size.tb == "%s" % size.terabytes
        assert result == "1027.00 TB"


class TestAllowLoopDevsWarning(object):

    def test_loop_dev_warning(self, fake_call, caplog):
        assert disk.allow_loop_devices() is False
        assert not caplog.records
        os.environ['CEPH_VOLUME_ALLOW_LOOP_DEVICES'] = "y"
        assert disk.allow_loop_devices() is True
        log = caplog.records[0]
        assert log.levelname == "WARNING"
        assert "will never be supported in production" in log.message


class TestHasBlueStoreLabel(object):

    def test_device_path_is_a_path(self, fake_filesystem):
        device_path = '/var/lib/ceph/osd/ceph-0'
        fake_filesystem.create_dir(device_path)
        assert not disk.has_bluestore_label(device_path)
19,396
35.32397
203
py
null
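The Size tests above pin down the arithmetic, comparison, and parsing behavior of disk.Size. As a quick illustration of that same API, a minimal sketch (assuming the ceph_volume package is importable; values taken from the assertions above):

from ceph_volume.util import disk

size = disk.Size(mb=256) + disk.Size(gb=1)   # Size objects support +, -, /, and comparisons
assert size.gb == 1.25                       # per TestSizeOperations above
assert disk.Size.parse('1.8T') == disk.Size(tb=1.8)  # unit parsing is case-insensitive
print(size)                                  # '1.25 GB' (default formatting picks a unit)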
ceph-main/src/ceph-volume/ceph_volume/tests/util/test_encryption.py
from ceph_volume.util import encryption
from mock.mock import patch
import base64


class TestGetKeySize(object):

    def test_get_size_from_conf_default(self, conf_ceph_stub):
        conf_ceph_stub('''
        [global]
        fsid=asdf
        ''')
        assert encryption.get_key_size_from_conf() == '512'

    def test_get_size_from_conf_custom(self, conf_ceph_stub):
        conf_ceph_stub('''
        [global]
        fsid=asdf
        [osd]
        osd_dmcrypt_key_size=256
        ''')
        assert encryption.get_key_size_from_conf() == '256'

    def test_get_size_from_conf_custom_invalid(self, conf_ceph_stub):
        conf_ceph_stub('''
        [global]
        fsid=asdf
        [osd]
        osd_dmcrypt_key_size=1024
        ''')
        assert encryption.get_key_size_from_conf() == '512'


class TestStatus(object):

    def test_skips_unuseful_lines(self, stub_call):
        out = ['some line here', '  device: /dev/sdc1']
        stub_call((out, '', 0))
        assert encryption.status('/dev/sdc1') == {'device': '/dev/sdc1'}

    def test_removes_extra_quotes(self, stub_call):
        out = ['some line here', '  device: "/dev/sdc1"']
        stub_call((out, '', 0))
        assert encryption.status('/dev/sdc1') == {'device': '/dev/sdc1'}

    def test_ignores_bogus_lines(self, stub_call):
        out = ['some line here', '  ']
        stub_call((out, '', 0))
        assert encryption.status('/dev/sdc1') == {}


class TestDmcryptClose(object):

    def test_mapper_exists(self, fake_run, fake_filesystem):
        file_name = fake_filesystem.create_file('mapper-device')
        encryption.dmcrypt_close(file_name.path)
        arguments = fake_run.calls[0]['args'][0]
        assert arguments[0] == 'cryptsetup'
        assert arguments[1] == 'remove'
        assert arguments[2].startswith('/')

    def test_mapper_does_not_exist(self, fake_run):
        file_name = '/path/does/not/exist'
        encryption.dmcrypt_close(file_name)
        assert fake_run.calls == []


class TestDmcryptKey(object):

    def test_dmcrypt(self):
        result = encryption.create_dmcrypt_key()
        assert len(base64.b64decode(result)) == 128


class TestLuksFormat(object):

    @patch('ceph_volume.util.encryption.process.call')
    def test_luks_format_command_with_default_size(self, m_call, conf_ceph_stub):
        conf_ceph_stub('[global]\nfsid=abcd')
        expected = [
            'cryptsetup',
            '--batch-mode',
            '--key-size',
            '512',
            '--key-file',
            '-',
            'luksFormat',
            '/dev/foo'
        ]
        encryption.luks_format('abcd', '/dev/foo')
        assert m_call.call_args[0][0] == expected

    @patch('ceph_volume.util.encryption.process.call')
    def test_luks_format_command_with_custom_size(self, m_call, conf_ceph_stub):
        conf_ceph_stub('[global]\nfsid=abcd\n[osd]\nosd_dmcrypt_key_size=256')
        expected = [
            'cryptsetup',
            '--batch-mode',
            '--key-size',
            '256',
            '--key-file',
            '-',
            'luksFormat',
            '/dev/foo'
        ]
        encryption.luks_format('abcd', '/dev/foo')
        assert m_call.call_args[0][0] == expected


class TestLuksOpen(object):

    @patch('ceph_volume.util.encryption.process.call')
    def test_luks_open_command_with_default_size(self, m_call, conf_ceph_stub):
        conf_ceph_stub('[global]\nfsid=abcd')
        expected = [
            'cryptsetup',
            '--key-size',
            '512',
            '--key-file',
            '-',
            '--allow-discards',
            'luksOpen',
            '/dev/foo',
            '/dev/bar'
        ]
        encryption.luks_open('abcd', '/dev/foo', '/dev/bar')
        assert m_call.call_args[0][0] == expected

    @patch('ceph_volume.util.encryption.process.call')
    def test_luks_open_command_with_custom_size(self, m_call, conf_ceph_stub):
        conf_ceph_stub('[global]\nfsid=abcd\n[osd]\nosd_dmcrypt_key_size=256')
        expected = [
            'cryptsetup',
            '--key-size',
            '256',
            '--key-file',
            '-',
            '--allow-discards',
            'luksOpen',
            '/dev/foo',
            '/dev/bar'
        ]
        encryption.luks_open('abcd', '/dev/foo', '/dev/bar')
        assert m_call.call_args[0][0] == expected
4,386
30.561151
81
py
null
ceph-main/src/ceph-volume/ceph_volume/tests/util/test_prepare.py
import pytest
from textwrap import dedent
import json
from ceph_volume.util import prepare
from ceph_volume.util.prepare import system
from ceph_volume import conf
from ceph_volume.tests.conftest import Factory


class TestOSDIDAvailable(object):

    def test_false_if_id_is_none(self):
        assert not prepare.osd_id_available(None)

    def test_returncode_is_not_zero(self, monkeypatch):
        monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: ('', '', 1))
        with pytest.raises(RuntimeError):
            prepare.osd_id_available(1)

    def test_id_does_exist_but_not_available(self, monkeypatch):
        stdout = dict(nodes=[
            dict(id=0, status="up"),
        ])
        stdout = ['', json.dumps(stdout)]
        monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0))
        result = prepare.osd_id_available(0)
        assert not result

    def test_id_does_not_exist(self, monkeypatch):
        stdout = dict(nodes=[
            dict(id=0),
        ])
        stdout = ['', json.dumps(stdout)]
        monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0))
        result = prepare.osd_id_available(1)
        assert result

    def test_returns_true_when_id_is_destroyed(self, monkeypatch):
        stdout = dict(nodes=[
            dict(id=0, status="destroyed"),
        ])
        stdout = ['', json.dumps(stdout)]
        monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0))
        result = prepare.osd_id_available(0)
        assert result


class TestFormatDevice(object):

    def test_include_force(self, fake_run, monkeypatch):
        monkeypatch.setattr(conf, 'ceph', Factory(get_list=lambda *a, **kw: []))
        prepare.format_device('/dev/sxx')
        flags = fake_run.calls[0]['args'][0]
        assert '-f' in flags

    def test_device_is_always_appended(self, fake_run, conf_ceph):
        conf_ceph(get_list=lambda *a, **kw: [])
        prepare.format_device('/dev/sxx')
        flags = fake_run.calls[0]['args'][0]
        assert flags[-1] == '/dev/sxx'

    def test_extra_flags_are_added(self, fake_run, conf_ceph):
        conf_ceph(get_list=lambda *a, **kw: ['--why-yes'])
        prepare.format_device('/dev/sxx')
        flags = fake_run.calls[0]['args'][0]
        assert '--why-yes' in flags

    def test_default_options(self, conf_ceph_stub, fake_run):
        conf_ceph_stub(dedent("""[global]
        fsid = 1234lkjh1234"""))
        conf.cluster = 'ceph'
        prepare.format_device('/dev/sda1')
        expected = [
            'mkfs', '-t', 'xfs',
            '-f', '-i', 'size=2048',  # default flags
            '/dev/sda1']
        assert expected == fake_run.calls[0]['args'][0]

    def test_multiple_options_are_used(self, conf_ceph_stub, fake_run):
        conf_ceph_stub(dedent("""[global]
        fsid = 1234lkjh1234
        [osd]
        osd mkfs options xfs = -f -i size=1024"""))
        conf.cluster = 'ceph'
        prepare.format_device('/dev/sda1')
        expected = [
            'mkfs', '-t', 'xfs',
            '-f', '-i', 'size=1024',
            '/dev/sda1']
        assert expected == fake_run.calls[0]['args'][0]

    def test_multiple_options_will_get_the_force_flag(self, conf_ceph_stub, fake_run):
        conf_ceph_stub(dedent("""[global]
        fsid = 1234lkjh1234
        [osd]
        osd mkfs options xfs = -i size=1024"""))
        conf.cluster = 'ceph'
        prepare.format_device('/dev/sda1')
        expected = [
            'mkfs', '-t', 'xfs',
            '-f', '-i', 'size=1024',
            '/dev/sda1']
        assert expected == fake_run.calls[0]['args'][0]

    def test_underscore_options_are_used(self, conf_ceph_stub, fake_run):
        conf_ceph_stub(dedent("""[global]
        fsid = 1234lkjh1234
        [osd]
        osd_mkfs_options_xfs = -i size=128"""))
        conf.cluster = 'ceph'
        prepare.format_device('/dev/sda1')
        expected = [
            'mkfs', '-t', 'xfs',
            '-f', '-i', 'size=128',
            '/dev/sda1']
        assert expected == fake_run.calls[0]['args'][0]


class TestOsdMkfsBluestore(object):

    def test_keyring_is_added(self, fake_call, monkeypatch):
        monkeypatch.setattr(system, 'chown', lambda path: True)
        prepare.osd_mkfs_bluestore(1, 'asdf', keyring='secret')
        assert '--keyfile' in fake_call.calls[0]['args'][0]

    def test_keyring_is_not_added(self, fake_call, monkeypatch):
        monkeypatch.setattr(system, 'chown', lambda path: True)
        prepare.osd_mkfs_bluestore(1, 'asdf')
        assert '--keyfile' not in fake_call.calls[0]['args'][0]

    def test_wal_is_added(self, fake_call, monkeypatch):
        monkeypatch.setattr(system, 'chown', lambda path: True)
        prepare.osd_mkfs_bluestore(1, 'asdf', wal='/dev/smm1')
        assert '--bluestore-block-wal-path' in fake_call.calls[0]['args'][0]
        assert '/dev/smm1' in fake_call.calls[0]['args'][0]

    def test_db_is_added(self, fake_call, monkeypatch):
        monkeypatch.setattr(system, 'chown', lambda path: True)
        prepare.osd_mkfs_bluestore(1, 'asdf', db='/dev/smm2')
        assert '--bluestore-block-db-path' in fake_call.calls[0]['args'][0]
        assert '/dev/smm2' in fake_call.calls[0]['args'][0]


class TestMountOSD(object):

    def test_default_options(self, conf_ceph_stub, fake_run):
        conf_ceph_stub(dedent("""[global]
        fsid = 1234lkjh1234"""))
        conf.cluster = 'ceph'
        prepare.mount_osd('/dev/sda1', 1)
        expected = [
            'mount', '-t', 'xfs', '-o',
            'rw,noatime,inode64',  # default flags
            '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
        assert expected == fake_run.calls[0]['args'][0]

    def test_mount_options_are_used(self, conf_ceph_stub, fake_run):
        conf_ceph_stub(dedent("""[global]
        fsid = 1234lkjh1234
        [osd]
        osd mount options xfs = rw"""))
        conf.cluster = 'ceph'
        prepare.mount_osd('/dev/sda1', 1)
        expected = [
            'mount', '-t', 'xfs', '-o',
            'rw',
            '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
        assert expected == fake_run.calls[0]['args'][0]

    def test_multiple_whitespace_options_are_used(self, conf_ceph_stub, fake_run):
        conf_ceph_stub(dedent("""[global]
        fsid = 1234lkjh1234
        [osd]
        osd mount options xfs = rw auto exec"""))
        conf.cluster = 'ceph'
        prepare.mount_osd('/dev/sda1', 1)
        expected = [
            'mount', '-t', 'xfs', '-o',
            'rw,auto,exec',
            '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
        assert expected == fake_run.calls[0]['args'][0]

    def test_multiple_comma_whitespace_options_are_used(self, conf_ceph_stub, fake_run):
        conf_ceph_stub(dedent("""[global]
        fsid = 1234lkjh1234
        [osd]
        osd mount options xfs = rw, auto, exec"""))
        conf.cluster = 'ceph'
        prepare.mount_osd('/dev/sda1', 1)
        expected = [
            'mount', '-t', 'xfs', '-o',
            'rw,auto,exec',
            '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
        assert expected == fake_run.calls[0]['args'][0]

    def test_underscore_mount_options_are_used(self, conf_ceph_stub, fake_run):
        conf_ceph_stub(dedent("""[global]
        fsid = 1234lkjh1234
        [osd]
        osd mount options xfs = rw"""))
        conf.cluster = 'ceph'
        prepare.mount_osd('/dev/sda1', 1)
        expected = [
            'mount', '-t', 'xfs', '-o',
            'rw',
            '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
        assert expected == fake_run.calls[0]['args'][0]


ceph_conf_mount_values = [
    ['rw,', 'auto,' 'exec'],
    ['rw', 'auto', 'exec'],
    [' rw ', ' auto ', ' exec '],
    ['rw,', 'auto,', 'exec,'],
    [',rw ', ',auto ', ',exec,'],
    [',rw,', ',auto,', ',exec,'],
]

string_mount_values = [
    'rw, auto exec ',
    'rw auto exec',
    ',rw, auto, exec,',
    ' rw auto exec ',
    ' rw,auto,exec ',
    'rw,auto,exec',
    ',rw,auto,exec,',
    'rw,auto,exec ',
    'rw, auto, exec ',
]


class TestNormalizeFlags(object):
    # a bit overkill since most of this is already tested in prepare.mount_osd
    # tests

    @pytest.mark.parametrize("flags", ceph_conf_mount_values)
    def test_normalize_lists(self, flags):
        result = sorted(prepare._normalize_mount_flags(flags).split(','))
        assert ','.join(result) == 'auto,exec,rw'

    @pytest.mark.parametrize("flags", string_mount_values)
    def test_normalize_strings(self, flags):
        result = sorted(prepare._normalize_mount_flags(flags).split(','))
        assert ','.join(result) == 'auto,exec,rw'

    @pytest.mark.parametrize("flags", ceph_conf_mount_values)
    def test_normalize_extra_flags(self, flags):
        result = prepare._normalize_mount_flags(flags, extras=['discard'])
        assert sorted(result.split(',')) == ['auto', 'discard', 'exec', 'rw']

    @pytest.mark.parametrize("flags", ceph_conf_mount_values)
    def test_normalize_duplicate_extra_flags(self, flags):
        result = prepare._normalize_mount_flags(flags, extras=['rw', 'discard'])
        assert sorted(result.split(',')) == ['auto', 'discard', 'exec', 'rw']

    @pytest.mark.parametrize("flags", string_mount_values)
    def test_normalize_strings_flags(self, flags):
        result = sorted(prepare._normalize_mount_flags(flags, extras=['discard']).split(','))
        assert ','.join(result) == 'auto,discard,exec,rw'

    @pytest.mark.parametrize("flags", string_mount_values)
    def test_normalize_strings_duplicate_flags(self, flags):
        result = sorted(prepare._normalize_mount_flags(flags, extras=['discard','rw']).split(','))
        assert ','.join(result) == 'auto,discard,exec,rw'


class TestMkfsBluestore(object):

    def test_non_zero_exit_status(self, stub_call, monkeypatch):
        conf.cluster = 'ceph'
        monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
        stub_call(([], [], 1))
        with pytest.raises(RuntimeError) as error:
            prepare.osd_mkfs_bluestore('1', 'asdf-1234', keyring='keyring')
        assert "Command failed with exit code 1" in str(error.value)

    def test_non_zero_exit_formats_command_correctly(self, stub_call, monkeypatch):
        conf.cluster = 'ceph'
        monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
        stub_call(([], [], 1))
        with pytest.raises(RuntimeError) as error:
            prepare.osd_mkfs_bluestore('1', 'asdf-1234', keyring='keyring')
        expected = ' '.join([
            'ceph-osd',
            '--cluster',
            'ceph',
            '--osd-objectstore', 'bluestore', '--mkfs',
            '-i', '1', '--monmap', '/var/lib/ceph/osd/ceph-1/activate.monmap',
            '--keyfile', '-', '--osd-data', '/var/lib/ceph/osd/ceph-1/',
            '--osd-uuid', 'asdf-1234',
            '--setuser', 'ceph', '--setgroup', 'ceph'])
        assert expected in str(error.value)
11,024
36.627986
98
py
null
ceph-main/src/ceph-volume/ceph_volume/tests/util/test_system.py
import os
import pwd
import getpass
import pytest
from textwrap import dedent
from ceph_volume.util import system
from mock.mock import patch
from ceph_volume.tests.conftest import Factory


@pytest.fixture
def mock_find_executable_on_host(monkeypatch):
    """
    Monkeypatches util.system.find_executable_on_host, so that a caller
    can add behavior to the response
    """
    def apply(stdout=None, stderr=None, returncode=0):
        stdout_stream = Factory(read=lambda: stdout)
        stderr_stream = Factory(read=lambda: stderr)
        return_value = Factory(
            stdout=stdout_stream,
            stderr=stderr_stream,
            wait=lambda: returncode,
            communicate=lambda x: (stdout, stderr, returncode)
        )

        monkeypatch.setattr(
            'ceph_volume.util.system.subprocess.Popen',
            lambda *a, **kw: return_value)

    return apply


class TestMkdirP(object):

    def test_existing_dir_does_not_raise_w_chown(self, monkeypatch, tmpdir):
        user = pwd.getpwnam(getpass.getuser())
        uid, gid = user[2], user[3]
        monkeypatch.setattr(system, 'get_ceph_user_ids', lambda: (uid, gid,))
        path = str(tmpdir)
        system.mkdir_p(path)
        assert os.path.isdir(path)

    def test_new_dir_w_chown(self, monkeypatch, tmpdir):
        user = pwd.getpwnam(getpass.getuser())
        uid, gid = user[2], user[3]
        monkeypatch.setattr(system, 'get_ceph_user_ids', lambda: (uid, gid,))
        path = os.path.join(str(tmpdir), 'new')
        system.mkdir_p(path)
        assert os.path.isdir(path)

    def test_existing_dir_does_not_raise_no_chown(self, tmpdir):
        path = str(tmpdir)
        system.mkdir_p(path, chown=False)
        assert os.path.isdir(path)

    def test_new_dir_no_chown(self, tmpdir):
        path = os.path.join(str(tmpdir), 'new')
        system.mkdir_p(path, chown=False)
        assert os.path.isdir(path)


@pytest.fixture
def fake_proc(tmpdir, monkeypatch):
    PROCDIR = str(tmpdir)
    proc_path = os.path.join(PROCDIR, 'mounts')
    with open(proc_path, 'w') as f:
        f.write(dedent("""nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
        rootfs / rootfs rw 0 0
        sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
        proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
        devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=238292k,nr_inodes=59573,mode=755 0 0
        securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
        tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
        devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
        tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
        tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
        cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
        cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
        configfs /sys/kernel/config configfs rw,relatime 0 0
        /dev/mapper/VolGroup00-LogVol00 / xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
        selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
        debugfs /sys/kernel/debug debugfs rw,relatime 0 0
        hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
        mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
        sunrpc /far/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
        /dev/sde4 /two/field/path
        nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
        /dev/sde2 /boot xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
        tmpfs /far/lib/ceph/osd/ceph-5 tmpfs rw,seclabel,relatime 0 0
        tmpfs /far/lib/ceph/osd/ceph-7 tmpfs rw,seclabel,relatime 0 0
        /dev/sda1 /far/lib/ceph/osd/ceph-0 xfs rw,seclabel,noatime,attr2,inode64,noquota 0 0
        tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=50040k,mode=700,uid=1000,gid=1000 0 0
        /dev/sdc2 /boot xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
        tmpfs /run/user/1000 tmpfs rw,seclabel,mode=700,uid=1000,gid=1000 0 0"""))
    monkeypatch.setattr(system, 'PROCDIR', PROCDIR)
    monkeypatch.setattr(os.path, 'exists', lambda x: True)


class TestPathIsMounted(object):

    def test_is_mounted(self, fake_proc):
        assert system.path_is_mounted('/boot') is True

    def test_is_not_mounted(self, fake_proc):
        assert system.path_is_mounted('/far/fib/feph') is False

    def test_is_not_mounted_at_destination(self, fake_proc):
        assert system.path_is_mounted('/boot', destination='/dev/sda1') is False

    def test_is_mounted_at_destination(self, fake_proc):
        assert system.path_is_mounted('/boot', destination='/dev/sdc2') is True


class TestDeviceIsMounted(object):

    def test_is_mounted(self, fake_proc):
        assert system.device_is_mounted('/dev/sda1') is True

    def test_path_is_not_device(self, fake_proc):
        assert system.device_is_mounted('/far/lib/ceph/osd/ceph-7') is False

    def test_is_not_mounted_at_destination(self, fake_proc):
        assert system.device_is_mounted('/dev/sda1', destination='/far/lib/ceph/osd/test-1') is False

    def test_is_mounted_at_destination(self, fake_proc):
        assert system.device_is_mounted('/dev/sda1', destination='/far/lib/ceph/osd/ceph-7') is False

    def test_is_realpath_dev_mounted_at_destination(self, fake_proc, monkeypatch):
        monkeypatch.setattr(system.os.path, 'realpath', lambda x: '/dev/sda1' if 'foo' in x else x)
        result = system.device_is_mounted('/dev/maper/foo', destination='/far/lib/ceph/osd/ceph-0')
        assert result is True

    def test_is_realpath_path_mounted_at_destination(self, fake_proc, monkeypatch):
        monkeypatch.setattr(
            system.os.path, 'realpath',
            lambda x: '/far/lib/ceph/osd/ceph-0' if 'symlink' in x else x)
        result = system.device_is_mounted('/dev/sda1', destination='/symlink/lib/ceph/osd/ceph-0')
        assert result is True


class TestGetMounts(object):

    def test_not_mounted(self, tmpdir, monkeypatch):
        PROCDIR = str(tmpdir)
        proc_path = os.path.join(PROCDIR, 'mounts')
        with open(proc_path, 'w') as f:
            f.write('')
        monkeypatch.setattr(system, 'PROCDIR', PROCDIR)
        m = system.Mounts()
        assert m.get_mounts() == {}

    def test_is_mounted_(self, fake_proc):
        m = system.Mounts()
        assert m.get_mounts()['/dev/sdc2'] == ['/boot']

    def test_ignores_two_fields(self, fake_proc):
        m = system.Mounts()
        assert m.get_mounts().get('/dev/sde4') is None

    def test_tmpfs_is_reported(self, fake_proc):
        m = system.Mounts()
        assert m.get_mounts()['tmpfs'][0] == '/dev/shm'

    def test_non_skip_devs_arent_reported(self, fake_proc):
        m = system.Mounts()
        assert m.get_mounts().get('cgroup') is None

    def test_multiple_mounts_are_appended(self, fake_proc):
        m = system.Mounts()
        assert len(m.get_mounts()['tmpfs']) == 7

    def test_nonexistent_devices_are_skipped(self, tmpdir, monkeypatch):
        PROCDIR = str(tmpdir)
        proc_path = os.path.join(PROCDIR, 'mounts')
        with open(proc_path, 'w') as f:
            f.write(dedent("""nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
            /dev/sda1 /far/lib/ceph/osd/ceph-0 xfs rw,attr2,inode64,noquota 0 0
            /dev/sda2 /far/lib/ceph/osd/ceph-1 xfs rw,attr2,inode64,noquota 0 0"""))
        monkeypatch.setattr(system, 'PROCDIR', PROCDIR)
        monkeypatch.setattr(os.path, 'exists', lambda x: False if x == '/dev/sda1' else True)
        m = system.Mounts()
        assert m.get_mounts().get('/dev/sda1') is None


class TestIsBinary(object):

    def test_is_binary(self, fake_filesystem):
        binary_path = fake_filesystem.create_file('/tmp/fake-file', contents='asd\n\nlkjh\x00')
        assert system.is_binary(binary_path.path)

    def test_is_not_binary(self, fake_filesystem):
        binary_path = fake_filesystem.create_file('/tmp/fake-file', contents='asd\n\nlkjh0')
        assert system.is_binary(binary_path.path) is False


class TestGetFileContents(object):

    def test_path_does_not_exist(self, tmpdir):
        filepath = os.path.join(str(tmpdir), 'doesnotexist')
        assert system.get_file_contents(filepath, 'default') == 'default'

    def test_path_has_contents(self, fake_filesystem):
        interesting_file = fake_filesystem.create_file('/tmp/fake-file', contents="1")
        result = system.get_file_contents(interesting_file.path)
        assert result == "1"

    def test_path_has_multiline_contents(self, fake_filesystem):
        interesting_file = fake_filesystem.create_file('/tmp/fake-file', contents="0\n1")
        result = system.get_file_contents(interesting_file.path)
        assert result == "0\n1"

    def test_exception_returns_default(self):
        with patch('builtins.open') as mocked_open:
            mocked_open.side_effect = Exception()
            result = system.get_file_contents('/tmp/fake-file')
        assert result == ''


class TestWhich(object):

    def test_executable_exists_but_is_not_file(self, monkeypatch):
        monkeypatch.setattr(system.os.path, 'isfile', lambda x: False)
        monkeypatch.setattr(system.os.path, 'exists', lambda x: True)
        assert system.which('exedir') == 'exedir'

    def test_executable_does_not_exist(self, monkeypatch):
        monkeypatch.setattr(system.os.path, 'isfile', lambda x: False)
        monkeypatch.setattr(system.os.path, 'exists', lambda x: False)
        assert system.which('exedir') == 'exedir'

    def test_executable_exists_as_file(self, monkeypatch):
        monkeypatch.setattr(system.os, 'getenv', lambda x, y: '')
        monkeypatch.setattr(system.os.path, 'isfile', lambda x: x != 'ceph')
        monkeypatch.setattr(system.os.path, 'exists', lambda x: x != 'ceph')
        assert system.which('ceph') == '/usr/local/bin/ceph'

    def test_warnings_when_executable_isnt_matched(self, monkeypatch, capsys):
        monkeypatch.setattr(system.os.path, 'isfile', lambda x: True)
        monkeypatch.setattr(system.os.path, 'exists', lambda x: False)
        system.which('exedir')
        cap = capsys.readouterr()
        assert 'Executable exedir not in PATH' in cap.err

    def test_run_on_host_found(self, mock_find_executable_on_host):
        mock_find_executable_on_host(stdout="/sbin/lvs\n", stderr="some stderr message\n")
        assert system.which('lvs', run_on_host=True) == '/sbin/lvs'

    def test_run_on_host_not_found(self, mock_find_executable_on_host):
        mock_find_executable_on_host(stdout="", stderr="some stderr message\n")
        assert system.which('lvs', run_on_host=True) == 'lvs'


@pytest.fixture
def stub_which(monkeypatch):
    def apply(value='/bin/restorecon'):
        monkeypatch.setattr(system, 'which', lambda x: value)
    return apply


# python2 has no FileNotFoundError
try:
    FileNotFoundError
except NameError:
    FileNotFoundError = OSError


class TestSetContext(object):

    def setup_method(self):
        try:
            os.environ.pop('CEPH_VOLUME_SKIP_RESTORECON')
        except KeyError:
            pass

    @pytest.mark.parametrize('value', ['1', 'True', 'true', 'TRUE', 'yes'])
    def test_set_context_skips(self, stub_call, fake_run, value):
        stub_call(('', '', 0))
        os.environ['CEPH_VOLUME_SKIP_RESTORECON'] = value
        system.set_context('/tmp/foo')
        assert fake_run.calls == []

    @pytest.mark.parametrize('value', ['0', 'False', 'false', 'FALSE', 'no'])
    def test_set_context_doesnt_skip_with_env(self, stub_call, stub_which, fake_run, value):
        stub_call(('', '', 0))
        stub_which()
        os.environ['CEPH_VOLUME_SKIP_RESTORECON'] = value
        system.set_context('/tmp/foo')
        assert len(fake_run.calls)

    def test_set_context_skips_on_executable(self, stub_call, stub_which, fake_run):
        stub_call(('', '', 0))
        stub_which('restorecon')
        system.set_context('/tmp/foo')
        assert fake_run.calls == []

    def test_set_context_no_skip_on_executable(self, stub_call, stub_which, fake_run):
        stub_call(('', '', 0))
        stub_which('/bin/restorecon')
        system.set_context('/tmp/foo')
        assert len(fake_run.calls)

    @patch('ceph_volume.process.call')
    def test_selinuxenabled_doesnt_exist(self, mocked_call, fake_run):
        mocked_call.side_effect = FileNotFoundError()
        system.set_context('/tmp/foo')
        assert fake_run.calls == []

    def test_selinuxenabled_is_not_enabled(self, stub_call, fake_run):
        stub_call(('', '', 1))
        system.set_context('/tmp/foo')
        assert fake_run.calls == []
12,909
40.645161
156
py
null
ceph-main/src/ceph-volume/ceph_volume/tests/util/test_util.py
import pytest
from ceph_volume import util


class TestAsBytes(object):

    def test_bytes_just_gets_returned(self):
        bytes_string = "contents".encode('utf-8')
        assert util.as_bytes(bytes_string) == bytes_string

    def test_string_gets_converted_to_bytes(self):
        result = util.as_bytes('contents')
        assert isinstance(result, bytes)


class TestStrToInt(object):

    def test_passing_a_float_str_comma(self):
        result = util.str_to_int("1,99")
        assert result == 1

    def test_passing_a_float_does_not_round_comma(self):
        result = util.str_to_int("1,99", round_down=False)
        assert result == 2

    @pytest.mark.parametrize("value", ['2', 2])
    def test_passing_an_int(self, value):
        result = util.str_to_int(value)
        assert result == 2

    @pytest.mark.parametrize("value", ['1.99', 1.99])
    def test_passing_a_float(self, value):
        result = util.str_to_int(value)
        assert result == 1

    @pytest.mark.parametrize("value", ['1.99', 1.99])
    def test_passing_a_float_does_not_round(self, value):
        result = util.str_to_int(value, round_down=False)
        assert result == 2

    def test_text_is_not_an_integer_like(self):
        with pytest.raises(RuntimeError) as error:
            util.str_to_int("1.4GB")
        assert str(error.value) == "Unable to convert to integer: '1.4GB'"

    def test_input_is_not_string(self):
        with pytest.raises(RuntimeError) as error:
            util.str_to_int(None)
        assert str(error.value) == "Unable to convert to integer: 'None'"


def true_responses(upper_casing=False):
    if upper_casing:
        return ['Y', 'YES', '']
    return ['y', 'yes', '']


def false_responses(upper_casing=False):
    if upper_casing:
        return ['N', 'NO']
    return ['n', 'no']


def invalid_responses():
    return [9, 0.1, 'h', [], {}, None]


class TestStrToBool(object):

    @pytest.mark.parametrize('response', true_responses())
    def test_trueish(self, response):
        assert util.str_to_bool(response) is True

    @pytest.mark.parametrize('response', false_responses())
    def test_falseish(self, response):
        assert util.str_to_bool(response) is False

    @pytest.mark.parametrize('response', true_responses(True))
    def test_trueish_upper(self, response):
        assert util.str_to_bool(response) is True

    @pytest.mark.parametrize('response', false_responses(True))
    def test_falseish_upper(self, response):
        assert util.str_to_bool(response) is False

    @pytest.mark.parametrize('response', invalid_responses())
    def test_invalid(self, response):
        with pytest.raises(ValueError):
            util.str_to_bool(response)


class TestPromptBool(object):

    @pytest.mark.parametrize('response', true_responses())
    def test_trueish(self, response):
        fake_input = lambda x: response
        qx = 'what the what?'
        assert util.prompt_bool(qx, input_=fake_input) is True

    @pytest.mark.parametrize('response', false_responses())
    def test_falseish(self, response):
        fake_input = lambda x: response
        qx = 'what the what?'
        assert util.prompt_bool(qx, input_=fake_input) is False

    def test_try_again_true(self):
        responses = ['g', 'h', 'y']
        fake_input = lambda x: responses.pop(0)
        qx = 'what the what?'
        assert util.prompt_bool(qx, input_=fake_input) is True

    def test_try_again_false(self):
        responses = ['g', 'h', 'n']
        fake_input = lambda x: responses.pop(0)
        qx = 'what the what?'
        assert util.prompt_bool(qx, input_=fake_input) is False
3,642
30.136752
74
py
null
ceph-main/src/ceph-volume/ceph_volume/util/__init__.py
import logging
from math import floor
from ceph_volume import terminal

try:
    input = raw_input  # pylint: disable=redefined-builtin
except NameError:
    pass

logger = logging.getLogger(__name__)


def as_string(string):
    """
    Ensure that whatever type of string is incoming, it is returned as an
    actual string, versus 'bytes' which Python 3 likes to use.
    """
    if isinstance(string, bytes):
        # we really ignore here if we can't properly decode with utf-8
        return string.decode('utf-8', 'ignore')
    return string


def as_bytes(string):
    """
    Ensure that whatever type of string is incoming, it is returned as bytes,
    encoding to utf-8 otherwise
    """
    if isinstance(string, bytes):
        return string
    return string.encode('utf-8', errors='ignore')


def str_to_int(string, round_down=True):
    """
    Parses a string number into an integer, optionally converting to a float
    and rounding down.

    Some LVM values may come with a comma instead of a dot to define decimals.
    This function normalizes a comma into a dot.
    """
    error_msg = "Unable to convert to integer: '%s'" % str(string)
    try:
        integer = float(string.replace(',', '.'))
    except AttributeError:
        # this might be an integer already, so try to use it, otherwise raise
        # the original exception
        if isinstance(string, (int, float)):
            integer = string
        else:
            logger.exception(error_msg)
            raise RuntimeError(error_msg)
    except (TypeError, ValueError):
        logger.exception(error_msg)
        raise RuntimeError(error_msg)
    if round_down:
        integer = floor(integer)
    else:
        integer = round(integer)
    return int(integer)


def str_to_bool(val):
    """
    Convert a string representation of truth to True or False

    True values are 'y', 'yes', or ''; case-insensitive
    False values are 'n', or 'no'; case-insensitive
    Raises ValueError if 'val' is anything else.
    """
    true_vals = ['yes', 'y', '']
    false_vals = ['no', 'n']
    try:
        val = val.lower()
    except AttributeError:
        val = str(val).lower()
    if val in true_vals:
        return True
    elif val in false_vals:
        return False
    else:
        raise ValueError("Invalid input value: %s" % val)


def prompt_bool(question, input_=None):
    """
    Interface to prompt a boolean (or boolean-like) response from a user.
    Usually a confirmation.
    """
    input_prompt = input_ or input
    prompt_format = '--> {question} '.format(question=question)
    response = input_prompt(prompt_format)
    try:
        return str_to_bool(response)
    except ValueError:
        terminal.error('Valid true responses are: y, yes, <Enter>')
        terminal.error('Valid false responses are: n, no')
        terminal.error('That response was invalid, please try again')
        return prompt_bool(question, input_=input_prompt)


def merge_dict(x, y):
    """
    Return two dicts merged
    """
    z = x.copy()
    z.update(y)
    return z
3,072
27.453704
78
py
null
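The conversion helpers above are small enough to demo inline. A minimal sketch (assuming ceph_volume is importable; the behavior shown matches the docstrings and the test_util.py row above):

from ceph_volume.util import str_to_int, str_to_bool

assert str_to_int("1,99") == 1                     # comma normalized to a dot, floored by default
assert str_to_int("1,99", round_down=False) == 2   # rounds instead of flooring
assert str_to_bool('YES') is True                  # 'y', 'yes', and '' are truthy, case-insensitive
assert str_to_bool('n') is False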
ceph-main/src/ceph-volume/ceph_volume/util/arg_validators.py
import argparse
import os
import math
from ceph_volume import terminal, decorators, process
from ceph_volume.util.device import Device
from ceph_volume.util import disk


def valid_osd_id(val):
    return str(int(val))


class ValidDevice(object):

    def __init__(self, as_string=False, gpt_ok=False):
        self.as_string = as_string
        self.gpt_ok = gpt_ok

    def __call__(self, dev_path):
        self.get_device(dev_path)
        self._validated_device = self._is_valid_device()
        return self._format_device(self._validated_device)

    def get_device(self, dev_path):
        self._device = Device(dev_path)
        self.dev_path = dev_path

    def _format_device(self, device):
        if self.as_string:
            if device.is_lv:
                # all codepaths expect an lv path to be returned in this format
                return "{}/{}".format(device.vg_name, device.lv_name)
            return device.path
        return device

    def _is_valid_device(self):
        error = None
        if not self._device.exists:
            error = "Unable to proceed with non-existing device: %s" % self.dev_path
        # FIXME this is not a nice API, this validator was meant to catch any
        # non-existing devices upfront, not check for gpt headers. Now this
        # needs to optionally skip checking gpt headers which is beyond
        # verifying if the device exists. The better solution would be to
        # configure this with a list of checks that can be excluded/included on
        # __init__
        elif self._device.has_gpt_headers and not self.gpt_ok:
            error = "GPT headers found, they must be removed on: %s" % self.dev_path
        if self._device.has_partitions:
            raise RuntimeError("Device {} has partitions.".format(self.dev_path))
        if error:
            raise argparse.ArgumentError(None, error)
        return self._device


class ValidZapDevice(ValidDevice):
    def __call__(self, dev_path):
        super().get_device(dev_path)
        return self._format_device(self._is_valid_device())

    def _is_valid_device(self, raise_sys_exit=True):
        super()._is_valid_device()
        return self._device


class ValidDataDevice(ValidDevice):
    def __call__(self, dev_path):
        super().get_device(dev_path)
        return self._format_device(self._is_valid_device())

    def _is_valid_device(self, raise_sys_exit=True):
        super()._is_valid_device()
        if self._device.used_by_ceph:
            terminal.info('Device {} is already prepared'.format(self.dev_path))
            if raise_sys_exit:
                raise SystemExit(0)
        if self._device.has_fs and not self._device.used_by_ceph:
            raise RuntimeError("Device {} has a filesystem.".format(self.dev_path))
        if self.dev_path[0] == '/' and disk.has_bluestore_label(self.dev_path):
            raise RuntimeError("Device {} has bluestore signature.".format(self.dev_path))
        return self._device


class ValidRawDevice(ValidDevice):
    def __call__(self, dev_path):
        super().get_device(dev_path)
        return self._format_device(self._is_valid_device())

    def _is_valid_device(self, raise_sys_exit=True):
        out, err, rc = process.call([
            'ceph-bluestore-tool', 'show-label',
            '--dev', self.dev_path], verbose_on_failure=False)
        if not rc:
            terminal.info("Raw device {} is already prepared.".format(self.dev_path))
            raise SystemExit(0)
        if disk.blkid(self.dev_path).get('TYPE') == 'crypto_LUKS':
            terminal.info("Raw device {} might already be in use for a dmcrypt OSD, skipping.".format(self.dev_path))
            raise SystemExit(0)
        super()._is_valid_device()
        return self._device


class ValidBatchDevice(ValidDevice):
    def __call__(self, dev_path):
        super().get_device(dev_path)
        return self._format_device(self._is_valid_device())

    def _is_valid_device(self, raise_sys_exit=False):
        super()._is_valid_device()
        if self._device.is_partition:
            raise argparse.ArgumentError(
                None,
                '{} is a partition, please pass '
                'LVs or raw block devices'.format(self.dev_path))
        return self._device


class ValidBatchDataDevice(ValidBatchDevice, ValidDataDevice):
    def __call__(self, dev_path):
        super().get_device(dev_path)
        return self._format_device(self._is_valid_device())

    def _is_valid_device(self):
        # if device is already used by ceph,
        # leave the validation to Batch.get_deployment_layout()
        # This way the idempotency isn't broken (especially when using --osds-per-device)
        for lv in self._device.lvs:
            if lv.tags.get('ceph.type') in ['db', 'wal']:
                return self._device
        if self._device.used_by_ceph:
            return self._device
        super()._is_valid_device(raise_sys_exit=False)
        return self._device


class OSDPath(object):
    """
    Validate path exists and it looks like an OSD directory.
    """

    @decorators.needs_root
    def __call__(self, string):
        if not os.path.exists(string):
            error = "Path does not exist: %s" % string
            raise argparse.ArgumentError(None, error)

        arg_is_partition = disk.is_partition(string)
        if arg_is_partition:
            return os.path.abspath(string)
        absolute_path = os.path.abspath(string)
        if not os.path.isdir(absolute_path):
            error = "Argument is not a directory or device which is required to scan"
            raise argparse.ArgumentError(None, error)
        key_files = ['ceph_fsid', 'fsid', 'keyring', 'ready', 'type', 'whoami']
        dir_files = os.listdir(absolute_path)
        for key_file in key_files:
            if key_file not in dir_files:
                terminal.error('All following files must exist in path: %s' % ' '.join(key_files))
                error = "Required file (%s) was not found in OSD dir path: %s" % (
                    key_file,
                    absolute_path
                )
                raise argparse.ArgumentError(None, error)

        return os.path.abspath(string)


def exclude_group_options(parser, groups, argv=None):
    """
    ``argparse`` has the ability to check for mutually exclusive options, but
    it only allows a basic XOR behavior: only one flag can be used from
    a defined group of options. This doesn't help when two groups of options
    need to be separated. For example, with filestore and bluestore, neither
    set can be used in conjunction with the other set.

    This helper validator will consume the parser to inspect the group flags,
    and it will group them together from ``groups``. This allows proper error
    reporting, matching each incompatible flag with its group name.

    :param parser: The argparse object, once it has configured all flags. It is
                   required to contain the group names being used to validate.
    :param groups: A list of group names (at least two), with the same names
                   used for ``add_argument_group``
    :param argv: Consume the args (sys.argv) directly from this argument

    .. note: **Unfortunately** this will not be able to validate correctly
             when using default flags. In the case of filestore vs. bluestore,
             ceph-volume defaults to --bluestore, but we can't check that
             programmatically, we can only parse the flags seen via argv
    """
    # Reduce the parser groups to only the groups we need to intersect
    parser_groups = [g for g in parser._action_groups if g.title in groups]
    # A mapping of the group name to flags/options
    group_flags = {}
    flags_to_verify = []
    for group in parser_groups:
        # option groups may have more than one item in ``option_strings``, this
        # will loop over ``_group_actions`` which contains the
        # ``option_strings``, like ``['--filestore']``
        group_flags[group.title] = [
            option for group_action in group._group_actions
            for option in group_action.option_strings
        ]

    # Gather all the flags present in the groups so that we only check on those.
    for flags in group_flags.values():
        flags_to_verify.extend(flags)

    seen = []
    last_flag = None
    last_group = None
    for flag in argv:
        if flag not in flags_to_verify:
            continue
        for group_name, flags in group_flags.items():
            if flag in flags:
                seen.append(group_name)
                # We are mutually excluding groups, so having more than 1 group
                # in ``seen`` means we must raise an error
                if len(set(seen)) == len(groups):
                    terminal.warning('Incompatible flags were found, some values may get ignored')
                    msg = 'Cannot use %s (%s) with %s (%s)' % (
                        last_flag, last_group, flag, group_name
                    )
                    terminal.warning(msg)
            last_group = group_name
        last_flag = flag


class ValidFraction(object):
    """
    Validate fraction is in (0, 1.0]
    """

    def __call__(self, fraction):
        fraction_float = float(fraction)
        if math.isnan(fraction_float) or fraction_float <= 0.0 or fraction_float > 1.0:
            raise argparse.ArgumentError(None, 'Fraction %f not in (0,1.0]' % fraction_float)
        return fraction_float
9,462
39.268085
117
py
null
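Per its docstring, exclude_group_options only warns about flags argparse already knows about, matched against the given group titles. A minimal usage sketch (the 'filestore'/'bluestore' group names come from the docstring; the parser wiring here is illustrative, not taken from the repo):

import argparse
import sys
from ceph_volume.util.arg_validators import exclude_group_options

parser = argparse.ArgumentParser()
filestore = parser.add_argument_group('filestore')
filestore.add_argument('--filestore', action='store_true')
bluestore = parser.add_argument_group('bluestore')
bluestore.add_argument('--bluestore', action='store_true')

# warns (does not raise) when flags from both groups appear together in argv
exclude_group_options(parser, ['filestore', 'bluestore'], argv=sys.argv)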
ceph-main/src/ceph-volume/ceph_volume/util/constants.py
# mount flags
mount = dict(
    xfs=['rw', 'noatime', 'inode64']
)


# format flags
mkfs = dict(
    xfs=[
        # force overwriting previous fs
        '-f',
        # set the inode size to 2kb
        '-i', 'size=2048',
    ],
)

# The fantastical world of ceph-disk labels, they should give you the
# collywobbles
ceph_disk_guids = {
    # luks
    '45b0969e-9b03-4f30-b4c6-35865ceff106': {'type': 'journal', 'encrypted': True, 'encryption_type': 'luks'},
    'cafecafe-9b03-4f30-b4c6-35865ceff106': {'type': 'block', 'encrypted': True, 'encryption_type': 'luks'},
    '166418da-c469-4022-adf4-b30afd37f176': {'type': 'block.db', 'encrypted': True, 'encryption_type': 'luks'},
    '86a32090-3647-40b9-bbbd-38d8c573aa86': {'type': 'block.wal', 'encrypted': True, 'encryption_type': 'luks'},
    '4fbd7e29-9d25-41b8-afd0-35865ceff05d': {'type': 'data', 'encrypted': True, 'encryption_type': 'luks'},
    # plain
    '45b0969e-9b03-4f30-b4c6-5ec00ceff106': {'type': 'journal', 'encrypted': True, 'encryption_type': 'plain'},
    'cafecafe-9b03-4f30-b4c6-5ec00ceff106': {'type': 'block', 'encrypted': True, 'encryption_type': 'plain'},
    '93b0052d-02d9-4d8a-a43b-33a3ee4dfbc3': {'type': 'block.db', 'encrypted': True, 'encryption_type': 'plain'},
    '306e8683-4fe2-4330-b7c0-00a917c16966': {'type': 'block.wal', 'encrypted': True, 'encryption_type': 'plain'},
    '4fbd7e29-9d25-41b8-afd0-5ec00ceff05d': {'type': 'data', 'encrypted': True, 'encryption_type': 'plain'},
    # regular guids that differ from plain
    'fb3aabf9-d25f-47cc-bf5e-721d1816496b': {'type': 'lockbox', 'encrypted': False, 'encryption_type': None},
    '30cd0809-c2b2-499c-8879-2d6b78529876': {'type': 'block.db', 'encrypted': False, 'encryption_type': None},
    '5ce17fce-4087-4169-b7ff-056cc58473f9': {'type': 'block.wal', 'encrypted': False, 'encryption_type': None},
    '4fbd7e29-9d25-41b8-afd0-062c0ceff05d': {'type': 'data', 'encrypted': False, 'encryption_type': None},
    'cafecafe-9b03-4f30-b4c6-b4b80ceff106': {'type': 'block', 'encrypted': False, 'encryption_type': None},
    # multipath
    '01b41e1b-002a-453c-9f17-88793989ff8f': {'type': 'block.wal', 'encrypted': False, 'encryption_type': None},
    'ec6d6385-e346-45dc-be91-da2a7c8b3261': {'type': 'block.wal', 'encrypted': False, 'encryption_type': None},
    '45b0969e-8ae0-4982-bf9d-5a8d867af560': {'type': 'journal', 'encrypted': False, 'encryption_type': None},
    '4fbd7e29-8ae0-4982-bf9d-5a8d867af560': {'type': 'data', 'encrypted': False, 'encryption_type': None},
    '7f4a666a-16f3-47a2-8445-152ef4d03f6c': {'type': 'lockbox', 'encrypted': False, 'encryption_type': None},
    'cafecafe-8ae0-4982-bf9d-5a8d867af560': {'type': 'block', 'encrypted': False, 'encryption_type': None},
}
2,743
57.382979
113
py
null
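The ceph_disk_guids mapping above is keyed by partition type GUID, which is how ceph-disk partitions get classified. A minimal lookup sketch (the GUID is one of the entries above; the variable names are illustrative):

from ceph_volume.util.constants import ceph_disk_guids

parttype = '4fbd7e29-9d25-41b8-afd0-062c0ceff05d'  # e.g. from blkid's PARTTYPE
label = ceph_disk_guids.get(parttype, {})
assert label.get('type', 'unknown') == 'data'
assert label.get('encrypted') is False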
ceph-main/src/ceph-volume/ceph_volume/util/device.py
# -*- coding: utf-8 -*- import logging import os from functools import total_ordering from ceph_volume import sys_info from ceph_volume.api import lvm from ceph_volume.util import disk, system from ceph_volume.util.lsmdisk import LSMDisk from ceph_volume.util.constants import ceph_disk_guids from ceph_volume.util.disk import allow_loop_devices logger = logging.getLogger(__name__) report_template = """ {dev:<25} {size:<12} {device_nodes:<15} {rot!s:<7} {available!s:<9} {model}""" def encryption_status(abspath): """ Helper function to run ``encryption.status()``. It is done here to avoid a circular import issue (encryption module imports from this module) and to ease testing by allowing monkeypatching of this function. """ from ceph_volume.util import encryption return encryption.status(abspath) class Devices(object): """ A container for Device instances with reporting """ def __init__(self, filter_for_batch=False, with_lsm=False): lvs = lvm.get_lvs() lsblk_all = disk.lsblk_all() all_devices_vgs = lvm.get_all_devices_vgs() if not sys_info.devices: sys_info.devices = disk.get_devices() self.devices = [Device(k, with_lsm, lvs=lvs, lsblk_all=lsblk_all, all_devices_vgs=all_devices_vgs) for k in sys_info.devices.keys()] if filter_for_batch: self.devices = [d for d in self.devices if d.available_lvm_batch] def pretty_report(self): output = [ report_template.format( dev='Device Path', size='Size', rot='rotates', model='Model name', available='available', device_nodes='Device nodes', )] for device in sorted(self.devices): output.append(device.report()) return ''.join(output) def json_report(self): output = [] for device in sorted(self.devices): output.append(device.json_report()) return output @total_ordering class Device(object): pretty_template = """ {attr:<25} {value}""" report_fields = [ 'ceph_device', 'rejected_reasons', 'available', 'path', 'sys_api', 'device_id', 'lsm_data', ] pretty_report_sys_fields = [ 'actuators', 'human_readable_size', 'model', 'removable', 'ro', 'rotational', 'sas_address', 'scheduler_mode', 'vendor', ] # define some class variables; mostly to enable the use of autospec in # unittests lvs = [] def __init__(self, path, with_lsm=False, lvs=None, lsblk_all=None, all_devices_vgs=None): self.path = path # LVs can have a vg/lv path, while disks will have /dev/sda self.symlink = None # check if we are a symlink if os.path.islink(self.path): self.symlink = self.path real_path = os.path.realpath(self.path) # check if we are not a device mapper if "dm-" not in real_path: self.path = real_path if not sys_info.devices: if self.path: sys_info.devices = disk.get_devices(device=self.path) else: sys_info.devices = disk.get_devices() if sys_info.devices.get(self.path, {}): self.device_nodes = sys_info.devices[self.path]['device_nodes'] self.sys_api = sys_info.devices.get(self.path, {}) self.partitions = self._get_partitions() self.lv_api = None self.lvs = [] if not lvs else lvs self.lsblk_all = lsblk_all self.all_devices_vgs = all_devices_vgs self.vgs = [] self.vg_name = None self.lv_name = None self.disk_api = {} self.blkid_api = None self._exists = None self._is_lvm_member = None self.ceph_device = False self._parse() self.lsm_data = self.fetch_lsm(with_lsm) self.available_lvm, self.rejected_reasons_lvm = self._check_lvm_reject_reasons() self.available_raw, self.rejected_reasons_raw = self._check_raw_reject_reasons() self.available = self.available_lvm and self.available_raw self.rejected_reasons = list(set(self.rejected_reasons_lvm + self.rejected_reasons_raw)) self.device_id = 
self._get_device_id() def fetch_lsm(self, with_lsm): ''' Attempt to fetch libstoragemgmt (LSM) metadata, and return to the caller as a dict. An empty dict is passed back to the caller if the target path is not a block device, or lsm is unavailable on the host. Otherwise the json returned will provide LSM attributes, and any associated errors that lsm encountered when probing the device. ''' if not with_lsm or not self.exists or not self.is_device: return {} lsm_disk = LSMDisk(self.path) return lsm_disk.json_report() def __lt__(self, other): ''' Implementing this method and __eq__ allows the @total_ordering decorator to turn the Device class into a totally ordered type. This can slower then implementing all comparison operations. This sorting should put available devices before unavailable devices and sort on the path otherwise (str sorting). ''' if self.available == other.available: return self.path < other.path return self.available and not other.available def __eq__(self, other): return self.path == other.path def __hash__(self): return hash(self.path) def load_blkid_api(self): if self.blkid_api is None: self.blkid_api = disk.blkid(self.path) def _parse(self): lv = None if not self.sys_api: # if no device was found check if we are a partition partname = self.path.split('/')[-1] for device, info in sys_info.devices.items(): part = info['partitions'].get(partname, {}) if part: self.sys_api = part break if self.lvs: for _lv in self.lvs: # if the path is not absolute, we have 'vg/lv', let's use LV name # to get the LV. if self.path[0] == '/': if _lv.lv_path == self.path: lv = _lv break else: vgname, lvname = self.path.split('/') if _lv.lv_name == lvname and _lv.vg_name == vgname: lv = _lv break else: if self.path[0] == '/': lv = lvm.get_single_lv(filters={'lv_path': self.path}) else: vgname, lvname = self.path.split('/') lv = lvm.get_single_lv(filters={'lv_name': lvname, 'vg_name': vgname}) if lv: self.lv_api = lv self.lvs = [lv] self.path = lv.lv_path self.vg_name = lv.vg_name self.lv_name = lv.name self.ceph_device = lvm.is_ceph_device(lv) else: self.lvs = [] if self.lsblk_all: for dev in self.lsblk_all: if dev['NAME'] == os.path.basename(self.path): break else: dev = disk.lsblk(self.path) self.disk_api = dev device_type = dev.get('TYPE', '') # always check is this is an lvm member valid_types = ['part', 'disk', 'mpath'] if allow_loop_devices(): valid_types.append('loop') if device_type in valid_types: self._set_lvm_membership() self.ceph_disk = CephDiskDevice(self) def __repr__(self): prefix = 'Unknown' if self.is_lv: prefix = 'LV' elif self.is_partition: prefix = 'Partition' elif self.is_device: prefix = 'Raw Device' return '<%s: %s>' % (prefix, self.path) def pretty_report(self): def format_value(v): if isinstance(v, list): return ', '.join(v) else: return v def format_key(k): return k.strip('_').replace('_', ' ') output = ['\n====== Device report {} ======\n'.format(self.path)] output.extend( [self.pretty_template.format( attr=format_key(k), value=format_value(v)) for k, v in vars(self).items() if k in self.report_fields and k != 'disk_api' and k != 'sys_api'] ) output.extend( [self.pretty_template.format( attr=format_key(k), value=format_value(v)) for k, v in self.sys_api.items() if k in self.pretty_report_sys_fields]) for lv in self.lvs: output.append(""" --- Logical Volume ---""") output.extend( [self.pretty_template.format( attr=format_key(k), value=format_value(v)) for k, v in lv.report().items()]) return ''.join(output) def report(self): return report_template.format( 
dev=self.path, size=self.size_human, rot=self.rotational, available=self.available, model=self.model, device_nodes=self.device_nodes ) def json_report(self): output = {k.strip('_'): v for k, v in vars(self).items() if k in self.report_fields} output['lvs'] = [lv.report() for lv in self.lvs] return output def _get_device_id(self): """ Please keep this implementation in sync with get_device_id() in src/common/blkdev.cc """ props = ['ID_VENDOR', 'ID_MODEL', 'ID_MODEL_ENC', 'ID_SERIAL_SHORT', 'ID_SERIAL', 'ID_SCSI_SERIAL'] p = disk.udevadm_property(self.path, props) if p.get('ID_MODEL','').startswith('LVM PV '): p['ID_MODEL'] = p.get('ID_MODEL_ENC', '').replace('\\x20', ' ').strip() if 'ID_VENDOR' in p and 'ID_MODEL' in p and 'ID_SCSI_SERIAL' in p: dev_id = '_'.join([p['ID_VENDOR'], p['ID_MODEL'], p['ID_SCSI_SERIAL']]) elif 'ID_MODEL' in p and 'ID_SERIAL_SHORT' in p: dev_id = '_'.join([p['ID_MODEL'], p['ID_SERIAL_SHORT']]) elif 'ID_SERIAL' in p: dev_id = p['ID_SERIAL'] if dev_id.startswith('MTFD'): # Micron NVMes hide the vendor dev_id = 'Micron_' + dev_id else: # the else branch should fallback to using sysfs and ioctl to # retrieve device_id on FreeBSD. Still figuring out if/how the # python ioctl implementation does that on FreeBSD dev_id = '' dev_id = dev_id.replace(' ', '_') while '__' in dev_id: dev_id = dev_id.replace('__', '_') return dev_id def _set_lvm_membership(self): if self._is_lvm_member is None: # this is contentious, if a PV is recognized by LVM but has no # VGs, should we consider it as part of LVM? We choose not to # here, because most likely, we need to use VGs from this PV. self._is_lvm_member = False device_to_check = [self.path] device_to_check.extend(self.partitions) # a pv can only be in one vg, so this should be safe # FIXME: While the above assumption holds, sda1 and sda2 # can each host a PV and VG. I think the vg_name property is # actually unused (not 100% sure) and can simply be removed vgs = None if not self.all_devices_vgs: self.all_devices_vgs = lvm.get_all_devices_vgs() for path in device_to_check: for dev_vg in self.all_devices_vgs: if dev_vg.pv_name == path: vgs = [dev_vg] if vgs: self.vgs.extend(vgs) self.vg_name = vgs[0] self._is_lvm_member = True self.lvs.extend(lvm.get_device_lvs(path)) if self.lvs: self.ceph_device = any([True if lv.tags.get('ceph.osd_id') else False for lv in self.lvs]) def _get_partitions(self): """ For block devices LVM can reside on the raw block device or on a partition. Return a list of paths to be checked for a pv. 
""" partitions = [] path_dir = os.path.dirname(self.path) for partition in self.sys_api.get('partitions', {}).keys(): partitions.append(os.path.join(path_dir, partition)) return partitions @property def exists(self): return os.path.exists(self.path) @property def has_fs(self): self.load_blkid_api() return 'TYPE' in self.blkid_api @property def has_gpt_headers(self): self.load_blkid_api() return self.blkid_api.get("PTTYPE") == "gpt" @property def rotational(self): rotational = self.sys_api.get('rotational') if rotational is None: # fall back to lsblk if not found in sys_api # default to '1' if no value is found with lsblk either rotational = self.disk_api.get('ROTA', '1') return rotational == '1' @property def model(self): return self.sys_api['model'] @property def size_human(self): return self.sys_api['human_readable_size'] @property def size(self): return self.sys_api['size'] @property def parent_device(self): if 'PKNAME' in self.disk_api: return '/dev/%s' % self.disk_api['PKNAME'] return None @property def lvm_size(self): """ If this device was made into a PV it would lose 1GB in total size due to the 1GB physical extent size we set when creating volume groups """ size = disk.Size(b=self.size) lvm_size = disk.Size(gb=size.gb.as_int()) - disk.Size(gb=1) return lvm_size @property def is_lvm_member(self): if self._is_lvm_member is None: self._set_lvm_membership() return self._is_lvm_member @property def is_ceph_disk_member(self): def is_member(device): return 'ceph' in device.get('PARTLABEL', '') or \ device.get('PARTTYPE', '') in ceph_disk_guids.keys() # If we come from Devices(), self.lsblk_all is set already. # Otherwise, we have to grab the data. details = self.lsblk_all or disk.lsblk_all() _is_member = False if self.sys_api.get("partitions"): for part in self.sys_api.get("partitions").keys(): for dev in details: if part.startswith(dev['NAME']): if is_member(dev): _is_member = True return _is_member else: return is_member(self.disk_api) raise RuntimeError(f"Couln't check if device {self.path} is a ceph-disk member.") @property def has_bluestore_label(self): return disk.has_bluestore_label(self.path) @property def is_mapper(self): return self.path.startswith(('/dev/mapper', '/dev/dm-')) @property def device_type(self): self.load_blkid_api() if 'type' in self.sys_api: return self.sys_api['type'] elif self.disk_api: return self.disk_api['TYPE'] elif self.blkid_api: return self.blkid_api['TYPE'] @property def is_mpath(self): return self.device_type == 'mpath' @property def is_lv(self): return self.lv_api is not None @property def is_partition(self): self.load_blkid_api() if self.disk_api: return self.disk_api['TYPE'] == 'part' elif self.blkid_api: return self.blkid_api['TYPE'] == 'part' return False @property def is_device(self): self.load_blkid_api() api = None if self.disk_api: api = self.disk_api elif self.blkid_api: api = self.blkid_api if api: valid_types = ['disk', 'device', 'mpath'] if allow_loop_devices(): valid_types.append('loop') return self.device_type in valid_types return False @property def is_acceptable_device(self): return self.is_device or self.is_partition @property def is_encrypted(self): """ Only correct for LVs, device mappers, and partitions. Will report a ``None`` for raw devices. 
""" self.load_blkid_api() crypt_reports = [self.blkid_api.get('TYPE', ''), self.disk_api.get('FSTYPE', '')] if self.is_lv: # if disk APIs are reporting this is encrypted use that: if 'crypto_LUKS' in crypt_reports: return True # if ceph-volume created this, then a tag would let us know elif self.lv_api.encrypted: return True return False elif self.is_partition: return 'crypto_LUKS' in crypt_reports elif self.is_mapper: active_mapper = encryption_status(self.path) if active_mapper: # normalize a bit to ensure same values regardless of source encryption_type = active_mapper['type'].lower().strip('12') # turn LUKS1 or LUKS2 into luks return True if encryption_type in ['plain', 'luks'] else False else: return False else: return None @property def used_by_ceph(self): # only filter out data devices as journals could potentially be reused osd_ids = [lv.tags.get("ceph.osd_id") is not None for lv in self.lvs if lv.tags.get("ceph.type") in ["data", "block"]] return any(osd_ids) @property def journal_used_by_ceph(self): # similar to used_by_ceph() above. This is for 'journal' devices (db/wal/..) # needed by get_lvm_fast_allocs() in devices/lvm/batch.py # see https://tracker.ceph.com/issues/59640 osd_ids = [lv.tags.get("ceph.osd_id") is not None for lv in self.lvs if lv.tags.get("ceph.type") in ["db", "wal"]] return any(osd_ids) @property def vg_free_percent(self): if self.vgs: return [vg.free_percent for vg in self.vgs] else: return [1] @property def vg_size(self): if self.vgs: return [vg.size for vg in self.vgs] else: # TODO fix this...we can probably get rid of vg_free return self.vg_free @property def vg_free(self): ''' Returns the free space in all VGs on this device. If no VGs are present, returns the disk size. ''' if self.vgs: return [vg.free for vg in self.vgs] else: # We could also query 'lvmconfig # --typeconfig full' and use allocations -> physical_extent_size # value to project the space for a vg # assuming 4M extents here extent_size = 4194304 vg_free = int(self.size / extent_size) * extent_size if self.size % extent_size == 0: # If the extent size divides size exactly, deduct on extent for # LVM metadata vg_free -= extent_size return [vg_free] @property def has_partitions(self): ''' Boolean to determine if a given device has partitions. ''' if self.sys_api.get('partitions'): return True return False def _check_generic_reject_reasons(self): reasons = [ ('removable', 1, 'removable'), ('ro', 1, 'read-only'), ('locked', 1, 'locked'), ] rejected = [reason for (k, v, reason) in reasons if self.sys_api.get(k, '') == v] if self.is_acceptable_device: # reject disks smaller than 5GB if int(self.sys_api.get('size', 0)) < 5368709120: rejected.append('Insufficient space (<5GB)') else: rejected.append("Device type is not acceptable. It should be raw device or partition") if self.is_ceph_disk_member: rejected.append("Used by ceph-disk") try: if self.has_bluestore_label: rejected.append('Has BlueStore device label') except OSError as e: # likely failed to open the device. assuming it is BlueStore is the safest option # so that a possibly-already-existing OSD doesn't get overwritten logger.error('failed to determine if device {} is BlueStore. device should not be used to avoid false negatives. err: {}'.format(self.path, e)) rejected.append('Failed to determine if device is BlueStore') if self.is_partition: try: if disk.has_bluestore_label(self.parent_device): rejected.append('Parent has BlueStore device label') except OSError as e: # likely failed to open the device. 
                # Assuming the parent is BlueStore is the safest option, so
                # that a possibly-already-existing OSD doesn't get overwritten
                logger.error('failed to determine if partition {} (parent: {}) has a BlueStore parent. partition should not be used to avoid false negatives. err: {}'.format(self.path, self.parent_device, e))
                rejected.append('Failed to determine if parent device is BlueStore')

        if self.has_gpt_headers:
            rejected.append('Has GPT headers')
        if self.has_partitions:
            rejected.append('Has partitions')
        return rejected

    def _check_lvm_reject_reasons(self):
        rejected = []
        if self.vgs:
            available_vgs = [vg for vg in self.vgs if int(vg.vg_free_count) > 10]
            if not available_vgs:
                rejected.append('Insufficient space (<10 extents) on vgs')
        else:
            # only check generic if no VGs are present. VGs might hold LVs and
            # that might cause 'locked' to trigger
            rejected.extend(self._check_generic_reject_reasons())

        return len(rejected) == 0, rejected

    def _check_raw_reject_reasons(self):
        rejected = self._check_generic_reject_reasons()
        if len(self.vgs) > 0:
            rejected.append('LVM detected')

        return len(rejected) == 0, rejected

    @property
    def available_lvm_batch(self):
        if self.sys_api.get("partitions"):
            return False
        if system.device_is_mounted(self.path):
            return False
        return self.is_device or self.is_lv


class CephDiskDevice(object):
    """
    Detect devices that have been created by ceph-disk, and report their type
    (journal, data, etc..).

    Requires a ``Device`` object as input.
    """

    def __init__(self, device):
        self.device = device
        self._is_ceph_disk_member = None

    @property
    def partlabel(self):
        """
        In containers, the 'PARTLABEL' attribute might not be detected
        correctly via ``lsblk``, so we poke at the value with ``lsblk`` first,
        falling back to ``blkid`` (which works correctly in containers).
        """
        lsblk_partlabel = self.device.disk_api.get('PARTLABEL')
        if lsblk_partlabel:
            return lsblk_partlabel
        return self.device.blkid_api.get('PARTLABEL', '')

    @property
    def parttype(self):
        """
        Older versions do not seem to detect PARTTYPE correctly (assuming the
        info in util/disk.py#lsblk is still valid). Simply resolve to using
        blkid, since lsblk will throw an error if asked for an unknown column.
        """
        return self.device.blkid_api.get('PARTTYPE', '')

    @property
    def is_member(self):
        if self._is_ceph_disk_member is None:
            if 'ceph' in self.partlabel:
                self._is_ceph_disk_member = True
                return True
            elif self.parttype in ceph_disk_guids.keys():
                return True
            return False
        return self._is_ceph_disk_member

    @property
    def type(self):
        types = [
            'data', 'wal', 'db', 'lockbox', 'journal',
            # ceph-disk uses 'ceph block' when placing data in bluestore, but
            # keeps the regular OSD files in 'ceph data' :( :( :( :(
            'block',
        ]
        for t in types:
            if t in self.partlabel:
                return t
        label = ceph_disk_guids.get(self.parttype, {})
        return label.get('type', 'unknown').split('.')[-1]
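# --- Editor's example (not part of the upstream module) ---
# A minimal, hedged usage sketch for the Device machinery above. It assumes
# ceph-volume is importable and that '/dev/sdb' (a hypothetical path) is a
# real block device; Device() shells out to lsblk/blkid/LVM, so root on a
# Linux host is required for meaningful output.
from ceph_volume.util.device import Device

dev = Device('/dev/sdb')  # hypothetical path, replace with a real device
if dev.exists and dev.is_acceptable_device:
    print(dev.json_report())  # the collapsed lsblk/blkid/LVM facts
    passed, reasons = dev._check_lvm_reject_reasons()
    if not passed:
        print('rejected for LVM use: %s' % ', '.join(reasons))
if dev.is_ceph_disk_member:
    print('legacy ceph-disk partition type: %s' % dev.ceph_disk.type)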
25,464
34.765449
208
py
null
ceph-main/src/ceph-volume/ceph_volume/util/disk.py
import logging import os import re import stat import time from ceph_volume import process from ceph_volume.api import lvm from ceph_volume.util.system import get_file_contents logger = logging.getLogger(__name__) # The blkid CLI tool has some oddities which prevents having one common call # to extract the information instead of having separate utilities. The `udev` # type of output is needed in older versions of blkid (v 2.23) that will not # work correctly with just the ``-p`` flag to bypass the cache for example. # Xenial doesn't have this problem as it uses a newer blkid version. def get_partuuid(device): """ If a device is a partition, it will probably have a PARTUUID on it that will persist and can be queried against `blkid` later to detect the actual device """ out, err, rc = process.call( ['blkid', '-c', '/dev/null', '-s', 'PARTUUID', '-o', 'value', device] ) return ' '.join(out).strip() def _blkid_parser(output): """ Parses the output from a system ``blkid`` call, requires output to be produced using the ``-p`` flag which bypasses the cache, mangling the names. These names are corrected to what it would look like without the ``-p`` flag. Normal output:: /dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" [...] """ # first spaced separated item is garbage, gets tossed: output = ' '.join(output.split()[1:]) # split again, respecting possible whitespace in quoted values pairs = output.split('" ') raw = {} processed = {} mapping = { 'UUID': 'UUID', 'TYPE': 'TYPE', 'PART_ENTRY_NAME': 'PARTLABEL', 'PART_ENTRY_UUID': 'PARTUUID', 'PART_ENTRY_TYPE': 'PARTTYPE', 'PTTYPE': 'PTTYPE', } for pair in pairs: try: column, value = pair.split('=') except ValueError: continue raw[column] = value.strip().strip().strip('"') for key, value in raw.items(): new_key = mapping.get(key) if not new_key: continue processed[new_key] = value return processed def blkid(device): """ The blkid interface to its CLI, creating an output similar to what is expected from ``lsblk``. In most cases, ``lsblk()`` should be the preferred method for extracting information about a device. There are some corner cases where it might provide information that is otherwise unavailable. The system call uses the ``-p`` flag which bypasses the cache, the caveat being that the keys produced are named completely different to expected names. For example, instead of ``PARTLABEL`` it provides a ``PART_ENTRY_NAME``. A bit of translation between these known keys is done, which is why ``lsblk`` should always be preferred: the output provided here is not as rich, given that a translation of keys is required for a uniform interface with the ``-p`` flag. Label name to expected output chart: cache bypass name expected name UUID UUID TYPE TYPE PART_ENTRY_NAME PARTLABEL PART_ENTRY_UUID PARTUUID """ out, err, rc = process.call( ['blkid', '-c', '/dev/null', '-p', device] ) return _blkid_parser(' '.join(out)) def get_part_entry_type(device): """ Parses the ``ID_PART_ENTRY_TYPE`` from the "low level" (bypasses the cache) output that uses the ``udev`` type of output. This output is intended to be used for udev rules, but it is useful in this case as it is the only consistent way to retrieve the GUID used by ceph-disk to identify devices. 
""" out, err, rc = process.call(['blkid', '-c', '/dev/null', '-p', '-o', 'udev', device]) for line in out: if 'ID_PART_ENTRY_TYPE=' in line: return line.split('=')[-1].strip() return '' def get_device_from_partuuid(partuuid): """ If a device has a partuuid, query blkid so that it can tell us what that device is """ out, err, rc = process.call( ['blkid', '-c', '/dev/null', '-t', 'PARTUUID="%s"' % partuuid, '-o', 'device'] ) return ' '.join(out).strip() def remove_partition(device): """ Removes a partition using parted :param device: A ``Device()`` object """ # Sometimes there's a race condition that makes 'ID_PART_ENTRY_NUMBER' be not present # in the output of `udevadm info --query=property`. # Probably not ideal and not the best fix but this allows to get around that issue. # The idea is to make it retry multiple times before actually failing. for i in range(10): udev_info = udevadm_property(device.path) partition_number = udev_info.get('ID_PART_ENTRY_NUMBER') if partition_number: break time.sleep(0.2) if not partition_number: raise RuntimeError('Unable to detect the partition number for device: %s' % device.path) process.run( ['parted', device.parent_device, '--script', '--', 'rm', partition_number] ) def _stat_is_device(stat_obj): """ Helper function that will interpret ``os.stat`` output directly, so that other functions can call ``os.stat`` once and interpret that result several times """ return stat.S_ISBLK(stat_obj) def _lsblk_parser(line): """ Parses lines in lsblk output. Requires output to be in pair mode (``-P`` flag). Lines need to be whole strings, the line gets split when processed. :param line: A string, with the full line from lsblk output """ # parse the COLUMN="value" output to construct the dictionary pairs = line.split('" ') parsed = {} for pair in pairs: try: column, value = pair.split('=') except ValueError: continue parsed[column] = value.strip().strip().strip('"') return parsed def device_family(device): """ Returns a list of associated devices. It assumes that ``device`` is a parent device. It is up to the caller to ensure that the device being used is a parent, not a partition. """ labels = ['NAME', 'PARTLABEL', 'TYPE'] command = ['lsblk', '-P', '-p', '-o', ','.join(labels), device] out, err, rc = process.call(command) devices = [] for line in out: devices.append(_lsblk_parser(line)) return devices def udevadm_property(device, properties=[]): """ Query udevadm for information about device properties. Optionally pass a list of properties to return. A requested property might not be returned if not present. Expected output format:: # udevadm info --query=property --name=/dev/sda :( DEVNAME=/dev/sda DEVTYPE=disk ID_ATA=1 ID_BUS=ata ID_MODEL=SK_hynix_SC311_SATA_512GB ID_PART_TABLE_TYPE=gpt ID_PART_TABLE_UUID=c8f91d57-b26c-4de1-8884-0c9541da288c ID_PATH=pci-0000:00:17.0-ata-3 ID_PATH_TAG=pci-0000_00_17_0-ata-3 ID_REVISION=70000P10 ID_SERIAL=SK_hynix_SC311_SATA_512GB_MS83N71801150416A TAGS=:systemd: USEC_INITIALIZED=16117769 ... 
""" out = _udevadm_info(device) ret = {} for line in out: p, v = line.split('=', 1) if not properties or p in properties: ret[p] = v return ret def _udevadm_info(device): """ Call udevadm and return the output """ cmd = ['udevadm', 'info', '--query=property', device] out, _err, _rc = process.call(cmd) return out def lsblk(device, columns=None, abspath=False): result = [] if not os.path.isdir(device): result = lsblk_all(device=device, columns=columns, abspath=abspath) if not result: logger.debug(f"{device} not found is lsblk report") return {} return result[0] def lsblk_all(device='', columns=None, abspath=False): """ Create a dictionary of identifying values for a device using ``lsblk``. Each supported column is a key, in its *raw* format (all uppercase usually). ``lsblk`` has support for certain "columns" (in blkid these would be labels), and these columns vary between distributions and ``lsblk`` versions. The newer versions support a richer set of columns, while older ones were a bit limited. These are a subset of lsblk columns which are known to work on both CentOS 7 and Xenial: NAME device name KNAME internal kernel device name PKNAME internal kernel parent device name MAJ:MIN major:minor device number FSTYPE filesystem type MOUNTPOINT where the device is mounted LABEL filesystem LABEL UUID filesystem UUID RO read-only device RM removable device MODEL device identifier SIZE size of the device STATE state of the device OWNER user name GROUP group name MODE device node permissions ALIGNMENT alignment offset MIN-IO minimum I/O size OPT-IO optimal I/O size PHY-SEC physical sector size LOG-SEC logical sector size ROTA rotational device SCHED I/O scheduler name RQ-SIZE request queue size TYPE device type PKNAME internal parent kernel device name DISC-ALN discard alignment offset DISC-GRAN discard granularity DISC-MAX discard max bytes DISC-ZERO discard zeroes data There is a bug in ``lsblk`` where using all the available (supported) columns will result in no output (!), in order to workaround this the following columns have been removed from the default reporting columns: * RQ-SIZE (request queue size) * MIN-IO minimum I/O size * OPT-IO optimal I/O size These should be available however when using `columns`. For example:: >>> lsblk('/dev/sda1', columns=['OPT-IO']) {'OPT-IO': '0'} Normal CLI output, as filtered by the flags in this function will look like :: $ lsblk -P -o NAME,KNAME,PKNAME,MAJ:MIN,FSTYPE,MOUNTPOINT NAME="sda1" KNAME="sda1" MAJ:MIN="8:1" FSTYPE="ext4" MOUNTPOINT="/" :param columns: A list of columns to report as keys in its original form. 
:param abspath: Set the flag for absolute paths on the report """ default_columns = [ 'NAME', 'KNAME', 'PKNAME', 'MAJ:MIN', 'FSTYPE', 'MOUNTPOINT', 'LABEL', 'UUID', 'RO', 'RM', 'MODEL', 'SIZE', 'STATE', 'OWNER', 'GROUP', 'MODE', 'ALIGNMENT', 'PHY-SEC', 'LOG-SEC', 'ROTA', 'SCHED', 'TYPE', 'DISC-ALN', 'DISC-GRAN', 'DISC-MAX', 'DISC-ZERO', 'PKNAME', 'PARTLABEL' ] columns = columns or default_columns # -P -> Produce pairs of COLUMN="value" # -p -> Return full paths to devices, not just the names, when ``abspath`` is set # -o -> Use the columns specified or default ones provided by this function base_command = ['lsblk', '-P'] if abspath: base_command.append('-p') base_command.append('-o') base_command.append(','.join(columns)) if device: base_command.append('--nodeps') base_command.append(device) out, err, rc = process.call(base_command) if rc != 0: raise RuntimeError(f"Error: {err}") result = [] for line in out: result.append(_lsblk_parser(line)) return result def is_device(dev): """ Boolean to determine if a given device is a block device (**not** a partition!) For example: /dev/sda would return True, but not /dev/sdc1 """ if not os.path.exists(dev): return False if not dev.startswith('/dev/'): return False if dev[len('/dev/'):].startswith('loop'): if not allow_loop_devices(): return False # fallback to stat return _stat_is_device(os.lstat(dev).st_mode) def is_partition(dev): """ Boolean to determine if a given device is a partition, like /dev/sda1 """ if not os.path.exists(dev): return False # use lsblk first, fall back to using stat TYPE = lsblk(dev).get('TYPE') if TYPE: return TYPE == 'part' # fallback to stat stat_obj = os.stat(dev) if _stat_is_device(stat_obj.st_mode): return False major = os.major(stat_obj.st_rdev) minor = os.minor(stat_obj.st_rdev) if os.path.exists('/sys/dev/block/%d:%d/partition' % (major, minor)): return True return False def is_ceph_rbd(dev): """ Boolean to determine if a given device is a ceph RBD device, like /dev/rbd0 """ return dev.startswith(('/dev/rbd')) class BaseFloatUnit(float): """ Base class to support float representations of size values. Suffix is computed on child classes by inspecting the class name """ def __repr__(self): return "<%s(%s)>" % (self.__class__.__name__, self.__float__()) def __str__(self): return "{size:.2f} {suffix}".format( size=self.__float__(), suffix=self.__class__.__name__.split('Float')[-1] ) def as_int(self): return int(self.real) def as_float(self): return self.real class FloatB(BaseFloatUnit): pass class FloatMB(BaseFloatUnit): pass class FloatGB(BaseFloatUnit): pass class FloatKB(BaseFloatUnit): pass class FloatTB(BaseFloatUnit): pass class FloatPB(BaseFloatUnit): pass class Size(object): """ Helper to provide an interface for different sizes given a single initial input. Allows for comparison between different size objects, which avoids the need to convert sizes before comparison (e.g. comparing megabytes against gigabytes). 
Common comparison operators are supported:: >>> hd1 = Size(gb=400) >>> hd2 = Size(gb=500) >>> hd1 > hd2 False >>> hd1 < hd2 True >>> hd1 == hd2 False >>> hd1 == Size(gb=400) True The Size object can also be multiplied or divided:: >>> hd1 <Size(400.00 GB)> >>> hd1 * 2 <Size(800.00 GB)> >>> hd1 <Size(800.00 GB)> Additions and subtractions are only supported between Size objects:: >>> Size(gb=224) - Size(gb=100) <Size(124.00 GB)> >>> Size(gb=1) + Size(mb=300) <Size(1.29 GB)> Can also display a human-readable representation, with automatic detection on best suited unit, or alternatively, specific unit representation:: >>> s = Size(mb=2211) >>> s <Size(2.16 GB)> >>> s.mb <FloatMB(2211.0)> >>> print("Total size: %s" % s.mb) Total size: 2211.00 MB >>> print("Total size: %s" % s) Total size: 2.16 GB """ @classmethod def parse(cls, size): if (len(size) > 2 and size[-2].lower() in ['k', 'm', 'g', 't', 'p'] and size[-1].lower() == 'b'): return cls(**{size[-2:].lower(): float(size[0:-2])}) elif size[-1].lower() in ['b', 'k', 'm', 'g', 't', 'p']: return cls(**{size[-1].lower(): float(size[0:-1])}) else: return cls(b=float(size)) def __init__(self, multiplier=1024, **kw): self._multiplier = multiplier # create a mapping of units-to-multiplier, skip bytes as that is # calculated initially always and does not need to convert aliases = [ [('k', 'kb', 'kilobytes'), self._multiplier], [('m', 'mb', 'megabytes'), self._multiplier ** 2], [('g', 'gb', 'gigabytes'), self._multiplier ** 3], [('t', 'tb', 'terabytes'), self._multiplier ** 4], [('p', 'pb', 'petabytes'), self._multiplier ** 5] ] # and mappings for units-to-formatters, including bytes and aliases for # each format_aliases = [ [('b', 'bytes'), FloatB], [('kb', 'kilobytes'), FloatKB], [('mb', 'megabytes'), FloatMB], [('gb', 'gigabytes'), FloatGB], [('tb', 'terabytes'), FloatTB], [('pb', 'petabytes'), FloatPB], ] self._formatters = {} for key, value in format_aliases: for alias in key: self._formatters[alias] = value self._factors = {} for key, value in aliases: for alias in key: self._factors[alias] = value for k, v in kw.items(): self._convert(v, k) # only pursue the first occurrence break def _convert(self, size, unit): """ Convert any size down to bytes so that other methods can rely on bytes being available always, regardless of what they pass in, avoiding the need for a mapping of every permutation. """ if unit in ['b', 'bytes']: self._b = size return factor = self._factors[unit] self._b = float(size * factor) def _get_best_format(self): """ Go through all the supported units, and use the first one that is less than 1024. 
        This allows the size to be represented in the most readable format
        available.
        """
        for unit in ['b', 'kb', 'mb', 'gb', 'tb', 'pb']:
            if getattr(self, unit) > 1024:
                continue
            return getattr(self, unit)

    def __repr__(self):
        return "<Size(%s)>" % self._get_best_format()

    def __str__(self):
        return "%s" % self._get_best_format()

    def __format__(self, spec):
        return str(self._get_best_format()).__format__(spec)

    def __int__(self):
        return int(self._b)

    def __float__(self):
        return self._b

    def __lt__(self, other):
        if isinstance(other, Size):
            return self._b < other._b
        else:
            return self.b < other

    def __le__(self, other):
        if isinstance(other, Size):
            return self._b <= other._b
        else:
            return self.b <= other

    def __eq__(self, other):
        if isinstance(other, Size):
            return self._b == other._b
        else:
            return self.b == other

    def __ne__(self, other):
        if isinstance(other, Size):
            return self._b != other._b
        else:
            return self.b != other

    def __ge__(self, other):
        if isinstance(other, Size):
            return self._b >= other._b
        else:
            return self.b >= other

    def __gt__(self, other):
        if isinstance(other, Size):
            return self._b > other._b
        else:
            return self.b > other

    def __add__(self, other):
        if isinstance(other, Size):
            _b = self._b + other._b
            return Size(b=_b)
        raise TypeError('Cannot add "Size" object with int')

    def __sub__(self, other):
        if isinstance(other, Size):
            _b = self._b - other._b
            return Size(b=_b)
        raise TypeError('Cannot subtract "Size" object from int')

    def __mul__(self, other):
        if isinstance(other, Size):
            raise TypeError('Cannot multiply with "Size" object')
        _b = self._b * other
        return Size(b=_b)

    def __truediv__(self, other):
        if isinstance(other, Size):
            return self._b / other._b
        _b = self._b / other
        return Size(b=_b)

    def __div__(self, other):
        if isinstance(other, Size):
            return self._b / other._b
        _b = self._b / other
        return Size(b=_b)

    def __bool__(self):
        return self.b != 0

    def __nonzero__(self):
        return self.__bool__()

    def __getattr__(self, unit):
        """
        Calculate units on the fly, relies on the fact that ``bytes`` has
        been converted at instantiation. Units that don't exist will trigger
        an ``AttributeError``
        """
        try:
            formatter = self._formatters[unit]
        except KeyError:
            raise AttributeError('Size object has no attribute "%s"' % unit)
        if unit in ['b', 'bytes']:
            return formatter(self._b)
        try:
            factor = self._factors[unit]
        except KeyError:
            raise AttributeError('Size object has no attribute "%s"' % unit)
        return formatter(float(self._b) / factor)


def human_readable_size(size):
    """
    Take a size in bytes, and transform it into a human readable size with up
    to two decimals of precision.
    """
    suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    for suffix in suffixes:
        if size >= 1024:
            size = size / 1024
        else:
            break
    return "{size:.2f} {suffix}".format(
        size=size,
        suffix=suffix)


def size_from_human_readable(s):
    """
    Takes a human readable string and converts it into a Size. If no unit is
    passed, bytes is assumed.
""" s = s.replace(' ', '') if s[-1].isdigit(): return Size(b=float(s)) n = float(s[:-1]) if s[-1].lower() == 'p': return Size(pb=n) if s[-1].lower() == 't': return Size(tb=n) if s[-1].lower() == 'g': return Size(gb=n) if s[-1].lower() == 'm': return Size(mb=n) if s[-1].lower() == 'k': return Size(kb=n) return None def get_partitions_facts(sys_block_path): partition_metadata = {} for folder in os.listdir(sys_block_path): folder_path = os.path.join(sys_block_path, folder) if os.path.exists(os.path.join(folder_path, 'partition')): contents = get_file_contents(os.path.join(folder_path, 'partition')) if contents: part = {} partname = folder part_sys_block_path = os.path.join(sys_block_path, partname) part['start'] = get_file_contents(part_sys_block_path + "/start", 0) part['sectors'] = get_file_contents(part_sys_block_path + "/size", 0) part['sectorsize'] = get_file_contents( part_sys_block_path + "/queue/logical_block_size") if not part['sectorsize']: part['sectorsize'] = get_file_contents( part_sys_block_path + "/queue/hw_sector_size", 512) part['size'] = float(part['sectors']) * 512 part['human_readable_size'] = human_readable_size(float(part['sectors']) * 512) part['holders'] = [] for holder in os.listdir(part_sys_block_path + '/holders'): part['holders'].append(holder) partition_metadata[partname] = part return partition_metadata def is_mapper_device(device_name): return device_name.startswith(('/dev/mapper', '/dev/dm-')) def is_locked_raw_device(disk_path): """ A device can be locked by a third party software like a database. To detect that case, the device is opened in Read/Write and exclusive mode """ open_flags = (os.O_RDWR | os.O_EXCL) open_mode = 0 fd = None try: fd = os.open(disk_path, open_flags, open_mode) except OSError: return 1 try: os.close(fd) except OSError: return 1 return 0 class AllowLoopDevices(object): allow = False warned = False @classmethod def __call__(cls): val = os.environ.get("CEPH_VOLUME_ALLOW_LOOP_DEVICES", "false").lower() if val not in ("false", 'no', '0'): cls.allow = True if not cls.warned: logger.warning( "CEPH_VOLUME_ALLOW_LOOP_DEVICES is set in your " "environment, so we will allow the use of unattached loop" " devices as disks. This feature is intended for " "development purposes only and will never be supported in" " production. Issues filed based on this behavior will " "likely be ignored." 
) cls.warned = True return cls.allow allow_loop_devices = AllowLoopDevices() def get_block_devs_sysfs(_sys_block_path='/sys/block', _sys_dev_block_path='/sys/dev/block', device=''): def holder_inner_loop(): for holder in holders: # /sys/block/sdy/holders/dm-8/dm/uuid holder_dm_type = get_file_contents(os.path.join(_sys_block_path, dev, f'holders/{holder}/dm/uuid')).split('-')[0].lower() if holder_dm_type == 'mpath': return True # First, get devices that are _not_ partitions result = list() if not device: dev_names = os.listdir(_sys_block_path) else: dev_names = [device] for dev in dev_names: name = kname = os.path.join("/dev", dev) if not os.path.exists(name): continue type_ = 'disk' holders = os.listdir(os.path.join(_sys_block_path, dev, 'holders')) if get_file_contents(os.path.join(_sys_block_path, dev, 'removable')) == "1": continue if holder_inner_loop(): continue dm_dir_path = os.path.join(_sys_block_path, dev, 'dm') if os.path.isdir(dm_dir_path): dm_type = get_file_contents(os.path.join(dm_dir_path, 'uuid')) type_ = dm_type.split('-')[0].lower() basename = get_file_contents(os.path.join(dm_dir_path, 'name')) name = os.path.join("/dev/mapper", basename) if dev.startswith('loop'): if not allow_loop_devices(): continue # Skip loop devices that are not attached if not os.path.exists(os.path.join(_sys_block_path, dev, 'loop')): continue type_ = 'loop' result.append([kname, name, type_]) # Next, look for devices that _are_ partitions for item in os.listdir(_sys_dev_block_path): is_part = get_file_contents(os.path.join(_sys_dev_block_path, item, 'partition')) == "1" dev = os.path.basename(os.readlink(os.path.join(_sys_dev_block_path, item))) if not is_part: continue name = kname = os.path.join("/dev", dev) result.append([name, kname, "part"]) return sorted(result, key=lambda x: x[0]) def get_devices(_sys_block_path='/sys/block', device=''): """ Captures all available block devices as reported by lsblk. Additional interesting metadata like sectors, size, vendor, solid/rotational, etc. is collected from /sys/block/<device> Returns a dictionary, where keys are the full paths to devices. ..note:: loop devices, removable media, and logical volumes are never included. 
""" device_facts = {} block_devs = get_block_devs_sysfs(_sys_block_path) block_types = ['disk', 'mpath'] if allow_loop_devices(): block_types.append('loop') for block in block_devs: devname = os.path.basename(block[0]) diskname = block[1] if block[2] not in block_types: continue sysdir = os.path.join(_sys_block_path, devname) metadata = {} # If the device is ceph rbd it gets excluded if is_ceph_rbd(diskname): continue # If the mapper device is a logical volume it gets excluded if is_mapper_device(diskname): if lvm.get_device_lvs(diskname): continue # all facts that have no defaults # (<name>, <path relative to _sys_block_path>) facts = [('removable', 'removable'), ('ro', 'ro'), ('vendor', 'device/vendor'), ('model', 'device/model'), ('rev', 'device/rev'), ('sas_address', 'device/sas_address'), ('sas_device_handle', 'device/sas_device_handle'), ('support_discard', 'queue/discard_granularity'), ('rotational', 'queue/rotational'), ('nr_requests', 'queue/nr_requests'), ] for key, file_ in facts: metadata[key] = get_file_contents(os.path.join(sysdir, file_)) device_slaves = os.listdir(os.path.join(sysdir, 'slaves')) if device_slaves: metadata['device_nodes'] = ','.join(device_slaves) else: metadata['device_nodes'] = devname metadata['actuators'] = None if os.path.isdir(sysdir + "/queue/independent_access_ranges/"): actuators = 0 while os.path.isdir(sysdir + "/queue/independent_access_ranges/" + str(actuators)): actuators += 1 metadata['actuators'] = actuators metadata['scheduler_mode'] = "" scheduler = get_file_contents(sysdir + "/queue/scheduler") if scheduler is not None: m = re.match(r".*?(\[(.*)\])", scheduler) if m: metadata['scheduler_mode'] = m.group(2) metadata['partitions'] = get_partitions_facts(sysdir) size = get_file_contents(os.path.join(sysdir, 'size'), 0) metadata['sectors'] = get_file_contents(os.path.join(sysdir, 'sectors'), 0) fallback_sectorsize = get_file_contents(sysdir + "/queue/hw_sector_size", 512) metadata['sectorsize'] = get_file_contents(sysdir + "/queue/logical_block_size", fallback_sectorsize) metadata['size'] = float(size) * 512 metadata['human_readable_size'] = human_readable_size(metadata['size']) metadata['path'] = diskname metadata['locked'] = is_locked_raw_device(metadata['path']) metadata['type'] = block[2] device_facts[diskname] = metadata return device_facts def has_bluestore_label(device_path): isBluestore = False bluestoreDiskSignature = 'bluestore block device' # 22 bytes long # throws OSError on failure logger.info("opening device {} to check for BlueStore label".format(device_path)) try: with open(device_path, "rb") as fd: # read first 22 bytes looking for bluestore disk signature signature = fd.read(22) if signature.decode('ascii', 'replace') == bluestoreDiskSignature: isBluestore = True except IsADirectoryError: logger.info(f'{device_path} is a directory, skipping.') return isBluestore
30,556
31.542066
133
py
null
ceph-main/src/ceph-volume/ceph_volume/util/encryption.py
import base64
import os
import logging
from ceph_volume import process, conf, terminal
from ceph_volume.util import constants, system
from ceph_volume.util.device import Device
from .prepare import write_keyring
from .disk import lsblk, device_family, get_part_entry_type

logger = logging.getLogger(__name__)
mlogger = terminal.MultiLogger(__name__)


def get_key_size_from_conf():
    """
    Return the osd dmcrypt key size from config file.
    Default is 512.
    """
    default_key_size = '512'
    key_size = conf.ceph.get_safe(
        'osd',
        'osd_dmcrypt_key_size',
        default='512', check_valid=False)

    if key_size not in ['256', '512']:
        logger.warning(("Invalid value set for osd_dmcrypt_key_size ({}). "
                        "Falling back to {} bits".format(key_size, default_key_size)))
        return default_key_size

    return key_size


def create_dmcrypt_key():
    """
    Create the secret dm-crypt key (KEK) used to encrypt/decrypt the Volume Key.
    """
    random_string = os.urandom(128)
    key = base64.b64encode(random_string).decode('utf-8')
    return key


def luks_format(key, device):
    """
    Format (encrypt) a device with LUKS via cryptsetup, using the given
    secret key as the passphrase

    :param key: dmcrypt secret key, used as the passphrase for the device
    :param device: Absolute path to device
    """
    command = [
        'cryptsetup',
        '--batch-mode',  # do not prompt
        '--key-size',
        get_key_size_from_conf(),
        '--key-file',  # misnomer, should be key
        '-',           # because we indicate stdin for the key here
        'luksFormat',
        device,
    ]
    process.call(command, stdin=key, terminal_verbose=True, show_command=True)


def plain_open(key, device, mapping):
    """
    Decrypt (open) an encrypted device, previously prepared with cryptsetup in plain mode

    .. note: ceph-disk will require an additional b64decode call for this to work

    :param key: dmcrypt secret key
    :param device: absolute path to device
    :param mapping: mapping name used to correlate device. Usually a UUID
    """
    command = [
        'cryptsetup',
        '--key-file',
        '-',
        '--allow-discards',  # allow discards (aka TRIM) requests for device
        'open',
        device,
        mapping,
        '--type', 'plain',
        '--key-size', '256',
    ]

    process.call(command, stdin=key, terminal_verbose=True, show_command=True)


def luks_open(key, device, mapping):
    """
    Decrypt (open) an encrypted device, previously prepared with cryptsetup

    .. note: ceph-disk will require an additional b64decode call for this to work

    :param key: dmcrypt secret key
    :param device: absolute path to device
    :param mapping: mapping name used to correlate device. Usually a UUID
    """
    command = [
        'cryptsetup',
        '--key-size',
        get_key_size_from_conf(),
        '--key-file',
        '-',
        '--allow-discards',  # allow discards (aka TRIM) requests for device
        'luksOpen',
        device,
        mapping,
    ]
    process.call(command, stdin=key, terminal_verbose=True, show_command=True)


def dmcrypt_close(mapping):
    """
    Close (tear down) a device-mapper mapping previously opened with cryptsetup

    :param mapping: Absolute path to the device-mapper mapping
    """
    if not os.path.exists(mapping):
        logger.debug('device mapper path does not exist %s' % mapping)
        logger.debug('will skip cryptsetup removal')
        return
    # don't be strict about the remove call, but still warn on the terminal if it fails
    process.run(['cryptsetup', 'remove', mapping], stop_on_error=False)


def get_dmcrypt_key(osd_id, osd_fsid, lockbox_keyring=None):
    """
    Retrieve the dmcrypt (secret) key stored initially on the monitor. The key
    is sent initially with JSON, and the Monitor then mangles the name to
    ``dm-crypt/osd/<fsid>/luks``

    The ``lockbox.keyring`` file is required for this operation, and it is
    assumed it will exist on the path for the same OSD that is being activated.
To support scanning, it is optionally configurable to a custom location (e.g. inside a lockbox partition mounted in a temporary location) """ if lockbox_keyring is None: lockbox_keyring = '/var/lib/ceph/osd/%s-%s/lockbox.keyring' % (conf.cluster, osd_id) name = 'client.osd-lockbox.%s' % osd_fsid config_key = 'dm-crypt/osd/%s/luks' % osd_fsid mlogger.info(f'Running ceph config-key get {config_key}') stdout, stderr, returncode = process.call( [ 'ceph', '--cluster', conf.cluster, '--name', name, '--keyring', lockbox_keyring, 'config-key', 'get', config_key ], show_command=True, logfile_verbose=False ) if returncode != 0: raise RuntimeError('Unable to retrieve dmcrypt secret') return ' '.join(stdout).strip() def write_lockbox_keyring(osd_id, osd_fsid, secret): """ Helper to write the lockbox keyring. This is needed because the bluestore OSD will not persist the keyring. For bluestore: A tmpfs filesystem is mounted, so the path can get written to, but the files are ephemeral, which requires this file to be created every time it is activated. """ if os.path.exists('/var/lib/ceph/osd/%s-%s/lockbox.keyring' % (conf.cluster, osd_id)): return name = 'client.osd-lockbox.%s' % osd_fsid write_keyring( osd_id, secret, keyring_name='lockbox.keyring', name=name ) def status(device): """ Capture the metadata information of a possibly encrypted device, returning a dictionary with all the values found (if any). An encrypted device will contain information about a device. Example successful output looks like:: $ cryptsetup status /dev/mapper/ed6b5a26-eafe-4cd4-87e3-422ff61e26c4 /dev/mapper/ed6b5a26-eafe-4cd4-87e3-422ff61e26c4 is active and is in use. type: LUKS1 cipher: aes-xts-plain64 keysize: 256 bits device: /dev/sdc2 offset: 4096 sectors size: 20740063 sectors mode: read/write As long as the mapper device is in 'open' state, the ``status`` call will work. :param device: Absolute path or UUID of the device mapper """ command = [ 'cryptsetup', 'status', device, ] out, err, code = process.call(command, show_command=True, verbose_on_failure=False) metadata = {} if code != 0: logger.warning('failed to detect device mapper information') return metadata for line in out: # get rid of lines that might not be useful to construct the report: if not line.startswith(' '): continue try: column, value = line.split(': ') except ValueError: continue metadata[column.strip()] = value.strip().strip('"') return metadata def legacy_encrypted(device): """ Detect if a device was encrypted with ceph-disk or not. In the case of encrypted devices, include the type of encryption (LUKS, or PLAIN), and infer what the lockbox partition is. This function assumes that ``device`` will be a partition. 
""" disk_meta = {} if os.path.isdir(device): mounts = system.Mounts(paths=True).get_mounts() # yes, rebind the device variable here because a directory isn't going # to help with parsing device = mounts.get(device, [None])[0] if not device: raise RuntimeError('unable to determine the device mounted at %s' % device) metadata = {'encrypted': False, 'type': None, 'lockbox': '', 'device': device} # check if the device is online/decrypted first active_mapper = status(device) if active_mapper: # normalize a bit to ensure same values regardless of source metadata['type'] = active_mapper['type'].lower().strip('12') # turn LUKS1 or LUKS2 into luks metadata['encrypted'] = True if metadata['type'] in ['plain', 'luks'] else False # The true device is now available to this function, so it gets # re-assigned here for the lockbox checks to succeed (it is not # possible to guess partitions from a device mapper device otherwise device = active_mapper.get('device', device) metadata['device'] = device else: uuid = get_part_entry_type(device) guid_match = constants.ceph_disk_guids.get(uuid, {}) encrypted_guid = guid_match.get('encrypted', False) if encrypted_guid: metadata['encrypted'] = True metadata['type'] = guid_match['encryption_type'] # Lets find the lockbox location now, to do this, we need to find out the # parent device name for the device so that we can query all of its # associated devices and *then* look for one that has the 'lockbox' label # on it. Thanks for being awesome ceph-disk if not device == 'tmpfs': disk_meta = lsblk(device, abspath=True) if not disk_meta: return metadata parent_device = disk_meta['PKNAME'] # With the parent device set, we can now look for the lockbox listing associated devices devices = [Device(i['NAME']) for i in device_family(parent_device)] for d in devices: if d.ceph_disk.type == 'lockbox': metadata['lockbox'] = d.path break return metadata
9,497
33.413043
101
py
null
ceph-main/src/ceph-volume/ceph_volume/util/lsmdisk.py
""" This module handles the interaction with libstoragemgmt for local disk devices. Interaction may fail with LSM for a number of issues, but the intent here is to make this a soft fail, since LSM related data is not a critical component of ceph-volume. """ import logging try: from lsm import LocalDisk, LsmError from lsm import Disk as lsm_Disk except ImportError: lsm_available = False transport_map = {} health_map = {} lsm_Disk = None else: lsm_available = True transport_map = { lsm_Disk.LINK_TYPE_UNKNOWN: "Unavailable", lsm_Disk.LINK_TYPE_FC: "Fibre Channel", lsm_Disk.LINK_TYPE_SSA: "IBM SSA", lsm_Disk.LINK_TYPE_SBP: "Serial Bus", lsm_Disk.LINK_TYPE_SRP: "SCSI RDMA", lsm_Disk.LINK_TYPE_ISCSI: "iSCSI", lsm_Disk.LINK_TYPE_SAS: "SAS", lsm_Disk.LINK_TYPE_ADT: "ADT (Tape)", lsm_Disk.LINK_TYPE_ATA: "ATA/SATA", lsm_Disk.LINK_TYPE_USB: "USB", lsm_Disk.LINK_TYPE_SOP: "SCSI over PCI-E", lsm_Disk.LINK_TYPE_PCIE: "PCI-E", } health_map = { lsm_Disk.HEALTH_STATUS_UNKNOWN: "Unknown", lsm_Disk.HEALTH_STATUS_FAIL: "Fail", lsm_Disk.HEALTH_STATUS_WARN: "Warn", lsm_Disk.HEALTH_STATUS_GOOD: "Good", } logger = logging.getLogger(__name__) class LSMDisk: def __init__(self, dev_path): self.dev_path = dev_path self.error_list = set() if lsm_available: self.lsm_available = True self.disk = LocalDisk() else: self.lsm_available = False self.error_list.add("libstoragemgmt (lsm module) is unavailable") logger.info("LSM information is unavailable: libstoragemgmt is not installed") self.disk = None self.led_bits = None @property def errors(self): """show any errors that the LSM interaction has encountered (str)""" return ", ".join(self.error_list) def _query_lsm(self, func, path): """Common method used to call the LSM functions, returning the function's result or None""" # if disk is None, lsm is unavailable so all calls should return None if self.disk is None: return None method = getattr(self.disk, func) try: output = method(path) except LsmError as err: logger.error("LSM Error: {}".format(err._msg)) self.error_list.add(err._msg) return None else: return output @property def led_status(self): """Fetch LED status, store in the LSMDisk object and return current status (int)""" if self.led_bits is None: self.led_bits = self._query_lsm('led_status_get', self.dev_path) or 1 return self.led_bits else: return self.led_bits @property def led_ident_state(self): """Query a disks IDENT LED state to discover when it is On, Off or Unknown (str)""" if self.led_status == 1: return "Unsupported" if self.led_status & lsm_Disk.LED_STATUS_IDENT_ON == lsm_Disk.LED_STATUS_IDENT_ON: return "On" elif self.led_status & lsm_Disk.LED_STATUS_IDENT_OFF == lsm_Disk.LED_STATUS_IDENT_OFF: return "Off" elif self.led_status & lsm_Disk.LED_STATUS_IDENT_UNKNOWN == lsm_Disk.LED_STATUS_IDENT_UNKNOWN: return "Unknown" return "Unsupported" @property def led_fault_state(self): """Query a disks FAULT LED state to discover when it is On, Off or Unknown (str)""" if self.led_status == 1: return "Unsupported" if self.led_status & lsm_Disk.LED_STATUS_FAULT_ON == lsm_Disk.LED_STATUS_FAULT_ON: return "On" elif self.led_status & lsm_Disk.LED_STATUS_FAULT_OFF == lsm_Disk.LED_STATUS_FAULT_OFF: return "Off" elif self.led_status & lsm_Disk.LED_STATUS_FAULT_UNKNOWN == lsm_Disk.LED_STATUS_FAULT_UNKNOWN: return "Unknown" return "Unsupported" @property def led_ident_support(self): """Query the LED state to determine IDENT support: Unknown, Supported, Unsupported (str)""" if self.led_status == 1: return "Unknown" ident_states = ( lsm_Disk.LED_STATUS_IDENT_ON + 
lsm_Disk.LED_STATUS_IDENT_OFF + lsm_Disk.LED_STATUS_IDENT_UNKNOWN ) if (self.led_status & ident_states) == 0: return "Unsupported" return "Supported" @property def led_fault_support(self): """Query the LED state to determine FAULT support: Unknown, Supported, Unsupported (str)""" if self.led_status == 1: return "Unknown" fail_states = ( lsm_Disk.LED_STATUS_FAULT_ON + lsm_Disk.LED_STATUS_FAULT_OFF + lsm_Disk.LED_STATUS_FAULT_UNKNOWN ) if self.led_status & fail_states == 0: return "Unsupported" return "Supported" @property def health(self): """Determine the health of the disk from LSM : Unknown, Fail, Warn or Good (str)""" _health_int = self._query_lsm('health_status_get', self.dev_path) return health_map.get(_health_int, "Unknown") @property def transport(self): """Translate a disks link type to a human readable format (str)""" _link_type = self._query_lsm('link_type_get', self.dev_path) return transport_map.get(_link_type, "Unknown") @property def media_type(self): """Use the rpm value to determine the type of disk media: Flash or HDD (str)""" _rpm = self._query_lsm('rpm_get', self.dev_path) if _rpm is not None: if _rpm == 0: return "Flash" elif _rpm > 1: return "HDD" return "Unknown" def json_report(self): """Return the LSM related metadata for the current local disk (dict)""" if self.lsm_available: return { "serialNum": self._query_lsm('serial_num_get', self.dev_path) or "Unknown", "transport": self.transport, "mediaType": self.media_type, "rpm": self._query_lsm('rpm_get', self.dev_path) or "Unknown", "linkSpeed": self._query_lsm('link_speed_get', self.dev_path) or "Unknown", "health": self.health, "ledSupport": { "IDENTsupport": self.led_ident_support, "IDENTstatus": self.led_ident_state, "FAILsupport": self.led_fault_support, "FAILstatus": self.led_fault_state, }, "errors": list(self.error_list) } else: return {}
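# --- Editor's example (not part of the upstream module) ---
# A minimal, hedged usage sketch for LSMDisk. The class soft-fails by design,
# so this runs even when libstoragemgmt is missing: json_report() is then an
# empty dict and .errors explains why. '/dev/sda' is a hypothetical disk.
import json
from ceph_volume.util.lsmdisk import LSMDisk

lsm_disk = LSMDisk('/dev/sda')  # hypothetical local disk path
report = lsm_disk.json_report()
if report:
    print(json.dumps(report, indent=2))
else:
    print('no LSM data: %s' % lsm_disk.errors)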
6,866
33.857868
102
py
null
ceph-main/src/ceph-volume/ceph_volume/util/prepare.py
""" These utilities for prepare provide all the pieces needed to prepare a device but also a compounded ("single call") helper to do them in order. Some plugins may want to change some part of the process, while others might want to consume the single-call helper """ import errno import os import logging import json import time from ceph_volume import process, conf, terminal from ceph_volume.util import system, constants, str_to_int, disk logger = logging.getLogger(__name__) mlogger = terminal.MultiLogger(__name__) def create_key(): stdout, stderr, returncode = process.call( ['ceph-authtool', '--gen-print-key'], show_command=True, logfile_verbose=False) if returncode != 0: raise RuntimeError('Unable to generate a new auth key') return ' '.join(stdout).strip() def write_keyring(osd_id, secret, keyring_name='keyring', name=None): """ Create a keyring file with the ``ceph-authtool`` utility. Constructs the path over well-known conventions for the OSD, and allows any other custom ``name`` to be set. :param osd_id: The ID for the OSD to be used :param secret: The key to be added as (as a string) :param name: Defaults to 'osd.{ID}' but can be used to add other client names, specifically for 'lockbox' type of keys :param keyring_name: Alternative keyring name, for supporting other types of keys like for lockbox """ osd_keyring = '/var/lib/ceph/osd/%s-%s/%s' % (conf.cluster, osd_id, keyring_name) name = name or 'osd.%s' % str(osd_id) mlogger.info(f'Creating keyring file for {name}') process.call( [ 'ceph-authtool', osd_keyring, '--create-keyring', '--name', name, '--add-key', secret ], logfile_verbose=False) system.chown(osd_keyring) def get_block_db_size(lv_format=True): """ Helper to retrieve the size (defined in megabytes in ceph.conf) to create the block.db logical volume, it "translates" the string into a float value, then converts that into gigabytes, and finally (optionally) it formats it back as a string so that it can be used for creating the LV. :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size would result in '5G', otherwise it will return a ``Size`` object. .. note: Configuration values are in bytes, unlike journals which are defined in gigabytes """ conf_db_size = None try: conf_db_size = conf.ceph.get_safe('osd', 'bluestore_block_db_size', None) except RuntimeError: logger.exception("failed to load ceph configuration, will use defaults") if not conf_db_size: logger.debug( 'block.db has no size configuration, will fallback to using as much as possible' ) # TODO better to return disk.Size(b=0) here return None logger.debug('bluestore_block_db_size set to %s' % conf_db_size) db_size = disk.Size(b=str_to_int(conf_db_size)) if db_size < disk.Size(gb=2): mlogger.error('Refusing to continue with configured size for block.db') raise RuntimeError('block.db sizes must be larger than 2GB, detected: %s' % db_size) if lv_format: return '%sG' % db_size.gb.as_int() return db_size def get_block_wal_size(lv_format=True): """ Helper to retrieve the size (defined in megabytes in ceph.conf) to create the block.wal logical volume, it "translates" the string into a float value, then converts that into gigabytes, and finally (optionally) it formats it back as a string so that it can be used for creating the LV. :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size would result in '5G', otherwise it will return a ``Size`` object. .. 
    conf_wal_size = None
    try:
        conf_wal_size = conf.ceph.get_safe('osd', 'bluestore_block_wal_size', None)
    except RuntimeError:
        logger.exception("failed to load ceph configuration, will use defaults")

    if not conf_wal_size:
        logger.debug(
            'block.wal has no size configuration, will fallback to using as much as possible'
        )
        return None
    logger.debug('bluestore_block_wal_size set to %s' % conf_wal_size)
    wal_size = disk.Size(b=str_to_int(conf_wal_size))

    if wal_size < disk.Size(gb=2):
        mlogger.error('Refusing to continue with configured size for block.wal')
        raise RuntimeError('block.wal sizes must be larger than 2GB, detected: %s' % wal_size)
    if lv_format:
        return '%sG' % wal_size.gb.as_int()
    return wal_size


def create_id(fsid, json_secrets, osd_id=None):
    """
    :param fsid: The osd fsid to create, always required
    :param json_secrets: a json-ready object with whatever secrets are wanted
                         to be passed to the monitor
    :param osd_id: Reuse an existing ID from an OSD that's been destroyed; if
                   the id does not exist in the cluster, a new ID will be created
    """
    bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
    cmd = [
        'ceph',
        '--cluster', conf.cluster,
        '--name', 'client.bootstrap-osd',
        '--keyring', bootstrap_keyring,
        '-i', '-',
        'osd', 'new', fsid
    ]
    if osd_id is not None:
        if osd_id_available(osd_id):
            cmd.append(osd_id)
        else:
            raise RuntimeError("The osd ID {} is already in use or does not exist.".format(osd_id))
    stdout, stderr, returncode = process.call(
        cmd,
        stdin=json_secrets,
        show_command=True
    )
    if returncode != 0:
        raise RuntimeError('Unable to create a new OSD id')
    return ' '.join(stdout).strip()


def osd_id_available(osd_id):
    """
    Checks to see if an osd ID exists and if it's available for reuse.
    Returns True if it is, False if it isn't.

    :param osd_id: The osd ID to check
    """
    if osd_id is None:
        return False

    bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
    stdout, stderr, returncode = process.call(
        [
            'ceph',
            '--cluster', conf.cluster,
            '--name', 'client.bootstrap-osd',
            '--keyring', bootstrap_keyring,
            'osd',
            'tree',
            '-f', 'json',
        ],
        show_command=True
    )
    if returncode != 0:
        raise RuntimeError('Unable to check if OSD id exists: %s' % osd_id)

    output = json.loads(''.join(stdout).strip())
    osds = output['nodes']
    osd = [osd for osd in osds if str(osd['id']) == str(osd_id)]
    if not osd or (osd and osd[0].get('status') == "destroyed"):
        return True
    return False


def mount_tmpfs(path):
    process.run([
        'mount',
        '-t',
        'tmpfs', 'tmpfs',
        path
    ])
    # Restore SELinux context
    system.set_context(path)


def create_osd_path(osd_id, tmpfs=False):
    path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    system.mkdir_p('/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id))
    if tmpfs:
        mount_tmpfs(path)


def format_device(device):
    # only supports xfs
    command = ['mkfs', '-t', 'xfs']

    # get the mkfs options if any for xfs,
    # fallback to the default options defined in constants.mkfs
    flags = conf.ceph.get_list(
        'osd',
        'osd_mkfs_options_xfs',
        default=constants.mkfs.get('xfs'),
        split=' ',
    )

    # always force
    if '-f' not in flags:
        flags.insert(0, '-f')

    command.extend(flags)
    command.append(device)
    process.run(command)


def _normalize_mount_flags(flags, extras=None):
    """
    Mount flag options have to be a single string, separated by a comma. If the
    flags are separated by spaces, or with commas and spaces in ceph.conf, the
    mount options will be passed incorrectly.
    This helps when parsing ceph.conf values that return something like::

        ["rw,", "exec,"]

    Or::

        [" rw ,", "exec"]

    :param flags: A list of flags, or a single string of mount flags
    :param extras: Extra set of mount flags, useful when custom devices like
                   VDO need ad-hoc mount configurations
    """
    # Instead of using set(), we append to this new list here, because set()
    # will create an arbitrary order on the items that is made worse when
    # testing with tools like tox that include a randomizer seed. By
    # controlling the order, it is easier to correctly assert the expectation
    unique_flags = []
    if isinstance(flags, list):
        if extras:
            flags.extend(extras)

        # ensure that spaces and commas are removed so that they can join
        # correctly, remove duplicates
        for f in flags:
            if f and f not in unique_flags:
                unique_flags.append(f.strip().strip(','))
        return ','.join(unique_flags)

    # split them, clean them, and join them back again
    flags = flags.strip().split(' ')
    if extras:
        flags.extend(extras)

    # remove possible duplicates
    for f in flags:
        if f and f not in unique_flags:
            unique_flags.append(f.strip().strip(','))
    flags = ','.join(unique_flags)
    # Before returning, split them again, since strings can be mashed up
    # together, preventing removal of duplicate entries
    return ','.join(set(flags.split(',')))


def mount_osd(device, osd_id, **kw):
    extras = []
    is_vdo = kw.get('is_vdo', '0')
    if is_vdo == '1':
        extras = ['discard']
    destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
    command = ['mount', '-t', 'xfs', '-o']
    flags = conf.ceph.get_list(
        'osd',
        'osd_mount_options_xfs',
        default=constants.mount.get('xfs'),
        split=' ',
    )
    command.append(
        _normalize_mount_flags(flags, extras=extras)
    )
    command.append(device)
    command.append(destination)
    process.run(command)

    # Restore SELinux context
    system.set_context(destination)


def _link_device(device, device_type, osd_id):
    """
    Allow linking any device type in an OSD directory. ``device`` must be the
    source, with an absolute path, and ``device_type`` will be the destination
    name, like 'journal', or 'block'
    """
    device_path = '/var/lib/ceph/osd/%s-%s/%s' % (
        conf.cluster,
        osd_id,
        device_type
    )
    command = ['ln', '-s', device, device_path]
    system.chown(device)

    process.run(command)


def _validate_bluestore_device(device, excepted_device_type, osd_uuid):
    """
    Validate whether the given device is truly what it is supposed to be
    """

    out, err, ret = process.call(['ceph-bluestore-tool', 'show-label', '--dev', device])
    if err:
        terminal.error('ceph-bluestore-tool failed to run.
%s'% err) raise SystemExit(1) if ret: terminal.error('no label on %s'% device) raise SystemExit(1) oj = json.loads(''.join(out)) if device not in oj: terminal.error('%s not in the output of ceph-bluestore-tool, buggy?'% device) raise SystemExit(1) current_device_type = oj[device]['description'] if current_device_type != excepted_device_type: terminal.error('%s is not a %s device but %s'% (device, excepted_device_type, current_device_type)) raise SystemExit(1) current_osd_uuid = oj[device]['osd_uuid'] if current_osd_uuid != osd_uuid: terminal.error('device %s is used by another osd %s as %s, should be %s'% (device, current_osd_uuid, current_device_type, osd_uuid)) raise SystemExit(1) def link_block(block_device, osd_id): _link_device(block_device, 'block', osd_id) def link_wal(wal_device, osd_id, osd_uuid=None): _validate_bluestore_device(wal_device, 'bluefs wal', osd_uuid) _link_device(wal_device, 'block.wal', osd_id) def link_db(db_device, osd_id, osd_uuid=None): _validate_bluestore_device(db_device, 'bluefs db', osd_uuid) _link_device(db_device, 'block.db', osd_id) def get_monmap(osd_id): """ Before creating the OSD files, a monmap needs to be retrieved so that it can be used to tell the monitor(s) about the new OSD. A call will look like:: ceph --cluster ceph --name client.bootstrap-osd \ --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring \ mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap """ path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id) bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster monmap_destination = os.path.join(path, 'activate.monmap') process.run([ 'ceph', '--cluster', conf.cluster, '--name', 'client.bootstrap-osd', '--keyring', bootstrap_keyring, 'mon', 'getmap', '-o', monmap_destination ]) def get_osdspec_affinity(): return os.environ.get('CEPH_VOLUME_OSDSPEC_AFFINITY', '') def osd_mkfs_bluestore(osd_id, fsid, keyring=None, wal=False, db=False): """ Create the files for the OSD to function. A normal call will look like: ceph-osd --cluster ceph --mkfs --mkkey -i 0 \ --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \ --osd-data /var/lib/ceph/osd/ceph-0 \ --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \ --keyring /var/lib/ceph/osd/ceph-0/keyring \ --setuser ceph --setgroup ceph In some cases it is required to use the keyring, when it is passed in as a keyword argument it is used as part of the ceph-osd command """ path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id) monmap = os.path.join(path, 'activate.monmap') system.chown(path) base_command = [ 'ceph-osd', '--cluster', conf.cluster, '--osd-objectstore', 'bluestore', '--mkfs', '-i', osd_id, '--monmap', monmap, ] supplementary_command = [ '--osd-data', path, '--osd-uuid', fsid, '--setuser', 'ceph', '--setgroup', 'ceph' ] if keyring is not None: base_command.extend(['--keyfile', '-']) if wal: base_command.extend( ['--bluestore-block-wal-path', wal] ) system.chown(wal) if db: base_command.extend( ['--bluestore-block-db-path', db] ) system.chown(db) if get_osdspec_affinity(): base_command.extend(['--osdspec-affinity', get_osdspec_affinity()]) command = base_command + supplementary_command """ When running in containers the --mkfs on raw device sometimes fails to acquire a lock through flock() on the device because systemd-udevd holds one temporarily. See KernelDevice.cc and _lock() to understand how ceph-osd acquires the lock. 
Because this is really transient, we retry up to 5 times and wait for 1 sec in-between """ for retry in range(5): _, _, returncode = process.call(command, stdin=keyring, terminal_verbose=True, show_command=True) if returncode == 0: break else: if returncode == errno.EWOULDBLOCK: time.sleep(1) logger.info('disk is held by another process, trying to mkfs again... (%s/5 attempt)' % retry) continue else: raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
15,676
33.006508
140
py
null
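The flag-normalization logic in util/prepare.py above is easiest to see in isolation. The function below is an illustrative, standalone simplification of that behavior, not the module's actual export; the name and the sample values are made up.

def normalize_mount_flags(flags, extras=None):
    # Accept either a list of flags or a single space/comma separated string.
    if isinstance(flags, str):
        flags = flags.strip().split(' ')
    if extras:
        flags = list(flags) + list(extras)
    unique = []
    for f in flags:
        f = f.strip().strip(',')
        # De-duplicate while preserving order, so output stays deterministic.
        if f and f not in unique:
            unique.append(f)
    return ','.join(unique)

assert normalize_mount_flags(['rw,', 'exec,']) == 'rw,exec'
assert normalize_mount_flags(' rw , exec', extras=['discard']) == 'rw,exec,discard'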
ceph-main/src/ceph-volume/ceph_volume/util/system.py
import errno import logging import os import pwd import platform import tempfile import uuid import subprocess import threading from ceph_volume import process, terminal from . import as_string # python2 has no FileNotFoundError try: FileNotFoundError except NameError: FileNotFoundError = OSError logger = logging.getLogger(__name__) mlogger = terminal.MultiLogger(__name__) # TODO: get these out of here and into a common area for others to consume if platform.system() == 'FreeBSD': FREEBSD = True DEFAULT_FS_TYPE = 'zfs' PROCDIR = '/compat/linux/proc' # FreeBSD does not have blockdevices any more BLOCKDIR = '/dev' ROOTGROUP = 'wheel' else: FREEBSD = False DEFAULT_FS_TYPE = 'xfs' PROCDIR = '/proc' BLOCKDIR = '/sys/block' ROOTGROUP = 'root' host_rootfs = '/rootfs' run_host_cmd = [ 'nsenter', '--mount={}/proc/1/ns/mnt'.format(host_rootfs), '--ipc={}/proc/1/ns/ipc'.format(host_rootfs), '--net={}/proc/1/ns/net'.format(host_rootfs), '--uts={}/proc/1/ns/uts'.format(host_rootfs) ] def generate_uuid(): return str(uuid.uuid4()) def find_executable_on_host(locations=[], executable='', binary_check='/bin/ls'): paths = ['{}/{}'.format(location, executable) for location in locations] command = [] command.extend(run_host_cmd + [binary_check] + paths) process = subprocess.Popen( command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, close_fds=True ) stdout = as_string(process.stdout.read()) if stdout: executable_on_host = stdout.split('\n')[0] logger.info('Executable {} found on the host, will use {}'.format(executable, executable_on_host)) return executable_on_host else: logger.warning('Executable {} not found on the host, will return {} as-is'.format(executable, executable)) return executable def which(executable, run_on_host=False): """find the location of an executable""" def _get_path(executable, locations): for location in locations: executable_path = os.path.join(location, executable) if os.path.exists(executable_path) and os.path.isfile(executable_path): return executable_path return None static_locations = ( '/usr/local/bin', '/bin', '/usr/bin', '/usr/local/sbin', '/usr/sbin', '/sbin', ) if not run_on_host: path = os.getenv('PATH', '') path_locations = path.split(':') exec_in_path = _get_path(executable, path_locations) if exec_in_path: return exec_in_path mlogger.warning('Executable {} not in PATH: {}'.format(executable, path)) exec_in_static_locations = _get_path(executable, static_locations) if exec_in_static_locations: mlogger.warning('Found executable under {}, please ensure $PATH is set correctly!'.format(exec_in_static_locations)) return exec_in_static_locations else: executable = find_executable_on_host(static_locations, executable) # At this point, either `find_executable_on_host()` found an executable on the host # or we fallback to just returning the argument as-is, to prevent a hard fail, and # hoping that the system might have the executable somewhere custom return executable def get_ceph_user_ids(): """ Return the id and gid of the ceph user """ try: user = pwd.getpwnam('ceph') except KeyError: # is this even possible? 
raise RuntimeError('"ceph" user is not available in the current system') return user[2], user[3] def get_file_contents(path, default=''): contents = default if not os.path.exists(path): return contents try: with open(path, 'r') as open_file: contents = open_file.read().strip() except Exception: logger.exception('Failed to read contents from: %s' % path) return contents def mkdir_p(path, chown=True): """ A `mkdir -p` that defaults to chown the path to the ceph user """ try: os.mkdir(path) except OSError as e: if e.errno == errno.EEXIST: pass else: raise if chown: uid, gid = get_ceph_user_ids() os.chown(path, uid, gid) def chown(path, recursive=True): """ ``chown`` a path to the ceph user (uid and guid fetched at runtime) """ uid, gid = get_ceph_user_ids() if os.path.islink(path): process.run(['chown', '-h', 'ceph:ceph', path]) path = os.path.realpath(path) if recursive: process.run(['chown', '-R', 'ceph:ceph', path]) else: os.chown(path, uid, gid) def is_binary(path): """ Detect if a file path is a binary or not. Will falsely report as binary when utf-16 encoded. In the ceph universe there is no such risk (yet) """ with open(path, 'rb') as fp: contents = fp.read(8192) if b'\x00' in contents: # a null byte may signal binary return True return False class tmp_mount(object): """ Temporarily mount a device on a temporary directory, and unmount it upon exit When ``encrypted`` is set to ``True``, the exit method will call out to close the device so that it doesn't remain open after mounting. It is assumed that it will be open because otherwise it wouldn't be possible to mount in the first place """ def __init__(self, device, encrypted=False): self.device = device self.path = None self.encrypted = encrypted def __enter__(self): self.path = tempfile.mkdtemp() process.run([ 'mount', '-v', self.device, self.path ]) return self.path def __exit__(self, exc_type, exc_val, exc_tb): process.run([ 'umount', '-v', self.path ]) if self.encrypted: # avoid a circular import from the encryption module from ceph_volume.util import encryption encryption.dmcrypt_close(self.device) def unmount_tmpfs(path): """ Removes the mount at the given path iff the path is a tmpfs mount point. Otherwise no action is taken. 
""" _out, _err, rc = process.call(['findmnt', '-t', 'tmpfs', '-M', path]) if rc != 0: logger.info('{} does not appear to be a tmpfs mount'.format(path)) else: logger.info('Unmounting tmpfs path at {}'.format( path)) unmount(path) def unmount(path): """ Removes mounts at the given path """ process.run([ 'umount', '-v', path, ]) def path_is_mounted(path, destination=None): """ Check if the given path is mounted """ m = Mounts(paths=True) mounts = m.get_mounts() realpath = os.path.realpath(path) mounted_locations = mounts.get(realpath, []) if destination: return destination in mounted_locations return mounted_locations != [] def device_is_mounted(dev, destination=None): """ Check if the given device is mounted, optionally validating that a destination exists """ plain_mounts = Mounts(devices=True) realpath_mounts = Mounts(devices=True, realpath=True) realpath_dev = os.path.realpath(dev) if dev.startswith('/') else dev destination = os.path.realpath(destination) if destination else None # plain mounts plain_dev_mounts = plain_mounts.get_mounts().get(dev, []) realpath_dev_mounts = plain_mounts.get_mounts().get(realpath_dev, []) # realpath mounts plain_dev_real_mounts = realpath_mounts.get_mounts().get(dev, []) realpath_dev_real_mounts = realpath_mounts.get_mounts().get(realpath_dev, []) mount_locations = [ plain_dev_mounts, realpath_dev_mounts, plain_dev_real_mounts, realpath_dev_real_mounts ] for mounts in mount_locations: if mounts: # we have a matching mount if destination: if destination in mounts: logger.info( '%s detected as mounted, exists at destination: %s', dev, destination ) return True else: logger.info('%s was found as mounted', dev) return True logger.info('%s was not found as mounted', dev) return False class Mounts(object): excluded_paths = [] def __init__(self, devices=False, paths=False, realpath=False): self.devices = devices self.paths = paths self.realpath = realpath def safe_realpath(self, path, timeout=0.2): def _realpath(path, result): p = os.path.realpath(path) result.append(p) result = [] t = threading.Thread(target=_realpath, args=(path, result)) t.setDaemon(True) t.start() t.join(timeout) if t.is_alive(): return None return result[0] def get_mounts(self): """ Create a mapping of all available system mounts so that other helpers can detect nicely what path or device is mounted It ignores (most of) non existing devices, but since some setups might need some extra device information, it will make an exception for: - tmpfs - devtmpfs - /dev/root If ``devices`` is set to ``True`` the mapping will be a device-to-path(s), if ``paths`` is set to ``True`` then the mapping will be a path-to-device(s) :param realpath: Resolve devices to use their realpaths. 
This is useful for paths like LVM where more than one path can point to the same device """ devices_mounted = {} paths_mounted = {} do_not_skip = ['tmpfs', 'devtmpfs', '/dev/root'] default_to_devices = self.devices is False and self.paths is False with open(PROCDIR + '/mounts', 'rb') as mounts: proc_mounts = mounts.readlines() for line in proc_mounts: fields = [as_string(f) for f in line.split()] if len(fields) < 3: continue if fields[0] in Mounts.excluded_paths or \ fields[1] in Mounts.excluded_paths: continue if self.realpath: if fields[0].startswith('/'): device = self.safe_realpath(fields[0]) if device is None: logger.warning(f"Can't get realpath on {fields[0]}, skipping.") Mounts.excluded_paths.append(fields[0]) continue else: device = fields[0] else: device = fields[0] path = self.safe_realpath(fields[1]) if path is None: logger.warning(f"Can't get realpath on {fields[1]}, skipping.") Mounts.excluded_paths.append(fields[1]) continue # only care about actual existing devices if not os.path.exists(device) or not device.startswith('/'): if device not in do_not_skip: continue if device in devices_mounted.keys(): devices_mounted[device].append(path) else: devices_mounted[device] = [path] if path in paths_mounted.keys(): paths_mounted[path].append(device) else: paths_mounted[path] = [device] # Default to returning information for devices if if self.devices is True or default_to_devices: return devices_mounted else: return paths_mounted def set_context(path, recursive=False): """ Calls ``restorecon`` to set the proper context on SELinux systems. Only if the ``restorecon`` executable is found anywhere in the path it will get called. If the ``CEPH_VOLUME_SKIP_RESTORECON`` environment variable is set to any of: "1", "true", "yes" the call will be skipped as well. Finally, if SELinux is not enabled, or not available in the system, ``restorecon`` will not be called. This is checked by calling out to the ``selinuxenabled`` executable. If that tool is not installed or returns a non-zero exit status then no further action is taken and this function will return. """ skip = os.environ.get('CEPH_VOLUME_SKIP_RESTORECON', '') if skip.lower() in ['1', 'true', 'yes']: logger.info( 'CEPH_VOLUME_SKIP_RESTORECON environ is set, will not call restorecon' ) return try: stdout, stderr, code = process.call(['selinuxenabled'], verbose_on_failure=False) except FileNotFoundError: logger.info('No SELinux found, skipping call to restorecon') return if code != 0: logger.info('SELinux is not enabled, will not call restorecon') return # restore selinux context to default policy values if which('restorecon').startswith('/'): if recursive: process.run(['restorecon', '-R', path]) else: process.run(['restorecon', path])
13,375
30.847619
128
py
null
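A toy version of the /proc/mounts parsing strategy used by the Mounts helper above: build a device-to-paths mapping, skipping pseudo-devices unless they are on the allow list. This is a hypothetical simplification for illustration (no realpath resolution or existence checks), not the class itself.

def device_to_paths(proc_mounts_lines, keep=('tmpfs', 'devtmpfs', '/dev/root')):
    mapping = {}
    for line in proc_mounts_lines:
        fields = line.split()
        if len(fields) < 2:
            continue
        device, path = fields[0], fields[1]
        # Only real block devices are interesting, plus a few pseudo
        # devices that some setups rely on.
        if not device.startswith('/') and device not in keep:
            continue
        mapping.setdefault(device, []).append(path)
    return mapping

sample = [
    '/dev/sda1 / ext4 rw,relatime 0 0',
    'tmpfs /run tmpfs rw,nosuid 0 0',
    'proc /proc proc rw 0 0',
]
print(device_to_paths(sample))  # {'/dev/sda1': ['/'], 'tmpfs': ['/run']}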
ceph-main/src/ceph-volume/ceph_volume/util/templates.py
osd_header = """ {:-^100}""".format('') osd_component_titles = """ Type Path LV Size % of device""" osd_reused_id = """ OSD id {id_: <55}""" osd_component = """ {_type: <15} {path: <55} {size: <15} {percent:.2%}""" osd_encryption = """ encryption: {enc: <15}""" total_osds = """ Total OSDs: {total_osds} """ def filtered_devices(devices): string = """ Filtered Devices:""" for device, info in devices.items(): string += """ %s""" % device for reason in info['reasons']: string += """ %s""" % reason string += "\n" return string ssd_volume_group = """ Solid State VG: Targets: {target: <25} Total size: {total_lv_size: <25} Total LVs: {total_lvs: <25} Size per LV: {lv_size: <25} Devices: {block_db_devices} """
881
16.64
104
py
null
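The templates above are plain str.format strings. A quick sketch of rendering one OSD row; the values here are fabricated and the local name is illustrative:

osd_row = """  {_type: <15} {path: <55} {size: <15} {percent:.2%}"""
print(osd_row.format(_type='block', path='/dev/sdb', size='9.00 GB', percent=0.95))
# '  block           /dev/sdb ...                       9.00 GB         95.00%'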
ceph-main/src/ceph-volume/plugin/zfs/setup.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """The setup script.""" from setuptools import setup, find_packages requirements = [ ] setup_requirements = [ ] setup( author="Willem Jan Withagen", author_email='[email protected]', classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Environment :: Console', 'Intended Audience :: Information Technology', 'Intended Audience :: System Administrators', 'Operating System :: POSIX :: FreeBSD', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', "Programming Language :: Python :: 2", 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', ], description="Manage Ceph OSDs on ZFS pool/volume/filesystem", install_requires=requirements, license="BSD license", include_package_data=True, keywords='ceph-volume-zfs', name='ceph-volume-zfs', packages=find_packages(include=['ceph_volume_zfs']), setup_requires=setup_requirements, url='https://github.com/ceph/ceph/src/ceph-volume/plugin/zfs', version='0.1.0', zip_safe=False, entry_points = dict( ceph_volume_handlers = [ 'zfs = ceph_volume_zfs.zfs:ZFS', ], ), )
1,330
28.577778
66
py
null
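The ceph_volume_handlers entry point declared in setup() above is how the main ceph-volume tool can discover this plugin at runtime. A hedged sketch of the consuming side, using the traditional pkg_resources API (newer code would use importlib.metadata.entry_points() instead):

import pkg_resources  # provided by setuptools

for ep in pkg_resources.iter_entry_points('ceph_volume_handlers'):
    handler = ep.load()  # e.g. resolves 'zfs = ceph_volume_zfs.zfs:ZFS'
    print(ep.name, handler)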
ceph-main/src/ceph-volume/plugin/zfs/ceph_volume_zfs/__init__.py
# -*- coding: utf-8 -*- """Top-level package for Ceph volume on ZFS.""" __author__ = """Willem Jan Withagen""" __email__ = '[email protected]' import ceph_volume_zfs.zfs from collections import namedtuple sys_info = namedtuple('sys_info', ['devices']) sys_info.devices = dict()
281
19.142857
47
py
null
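sys_info above uses the namedtuple class itself as a process-wide mutable cache: attributes are assigned on the class rather than on an instance, so every importer sees the same dict. A minimal demonstration of that pattern:

from collections import namedtuple

sys_info = namedtuple('sys_info', ['devices'])
sys_info.devices = dict()                 # class attribute, shared globally
sys_info.devices['/dev/ada0'] = {'mediasize': '40018599936'}
print(sys_info.devices)                   # visible to anyone importing sys_info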
ceph-main/src/ceph-volume/plugin/zfs/ceph_volume_zfs/zfs.py
# -*- coding: utf-8 -*- from __future__ import print_function import argparse import os import sys import logging from textwrap import dedent from ceph_volume import log, conf, configuration from ceph_volume import exceptions from ceph_volume import terminal # The ceph-volume-zfs specific code import ceph_volume_zfs.zfs from ceph_volume_zfs import devices # from ceph_volume_zfs.util import device from ceph_volume_zfs.devices import zfs # the supported actions from ceph_volume_zfs.devices.zfs import inventory from ceph_volume_zfs.devices.zfs import prepare from ceph_volume_zfs.devices.zfs import zap if __name__ == '__main__': zfs.ZFS() class ZFS(object): # help info for subcommands help = "Use ZFS as the underlying technology for OSDs" # help info for the plugin help_menu = "Deploy OSDs with ZFS" _help = dedent(""" Use ZFS as the underlying technology for OSDs {sub_zfshelp} """) name = 'zfs' def __init__(self, argv=None, parse=True): self.zfs_mapper = { 'inventory': inventory.Inventory, 'prepare': prepare.Prepare, 'zap': zap.Zap, } if argv is None: self.argv = sys.argv else: self.argv = argv if parse: self.main(self.argv) def print_help(self, warning=False): return self._help.format( sub_zfshelp=terminal.subhelp(self.zfs_mapper) ) def get_environ_vars(self): environ_vars = [] for key, value in os.environ.items(): if key.startswith('CEPH_'): environ_vars.append("%s=%s" % (key, value)) if not environ_vars: return '' else: environ_vars.insert(0, '\nEnviron Variables:') return '\n'.join(environ_vars) def load_ceph_conf_path(self, cluster_name='ceph'): abspath = '/etc/ceph/%s.conf' % cluster_name conf.path = os.getenv('CEPH_CONF', abspath) conf.cluster = cluster_name def stat_ceph_conf(self): try: configuration.load(conf.path) return terminal.green(conf.path) except exceptions.ConfigurationError as error: return terminal.red(error) def load_log_path(self): conf.log_path = os.getenv('CEPH_VOLUME_LOG_PATH', '/var/log/ceph') def _get_split_args(self): subcommands = self.zfs_mapper.keys() slice_on_index = len(self.argv) pruned_args = self.argv for count, arg in enumerate(pruned_args): if arg in subcommands: slice_on_index = count break return pruned_args[:slice_on_index], pruned_args[slice_on_index:] def main(self, argv=None): if argv is None: return self.load_ceph_conf_path() # these need to be available for the help, which gets parsed super # early self.load_ceph_conf_path() self.load_log_path() main_args, subcommand_args = self._get_split_args() # no flags where passed in, return the help menu instead of waiting for # argparse which will end up complaning that there are no args if len(argv) < 1: print(self.print_help(warning=True)) return parser = argparse.ArgumentParser( prog='ceph-volume-zfs', formatter_class=argparse.RawDescriptionHelpFormatter, description=self.print_help(), ) parser.add_argument( '--cluster', default='ceph', help='Cluster name (defaults to "ceph")', ) parser.add_argument( '--log-level', default='debug', help='Change the file log level (defaults to debug)', ) parser.add_argument( '--log-path', default='/var/log/ceph/', help='Change the log path (defaults to /var/log/ceph)', ) args = parser.parse_args(main_args) conf.log_path = args.log_path if os.path.isdir(conf.log_path): conf.log_path = os.path.join(args.log_path, 'ceph-volume-zfs.log') log.setup() logger = logging.getLogger(__name__) logger.info("Running command: ceph-volume-zfs %s %s", " ".join(main_args), " ".join(subcommand_args)) # set all variables from args and load everything needed according to # them 
self.load_ceph_conf_path(cluster_name=args.cluster) try: conf.ceph = configuration.load(conf.path) except exceptions.ConfigurationError as error: # we warn only here, because it is possible that the configuration # file is not needed, or that it will be loaded by some other means # (like reading from zfs tags) logger.exception('ignoring inability to load ceph.conf') terminal.red(error) # dispatch to sub-commands terminal.dispatch(self.zfs_mapper, subcommand_args)
5,044
31.973856
79
py
null
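_get_split_args() above separates global flags from the subcommand and its arguments by slicing argv at the first token that matches a known subcommand. A standalone sketch of the same idea:

def split_args(argv, subcommands=('inventory', 'prepare', 'zap')):
    for index, arg in enumerate(argv):
        if arg in subcommands:
            # everything before the subcommand is a global flag
            return argv[:index], argv[index:]
    return argv, []

print(split_args(['--cluster', 'ceph', 'zap', '/dev/ada3']))
# (['--cluster', 'ceph'], ['zap', '/dev/ada3'])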
ceph-main/src/ceph-volume/plugin/zfs/ceph_volume_zfs/api/__init__.py
""" Device API that can be shared among other implementations. """
67
16
58
py
null
ceph-main/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/__init__.py
# -*- coding: utf-8 -*- from . import zfs
42
13.333333
23
py
null
ceph-main/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/__init__.py
# -*- coding: utf-8 -*- import logging logger = logging.getLogger(__name__)
77
14.6
36
py
null
ceph-main/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/inventory.py
import argparse import json from textwrap import dedent # import ceph_volume.process from ceph_volume_zfs.util.disk import Disks class Inventory(object): help = 'Generate a list of available devices' def __init__(self, argv): self.argv = argv def format_report(self, inventory): if self.args.format == 'json': print(json.dumps(inventory.json_report())) elif self.args.format == 'json-pretty': print(json.dumps(inventory.json_report(), indent=4, sort_keys=True)) else: print(inventory.pretty_report()) def main(self): sub_command_help = dedent(""" Generate an inventory of available devices """) parser = argparse.ArgumentParser( prog='ceph-volume zfs inventory', description=sub_command_help, ) parser.add_argument( 'path', nargs='?', default=None, help=('Report on specific disk'), ) parser.add_argument( '--format', choices=['plain', 'json', 'json-pretty'], default='plain', help='Output format', ) self.args = parser.parse_args(self.argv) if self.args.path: self.format_report(Disks(self.args.path)) else: self.format_report(Disks())
1,363
25.745098
80
py
null
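The --format handling in format_report() above reduces to two variants of json.dumps plus a plain-text fallback. A minimal sketch with a stubbed report:

import json

report = [{'path': '/dev/ada3', 'available': True}]
print(json.dumps(report))                             # --format json
print(json.dumps(report, indent=4, sort_keys=True))   # --format json-pretty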
ceph-main/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/main.py
# vim: expandtab smarttab shiftwidth=4 softtabstop=4

import argparse

from textwrap import dedent

from ceph_volume import terminal

from . import inventory
from . import prepare
from . import zap


class ZFSDEV(object):

    help = 'Use ZFS to deploy OSDs'

    _help = dedent("""
    Use ZFS to deploy OSDs

    {sub_help}
    """)

    # map subcommand names to their handlers; main() and print_help()
    # below depend on this attribute
    mapper = {
        'inventory': inventory.Inventory,
        'prepare': prepare.Prepare,
        'zap': zap.Zap,
    }

    def __init__(self, argv):
        self.argv = argv

    def print_help(self, sub_help):
        return self._help.format(sub_help=sub_help)

    def main(self):
        terminal.dispatch(self.mapper, self.argv)
        parser = argparse.ArgumentParser(
            prog='ceph-volume zfs',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=self.print_help(terminal.subhelp(self.mapper)),
        )
        parser.parse_args(self.argv)
        if len(self.argv) <= 1:
            return parser.print_help()
890
23.081081
71
py
null
ceph-main/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/prepare.py
import argparse from textwrap import dedent # from ceph_volume.util import arg_validators class Prepare(object): help = 'Prepare a device' def __init__(self, argv): self.argv = argv def main(self): sub_command_help = dedent(""" Prepare a device """) parser = argparse.ArgumentParser( prog='ceph-volume zfs prepare', description=sub_command_help, ) if len(self.argv) == 0 or len(self.argv) > 0: print("Prepare: Print Help") print(sub_command_help) return
581
21.384615
53
py
null
ceph-main/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/zap.py
import argparse

from textwrap import dedent

# from ceph_volume.util import arg_validators


class Zap(object):

    help = 'Zap a device'

    def __init__(self, argv):
        self.argv = argv

    def main(self):
        sub_command_help = dedent("""
        Zap a device
        """)
        parser = argparse.ArgumentParser(
            prog='ceph-volume zfs zap',
            description=sub_command_help,
        )
        parser.add_argument(
            'devices',
            metavar='DEVICES',
            nargs='*',
            # type=arg_validators.ValidDevice(gpt_ok=True),
            default=[],
            help='Path to one or many lv (as vg/lv), partition (as /dev/sda1) or device (as /dev/sda)'
        )
        # placeholder: argument parsing is not wired up yet, so always
        # print the help text regardless of argv
        print("Zap: Print Help")
        print(sub_command_help)
        return
871
23.914286
102
py
null
ceph-main/src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/__init__.py
# -*- coding: utf-8 -*-
24
11.5
23
py
null
ceph-main/src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/disk.py
import re

from ceph_volume.util.disk import human_readable_size
from ceph_volume import process
from ceph_volume import sys_info

report_template = """
/dev/{geomname:<16} {mediasize:<16} {rotational!s:<7} {descr}"""
# {geomname:<25} {mediasize:<12} {rotational!s:<7} {mode!s:<9} {descr}"""


def geom_disk_parser(block):
    """
    Parses lines in `geom disk list` output.

    Geom name: ada3
    Providers:
    1. Name: ada3
       Mediasize: 40018599936 (37G)
       Sectorsize: 512
       Stripesize: 4096
       Stripeoffset: 0
       Mode: r2w2e4
       descr: Corsair CSSD-F40GB2
       lunid: 5000000000000236
       ident: 111465010000101800EC
       rotationrate: 0
       fwsectors: 63
       fwheads: 16

    :param block: A string, with the full block for `geom disk list`
    """
    pairs = block.split(';')
    parsed = {}
    for pair in pairs:
        if 'Providers' in pair:
            continue
        try:
            column, value = pair.split(':')
        except ValueError:
            continue
        # fixup
        column = re.sub(r"\s+", "", column)
        column = re.sub(r"^[0-9]+\.", "", column)
        value = value.strip()
        value = re.sub(r'\([0-9A-Z]+\)', '', value)
        parsed[column.lower()] = value
    return parsed


def get_disk(diskname):
    """
    Captures all available info from geom
    along with interesting metadata like
    sectors, size, vendor, solid/rotational, etc...

    Returns a dictionary, with all the geom fields as keys.
    """
    command = ['/sbin/geom', 'disk', 'list', re.sub('/dev/', '', diskname)]
    out, err, rc = process.call(command)
    geom_block = ""
    for line in out:
        line = line.strip()
        geom_block += ";" + line
    disk = geom_disk_parser(geom_block)
    return disk


def get_disks():
    command = ['/sbin/geom', 'disk', 'status', '-s']
    out, err, rc = process.call(command)
    disks = {}
    for path in out:
        dsk, rest1, rest2 = path.split()
        disk = get_disk(dsk)
        disks['/dev/' + dsk] = disk
    return disks


class Disks(object):

    def __init__(self, path=None):
        if not sys_info.devices:
            sys_info.devices = get_disks()
        self.disks = {}
        for k in sys_info.devices:
            if path is not None:
                if path in k:
                    self.disks[k] = Disk(k)
            else:
                self.disks[k] = Disk(k)

    def pretty_report(self, all=True):
        output = [
            report_template.format(
                geomname='Device Path',
                mediasize='Size',
                rotational='rotates',
                descr='Model name',
                mode='available',
            )]
        for disk in sorted(self.disks):
            output.append(self.disks[disk].report())
        return ''.join(output)

    def json_report(self):
        output = []
        for disk in sorted(self.disks):
            output.append(self.disks[disk].json_report())
        return output


class Disk(object):

    report_fields = [
        'reject_reasons',
        'available',
        'path',
        'sys_api',
    ]
    pretty_report_sys_fields = [
        'human_readable_size',
        'model',
        'removable',
        'ro',
        'rotational',
        'sas_address',
        'scheduler_mode',
        'vendor',
    ]

    def __init__(self, path):
        self.abspath = path
        self.path = path
        self.reject_reasons = []
        self.available = True
        self.sys_api = sys_info.devices.get(path)

    def report(self):
        return report_template.format(
            geomname=self.sys_api.get('geomname'),
            mediasize=human_readable_size(int(self.sys_api.get('mediasize'))),
            rotational=int(self.sys_api.get('rotationrate')) != 0,
            mode=self.sys_api.get('mode'),
            descr=self.sys_api.get('descr')
        )

    def json_report(self):
        output = {k.strip('_'): v for k, v in vars(self).items()}
        return output
3,995
25.818792
78
py
null
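geom_disk_parser() above splits one `geom disk list` block on ';' and normalizes each 'Key: value' pair. The sketch below reimplements that parsing on fabricated input; it is illustrative, not the module function, and it adds a maxsplit=1 that the original omits so values containing ':' survive.

import re

def parse_geom_block(block):
    parsed = {}
    for pair in block.split(';'):
        if 'Providers' in pair:
            continue
        try:
            column, value = pair.split(':', 1)   # maxsplit guards values with ':'
        except ValueError:
            continue
        column = re.sub(r'\s+', '', column)      # 'Geom name' -> 'Geomname'
        column = re.sub(r'^[0-9]+\.', '', column)
        value = re.sub(r'\([0-9A-Z]+\)', '', value).strip()
        parsed[column.lower()] = value
    return parsed

block = ';Geom name: ada3;Mediasize: 40018599936 (37G);rotationrate: 0'
print(parse_geom_block(block))
# {'geomname': 'ada3', 'mediasize': '40018599936', 'rotationrate': '0'}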
ceph-main/src/cephadm/build.py
#!/usr/bin/python3 """Build cephadm from one or more files into a standalone executable. """ # TODO: If cephadm is being built and packaged within a format such as RPM # do we have to do anything special wrt passing in the version # of python to build with? Even with the intermediate cmake layer? import argparse import compileall import logging import os import pathlib import shutil import subprocess import tempfile import sys HAS_ZIPAPP = False try: import zipapp HAS_ZIPAPP = True except ImportError: pass log = logging.getLogger(__name__) _VALID_VERS_VARS = [ "CEPH_GIT_VER", "CEPH_GIT_NICE_VER", "CEPH_RELEASE", "CEPH_RELEASE_NAME", "CEPH_RELEASE_TYPE", ] def _reexec(python): """Switch to the selected version of python by exec'ing into the desired python path. Sets the _BUILD_PYTHON_SET env variable as a sentinel to indicate exec has been performed. """ env = os.environ.copy() env["_BUILD_PYTHON_SET"] = python os.execvpe(python, [python, __file__] + sys.argv[1:], env) def _did_rexec(): """Returns true if the process has already exec'ed into the desired python version. """ return bool(os.environ.get("_BUILD_PYTHON_SET", "")) def _build(dest, src, versioning_vars=None): """Build the binary.""" os.chdir(src) tempdir = pathlib.Path(tempfile.mkdtemp(suffix=".cephadm.build")) log.debug("working in %s", tempdir) try: if os.path.isfile("requirements.txt"): _install_deps(tempdir) log.info("Copying contents") # TODO: currently the only file relevant to a compiled cephadm is the # cephadm.py file. Once cephadm is broken up into multiple py files # (and possibly other libs from python-common, etc) we'll want some # sort organized structure to track what gets copied into the # dir to be zipped. For now we just have a simple call to copy # (and rename) the one file we care about. shutil.copy("cephadm.py", tempdir / "__main__.py") if versioning_vars: generate_version_file(versioning_vars, tempdir / "_version.py") _compile(dest, tempdir) finally: shutil.rmtree(tempdir) def _compile(dest, tempdir): """Compile the zipapp.""" log.info("Byte-compiling py to pyc") compileall.compile_dir( tempdir, maxlevels=16, legacy=True, quiet=1, workers=0, ) # TODO we could explicitly pass a python version here log.info("Constructing the zipapp file") try: zipapp.create_archive( source=tempdir, target=dest, interpreter=sys.executable, compressed=True, ) log.info("Zipapp created with compression") except TypeError: # automatically fall back to uncompressed zipapp.create_archive( source=tempdir, target=dest, interpreter=sys.executable, ) log.info("Zipapp created without compression") def _install_deps(tempdir): """Install dependencies with pip.""" # TODO we could explicitly pass a python version here log.info("Installing dependencies") # apparently pip doesn't have an API, just a cli. 
subprocess.check_call( [ sys.executable, "-m", "pip", "install", "--requirement", "requirements.txt", "--target", tempdir, ] ) def generate_version_file(versioning_vars, dest): log.info("Generating version file") log.debug("versioning_vars=%r", versioning_vars) with open(dest, "w") as fh: print("# GENERATED FILE -- do not edit", file=fh) for key, value in versioning_vars: print(f"{key} = {value!r}", file=fh) def version_kv_pair(value): if "=" not in value: raise argparse.ArgumentTypeError(f"not a key=value pair: {value!r}") key, value = value.split("=", 1) if key not in _VALID_VERS_VARS: raise argparse.ArgumentTypeError(f"Unexpected key: {key!r}") return key, value def main(): handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter("cephadm/build.py: %(message)s")) log.addHandler(handler) log.setLevel(logging.INFO) log.debug("argv: %r", sys.argv) parser = argparse.ArgumentParser() parser.add_argument( "dest", help="Destination path name for new cephadm binary" ) parser.add_argument( "--source", help="Directory containing cephadm sources" ) parser.add_argument( "--python", help="The path to the desired version of python" ) parser.add_argument( "--set-version-var", "-S", type=version_kv_pair, dest="version_vars", action="append", help="Set a key=value pair in the generated version info file", ) args = parser.parse_args() if not _did_rexec() and args.python: _reexec(args.python) log.info( "Python Version: {v.major}.{v.minor}.{v.micro}".format( v=sys.version_info ) ) log.info("Args: %s", vars(args)) if not HAS_ZIPAPP: # Unconditionally display an error that the version of python # lacks zipapp (probably too old). print("error: zipapp module not found", file=sys.stderr) print( "(zipapp is available in Python 3.5 or later." " are you using a new enough version?)", file=sys.stderr, ) sys.exit(2) if args.source: source = pathlib.Path(args.source).absolute() else: source = pathlib.Path(__file__).absolute().parent dest = pathlib.Path(args.dest).absolute() log.info("Source Dir: %s", source) log.info("Destination Path: %s", dest) _build(dest, source, versioning_vars=args.version_vars) if __name__ == "__main__": main()
5,958
28.068293
78
py
null
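build.py above ultimately hands everything to zipapp.create_archive. The equivalent call, isolated; the paths here are hypothetical placeholders:

import zipapp

zipapp.create_archive(
    source='staging_dir',            # must contain a __main__.py
    target='cephadm.bin',            # single-file executable output
    interpreter='/usr/bin/python3',  # written into the shebang line
    compressed=True,                 # needs Python >= 3.7; build.py catches
                                     # the TypeError and retries uncompressed
)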
ceph-main/src/cephadm/build.sh
#!/bin/bash -ex SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" exec python3 $SCRIPT_DIR/build.py "$@"
120
19.166667
62
sh
null
ceph-main/src/cephadm/cephadm.py
#!/usr/bin/python3 import asyncio import asyncio.subprocess import argparse import datetime import fcntl import ipaddress import io import json import logging from logging.config import dictConfig import os import platform import pwd import random import shlex import shutil import socket import string import subprocess import sys import tempfile import time import errno import struct import ssl from enum import Enum from typing import Dict, List, Tuple, Optional, Union, Any, NoReturn, Callable, IO, Sequence, TypeVar, cast, Set, Iterable, TextIO, Generator import re import uuid from configparser import ConfigParser from contextlib import redirect_stdout, contextmanager from functools import wraps from glob import glob from io import StringIO from threading import Thread, Event from urllib.error import HTTPError, URLError from urllib.request import urlopen, Request from pathlib import Path FuncT = TypeVar('FuncT', bound=Callable) # Default container images ----------------------------------------------------- DEFAULT_IMAGE = 'quay.ceph.io/ceph-ci/ceph:main' DEFAULT_IMAGE_IS_MAIN = True DEFAULT_IMAGE_RELEASE = 'reef' DEFAULT_PROMETHEUS_IMAGE = 'quay.io/prometheus/prometheus:v2.43.0' DEFAULT_LOKI_IMAGE = 'docker.io/grafana/loki:2.4.0' DEFAULT_PROMTAIL_IMAGE = 'docker.io/grafana/promtail:2.4.0' DEFAULT_NODE_EXPORTER_IMAGE = 'quay.io/prometheus/node-exporter:v1.5.0' DEFAULT_ALERT_MANAGER_IMAGE = 'quay.io/prometheus/alertmanager:v0.25.0' DEFAULT_GRAFANA_IMAGE = 'quay.io/ceph/ceph-grafana:9.4.7' DEFAULT_HAPROXY_IMAGE = 'quay.io/ceph/haproxy:2.3' DEFAULT_KEEPALIVED_IMAGE = 'quay.io/ceph/keepalived:2.2.4' DEFAULT_SNMP_GATEWAY_IMAGE = 'docker.io/maxwo/snmp-notifier:v1.2.1' DEFAULT_ELASTICSEARCH_IMAGE = 'quay.io/omrizeneva/elasticsearch:6.8.23' DEFAULT_JAEGER_COLLECTOR_IMAGE = 'quay.io/jaegertracing/jaeger-collector:1.29' DEFAULT_JAEGER_AGENT_IMAGE = 'quay.io/jaegertracing/jaeger-agent:1.29' DEFAULT_JAEGER_QUERY_IMAGE = 'quay.io/jaegertracing/jaeger-query:1.29' DEFAULT_REGISTRY = 'docker.io' # normalize unqualified digests to this # ------------------------------------------------------------------------------ LATEST_STABLE_RELEASE = 'quincy' DATA_DIR = '/var/lib/ceph' LOG_DIR = '/var/log/ceph' LOCK_DIR = '/run/cephadm' LOGROTATE_DIR = '/etc/logrotate.d' SYSCTL_DIR = '/etc/sysctl.d' UNIT_DIR = '/etc/systemd/system' CEPH_CONF_DIR = 'config' CEPH_CONF = 'ceph.conf' CEPH_PUBKEY = 'ceph.pub' CEPH_KEYRING = 'ceph.client.admin.keyring' CEPH_DEFAULT_CONF = f'/etc/ceph/{CEPH_CONF}' CEPH_DEFAULT_KEYRING = f'/etc/ceph/{CEPH_KEYRING}' CEPH_DEFAULT_PUBKEY = f'/etc/ceph/{CEPH_PUBKEY}' LOG_DIR_MODE = 0o770 DATA_DIR_MODE = 0o700 DEFAULT_MODE = 0o600 CONTAINER_INIT = True MIN_PODMAN_VERSION = (2, 0, 2) CGROUPS_SPLIT_PODMAN_VERSION = (2, 1, 0) PIDS_LIMIT_UNLIMITED_PODMAN_VERSION = (3, 4, 1) CUSTOM_PS1 = r'[ceph: \u@\h \W]\$ ' DEFAULT_TIMEOUT = None # in seconds DEFAULT_RETRY = 15 DATEFMT = '%Y-%m-%dT%H:%M:%S.%fZ' QUIET_LOG_LEVEL = 9 # DEBUG is 10, so using 9 to be lower level than DEBUG NO_DEPRECATED = False logger: logging.Logger = None # type: ignore """ You can invoke cephadm in two ways: 1. The normal way, at the command line. 2. By piping the script to the python3 binary. In this latter case, you should prepend one or more lines to the beginning of the script. For arguments, injected_argv = [...] e.g., injected_argv = ['ls'] For reading stdin from the '--config-json -' argument, injected_stdin = '...' 
""" cached_stdin = None ################################## async def run_func(func: Callable, cmd: str) -> subprocess.CompletedProcess: logger.debug(f'running function {func.__name__}, with parms: {cmd}') response = func(cmd) return response async def concurrent_tasks(func: Callable, cmd_list: List[str]) -> List[Any]: tasks = [] for cmd in cmd_list: tasks.append(run_func(func, cmd)) data = await asyncio.gather(*tasks) return data class EndPoint: """EndPoint representing an ip:port format""" def __init__(self, ip: str, port: int) -> None: self.ip = ip self.port = port def __str__(self) -> str: return f'{self.ip}:{self.port}' def __repr__(self) -> str: return f'{self.ip}:{self.port}' class ContainerInfo: def __init__(self, container_id: str, image_name: str, image_id: str, start: str, version: str) -> None: self.container_id = container_id self.image_name = image_name self.image_id = image_id self.start = start self.version = version def __eq__(self, other: Any) -> bool: if not isinstance(other, ContainerInfo): return NotImplemented return (self.container_id == other.container_id and self.image_name == other.image_name and self.image_id == other.image_id and self.start == other.start and self.version == other.version) class DeploymentType(Enum): # Fresh deployment of a daemon. DEFAULT = 'Deploy' # Redeploying a daemon. Works the same as fresh # deployment minus port checking. REDEPLOY = 'Redeploy' # Reconfiguring a daemon. Rewrites config # files and potentially restarts daemon. RECONFIG = 'Reconfig' class BaseConfig: def __init__(self) -> None: self.image: str = '' self.docker: bool = False self.data_dir: str = DATA_DIR self.log_dir: str = LOG_DIR self.logrotate_dir: str = LOGROTATE_DIR self.sysctl_dir: str = SYSCTL_DIR self.unit_dir: str = UNIT_DIR self.verbose: bool = False self.timeout: Optional[int] = DEFAULT_TIMEOUT self.retry: int = DEFAULT_RETRY self.env: List[str] = [] self.memory_request: Optional[int] = None self.memory_limit: Optional[int] = None self.log_to_journald: Optional[bool] = None self.container_init: bool = CONTAINER_INIT self.container_engine: Optional[ContainerEngine] = None def set_from_args(self, args: argparse.Namespace) -> None: argdict: Dict[str, Any] = vars(args) for k, v in argdict.items(): if hasattr(self, k): setattr(self, k, v) class CephadmContext: def __init__(self) -> None: self.__dict__['_args'] = None self.__dict__['_conf'] = BaseConfig() def set_args(self, args: argparse.Namespace) -> None: self._conf.set_from_args(args) self._args = args def has_function(self) -> bool: return 'func' in self._args def __contains__(self, name: str) -> bool: return hasattr(self, name) def __getattr__(self, name: str) -> Any: if '_conf' in self.__dict__ and hasattr(self._conf, name): return getattr(self._conf, name) elif '_args' in self.__dict__ and hasattr(self._args, name): return getattr(self._args, name) else: return super().__getattribute__(name) def __setattr__(self, name: str, value: Any) -> None: if hasattr(self._conf, name): setattr(self._conf, name, value) elif hasattr(self._args, name): setattr(self._args, name, value) else: super().__setattr__(name, value) class ContainerEngine: def __init__(self) -> None: self.path = find_program(self.EXE) @property def EXE(self) -> str: raise NotImplementedError() def __str__(self) -> str: return f'{self.EXE} ({self.path})' class Podman(ContainerEngine): EXE = 'podman' def __init__(self) -> None: super().__init__() self._version: Optional[Tuple[int, ...]] = None @property def version(self) -> Tuple[int, ...]: if 
self._version is None: raise RuntimeError('Please call `get_version` first') return self._version def get_version(self, ctx: CephadmContext) -> None: out, _, _ = call_throws(ctx, [self.path, 'version', '--format', '{{.Client.Version}}'], verbosity=CallVerbosity.QUIET) self._version = _parse_podman_version(out) def __str__(self) -> str: version = '.'.join(map(str, self.version)) return f'{self.EXE} ({self.path}) version {version}' class Docker(ContainerEngine): EXE = 'docker' CONTAINER_PREFERENCE = (Podman, Docker) # prefer podman to docker # During normal cephadm operations (cephadm ls, gather-facts, etc ) we use: # stdout: for JSON output only # stderr: for error, debug, info, etc logging_config = { 'version': 1, 'disable_existing_loggers': True, 'formatters': { 'cephadm': { 'format': '%(asctime)s %(thread)x %(levelname)s %(message)s' }, }, 'handlers': { 'console': { 'level': 'INFO', 'class': 'logging.StreamHandler', }, 'log_file': { 'level': 'DEBUG', 'class': 'logging.handlers.WatchedFileHandler', 'formatter': 'cephadm', 'filename': '%s/cephadm.log' % LOG_DIR, } }, 'loggers': { '': { 'level': 'DEBUG', 'handlers': ['console', 'log_file'], } } } class ExcludeErrorsFilter(logging.Filter): def filter(self, record: logging.LogRecord) -> bool: """Only lets through log messages with log level below WARNING .""" return record.levelno < logging.WARNING # When cephadm is used as standard binary (bootstrap, rm-cluster, etc) we use: # stdout: for debug and info # stderr: for errors and warnings interactive_logging_config = { 'version': 1, 'filters': { 'exclude_errors': { '()': ExcludeErrorsFilter } }, 'disable_existing_loggers': True, 'formatters': { 'cephadm': { 'format': '%(asctime)s %(thread)x %(levelname)s %(message)s' }, }, 'handlers': { 'console_stdout': { 'level': 'INFO', 'class': 'logging.StreamHandler', 'filters': ['exclude_errors'], 'stream': sys.stdout }, 'console_stderr': { 'level': 'WARNING', 'class': 'logging.StreamHandler', 'stream': sys.stderr }, 'log_file': { 'level': 'DEBUG', 'class': 'logging.handlers.WatchedFileHandler', 'formatter': 'cephadm', 'filename': '%s/cephadm.log' % LOG_DIR, } }, 'loggers': { '': { 'level': 'DEBUG', 'handlers': ['console_stdout', 'console_stderr', 'log_file'], } } } class termcolor: yellow = '\033[93m' red = '\033[31m' end = '\033[0m' class Error(Exception): pass class ClusterAlreadyExists(Exception): pass class TimeoutExpired(Error): pass class UnauthorizedRegistryError(Error): pass ################################## class Ceph(object): daemons = ('mon', 'mgr', 'osd', 'mds', 'rgw', 'rbd-mirror', 'crash', 'cephfs-mirror', 'ceph-exporter') gateways = ('iscsi', 'nfs') ################################## class OSD(object): @staticmethod def get_sysctl_settings() -> List[str]: return [ '# allow a large number of OSDs', 'fs.aio-max-nr = 1048576', 'kernel.pid_max = 4194304', ] ################################## class SNMPGateway: """Defines an SNMP gateway between Prometheus and SNMP monitoring Frameworks""" daemon_type = 'snmp-gateway' SUPPORTED_VERSIONS = ['V2c', 'V3'] default_image = DEFAULT_SNMP_GATEWAY_IMAGE DEFAULT_PORT = 9464 env_filename = 'snmp-gateway.conf' def __init__(self, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str], config_json: Dict[str, Any], image: Optional[str] = None) -> None: self.ctx = ctx self.fsid = fsid self.daemon_id = daemon_id self.image = image or SNMPGateway.default_image self.uid = config_json.get('uid', 0) self.gid = config_json.get('gid', 0) self.destination = config_json.get('destination', '') self.snmp_version = 
config_json.get('snmp_version', 'V2c') self.snmp_community = config_json.get('snmp_community', 'public') self.log_level = config_json.get('log_level', 'info') self.snmp_v3_auth_username = config_json.get('snmp_v3_auth_username', '') self.snmp_v3_auth_password = config_json.get('snmp_v3_auth_password', '') self.snmp_v3_auth_protocol = config_json.get('snmp_v3_auth_protocol', '') self.snmp_v3_priv_protocol = config_json.get('snmp_v3_priv_protocol', '') self.snmp_v3_priv_password = config_json.get('snmp_v3_priv_password', '') self.snmp_v3_engine_id = config_json.get('snmp_v3_engine_id', '') self.validate() @classmethod def init(cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]) -> 'SNMPGateway': cfgs = fetch_configs(ctx) assert cfgs # assert some config data was found return cls(ctx, fsid, daemon_id, cfgs, ctx.image) @staticmethod def get_version(ctx: CephadmContext, fsid: str, daemon_id: str) -> Optional[str]: """Return the version of the notifier from it's http endpoint""" path = os.path.join(ctx.data_dir, fsid, f'snmp-gateway.{daemon_id}', 'unit.meta') try: with open(path, 'r') as env: metadata = json.loads(env.read()) except (OSError, json.JSONDecodeError): return None ports = metadata.get('ports', []) if not ports: return None try: with urlopen(f'http://127.0.0.1:{ports[0]}/') as r: html = r.read().decode('utf-8').split('\n') except (HTTPError, URLError): return None for h in html: stripped = h.strip() if stripped.startswith(('<pre>', '<PRE>')) and \ stripped.endswith(('</pre>', '</PRE>')): # <pre>(version=1.2.1, branch=HEAD, revision=7... return stripped.split(',')[0].split('version=')[1] return None @property def port(self) -> int: ports = fetch_tcp_ports(self.ctx) if not ports: return self.DEFAULT_PORT return ports[0] def get_daemon_args(self) -> List[str]: v3_args = [] base_args = [ f'--web.listen-address=:{self.port}', f'--snmp.destination={self.destination}', f'--snmp.version={self.snmp_version}', f'--log.level={self.log_level}', '--snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl' ] if self.snmp_version == 'V3': # common auth settings v3_args.extend([ '--snmp.authentication-enabled', f'--snmp.authentication-protocol={self.snmp_v3_auth_protocol}', f'--snmp.security-engine-id={self.snmp_v3_engine_id}' ]) # authPriv setting is applied if we have a privacy protocol setting if self.snmp_v3_priv_protocol: v3_args.extend([ '--snmp.private-enabled', f'--snmp.private-protocol={self.snmp_v3_priv_protocol}' ]) return base_args + v3_args @property def data_dir(self) -> str: return os.path.join(self.ctx.data_dir, self.ctx.fsid, f'{self.daemon_type}.{self.daemon_id}') @property def conf_file_path(self) -> str: return os.path.join(self.data_dir, self.env_filename) def create_daemon_conf(self) -> None: """Creates the environment file holding 'secrets' passed to the snmp-notifier daemon""" with write_new(self.conf_file_path) as f: if self.snmp_version == 'V2c': f.write(f'SNMP_NOTIFIER_COMMUNITY={self.snmp_community}\n') else: f.write(f'SNMP_NOTIFIER_AUTH_USERNAME={self.snmp_v3_auth_username}\n') f.write(f'SNMP_NOTIFIER_AUTH_PASSWORD={self.snmp_v3_auth_password}\n') if self.snmp_v3_priv_password: f.write(f'SNMP_NOTIFIER_PRIV_PASSWORD={self.snmp_v3_priv_password}\n') def validate(self) -> None: """Validate the settings Raises: Error: if the fsid doesn't look like an fsid Error: if the snmp version is not supported Error: destination IP and port address missing """ if not is_fsid(self.fsid): raise Error(f'not a valid fsid: {self.fsid}') if self.snmp_version not 
in SNMPGateway.SUPPORTED_VERSIONS: raise Error(f'not a valid snmp version: {self.snmp_version}') if not self.destination: raise Error('config is missing destination attribute(<ip>:<port>) of the target SNMP listener') ################################## class Monitoring(object): """Define the configs for the monitoring containers""" port_map = { 'prometheus': [9095], # Avoid default 9090, due to conflict with cockpit UI 'node-exporter': [9100], 'grafana': [3000], 'alertmanager': [9093, 9094], 'loki': [3100], 'promtail': [9080] } components = { 'prometheus': { 'image': DEFAULT_PROMETHEUS_IMAGE, 'cpus': '2', 'memory': '4GB', 'args': [ '--config.file=/etc/prometheus/prometheus.yml', '--storage.tsdb.path=/prometheus', ], 'config-json-files': [ 'prometheus.yml', ], }, 'loki': { 'image': DEFAULT_LOKI_IMAGE, 'cpus': '1', 'memory': '1GB', 'args': [ '--config.file=/etc/loki/loki.yml', ], 'config-json-files': [ 'loki.yml' ], }, 'promtail': { 'image': DEFAULT_PROMTAIL_IMAGE, 'cpus': '1', 'memory': '1GB', 'args': [ '--config.file=/etc/promtail/promtail.yml', ], 'config-json-files': [ 'promtail.yml', ], }, 'node-exporter': { 'image': DEFAULT_NODE_EXPORTER_IMAGE, 'cpus': '1', 'memory': '1GB', 'args': [ '--no-collector.timex' ], }, 'grafana': { 'image': DEFAULT_GRAFANA_IMAGE, 'cpus': '2', 'memory': '4GB', 'args': [], 'config-json-files': [ 'grafana.ini', 'provisioning/datasources/ceph-dashboard.yml', 'certs/cert_file', 'certs/cert_key', ], }, 'alertmanager': { 'image': DEFAULT_ALERT_MANAGER_IMAGE, 'cpus': '2', 'memory': '2GB', 'args': [ '--cluster.listen-address=:{}'.format(port_map['alertmanager'][1]), ], 'config-json-files': [ 'alertmanager.yml', ], 'config-json-args': [ 'peers', ], }, } # type: ignore @staticmethod def get_version(ctx, container_id, daemon_type): # type: (CephadmContext, str, str) -> str """ :param: daemon_type Either "prometheus", "alertmanager", "loki", "promtail" or "node-exporter" """ assert daemon_type in ('prometheus', 'alertmanager', 'node-exporter', 'loki', 'promtail') cmd = daemon_type.replace('-', '_') code = -1 err = '' out = '' version = '' if daemon_type == 'alertmanager': for cmd in ['alertmanager', 'prometheus-alertmanager']: out, err, code = call(ctx, [ ctx.container_engine.path, 'exec', container_id, cmd, '--version' ], verbosity=CallVerbosity.QUIET) if code == 0: break cmd = 'alertmanager' # reset cmd for version extraction else: out, err, code = call(ctx, [ ctx.container_engine.path, 'exec', container_id, cmd, '--version' ], verbosity=CallVerbosity.QUIET) if code == 0: if err.startswith('%s, version ' % cmd): version = err.split(' ')[2] elif out.startswith('%s, version ' % cmd): version = out.split(' ')[2] return version ################################## @contextmanager def write_new( destination: Union[str, Path], *, owner: Optional[Tuple[int, int]] = None, perms: Optional[int] = DEFAULT_MODE, encoding: Optional[str] = None, ) -> Generator[IO, None, None]: """Write a new file in a robust manner, optionally specifying the owner, permissions, or encoding. This function takes care to never leave a file in a partially-written state due to a crash or power outage by writing to temporary file and then renaming that temp file over to the final destination once all data is written. Note that the temporary files can be leaked but only for a "crash" or power outage - regular exceptions will clean up the temporary file. 
""" destination = os.path.abspath(destination) tempname = f'{destination}.new' open_kwargs: Dict[str, Any] = {} if encoding: open_kwargs['encoding'] = encoding try: with open(tempname, 'w', **open_kwargs) as fh: yield fh fh.flush() os.fsync(fh.fileno()) if owner is not None: os.fchown(fh.fileno(), *owner) if perms is not None: os.fchmod(fh.fileno(), perms) except Exception: os.unlink(tempname) raise os.rename(tempname, destination) def populate_files(config_dir, config_files, uid, gid): # type: (str, Dict, int, int) -> None """create config files for different services""" for fname in config_files: config_file = os.path.join(config_dir, fname) config_content = dict_get_join(config_files, fname) logger.info('Write file: %s' % (config_file)) with write_new(config_file, owner=(uid, gid), encoding='utf-8') as f: f.write(config_content) class NFSGanesha(object): """Defines a NFS-Ganesha container""" daemon_type = 'nfs' entrypoint = '/usr/bin/ganesha.nfsd' daemon_args = ['-F', '-L', 'STDERR'] required_files = ['ganesha.conf'] port_map = { 'nfs': 2049, } def __init__(self, ctx, fsid, daemon_id, config_json, image=DEFAULT_IMAGE): # type: (CephadmContext, str, Union[int, str], Dict, str) -> None self.ctx = ctx self.fsid = fsid self.daemon_id = daemon_id self.image = image # config-json options self.pool = dict_get(config_json, 'pool', require=True) self.namespace = dict_get(config_json, 'namespace') self.userid = dict_get(config_json, 'userid') self.extra_args = dict_get(config_json, 'extra_args', []) self.files = dict_get(config_json, 'files', {}) self.rgw = dict_get(config_json, 'rgw', {}) # validate the supplied args self.validate() @classmethod def init(cls, ctx, fsid, daemon_id): # type: (CephadmContext, str, Union[int, str]) -> NFSGanesha return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image) def get_container_mounts(self, data_dir): # type: (str) -> Dict[str, str] mounts = dict() mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z' mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z' mounts[os.path.join(data_dir, 'etc/ganesha')] = '/etc/ganesha:z' if self.rgw: cluster = self.rgw.get('cluster', 'ceph') rgw_user = self.rgw.get('user', 'admin') mounts[os.path.join(data_dir, 'keyring.rgw')] = \ '/var/lib/ceph/radosgw/%s-%s/keyring:z' % (cluster, rgw_user) return mounts @staticmethod def get_container_envs(): # type: () -> List[str] envs = [ 'CEPH_CONF=%s' % (CEPH_DEFAULT_CONF) ] return envs @staticmethod def get_version(ctx, container_id): # type: (CephadmContext, str) -> Optional[str] version = None out, err, code = call(ctx, [ctx.container_engine.path, 'exec', container_id, NFSGanesha.entrypoint, '-v'], verbosity=CallVerbosity.QUIET) if code == 0: match = re.search(r'NFS-Ganesha Release\s*=\s*[V]*([\d.]+)', out) if match: version = match.group(1) return version def validate(self): # type: () -> None if not is_fsid(self.fsid): raise Error('not an fsid: %s' % self.fsid) if not self.daemon_id: raise Error('invalid daemon_id: %s' % self.daemon_id) if not self.image: raise Error('invalid image: %s' % self.image) # check for the required files if self.required_files: for fname in self.required_files: if fname not in self.files: raise Error('required file missing from config-json: %s' % fname) # check for an RGW config if self.rgw: if not self.rgw.get('keyring'): raise Error('RGW keyring is missing') if not self.rgw.get('user'): raise Error('RGW user is missing') def get_daemon_name(self): # type: () -> str return '%s.%s' % (self.daemon_type, self.daemon_id) def 
get_container_name(self, desc=None): # type: (Optional[str]) -> str cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) if desc: cname = '%s-%s' % (cname, desc) return cname def get_daemon_args(self): # type: () -> List[str] return self.daemon_args + self.extra_args def create_daemon_dirs(self, data_dir, uid, gid): # type: (str, int, int) -> None """Create files under the container data dir""" if not os.path.isdir(data_dir): raise OSError('data_dir is not a directory: %s' % (data_dir)) logger.info('Creating ganesha config...') # create the ganesha conf dir config_dir = os.path.join(data_dir, 'etc/ganesha') makedirs(config_dir, uid, gid, 0o755) # populate files from the config-json populate_files(config_dir, self.files, uid, gid) # write the RGW keyring if self.rgw: keyring_path = os.path.join(data_dir, 'keyring.rgw') with write_new(keyring_path, owner=(uid, gid)) as f: f.write(self.rgw.get('keyring', '')) ################################## class CephIscsi(object): """Defines a Ceph-Iscsi container""" daemon_type = 'iscsi' entrypoint = '/usr/bin/rbd-target-api' required_files = ['iscsi-gateway.cfg'] def __init__(self, ctx, fsid, daemon_id, config_json, image=DEFAULT_IMAGE): # type: (CephadmContext, str, Union[int, str], Dict, str) -> None self.ctx = ctx self.fsid = fsid self.daemon_id = daemon_id self.image = image # config-json options self.files = dict_get(config_json, 'files', {}) # validate the supplied args self.validate() @classmethod def init(cls, ctx, fsid, daemon_id): # type: (CephadmContext, str, Union[int, str]) -> CephIscsi return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image) @staticmethod def get_container_mounts(data_dir, log_dir): # type: (str, str) -> Dict[str, str] mounts = dict() mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z' mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z' mounts[os.path.join(data_dir, 'iscsi-gateway.cfg')] = '/etc/ceph/iscsi-gateway.cfg:z' mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config' mounts[log_dir] = '/var/log:z' mounts['/dev'] = '/dev' return mounts @staticmethod def get_container_binds(): # type: () -> List[List[str]] binds = [] lib_modules = ['type=bind', 'source=/lib/modules', 'destination=/lib/modules', 'ro=true'] binds.append(lib_modules) return binds @staticmethod def get_version(ctx, container_id): # type: (CephadmContext, str) -> Optional[str] version = None out, err, code = call(ctx, [ctx.container_engine.path, 'exec', container_id, '/usr/bin/python3', '-c', "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)"], verbosity=CallVerbosity.QUIET) if code == 0: version = out.strip() return version def validate(self): # type: () -> None if not is_fsid(self.fsid): raise Error('not an fsid: %s' % self.fsid) if not self.daemon_id: raise Error('invalid daemon_id: %s' % self.daemon_id) if not self.image: raise Error('invalid image: %s' % self.image) # check for the required files if self.required_files: for fname in self.required_files: if fname not in self.files: raise Error('required file missing from config-json: %s' % fname) def get_daemon_name(self): # type: () -> str return '%s.%s' % (self.daemon_type, self.daemon_id) def get_container_name(self, desc=None): # type: (Optional[str]) -> str cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) if desc: cname = '%s-%s' % (cname, desc) return cname def create_daemon_dirs(self, data_dir, uid, gid): # type: (str, int, int) -> None """Create files under the container data dir""" if not 
os.path.isdir(data_dir): raise OSError('data_dir is not a directory: %s' % (data_dir)) logger.info('Creating ceph-iscsi config...') configfs_dir = os.path.join(data_dir, 'configfs') makedirs(configfs_dir, uid, gid, 0o755) # populate files from the config-json populate_files(data_dir, self.files, uid, gid) @staticmethod def configfs_mount_umount(data_dir, mount=True): # type: (str, bool) -> List[str] mount_path = os.path.join(data_dir, 'configfs') if mount: cmd = 'if ! grep -qs {0} /proc/mounts; then ' \ 'mount -t configfs none {0}; fi'.format(mount_path) else: cmd = 'if grep -qs {0} /proc/mounts; then ' \ 'umount {0}; fi'.format(mount_path) return cmd.split() def get_tcmu_runner_container(self): # type: () -> CephContainer # daemon_id, is used to generated the cid and pid files used by podman but as both tcmu-runner # and rbd-target-api have the same daemon_id, it conflits and prevent the second container from # starting. .tcmu runner is appended to the daemon_id to fix that. tcmu_container = get_container(self.ctx, self.fsid, self.daemon_type, str(self.daemon_id) + '.tcmu') tcmu_container.entrypoint = '/usr/bin/tcmu-runner' tcmu_container.cname = self.get_container_name(desc='tcmu') return tcmu_container ################################## class CephExporter(object): """Defines a Ceph exporter container""" daemon_type = 'ceph-exporter' entrypoint = '/usr/bin/ceph-exporter' DEFAULT_PORT = 9926 port_map = { 'ceph-exporter': DEFAULT_PORT, } def __init__(self, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str], config_json: Dict[str, Any], image: str = DEFAULT_IMAGE) -> None: self.ctx = ctx self.fsid = fsid self.daemon_id = daemon_id self.image = image self.sock_dir = config_json.get('sock-dir', '/var/run/ceph/') ipv4_addrs, ipv6_addrs = get_ip_addresses(get_hostname()) # use the first ipv4 (if any) otherwise use the first ipv6 addrs = next(iter(ipv4_addrs or ipv6_addrs), None) self.addrs = config_json.get('addrs', addrs) self.port = config_json.get('port', self.DEFAULT_PORT) self.prio_limit = config_json.get('prio-limit', 5) self.stats_period = config_json.get('stats-period', 5) self.validate() @classmethod def init(cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]) -> 'CephExporter': return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image) @staticmethod def get_container_mounts() -> Dict[str, str]: mounts = dict() mounts['/var/run/ceph'] = '/var/run/ceph:z' return mounts def get_daemon_args(self) -> List[str]: args = [ f'--sock-dir={self.sock_dir}', f'--addrs={self.addrs}', f'--port={self.port}', f'--prio-limit={self.prio_limit}', f'--stats-period={self.stats_period}', ] return args def validate(self) -> None: if not os.path.isdir(self.sock_dir): raise Error(f'Directory does not exist. 
Got: {self.sock_dir}') ################################## class HAproxy(object): """Defines an HAproxy container""" daemon_type = 'haproxy' required_files = ['haproxy.cfg'] default_image = DEFAULT_HAPROXY_IMAGE def __init__(self, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str], config_json: Dict, image: str) -> None: self.ctx = ctx self.fsid = fsid self.daemon_id = daemon_id self.image = image # config-json options self.files = dict_get(config_json, 'files', {}) self.validate() @classmethod def init(cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]) -> 'HAproxy': return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image) def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: """Create files under the container data dir""" if not os.path.isdir(data_dir): raise OSError('data_dir is not a directory: %s' % (data_dir)) # create additional directories in data dir for HAproxy to use if not os.path.isdir(os.path.join(data_dir, 'haproxy')): makedirs(os.path.join(data_dir, 'haproxy'), uid, gid, DATA_DIR_MODE) data_dir = os.path.join(data_dir, 'haproxy') populate_files(data_dir, self.files, uid, gid) def get_daemon_args(self) -> List[str]: return ['haproxy', '-f', '/var/lib/haproxy/haproxy.cfg'] def validate(self): # type: () -> None if not is_fsid(self.fsid): raise Error('not an fsid: %s' % self.fsid) if not self.daemon_id: raise Error('invalid daemon_id: %s' % self.daemon_id) if not self.image: raise Error('invalid image: %s' % self.image) # check for the required files if self.required_files: for fname in self.required_files: if fname not in self.files: raise Error('required file missing from config-json: %s' % fname) def get_daemon_name(self): # type: () -> str return '%s.%s' % (self.daemon_type, self.daemon_id) def get_container_name(self, desc=None): # type: (Optional[str]) -> str cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) if desc: cname = '%s-%s' % (cname, desc) return cname def extract_uid_gid_haproxy(self) -> Tuple[int, int]: # better directory for this? 
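# Note: extract_uid_gid() only stats the given path inside the image to # learn which uid/gid owns it, so any path guaranteed to exist in the # haproxy image would work as the probe here.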
return extract_uid_gid(self.ctx, file_path='/var/lib') @staticmethod def get_container_mounts(data_dir: str) -> Dict[str, str]: mounts = dict() mounts[os.path.join(data_dir, 'haproxy')] = '/var/lib/haproxy' return mounts @staticmethod def get_sysctl_settings() -> List[str]: return [ '# IP forwarding and non-local bind', 'net.ipv4.ip_forward = 1', 'net.ipv4.ip_nonlocal_bind = 1', ] ################################## class Keepalived(object): """Defines a Keepalived container""" daemon_type = 'keepalived' required_files = ['keepalived.conf'] default_image = DEFAULT_KEEPALIVED_IMAGE def __init__(self, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str], config_json: Dict, image: str) -> None: self.ctx = ctx self.fsid = fsid self.daemon_id = daemon_id self.image = image # config-json options self.files = dict_get(config_json, 'files', {}) self.validate() @classmethod def init(cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]) -> 'Keepalived': return cls(ctx, fsid, daemon_id, fetch_configs(ctx), ctx.image) def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: """Create files under the container data dir""" if not os.path.isdir(data_dir): raise OSError('data_dir is not a directory: %s' % (data_dir)) # create additional directories in data dir for keepalived to use if not os.path.isdir(os.path.join(data_dir, 'keepalived')): makedirs(os.path.join(data_dir, 'keepalived'), uid, gid, DATA_DIR_MODE) # populate files from the config-json populate_files(data_dir, self.files, uid, gid) def validate(self): # type: () -> None if not is_fsid(self.fsid): raise Error('not an fsid: %s' % self.fsid) if not self.daemon_id: raise Error('invalid daemon_id: %s' % self.daemon_id) if not self.image: raise Error('invalid image: %s' % self.image) # check for the required files if self.required_files: for fname in self.required_files: if fname not in self.files: raise Error('required file missing from config-json: %s' % fname) def get_daemon_name(self): # type: () -> str return '%s.%s' % (self.daemon_type, self.daemon_id) def get_container_name(self, desc=None): # type: (Optional[str]) -> str cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name()) if desc: cname = '%s-%s' % (cname, desc) return cname @staticmethod def get_container_envs(): # type: () -> List[str] envs = [ 'KEEPALIVED_AUTOCONF=false', 'KEEPALIVED_CONF=/etc/keepalived/keepalived.conf', 'KEEPALIVED_CMD=/usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf', 'KEEPALIVED_DEBUG=false' ] return envs @staticmethod def get_sysctl_settings() -> List[str]: return [ '# IP forwarding and non-local bind', 'net.ipv4.ip_forward = 1', 'net.ipv4.ip_nonlocal_bind = 1', ] def extract_uid_gid_keepalived(self) -> Tuple[int, int]: # better directory for this? 
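# Same probe as HAproxy.extract_uid_gid_haproxy() above: only the # ownership of an existing in-image path is read.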
return extract_uid_gid(self.ctx, file_path='/var/lib') @staticmethod def get_container_mounts(data_dir: str) -> Dict[str, str]: mounts = dict() mounts[os.path.join(data_dir, 'keepalived.conf')] = '/etc/keepalived/keepalived.conf' return mounts ################################## class Tracing(object): """Define the configs for the jaeger tracing containers""" components: Dict[str, Dict[str, Any]] = { 'elasticsearch': { 'image': DEFAULT_ELASTICSEARCH_IMAGE, 'envs': ['discovery.type=single-node'] }, 'jaeger-agent': { 'image': DEFAULT_JAEGER_AGENT_IMAGE, }, 'jaeger-collector': { 'image': DEFAULT_JAEGER_COLLECTOR_IMAGE, }, 'jaeger-query': { 'image': DEFAULT_JAEGER_QUERY_IMAGE, }, } # type: ignore @staticmethod def set_configuration(config: Dict[str, str], daemon_type: str) -> None: if daemon_type in ['jaeger-collector', 'jaeger-query']: assert 'elasticsearch_nodes' in config Tracing.components[daemon_type]['envs'] = [ 'SPAN_STORAGE_TYPE=elasticsearch', f'ES_SERVER_URLS={config["elasticsearch_nodes"]}'] if daemon_type == 'jaeger-agent': assert 'collector_nodes' in config Tracing.components[daemon_type]['daemon_args'] = [ f'--reporter.grpc.host-port={config["collector_nodes"]}', '--processor.jaeger-compact.server-host-port=6799' ] ################################## class CustomContainer(object): """Defines a custom container""" daemon_type = 'container' def __init__(self, fsid: str, daemon_id: Union[int, str], config_json: Dict, image: str) -> None: self.fsid = fsid self.daemon_id = daemon_id self.image = image # config-json options self.entrypoint = dict_get(config_json, 'entrypoint') self.uid = dict_get(config_json, 'uid', 65534) # nobody self.gid = dict_get(config_json, 'gid', 65534) # nobody self.volume_mounts = dict_get(config_json, 'volume_mounts', {}) self.args = dict_get(config_json, 'args', []) self.envs = dict_get(config_json, 'envs', []) self.privileged = dict_get(config_json, 'privileged', False) self.bind_mounts = dict_get(config_json, 'bind_mounts', []) self.ports = dict_get(config_json, 'ports', []) self.dirs = dict_get(config_json, 'dirs', []) self.files = dict_get(config_json, 'files', {}) @classmethod def init(cls, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str]) -> 'CustomContainer': return cls(fsid, daemon_id, fetch_configs(ctx), ctx.image) def create_daemon_dirs(self, data_dir: str, uid: int, gid: int) -> None: """ Create dirs/files below the container data directory. """ logger.info('Creating custom container configuration ' 'dirs/files in {} ...'.format(data_dir)) if not os.path.isdir(data_dir): raise OSError('data_dir is not a directory: %s' % data_dir) for dir_path in self.dirs: logger.info('Creating directory: {}'.format(dir_path)) dir_path = os.path.join(data_dir, dir_path.strip('/')) makedirs(dir_path, uid, gid, 0o755) for file_path in self.files: logger.info('Creating file: {}'.format(file_path)) content = dict_get_join(self.files, file_path) file_path = os.path.join(data_dir, file_path.strip('/')) with write_new(file_path, owner=(uid, gid), encoding='utf-8') as f: f.write(content) def get_daemon_args(self) -> List[str]: return [] def get_container_args(self) -> List[str]: return self.args def get_container_envs(self) -> List[str]: return self.envs def get_container_mounts(self, data_dir: str) -> Dict[str, str]: """ Get the volume mounts. Relative source paths will be located below `/var/lib/ceph/<cluster-fsid>/<daemon-name>`. 
Example: { /foo/conf: /conf foo/conf: /conf } becomes { /foo/conf: /conf /var/lib/ceph/<cluster-fsid>/<daemon-name>/foo/conf: /conf } """ mounts = {} for source, destination in self.volume_mounts.items(): source = os.path.join(data_dir, source) mounts[source] = destination return mounts def get_container_binds(self, data_dir: str) -> List[List[str]]: """ Get the bind mounts. Relative `source=...` paths will be located below `/var/lib/ceph/<cluster-fsid>/<daemon-name>`. Example: [ 'type=bind', 'source=lib/modules', 'destination=/lib/modules', 'ro=true' ] becomes [ ... 'source=/var/lib/ceph/<cluster-fsid>/<daemon-name>/lib/modules', ... ] """ binds = self.bind_mounts.copy() for bind in binds: for index, value in enumerate(bind): match = re.match(r'^source=(.+)$', value) if match: bind[index] = 'source={}'.format(os.path.join( data_dir, match.group(1))) return binds ################################## def touch(file_path: str, uid: Optional[int] = None, gid: Optional[int] = None) -> None: Path(file_path).touch() if uid and gid: os.chown(file_path, uid, gid) ################################## def dict_get(d: Dict, key: str, default: Any = None, require: bool = False) -> Any: """ Helper function to get a key from a dictionary. :param d: The dictionary to process. :param key: The name of the key to get. :param default: The default value in case the key does not exist. Default is `None`. :param require: Set to `True` if the key is required. An exception will be raised if the key does not exist in the given dictionary. :return: Returns the value of the given key. :raises: :exc:`Error` if the given key does not exist and `require` is set to `True`. """ if require and key not in d.keys(): raise Error('{} missing from dict'.format(key)) return d.get(key, default) # type: ignore ################################## def dict_get_join(d: Dict[str, Any], key: str) -> Any: """ Helper function to get the value of a given key from a dictionary. `List` values will be converted to a string by joining them with a line break. :param d: The dictionary to process. :param key: The name of the key to get. :return: Returns the value of the given key. If it was a `list`, it will be joined with a line break. 
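A minimal illustration (hypothetical values): dict_get_join({'args': ['--a', '--b']}, 'args') returns '--a\n--b', while dict_get_join({'args': '--a'}, 'args') returns '--a' unchanged.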
""" value = d.get(key) if isinstance(value, list): value = '\n'.join(map(str, value)) return value ################################## def get_supported_daemons(): # type: () -> List[str] supported_daemons = list(Ceph.daemons) supported_daemons.extend(Monitoring.components) supported_daemons.append(NFSGanesha.daemon_type) supported_daemons.append(CephIscsi.daemon_type) supported_daemons.append(CustomContainer.daemon_type) supported_daemons.append(HAproxy.daemon_type) supported_daemons.append(Keepalived.daemon_type) supported_daemons.append(CephadmAgent.daemon_type) supported_daemons.append(SNMPGateway.daemon_type) supported_daemons.extend(Tracing.components) assert len(supported_daemons) == len(set(supported_daemons)) return supported_daemons ################################## class PortOccupiedError(Error): pass def attempt_bind(ctx, s, address, port): # type: (CephadmContext, socket.socket, str, int) -> None try: s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind((address, port)) except OSError as e: if e.errno == errno.EADDRINUSE: msg = 'Cannot bind to IP %s port %d: %s' % (address, port, e) logger.warning(msg) raise PortOccupiedError(msg) else: raise Error(e) except Exception as e: raise Error(e) finally: s.close() def port_in_use(ctx, port_num): # type: (CephadmContext, int) -> bool """Detect whether a port is in use on the local machine - IPv4 and IPv6""" logger.info('Verifying port %d ...' % port_num) def _port_in_use(af: socket.AddressFamily, address: str) -> bool: try: s = socket.socket(af, socket.SOCK_STREAM) attempt_bind(ctx, s, address, port_num) except PortOccupiedError: return True except OSError as e: if e.errno in (errno.EAFNOSUPPORT, errno.EADDRNOTAVAIL): # Ignore EAFNOSUPPORT and EADDRNOTAVAIL as two interfaces are # being tested here and one might be intentionally be disabled. # In that case no error should be raised. return False else: raise e return False return any(_port_in_use(af, address) for af, address in ( (socket.AF_INET, '0.0.0.0'), (socket.AF_INET6, '::') )) def check_ip_port(ctx, ep): # type: (CephadmContext, EndPoint) -> None if not ctx.skip_ping_check: logger.info(f'Verifying IP {ep.ip} port {ep.port} ...') if is_ipv6(ep.ip): s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) ip = unwrap_ipv6(ep.ip) else: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) ip = ep.ip attempt_bind(ctx, s, ip, ep.port) ################################## # this is an abbreviated version of # https://github.com/benediktschmitt/py-filelock/blob/master/filelock.py # that drops all of the compatibility (this is Unix/Linux only). class Timeout(TimeoutError): """ Raised when the lock could not be acquired in *timeout* seconds. """ def __init__(self, lock_file: str) -> None: """ """ #: The path of the file lock. self.lock_file = lock_file return None def __str__(self) -> str: temp = "The file lock '{}' could not be acquired."\ .format(self.lock_file) return temp class _Acquire_ReturnProxy(object): def __init__(self, lock: 'FileLock') -> None: self.lock = lock return None def __enter__(self) -> 'FileLock': return self.lock def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: self.lock.release() return None class FileLock(object): def __init__(self, ctx: CephadmContext, name: str, timeout: int = -1) -> None: if not os.path.exists(LOCK_DIR): os.mkdir(LOCK_DIR, 0o700) self._lock_file = os.path.join(LOCK_DIR, name + '.lock') self.ctx = ctx # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. 
# This file descriptor is only NOT None if the object currently holds # the lock. self._lock_file_fd: Optional[int] = None self.timeout = timeout # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released when this value is 0 again. self._lock_counter = 0 return None @property def is_locked(self) -> bool: return self._lock_file_fd is not None def acquire(self, timeout: Optional[int] = None, poll_intervall: float = 0.05) -> _Acquire_ReturnProxy: """ Acquires the file lock or fails with a :exc:`Timeout` error. .. code-block:: python # You can use this method in the context manager (recommended) with lock.acquire(): pass # Or use an equivalent try-finally construct: lock.acquire() try: pass finally: lock.release() :arg float timeout: The maximum time waited for the file lock. If ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired. If ``timeout`` is None, the default :attr:`~timeout` is used. :arg float poll_intervall: We check once in *poll_intervall* seconds if we can acquire the file lock. :raises Timeout: if the lock could not be acquired in *timeout* seconds. .. versionchanged:: 2.0.0 This method now returns a *proxy* object instead of *self*, so that it can be used in a with statement without side effects. """ # Use the default timeout, if no timeout is provided. if timeout is None: timeout = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. self._lock_counter += 1 lock_id = id(self) lock_filename = self._lock_file start_time = time.time() try: while True: if not self.is_locked: logger.log(QUIET_LOG_LEVEL, 'Acquiring lock %s on %s', lock_id, lock_filename) self._acquire() if self.is_locked: logger.log(QUIET_LOG_LEVEL, 'Lock %s acquired on %s', lock_id, lock_filename) break elif timeout >= 0 and time.time() - start_time > timeout: logger.warning('Timeout acquiring lock %s on %s', lock_id, lock_filename) raise Timeout(self._lock_file) else: logger.log( QUIET_LOG_LEVEL, 'Lock %s not acquired on %s, waiting %s seconds ...', lock_id, lock_filename, poll_intervall ) time.sleep(poll_intervall) except Exception: # Something did go wrong, so decrement the counter. self._lock_counter = max(0, self._lock_counter - 1) raise return _Acquire_ReturnProxy(lock=self) def release(self, force: bool = False) -> None: """ Releases the file lock. Please note that the lock is only completely released if the lock counter is 0. Also note that the lock file itself is not automatically deleted. :arg bool force: If true, the lock counter is ignored and the lock is released in every case. 
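A nested-use sketch (illustrative; it relies only on the counter semantics described above): entering `with lock:` twice bumps the counter from 0 to 1 (flock taken) and then to 2 (no second flock call); the underlying fd is closed only when the outermost block exits and the counter drops back to 0.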
""" if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: # lock_id = id(self) # lock_filename = self._lock_file # Can't log in shutdown: # File "/usr/lib64/python3.9/logging/__init__.py", line 1175, in _open # NameError: name 'open' is not defined # logger.debug('Releasing lock %s on %s', lock_id, lock_filename) self._release() self._lock_counter = 0 # logger.debug('Lock %s released on %s', lock_id, lock_filename) return None def __enter__(self) -> 'FileLock': self.acquire() return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: self.release() return None def __del__(self) -> None: self.release(force=True) return None def _acquire(self) -> None: open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC fd = os.open(self._lock_file, open_mode) try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) except (IOError, OSError): os.close(fd) else: self._lock_file_fd = fd return None def _release(self) -> None: # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition fd = self._lock_file_fd self._lock_file_fd = None fcntl.flock(fd, fcntl.LOCK_UN) # type: ignore os.close(fd) # type: ignore return None ################################## # Popen wrappers, lifted from ceph-volume class CallVerbosity(Enum): ##### # Format: # Normal Operation: <log-level-when-no-errors>, Errors: <log-level-when-error> # # NOTE: QUIET log level is custom level only used when --verbose is passed ##### # Normal Operation: None, Errors: None SILENT = 0 # Normal Operation: QUIET, Error: QUIET QUIET = 1 # Normal Operation: DEBUG, Error: DEBUG DEBUG = 2 # Normal Operation: QUIET, Error: INFO QUIET_UNLESS_ERROR = 3 # Normal Operation: DEBUG, Error: INFO VERBOSE_ON_FAILURE = 4 # Normal Operation: INFO, Error: INFO VERBOSE = 5 def success_log_level(self) -> int: _verbosity_level_to_log_level = { self.SILENT: 0, self.QUIET: QUIET_LOG_LEVEL, self.DEBUG: logging.DEBUG, self.QUIET_UNLESS_ERROR: QUIET_LOG_LEVEL, self.VERBOSE_ON_FAILURE: logging.DEBUG, self.VERBOSE: logging.INFO } return _verbosity_level_to_log_level[self] # type: ignore def error_log_level(self) -> int: _verbosity_level_to_log_level = { self.SILENT: 0, self.QUIET: QUIET_LOG_LEVEL, self.DEBUG: logging.DEBUG, self.QUIET_UNLESS_ERROR: logging.INFO, self.VERBOSE_ON_FAILURE: logging.INFO, self.VERBOSE: logging.INFO } return _verbosity_level_to_log_level[self] # type: ignore # disable coverage for the next block. this is copy-n-paste # from other code for compatibilty on older python versions if sys.version_info < (3, 8): # pragma: no cover import itertools import threading import warnings from asyncio import events class ThreadedChildWatcher(asyncio.AbstractChildWatcher): """Threaded child watcher implementation. The watcher uses a thread per process for waiting for the process finish. It doesn't require subscription on POSIX signal but a thread creation is not free. The watcher has O(1) complexity, its performance doesn't depend on amount of spawn processes. 
""" def __init__(self) -> None: self._pid_counter = itertools.count(0) self._threads: Dict[Any, Any] = {} def is_active(self) -> bool: return True def close(self) -> None: self._join_threads() def _join_threads(self) -> None: """Internal: Join all non-daemon threads""" threads = [thread for thread in list(self._threads.values()) if thread.is_alive() and not thread.daemon] for thread in threads: thread.join() def __enter__(self) -> Any: return self def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: pass def __del__(self, _warn: Any = warnings.warn) -> None: threads = [thread for thread in list(self._threads.values()) if thread.is_alive()] if threads: _warn(f'{self.__class__} has registered but not finished child processes', ResourceWarning, source=self) def add_child_handler(self, pid: Any, callback: Any, *args: Any) -> None: loop = events.get_event_loop() thread = threading.Thread(target=self._do_waitpid, name=f'waitpid-{next(self._pid_counter)}', args=(loop, pid, callback, args), daemon=True) self._threads[pid] = thread thread.start() def remove_child_handler(self, pid: Any) -> bool: # asyncio never calls remove_child_handler() !!! # The method is no-op but is implemented because # abstract base classe requires it return True def attach_loop(self, loop: Any) -> None: pass def _do_waitpid(self, loop: Any, expected_pid: Any, callback: Any, args: Any) -> None: assert expected_pid > 0 try: pid, status = os.waitpid(expected_pid, 0) except ChildProcessError: # The child process is already reaped # (may happen if waitpid() is called elsewhere). pid = expected_pid returncode = 255 logger.warning( 'Unknown child process pid %d, will report returncode 255', pid) else: if os.WIFEXITED(status): returncode = os.WEXITSTATUS(status) elif os.WIFSIGNALED(status): returncode = -os.WTERMSIG(status) else: raise ValueError(f'unknown wait status {status}') if loop.get_debug(): logger.debug('process %s exited with returncode %s', expected_pid, returncode) if loop.is_closed(): logger.warning('Loop %r that handles pid %r is closed', loop, pid) else: loop.call_soon_threadsafe(callback, pid, returncode, *args) self._threads.pop(expected_pid) # unlike SafeChildWatcher which handles SIGCHLD in the main thread, # ThreadedChildWatcher runs in a separated thread, hence allows us to # run create_subprocess_exec() in non-main thread, see # https://bugs.python.org/issue35621 asyncio.set_child_watcher(ThreadedChildWatcher()) try: from asyncio import run as async_run # type: ignore[attr-defined] except ImportError: # pragma: no cover # disable coverage for this block. 
it should be a copy-n-paste # from newer libs for compatibility on older python versions def async_run(coro): # type: ignore loop = asyncio.new_event_loop() try: asyncio.set_event_loop(loop) return loop.run_until_complete(coro) finally: try: loop.run_until_complete(loop.shutdown_asyncgens()) finally: asyncio.set_event_loop(None) loop.close() def call(ctx: CephadmContext, command: List[str], desc: Optional[str] = None, verbosity: CallVerbosity = CallVerbosity.VERBOSE_ON_FAILURE, timeout: Optional[int] = DEFAULT_TIMEOUT, **kwargs: Any) -> Tuple[str, str, int]: """ Wrap subprocess.Popen to - log stdout/stderr to a logger, - decode utf-8 - cleanly return out, err, returncode :param timeout: timeout in seconds """ prefix = command[0] if desc is None else desc if prefix: prefix += ': ' timeout = timeout or ctx.timeout async def run_with_timeout() -> Tuple[str, str, int]: process = await asyncio.create_subprocess_exec( *command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=os.environ.copy()) assert process.stdout assert process.stderr try: stdout, stderr = await asyncio.wait_for( process.communicate(), timeout, ) except asyncio.TimeoutError: # try to terminate the process assuming it is still running. It's # possible that even after killing the process it will not # complete, particularly if it is D-state. If that happens the # process.wait call will block, but we're no worse off than before # when the timeout did not work. Additionally, there are other # corner-cases we could try and handle here but we decided to start # simple. process.kill() await process.wait() logger.info(prefix + f'timeout after {timeout} seconds') return '', '', 124 else: assert process.returncode is not None return ( stdout.decode('utf-8'), stderr.decode('utf-8'), process.returncode, ) stdout, stderr, returncode = async_run(run_with_timeout()) log_level = verbosity.success_log_level() if returncode != 0: log_level = verbosity.error_log_level() logger.log(log_level, f'Non-zero exit code {returncode} from {" ".join(command)}') for line in stdout.splitlines(): logger.log(log_level, prefix + 'stdout ' + line) for line in stderr.splitlines(): logger.log(log_level, prefix + 'stderr ' + line) return stdout, stderr, returncode def call_throws( ctx: CephadmContext, command: List[str], desc: Optional[str] = None, verbosity: CallVerbosity = CallVerbosity.VERBOSE_ON_FAILURE, timeout: Optional[int] = DEFAULT_TIMEOUT, **kwargs: Any) -> Tuple[str, str, int]: out, err, ret = call(ctx, command, desc, verbosity, timeout, **kwargs) if ret: for s in (out, err): if s.strip() and len(s.splitlines()) <= 2: # readable message? raise RuntimeError(f'Failed command: {" ".join(command)}: {s}') raise RuntimeError('Failed command: %s' % ' '.join(command)) return out, err, ret def call_timeout(ctx, command, timeout): # type: (CephadmContext, List[str], int) -> int logger.debug('Running command (timeout=%s): %s' % (timeout, ' '.join(command))) def raise_timeout(command, timeout): # type: (List[str], int) -> NoReturn msg = 'Command `%s` timed out after %s seconds' % (command, timeout) logger.debug(msg) raise TimeoutExpired(msg) try: return subprocess.call(command, timeout=timeout, env=os.environ.copy()) except subprocess.TimeoutExpired: raise_timeout(command, timeout) ################################## def json_loads_retry(cli_func: Callable[[], str]) -> Any: for sleep_secs in [1, 4, 4]: try: return json.loads(cli_func()) except json.JSONDecodeError: logger.debug('Invalid JSON. Retrying in %s seconds...' 
% sleep_secs) time.sleep(sleep_secs) return json.loads(cli_func()) def is_available(ctx, what, func): # type: (CephadmContext, str, Callable[[], bool]) -> None """ Wait for a service to become available :param what: the name of the service :param func: the callable object that determines availability """ retry = ctx.retry logger.info('Waiting for %s...' % what) num = 1 while True: if func(): logger.info('%s is available' % what) break elif num > retry: raise Error('%s not available after %s tries' % (what, retry)) logger.info('%s not available, waiting (%s/%s)...' % (what, num, retry)) num += 1 time.sleep(2) def read_config(fn): # type: (Optional[str]) -> ConfigParser cp = ConfigParser() if fn: cp.read(fn) return cp def pathify(p): # type: (str) -> str p = os.path.expanduser(p) return os.path.abspath(p) def get_file_timestamp(fn): # type: (str) -> Optional[str] try: mt = os.path.getmtime(fn) return datetime.datetime.fromtimestamp( mt, tz=datetime.timezone.utc ).strftime(DATEFMT) except Exception: return None def try_convert_datetime(s): # type: (str) -> Optional[str] # This is super irritating because # 1) podman and docker use different formats # 2) python's strptime can't parse either one # # I've seen: # docker 18.09.7: 2020-03-03T09:21:43.636153304Z # podman 1.7.0: 2020-03-03T15:52:30.136257504-06:00 # 2020-03-03 15:52:30.136257504 -0600 CST # (In the podman case, there is a different string format for # 'inspect' and 'inspect --format {{.Created}}'!!) # In *all* cases, the 9 digit second precision is too much for # python's strptime. Shorten it to 6 digits. p = re.compile(r'(\.[\d]{6})[\d]*') s = p.sub(r'\1', s) # replace trailing Z with -0000, since (on python 3.6.8) it won't parse if s and s[-1] == 'Z': s = s[:-1] + '-0000' # cut off the redundant 'CST' part that strptime can't parse, if # present. v = s.split(' ') s = ' '.join(v[0:3]) # try parsing with several format strings fmts = [ '%Y-%m-%dT%H:%M:%S.%f%z', '%Y-%m-%d %H:%M:%S.%f %z', ] for f in fmts: try: # return timestamp normalized to UTC, rendered as DATEFMT. return datetime.datetime.strptime(s, f).astimezone(tz=datetime.timezone.utc).strftime(DATEFMT) except ValueError: pass return None def _parse_podman_version(version_str): # type: (str) -> Tuple[int, ...] def to_int(val: str, org_e: Optional[Exception] = None) -> int: if not val and org_e: raise org_e try: return int(val) except ValueError as e: return to_int(val[0:-1], org_e or e) return tuple(map(to_int, version_str.split('.'))) def get_hostname(): # type: () -> str return socket.gethostname() def get_short_hostname(): # type: () -> str return get_hostname().split('.', 1)[0] def get_fqdn(): # type: () -> str return socket.getfqdn() or socket.gethostname() def get_ip_addresses(hostname: str) -> Tuple[List[str], List[str]]: items = socket.getaddrinfo(hostname, None, flags=socket.AI_CANONNAME, type=socket.SOCK_STREAM) ipv4_addresses = [i[4][0] for i in items if i[0] == socket.AF_INET] ipv6_addresses = [i[4][0] for i in items if i[0] == socket.AF_INET6] return ipv4_addresses, ipv6_addresses def get_arch(): # type: () -> str return platform.uname().machine def generate_service_id(): # type: () -> str return get_short_hostname() + '.' 
+ ''.join(random.choice(string.ascii_lowercase) for _ in range(6)) def generate_password(): # type: () -> str return ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(10)) def normalize_container_id(i): # type: (str) -> str # docker adds the sha256: prefix, but AFAICS # docker (18.09.7 in bionic at least) and podman # both always use sha256, so leave off the prefix # for consistency. prefix = 'sha256:' if i.startswith(prefix): i = i[len(prefix):] return i def make_fsid(): # type: () -> str return str(uuid.uuid1()) def is_fsid(s): # type: (str) -> bool try: uuid.UUID(s) except ValueError: return False return True def validate_fsid(func: FuncT) -> FuncT: @wraps(func) def _validate_fsid(ctx: CephadmContext) -> Any: if 'fsid' in ctx and ctx.fsid: if not is_fsid(ctx.fsid): raise Error('not an fsid: %s' % ctx.fsid) return func(ctx) return cast(FuncT, _validate_fsid) def infer_fsid(func: FuncT) -> FuncT: """ If we only find a single fsid in /var/lib/ceph/*, use that """ @infer_config @wraps(func) def _infer_fsid(ctx: CephadmContext) -> Any: if 'fsid' in ctx and ctx.fsid: logger.debug('Using specified fsid: %s' % ctx.fsid) return func(ctx) fsids = set() cp = read_config(ctx.config) if cp.has_option('global', 'fsid'): fsids.add(cp.get('global', 'fsid')) daemon_list = list_daemons(ctx, detail=False) for daemon in daemon_list: if not is_fsid(daemon['fsid']): # 'unknown' fsid continue elif 'name' not in ctx or not ctx.name: # ctx.name not specified fsids.add(daemon['fsid']) elif daemon['name'] == ctx.name: # ctx.name is a match fsids.add(daemon['fsid']) fsids = sorted(fsids) if not fsids: # some commands do not always require an fsid pass elif len(fsids) == 1: logger.info('Inferring fsid %s' % fsids[0]) ctx.fsid = fsids[0] else: raise Error('Cannot infer an fsid, one must be specified (using --fsid): %s' % fsids) return func(ctx) return cast(FuncT, _infer_fsid) def infer_config(func: FuncT) -> FuncT: """ Infer the cluster configuration using the following priority order: 1- if the user has provided a custom conf file (-c option) use it 2- otherwise if a daemon --name has been provided use that daemon's conf 3- otherwise find the mon daemon conf file and use it (if v1) 4- otherwise if {ctx.data_dir}/{fsid}/{CEPH_CONF_DIR} dir exists use it 5- finally: fall back to the default file /etc/ceph/ceph.conf """ @wraps(func) def _infer_config(ctx: CephadmContext) -> Any: def config_path(daemon_type: str, daemon_name: str) -> str: data_dir = get_data_dir(ctx.fsid, ctx.data_dir, daemon_type, daemon_name) return os.path.join(data_dir, 'config') def get_mon_daemon_name(fsid: str) -> Optional[str]: daemon_list = list_daemons(ctx, detail=False) for daemon in daemon_list: if ( daemon.get('name', '').startswith('mon.') and daemon.get('fsid', '') == fsid and daemon.get('style', '') == 'cephadm:v1' and os.path.exists(config_path('mon', daemon['name'].split('.', 1)[1])) ): return daemon['name'] return None ctx.config = ctx.config if 'config' in ctx else None # check if the user has provided a conf by using the -c option if ctx.config and (ctx.config != CEPH_DEFAULT_CONF): logger.debug(f'Using specified config: {ctx.config}') return func(ctx) if 'fsid' in ctx and ctx.fsid: name = ctx.name if ('name' in ctx and ctx.name) else get_mon_daemon_name(ctx.fsid) if name is not None: # daemon name has been specified (or inferred from mon), let's use its conf ctx.config = config_path(name.split('.', 1)[0], name.split('.', 1)[1]) else: # no daemon; in case the cluster has a config dir, use it ceph_conf = 
f'{ctx.data_dir}/{ctx.fsid}/{CEPH_CONF_DIR}/{CEPH_CONF}' if os.path.exists(ceph_conf): ctx.config = ceph_conf if ctx.config: logger.info(f'Inferring config {ctx.config}') elif os.path.exists(CEPH_DEFAULT_CONF): logger.debug(f'Using default config {CEPH_DEFAULT_CONF}') ctx.config = CEPH_DEFAULT_CONF return func(ctx) return cast(FuncT, _infer_config) def _get_default_image(ctx: CephadmContext) -> str: if DEFAULT_IMAGE_IS_MAIN: warn = """This is a development version of cephadm. For information regarding the latest stable release: https://docs.ceph.com/docs/{}/cephadm/install """.format(LATEST_STABLE_RELEASE) for line in warn.splitlines(): logger.warning('{}{}{}'.format(termcolor.yellow, line, termcolor.end)) return DEFAULT_IMAGE def infer_image(func: FuncT) -> FuncT: """ Use the most recent ceph image """ @wraps(func) def _infer_image(ctx: CephadmContext) -> Any: if not ctx.image: ctx.image = os.environ.get('CEPHADM_IMAGE') if not ctx.image: ctx.image = infer_local_ceph_image(ctx, ctx.container_engine.path) if not ctx.image: ctx.image = _get_default_image(ctx) return func(ctx) return cast(FuncT, _infer_image) def require_image(func: FuncT) -> FuncT: """ Require the global --image flag to be set """ @wraps(func) def _require_image(ctx: CephadmContext) -> Any: if not ctx.image: raise Error('This command requires the global --image option to be set') return func(ctx) return cast(FuncT, _require_image) def default_image(func: FuncT) -> FuncT: @wraps(func) def _default_image(ctx: CephadmContext) -> Any: update_default_image(ctx) return func(ctx) return cast(FuncT, _default_image) def update_default_image(ctx: CephadmContext) -> None: if getattr(ctx, 'image', None): return ctx.image = None # ensure ctx.image exists to avoid repeated `getattr`s name = getattr(ctx, 'name', None) if name: type_ = name.split('.', 1)[0] if type_ in Monitoring.components: ctx.image = Monitoring.components[type_]['image'] if type_ == 'haproxy': ctx.image = HAproxy.default_image if type_ == 'keepalived': ctx.image = Keepalived.default_image if type_ == SNMPGateway.daemon_type: ctx.image = SNMPGateway.default_image if type_ in Tracing.components: ctx.image = Tracing.components[type_]['image'] if not ctx.image: ctx.image = os.environ.get('CEPHADM_IMAGE') if not ctx.image: ctx.image = _get_default_image(ctx) def executes_early(func: FuncT) -> FuncT: """Decorator that indicates the command function is meant to have no dependencies and no environmental requirements and can therefore be executed as non-root and with no logging, etc. Commands that have this decorator applied must be simple and self-contained. """ cast(Any, func)._execute_early = True return func def deprecated_command(func: FuncT) -> FuncT: @wraps(func) def _deprecated_command(ctx: CephadmContext) -> Any: logger.warning(f'Deprecated command used: {func}') if NO_DEPRECATED: raise Error('running deprecated commands disabled') return func(ctx) return cast(FuncT, _deprecated_command) def get_container_info(ctx: CephadmContext, daemon_filter: str, by_name: bool) -> Optional[ContainerInfo]: """ :param ctx: Cephadm context :param daemon_filter: daemon name or type :param by_name: must be set to True if daemon name is provided :return: Container information or None """ def daemon_name_or_type(daemon: Dict[str, str]) -> str: return daemon['name'] if by_name else daemon['name'].split('.', 1)[0] if by_name and '.' 
not in daemon_filter: logger.warning(f'Trying to get container info using invalid daemon name {daemon_filter}') return None daemons = list_daemons(ctx, detail=False) matching_daemons = [d for d in daemons if daemon_name_or_type(d) == daemon_filter and d['fsid'] == ctx.fsid] if matching_daemons: d_type, d_id = matching_daemons[0]['name'].split('.', 1) out, _, code = get_container_stats(ctx, ctx.container_engine.path, ctx.fsid, d_type, d_id) if not code: (container_id, image_name, image_id, start, version) = out.strip().split(',') return ContainerInfo(container_id, image_name, image_id, start, version) return None def infer_local_ceph_image(ctx: CephadmContext, container_path: str) -> Optional[str]: """ Infer the local ceph image based on the following priority criteria: 1- the image specified by --image arg (if provided). 2- the same image as the daemon container specified by --name arg (if provided). 3- image used by any ceph container running on the host. In this case we use daemon types. 4- if no container is found then we use the most recent ceph image on the host. Note: any selected container must have the same fsid inferred previously. :return: The most recent local ceph image (already pulled) """ # '|' special character is used to separate the output fields into: # - Repository@digest # - Image Id # - Image Tag # - Image creation date out, _, _ = call_throws(ctx, [container_path, 'images', '--filter', 'label=ceph=True', '--filter', 'dangling=false', '--format', '{{.Repository}}@{{.Digest}}|{{.ID}}|{{.Tag}}|{{.CreatedAt}}']) container_info = None daemon_name = ctx.name if ('name' in ctx and ctx.name and '.' in ctx.name) else None daemons_ls = [daemon_name] if daemon_name is not None else Ceph.daemons # daemon types: 'mon', 'mgr', etc for daemon in daemons_ls: container_info = get_container_info(ctx, daemon, daemon_name is not None) if container_info is not None: logger.debug(f"Using container info for daemon '{daemon}'") break for image in out.splitlines(): if image and not image.isspace(): (digest, image_id, tag, created_date) = image.lstrip().split('|') if container_info is not None and image_id not in container_info.image_id: continue if digest and not digest.endswith('@'): logger.info(f"Using ceph image with id '{image_id}' and tag '{tag}' created on {created_date}\n{digest}") return digest return None def write_tmp(s, uid, gid): # type: (str, int, int) -> IO[str] tmp_f = tempfile.NamedTemporaryFile(mode='w', prefix='ceph-tmp') os.fchown(tmp_f.fileno(), uid, gid) tmp_f.write(s) tmp_f.flush() return tmp_f def makedirs(dir, uid, gid, mode): # type: (str, int, int, int) -> None if not os.path.exists(dir): os.makedirs(dir, mode=mode) else: os.chmod(dir, mode) os.chown(dir, uid, gid) os.chmod(dir, mode) # the above is masked by umask... 
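# The path helpers below define cephadm's on-disk layout. A quick sketch, # assuming the default /var/lib/ceph and /var/log/ceph roots: # get_data_dir(fsid, '/var/lib/ceph', 'mon', 'a') -> '/var/lib/ceph/<fsid>/mon.a' # get_log_dir(fsid, '/var/log/ceph') -> '/var/log/ceph/<fsid>' # make_data_dir_base() additionally creates <base>/crash and <base>/crash/posted.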
def get_data_dir(fsid, data_dir, t, n): # type: (str, str, str, Union[int, str]) -> str return os.path.join(data_dir, fsid, '%s.%s' % (t, n)) def get_log_dir(fsid, log_dir): # type: (str, str) -> str return os.path.join(log_dir, fsid) def make_data_dir_base(fsid, data_dir, uid, gid): # type: (str, str, int, int) -> str data_dir_base = os.path.join(data_dir, fsid) makedirs(data_dir_base, uid, gid, DATA_DIR_MODE) makedirs(os.path.join(data_dir_base, 'crash'), uid, gid, DATA_DIR_MODE) makedirs(os.path.join(data_dir_base, 'crash', 'posted'), uid, gid, DATA_DIR_MODE) return data_dir_base def make_data_dir(ctx, fsid, daemon_type, daemon_id, uid=None, gid=None): # type: (CephadmContext, str, str, Union[int, str], Optional[int], Optional[int]) -> str if uid is None or gid is None: uid, gid = extract_uid_gid(ctx) make_data_dir_base(fsid, ctx.data_dir, uid, gid) data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) makedirs(data_dir, uid, gid, DATA_DIR_MODE) return data_dir def make_log_dir(ctx, fsid, uid=None, gid=None): # type: (CephadmContext, str, Optional[int], Optional[int]) -> str if uid is None or gid is None: uid, gid = extract_uid_gid(ctx) log_dir = get_log_dir(fsid, ctx.log_dir) makedirs(log_dir, uid, gid, LOG_DIR_MODE) return log_dir def make_var_run(ctx, fsid, uid, gid): # type: (CephadmContext, str, int, int) -> None call_throws(ctx, ['install', '-d', '-m0770', '-o', str(uid), '-g', str(gid), '/var/run/ceph/%s' % fsid]) def copy_tree(ctx, src, dst, uid=None, gid=None): # type: (CephadmContext, List[str], str, Optional[int], Optional[int]) -> None """ Copy a directory tree from src to dst """ if uid is None or gid is None: (uid, gid) = extract_uid_gid(ctx) for src_dir in src: dst_dir = dst if os.path.isdir(dst): dst_dir = os.path.join(dst, os.path.basename(src_dir)) logger.debug('copy directory `%s` -> `%s`' % (src_dir, dst_dir)) shutil.rmtree(dst_dir, ignore_errors=True) shutil.copytree(src_dir, dst_dir) # dirs_exist_ok needs python 3.8 for dirpath, dirnames, filenames in os.walk(dst_dir): logger.debug('chown %s:%s `%s`' % (uid, gid, dirpath)) os.chown(dirpath, uid, gid) for filename in filenames: logger.debug('chown %s:%s `%s`' % (uid, gid, filename)) os.chown(os.path.join(dirpath, filename), uid, gid) def copy_files(ctx, src, dst, uid=None, gid=None): # type: (CephadmContext, List[str], str, Optional[int], Optional[int]) -> None """ Copy files from src to dst """ if uid is None or gid is None: (uid, gid) = extract_uid_gid(ctx) for src_file in src: dst_file = dst if os.path.isdir(dst): dst_file = os.path.join(dst, os.path.basename(src_file)) logger.debug('copy file `%s` -> `%s`' % (src_file, dst_file)) shutil.copyfile(src_file, dst_file) logger.debug('chown %s:%s `%s`' % (uid, gid, dst_file)) os.chown(dst_file, uid, gid) def move_files(ctx, src, dst, uid=None, gid=None): # type: (CephadmContext, List[str], str, Optional[int], Optional[int]) -> None """ Move files from src to dst """ if uid is None or gid is None: (uid, gid) = extract_uid_gid(ctx) for src_file in src: dst_file = dst if os.path.isdir(dst): dst_file = os.path.join(dst, os.path.basename(src_file)) if os.path.islink(src_file): # shutil.move() in py2 does not handle symlinks correctly src_rl = os.readlink(src_file) logger.debug("symlink '%s' -> '%s'" % (dst_file, src_rl)) os.symlink(src_rl, dst_file) os.unlink(src_file) else: logger.debug("move file '%s' -> '%s'" % (src_file, dst_file)) shutil.move(src_file, dst_file) logger.debug('chown %s:%s `%s`' % (uid, gid, dst_file)) os.chown(dst_file, uid, gid) def 
recursive_chown(path: str, uid: int, gid: int) -> None: for dirpath, dirnames, filenames in os.walk(path): os.chown(dirpath, uid, gid) for filename in filenames: os.chown(os.path.join(dirpath, filename), uid, gid) # copied from distutils def find_executable(executable: str, path: Optional[str] = None) -> Optional[str]: """Tries to find 'executable' in the directories listed in 'path', a string listing directories separated by 'os.pathsep' that defaults to os.environ['PATH']. Returns the complete filename or None if not found. """ _, ext = os.path.splitext(executable) if (sys.platform == 'win32') and (ext != '.exe'): executable = executable + '.exe' # pragma: no cover if os.path.isfile(executable): return executable if path is None: path = os.environ.get('PATH', None) if path is None: try: path = os.confstr('CS_PATH') except (AttributeError, ValueError): # os.confstr() or CS_PATH is not available path = os.defpath # bpo-35755: Don't use os.defpath if the PATH environment variable is # set to an empty string # PATH='' doesn't match, whereas PATH=':' looks in the current directory if not path: return None paths = path.split(os.pathsep) for p in paths: f = os.path.join(p, executable) if os.path.isfile(f): # the file exists, we have a shot at spawn working return f return None def find_program(filename): # type: (str) -> str name = find_executable(filename) if name is None: raise ValueError('%s not found' % filename) return name def find_container_engine(ctx: CephadmContext) -> Optional[ContainerEngine]: if ctx.docker: return Docker() else: for i in CONTAINER_PREFERENCE: try: return i() except Exception: pass return None def check_container_engine(ctx: CephadmContext) -> ContainerEngine: engine = ctx.container_engine if not isinstance(engine, CONTAINER_PREFERENCE): # See https://github.com/python/mypy/issues/8993 exes: List[str] = [i.EXE for i in CONTAINER_PREFERENCE] # type: ignore raise Error('No container engine binary found ({}). Try running `apt/dnf/yum/zypper install <container engine>`'.format(' or '.join(exes))) elif isinstance(engine, Podman): engine.get_version(ctx) if engine.version < MIN_PODMAN_VERSION: raise Error('podman version %d.%d.%d or later is required' % MIN_PODMAN_VERSION) return engine def get_unit_name(fsid, daemon_type, daemon_id=None): # type: (str, str, Optional[Union[int, str]]) -> str # accept either name or type + id if daemon_id is not None: return 'ceph-%s@%s.%s' % (fsid, daemon_type, daemon_id) else: return 'ceph-%s@%s' % (fsid, daemon_type) def get_unit_name_by_daemon_name(ctx: CephadmContext, fsid: str, name: str) -> str: daemon = get_daemon_description(ctx, fsid, name) try: return daemon['systemd_unit'] except KeyError: raise Error('Failed to get unit name for {}'.format(daemon)) def check_unit(ctx, unit_name): # type: (CephadmContext, str) -> Tuple[bool, str, bool] # NOTE: we ignore the exit code here because systemctl outputs # various exit codes based on the state of the service, but the # string result is more explicit (and sufficient). 
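# Illustrative strings matched below (typical systemctl behaviour, not an # exhaustive list): `is-enabled` prints e.g. 'enabled' or 'disabled', and # `is-active` prints 'active', 'inactive' or 'failed'.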
enabled = False installed = False try: out, err, code = call(ctx, ['systemctl', 'is-enabled', unit_name], verbosity=CallVerbosity.QUIET) if code == 0: enabled = True installed = True elif 'disabled' in out: installed = True except Exception as e: logger.warning('unable to run systemctl: %s' % e) enabled = False installed = False state = 'unknown' try: out, err, code = call(ctx, ['systemctl', 'is-active', unit_name], verbosity=CallVerbosity.QUIET) out = out.strip() if out in ['active']: state = 'running' elif out in ['inactive']: state = 'stopped' elif out in ['failed', 'auto-restart']: state = 'error' else: state = 'unknown' except Exception as e: logger.warning('unable to run systemctl: %s' % e) state = 'unknown' return (enabled, state, installed) def check_units(ctx, units, enabler=None): # type: (CephadmContext, List[str], Optional[Packager]) -> bool for u in units: (enabled, state, installed) = check_unit(ctx, u) if enabled and state == 'running': logger.info('Unit %s is enabled and running' % u) return True if enabler is not None: if installed: logger.info('Enabling unit %s' % u) enabler.enable_service(u) return False def is_container_running(ctx: CephadmContext, c: 'CephContainer') -> bool: if ctx.name.split('.', 1)[0] in ['agent', 'cephadm-exporter']: # these are non-containerized daemon types return False return bool(get_running_container_name(ctx, c)) def get_running_container_name(ctx: CephadmContext, c: 'CephContainer') -> Optional[str]: for name in [c.cname, c.old_cname]: out, err, ret = call(ctx, [ ctx.container_engine.path, 'container', 'inspect', '--format', '{{.State.Status}}', name ]) if out.strip() == 'running': return name return None def get_legacy_config_fsid(cluster, legacy_dir=None): # type: (str, Optional[str]) -> Optional[str] config_file = '/etc/ceph/%s.conf' % cluster if legacy_dir is not None: config_file = os.path.abspath(legacy_dir + config_file) if os.path.exists(config_file): config = read_config(config_file) if config.has_section('global') and config.has_option('global', 'fsid'): return config.get('global', 'fsid') return None def get_legacy_daemon_fsid(ctx, cluster, daemon_type, daemon_id, legacy_dir=None): # type: (CephadmContext, str, str, Union[int, str], Optional[str]) -> Optional[str] fsid = None if daemon_type == 'osd': try: fsid_file = os.path.join(ctx.data_dir, daemon_type, 'ceph-%s' % daemon_id, 'ceph_fsid') if legacy_dir is not None: fsid_file = os.path.abspath(legacy_dir + fsid_file) with open(fsid_file, 'r') as f: fsid = f.read().strip() except IOError: pass if not fsid: fsid = get_legacy_config_fsid(cluster, legacy_dir=legacy_dir) return fsid def should_log_to_journald(ctx: CephadmContext) -> bool: if ctx.log_to_journald is not None: return ctx.log_to_journald return isinstance(ctx.container_engine, Podman) and \ ctx.container_engine.version >= CGROUPS_SPLIT_PODMAN_VERSION def get_daemon_args(ctx, fsid, daemon_type, daemon_id): # type: (CephadmContext, str, str, Union[int, str]) -> List[str] r = list() # type: List[str] if daemon_type in Ceph.daemons and daemon_type not in ['crash', 'ceph-exporter']: r += [ '--setuser', 'ceph', '--setgroup', 'ceph', '--default-log-to-file=false', ] log_to_journald = should_log_to_journald(ctx) if log_to_journald: r += [ '--default-log-to-journald=true', '--default-log-to-stderr=false', ] else: r += [ '--default-log-to-stderr=true', '--default-log-stderr-prefix=debug ', ] if daemon_type == 'mon': r += [ '--default-mon-cluster-log-to-file=false', ] if log_to_journald: r += [ 
'--default-mon-cluster-log-to-journald=true', '--default-mon-cluster-log-to-stderr=false', ] else: r += ['--default-mon-cluster-log-to-stderr=true'] elif daemon_type in Monitoring.components: metadata = Monitoring.components[daemon_type] r += metadata.get('args', list()) # set the ip and port to bind to for node-exporter, alertmanager and prometheus if daemon_type not in ['grafana', 'loki', 'promtail']: ip = '' port = Monitoring.port_map[daemon_type][0] meta = fetch_meta(ctx) if meta: if 'ip' in meta and meta['ip']: ip = meta['ip'] if 'ports' in meta and meta['ports']: port = meta['ports'][0] r += [f'--web.listen-address={ip}:{port}'] if daemon_type == 'prometheus': config = fetch_configs(ctx) retention_time = config.get('retention_time', '15d') retention_size = config.get('retention_size', '0') # default to disabled r += [f'--storage.tsdb.retention.time={retention_time}'] r += [f'--storage.tsdb.retention.size={retention_size}'] scheme = 'http' host = get_fqdn() # in case the host is not an fqdn, use the IP to # avoid producing a broken web.external-url link if '.' not in host: ipv4_addrs, ipv6_addrs = get_ip_addresses(get_hostname()) # use the first ipv4 (if any) otherwise use the first ipv6 addr = next(iter(ipv4_addrs or ipv6_addrs), None) host = wrap_ipv6(addr) if addr else host r += [f'--web.external-url={scheme}://{host}:{port}'] if daemon_type == 'alertmanager': config = fetch_configs(ctx) peers = config.get('peers', list()) # type: ignore for peer in peers: r += ['--cluster.peer={}'.format(peer)] try: r += [f'--web.config.file={config["web_config"]}'] except KeyError: pass # some alertmanager builds look elsewhere for a config by default r += ['--config.file=/etc/alertmanager/alertmanager.yml'] if daemon_type == 'promtail': r += ['--config.expand-env'] if daemon_type == 'prometheus': config = fetch_configs(ctx) try: r += [f'--web.config.file={config["web_config"]}'] except KeyError: pass if daemon_type == 'node-exporter': config = fetch_configs(ctx) try: r += [f'--web.config={config["web_config"]}'] except KeyError: pass r += ['--path.procfs=/host/proc', '--path.sysfs=/host/sys', '--path.rootfs=/rootfs'] elif daemon_type == 'jaeger-agent': r.extend(Tracing.components[daemon_type]['daemon_args']) elif daemon_type == NFSGanesha.daemon_type: nfs_ganesha = NFSGanesha.init(ctx, fsid, daemon_id) r += nfs_ganesha.get_daemon_args() elif daemon_type == CephExporter.daemon_type: ceph_exporter = CephExporter.init(ctx, fsid, daemon_id) r.extend(ceph_exporter.get_daemon_args()) elif daemon_type == HAproxy.daemon_type: haproxy = HAproxy.init(ctx, fsid, daemon_id) r += haproxy.get_daemon_args() elif daemon_type == CustomContainer.daemon_type: cc = CustomContainer.init(ctx, fsid, daemon_id) r.extend(cc.get_daemon_args()) elif daemon_type == SNMPGateway.daemon_type: sc = SNMPGateway.init(ctx, fsid, daemon_id) r.extend(sc.get_daemon_args()) return r def create_daemon_dirs(ctx, fsid, daemon_type, daemon_id, uid, gid, config=None, keyring=None): # type: (CephadmContext, str, str, Union[int, str], int, int, Optional[str], Optional[str]) -> None data_dir = make_data_dir(ctx, fsid, daemon_type, daemon_id, uid=uid, gid=gid) if daemon_type in Ceph.daemons: make_log_dir(ctx, fsid, uid=uid, gid=gid) if config: config_path = os.path.join(data_dir, 'config') with write_new(config_path, owner=(uid, gid)) as f: f.write(config) if keyring: keyring_path = os.path.join(data_dir, 'keyring') with write_new(keyring_path, owner=(uid, gid)) as f: f.write(keyring) if daemon_type in Monitoring.components.keys(): config_json = 
fetch_configs(ctx) # Set up directories specific to the monitoring component config_dir = '' data_dir_root = '' if daemon_type == 'prometheus': data_dir_root = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) config_dir = 'etc/prometheus' makedirs(os.path.join(data_dir_root, config_dir), uid, gid, 0o755) makedirs(os.path.join(data_dir_root, config_dir, 'alerting'), uid, gid, 0o755) makedirs(os.path.join(data_dir_root, 'data'), uid, gid, 0o755) recursive_chown(os.path.join(data_dir_root, 'etc'), uid, gid) recursive_chown(os.path.join(data_dir_root, 'data'), uid, gid) elif daemon_type == 'grafana': data_dir_root = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) config_dir = 'etc/grafana' makedirs(os.path.join(data_dir_root, config_dir), uid, gid, 0o755) makedirs(os.path.join(data_dir_root, config_dir, 'certs'), uid, gid, 0o755) makedirs(os.path.join(data_dir_root, config_dir, 'provisioning/datasources'), uid, gid, 0o755) makedirs(os.path.join(data_dir_root, 'data'), uid, gid, 0o755) touch(os.path.join(data_dir_root, 'data', 'grafana.db'), uid, gid) elif daemon_type == 'alertmanager': data_dir_root = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) config_dir = 'etc/alertmanager' makedirs(os.path.join(data_dir_root, config_dir), uid, gid, 0o755) makedirs(os.path.join(data_dir_root, config_dir, 'data'), uid, gid, 0o755) elif daemon_type == 'promtail': data_dir_root = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) config_dir = 'etc/promtail' makedirs(os.path.join(data_dir_root, config_dir), uid, gid, 0o755) makedirs(os.path.join(data_dir_root, 'data'), uid, gid, 0o755) elif daemon_type == 'loki': data_dir_root = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) config_dir = 'etc/loki' makedirs(os.path.join(data_dir_root, config_dir), uid, gid, 0o755) makedirs(os.path.join(data_dir_root, 'data'), uid, gid, 0o755) elif daemon_type == 'node-exporter': data_dir_root = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) config_dir = 'etc/node-exporter' makedirs(os.path.join(data_dir_root, config_dir), uid, gid, 0o755) recursive_chown(os.path.join(data_dir_root, 'etc'), uid, gid) # populate the config directory for the component from the config-json if 'files' in config_json: for fname in config_json['files']: # work around mypy weirdness where it thinks `str`s aren't Anys # when used for dictionary values! feels like possibly a mypy bug?! 
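# (the cast() below is a mypy-only annotation: it is a no-op at runtime # and returns config_json['files'] unchanged)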
cfg = cast(Dict[str, Any], config_json['files']) content = dict_get_join(cfg, fname) if os.path.isabs(fname): fpath = os.path.join(data_dir_root, fname.lstrip(os.path.sep)) else: fpath = os.path.join(data_dir_root, config_dir, fname) with write_new(fpath, owner=(uid, gid), encoding='utf-8') as f: f.write(content) elif daemon_type == NFSGanesha.daemon_type: nfs_ganesha = NFSGanesha.init(ctx, fsid, daemon_id) nfs_ganesha.create_daemon_dirs(data_dir, uid, gid) elif daemon_type == CephIscsi.daemon_type: ceph_iscsi = CephIscsi.init(ctx, fsid, daemon_id) ceph_iscsi.create_daemon_dirs(data_dir, uid, gid) elif daemon_type == HAproxy.daemon_type: haproxy = HAproxy.init(ctx, fsid, daemon_id) haproxy.create_daemon_dirs(data_dir, uid, gid) elif daemon_type == Keepalived.daemon_type: keepalived = Keepalived.init(ctx, fsid, daemon_id) keepalived.create_daemon_dirs(data_dir, uid, gid) elif daemon_type == CustomContainer.daemon_type: cc = CustomContainer.init(ctx, fsid, daemon_id) cc.create_daemon_dirs(data_dir, uid, gid) elif daemon_type == SNMPGateway.daemon_type: sg = SNMPGateway.init(ctx, fsid, daemon_id) sg.create_daemon_conf() _write_custom_conf_files(ctx, daemon_type, str(daemon_id), fsid, uid, gid) def _write_custom_conf_files(ctx: CephadmContext, daemon_type: str, daemon_id: str, fsid: str, uid: int, gid: int) -> None: # mostly making this its own function to make unit testing easier ccfiles = fetch_custom_config_files(ctx) if not ccfiles: return custom_config_dir = os.path.join(ctx.data_dir, fsid, 'custom_config_files', f'{daemon_type}.{daemon_id}') if not os.path.exists(custom_config_dir): makedirs(custom_config_dir, uid, gid, 0o755) mandatory_keys = ['mount_path', 'content'] for ccf in ccfiles: if all(k in ccf for k in mandatory_keys): file_path = os.path.join(custom_config_dir, os.path.basename(ccf['mount_path'])) with write_new(file_path, owner=(uid, gid), encoding='utf-8') as f: f.write(ccf['content']) def get_parm(option: str) -> Dict[str, str]: js = _get_config_json(option) # custom_config_files is a special field that may be in the config # dict. It is used for mounting custom config files into daemon's containers # and should be accessed through the "fetch_custom_config_files" function. # For get_parm we need to discard it. js.pop('custom_config_files', None) return js def _get_config_json(option: str) -> Dict[str, Any]: if not option: return dict() global cached_stdin if option == '-': if cached_stdin is not None: j = cached_stdin else: j = sys.stdin.read() cached_stdin = j else: # inline json string if option[0] == '{' and option[-1] == '}': j = option # json file elif os.path.exists(option): with open(option, 'r') as f: j = f.read() else: raise Error('Config file {} not found'.format(option)) try: js = json.loads(j) except ValueError as e: raise Error('Invalid JSON in {}: {}'.format(option, e)) else: return js def fetch_meta(ctx: CephadmContext) -> Dict[str, Any]: """Return a dict containing metadata about a deployment. """ meta = getattr(ctx, 'meta_properties', None) if meta is not None: return meta mjson = getattr(ctx, 'meta_json', None) if mjson is not None: meta = json.loads(mjson) or {} ctx.meta_properties = meta return meta return {} def fetch_configs(ctx: CephadmContext) -> Dict[str, str]: """Return a dict containing arbitrary configuration parameters. This function filters out the key 'custom_config_files' which must not be part of a deployment's configuration key-value pairs. To access custom configuration file data, use `fetch_custom_config_files`. 
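For a Ceph daemon the returned mapping typically carries 'config' and 'keyring' entries (see get_config_and_keyring() below); the exact keys are deployment-specific.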
""" # ctx.config_blobs is *always* a dict. it is created once when # a command is parsed/processed and stored "forever" cfg_blobs = getattr(ctx, 'config_blobs', None) if cfg_blobs: cfg_blobs = dict(cfg_blobs) cfg_blobs.pop('custom_config_files', None) return cfg_blobs # ctx.config_json is the legacy equivalent of config_blobs. it is a # string that either contains json or refers to a file name where # the file contains json. cfg_json = getattr(ctx, 'config_json', None) if cfg_json: jdata = _get_config_json(cfg_json) or {} jdata.pop('custom_config_files', None) return jdata return {} def fetch_custom_config_files(ctx: CephadmContext) -> List[Dict[str, Any]]: """Return a list containing dicts that can be used to populate custom configuration files for containers. """ # NOTE: this function works like the opposite of fetch_configs. # instead of filtering out custom_config_files, it returns only # the content in that key. cfg_blobs = getattr(ctx, 'config_blobs', None) if cfg_blobs: return cfg_blobs.get('custom_config_files', []) cfg_json = getattr(ctx, 'config_json', None) if cfg_json: jdata = _get_config_json(cfg_json) return jdata.get('custom_config_files', []) return [] def fetch_tcp_ports(ctx: CephadmContext) -> List[int]: """Return a list of tcp ports, as integers, stored on the given ctx. """ ports = getattr(ctx, 'tcp_ports', None) if ports is None: return [] if isinstance(ports, str): return list(map(int, ports.split())) return ports def get_config_and_keyring(ctx): # type: (CephadmContext) -> Tuple[Optional[str], Optional[str]] config = None keyring = None d = fetch_configs(ctx) if d: config = d.get('config') keyring = d.get('keyring') if config and keyring: return config, keyring if 'config' in ctx and ctx.config: try: with open(ctx.config, 'r') as f: config = f.read() except FileNotFoundError as e: raise Error(e) if 'key' in ctx and ctx.key: keyring = '[%s]\n\tkey = %s\n' % (ctx.name, ctx.key) elif 'keyring' in ctx and ctx.keyring: try: with open(ctx.keyring, 'r') as f: keyring = f.read() except FileNotFoundError as e: raise Error(e) return config, keyring def get_container_binds(ctx, fsid, daemon_type, daemon_id): # type: (CephadmContext, str, str, Union[int, str, None]) -> List[List[str]] binds = list() if daemon_type == CephIscsi.daemon_type: binds.extend(CephIscsi.get_container_binds()) elif daemon_type == CustomContainer.daemon_type: assert daemon_id cc = CustomContainer.init(ctx, fsid, daemon_id) data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) binds.extend(cc.get_container_binds(data_dir)) return binds def get_container_mounts(ctx, fsid, daemon_type, daemon_id, no_config=False): # type: (CephadmContext, str, str, Union[int, str, None], Optional[bool]) -> Dict[str, str] mounts = dict() if daemon_type in Ceph.daemons: if fsid: run_path = os.path.join('/var/run/ceph', fsid) if os.path.exists(run_path): mounts[run_path] = '/var/run/ceph:z' log_dir = get_log_dir(fsid, ctx.log_dir) mounts[log_dir] = '/var/log/ceph:z' crash_dir = '/var/lib/ceph/%s/crash' % fsid if os.path.exists(crash_dir): mounts[crash_dir] = '/var/lib/ceph/crash:z' if daemon_type != 'crash' and should_log_to_journald(ctx): journald_sock_dir = '/run/systemd/journal' mounts[journald_sock_dir] = journald_sock_dir if daemon_type in Ceph.daemons and daemon_id: data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) if daemon_type == 'rgw': cdata_dir = '/var/lib/ceph/radosgw/ceph-rgw.%s' % (daemon_id) else: cdata_dir = '/var/lib/ceph/%s/ceph-%s' % (daemon_type, daemon_id) if daemon_type != 
'crash': mounts[data_dir] = cdata_dir + ':z' if not no_config: mounts[data_dir + '/config'] = '/etc/ceph/ceph.conf:z' if daemon_type in ['rbd-mirror', 'cephfs-mirror', 'crash', 'ceph-exporter']: # these do not search for their keyrings in a data directory mounts[data_dir + '/keyring'] = '/etc/ceph/ceph.client.%s.%s.keyring' % (daemon_type, daemon_id) if daemon_type in ['mon', 'osd', 'clusterless-ceph-volume']: mounts['/dev'] = '/dev' # FIXME: narrow this down? mounts['/run/udev'] = '/run/udev' if daemon_type in ['osd', 'clusterless-ceph-volume']: mounts['/sys'] = '/sys' # for numa.cc, pick_address, cgroups, ... mounts['/run/lvm'] = '/run/lvm' mounts['/run/lock/lvm'] = '/run/lock/lvm' if daemon_type == 'osd': # selinux-policy in the container may not match the host. if HostFacts(ctx).selinux_enabled: cluster_dir = f'{ctx.data_dir}/{fsid}' selinux_folder = f'{cluster_dir}/selinux' if os.path.exists(cluster_dir): if not os.path.exists(selinux_folder): os.makedirs(selinux_folder, mode=0o755) mounts[selinux_folder] = '/sys/fs/selinux:ro' else: logger.error(f'Cluster direcotry {cluster_dir} does not exist.') mounts['/'] = '/rootfs' try: if ctx.shared_ceph_folder: # make easy manager modules/ceph-volume development ceph_folder = pathify(ctx.shared_ceph_folder) if os.path.exists(ceph_folder): mounts[ceph_folder + '/src/ceph-volume/ceph_volume'] = '/usr/lib/python3.6/site-packages/ceph_volume' mounts[ceph_folder + '/src/cephadm/cephadm.py'] = '/usr/sbin/cephadm' mounts[ceph_folder + '/src/pybind/mgr'] = '/usr/share/ceph/mgr' mounts[ceph_folder + '/src/python-common/ceph'] = '/usr/lib/python3.6/site-packages/ceph' mounts[ceph_folder + '/monitoring/ceph-mixin/dashboards_out'] = '/etc/grafana/dashboards/ceph-dashboard' mounts[ceph_folder + '/monitoring/ceph-mixin/prometheus_alerts.yml'] = '/etc/prometheus/ceph/ceph_default_alerts.yml' else: logger.error('{}{}{}'.format(termcolor.red, 'Ceph shared source folder does not exist.', termcolor.end)) except AttributeError: pass if daemon_type in Monitoring.components and daemon_id: data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) log_dir = get_log_dir(fsid, ctx.log_dir) if daemon_type == 'prometheus': mounts[os.path.join(data_dir, 'etc/prometheus')] = '/etc/prometheus:Z' mounts[os.path.join(data_dir, 'data')] = '/prometheus:Z' elif daemon_type == 'loki': mounts[os.path.join(data_dir, 'etc/loki')] = '/etc/loki:Z' mounts[os.path.join(data_dir, 'data')] = '/loki:Z' elif daemon_type == 'promtail': mounts[os.path.join(data_dir, 'etc/promtail')] = '/etc/promtail:Z' mounts[log_dir] = '/var/log/ceph:z' mounts[os.path.join(data_dir, 'data')] = '/promtail:Z' elif daemon_type == 'node-exporter': mounts[os.path.join(data_dir, 'etc/node-exporter')] = '/etc/node-exporter:Z' mounts['/proc'] = '/host/proc:ro' mounts['/sys'] = '/host/sys:ro' mounts['/'] = '/rootfs:ro' elif daemon_type == 'grafana': mounts[os.path.join(data_dir, 'etc/grafana/grafana.ini')] = '/etc/grafana/grafana.ini:Z' mounts[os.path.join(data_dir, 'etc/grafana/provisioning/datasources')] = '/etc/grafana/provisioning/datasources:Z' mounts[os.path.join(data_dir, 'etc/grafana/certs')] = '/etc/grafana/certs:Z' mounts[os.path.join(data_dir, 'data/grafana.db')] = '/var/lib/grafana/grafana.db:Z' elif daemon_type == 'alertmanager': mounts[os.path.join(data_dir, 'etc/alertmanager')] = '/etc/alertmanager:Z' if daemon_type == NFSGanesha.daemon_type: assert daemon_id data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id) nfs_ganesha = NFSGanesha.init(ctx, fsid, daemon_id) 
        mounts.update(nfs_ganesha.get_container_mounts(data_dir))

    if daemon_type == HAproxy.daemon_type:
        assert daemon_id
        data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
        mounts.update(HAproxy.get_container_mounts(data_dir))

    if daemon_type == CephIscsi.daemon_type:
        assert daemon_id
        data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
        # Remove the trailing '.tcmu' from data_dir; tcmu-runner uses the same
        # data_dir as rbd-target-api
        if data_dir.endswith('.tcmu'):
            data_dir = re.sub(r'\.tcmu$', '', data_dir)
        log_dir = get_log_dir(fsid, ctx.log_dir)
        mounts.update(CephIscsi.get_container_mounts(data_dir, log_dir))

    if daemon_type == Keepalived.daemon_type:
        assert daemon_id
        data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
        mounts.update(Keepalived.get_container_mounts(data_dir))

    if daemon_type == CustomContainer.daemon_type:
        assert daemon_id
        cc = CustomContainer.init(ctx, fsid, daemon_id)
        data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
        mounts.update(cc.get_container_mounts(data_dir))

    # Modifications podman makes to /etc/hosts cause issues with
    # certain daemons (specifically referencing the "host.containers.internal"
    # entry being added to /etc/hosts in this case). To avoid that, but still
    # allow users to use /etc/hosts for hostname resolution, we can
    # mount the host's /etc/hosts file.
    # https://tracker.ceph.com/issues/58532
    # https://tracker.ceph.com/issues/57018
    if isinstance(ctx.container_engine, Podman):
        if os.path.exists('/etc/hosts'):
            if '/etc/hosts' not in mounts:
                mounts['/etc/hosts'] = '/etc/hosts:ro'

    return mounts


def get_ceph_volume_container(ctx: CephadmContext,
                              privileged: bool = True,
                              cname: str = '',
                              volume_mounts: Dict[str, str] = {},
                              bind_mounts: Optional[List[List[str]]] = None,
                              args: List[str] = [],
                              envs: Optional[List[str]] = None) -> 'CephContainer':
    if envs is None:
        envs = []
    envs.append('CEPH_VOLUME_SKIP_RESTORECON=yes')
    envs.append('CEPH_VOLUME_DEBUG=1')
    return CephContainer(
        ctx,
        image=ctx.image,
        entrypoint='/usr/sbin/ceph-volume',
        args=args,
        volume_mounts=volume_mounts,
        bind_mounts=bind_mounts,
        envs=envs,
        privileged=privileged,
        cname=cname,
        memory_request=ctx.memory_request,
        memory_limit=ctx.memory_limit,
    )


def set_pids_limit_unlimited(ctx: CephadmContext, container_args: List[str]) -> None:
    # set container's pids-limit to unlimited rather than default (Docker 4096 / Podman 2048)
    # Useful for daemons like iscsi where the default pids-limit limits the number of luns
    # per iscsi target or rgw where increasing the rgw_thread_pool_size to a value near
    # the default pids-limit may cause the container to crash.
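    #
    # A minimal usage sketch (hedged; it mirrors how get_container() below
    # calls this helper, with the podman version check as the real gate):
    #
    #     container_args: List[str] = []
    #     set_pids_limit_unlimited(ctx, container_args)
    #     # newer podman -> ['--pids-limit=-1']; otherwise -> ['--pids-limit=0']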
    if (
        isinstance(ctx.container_engine, Podman)
        and ctx.container_engine.version >= PIDS_LIMIT_UNLIMITED_PODMAN_VERSION
    ):
        container_args.append('--pids-limit=-1')
    else:
        container_args.append('--pids-limit=0')


def get_container(ctx: CephadmContext,
                  fsid: str, daemon_type: str, daemon_id: Union[int, str],
                  privileged: bool = False,
                  ptrace: bool = False,
                  container_args: Optional[List[str]] = None) -> 'CephContainer':
    entrypoint: str = ''
    name: str = ''
    ceph_args: List[str] = []
    envs: List[str] = []
    host_network: bool = True

    if daemon_type in Ceph.daemons:
        envs.append('TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=134217728')
    if container_args is None:
        container_args = []
    if daemon_type in Ceph.daemons or daemon_type in Ceph.gateways:
        set_pids_limit_unlimited(ctx, container_args)
    if daemon_type in ['mon', 'osd']:
        # mon and osd need privileged in order for libudev to query devices
        privileged = True
    if daemon_type == 'rgw':
        entrypoint = '/usr/bin/radosgw'
        name = 'client.rgw.%s' % daemon_id
    elif daemon_type == 'rbd-mirror':
        entrypoint = '/usr/bin/rbd-mirror'
        name = 'client.rbd-mirror.%s' % daemon_id
    elif daemon_type == 'cephfs-mirror':
        entrypoint = '/usr/bin/cephfs-mirror'
        name = 'client.cephfs-mirror.%s' % daemon_id
    elif daemon_type == 'crash':
        entrypoint = '/usr/bin/ceph-crash'
        name = 'client.crash.%s' % daemon_id
    elif daemon_type in ['mon', 'mgr', 'mds', 'osd']:
        entrypoint = '/usr/bin/ceph-' + daemon_type
        name = '%s.%s' % (daemon_type, daemon_id)
    elif daemon_type in Monitoring.components:
        entrypoint = ''
    elif daemon_type in Tracing.components:
        entrypoint = ''
        name = '%s.%s' % (daemon_type, daemon_id)
        config = fetch_configs(ctx)
        Tracing.set_configuration(config, daemon_type)
        envs.extend(Tracing.components[daemon_type].get('envs', []))
    elif daemon_type == NFSGanesha.daemon_type:
        entrypoint = NFSGanesha.entrypoint
        name = '%s.%s' % (daemon_type, daemon_id)
        envs.extend(NFSGanesha.get_container_envs())
    elif daemon_type == CephExporter.daemon_type:
        entrypoint = CephExporter.entrypoint
        name = 'client.ceph-exporter.%s' % daemon_id
    elif daemon_type == HAproxy.daemon_type:
        name = '%s.%s' % (daemon_type, daemon_id)
        container_args.extend(['--user=root'])  # haproxy 2.4 defaults to a different user
    elif daemon_type == Keepalived.daemon_type:
        name = '%s.%s' % (daemon_type, daemon_id)
        envs.extend(Keepalived.get_container_envs())
        container_args.extend(['--cap-add=NET_ADMIN', '--cap-add=NET_RAW'])
    elif daemon_type == CephIscsi.daemon_type:
        entrypoint = CephIscsi.entrypoint
        name = '%s.%s' % (daemon_type, daemon_id)
        # So the container can modprobe iscsi_target_mod and have write perms
        # to configfs, we need to make this a privileged container.
        privileged = True
    elif daemon_type == CustomContainer.daemon_type:
        cc = CustomContainer.init(ctx, fsid, daemon_id)
        entrypoint = cc.entrypoint
        host_network = False
        envs.extend(cc.get_container_envs())
        container_args.extend(cc.get_container_args())

    if daemon_type in Monitoring.components:
        uid, gid = extract_uid_gid_monitoring(ctx, daemon_type)
        monitoring_args = [
            '--user',
            str(uid),
            # FIXME: disable cpu/memory limits for the time being (not supported
            # by ubuntu 18.04 kernel!)
        ]
        container_args.extend(monitoring_args)

        if daemon_type == 'node-exporter':
            # in order to support setting '--path.procfs=/host/proc','--path.sysfs=/host/sys',
            # '--path.rootfs=/rootfs' for node-exporter we need to disable selinux separation
            # between the node-exporter container and the host to avoid selinux denials
            container_args.extend(['--security-opt', 'label=disable'])
    elif daemon_type == 'crash':
        ceph_args = ['-n', name]
    elif daemon_type in Ceph.daemons:
        ceph_args = ['-n', name, '-f']
    elif daemon_type == SNMPGateway.daemon_type:
        sg = SNMPGateway.init(ctx, fsid, daemon_id)
        container_args.append(
            f'--env-file={sg.conf_file_path}'
        )

    # if using podman, set -d, --conmon-pidfile & --cidfile flags
    # so service can have Type=Forking
    if isinstance(ctx.container_engine, Podman):
        runtime_dir = '/run'
        container_args.extend([
            '-d', '--log-driver', 'journald',
            '--conmon-pidfile',
            runtime_dir + '/ceph-%s@%s.%s.service-pid' % (fsid, daemon_type, daemon_id),
            '--cidfile',
            runtime_dir + '/ceph-%s@%s.%s.service-cid' % (fsid, daemon_type, daemon_id),
        ])
        if ctx.container_engine.version >= CGROUPS_SPLIT_PODMAN_VERSION and not ctx.no_cgroups_split:
            container_args.append('--cgroups=split')
        # if /etc/hosts doesn't exist, we can be confident
        # users aren't using it for host name resolution
        # and adding --no-hosts avoids bugs created in certain daemons
        # by modifications podman makes to /etc/hosts
        # https://tracker.ceph.com/issues/58532
        # https://tracker.ceph.com/issues/57018
        if not os.path.exists('/etc/hosts'):
            container_args.extend(['--no-hosts'])

    return CephContainer.for_daemon(
        ctx,
        fsid=fsid,
        daemon_type=daemon_type,
        daemon_id=str(daemon_id),
        entrypoint=entrypoint,
        args=ceph_args + get_daemon_args(ctx, fsid, daemon_type, daemon_id),
        container_args=container_args,
        volume_mounts=get_container_mounts(ctx, fsid, daemon_type, daemon_id),
        bind_mounts=get_container_binds(ctx, fsid, daemon_type, daemon_id),
        envs=envs,
        privileged=privileged,
        ptrace=ptrace,
        host_network=host_network,
    )


def extract_uid_gid(ctx, img='', file_path='/var/lib/ceph'):
    # type: (CephadmContext, str, Union[str, List[str]]) -> Tuple[int, int]

    if not img:
        img = ctx.image

    if isinstance(file_path, str):
        paths = [file_path]
    else:
        paths = file_path

    ex: Optional[Tuple[str, RuntimeError]] = None

    for fp in paths:
        try:
            out = CephContainer(
                ctx,
                image=img,
                entrypoint='stat',
                args=['-c', '%u %g', fp]
            ).run(verbosity=CallVerbosity.QUIET_UNLESS_ERROR)
            uid, gid = out.split(' ')
            return int(uid), int(gid)
        except RuntimeError as e:
            ex = (fp, e)

    if ex:
        raise Error(f'Failed to extract uid/gid for path {ex[0]}: {ex[1]}')

    raise RuntimeError('uid/gid not found')


def deploy_daemon(ctx: CephadmContext, fsid: str, daemon_type: str,
                  daemon_id: Union[int, str], c: Optional['CephContainer'],
                  uid: int, gid: int, config: Optional[str] = None,
                  keyring: Optional[str] = None, osd_fsid: Optional[str] = None,
                  deployment_type: DeploymentType = DeploymentType.DEFAULT,
                  ports: Optional[List[int]] = None) -> None:

    ports = ports or []

    # only check port in use if fresh deployment since service
    # we are redeploying/reconfiguring will already be using the port
    if deployment_type == DeploymentType.DEFAULT:
        if any([port_in_use(ctx, port) for port in ports]):
            if daemon_type == 'mgr':
                # non-fatal for mgr when we are in mgr_standby_modules=false, but we can't
                # tell whether that is the case here.
                logger.warning(
                    f"ceph-mgr TCP port(s) {','.join(map(str, ports))} already in use"
                )
            else:
                raise Error("TCP Port(s) '{}' required for {} already in use".format(','.join(map(str, ports)), daemon_type))

    data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
    if deployment_type == DeploymentType.RECONFIG and not os.path.exists(data_dir):
        raise Error('cannot reconfig, data path %s does not exist' % data_dir)
    if daemon_type == 'mon' and not os.path.exists(data_dir):
        assert config
        assert keyring
        # tmp keyring file
        tmp_keyring = write_tmp(keyring, uid, gid)

        # tmp config file
        tmp_config = write_tmp(config, uid, gid)

        # --mkfs
        create_daemon_dirs(ctx, fsid, daemon_type, daemon_id, uid, gid)
        mon_dir = get_data_dir(fsid, ctx.data_dir, 'mon', daemon_id)
        log_dir = get_log_dir(fsid, ctx.log_dir)
        CephContainer(
            ctx,
            image=ctx.image,
            entrypoint='/usr/bin/ceph-mon',
            args=[
                '--mkfs',
                '-i', str(daemon_id),
                '--fsid', fsid,
                '-c', '/tmp/config',
                '--keyring', '/tmp/keyring',
            ] + get_daemon_args(ctx, fsid, 'mon', daemon_id),
            volume_mounts={
                log_dir: '/var/log/ceph:z',
                mon_dir: '/var/lib/ceph/mon/ceph-%s:z' % (daemon_id),
                tmp_keyring.name: '/tmp/keyring:z',
                tmp_config.name: '/tmp/config:z',
            },
        ).run()

        # write conf
        with write_new(mon_dir + '/config', owner=(uid, gid)) as f:
            f.write(config)
    else:
        # dirs, conf, keyring
        create_daemon_dirs(
            ctx,
            fsid, daemon_type, daemon_id,
            uid, gid,
            config, keyring)

    # only write out unit files and start daemon
    # with systemd if this is not a reconfig
    if deployment_type != DeploymentType.RECONFIG:
        if daemon_type == CephadmAgent.daemon_type:
            config_js = fetch_configs(ctx)
            assert isinstance(config_js, dict)

            cephadm_agent = CephadmAgent(ctx, fsid, daemon_id)
            cephadm_agent.deploy_daemon_unit(config_js)
        else:
            if c:
                deploy_daemon_units(ctx, fsid, uid, gid, daemon_type, daemon_id,
                                    c, osd_fsid=osd_fsid, ports=ports)
            else:
                raise RuntimeError('attempting to deploy a daemon without a container image')

    if not os.path.exists(data_dir + '/unit.created'):
        with write_new(data_dir + '/unit.created', owner=(uid, gid)) as f:
            f.write('mtime is time the daemon deployment was created\n')

    with write_new(data_dir + '/unit.configured', owner=(uid, gid)) as f:
        f.write('mtime is time we were last configured\n')

    update_firewalld(ctx, daemon_type)

    # Open ports explicitly required for the daemon
    if ports:
        fw = Firewalld(ctx)
        fw.open_ports(ports + fw.external_ports.get(daemon_type, []))
        fw.apply_rules()

    # If this was a reconfig and the daemon is not a Ceph daemon, restart it
    # so it can pick up potential changes to its configuration files
    if deployment_type == DeploymentType.RECONFIG and daemon_type not in Ceph.daemons:
        # ceph daemons do not need a restart; others (presumably) do to pick
        # up the new config
        call_throws(ctx, ['systemctl', 'reset-failed', get_unit_name(fsid, daemon_type, daemon_id)])
        call_throws(ctx, ['systemctl', 'restart', get_unit_name(fsid, daemon_type, daemon_id)])


def _write_container_cmd_to_bash(ctx, file_obj, container, comment=None, background=False):
    # type: (CephadmContext, IO[str], CephContainer, Optional[str], Optional[bool]) -> None
    if comment:
        # Sometimes adding a comment, especially if there are multiple containers in one
        # unit file, makes it easier to read and grok.
        file_obj.write('# ' + comment + '\n')

    # Sometimes, adding `--rm` to a run_cmd doesn't work. Let's remove the container manually
    file_obj.write('! ' + ' '.join(container.rm_cmd(old_cname=True)) + ' 2> /dev/null\n')
    file_obj.write('! ' + ' '.join(container.rm_cmd()) + ' 2> /dev/null\n')
    # Sometimes, `podman rm` doesn't find the container. Then you'll have to add `--storage`
    if isinstance(ctx.container_engine, Podman):
        file_obj.write(
            '! '
            + ' '.join([shlex.quote(a) for a in container.rm_cmd(storage=True)])
            + ' 2> /dev/null\n')
        file_obj.write(
            '! '
            + ' '.join([shlex.quote(a) for a in container.rm_cmd(old_cname=True, storage=True)])
            + ' 2> /dev/null\n')

    # container run command
    file_obj.write(
        ' '.join([shlex.quote(a) for a in container.run_cmd()])
        + (' &' if background else '')
        + '\n')


def clean_cgroup(ctx: CephadmContext, fsid: str, unit_name: str) -> None:
    # systemd may fail to clean up cgroups from a previous stopped unit, which will cause the next "systemctl start" to fail.
    # see https://tracker.ceph.com/issues/50998

    CGROUPV2_PATH = Path('/sys/fs/cgroup')
    if not (CGROUPV2_PATH / 'system.slice').exists():
        # Only unified cgroup is affected, skip if not the case
        return

    slice_name = 'system-ceph\\x2d{}.slice'.format(fsid.replace('-', '\\x2d'))
    cg_path = CGROUPV2_PATH / 'system.slice' / slice_name / f'{unit_name}.service'
    if not cg_path.exists():
        return

    def cg_trim(path: Path) -> None:
        for p in path.iterdir():
            if p.is_dir():
                cg_trim(p)
        path.rmdir()
    try:
        cg_trim(cg_path)
    except OSError:
        logger.warning(f'Failed to trim old cgroups {cg_path}')


def deploy_daemon_units(
    ctx: CephadmContext,
    fsid: str,
    uid: int,
    gid: int,
    daemon_type: str,
    daemon_id: Union[int, str],
    c: 'CephContainer',
    enable: bool = True,
    start: bool = True,
    osd_fsid: Optional[str] = None,
    ports: Optional[List[int]] = None,
) -> None:
    # cmd

    def add_stop_actions(f: TextIO, timeout: Optional[int]) -> None:
        # following generated script basically checks if the container exists
        # before stopping it. Exit code will be success either if it doesn't
        # exist or if it exists and is stopped successfully.
        container_exists = f'{ctx.container_engine.path} inspect %s &>/dev/null'
        f.write(f'! {container_exists % c.old_cname} || {" ".join(c.stop_cmd(old_cname=True, timeout=timeout))} \n')
        f.write(f'! {container_exists % c.cname} || {" ".join(c.stop_cmd(timeout=timeout))} \n')

    data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
    run_file_path = data_dir + '/unit.run'
    meta_file_path = data_dir + '/unit.meta'
    with write_new(run_file_path) as f, write_new(meta_file_path) as metaf:

        f.write('set -e\n')

        if daemon_type in Ceph.daemons:
            install_path = find_program('install')
            f.write('{install_path} -d -m0770 -o {uid} -g {gid} /var/run/ceph/{fsid}\n'.format(install_path=install_path, fsid=fsid, uid=uid, gid=gid))

        # pre-start cmd(s)
        if daemon_type == 'osd':
            # osds have a pre-start step
            assert osd_fsid
            simple_fn = os.path.join('/etc/ceph/osd',
                                     '%s-%s.json.adopted-by-cephadm' % (daemon_id, osd_fsid))
            if os.path.exists(simple_fn):
                f.write('# Simple OSDs need chown on startup:\n')
                for n in ['block', 'block.db', 'block.wal']:
                    p = os.path.join(data_dir, n)
                    f.write('[ ! -L {p} ] || chown {uid}:{gid} {p}\n'.format(p=p, uid=uid, gid=gid))
            else:
                # if ceph-volume does not support 'ceph-volume activate', we must
                # do 'ceph-volume lvm activate'.
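                # Design note: probing with a deliberately invalid flag is a
                # side-effect-free way to detect which ceph-volume interface
                # the container image ships; the probe always fails fast, and
                # only the shape of its stderr matters.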
                test_cv = get_ceph_volume_container(
                    ctx,
                    args=['activate', '--bad-option'],
                    volume_mounts=get_container_mounts(ctx, fsid, daemon_type, daemon_id),
                    bind_mounts=get_container_binds(ctx, fsid, daemon_type, daemon_id),
                    cname='ceph-%s-%s.%s-activate-test' % (fsid, daemon_type, daemon_id),
                )
                out, err, ret = call(ctx, test_cv.run_cmd(), verbosity=CallVerbosity.SILENT)
                # bad: ceph-volume: error: unrecognized arguments: activate --bad-option
                # good: ceph-volume: error: unrecognized arguments: --bad-option
                if 'unrecognized arguments: activate' in err:
                    # older ceph-volume without top-level activate or --no-tmpfs
                    cmd = [
                        'lvm', 'activate',
                        str(daemon_id), osd_fsid,
                        '--no-systemd',
                    ]
                else:
                    cmd = [
                        'activate',
                        '--osd-id', str(daemon_id),
                        '--osd-uuid', osd_fsid,
                        '--no-systemd',
                        '--no-tmpfs',
                    ]

                prestart = get_ceph_volume_container(
                    ctx,
                    args=cmd,
                    volume_mounts=get_container_mounts(ctx, fsid, daemon_type, daemon_id),
                    bind_mounts=get_container_binds(ctx, fsid, daemon_type, daemon_id),
                    cname='ceph-%s-%s.%s-activate' % (fsid, daemon_type, daemon_id),
                )
                _write_container_cmd_to_bash(ctx, f, prestart, 'LVM OSDs use ceph-volume lvm activate')
        elif daemon_type == CephIscsi.daemon_type:
            f.write(' '.join(CephIscsi.configfs_mount_umount(data_dir, mount=True)) + '\n')
            ceph_iscsi = CephIscsi.init(ctx, fsid, daemon_id)
            tcmu_container = ceph_iscsi.get_tcmu_runner_container()
            _write_container_cmd_to_bash(ctx, f, tcmu_container, 'iscsi tcmu-runner container', background=True)

        _write_container_cmd_to_bash(ctx, f, c, '%s.%s' % (daemon_type, str(daemon_id)))

        # some metadata about the deploy
        meta: Dict[str, Any] = fetch_meta(ctx)
        meta.update({
            'memory_request': int(ctx.memory_request) if ctx.memory_request else None,
            'memory_limit': int(ctx.memory_limit) if ctx.memory_limit else None,
        })
        if not meta.get('ports'):
            meta['ports'] = ports
        metaf.write(json.dumps(meta, indent=4) + '\n')

    timeout = 30 if daemon_type == 'osd' else None
    # post-stop command(s)
    with write_new(data_dir + '/unit.poststop') as f:
        # this is a fallback to eventually stop any underlying container that was not stopped properly by unit.stop,
        # this could happen in very slow setups as described in the issue https://tracker.ceph.com/issues/58242.
        add_stop_actions(cast(TextIO, f), timeout)
        if daemon_type == 'osd':
            assert osd_fsid
            poststop = get_ceph_volume_container(
                ctx,
                args=[
                    'lvm', 'deactivate',
                    str(daemon_id), osd_fsid,
                ],
                volume_mounts=get_container_mounts(ctx, fsid, daemon_type, daemon_id),
                bind_mounts=get_container_binds(ctx, fsid, daemon_type, daemon_id),
                cname='ceph-%s-%s.%s-deactivate' % (fsid, daemon_type, daemon_id),
            )
            _write_container_cmd_to_bash(ctx, f, poststop, 'deactivate osd')
        elif daemon_type == CephIscsi.daemon_type:
            # make sure we also stop the tcmu container
            runtime_dir = '/run'
            ceph_iscsi = CephIscsi.init(ctx, fsid, daemon_id)
            tcmu_container = ceph_iscsi.get_tcmu_runner_container()
            f.write('! ' + ' '.join(tcmu_container.stop_cmd()) + '\n')
            f.write('! ' + 'rm ' + runtime_dir + '/ceph-%s@%s.%s.service-pid' % (fsid, daemon_type, str(daemon_id) + '.tcmu') + '\n')
            f.write('! ' + 'rm ' + runtime_dir + '/ceph-%s@%s.%s.service-cid' % (fsid, daemon_type, str(daemon_id) + '.tcmu') + '\n')
            f.write(' '.join(CephIscsi.configfs_mount_umount(data_dir, mount=False)) + '\n')

    # stop command(s)
    with write_new(data_dir + '/unit.stop') as f:
        add_stop_actions(cast(TextIO, f), timeout)

    if c:
        with write_new(data_dir + '/unit.image') as f:
            f.write(c.image + '\n')

    # sysctl
    install_sysctl(ctx, fsid, daemon_type)

    # systemd
    install_base_units(ctx, fsid)
    unit = get_unit_file(ctx, fsid)
    unit_file = 'ceph-%[email protected]' % (fsid)
    with write_new(ctx.unit_dir + '/' + unit_file, perms=None) as f:
        f.write(unit)
    call_throws(ctx, ['systemctl', 'daemon-reload'])

    unit_name = get_unit_name(fsid, daemon_type, daemon_id)
    call(ctx, ['systemctl', 'stop', unit_name], verbosity=CallVerbosity.DEBUG)
    call(ctx, ['systemctl', 'reset-failed', unit_name], verbosity=CallVerbosity.DEBUG)
    if enable:
        call_throws(ctx, ['systemctl', 'enable', unit_name])
    if start:
        clean_cgroup(ctx, fsid, unit_name)
        call_throws(ctx, ['systemctl', 'start', unit_name])


class Firewalld(object):

    # for specifying ports we should always open when opening
    # ports for a daemon of that type. Main use case is for ports
    # that we should open when deploying the daemon type but that
    # the daemon itself may not necessarily need to bind to the port.
    # This needs to be handled differently as we don't want to fail
    # deployment if the port cannot be bound to but we still want to
    # open the port in the firewall.
    external_ports: Dict[str, List[int]] = {
        'iscsi': [3260]  # 3260 is the well known iSCSI port
    }

    def __init__(self, ctx):
        # type: (CephadmContext) -> None
        self.ctx = ctx
        self.available = self.check()

    def check(self):
        # type: () -> bool
        self.cmd = find_executable('firewall-cmd')
        if not self.cmd:
            logger.debug('firewalld does not appear to be present')
            return False
        (enabled, state, _) = check_unit(self.ctx, 'firewalld.service')
        if not enabled:
            logger.debug('firewalld.service is not enabled')
            return False
        if state != 'running':
            logger.debug('firewalld.service is not running')
            return False

        logger.info('firewalld ready')
        return True

    def enable_service_for(self, daemon_type):
        # type: (str) -> None
        if not self.available:
            logger.debug('Not possible to enable service <%s>. firewalld.service is not available' % daemon_type)
            return

        if daemon_type == 'mon':
            svc = 'ceph-mon'
        elif daemon_type in ['mgr', 'mds', 'osd']:
            svc = 'ceph'
        elif daemon_type == NFSGanesha.daemon_type:
            svc = 'nfs'
        else:
            return

        if not self.cmd:
            raise RuntimeError('command not defined')

        out, err, ret = call(self.ctx, [self.cmd, '--permanent', '--query-service', svc], verbosity=CallVerbosity.DEBUG)
        if ret:
            logger.info('Enabling firewalld service %s in current zone...' % svc)
            out, err, ret = call(self.ctx, [self.cmd, '--permanent', '--add-service', svc])
            if ret:
                raise RuntimeError(
                    'unable to add service %s to current zone: %s' % (svc, err))
        else:
            logger.debug('firewalld service %s is enabled in current zone' % svc)

    def open_ports(self, fw_ports):
        # type: (List[int]) -> None
        if not self.available:
            logger.debug('Not possible to open ports <%s>. firewalld.service is not available' % fw_ports)
            return

        if not self.cmd:
            raise RuntimeError('command not defined')

        for port in fw_ports:
            tcp_port = str(port) + '/tcp'
            out, err, ret = call(self.ctx, [self.cmd, '--permanent', '--query-port', tcp_port], verbosity=CallVerbosity.DEBUG)
            if ret:
                logger.info('Enabling firewalld port %s in current zone...' % tcp_port)
                out, err, ret = call(self.ctx, [self.cmd, '--permanent', '--add-port', tcp_port])
                if ret:
                    raise RuntimeError('unable to add port %s to current zone: %s' % (tcp_port, err))
            else:
                logger.debug('firewalld port %s is enabled in current zone' % tcp_port)

    def close_ports(self, fw_ports):
        # type: (List[int]) -> None
        if not self.available:
            logger.debug('Not possible to close ports <%s>. firewalld.service is not available' % fw_ports)
            return

        if not self.cmd:
            raise RuntimeError('command not defined')

        for port in fw_ports:
            tcp_port = str(port) + '/tcp'
            out, err, ret = call(self.ctx, [self.cmd, '--permanent', '--query-port', tcp_port], verbosity=CallVerbosity.DEBUG)
            if not ret:
                logger.info('Disabling port %s in current zone...' % tcp_port)
                out, err, ret = call(self.ctx, [self.cmd, '--permanent', '--remove-port', tcp_port])
                if ret:
                    raise RuntimeError('unable to remove port %s from current zone: %s' % (tcp_port, err))
                else:
                    logger.info(f'Port {tcp_port} disabled')
            else:
                logger.info(f'firewalld port {tcp_port} already closed')

    def apply_rules(self):
        # type: () -> None
        if not self.available:
            return

        if not self.cmd:
            raise RuntimeError('command not defined')

        call_throws(self.ctx, [self.cmd, '--reload'])


def update_firewalld(ctx, daemon_type):
    # type: (CephadmContext, str) -> None
    if not ('skip_firewalld' in ctx and ctx.skip_firewalld):
        firewall = Firewalld(ctx)
        firewall.enable_service_for(daemon_type)
        firewall.apply_rules()


def install_sysctl(ctx: CephadmContext, fsid: str, daemon_type: str) -> None:
    """
    Set up sysctl settings
    """
    def _write(conf: Path, lines: List[str]) -> None:
        lines = [
            '# created by cephadm',
            '',
            *lines,
            '',
        ]
        with write_new(conf, owner=None, perms=None) as f:
            f.write('\n'.join(lines))

    conf = Path(ctx.sysctl_dir).joinpath(f'90-ceph-{fsid}-{daemon_type}.conf')
    lines: List = []

    if daemon_type == 'osd':
        lines = OSD.get_sysctl_settings()
    elif daemon_type == 'haproxy':
        lines = HAproxy.get_sysctl_settings()
    elif daemon_type == 'keepalived':
        lines = Keepalived.get_sysctl_settings()
    lines = filter_sysctl_settings(ctx, lines)

    # apply the sysctl settings
    if lines:
        Path(ctx.sysctl_dir).mkdir(mode=0o755, exist_ok=True)
        _write(conf, lines)
        call_throws(ctx, ['sysctl', '--system'])


def sysctl_get(ctx: CephadmContext, variable: str) -> Union[str, None]:
    """
    Read a sysctl setting by executing 'sysctl -b {variable}'
    """
    out, err, code = call(ctx, ['sysctl', '-b', variable])
    return out or None


def filter_sysctl_settings(ctx: CephadmContext, lines: List[str]) -> List[str]:
    """
    Given a list of sysctl settings, examine the system's current configuration
    and return those which are not currently set as described.
    """
    def test_setting(desired_line: str) -> bool:
        # Remove any comments
        comment_start = desired_line.find('#')
        if comment_start != -1:
            desired_line = desired_line[:comment_start]
        desired_line = desired_line.strip()
        if not desired_line or desired_line.isspace():
            return False
        setting, desired_value = map(lambda s: s.strip(), desired_line.split('='))
        if not setting or not desired_value:
            return False
        actual_value = sysctl_get(ctx, setting)
        return desired_value != actual_value
    return list(filter(test_setting, lines))


def migrate_sysctl_dir(ctx: CephadmContext, fsid: str) -> None:
    """
    Cephadm once used '/usr/lib/sysctl.d' for storing sysctl configuration.
    This moves it to '/etc/sysctl.d'.
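
    Illustrative effect (paths are examples only):

        /usr/lib/sysctl.d/90-ceph-<fsid>-osd.conf
            -> /etc/sysctl.d/90-ceph-<fsid>-osd.conf   (via shutil.move)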
""" deprecated_location: str = '/usr/lib/sysctl.d' deprecated_confs: List[str] = glob(f'{deprecated_location}/90-ceph-{fsid}-*.conf') if not deprecated_confs: return file_count: int = len(deprecated_confs) logger.info(f'Found sysctl {file_count} files in deprecated location {deprecated_location}. Starting Migration.') for conf in deprecated_confs: try: shutil.move(conf, ctx.sysctl_dir) file_count -= 1 except shutil.Error as err: if str(err).endswith('already exists'): logger.warning(f'Destination file already exists. Deleting {conf}.') try: os.unlink(conf) file_count -= 1 except OSError as del_err: logger.warning(f'Could not remove {conf}: {del_err}.') else: logger.warning(f'Could not move {conf} from {deprecated_location} to {ctx.sysctl_dir}: {err}') # Log successful migration if file_count == 0: logger.info(f'Successfully migrated sysctl config to {ctx.sysctl_dir}.') return # Log partially successful / unsuccessful migration files_processed: int = len(deprecated_confs) if file_count < files_processed: status: str = f'partially successful (failed {file_count}/{files_processed})' elif file_count == files_processed: status = 'unsuccessful' logger.warning(f'Migration of sysctl configuration {status}. You may want to perform a migration manually.') def install_base_units(ctx, fsid): # type: (CephadmContext, str) -> None """ Set up ceph.target and ceph-$fsid.target units. """ # global unit existed = os.path.exists(ctx.unit_dir + '/ceph.target') with write_new(ctx.unit_dir + '/ceph.target', perms=None) as f: f.write('[Unit]\n' 'Description=All Ceph clusters and services\n' '\n' '[Install]\n' 'WantedBy=multi-user.target\n') if not existed: # we disable before enable in case a different ceph.target # (from the traditional package) is present; while newer # systemd is smart enough to disable the old # (/lib/systemd/...) and enable the new (/etc/systemd/...), # some older versions of systemd error out with EEXIST. call_throws(ctx, ['systemctl', 'disable', 'ceph.target']) call_throws(ctx, ['systemctl', 'enable', 'ceph.target']) call_throws(ctx, ['systemctl', 'start', 'ceph.target']) # cluster unit existed = os.path.exists(ctx.unit_dir + '/ceph-%s.target' % fsid) with write_new(ctx.unit_dir + f'/ceph-{fsid}.target', perms=None) as f: f.write( '[Unit]\n' 'Description=Ceph cluster {fsid}\n' 'PartOf=ceph.target\n' 'Before=ceph.target\n' '\n' '[Install]\n' 'WantedBy=multi-user.target ceph.target\n'.format( fsid=fsid) ) if not existed: call_throws(ctx, ['systemctl', 'enable', 'ceph-%s.target' % fsid]) call_throws(ctx, ['systemctl', 'start', 'ceph-%s.target' % fsid]) # don't overwrite file in order to allow users to manipulate it if os.path.exists(ctx.logrotate_dir + f'/ceph-{fsid}'): return # logrotate for the cluster with write_new(ctx.logrotate_dir + f'/ceph-{fsid}', perms=None) as f: """ This is a bit sloppy in that the killall/pkill will touch all ceph daemons in all containers, but I don't see an elegant way to send SIGHUP *just* to the daemons for this cluster. (1) systemd kill -s will get the signal to podman, but podman will exit. (2) podman kill will get the signal to the first child (bash), but that isn't the ceph daemon. This is simpler and should be harmless. 
""" f.write("""# created by cephadm /var/log/ceph/%s/*.log { rotate 7 daily compress sharedscripts postrotate killall -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror cephfs-mirror || pkill -1 -x 'ceph-mon|ceph-mgr|ceph-mds|ceph-osd|ceph-fuse|radosgw|rbd-mirror|cephfs-mirror' || true endscript missingok notifempty su root root } """ % fsid) def get_unit_file(ctx, fsid): # type: (CephadmContext, str) -> str extra_args = '' if isinstance(ctx.container_engine, Podman): extra_args = ('ExecStartPre=-/bin/rm -f %t/%n-pid %t/%n-cid\n' 'ExecStopPost=-/bin/rm -f %t/%n-pid %t/%n-cid\n' 'Type=forking\n' 'PIDFile=%t/%n-pid\n') if ctx.container_engine.version >= CGROUPS_SPLIT_PODMAN_VERSION: extra_args += 'Delegate=yes\n' docker = isinstance(ctx.container_engine, Docker) u = """# generated by cephadm [Unit] Description=Ceph %i for {fsid} # According to: # http://www.freedesktop.org/wiki/Software/systemd/NetworkTarget # these can be removed once ceph-mon will dynamically change network # configuration. After=network-online.target local-fs.target time-sync.target{docker_after} Wants=network-online.target local-fs.target time-sync.target {docker_requires} PartOf=ceph-{fsid}.target Before=ceph-{fsid}.target [Service] LimitNOFILE=1048576 LimitNPROC=1048576 EnvironmentFile=-/etc/environment ExecStart=/bin/bash {data_dir}/{fsid}/%i/unit.run ExecStop=-/bin/bash -c 'bash {data_dir}/{fsid}/%i/unit.stop' ExecStopPost=-/bin/bash {data_dir}/{fsid}/%i/unit.poststop KillMode=none Restart=on-failure RestartSec=10s TimeoutStartSec=200 TimeoutStopSec=120 StartLimitInterval=30min StartLimitBurst=5 {extra_args} [Install] WantedBy=ceph-{fsid}.target """.format(fsid=fsid, data_dir=ctx.data_dir, extra_args=extra_args, # if docker, we depend on docker.service docker_after=' docker.service' if docker else '', docker_requires='Requires=docker.service\n' if docker else '') return u ################################## class CephContainer: def __init__(self, ctx: CephadmContext, image: str, entrypoint: str, args: List[str] = [], volume_mounts: Dict[str, str] = {}, cname: str = '', container_args: List[str] = [], envs: Optional[List[str]] = None, privileged: bool = False, ptrace: bool = False, bind_mounts: Optional[List[List[str]]] = None, init: Optional[bool] = None, host_network: bool = True, memory_request: Optional[str] = None, memory_limit: Optional[str] = None, ) -> None: self.ctx = ctx self.image = image self.entrypoint = entrypoint self.args = args self.volume_mounts = volume_mounts self._cname = cname self.container_args = container_args self.envs = envs self.privileged = privileged self.ptrace = ptrace self.bind_mounts = bind_mounts if bind_mounts else [] self.init = init if init else ctx.container_init self.host_network = host_network self.memory_request = memory_request self.memory_limit = memory_limit @classmethod def for_daemon(cls, ctx: CephadmContext, fsid: str, daemon_type: str, daemon_id: str, entrypoint: str, args: List[str] = [], volume_mounts: Dict[str, str] = {}, container_args: List[str] = [], envs: Optional[List[str]] = None, privileged: bool = False, ptrace: bool = False, bind_mounts: Optional[List[List[str]]] = None, init: Optional[bool] = None, host_network: bool = True, memory_request: Optional[str] = None, memory_limit: Optional[str] = None, ) -> 'CephContainer': return cls( ctx, image=ctx.image, entrypoint=entrypoint, args=args, volume_mounts=volume_mounts, cname='ceph-%s-%s.%s' % (fsid, daemon_type, daemon_id), container_args=container_args, envs=envs, privileged=privileged, 
            ptrace=ptrace,
            bind_mounts=bind_mounts,
            init=init,
            host_network=host_network,
            memory_request=memory_request,
            memory_limit=memory_limit,
        )

    @property
    def cname(self) -> str:
        """
        podman adds the current container name to the /etc/hosts
        file. Turns out, python's `socket.getfqdn()` differs from
        `hostname -f`, when container names contain dots:

        # podman run --name foo.bar.baz.com ceph/ceph /bin/bash
        [root@sebastians-laptop /]# cat /etc/hosts
        127.0.0.1   localhost
        ::1         localhost
        127.0.1.1   sebastians-laptop foo.bar.baz.com

        [root@sebastians-laptop /]# hostname -f
        sebastians-laptop

        [root@sebastians-laptop /]# python3 -c 'import socket; print(socket.getfqdn())'
        foo.bar.baz.com

        Fascinatingly, this doesn't happen when using dashes.
        """
        return self._cname.replace('.', '-')

    @cname.setter
    def cname(self, val: str) -> None:
        self._cname = val

    @property
    def old_cname(self) -> str:
        return self._cname

    def run_cmd(self) -> List[str]:
        cmd_args: List[str] = [
            str(self.ctx.container_engine.path),
            'run',
            '--rm',
            '--ipc=host',
            # some containers (ahem, haproxy) override this, but we want a fast
            # shutdown always (and, more importantly, a successful exit even if we
            # fall back to SIGKILL).
            '--stop-signal=SIGTERM',
        ]

        if isinstance(self.ctx.container_engine, Podman):
            if os.path.exists('/etc/ceph/podman-auth.json'):
                cmd_args.append('--authfile=/etc/ceph/podman-auth.json')

        if isinstance(self.ctx.container_engine, Docker):
            cmd_args.extend(['--ulimit', 'nofile=1048576'])

        envs: List[str] = [
            '-e', 'CONTAINER_IMAGE=%s' % self.image,
            '-e', 'NODE_NAME=%s' % get_hostname(),
        ]
        vols: List[str] = []
        binds: List[str] = []

        if self.memory_request:
            cmd_args.extend(['-e', 'POD_MEMORY_REQUEST', str(self.memory_request)])
        if self.memory_limit:
            cmd_args.extend(['-e', 'POD_MEMORY_LIMIT', str(self.memory_limit)])
            cmd_args.extend(['--memory', str(self.memory_limit)])

        if self.host_network:
            cmd_args.append('--net=host')
        if self.entrypoint:
            cmd_args.extend(['--entrypoint', self.entrypoint])
        if self.privileged:
            cmd_args.extend([
                '--privileged',
                # let OSD etc read block devs that haven't been chowned
                '--group-add=disk'])
        if self.ptrace and not self.privileged:
            # if privileged, the SYS_PTRACE cap is already added
            # in addition, --cap-add and --privileged are mutually
            # exclusive since podman >= 2.0
            cmd_args.append('--cap-add=SYS_PTRACE')
        if self.init:
            cmd_args.append('--init')
        if self.cname:
            cmd_args.extend(['--name', self.cname])
        if self.envs:
            for env in self.envs:
                envs.extend(['-e', env])

        vols = sum(
            [['-v', '%s:%s' % (host_dir, container_dir)]
             for host_dir, container_dir in self.volume_mounts.items()], [])
        binds = sum([['--mount', '{}'.format(','.join(bind))]
                     for bind in self.bind_mounts], [])

        return \
            cmd_args + self.container_args + \
            envs + vols + binds + \
            [self.image] + self.args  # type: ignore

    def shell_cmd(self, cmd: List[str]) -> List[str]:
        cmd_args: List[str] = [
            str(self.ctx.container_engine.path),
            'run',
            '--rm',
            '--ipc=host',
        ]
        envs: List[str] = [
            '-e', 'CONTAINER_IMAGE=%s' % self.image,
            '-e', 'NODE_NAME=%s' % get_hostname(),
        ]
        vols: List[str] = []
        binds: List[str] = []

        if self.host_network:
            cmd_args.append('--net=host')
        if self.ctx.no_hosts:
            cmd_args.append('--no-hosts')
        if self.privileged:
            cmd_args.extend([
                '--privileged',
                # let OSD etc read block devs that haven't been chowned
                '--group-add=disk',
            ])
        if self.init:
            cmd_args.append('--init')
        if self.envs:
            for env in self.envs:
                envs.extend(['-e', env])

        vols = sum(
            [['-v', '%s:%s' % (host_dir, container_dir)]
             for host_dir, container_dir in self.volume_mounts.items()],
            [])
        binds = sum([['--mount', '{}'.format(','.join(bind))]
                     for bind in self.bind_mounts], [])

        return cmd_args + self.container_args + envs + vols + binds + [
            '--entrypoint', cmd[0],
            self.image,
        ] + cmd[1:]

    def exec_cmd(self, cmd):
        # type: (List[str]) -> List[str]
        cname = get_running_container_name(self.ctx, self)
        if not cname:
            raise Error('unable to find container "{}"'.format(self.cname))
        return [
            str(self.ctx.container_engine.path),
            'exec',
        ] + self.container_args + [
            self.cname,
        ] + cmd

    def rm_cmd(self, old_cname: bool = False, storage: bool = False) -> List[str]:
        ret = [
            str(self.ctx.container_engine.path),
            'rm', '-f',
        ]
        if storage:
            ret.append('--storage')
        if old_cname:
            ret.append(self.old_cname)
        else:
            ret.append(self.cname)
        return ret

    def stop_cmd(self, old_cname: bool = False, timeout: Optional[int] = None) -> List[str]:
        if timeout is None:
            ret = [
                str(self.ctx.container_engine.path),
                'stop', self.old_cname if old_cname else self.cname,
            ]
        else:
            ret = [
                str(self.ctx.container_engine.path),
                'stop', '-t', f'{timeout}',
                self.old_cname if old_cname else self.cname,
            ]
        return ret

    def run(self, timeout=DEFAULT_TIMEOUT, verbosity=CallVerbosity.VERBOSE_ON_FAILURE):
        # type: (Optional[int], CallVerbosity) -> str
        out, _, _ = call_throws(self.ctx, self.run_cmd(),
                                desc=self.entrypoint, timeout=timeout, verbosity=verbosity)
        return out


#####################################


class MgrListener(Thread):
    def __init__(self, agent: 'CephadmAgent') -> None:
        self.agent = agent
        self.stop = False
        super(MgrListener, self).__init__(target=self.run)

    def run(self) -> None:
        listenSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listenSocket.bind(('0.0.0.0', int(self.agent.listener_port)))
        listenSocket.settimeout(60)
        listenSocket.listen(1)
        ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ssl_ctx.verify_mode = ssl.CERT_REQUIRED
        ssl_ctx.load_cert_chain(self.agent.listener_cert_path, self.agent.listener_key_path)
        ssl_ctx.load_verify_locations(self.agent.ca_path)
        secureListenSocket = ssl_ctx.wrap_socket(listenSocket, server_side=True)
        while not self.stop:
            try:
                try:
                    conn, _ = secureListenSocket.accept()
                except socket.timeout:
                    continue
                try:
                    length: int = int(conn.recv(10).decode())
                except Exception as e:
                    err_str = f'Failed to extract length of payload from message: {e}'
                    conn.send(err_str.encode())
                    logger.error(err_str)
                    continue
                while True:
                    payload = conn.recv(length).decode()
                    if not payload:
                        break
                    try:
                        data: Dict[Any, Any] = json.loads(payload)
                        self.handle_json_payload(data)
                    except Exception as e:
                        err_str = f'Failed to extract json payload from message: {e}'
                        conn.send(err_str.encode())
                        logger.error(err_str)
                    else:
                        conn.send(b'ACK')
                        if 'config' in data:
                            self.agent.wakeup()
                        self.agent.ls_gatherer.wakeup()
                        self.agent.volume_gatherer.wakeup()
                        logger.debug(f'Got mgr message {data}')
            except Exception as e:
                logger.error(f'Mgr Listener encountered exception: {e}')

    def shutdown(self) -> None:
        self.stop = True

    def handle_json_payload(self, data: Dict[Any, Any]) -> None:
        self.agent.ack = int(data['counter'])
        if 'config' in data:
            logger.info('Received new config from mgr')
            config = data['config']
            for filename in config:
                if filename in self.agent.required_files:
                    file_path = os.path.join(self.agent.daemon_dir, filename)
                    with write_new(file_path) as f:
                        f.write(config[filename])
            self.agent.pull_conf_settings()
            self.agent.wakeup()


class CephadmAgent():

    daemon_type = 'agent'
    default_port = 8498
    loop_interval = 30
    stop = False

    required_files = [
        'agent.json',
        'keyring',
        'root_cert.pem',
        'listener.crt',
        'listener.key',
    ]
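
    # Illustrative lifecycle (a hedged sketch based on how this module wires
    # the agent together): deploy_daemon() calls deploy_daemon_unit() with a
    # config dict containing the required_files above plus unit files; the
    # systemd unit then re-invokes this script as 'cephadm agent ...', which
    # lands in command_agent() below and ultimately calls run().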
    def __init__(self, ctx: CephadmContext, fsid: str, daemon_id: Union[int, str] = ''):
        self.ctx = ctx
        self.fsid = fsid
        self.daemon_id = daemon_id
        self.starting_port = 14873
        self.target_ip = ''
        self.target_port = ''
        self.host = ''
        self.daemon_dir = os.path.join(ctx.data_dir, self.fsid, f'{self.daemon_type}.{self.daemon_id}')
        self.config_path = os.path.join(self.daemon_dir, 'agent.json')
        self.keyring_path = os.path.join(self.daemon_dir, 'keyring')
        self.ca_path = os.path.join(self.daemon_dir, 'root_cert.pem')
        self.listener_cert_path = os.path.join(self.daemon_dir, 'listener.crt')
        self.listener_key_path = os.path.join(self.daemon_dir, 'listener.key')
        self.listener_port = ''
        self.ack = 1
        self.event = Event()
        self.mgr_listener = MgrListener(self)
        self.ls_gatherer = AgentGatherer(self, lambda: self._get_ls(), 'Ls')
        self.volume_gatherer = AgentGatherer(self, lambda: self._ceph_volume(enhanced=False), 'Volume')
        self.device_enhanced_scan = False
        self.recent_iteration_run_times: List[float] = [0.0, 0.0, 0.0]
        self.recent_iteration_index: int = 0
        self.cached_ls_values: Dict[str, Dict[str, str]] = {}

    def validate(self, config: Dict[str, str] = {}) -> None:
        # check for the required files
        for fname in self.required_files:
            if fname not in config:
                raise Error('required file missing from config: %s' % fname)

    def deploy_daemon_unit(self, config: Dict[str, str] = {}) -> None:
        if not config:
            raise Error('Agent needs a config')
        assert isinstance(config, dict)
        self.validate(config)

        # Create the required config files in the daemon's dir, with restricted permissions
        for filename in config:
            if filename in self.required_files:
                file_path = os.path.join(self.daemon_dir, filename)
                with write_new(file_path) as f:
                    f.write(config[filename])

        unit_run_path = os.path.join(self.daemon_dir, 'unit.run')
        with write_new(unit_run_path) as f:
            f.write(self.unit_run())

        meta: Dict[str, Any] = fetch_meta(self.ctx)
        meta_file_path = os.path.join(self.daemon_dir, 'unit.meta')
        with write_new(meta_file_path) as f:
            f.write(json.dumps(meta, indent=4) + '\n')

        unit_file_path = os.path.join(self.ctx.unit_dir, self.unit_name())
        with write_new(unit_file_path) as f:
            f.write(self.unit_file())

        call_throws(self.ctx, ['systemctl', 'daemon-reload'])
        call(self.ctx, ['systemctl', 'stop', self.unit_name()], verbosity=CallVerbosity.DEBUG)
        call(self.ctx, ['systemctl', 'reset-failed', self.unit_name()], verbosity=CallVerbosity.DEBUG)
        call_throws(self.ctx, ['systemctl', 'enable', '--now', self.unit_name()])

    def unit_name(self) -> str:
        return '{}.service'.format(get_unit_name(self.fsid, self.daemon_type, self.daemon_id))

    def unit_run(self) -> str:
        py3 = shutil.which('python3')
        binary_path = os.path.realpath(sys.argv[0])
        return ('set -e\n' + f'{py3} {binary_path} agent --fsid {self.fsid} --daemon-id {self.daemon_id} &\n')

    def unit_file(self) -> str:
        return """# generated by cephadm
[Unit]
Description=cephadm agent for cluster {fsid}

PartOf=ceph-{fsid}.target
Before=ceph-{fsid}.target

[Service]
Type=forking
ExecStart=/bin/bash {data_dir}/unit.run
Restart=on-failure
RestartSec=10s

[Install]
WantedBy=ceph-{fsid}.target
""".format(
            fsid=self.fsid,
            data_dir=self.daemon_dir
        )

    def shutdown(self) -> None:
        self.stop = True
        if self.mgr_listener.is_alive():
            self.mgr_listener.shutdown()
        if self.ls_gatherer.is_alive():
            self.ls_gatherer.shutdown()
        if self.volume_gatherer.is_alive():
            self.volume_gatherer.shutdown()

    def wakeup(self) -> None:
        self.event.set()

    def pull_conf_settings(self) -> None:
        try:
            with open(self.config_path, 'r') as f:
                config = json.load(f)
                self.target_ip = config['target_ip']
                self.target_port = config['target_port']
                self.loop_interval = int(config['refresh_period'])
                self.starting_port = int(config['listener_port'])
                self.host = config['host']
                use_lsm = config['device_enhanced_scan']
        except Exception as e:
            self.shutdown()
            raise Error(f'Failed to get agent target ip and port from config: {e}')

        try:
            with open(self.keyring_path, 'r') as f:
                self.keyring = f.read()
        except Exception as e:
            self.shutdown()
            raise Error(f'Failed to get agent keyring: {e}')

        assert self.target_ip and self.target_port

        self.device_enhanced_scan = False
        if use_lsm.lower() == 'true':
            self.device_enhanced_scan = True
        self.volume_gatherer.update_func(lambda: self._ceph_volume(enhanced=self.device_enhanced_scan))

    def run(self) -> None:
        self.pull_conf_settings()

        try:
            for _ in range(1001):
                if not port_in_use(self.ctx, self.starting_port):
                    self.listener_port = str(self.starting_port)
                    break
                self.starting_port += 1
            if not self.listener_port:
                raise Error(f'All 1000 ports starting at {str(self.starting_port - 1001)} taken.')
        except Exception as e:
            raise Error(f'Failed to pick port for agent to listen on: {e}')

        if not self.mgr_listener.is_alive():
            self.mgr_listener.start()

        if not self.ls_gatherer.is_alive():
            self.ls_gatherer.start()

        if not self.volume_gatherer.is_alive():
            self.volume_gatherer.start()

        ssl_ctx = ssl.create_default_context()
        ssl_ctx.check_hostname = True
        ssl_ctx.verify_mode = ssl.CERT_REQUIRED
        ssl_ctx.load_verify_locations(self.ca_path)

        while not self.stop:
            start_time = time.monotonic()
            ack = self.ack

            # part of the networks info is returned as a set which is not JSON
            # serializable. The set must be converted to a list
            networks = list_networks(self.ctx)
            networks_list: Dict[str, Dict[str, List[str]]] = {}
            for key in networks.keys():
                networks_list[key] = {}
                for k, v in networks[key].items():
                    networks_list[key][k] = list(v)

            data = json.dumps({'host': self.host,
                               'ls': (self.ls_gatherer.data if self.ack == self.ls_gatherer.ack
                                      and self.ls_gatherer.data is not None else []),
                               'networks': networks_list,
                               'facts': HostFacts(self.ctx).dump(),
                               'volume': (self.volume_gatherer.data if self.ack == self.volume_gatherer.ack
                                          and self.volume_gatherer.data is not None else ''),
                               'ack': str(ack),
                               'keyring': self.keyring,
                               'port': self.listener_port})
            data = data.encode('ascii')

            url = f'https://{self.target_ip}:{self.target_port}/data/'
            try:
                req = Request(url, data, {'Content-Type': 'application/json'})
                send_time = time.monotonic()
                with urlopen(req, context=ssl_ctx) as response:
                    response_str = response.read()
                    response_json = json.loads(response_str)
                    total_request_time = datetime.timedelta(seconds=(time.monotonic() - send_time)).total_seconds()
                    logger.info(f'Received mgr response: "{response_json["result"]}" {total_request_time} seconds after sending request.')
            except Exception as e:
                logger.error(f'Failed to send metadata to mgr: {e}')

            end_time = time.monotonic()
            run_time = datetime.timedelta(seconds=(end_time - start_time))
            self.recent_iteration_run_times[self.recent_iteration_index] = run_time.total_seconds()
            self.recent_iteration_index = (self.recent_iteration_index + 1) % 3
            run_time_average = sum(self.recent_iteration_run_times, 0.0) / len([t for t in self.recent_iteration_run_times if t])

            self.event.wait(max(self.loop_interval - int(run_time_average), 0))
            self.event.clear()

    def _ceph_volume(self, enhanced: bool = False) -> Tuple[str, bool]:
        self.ctx.command = 'inventory --format=json'.split()
        if enhanced:
            self.ctx.command.append('--with-lsm')
        self.ctx.fsid = self.fsid

        stream = io.StringIO()
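        # capture command_ceph_volume()'s stdout (the inventory JSON) in the
        # StringIO buffer above rather than letting it print to our stdout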
        with redirect_stdout(stream):
            command_ceph_volume(self.ctx)
        stdout = stream.getvalue()
        if stdout:
            return (stdout, False)
        else:
            raise Exception('ceph-volume returned empty value')

    def _daemon_ls_subset(self) -> Dict[str, Dict[str, Any]]:
        # gets a subset of ls info quickly. The results of this will tell us if our
        # cached info is still good or if we need to run the full ls again.
        # for legacy containers, we just grab the full info. For cephadmv1 containers,
        # we only grab enabled, state, mem_usage and container id. If container id has
        # not changed for any daemon, we assume our cached info is good.
        daemons: Dict[str, Dict[str, Any]] = {}
        data_dir = self.ctx.data_dir
        seen_memusage = {}  # type: Dict[str, int]
        out, err, code = call(
            self.ctx,
            [self.ctx.container_engine.path, 'stats', '--format', '{{.ID}},{{.MemUsage}}', '--no-stream'],
            verbosity=CallVerbosity.DEBUG
        )
        seen_memusage_cid_len, seen_memusage = _parse_mem_usage(code, out)
        # we need a mapping from container names to ids. Later we will convert daemon
        # names to container names to get a daemon's container id to see if it has changed
        out, err, code = call(
            self.ctx,
            [self.ctx.container_engine.path, 'ps', '--format', '{{.ID}},{{.Names}}', '--no-trunc'],
            verbosity=CallVerbosity.DEBUG
        )
        name_id_mapping: Dict[str, str] = self._parse_container_id_name(code, out)
        for i in os.listdir(data_dir):
            if i in ['mon', 'osd', 'mds', 'mgr']:
                daemon_type = i
                for j in os.listdir(os.path.join(data_dir, i)):
                    if '-' not in j:
                        continue
                    (cluster, daemon_id) = j.split('-', 1)
                    legacy_unit_name = 'ceph-%s@%s' % (daemon_type, daemon_id)
                    (enabled, state, _) = check_unit(self.ctx, legacy_unit_name)
                    daemons[f'{daemon_type}.{daemon_id}'] = {
                        'style': 'legacy',
                        'name': '%s.%s' % (daemon_type, daemon_id),
                        'fsid': self.ctx.fsid if self.ctx.fsid is not None else 'unknown',
                        'systemd_unit': legacy_unit_name,
                        'enabled': 'true' if enabled else 'false',
                        'state': state,
                    }
            elif is_fsid(i):
                fsid = str(i)  # convince mypy that fsid is a str here
                for j in os.listdir(os.path.join(data_dir, i)):
                    if '.' in j and os.path.isdir(os.path.join(data_dir, fsid, j)):
                        (daemon_type, daemon_id) = j.split('.', 1)
                        unit_name = get_unit_name(fsid, daemon_type, daemon_id)
                        (enabled, state, _) = check_unit(self.ctx, unit_name)
                        daemons[j] = {
                            'style': 'cephadm:v1',
                            'systemd_unit': unit_name,
                            'enabled': 'true' if enabled else 'false',
                            'state': state,
                        }
                        c = CephContainer.for_daemon(self.ctx, self.ctx.fsid, daemon_type, daemon_id, 'bash')
                        container_id: Optional[str] = None
                        for name in (c.cname, c.old_cname):
                            if name in name_id_mapping:
                                container_id = name_id_mapping[name]
                                break
                        daemons[j]['container_id'] = container_id
                        if container_id:
                            daemons[j]['memory_usage'] = seen_memusage.get(container_id[0:seen_memusage_cid_len])
        return daemons

    def _parse_container_id_name(self, code: int, out: str) -> Dict[str, str]:
        # map container names to ids from ps output
        name_id_mapping = {}  # type: Dict[str, str]
        if not code:
            for line in out.splitlines():
                id, name = line.split(',')
                name_id_mapping[name] = id
        return name_id_mapping

    def _get_ls(self) -> Tuple[List[Dict[str, str]], bool]:
        if not self.cached_ls_values:
            logger.info('No cached ls output. Running full daemon ls')
            ls = list_daemons(self.ctx)
            for d in ls:
                self.cached_ls_values[d['name']] = d
            return (ls, True)
        else:
            ls_subset = self._daemon_ls_subset()
            need_full_ls = False
            state_change = False
            if set(self.cached_ls_values.keys()) != set(ls_subset.keys()):
                # case for a new daemon in ls or an old daemon no longer appearing.
# If that happens we need a full ls logger.info('Change detected in state of daemons. Running full daemon ls') self.cached_ls_values = {} ls = list_daemons(self.ctx) for d in ls: self.cached_ls_values[d['name']] = d return (ls, True) for daemon, info in self.cached_ls_values.items(): if info['style'] == 'legacy': # for legacy containers, ls_subset just grabs all the info self.cached_ls_values[daemon] = ls_subset[daemon] else: if info['container_id'] != ls_subset[daemon]['container_id']: # case for container id having changed. We need full ls as # info we didn't grab like version and start time could have changed need_full_ls = True break # want to know if a daemons state change because in those cases we want # to report back quicker if ( self.cached_ls_values[daemon]['enabled'] != ls_subset[daemon]['enabled'] or self.cached_ls_values[daemon]['state'] != ls_subset[daemon]['state'] ): state_change = True # if we reach here, container id matched. Update the few values we do track # from ls subset: state, enabled, memory_usage. self.cached_ls_values[daemon]['enabled'] = ls_subset[daemon]['enabled'] self.cached_ls_values[daemon]['state'] = ls_subset[daemon]['state'] if 'memory_usage' in ls_subset[daemon]: self.cached_ls_values[daemon]['memory_usage'] = ls_subset[daemon]['memory_usage'] if need_full_ls: logger.info('Change detected in state of daemons. Running full daemon ls') ls = list_daemons(self.ctx) self.cached_ls_values = {} for d in ls: self.cached_ls_values[d['name']] = d return (ls, True) else: ls = [info for daemon, info in self.cached_ls_values.items()] return (ls, state_change) class AgentGatherer(Thread): def __init__(self, agent: 'CephadmAgent', func: Callable, gatherer_type: str = 'Unnamed', initial_ack: int = 0) -> None: self.agent = agent self.func = func self.gatherer_type = gatherer_type self.ack = initial_ack self.event = Event() self.data: Any = None self.stop = False self.recent_iteration_run_times: List[float] = [0.0, 0.0, 0.0] self.recent_iteration_index: int = 0 super(AgentGatherer, self).__init__(target=self.run) def run(self) -> None: while not self.stop: try: start_time = time.monotonic() ack = self.agent.ack change = False try: self.data, change = self.func() except Exception as e: logger.error(f'{self.gatherer_type} Gatherer encountered exception gathering data: {e}') self.data = None if ack != self.ack or change: self.ack = ack self.agent.wakeup() end_time = time.monotonic() run_time = datetime.timedelta(seconds=(end_time - start_time)) self.recent_iteration_run_times[self.recent_iteration_index] = run_time.total_seconds() self.recent_iteration_index = (self.recent_iteration_index + 1) % 3 run_time_average = sum(self.recent_iteration_run_times, 0.0) / len([t for t in self.recent_iteration_run_times if t]) self.event.wait(max(self.agent.loop_interval - int(run_time_average), 0)) self.event.clear() except Exception as e: logger.error(f'{self.gatherer_type} Gatherer encountered exception: {e}') def shutdown(self) -> None: self.stop = True def wakeup(self) -> None: self.event.set() def update_func(self, func: Callable) -> None: self.func = func def command_agent(ctx: CephadmContext) -> None: agent = CephadmAgent(ctx, ctx.fsid, ctx.daemon_id) if not os.path.isdir(agent.daemon_dir): raise Error(f'Agent daemon directory {agent.daemon_dir} does not exist. 
Perhaps agent was never deployed?') agent.run() ################################## @executes_early def command_version(ctx): # type: (CephadmContext) -> int import importlib try: vmod = importlib.import_module('_version') except ImportError: print('cephadm version UNKNOWN') return 1 _unset = '<UNSET>' print('cephadm version {0} ({1}) {2} ({3})'.format( getattr(vmod, 'CEPH_GIT_NICE_VER', _unset), getattr(vmod, 'CEPH_GIT_VER', _unset), getattr(vmod, 'CEPH_RELEASE_NAME', _unset), getattr(vmod, 'CEPH_RELEASE_TYPE', _unset), )) return 0 ################################## @default_image def command_pull(ctx): # type: (CephadmContext) -> int try: _pull_image(ctx, ctx.image, ctx.insecure) except UnauthorizedRegistryError: err_str = 'Failed to pull container image. Check that host(s) are logged into the registry' logger.debug(f'Pulling image for `command_pull` failed: {err_str}') raise Error(err_str) return command_inspect_image(ctx) def _pull_image(ctx, image, insecure=False): # type: (CephadmContext, str, bool) -> None logger.info('Pulling container image %s...' % image) ignorelist = [ 'error creating read-write layer with ID', 'net/http: TLS handshake timeout', 'Digest did not match, expected', ] cmd = [ctx.container_engine.path, 'pull', image] if isinstance(ctx.container_engine, Podman): if insecure: cmd.append('--tls-verify=false') if os.path.exists('/etc/ceph/podman-auth.json'): cmd.append('--authfile=/etc/ceph/podman-auth.json') cmd_str = ' '.join(cmd) for sleep_secs in [1, 4, 25]: out, err, ret = call(ctx, cmd, verbosity=CallVerbosity.QUIET_UNLESS_ERROR) if not ret: return if 'unauthorized' in err: raise UnauthorizedRegistryError() if not any(pattern in err for pattern in ignorelist): raise Error('Failed command: %s' % cmd_str) logger.info('`%s` failed transiently. Retrying. waiting %s seconds...' 
% (cmd_str, sleep_secs)) time.sleep(sleep_secs) raise Error('Failed command: %s: maximum retries reached' % cmd_str) ################################## @require_image @infer_image def command_inspect_image(ctx): # type: (CephadmContext) -> int out, err, ret = call_throws(ctx, [ ctx.container_engine.path, 'inspect', '--format', '{{.ID}},{{.RepoDigests}}', ctx.image]) if ret: return errno.ENOENT info_from = get_image_info_from_inspect(out.strip(), ctx.image) ver = CephContainer(ctx, ctx.image, 'ceph', ['--version']).run().strip() info_from['ceph_version'] = ver print(json.dumps(info_from, indent=4, sort_keys=True)) return 0 def normalize_image_digest(digest: str) -> str: """ Normal case: >>> normalize_image_digest('ceph/ceph', 'docker.io') 'docker.io/ceph/ceph' No change: >>> normalize_image_digest('quay.ceph.io/ceph/ceph', 'docker.io') 'quay.ceph.io/ceph/ceph' >>> normalize_image_digest('docker.io/ubuntu', 'docker.io') 'docker.io/ubuntu' >>> normalize_image_digest('localhost/ceph', 'docker.io') 'localhost/ceph' """ known_shortnames = [ 'ceph/ceph', 'ceph/daemon', 'ceph/daemon-base', ] for image in known_shortnames: if digest.startswith(image): return f'{DEFAULT_REGISTRY}/{digest}' return digest def get_image_info_from_inspect(out, image): # type: (str, str) -> Dict[str, Union[str,List[str]]] image_id, digests = out.split(',', 1) if not out: raise Error('inspect {}: empty result'.format(image)) r = { 'image_id': normalize_container_id(image_id) } # type: Dict[str, Union[str,List[str]]] if digests: r['repo_digests'] = list(map(normalize_image_digest, digests[1: -1].split(' '))) return r ################################## def check_subnet(subnets: str) -> Tuple[int, List[int], str]: """Determine whether the given string is a valid subnet :param subnets: subnet string, a single definition or comma separated list of CIDR subnets :returns: return code, IP version list of the subnets and msg describing any errors validation errors """ rc = 0 versions = set() errors = [] subnet_list = subnets.split(',') for subnet in subnet_list: # ensure the format of the string is as expected address/netmask subnet = subnet.strip() if not re.search(r'\/\d+$', subnet): rc = 1 errors.append(f'{subnet} is not in CIDR format (address/netmask)') continue try: v = ipaddress.ip_network(subnet).version versions.add(v) except ValueError as e: rc = 1 errors.append(f'{subnet} invalid: {str(e)}') return rc, list(versions), ', '.join(errors) def unwrap_ipv6(address): # type: (str) -> str if address.startswith('[') and address.endswith(']'): return address[1: -1] return address def wrap_ipv6(address): # type: (str) -> str # We cannot assume it's already wrapped or even an IPv6 address if # it's already wrapped it'll not pass (like if it's a hostname) and trigger # the ValueError try: if ipaddress.ip_address(address).version == 6: return f'[{address}]' except ValueError: pass return address def is_ipv6(address): # type: (str) -> bool address = unwrap_ipv6(address) try: return ipaddress.ip_address(address).version == 6 except ValueError: logger.warning('Address: {} is not a valid IP address'.format(address)) return False def ip_in_subnets(ip_addr: str, subnets: str) -> bool: """Determine if the ip_addr belongs to any of the subnets list.""" subnet_list = [x.strip() for x in subnets.split(',')] for subnet in subnet_list: ip_address = unwrap_ipv6(ip_addr) if is_ipv6(ip_addr) else ip_addr if ipaddress.ip_address(ip_address) in ipaddress.ip_network(subnet): return True return False def parse_mon_addrv(addrv_arg: str) -> 
List[EndPoint]: """Parse mon-addrv param into a list of mon end points.""" r = re.compile(r':(\d+)$') addrv_args = [] addr_arg = addrv_arg if addr_arg[0] != '[' or addr_arg[-1] != ']': raise Error(f'--mon-addrv value {addr_arg} must use square brackets') for addr in addr_arg[1: -1].split(','): hasport = r.findall(addr) if not hasport: raise Error(f'--mon-addrv value {addr_arg} must include port number') port_str = hasport[0] addr = re.sub(r'^v\d+:', '', addr) # strip off v1: or v2: prefix base_ip = addr[0:-(len(port_str)) - 1] addrv_args.append(EndPoint(base_ip, int(port_str))) return addrv_args def parse_mon_ip(mon_ip: str) -> List[EndPoint]: """Parse mon-ip param into a list of mon end points.""" r = re.compile(r':(\d+)$') addrv_args = [] hasport = r.findall(mon_ip) if hasport: port_str = hasport[0] base_ip = mon_ip[0:-(len(port_str)) - 1] addrv_args.append(EndPoint(base_ip, int(port_str))) else: # No port provided: use fixed ports for ceph monitor addrv_args.append(EndPoint(mon_ip, 3300)) addrv_args.append(EndPoint(mon_ip, 6789)) return addrv_args def build_addrv_params(addrv: List[EndPoint]) -> str: """Convert mon end-points (ip:port) into the format: [v[1|2]:ip:port1]""" if len(addrv) > 2: raise Error('Detected a local mon-addrv list with more than 2 entries.') port_to_ver: Dict[int, str] = {6789: 'v1', 3300: 'v2'} addr_arg_list: List[str] = [] for ep in addrv: if ep.port in port_to_ver: ver = port_to_ver[ep.port] else: ver = 'v2' # default mon protocol version if port is not provided logger.warning(f'Using msgr2 protocol for unrecognized port {ep}') addr_arg_list.append(f'{ver}:{ep.ip}:{ep.port}') addr_arg = '[{0}]'.format(','.join(addr_arg_list)) return addr_arg def get_public_net_from_cfg(ctx: CephadmContext) -> Optional[str]: """Get mon public network from configuration file.""" cp = read_config(ctx.config) if not cp.has_option('global', 'public_network'): return None # Ensure all public CIDR networks are valid public_network = cp.get('global', 'public_network').strip('"').strip("'") rc, _, err_msg = check_subnet(public_network) if rc: raise Error(f'Invalid public_network {public_network} parameter: {err_msg}') # Ensure all public CIDR networks are configured locally configured_subnets = set([x.strip() for x in public_network.split(',')]) local_subnets = set([x[0] for x in list_networks(ctx).items()]) valid_public_net = False for net in configured_subnets: if net in local_subnets: valid_public_net = True else: logger.warning(f'The public CIDR network {net} (from -c conf file) is not configured locally.') if not valid_public_net: raise Error(f'None of the public CIDR network(s) {configured_subnets} (from -c conf file) is configured locally.') # Ensure public_network is compatible with the provided mon-ip (or mon-addrv) if ctx.mon_ip: if not ip_in_subnets(ctx.mon_ip, public_network): raise Error(f'The provided --mon-ip {ctx.mon_ip} does not belong to any public_network(s) {public_network}') elif ctx.mon_addrv: addrv_args = parse_mon_addrv(ctx.mon_addrv) for addrv in addrv_args: if not ip_in_subnets(addrv.ip, public_network): raise Error(f'The provided --mon-addrv {addrv.ip} ip does not belong to any public_network(s) {public_network}') logger.debug(f'Using mon public network from configuration file {public_network}') return public_network def infer_mon_network(ctx: CephadmContext, mon_eps: List[EndPoint]) -> Optional[str]: """Infer mon public network from local network.""" # Make sure IP is configured locally, and then figure out the CIDR network mon_networks = [] for net, ifaces in 
list_networks(ctx).items(): # build local_ips list for the specified network local_ips: List[Union[ipaddress.IPv4Address, ipaddress.IPv6Address]] = [] for _, ls in ifaces.items(): local_ips.extend([ipaddress.ip_address(ip) for ip in ls]) # check if any of mon ips belong to this net for mon_ep in mon_eps: try: if ipaddress.ip_address(unwrap_ipv6(mon_ep.ip)) in local_ips: mon_networks.append(net) logger.info(f'Mon IP `{mon_ep.ip}` is in CIDR network `{net}`') except ValueError as e: logger.warning(f'Cannot infer CIDR network for mon IP `{mon_ep.ip}` : {e}') if not mon_networks: raise Error('Cannot infer CIDR network. Pass --skip-mon-network to configure it later') else: logger.debug(f'Inferred mon public CIDR from local network configuration {mon_networks}') mon_networks = list(set(mon_networks)) # remove duplicates return ','.join(mon_networks) def prepare_mon_addresses(ctx: CephadmContext) -> Tuple[str, bool, Optional[str]]: """Get mon public network configuration.""" ipv6 = False addrv_args: List[EndPoint] = [] mon_addrv: str = '' # i.e: [v2:192.168.100.1:3300,v1:192.168.100.1:6789] if ctx.mon_ip: ipv6 = is_ipv6(ctx.mon_ip) if ipv6: ctx.mon_ip = wrap_ipv6(ctx.mon_ip) addrv_args = parse_mon_ip(ctx.mon_ip) mon_addrv = build_addrv_params(addrv_args) elif ctx.mon_addrv: ipv6 = ctx.mon_addrv.count('[') > 1 addrv_args = parse_mon_addrv(ctx.mon_addrv) mon_addrv = ctx.mon_addrv else: raise Error('must specify --mon-ip or --mon-addrv') if addrv_args: for end_point in addrv_args: check_ip_port(ctx, end_point) logger.debug(f'Base mon IP(s) is {addrv_args}, mon addrv is {mon_addrv}') mon_network = None if not ctx.skip_mon_network: mon_network = get_public_net_from_cfg(ctx) or infer_mon_network(ctx, addrv_args) return (mon_addrv, ipv6, mon_network) def prepare_cluster_network(ctx: CephadmContext) -> Tuple[str, bool]: # the cluster network may not exist on this node, so all we can do is # validate that the address given is valid ipv4 or ipv6 subnet ipv6_cluster_network = False cp = read_config(ctx.config) cluster_network = ctx.cluster_network if cluster_network is None and cp.has_option('global', 'cluster_network'): cluster_network = cp.get('global', 'cluster_network').strip('"').strip("'") if cluster_network: cluster_nets = set([x.strip() for x in cluster_network.split(',')]) local_subnets = set([x[0] for x in list_networks(ctx).items()]) for net in cluster_nets: if net not in local_subnets: logger.warning(f'The cluster CIDR network {net} is not configured locally.') rc, versions, err_msg = check_subnet(cluster_network) if rc: raise Error(f'Invalid --cluster-network parameter: {err_msg}') ipv6_cluster_network = True if 6 in versions else False else: logger.info('Internal network (--cluster-network) has not ' 'been provided, OSD replication will default to ' 'the public_network') return cluster_network, ipv6_cluster_network def create_initial_keys( ctx: CephadmContext, uid: int, gid: int, mgr_id: str ) -> Tuple[str, str, str, Any, Any]: # type: ignore _image = ctx.image # create some initial keys logger.info('Creating initial keys...') mon_key = CephContainer( ctx, image=_image, entrypoint='/usr/bin/ceph-authtool', args=['--gen-print-key'], ).run().strip() admin_key = CephContainer( ctx, image=_image, entrypoint='/usr/bin/ceph-authtool', args=['--gen-print-key'], ).run().strip() mgr_key = CephContainer( ctx, image=_image, entrypoint='/usr/bin/ceph-authtool', args=['--gen-print-key'], ).run().strip() keyring = ('[mon.]\n' '\tkey = %s\n' '\tcaps mon = allow *\n' '[client.admin]\n' '\tkey = %s\n' '\tcaps 
mon = allow *\n' '\tcaps mds = allow *\n' '\tcaps mgr = allow *\n' '\tcaps osd = allow *\n' '[mgr.%s]\n' '\tkey = %s\n' '\tcaps mon = profile mgr\n' '\tcaps mds = allow *\n' '\tcaps osd = allow *\n' % (mon_key, admin_key, mgr_id, mgr_key)) admin_keyring = write_tmp('[client.admin]\n' '\tkey = ' + admin_key + '\n', uid, gid) # tmp keyring file bootstrap_keyring = write_tmp(keyring, uid, gid) return (mon_key, mgr_key, admin_key, bootstrap_keyring, admin_keyring) def create_initial_monmap( ctx: CephadmContext, uid: int, gid: int, fsid: str, mon_id: str, mon_addr: str ) -> Any: logger.info('Creating initial monmap...') monmap = write_tmp('', 0, 0) out = CephContainer( ctx, image=ctx.image, entrypoint='/usr/bin/monmaptool', args=[ '--create', '--clobber', '--fsid', fsid, '--addv', mon_id, mon_addr, '/tmp/monmap' ], volume_mounts={ monmap.name: '/tmp/monmap:z', }, ).run() logger.debug(f'monmaptool for {mon_id} {mon_addr} on {out}') # pass monmap file to ceph user for use by ceph-mon --mkfs below os.fchown(monmap.fileno(), uid, gid) return monmap def prepare_create_mon( ctx: CephadmContext, uid: int, gid: int, fsid: str, mon_id: str, bootstrap_keyring_path: str, monmap_path: str ) -> Tuple[str, str]: logger.info('Creating mon...') create_daemon_dirs(ctx, fsid, 'mon', mon_id, uid, gid) mon_dir = get_data_dir(fsid, ctx.data_dir, 'mon', mon_id) log_dir = get_log_dir(fsid, ctx.log_dir) out = CephContainer( ctx, image=ctx.image, entrypoint='/usr/bin/ceph-mon', args=[ '--mkfs', '-i', mon_id, '--fsid', fsid, '-c', '/dev/null', '--monmap', '/tmp/monmap', '--keyring', '/tmp/keyring', ] + get_daemon_args(ctx, fsid, 'mon', mon_id), volume_mounts={ log_dir: '/var/log/ceph:z', mon_dir: '/var/lib/ceph/mon/ceph-%s:z' % (mon_id), bootstrap_keyring_path: '/tmp/keyring:z', monmap_path: '/tmp/monmap:z', }, ).run() logger.debug(f'create mon.{mon_id} on {out}') return (mon_dir, log_dir) def create_mon( ctx: CephadmContext, uid: int, gid: int, fsid: str, mon_id: str ) -> None: mon_c = get_container(ctx, fsid, 'mon', mon_id) ctx.meta_properties = {'service_name': 'mon'} deploy_daemon(ctx, fsid, 'mon', mon_id, mon_c, uid, gid, config=None, keyring=None) def wait_for_mon( ctx: CephadmContext, mon_id: str, mon_dir: str, admin_keyring_path: str, config_path: str ) -> None: logger.info('Waiting for mon to start...') c = CephContainer( ctx, image=ctx.image, entrypoint='/usr/bin/ceph', args=[ 'status'], volume_mounts={ mon_dir: '/var/lib/ceph/mon/ceph-%s:z' % (mon_id), admin_keyring_path: '/etc/ceph/ceph.client.admin.keyring:z', config_path: '/etc/ceph/ceph.conf:z', }, ) # wait for the service to become available def is_mon_available(): # type: () -> bool timeout = ctx.timeout if ctx.timeout else 60 # seconds out, err, ret = call(ctx, c.run_cmd(), desc=c.entrypoint, timeout=timeout, verbosity=CallVerbosity.QUIET_UNLESS_ERROR) return ret == 0 is_available(ctx, 'mon', is_mon_available) def create_mgr( ctx: CephadmContext, uid: int, gid: int, fsid: str, mgr_id: str, mgr_key: str, config: str, clifunc: Callable ) -> None: logger.info('Creating mgr...') mgr_keyring = '[mgr.%s]\n\tkey = %s\n' % (mgr_id, mgr_key) mgr_c = get_container(ctx, fsid, 'mgr', mgr_id) # Note:the default port used by the Prometheus node exporter is opened in fw ctx.meta_properties = {'service_name': 'mgr'} ports = [9283, 8765] if not ctx.skip_monitoring_stack: ports.append(8443) deploy_daemon(ctx, fsid, 'mgr', mgr_id, mgr_c, uid, gid, config=config, keyring=mgr_keyring, ports=ports) # wait for the service to become available logger.info('Waiting for mgr to 
start...') def is_mgr_available(): # type: () -> bool timeout = ctx.timeout if ctx.timeout else 60 # seconds try: out = clifunc(['status', '-f', 'json-pretty'], timeout=timeout, verbosity=CallVerbosity.QUIET_UNLESS_ERROR) j = json.loads(out) return j.get('mgrmap', {}).get('available', False) except Exception as e: logger.debug('status failed: %s' % e) return False is_available(ctx, 'mgr', is_mgr_available) def prepare_ssh( ctx: CephadmContext, cli: Callable, wait_for_mgr_restart: Callable ) -> None: cli(['cephadm', 'set-user', ctx.ssh_user]) if ctx.ssh_config: logger.info('Using provided ssh config...') mounts = { pathify(ctx.ssh_config.name): '/tmp/cephadm-ssh-config:z', } cli(['cephadm', 'set-ssh-config', '-i', '/tmp/cephadm-ssh-config'], extra_mounts=mounts) if ctx.ssh_private_key and ctx.ssh_public_key: logger.info('Using provided ssh keys...') mounts = { pathify(ctx.ssh_private_key.name): '/tmp/cephadm-ssh-key:z', pathify(ctx.ssh_public_key.name): '/tmp/cephadm-ssh-key.pub:z' } cli(['cephadm', 'set-priv-key', '-i', '/tmp/cephadm-ssh-key'], extra_mounts=mounts) cli(['cephadm', 'set-pub-key', '-i', '/tmp/cephadm-ssh-key.pub'], extra_mounts=mounts) ssh_pub = cli(['cephadm', 'get-pub-key']) else: logger.info('Generating ssh key...') cli(['cephadm', 'generate-key']) ssh_pub = cli(['cephadm', 'get-pub-key']) with open(ctx.output_pub_ssh_key, 'w') as f: f.write(ssh_pub) logger.info('Wrote public SSH key to %s' % ctx.output_pub_ssh_key) authorize_ssh_key(ssh_pub, ctx.ssh_user) host = get_hostname() logger.info('Adding host %s...' % host) try: args = ['orch', 'host', 'add', host] if ctx.mon_ip: args.append(unwrap_ipv6(ctx.mon_ip)) elif ctx.mon_addrv: addrv_args = parse_mon_addrv(ctx.mon_addrv) args.append(unwrap_ipv6(addrv_args[0].ip)) cli(args) except RuntimeError as e: raise Error('Failed to add host <%s>: %s' % (host, e)) for t in ['mon', 'mgr']: if not ctx.orphan_initial_daemons: logger.info('Deploying %s service with default placement...' % t) cli(['orch', 'apply', t]) else: logger.info('Deploying unmanaged %s service...' % t) cli(['orch', 'apply', t, '--unmanaged']) if not ctx.orphan_initial_daemons: logger.info('Deploying crash service with default placement...') cli(['orch', 'apply', 'crash']) if not ctx.skip_monitoring_stack: for t in ['ceph-exporter', 'prometheus', 'grafana', 'node-exporter', 'alertmanager']: logger.info('Deploying %s service with default placement...' % t) try: cli(['orch', 'apply', t]) except RuntimeError: ctx.error_code = -errno.EINVAL logger.error(f'Failed to apply service type {t}. ' 'Perhaps the ceph version being bootstrapped does not support it') if ctx.with_centralized_logging: for t in ['loki', 'promtail']: logger.info('Deploying %s service with default placement...' % t) try: cli(['orch', 'apply', t]) except RuntimeError: ctx.error_code = -errno.EINVAL logger.error(f'Failed to apply service type {t}. 
' 'Perhaps the ceph version being bootstrapped does not support it') def enable_cephadm_mgr_module( cli: Callable, wait_for_mgr_restart: Callable ) -> None: logger.info('Enabling cephadm module...') cli(['mgr', 'module', 'enable', 'cephadm']) wait_for_mgr_restart() logger.info('Setting orchestrator backend to cephadm...') cli(['orch', 'set', 'backend', 'cephadm']) def prepare_dashboard( ctx: CephadmContext, uid: int, gid: int, cli: Callable, wait_for_mgr_restart: Callable ) -> None: # Configure SSL port (cephadm only allows to configure dashboard SSL port) # if the user does not want to use SSL he can change this setting once the cluster is up cli(['config', 'set', 'mgr', 'mgr/dashboard/ssl_server_port', str(ctx.ssl_dashboard_port)]) # configuring dashboard parameters logger.info('Enabling the dashboard module...') cli(['mgr', 'module', 'enable', 'dashboard']) wait_for_mgr_restart() # dashboard crt and key if ctx.dashboard_key and ctx.dashboard_crt: logger.info('Using provided dashboard certificate...') mounts = { pathify(ctx.dashboard_crt.name): '/tmp/dashboard.crt:z', pathify(ctx.dashboard_key.name): '/tmp/dashboard.key:z' } cli(['dashboard', 'set-ssl-certificate', '-i', '/tmp/dashboard.crt'], extra_mounts=mounts) cli(['dashboard', 'set-ssl-certificate-key', '-i', '/tmp/dashboard.key'], extra_mounts=mounts) else: logger.info('Generating a dashboard self-signed certificate...') cli(['dashboard', 'create-self-signed-cert']) logger.info('Creating initial admin user...') password = ctx.initial_dashboard_password or generate_password() tmp_password_file = write_tmp(password, uid, gid) cmd = ['dashboard', 'ac-user-create', ctx.initial_dashboard_user, '-i', '/tmp/dashboard.pw', 'administrator', '--force-password'] if not ctx.dashboard_password_noupdate: cmd.append('--pwd-update-required') cli(cmd, extra_mounts={pathify(tmp_password_file.name): '/tmp/dashboard.pw:z'}) logger.info('Fetching dashboard port number...') out = cli(['config', 'get', 'mgr', 'mgr/dashboard/ssl_server_port']) port = int(out) # Open dashboard port if not ('skip_firewalld' in ctx and ctx.skip_firewalld): fw = Firewalld(ctx) fw.open_ports([port]) fw.apply_rules() logger.info('Ceph Dashboard is now available at:\n\n' '\t URL: https://%s:%s/\n' '\t User: %s\n' '\tPassword: %s\n' % ( get_fqdn(), port, ctx.initial_dashboard_user, password)) def prepare_bootstrap_config( ctx: CephadmContext, fsid: str, mon_addr: str, image: str ) -> str: cp = read_config(ctx.config) if not cp.has_section('global'): cp.add_section('global') cp.set('global', 'fsid', fsid) cp.set('global', 'mon_host', mon_addr) cp.set('global', 'container_image', image) if not cp.has_section('mon'): cp.add_section('mon') if ( not cp.has_option('mon', 'auth_allow_insecure_global_id_reclaim') and not cp.has_option('mon', 'auth allow insecure global id reclaim') ): cp.set('mon', 'auth_allow_insecure_global_id_reclaim', 'false') if ctx.single_host_defaults: logger.info('Adjusting default settings to suit single-host cluster...') # replicate across osds, not hosts if ( not cp.has_option('global', 'osd_crush_chooseleaf_type') and not cp.has_option('global', 'osd crush chooseleaf type') ): cp.set('global', 'osd_crush_chooseleaf_type', '0') # replica 2x if ( not cp.has_option('global', 'osd_pool_default_size') and not cp.has_option('global', 'osd pool default size') ): cp.set('global', 'osd_pool_default_size', '2') # disable mgr standby modules (so we can colocate multiple mgrs on one host) if not cp.has_section('mgr'): cp.add_section('mgr') if ( not cp.has_option('mgr', 
'mgr_standby_modules') and not cp.has_option('mgr', 'mgr standby modules') ): cp.set('mgr', 'mgr_standby_modules', 'false') if ctx.log_to_file: cp.set('global', 'log_to_file', 'true') cp.set('global', 'log_to_stderr', 'false') cp.set('global', 'log_to_journald', 'false') cp.set('global', 'mon_cluster_log_to_file', 'true') cp.set('global', 'mon_cluster_log_to_stderr', 'false') cp.set('global', 'mon_cluster_log_to_journald', 'false') cpf = StringIO() cp.write(cpf) config = cpf.getvalue() if ctx.registry_json or ctx.registry_url: command_registry_login(ctx) return config def finish_bootstrap_config( ctx: CephadmContext, fsid: str, config: str, mon_id: str, mon_dir: str, mon_network: Optional[str], ipv6: bool, cli: Callable, cluster_network: Optional[str], ipv6_cluster_network: bool ) -> None: if not ctx.no_minimize_config: logger.info('Assimilating anything we can from ceph.conf...') cli([ 'config', 'assimilate-conf', '-i', '/var/lib/ceph/mon/ceph-%s/config' % mon_id ], { mon_dir: '/var/lib/ceph/mon/ceph-%s:z' % mon_id }) logger.info('Generating new minimal ceph.conf...') cli([ 'config', 'generate-minimal-conf', '-o', '/var/lib/ceph/mon/ceph-%s/config' % mon_id ], { mon_dir: '/var/lib/ceph/mon/ceph-%s:z' % mon_id }) # re-read our minimized config with open(mon_dir + '/config', 'r') as f: config = f.read() logger.info('Restarting the monitor...') call_throws(ctx, [ 'systemctl', 'restart', get_unit_name(fsid, 'mon', mon_id) ]) elif 'image' in ctx and ctx.image: # we still want to assimilate the given container image if provided cli(['config', 'set', 'global', 'container_image', f'{ctx.image}']) if mon_network: cp = read_config(ctx.config) cfg_section = 'global' if cp.has_option('global', 'public_network') else 'mon' logger.info(f'Setting public_network to {mon_network} in {cfg_section} config section') cli(['config', 'set', cfg_section, 'public_network', mon_network]) if cluster_network: logger.info(f'Setting cluster_network to {cluster_network}') cli(['config', 'set', 'global', 'cluster_network', cluster_network]) if ipv6 or ipv6_cluster_network: logger.info('Enabling IPv6 (ms_bind_ipv6) binding') cli(['config', 'set', 'global', 'ms_bind_ipv6', 'true']) with open(ctx.output_config, 'w') as f: f.write(config) logger.info('Wrote config to %s' % ctx.output_config) pass def _extract_host_info_from_applied_spec(f: Iterable[str]) -> List[Dict[str, str]]: # overall goal of this function is to go through an applied spec and find # the hostname (and addr is provided) for each host spec in the applied spec. # Generally, we should be able to just pass the spec to the mgr module where # proper yaml parsing can happen, but for host specs in particular we want to # be able to distribute ssh keys, which requires finding the hostname (and addr # if possible) for each potential host spec in the applied spec. specs: List[List[str]] = [] current_spec: List[str] = [] for line in f: if re.search(r'^---\s+', line): if current_spec: specs.append(current_spec) current_spec = [] else: line = line.strip() if line: current_spec.append(line) if current_spec: specs.append(current_spec) host_specs: List[List[str]] = [] for spec in specs: for line in spec: if 'service_type' in line: try: _, type = line.split(':') type = type.strip() if type == 'host': host_specs.append(spec) except ValueError as e: spec_str = '\n'.join(spec) logger.error(f'Failed to pull service_type from spec:\n{spec_str}. 
Got error: {e}') break spec_str = '\n'.join(spec) logger.error(f'Failed to find service_type within spec:\n{spec_str}') host_dicts = [] for s in host_specs: host_dict = _extract_host_info_from_spec(s) # if host_dict is empty here, we failed to pull the hostname # for the host from the spec. This should have already been logged # so at this point we just don't want to include it in our output if host_dict: host_dicts.append(host_dict) return host_dicts def _extract_host_info_from_spec(host_spec: List[str]) -> Dict[str, str]: # note:for our purposes here, we only really want the hostname # and address of the host from each of these specs in order to # be able to distribute ssh keys. We will later apply the spec # through the mgr module where proper yaml parsing can be done # The returned dicts from this function should only contain # one or two entries, one (required) for hostname, one (optional) for addr # { # hostname: <hostname> # addr: <ip-addr> # } # if we fail to find the hostname, an empty dict is returned host_dict = {} # type: Dict[str, str] for line in host_spec: for field in ['hostname', 'addr']: if field in line: try: _, field_value = line.split(':') field_value = field_value.strip() host_dict[field] = field_value except ValueError as e: spec_str = '\n'.join(host_spec) logger.error(f'Error trying to pull {field} from host spec:\n{spec_str}. Got error: {e}') if 'hostname' not in host_dict: spec_str = '\n'.join(host_spec) logger.error(f'Could not find hostname in host spec:\n{spec_str}') return {} return host_dict def _distribute_ssh_keys(ctx: CephadmContext, host_info: Dict[str, str], bootstrap_hostname: str) -> int: # copy ssh key to hosts in host spec (used for apply spec) ssh_key = CEPH_DEFAULT_PUBKEY if ctx.ssh_public_key: ssh_key = ctx.ssh_public_key.name if bootstrap_hostname != host_info['hostname']: if 'addr' in host_info: addr = host_info['addr'] else: addr = host_info['hostname'] out, err, code = call(ctx, ['sudo', '-u', ctx.ssh_user, 'ssh-copy-id', '-f', '-i', ssh_key, '-o StrictHostKeyChecking=no', '%s@%s' % (ctx.ssh_user, addr)]) if code: logger.error('\nCopying ssh key to host %s at address %s failed!\n' % (host_info['hostname'], addr)) return 1 else: logger.info('Added ssh key to host %s at address %s' % (host_info['hostname'], addr)) return 0 def save_cluster_config(ctx: CephadmContext, uid: int, gid: int, fsid: str) -> None: """Save cluster configuration to the per fsid directory """ def copy_file(src: str, dst: str) -> None: if src: shutil.copyfile(src, dst) conf_dir = f'{ctx.data_dir}/{fsid}/{CEPH_CONF_DIR}' makedirs(conf_dir, uid, gid, DATA_DIR_MODE) if os.path.exists(conf_dir): logger.info(f'Saving cluster configuration to {conf_dir} directory') copy_file(ctx.output_config, os.path.join(conf_dir, CEPH_CONF)) copy_file(ctx.output_keyring, os.path.join(conf_dir, CEPH_KEYRING)) # ctx.output_pub_ssh_key may not exist if user has provided custom ssh keys if (os.path.exists(ctx.output_pub_ssh_key)): copy_file(ctx.output_pub_ssh_key, os.path.join(conf_dir, CEPH_PUBKEY)) else: logger.warning(f'Cannot create cluster configuration directory {conf_dir}') def rollback(func: FuncT) -> FuncT: """ """ @wraps(func) def _rollback(ctx: CephadmContext) -> Any: try: return func(ctx) except ClusterAlreadyExists: # another cluster with the provided fsid already exists: don't remove. 
raise except (KeyboardInterrupt, Exception) as e: logger.error(f'{type(e).__name__}: {e}') if ctx.cleanup_on_failure: logger.info('\n\n' '\t***************\n' '\tCephadm hit an issue during cluster installation. Current cluster files will be deleted automatically,\n' '\tto disable this behaviour you can pass the --no-cleanup-on-failure flag. In case of any previous\n' '\tbroken installation user must use the following command to completely delete the broken cluster:\n\n' '\t> cephadm rm-cluster --force --zap-osds --fsid <fsid>\n\n' '\tfor more information please refer to https://docs.ceph.com/en/latest/cephadm/operations/#purging-a-cluster\n' '\t***************\n\n') _rm_cluster(ctx, keep_logs=False, zap_osds=False) else: logger.info('\n\n' '\t***************\n' '\tCephadm hit an issue during cluster installation. Current cluster files will NOT BE DELETED automatically to change\n' '\tthis behaviour you can pass the --cleanup-on-failure. To remove this broken cluster manually please run:\n\n' f'\t > cephadm rm-cluster --force --fsid {ctx.fsid}\n\n' '\tin case of any previous broken installation user must use the rm-cluster command to delete the broken cluster:\n\n' '\t > cephadm rm-cluster --force --zap-osds --fsid <fsid>\n\n' '\tfor more information please refer to https://docs.ceph.com/en/latest/cephadm/operations/#purging-a-cluster\n' '\t***************\n\n') raise return cast(FuncT, _rollback) @rollback @default_image def command_bootstrap(ctx): # type: (CephadmContext) -> int ctx.error_code = 0 if not ctx.output_config: ctx.output_config = os.path.join(ctx.output_dir, CEPH_CONF) if not ctx.output_keyring: ctx.output_keyring = os.path.join(ctx.output_dir, CEPH_KEYRING) if not ctx.output_pub_ssh_key: ctx.output_pub_ssh_key = os.path.join(ctx.output_dir, CEPH_PUBKEY) if bool(ctx.ssh_private_key) is not bool(ctx.ssh_public_key): raise Error('--ssh-private-key and --ssh-public-key must be provided together or not at all.') if ctx.fsid: data_dir_base = os.path.join(ctx.data_dir, ctx.fsid) if os.path.exists(data_dir_base): raise ClusterAlreadyExists(f"A cluster with the same fsid '{ctx.fsid}' already exists.") else: logger.warning('Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts.') # initial vars ctx.fsid = ctx.fsid or make_fsid() fsid = ctx.fsid if not is_fsid(fsid): raise Error('not an fsid: %s' % fsid) # verify output files for f in [ctx.output_config, ctx.output_keyring, ctx.output_pub_ssh_key]: if not ctx.allow_overwrite: if os.path.exists(f): raise ClusterAlreadyExists('%s already exists; delete or pass --allow-overwrite to overwrite' % f) dirname = os.path.dirname(f) if dirname and not os.path.exists(dirname): fname = os.path.basename(f) logger.info(f'Creating directory {dirname} for {fname}') try: # use makedirs to create intermediate missing dirs os.makedirs(dirname, 0o755) except PermissionError: raise Error(f'Unable to create {dirname} due to permissions failure. Retry with root, or sudo or preallocate the directory.') (user_conf, _) = get_config_and_keyring(ctx) if ctx.ssh_user != 'root': check_ssh_connectivity(ctx) if not ctx.skip_prepare_host: command_prepare_host(ctx) else: logger.info('Skip prepare_host') logger.info('Cluster fsid: %s' % fsid) hostname = get_hostname() if '.' 
in hostname and not ctx.allow_fqdn_hostname: raise Error('hostname is a fully qualified domain name (%s); either fix (e.g., "sudo hostname %s" or similar) or pass --allow-fqdn-hostname' % (hostname, hostname.split('.')[0])) mon_id = ctx.mon_id or get_short_hostname() mgr_id = ctx.mgr_id or generate_service_id() lock = FileLock(ctx, fsid) lock.acquire() (addr_arg, ipv6, mon_network) = prepare_mon_addresses(ctx) cluster_network, ipv6_cluster_network = prepare_cluster_network(ctx) config = prepare_bootstrap_config(ctx, fsid, addr_arg, ctx.image) if not ctx.skip_pull: try: _pull_image(ctx, ctx.image) except UnauthorizedRegistryError: err_str = 'Failed to pull container image. Check that correct registry credentials are provided in bootstrap by --registry-url, --registry-username, --registry-password, or supply --registry-json with credentials' logger.debug(f'Pulling image for bootstrap on {hostname} failed: {err_str}') raise Error(err_str) image_ver = CephContainer(ctx, ctx.image, 'ceph', ['--version']).run().strip() logger.info(f'Ceph version: {image_ver}') if not ctx.allow_mismatched_release: image_release = image_ver.split()[4] if image_release not in \ [DEFAULT_IMAGE_RELEASE, LATEST_STABLE_RELEASE]: raise Error( f'Container release {image_release} != cephadm release {DEFAULT_IMAGE_RELEASE};' ' please use matching version of cephadm (pass --allow-mismatched-release to continue anyway)' ) logger.info('Extracting ceph user uid/gid from container image...') (uid, gid) = extract_uid_gid(ctx) # create some initial keys (mon_key, mgr_key, admin_key, bootstrap_keyring, admin_keyring) = create_initial_keys(ctx, uid, gid, mgr_id) monmap = create_initial_monmap(ctx, uid, gid, fsid, mon_id, addr_arg) (mon_dir, log_dir) = prepare_create_mon(ctx, uid, gid, fsid, mon_id, bootstrap_keyring.name, monmap.name) with write_new(mon_dir + '/config', owner=(uid, gid)) as f: f.write(config) make_var_run(ctx, fsid, uid, gid) create_mon(ctx, uid, gid, fsid, mon_id) # config to issue various CLI commands tmp_config = write_tmp(config, uid, gid) # a CLI helper to reduce our typing def cli(cmd, extra_mounts={}, timeout=DEFAULT_TIMEOUT, verbosity=CallVerbosity.VERBOSE_ON_FAILURE): # type: (List[str], Dict[str, str], Optional[int], CallVerbosity) -> str mounts = { log_dir: '/var/log/ceph:z', admin_keyring.name: '/etc/ceph/ceph.client.admin.keyring:z', tmp_config.name: '/etc/ceph/ceph.conf:z', } for k, v in extra_mounts.items(): mounts[k] = v timeout = timeout or ctx.timeout return CephContainer( ctx, image=ctx.image, entrypoint='/usr/bin/ceph', args=cmd, volume_mounts=mounts, ).run(timeout=timeout, verbosity=verbosity) wait_for_mon(ctx, mon_id, mon_dir, admin_keyring.name, tmp_config.name) finish_bootstrap_config(ctx, fsid, config, mon_id, mon_dir, mon_network, ipv6, cli, cluster_network, ipv6_cluster_network) # output files with write_new(ctx.output_keyring) as f: f.write('[client.admin]\n' '\tkey = ' + admin_key + '\n') logger.info('Wrote keyring to %s' % ctx.output_keyring) # create mgr create_mgr(ctx, uid, gid, fsid, mgr_id, mgr_key, config, cli) if user_conf: # user given config settings were already assimilated earlier # but if the given settings contained any attributes in # the mgr (e.g. mgr/cephadm/container_image_prometheus) # they don't seem to be stored if there isn't a mgr yet. # Since re-assimilating the same conf settings should be # idempotent we can just do it again here. 
with tempfile.NamedTemporaryFile(buffering=0) as tmp: tmp.write(user_conf.encode('utf-8')) cli(['config', 'assimilate-conf', '-i', '/var/lib/ceph/user.conf'], {tmp.name: '/var/lib/ceph/user.conf:z'}) # wait for mgr to restart (after enabling a module) def wait_for_mgr_restart() -> None: # first get latest mgrmap epoch from the mon. try newer 'mgr # stat' command first, then fall back to 'mgr dump' if # necessary try: j = json_loads_retry(lambda: cli(['mgr', 'stat'], verbosity=CallVerbosity.QUIET_UNLESS_ERROR)) except Exception: j = json_loads_retry(lambda: cli(['mgr', 'dump'], verbosity=CallVerbosity.QUIET_UNLESS_ERROR)) epoch = j['epoch'] # wait for mgr to have it logger.info('Waiting for the mgr to restart...') def mgr_has_latest_epoch(): # type: () -> bool try: out = cli(['tell', 'mgr', 'mgr_status']) j = json.loads(out) return j['mgrmap_epoch'] >= epoch except Exception as e: logger.debug('tell mgr mgr_status failed: %s' % e) return False is_available(ctx, 'mgr epoch %d' % epoch, mgr_has_latest_epoch) enable_cephadm_mgr_module(cli, wait_for_mgr_restart) # ssh if not ctx.skip_ssh: prepare_ssh(ctx, cli, wait_for_mgr_restart) if ctx.registry_url and ctx.registry_username and ctx.registry_password: registry_credentials = {'url': ctx.registry_url, 'username': ctx.registry_username, 'password': ctx.registry_password} cli(['config-key', 'set', 'mgr/cephadm/registry_credentials', json.dumps(registry_credentials)]) cli(['config', 'set', 'mgr', 'mgr/cephadm/container_init', str(ctx.container_init), '--force']) if not ctx.skip_dashboard: prepare_dashboard(ctx, uid, gid, cli, wait_for_mgr_restart) if ctx.output_config == CEPH_DEFAULT_CONF and not ctx.skip_admin_label and not ctx.no_minimize_config: logger.info('Enabling client.admin keyring and conf on hosts with "admin" label') try: cli(['orch', 'client-keyring', 'set', 'client.admin', 'label:_admin']) cli(['orch', 'host', 'label', 'add', get_hostname(), '_admin']) except Exception: logger.info('Unable to set up "admin" label; assuming older version of Ceph') if ctx.apply_spec: logger.info('Applying %s to cluster' % ctx.apply_spec) # copy ssh key to hosts in spec file with open(ctx.apply_spec) as f: host_dicts = _extract_host_info_from_applied_spec(f) for h in host_dicts: _distribute_ssh_keys(ctx, h, hostname) mounts = {} mounts[pathify(ctx.apply_spec)] = '/tmp/spec.yml:ro' try: out = cli(['orch', 'apply', '-i', '/tmp/spec.yml'], extra_mounts=mounts) logger.info(out) except Exception: ctx.error_code = -errno.EINVAL logger.info('\nApplying %s to cluster failed!\n' % ctx.apply_spec) save_cluster_config(ctx, uid, gid, fsid) # enable autotune for osd_memory_target logger.info('Enabling autotune for osd_memory_target') cli(['config', 'set', 'osd', 'osd_memory_target_autotune', 'true']) # Notify the Dashboard to show the 'Expand cluster' page on first log in. 
cli(['config-key', 'set', 'mgr/dashboard/cluster/status', 'INSTALLED']) logger.info('You can access the Ceph CLI as following in case of multi-cluster or non-default config:\n\n' '\tsudo %s shell --fsid %s -c %s -k %s\n' % ( sys.argv[0], fsid, ctx.output_config, ctx.output_keyring)) logger.info('Or, if you are only running a single cluster on this host:\n\n\tsudo %s shell \n' % (sys.argv[0])) logger.info('Please consider enabling telemetry to help improve Ceph:\n\n' '\tceph telemetry on\n\n' 'For more information see:\n\n' '\thttps://docs.ceph.com/en/latest/mgr/telemetry/\n') logger.info('Bootstrap complete.') return ctx.error_code ################################## def command_registry_login(ctx: CephadmContext) -> int: if ctx.registry_json: logger.info('Pulling custom registry login info from %s.' % ctx.registry_json) d = get_parm(ctx.registry_json) if d.get('url') and d.get('username') and d.get('password'): ctx.registry_url = d.get('url') ctx.registry_username = d.get('username') ctx.registry_password = d.get('password') registry_login(ctx, ctx.registry_url, ctx.registry_username, ctx.registry_password) else: raise Error('json provided for custom registry login did not include all necessary fields. ' 'Please setup json file as\n' '{\n' ' "url": "REGISTRY_URL",\n' ' "username": "REGISTRY_USERNAME",\n' ' "password": "REGISTRY_PASSWORD"\n' '}\n') elif ctx.registry_url and ctx.registry_username and ctx.registry_password: registry_login(ctx, ctx.registry_url, ctx.registry_username, ctx.registry_password) else: raise Error('Invalid custom registry arguments received. To login to a custom registry include ' '--registry-url, --registry-username and --registry-password ' 'options or --registry-json option') return 0 def registry_login(ctx: CephadmContext, url: Optional[str], username: Optional[str], password: Optional[str]) -> None: logger.info('Logging into custom registry.') try: engine = ctx.container_engine cmd = [engine.path, 'login', '-u', username, '-p', password, url] if isinstance(engine, Podman): cmd.append('--authfile=/etc/ceph/podman-auth.json') out, _, _ = call_throws(ctx, cmd) if isinstance(engine, Podman): os.chmod('/etc/ceph/podman-auth.json', DEFAULT_MODE) except Exception: raise Error('Failed to login to custom registry @ %s as %s with given password' % (ctx.registry_url, ctx.registry_username)) ################################## def extract_uid_gid_monitoring(ctx, daemon_type): # type: (CephadmContext, str) -> Tuple[int, int] if daemon_type == 'prometheus': uid, gid = extract_uid_gid(ctx, file_path='/etc/prometheus') elif daemon_type == 'node-exporter': uid, gid = 65534, 65534 elif daemon_type == 'grafana': uid, gid = extract_uid_gid(ctx, file_path='/var/lib/grafana') elif daemon_type == 'loki': uid, gid = extract_uid_gid(ctx, file_path='/etc/loki') elif daemon_type == 'promtail': uid, gid = extract_uid_gid(ctx, file_path='/etc/promtail') elif daemon_type == 'alertmanager': uid, gid = extract_uid_gid(ctx, file_path=['/etc/alertmanager', '/etc/prometheus']) else: raise Error('{} not implemented yet'.format(daemon_type)) return uid, gid def get_deployment_container(ctx: CephadmContext, fsid: str, daemon_type: str, daemon_id: Union[int, str], privileged: bool = False, ptrace: bool = False, container_args: Optional[List[str]] = None) -> 'CephContainer': # wrapper for get_container specifically for containers made during the `cephadm deploy` # command. 
Adds some extra things such as extra container args and custom config files c = get_container(ctx, fsid, daemon_type, daemon_id, privileged, ptrace, container_args) if 'extra_container_args' in ctx and ctx.extra_container_args: c.container_args.extend(ctx.extra_container_args) if 'extra_entrypoint_args' in ctx and ctx.extra_entrypoint_args: c.args.extend(ctx.extra_entrypoint_args) ccfiles = fetch_custom_config_files(ctx) if ccfiles: mandatory_keys = ['mount_path', 'content'] for conf in ccfiles: if all(k in conf for k in mandatory_keys): mount_path = conf['mount_path'] file_path = os.path.join( ctx.data_dir, fsid, 'custom_config_files', f'{daemon_type}.{daemon_id}', os.path.basename(mount_path) ) c.volume_mounts[file_path] = mount_path return c def get_deployment_type(ctx: CephadmContext, daemon_type: str, daemon_id: str) -> DeploymentType: deployment_type: DeploymentType = DeploymentType.DEFAULT if ctx.reconfig: deployment_type = DeploymentType.RECONFIG unit_name = get_unit_name(ctx.fsid, daemon_type, daemon_id) (_, state, _) = check_unit(ctx, unit_name) if state == 'running' or is_container_running(ctx, CephContainer.for_daemon(ctx, ctx.fsid, daemon_type, daemon_id, 'bash')): # if reconfig was set, that takes priority over redeploy. If # this is considered a fresh deployment at this stage, # mark it as a redeploy to avoid port checking if deployment_type == DeploymentType.DEFAULT: deployment_type = DeploymentType.REDEPLOY logger.info(f'{deployment_type.value} daemon {ctx.name} ...') return deployment_type @default_image @deprecated_command def command_deploy(ctx): # type: (CephadmContext) -> None _common_deploy(ctx) def read_configuration_source(ctx: CephadmContext) -> Dict[str, Any]: """Read a JSON configuration based on the `ctx.source` value.""" source = '-' if 'source' in ctx and ctx.source: source = ctx.source if source == '-': config_data = json.load(sys.stdin) else: with open(source, 'rb') as fh: config_data = json.load(fh) logger.debug('Loaded deploy configuration: %r', config_data) return config_data def apply_deploy_config_to_ctx( config_data: Dict[str, Any], ctx: CephadmContext, ) -> None: """Bind properties taken from the config_data dictionary to our ctx, similar to how cli options on `deploy` are bound to the context. """ ctx.name = config_data['name'] image = config_data.get('image', '') if image: ctx.image = image if 'fsid' in config_data: ctx.fsid = config_data['fsid'] if 'meta' in config_data: ctx.meta_properties = config_data['meta'] if 'config_blobs' in config_data: ctx.config_blobs = config_data['config_blobs'] # many functions don't check that an attribute is set on the ctx # (with getattr or the '__contains__' func on ctx). # This reuses the defaults from the CLI options so we don't # have to repeat things and they can stay in sync. facade = ArgumentFacade() _add_deploy_parser_args(facade) facade.apply(ctx) for key, value in config_data.get('params', {}).items(): if key not in facade.defaults: logger.warning('unexpected parameter: %r=%r', key, value) setattr(ctx, key, value) update_default_image(ctx) logger.debug('Determined image: %r', ctx.image) def command_deploy_from(ctx: CephadmContext) -> None: """The deploy-from command is similar to deploy but sources nearly all configuration parameters from an input JSON configuration file. 
""" config_data = read_configuration_source(ctx) apply_deploy_config_to_ctx(config_data, ctx) _common_deploy(ctx) def _common_deploy(ctx: CephadmContext) -> None: daemon_type, daemon_id = ctx.name.split('.', 1) if daemon_type not in get_supported_daemons(): raise Error('daemon type %s not recognized' % daemon_type) lock = FileLock(ctx, ctx.fsid) lock.acquire() deployment_type = get_deployment_type(ctx, daemon_type, daemon_id) # Migrate sysctl conf files from /usr/lib to /etc migrate_sysctl_dir(ctx, ctx.fsid) # Get and check ports explicitly required to be opened daemon_ports = fetch_tcp_ports(ctx) _dispatch_deploy(ctx, daemon_type, daemon_id, daemon_ports, deployment_type) def _dispatch_deploy( ctx: CephadmContext, daemon_type: str, daemon_id: str, daemon_ports: List[int], deployment_type: DeploymentType, ) -> None: if daemon_type in Ceph.daemons: config, keyring = get_config_and_keyring(ctx) uid, gid = extract_uid_gid(ctx) make_var_run(ctx, ctx.fsid, uid, gid) config_json = fetch_configs(ctx) c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id, ptrace=ctx.allow_ptrace) if daemon_type == 'mon' and config_json is not None: if 'crush_location' in config_json: c_loc = config_json['crush_location'] # was originally "c.args.extend(['--set-crush-location', c_loc])" # but that doesn't seem to persist in the object after it's passed # in further function calls c.args = c.args + ['--set-crush-location', c_loc] deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, config=config, keyring=keyring, osd_fsid=ctx.osd_fsid, deployment_type=deployment_type, ports=daemon_ports) elif daemon_type in Monitoring.components: # monitoring daemon - prometheus, grafana, alertmanager, node-exporter # Default Checks # make sure provided config-json is sufficient config = fetch_configs(ctx) # type: ignore required_files = Monitoring.components[daemon_type].get('config-json-files', list()) required_args = Monitoring.components[daemon_type].get('config-json-args', list()) if required_files: if not config or not all(c in config.get('files', {}).keys() for c in required_files): # type: ignore raise Error('{} deployment requires config-json which must ' 'contain file content for {}'.format(daemon_type.capitalize(), ', '.join(required_files))) if required_args: if not config or not all(c in config.keys() for c in required_args): # type: ignore raise Error('{} deployment requires config-json which must ' 'contain arg for {}'.format(daemon_type.capitalize(), ', '.join(required_args))) uid, gid = extract_uid_gid_monitoring(ctx, daemon_type) c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, deployment_type=deployment_type, ports=daemon_ports) elif daemon_type == NFSGanesha.daemon_type: # only check ports if this is a fresh deployment if deployment_type == DeploymentType.DEFAULT and not daemon_ports: daemon_ports = list(NFSGanesha.port_map.values()) config, keyring = get_config_and_keyring(ctx) # TODO: extract ganesha uid/gid (997, 994) ? 
uid, gid = extract_uid_gid(ctx) c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, config=config, keyring=keyring, deployment_type=deployment_type, ports=daemon_ports) elif daemon_type == CephIscsi.daemon_type: config, keyring = get_config_and_keyring(ctx) uid, gid = extract_uid_gid(ctx) c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, config=config, keyring=keyring, deployment_type=deployment_type, ports=daemon_ports) elif daemon_type in Tracing.components: uid, gid = 65534, 65534 c = get_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, deployment_type=deployment_type, ports=daemon_ports) elif daemon_type == HAproxy.daemon_type: haproxy = HAproxy.init(ctx, ctx.fsid, daemon_id) uid, gid = haproxy.extract_uid_gid_haproxy() c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, deployment_type=deployment_type, ports=daemon_ports) elif daemon_type == Keepalived.daemon_type: keepalived = Keepalived.init(ctx, ctx.fsid, daemon_id) uid, gid = keepalived.extract_uid_gid_keepalived() c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid, gid, deployment_type=deployment_type, ports=daemon_ports) elif daemon_type == CustomContainer.daemon_type: cc = CustomContainer.init(ctx, ctx.fsid, daemon_id) # only check ports if this is a fresh deployment if deployment_type == DeploymentType.DEFAULT: daemon_ports.extend(cc.ports) c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id, privileged=cc.privileged, ptrace=ctx.allow_ptrace) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, uid=cc.uid, gid=cc.gid, config=None, keyring=None, deployment_type=deployment_type, ports=daemon_ports) elif daemon_type == CephadmAgent.daemon_type: # get current user gid and uid uid = os.getuid() gid = os.getgid() deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, None, uid, gid, deployment_type=deployment_type, ports=daemon_ports) elif daemon_type == SNMPGateway.daemon_type: sc = SNMPGateway.init(ctx, ctx.fsid, daemon_id) c = get_deployment_container(ctx, ctx.fsid, daemon_type, daemon_id) deploy_daemon(ctx, ctx.fsid, daemon_type, daemon_id, c, sc.uid, sc.gid, deployment_type=deployment_type, ports=daemon_ports) else: raise Error('daemon type {} not implemented in command_deploy function' .format(daemon_type)) ################################## @infer_image def command_run(ctx): # type: (CephadmContext) -> int (daemon_type, daemon_id) = ctx.name.split('.', 1) c = get_container(ctx, ctx.fsid, daemon_type, daemon_id) command = c.run_cmd() return call_timeout(ctx, command, ctx.timeout) ################################## @infer_fsid @infer_config @infer_image @validate_fsid def command_shell(ctx): # type: (CephadmContext) -> int cp = read_config(ctx.config) if cp.has_option('global', 'fsid') and \ cp.get('global', 'fsid') != ctx.fsid: raise Error('fsid does not match ceph.conf') if ctx.name: if '.' 
in ctx.name: (daemon_type, daemon_id) = ctx.name.split('.', 1) else: daemon_type = ctx.name daemon_id = None else: daemon_type = 'osd' # get the most mounts daemon_id = None if ctx.fsid and daemon_type in Ceph.daemons: make_log_dir(ctx, ctx.fsid) if daemon_id and not ctx.fsid: raise Error('must pass --fsid to specify cluster') # in case a dedicated keyring for the specified fsid is found we us it. # Otherwise, use /etc/ceph files by default, if present. We do this instead of # making these defaults in the arg parser because we don't want an error # if they don't exist. if not ctx.keyring: keyring_file = f'{ctx.data_dir}/{ctx.fsid}/{CEPH_CONF_DIR}/{CEPH_KEYRING}' if os.path.exists(keyring_file): ctx.keyring = keyring_file elif os.path.exists(CEPH_DEFAULT_KEYRING): ctx.keyring = CEPH_DEFAULT_KEYRING container_args: List[str] = ['-i'] mounts = get_container_mounts(ctx, ctx.fsid, daemon_type, daemon_id, no_config=True if ctx.config else False) binds = get_container_binds(ctx, ctx.fsid, daemon_type, daemon_id) if ctx.config: mounts[pathify(ctx.config)] = '/etc/ceph/ceph.conf:z' if ctx.keyring: mounts[pathify(ctx.keyring)] = '/etc/ceph/ceph.keyring:z' if ctx.mount: for _mount in ctx.mount: split_src_dst = _mount.split(':') mount = pathify(split_src_dst[0]) filename = os.path.basename(split_src_dst[0]) if len(split_src_dst) > 1: dst = split_src_dst[1] if len(split_src_dst) == 3: dst = '{}:{}'.format(dst, split_src_dst[2]) mounts[mount] = dst else: mounts[mount] = '/mnt/{}'.format(filename) if ctx.command: command = ctx.command else: command = ['bash'] container_args += [ '-t', '-e', 'LANG=C', '-e', 'PS1=%s' % CUSTOM_PS1, ] if ctx.fsid: home = os.path.join(ctx.data_dir, ctx.fsid, 'home') if not os.path.exists(home): logger.debug('Creating root home at %s' % home) makedirs(home, 0, 0, 0o660) if os.path.exists('/etc/skel'): for f in os.listdir('/etc/skel'): if f.startswith('.bash'): shutil.copyfile(os.path.join('/etc/skel', f), os.path.join(home, f)) mounts[home] = '/root' for i in ctx.volume: a, b = i.split(':', 1) mounts[a] = b c = CephContainer( ctx, image=ctx.image, entrypoint='doesnotmatter', args=[], container_args=container_args, volume_mounts=mounts, bind_mounts=binds, envs=ctx.env, privileged=True) command = c.shell_cmd(command) return call_timeout(ctx, command, ctx.timeout) ################################## @infer_fsid def command_enter(ctx): # type: (CephadmContext) -> int if not ctx.fsid: raise Error('must pass --fsid to specify cluster') (daemon_type, daemon_id) = ctx.name.split('.', 1) container_args = ['-i'] # type: List[str] if ctx.command: command = ctx.command else: command = ['sh'] container_args += [ '-t', '-e', 'LANG=C', '-e', 'PS1=%s' % CUSTOM_PS1, ] c = CephContainer( ctx, image=ctx.image, entrypoint='doesnotmatter', container_args=container_args, cname='ceph-%s-%s.%s' % (ctx.fsid, daemon_type, daemon_id), ) command = c.exec_cmd(command) return call_timeout(ctx, command, ctx.timeout) ################################## @infer_fsid @infer_image @validate_fsid def command_ceph_volume(ctx): # type: (CephadmContext) -> None cp = read_config(ctx.config) if cp.has_option('global', 'fsid') and \ cp.get('global', 'fsid') != ctx.fsid: raise Error('fsid does not match ceph.conf') if ctx.fsid: make_log_dir(ctx, ctx.fsid) lock = FileLock(ctx, ctx.fsid) lock.acquire() (uid, gid) = (0, 0) # ceph-volume runs as root mounts = get_container_mounts(ctx, ctx.fsid, 'osd', None) tmp_config = None tmp_keyring = None (config, keyring) = get_config_and_keyring(ctx) if config: # tmp config file 
tmp_config = write_tmp(config, uid, gid) mounts[tmp_config.name] = '/etc/ceph/ceph.conf:z' if keyring: # tmp keyring file tmp_keyring = write_tmp(keyring, uid, gid) mounts[tmp_keyring.name] = '/var/lib/ceph/bootstrap-osd/ceph.keyring:z' c = get_ceph_volume_container( ctx, envs=ctx.env, args=ctx.command, volume_mounts=mounts, ) out, err, code = call_throws(ctx, c.run_cmd(), verbosity=CallVerbosity.QUIET_UNLESS_ERROR) if not code: print(out) ################################## @infer_fsid def command_unit(ctx): # type: (CephadmContext) -> int if not ctx.fsid: raise Error('must pass --fsid to specify cluster') unit_name = get_unit_name_by_daemon_name(ctx, ctx.fsid, ctx.name) _, _, code = call( ctx, ['systemctl', ctx.command, unit_name], verbosity=CallVerbosity.VERBOSE, desc='' ) return code ################################## @infer_fsid def command_logs(ctx): # type: (CephadmContext) -> None if not ctx.fsid: raise Error('must pass --fsid to specify cluster') unit_name = get_unit_name_by_daemon_name(ctx, ctx.fsid, ctx.name) cmd = [find_program('journalctl')] cmd.extend(['-u', unit_name]) if ctx.command: cmd.extend(ctx.command) # call this directly, without our wrapper, so that we get an unmolested # stdout with logger prefixing. logger.debug('Running command: %s' % ' '.join(cmd)) subprocess.call(cmd, env=os.environ.copy()) # type: ignore ################################## def list_networks(ctx): # type: (CephadmContext) -> Dict[str,Dict[str, Set[str]]] # sadly, 18.04's iproute2 4.15.0-2ubun doesn't support the -j flag, # so we'll need to use a regex to parse 'ip' command output. # # out, _, _ = call_throws(['ip', '-j', 'route', 'ls']) # j = json.loads(out) # for x in j: res = _list_ipv4_networks(ctx) res.update(_list_ipv6_networks(ctx)) return res def _list_ipv4_networks(ctx: CephadmContext) -> Dict[str, Dict[str, Set[str]]]: execstr: Optional[str] = find_executable('ip') if not execstr: raise FileNotFoundError("unable to find 'ip' command") out, _, _ = call_throws(ctx, [execstr, 'route', 'ls'], verbosity=CallVerbosity.QUIET_UNLESS_ERROR) return _parse_ipv4_route(out) def _parse_ipv4_route(out: str) -> Dict[str, Dict[str, Set[str]]]: r = {} # type: Dict[str, Dict[str, Set[str]]] p = re.compile(r'^(\S+) (?:via \S+)? 
?dev (\S+) (.*)scope link (.*)src (\S+)') for line in out.splitlines(): m = p.findall(line) if not m: continue net = m[0][0] if '/' not in net: # aggregate /32 mask for single host sub-networks net += '/32' iface = m[0][1] ip = m[0][4] if net not in r: r[net] = {} if iface not in r[net]: r[net][iface] = set() r[net][iface].add(ip) return r def _list_ipv6_networks(ctx: CephadmContext) -> Dict[str, Dict[str, Set[str]]]: execstr: Optional[str] = find_executable('ip') if not execstr: raise FileNotFoundError("unable to find 'ip' command") routes, _, _ = call_throws(ctx, [execstr, '-6', 'route', 'ls'], verbosity=CallVerbosity.QUIET_UNLESS_ERROR) ips, _, _ = call_throws(ctx, [execstr, '-6', 'addr', 'ls'], verbosity=CallVerbosity.QUIET_UNLESS_ERROR) return _parse_ipv6_route(routes, ips) def _parse_ipv6_route(routes: str, ips: str) -> Dict[str, Dict[str, Set[str]]]: r = {} # type: Dict[str, Dict[str, Set[str]]] route_p = re.compile(r'^(\S+) dev (\S+) proto (\S+) metric (\S+) .*pref (\S+)$') ip_p = re.compile(r'^\s+inet6 (\S+)/(.*)scope (.*)$') iface_p = re.compile(r'^(\d+): (\S+): (.*)$') for line in routes.splitlines(): m = route_p.findall(line) if not m or m[0][0].lower() == 'default': continue net = m[0][0] if '/' not in net: # aggregate /128 mask for single host sub-networks net += '/128' iface = m[0][1] if iface == 'lo': # skip loopback devices continue if net not in r: r[net] = {} if iface not in r[net]: r[net][iface] = set() iface = None for line in ips.splitlines(): m = ip_p.findall(line) if not m: m = iface_p.findall(line) if m: # drop @... suffix, if present iface = m[0][1].split('@')[0] continue ip = m[0][0] # find the network it belongs to net = [n for n in r.keys() if ipaddress.ip_address(ip) in ipaddress.ip_network(n)] if net and iface in r[net[0]]: assert iface r[net[0]][iface].add(ip) return r def command_list_networks(ctx): # type: (CephadmContext) -> None r = list_networks(ctx) def serialize_sets(obj: Any) -> Any: return list(obj) if isinstance(obj, set) else obj print(json.dumps(r, indent=4, default=serialize_sets)) ################################## def command_ls(ctx): # type: (CephadmContext) -> None ls = list_daemons(ctx, detail=not ctx.no_detail, legacy_dir=ctx.legacy_dir) print(json.dumps(ls, indent=4)) def with_units_to_int(v: str) -> int: if v.endswith('iB'): v = v[:-2] elif v.endswith('B'): v = v[:-1] mult = 1 if v[-1].upper() == 'K': mult = 1024 v = v[:-1] elif v[-1].upper() == 'M': mult = 1024 * 1024 v = v[:-1] elif v[-1].upper() == 'G': mult = 1024 * 1024 * 1024 v = v[:-1] elif v[-1].upper() == 'T': mult = 1024 * 1024 * 1024 * 1024 v = v[:-1] return int(float(v) * mult) def list_daemons(ctx, detail=True, legacy_dir=None): # type: (CephadmContext, bool, Optional[str]) -> List[Dict[str, str]] host_version: Optional[str] = None ls = [] container_path = ctx.container_engine.path data_dir = ctx.data_dir if legacy_dir is not None: data_dir = os.path.abspath(legacy_dir + data_dir) # keep track of ceph versions we see seen_versions = {} # type: Dict[str, Optional[str]] # keep track of image digests seen_digests = {} # type: Dict[str, List[str]] # keep track of memory and cpu usage we've seen seen_memusage = {} # type: Dict[str, int] seen_cpuperc = {} # type: Dict[str, str] out, err, code = call( ctx, [container_path, 'stats', '--format', '{{.ID}},{{.MemUsage}}', '--no-stream'], verbosity=CallVerbosity.QUIET ) seen_memusage_cid_len, seen_memusage = _parse_mem_usage(code, out) out, err, code = call( ctx, [container_path, 'stats', '--format', '{{.ID}},{{.CPUPerc}}', 
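        # '--no-stream' asks the container engine for a single stats sample
        # per container (rather than a live stream), so this call returns
        # promptly even when many containers are running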
'--no-stream'], verbosity=CallVerbosity.QUIET ) seen_cpuperc_cid_len, seen_cpuperc = _parse_cpu_perc(code, out) # /var/lib/ceph if os.path.exists(data_dir): for i in os.listdir(data_dir): if i in ['mon', 'osd', 'mds', 'mgr']: daemon_type = i for j in os.listdir(os.path.join(data_dir, i)): if '-' not in j: continue (cluster, daemon_id) = j.split('-', 1) fsid = get_legacy_daemon_fsid(ctx, cluster, daemon_type, daemon_id, legacy_dir=legacy_dir) legacy_unit_name = 'ceph-%s@%s' % (daemon_type, daemon_id) val: Dict[str, Any] = { 'style': 'legacy', 'name': '%s.%s' % (daemon_type, daemon_id), 'fsid': fsid if fsid is not None else 'unknown', 'systemd_unit': legacy_unit_name, } if detail: (val['enabled'], val['state'], _) = check_unit(ctx, legacy_unit_name) if not host_version: try: out, err, code = call(ctx, ['ceph', '-v'], verbosity=CallVerbosity.QUIET) if not code and out.startswith('ceph version '): host_version = out.split(' ')[2] except Exception: pass val['host_version'] = host_version ls.append(val) elif is_fsid(i): fsid = str(i) # convince mypy that fsid is a str here for j in os.listdir(os.path.join(data_dir, i)): if '.' in j and os.path.isdir(os.path.join(data_dir, fsid, j)): name = j (daemon_type, daemon_id) = j.split('.', 1) unit_name = get_unit_name(fsid, daemon_type, daemon_id) else: continue val = { 'style': 'cephadm:v1', 'name': name, 'fsid': fsid, 'systemd_unit': unit_name, } if detail: # get container id (val['enabled'], val['state'], _) = check_unit(ctx, unit_name) container_id = None image_name = None image_id = None image_digests = None version = None start_stamp = None out, err, code = get_container_stats(ctx, container_path, fsid, daemon_type, daemon_id) if not code: (container_id, image_name, image_id, start, version) = out.strip().split(',') image_id = normalize_container_id(image_id) daemon_type = name.split('.', 1)[0] start_stamp = try_convert_datetime(start) # collect digests for this image id image_digests = seen_digests.get(image_id) if not image_digests: out, err, code = call( ctx, [ container_path, 'image', 'inspect', image_id, '--format', '{{.RepoDigests}}', ], verbosity=CallVerbosity.QUIET) if not code: image_digests = list(set(map( normalize_image_digest, out.strip()[1:-1].split(' ')))) seen_digests[image_id] = image_digests # identify software version inside the container (if we can) if not version or '.' 
not in version: version = seen_versions.get(image_id, None) if daemon_type == NFSGanesha.daemon_type: version = NFSGanesha.get_version(ctx, container_id) if daemon_type == CephIscsi.daemon_type: version = CephIscsi.get_version(ctx, container_id) elif not version: if daemon_type in Ceph.daemons: out, err, code = call(ctx, [container_path, 'exec', container_id, 'ceph', '-v'], verbosity=CallVerbosity.QUIET) if not code and \ out.startswith('ceph version '): version = out.split(' ')[2] seen_versions[image_id] = version elif daemon_type == 'grafana': out, err, code = call(ctx, [container_path, 'exec', container_id, 'grafana-server', '-v'], verbosity=CallVerbosity.QUIET) if not code and \ out.startswith('Version '): version = out.split(' ')[1] seen_versions[image_id] = version elif daemon_type in ['prometheus', 'alertmanager', 'node-exporter', 'loki', 'promtail']: version = Monitoring.get_version(ctx, container_id, daemon_type) seen_versions[image_id] = version elif daemon_type == 'haproxy': out, err, code = call(ctx, [container_path, 'exec', container_id, 'haproxy', '-v'], verbosity=CallVerbosity.QUIET) if not code and \ out.startswith('HA-Proxy version '): version = out.split(' ')[2] seen_versions[image_id] = version elif daemon_type == 'keepalived': out, err, code = call(ctx, [container_path, 'exec', container_id, 'keepalived', '--version'], verbosity=CallVerbosity.QUIET) if not code and \ err.startswith('Keepalived '): version = err.split(' ')[1] if version[0] == 'v': version = version[1:] seen_versions[image_id] = version elif daemon_type == CustomContainer.daemon_type: # Because a custom container can contain # everything, we do not know which command # to execute to get the version. pass elif daemon_type == SNMPGateway.daemon_type: version = SNMPGateway.get_version(ctx, fsid, daemon_id) seen_versions[image_id] = version else: logger.warning('version for unknown daemon type %s' % daemon_type) else: vfile = os.path.join(data_dir, fsid, j, 'unit.image') # type: ignore try: with open(vfile, 'r') as f: image_name = f.read().strip() or None except IOError: pass # unit.meta? 
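                    # unit.meta is optional; when cephadm deployed the daemon
                    # it may have recorded extra metadata there (JSON), which
                    # is merged into this listing as-is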
mfile = os.path.join(data_dir, fsid, j, 'unit.meta') # type: ignore try: with open(mfile, 'r') as f: meta = json.loads(f.read()) val.update(meta) except IOError: pass val['container_id'] = container_id val['container_image_name'] = image_name val['container_image_id'] = image_id val['container_image_digests'] = image_digests if container_id: val['memory_usage'] = seen_memusage.get(container_id[0:seen_memusage_cid_len]) val['cpu_percentage'] = seen_cpuperc.get(container_id[0:seen_cpuperc_cid_len]) val['version'] = version val['started'] = start_stamp val['created'] = get_file_timestamp( os.path.join(data_dir, fsid, j, 'unit.created') ) val['deployed'] = get_file_timestamp( os.path.join(data_dir, fsid, j, 'unit.image')) val['configured'] = get_file_timestamp( os.path.join(data_dir, fsid, j, 'unit.configured')) ls.append(val) return ls def _parse_mem_usage(code: int, out: str) -> Tuple[int, Dict[str, int]]: # keep track of memory usage we've seen seen_memusage = {} # type: Dict[str, int] seen_memusage_cid_len = 0 if not code: for line in out.splitlines(): (cid, usage) = line.split(',') (used, limit) = usage.split(' / ') try: seen_memusage[cid] = with_units_to_int(used) if not seen_memusage_cid_len: seen_memusage_cid_len = len(cid) except ValueError: logger.info('unable to parse memory usage line\n>{}'.format(line)) pass return seen_memusage_cid_len, seen_memusage def _parse_cpu_perc(code: int, out: str) -> Tuple[int, Dict[str, str]]: seen_cpuperc = {} seen_cpuperc_cid_len = 0 if not code: for line in out.splitlines(): (cid, cpuperc) = line.split(',') try: seen_cpuperc[cid] = cpuperc if not seen_cpuperc_cid_len: seen_cpuperc_cid_len = len(cid) except ValueError: logger.info('unable to parse cpu percentage line\n>{}'.format(line)) pass return seen_cpuperc_cid_len, seen_cpuperc def get_daemon_description(ctx, fsid, name, detail=False, legacy_dir=None): # type: (CephadmContext, str, str, bool, Optional[str]) -> Dict[str, str] for d in list_daemons(ctx, detail=detail, legacy_dir=legacy_dir): if d['fsid'] != fsid: continue if d['name'] != name: continue return d raise Error('Daemon not found: {}. See `cephadm ls`'.format(name)) def get_container_stats(ctx: CephadmContext, container_path: str, fsid: str, daemon_type: str, daemon_id: str) -> Tuple[str, str, int]: c = CephContainer.for_daemon(ctx, fsid, daemon_type, daemon_id, 'bash') out, err, code = '', '', -1 for name in (c.cname, c.old_cname): cmd = [ container_path, 'inspect', '--format', '{{.Id}},{{.Config.Image}},{{.Image}},{{.Created}},{{index .Config.Labels "io.ceph.version"}}', name ] out, err, code = call(ctx, cmd, verbosity=CallVerbosity.QUIET) if not code: break return out, err, code ################################## @default_image def command_adopt(ctx): # type: (CephadmContext) -> None if not ctx.skip_pull: try: _pull_image(ctx, ctx.image) except UnauthorizedRegistryError: err_str = 'Failed to pull container image. Host may not be logged into container registry. 
Try `cephadm registry-login --registry-url <url> --registry-username <username> --registry-password <password>` or supply login info via a json file with `cephadm registry-login --registry-json <file>`' logger.debug(f'Pulling image for `command_adopt` failed: {err_str}') raise Error(err_str) (daemon_type, daemon_id) = ctx.name.split('.', 1) # legacy check if ctx.style != 'legacy': raise Error('adoption of style %s not implemented' % ctx.style) # lock fsid = get_legacy_daemon_fsid(ctx, ctx.cluster, daemon_type, daemon_id, legacy_dir=ctx.legacy_dir) if not fsid: raise Error('could not detect legacy fsid; set fsid in ceph.conf') lock = FileLock(ctx, fsid) lock.acquire() # call correct adoption if daemon_type in Ceph.daemons: command_adopt_ceph(ctx, daemon_type, daemon_id, fsid) elif daemon_type == 'prometheus': command_adopt_prometheus(ctx, daemon_id, fsid) elif daemon_type == 'grafana': command_adopt_grafana(ctx, daemon_id, fsid) elif daemon_type == 'node-exporter': raise Error('adoption of node-exporter not implemented') elif daemon_type == 'alertmanager': command_adopt_alertmanager(ctx, daemon_id, fsid) else: raise Error('daemon type %s not recognized' % daemon_type) class AdoptOsd(object): def __init__(self, ctx, osd_data_dir, osd_id): # type: (CephadmContext, str, str) -> None self.ctx = ctx self.osd_data_dir = osd_data_dir self.osd_id = osd_id def check_online_osd(self): # type: () -> Tuple[Optional[str], Optional[str]] osd_fsid, osd_type = None, None path = os.path.join(self.osd_data_dir, 'fsid') try: with open(path, 'r') as f: osd_fsid = f.read().strip() logger.info('Found online OSD at %s' % path) except IOError: logger.info('Unable to read OSD fsid from %s' % path) if os.path.exists(os.path.join(self.osd_data_dir, 'type')): with open(os.path.join(self.osd_data_dir, 'type')) as f: osd_type = f.read().strip() else: logger.info('"type" file missing for OSD data dir') return osd_fsid, osd_type def check_offline_lvm_osd(self): # type: () -> Tuple[Optional[str], Optional[str]] osd_fsid, osd_type = None, None c = get_ceph_volume_container( self.ctx, args=['lvm', 'list', '--format=json'], ) out, err, code = call_throws(self.ctx, c.run_cmd()) if not code: try: js = json.loads(out) if self.osd_id in js: logger.info('Found offline LVM OSD {}'.format(self.osd_id)) osd_fsid = js[self.osd_id][0]['tags']['ceph.osd_fsid'] for device in js[self.osd_id]: if device['tags']['ceph.type'] == 'block': osd_type = 'bluestore' break if device['tags']['ceph.type'] == 'data': osd_type = 'filestore' break except ValueError as e: logger.info('Invalid JSON in ceph-volume lvm list: {}'.format(e)) return osd_fsid, osd_type def check_offline_simple_osd(self): # type: () -> Tuple[Optional[str], Optional[str]] osd_fsid, osd_type = None, None osd_file = glob('/etc/ceph/osd/{}-[a-f0-9-]*.json'.format(self.osd_id)) if len(osd_file) == 1: with open(osd_file[0], 'r') as f: try: js = json.loads(f.read()) logger.info('Found offline simple OSD {}'.format(self.osd_id)) osd_fsid = js['fsid'] osd_type = js['type'] if osd_type != 'filestore': # need this to be mounted for the adopt to work, as it # needs to move files from this directory call_throws(self.ctx, ['mount', js['data']['path'], self.osd_data_dir]) except ValueError as e: logger.info('Invalid JSON in {}: {}'.format(osd_file, e)) return osd_fsid, osd_type def change_cluster_name(self) -> None: logger.info('Attempting to convert osd cluster name to ceph . . 
.') c = get_ceph_volume_container( self.ctx, args=['lvm', 'list', '{}'.format(self.osd_id), '--format=json'], ) out, err, code = call_throws(self.ctx, c.run_cmd()) if code: raise Exception(f'Failed to get list of LVs: {err}\nceph-volume failed with rc {code}') try: js = json.loads(out) if not js: raise RuntimeError(f'Failed to find osd.{self.osd_id}') device: Optional[Dict[Any, Any]] = None for d in js[self.osd_id]: if d['type'] == 'block': device = d break if not device: raise RuntimeError(f'Failed to find block device for osd.{self.osd_id}') vg = device['vg_name'] out, err, code = call_throws(self.ctx, ['lvchange', '--deltag', f'ceph.cluster_name={self.ctx.cluster}', vg]) if code: raise RuntimeError(f"Can't delete tag ceph.cluster_name={self.ctx.cluster} on osd.{self.osd_id}.\nlvchange failed with rc {code}") out, err, code = call_throws(self.ctx, ['lvchange', '--addtag', 'ceph.cluster_name=ceph', vg]) if code: raise RuntimeError(f"Can't add tag ceph.cluster_name=ceph on osd.{self.osd_id}.\nlvchange failed with rc {code}") logger.info('Successfully converted osd cluster name') except (Exception, RuntimeError) as e: logger.info(f'Failed to convert osd cluster name: {e}') def command_adopt_ceph(ctx, daemon_type, daemon_id, fsid): # type: (CephadmContext, str, str, str) -> None (uid, gid) = extract_uid_gid(ctx) data_dir_src = ('/var/lib/ceph/%s/%s-%s' % (daemon_type, ctx.cluster, daemon_id)) data_dir_src = os.path.abspath(ctx.legacy_dir + data_dir_src) if not os.path.exists(data_dir_src): raise Error("{}.{} data directory '{}' does not exist. " 'Incorrect ID specified, or daemon already adopted?'.format( daemon_type, daemon_id, data_dir_src)) osd_fsid = None if daemon_type == 'osd': adopt_osd = AdoptOsd(ctx, data_dir_src, daemon_id) osd_fsid, osd_type = adopt_osd.check_online_osd() if not osd_fsid: osd_fsid, osd_type = adopt_osd.check_offline_lvm_osd() if not osd_fsid: osd_fsid, osd_type = adopt_osd.check_offline_simple_osd() if not osd_fsid: raise Error('Unable to find OSD {}'.format(daemon_id)) elif ctx.cluster != 'ceph': adopt_osd.change_cluster_name() logger.info('objectstore_type is %s' % osd_type) assert osd_type if osd_type == 'filestore': raise Error('FileStore is not supported by cephadm') # NOTE: implicit assumption here that the units correspond to the # cluster we are adopting based on the /etc/{defaults,sysconfig}/ceph # CLUSTER field. unit_name = 'ceph-%s@%s' % (daemon_type, daemon_id) (enabled, state, _) = check_unit(ctx, unit_name) if state == 'running': logger.info('Stopping old systemd unit %s...' % unit_name) call_throws(ctx, ['systemctl', 'stop', unit_name]) if enabled: logger.info('Disabling old systemd unit %s...' 
% unit_name)
        call_throws(ctx, ['systemctl', 'disable', unit_name])

    # data
    logger.info('Moving data...')
    data_dir_dst = make_data_dir(ctx, fsid, daemon_type, daemon_id,
                                 uid=uid, gid=gid)
    move_files(ctx, glob(os.path.join(data_dir_src, '*')),
               data_dir_dst,
               uid=uid, gid=gid)
    logger.debug('Remove dir `%s`' % (data_dir_src))
    if os.path.ismount(data_dir_src):
        call_throws(ctx, ['umount', data_dir_src])
    os.rmdir(data_dir_src)

    logger.info('Chowning content...')
    call_throws(ctx, ['chown', '-c', '-R', '%d.%d' % (uid, gid), data_dir_dst])

    if daemon_type == 'mon':
        # rename *.ldb -> *.sst, in case they are coming from ubuntu
        store = os.path.join(data_dir_dst, 'store.db')
        num_renamed = 0
        if os.path.exists(store):
            for oldf in os.listdir(store):
                if oldf.endswith('.ldb'):
                    newf = oldf.replace('.ldb', '.sst')
                    oldp = os.path.join(store, oldf)
                    newp = os.path.join(store, newf)
                    logger.debug('Renaming %s -> %s' % (oldp, newp))
                    os.rename(oldp, newp)
                    num_renamed += 1
        if num_renamed:
            logger.info('Renamed %d leveldb *.ldb files to *.sst',
                        num_renamed)
    if daemon_type == 'osd':
        for n in ['block', 'block.db', 'block.wal']:
            p = os.path.join(data_dir_dst, n)
            if os.path.exists(p):
                logger.info('Chowning %s...' % p)
                os.chown(p, uid, gid)
        # disable the ceph-volume 'simple' mode files on the host
        simple_fn = os.path.join('/etc/ceph/osd',
                                 '%s-%s.json' % (daemon_id, osd_fsid))
        if os.path.exists(simple_fn):
            new_fn = simple_fn + '.adopted-by-cephadm'
            logger.info('Renaming %s -> %s', simple_fn, new_fn)
            os.rename(simple_fn, new_fn)
            logger.info('Disabling host unit ceph-volume@ simple unit...')
            call(ctx, ['systemctl', 'disable',
                       'ceph-volume@simple-%s-%s.service' % (daemon_id, osd_fsid)])
        else:
            # assume this is an 'lvm' c-v for now, but don't error
            # out if it's not.
            logger.info('Disabling host unit ceph-volume@ lvm unit...')
            call(ctx, ['systemctl', 'disable',
                       'ceph-volume@lvm-%s-%s.service' % (daemon_id, osd_fsid)])

    # config
    config_src = '/etc/ceph/%s.conf' % (ctx.cluster)
    config_src = os.path.abspath(ctx.legacy_dir + config_src)
    config_dst = os.path.join(data_dir_dst, 'config')
    copy_files(ctx, [config_src], config_dst, uid=uid, gid=gid)

    # logs
    logger.info('Moving logs...')
    log_dir_src = ('/var/log/ceph/%s-%s.%s.log*' %
                   (ctx.cluster, daemon_type, daemon_id))
    log_dir_src = os.path.abspath(ctx.legacy_dir + log_dir_src)
    log_dir_dst = make_log_dir(ctx, fsid, uid=uid, gid=gid)
    move_files(ctx, glob(log_dir_src),
               log_dir_dst,
               uid=uid, gid=gid)

    logger.info('Creating new units...')
    make_var_run(ctx, fsid, uid, gid)
    c = get_container(ctx, fsid, daemon_type, daemon_id)
    deploy_daemon_units(ctx, fsid, uid, gid, daemon_type, daemon_id, c,
                        enable=True,  # unconditionally enable the new unit
                        start=(state == 'running' or ctx.force_start),
                        osd_fsid=osd_fsid)
    update_firewalld(ctx, daemon_type)


def command_adopt_prometheus(ctx, daemon_id, fsid):
    # type: (CephadmContext, str, str) -> None
    daemon_type = 'prometheus'
    (uid, gid) = extract_uid_gid_monitoring(ctx, daemon_type)
    # should try to set the ports we know cephadm defaults
    # to for these services in the firewall.
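    # Monitoring.port_map lists the default TCP ports cephadm assigns to each
    # monitoring service; passing them to deploy_daemon() below keeps the
    # firewall handling consistent with a fresh deployment (compare the
    # matching close_ports() call in command_rm_daemon)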
ports = Monitoring.port_map['prometheus'] _stop_and_disable(ctx, 'prometheus') data_dir_dst = make_data_dir(ctx, fsid, daemon_type, daemon_id, uid=uid, gid=gid) # config config_src = '/etc/prometheus/prometheus.yml' config_src = os.path.abspath(ctx.legacy_dir + config_src) config_dst = os.path.join(data_dir_dst, 'etc/prometheus') makedirs(config_dst, uid, gid, 0o755) copy_files(ctx, [config_src], config_dst, uid=uid, gid=gid) # data data_src = '/var/lib/prometheus/metrics/' data_src = os.path.abspath(ctx.legacy_dir + data_src) data_dst = os.path.join(data_dir_dst, 'data') copy_tree(ctx, [data_src], data_dst, uid=uid, gid=gid) make_var_run(ctx, fsid, uid, gid) c = get_container(ctx, fsid, daemon_type, daemon_id) deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid, deployment_type=DeploymentType.REDEPLOY, ports=ports) update_firewalld(ctx, daemon_type) def command_adopt_grafana(ctx, daemon_id, fsid): # type: (CephadmContext, str, str) -> None daemon_type = 'grafana' (uid, gid) = extract_uid_gid_monitoring(ctx, daemon_type) # should try to set the ports we know cephadm defaults # to for these services in the firewall. ports = Monitoring.port_map['grafana'] _stop_and_disable(ctx, 'grafana-server') data_dir_dst = make_data_dir(ctx, fsid, daemon_type, daemon_id, uid=uid, gid=gid) # config config_src = '/etc/grafana/grafana.ini' config_src = os.path.abspath(ctx.legacy_dir + config_src) config_dst = os.path.join(data_dir_dst, 'etc/grafana') makedirs(config_dst, uid, gid, 0o755) copy_files(ctx, [config_src], config_dst, uid=uid, gid=gid) prov_src = '/etc/grafana/provisioning/' prov_src = os.path.abspath(ctx.legacy_dir + prov_src) prov_dst = os.path.join(data_dir_dst, 'etc/grafana') copy_tree(ctx, [prov_src], prov_dst, uid=uid, gid=gid) # cert cert = '/etc/grafana/grafana.crt' key = '/etc/grafana/grafana.key' if os.path.exists(cert) and os.path.exists(key): cert_src = '/etc/grafana/grafana.crt' cert_src = os.path.abspath(ctx.legacy_dir + cert_src) makedirs(os.path.join(data_dir_dst, 'etc/grafana/certs'), uid, gid, 0o755) cert_dst = os.path.join(data_dir_dst, 'etc/grafana/certs/cert_file') copy_files(ctx, [cert_src], cert_dst, uid=uid, gid=gid) key_src = '/etc/grafana/grafana.key' key_src = os.path.abspath(ctx.legacy_dir + key_src) key_dst = os.path.join(data_dir_dst, 'etc/grafana/certs/cert_key') copy_files(ctx, [key_src], key_dst, uid=uid, gid=gid) _adjust_grafana_ini(os.path.join(config_dst, 'grafana.ini')) else: logger.debug('Skipping ssl, missing cert {} or key {}'.format(cert, key)) # data - possible custom dashboards/plugins data_src = '/var/lib/grafana/' data_src = os.path.abspath(ctx.legacy_dir + data_src) data_dst = os.path.join(data_dir_dst, 'data') copy_tree(ctx, [data_src], data_dst, uid=uid, gid=gid) make_var_run(ctx, fsid, uid, gid) c = get_container(ctx, fsid, daemon_type, daemon_id) deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid, deployment_type=DeploymentType.REDEPLOY, ports=ports) update_firewalld(ctx, daemon_type) def command_adopt_alertmanager(ctx, daemon_id, fsid): # type: (CephadmContext, str, str) -> None daemon_type = 'alertmanager' (uid, gid) = extract_uid_gid_monitoring(ctx, daemon_type) # should try to set the ports we know cephadm defaults # to for these services in the firewall. 
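    # as with prometheus/grafana above, the legacy unit name differs from the
    # daemon type: Debian-based packaging typically ships alertmanager as
    # 'prometheus-alertmanager', which is what _stop_and_disable() targets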
ports = Monitoring.port_map['alertmanager'] _stop_and_disable(ctx, 'prometheus-alertmanager') data_dir_dst = make_data_dir(ctx, fsid, daemon_type, daemon_id, uid=uid, gid=gid) # config config_src = '/etc/prometheus/alertmanager.yml' config_src = os.path.abspath(ctx.legacy_dir + config_src) config_dst = os.path.join(data_dir_dst, 'etc/alertmanager') makedirs(config_dst, uid, gid, 0o755) copy_files(ctx, [config_src], config_dst, uid=uid, gid=gid) # data data_src = '/var/lib/prometheus/alertmanager/' data_src = os.path.abspath(ctx.legacy_dir + data_src) data_dst = os.path.join(data_dir_dst, 'etc/alertmanager/data') copy_tree(ctx, [data_src], data_dst, uid=uid, gid=gid) make_var_run(ctx, fsid, uid, gid) c = get_container(ctx, fsid, daemon_type, daemon_id) deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid, deployment_type=DeploymentType.REDEPLOY, ports=ports) update_firewalld(ctx, daemon_type) def _adjust_grafana_ini(filename): # type: (str) -> None # Update cert_file, cert_key pathnames in server section # ConfigParser does not preserve comments try: with open(filename, 'r') as grafana_ini: lines = grafana_ini.readlines() with write_new(filename, perms=None) as grafana_ini: server_section = False for line in lines: if line.startswith('['): server_section = False if line.startswith('[server]'): server_section = True if server_section: line = re.sub(r'^cert_file.*', 'cert_file = /etc/grafana/certs/cert_file', line) line = re.sub(r'^cert_key.*', 'cert_key = /etc/grafana/certs/cert_key', line) grafana_ini.write(line) except OSError as err: raise Error('Cannot update {}: {}'.format(filename, err)) def _stop_and_disable(ctx, unit_name): # type: (CephadmContext, str) -> None (enabled, state, _) = check_unit(ctx, unit_name) if state == 'running': logger.info('Stopping old systemd unit %s...' % unit_name) call_throws(ctx, ['systemctl', 'stop', unit_name]) if enabled: logger.info('Disabling old systemd unit %s...' 
% unit_name) call_throws(ctx, ['systemctl', 'disable', unit_name]) ################################## def command_rm_daemon(ctx): # type: (CephadmContext) -> None lock = FileLock(ctx, ctx.fsid) lock.acquire() (daemon_type, daemon_id) = ctx.name.split('.', 1) unit_name = get_unit_name_by_daemon_name(ctx, ctx.fsid, ctx.name) if daemon_type in ['mon', 'osd'] and not ctx.force: raise Error('must pass --force to proceed: ' 'this command may destroy precious data!') call(ctx, ['systemctl', 'stop', unit_name], verbosity=CallVerbosity.DEBUG) call(ctx, ['systemctl', 'reset-failed', unit_name], verbosity=CallVerbosity.DEBUG) call(ctx, ['systemctl', 'disable', unit_name], verbosity=CallVerbosity.DEBUG) # force remove rgw admin socket file if leftover if daemon_type in ['rgw']: rgw_asok_path = f'/var/run/ceph/{ctx.fsid}/ceph-client.{ctx.name}.*.asok' call(ctx, ['rm', '-rf', rgw_asok_path], verbosity=CallVerbosity.DEBUG) data_dir = get_data_dir(ctx.fsid, ctx.data_dir, daemon_type, daemon_id) if daemon_type in ['mon', 'osd', 'prometheus'] and \ not ctx.force_delete_data: # rename it out of the way -- do not delete backup_dir = os.path.join(ctx.data_dir, ctx.fsid, 'removed') if not os.path.exists(backup_dir): makedirs(backup_dir, 0, 0, DATA_DIR_MODE) dirname = '%s.%s_%s' % (daemon_type, daemon_id, datetime.datetime.utcnow().strftime(DATEFMT)) os.rename(data_dir, os.path.join(backup_dir, dirname)) else: call_throws(ctx, ['rm', '-rf', data_dir]) ports: List[int] = fetch_tcp_ports(ctx) if ports: try: fw = Firewalld(ctx) fw.close_ports(ports) fw.apply_rules() except RuntimeError as e: # in case we cannot close the ports we will remove # the daemon but keep them open. logger.warning(f' Error when trying to close ports: {e}') ################################## def _zap(ctx: CephadmContext, what: str) -> None: mounts = get_container_mounts(ctx, ctx.fsid, 'clusterless-ceph-volume', None) c = get_ceph_volume_container(ctx, args=['lvm', 'zap', '--destroy', what], volume_mounts=mounts, envs=ctx.env) logger.info(f'Zapping {what}...') out, err, code = call_throws(ctx, c.run_cmd()) @infer_image def _zap_osds(ctx: CephadmContext) -> None: # assume fsid lock already held # list mounts = get_container_mounts(ctx, ctx.fsid, 'clusterless-ceph-volume', None) c = get_ceph_volume_container(ctx, args=['inventory', '--format', 'json'], volume_mounts=mounts, envs=ctx.env) out, err, code = call_throws(ctx, c.run_cmd()) if code: raise Error('failed to list osd inventory') try: ls = json.loads(out) except ValueError as e: raise Error(f'Invalid JSON in ceph-volume inventory: {e}') for i in ls: matches = [lv.get('cluster_fsid') == ctx.fsid and i.get('ceph_device') for lv in i.get('lvs', [])] if any(matches) and all(matches): _zap(ctx, i.get('path')) elif any(matches): lv_names = [lv['name'] for lv in i.get('lvs', [])] # TODO: we need to map the lv_names back to device paths (the vg # id isn't part of the output here!) 
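            # a possible approach for the TODO above (a sketch, not
            # implemented here): query the LVs' backing devices, e.g. with
            #   lvs --noheadings -o lv_name,vg_name,devices
            # and zap each reported device path individually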
logger.warning(f'Not zapping LVs (not implemented): {lv_names}')


def command_zap_osds(ctx: CephadmContext) -> None:
    if not ctx.force:
        raise Error('must pass --force to proceed: '
                    'this command may destroy precious data!')

    lock = FileLock(ctx, ctx.fsid)
    lock.acquire()

    _zap_osds(ctx)

##################################


def get_ceph_cluster_count(ctx: CephadmContext) -> int:
    return len([c for c in os.listdir(ctx.data_dir) if is_fsid(c)])


def command_rm_cluster(ctx: CephadmContext) -> None:
    if not ctx.force:
        raise Error('must pass --force to proceed: '
                    'this command may destroy precious data!')

    lock = FileLock(ctx, ctx.fsid)
    lock.acquire()
    _rm_cluster(ctx, ctx.keep_logs, ctx.zap_osds)


def _rm_cluster(ctx: CephadmContext, keep_logs: bool, zap_osds: bool) -> None:
    if not ctx.fsid:
        raise Error('must select the cluster to delete by passing --fsid to proceed')

    def disable_systemd_service(unit_name: str) -> None:
        call(ctx, ['systemctl', 'stop', unit_name],
             verbosity=CallVerbosity.DEBUG)
        call(ctx, ['systemctl', 'reset-failed', unit_name],
             verbosity=CallVerbosity.DEBUG)
        call(ctx, ['systemctl', 'disable', unit_name],
             verbosity=CallVerbosity.DEBUG)

    logger.info(f'Deleting cluster with fsid: {ctx.fsid}')

    # stop + disable individual daemon units
    for d in list_daemons(ctx, detail=False):
        if d['fsid'] != ctx.fsid:
            continue
        if d['style'] != 'cephadm:v1':
            continue
        disable_systemd_service(get_unit_name(ctx.fsid, d['name']))

    # cluster units
    for unit_name in ['ceph-%s.target' % ctx.fsid]:
        disable_systemd_service(unit_name)

    slice_name = 'system-ceph\\x2d{}.slice'.format(ctx.fsid.replace('-', '\\x2d'))
    call(ctx, ['systemctl', 'stop', slice_name],
         verbosity=CallVerbosity.DEBUG)

    # osds?
    if zap_osds:
        _zap_osds(ctx)

    # rm units
    call_throws(ctx, ['rm', '-f', ctx.unit_dir
                      + '/ceph-%s@.service' % ctx.fsid])
    call_throws(ctx, ['rm', '-f', ctx.unit_dir
                      + '/ceph-%s.target' % ctx.fsid])
    call_throws(ctx, ['rm', '-rf',
                      ctx.unit_dir + '/ceph-%s.target.wants' % ctx.fsid])

    # rm data
    call_throws(ctx, ['rm', '-rf', ctx.data_dir + '/' + ctx.fsid])

    if not keep_logs:
        # rm logs
        call_throws(ctx, ['rm', '-rf', ctx.log_dir + '/' + ctx.fsid])
        call_throws(ctx, ['rm', '-rf', ctx.log_dir
                          + '/*.wants/ceph-%s@*' % ctx.fsid])

    # rm logrotate config
    call_throws(ctx, ['rm', '-f', ctx.logrotate_dir + '/ceph-%s' % ctx.fsid])

    # if last cluster on host remove shared files
    if get_ceph_cluster_count(ctx) == 0:
        disable_systemd_service('ceph.target')

        # rm shared ceph target files
        call_throws(ctx, ['rm', '-f', ctx.unit_dir + '/multi-user.target.wants/ceph.target'])
        call_throws(ctx, ['rm', '-f', ctx.unit_dir + '/ceph.target'])

        # rm cephadm logrotate config
        call_throws(ctx, ['rm', '-f', ctx.logrotate_dir + '/cephadm'])

        if not keep_logs:
            # remove all cephadm logs
            for fname in glob(f'{ctx.log_dir}/cephadm.log*'):
                os.remove(fname)

    # rm sysctl settings
    sysctl_dirs: List[Path] = [Path(ctx.sysctl_dir), Path('/usr/lib/sysctl.d')]

    for sysctl_dir in sysctl_dirs:
        for p in sysctl_dir.glob(f'90-ceph-{ctx.fsid}-*.conf'):
            p.unlink()

    # cleanup remaining ceph directories
    ceph_dirs = [f'/run/ceph/{ctx.fsid}', f'/tmp/cephadm-{ctx.fsid}', f'/var/run/ceph/{ctx.fsid}']
    for dd in ceph_dirs:
        shutil.rmtree(dd, ignore_errors=True)

    # clean up config, keyring, and pub key files
    files = [CEPH_DEFAULT_CONF, CEPH_DEFAULT_PUBKEY, CEPH_DEFAULT_KEYRING]
    if os.path.exists(files[0]):
        valid_fsid = False
        with open(files[0]) as f:
            if ctx.fsid in f.read():
                valid_fsid = True
        if valid_fsid:
            # rm configuration files on /etc/ceph
            for n in range(0, len(files)):
                if os.path.exists(files[n]):
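                    # reached only when ceph.conf above named this fsid, so
                    # /etc/ceph files belonging to a different cluster are
                    # left untouched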
os.remove(files[n]) ################################## def check_time_sync(ctx, enabler=None): # type: (CephadmContext, Optional[Packager]) -> bool units = [ 'chrony.service', # 18.04 (at least) 'chronyd.service', # el / opensuse 'systemd-timesyncd.service', 'ntpd.service', # el7 (at least) 'ntp.service', # 18.04 (at least) 'ntpsec.service', # 20.04 (at least) / buster 'openntpd.service', # ubuntu / debian ] if not check_units(ctx, units, enabler): logger.warning('No time sync service is running; checked for %s' % units) return False return True def command_check_host(ctx: CephadmContext) -> None: errors = [] commands = ['systemctl', 'lvcreate'] try: engine = check_container_engine(ctx) logger.info(f'{engine} is present') except Error as e: errors.append(str(e)) for command in commands: try: find_program(command) logger.info('%s is present' % command) except ValueError: errors.append('%s binary does not appear to be installed' % command) # check for configured+running chronyd or ntp if not check_time_sync(ctx): errors.append('No time synchronization is active') if 'expect_hostname' in ctx and ctx.expect_hostname: if get_hostname().lower() != ctx.expect_hostname.lower(): errors.append('hostname "%s" does not match expected hostname "%s"' % ( get_hostname(), ctx.expect_hostname)) else: logger.info('Hostname "%s" matches what is expected.', ctx.expect_hostname) if errors: raise Error('\nERROR: '.join(errors)) logger.info('Host looks OK') ################################## def get_ssh_vars(ssh_user: str) -> Tuple[int, int, str]: try: s_pwd = pwd.getpwnam(ssh_user) except KeyError: raise Error('Cannot find uid/gid for ssh-user: %s' % (ssh_user)) ssh_uid = s_pwd.pw_uid ssh_gid = s_pwd.pw_gid ssh_dir = os.path.join(s_pwd.pw_dir, '.ssh') return ssh_uid, ssh_gid, ssh_dir def authorize_ssh_key(ssh_pub_key: str, ssh_user: str) -> bool: """Authorize the public key for the provided ssh user""" def key_in_file(path: str, key: str) -> bool: if not os.path.exists(path): return False with open(path) as f: lines = f.readlines() for line in lines: if line.strip() == key.strip(): return True return False logger.info(f'Adding key to {ssh_user}@localhost authorized_keys...') if ssh_pub_key is None or ssh_pub_key.isspace(): raise Error('Trying to authorize an empty ssh key') ssh_pub_key = ssh_pub_key.strip() ssh_uid, ssh_gid, ssh_dir = get_ssh_vars(ssh_user) if not os.path.exists(ssh_dir): makedirs(ssh_dir, ssh_uid, ssh_gid, 0o700) auth_keys_file = '%s/authorized_keys' % ssh_dir if key_in_file(auth_keys_file, ssh_pub_key): logger.info(f'key already in {ssh_user}@localhost authorized_keys...') return False add_newline = False if os.path.exists(auth_keys_file): with open(auth_keys_file, 'r') as f: f.seek(0, os.SEEK_END) if f.tell() > 0: f.seek(f.tell() - 1, os.SEEK_SET) # go to last char if f.read() != '\n': add_newline = True with open(auth_keys_file, 'a') as f: os.fchown(f.fileno(), ssh_uid, ssh_gid) # just in case we created it os.fchmod(f.fileno(), DEFAULT_MODE) # just in case we created it if add_newline: f.write('\n') f.write(ssh_pub_key + '\n') return True def revoke_ssh_key(key: str, ssh_user: str) -> None: """Revoke the public key authorization for the ssh user""" ssh_uid, ssh_gid, ssh_dir = get_ssh_vars(ssh_user) auth_keys_file = '%s/authorized_keys' % ssh_dir deleted = False if os.path.exists(auth_keys_file): with open(auth_keys_file, 'r') as f: lines = f.readlines() _, filename = tempfile.mkstemp() with open(filename, 'w') as f: os.fchown(f.fileno(), ssh_uid, ssh_gid) os.fchmod(f.fileno(), 
DEFAULT_MODE) # secure access to the keys file for line in lines: if line.strip() == key.strip(): deleted = True else: f.write(line) if deleted: shutil.move(filename, auth_keys_file) else: logger.warning('Cannot find the ssh key to be deleted') def check_ssh_connectivity(ctx: CephadmContext) -> None: def cmd_is_available(cmd: str) -> bool: if shutil.which(cmd) is None: logger.warning(f'Command not found: {cmd}') return False return True if not cmd_is_available('ssh') or not cmd_is_available('ssh-keygen'): logger.warning('Cannot check ssh connectivity. Skipping...') return logger.info('Verifying ssh connectivity ...') if ctx.ssh_private_key and ctx.ssh_public_key: # let's use the keys provided by the user ssh_priv_key_path = pathify(ctx.ssh_private_key.name) ssh_pub_key_path = pathify(ctx.ssh_public_key.name) else: # no custom keys, let's generate some random keys just for this check ssh_priv_key_path = f'/tmp/ssh_key_{uuid.uuid1()}' ssh_pub_key_path = f'{ssh_priv_key_path}.pub' ssh_key_gen_cmd = ['ssh-keygen', '-q', '-t', 'rsa', '-N', '', '-C', '', '-f', ssh_priv_key_path] _, _, code = call(ctx, ssh_key_gen_cmd) if code != 0: logger.warning('Cannot generate keys to check ssh connectivity.') return with open(ssh_pub_key_path, 'r') as f: key = f.read().strip() new_key = authorize_ssh_key(key, ctx.ssh_user) ssh_cfg_file_arg = ['-F', pathify(ctx.ssh_config.name)] if ctx.ssh_config else [] _, _, code = call(ctx, ['ssh', '-o StrictHostKeyChecking=no', *ssh_cfg_file_arg, '-i', ssh_priv_key_path, '-o PasswordAuthentication=no', f'{ctx.ssh_user}@{get_hostname()}', 'sudo echo']) # we only remove the key if it's a new one. In case the user has provided # some already existing key then we don't alter authorized_keys file if new_key: revoke_ssh_key(key, ctx.ssh_user) pub_key_msg = '- The public key file configured by --ssh-public-key is valid\n' if ctx.ssh_public_key else '' prv_key_msg = '- The private key file configured by --ssh-private-key is valid\n' if ctx.ssh_private_key else '' ssh_cfg_msg = '- The ssh configuration file configured by --ssh-config is valid\n' if ctx.ssh_config else '' err_msg = f""" ** Please verify your user's ssh configuration and make sure: - User {ctx.ssh_user} must have passwordless sudo access {pub_key_msg}{prv_key_msg}{ssh_cfg_msg} """ if code != 0: raise Error(err_msg) def command_prepare_host(ctx: CephadmContext) -> None: logger.info('Verifying podman|docker is present...') pkg = None try: check_container_engine(ctx) except Error as e: logger.warning(str(e)) if not pkg: pkg = create_packager(ctx) pkg.install_podman() logger.info('Verifying lvm2 is present...') if not find_executable('lvcreate'): if not pkg: pkg = create_packager(ctx) pkg.install(['lvm2']) logger.info('Verifying time synchronization is in place...') if not check_time_sync(ctx): if not pkg: pkg = create_packager(ctx) pkg.install(['chrony']) # check again, and this time try to enable # the service check_time_sync(ctx, enabler=pkg) if 'expect_hostname' in ctx and ctx.expect_hostname and ctx.expect_hostname != get_hostname(): logger.warning('Adjusting hostname from %s -> %s...' 
% (get_hostname(), ctx.expect_hostname)) call_throws(ctx, ['hostname', ctx.expect_hostname]) with open('/etc/hostname', 'w') as f: f.write(ctx.expect_hostname + '\n') logger.info('Repeating the final host check...') command_check_host(ctx) ################################## class CustomValidation(argparse.Action): def _check_name(self, values: str) -> None: try: (daemon_type, daemon_id) = values.split('.', 1) except ValueError: raise argparse.ArgumentError(self, 'must be of the format <type>.<id>. For example, osd.1 or prometheus.myhost.com') daemons = get_supported_daemons() if daemon_type not in daemons: raise argparse.ArgumentError(self, 'name must declare the type of daemon e.g. ' '{}'.format(', '.join(daemons))) def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], option_string: Optional[str] = None) -> None: assert isinstance(values, str) if self.dest == 'name': self._check_name(values) setattr(namespace, self.dest, values) ################################## def get_distro(): # type: () -> Tuple[Optional[str], Optional[str], Optional[str]] distro = None distro_version = None distro_codename = None with open('/etc/os-release', 'r') as f: for line in f.readlines(): line = line.strip() if '=' not in line or line.startswith('#'): continue (var, val) = line.split('=', 1) if val[0] == '"' and val[-1] == '"': val = val[1:-1] if var == 'ID': distro = val.lower() elif var == 'VERSION_ID': distro_version = val.lower() elif var == 'VERSION_CODENAME': distro_codename = val.lower() return distro, distro_version, distro_codename class Packager(object): def __init__(self, ctx: CephadmContext, stable: Optional[str] = None, version: Optional[str] = None, branch: Optional[str] = None, commit: Optional[str] = None): assert \ (stable and not version and not branch and not commit) or \ (not stable and version and not branch and not commit) or \ (not stable and not version and branch) or \ (not stable and not version and not branch and not commit) self.ctx = ctx self.stable = stable self.version = version self.branch = branch self.commit = commit def validate(self) -> None: """Validate parameters before writing any state to disk.""" pass def add_repo(self) -> None: raise NotImplementedError def rm_repo(self) -> None: raise NotImplementedError def install(self, ls: List[str]) -> None: raise NotImplementedError def install_podman(self) -> None: raise NotImplementedError def query_shaman(self, distro: str, distro_version: Any, branch: Optional[str], commit: Optional[str]) -> str: # query shaman logger.info('Fetching repo metadata from shaman and chacra...') shaman_url = 'https://shaman.ceph.com/api/repos/ceph/{branch}/{sha1}/{distro}/{distro_version}/repo/?arch={arch}'.format( distro=distro, distro_version=distro_version, branch=branch, sha1=commit or 'latest', arch=get_arch() ) try: shaman_response = urlopen(shaman_url) except HTTPError as err: logger.error('repository not found in shaman (might not be available yet)') raise Error('%s, failed to fetch %s' % (err, shaman_url)) chacra_url = '' try: chacra_url = shaman_response.geturl() chacra_response = urlopen(chacra_url) except HTTPError as err: logger.error('repository not found in chacra (might not be available yet)') raise Error('%s, failed to fetch %s' % (err, chacra_url)) return chacra_response.read().decode('utf-8') def repo_gpgkey(self) -> Tuple[str, str]: if self.ctx.gpg_url: return self.ctx.gpg_url, 'manual' if self.stable or self.version: return 
'https://download.ceph.com/keys/release.gpg', 'release' else: return 'https://download.ceph.com/keys/autobuild.gpg', 'autobuild' def enable_service(self, service: str) -> None: """ Start and enable the service (typically using systemd). """ call_throws(self.ctx, ['systemctl', 'enable', '--now', service]) class Apt(Packager): DISTRO_NAMES = { 'ubuntu': 'ubuntu', 'debian': 'debian', } def __init__(self, ctx: CephadmContext, stable: Optional[str], version: Optional[str], branch: Optional[str], commit: Optional[str], distro: Optional[str], distro_version: Optional[str], distro_codename: Optional[str]) -> None: super(Apt, self).__init__(ctx, stable=stable, version=version, branch=branch, commit=commit) assert distro self.ctx = ctx self.distro = self.DISTRO_NAMES[distro] self.distro_codename = distro_codename self.distro_version = distro_version def repo_path(self) -> str: return '/etc/apt/sources.list.d/ceph.list' def add_repo(self) -> None: url, name = self.repo_gpgkey() logger.info('Installing repo GPG key from %s...' % url) try: response = urlopen(url) except HTTPError as err: logger.error('failed to fetch GPG repo key from %s: %s' % ( url, err)) raise Error('failed to fetch GPG key') key = response.read() with open('/etc/apt/trusted.gpg.d/ceph.%s.gpg' % name, 'wb') as f: f.write(key) if self.version: content = 'deb %s/debian-%s/ %s main\n' % ( self.ctx.repo_url, self.version, self.distro_codename) elif self.stable: content = 'deb %s/debian-%s/ %s main\n' % ( self.ctx.repo_url, self.stable, self.distro_codename) else: content = self.query_shaman(self.distro, self.distro_codename, self.branch, self.commit) logger.info('Installing repo file at %s...' % self.repo_path()) with open(self.repo_path(), 'w') as f: f.write(content) self.update() def rm_repo(self) -> None: for name in ['autobuild', 'release', 'manual']: p = '/etc/apt/trusted.gpg.d/ceph.%s.gpg' % name if os.path.exists(p): logger.info('Removing repo GPG key %s...' % p) os.unlink(p) if os.path.exists(self.repo_path()): logger.info('Removing repo at %s...' % self.repo_path()) os.unlink(self.repo_path()) if self.distro == 'ubuntu': self.rm_kubic_repo() def install(self, ls: List[str]) -> None: logger.info('Installing packages %s...' % ls) call_throws(self.ctx, ['apt-get', 'install', '-y'] + ls) def update(self) -> None: logger.info('Updating package list...') call_throws(self.ctx, ['apt-get', 'update']) def install_podman(self) -> None: if self.distro == 'ubuntu': logger.info('Setting up repo for podman...') self.add_kubic_repo() self.update() logger.info('Attempting podman install...') try: self.install(['podman']) except Error: logger.info('Podman did not work. Falling back to docker...') self.install(['docker.io']) def kubic_repo_url(self) -> str: return 'https://download.opensuse.org/repositories/devel:/kubic:/' \ 'libcontainers:/stable/xUbuntu_%s/' % self.distro_version def kubic_repo_path(self) -> str: return '/etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list' def kubic_repo_gpgkey_url(self) -> str: return '%s/Release.key' % self.kubic_repo_url() def kubic_repo_gpgkey_path(self) -> str: return '/etc/apt/trusted.gpg.d/kubic.release.gpg' def add_kubic_repo(self) -> None: url = self.kubic_repo_gpgkey_url() logger.info('Installing repo GPG key from %s...' 
% url) try: response = urlopen(url) except HTTPError as err: logger.error('failed to fetch GPG repo key from %s: %s' % ( url, err)) raise Error('failed to fetch GPG key') key = response.read().decode('utf-8') tmp_key = write_tmp(key, 0, 0) keyring = self.kubic_repo_gpgkey_path() call_throws(self.ctx, ['apt-key', '--keyring', keyring, 'add', tmp_key.name]) logger.info('Installing repo file at %s...' % self.kubic_repo_path()) content = 'deb %s /\n' % self.kubic_repo_url() with open(self.kubic_repo_path(), 'w') as f: f.write(content) def rm_kubic_repo(self) -> None: keyring = self.kubic_repo_gpgkey_path() if os.path.exists(keyring): logger.info('Removing repo GPG key %s...' % keyring) os.unlink(keyring) p = self.kubic_repo_path() if os.path.exists(p): logger.info('Removing repo at %s...' % p) os.unlink(p) class YumDnf(Packager): DISTRO_NAMES = { 'centos': ('centos', 'el'), 'rhel': ('centos', 'el'), 'scientific': ('centos', 'el'), 'rocky': ('centos', 'el'), 'almalinux': ('centos', 'el'), 'ol': ('centos', 'el'), 'fedora': ('fedora', 'fc'), 'mariner': ('mariner', 'cm'), } def __init__(self, ctx: CephadmContext, stable: Optional[str], version: Optional[str], branch: Optional[str], commit: Optional[str], distro: Optional[str], distro_version: Optional[str]) -> None: super(YumDnf, self).__init__(ctx, stable=stable, version=version, branch=branch, commit=commit) assert distro assert distro_version self.ctx = ctx self.major = int(distro_version.split('.')[0]) self.distro_normalized = self.DISTRO_NAMES[distro][0] self.distro_code = self.DISTRO_NAMES[distro][1] + str(self.major) if (self.distro_code == 'fc' and self.major >= 30) or \ (self.distro_code == 'el' and self.major >= 8): self.tool = 'dnf' elif (self.distro_code == 'cm'): self.tool = 'tdnf' else: self.tool = 'yum' def custom_repo(self, **kw: Any) -> str: """ Repo files need special care in that a whole line should not be present if there is no value for it. Because we were using `format()` we could not conditionally add a line for a repo file. So the end result would contain a key with a missing value (say if we were passing `None`). For example, it could look like:: [ceph repo] name= ceph repo proxy= gpgcheck= Which breaks. This function allows us to conditionally add lines, preserving an order and be more careful. 
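
        For example, a sketch of the behaviour implemented below (only keys
        that carry a real value are emitted, in template order)::

            custom_repo(reponame='ceph', name='Ceph',
                        baseurl='http://example.com', enabled=1, proxy=None)

        returns::

            [ceph]
            name=Ceph
            baseurl=http://example.com
            enabled=1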
Previously, and for historical purposes, this is how the template used to look:: custom_repo = [{repo_name}] name={name} baseurl={baseurl} enabled={enabled} gpgcheck={gpgcheck} type={_type} gpgkey={gpgkey} proxy={proxy} """ lines = [] # by using tuples (vs a dict) we preserve the order of what we want to # return, like starting with a [repo name] tmpl = ( ('reponame', '[%s]'), ('name', 'name=%s'), ('baseurl', 'baseurl=%s'), ('enabled', 'enabled=%s'), ('gpgcheck', 'gpgcheck=%s'), ('_type', 'type=%s'), ('gpgkey', 'gpgkey=%s'), ('proxy', 'proxy=%s'), ('priority', 'priority=%s'), ) for line in tmpl: tmpl_key, tmpl_value = line # key values from tmpl # ensure that there is an actual value (not None nor empty string) if tmpl_key in kw and kw.get(tmpl_key) not in (None, ''): lines.append(tmpl_value % kw.get(tmpl_key)) return '\n'.join(lines) def repo_path(self) -> str: return '/etc/yum.repos.d/ceph.repo' def repo_baseurl(self) -> str: assert self.stable or self.version if self.version: return '%s/rpm-%s/%s' % (self.ctx.repo_url, self.version, self.distro_code) else: return '%s/rpm-%s/%s' % (self.ctx.repo_url, self.stable, self.distro_code) def validate(self) -> None: if self.distro_code.startswith('fc'): raise Error('Ceph team does not build Fedora specific packages and therefore cannot add repos for this distro') if self.distro_code == 'el7': if self.stable and self.stable >= 'pacific': raise Error('Ceph does not support pacific or later for this version of this linux distro and therefore cannot add a repo for it') if self.version and self.version.split('.')[0] >= '16': raise Error('Ceph does not support 16.y.z or later for this version of this linux distro and therefore cannot add a repo for it') if self.stable or self.version: # we know that yum & dnf require there to be a # $base_url/$arch/repodata/repomd.xml so we can test if this URL # is gettable in order to validate the inputs test_url = self.repo_baseurl() + '/noarch/repodata/repomd.xml' try: urlopen(test_url) except HTTPError as err: logger.error('unable to fetch repo metadata: %r', err) raise Error('failed to fetch repository metadata. please check' ' the provided parameters are correct and try again') def add_repo(self) -> None: if self.stable or self.version: content = '' for n, t in { 'Ceph': '$basearch', 'Ceph-noarch': 'noarch', 'Ceph-source': 'SRPMS'}.items(): content += '[%s]\n' % (n) content += self.custom_repo( name='Ceph %s' % t, baseurl=self.repo_baseurl() + '/' + t, enabled=1, gpgcheck=1, gpgkey=self.repo_gpgkey()[0], ) content += '\n\n' else: content = self.query_shaman(self.distro_normalized, self.major, self.branch, self.commit) logger.info('Writing repo to %s...' % self.repo_path()) with open(self.repo_path(), 'w') as f: f.write(content) if self.distro_code.startswith('el'): logger.info('Enabling EPEL...') call_throws(self.ctx, [self.tool, 'install', '-y', 'epel-release']) def rm_repo(self) -> None: if os.path.exists(self.repo_path()): os.unlink(self.repo_path()) def install(self, ls: List[str]) -> None: logger.info('Installing packages %s...' 
% ls) call_throws(self.ctx, [self.tool, 'install', '-y'] + ls) def install_podman(self) -> None: self.install(['podman']) class Zypper(Packager): DISTRO_NAMES = [ 'sles', 'opensuse-tumbleweed', 'opensuse-leap' ] def __init__(self, ctx: CephadmContext, stable: Optional[str], version: Optional[str], branch: Optional[str], commit: Optional[str], distro: Optional[str], distro_version: Optional[str]) -> None: super(Zypper, self).__init__(ctx, stable=stable, version=version, branch=branch, commit=commit) assert distro is not None self.ctx = ctx self.tool = 'zypper' self.distro = 'opensuse' self.distro_version = '15.1' if 'tumbleweed' not in distro and distro_version is not None: self.distro_version = distro_version def custom_repo(self, **kw: Any) -> str: """ See YumDnf for format explanation. """ lines = [] # by using tuples (vs a dict) we preserve the order of what we want to # return, like starting with a [repo name] tmpl = ( ('reponame', '[%s]'), ('name', 'name=%s'), ('baseurl', 'baseurl=%s'), ('enabled', 'enabled=%s'), ('gpgcheck', 'gpgcheck=%s'), ('_type', 'type=%s'), ('gpgkey', 'gpgkey=%s'), ('proxy', 'proxy=%s'), ('priority', 'priority=%s'), ) for line in tmpl: tmpl_key, tmpl_value = line # key values from tmpl # ensure that there is an actual value (not None nor empty string) if tmpl_key in kw and kw.get(tmpl_key) not in (None, ''): lines.append(tmpl_value % kw.get(tmpl_key)) return '\n'.join(lines) def repo_path(self) -> str: return '/etc/zypp/repos.d/ceph.repo' def repo_baseurl(self) -> str: assert self.stable or self.version if self.version: return '%s/rpm-%s/%s' % (self.ctx.repo_url, self.stable, self.distro) else: return '%s/rpm-%s/%s' % (self.ctx.repo_url, self.stable, self.distro) def add_repo(self) -> None: if self.stable or self.version: content = '' for n, t in { 'Ceph': '$basearch', 'Ceph-noarch': 'noarch', 'Ceph-source': 'SRPMS'}.items(): content += '[%s]\n' % (n) content += self.custom_repo( name='Ceph %s' % t, baseurl=self.repo_baseurl() + '/' + t, enabled=1, gpgcheck=1, gpgkey=self.repo_gpgkey()[0], ) content += '\n\n' else: content = self.query_shaman(self.distro, self.distro_version, self.branch, self.commit) logger.info('Writing repo to %s...' % self.repo_path()) with open(self.repo_path(), 'w') as f: f.write(content) def rm_repo(self) -> None: if os.path.exists(self.repo_path()): os.unlink(self.repo_path()) def install(self, ls: List[str]) -> None: logger.info('Installing packages %s...' 
% ls) call_throws(self.ctx, [self.tool, 'in', '-y'] + ls) def install_podman(self) -> None: self.install(['podman']) def create_packager(ctx: CephadmContext, stable: Optional[str] = None, version: Optional[str] = None, branch: Optional[str] = None, commit: Optional[str] = None) -> Packager: distro, distro_version, distro_codename = get_distro() if distro in YumDnf.DISTRO_NAMES: return YumDnf(ctx, stable=stable, version=version, branch=branch, commit=commit, distro=distro, distro_version=distro_version) elif distro in Apt.DISTRO_NAMES: return Apt(ctx, stable=stable, version=version, branch=branch, commit=commit, distro=distro, distro_version=distro_version, distro_codename=distro_codename) elif distro in Zypper.DISTRO_NAMES: return Zypper(ctx, stable=stable, version=version, branch=branch, commit=commit, distro=distro, distro_version=distro_version) raise Error('Distro %s version %s not supported' % (distro, distro_version)) def command_add_repo(ctx: CephadmContext) -> None: if ctx.version and ctx.release: raise Error('you can specify either --release or --version but not both') if not ctx.version and not ctx.release and not ctx.dev and not ctx.dev_commit: raise Error('please supply a --release, --version, --dev or --dev-commit argument') if ctx.version: try: (x, y, z) = ctx.version.split('.') except Exception: raise Error('version must be in the form x.y.z (e.g., 15.2.0)') if ctx.release: # Pacific =/= pacific in this case, set to undercase to avoid confusion ctx.release = ctx.release.lower() pkg = create_packager(ctx, stable=ctx.release, version=ctx.version, branch=ctx.dev, commit=ctx.dev_commit) pkg.validate() pkg.add_repo() logger.info('Completed adding repo.') def command_rm_repo(ctx: CephadmContext) -> None: pkg = create_packager(ctx) pkg.rm_repo() def command_install(ctx: CephadmContext) -> None: pkg = create_packager(ctx) pkg.install(ctx.packages) def command_rescan_disks(ctx: CephadmContext) -> str: def probe_hba(scan_path: str) -> None: """Tell the adapter to rescan""" with open(scan_path, 'w') as f: f.write('- - -') cmd = ctx.func.__name__.replace('command_', '') logger.info(f'{cmd}: starting') start = time.time() all_scan_files = glob('/sys/class/scsi_host/*/scan') scan_files = [] skipped = [] for scan_path in all_scan_files: adapter_name = os.path.basename(os.path.dirname(scan_path)) proc_name = read_file([os.path.join(os.path.dirname(scan_path), 'proc_name')]) if proc_name in ['unknown', 'usb-storage']: skipped.append(os.path.basename(scan_path)) logger.info(f'{cmd}: rescan skipping incompatible host adapter {adapter_name} : {proc_name}') continue scan_files.append(scan_path) if not scan_files: logger.info(f'{cmd}: no compatible HBAs found') return 'Ok. No compatible HBAs found' responses = async_run(concurrent_tasks(probe_hba, scan_files)) failures = [r for r in responses if r] logger.info(f'{cmd}: Complete. {len(scan_files)} adapters rescanned, {len(failures)} failures, {len(skipped)} skipped') elapsed = time.time() - start if failures: plural = 's' if len(failures) > 1 else '' if len(failures) == len(scan_files): return f'Failed. All {len(scan_files)} rescan requests failed' else: return f'Partial. {len(scan_files) - len(failures)} successful, {len(failures)} failure{plural} against: {", ".join(failures)}' return f'Ok. 
def get_ipv4_address(ifname): # type: (str) -> str def _extract(sock: socket.socket, offset: int) -> str: return socket.inet_ntop( socket.AF_INET, fcntl.ioctl( sock.fileno(), offset, struct.pack('256s', bytes(ifname[:15], 'utf-8')) )[20:24]) s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: addr = _extract(s, 35093) # 0x8915 = SIOCGIFADDR dq_mask = _extract(s, 35099) # 0x891b = SIOCGIFNETMASK except OSError: # interface does not have an ipv4 address return '' dec_mask = sum([bin(int(i)).count('1') for i in dq_mask.split('.')]) return '{}/{}'.format(addr, dec_mask) def get_ipv6_address(ifname): # type: (str) -> str if not os.path.exists('/proc/net/if_inet6'): return '' raw = read_file(['/proc/net/if_inet6']) data = raw.splitlines() # based on docs @ https://www.tldp.org/HOWTO/Linux+IPv6-HOWTO/ch11s04.html # field 0 is ipv6, field 2 is scope for iface_setting in data: field = iface_setting.split() if field[-1] == ifname: ipv6_raw = field[0] ipv6_fmtd = ':'.join([ipv6_raw[_p:_p + 4] for _p in range(0, len(field[0]), 4)]) # apply naming rules using ipaddress module ipv6 = ipaddress.ip_address(ipv6_fmtd) return '{}/{}'.format(str(ipv6), int('0x{}'.format(field[2]), 16)) return '' def bytes_to_human(num, mode='decimal'): # type: (float, str) -> str """Convert a bytes value into its human-readable form. :param num: number, in bytes, to convert :param mode: Either decimal (default) or binary to determine divisor :returns: string representing the bytes value in a more readable format """ unit_list = ['', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB'] divisor = 1000.0 yotta = 'YB' if mode == 'binary': unit_list = ['', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB'] divisor = 1024.0 yotta = 'YiB' for unit in unit_list: if abs(num) < divisor: return '%3.1f%s' % (num, unit) num /= divisor return '%.1f%s' % (num, yotta)
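# e.g. bytes_to_human(1_000_000) -> '1.0MB', while
# bytes_to_human(1048576, mode='binary') -> '1.0MiB'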
def read_file(path_list, file_name=''): # type: (List[str], str) -> str """Returns the content of the first file found within the `path_list` :param path_list: list of file paths to search :param file_name: optional file_name to be applied to a file path :returns: content of the file or 'Unknown' """ for path in path_list: if file_name: file_path = os.path.join(path, file_name) else: file_path = path if os.path.exists(file_path): with open(file_path, 'rb') as f: try: content = f.read().decode('utf-8', 'ignore').strip() except OSError: # sysfs may populate the file, but for devices like # virtio reads can fail return 'Unknown' else: return content return 'Unknown' ################################## class Enclosure: def __init__(self, enc_id: str, enc_path: str, dev_path: str): """External disk enclosure metadata Args: :param enc_id: enclosure id (normally a WWN) :param enc_path: sysfs path to HBA attached to the enclosure e.g. /sys/class/scsi_generic/sg11/device/enclosure/0:0:9:0 :param dev_path: sysfs path to the generic scsi device for the enclosure HBA e.g. /sys/class/scsi_generic/sg2 """ self._path: str = dev_path self._dev_path: str = os.path.join(dev_path, 'device') self._enc_path: str = enc_path self.ses_paths: List[str] = [] self.path_count: int = 0 self.vendor: str = '' self.model: str = '' self.enc_id: str = enc_id self.components: Union[int, str] = 0 self.device_lookup: Dict[str, str] = {} self.device_count: int = 0 self.slot_map: Dict[str, Dict[str, str]] = {} self._probe() def _probe(self) -> None: """Analyse the dev paths to identify enclosure related information""" self.vendor = read_file([os.path.join(self._dev_path, 'vendor')]) self.model = read_file([os.path.join(self._dev_path, 'model')]) self.components = read_file([os.path.join(self._enc_path, 'components')]) slot_paths = glob(os.path.join(self._enc_path, '*', 'slot')) for slot_path in slot_paths: slot = read_file([slot_path]) serial_path = os.path.join(os.path.dirname(slot_path), 'device', 'vpd_pg80') serial = '' if os.path.exists(serial_path): serial_raw = read_file([serial_path]) serial = (''.join(char for char in serial_raw if char in string.printable)).strip() self.device_lookup[serial] = slot slot_dir = os.path.dirname(slot_path) self.slot_map[slot] = { 'status': read_file([os.path.join(slot_dir, 'status')]), 'fault': read_file([os.path.join(slot_dir, 'fault')]), 'locate': read_file([os.path.join(slot_dir, 'locate')]), 'serial': serial, } self.device_count = len(self.device_lookup) self.update(os.path.basename(self._path)) def update(self, dev_id: str) -> None: """Update an enclosure object with a related sg device name :param dev_id (str): device name e.g. sg2 """ self.ses_paths.append(dev_id) self.path_count = len(self.ses_paths) def _dump(self) -> Dict[str, Any]: """Return a dict representation of the object""" return {k: v for k, v in self.__dict__.items() if not k.startswith('_')} def __str__(self) -> str: """Return a formatted json representation of the object as a string""" return json.dumps(self._dump(), indent=2) def __repr__(self) -> str: """Return a json representation of the object as a string""" return json.dumps(self._dump()) def as_json(self) -> Dict[str, Any]: """Return a dict representing the object""" return self._dump() class HostFacts(): _dmi_path_list = ['/sys/class/dmi/id'] _nic_path_list = ['/sys/class/net'] _apparmor_path_list = ['/etc/apparmor'] _disk_vendor_workarounds = { '0x1af4': 'Virtio Block Device' } _excluded_block_devices = ('sr', 'zram', 'dm-', 'loop', 'md') _sg_generic_glob = '/sys/class/scsi_generic/*' def __init__(self, ctx: CephadmContext): self.ctx: CephadmContext = ctx self.cpu_model: str = 'Unknown' self.sysctl_options: Dict[str, str] = self._populate_sysctl_options() self.cpu_count: int = 0 self.cpu_cores: int = 0 self.cpu_threads: int = 0 self.interfaces: Dict[str, Any] = {} self._meminfo: List[str] = read_file(['/proc/meminfo']).splitlines() self._get_cpuinfo() self._process_nics() self.arch: str = platform.processor() self.kernel: str = platform.release() self._enclosures = self._discover_enclosures() self._block_devices = self._get_block_devs() self._device_list = self._get_device_info() def _populate_sysctl_options(self) -> Dict[str, str]: sysctl_options = {} out, _, _ = call_throws(self.ctx, ['sysctl', '-a'], verbosity=CallVerbosity.QUIET_UNLESS_ERROR) if out: for line in out.splitlines(): option, value = line.split('=', 1) sysctl_options[option.strip()] = value.strip() return sysctl_options def _discover_enclosures(self) -> Dict[str, Enclosure]: """Build a dictionary of discovered scsi enclosures Enclosures are detected by 
walking the scsi generic sysfs hierarchy. Any device tree that holds an 'enclosure' subdirectory is interpreted as an enclosure. Once identified the enclosure directory is analysed to identify key descriptors that will help relate disks to enclosures and disks to enclosure slots. :return: Dict[str, Enclosure]: a map of enclosure id (hex) to enclosure object """ sg_paths: List[str] = glob(HostFacts._sg_generic_glob) enclosures: Dict[str, Enclosure] = {} for sg_path in sg_paths: enc_path = os.path.join(sg_path, 'device', 'enclosure') if os.path.exists(enc_path): enc_dirs = glob(os.path.join(enc_path, '*')) if len(enc_dirs) != 1: # incomplete enclosure spec - expecting ONE dir in the format # host(adapter):bus:target:lun e.g. 16:0:0:0 continue enc_path = enc_dirs[0] enc_id = read_file([os.path.join(enc_path, 'id')]) if enc_id in enclosures: enclosures[enc_id].update(os.path.basename(sg_path)) continue enclosure = Enclosure(enc_id, enc_path, sg_path) enclosures[enc_id] = enclosure return enclosures @property def enclosures(self) -> Dict[str, Dict[str, Any]]: """Dump the enclosure objects as dicts""" return {k: v._dump() for k, v in self._enclosures.items()} @property def enclosure_count(self) -> int: """Return the number of enclosures detected""" return len(self._enclosures.keys()) def _get_cpuinfo(self): # type: () -> None """Determine cpu information via /proc/cpuinfo""" raw = read_file(['/proc/cpuinfo']) output = raw.splitlines() cpu_set = set() for line in output: field = [f.strip() for f in line.split(':')] if 'model name' in line: self.cpu_model = field[1] if 'physical id' in line: cpu_set.add(field[1]) if 'siblings' in line: self.cpu_threads = int(field[1].strip()) if 'cpu cores' in line: self.cpu_cores = int(field[1].strip()) self.cpu_count = len(cpu_set) def _get_block_devs(self): # type: () -> List[str] """Determine the list of block devices by looking at /sys/block""" return [dev for dev in os.listdir('/sys/block') if not dev.startswith(HostFacts._excluded_block_devices)] @property def operating_system(self): # type: () -> str """Determine OS version""" raw_info = read_file(['/etc/os-release']) os_release = raw_info.splitlines() rel_str = 'Unknown' rel_dict = dict() for line in os_release: if '=' in line: var_name, var_value = line.split('=', 1) rel_dict[var_name] = var_value.strip('"') # Would normally use PRETTY_NAME, but NAME and VERSION are more # consistent if all(_v in rel_dict for _v in ['NAME', 'VERSION']): rel_str = '{} {}'.format(rel_dict['NAME'], rel_dict['VERSION']) return rel_str @property def hostname(self): # type: () -> str """Return the hostname""" return platform.node() @property def shortname(self) -> str: return platform.node().split('.', 1)[0] @property def fqdn(self) -> str: return get_fqdn() @property def subscribed(self): # type: () -> str """Highlevel check to see if the host is subscribed to receive updates/support""" def _red_hat(): # type: () -> str # RHEL 7 and RHEL 8 entitlements_dir = '/etc/pki/entitlement' if os.path.exists(entitlements_dir): pems = glob('{}/*.pem'.format(entitlements_dir)) if len(pems) >= 2: return 'Yes' return 'No' os_name = self.operating_system if os_name.upper().startswith('RED HAT'): return _red_hat() return 'Unknown' @property def hdd_count(self): # type: () -> int """Return a count of HDDs (spinners)""" return len(self.hdd_list) def _get_capacity(self, dev): # type: (str) -> int """Determine the size of a given device The kernel always bases device size calculations on a 512 byte sector. 
For more information see https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/include/linux/types.h?h=v5.15.63#n120 """ size_path = os.path.join('/sys/block', dev, 'size') size_blocks = int(read_file([size_path])) return size_blocks * 512 def _get_capacity_by_type(self, disk_type='hdd'): # type: (str) -> int """Return the total capacity of a category of device (flash or hdd)""" capacity: int = 0 for dev in self._device_list: if dev['disk_type'] == disk_type: disk_capacity = cast(int, dev.get('disk_size_bytes', 0)) capacity += disk_capacity return capacity def _get_device_info(self): # type: () -> List[Dict[str, object]] """Return a 'pretty' name list for each unique device in the `dev_list`""" disk_list = list() # serial_num_lookup is a dict of serial number -> List of devices with that serial number serial_num_lookup: Dict[str, List[str]] = {} # make a map of devname -> disk path. this path name may indicate the physical slot # of a drive (phyXX) disk_path_map: Dict[str, str] = {} for path in glob('/dev/disk/by-path/*'): tgt_raw = Path(path).resolve() tgt = os.path.basename(str(tgt_raw)) disk_path_map[tgt] = path # make a map of holder (dm-XX) -> full mpath name dm_device_map: Dict[str, str] = {} for mpath in glob('/dev/mapper/mpath*'): tgt_raw = Path(mpath).resolve() tgt = os.path.basename(str(tgt_raw)) dm_device_map[tgt] = mpath # main loop to process all eligible block devices for dev in self._block_devices: enclosure_id = '' enclosure_slot = '' scsi_addr = '' mpath = '' disk_model = read_file(['/sys/block/{}/device/model'.format(dev)]).strip() disk_rev = read_file(['/sys/block/{}/device/rev'.format(dev)]).strip() disk_wwid = read_file(['/sys/block/{}/device/wwid'.format(dev)]).strip() vendor = read_file(['/sys/block/{}/device/vendor'.format(dev)]).strip() rotational = read_file(['/sys/block/{}/queue/rotational'.format(dev)]) holders_raw = glob('/sys/block/{}/holders/*'.format(dev)) if len(holders_raw) == 1: # mpath will have 1 holder entry holder = os.path.basename(holders_raw[0]) mpath = dm_device_map.get(holder, '') disk_type = 'hdd' if rotational == '1' else 'flash' scsi_addr_path = glob('/sys/block/{}/device/bsg/*'.format(dev)) if len(scsi_addr_path) == 1: scsi_addr = os.path.basename(scsi_addr_path[0]) # vpd_pg80 isn't guaranteed (libvirt, vmware for example) serial_raw = read_file(['/sys/block/{}/device/vpd_pg80'.format(dev)]) serial = (''.join(i for i in serial_raw if i in string.printable)).strip() if serial.lower() == 'unknown': serial = '' else: if serial in serial_num_lookup: serial_num_lookup[serial].append(dev) else: serial_num_lookup[serial] = [dev] for enc_id, enclosure in self._enclosures.items(): if serial in enclosure.device_lookup.keys(): enclosure_id = enc_id enclosure_slot = enclosure.device_lookup[serial] disk_vendor = HostFacts._disk_vendor_workarounds.get(vendor, vendor) disk_size_bytes = self._get_capacity(dev) disk_list.append({ 'description': '{} {} ({})'.format(disk_vendor, disk_model, bytes_to_human(disk_size_bytes)), 'vendor': disk_vendor, 'model': disk_model, 'rev': disk_rev, 'wwid': disk_wwid, 'dev_name': dev, 'disk_size_bytes': disk_size_bytes, 'disk_type': disk_type, 'serial': serial, 'alt_dev_name': '', 'scsi_addr': scsi_addr, 'enclosure_id': enclosure_id, 'enclosure_slot': enclosure_slot, 'path_id': disk_path_map.get(dev, ''), 'mpath': mpath, }) # process the devices to drop duplicate physical devs based on matching # the unique serial number disk_list_unique: List[Dict[str, Any]] = [] serials_seen: List[str] = [] for dev in 
disk_list: serial = str(dev['serial']) if serial: if serial in serials_seen: continue else: serials_seen.append(serial) devs = serial_num_lookup[serial].copy() devs.remove(str(dev['dev_name'])) dev['alt_dev_name'] = ','.join(devs) disk_list_unique.append(dev) return disk_list_unique @property def hdd_list(self): # type: () -> List[Dict[str, object]] """Return a list of devices that are HDDs (spinners)""" return [dev for dev in self._device_list if dev['disk_type'] == 'hdd'] @property def flash_list(self): # type: () -> List[Dict[str, object]] """Return a list of devices that are flash based (SSD, NVMe)""" return [dev for dev in self._device_list if dev['disk_type'] == 'flash'] @property def hdd_capacity_bytes(self): # type: () -> int """Return the total capacity for all HDD devices (bytes)""" return self._get_capacity_by_type(disk_type='hdd') @property def hdd_capacity(self): # type: () -> str """Return the total capacity for all HDD devices (human readable format)""" return bytes_to_human(self.hdd_capacity_bytes) @property def cpu_load(self): # type: () -> Dict[str, float] """Return the cpu load average data for the host""" raw = read_file(['/proc/loadavg']).strip() data = raw.split() return { '1min': float(data[0]), '5min': float(data[1]), '15min': float(data[2]), } @property def flash_count(self): # type: () -> int """Return the number of flash devices in the system (SSD, NVMe)""" return len(self.flash_list) @property def flash_capacity_bytes(self): # type: () -> int """Return the total capacity for all flash devices (bytes)""" return self._get_capacity_by_type(disk_type='flash') @property def flash_capacity(self): # type: () -> str """Return the total capacity for all Flash devices (human readable format)""" return bytes_to_human(self.flash_capacity_bytes) def _process_nics(self): # type: () -> None """Look at the NIC devices and extract network related metadata""" # from https://github.com/torvalds/linux/blob/master/include/uapi/linux/if_arp.h hw_lookup = { '1': 'ethernet', '32': 'infiniband', '772': 'loopback', } for nic_path in HostFacts._nic_path_list: if not os.path.exists(nic_path): continue for iface in os.listdir(nic_path): if os.path.exists(os.path.join(nic_path, iface, 'bridge')): nic_type = 'bridge' elif os.path.exists(os.path.join(nic_path, iface, 'bonding')): nic_type = 'bonding' else: nic_type = hw_lookup.get(read_file([os.path.join(nic_path, iface, 'type')]), 'Unknown') if nic_type == 'loopback': # skip loopback devices continue lower_devs_list = [os.path.basename(link.replace('lower_', '')) for link in glob(os.path.join(nic_path, iface, 'lower_*'))] upper_devs_list = [os.path.basename(link.replace('upper_', '')) for link in glob(os.path.join(nic_path, iface, 'upper_*'))] try: mtu = int(read_file([os.path.join(nic_path, iface, 'mtu')])) except ValueError: mtu = 0 operstate = read_file([os.path.join(nic_path, iface, 'operstate')]) try: speed = int(read_file([os.path.join(nic_path, iface, 'speed')])) except (OSError, ValueError): # OSError : device doesn't support the ethtool get_link_ksettings # ValueError : raised when the read fails, and returns Unknown # # Either way, we show a -1 when speed isn't available speed = -1 dev_link = os.path.join(nic_path, iface, 'device') if os.path.exists(dev_link): iftype = 'physical' driver_path = os.path.join(dev_link, 'driver') if os.path.exists(driver_path): driver = os.path.basename(os.path.realpath(driver_path)) else: driver = 'Unknown' else: iftype = 'logical' driver = '' self.interfaces[iface] = { 'mtu': mtu, 'upper_devs_list': 
upper_devs_list, 'lower_devs_list': lower_devs_list, 'operstate': operstate, 'iftype': iftype, 'nic_type': nic_type, 'driver': driver, 'speed': speed, 'ipv4_address': get_ipv4_address(iface), 'ipv6_address': get_ipv6_address(iface), } @property def nic_count(self): # type: () -> int """Return a total count of all physical NICs detected in the host""" phys_devs = [] for iface in self.interfaces: if self.interfaces[iface]['iftype'] == 'physical': phys_devs.append(iface) return len(phys_devs) def _get_mem_data(self, field_name): # type: (str) -> int for line in self._meminfo: if line.startswith(field_name): _d = line.split() return int(_d[1]) return 0 @property def memory_total_kb(self): # type: () -> int """Determine the memory installed (kb)""" return self._get_mem_data('MemTotal') @property def memory_free_kb(self): # type: () -> int """Determine the memory free (not cache, immediately usable)""" return self._get_mem_data('MemFree') @property def memory_available_kb(self): # type: () -> int """Determine the memory available to new applications without swapping""" return self._get_mem_data('MemAvailable') @property def vendor(self): # type: () -> str """Determine server vendor from DMI data in sysfs""" return read_file(HostFacts._dmi_path_list, 'sys_vendor') @property def model(self): # type: () -> str """Determine server model information from DMI data in sysfs""" family = read_file(HostFacts._dmi_path_list, 'product_family') product = read_file(HostFacts._dmi_path_list, 'product_name') if family == 'Unknown' and product: return '{}'.format(product) return '{} ({})'.format(family, product) @property def bios_version(self): # type: () -> str """Determine server BIOS version from DMI data in sysfs""" return read_file(HostFacts._dmi_path_list, 'bios_version') @property def bios_date(self): # type: () -> str """Determine server BIOS date from DMI data in sysfs""" return read_file(HostFacts._dmi_path_list, 'bios_date') @property def chassis_serial(self): # type: () -> str """Determine chassis serial number from DMI data in sysfs""" return read_file(HostFacts._dmi_path_list, 'chassis_serial') @property def board_serial(self): # type: () -> str """Determine mainboard serial number from DMI data in sysfs""" return read_file(HostFacts._dmi_path_list, 'board_serial') @property def product_serial(self): # type: () -> str """Determine server's serial number from DMI data in sysfs""" return read_file(HostFacts._dmi_path_list, 'product_serial') @property def timestamp(self): # type: () -> float """Return the current time as Epoch seconds""" return time.time() @property def system_uptime(self): # type: () -> float """Return the system uptime (in secs)""" raw_time = read_file(['/proc/uptime']) up_secs, _ = raw_time.split() return float(up_secs) @property def kernel_security(self): # type: () -> Dict[str, str] """Determine the security features enabled in the kernel - SELinux, AppArmor""" def _fetch_selinux() -> Dict[str, str]: """Get the selinux status""" security = {} try: out, err, code = call(self.ctx, ['sestatus'], verbosity=CallVerbosity.QUIET) security['type'] = 'SELinux' status, mode, policy = '', '', '' for line in out.split('\n'): if line.startswith('SELinux status:'): k, v = line.split(':') status = v.strip() elif line.startswith('Current mode:'): k, v = line.split(':') mode = v.strip() elif line.startswith('Loaded policy name:'): k, v = line.split(':') policy = v.strip() if status == 'disabled': security['description'] = 'SELinux: Disabled' else: security['description'] = 'SELinux: Enabled({}, 
{})'.format(mode, policy) except Exception as e: logger.info('unable to get selinux status: %s' % e) return security def _fetch_apparmor() -> Dict[str, str]: """Read the apparmor profiles directly, returning an overview of AppArmor status""" security = {} for apparmor_path in HostFacts._apparmor_path_list: if os.path.exists(apparmor_path): security['type'] = 'AppArmor' security['description'] = 'AppArmor: Enabled' try: profiles = read_file(['/sys/kernel/security/apparmor/profiles']) if len(profiles) == 0: return {} except OSError: pass else: summary = {} # type: Dict[str, int] for line in profiles.split('\n'): item, mode = line.split(' ') mode = mode.strip('()') if mode in summary: summary[mode] += 1 else: summary[mode] = 1 summary_str = ','.join(['{} {}'.format(v, k) for k, v in summary.items()]) security = {**security, **summary} # type: ignore security['description'] += '({})'.format(summary_str) return security return {} ret = {} if os.path.exists('/sys/kernel/security/lsm'): lsm = read_file(['/sys/kernel/security/lsm']).strip() if 'selinux' in lsm: ret = _fetch_selinux() elif 'apparmor' in lsm: ret = _fetch_apparmor() else: return { 'type': 'Unknown', 'description': 'Linux Security Module framework is active, but is not using SELinux or AppArmor' } if ret: return ret return { 'type': 'None', 'description': 'Linux Security Module framework is not available' } @property def selinux_enabled(self) -> bool: return (self.kernel_security['type'] == 'SELinux') and \ (self.kernel_security['description'] != 'SELinux: Disabled') @property def kernel_parameters(self): # type: () -> Dict[str, str] """Get kernel parameters required/used in Ceph clusters""" k_param = {} out, _, _ = call_throws(self.ctx, ['sysctl', '-a'], verbosity=CallVerbosity.SILENT) if out: param_list = out.split('\n') param_dict = {param.split(' = ')[0]: param.split(' = ')[-1] for param in param_list} # return only desired parameters if 'net.ipv4.ip_nonlocal_bind' in param_dict: k_param['net.ipv4.ip_nonlocal_bind'] = param_dict['net.ipv4.ip_nonlocal_bind'] return k_param @staticmethod def _process_net_data(tcp_file: str, protocol: str = 'tcp') -> List[int]: listening_ports = [] # Connections state documentation # tcp - https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/net/tcp_states.h # udp - uses 07 (TCP_CLOSE or UNCONN, since udp is stateless. test with netcat -ul <port>)
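# e.g. a (hypothetical) /proc/net/tcp row ' 0: 00000000:1A85 00000000:0000 0A ...'
# has state 0A (LISTEN); the local port is hex, so int('1A85', 16) == 6789 (the
# mon v1 port) is what gets appended to listening_ports below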
listening_state = { 'tcp': '0A', 'udp': '07' } if protocol not in listening_state.keys(): return [] if os.path.exists(tcp_file): with open(tcp_file) as f: tcp_data = f.readlines()[1:] for con in tcp_data: con_info = con.strip().split() if con_info[3] == listening_state[protocol]: local_port = int(con_info[1].split(':')[1], 16) listening_ports.append(local_port) return listening_ports @property def tcp_ports_used(self) -> List[int]: return HostFacts._process_net_data('/proc/net/tcp') @property def tcp6_ports_used(self) -> List[int]: return HostFacts._process_net_data('/proc/net/tcp6') @property def udp_ports_used(self) -> List[int]: return HostFacts._process_net_data('/proc/net/udp', 'udp') @property def udp6_ports_used(self) -> List[int]: return HostFacts._process_net_data('/proc/net/udp6', 'udp') def dump(self): # type: () -> str """Return the attributes of this HostFacts object as json""" data = { k: getattr(self, k) for k in dir(self) if not k.startswith('_') and isinstance(getattr(self, k), (float, int, str, list, dict, tuple)) } return json.dumps(data, indent=2, sort_keys=True) ################################## def command_gather_facts(ctx: CephadmContext) -> None: """gather_facts is intended to provide host related metadata to the caller""" host = HostFacts(ctx) print(host.dump()) ################################## def systemd_target_state(ctx: CephadmContext, target_name: str, subsystem: str = 'ceph') -> bool: # TODO: UNITTEST return os.path.exists( os.path.join( ctx.unit_dir, f'{subsystem}.target.wants', target_name ) ) def target_exists(ctx: CephadmContext) -> bool: return os.path.exists(ctx.unit_dir + '/ceph.target') @infer_fsid def command_maintenance(ctx: CephadmContext) -> str: if not ctx.fsid: raise Error('failed - must pass --fsid to specify cluster') target = f'ceph-{ctx.fsid}.target' if ctx.maintenance_action.lower() == 'enter': logger.info('Requested to place host into maintenance') if systemd_target_state(ctx, target): _out, _err, code = call(ctx, ['systemctl', 'disable', target], verbosity=CallVerbosity.DEBUG) if code: logger.error(f'Failed to disable the {target} target') return 'failed - to disable the target' else: # stopping a target waits by default _out, _err, code = call(ctx, ['systemctl', 'stop', target], verbosity=CallVerbosity.DEBUG) if code: logger.error(f'Failed to stop the {target} target') return 'failed - to stop the target' else: return f'success - systemd target {target} disabled' else: return 'skipped - target already disabled' else: logger.info('Requested to exit maintenance state') # if we've never deployed a daemon on this host there will be no systemd # target to disable so attempting a disable will fail. We still need to # return success here or host will be permanently stuck in maintenance mode # as no daemons can be deployed so no systemd target will ever exist to disable. if not target_exists(ctx): return 'skipped - systemd target not present on this host. Host removed from maintenance mode.'
# exit maintenance request if not systemd_target_state(ctx, target): _out, _err, code = call(ctx, ['systemctl', 'enable', target], verbosity=CallVerbosity.DEBUG) if code: logger.error(f'Failed to enable the {target} target') return 'failed - unable to enable the target' else: # starting a target waits by default _out, _err, code = call(ctx, ['systemctl', 'start', target], verbosity=CallVerbosity.DEBUG) if code: logger.error(f'Failed to start the {target} target') return 'failed - unable to start the target' else: return f'success - systemd target {target} enabled and started' return f'success - systemd target {target} enabled and started' ################################## class ArgumentFacade: def __init__(self) -> None: self.defaults: Dict[str, Any] = {} def add_argument(self, *args: Any, **kwargs: Any) -> None: if not args: raise ValueError('expected at least one argument') name = args[0] if not name.startswith('--'): raise ValueError(f'expected long option, got: {name!r}') name = name[2:].replace('-', '_') value = kwargs.pop('default', None) self.defaults[name] = value def apply(self, ctx: CephadmContext) -> None: for key, value in self.defaults.items(): setattr(ctx, key, value) def _add_deploy_parser_args( parser_deploy: Union[argparse.ArgumentParser, ArgumentFacade], ) -> None: parser_deploy.add_argument( '--config', '-c', help='config file for new daemon') parser_deploy.add_argument( '--config-json', help='Additional configuration information in JSON format') parser_deploy.add_argument( '--keyring', help='keyring for new daemon') parser_deploy.add_argument( '--key', help='key for new daemon') parser_deploy.add_argument( '--osd-fsid', help='OSD uuid, if creating an OSD container') parser_deploy.add_argument( '--skip-firewalld', action='store_true', help='Do not configure firewalld') parser_deploy.add_argument( '--tcp-ports', help='List of tcp ports to open in the host firewall') parser_deploy.add_argument( '--reconfig', action='store_true', help='Reconfigure a previously deployed daemon') parser_deploy.add_argument( '--allow-ptrace', action='store_true', help='Allow SYS_PTRACE on daemon container') parser_deploy.add_argument( '--container-init', action='store_true', default=CONTAINER_INIT, help=argparse.SUPPRESS) parser_deploy.add_argument( '--memory-request', help='Container memory request/target' ) parser_deploy.add_argument( '--memory-limit', help='Container memory hard limit' ) parser_deploy.add_argument( '--meta-json', help='JSON dict of additional metadata' ) parser_deploy.add_argument( '--extra-container-args', action='append', default=[], help='Additional container arguments to apply to daemon' ) parser_deploy.add_argument( '--extra-entrypoint-args', action='append', default=[], help='Additional entrypoint arguments to apply to daemon' ) def _get_parser(): # type: () -> argparse.ArgumentParser parser = argparse.ArgumentParser( description='Bootstrap Ceph daemons with systemd and containers.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '--image', help='container image. 
Can also be set via the "CEPHADM_IMAGE" ' 'env var') parser.add_argument( '--docker', action='store_true', help='use docker instead of podman') parser.add_argument( '--data-dir', default=DATA_DIR, help='base directory for daemon data') parser.add_argument( '--log-dir', default=LOG_DIR, help='base directory for daemon logs') parser.add_argument( '--logrotate-dir', default=LOGROTATE_DIR, help='location of logrotate configuration files') parser.add_argument( '--sysctl-dir', default=SYSCTL_DIR, help='location of sysctl configuration files') parser.add_argument( '--unit-dir', default=UNIT_DIR, help='base directory for systemd units') parser.add_argument( '--verbose', '-v', action='store_true', help='Show debug-level log messages') parser.add_argument( '--timeout', type=int, default=DEFAULT_TIMEOUT, help='timeout in seconds') parser.add_argument( '--retry', type=int, default=DEFAULT_RETRY, help='max number of retries') parser.add_argument( '--env', '-e', action='append', default=[], help='set environment variable') parser.add_argument( '--no-container-init', action='store_true', default=not CONTAINER_INIT, help='Do not run podman/docker with `--init`') parser.add_argument( '--no-cgroups-split', action='store_true', default=False, help='Do not run containers with --cgroups=split (currently only relevant when using podman)') subparsers = parser.add_subparsers(help='sub-command') parser_version = subparsers.add_parser( 'version', help='get cephadm version') parser_version.set_defaults(func=command_version) parser_pull = subparsers.add_parser( 'pull', help='pull the default container image') parser_pull.set_defaults(func=command_pull) parser_pull.add_argument( '--insecure', action='store_true', help=argparse.SUPPRESS, ) parser_inspect_image = subparsers.add_parser( 'inspect-image', help='inspect local container image') parser_inspect_image.set_defaults(func=command_inspect_image) parser_ls = subparsers.add_parser( 'ls', help='list daemon instances on this host') parser_ls.set_defaults(func=command_ls) parser_ls.add_argument( '--no-detail', action='store_true', help='Do not include daemon status') parser_ls.add_argument( '--legacy-dir', default='/', help='base directory for legacy daemon data') parser_list_networks = subparsers.add_parser( 'list-networks', help='list IP networks') parser_list_networks.set_defaults(func=command_list_networks) parser_adopt = subparsers.add_parser( 'adopt', help='adopt daemon deployed with a different tool') parser_adopt.set_defaults(func=command_adopt) parser_adopt.add_argument( '--name', '-n', required=True, help='daemon name (type.id)') parser_adopt.add_argument( '--style', required=True, help='deployment style (legacy, ...)') parser_adopt.add_argument( '--cluster', default='ceph', help='cluster name') parser_adopt.add_argument( '--legacy-dir', default='/', help='base directory for legacy daemon data') parser_adopt.add_argument( '--config-json', help='Additional configuration information in JSON format') parser_adopt.add_argument( '--skip-firewalld', action='store_true', help='Do not configure firewalld') parser_adopt.add_argument( '--skip-pull', action='store_true', help='do not pull the default image before adopting') parser_adopt.add_argument( '--force-start', action='store_true', help='start newly adopted daemon, even if it was not running previously') parser_adopt.add_argument( '--container-init', action='store_true', default=CONTAINER_INIT, help=argparse.SUPPRESS) parser_rm_daemon = subparsers.add_parser( 'rm-daemon', help='remove daemon instance') 
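# each subcommand follows the same argparse pattern seen above:
# subparsers.add_parser() registers the command, set_defaults(func=...) binds
# its handler, and add_argument() declares its flags; main() later dispatches
# to the selected handler via ctx.func(ctx)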
parser_rm_daemon.set_defaults(func=command_rm_daemon) parser_rm_daemon.add_argument( '--name', '-n', required=True, action=CustomValidation, help='daemon name (type.id)') parser_rm_daemon.add_argument( '--tcp-ports', help='List of tcp ports to close in the host firewall') parser_rm_daemon.add_argument( '--fsid', required=True, help='cluster FSID') parser_rm_daemon.add_argument( '--force', action='store_true', help='proceed, even though this may destroy valuable data') parser_rm_daemon.add_argument( '--force-delete-data', action='store_true', help='delete valuable daemon data instead of making a backup') parser_rm_cluster = subparsers.add_parser( 'rm-cluster', help='remove all daemons for a cluster') parser_rm_cluster.set_defaults(func=command_rm_cluster) parser_rm_cluster.add_argument( '--fsid', required=True, help='cluster FSID') parser_rm_cluster.add_argument( '--force', action='store_true', help='proceed, even though this may destroy valuable data') parser_rm_cluster.add_argument( '--keep-logs', action='store_true', help='do not remove log files') parser_rm_cluster.add_argument( '--zap-osds', action='store_true', help='zap OSD devices for this cluster') parser_run = subparsers.add_parser( 'run', help='run a ceph daemon, in a container, in the foreground') parser_run.set_defaults(func=command_run) parser_run.add_argument( '--name', '-n', required=True, help='daemon name (type.id)') parser_run.add_argument( '--fsid', required=True, help='cluster FSID') parser_shell = subparsers.add_parser( 'shell', help='run an interactive shell inside a daemon container') parser_shell.set_defaults(func=command_shell) parser_shell.add_argument( '--shared_ceph_folder', metavar='CEPH_SOURCE_FOLDER', help='Development mode. Several folders in containers are volumes mapped to different sub-folders in the ceph source folder') parser_shell.add_argument( '--fsid', help='cluster FSID') parser_shell.add_argument( '--name', '-n', help='daemon name (type.id)') parser_shell.add_argument( '--config', '-c', help='ceph.conf to pass through to the container') parser_shell.add_argument( '--keyring', '-k', help='ceph.keyring to pass through to the container') parser_shell.add_argument( '--mount', '-m', help=('mount a file or directory in the container. ' 'Supports multiple mounts. ' 'e.g.: `--mount /foo /bar:/bar`. ' 'When no destination is passed, default is /mnt'), nargs='+') parser_shell.add_argument( '--env', '-e', action='append', default=[], help='set environment variable') parser_shell.add_argument( '--volume', '-v', action='append', default=[], help='mount a volume in the container') parser_shell.add_argument( 'command', nargs=argparse.REMAINDER, help='command (optional)') parser_shell.add_argument( '--no-hosts', action='store_true', help='do not pass /etc/hosts through to the container') parser_enter = subparsers.add_parser( 'enter', help='run an interactive shell inside a running daemon container') parser_enter.set_defaults(func=command_enter) parser_enter.add_argument( '--fsid', help='cluster FSID') parser_enter.add_argument( '--name', '-n', required=True, help='daemon name (type.id)') parser_enter.add_argument( 'command', nargs=argparse.REMAINDER, help='command') parser_ceph_volume = subparsers.add_parser( 'ceph-volume', help='run ceph-volume inside a container') parser_ceph_volume.set_defaults(func=command_ceph_volume) parser_ceph_volume.add_argument( '--shared_ceph_folder', metavar='CEPH_SOURCE_FOLDER', help='Development mode. 
Several folders in containers are volumes mapped to different sub-folders in the ceph source folder') parser_ceph_volume.add_argument( '--fsid', help='cluster FSID') parser_ceph_volume.add_argument( '--config-json', help='JSON file with config and (client.bootstrap-osd) key') parser_ceph_volume.add_argument( '--config', '-c', help='ceph conf file') parser_ceph_volume.add_argument( '--keyring', '-k', help='ceph.keyring to pass through to the container') parser_ceph_volume.add_argument( 'command', nargs=argparse.REMAINDER, help='command') parser_zap_osds = subparsers.add_parser( 'zap-osds', help='zap all OSDs associated with a particular fsid') parser_zap_osds.set_defaults(func=command_zap_osds) parser_zap_osds.add_argument( '--fsid', required=True, help='cluster FSID') parser_zap_osds.add_argument( '--force', action='store_true', help='proceed, even though this may destroy valuable data') parser_unit = subparsers.add_parser( 'unit', help="operate on the daemon's systemd unit") parser_unit.set_defaults(func=command_unit) parser_unit.add_argument( 'command', help='systemd command (start, stop, restart, enable, disable, ...)') parser_unit.add_argument( '--fsid', help='cluster FSID') parser_unit.add_argument( '--name', '-n', required=True, help='daemon name (type.id)') parser_logs = subparsers.add_parser( 'logs', help='print journald logs for a daemon container') parser_logs.set_defaults(func=command_logs) parser_logs.add_argument( '--fsid', help='cluster FSID') parser_logs.add_argument( '--name', '-n', required=True, help='daemon name (type.id)') parser_logs.add_argument( 'command', nargs='*', help='additional journalctl args') parser_bootstrap = subparsers.add_parser( 'bootstrap', help='bootstrap a cluster (mon + mgr daemons)') parser_bootstrap.set_defaults(func=command_bootstrap) parser_bootstrap.add_argument( '--config', '-c', help='ceph conf file to incorporate') parser_bootstrap.add_argument( '--mon-id', required=False, help='mon id (default: local hostname)') group = parser_bootstrap.add_mutually_exclusive_group() group.add_argument( '--mon-addrv', help='mon IPs (e.g., [v2:localipaddr:3300,v1:localipaddr:6789])') group.add_argument( '--mon-ip', help='mon IP') parser_bootstrap.add_argument( '--mgr-id', required=False, help='mgr id (default: randomly generated)') parser_bootstrap.add_argument( '--fsid', help='cluster FSID') parser_bootstrap.add_argument( '--output-dir', default='/etc/ceph', help='directory to write config, keyring, and pub key files') parser_bootstrap.add_argument( '--output-keyring', help='location to write keyring file with new cluster admin and mon keys') parser_bootstrap.add_argument( '--output-config', help='location to write conf file to connect to new cluster') parser_bootstrap.add_argument( '--output-pub-ssh-key', help="location to write the cluster's public SSH key") parser_bootstrap.add_argument( '--skip-admin-label', action='store_true', help='do not create admin label for ceph.conf and client.admin keyring distribution') parser_bootstrap.add_argument( '--skip-ssh', action='store_true', help='skip setup of ssh key on local host') parser_bootstrap.add_argument( '--initial-dashboard-user', default='admin', help='Initial user for the dashboard') parser_bootstrap.add_argument( '--initial-dashboard-password', help='Initial password for the initial dashboard user') parser_bootstrap.add_argument( '--ssl-dashboard-port', type=int, default=8443, help='Port number used to connect with dashboard using SSL') parser_bootstrap.add_argument( '--dashboard-key', 
type=argparse.FileType('r'), help='Dashboard key') parser_bootstrap.add_argument( '--dashboard-crt', type=argparse.FileType('r'), help='Dashboard certificate') parser_bootstrap.add_argument( '--ssh-config', type=argparse.FileType('r'), help='SSH config') parser_bootstrap.add_argument( '--ssh-private-key', type=argparse.FileType('r'), help='SSH private key') parser_bootstrap.add_argument( '--ssh-public-key', type=argparse.FileType('r'), help='SSH public key') parser_bootstrap.add_argument( '--ssh-user', default='root', help='set user for SSHing to cluster hosts, passwordless sudo will be needed for non-root users') parser_bootstrap.add_argument( '--skip-mon-network', action='store_true', help='do not set mon public_network based on bootstrap mon ip') parser_bootstrap.add_argument( '--skip-dashboard', action='store_true', help='do not enable the Ceph Dashboard') parser_bootstrap.add_argument( '--dashboard-password-noupdate', action='store_true', help='stop forced dashboard password change') parser_bootstrap.add_argument( '--no-minimize-config', action='store_true', help='do not assimilate and minimize the config file') parser_bootstrap.add_argument( '--skip-ping-check', action='store_true', help='do not verify that mon IP is pingable') parser_bootstrap.add_argument( '--skip-pull', action='store_true', help='do not pull the default image before bootstrapping') parser_bootstrap.add_argument( '--skip-firewalld', action='store_true', help='Do not configure firewalld') parser_bootstrap.add_argument( '--allow-overwrite', action='store_true', help='allow overwrite of existing --output-* config/keyring/ssh files') # the following logic to have both '--cleanup-on-failure' and '--no-cleanup-on-failure' # has been included in argparse as of python v3.9; however, since we have to support # older python versions the following is more generic. Once python v3.9 becomes # the minimum supported version we can implement the same by using the new option # argparse.BooleanOptionalAction group = parser_bootstrap.add_mutually_exclusive_group() group.add_argument( '--cleanup-on-failure', action='store_true', default=True, help='Delete cluster files in case of a failed installation') group.add_argument( '--no-cleanup-on-failure', action='store_const', const=False, dest='cleanup_on_failure', help='Do not delete cluster files in case of a failed installation') parser_bootstrap.add_argument( '--allow-fqdn-hostname', action='store_true', help='allow hostname that is fully-qualified (contains ".")') parser_bootstrap.add_argument( '--allow-mismatched-release', action='store_true', help="allow bootstrap of ceph that doesn't match this version of cephadm") parser_bootstrap.add_argument( '--skip-prepare-host', action='store_true', help='Do not prepare host') parser_bootstrap.add_argument( '--orphan-initial-daemons', action='store_true', help='Set mon and mgr service to `unmanaged`; do not create the crash service') parser_bootstrap.add_argument( '--skip-monitoring-stack', action='store_true', help='Do not automatically provision monitoring stack (prometheus, grafana, alertmanager, node-exporter)') parser_bootstrap.add_argument( '--with-centralized-logging', action='store_true', help='Automatically provision centralized logging (promtail, loki)') parser_bootstrap.add_argument( '--apply-spec', help='Apply cluster spec after bootstrap (copy ssh key, add hosts and apply services)') parser_bootstrap.add_argument( '--shared_ceph_folder', metavar='CEPH_SOURCE_FOLDER', help='Development mode. 
Several folders in containers are volumes mapped to different sub-folders in the ceph source folder') parser_bootstrap.add_argument( '--registry-url', help='url for custom registry') parser_bootstrap.add_argument( '--registry-username', help='username for custom registry') parser_bootstrap.add_argument( '--registry-password', help='password for custom registry') parser_bootstrap.add_argument( '--registry-json', help='json file with custom registry login info (URL, Username, Password)') parser_bootstrap.add_argument( '--container-init', action='store_true', default=CONTAINER_INIT, help=argparse.SUPPRESS) parser_bootstrap.add_argument( '--cluster-network', help='subnet to use for cluster replication, recovery and heartbeats (in CIDR notation network/mask)') parser_bootstrap.add_argument( '--single-host-defaults', action='store_true', help='adjust configuration defaults to suit a single-host cluster') parser_bootstrap.add_argument( '--log-to-file', action='store_true', help='configure cluster to log to traditional log files in /var/log/ceph/$fsid') parser_deploy = subparsers.add_parser( 'deploy', help='deploy a daemon') parser_deploy.set_defaults(func=command_deploy) parser_deploy.add_argument( '--name', required=True, action=CustomValidation, help='daemon name (type.id)') parser_deploy.add_argument( '--fsid', required=True, help='cluster FSID') _add_deploy_parser_args(parser_deploy) parser_orch = subparsers.add_parser( '_orch', ) subparsers_orch = parser_orch.add_subparsers( title='Orchestrator Driven Commands', description='Commands that are typically only run by cephadm mgr module', ) parser_deploy_from = subparsers_orch.add_parser( 'deploy', help='deploy a daemon') parser_deploy_from.set_defaults(func=command_deploy_from) # currently cephadm mgr module passes an fsid option on the CLI too # TODO: remove this and always source fsid from the JSON? parser_deploy_from.add_argument( '--fsid', help='cluster FSID') parser_deploy_from.add_argument( 'source', default='-', nargs='?', help='Configuration input source file', ) parser_check_host = subparsers.add_parser( 'check-host', help='check host configuration') parser_check_host.set_defaults(func=command_check_host) parser_check_host.add_argument( '--expect-hostname', help='Check that hostname matches an expected value') parser_prepare_host = subparsers.add_parser( 'prepare-host', help='prepare a host for cephadm use') parser_prepare_host.set_defaults(func=command_prepare_host) parser_prepare_host.add_argument( '--expect-hostname', help='Set hostname') parser_add_repo = subparsers.add_parser( 'add-repo', help='configure package repository') parser_add_repo.set_defaults(func=command_add_repo) parser_add_repo.add_argument( '--release', help='use latest version of a named release (e.g., {})'.format(LATEST_STABLE_RELEASE)) parser_add_repo.add_argument( '--version', help='use specific upstream version (x.y.z)') parser_add_repo.add_argument( '--dev', help='use specified bleeding edge build from git branch or tag') parser_add_repo.add_argument( '--dev-commit', help='use specified bleeding edge build from git commit') parser_add_repo.add_argument( '--gpg-url', help='specify alternative GPG key location') parser_add_repo.add_argument( '--repo-url', default='https://download.ceph.com', help='specify alternative repo location') # TODO: proxy? 
parser_rm_repo = subparsers.add_parser( 'rm-repo', help='remove package repository configuration') parser_rm_repo.set_defaults(func=command_rm_repo) parser_install = subparsers.add_parser( 'install', help='install ceph package(s)') parser_install.set_defaults(func=command_install) parser_install.add_argument( 'packages', nargs='*', default=['cephadm'], help='packages') parser_registry_login = subparsers.add_parser( 'registry-login', help='log host into authenticated registry') parser_registry_login.set_defaults(func=command_registry_login) parser_registry_login.add_argument( '--registry-url', help='url for custom registry') parser_registry_login.add_argument( '--registry-username', help='username for custom registry') parser_registry_login.add_argument( '--registry-password', help='password for custom registry') parser_registry_login.add_argument( '--registry-json', help='json file with custom registry login info (URL, Username, Password)') parser_registry_login.add_argument( '--fsid', help='cluster FSID') parser_gather_facts = subparsers.add_parser( 'gather-facts', help='gather and return host related information (JSON format)') parser_gather_facts.set_defaults(func=command_gather_facts) parser_maintenance = subparsers.add_parser( 'host-maintenance', help='Manage the maintenance state of a host') parser_maintenance.add_argument( '--fsid', help='cluster FSID') parser_maintenance.add_argument( 'maintenance_action', type=str, choices=['enter', 'exit'], help='Maintenance action - enter maintenance, or exit maintenance') parser_maintenance.set_defaults(func=command_maintenance) parser_agent = subparsers.add_parser( 'agent', help='start cephadm agent') parser_agent.set_defaults(func=command_agent) parser_agent.add_argument( '--fsid', required=True, help='cluster FSID') parser_agent.add_argument( '--daemon-id', help='daemon id for agent') parser_disk_rescan = subparsers.add_parser( 'disk-rescan', help='rescan all HBAs to detect new/removed devices') parser_disk_rescan.set_defaults(func=command_rescan_disks) return parser def _parse_args(av: List[str]) -> argparse.Namespace: parser = _get_parser() args = parser.parse_args(av) if 'command' in args and args.command and args.command[0] == '--': args.command.pop(0) # workaround argparse to deprecate the subparser `--container-init` flag # container_init and no_container_init must always be mutually exclusive container_init_args = ('--container-init', '--no-container-init') if set(container_init_args).issubset(av): parser.error('argument %s: not allowed with argument %s' % (container_init_args)) elif '--container-init' in av: args.no_container_init = not args.container_init else: args.container_init = not args.no_container_init assert args.container_init is not args.no_container_init return args def cephadm_init_ctx(args: List[str]) -> CephadmContext: ctx = CephadmContext() ctx.set_args(_parse_args(args)) return ctx def cephadm_init_logging(ctx: CephadmContext, args: List[str]) -> None: """Configure the logging for cephadm as well as updating the system to have the expected log dir and logrotate configuration. 
""" logging.addLevelName(QUIET_LOG_LEVEL, 'QUIET') global logger if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR) operations = ['bootstrap', 'rm-cluster'] if any(op in args for op in operations): dictConfig(interactive_logging_config) else: dictConfig(logging_config) logger = logging.getLogger() logger.setLevel(QUIET_LOG_LEVEL) if not os.path.exists(ctx.logrotate_dir + '/cephadm'): with open(ctx.logrotate_dir + '/cephadm', 'w') as f: f.write("""# created by cephadm /var/log/ceph/cephadm.log { rotate 7 daily compress missingok notifempty su root root } """) if ctx.verbose: for handler in logger.handlers: if handler.name in ['console', 'log_file', 'console_stdout']: handler.setLevel(QUIET_LOG_LEVEL) logger.debug('%s\ncephadm %s' % ('-' * 80, args)) def cephadm_require_root() -> None: """Exit if the process is not running as root.""" if os.geteuid() != 0: sys.stderr.write('ERROR: cephadm should be run as root\n') sys.exit(1) def main() -> None: av: List[str] = [] av = sys.argv[1:] ctx = cephadm_init_ctx(av) if not ctx.has_function(): sys.stderr.write('No command specified; pass -h or --help for usage\n') sys.exit(1) if ctx.has_function() and getattr(ctx.func, '_execute_early', False): try: sys.exit(ctx.func(ctx)) except Error as e: if ctx.verbose: raise logger.error('ERROR: %s' % e) sys.exit(1) cephadm_require_root() cephadm_init_logging(ctx, av) try: # podman or docker? ctx.container_engine = find_container_engine(ctx) if ctx.func not in \ [ command_check_host, command_prepare_host, command_add_repo, command_rm_repo, command_install ]: check_container_engine(ctx) # command handler r = ctx.func(ctx) except (Error, ClusterAlreadyExists) as e: if ctx.verbose: raise logger.error('ERROR: %s' % e) sys.exit(1) if not r: r = 0 sys.exit(r) if __name__ == '__main__': main()
395,702
37.04105
305
py
null
ceph-main/src/cephadm/vstart-cleanup.sh
#!/bin/sh -ex bin/ceph mon rm `hostname` for f in `bin/ceph orch ls | grep -v NAME | awk '{print $1}'` ; do bin/ceph orch rm $f --force done
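# usage: run from a vstart.sh build directory so the bin/ceph paths resolve,
# e.g. (assuming the usual ceph build layout): cd build && ../src/cephadm/vstart-cleanup.sh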
146
20
66
sh
null
ceph-main/src/cephadm/vstart-smoke.sh
#!/bin/bash -ex # this is a smoke test, meant to be run against vstart.sh. host="$(hostname)" bin/init-ceph stop || true MON=1 OSD=1 MDS=0 MGR=1 ../src/vstart.sh -d -n -x -l --cephadm export CEPH_DEV=1 bin/ceph orch ls bin/ceph orch apply mds foo 1 bin/ceph orch ls | grep foo while ! bin/ceph orch ps | grep mds.foo ; do sleep 1 ; done bin/ceph orch ps bin/ceph orch host ls bin/ceph orch rm crash ! bin/ceph orch ls | grep crash bin/ceph orch apply crash '*' bin/ceph orch ls | grep crash while ! bin/ceph orch ps | grep crash ; do sleep 1 ; done bin/ceph orch ps | grep crash.$host | grep running bin/ceph orch ls | grep crash | grep 1/1 bin/ceph orch daemon rm crash.$host while ! bin/ceph orch ps | grep crash ; do sleep 1 ; done bin/ceph orch daemon stop crash.$host bin/ceph orch daemon start crash.$host bin/ceph orch daemon restart crash.$host bin/ceph orch daemon reconfig crash.$host bin/ceph orch daemon redeploy crash.$host bin/ceph orch host ls | grep $host bin/ceph orch host label add $host fooxyz bin/ceph orch host ls | grep $host | grep fooxyz bin/ceph orch host label rm $host fooxyz ! bin/ceph orch host ls | grep $host | grep fooxyz bin/ceph orch host set-addr $host $host bin/ceph cephadm check-host $host #! bin/ceph cephadm check-host $host 1.2.3.4 #bin/ceph orch host set-addr $host 1.2.3.4 #! bin/ceph cephadm check-host $host bin/ceph orch host set-addr $host $host bin/ceph cephadm check-host $host bin/ceph orch apply mgr 1 bin/ceph orch rm mgr --force # we don't want a mgr to take over for ours bin/ceph orch daemon add mon $host:127.0.0.1 while ! bin/ceph mon dump | grep 'epoch 2' ; do sleep 1 ; done bin/ceph orch apply rbd-mirror 1 bin/ceph orch apply node-exporter '*' bin/ceph orch apply prometheus 1 bin/ceph orch apply alertmanager 1 bin/ceph orch apply grafana 1 while ! bin/ceph dashboard get-grafana-api-url | grep $host ; do sleep 1 ; done bin/ceph orch apply rgw foo --placement=1 bin/ceph orch ps bin/ceph orch ls # clean up bin/ceph orch rm mds.foo bin/ceph orch rm rgw.myrealm.myzone bin/ceph orch rm rbd-mirror bin/ceph orch rm node-exporter bin/ceph orch rm alertmanager bin/ceph orch rm grafana bin/ceph orch rm prometheus bin/ceph orch rm crash bin/ceph mon rm $host ! bin/ceph orch daemon rm mon.$host bin/ceph orch daemon rm mon.$host --force echo OK
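# note: 'ceph orch apply' only records the desired spec; the
# 'while ! <cmd> ; do sleep 1 ; done' loops above poll until the orchestrator
# has actually reconciled and the daemons show up in 'ceph orch ps'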
2,331
25.804598
79
sh
null
ceph-main/src/cephadm/box/__init__.py
0
0
0
py
null
ceph-main/src/cephadm/box/box.py
#!/bin/python3 import argparse import os import stat import json import sys import host import osd from multiprocessing import Process, Pool from util import ( BoxType, Config, Target, ensure_inside_container, ensure_outside_container, get_boxes_container_info, run_cephadm_shell_command, run_dc_shell_command, run_dc_shell_commands, get_container_engine, run_shell_command, run_shell_commands, ContainerEngine, DockerEngine, PodmanEngine, colored, engine, engine_compose, Colors, get_seed_name ) CEPH_IMAGE = 'quay.ceph.io/ceph-ci/ceph:main' BOX_IMAGE = 'cephadm-box:latest' # NOTE: this image tar is a trickeroo so cephadm won't pull the image every time # we deploy a cluster. Keep in mind that you'll be responsible for pulling the # image yourself with `./box.py -v cluster setup` CEPH_IMAGE_TAR = 'docker/ceph/image/quay.ceph.image.tar' CEPH_ROOT = '../../../' DASHBOARD_PATH = '../../../src/pybind/mgr/dashboard/frontend/' root_error_msg = """ WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING sudo with this script can kill your computer, try again without sudo if you value your time. """ def remove_ceph_image_tar(): if os.path.exists(CEPH_IMAGE_TAR): os.remove(CEPH_IMAGE_TAR) def cleanup_box() -> None: osd.cleanup_osds() remove_ceph_image_tar() def image_exists(image_name: str): # extract_tag assert ':' in image_name image_name, tag = image_name.split(':') engine = get_container_engine() images = engine.run('image ls').split('\n') IMAGE_NAME = 0 TAG = 1 for image in images: image = image.split() print(image) print(image_name, tag) if image[IMAGE_NAME] == image_name and image[TAG] == tag: return True return False def get_ceph_image(): print('Getting ceph image') engine = get_container_engine() engine.run(f'pull {CEPH_IMAGE}') # update engine.run(f'build -t {CEPH_IMAGE} docker/ceph') if not os.path.exists('docker/ceph/image'): os.mkdir('docker/ceph/image') remove_ceph_image_tar() engine.run(f'save {CEPH_IMAGE} -o {CEPH_IMAGE_TAR}') run_shell_command(f'chmod 777 {CEPH_IMAGE_TAR}') print('Ceph image added') def get_box_image(): print('Getting box image') engine = get_container_engine() engine.run(f'build -t cephadm-box -f {engine.dockerfile} .') print('Box image added') def check_dashboard(): if not os.path.exists(os.path.join(CEPH_ROOT, 'dist')): print(colored('Missing build in dashboard', Colors.WARNING)) def check_cgroups(): if not os.path.exists('/sys/fs/cgroup/cgroup.controllers'): print(colored('cgroups v1 is not supported', Colors.FAIL)) print('Enable cgroups v2 please') sys.exit(666) def check_selinux(): selinux = run_shell_command('getenforce') if 'Disabled' not in selinux: print(colored('selinux should be disabled, please disable it if you ' 'don\'t want unexpected behaviour.', Colors.WARNING)) def dashboard_setup(): command = f'cd {DASHBOARD_PATH} && npm install' run_shell_command(command) command = f'cd {DASHBOARD_PATH} && npm run build' run_shell_command(command) class Cluster(Target): _help = 'Manage docker cephadm boxes' actions = ['bootstrap', 'start', 'down', 'list', 'bash', 'setup', 'cleanup'] def set_args(self): self.parser.add_argument( 'action', choices=Cluster.actions, help='Action to perform on the box' ) self.parser.add_argument('--osds', type=int, default=3, help='Number of osds') self.parser.add_argument('--hosts', type=int, default=1, help='Number of hosts') self.parser.add_argument('--skip-deploy-osds', action='store_true', help='skip deploy osd') self.parser.add_argument('--skip-create-loop', action='store_true', help='skip create loopback device')
self.parser.add_argument('--skip-monitoring-stack', action='store_true', help='skip monitoring stack') self.parser.add_argument('--skip-dashboard', action='store_true', help='skip dashboard') self.parser.add_argument('--expanded', action='store_true', help='deploy 3 hosts and 3 osds') self.parser.add_argument('--jobs', type=int, help='Number of jobs scheduled in parallel') @ensure_outside_container def setup(self): check_cgroups() check_selinux() targets = [ get_ceph_image, get_box_image, dashboard_setup ] results = [] jobs = Config.get('jobs') if jobs: jobs = int(jobs) else: jobs = None pool = Pool(jobs) for target in targets: results.append(pool.apply_async(target)) for result in results: result.wait() @ensure_outside_container def cleanup(self): cleanup_box() @ensure_inside_container def bootstrap(self): print('Running bootstrap on seed') cephadm_path = str(os.environ.get('CEPHADM_PATH')) engine = get_container_engine() if isinstance(engine, DockerEngine): engine.restart() st = os.stat(cephadm_path) os.chmod(cephadm_path, st.st_mode | stat.S_IEXEC) engine.run('load < /cephadm/box/docker/ceph/image/quay.ceph.image.tar') # cephadm guid error because it sometimes tries to use quay.ceph.io/ceph-ci/ceph:<none> # instead of main branch's tag run_shell_command('export CEPH_SOURCE_FOLDER=/ceph') run_shell_command('export CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:main') run_shell_command( 'echo "export CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:main" >> ~/.bashrc' ) extra_args = [] extra_args.append('--skip-pull') # cephadm prints in warning, let's redirect it to the output so shell_command doesn't # complain extra_args.append('2>&0') extra_args = ' '.join(extra_args) skip_monitoring_stack = ( '--skip-monitoring-stack' if Config.get('skip-monitoring-stack') else '' ) skip_dashboard = '--skip-dashboard' if Config.get('skip-dashboard') else '' fsid = Config.get('fsid') config_folder = str(Config.get('config_folder')) config = str(Config.get('config')) keyring = str(Config.get('keyring')) if not os.path.exists(config_folder): os.mkdir(config_folder) cephadm_bootstrap_command = ( '$CEPHADM_PATH --verbose bootstrap ' '--mon-ip "$(hostname -i)" ' '--allow-fqdn-hostname ' '--initial-dashboard-password admin ' '--dashboard-password-noupdate ' '--shared_ceph_folder /ceph ' '--allow-overwrite ' f'--output-config {config} ' f'--output-keyring {keyring} ' f'--output-config {config} ' f'--fsid "{fsid}" ' '--log-to-file ' f'{skip_dashboard} ' f'{skip_monitoring_stack} ' f'{extra_args} ' ) print('Running cephadm bootstrap...') run_shell_command(cephadm_bootstrap_command, expect_exit_code=120) print('Cephadm bootstrap complete') run_shell_command('sudo vgchange --refresh') run_shell_command('cephadm ls') run_shell_command('ln -s /ceph/src/cephadm/box/box.py /usr/bin/box') run_cephadm_shell_command('ceph -s') print('Bootstrap completed!') @ensure_outside_container def start(self): check_cgroups() check_selinux() osds = int(Config.get('osds')) hosts = int(Config.get('hosts')) engine = get_container_engine() # ensure boxes don't exist self.down() # podman is ran without sudo if isinstance(engine, PodmanEngine): I_am = run_shell_command('whoami') if 'root' in I_am: print(root_error_msg) sys.exit(1) print('Checking docker images') if not image_exists(CEPH_IMAGE): get_ceph_image() if not image_exists(BOX_IMAGE): get_box_image() used_loop = "" if not Config.get('skip_create_loop'): print('Creating OSD devices...') used_loop = osd.create_loopback_devices(osds) print(f'Added {osds} logical volumes in a loopback device') 
print('Starting containers') engine.up(hosts) containers = engine.get_containers() seed = engine.get_seed() # Umounting somehow brings back the contents of the host /sys/dev/block. # On startup /sys/dev/block is empty. After umount, we can see symlinks again # so that lsblk is able to run as expected run_dc_shell_command('umount /sys/dev/block', seed) run_shell_command('sudo sysctl net.ipv4.conf.all.forwarding=1') run_shell_command('sudo iptables -P FORWARD ACCEPT') # don't update clock with chronyd / setup chronyd on all boxes chronyd_setup = """ sed 's/$OPTIONS/-x/g' /usr/lib/systemd/system/chronyd.service -i systemctl daemon-reload systemctl start chronyd systemctl status --no-pager chronyd """ for container in containers: print(colored('Got container:', Colors.OKCYAN), str(container)) for container in containers: run_dc_shell_commands(chronyd_setup, container) print('Seting up host ssh servers') for container in containers: print(colored('Setting up ssh server for:', Colors.OKCYAN), str(container)) host._setup_ssh(container) verbose = '-v' if Config.get('verbose') else '' skip_deploy = '--skip-deploy-osds' if Config.get('skip-deploy-osds') else '' skip_monitoring_stack = ( '--skip-monitoring-stack' if Config.get('skip-monitoring-stack') else '' ) skip_dashboard = '--skip-dashboard' if Config.get('skip-dashboard') else '' box_bootstrap_command = ( f'/cephadm/box/box.py {verbose} --engine {engine.command} cluster bootstrap ' f'--osds {osds} ' f'--hosts {hosts} ' f'{skip_deploy} ' f'{skip_dashboard} ' f'{skip_monitoring_stack} ' ) print(box_bootstrap_command) run_dc_shell_command(box_bootstrap_command, seed) expanded = Config.get('expanded') if expanded: info = get_boxes_container_info() ips = info['ips'] hostnames = info['hostnames'] print(ips) if hosts > 0: host._copy_cluster_ssh_key(ips) host._add_hosts(ips, hostnames) if not Config.get('skip-deploy-osds'): print('Deploying osds... 
This could take up to minutes') osd.deploy_osds(osds) print('Osds deployed') dashboard_ip = 'localhost' info = get_boxes_container_info(with_seed=True) if isinstance(engine, DockerEngine): for i in range(info['size']): if get_seed_name() in info['container_names'][i]: dashboard_ip = info["ips"][i] print(colored(f'dashboard available at https://{dashboard_ip}:8443', Colors.OKGREEN)) print('Bootstrap finished successfully') @ensure_outside_container def down(self): engine = get_container_engine() if isinstance(engine, PodmanEngine): containers = json.loads(engine.run('container ls --format json')) for container in containers: for name in container['Names']: if name.startswith('box_hosts_'): engine.run(f'container kill {name}') engine.run(f'container rm {name}') pods = json.loads(engine.run('pod ls --format json')) for pod in pods: if 'Name' in pod and pod['Name'].startswith('box_pod_host'): name = pod['Name'] engine.run(f'pod kill {name}') engine.run(f'pod rm {name}') else: run_shell_command(f'{engine_compose()} -f {Config.get("docker_yaml")} down') print('Successfully killed all boxes') @ensure_outside_container def list(self): info = get_boxes_container_info(with_seed=True) for i in range(info['size']): ip = info['ips'][i] name = info['container_names'][i] hostname = info['hostnames'][i] print(f'{name} \t{ip} \t{hostname}') @ensure_outside_container def bash(self): # we need verbose to see the prompt after running shell command Config.set('verbose', True) print('Seed bash') engine = get_container_engine() engine.run(f'exec -it {engine.seed_name} bash') targets = { 'cluster': Cluster, 'osd': osd.Osd, 'host': host.Host, } def main(): parser = argparse.ArgumentParser() parser.add_argument( '-v', action='store_true', dest='verbose', help='be more verbose' ) parser.add_argument( '--engine', type=str, default='podman', dest='engine', help='choose engine between "docker" and "podman"' ) subparsers = parser.add_subparsers() target_instances = {} for name, target in targets.items(): target_instances[name] = target(None, subparsers) for count, arg in enumerate(sys.argv, 1): if arg in targets: instance = target_instances[arg] if hasattr(instance, 'main'): instance.argv = sys.argv[count:] instance.set_args() args = parser.parse_args() Config.add_args(vars(args)) instance.main() sys.exit(0) parser.print_help() if __name__ == '__main__': main()
14,100
32.978313
111
py
null
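The `Target`/`subparsers` dispatch that `box.py` relies on is compact enough to get wrong easily: each target registers its own subparser, and `main()` scans `sys.argv` to hand the remaining arguments to the matching target. Below is a minimal, self-contained sketch of the same pattern; the `Demo` class and `greet` action are illustrative, not part of the box code:

```python
import argparse
import sys


class Target:
    """Base class: each subcommand owns a subparser and dispatches on 'action'."""
    _help = ''

    def __init__(self, argv, subparsers):
        self.argv = argv
        self.parser = subparsers.add_parser(self.__class__.__name__.lower(), help=self._help)

    def main(self):
        args = self.parser.parse_args(self.argv)
        getattr(self, args.action)()  # dispatch to the method named by the action


class Demo(Target):
    _help = 'toy target with a single action'

    def set_args(self):
        self.parser.add_argument('action', choices=['greet'])

    def greet(self):
        print('hello from the demo target')


def main():
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    instances = {'demo': Demo(None, subparsers)}
    # find the first argv token naming a target; everything after it belongs to that target
    for count, arg in enumerate(sys.argv, 1):
        if arg in instances:
            instance = instances[arg]
            instance.argv = sys.argv[count:]
            instance.set_args()
            instance.main()
            return
    parser.print_help()


if __name__ == '__main__':
    main()
```

Invoked as `./demo.py demo greet` (hypothetical filename), this prints the greeting; anything else falls through to `parser.print_help()`.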
ceph-main/src/cephadm/box/docker-compose-docker.yml
version: "2.4" services: cephadm-host-base: build: context: . environment: - CEPH_BRANCH=master image: cephadm-box privileged: true stop_signal: RTMIN+3 volumes: - ../../../:/ceph - ..:/cephadm - ./daemon.json:/etc/docker/daemon.json # dangerous, maybe just map the loopback # https://stackoverflow.com/questions/36880565/why-dont-my-udev-rules-work-inside-of-a-running-docker-container - /dev:/dev networks: - public mem_limit: "20g" scale: -1 seed: extends: service: cephadm-host-base ports: - "3000:3000" - "8443:8443" - "9095:9095" scale: 1 hosts: extends: service: cephadm-host-base scale: 3 volumes: var-lib-docker: networks: public:
792
18.825
117
yml
null
ceph-main/src/cephadm/box/docker-compose.cgroup1.yml
version: "2.4" # If cgroups v2 is disabled then add cgroup fs services: seed: volumes: - "/sys/fs/cgroup:/sys/fs/cgroup:ro" hosts: volumes: - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
250
21.818182
52
yml
null
ceph-main/src/cephadm/box/host.py
import os
from typing import List, Union

from util import (
    Config,
    HostContainer,
    Target,
    get_boxes_container_info,
    get_container_engine,
    inside_container,
    run_cephadm_shell_command,
    run_dc_shell_command,
    run_shell_command,
    engine,
    BoxType
)


def _setup_ssh(container: HostContainer):
    if inside_container():
        if not os.path.exists('/root/.ssh/known_hosts'):
            run_shell_command('echo "y" | ssh-keygen -b 2048 -t rsa -f /root/.ssh/id_rsa -q -N ""',
                              expect_error=True)

        run_shell_command('echo "root:root" | chpasswd')
        with open('/etc/ssh/sshd_config', 'a+') as f:
            f.write('PermitRootLogin yes\n')
            f.write('PasswordAuthentication yes\n')
            f.flush()
        run_shell_command('systemctl restart sshd')
    else:
        print('Redirecting _setup_ssh to container')
        verbose = '-v' if Config.get('verbose') else ''
        run_dc_shell_command(
            f'/cephadm/box/box.py {verbose} --engine {engine()} host setup_ssh {container.name}',
            container
        )


def _add_hosts(ips: Union[List[str], str], hostnames: Union[List[str], str]):
    if inside_container():
        assert len(ips) == len(hostnames)
        for i in range(len(ips)):
            run_cephadm_shell_command(f'ceph orch host add {hostnames[i]} {ips[i]}')
    else:
        print('Redirecting _add_hosts to container')
        verbose = '-v' if Config.get('verbose') else ''
        print(ips)
        ips = ' '.join(ips)
        hostnames = ' '.join(hostnames)
        seed = get_container_engine().get_seed()
        run_dc_shell_command(
            f'/cephadm/box/box.py {verbose} --engine {engine()} host add_hosts {seed.name} --ips {ips} --hostnames {hostnames}',
            seed
        )


def _copy_cluster_ssh_key(ips: Union[List[str], str]):
    if inside_container():
        local_ip = run_shell_command('hostname -i')
        for ip in ips:
            if ip != local_ip:
                run_shell_command(
                    (
                        'sshpass -p "root" ssh-copy-id -f '
                        f'-o StrictHostKeyChecking=no -i /etc/ceph/ceph.pub "root@{ip}"'
                    )
                )
    else:
        print('Redirecting _copy_cluster_ssh_key to container')
        verbose = '-v' if Config.get('verbose') else ''
        print(ips)
        ips = ' '.join(ips)
        # assume we only have one seed
        seed = get_container_engine().get_seed()
        run_dc_shell_command(
            f'/cephadm/box/box.py {verbose} --engine {engine()} host copy_cluster_ssh_key {seed.name} --ips {ips}',
            seed
        )


class Host(Target):
    _help = 'Run seed/host related commands'
    actions = ['setup_ssh', 'copy_cluster_ssh_key', 'add_hosts']

    def set_args(self):
        self.parser.add_argument('action', choices=Host.actions)
        self.parser.add_argument(
            'container_name',
            type=str,
            help='box_{type}_{index}. In docker, type can be seed or hosts. In podman only hosts.'
        )
        self.parser.add_argument('--ips', nargs='*', help='List of host ips')
        self.parser.add_argument(
            '--hostnames', nargs='*', help='List of hostnames (relative to the ip list)'
        )

    def setup_ssh(self):
        container_name = Config.get('container_name')
        engine = get_container_engine()
        _setup_ssh(engine.get_container(container_name))

    def add_hosts(self):
        ips = Config.get('ips')
        if not ips:
            ips = get_boxes_container_info()['ips']
        hostnames = Config.get('hostnames')
        if not hostnames:
            hostnames = get_boxes_container_info()['hostnames']
        _add_hosts(ips, hostnames)

    def copy_cluster_ssh_key(self):
        ips = Config.get('ips')
        if not ips:
            ips = get_boxes_container_info()['ips']
        _copy_cluster_ssh_key(ips)
4,095
32.85124
132
py
null
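Every helper in `host.py` follows the same in-or-out shape: do the work directly when already inside the box container, otherwise re-invoke `box.py` inside the seed container. A stripped-down sketch of that pattern; the marker file mirrors `util.inside_container`, while the `run_in_seed` helper, the `add_host` wrapper, and the container name `seed` are hypothetical names for illustration:

```python
import os
import subprocess


def inside_container() -> bool:
    # the box images drop this marker file at build time
    return os.path.exists('/.box_container')


def run_in_seed(command: str) -> str:
    # hypothetical runner: exec the command inside a seed container named 'seed'
    return subprocess.run(['podman', 'exec', 'seed', 'sh', '-c', command],
                          capture_output=True, text=True, check=True).stdout


def add_host(hostname: str, ip: str) -> None:
    if inside_container():
        # we can talk to the cluster directly
        subprocess.run(['cephadm', 'shell', '--', 'ceph', 'orch', 'host', 'add', hostname, ip],
                       check=True)
    else:
        # redirect the very same action into the container
        run_in_seed(f'box.py host add_hosts seed --ips {ip} --hostnames {hostname}')
```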
ceph-main/src/cephadm/box/osd.py
import json
import os
import time
import re
from typing import Dict

from util import (
    BoxType,
    Config,
    Target,
    ensure_inside_container,
    ensure_outside_container,
    get_orch_hosts,
    run_cephadm_shell_command,
    run_dc_shell_command,
    get_container_engine,
    run_shell_command,
)

DEVICES_FILE = "./devices.json"


def remove_loop_img() -> None:
    loop_image = Config.get('loop_img')
    if os.path.exists(loop_image):
        os.remove(loop_image)


def create_loopback_devices(osds: int) -> Dict[int, Dict[str, str]]:
    assert osds
    cleanup_osds()
    osd_devs = dict()

    for i in range(osds):
        img_name = f'osd{i}'
        loop_dev = create_loopback_device(img_name)
        osd_devs[i] = dict(img_name=img_name, device=loop_dev)
    with open(DEVICES_FILE, 'w') as dev_file:
        dev_file.write(json.dumps(osd_devs))
    return osd_devs


def create_loopback_device(img_name, size_gb=5):
    loop_img_dir = Config.get('loop_img_dir')
    run_shell_command(f'mkdir -p {loop_img_dir}')
    loop_img = os.path.join(loop_img_dir, img_name)
    run_shell_command(f'rm -f {loop_img}')
    run_shell_command(f'dd if=/dev/zero of={loop_img} bs=1 count=0 seek={size_gb}G')
    loop_dev = run_shell_command('sudo losetup -f')
    if not os.path.exists(loop_dev):
        dev_minor = re.match(r'\/dev\/[^\d]+(\d+)', loop_dev).groups()[0]
        run_shell_command(f'sudo mknod -m777 {loop_dev} b 7 {dev_minor}')
        run_shell_command(f'sudo chown {os.getuid()}:{os.getgid()} {loop_dev}')
    if os.path.ismount(loop_dev):
        # os.umount does not exist in the standard library; unmount via the shell
        run_shell_command(f'sudo umount {loop_dev}')
    run_shell_command(f'sudo losetup {loop_dev} {loop_img}')
    run_shell_command(f'sudo chown {os.getuid()}:{os.getgid()} {loop_dev}')
    return loop_dev


def get_lvm_osd_data(data: str) -> Dict[str, str]:
    osd_lvm_info = run_cephadm_shell_command(f'ceph-volume lvm list {data}')
    osd_data = {}
    for line in osd_lvm_info.split('\n'):
        line = line.strip()
        if not line:
            continue
        line = line.split()
        if line[0].startswith('===') or line[0].startswith('[block]'):
            continue
        # "block device" key -> "block_device"
        key = '_'.join(line[:-1])
        osd_data[key] = line[-1]
    return osd_data


def load_osd_devices():
    if not os.path.exists(DEVICES_FILE):
        return dict()
    with open(DEVICES_FILE) as dev_file:
        devs = json.loads(dev_file.read())
    return devs


@ensure_inside_container
def deploy_osd(data: str, hostname: str) -> bool:
    out = run_cephadm_shell_command(f'ceph orch daemon add osd {hostname}:{data} raw')
    return 'Created osd(s)' in out


def cleanup_osds() -> None:
    loop_img_dir = Config.get('loop_img_dir')
    osd_devs = load_osd_devices()
    for osd in osd_devs.values():
        device = osd['device']
        if 'loop' in device:
            loop_img = os.path.join(loop_img_dir, osd['img_name'])
            run_shell_command(f'sudo losetup -d {device}', expect_error=True)
            if os.path.exists(loop_img):
                os.remove(loop_img)
    run_shell_command(f'rm -rf {loop_img_dir}')


def deploy_osds(count: int):
    osd_devs = load_osd_devices()
    hosts = get_orch_hosts()
    host_index = 0
    seed = get_container_engine().get_seed()
    v = '-v' if Config.get('verbose') else ''
    for osd in osd_devs.values():
        deployed = False
        while not deployed:
            print(hosts)
            hostname = hosts[host_index]['hostname']
            deployed = run_dc_shell_command(
                f'/cephadm/box/box.py {v} osd deploy --data {osd["device"]} --hostname {hostname}',
                seed
            )
            deployed = 'created osd' in deployed.lower() or 'already created?' in deployed.lower()
            print('Waiting 5 seconds to re-run deploy osd...')
            time.sleep(5)
        host_index = (host_index + 1) % len(hosts)


class Osd(Target):
    _help = """
    Deploy osds and create needed block devices with loopback devices:
    Actions:
    - deploy: Deploy an osd given a block device
    - create_loop: Create needed loopback devices and block devices in logical volumes
    for a number of osds.
    - destroy: Remove all osds and the underlying loopback devices.
    """
    actions = ['deploy', 'create_loop', 'destroy']

    def set_args(self):
        self.parser.add_argument('action', choices=Osd.actions)
        self.parser.add_argument('--data', type=str, help='path to a block device')
        self.parser.add_argument('--hostname', type=str, help='host to deploy osd')
        self.parser.add_argument('--osds', type=int, default=0, help='number of osds')

    def deploy(self):
        data = Config.get('data')
        hostname = Config.get('hostname')
        if not hostname:
            # assume this host
            hostname = run_shell_command('hostname')
        if not data:
            deploy_osds(Config.get('osds'))
        else:
            deploy_osd(data, hostname)

    @ensure_outside_container
    def create_loop(self):
        osds = Config.get('osds')
        create_loopback_devices(int(osds))
        print('Successfully created loopback devices')

    @ensure_outside_container
    def destroy(self):
        cleanup_osds()
5,262
32.310127
99
py
null
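`create_loopback_device` above builds the backing image and attaches the loop device through a chain of shell calls. For reference, the same flow in a compact form; this is a sketch assuming `losetup` is available and `sudo` is allowed, and the helper name is illustrative:

```python
import os
import subprocess


def make_loop_device(img_path: str, size_gb: int = 5) -> str:
    """Create a sparse backing file and attach it to a free loop device."""
    parent = os.path.dirname(img_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    # sparse file: extend to size_gb without writing data, mirroring the dd invocation above
    with open(img_path, 'wb') as f:
        f.truncate(size_gb * 1024 ** 3)
    # --find picks a free loop device, --show prints its path (e.g. /dev/loop3)
    out = subprocess.run(['sudo', 'losetup', '--find', '--show', img_path],
                         capture_output=True, text=True, check=True)
    return out.stdout.strip()
```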
ceph-main/src/cephadm/box/util.py
import json
import os
import subprocess
import sys
import copy
from abc import ABCMeta, abstractmethod
from enum import Enum
from typing import Any, Callable, Dict, List


class Colors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


class Config:
    args = {
        'fsid': '00000000-0000-0000-0000-0000deadbeef',
        'config_folder': '/etc/ceph/',
        'config': '/etc/ceph/ceph.conf',
        'keyring': '/etc/ceph/ceph.keyring',
        'loop_img': 'loop-images/loop.img',
        'engine': 'podman',
        'docker_yaml': 'docker-compose-docker.yml',
        'docker_v1_yaml': 'docker-compose.cgroup1.yml',
        'podman_yaml': 'docker-compose-podman.yml',
        'loop_img_dir': 'loop-images',
    }

    @staticmethod
    def set(key, value):
        Config.args[key] = value

    @staticmethod
    def get(key):
        if key in Config.args:
            return Config.args[key]
        return None

    @staticmethod
    def add_args(args: Dict[str, str]) -> None:
        Config.args.update(args)


class Target:
    def __init__(self, argv, subparsers):
        self.argv = argv
        self.parser = subparsers.add_parser(
            self.__class__.__name__.lower(), help=self.__class__._help
        )

    def set_args(self):
        """
        Adding the required arguments of the target should go here, example:
        self.parser.add_argument(..)
        """
        raise NotImplementedError()

    def main(self):
        """
        A target will be set up by first calling this main function,
        where the parser is initialized.
        """
        args = self.parser.parse_args(self.argv)
        Config.add_args(vars(args))
        function = getattr(self, args.action)
        function()


def ensure_outside_container(func) -> Callable:
    def wrapper(*args, **kwargs):
        if not inside_container():
            return func(*args, **kwargs)
        else:
            raise RuntimeError('This command should be run outside a container')

    return wrapper


def ensure_inside_container(func) -> Callable:
    def wrapper(*args, **kwargs):
        if inside_container():
            return func(*args, **kwargs)
        else:
            raise RuntimeError('This command should be run inside a container')

    return wrapper


def colored(msg, color: Colors):
    return color + msg + Colors.ENDC


class BoxType(str, Enum):
    SEED = 'seed'
    HOST = 'host'


class HostContainer:
    def __init__(self, _name, _type) -> None:
        self._name: str = _name
        self._type: BoxType = _type

    @property
    def name(self) -> str:
        return self._name

    @property
    def type(self) -> BoxType:
        return self._type

    def __str__(self) -> str:
        return f'{self.name} {self.type}'


def run_shell_command(command: str, expect_error=False, verbose=True, expect_exit_code=0) -> str:
    if Config.get('verbose'):
        print(f'{colored("Running command", Colors.HEADER)}: {colored(command, Colors.OKBLUE)}')

    process = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )

    out = ''
    err = ''
    # let's read when output comes so it is in real time
    while True:
        # TODO: improve performance of this part, I think this part is a problem
        pout = process.stdout.read(1).decode('latin1')
        if pout == '' and process.poll() is not None:
            break
        if pout:
            if Config.get('verbose') and verbose:
                sys.stdout.write(pout)
                sys.stdout.flush()
            out += pout

    process.wait()

    err += process.stderr.read().decode('latin1').strip()
    out = out.strip()

    if process.returncode != 0 and not expect_error and process.returncode != expect_exit_code:
        err = colored(err, Colors.FAIL)
        raise RuntimeError(f'Failed command: {command}\n{err}\nexit code: {process.returncode}')
    return out


def run_dc_shell_commands(commands: str, container: HostContainer, expect_error=False) -> str:
    for command in commands.split('\n'):
        command = command.strip()
        if not command:
            continue
        run_dc_shell_command(command.strip(), container, expect_error=expect_error)


def run_shell_commands(commands: str, expect_error=False) -> str:
    for command in commands.split('\n'):
        command = command.strip()
        if not command:
            continue
        run_shell_command(command, expect_error=expect_error)


@ensure_inside_container
def run_cephadm_shell_command(command: str, expect_error=False) -> str:
    config = Config.get('config')
    keyring = Config.get('keyring')
    fsid = Config.get('fsid')

    with_cephadm_image = 'CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:main'
    out = run_shell_command(
        f'{with_cephadm_image} cephadm --verbose shell --fsid {fsid} --config {config} --keyring {keyring} -- {command}',
        expect_error,
    )
    return out


def run_dc_shell_command(
    command: str, container: HostContainer, expect_error=False
) -> str:
    out = get_container_engine().run_exec(container, command, expect_error=expect_error)
    return out


def inside_container() -> bool:
    return os.path.exists('/.box_container')


def get_container_id(container_name: str):
    return run_shell_command(f"{engine()} ps | \\grep " + container_name + " | awk '{ print $1 }'")


def engine():
    return Config.get('engine')


def engine_compose():
    return f'{engine()}-compose'


def get_seed_name():
    if engine() == 'docker':
        return 'seed'
    elif engine() == 'podman':
        return 'box_hosts_0'
    else:
        print(f'unknown engine {engine()}')
        sys.exit(1)


@ensure_outside_container
def get_boxes_container_info(with_seed: bool = False) -> Dict[str, Any]:
    # NOTE: this could be cached
    ips_query = engine() + " inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}} %tab% {{.Name}} %tab% {{.Config.Hostname}}' $(" + engine() + " ps -aq) --format json"
    containers = json.loads(run_shell_command(ips_query, verbose=False))
    # FIXME: if things get more complex a class representing a container info might be useful,
    # for now representing data this way is faster.
    info = {'size': 0, 'ips': [], 'container_names': [], 'hostnames': []}
    for container in containers:
        # Most commands use hosts only
        name = container['Name']
        if name.startswith('box_hosts'):
            if not with_seed and name == get_seed_name():
                continue
            info['size'] += 1
            print(container['NetworkSettings'])
            if 'Networks' in container['NetworkSettings']:
                info['ips'].append(container['NetworkSettings']['Networks']['box_network']['IPAddress'])
            else:
                info['ips'].append('n/a')
            info['container_names'].append(name)
            info['hostnames'].append(container['Config']['Hostname'])
    return info


def get_orch_hosts():
    if inside_container():
        orch_host_ls_out = run_cephadm_shell_command('ceph orch host ls --format json')
    else:
        orch_host_ls_out = run_dc_shell_command(
            'cephadm shell --keyring /etc/ceph/ceph.keyring --config /etc/ceph/ceph.conf -- ceph orch host ls --format json',
            get_container_engine().get_seed()
        )
        sp = orch_host_ls_out.split('\n')
        orch_host_ls_out = sp[len(sp) - 1]
    hosts = json.loads(orch_host_ls_out)
    return hosts


class ContainerEngine(metaclass=ABCMeta):
    @property
    @abstractmethod
    def command(self) -> str:
        pass

    @property
    @abstractmethod
    def seed_name(self) -> str:
        pass

    @property
    @abstractmethod
    def dockerfile(self) -> str:
        pass

    @property
    def host_name_prefix(self) -> str:
        return 'box_hosts_'

    @abstractmethod
    def up(self, hosts: int):
        pass

    def run_exec(self, container: HostContainer, command: str, expect_error: bool = False):
        return run_shell_command(' '.join([self.command, 'exec', container.name, command]),
                                 expect_error=expect_error)

    def run(self, engine_command: str, expect_error: bool = False):
        return run_shell_command(' '.join([self.command, engine_command]), expect_error=expect_error)

    def get_containers(self) -> List[HostContainer]:
        ps_out = json.loads(run_shell_command('podman ps --format json'))
        containers = []
        for container in ps_out:
            if not container['Names']:
                raise RuntimeError(f'Container {container} missing name')
            name = container['Names'][0]
            if name == self.seed_name:
                containers.append(HostContainer(name, BoxType.SEED))
            elif name.startswith(self.host_name_prefix):
                containers.append(HostContainer(name, BoxType.HOST))
        return containers

    def get_seed(self) -> HostContainer:
        for container in self.get_containers():
            if container.type == BoxType.SEED:
                return container
        raise RuntimeError('Missing seed container')

    def get_container(self, container_name: str):
        containers = self.get_containers()
        for container in containers:
            if container.name == container_name:
                return container
        return None

    def restart(self):
        pass


class DockerEngine(ContainerEngine):
    command = 'docker'
    seed_name = 'seed'
    dockerfile = 'DockerfileDocker'

    def restart(self):
        run_shell_command('systemctl restart docker')

    def up(self, hosts: int):
        dcflags = f'-f {Config.get("docker_yaml")}'
        if not os.path.exists('/sys/fs/cgroup/cgroup.controllers'):
            dcflags += f' -f {Config.get("docker_v1_yaml")}'
        run_shell_command(f'{engine_compose()} {dcflags} up --scale hosts={hosts} -d')


class PodmanEngine(ContainerEngine):
    command = 'podman'
    seed_name = 'box_hosts_0'
    dockerfile = 'DockerfilePodman'

    CAPS = [
        "SYS_ADMIN",
        "NET_ADMIN",
        "SYS_TIME",
        "SYS_RAWIO",
        "MKNOD",
        "NET_RAW",
        "SETUID",
        "SETGID",
        "CHOWN",
        "SYS_PTRACE",
        "SYS_TTY_CONFIG",
        "CAP_AUDIT_WRITE",
        "CAP_AUDIT_CONTROL",
    ]

    VOLUMES = [
        '../../../:/ceph:z',
        '../:/cephadm:z',
        '/run/udev:/run/udev',
        '/sys/dev/block:/sys/dev/block',
        '/sys/fs/cgroup:/sys/fs/cgroup:ro',
        '/dev/fuse:/dev/fuse',
        '/dev/disk:/dev/disk',
        '/sys/devices/virtual/block:/sys/devices/virtual/block',
        '/sys/block:/dev/block',
        '/dev/mapper:/dev/mapper',
        '/dev/mapper/control:/dev/mapper/control',
    ]

    TMPFS = ['/run', '/tmp']

    # FIXME: right now we are assuming every service will be exposed through the seed, but this is far
    # from the truth. Services can be deployed on different hosts so we need a system to manage this.
    SEED_PORTS = [
        8443,  # dashboard
        3000,  # grafana
        9093,  # alertmanager
        9095   # prometheus
    ]

    def setup_podman_env(self, hosts: int = 1, osd_devs={}):
        network_name = 'box_network'
        networks = run_shell_command('podman network ls')
        if network_name not in networks:
            run_shell_command(f'podman network create -d bridge {network_name}')

        args = [
            '--group-add', 'keep-groups',
            '--device', '/dev/fuse',
            '-it',
            '-d',
            '-e', 'CEPH_BRANCH=main',
            '--stop-signal', 'RTMIN+3'
        ]

        for cap in self.CAPS:
            args.append('--cap-add')
            args.append(cap)
        for volume in self.VOLUMES:
            args.append('-v')
            args.append(volume)
        for tmp in self.TMPFS:
            args.append('--tmpfs')
            args.append(tmp)

        for osd_dev in osd_devs.values():
            device = osd_dev["device"]
            args.append('--device')
            args.append(f'{device}:{device}')

        for host in range(hosts + 1):  # 0 will be the seed
            options = copy.copy(args)
            options.append('--name')
            options.append(f'box_hosts_{host}')
            options.append('--network')
            options.append(f'{network_name}')
            if host == 0:
                for port in self.SEED_PORTS:
                    options.append('-p')
                    options.append(f'{port}:{port}')
            options.append('cephadm-box')
            options = ' '.join(options)

            run_shell_command(f'podman run {options}')

    def up(self, hosts: int):
        import osd
        self.setup_podman_env(hosts=hosts, osd_devs=osd.load_osd_devices())


def get_container_engine() -> ContainerEngine:
    if engine() == 'docker':
        return DockerEngine()
    else:
        return PodmanEngine()
13,334
30.599526
182
py
null
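`run_shell_command` streams subprocess output one byte at a time, which its own TODO flags as a bottleneck. One possible alternative is line-buffered streaming; this is a sketch, not a drop-in replacement, since it only echoes complete lines and merges stderr into stdout:

```python
import subprocess
import sys


def stream_command(command: str) -> str:
    """Run a shell command, echoing output line by line as it arrives."""
    process = subprocess.Popen(command, shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
    lines = []
    assert process.stdout is not None
    for line in process.stdout:  # blocks until a full line (or EOF) is available
        sys.stdout.write(line)
        sys.stdout.flush()
        lines.append(line)
    process.wait()
    if process.returncode != 0:
        raise RuntimeError(f'Failed command: {command} (exit code {process.returncode})')
    return ''.join(lines).strip()
```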
ceph-main/src/cephadm/containers/keepalived/README.md
# quay.io/ceph/keepalived

A small [ubi8-minimal](https://catalog.redhat.com/software/containers/registry/registry.access.redhat.com/repository/ubi8/ubi-minimal) based Docker container that provides a method of IP high availability via [keepalived](http://www.keepalived.org/) (VRRP failover), and optional Kubernetes API Server monitoring. If allowed to auto configure (default behaviour) it will automatically generate a unicast based failover configuration with a minimal amount of user supplied information.

For specific information on Keepalived, please see the man page on [keepalived.conf](http://linux.die.net/man/5/keepalived.conf) or the [Keepalived User Guide](http://www.keepalived.org/pdf/UserGuide.pdf).

## Index

- [quay.io/ceph/keepalived](#cephkeepalived)
  - [Index](#index)
  - [Prerequisites](#prerequisites)
  - [Configuration](#configuration)
    - [Execution Control](#execution-control)
    - [Autoconfiguration Options](#autoconfiguration-options)
    - [Kubernetes Options](#kubernetes-options)
    - [Suggested Kubernetes Settings](#suggested-kubernetes-settings)
    - [Example Keepalived Configs](#example-keepalived-configs)
      - [Example Autogenerated Keepalived Master Config](#example-autogenerated-keepalived-master-config)
      - [Example Autogenerated Keepalived Backup Config](#example-autogenerated-keepalived-backup-config)
  - [Example Run Commands](#example-run-commands)
    - [Example Master Run Command](#example-master-run-command)
    - [Example Backup Run Command](#example-backup-run-command)

## Prerequisites

Before attempting to deploy the keepalived container, the host must allow non local binding of ipv4 addresses. To do this, configure the sysctl tunable `net.ipv4.ip_nonlocal_bind=1`.

In addition to enabling the nonlocal binds, the container must be run with both host networking (`--net=host`) and the CAP_NET_ADMIN capability (`--cap-add NET_ADMIN`). These allow the container to manage the host's networking configuration, which is essential to the function of keepalived.

## Configuration

### Execution Control

| Variable              | Default                                          |
|:---------------------:|:------------------------------------------------:|
| `KEEPALIVED_AUTOCONF` | `true`                                           |
| `KEEPALIVED_CONF`     | `/etc/keepalived/keepalived.conf`                |
| `KEEPALIVED_CMD`      | `/usr/sbin/keepalived -n -l -f $KEEPALIVED_CONF` |
| `KEEPALIVED_DEBUG`    | `false`                                          |

* `KEEPALIVED_AUTOCONF` - Enables or disables the auto-configuration of keepalived.
* `KEEPALIVED_CONF` - The path to the keepalived configuration file.
* `KEEPALIVED_CMD` - The command called to execute keepalived.
* `KEEPALIVED_DEBUG` - Enables or disables debug level logging for keepalived (adds `-D` to `KEEPALIVED_CMD`).

### Autoconfiguration Options

| Variable                                    | Default                            |
|:-------------------------------------------:|:----------------------------------:|
| `KEEPALIVED_ADVERT_INT`                     | `1`                                |
| `KEEPALIVED_AUTH_PASS`                      | `pwd$KEEPALIVED_VIRTUAL_ROUTER_ID` |
| `KEEPALIVED_INTERFACE`                      | `eth0`                             |
| `KEEPALIVED_PRIORITY`                       | `200`                              |
| `KEEPALIVED_STATE`                          | `MASTER`                           |
| `KEEPALIVED_TRACK_INTERFACE_###`            |                                    |
| `KEEPALIVED_UNICAST_SRC_IP`                 |                                    |
| `KEEPALIVED_UNICAST_PEER_###`               |                                    |
| `KEEPALIVED_VIRTUAL_IPADDRESS_###`          |                                    |
| `KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_###` |                                    |
| `KEEPALIVED_VIRTUAL_ROUTER_ID`              | `1`                                |
| `KEEPALIVED_KUBE_APISERVER_CHECK`           | `false`                            |

* `KEEPALIVED_ADVERT_INT` - The VRRP advertisement interval (in seconds).
* `KEEPALIVED_AUTH_PASS` - A shared password used to authenticate each node in a VRRP group (**Note:** If the password is longer than 8 characters, only the first 8 characters are used).
* `KEEPALIVED_INTERFACE` - The host interface that keepalived will monitor and use for VRRP traffic.
* `KEEPALIVED_PRIORITY` - Election value; the server configured with the highest priority will become the Master.
* `KEEPALIVED_STATE` - Defines the server role as Master or Backup. (**Options:** `MASTER` or `BACKUP`)
* `KEEPALIVED_TRACK_INTERFACE_###` - An interface whose state should be monitored (e.g. eth0). More than one can be supplied as long as the variable name ends in a number from 0-999.
* `KEEPALIVED_UNICAST_SRC_IP` - The IP on the host that the keepalived daemon should bind to. **Note:** If not specified, it will be the first IP bound to the interface specified in `KEEPALIVED_INTERFACE`.
* `KEEPALIVED_UNICAST_PEER_###` - An IP of a peer participating in the VRRP group. More than one can be supplied as long as the variable name ends in a number from 0-999.
* `KEEPALIVED_VIRTUAL_IPADDRESS_###` - An instance of an address that will be monitored and failed over from one host to another. These should be a quoted string in the form of: `<IPADDRESS>/<MASK> brd <BROADCAST_IP> dev <DEVICE> scope <SCOPE> label <LABEL>` At a minimum the ip address, mask and device should be specified e.g. `KEEPALIVED_VIRTUAL_IPADDRESS_1="10.10.0.2/24 dev eth0"`. More than one can be supplied as long as the variable name ends in a number from 0-999. **Note:** Keepalived has a hard limit of **20** addresses that can be monitored. More can be failed over with the monitored addresses via `KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_###`.
* `KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_###` - An instance of an address that will be failed over with the monitored addresses supplied via `KEEPALIVED_VIRTUAL_IPADDRESS_###`. These should be a quoted string in the form of: `<IPADDRESS>/<MASK> brd <BROADCAST_IP> dev <DEVICE> scope <SCOPE> label <LABEL>` At a minimum the ip address, mask and device should be specified e.g. `KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_1="172.16.1.20/24 dev eth1"`. More than one can be supplied as long as the variable name ends in a number from 0-999.
* `KEEPALIVED_VIRTUAL_ROUTER_ID` - A unique number from 0 to 255 that should identify the VRRP group. Master and Backup should have the same value. Multiple instances of keepalived can be run on the same host, but each pair **MUST** have a unique virtual router id.
* `KEEPALIVED_KUBE_APISERVER_CHECK` - If enabled it configures a simple check script for the Kubernetes API-Server. For more information on this feature, please see the [Kubernetes Options](#kubernetes-options) section.

### Kubernetes Options

| **Variable**                  | **Default**                                    |
|:-----------------------------:|:----------------------------------------------:|
| `KUBE_APISERVER_ADDRESS`      | parsed from `KEEPALIVED_VIRTUAL_IPADDRESS_###` |
| `KUBE_APISERVER_PORT`         | `6443`                                         |
| `KUBE_APISERVER_CHK_INTERVAL` | `3`                                            |
| `KUBE_APISERVER_CHK_FALL`     | `10`                                           |
| `KUBE_APISERVER_CHK_RISE`     | `2`                                            |
| `KUBE_APISERVER_CHK_WEIGHT`   | `-50`                                          |

* `KUBE_APISERVER_ADDRESS` - The Virtual IP being used for the Kube API Server. If none is supplied, it is assumed to be the lowest numbered entry in the `KEEPALIVED_VIRTUAL_IPADDRESS_###` variables.
* `KUBE_APISERVER_PORT` - The port to use in conjunction with the `KUBE_APISERVER_ADDRESS`.
* `KUBE_APISERVER_CHK_INTERVAL` - The interval in seconds between calling the script.
* `KUBE_APISERVER_CHK_FALL` - The number of consecutive non-zero script exits before setting the state to `FAULT`.
* `KUBE_APISERVER_CHK_RISE` - The number of consecutive zero script exits before exiting the `FAULT` state.
* `KUBE_APISERVER_CHK_WEIGHT` - The weight to apply to the priority when the service enters the `FAULT` state.

---

### Suggested Kubernetes Settings

Assuming there are three nodes running the kube-apiserver, you cannot rely on setting just the `KEEPALIVED_STATE` parameter to manage failover across the nodes. To manage kube-apiserver failover, enable the healthcheck option with `KEEPALIVED_KUBE_APISERVER_CHECK`, and set the `KEEPALIVED_PRIORITY` manually for the three instances.

| **Node** | **Priority** |
|:--------:|:------------:|
| node-01  | 200          |
| node-02  | 190          |
| node-03  | 180          |

With the default weight of `-50`, if `node-01` has an issue, its priority will drop to `150` and allow `node-02` to take over. The same is repeated if `node-02` has a failure, dropping its priority to `140` so that `node-03` takes over. Recovery occurs in the same order, with the system with the highest priority being promoted to master.

### Example Keepalived Configs

##### Example Autogenerated Keepalived Master Config

```
vrrp_instance MAIN {
  state MASTER
  interface eth0
  virtual_router_id 2
  priority 200
  advert_int 1
  unicast_src_ip 10.10.0.21
  unicast_peer {
    10.10.0.22
  }
  authentication {
    auth_type PASS
    auth_pass pwd1
  }
  virtual_ipaddress {
    10.10.0.2/24 dev eth0
  }
  virtual_ipaddress_excluded {
    172.16.1.20/24 dev eth1
  }
  track_interface {
    eth0
    eth1
  }
}
```

##### Example Autogenerated Keepalived Backup Config

```
vrrp_instance MAIN {
  state BACKUP
  interface eth0
  virtual_router_id 2
  priority 100
  advert_int 1
  unicast_src_ip 10.10.0.22
  unicast_peer {
    10.10.0.21
  }
  authentication {
    auth_type PASS
    auth_pass pwd1
  }
  virtual_ipaddress {
    10.10.0.2/24 dev eth0
  }
  virtual_ipaddress_excluded {
    172.16.1.20/24 dev eth1
  }
  track_interface {
    eth0
    eth1
  }
}
```

## Example Run Commands

##### Example Master Run Command

```bash
docker run -d --net=host --cap-add NET_ADMIN \
  -e KEEPALIVED_AUTOCONF=true \
  -e KEEPALIVED_STATE=MASTER \
  -e KEEPALIVED_INTERFACE=eth0 \
  -e KEEPALIVED_VIRTUAL_ROUTER_ID=2 \
  -e KEEPALIVED_UNICAST_SRC_IP=10.10.0.21 \
  -e KEEPALIVED_UNICAST_PEER_0=10.10.0.22 \
  -e KEEPALIVED_TRACK_INTERFACE_1=eth0 \
  -e KEEPALIVED_TRACK_INTERFACE_2=eth1 \
  -e KEEPALIVED_VIRTUAL_IPADDRESS_1="10.10.0.3/24 dev eth0" \
  -e KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_1="172.16.1.20/24 dev eth1" \
  quay.io/ceph/keepalived
```

##### Example Backup Run Command

```bash
docker run -d --net=host --cap-add NET_ADMIN \
  -e KEEPALIVED_AUTOCONF=true \
  -e KEEPALIVED_STATE=BACKUP \
  -e KEEPALIVED_INTERFACE=eth0 \
  -e KEEPALIVED_VIRTUAL_ROUTER_ID=2 \
  -e KEEPALIVED_UNICAST_SRC_IP=10.10.0.22 \
  -e KEEPALIVED_UNICAST_PEER_0=10.10.0.21 \
  -e KEEPALIVED_TRACK_INTERFACE_1=eth0 \
  -e KEEPALIVED_TRACK_INTERFACE_2=eth1 \
  -e KEEPALIVED_VIRTUAL_IPADDRESS_1="10.10.0.3/24 dev eth0" \
  -e KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_1="172.16.1.20/24 dev eth1" \
  quay.io/ceph/keepalived
```
11,431
47.854701
657
md
null
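The failover arithmetic described in the README's Suggested Kubernetes Settings (priority plus the check-script weight) can be sanity-checked in a few lines. A small sketch using the node priorities from that table and the default `-50` weight:

```python
CHECK_WEIGHT = -50
priorities = {'node-01': 200, 'node-02': 190, 'node-03': 180}


def elected_master(faulted: set) -> str:
    """Highest effective priority wins the VRRP election."""
    effective = {node: prio + (CHECK_WEIGHT if node in faulted else 0)
                 for node, prio in priorities.items()}
    return max(effective, key=effective.get)


assert elected_master(set()) == 'node-01'
assert elected_master({'node-01'}) == 'node-02'             # 150 < 190
assert elected_master({'node-01', 'node-02'}) == 'node-03'  # 150, 140 < 180
```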
ceph-main/src/cephadm/containers/keepalived/skel/init.sh
#!/bin/bash
set -e
set -o pipefail

KEEPALIVED_DEBUG=${KEEPALIVED_DEBUG:-false}
KEEPALIVED_KUBE_APISERVER_CHECK=${KEEPALIVED_KUBE_APISERVER_CHECK:-false}

KEEPALIVED_CONF=${KEEPALIVED_CONF:-/etc/keepalived/keepalived.conf}

KEEPALIVED_VAR_RUN=${KEEPALIVED_VAR_RUN:-/var/run/keepalived}

if [[ ${KEEPALIVED_DEBUG,,} == 'true' ]]; then
  kd_cmd="/usr/sbin/keepalived -n -l -D -f $KEEPALIVED_CONF"
else
  kd_cmd="/usr/sbin/keepalived -n -l -f $KEEPALIVED_CONF"
fi
KEEPALIVED_CMD=${KEEPALIVED_CMD:-"$kd_cmd"}

rm -fr "$KEEPALIVED_VAR_RUN"

exec $KEEPALIVED_CMD
557
24.363636
73
sh
null
ceph-main/src/cephadm/tests/__init__.py
0
0
0
py
null
ceph-main/src/cephadm/tests/fixtures.py
import mock
import os
import pytest
import time

from contextlib import contextmanager
from pyfakefs import fake_filesystem
from typing import Dict, List, Optional


def import_cephadm():
    """Import cephadm as a module."""
    import cephadm as _cephadm

    return _cephadm


def mock_docker():
    _cephadm = import_cephadm()
    docker = mock.Mock(_cephadm.Docker)
    docker.path = '/usr/bin/docker'
    return docker


def mock_podman():
    _cephadm = import_cephadm()
    podman = mock.Mock(_cephadm.Podman)
    podman.path = '/usr/bin/podman'
    podman.version = (2, 1, 0)
    return podman


def _daemon_path():
    return os.getcwd()


def mock_bad_firewalld():
    def raise_bad_firewalld():
        raise Exception('Called bad firewalld')

    _cephadm = import_cephadm()
    f = mock.Mock(_cephadm.Firewalld)
    f.enable_service_for = lambda _: raise_bad_firewalld()
    f.apply_rules = lambda: raise_bad_firewalld()
    f.open_ports = lambda _: raise_bad_firewalld()


def _mock_scrape_host(obj, interval):
    try:
        raise ValueError("wah")
    except Exception as e:
        obj._handle_thread_exception(e, 'host')


def _mock_run(obj):
    t = obj._create_thread(obj._scrape_host_facts, 'host', 5)
    time.sleep(1)
    if not t.is_alive():
        obj.cephadm_cache.update_health('host', "inactive", "host thread stopped")


@pytest.fixture()
def cephadm_fs(
    fs: fake_filesystem.FakeFilesystem,
):
    """
    use pyfakefs to stub filesystem calls
    """
    uid = os.getuid()
    gid = os.getgid()

    def fchown(fd, _uid, _gid):
        """pyfakefs doesn't provide a working fchown or fchmod.
        In order to get permissions working generally across renames
        we need to provide our own implementation.
        """
        file_obj = fs.get_open_file(fd).get_object()
        file_obj.st_uid = _uid
        file_obj.st_gid = _gid

    _cephadm = import_cephadm()
    with mock.patch('os.fchown', side_effect=fchown), \
         mock.patch('os.fchmod'), \
         mock.patch('platform.processor', return_value='x86_64'), \
         mock.patch('cephadm.extract_uid_gid', return_value=(uid, gid)):

        try:
            if not fake_filesystem.is_root():
                fake_filesystem.set_uid(0)
        except AttributeError:
            pass

        fs.create_dir(_cephadm.DATA_DIR)
        fs.create_dir(_cephadm.LOG_DIR)
        fs.create_dir(_cephadm.LOCK_DIR)
        fs.create_dir(_cephadm.LOGROTATE_DIR)
        fs.create_dir(_cephadm.UNIT_DIR)
        fs.create_dir('/sys/block')

        yield fs


@pytest.fixture()
def host_sysfs(fs: fake_filesystem.FakeFilesystem):
    """Create a fake filesystem to represent sysfs"""
    enc_path = '/sys/class/scsi_generic/sg2/device/enclosure/0:0:1:0'
    dev_path = '/sys/class/scsi_generic/sg2/device'
    slot_count = 12
    fs.create_dir(dev_path)
    fs.create_file(os.path.join(dev_path, 'vendor'), contents="EnclosuresInc")
    fs.create_file(os.path.join(dev_path, 'model'), contents="D12")
    fs.create_file(os.path.join(enc_path, 'id'), contents='1')
    fs.create_file(os.path.join(enc_path, 'components'), contents=str(slot_count))
    for slot_num in range(slot_count):
        slot_dir = os.path.join(enc_path, str(slot_num))
        fs.create_file(os.path.join(slot_dir, 'locate'), contents='0')
        fs.create_file(os.path.join(slot_dir, 'fault'), contents='0')
        fs.create_file(os.path.join(slot_dir, 'slot'), contents=str(slot_num))
        if slot_num < 6:
            fs.create_file(os.path.join(slot_dir, 'status'), contents='Ok')
            slot_dev = os.path.join(slot_dir, 'device')
            fs.create_dir(slot_dev)
            fs.create_file(os.path.join(slot_dev, 'vpd_pg80'), contents=f'fake{slot_num:0>3}')
        else:
            fs.create_file(os.path.join(slot_dir, 'status'), contents='not installed')

    yield fs


@contextmanager
def with_cephadm_ctx(
    cmd: List[str],
    list_networks: Optional[Dict[str, Dict[str, List[str]]]] = None,
    hostname: Optional[str] = None,
):
    """
    :param cmd: cephadm command argv
    :param list_networks: mock 'list-networks' return
    :param hostname: mock 'socket.gethostname' return
    """
    if not hostname:
        hostname = 'host1'

    _cephadm = import_cephadm()
    with mock.patch('cephadm.attempt_bind'), \
         mock.patch('cephadm.call', return_value=('', '', 0)), \
         mock.patch('cephadm.call_timeout', return_value=0), \
         mock.patch('cephadm.find_executable', return_value='foo'), \
         mock.patch('cephadm.get_container_info', return_value=None), \
         mock.patch('cephadm.is_available', return_value=True), \
         mock.patch('cephadm.json_loads_retry', return_value={'epoch': 1}), \
         mock.patch('cephadm.logger'), \
         mock.patch('socket.gethostname', return_value=hostname):
        ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx(cmd)
        ctx.container_engine = mock_podman()
        if list_networks is not None:
            with mock.patch('cephadm.list_networks', return_value=list_networks):
                yield ctx
        else:
            yield ctx
5,143
30.558282
94
py
null
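For readers unfamiliar with `pyfakefs`: its pytest plugin injects the `fs` fixture that `cephadm_fs` above builds on, patching filesystem calls for the duration of a test. A minimal sketch of a test using it directly; the paths and contents are illustrative:

```python
import os


def test_fake_filesystem(fs):
    # 'fs' replaces the real filesystem for the duration of the test
    fs.create_dir('/var/lib/ceph')
    fs.create_file('/var/lib/ceph/fsid', contents='beefbeef')

    assert os.path.isdir('/var/lib/ceph')
    with open('/var/lib/ceph/fsid') as f:
        assert f.read() == 'beefbeef'
```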
ceph-main/src/cephadm/tests/test_agent.py
from unittest import mock import copy, datetime, json, os, socket, threading import pytest from tests.fixtures import with_cephadm_ctx, cephadm_fs, import_cephadm from typing import Optional _cephadm = import_cephadm() FSID = "beefbeef-beef-beef-1234-beefbeefbeef" AGENT_ID = 'host1' AGENT_DIR = f'/var/lib/ceph/{FSID}/agent.{AGENT_ID}' def test_agent_validate(): required_files = _cephadm.CephadmAgent.required_files with with_cephadm_ctx([]) as ctx: agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) for i in range(len(required_files)): incomplete_files = {s: 'text' for s in [f for j, f in enumerate(required_files) if j != i]} with pytest.raises(_cephadm.Error, match=f'required file missing from config: {required_files[i]}'): agent.validate(incomplete_files) all_files = {s: 'text' for s in required_files} agent.validate(all_files) def _check_file(path, content): assert os.path.exists(path) with open(path) as f: fcontent = f.read() assert fcontent == content @mock.patch('cephadm.call_throws') def test_agent_deploy_daemon_unit(_call_throws, cephadm_fs): _call_throws.return_value = ('', '', 0) agent_id = AGENT_ID with with_cephadm_ctx([]) as ctx: ctx.meta_json = json.dumps({'meta': 'data'}) agent = _cephadm.CephadmAgent(ctx, FSID, agent_id) cephadm_fs.create_dir(AGENT_DIR) with pytest.raises(_cephadm.Error, match='Agent needs a config'): agent.deploy_daemon_unit() config = {s: f'text for {s}' for s in _cephadm.CephadmAgent.required_files} config['not-required-file.txt'] = 'don\'t write me' agent.deploy_daemon_unit(config) # check required config file were all created for fname in _cephadm.CephadmAgent.required_files: _check_file(f'{AGENT_DIR}/{fname}', f'text for {fname}') # assert non-required file was not written assert not os.path.exists(f'{AGENT_DIR}/not-required-file.txt') # check unit.run file was created correctly _check_file(f'{AGENT_DIR}/unit.run', agent.unit_run()) # check unit.meta file created correctly _check_file(f'{AGENT_DIR}/unit.meta', json.dumps({'meta': 'data'}, indent=4) + '\n') # check unit file was created correctly _check_file(f'{ctx.unit_dir}/{agent.unit_name()}', agent.unit_file()) expected_call_throws_calls = [ mock.call(ctx, ['systemctl', 'daemon-reload']), mock.call(ctx, ['systemctl', 'enable', '--now', agent.unit_name()]), ] _call_throws.assert_has_calls(expected_call_throws_calls) expected_call_calls = [ mock.call(ctx, ['systemctl', 'stop', agent.unit_name()], verbosity=_cephadm.CallVerbosity.DEBUG), mock.call(ctx, ['systemctl', 'reset-failed', agent.unit_name()], verbosity=_cephadm.CallVerbosity.DEBUG), ] _cephadm.call.assert_has_calls(expected_call_calls) @mock.patch('threading.Thread.is_alive') def test_agent_shutdown(_is_alive): with with_cephadm_ctx([]) as ctx: agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) _is_alive.return_value = True assert agent.stop == False assert agent.mgr_listener.stop == False assert agent.ls_gatherer.stop == False assert agent.volume_gatherer.stop == False agent.shutdown() assert agent.stop == True assert agent.mgr_listener.stop == True assert agent.ls_gatherer.stop == True assert agent.volume_gatherer.stop == True def test_agent_wakeup(): with with_cephadm_ctx([]) as ctx: agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) assert agent.event.is_set() == False agent.wakeup() assert agent.event.is_set() == True @mock.patch("cephadm.CephadmAgent.shutdown") @mock.patch("cephadm.AgentGatherer.update_func") def test_pull_conf_settings(_update_func, _shutdown, cephadm_fs): target_ip = '192.168.0.0' target_port = 9876 refresh_period = 20 
listener_port = 5678 host = AGENT_ID device_enhanced_scan = 'True' with with_cephadm_ctx([]) as ctx: agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) full_config = { 'target_ip': target_ip, 'target_port': target_port, 'refresh_period': refresh_period, 'listener_port': listener_port, 'host': host, 'device_enhanced_scan': device_enhanced_scan } cephadm_fs.create_dir(AGENT_DIR) with open(agent.config_path, 'w') as f: f.write(json.dumps(full_config)) with pytest.raises(_cephadm.Error, match="Failed to get agent keyring:"): agent.pull_conf_settings() _shutdown.assert_called() with open(agent.keyring_path, 'w') as f: f.write('keyring') assert agent.device_enhanced_scan == False agent.pull_conf_settings() assert agent.host == host assert agent.target_ip == target_ip assert agent.target_port == target_port assert agent.loop_interval == refresh_period assert agent.starting_port == listener_port assert agent.device_enhanced_scan == True assert agent.keyring == 'keyring' _update_func.assert_called() full_config.pop('target_ip') with open(agent.config_path, 'w') as f: f.write(json.dumps(full_config)) with pytest.raises(_cephadm.Error, match="Failed to get agent target ip and port from config:"): agent.pull_conf_settings() @mock.patch("cephadm.command_ceph_volume") def test_agent_ceph_volume(_ceph_volume): def _ceph_volume_outputter(_): print("ceph-volume output") def _ceph_volume_empty(_): pass with with_cephadm_ctx([]) as ctx: agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) _ceph_volume.side_effect = _ceph_volume_outputter out, _ = agent._ceph_volume(False) assert ctx.command == ['inventory', '--format=json'] assert out == "ceph-volume output\n" out, _ = agent._ceph_volume(True) assert ctx.command == ['inventory', '--format=json', '--with-lsm'] assert out == "ceph-volume output\n" _ceph_volume.side_effect = _ceph_volume_empty with pytest.raises(Exception, match='ceph-volume returned empty value'): out, _ = agent._ceph_volume(False) def test_agent_daemon_ls_subset(cephadm_fs): # Basing part of this test on some actual sample output # Some sample "podman stats --format '{{.ID}},{{.MemUsage}}' --no-stream" output # 3f2b31d19ecd,456.4MB / 41.96GB # 5aca2499e0f8,7.082MB / 41.96GB # fe0cef07d5f7,35.91MB / 41.96GB # Sample "podman ps --format '{{.ID}},{{.Names}}' --no-trunc" output with the same containers # fe0cef07d5f71c5c604f7d1b4a4ac2e27873c96089d015014524e803361b4a30,ceph-4434fa7c-5602-11ed-b719-5254006ef86b-mon-host1 # 3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e70660340cd1f,ceph-4434fa7c-5602-11ed-b719-5254006ef86b-mgr-host1-pntmho # 5aca2499e0f8fb903788ff90eb03fe6ed58c7ed177caf278fed199936aff7b4a,ceph-4434fa7c-5602-11ed-b719-5254006ef86b-crash-host1 # Some of the components from that output mgr_cid = '3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e70660340cd1f' mon_cid = 'fe0cef07d5f71c5c604f7d1b4a4ac2e27873c96089d015014524e803361b4a30' crash_cid = '5aca2499e0f8fb903788ff90eb03fe6ed58c7ed177caf278fed199936aff7b4a' mgr_short_cid = mgr_cid[0:12] mon_short_cid = mon_cid[0:12] crash_short_cid = crash_cid[0:12] #Rebuilding the output but with our testing FSID and components (to allow alteration later for whatever reason) mem_out = f"""{mgr_short_cid},456.4MB / 41.96GB {crash_short_cid},7.082MB / 41.96GB {mon_short_cid},35.91MB / 41.96GB""" ps_out = f"""{mon_cid},ceph-{FSID}-mon-host1 {mgr_cid},ceph-{FSID}-mgr-host1-pntmho {crash_cid},ceph-{FSID}-crash-host1""" def _fake_call(ctx, cmd, desc=None, verbosity=_cephadm.CallVerbosity.VERBOSE_ON_FAILURE, timeout=_cephadm.DEFAULT_TIMEOUT, 
**kwargs): if 'stats' in cmd: return (mem_out, '', 0) elif 'ps' in cmd: return (ps_out, '', 0) return ('out', 'err', 0) cephadm_fs.create_dir(AGENT_DIR) cephadm_fs.create_dir(f'/var/lib/ceph/mon/ceph-host1') # legacy daemon cephadm_fs.create_dir(f'/var/lib/ceph/osd/nothing') # improper directory, should be skipped cephadm_fs.create_dir(f'/var/lib/ceph/{FSID}/mgr.host1.pntmho') # cephadm daemon cephadm_fs.create_dir(f'/var/lib/ceph/{FSID}/crash.host1') # cephadm daemon with with_cephadm_ctx([]) as ctx: ctx.fsid = FSID agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) _cephadm.call.side_effect = _fake_call daemons = agent._daemon_ls_subset() assert 'agent.host1' in daemons assert 'mgr.host1.pntmho' in daemons assert 'crash.host1' in daemons assert 'mon.host1' in daemons assert daemons['mon.host1']['style'] == 'legacy' assert daemons['mgr.host1.pntmho']['style'] == 'cephadm:v1' assert daemons['crash.host1']['style'] == 'cephadm:v1' assert daemons['agent.host1']['style'] == 'cephadm:v1' assert daemons['mgr.host1.pntmho']['systemd_unit'] == f'ceph-{FSID}@mgr.host1.pntmho' assert daemons['agent.host1']['systemd_unit'] == f'ceph-{FSID}@agent.host1' assert daemons['crash.host1']['systemd_unit'] == f'ceph-{FSID}@crash.host1' assert daemons['mgr.host1.pntmho']['container_id'] == mgr_cid assert daemons['crash.host1']['container_id'] == crash_cid assert daemons['mgr.host1.pntmho']['memory_usage'] == 478570086 # 456.4 MB assert daemons['crash.host1']['memory_usage'] == 7426015 # 7.082 MB @mock.patch("cephadm.list_daemons") @mock.patch("cephadm.CephadmAgent._daemon_ls_subset") def test_agent_get_ls(_ls_subset, _ls, cephadm_fs): ls_out = [{ "style": "cephadm:v1", "name": "mgr.host1.pntmho", "fsid": FSID, "systemd_unit": f"ceph-{FSID}@mgr.host1.pntmho", "enabled": True, "state": "running", "service_name": "mgr", "memory_request": None, "memory_limit": None, "ports": [ 9283, 8765 ], "container_id": "3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e70660340cd1f", "container_image_name": "quay.io/ceph/ceph:testing", "container_image_id": "3300e39269f0c13ae45026cf233d8b3fff1303d52f2598a69c7fba0bb8405164", "container_image_digests": [ "quay.io/ceph/ceph@sha256:d4f3522528ee79904f9e530bdce438acac30a039e9a0b3cf31d8b614f9f96a30" ], "memory_usage": 507510784, "cpu_percentage": "5.95%", "version": "18.0.0-556-gb4d1a199", "started": "2022-10-27T14:19:36.086664Z", "created": "2022-10-27T14:19:36.282281Z", "deployed": "2022-10-27T14:19:35.377275Z", "configured": "2022-10-27T14:22:40.316912Z" },{ "style": "cephadm:v1", "name": "agent.host1", "fsid": FSID, "systemd_unit": f"ceph-{FSID}@agent.host1", "enabled": True, "state": "running", "service_name": "agent", "ports": [], "ip": None, "deployed_by": [ "quay.io/ceph/ceph@sha256:d4f3522528ee79904f9e530bdce438acac30a039e9a0b3cf31d8b614f9f96a30" ], "rank": None, "rank_generation": None, "extra_container_args": None, "container_id": None, "container_image_name": None, "container_image_id": None, "container_image_digests": None, "version": None, "started": None, "created": "2022-10-27T19:46:49.751594Z", "deployed": None, "configured": "2022-10-27T19:46:49.751594Z" }, { "style": "legacy", "name": "mon.host1", "fsid": FSID, "systemd_unit": "ceph-mon@host1", "enabled": False, "state": "stopped", "host_version": None }] ls_subset_out = { 'mgr.host1.pntmho': { "style": "cephadm:v1", "fsid": FSID, "systemd_unit": f"ceph-{FSID}@mgr.host1.pntmho", "enabled": True, "state": "running", "container_id": "3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e70660340cd1f", 
"memory_usage": 507510784, }, 'agent.host1': { "style": "cephadm:v1", "fsid": FSID, "systemd_unit": f"ceph-{FSID}@agent.host1", "enabled": True, "state": "running", "container_id": None }, 'mon.host1': { "style": "legacy", "name": "mon.host1", "fsid": FSID, "systemd_unit": "ceph-mon@host1", "enabled": False, "state": "stopped", "host_version": None }} _ls.return_value = ls_out _ls_subset.return_value = ls_subset_out with with_cephadm_ctx([]) as ctx: ctx.fsid = FSID agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) # first pass, no cached daemon metadata daemons, changed = agent._get_ls() assert daemons == ls_out assert changed # second pass, should recognize that daemons have not changed and just keep cached values daemons, changed = agent._get_ls() assert daemons == daemons assert not changed # change a container id so it needs to get more info ls_subset_out2 = copy.deepcopy(ls_subset_out) ls_out2 = copy.deepcopy(ls_out) ls_subset_out2['mgr.host1.pntmho']['container_id'] = '3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e7066034aaaaa' ls_out2[0]['container_id'] = '3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e7066034aaaaa' _ls.return_value = ls_out2 _ls_subset.return_value = ls_subset_out2 assert agent.cached_ls_values['mgr.host1.pntmho']['container_id'] == "3f2b31d19ecdd586640cc9c6ef7c0fe62157a3f7a71fcb60c91e70660340cd1f" daemons, changed = agent._get_ls() assert daemons == ls_out2 assert changed # run again with the same data so it should use cached values daemons, changed = agent._get_ls() assert daemons == ls_out2 assert not changed # change the state of a container so new daemon metadata is needed ls_subset_out3 = copy.deepcopy(ls_subset_out2) ls_out3 = copy.deepcopy(ls_out2) ls_subset_out3['mgr.host1.pntmho']['enabled'] = False ls_out3[0]['enabled'] = False _ls.return_value = ls_out3 _ls_subset.return_value = ls_subset_out3 assert agent.cached_ls_values['mgr.host1.pntmho']['enabled'] == True daemons, changed = agent._get_ls() assert daemons == ls_out3 assert changed # run again with the same data so it should use cached values daemons, changed = agent._get_ls() assert daemons == ls_out3 assert not changed # remove a daemon so new metadats is needed ls_subset_out4 = copy.deepcopy(ls_subset_out3) ls_out4 = copy.deepcopy(ls_out3) ls_subset_out4.pop('mon.host1') ls_out4.pop() _ls.return_value = ls_out4 _ls_subset.return_value = ls_subset_out4 assert 'mon.host1' in agent.cached_ls_values daemons, changed = agent._get_ls() assert daemons == ls_out4 assert changed # run again with the same data so it should use cached values daemons, changed = agent._get_ls() assert daemons == ls_out4 assert not changed @mock.patch("threading.Event.clear") @mock.patch("threading.Event.wait") @mock.patch("urllib.request.Request.__init__") @mock.patch("cephadm.urlopen") @mock.patch("cephadm.list_networks") @mock.patch("cephadm.HostFacts.dump") @mock.patch("cephadm.HostFacts.__init__", lambda _, __: None) @mock.patch("ssl.SSLContext.load_verify_locations") @mock.patch("threading.Thread.is_alive") @mock.patch("cephadm.MgrListener.start") @mock.patch("cephadm.AgentGatherer.start") @mock.patch("cephadm.port_in_use") @mock.patch("cephadm.CephadmAgent.pull_conf_settings") def test_agent_run(_pull_conf_settings, _port_in_use, _gatherer_start, _listener_start, _is_alive, _load_verify_locations, _HF_dump, _list_networks, _urlopen, _RQ_init, _wait, _clear): target_ip = '192.168.0.0' target_port = '9999' refresh_period = 20 listener_port = 7770 open_listener_port = 7777 host = AGENT_ID device_enhanced_scan = 
False def _fake_port_in_use(ctx, port): if port == open_listener_port: return False return True network_data: Dict[str, Dict[str, Set[str]]] = { "10.2.1.0/24": { "eth1": set(["10.2.1.122"]) }, "192.168.122.0/24": { "eth0": set(["192.168.122.221"]) }, "fe80::/64": { "eth0": set(["fe80::5054:ff:fe3f:d94e"]), "eth1": set(["fe80::5054:ff:fe3f:aa4a"]), } } # the json serializable version of the networks data # we expect the agent to actually send network_data_no_sets: Dict[str, Dict[str, List[str]]] = { "10.2.1.0/24": { "eth1": ["10.2.1.122"] }, "192.168.122.0/24": { "eth0": ["192.168.122.221"] }, "fe80::/64": { "eth0": ["fe80::5054:ff:fe3f:d94e"], "eth1": ["fe80::5054:ff:fe3f:aa4a"], } } class FakeHTTPResponse(): def __init__(self): pass def __enter__(self): return self def __exit__(self, type, value, tb): pass def read(self): return json.dumps({'valid': 'output', 'result': '400'}) _port_in_use.side_effect = _fake_port_in_use _is_alive.return_value = False _HF_dump.return_value = 'Host Facts' _list_networks.return_value = network_data _urlopen.side_effect = lambda *args, **kwargs: FakeHTTPResponse() _RQ_init.side_effect = lambda *args, **kwargs: None with with_cephadm_ctx([]) as ctx: ctx.fsid = FSID agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) agent.keyring = 'agent keyring' agent.ack = 7 agent.volume_gatherer.ack = 7 agent.volume_gatherer.data = 'ceph-volume inventory data' agent.ls_gatherer.ack = 7 agent.ls_gatherer.data = [{'valid_daemon': 'valid_metadata'}] def _set_conf(): agent.target_ip = target_ip agent.target_port = target_port agent.loop_interval = refresh_period agent.starting_port = listener_port agent.host = host agent.device_enhanced_scan = device_enhanced_scan _pull_conf_settings.side_effect = _set_conf # technically the run function loops forever unless the agent # is told to stop. To get around that we're going to have the # event.wait() (which happens at the end of the loop) to throw # a special exception type. 
If we catch this exception we can # consider it as being a "success" run class EventCleared(Exception): pass _clear.side_effect = EventCleared('SUCCESS') with pytest.raises(EventCleared, match='SUCCESS'): agent.run() expected_data = { 'host': host, 'ls': [{'valid_daemon': 'valid_metadata'}], 'networks': network_data_no_sets, 'facts': 'Host Facts', 'volume': 'ceph-volume inventory data', 'ack': str(7), 'keyring': 'agent keyring', 'port': str(open_listener_port) } _RQ_init.assert_called_with( f'https://{target_ip}:{target_port}/data/', json.dumps(expected_data).encode('ascii'), {'Content-Type': 'application/json'} ) _listener_start.assert_called() _gatherer_start.assert_called() _urlopen.assert_called() # agent should not go down if connections fail _urlopen.side_effect = Exception() with pytest.raises(EventCleared, match='SUCCESS'): agent.run() # should fail if no ports are open for listener _port_in_use.side_effect = lambda _, __: True agent.listener_port = None with pytest.raises(Exception, match='Failed to pick port for agent to listen on: All 1000 ports starting at 7770 taken.'): agent.run() @mock.patch("cephadm.CephadmAgent.pull_conf_settings") @mock.patch("cephadm.CephadmAgent.wakeup") def test_mgr_listener_handle_json_payload(_agent_wakeup, _pull_conf_settings, cephadm_fs): with with_cephadm_ctx([]) as ctx: ctx.fsid = FSID agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) cephadm_fs.create_dir(AGENT_DIR) data_no_config = { 'counter': 7 } agent.mgr_listener.handle_json_payload(data_no_config) _agent_wakeup.assert_not_called() _pull_conf_settings.assert_not_called() assert not any(os.path.exists(os.path.join(AGENT_DIR, s)) for s in agent.required_files) data_with_config = { 'counter': 7, 'config': { 'unrequired-file': 'unrequired-text' } } data_with_config['config'].update({s: f'{s} text' for s in agent.required_files if s != agent.required_files[2]}) agent.mgr_listener.handle_json_payload(data_with_config) _agent_wakeup.assert_called() _pull_conf_settings.assert_called() assert all(os.path.exists(os.path.join(AGENT_DIR, s)) for s in agent.required_files if s != agent.required_files[2]) assert not os.path.exists(os.path.join(AGENT_DIR, agent.required_files[2])) assert not os.path.exists(os.path.join(AGENT_DIR, 'unrequired-file')) @mock.patch("socket.socket") @mock.patch("ssl.SSLContext.wrap_socket") @mock.patch("cephadm.MgrListener.handle_json_payload") @mock.patch("ssl.SSLContext.load_verify_locations") @mock.patch("ssl.SSLContext.load_cert_chain") def test_mgr_listener_run(_load_cert_chain, _load_verify_locations, _handle_json_payload, _wrap_context, _socket, cephadm_fs): with with_cephadm_ctx([]) as ctx: ctx.fsid = FSID agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) cephadm_fs.create_dir(AGENT_DIR) payload = json.dumps({'counter': 3, 'config': {s: f'{s} text' for s in agent.required_files if s != agent.required_files[1]}}) class FakeSocket: def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, fileno=None): self.family = family self.type = type def bind(*args, **kwargs): return def settimeout(*args, **kwargs): return def listen(*args, **kwargs): return class FakeSecureSocket: def __init__(self, pload): self.payload = pload self._conn = FakeConn(self.payload) self.accepted = False def accept(self): # to make the mgr listener's run loop stop, # set it to stop after accepting a "connection" # on our fake socket so only one iteration of the loop # actually happens agent.mgr_listener.stop = True self.accepted = True return self._conn, None def 
load_cert_chain(*args, **kwargs): return def load_verify_locations(*args, **kwargs): return class FakeConn: def __init__(self, payload: str = ''): payload_len_str = str(len(payload.encode('utf-8'))) while len(payload_len_str.encode('utf-8')) < 10: payload_len_str = '0' + payload_len_str self.payload = (payload_len_str + payload).encode('utf-8') self.buffer_len = len(self.payload) def recv(self, len: Optional[int] = None): if not len or len >= self.buffer_len: ret = self.payload self.payload = b'' self.buffer_len = 0 return ret else: ret = self.payload[:len] self.payload = self.payload[len:] self.buffer_len = self.buffer_len - len return ret FSS_good_data = FakeSecureSocket(payload) FSS_bad_json = FakeSecureSocket('bad json') _socket = FakeSocket agent.listener_port = 7777 # first run, should successfully receive properly structured json payload _wrap_context.side_effect = [FSS_good_data] agent.mgr_listener.stop = False FakeConn.send = mock.Mock(return_value=None) agent.mgr_listener.run() # verify payload was correctly extracted _handle_json_payload.assert_called_with(json.loads(payload)) FakeConn.send.assert_called_once_with(b'ACK') # second run, with bad json data received _wrap_context.side_effect = [FSS_bad_json] agent.mgr_listener.stop = False FakeConn.send = mock.Mock(return_value=None) agent.mgr_listener.run() FakeConn.send.assert_called_once_with(b'Failed to extract json payload from message: Expecting value: line 1 column 1 (char 0)') # third run, no proper length at the beginning of the payload FSS_no_length = FakeSecureSocket(payload) FSS_no_length.payload = FSS_no_length.payload[10:] FSS_no_length._conn.payload = FSS_no_length._conn.payload[10:] FSS_no_length._conn.buffer_len -= 10 _wrap_context.side_effect = [FSS_no_length] agent.mgr_listener.stop = False FakeConn.send = mock.Mock(return_value=None) agent.mgr_listener.run() FakeConn.send.assert_called_once_with(b'Failed to extract length of payload from message: invalid literal for int() with base 10: \'{"counter"\'') # some exception handling for full coverage FSS_exc_testing = FakeSecureSocket(payload) FSS_exc_testing.accept = mock.MagicMock() def _accept(*args, **kwargs): if not FSS_exc_testing.accepted: FSS_exc_testing.accepted = True raise socket.timeout() else: agent.mgr_listener.stop = True raise Exception() FSS_exc_testing.accept.side_effect = _accept _wrap_context.side_effect = [FSS_exc_testing] agent.mgr_listener.stop = False FakeConn.send = mock.Mock(return_value=None) agent.mgr_listener.run() FakeConn.send.assert_not_called() # one timeout, then the Exception that stops the loop assert FSS_exc_testing.accept.call_count == 2 @mock.patch("cephadm.CephadmAgent._get_ls") def test_gatherer_update_func(_get_ls, cephadm_fs): with with_cephadm_ctx([]) as ctx: ctx.fsid = FSID agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) cephadm_fs.create_dir(AGENT_DIR) def _sample_func(): return 7 agent.ls_gatherer.func() _get_ls.assert_called() _get_ls.reset_mock() agent.ls_gatherer.update_func(_sample_func) out = agent.ls_gatherer.func() assert out == 7 _get_ls.assert_not_called() @mock.patch("cephadm.CephadmAgent.wakeup") @mock.patch("time.monotonic") @mock.patch("threading.Event.wait") def test_gatherer_run(_wait, _time, _agent_wakeup, cephadm_fs): with with_cephadm_ctx([]) as ctx: ctx.fsid = FSID agent = _cephadm.CephadmAgent(ctx, FSID, AGENT_ID) cephadm_fs.create_dir(AGENT_DIR) agent.loop_interval = 30 agent.ack = 23 _sample_func = lambda *args, **kwargs: ('sample out', True) agent.ls_gatherer.update_func(_sample_func) agent.ls_gatherer.ack = 20 agent.ls_gatherer.stop = False def 
_fake_clear(*args, **kwargs): agent.ls_gatherer.stop = True _time.side_effect = [0, 20, 0, 20, 0, 20] # start at time 0, complete at time 20 _wait.return_value = None with mock.patch("threading.Event.clear") as _clear: _clear.side_effect = _fake_clear agent.ls_gatherer.run() _wait.assert_called_with(10) # agent loop_interval - run time assert agent.ls_gatherer.data == 'sample out' assert agent.ls_gatherer.ack == 23 _agent_wakeup.assert_called_once() _clear.assert_called_once() _exc_func = lambda *args, **kwargs: Exception() agent.ls_gatherer.update_func(_exc_func) agent.ls_gatherer.ack = 20 agent.ls_gatherer.stop = False with mock.patch("threading.Event.clear") as _clear: _clear.side_effect = _fake_clear agent.ls_gatherer.run() assert agent.ls_gatherer.data is None assert agent.ls_gatherer.ack == agent.ack # should have run full loop despite exception _clear.assert_called_once() # test general exception for full coverage _agent_wakeup.side_effect = [Exception()] agent.ls_gatherer.update_func(_sample_func) agent.ls_gatherer.stop = False # just to force only one iteration _time.side_effect = _fake_clear with mock.patch("threading.Event.clear") as _clear: _clear.side_effect = Exception() agent.ls_gatherer.run() assert agent.ls_gatherer.data == 'sample out' assert agent.ls_gatherer.ack == agent.ack # should not have gotten to end of loop _clear.assert_not_called() @mock.patch("cephadm.CephadmAgent.run") def test_command_agent(_agent_run, cephadm_fs): with with_cephadm_ctx([]) as ctx: ctx.fsid = FSID ctx.daemon_id = AGENT_ID with pytest.raises(Exception, match=f"Agent daemon directory {AGENT_DIR} does not exist. Perhaps agent was never deployed?"): _cephadm.command_agent(ctx) cephadm_fs.create_dir(AGENT_DIR) _cephadm.command_agent(ctx) _agent_run.assert_called()
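The FakeConn stub used by the listener tests above models the message framing those tests exercise: a 10-character, zero-padded decimal byte length followed by the JSON body. A minimal sketch of that framing, assuming only what the stub itself shows; the helper names frame_payload and read_payload are illustrative and not part of cephadm:

import json

def frame_payload(data: dict) -> bytes:
    # Encode the JSON body, then prepend its byte length as a
    # 10-character, zero-padded decimal string (the same layout
    # FakeConn builds with its padding loop).
    body = json.dumps(data).encode('utf-8')
    return str(len(body)).zfill(10).encode('utf-8') + body

def read_payload(buf: bytes) -> dict:
    # Read the fixed-width length header, then decode exactly that
    # many bytes of JSON. A malformed header is what produces the
    # "Failed to extract length of payload" error asserted above.
    length = int(buf[:10].decode('utf-8'))
    return json.loads(buf[10:10 + length].decode('utf-8'))

# Round trip: a {'counter': 3}-style message survives framing intact.
assert read_payload(frame_payload({'counter': 3})) == {'counter': 3}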
30,479
37.052434
154
py
null
ceph-main/src/cephadm/tests/test_cephadm.py
# type: ignore import errno import json import mock import os import pytest import socket import unittest from textwrap import dedent from .fixtures import ( cephadm_fs, mock_docker, mock_podman, with_cephadm_ctx, mock_bad_firewalld, import_cephadm, ) from pyfakefs import fake_filesystem from pyfakefs import fake_filesystem_unittest _cephadm = import_cephadm() def get_ceph_conf( fsid='00000000-0000-0000-0000-0000deadbeef', mon_host='[v2:192.168.1.1:3300/0,v1:192.168.1.1:6789/0]'): return f''' # minimal ceph.conf for {fsid} [global] fsid = {fsid} mon_host = {mon_host} ''' class TestCephAdm(object): def test_docker_unit_file(self): ctx = _cephadm.CephadmContext() ctx.container_engine = mock_docker() r = _cephadm.get_unit_file(ctx, '9b9d7609-f4d5-4aba-94c8-effa764d96c9') assert 'Requires=docker.service' in r ctx.container_engine = mock_podman() r = _cephadm.get_unit_file(ctx, '9b9d7609-f4d5-4aba-94c8-effa764d96c9') assert 'Requires=docker.service' not in r @mock.patch('cephadm.logger') def test_attempt_bind(self, _logger): ctx = None address = None port = 0 def os_error(errno): _os_error = OSError() _os_error.errno = errno return _os_error for side_effect, expected_exception in ( (os_error(errno.EADDRINUSE), _cephadm.PortOccupiedError), (os_error(errno.EAFNOSUPPORT), _cephadm.Error), (os_error(errno.EADDRNOTAVAIL), _cephadm.Error), (None, None), ): _socket = mock.Mock() _socket.bind.side_effect = side_effect try: _cephadm.attempt_bind(ctx, _socket, address, port) except Exception as e: assert isinstance(e, expected_exception) else: if expected_exception is not None: assert False @mock.patch('cephadm.attempt_bind') @mock.patch('cephadm.logger') def test_port_in_use(self, _logger, _attempt_bind): empty_ctx = None assert _cephadm.port_in_use(empty_ctx, 9100) == False _attempt_bind.side_effect = _cephadm.PortOccupiedError('msg') assert _cephadm.port_in_use(empty_ctx, 9100) == True os_error = OSError() os_error.errno = errno.EADDRNOTAVAIL _attempt_bind.side_effect = os_error assert _cephadm.port_in_use(empty_ctx, 9100) == False os_error = OSError() os_error.errno = errno.EAFNOSUPPORT _attempt_bind.side_effect = os_error assert _cephadm.port_in_use(empty_ctx, 9100) == False @mock.patch('socket.socket') @mock.patch('cephadm.logger') def test_check_ip_port_success(self, _logger, _socket): ctx = _cephadm.CephadmContext() ctx.skip_ping_check = False # enables executing port check with `check_ip_port` for address, address_family in ( ('0.0.0.0', socket.AF_INET), ('::', socket.AF_INET6), ): try: _cephadm.check_ip_port(ctx, _cephadm.EndPoint(address, 9100)) except: assert False else: assert _socket.call_args == mock.call(address_family, socket.SOCK_STREAM) @mock.patch('socket.socket') @mock.patch('cephadm.logger') def test_check_ip_port_failure(self, _logger, _socket): ctx = _cephadm.CephadmContext() ctx.skip_ping_check = False # enables executing port check with `check_ip_port` def os_error(errno): _os_error = OSError() _os_error.errno = errno return _os_error for address, address_family in ( ('0.0.0.0', socket.AF_INET), ('::', socket.AF_INET6), ): for side_effect, expected_exception in ( (os_error(errno.EADDRINUSE), _cephadm.PortOccupiedError), (os_error(errno.EADDRNOTAVAIL), _cephadm.Error), (os_error(errno.EAFNOSUPPORT), _cephadm.Error), (None, None), ): mock_socket_obj = mock.Mock() mock_socket_obj.bind.side_effect = side_effect _socket.return_value = mock_socket_obj try: _cephadm.check_ip_port(ctx, _cephadm.EndPoint(address, 9100)) except Exception as e: assert isinstance(e, expected_exception) 
else: if side_effect is not None: assert False def test_is_not_fsid(self): assert not _cephadm.is_fsid('no-uuid') def test_is_fsid(self): assert _cephadm.is_fsid('e863154d-33c7-4350-bca5-921e0467e55b') def test__get_parser_image(self): args = _cephadm._parse_args(['--image', 'foo', 'version']) assert args.image == 'foo' def test_check_required_global_args(self): ctx = _cephadm.CephadmContext() mock_fn = mock.Mock() mock_fn.return_value = 0 require_image = _cephadm.require_image(mock_fn) with pytest.raises(_cephadm.Error, match='This command requires the global --image option to be set'): require_image(ctx) ctx.image = 'sample-image' require_image(ctx) @mock.patch('cephadm.logger') def test_parse_mem_usage(self, _logger): len, summary = _cephadm._parse_mem_usage(0, 'c6290e3f1489,-- / --') assert summary == {} def test_CustomValidation(self): assert _cephadm._parse_args(['deploy', '--name', 'mon.a', '--fsid', 'fsid']) with pytest.raises(SystemExit): _cephadm._parse_args(['deploy', '--name', 'wrong', '--fsid', 'fsid']) @pytest.mark.parametrize("test_input, expected", [ ("1.6.2", (1,6,2)), ("1.6.2-stable2", (1,6,2)), ]) def test_parse_podman_version(self, test_input, expected): assert _cephadm._parse_podman_version(test_input) == expected def test_parse_podman_version_invalid(self): with pytest.raises(ValueError) as res: _cephadm._parse_podman_version('inval.id') assert 'inval' in str(res.value) @mock.patch('cephadm.logger') def test_is_ipv6(self, _logger): for good in ("[::1]", "::1", "fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"): assert _cephadm.is_ipv6(good) for bad in ("127.0.0.1", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffg", "1:2:3:4:5:6:7:8:9", "fd00::1::1", "[fg::1]"): assert not _cephadm.is_ipv6(bad) def test_unwrap_ipv6(self): def unwrap_test(address, expected): assert _cephadm.unwrap_ipv6(address) == expected tests = [ ('::1', '::1'), ('[::1]', '::1'), ('[fde4:8dba:82e1:0:5054:ff:fe6a:357]', 'fde4:8dba:82e1:0:5054:ff:fe6a:357'), ('can actually be any string', 'can actually be any string'), ('[but needs to be stripped] ', '[but needs to be stripped] ')] for address, expected in tests: unwrap_test(address, expected) def test_wrap_ipv6(self): def wrap_test(address, expected): assert _cephadm.wrap_ipv6(address) == expected tests = [ ('::1', '[::1]'), ('[::1]', '[::1]'), ('fde4:8dba:82e1:0:5054:ff:fe6a:357', '[fde4:8dba:82e1:0:5054:ff:fe6a:357]'), ('myhost.example.com', 'myhost.example.com'), ('192.168.0.1', '192.168.0.1'), ('', ''), ('fd00::1::1', 'fd00::1::1')] for address, expected in tests: wrap_test(address, expected) @mock.patch('cephadm.Firewalld', mock_bad_firewalld) @mock.patch('cephadm.logger') def test_skip_firewalld(self, _logger, cephadm_fs): """ test --skip-firewalld actually skips changing firewall """ ctx = _cephadm.CephadmContext() with pytest.raises(Exception): _cephadm.update_firewalld(ctx, 'mon') ctx.skip_firewalld = True _cephadm.update_firewalld(ctx, 'mon') ctx.skip_firewalld = False with pytest.raises(Exception): _cephadm.update_firewalld(ctx, 'mon') ctx = _cephadm.CephadmContext() ctx.ssl_dashboard_port = 8888 ctx.dashboard_key = None ctx.dashboard_password_noupdate = True ctx.initial_dashboard_password = 'password' ctx.initial_dashboard_user = 'User' with pytest.raises(Exception): _cephadm.prepare_dashboard(ctx, 0, 0, lambda _, extra_mounts=None, ___=None : '5', lambda : None) ctx.skip_firewalld = True _cephadm.prepare_dashboard(ctx, 0, 0, lambda _, extra_mounts=None, ___=None : '5', lambda : None) ctx.skip_firewalld = False with pytest.raises(Exception): 
_cephadm.prepare_dashboard(ctx, 0, 0, lambda _, extra_mounts=None, ___=None : '5', lambda : None) @mock.patch('cephadm.logger') @mock.patch('cephadm.fetch_custom_config_files') @mock.patch('cephadm.get_container') def test_get_deployment_container(self, _get_container, _get_config, _logger): """ test get_deployment_container properly makes use of extra container args and custom conf files """ ctx = _cephadm.CephadmContext() ctx.config_json = '-' ctx.extra_container_args = [ '--pids-limit=12345', '--something', ] ctx.data_dir = 'data' _get_config.return_value = [ { 'mount_path': '/etc/testing.str', 'content': 'this\nis\na\nstring', } ] _get_container.return_value = _cephadm.CephContainer.for_daemon( ctx, fsid='9b9d7609-f4d5-4aba-94c8-effa764d96c9', daemon_type='grafana', daemon_id='host1', entrypoint='', args=[], container_args=[], volume_mounts={}, bind_mounts=[], envs=[], privileged=False, ptrace=False, host_network=True, ) c = _cephadm.get_deployment_container(ctx, '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'grafana', 'host1',) assert '--pids-limit=12345' in c.container_args assert '--something' in c.container_args assert os.path.join('data', '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'custom_config_files', 'grafana.host1', 'testing.str') in c.volume_mounts assert c.volume_mounts[os.path.join('data', '9b9d7609-f4d5-4aba-94c8-effa764d96c9', 'custom_config_files', 'grafana.host1', 'testing.str')] == '/etc/testing.str' @mock.patch('cephadm.logger') @mock.patch('cephadm.FileLock') @mock.patch('cephadm.deploy_daemon') @mock.patch('cephadm.fetch_configs') @mock.patch('cephadm.make_var_run') @mock.patch('cephadm.migrate_sysctl_dir') @mock.patch('cephadm.check_unit', lambda *args, **kwargs: (None, 'running', None)) @mock.patch('cephadm.get_unit_name', lambda *args, **kwargs: 'mon-unit-name') @mock.patch('cephadm.extract_uid_gid', lambda *args, **kwargs: (0, 0)) @mock.patch('cephadm.get_deployment_container') @mock.patch('cephadm.read_configuration_source', lambda c: {}) @mock.patch('cephadm.apply_deploy_config_to_ctx', lambda d, c: None) def test_mon_crush_location(self, _get_deployment_container, _migrate_sysctl, _make_var_run, _fetch_configs, _deploy_daemon, _file_lock, _logger): """ test that crush location for mon is set if it is included in config_json """ ctx = _cephadm.CephadmContext() ctx.name = 'mon.test' ctx.fsid = '9b9d7609-f4d5-4aba-94c8-effa764d96c9' ctx.reconfig = False ctx.container_engine = mock_docker() ctx.allow_ptrace = True ctx.config_json = '-' ctx.osd_fsid = '0' ctx.tcp_ports = '3300 6789' _fetch_configs.return_value = { 'crush_location': 'database=a' } _get_deployment_container.return_value = _cephadm.CephContainer.for_daemon( ctx, fsid='9b9d7609-f4d5-4aba-94c8-effa764d96c9', daemon_type='mon', daemon_id='test', entrypoint='', args=[], container_args=[], volume_mounts={}, bind_mounts=[], envs=[], privileged=False, ptrace=False, host_network=True, ) def _crush_location_checker(ctx, fsid, daemon_type, daemon_id, container, uid, gid, **kwargs): print(container.args) raise Exception(' '.join(container.args)) _deploy_daemon.side_effect = _crush_location_checker with pytest.raises(Exception, match='--set-crush-location database=a'): _cephadm.command_deploy_from(ctx) @mock.patch('cephadm.logger') @mock.patch('cephadm.fetch_custom_config_files') def test_write_custom_conf_files(self, _get_config, _logger, cephadm_fs): """ test _write_custom_conf_files writes the conf files correctly """ ctx = _cephadm.CephadmContext() ctx.config_json = '-' ctx.data_dir = _cephadm.DATA_DIR 
_get_config.return_value = [ { 'mount_path': '/etc/testing.str', 'content': 'this\nis\na\nstring', }, { 'mount_path': '/etc/testing.conf', 'content': 'very_cool_conf_setting: very_cool_conf_value\nx: y', }, { 'mount_path': '/etc/no-content.conf', }, ] _cephadm._write_custom_conf_files(ctx, 'mon', 'host1', 'fsid', 0, 0) with open(os.path.join(_cephadm.DATA_DIR, 'fsid', 'custom_config_files', 'mon.host1', 'testing.str'), 'r') as f: assert 'this\nis\na\nstring' == f.read() with open(os.path.join(_cephadm.DATA_DIR, 'fsid', 'custom_config_files', 'mon.host1', 'testing.conf'), 'r') as f: assert 'very_cool_conf_setting: very_cool_conf_value\nx: y' == f.read() with pytest.raises(FileNotFoundError): open(os.path.join(_cephadm.DATA_DIR, 'fsid', 'custom_config_files', 'mon.host1', 'no-content.conf'), 'r') @mock.patch('cephadm.call_throws') @mock.patch('cephadm.get_parm') @mock.patch('cephadm.logger') def test_registry_login(self, _logger, _get_parm, _call_throws): # test normal valid login with url, username and password specified _call_throws.return_value = '', '', 0 ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx( ['registry-login', '--registry-url', 'sample-url', '--registry-username', 'sample-user', '--registry-password', 'sample-pass']) ctx.container_engine = mock_docker() retval = _cephadm.command_registry_login(ctx) assert retval == 0 # test bad login attempt with invalid arguments given ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx( ['registry-login', '--registry-url', 'bad-args-url']) with pytest.raises(Exception) as e: assert _cephadm.command_registry_login(ctx) assert str(e.value) == ('Invalid custom registry arguments received. To login to a custom registry include ' '--registry-url, --registry-username and --registry-password options or --registry-json option') # test normal valid login with json file _get_parm.return_value = {"url": "sample-url", "username": "sample-username", "password": "sample-password"} ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx( ['registry-login', '--registry-json', 'sample-json']) ctx.container_engine = mock_docker() retval = _cephadm.command_registry_login(ctx) assert retval == 0 # test bad login attempt with bad json file _get_parm.return_value = {"bad-json": "bad-json"} ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx( ['registry-login', '--registry-json', 'sample-json']) with pytest.raises(Exception) as e: assert _cephadm.command_registry_login(ctx) assert str(e.value) == ("json provided for custom registry login did not include all necessary fields. 
" "Please setup json file as\n" "{\n" " \"url\": \"REGISTRY_URL\",\n" " \"username\": \"REGISTRY_USERNAME\",\n" " \"password\": \"REGISTRY_PASSWORD\"\n" "}\n") # test login attempt with valid arguments where login command fails _call_throws.side_effect = Exception ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx( ['registry-login', '--registry-url', 'sample-url', '--registry-username', 'sample-user', '--registry-password', 'sample-pass']) with pytest.raises(Exception) as e: _cephadm.command_registry_login(ctx) assert str(e.value) == "Failed to login to custom registry @ sample-url as sample-user with given password" def test_get_image_info_from_inspect(self): # podman out = """204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1,[docker.io/ceph/ceph@sha256:1cc9b824e1b076cdff52a9aa3f0cc8557d879fb2fbbba0cafed970aca59a3992]""" r = _cephadm.get_image_info_from_inspect(out, 'registry/ceph/ceph:latest') print(r) assert r == { 'image_id': '204a01f9b0b6710dd0c0af7f37ce7139c47ff0f0105d778d7104c69282dfbbf1', 'repo_digests': ['docker.io/ceph/ceph@sha256:1cc9b824e1b076cdff52a9aa3f0cc8557d879fb2fbbba0cafed970aca59a3992'] } # docker out = """sha256:16f4549cf7a8f112bbebf7946749e961fbbd1b0838627fe619aab16bc17ce552,[quay.ceph.io/ceph-ci/ceph@sha256:4e13da36c1bd6780b312a985410ae678984c37e6a9493a74c87e4a50b9bda41f]""" r = _cephadm.get_image_info_from_inspect(out, 'registry/ceph/ceph:latest') assert r == { 'image_id': '16f4549cf7a8f112bbebf7946749e961fbbd1b0838627fe619aab16bc17ce552', 'repo_digests': ['quay.ceph.io/ceph-ci/ceph@sha256:4e13da36c1bd6780b312a985410ae678984c37e6a9493a74c87e4a50b9bda41f'] } # multiple digests (podman) out = """e935122ab143a64d92ed1fbb27d030cf6e2f0258207be1baf1b509c466aeeb42,[docker.io/prom/prometheus@sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4 docker.io/prom/prometheus@sha256:efd99a6be65885c07c559679a0df4ec709604bcdd8cd83f0d00a1a683b28fb6a]""" r = _cephadm.get_image_info_from_inspect(out, 'registry/prom/prometheus:latest') assert r == { 'image_id': 'e935122ab143a64d92ed1fbb27d030cf6e2f0258207be1baf1b509c466aeeb42', 'repo_digests': [ 'docker.io/prom/prometheus@sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4', 'docker.io/prom/prometheus@sha256:efd99a6be65885c07c559679a0df4ec709604bcdd8cd83f0d00a1a683b28fb6a', ] } def test_dict_get(self): result = _cephadm.dict_get({'a': 1}, 'a', require=True) assert result == 1 result = _cephadm.dict_get({'a': 1}, 'b') assert result is None result = _cephadm.dict_get({'a': 1}, 'b', default=2) assert result == 2 def test_dict_get_error(self): with pytest.raises(_cephadm.Error): _cephadm.dict_get({'a': 1}, 'b', require=True) def test_dict_get_join(self): result = _cephadm.dict_get_join({'foo': ['a', 'b']}, 'foo') assert result == 'a\nb' result = _cephadm.dict_get_join({'foo': [1, 2]}, 'foo') assert result == '1\n2' result = _cephadm.dict_get_join({'bar': 'a'}, 'bar') assert result == 'a' result = _cephadm.dict_get_join({'a': 1}, 'a') assert result == 1 @mock.patch('os.listdir', return_value=[]) @mock.patch('cephadm.logger') def test_infer_local_ceph_image(self, _logger, _listdir): ctx = _cephadm.CephadmContext() ctx.fsid = '00000000-0000-0000-0000-0000deadbeez' ctx.container_engine = mock_podman() # make sure the right image is selected when container is found cinfo = _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972', 'registry.hub.docker.com/rkachach/ceph:custom-v0.5', '514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d', 
'2022-04-19 13:45:20.97146228 +0000 UTC', '') out = '''quay.ceph.io/ceph-ci/ceph@sha256:87f200536bb887b36b959e887d5984dd7a3f008a23aa1f283ab55d48b22c6185|dad864ee21e9|main|2022-03-23 16:29:19 +0000 UTC quay.ceph.io/ceph-ci/ceph@sha256:b50b130fcda2a19f8507ddde3435bb4722266956e1858ac395c838bc1dcf1c0e|514e6a882f6e|pacific|2022-03-23 15:58:34 +0000 UTC docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508|666bbfa87e8d|v15.2.5|2020-09-16 14:15:15 +0000 UTC''' with mock.patch('cephadm.call_throws', return_value=(out, '', '')): with mock.patch('cephadm.get_container_info', return_value=cinfo): image = _cephadm.infer_local_ceph_image(ctx, ctx.container_engine) assert image == 'quay.ceph.io/ceph-ci/ceph@sha256:b50b130fcda2a19f8507ddde3435bb4722266956e1858ac395c838bc1dcf1c0e' # make sure first valid image is used when no container_info is found out = '''quay.ceph.io/ceph-ci/ceph@sha256:87f200536bb887b36b959e887d5984dd7a3f008a23aa1f283ab55d48b22c6185|dad864ee21e9|main|2022-03-23 16:29:19 +0000 UTC quay.ceph.io/ceph-ci/ceph@sha256:b50b130fcda2a19f8507ddde3435bb4722266956e1858ac395c838bc1dcf1c0e|514e6a882f6e|pacific|2022-03-23 15:58:34 +0000 UTC docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508|666bbfa87e8d|v15.2.5|2020-09-16 14:15:15 +0000 UTC''' with mock.patch('cephadm.call_throws', return_value=(out, '', '')): with mock.patch('cephadm.get_container_info', return_value=None): image = _cephadm.infer_local_ceph_image(ctx, ctx.container_engine) assert image == 'quay.ceph.io/ceph-ci/ceph@sha256:87f200536bb887b36b959e887d5984dd7a3f008a23aa1f283ab55d48b22c6185' # make sure images without digest are discarded (no container_info is found) out = '''quay.ceph.io/ceph-ci/ceph@||| docker.io/ceph/ceph@||| docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508|666bbfa87e8d|v15.2.5|2020-09-16 14:15:15 +0000 UTC''' with mock.patch('cephadm.call_throws', return_value=(out, '', '')): with mock.patch('cephadm.get_container_info', return_value=None): image = _cephadm.infer_local_ceph_image(ctx, ctx.container_engine) assert image == 'docker.io/ceph/ceph@sha256:939a46c06b334e094901560c8346de33c00309e3e3968a2db240eb4897c6a508' @pytest.mark.parametrize('daemon_filter, by_name, daemon_list, container_stats, output', [ # get container info by type ('mon') ( 'mon', False, [ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, {'name': 'mgr.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, ], ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,", "", 0), _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972', 'registry.hub.docker.com/rkachach/ceph:custom-v0.5', '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4', '2022-04-19 13:45:20.97146228 +0000 UTC', '') ), # get container info by name ('mon.ceph-node-0') ( 'mon.ceph-node-0', True, [ {'name': 'mgr.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, ], ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,", "", 0), 
_cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972', 'registry.hub.docker.com/rkachach/ceph:custom-v0.5', '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4', '2022-04-19 13:45:20.97146228 +0000 UTC', '') ), # get container info by name (same daemon but two different fsids) ( 'mon.ceph-node-0', True, [ {'name': 'mon.ceph-node-0', 'fsid': '10000000-0000-0000-0000-0000deadbeef'}, {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, ], ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,", "", 0), _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972', 'registry.hub.docker.com/rkachach/ceph:custom-v0.5', '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4', '2022-04-19 13:45:20.97146228 +0000 UTC', '') ), # get container info by type (bad container stats: 127 code) ( 'mon', False, [ {'name': 'mon.ceph-node-0', 'fsid': '00000000-FFFF-0000-0000-0000deadbeef'}, {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, ], ("", "", 127), None ), # get container info by name (bad container stats: 127 code) ( 'mon.ceph-node-0', True, [ {'name': 'mgr.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, ], ("", "", 127), None ), # get container info by invalid name (doesn't contain '.') ( 'mon-ceph-node-0', True, [ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, ], ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,", "", 0), None ), # get container info by invalid name (empty) ( '', True, [ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, ], ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,", "", 0), None ), # get container info by invalid type (empty) ( '', False, [ {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, {'name': 'mon.ceph-node-0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}, ], ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,", "", 0), None ), # get container info by type: no match (invalid fsid) ( 'mon', False, [ {'name': 'mon.ceph-node-0', 'fsid': '00000000-1111-0000-0000-0000deadbeef'}, {'name': 'mon.ceph-node-0', 'fsid': '00000000-2222-0000-0000-0000deadbeef'}, ], ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972,registry.hub.docker.com/rkachach/ceph:custom-v0.5,666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4,2022-04-19 13:45:20.97146228 +0000 UTC,", "", 0), None ), # get container info by name: no match ( 'mon.ceph-node-0', True, [], None, None ), # get container info by type: no match ( 'mgr', False, [], None, 
None ), ]) @mock.patch('cephadm.logger') def test_get_container_info(self, _logger, daemon_filter, by_name, daemon_list, container_stats, output): ctx = _cephadm.CephadmContext() ctx.fsid = '00000000-0000-0000-0000-0000deadbeef' ctx.container_engine = mock_podman() with mock.patch('cephadm.list_daemons', return_value=daemon_list): with mock.patch('cephadm.get_container_stats', return_value=container_stats): assert _cephadm.get_container_info(ctx, daemon_filter, by_name) == output def test_should_log_to_journald(self): ctx = _cephadm.CephadmContext() # explicit ctx.log_to_journald = True assert _cephadm.should_log_to_journald(ctx) ctx.log_to_journald = None # enable if podman supports --cgroup=split ctx.container_engine = mock_podman() ctx.container_engine.version = (2, 1, 0) assert _cephadm.should_log_to_journald(ctx) # disable on old podman ctx.container_engine.version = (2, 0, 0) assert not _cephadm.should_log_to_journald(ctx) # disable on docker ctx.container_engine = mock_docker() assert not _cephadm.should_log_to_journald(ctx) def test_normalize_image_digest(self): s = 'myhostname:5000/ceph/ceph@sha256:753886ad9049004395ae990fbb9b096923b5a518b819283141ee8716ddf55ad1' assert _cephadm.normalize_image_digest(s) == s s = 'ceph/ceph:latest' assert _cephadm.normalize_image_digest(s) == f'{_cephadm.DEFAULT_REGISTRY}/{s}' @pytest.mark.parametrize('fsid, ceph_conf, list_daemons, result, err, ', [ ( None, None, [], None, None, ), ( '00000000-0000-0000-0000-0000deadbeef', None, [], '00000000-0000-0000-0000-0000deadbeef', None, ), ( '00000000-0000-0000-0000-0000deadbeef', None, [ {'fsid': '10000000-0000-0000-0000-0000deadbeef'}, {'fsid': '20000000-0000-0000-0000-0000deadbeef'}, ], '00000000-0000-0000-0000-0000deadbeef', None, ), ( None, None, [ {'fsid': '00000000-0000-0000-0000-0000deadbeef'}, ], '00000000-0000-0000-0000-0000deadbeef', None, ), ( None, None, [ {'fsid': '10000000-0000-0000-0000-0000deadbeef'}, {'fsid': '20000000-0000-0000-0000-0000deadbeef'}, ], None, r'Cannot infer an fsid', ), ( None, get_ceph_conf(fsid='00000000-0000-0000-0000-0000deadbeef'), [], '00000000-0000-0000-0000-0000deadbeef', None, ), ( None, get_ceph_conf(fsid='00000000-0000-0000-0000-0000deadbeef'), [ {'fsid': '00000000-0000-0000-0000-0000deadbeef'}, ], '00000000-0000-0000-0000-0000deadbeef', None, ), ( None, get_ceph_conf(fsid='00000000-0000-0000-0000-0000deadbeef'), [ {'fsid': '10000000-0000-0000-0000-0000deadbeef'}, {'fsid': '20000000-0000-0000-0000-0000deadbeef'}, ], None, r'Cannot infer an fsid', ), ]) @mock.patch('cephadm.call') @mock.patch('cephadm.logger') def test_infer_fsid(self, _logger, _call, fsid, ceph_conf, list_daemons, result, err, cephadm_fs): # build the context ctx = _cephadm.CephadmContext() ctx.fsid = fsid # mock the decorator mock_fn = mock.Mock() mock_fn.return_value = 0 infer_fsid = _cephadm.infer_fsid(mock_fn) # mock the ceph.conf file content if ceph_conf: f = cephadm_fs.create_file('ceph.conf', contents=ceph_conf) ctx.config = f.path # test with mock.patch('cephadm.list_daemons', return_value=list_daemons): if err: with pytest.raises(_cephadm.Error, match=err): infer_fsid(ctx) else: infer_fsid(ctx) assert ctx.fsid == result @pytest.mark.parametrize('fsid, other_conf_files, config, name, list_daemons, result, ', [ # per-cluster conf takes precedence over the default conf ( '00000000-0000-0000-0000-0000deadbeef', [_cephadm.CEPH_DEFAULT_CONF], None, None, [], '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf', ), # mon daemon conf takes precedence over cluster conf and 
default conf ( '00000000-0000-0000-0000-0000deadbeef', ['/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf', _cephadm.CEPH_DEFAULT_CONF], None, None, [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'}], '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config', ), # daemon conf (--name option) takes precedence over cluster, default and mon conf ( '00000000-0000-0000-0000-0000deadbeef', ['/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf', '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config', _cephadm.CEPH_DEFAULT_CONF], None, 'osd.0', [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'}, {'name': 'osd.0', 'fsid': '00000000-0000-0000-0000-0000deadbeef'}], '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/osd.0/config', ), # user-provided conf ('/foo/ceph.conf') takes precedence over any other conf ( '00000000-0000-0000-0000-0000deadbeef', ['/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf', _cephadm.CEPH_DEFAULT_CONF, '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config'], '/foo/ceph.conf', None, [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'}], '/foo/ceph.conf', ), ]) @mock.patch('cephadm.call') @mock.patch('cephadm.logger') def test_infer_config_precedence(self, _logger, _call, other_conf_files, fsid, config, name, list_daemons, result, cephadm_fs): # build the context ctx = _cephadm.CephadmContext() ctx.fsid = fsid ctx.config = config ctx.name = name # mock the decorator mock_fn = mock.Mock() mock_fn.return_value = 0 infer_config = _cephadm.infer_config(mock_fn) # mock the config file cephadm_fs.create_file(result) # mock other potential config files for f in other_conf_files: cephadm_fs.create_file(f) # test with mock.patch('cephadm.list_daemons', return_value=list_daemons): infer_config(ctx) assert ctx.config == result @pytest.mark.parametrize('fsid, config, name, list_daemons, result, ', [ ( None, '/foo/bar.conf', None, [], '/foo/bar.conf', ), ( '00000000-0000-0000-0000-0000deadbeef', None, None, [], _cephadm.CEPH_DEFAULT_CONF, ), ( '00000000-0000-0000-0000-0000deadbeef', None, None, [], '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/config/ceph.conf', ), ( '00000000-0000-0000-0000-0000deadbeef', None, None, [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'cephadm:v1'}], '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config', ), ( '00000000-0000-0000-0000-0000deadbeef', None, None, [{'name': 'mon.a', 'fsid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'style': 'cephadm:v1'}], _cephadm.CEPH_DEFAULT_CONF, ), ( '00000000-0000-0000-0000-0000deadbeef', None, None, [{'name': 'mon.a', 'fsid': '00000000-0000-0000-0000-0000deadbeef', 'style': 'legacy'}], _cephadm.CEPH_DEFAULT_CONF, ), ( '00000000-0000-0000-0000-0000deadbeef', None, None, [{'name': 'osd.0'}], _cephadm.CEPH_DEFAULT_CONF, ), ( '00000000-0000-0000-0000-0000deadbeef', '/foo/bar.conf', 'mon.a', [{'name': 'mon.a', 'style': 'cephadm:v1'}], '/foo/bar.conf', ), ( '00000000-0000-0000-0000-0000deadbeef', None, 'mon.a', [], '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/mon.a/config', ), ( '00000000-0000-0000-0000-0000deadbeef', None, 'osd.0', [], '/var/lib/ceph/00000000-0000-0000-0000-0000deadbeef/osd.0/config', ), ( None, None, None, [], _cephadm.CEPH_DEFAULT_CONF, ), ]) @mock.patch('cephadm.call') @mock.patch('cephadm.logger') def test_infer_config(self, _logger, _call, fsid, config, 
name, list_daemons, result, cephadm_fs): # build the context ctx = _cephadm.CephadmContext() ctx.fsid = fsid ctx.config = config ctx.name = name # mock the decorator mock_fn = mock.Mock() mock_fn.return_value = 0 infer_config = _cephadm.infer_config(mock_fn) # mock the config file cephadm_fs.create_file(result) # test with mock.patch('cephadm.list_daemons', return_value=list_daemons): infer_config(ctx) assert ctx.config == result @mock.patch('cephadm.call') def test_extract_uid_gid_fail(self, _call): err = """Error: container_linux.go:370: starting container process caused: process_linux.go:459: container init caused: process_linux.go:422: setting cgroup config for procHooks process caused: Unit libpod-056038e1126191fba41d8a037275136f2d7aeec9710b9ee ff792c06d8544b983.scope not found.: OCI runtime error""" _call.return_value = ('', err, 127) ctx = _cephadm.CephadmContext() ctx.container_engine = mock_podman() with pytest.raises(_cephadm.Error, match='OCI'): _cephadm.extract_uid_gid(ctx) @pytest.mark.parametrize('test_input, expected', [ ([_cephadm.make_fsid(), _cephadm.make_fsid(), _cephadm.make_fsid()], 3), ([_cephadm.make_fsid(), 'invalid-fsid', _cephadm.make_fsid(), '0b87e50c-8e77-11ec-b890-'], 2), (['f6860ec2-8e76-11ec-', '0b87e50c-8e77-11ec-b890-', ''], 0), ([], 0), ]) def test_get_ceph_cluster_count(self, test_input, expected): ctx = _cephadm.CephadmContext() with mock.patch('os.listdir', return_value=test_input): assert _cephadm.get_ceph_cluster_count(ctx) == expected def test_set_image_minimize_config(self): def throw_cmd(cmd): raise _cephadm.Error(' '.join(cmd)) ctx = _cephadm.CephadmContext() ctx.image = 'test_image' ctx.no_minimize_config = True fake_cli = lambda cmd, __=None, ___=None: throw_cmd(cmd) with pytest.raises(_cephadm.Error, match='config set global container_image test_image'): _cephadm.finish_bootstrap_config( ctx=ctx, fsid=_cephadm.make_fsid(), config='', mon_id='a', mon_dir='mon_dir', mon_network=None, ipv6=False, cli=fake_cli, cluster_network=None, ipv6_cluster_network=False ) class TestCustomContainer(unittest.TestCase): cc: _cephadm.CustomContainer def setUp(self): self.cc = _cephadm.CustomContainer( 'e863154d-33c7-4350-bca5-921e0467e55b', 'container', config_json={ 'entrypoint': 'bash', 'gid': 1000, 'args': [ '--no-healthcheck', '-p 6800:6800' ], 'envs': ['SECRET=password'], 'ports': [8080, 8443], 'volume_mounts': { '/CONFIG_DIR': '/foo/conf', 'bar/config': '/bar:ro' }, 'bind_mounts': [ [ 'type=bind', 'source=/CONFIG_DIR', 'destination=/foo/conf', '' ], [ 'type=bind', 'source=bar/config', 'destination=/bar:ro', 'ro=true' ] ] }, image='docker.io/library/hello-world:latest' ) def test_entrypoint(self): self.assertEqual(self.cc.entrypoint, 'bash') def test_uid_gid(self): self.assertEqual(self.cc.uid, 65534) self.assertEqual(self.cc.gid, 1000) def test_ports(self): self.assertEqual(self.cc.ports, [8080, 8443]) def test_get_container_args(self): result = self.cc.get_container_args() self.assertEqual(result, [ '--no-healthcheck', '-p 6800:6800' ]) def test_get_container_envs(self): result = self.cc.get_container_envs() self.assertEqual(result, ['SECRET=password']) def test_get_container_mounts(self): result = self.cc.get_container_mounts('/xyz') self.assertDictEqual(result, { '/CONFIG_DIR': '/foo/conf', '/xyz/bar/config': '/bar:ro' }) def test_get_container_binds(self): result = self.cc.get_container_binds('/xyz') self.assertEqual(result, [ [ 'type=bind', 'source=/CONFIG_DIR', 'destination=/foo/conf', '' ], [ 'type=bind', 'source=/xyz/bar/config', 
'destination=/bar:ro', 'ro=true' ] ]) class TestMaintenance: systemd_target = "ceph.00000000-0000-0000-0000-000000c0ffee.target" fsid = '0ea8cdd0-1bbf-11ec-a9c7-5254002763fa' def test_systemd_target_OK(self, tmp_path): base = tmp_path wants = base / "ceph.target.wants" wants.mkdir() target = wants / TestMaintenance.systemd_target target.touch() ctx = _cephadm.CephadmContext() ctx.unit_dir = str(base) assert _cephadm.systemd_target_state(ctx, target.name) def test_systemd_target_NOTOK(self, tmp_path): base = tmp_path ctx = _cephadm.CephadmContext() ctx.unit_dir = str(base) assert not _cephadm.systemd_target_state(ctx, TestMaintenance.systemd_target) def test_parser_OK(self): args = _cephadm._parse_args(['host-maintenance', 'enter']) assert args.maintenance_action == 'enter' def test_parser_BAD(self): with pytest.raises(SystemExit): _cephadm._parse_args(['host-maintenance', 'wah']) @mock.patch('os.listdir', return_value=[]) @mock.patch('cephadm.call') @mock.patch('cephadm.logger') @mock.patch('cephadm.systemd_target_state') def test_enter_failure_1(self, _target_state, _logger, _call, _listdir): _call.return_value = '', '', 999 _target_state.return_value = True ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx( ['host-maintenance', 'enter', '--fsid', TestMaintenance.fsid]) ctx.container_engine = mock_podman() retval = _cephadm.command_maintenance(ctx) assert retval.startswith('failed') @mock.patch('os.listdir', return_value=[]) @mock.patch('cephadm.call') @mock.patch('cephadm.logger') @mock.patch('cephadm.systemd_target_state') def test_enter_failure_2(self, _target_state, _logger, _call, _listdir): _call.side_effect = [('', '', 0), ('', '', 999), ('', '', 0), ('', '', 999)] _target_state.return_value = True ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx( ['host-maintenance', 'enter', '--fsid', TestMaintenance.fsid]) ctx.container_engine = mock_podman() retval = _cephadm.command_maintenance(ctx) assert retval.startswith('failed') @mock.patch('os.listdir', return_value=[]) @mock.patch('cephadm.call') @mock.patch('cephadm.logger') @mock.patch('cephadm.systemd_target_state') @mock.patch('cephadm.target_exists') def test_exit_failure_1(self, _target_exists, _target_state, _logger, _call, _listdir): _call.return_value = '', '', 999 _target_state.return_value = False _target_exists.return_value = True ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx( ['host-maintenance', 'exit', '--fsid', TestMaintenance.fsid]) ctx.container_engine = mock_podman() retval = _cephadm.command_maintenance(ctx) assert retval.startswith('failed') @mock.patch('os.listdir', return_value=[]) @mock.patch('cephadm.call') @mock.patch('cephadm.logger') @mock.patch('cephadm.systemd_target_state') @mock.patch('cephadm.target_exists') def test_exit_failure_2(self, _target_exists, _target_state, _logger, _call, _listdir): _call.side_effect = [('', '', 0), ('', '', 999), ('', '', 0), ('', '', 999)] _target_state.return_value = False _target_exists.return_value = True ctx: _cephadm.CephadmContext = _cephadm.cephadm_init_ctx( ['host-maintenance', 'exit', '--fsid', TestMaintenance.fsid]) ctx.container_engine = mock_podman() retval = _cephadm.command_maintenance(ctx) assert retval.startswith('failed') class TestMonitoring(object): @mock.patch('cephadm.call') def test_get_version_alertmanager(self, _call): ctx = _cephadm.CephadmContext() ctx.container_engine = mock_podman() daemon_type = 'alertmanager' # binary `prometheus` _call.return_value = '', '{}, version 0.16.1'.format(daemon_type), 0 version = 
_cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type) assert version == '0.16.1' # binary `prometheus-alertmanager` _call.side_effect = ( ('', '', 1), ('', '{}, version 0.16.1'.format(daemon_type), 0), ) version = _cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type) assert version == '0.16.1' @mock.patch('cephadm.call') def test_get_version_prometheus(self, _call): ctx = _cephadm.CephadmContext() ctx.container_engine = mock_podman() daemon_type = 'prometheus' _call.return_value = '', '{}, version 0.16.1'.format(daemon_type), 0 version = _cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type) assert version == '0.16.1' def test_prometheus_external_url(self): ctx = _cephadm.CephadmContext() ctx.config_json = json.dumps({'files': {}, 'retention_time': '15d'}) daemon_type = 'prometheus' daemon_id = 'home' fsid = 'aaf5a720-13fe-4a3b-82b9-2d99b7fd9704' args = _cephadm.get_daemon_args(ctx, fsid, daemon_type, daemon_id) assert any([x.startswith('--web.external-url=http://') for x in args]) @mock.patch('cephadm.call') def test_get_version_node_exporter(self, _call): ctx = _cephadm.CephadmContext() ctx.container_engine = mock_podman() daemon_type = 'node-exporter' _call.return_value = '', '{}, version 0.16.1'.format(daemon_type.replace('-', '_')), 0 version = _cephadm.Monitoring.get_version(ctx, 'container_id', daemon_type) assert version == '0.16.1' def test_create_daemon_dirs_prometheus(self, cephadm_fs): """ Ensures the required and optional files given in the configuration are created and mapped correctly inside the container. Tests absolute and relative file paths given in the configuration. """ fsid = 'aaf5a720-13fe-4a3b-82b9-2d99b7fd9704' daemon_type = 'prometheus' uid, gid = 50, 50 daemon_id = 'home' ctx = _cephadm.CephadmContext() ctx.data_dir = '/somedir' ctx.config_json = json.dumps({ 'files': { 'prometheus.yml': 'foo', '/etc/prometheus/alerting/ceph_alerts.yml': 'bar' } }) _cephadm.create_daemon_dirs(ctx, fsid, daemon_type, daemon_id, uid, gid, config=None, keyring=None) prefix = '{data_dir}/{fsid}/{daemon_type}.{daemon_id}'.format( data_dir=ctx.data_dir, fsid=fsid, daemon_type=daemon_type, daemon_id=daemon_id ) expected = { 'etc/prometheus/prometheus.yml': 'foo', 'etc/prometheus/alerting/ceph_alerts.yml': 'bar', } for file,content in expected.items(): file = os.path.join(prefix, file) assert os.path.exists(file) with open(file) as f: assert f.read() == content # assert uid/gid after redeploy new_uid = uid+1 new_gid = gid+1 _cephadm.create_daemon_dirs(ctx, fsid, daemon_type, daemon_id, new_uid, new_gid, config=None, keyring=None) for file,content in expected.items(): file = os.path.join(prefix, file) assert os.stat(file).st_uid == new_uid assert os.stat(file).st_gid == new_gid class TestBootstrap(object): @staticmethod def _get_cmd(*args): return [ 'bootstrap', '--allow-mismatched-release', '--skip-prepare-host', '--skip-dashboard', *args, ] ############################################### def test_config(self, cephadm_fs): conf_file = 'foo' cmd = self._get_cmd( '--mon-ip', '192.168.1.1', '--skip-mon-network', '--config', conf_file, ) with with_cephadm_ctx(cmd) as ctx: msg = r'No such file or directory' with pytest.raises(_cephadm.Error, match=msg): _cephadm.command_bootstrap(ctx) cephadm_fs.create_file(conf_file) with with_cephadm_ctx(cmd) as ctx: retval = _cephadm.command_bootstrap(ctx) assert retval == 0 def test_no_mon_addr(self, cephadm_fs): cmd = self._get_cmd() with with_cephadm_ctx(cmd) as ctx: msg = r'must specify --mon-ip or --mon-addrv' with 
pytest.raises(_cephadm.Error, match=msg):
                _cephadm.command_bootstrap(ctx)

    def test_skip_mon_network(self, cephadm_fs):
        cmd = self._get_cmd('--mon-ip', '192.168.1.1')

        with with_cephadm_ctx(cmd, list_networks={}) as ctx:
            msg = r'--skip-mon-network'
            with pytest.raises(_cephadm.Error, match=msg):
                _cephadm.command_bootstrap(ctx)

        cmd += ['--skip-mon-network']
        with with_cephadm_ctx(cmd, list_networks={}) as ctx:
            retval = _cephadm.command_bootstrap(ctx)
            assert retval == 0

    @pytest.mark.parametrize('mon_ip, list_networks, result',
        [
            # IPv4
            (
                'eth0',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                False,
            ),
            (
                '0.0.0.0',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                False,
            ),
            (
                '192.168.1.0',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                False,
            ),
            (
                '192.168.1.1',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                True,
            ),
            (
                '192.168.1.1:1234',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                True,
            ),
            (
                '192.168.1.1:0123',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                True,
            ),
            # IPv6
            (
                '::',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                False,
            ),
            (
                '::ffff:192.168.1.0',
                {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
                False,
            ),
            (
                '::ffff:192.168.1.1',
                {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
                True,
            ),
            (
                '::ffff:c0a8:101',
                {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
                True,
            ),
            (
                '[::ffff:c0a8:101]:1234',
                {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
                True,
            ),
            (
                '[::ffff:c0a8:101]:0123',
                {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
                True,
            ),
            (
                '0000:0000:0000:0000:0000:FFFF:C0A8:0101',
                {"ffff::/64": {"eth0": ["::ffff:c0a8:101"]}},
                True,
            ),
        ])
    def test_mon_ip(self, mon_ip, list_networks, result, cephadm_fs):
        cmd = self._get_cmd('--mon-ip', mon_ip)
        if not result:
            with with_cephadm_ctx(cmd, list_networks=list_networks) as ctx:
                msg = r'--skip-mon-network'
                with pytest.raises(_cephadm.Error, match=msg):
                    _cephadm.command_bootstrap(ctx)
        else:
            with with_cephadm_ctx(cmd, list_networks=list_networks) as ctx:
                retval = _cephadm.command_bootstrap(ctx)
                assert retval == 0

    @pytest.mark.parametrize('mon_addrv, list_networks, err',
        [
            # IPv4
            (
                '192.168.1.1',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                r'must use square brackets',
            ),
            (
                '[192.168.1.1]',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                r'must include port number',
            ),
            (
                '[192.168.1.1:1234]',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                None,
            ),
            (
                '[192.168.1.1:0123]',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                None,
            ),
            (
                '[v2:192.168.1.1:3300,v1:192.168.1.1:6789]',
                {'192.168.1.0/24': {'eth0': ['192.168.1.1']}},
                None,
            ),
            # IPv6
            (
                '[::ffff:192.168.1.1:1234]',
                {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
                None,
            ),
            (
                '[::ffff:192.168.1.1:0123]',
                {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
                None,
            ),
            (
                '[0000:0000:0000:0000:0000:FFFF:C0A8:0101:1234]',
                {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
                None,
            ),
            (
                '[v2:0000:0000:0000:0000:0000:FFFF:C0A8:0101:3300,v1:0000:0000:0000:0000:0000:FFFF:C0A8:0101:6789]',
                {'ffff::/64': {'eth0': ['::ffff:c0a8:101']}},
                None,
            ),
        ])
    def test_mon_addrv(self, mon_addrv, list_networks, err, cephadm_fs):
        cmd = self._get_cmd('--mon-addrv', mon_addrv)
        if err:
            with with_cephadm_ctx(cmd, list_networks=list_networks) as ctx:
                with pytest.raises(_cephadm.Error, match=err):
                    _cephadm.command_bootstrap(ctx)
        else:
            with with_cephadm_ctx(cmd, list_networks=list_networks) as ctx:
                retval = _cephadm.command_bootstrap(ctx)
                assert retval == 0

    def test_allow_fqdn_hostname(self, cephadm_fs):
        hostname = 'foo.bar'
        cmd = self._get_cmd(
            '--mon-ip', '192.168.1.1',
            '--skip-mon-network',
        )

        with with_cephadm_ctx(cmd, hostname=hostname) as ctx:
            msg = r'--allow-fqdn-hostname'
            with pytest.raises(_cephadm.Error, match=msg):
                _cephadm.command_bootstrap(ctx)

        cmd += ['--allow-fqdn-hostname']
        with with_cephadm_ctx(cmd, hostname=hostname) as ctx:
            retval = _cephadm.command_bootstrap(ctx)
            assert retval == 0

    @pytest.mark.parametrize('fsid, err',
        [
            ('', None),
            ('00000000-0000-0000-0000-0000deadbeef', None),
            ('00000000-0000-0000-0000-0000deadbeez', 'not an fsid'),
        ])
    def test_fsid(self, fsid, err, cephadm_fs):
        cmd = self._get_cmd(
            '--mon-ip', '192.168.1.1',
            '--skip-mon-network',
            '--fsid', fsid,
        )

        with with_cephadm_ctx(cmd) as ctx:
            if err:
                with pytest.raises(_cephadm.Error, match=err):
                    _cephadm.command_bootstrap(ctx)
            else:
                retval = _cephadm.command_bootstrap(ctx)
                assert retval == 0


class TestShell(object):

    def test_fsid(self, cephadm_fs):
        fsid = '00000000-0000-0000-0000-0000deadbeef'

        cmd = ['shell', '--fsid', fsid]
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0
            assert ctx.fsid == fsid

        cmd = ['shell', '--fsid', '00000000-0000-0000-0000-0000deadbeez']
        with with_cephadm_ctx(cmd) as ctx:
            err = 'not an fsid'
            with pytest.raises(_cephadm.Error, match=err):
                retval = _cephadm.command_shell(ctx)
                assert retval == 1
                assert ctx.fsid == None

        s = get_ceph_conf(fsid=fsid)
        f = cephadm_fs.create_file('ceph.conf', contents=s)

        cmd = ['shell', '--fsid', fsid, '--config', f.path]
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0
            assert ctx.fsid == fsid

        cmd = ['shell', '--fsid', '10000000-0000-0000-0000-0000deadbeef', '--config', f.path]
        with with_cephadm_ctx(cmd) as ctx:
            err = 'fsid does not match ceph.conf'
            with pytest.raises(_cephadm.Error, match=err):
                retval = _cephadm.command_shell(ctx)
                assert retval == 1
                assert ctx.fsid == None

    def test_name(self, cephadm_fs):
        cmd = ['shell', '--name', 'foo']
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0

        cmd = ['shell', '--name', 'foo.bar']
        with with_cephadm_ctx(cmd) as ctx:
            err = r'must pass --fsid'
            with pytest.raises(_cephadm.Error, match=err):
                retval = _cephadm.command_shell(ctx)
                assert retval == 1

        fsid = '00000000-0000-0000-0000-0000deadbeef'
        cmd = ['shell', '--name', 'foo.bar', '--fsid', fsid]
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0

    def test_config(self, cephadm_fs):
        cmd = ['shell']
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0
            assert ctx.config == None

        cephadm_fs.create_file(_cephadm.CEPH_DEFAULT_CONF)
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0
            assert ctx.config == _cephadm.CEPH_DEFAULT_CONF

        cmd = ['shell', '--config', 'foo']
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0
            assert ctx.config == 'foo'

    def test_keyring(self, cephadm_fs):
        cmd = ['shell']
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0
            assert ctx.keyring == None

        cephadm_fs.create_file(_cephadm.CEPH_DEFAULT_KEYRING)
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0
            assert ctx.keyring == _cephadm.CEPH_DEFAULT_KEYRING

        cmd = ['shell', '--keyring', 'foo']
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0
            assert ctx.keyring == 'foo'

    @mock.patch('cephadm.CephContainer')
    def test_mount_no_dst(self, _ceph_container, cephadm_fs):
        cmd = ['shell', '--mount', '/etc/foo']
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0
            assert _ceph_container.call_args.kwargs['volume_mounts']['/etc/foo'] == '/mnt/foo'

    @mock.patch('cephadm.CephContainer')
    def test_mount_with_dst_no_opt(self, _ceph_container, cephadm_fs):
        cmd = ['shell', '--mount', '/etc/foo:/opt/foo/bar']
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0
            assert _ceph_container.call_args.kwargs['volume_mounts']['/etc/foo'] == '/opt/foo/bar'

    @mock.patch('cephadm.CephContainer')
    def test_mount_with_dst_and_opt(self, _ceph_container, cephadm_fs):
        cmd = ['shell', '--mount', '/etc/foo:/opt/foo/bar:Z']
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_shell(ctx)
            assert retval == 0
            assert _ceph_container.call_args.kwargs['volume_mounts']['/etc/foo'] == '/opt/foo/bar:Z'


class TestCephVolume(object):

    @staticmethod
    def _get_cmd(*args):
        return [
            'ceph-volume',
            *args,
            '--', 'inventory', '--format', 'json'
        ]

    def test_noop(self, cephadm_fs):
        cmd = self._get_cmd()
        with with_cephadm_ctx(cmd) as ctx:
            _cephadm.command_ceph_volume(ctx)
            assert ctx.fsid == None
            assert ctx.config == None
            assert ctx.keyring == None
            assert ctx.config_json == None

    def test_fsid(self, cephadm_fs):
        fsid = '00000000-0000-0000-0000-0000deadbeef'

        cmd = self._get_cmd('--fsid', fsid)
        with with_cephadm_ctx(cmd) as ctx:
            _cephadm.command_ceph_volume(ctx)
            assert ctx.fsid == fsid

        cmd = self._get_cmd('--fsid', '00000000-0000-0000-0000-0000deadbeez')
        with with_cephadm_ctx(cmd) as ctx:
            err = 'not an fsid'
            with pytest.raises(_cephadm.Error, match=err):
                retval = _cephadm.command_shell(ctx)
                assert retval == 1
                assert ctx.fsid == None

        s = get_ceph_conf(fsid=fsid)
        f = cephadm_fs.create_file('ceph.conf', contents=s)

        cmd = self._get_cmd('--fsid', fsid, '--config', f.path)
        with with_cephadm_ctx(cmd) as ctx:
            _cephadm.command_ceph_volume(ctx)
            assert ctx.fsid == fsid

        cmd = self._get_cmd('--fsid', '10000000-0000-0000-0000-0000deadbeef', '--config', f.path)
        with with_cephadm_ctx(cmd) as ctx:
            err = 'fsid does not match ceph.conf'
            with pytest.raises(_cephadm.Error, match=err):
                _cephadm.command_ceph_volume(ctx)
                assert ctx.fsid == None

    def test_config(self, cephadm_fs):
        cmd = self._get_cmd('--config', 'foo')
        with with_cephadm_ctx(cmd) as ctx:
            err = r'No such file or directory'
            with pytest.raises(_cephadm.Error, match=err):
                _cephadm.command_ceph_volume(ctx)

        cephadm_fs.create_file('bar')
        cmd = self._get_cmd('--config', 'bar')
        with with_cephadm_ctx(cmd) as ctx:
            _cephadm.command_ceph_volume(ctx)
            assert ctx.config == 'bar'

    def test_keyring(self, cephadm_fs):
        cmd = self._get_cmd('--keyring', 'foo')
        with with_cephadm_ctx(cmd) as ctx:
            err = r'No such file or directory'
            with pytest.raises(_cephadm.Error, match=err):
                _cephadm.command_ceph_volume(ctx)

        cephadm_fs.create_file('bar')
        cmd = self._get_cmd('--keyring', 'bar')
        with with_cephadm_ctx(cmd) as ctx:
            _cephadm.command_ceph_volume(ctx)
            assert ctx.keyring == 'bar'


class TestIscsi:
    def test_unit_run(self, cephadm_fs):
        fsid = '9b9d7609-f4d5-4aba-94c8-effa764d96c9'
        config_json = {
            'files': {'iscsi-gateway.cfg': ''}
        }
        with with_cephadm_ctx(['--image=ceph/ceph'], list_networks={}) as ctx:
            import json
            ctx.container_engine = mock_docker()
            ctx.config_json = json.dumps(config_json)
            ctx.fsid = fsid
            _cephadm.get_parm.return_value = config_json

            c = _cephadm.get_container(ctx, fsid, 'iscsi', 'daemon_id')
            _cephadm.make_data_dir(ctx, fsid, 'iscsi', 'daemon_id')
            _cephadm.deploy_daemon_units(
                ctx,
                fsid,
                0, 0,
                'iscsi',
                'daemon_id',
                c,
                True, True
            )

            with open('/var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/unit.run') as f:
                assert f.read() == """set -e
if ! grep -qs /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs /proc/mounts; then mount -t configfs none /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs; fi
# iscsi tcmu-runner container
! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.daemon_id-tcmu 2> /dev/null
! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu 2> /dev/null
/usr/bin/docker run --rm --ipc=host --stop-signal=SIGTERM --ulimit nofile=1048576 --net=host --entrypoint /usr/bin/tcmu-runner --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id-tcmu --pids-limit=0 -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph &
# iscsi.daemon_id
! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.daemon_id 2> /dev/null
! /usr/bin/docker rm -f ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id 2> /dev/null
/usr/bin/docker run --rm --ipc=host --stop-signal=SIGTERM --ulimit nofile=1048576 --net=host --entrypoint /usr/bin/rbd-target-api --privileged --group-add=disk --init --name ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-daemon_id --pids-limit=0 -e CONTAINER_IMAGE=ceph/ceph -e NODE_NAME=host1 -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/config:/etc/ceph/ceph.conf:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/keyring:/etc/ceph/keyring:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/iscsi-gateway.cfg:/etc/ceph/iscsi-gateway.cfg:z -v /var/lib/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9/iscsi.daemon_id/configfs:/sys/kernel/config -v /var/log/ceph/9b9d7609-f4d5-4aba-94c8-effa764d96c9:/var/log:z -v /dev:/dev --mount type=bind,source=/lib/modules,destination=/lib/modules,ro=true ceph/ceph
"""

    def test_get_container(self):
        """
        Due to a combination of socket.getfqdn() and podman's behavior to
        add the container name into the /etc/hosts file, we cannot use
        periods in container names. But we need to be able to detect old
        existing containers. Assert this behaviour.
        I think we can remove this in Ceph R
        """
        fsid = '9b9d7609-f4d5-4aba-94c8-effa764d96c9'
        with with_cephadm_ctx(['--image=ceph/ceph'], list_networks={}) as ctx:
            ctx.fsid = fsid
            c = _cephadm.get_container(ctx, fsid, 'iscsi', 'something')
            assert c.cname == 'ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi-something'
            assert c.old_cname == 'ceph-9b9d7609-f4d5-4aba-94c8-effa764d96c9-iscsi.something'


class TestCheckHost:

    @mock.patch('cephadm.find_executable', return_value='foo')
    @mock.patch('cephadm.check_time_sync', return_value=True)
    @mock.patch('cephadm.logger')
    def test_container_engine(self, _logger, _find_executable, _check_time_sync):
        ctx = _cephadm.CephadmContext()

        ctx.container_engine = None
        err = r'No container engine binary found'
        with pytest.raises(_cephadm.Error, match=err):
            _cephadm.command_check_host(ctx)

        ctx.container_engine = mock_podman()
        _cephadm.command_check_host(ctx)

        ctx.container_engine = mock_docker()
        _cephadm.command_check_host(ctx)


class TestRmRepo:

    @pytest.mark.parametrize('os_release',
        [
            # Apt
            dedent("""
            NAME="Ubuntu"
            VERSION="20.04 LTS (Focal Fossa)"
            ID=ubuntu
            ID_LIKE=debian
            PRETTY_NAME="Ubuntu 20.04 LTS"
            VERSION_ID="20.04"
            HOME_URL="https://www.ubuntu.com/"
            SUPPORT_URL="https://help.ubuntu.com/"
            BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
            PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
            VERSION_CODENAME=focal
            UBUNTU_CODENAME=focal
            """),

            # YumDnf
            dedent("""
            NAME="CentOS Linux"
            VERSION="8 (Core)"
            ID="centos"
            ID_LIKE="rhel fedora"
            VERSION_ID="8"
            PLATFORM_ID="platform:el8"
            PRETTY_NAME="CentOS Linux 8 (Core)"
            ANSI_COLOR="0;31"
            CPE_NAME="cpe:/o:centos:centos:8"
            HOME_URL="https://www.centos.org/"
            BUG_REPORT_URL="https://bugs.centos.org/"
            CENTOS_MANTISBT_PROJECT="CentOS-8"
            CENTOS_MANTISBT_PROJECT_VERSION="8"
            REDHAT_SUPPORT_PRODUCT="centos"
            REDHAT_SUPPORT_PRODUCT_VERSION="8"
            """),

            # Zypper
            dedent("""
            NAME="openSUSE Tumbleweed"
            # VERSION="20210810"
            ID="opensuse-tumbleweed"
            ID_LIKE="opensuse suse"
            VERSION_ID="20210810"
            PRETTY_NAME="openSUSE Tumbleweed"
            ANSI_COLOR="0;32"
            CPE_NAME="cpe:/o:opensuse:tumbleweed:20210810"
            BUG_REPORT_URL="https://bugs.opensuse.org"
            HOME_URL="https://www.opensuse.org/"
            DOCUMENTATION_URL="https://en.opensuse.org/Portal:Tumbleweed"
            LOGO="distributor-logo"
            """),
        ])
    @mock.patch('cephadm.find_executable', return_value='foo')
    def test_container_engine(self, _find_executable, os_release, cephadm_fs):
        cephadm_fs.create_file('/etc/os-release', contents=os_release)
        ctx = _cephadm.CephadmContext()

        ctx.container_engine = None
        _cephadm.command_rm_repo(ctx)

        ctx.container_engine = mock_podman()
        _cephadm.command_rm_repo(ctx)

        ctx.container_engine = mock_docker()
        _cephadm.command_rm_repo(ctx)


class TestValidateRepo:

    @pytest.mark.parametrize('values',
        [
            # Apt - no checks
            dict(
                version="",
                release="pacific",
                err_text="",
                os_release=dedent("""
                NAME="Ubuntu"
                VERSION="20.04 LTS (Focal Fossa)"
                ID=ubuntu
                ID_LIKE=debian
                PRETTY_NAME="Ubuntu 20.04 LTS"
                VERSION_ID="20.04"
                HOME_URL="https://www.ubuntu.com/"
                SUPPORT_URL="https://help.ubuntu.com/"
                BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
                PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
                VERSION_CODENAME=focal
                UBUNTU_CODENAME=focal
                """)),

            # YumDnf on Centos8 - OK
            dict(
                version="",
                release="pacific",
                err_text="",
                os_release=dedent("""
                NAME="CentOS Linux"
                VERSION="8 (Core)"
                ID="centos"
                ID_LIKE="rhel fedora"
                VERSION_ID="8"
                PLATFORM_ID="platform:el8"
                PRETTY_NAME="CentOS Linux 8 (Core)"
                ANSI_COLOR="0;31"
                CPE_NAME="cpe:/o:centos:centos:8"
                HOME_URL="https://www.centos.org/"
                BUG_REPORT_URL="https://bugs.centos.org/"
                CENTOS_MANTISBT_PROJECT="CentOS-8"
                CENTOS_MANTISBT_PROJECT_VERSION="8"
                REDHAT_SUPPORT_PRODUCT="centos"
                REDHAT_SUPPORT_PRODUCT_VERSION="8"
                """)),

            # YumDnf on Fedora - Fedora not supported
            dict(
                version="",
                release="pacific",
                err_text="does not build Fedora",
                os_release=dedent("""
                NAME="Fedora Linux"
                VERSION="35 (Cloud Edition)"
                ID=fedora
                VERSION_ID=35
                VERSION_CODENAME=""
                PLATFORM_ID="platform:f35"
                PRETTY_NAME="Fedora Linux 35 (Cloud Edition)"
                ANSI_COLOR="0;38;2;60;110;180"
                LOGO=fedora-logo-icon
                CPE_NAME="cpe:/o:fedoraproject:fedora:35"
                HOME_URL="https://fedoraproject.org/"
                DOCUMENTATION_URL="https://docs.fedoraproject.org/en-US/fedora/f35/system-administrators-guide/"
                SUPPORT_URL="https://ask.fedoraproject.org/"
                BUG_REPORT_URL="https://bugzilla.redhat.com/"
                REDHAT_BUGZILLA_PRODUCT="Fedora"
                REDHAT_BUGZILLA_PRODUCT_VERSION=35
                REDHAT_SUPPORT_PRODUCT="Fedora"
                REDHAT_SUPPORT_PRODUCT_VERSION=35
                PRIVACY_POLICY_URL="https://fedoraproject.org/wiki/Legal:PrivacyPolicy"
                VARIANT="Cloud Edition"
                VARIANT_ID=cloud
                """)),

            # YumDnf on Centos 7 - no pacific
            dict(
                version="",
                release="pacific",
                err_text="does not support pacific",
                os_release=dedent("""
                NAME="CentOS Linux"
                VERSION="7 (Core)"
                ID="centos"
                ID_LIKE="rhel fedora"
                VERSION_ID="7"
                PRETTY_NAME="CentOS Linux 7 (Core)"
                ANSI_COLOR="0;31"
                CPE_NAME="cpe:/o:centos:centos:7"
                HOME_URL="https://www.centos.org/"
                BUG_REPORT_URL="https://bugs.centos.org/"
                CENTOS_MANTISBT_PROJECT="CentOS-7"
                CENTOS_MANTISBT_PROJECT_VERSION="7"
                REDHAT_SUPPORT_PRODUCT="centos"
                REDHAT_SUPPORT_PRODUCT_VERSION="7"
                """)),

            # YumDnf on Centos 7 - nothing after pacific
            dict(
                version="",
                release="zillions",
                err_text="does not support pacific",
                os_release=dedent("""
                NAME="CentOS Linux"
                VERSION="7 (Core)"
                ID="centos"
                ID_LIKE="rhel fedora"
                VERSION_ID="7"
                PRETTY_NAME="CentOS Linux 7 (Core)"
                ANSI_COLOR="0;31"
                CPE_NAME="cpe:/o:centos:centos:7"
                HOME_URL="https://www.centos.org/"
                BUG_REPORT_URL="https://bugs.centos.org/"
                CENTOS_MANTISBT_PROJECT="CentOS-7"
                CENTOS_MANTISBT_PROJECT_VERSION="7"
                REDHAT_SUPPORT_PRODUCT="centos"
                REDHAT_SUPPORT_PRODUCT_VERSION="7"
                """)),

            # YumDnf on Centos 7 - nothing v16 or higher
            dict(
                version="v16.1.3",
                release="",
                err_text="does not support",
                os_release=dedent("""
                NAME="CentOS Linux"
                VERSION="7 (Core)"
                ID="centos"
                ID_LIKE="rhel fedora"
                VERSION_ID="7"
                PRETTY_NAME="CentOS Linux 7 (Core)"
                ANSI_COLOR="0;31"
                CPE_NAME="cpe:/o:centos:centos:7"
                HOME_URL="https://www.centos.org/"
                BUG_REPORT_URL="https://bugs.centos.org/"
                CENTOS_MANTISBT_PROJECT="CentOS-7"
                CENTOS_MANTISBT_PROJECT_VERSION="7"
                REDHAT_SUPPORT_PRODUCT="centos"
                REDHAT_SUPPORT_PRODUCT_VERSION="7"
                """)),
        ])
    @mock.patch('cephadm.find_executable', return_value='foo')
    def test_distro_validation(self, _find_executable, values, cephadm_fs):
        os_release = values['os_release']
        release = values['release']
        version = values['version']
        err_text = values['err_text']

        cephadm_fs.create_file('/etc/os-release', contents=os_release)
        ctx = _cephadm.CephadmContext()
        ctx.repo_url = 'http://localhost'
        pkg = _cephadm.create_packager(ctx, stable=release, version=version)

        if err_text:
            with pytest.raises(_cephadm.Error, match=err_text):
                pkg.validate()
        else:
            with mock.patch('cephadm.urlopen', return_value=None):
                pkg.validate()

    @pytest.mark.parametrize('values',
        [
            # Apt - not checked
            dict(
                version="",
                release="pacific",
                err_text="",
                os_release=dedent("""
                NAME="Ubuntu"
                VERSION="20.04 LTS (Focal Fossa)"
                ID=ubuntu
                ID_LIKE=debian
                PRETTY_NAME="Ubuntu 20.04 LTS"
                VERSION_ID="20.04"
                HOME_URL="https://www.ubuntu.com/"
                SUPPORT_URL="https://help.ubuntu.com/"
                BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
                PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
                VERSION_CODENAME=focal
                UBUNTU_CODENAME=focal
                """)),

            # YumDnf on Centos8 - force failure
            dict(
                version="",
                release="foobar",
                err_text="failed to fetch repository metadata",
                os_release=dedent("""
                NAME="CentOS Linux"
                VERSION="8 (Core)"
                ID="centos"
                ID_LIKE="rhel fedora"
                VERSION_ID="8"
                PLATFORM_ID="platform:el8"
                PRETTY_NAME="CentOS Linux 8 (Core)"
                ANSI_COLOR="0;31"
                CPE_NAME="cpe:/o:centos:centos:8"
                HOME_URL="https://www.centos.org/"
                BUG_REPORT_URL="https://bugs.centos.org/"
                CENTOS_MANTISBT_PROJECT="CentOS-8"
                CENTOS_MANTISBT_PROJECT_VERSION="8"
                REDHAT_SUPPORT_PRODUCT="centos"
                REDHAT_SUPPORT_PRODUCT_VERSION="8"
                """)),
        ])
    @mock.patch('cephadm.find_executable', return_value='foo')
    @mock.patch('cephadm.logger')
    def test_http_validation(self, _logger, _find_executable, values, cephadm_fs):
        from urllib.error import HTTPError

        os_release = values['os_release']
        release = values['release']
        version = values['version']
        err_text = values['err_text']

        cephadm_fs.create_file('/etc/os-release', contents=os_release)
        ctx = _cephadm.CephadmContext()
        ctx.repo_url = 'http://localhost'
        pkg = _cephadm.create_packager(ctx, stable=release, version=version)

        with mock.patch('cephadm.urlopen') as _urlopen:
            _urlopen.side_effect = HTTPError(ctx.repo_url, 404, "not found", None, fp=None)
            if err_text:
                with pytest.raises(_cephadm.Error, match=err_text):
                    pkg.validate()
            else:
                pkg.validate()


class TestPull:

    @mock.patch('time.sleep')
    @mock.patch('cephadm.call', return_value=('', '', 0))
    @mock.patch('cephadm.get_image_info_from_inspect', return_value={})
    @mock.patch('cephadm.logger')
    def test_error(self, _logger, _get_image_info_from_inspect, _call, _sleep):
        ctx = _cephadm.CephadmContext()
        ctx.container_engine = mock_podman()
        ctx.insecure = False

        _call.return_value = ('', '', 0)
        retval = _cephadm.command_pull(ctx)
        assert retval == 0

        err = 'maximum retries reached'

        _call.return_value = ('', 'foobar', 1)
        with pytest.raises(_cephadm.Error) as e:
            _cephadm.command_pull(ctx)
        assert err not in str(e.value)

        _call.return_value = ('', 'net/http: TLS handshake timeout', 1)
        with pytest.raises(_cephadm.Error) as e:
            _cephadm.command_pull(ctx)
        assert err in str(e.value)

    @mock.patch('cephadm.get_image_info_from_inspect', return_value={})
    @mock.patch('cephadm.infer_local_ceph_image', return_value='last_local_ceph_image')
    def test_image(self, _infer_local_ceph_image, _get_image_info_from_inspect):
        cmd = ['pull']
        with with_cephadm_ctx(cmd) as ctx:
            retval = _cephadm.command_pull(ctx)
            assert retval == 0
            assert ctx.image == _cephadm.DEFAULT_IMAGE

        with mock.patch.dict(os.environ, {"CEPHADM_IMAGE": 'cephadm_image_environ'}):
            cmd = ['pull']
            with with_cephadm_ctx(cmd) as ctx:
                retval = _cephadm.command_pull(ctx)
                assert retval == 0
                assert ctx.image == 'cephadm_image_environ'

            cmd = ['--image', 'cephadm_image_param', 'pull']
            with with_cephadm_ctx(cmd) as ctx:
                retval = _cephadm.command_pull(ctx)
                assert retval == 0
                assert ctx.image == 'cephadm_image_param'


class TestApplySpec:

    def test_extract_host_info_from_applied_spec(self, cephadm_fs):
        yaml = '''---
service_type: host
hostname: vm-00
addr: 192.168.122.44
labels:
- example1
- example2
---
service_type: host
hostname: vm-01
addr: 192.168.122.247
labels:
- grafana
---
service_type: host
hostname: vm-02
---
---
service_type: rgw
service_id: myrgw
spec:
  rgw_frontend_ssl_certificate: |
    -----BEGIN PRIVATE KEY-----
    V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFt
    ZXQsIGNvbnNldGV0dXIgc2FkaXBzY2luZyBlbGl0ciwgc2VkIGRpYW0gbm9udW15
    IGVpcm1vZCB0ZW1wb3IgaW52aWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWdu
    YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg
    ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=
    -----END PRIVATE KEY-----
    -----BEGIN CERTIFICATE-----
    V2VyIGRhcyBsaWVzdCBpc3QgZG9vZi4gTG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFt
    ZXQsIGNvbnNldGV0dXIgc2FkaXBzY2luZyBlbGl0ciwgc2VkIGRpYW0gbm9udW15
    IGVpcm1vZCB0ZW1wb3IgaW52aWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWdu
    YSBhbGlxdXlhbSBlcmF0LCBzZWQgZGlhbSB2b2x1cHR1YS4gQXQgdmVybyBlb3Mg
    ZXQgYWNjdXNhbSBldCBqdXN0byBkdW8=
    -----END CERTIFICATE-----
  ssl: true
---
'''
        cephadm_fs.create_file('spec.yml', contents=yaml)

        retdic = [{'hostname': 'vm-00', 'addr': '192.168.122.44'},
                  {'hostname': 'vm-01', 'addr': '192.168.122.247'},
                  {'hostname': 'vm-02',}]

        with open('spec.yml') as f:
            dic = _cephadm._extract_host_info_from_applied_spec(f)
            assert dic == retdic

    @mock.patch('cephadm.call', return_value=('', '', 0))
    @mock.patch('cephadm.logger')
    def test_distribute_ssh_keys(self, _logger, _call):
        ctx = _cephadm.CephadmContext()
        ctx.ssh_public_key = None
        ctx.ssh_user = 'root'

        host_spec = {'service_type': 'host', 'hostname': 'vm-02', 'addr': '192.168.122.165'}

        retval = _cephadm._distribute_ssh_keys(ctx, host_spec, 'bootstrap_hostname')

        assert retval == 0

        _call.return_value = ('', '', 1)

        retval = _cephadm._distribute_ssh_keys(ctx, host_spec, 'bootstrap_hostname')

        assert retval == 1


class TestSNMPGateway:
    V2c_config = {
        'snmp_community': 'public',
        'destination': '192.168.1.10:162',
        'snmp_version': 'V2c',
    }
    V3_no_priv_config = {
        'destination': '192.168.1.10:162',
        'snmp_version': 'V3',
        'snmp_v3_auth_username': 'myuser',
        'snmp_v3_auth_password': 'mypassword',
        'snmp_v3_auth_protocol': 'SHA',
        'snmp_v3_engine_id': '8000C53F00000000',
    }
    V3_priv_config = {
        'destination': '192.168.1.10:162',
        'snmp_version': 'V3',
        'snmp_v3_auth_username': 'myuser',
        'snmp_v3_auth_password': 'mypassword',
        'snmp_v3_auth_protocol': 'SHA',
        'snmp_v3_priv_protocol': 'DES',
        'snmp_v3_priv_password': 'mysecret',
        'snmp_v3_engine_id': '8000C53F00000000',
    }
    no_destination_config = {
        'snmp_version': 'V3',
        'snmp_v3_auth_username': 'myuser',
        'snmp_v3_auth_password': 'mypassword',
        'snmp_v3_auth_protocol': 'SHA',
        'snmp_v3_priv_protocol': 'DES',
        'snmp_v3_priv_password': 'mysecret',
        'snmp_v3_engine_id': '8000C53F00000000',
    }
    bad_version_config = {
        'snmp_community': 'public',
        'destination': '192.168.1.10:162',
        'snmp_version': 'V1',
    }

    def test_unit_run_V2c(self, cephadm_fs):
        fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
        with with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks={}) as ctx:
            import json
            ctx.config_json = json.dumps(self.V2c_config)
            ctx.fsid = fsid
            ctx.tcp_ports = '9464'
            _cephadm.get_parm.return_value = self.V2c_config
            c = _cephadm.get_container(ctx, fsid, 'snmp-gateway', 'daemon_id')

            _cephadm.make_data_dir(ctx, fsid, 'snmp-gateway', 'daemon_id')

            _cephadm.create_daemon_dirs(ctx, fsid, 'snmp-gateway', 'daemon_id', 0, 0)
            with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/snmp-gateway.conf', 'r') as f:
                conf = f.read().rstrip()
                assert conf == 'SNMP_NOTIFIER_COMMUNITY=public'

            _cephadm.deploy_daemon_units(
                ctx,
                fsid,
                0, 0,
                'snmp-gateway',
                'daemon_id',
                c,
                True, True
            )
            with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/unit.run', 'r') as f:
                run_cmd = f.readlines()[-1].rstrip()
                assert run_cmd.endswith('docker.io/maxwo/snmp-notifier:v1.2.1 --web.listen-address=:9464 --snmp.destination=192.168.1.10:162 --snmp.version=V2c --log.level=info --snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl')

    def test_unit_run_V3_noPriv(self, cephadm_fs):
        fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
        with with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks={}) as ctx:
            import json
            ctx.config_json = json.dumps(self.V3_no_priv_config)
            ctx.fsid = fsid
            ctx.tcp_ports = '9465'
            _cephadm.get_parm.return_value = self.V3_no_priv_config
            c = _cephadm.get_container(ctx, fsid, 'snmp-gateway', 'daemon_id')

            _cephadm.make_data_dir(ctx, fsid, 'snmp-gateway', 'daemon_id')

            _cephadm.create_daemon_dirs(ctx, fsid, 'snmp-gateway', 'daemon_id', 0, 0)
            with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/snmp-gateway.conf', 'r') as f:
                conf = f.read()
                assert conf == 'SNMP_NOTIFIER_AUTH_USERNAME=myuser\nSNMP_NOTIFIER_AUTH_PASSWORD=mypassword\n'

            _cephadm.deploy_daemon_units(
                ctx,
                fsid,
                0, 0,
                'snmp-gateway',
                'daemon_id',
                c,
                True, True
            )
            with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/unit.run', 'r') as f:
                run_cmd = f.readlines()[-1].rstrip()
                assert run_cmd.endswith('docker.io/maxwo/snmp-notifier:v1.2.1 --web.listen-address=:9465 --snmp.destination=192.168.1.10:162 --snmp.version=V3 --log.level=info --snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl --snmp.authentication-enabled --snmp.authentication-protocol=SHA --snmp.security-engine-id=8000C53F00000000')

    def test_unit_run_V3_Priv(self, cephadm_fs):
        fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
        with with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks={}) as ctx:
            import json
            ctx.config_json = json.dumps(self.V3_priv_config)
            ctx.fsid = fsid
            ctx.tcp_ports = '9464'
            _cephadm.get_parm.return_value = self.V3_priv_config
            c = _cephadm.get_container(ctx, fsid, 'snmp-gateway', 'daemon_id')

            _cephadm.make_data_dir(ctx, fsid, 'snmp-gateway', 'daemon_id')

            _cephadm.create_daemon_dirs(ctx, fsid, 'snmp-gateway', 'daemon_id', 0, 0)
            with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/snmp-gateway.conf', 'r') as f:
                conf = f.read()
                assert conf == 'SNMP_NOTIFIER_AUTH_USERNAME=myuser\nSNMP_NOTIFIER_AUTH_PASSWORD=mypassword\nSNMP_NOTIFIER_PRIV_PASSWORD=mysecret\n'

            _cephadm.deploy_daemon_units(
                ctx,
                fsid,
                0, 0,
                'snmp-gateway',
                'daemon_id',
                c,
                True, True
            )
            with open(f'/var/lib/ceph/{fsid}/snmp-gateway.daemon_id/unit.run', 'r') as f:
                run_cmd = f.readlines()[-1].rstrip()
                assert run_cmd.endswith('docker.io/maxwo/snmp-notifier:v1.2.1 --web.listen-address=:9464 --snmp.destination=192.168.1.10:162 --snmp.version=V3 --log.level=info --snmp.trap-description-template=/etc/snmp_notifier/description-template.tpl --snmp.authentication-enabled --snmp.authentication-protocol=SHA --snmp.security-engine-id=8000C53F00000000 --snmp.private-enabled --snmp.private-protocol=DES')

    def test_unit_run_no_dest(self, cephadm_fs):
        fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
        with with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks={}) as ctx:
            import json
            ctx.config_json = json.dumps(self.no_destination_config)
            ctx.fsid = fsid
            ctx.tcp_ports = '9464'
            _cephadm.get_parm.return_value = self.no_destination_config

            with pytest.raises(Exception) as e:
                c = _cephadm.get_container(ctx, fsid, 'snmp-gateway', 'daemon_id')
            assert str(e.value) == "config is missing destination attribute(<ip>:<port>) of the target SNMP listener"

    def test_unit_run_bad_version(self, cephadm_fs):
        fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
        with with_cephadm_ctx(['--image=docker.io/maxwo/snmp-notifier:v1.2.1'], list_networks={}) as ctx:
            import json
            ctx.config_json = json.dumps(self.bad_version_config)
            ctx.fsid = fsid
            ctx.tcp_ports = '9464'
            _cephadm.get_parm.return_value = self.bad_version_config

            with pytest.raises(Exception) as e:
                c = _cephadm.get_container(ctx, fsid, 'snmp-gateway', 'daemon_id')
            assert str(e.value) == 'not a valid snmp version: V1'


class TestNetworkValidation:

    def test_ipv4_subnet(self):
        rc, v, msg = _cephadm.check_subnet('192.168.1.0/24')
        assert rc == 0 and v[0] == 4

    def test_ipv4_subnet_list(self):
        rc, v, msg = _cephadm.check_subnet('192.168.1.0/24,10.90.90.0/24')
        assert rc == 0 and not msg

    def test_ipv4_subnet_list_with_spaces(self):
        rc, v, msg = _cephadm.check_subnet('192.168.1.0/24, 10.90.90.0/24 ')
        assert rc == 0 and not msg

    def test_ipv4_subnet_badlist(self):
        rc, v, msg = _cephadm.check_subnet('192.168.1.0/24,192.168.1.1')
        assert rc == 1 and msg

    def test_ipv4_subnet_mixed(self):
        rc, v, msg = _cephadm.check_subnet('192.168.100.0/24,fe80::/64')
        assert rc == 0 and v == [4, 6]

    def test_ipv6_subnet(self):
        rc, v, msg = _cephadm.check_subnet('fe80::/64')
        assert rc == 0 and v[0] == 6

    def test_subnet_mask_missing(self):
        rc, v, msg = _cephadm.check_subnet('192.168.1.58')
        assert rc == 1 and msg

    def test_subnet_mask_junk(self):
        rc, v, msg = _cephadm.check_subnet('wah')
        assert rc == 1 and msg

    def test_ip_in_subnet(self):
        # valid ip and only one valid subnet
        rc = _cephadm.ip_in_subnets('192.168.100.1', '192.168.100.0/24')
        assert rc is True

        # valid ip and valid subnets list without spaces
        rc = _cephadm.ip_in_subnets('192.168.100.1', '192.168.100.0/24,10.90.90.0/24')
        assert rc is True

        # valid ip and valid subnets list with spaces
        rc = _cephadm.ip_in_subnets('10.90.90.2', '192.168.1.0/24, 192.168.100.0/24, 10.90.90.0/24')
        assert rc is True

        # valid ip that doesn't belong to any subnet
        rc = _cephadm.ip_in_subnets('192.168.100.2', '192.168.50.0/24, 10.90.90.0/24')
        assert rc is False

        # valid ip that doesn't belong to the subnet (only 14 hosts)
        rc = _cephadm.ip_in_subnets('192.168.100.20', '192.168.100.0/28')
        assert rc is False

        # valid ip and valid IPV6 network
        rc = _cephadm.ip_in_subnets('fe80::5054:ff:fef4:873a', 'fe80::/64')
        assert rc is True

        # valid wrapped ip and valid IPV6 network
        rc = _cephadm.ip_in_subnets('[fe80::5054:ff:fef4:873a]', 'fe80::/64')
        assert rc is True

        # valid ip and that doesn't belong to IPV6 network
        rc = _cephadm.ip_in_subnets('fe80::5054:ff:fef4:873a', '2001:db8:85a3::/64')
        assert rc is False

        # invalid IPv4 and valid subnets list
        with pytest.raises(Exception):
            rc = _cephadm.ip_in_subnets('10.90.200.', '192.168.1.0/24, 192.168.100.0/24, 10.90.90.0/24')

        # invalid IPv6 and valid subnets list
        with pytest.raises(Exception):
            rc = _cephadm.ip_in_subnets('fe80:2030:31:24', 'fe80::/64')

    @pytest.mark.parametrize("conf", [
    """[global]
public_network='1.1.1.0/24,2.2.2.0/24'
cluster_network="3.3.3.0/24, 4.4.4.0/24"
""",

    """[global]
public_network=" 1.1.1.0/24,2.2.2.0/24 "
cluster_network=3.3.3.0/24, 4.4.4.0/24
""",

    """[global]
public_network= 1.1.1.0/24,  2.2.2.0/24
cluster_network='3.3.3.0/24,4.4.4.0/24'
"""])
    @mock.patch('cephadm.list_networks')
    @mock.patch('cephadm.logger')
    def test_get_networks_from_conf(self, _logger, _list_networks, conf, cephadm_fs):
        cephadm_fs.create_file('ceph.conf', contents=conf)
        _list_networks.return_value = {'1.1.1.0/24': {'eth0': ['1.1.1.1']},
                                       '2.2.2.0/24': {'eth1': ['2.2.2.2']},
                                       '3.3.3.0/24': {'eth2': ['3.3.3.3']},
                                       '4.4.4.0/24': {'eth3': ['4.4.4.4']}}
        ctx = _cephadm.CephadmContext()
        ctx.config = 'ceph.conf'
        ctx.mon_ip = '1.1.1.1'
        ctx.cluster_network = None
        # what the cephadm module does with the public network string is
        # [x.strip() for x in out.split(',')]
        # so we must make sure our output, through that alteration,
        # generates correctly formatted networks
        def _str_to_networks(s):
            return [x.strip() for x in s.split(',')]
        public_network = _cephadm.get_public_net_from_cfg(ctx)
        assert _str_to_networks(public_network) == ['1.1.1.0/24', '2.2.2.0/24']
        cluster_network, ipv6 = _cephadm.prepare_cluster_network(ctx)
        assert not ipv6
        assert _str_to_networks(cluster_network) == ['3.3.3.0/24', '4.4.4.0/24']


class TestSysctl:
    @mock.patch('cephadm.sysctl_get')
    def test_filter_sysctl_settings(self, _sysctl_get):
        ctx = _cephadm.CephadmContext()
        input = [
            # comment-only lines should be ignored
            "# just a comment",
            # As should whitespace-only lines",
            " \t ",
            " = \t ",
            # inline comments are stripped when querying
            "something = value # inline comment",
            "fs.aio-max-nr = 1048576",
            "kernel.pid_max = 4194304",
            "vm.lowmem_reserve_ratio = 256\t256\t32\t0\t0",
            " vm.max_map_count = 65530 ",
            " vm.max_map_count = 65530 ",
        ]
        _sysctl_get.side_effect = [
            "value",
            "1",
            "4194304",
            "256\t256\t32\t0\t0",
            "65530",
            "something else",
        ]
        result = _cephadm.filter_sysctl_settings(ctx, input)
        assert len(_sysctl_get.call_args_list) == 6
        assert _sysctl_get.call_args_list[0].args[1] == "something"
        assert _sysctl_get.call_args_list[1].args[1] == "fs.aio-max-nr"
        assert _sysctl_get.call_args_list[2].args[1] == "kernel.pid_max"
        assert _sysctl_get.call_args_list[3].args[1] == "vm.lowmem_reserve_ratio"
        assert _sysctl_get.call_args_list[4].args[1] == "vm.max_map_count"
        assert _sysctl_get.call_args_list[5].args[1] == "vm.max_map_count"
        assert result == [
            "fs.aio-max-nr = 1048576",
            " vm.max_map_count = 65530 ",
        ]


class TestJaeger:
    single_es_node_conf = {
        'elasticsearch_nodes': 'http://192.168.0.1:9200'}
    multiple_es_nodes_conf = {
        'elasticsearch_nodes': 'http://192.168.0.1:9200,http://192.168.0.2:9300'}
    agent_conf = {
        'collector_nodes': 'test:14250'}

    def test_single_es(self, cephadm_fs):
        fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
        with with_cephadm_ctx(['--image=quay.io/jaegertracing/jaeger-collector:1.29'], list_networks={}) as ctx:
            import json
            ctx.config_json = json.dumps(self.single_es_node_conf)
            ctx.fsid = fsid
            c = _cephadm.get_container(ctx, fsid, 'jaeger-collector', 'daemon_id')
            _cephadm.create_daemon_dirs(ctx, fsid, 'jaeger-collector', 'daemon_id', 0, 0)
            _cephadm.deploy_daemon_units(
                ctx,
                fsid,
                0, 0,
                'jaeger-collector',
                'daemon_id',
                c,
                True, True
            )
            with open(f'/var/lib/ceph/{fsid}/jaeger-collector.daemon_id/unit.run', 'r') as f:
                run_cmd = f.readlines()[-1].rstrip()
                assert run_cmd.endswith('SPAN_STORAGE_TYPE=elasticsearch -e ES_SERVER_URLS=http://192.168.0.1:9200 quay.io/jaegertracing/jaeger-collector:1.29')

    def test_multiple_es(self, cephadm_fs):
        fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
        with with_cephadm_ctx(['--image=quay.io/jaegertracing/jaeger-collector:1.29'], list_networks={}) as ctx:
            import json
            ctx.config_json = json.dumps(self.multiple_es_nodes_conf)
            ctx.fsid = fsid
            c = _cephadm.get_container(ctx, fsid, 'jaeger-collector', 'daemon_id')
            _cephadm.create_daemon_dirs(ctx, fsid, 'jaeger-collector', 'daemon_id', 0, 0)
            _cephadm.deploy_daemon_units(
                ctx,
                fsid,
                0, 0,
                'jaeger-collector',
                'daemon_id',
                c,
                True, True
            )
            with open(f'/var/lib/ceph/{fsid}/jaeger-collector.daemon_id/unit.run', 'r') as f:
                run_cmd = f.readlines()[-1].rstrip()
                assert run_cmd.endswith('SPAN_STORAGE_TYPE=elasticsearch -e ES_SERVER_URLS=http://192.168.0.1:9200,http://192.168.0.2:9300 quay.io/jaegertracing/jaeger-collector:1.29')

    def test_jaeger_agent(self, cephadm_fs):
        fsid = 'ca734440-3dc6-11ec-9b98-5254002537a6'
        with with_cephadm_ctx(['--image=quay.io/jaegertracing/jaeger-agent:1.29'], list_networks={}) as ctx:
            import json
            ctx.config_json = json.dumps(self.agent_conf)
            ctx.fsid = fsid
            c = _cephadm.get_container(ctx, fsid, 'jaeger-agent', 'daemon_id')
            _cephadm.create_daemon_dirs(ctx, fsid, 'jaeger-agent', 'daemon_id', 0, 0)
            _cephadm.deploy_daemon_units(
                ctx,
                fsid,
                0, 0,
                'jaeger-agent',
                'daemon_id',
                c,
                True, True
            )
            with open(f'/var/lib/ceph/{fsid}/jaeger-agent.daemon_id/unit.run', 'r') as f:
                run_cmd = f.readlines()[-1].rstrip()
                assert run_cmd.endswith('quay.io/jaegertracing/jaeger-agent:1.29 --reporter.grpc.host-port=test:14250 --processor.jaeger-compact.server-host-port=6799')


class TestRescan(fake_filesystem_unittest.TestCase):

    def setUp(self):
        self.setUpPyfakefs()
        if not fake_filesystem.is_root():
            fake_filesystem.set_uid(0)

        self.fs.create_dir('/sys/class')
        self.ctx = _cephadm.CephadmContext()
        self.ctx.func = _cephadm.command_rescan_disks

    @mock.patch('cephadm.logger')
    def test_no_hbas(self, _logger):
        out = _cephadm.command_rescan_disks(self.ctx)
        assert out == 'Ok. No compatible HBAs found'

    @mock.patch('cephadm.logger')
    def test_success(self, _logger):
        self.fs.create_file('/sys/class/scsi_host/host0/scan')
        self.fs.create_file('/sys/class/scsi_host/host1/scan')
        out = _cephadm.command_rescan_disks(self.ctx)
        assert out.startswith('Ok. 2 adapters detected: 2 rescanned, 0 skipped, 0 failed')

    @mock.patch('cephadm.logger')
    def test_skip_usb_adapter(self, _logger):
        self.fs.create_file('/sys/class/scsi_host/host0/scan')
        self.fs.create_file('/sys/class/scsi_host/host1/scan')
        self.fs.create_file('/sys/class/scsi_host/host1/proc_name', contents='usb-storage')
        out = _cephadm.command_rescan_disks(self.ctx)
        assert out.startswith('Ok. 2 adapters detected: 1 rescanned, 1 skipped, 0 failed')

    @mock.patch('cephadm.logger')
    def test_skip_unknown_adapter(self, _logger):
        self.fs.create_file('/sys/class/scsi_host/host0/scan')
        self.fs.create_file('/sys/class/scsi_host/host1/scan')
        self.fs.create_file('/sys/class/scsi_host/host1/proc_name', contents='unknown')
        out = _cephadm.command_rescan_disks(self.ctx)
        assert out.startswith('Ok. 2 adapters detected: 1 rescanned, 1 skipped, 0 failed')
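# --- reviewer annotation, not part of the upstream test module ---
# A minimal usage sketch of the network helpers exercised by
# TestNetworkValidation above, assuming only the call shapes those tests
# already demonstrate (check_subnet() -> (rc, ip_versions, msg) and
# ip_in_subnets(ip, subnets) -> bool):
#
#     rc, versions, msg = _cephadm.check_subnet('192.168.1.0/24,fe80::/64')
#     assert rc == 0 and versions == [4, 6]
#     assert _cephadm.ip_in_subnets('192.168.1.5', '192.168.1.0/24')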
108029
39.59752
870
py
null
ceph-main/src/cephadm/tests/test_container_engine.py
from unittest import mock

import pytest

from tests.fixtures import with_cephadm_ctx, import_cephadm

_cephadm = import_cephadm()


def test_container_engine():
    with pytest.raises(NotImplementedError):
        _cephadm.ContainerEngine()

    class PhonyContainerEngine(_cephadm.ContainerEngine):
        EXE = "true"

    with mock.patch("cephadm.find_program") as find_program:
        find_program.return_value = "/usr/bin/true"
        pce = PhonyContainerEngine()
        assert str(pce) == "true (/usr/bin/true)"


def test_podman():
    with mock.patch("cephadm.find_program") as find_program:
        find_program.return_value = "/usr/bin/podman"
        pm = _cephadm.Podman()
        find_program.assert_called()

        with pytest.raises(RuntimeError):
            pm.version

        with mock.patch("cephadm.call_throws") as call_throws:
            call_throws.return_value = ("4.9.9", None, None)
            with with_cephadm_ctx([]) as ctx:
                pm.get_version(ctx)
        assert pm.version == (4, 9, 9)
        assert str(pm) == "podman (/usr/bin/podman) version 4.9.9"


def test_podman_badversion():
    with mock.patch("cephadm.find_program") as find_program:
        find_program.return_value = "/usr/bin/podman"
        pm = _cephadm.Podman()
        find_program.assert_called()

        with mock.patch("cephadm.call_throws") as call_throws:
            call_throws.return_value = ("4.10.beta2", None, None)
            with with_cephadm_ctx([]) as ctx:
                with pytest.raises(ValueError):
                    pm.get_version(ctx)


def test_docker():
    with mock.patch("cephadm.find_program") as find_program:
        find_program.return_value = "/usr/bin/docker"
        docker = _cephadm.Docker()
        assert str(docker) == "docker (/usr/bin/docker)"
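# --- reviewer annotation, not part of the upstream test module ---
# The version tests above imply that Podman.get_version() splits the reported
# version string on '.' and converts each component to int, which is why
# "4.10.beta2" raises ValueError. A rough sketch of that assumed parsing,
# for illustration only (the real logic lives inside cephadm):
#
#     def _parse_version(s):
#         return tuple(int(x) for x in s.split('.'))  # "4.9.9" -> (4, 9, 9)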
1810
31.927273
66
py
null
ceph-main/src/cephadm/tests/test_enclosure.py
import pytest

from unittest import mock

from tests.fixtures import host_sysfs, import_cephadm

_cephadm = import_cephadm()


@pytest.fixture
def enclosure(host_sysfs):
    e = _cephadm.Enclosure(
        enc_id='1',
        enc_path='/sys/class/scsi_generic/sg2/device/enclosure/0:0:1:0',
        dev_path='/sys/class/scsi_generic/sg2')
    yield e


class TestEnclosure:

    def test_enc_metadata(self, enclosure):
        """Check metadata for the enclosure e.g. vendor and model"""

        assert enclosure.vendor == "EnclosuresInc"
        assert enclosure.components == '12'
        assert enclosure.model == "D12"
        assert enclosure.enc_id == '1'

        assert enclosure.ses_paths == ['sg2']
        assert enclosure.path_count == 1

    def test_enc_slots(self, enclosure):
        """Check slot count"""

        assert len(enclosure.slot_map) == 12

    def test_enc_slot_format(self, enclosure):
        """Check the attributes of a slot are as expected"""

        assert all(k in ['fault', 'locate', 'serial', 'status']
                   for k, _v in enclosure.slot_map['0'].items())

    def test_enc_slot_status(self, enclosure):
        """Check the number of occupied slots is correct"""

        occupied_slots = [slot_id for slot_id in enclosure.slot_map
                          if enclosure.slot_map[slot_id].get('status').upper() == 'OK']

        assert len(occupied_slots) == 6

    def test_enc_disk_count(self, enclosure):
        """Check the disks found matches the slot info"""

        assert len(enclosure.device_lookup) == 6
        assert enclosure.device_count == 6

    def test_enc_device_serial(self, enclosure):
        """Check the device serial numbers are as expected"""

        assert all(fake_serial in enclosure.device_lookup.keys()
                   for fake_serial in [
                       'fake000',
                       'fake001',
                       'fake002',
                       'fake003',
                       'fake004',
                       'fake005'])

    def test_enc_slot_to_serial(self, enclosure):
        """Check serial number to slot matches across slot_map and device_lookup"""

        for serial, slot in enclosure.device_lookup.items():
            assert enclosure.slot_map[slot].get('serial') == serial
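# --- reviewer annotation, not part of the upstream test module ---
# Hypothetical interactive use of the Enclosure attributes asserted above,
# built only from the fields the tests rely on (slot_map, device_lookup):
#
#     occupied = {s: m for s, m in enc.slot_map.items()
#                 if m['status'].upper() == 'OK'}      # 6 of the 12 slots
#     slot_of_fake000 = enc.device_lookup['fake000']   # serial -> slot id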
2318
30.767123
87
py
null
ceph-main/src/cephadm/tests/test_ingress.py
from unittest import mock

import json

import pytest

from tests.fixtures import with_cephadm_ctx, cephadm_fs, import_cephadm

_cephadm = import_cephadm()


SAMPLE_UUID = "2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae"
SAMPLE_HAPROXY_IMAGE = "registry.example.net/haproxy/haproxy:latest"
SAMPLE_KEEPALIVED_IMAGE = "registry.example.net/keepalive/keepalived:latest"


def good_haproxy_json():
    return haproxy_json(files=True)


def haproxy_json(**kwargs):
    if kwargs.get("files"):
        return {
            "files": {
                "haproxy.cfg": "",
            },
        }
    return {}


def good_keepalived_json():
    return keepalived_json(files=True)


def keepalived_json(**kwargs):
    if kwargs.get("files"):
        return {
            "files": {
                "keepalived.conf": "",
            },
        }
    return {}


@pytest.mark.parametrize(
    "args",
    # args: <fsid>, <daemon_id>, <config_json>, <image>
    [
        # fail due to: invalid fsid
        (["foobar", "wilma", good_haproxy_json(), SAMPLE_HAPROXY_IMAGE]),
        # fail due to: invalid daemon_id
        ([SAMPLE_UUID, "", good_haproxy_json(), SAMPLE_HAPROXY_IMAGE]),
        # fail due to: invalid image
        ([SAMPLE_UUID, "wilma", good_haproxy_json(), ""]),
        # fail due to: no files in config_json
        (
            [
                SAMPLE_UUID,
                "wilma",
                haproxy_json(files=False),
                SAMPLE_HAPROXY_IMAGE,
            ]
        ),
    ],
)
def test_haproxy_validation_errors(args):
    with pytest.raises(_cephadm.Error):
        with with_cephadm_ctx([]) as ctx:
            _cephadm.HAproxy(ctx, *args)


def test_haproxy_init():
    with with_cephadm_ctx([]) as ctx:
        ctx.config_json = json.dumps(good_haproxy_json())
        ctx.image = SAMPLE_HAPROXY_IMAGE
        hap = _cephadm.HAproxy.init(
            ctx,
            SAMPLE_UUID,
            "wilma",
        )
        assert hap.fsid == SAMPLE_UUID
        assert hap.daemon_id == "wilma"
        assert hap.image == SAMPLE_HAPROXY_IMAGE


def test_haproxy_container_mounts():
    with with_cephadm_ctx([]) as ctx:
        hap = _cephadm.HAproxy(
            ctx,
            SAMPLE_UUID,
            "wilma",
            good_haproxy_json(),
            SAMPLE_HAPROXY_IMAGE,
        )
        cmounts = hap.get_container_mounts("/var/tmp")
        assert len(cmounts) == 1
        assert cmounts["/var/tmp/haproxy"] == "/var/lib/haproxy"


def test_haproxy_get_daemon_name():
    with with_cephadm_ctx([]) as ctx:
        hap = _cephadm.HAproxy(
            ctx,
            SAMPLE_UUID,
            "wilma",
            good_haproxy_json(),
            SAMPLE_HAPROXY_IMAGE,
        )
        assert hap.get_daemon_name() == "haproxy.wilma"


def test_haproxy_get_container_name():
    with with_cephadm_ctx([]) as ctx:
        hap = _cephadm.HAproxy(
            ctx,
            SAMPLE_UUID,
            "wilma",
            good_haproxy_json(),
            SAMPLE_HAPROXY_IMAGE,
        )
        name1 = hap.get_container_name()
        assert (
            name1 == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-haproxy.wilma"
        )
        name2 = hap.get_container_name(desc="extra")
        assert (
            name2
            == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-haproxy.wilma-extra"
        )


def test_haproxy_get_daemon_args():
    with with_cephadm_ctx([]) as ctx:
        hap = _cephadm.HAproxy(
            ctx,
            SAMPLE_UUID,
            "wilma",
            good_haproxy_json(),
            SAMPLE_HAPROXY_IMAGE,
        )
        args = hap.get_daemon_args()
        assert args == ["haproxy", "-f", "/var/lib/haproxy/haproxy.cfg"]


@mock.patch("cephadm.logger")
def test_haproxy_create_daemon_dirs(_logger, cephadm_fs):
    with with_cephadm_ctx([]) as ctx:
        hap = _cephadm.HAproxy(
            ctx,
            SAMPLE_UUID,
            "wilma",
            good_haproxy_json(),
            SAMPLE_HAPROXY_IMAGE,
        )
        with pytest.raises(OSError):
            hap.create_daemon_dirs("/var/tmp", 45, 54)
        cephadm_fs.create_dir("/var/tmp")
        hap.create_daemon_dirs("/var/tmp", 45, 54)
        # TODO: make assertions about the dirs created


def test_haproxy_extract_uid_gid_haproxy():
    with with_cephadm_ctx([]) as ctx:
        hap = _cephadm.HAproxy(
            ctx,
            SAMPLE_UUID,
            "wilma",
            good_haproxy_json(),
            SAMPLE_HAPROXY_IMAGE,
        )
        with mock.patch("cephadm.CephContainer") as cc:
            cc.return_value.run.return_value = "500 500"
            uid, gid = hap.extract_uid_gid_haproxy()
            cc.return_value.run.assert_called()
        assert uid == 500
        assert gid == 500


def test_haproxy_get_sysctl_settings():
    with with_cephadm_ctx([]) as ctx:
        hap = _cephadm.HAproxy(
            ctx,
            SAMPLE_UUID,
            "wilma",
            good_haproxy_json(),
            SAMPLE_HAPROXY_IMAGE,
        )
        ss = hap.get_sysctl_settings()
        assert len(ss) == 3


@pytest.mark.parametrize(
    "args",
    # args: <fsid>, <daemon_id>, <config_json>, <image>
    [
        # fail due to: invalid fsid
        (
            [
                "foobar",
                "barney",
                good_keepalived_json(),
                SAMPLE_KEEPALIVED_IMAGE,
            ]
        ),
        # fail due to: invalid daemon_id
        ([SAMPLE_UUID, "", good_keepalived_json(), SAMPLE_KEEPALIVED_IMAGE]),
        # fail due to: invalid image
        ([SAMPLE_UUID, "barney", good_keepalived_json(), ""]),
        # fail due to: no files in config_json
        (
            [
                SAMPLE_UUID,
                "barney",
                keepalived_json(files=False),
                SAMPLE_KEEPALIVED_IMAGE,
            ]
        ),
    ],
)
def test_keepalived_validation_errors(args):
    with pytest.raises(_cephadm.Error):
        with with_cephadm_ctx([]) as ctx:
            _cephadm.Keepalived(ctx, *args)


def test_keepalived_init():
    with with_cephadm_ctx([]) as ctx:
        ctx.config_json = json.dumps(good_keepalived_json())
        ctx.image = SAMPLE_KEEPALIVED_IMAGE
        kad = _cephadm.Keepalived.init(
            ctx,
            SAMPLE_UUID,
            "barney",
        )
        assert kad.fsid == SAMPLE_UUID
        assert kad.daemon_id == "barney"
        assert kad.image == SAMPLE_KEEPALIVED_IMAGE


def test_keepalived_container_mounts():
    with with_cephadm_ctx([]) as ctx:
        kad = _cephadm.Keepalived(
            ctx,
            SAMPLE_UUID,
            "barney",
            good_keepalived_json(),
            SAMPLE_KEEPALIVED_IMAGE,
        )
        cmounts = kad.get_container_mounts("/var/tmp")
        assert len(cmounts) == 1
        assert (
            cmounts["/var/tmp/keepalived.conf"]
            == "/etc/keepalived/keepalived.conf"
        )


def test_keepalived_get_daemon_name():
    with with_cephadm_ctx([]) as ctx:
        kad = _cephadm.Keepalived(
            ctx,
            SAMPLE_UUID,
            "barney",
            good_keepalived_json(),
            SAMPLE_KEEPALIVED_IMAGE,
        )
        assert kad.get_daemon_name() == "keepalived.barney"


def test_keepalived_get_container_name():
    with with_cephadm_ctx([]) as ctx:
        kad = _cephadm.Keepalived(
            ctx,
            SAMPLE_UUID,
            "barney",
            good_keepalived_json(),
            SAMPLE_KEEPALIVED_IMAGE,
        )
        name1 = kad.get_container_name()
        assert (
            name1
            == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-keepalived.barney"
        )
        name2 = kad.get_container_name(desc="extra")
        assert (
            name2
            == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-keepalived.barney-extra"
        )


def test_keepalived_get_container_envs():
    with with_cephadm_ctx([]) as ctx:
        kad = _cephadm.Keepalived(
            ctx,
            SAMPLE_UUID,
            "barney",
            good_keepalived_json(),
            SAMPLE_KEEPALIVED_IMAGE,
        )
        args = kad.get_container_envs()
        assert args == [
            "KEEPALIVED_AUTOCONF=false",
            "KEEPALIVED_CONF=/etc/keepalived/keepalived.conf",
            "KEEPALIVED_CMD=/usr/sbin/keepalived -n -l -f /etc/keepalived/keepalived.conf",
            "KEEPALIVED_DEBUG=false",
        ]


@mock.patch("cephadm.logger")
def test_keepalived_create_daemon_dirs(_logger, cephadm_fs):
    with with_cephadm_ctx([]) as ctx:
        kad = _cephadm.Keepalived(
            ctx,
            SAMPLE_UUID,
            "barney",
            good_keepalived_json(),
            SAMPLE_KEEPALIVED_IMAGE,
        )
        with pytest.raises(OSError):
            kad.create_daemon_dirs("/var/tmp", 45, 54)
        cephadm_fs.create_dir("/var/tmp")
        kad.create_daemon_dirs("/var/tmp", 45, 54)
        # TODO: make assertions about the dirs created


def test_keepalived_extract_uid_gid_keepalived():
    with with_cephadm_ctx([]) as ctx:
        kad = _cephadm.Keepalived(
            ctx,
            SAMPLE_UUID,
            "barney",
            good_keepalived_json(),
            SAMPLE_KEEPALIVED_IMAGE,
        )
        with mock.patch("cephadm.CephContainer") as cc:
            cc.return_value.run.return_value = "500 500"
            uid, gid = kad.extract_uid_gid_keepalived()
            cc.return_value.run.assert_called()
        assert uid == 500
        assert gid == 500


def test_keepalived_get_sysctl_settings():
    with with_cephadm_ctx([]) as ctx:
        kad = _cephadm.Keepalived(
            ctx,
            SAMPLE_UUID,
            "barney",
            good_keepalived_json(),
            SAMPLE_KEEPALIVED_IMAGE,
        )
        ss = kad.get_sysctl_settings()
        assert len(ss) == 3
9840
27.037037
91
py
null
ceph-main/src/cephadm/tests/test_networks.py
import json

from textwrap import dedent
from unittest import mock

import pytest

from tests.fixtures import with_cephadm_ctx, cephadm_fs, import_cephadm

_cephadm = import_cephadm()


class TestCommandListNetworks:
    @pytest.mark.parametrize("test_input, expected", [
        (
            dedent("""
            default via 192.168.178.1 dev enxd89ef3f34260 proto dhcp metric 100
            10.0.0.0/8 via 10.4.0.1 dev tun0 proto static metric 50
            10.3.0.0/21 via 10.4.0.1 dev tun0 proto static metric 50
            10.4.0.1 dev tun0 proto kernel scope link src 10.4.0.2 metric 50
            137.1.0.0/16 via 10.4.0.1 dev tun0 proto static metric 50
            138.1.0.0/16 via 10.4.0.1 dev tun0 proto static metric 50
            139.1.0.0/16 via 10.4.0.1 dev tun0 proto static metric 50
            140.1.0.0/17 via 10.4.0.1 dev tun0 proto static metric 50
            141.1.0.0/16 via 10.4.0.1 dev tun0 proto static metric 50
            172.16.100.34 via 172.16.100.34 dev eth1 proto kernel scope link src 172.16.100.34
            192.168.122.1 dev ens3 proto dhcp scope link src 192.168.122.236 metric 100
            169.254.0.0/16 dev docker0 scope link metric 1000
            172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1
            192.168.39.0/24 dev virbr1 proto kernel scope link src 192.168.39.1 linkdown
            192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 linkdown
            192.168.178.0/24 dev enxd89ef3f34260 proto kernel scope link src 192.168.178.28 metric 100
            192.168.178.1 dev enxd89ef3f34260 proto static scope link metric 100
            195.135.221.12 via 192.168.178.1 dev enxd89ef3f34260 proto static metric 100
            """),
            {
                '172.16.100.34/32': {'eth1': {'172.16.100.34'}},
                '192.168.122.1/32': {'ens3': {'192.168.122.236'}},
                '10.4.0.1/32': {'tun0': {'10.4.0.2'}},
                '172.17.0.0/16': {'docker0': {'172.17.0.1'}},
                '192.168.39.0/24': {'virbr1': {'192.168.39.1'}},
                '192.168.122.0/24': {'virbr0': {'192.168.122.1'}},
                '192.168.178.0/24': {'enxd89ef3f34260': {'192.168.178.28'}}
            }
        ), (
            dedent("""
            default via 10.3.64.1 dev eno1 proto static metric 100
            10.3.64.0/24 dev eno1 proto kernel scope link src 10.3.64.23 metric 100
            10.3.64.0/24 dev eno1 proto kernel scope link src 10.3.64.27 metric 100
            10.88.0.0/16 dev cni-podman0 proto kernel scope link src 10.88.0.1 linkdown
            172.21.0.0/20 via 172.21.3.189 dev tun0
            172.21.1.0/20 via 172.21.3.189 dev tun0
            172.21.2.1 via 172.21.3.189 dev tun0
            172.21.3.1 dev tun0 proto kernel scope link src 172.21.3.2
            172.21.4.0/24 via 172.21.3.1 dev tun0
            172.21.5.0/24 via 172.21.3.1 dev tun0
            172.21.6.0/24 via 172.21.3.1 dev tun0
            172.21.7.0/24 via 172.21.3.1 dev tun0
            192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 linkdown
            192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 linkdown
            192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 linkdown
            192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 linkdown
            """),
            {
                '10.3.64.0/24': {'eno1': {'10.3.64.23', '10.3.64.27'}},
                '10.88.0.0/16': {'cni-podman0': {'10.88.0.1'}},
                '172.21.3.1/32': {'tun0': {'172.21.3.2'}},
                '192.168.122.0/24': {'virbr0': {'192.168.122.1'}}
            }
        ),
    ])
    def test_parse_ipv4_route(self, test_input, expected):
        assert _cephadm._parse_ipv4_route(test_input) == expected

    @pytest.mark.parametrize("test_routes, test_ips, expected", [
        (
            dedent("""
            ::1 dev lo proto kernel metric 256 pref medium
            fe80::/64 dev eno1 proto kernel metric 100 pref medium
            fe80::/64 dev br-3d443496454c proto kernel metric 256 linkdown pref medium
            fe80::/64 dev tun0 proto kernel metric 256 pref medium
            fe80::/64 dev br-4355f5dbb528 proto kernel metric 256 pref medium
            fe80::/64 dev docker0 proto kernel metric 256 linkdown pref medium
            fe80::/64 dev cni-podman0 proto kernel metric 256 linkdown pref medium
            fe80::/64 dev veth88ba1e8 proto kernel metric 256 pref medium
            fe80::/64 dev vethb6e5fc7 proto kernel metric 256 pref medium
            fe80::/64 dev vethaddb245 proto kernel metric 256 pref medium
            fe80::/64 dev vethbd14d6b proto kernel metric 256 pref medium
            fe80::/64 dev veth13e8fd2 proto kernel metric 256 pref medium
            fe80::/64 dev veth1d3aa9e proto kernel metric 256 pref medium
            fe80::/64 dev vethe485ca9 proto kernel metric 256 pref medium
            """),
            dedent("""
            1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 state UNKNOWN qlen 1000
                inet6 ::1/128 scope host
                   valid_lft forever preferred_lft forever
            2: eno1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
                inet6 fe80::225:90ff:fee5:26e8/64 scope link noprefixroute
                   valid_lft forever preferred_lft forever
            6: br-3d443496454c: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 state DOWN
                inet6 fe80::42:23ff:fe9d:ee4/64 scope link
                   valid_lft forever preferred_lft forever
            7: br-4355f5dbb528: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
                inet6 fe80::42:6eff:fe35:41fe/64 scope link
                   valid_lft forever preferred_lft forever
            8: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 state DOWN
                inet6 fe80::42:faff:fee6:40a0/64 scope link
                   valid_lft forever preferred_lft forever
            11: tun0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1500 state UNKNOWN qlen 100
                inet6 fe80::98a6:733e:dafd:350/64 scope link stable-privacy
                   valid_lft forever preferred_lft forever
            28: cni-podman0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 state DOWN qlen 1000
                inet6 fe80::3449:cbff:fe89:b87e/64 scope link
                   valid_lft forever preferred_lft forever
            31: vethaddb245@if30: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
                inet6 fe80::90f7:3eff:feed:a6bb/64 scope link
                   valid_lft forever preferred_lft forever
            33: veth88ba1e8@if32: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
                inet6 fe80::d:f5ff:fe73:8c82/64 scope link
                   valid_lft forever preferred_lft forever
            35: vethbd14d6b@if34: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
                inet6 fe80::b44f:8ff:fe6f:813d/64 scope link
                   valid_lft forever preferred_lft forever
            37: vethb6e5fc7@if36: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
                inet6 fe80::4869:c6ff:feaa:8afe/64 scope link
                   valid_lft forever preferred_lft forever
            39: veth13e8fd2@if38: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
                inet6 fe80::78f4:71ff:fefe:eb40/64 scope link
                   valid_lft forever preferred_lft forever
            41: veth1d3aa9e@if40: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
                inet6 fe80::24bd:88ff:fe28:5b18/64 scope link
                   valid_lft forever preferred_lft forever
            43: vethe485ca9@if42: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP
                inet6 fe80::6425:87ff:fe42:b9f0/64 scope link
                   valid_lft forever preferred_lft forever
            """),
            {
                "fe80::/64": {
                    "eno1": {"fe80::225:90ff:fee5:26e8"},
                    "br-3d443496454c": {"fe80::42:23ff:fe9d:ee4"},
                    "tun0": {"fe80::98a6:733e:dafd:350"},
                    "br-4355f5dbb528": {"fe80::42:6eff:fe35:41fe"},
                    "docker0": {"fe80::42:faff:fee6:40a0"},
                    "cni-podman0": {"fe80::3449:cbff:fe89:b87e"},
                    "veth88ba1e8": {"fe80::d:f5ff:fe73:8c82"},
                    "vethb6e5fc7": {"fe80::4869:c6ff:feaa:8afe"},
                    "vethaddb245": {"fe80::90f7:3eff:feed:a6bb"},
                    "vethbd14d6b": {"fe80::b44f:8ff:fe6f:813d"},
                    "veth13e8fd2": {"fe80::78f4:71ff:fefe:eb40"},
                    "veth1d3aa9e": {"fe80::24bd:88ff:fe28:5b18"},
                    "vethe485ca9": {"fe80::6425:87ff:fe42:b9f0"},
                }
            }
        ),
        (
            dedent("""
            ::1 dev lo proto kernel metric 256 pref medium
            2001:1458:301:eb::100:1a dev ens20f0 proto kernel metric 100 pref medium
            2001:1458:301:eb::/64 dev ens20f0 proto ra metric 100 pref medium
            fd01:1458:304:5e::/64 dev ens20f0 proto ra metric 100 pref medium
            fe80::/64 dev ens20f0 proto kernel metric 100 pref medium
            default proto ra metric 100
                nexthop via fe80::46ec:ce00:b8a0:d3c8 dev ens20f0 weight 1
                nexthop via fe80::46ec:ce00:b8a2:33c8 dev ens20f0 weight 1 pref medium
            """),
            dedent("""
            1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 state UNKNOWN qlen 1000
                inet6 ::1/128 scope host
                   valid_lft forever preferred_lft forever
            2: ens20f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
                inet6 2001:1458:301:eb::100:1a/128 scope global dynamic noprefixroute
                   valid_lft 590879sec preferred_lft 590879sec
                inet6 fe80::2e60:cff:fef8:da41/64 scope link noprefixroute
                   valid_lft forever preferred_lft forever
                inet6 fe80::2e60:cff:fef8:da41/64 scope link noprefixroute
                   valid_lft forever preferred_lft forever
                inet6 fe80::2e60:cff:fef8:da41/64 scope link noprefixroute
                   valid_lft forever preferred_lft forever
            """),
            {
                '2001:1458:301:eb::100:1a/128': {
                    'ens20f0': {
                        '2001:1458:301:eb::100:1a'
                    },
                },
                '2001:1458:301:eb::/64': {
                    'ens20f0': set(),
                },
                'fe80::/64': {
                    'ens20f0': {'fe80::2e60:cff:fef8:da41'},
                },
                'fd01:1458:304:5e::/64': {
                    'ens20f0': set()
                },
            }
        ),
        (
            dedent("""
            ::1 dev lo proto kernel metric 256 pref medium
            fe80::/64 dev ceph-brx proto kernel metric 256 pref medium
            fe80::/64 dev brx.0 proto kernel metric 256 pref medium
            default via fe80::327c:5e00:6487:71e0 dev enp3s0f1 proto ra metric 1024 expires 1790sec hoplimit 64 pref medium
            """),
            dedent("""
            1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 state UNKNOWN qlen 1000
                inet6 ::1/128 scope host
                   valid_lft forever preferred_lft forever
            5: enp3s0f1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
                inet6 fe80::ec4:7aff:fe8f:cb83/64 scope link noprefixroute
                   valid_lft forever preferred_lft forever
            6: ceph-brx: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
                inet6 fe80::d8a1:69ff:fede:8f58/64 scope link
                   valid_lft forever preferred_lft forever
            7: brx.0@eno1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 state UP qlen 1000
                inet6 fe80::a4cb:54ff:fecc:f2a2/64 scope link
                   valid_lft forever preferred_lft forever
            """),
            {
                'fe80::/64': {
                    'brx.0': {'fe80::a4cb:54ff:fecc:f2a2'},
                    'ceph-brx': {'fe80::d8a1:69ff:fede:8f58'}
                }
            }
        ),
    ])
    def test_parse_ipv6_route(self, test_routes, test_ips, expected):
        assert _cephadm._parse_ipv6_route(test_routes, test_ips) == expected

    @mock.patch.object(_cephadm, 'call_throws', return_value=('10.4.0.1 dev tun0 proto kernel scope link src 10.4.0.2 metric 50\n', '', ''))
    def test_command_list_networks(self, cephadm_fs, capsys):
        with with_cephadm_ctx([]) as ctx:
            _cephadm.command_list_networks(ctx)
            assert json.loads(capsys.readouterr().out) == {
                '10.4.0.1/32': {'tun0': ['10.4.0.2']}
            }
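# --- reviewer annotation, not part of the upstream test module ---
# Note the shape difference the tests above encode: the _parse_ipv4_route /
# _parse_ipv6_route helpers return {subnet: {iface: set(addresses)}}, while
# command_list_networks emits JSON, so the address sets are serialized as
# lists (e.g. {'10.4.0.1/32': {'tun0': ['10.4.0.2']}}).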
12884
54.064103
140
py
null
ceph-main/src/cephadm/tests/test_nfs.py
from unittest import mock

import json

import pytest

from tests.fixtures import with_cephadm_ctx, cephadm_fs, import_cephadm

_cephadm = import_cephadm()


SAMPLE_UUID = "2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae"


def good_nfs_json():
    return nfs_json(
        pool=True,
        files=True,
    )


def nfs_json(**kwargs):
    result = {}
    if kwargs.get("pool"):
        result["pool"] = "party"
    if kwargs.get("files"):
        result["files"] = {
            "ganesha.conf": "",
        }
    if kwargs.get("rgw_content"):
        result["rgw"] = dict(kwargs["rgw_content"])
    elif kwargs.get("rgw"):
        result["rgw"] = {
            "keyring": "foobar",
            "user": "jsmith",
        }
    return result


@pytest.mark.parametrize(
    "args,kwargs",
    # args: <fsid>, <daemon_id>, <config_json>; kwargs: <image>
    [
        # fail due to: invalid fsid
        (["foobar", "fred", good_nfs_json()], {}),
        # fail due to: invalid daemon_id
        ([SAMPLE_UUID, "", good_nfs_json()], {}),
        # fail due to: invalid image
        (
            [SAMPLE_UUID, "fred", good_nfs_json()],
            {"image": ""},
        ),
        # fail due to: no files in config_json
        (
            [
                SAMPLE_UUID,
                "fred",
                nfs_json(pool=True),
            ],
            {},
        ),
        # fail due to: no pool in config_json
        (
            [
                SAMPLE_UUID,
                "fred",
                nfs_json(files=True),
            ],
            {},
        ),
        # fail due to: bad rgw content
        (
            [
                SAMPLE_UUID,
                "fred",
                nfs_json(pool=True, files=True, rgw_content={"foo": True}),
            ],
            {},
        ),
        # fail due to: rgw keyring given but no user
        (
            [
                SAMPLE_UUID,
                "fred",
                nfs_json(
                    pool=True, files=True, rgw_content={"keyring": "foo"}
                ),
            ],
            {},
        ),
    ],
)
def test_nfsganesha_validation_errors(args, kwargs):
    with pytest.raises(_cephadm.Error):
        with with_cephadm_ctx([]) as ctx:
            _cephadm.NFSGanesha(ctx, *args, **kwargs)


def test_nfsganesha_init():
    with with_cephadm_ctx([]) as ctx:
        ctx.config_json = json.dumps(good_nfs_json())
        ctx.image = "test_image"
        nfsg = _cephadm.NFSGanesha.init(
            ctx,
            SAMPLE_UUID,
            "fred",
        )
        assert nfsg.fsid == SAMPLE_UUID
        assert nfsg.daemon_id == "fred"
        assert nfsg.pool == "party"


def test_nfsganesha_container_mounts():
    with with_cephadm_ctx([]) as ctx:
        nfsg = _cephadm.NFSGanesha(
            ctx,
            SAMPLE_UUID,
            "fred",
            good_nfs_json(),
        )
        cmounts = nfsg.get_container_mounts("/var/tmp")
        assert len(cmounts) == 3
        assert cmounts["/var/tmp/config"] == "/etc/ceph/ceph.conf:z"
        assert cmounts["/var/tmp/keyring"] == "/etc/ceph/keyring:z"
        assert cmounts["/var/tmp/etc/ganesha"] == "/etc/ganesha:z"

    with with_cephadm_ctx([]) as ctx:
        nfsg = _cephadm.NFSGanesha(
            ctx,
            SAMPLE_UUID,
            "fred",
            nfs_json(pool=True, files=True, rgw=True),
        )
        cmounts = nfsg.get_container_mounts("/var/tmp")
        assert len(cmounts) == 4
        assert cmounts["/var/tmp/config"] == "/etc/ceph/ceph.conf:z"
        assert cmounts["/var/tmp/keyring"] == "/etc/ceph/keyring:z"
        assert cmounts["/var/tmp/etc/ganesha"] == "/etc/ganesha:z"
        assert (
            cmounts["/var/tmp/keyring.rgw"]
            == "/var/lib/ceph/radosgw/ceph-jsmith/keyring:z"
        )


def test_nfsganesha_container_envs():
    with with_cephadm_ctx([]) as ctx:
        nfsg = _cephadm.NFSGanesha(
            ctx,
            SAMPLE_UUID,
            "fred",
            good_nfs_json(),
        )
        envs = nfsg.get_container_envs()
        assert len(envs) == 1
        assert envs[0] == "CEPH_CONF=/etc/ceph/ceph.conf"


def test_nfsganesha_get_version():
    with with_cephadm_ctx([]) as ctx:
        nfsg = _cephadm.NFSGanesha(
            ctx,
            SAMPLE_UUID,
            "fred",
            good_nfs_json(),
        )
        with mock.patch("cephadm.call") as _call:
            _call.return_value = ("NFS-Ganesha Release = V100", "", 0)
            ver = nfsg.get_version(ctx, "fake_version")
            _call.assert_called()
        assert ver == "100"


def test_nfsganesha_get_daemon_name():
    with with_cephadm_ctx([]) as ctx:
        nfsg = _cephadm.NFSGanesha(
            ctx,
            SAMPLE_UUID,
            "fred",
            good_nfs_json(),
        )
        assert nfsg.get_daemon_name() == "nfs.fred"


def test_nfsganesha_get_container_name():
    with with_cephadm_ctx([]) as ctx:
        nfsg = _cephadm.NFSGanesha(
            ctx,
            SAMPLE_UUID,
            "fred",
            good_nfs_json(),
        )
        name1 = nfsg.get_container_name()
        assert name1 == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-nfs.fred"
        name2 = nfsg.get_container_name(desc="extra")
        assert (
            name2 == "ceph-2d018a3f-8a8f-4cb9-a7cf-48bebb2cbaae-nfs.fred-extra"
        )


def test_nfsganesha_get_daemon_args():
    with with_cephadm_ctx([]) as ctx:
        nfsg = _cephadm.NFSGanesha(
            ctx,
            SAMPLE_UUID,
            "fred",
            good_nfs_json(),
        )
        args = nfsg.get_daemon_args()
        assert args == ["-F", "-L", "STDERR"]


@mock.patch("cephadm.logger")
def test_nfsganesha_create_daemon_dirs(_logger, cephadm_fs):
    with with_cephadm_ctx([]) as ctx:
        nfsg = _cephadm.NFSGanesha(
            ctx,
            SAMPLE_UUID,
            "fred",
            good_nfs_json(),
        )
        with pytest.raises(OSError):
            nfsg.create_daemon_dirs("/var/tmp", 45, 54)
        cephadm_fs.create_dir("/var/tmp")
        nfsg.create_daemon_dirs("/var/tmp", 45, 54)
        # TODO: make assertions about the dirs created


@mock.patch("cephadm.logger")
def test_nfsganesha_create_daemon_dirs_rgw(_logger, cephadm_fs):
    with with_cephadm_ctx([]) as ctx:
        nfsg = _cephadm.NFSGanesha(
            ctx,
            SAMPLE_UUID,
            "fred",
            nfs_json(pool=True, files=True, rgw=True),
        )
        cephadm_fs.create_dir("/var/tmp")
        nfsg.create_daemon_dirs("/var/tmp", 45, 54)
        # TODO: make assertions about the dirs created
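# --- Annotation (added commentary, not part of the upstream test module) ---
# A minimal sketch of what the nfs_json() builder above produces; the
# literals below are derived directly from nfs_json()'s branches and can be
# verified standalone:
#
#   assert nfs_json(pool=True, files=True) == {
#       "pool": "party",
#       "files": {"ganesha.conf": ""},
#   }
#   assert nfs_json(pool=True, files=True, rgw=True)["rgw"] == {
#       "keyring": "foobar",
#       "user": "jsmith",
#   }
#
# This is why good_nfs_json() passes validation while nfs_json(pool=True)
# (no "files" key) and nfs_json(files=True) (no "pool" key) are expected to
# raise in test_nfsganesha_validation_errors.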
6620
26.5875
79
py
null
ceph-main/src/cephadm/tests/test_util_funcs.py
# Tests for various assorted utility functions found within cephadm
#
from unittest import mock

import functools
import io
import os
import sys

import pytest

from tests.fixtures import with_cephadm_ctx, import_cephadm

_cephadm = import_cephadm()


class TestCopyTree:
    def _copy_tree(self, *args, **kwargs):
        with with_cephadm_ctx([]) as ctx:
            with mock.patch("cephadm.extract_uid_gid") as eug:
                eug.return_value = (os.getuid(), os.getgid())
                _cephadm.copy_tree(ctx, *args, **kwargs)

    def test_one_dir(self, tmp_path):
        """Copy one dir into a non-existing dest dir."""
        src1 = tmp_path / "src1"
        dst = tmp_path / "dst"
        src1.mkdir(parents=True)

        with (src1 / "foo.txt").open("w") as fh:
            fh.write("hello\n")
            fh.write("earth\n")

        assert not (dst / "foo.txt").exists()

        self._copy_tree([src1], dst)
        assert (dst / "foo.txt").exists()

    def test_one_existing_dir(self, tmp_path):
        """Copy one dir into an existing dest dir."""
        src1 = tmp_path / "src1"
        dst = tmp_path / "dst"
        src1.mkdir(parents=True)
        dst.mkdir(parents=True)

        with (src1 / "foo.txt").open("w") as fh:
            fh.write("hello\n")
            fh.write("earth\n")

        assert not (dst / "src1").exists()

        self._copy_tree([src1], dst)
        assert (dst / "src1/foo.txt").exists()

    def test_two_dirs(self, tmp_path):
        """Copy two source directories into an existing dest dir."""
        src1 = tmp_path / "src1"
        src2 = tmp_path / "src2"
        dst = tmp_path / "dst"
        src1.mkdir(parents=True)
        src2.mkdir(parents=True)
        dst.mkdir(parents=True)

        with (src1 / "foo.txt").open("w") as fh:
            fh.write("hello\n")
            fh.write("earth\n")
        with (src2 / "bar.txt").open("w") as fh:
            fh.write("goodbye\n")
            fh.write("mars\n")

        assert not (dst / "src1").exists()
        assert not (dst / "src2").exists()

        self._copy_tree([src1, src2], dst)
        assert (dst / "src1/foo.txt").exists()
        assert (dst / "src2/bar.txt").exists()

    def test_one_dir_set_uid(self, tmp_path):
        """Explicitly pass uid/gid values and assert these are passed to chown."""
        # Because this test will often be run by non-root users it is necessary
        # to mock os.chown or we too easily run into perms issues.
        src1 = tmp_path / "src1"
        dst = tmp_path / "dst"
        src1.mkdir(parents=True)

        with (src1 / "foo.txt").open("w") as fh:
            fh.write("hello\n")
            fh.write("earth\n")

        assert not (dst / "foo.txt").exists()

        with mock.patch("os.chown") as _chown:
            _chown.return_value = None
            self._copy_tree([src1], dst, uid=0, gid=0)
            assert len(_chown.mock_calls) >= 2
            for c in _chown.mock_calls:
                assert c == mock.call(mock.ANY, 0, 0)
        assert (dst / "foo.txt").exists()


class TestCopyFiles:
    def _copy_files(self, *args, **kwargs):
        with with_cephadm_ctx([]) as ctx:
            with mock.patch("cephadm.extract_uid_gid") as eug:
                eug.return_value = (os.getuid(), os.getgid())
                _cephadm.copy_files(ctx, *args, **kwargs)

    def test_one_file(self, tmp_path):
        """Copy one file into the dest dir."""
        file1 = tmp_path / "f1.txt"
        dst = tmp_path / "dst"
        dst.mkdir(parents=True)

        with file1.open("w") as fh:
            fh.write("its test time\n")

        self._copy_files([file1], dst)
        assert (dst / "f1.txt").exists()

    def test_one_file_nodest(self, tmp_path):
        """Copy one file to the given destination path."""
        file1 = tmp_path / "f1.txt"
        dst = tmp_path / "dst"

        with file1.open("w") as fh:
            fh.write("its test time\n")

        self._copy_files([file1], dst)
        assert not dst.is_dir()
        assert dst.is_file()
        assert dst.open("r").read() == "its test time\n"

    def test_three_files(self, tmp_path):
        """Copy three files into the dest dir."""
        file1 = tmp_path / "f1.txt"
        file2 = tmp_path / "f2.txt"
        file3 = tmp_path / "f3.txt"
        dst = tmp_path / "dst"
        dst.mkdir(parents=True)

        with file1.open("w") as fh:
            fh.write("its test time\n")
        with file2.open("w") as fh:
            fh.write("f2\n")
        with file3.open("w") as fh:
            fh.write("f3\n")

        self._copy_files([file1, file2, file3], dst)
        assert (dst / "f1.txt").exists()
        assert (dst / "f2.txt").exists()
        assert (dst / "f3.txt").exists()

    def test_three_files_nodest(self, tmp_path):
        """Copy files to dest path (not a dir). This is not a useful operation."""
        file1 = tmp_path / "f1.txt"
        file2 = tmp_path / "f2.txt"
        file3 = tmp_path / "f3.txt"
        dst = tmp_path / "dst"

        with file1.open("w") as fh:
            fh.write("its test time\n")
        with file2.open("w") as fh:
            fh.write("f2\n")
        with file3.open("w") as fh:
            fh.write("f3\n")

        self._copy_files([file1, file2, file3], dst)
        assert not dst.is_dir()
        assert dst.is_file()
        assert dst.open("r").read() == "f3\n"

    def test_one_file_set_uid(self, tmp_path):
        """Explicitly pass uid/gid values and assert these are passed to chown."""
        # Because this test will often be run by non-root users it is necessary
        # to mock os.chown or we too easily run into perms issues.
        file1 = tmp_path / "f1.txt"
        dst = tmp_path / "dst"
        dst.mkdir(parents=True)

        with file1.open("w") as fh:
            fh.write("its test time\n")

        assert not (dst / "f1.txt").exists()

        with mock.patch("os.chown") as _chown:
            _chown.return_value = None
            self._copy_files([file1], dst, uid=0, gid=0)
            assert len(_chown.mock_calls) >= 1
            for c in _chown.mock_calls:
                assert c == mock.call(mock.ANY, 0, 0)
        assert (dst / "f1.txt").exists()


class TestMoveFiles:
    def _move_files(self, *args, **kwargs):
        with with_cephadm_ctx([]) as ctx:
            with mock.patch("cephadm.extract_uid_gid") as eug:
                eug.return_value = (os.getuid(), os.getgid())
                _cephadm.move_files(ctx, *args, **kwargs)

    def test_one_file(self, tmp_path):
        """Move a named file to test dest path."""
        file1 = tmp_path / "f1.txt"
        dst = tmp_path / "dst"

        with file1.open("w") as fh:
            fh.write("lets moove\n")

        assert not dst.exists()
        assert file1.is_file()

        self._move_files([file1], dst)
        assert dst.is_file()
        assert not file1.exists()

    def test_one_file_destdir(self, tmp_path):
        """Move a file into an existing dest dir."""
        file1 = tmp_path / "f1.txt"
        dst = tmp_path / "dst"
        dst.mkdir(parents=True)

        with file1.open("w") as fh:
            fh.write("lets moove\n")

        assert not (dst / "f1.txt").exists()
        assert file1.is_file()

        self._move_files([file1], dst)
        assert (dst / "f1.txt").is_file()
        assert not file1.exists()

    def test_one_file_one_link(self, tmp_path):
        """Move a file and a symlink to that file to a dest dir."""
        file1 = tmp_path / "f1.txt"
        link1 = tmp_path / "lnk"
        dst = tmp_path / "dst"
        dst.mkdir(parents=True)

        with file1.open("w") as fh:
            fh.write("lets moove\n")
        os.symlink("f1.txt", link1)

        assert not (dst / "f1.txt").exists()
        assert file1.is_file()
        assert link1.exists()

        self._move_files([file1, link1], dst)
        assert (dst / "f1.txt").is_file()
        assert (dst / "lnk").is_symlink()
        assert not file1.exists()
        assert not link1.exists()
        assert (dst / "f1.txt").open("r").read() == "lets moove\n"
        assert (dst / "lnk").open("r").read() == "lets moove\n"

    def test_one_file_set_uid(self, tmp_path):
        """Explicitly pass uid/gid values and assert these are passed to chown."""
        # Because this test will often be run by non-root users it is necessary
        # to mock os.chown or we too easily run into perms issues.
        file1 = tmp_path / "f1.txt"
        dst = tmp_path / "dst"

        with file1.open("w") as fh:
            fh.write("lets moove\n")

        assert not dst.exists()
        assert file1.is_file()

        with mock.patch("os.chown") as _chown:
            _chown.return_value = None
            self._move_files([file1], dst, uid=0, gid=0)
            assert len(_chown.mock_calls) >= 1
            for c in _chown.mock_calls:
                assert c == mock.call(mock.ANY, 0, 0)
        assert dst.is_file()
        assert not file1.exists()


def test_recursive_chown(tmp_path):
    d1 = tmp_path / "dir1"
    d2 = d1 / "dir2"
    f1 = d2 / "file1.txt"
    d2.mkdir(parents=True)

    with f1.open("w") as fh:
        fh.write("low down\n")

    with mock.patch("os.chown") as _chown:
        _chown.return_value = None
        _cephadm.recursive_chown(str(d1), uid=500, gid=500)
    assert len(_chown.mock_calls) == 3
    assert _chown.mock_calls[0] == mock.call(str(d1), 500, 500)
    assert _chown.mock_calls[1] == mock.call(str(d2), 500, 500)
    assert _chown.mock_calls[2] == mock.call(str(f1), 500, 500)


class TestFindExecutable:
    def test_standard_exe(self):
        # pretty much every system will have `true` on the path. It's a safe
        # choice for the first assertion
        exe = _cephadm.find_executable("true")
        assert exe.endswith("true")

    def test_custom_path(self, tmp_path):
        foo_sh = tmp_path / "foo.sh"
        with open(foo_sh, "w") as fh:
            fh.write("#!/bin/sh\n")
            fh.write("echo foo\n")
        foo_sh.chmod(0o755)

        exe = _cephadm.find_executable(foo_sh)
        assert str(exe) == str(foo_sh)

    def test_no_path(self, monkeypatch):
        monkeypatch.delenv("PATH")
        exe = _cephadm.find_executable("true")
        assert exe.endswith("true")

    def test_no_path_no_confstr(self, monkeypatch):
        def _fail(_):
            raise ValueError("fail")

        monkeypatch.delenv("PATH")
        monkeypatch.setattr("os.confstr", _fail)
        exe = _cephadm.find_executable("true")
        assert exe.endswith("true")

    def test_unset_path(self):
        exe = _cephadm.find_executable("true", path="")
        assert exe is None

    def test_no_such_exe(self):
        exe = _cephadm.find_executable("foo_bar-baz.noway")
        assert exe is None


def test_find_program():
    exe = _cephadm.find_program("true")
    assert exe.endswith("true")

    with pytest.raises(ValueError):
        _cephadm.find_program("foo_bar-baz.noway")


def _mk_fake_call(enabled, active):
    def _fake_call(ctx, cmd, **kwargs):
        if "is-enabled" in cmd:
            if isinstance(enabled, Exception):
                raise enabled
            return enabled
        if "is-active" in cmd:
            if isinstance(active, Exception):
                raise active
            return active
        raise ValueError("should not get here")

    return _fake_call


@pytest.mark.parametrize(
    "enabled_out, active_out, expected",
    [
        (
            # ok, all is well
            ("", "", 0),
            ("active", "", 0),
            (True, "running", True),
        ),
        (
            # disabled, unknown if active
            ("disabled", "", 1),
            ("", "", 0),
            (False, "unknown", True),
        ),
        (
            # is-enabled error (not disabled), unknown if active
            ("bleh", "", 1),
            ("", "", 0),
            (False, "unknown", False),
        ),
        (
            # is-enabled ok, inactive is stopped
            ("", "", 0),
            ("inactive", "", 0),
            (True, "stopped", True),
        ),
        (
            # is-enabled ok, failed is error
            ("", "", 0),
            ("failed", "", 0),
            (True, "error", True),
        ),
        (
            # is-enabled ok, auto-restart is error
            ("", "", 0),
            ("auto-restart", "", 0),
            (True, "error", True),
        ),
        (
            # error exec'ing is-enabled cmd
            ValueError("bonk"),
            ("active", "", 0),
            (False, "running", False),
        ),
        (
            # error exec'ing is-active cmd
            ("", "", 0),
            ValueError("blat"),
            (True, "unknown", True),
        ),
    ],
)
def test_check_unit(enabled_out, active_out, expected):
    with with_cephadm_ctx([]) as ctx:
        _cephadm.call.side_effect = _mk_fake_call(
            enabled=enabled_out,
            active=active_out,
        )
        enabled, state, installed = _cephadm.check_unit(ctx, "foobar")
    assert (enabled, state, installed) == expected


class FakeEnabler:
    def __init__(self, should_be_called):
        self._should_be_called = should_be_called
        self._services = []

    def enable_service(self, service):
        self._services.append(service)

    def check_expected(self):
        if not self._should_be_called:
            assert not self._services
            return
        # there are currently seven chrony/ntp type services that
        # cephadm looks for. Make sure it probed for each of them
        # or more in case someone adds to the list.
        assert len(self._services) >= 7
        assert "chrony.service" in self._services
        assert "ntp.service" in self._services


@pytest.mark.parametrize(
    "call_fn, enabler, expected",
    [
        # Test that time sync services are not enabled
        (
            _mk_fake_call(
                enabled=("", "", 1),
                active=("", "", 1),
            ),
            None,
            False,
        ),
        # Test that time sync service is enabled
        (
            _mk_fake_call(
                enabled=("", "", 0),
                active=("active", "", 0),
            ),
            None,
            True,
        ),
        # Test that time sync is not enabled, and try to enable them.
        # This one needs to be not running, but installed in order to
        # call the enabler. It should call the enabler with every known
        # service name.
        (
            _mk_fake_call(
                enabled=("disabled", "", 1),
                active=("", "", 1),
            ),
            FakeEnabler(True),
            False,
        ),
        # Test that time sync is enabled, with an enabler passed which
        # will check that the enabler was never called.
        (
            _mk_fake_call(
                enabled=("", "", 0),
                active=("active", "", 0),
            ),
            FakeEnabler(False),
            True,
        ),
    ],
)
def test_check_time_sync(call_fn, enabler, expected):
    """The check_time_sync call actually checks if a time synchronization
    service is enabled. It is also the only consumer of check_units.
    """
    with with_cephadm_ctx([]) as ctx:
        _cephadm.call.side_effect = call_fn
        result = _cephadm.check_time_sync(ctx, enabler=enabler)
        assert result == expected
        if enabler is not None:
            enabler.check_expected()


@pytest.mark.parametrize(
    "content, expected",
    [
        (
            """#JUNK
FOO=1
""",
            (None, None, None),
        ),
        (
            """# A sample from a real centos system
NAME="CentOS Stream"
VERSION="8"
ID="centos"
ID_LIKE="rhel fedora"
VERSION_ID="8"
PLATFORM_ID="platform:el8"
PRETTY_NAME="CentOS Stream 8"
ANSI_COLOR="0;31"
CPE_NAME="cpe:/o:centos:centos:8"
HOME_URL="https://centos.org/"
BUG_REPORT_URL="https://bugzilla.redhat.com/"
REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 8"
REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
""",
            ("centos", "8", None),
        ),
        (
            """# Minimal but complete, made up vals
ID="hpec"
VERSION_ID="33"
VERSION_CODENAME="hpec nimda"
""",
            ("hpec", "33", "hpec nimda"),
        ),
        (
            """# Minimal but complete, no quotes
ID=hpec
VERSION_ID=33
VERSION_CODENAME=hpec nimda
""",
            ("hpec", "33", "hpec nimda"),
        ),
    ],
)
def test_get_distro(monkeypatch, content, expected):
    def _fake_open(*args, **kwargs):
        return io.StringIO(content)

    monkeypatch.setattr("builtins.open", _fake_open)
    assert _cephadm.get_distro() == expected


class FakeContext:
    """FakeContext is a minimal type for passing as a ctx, when
    with_cephadm_ctx is not appropriate (it enables too many mocks, etc).
    """

    timeout = 30


def _has_non_zero_exit(clog):
    assert any("Non-zero exit" in ll for _, _, ll in clog.record_tuples)


def _has_values_somewhere(clog, values, non_zero=True):
    if non_zero:
        _has_non_zero_exit(clog)
    for value in values:
        assert any(value in ll for _, _, ll in clog.record_tuples)


@pytest.mark.parametrize(
    "pyline, expected, call_kwargs, log_check",
    [
        pytest.param(
            "import time; time.sleep(0.1)",
            ("", "", 0),
            {},
            None,
            id="brief-sleep",
        ),
        pytest.param(
            "import sys; sys.exit(2)",
            ("", "", 2),
            {},
            _has_non_zero_exit,
            id="exit-non-zero",
        ),
        pytest.param(
            "import sys; sys.exit(0)",
            ("", "", 0),
            {"desc": "success"},
            None,
            id="success-with-desc",
        ),
        pytest.param(
            "print('foo'); print('bar')",
            ("foo\nbar\n", "", 0),
            {"desc": "stdout"},
            None,
            id="stdout-print",
        ),
        pytest.param(
            "import sys; sys.stderr.write('la\\nla\\nla\\n')",
            ("", "la\nla\nla\n", 0),
            {"desc": "stderr"},
            None,
            id="stderr-print",
        ),
        pytest.param(
            "for i in range(501): print(i, flush=True)",
            lambda r: r[2] == 0 and r[1] == "" and "500" in r[0].splitlines(),
            {},
            None,
            id="stdout-long",
        ),
        pytest.param(
            "for i in range(1000000): print(i, flush=True)",
            lambda r: r[2] == 0
            and r[1] == ""
            and len(r[0].splitlines()) == 1000000,
            {},
            None,
            id="stdout-very-long",
        ),
        pytest.param(
            "import sys; sys.stderr.write('pow\\noof\\nouch\\n'); sys.exit(1)",
            ("", "pow\noof\nouch\n", 1),
            {"desc": "stderr"},
            functools.partial(
                _has_values_somewhere,
                values=["pow", "oof", "ouch"],
                non_zero=True,
            ),
            id="stderr-logged-non-zero",
        ),
        pytest.param(
            "import time; time.sleep(4)",
            ("", "", 124),
            {"timeout": 1},
            None,
            id="long-sleep",
        ),
        pytest.param(
            "import time\nfor i in range(100):\n\tprint(i, flush=True); time.sleep(0.01)",
            ("", "", 124),
            {"timeout": 0.5},
            None,
            id="slow-print-timeout",
        ),
        # Commands that time out collect no logs, return empty std{out,err} strings
    ],
)
def test_call(caplog, monkeypatch, pyline, expected, call_kwargs, log_check):
    import logging

    caplog.set_level(logging.INFO)
    monkeypatch.setattr("cephadm.logger", logging.getLogger())
    ctx = FakeContext()
    result = _cephadm.call(ctx, [sys.executable, "-c", pyline], **call_kwargs)
    if callable(expected):
        assert expected(result)
    else:
        assert result == expected
    if callable(log_check):
        log_check(caplog)


class TestWriteNew:
    def test_success(self, tmp_path):
        "Test the simple basic feature of writing a file."
        dest = tmp_path / "foo.txt"
        with _cephadm.write_new(dest) as fh:
            fh.write("something\n")
            fh.write("something else\n")

        with open(dest, "r") as fh:
            assert fh.read() == "something\nsomething else\n"

    def test_write_owner_mode(self, tmp_path):
        "Test that the owner and perms options function."
        dest = tmp_path / "foo.txt"

        # if this test is run as non-root, we can't really change ownership
        uid = os.getuid()
        gid = os.getgid()

        with _cephadm.write_new(dest, owner=(uid, gid), perms=0o600) as fh:
            fh.write("xomething\n")
            fh.write("xomething else\n")

        with open(dest, "r") as fh:
            assert fh.read() == "xomething\nxomething else\n"
            sr = os.fstat(fh.fileno())
        assert sr.st_uid == uid
        assert sr.st_gid == gid
        assert (sr.st_mode & 0o777) == 0o600

    def test_encoding(self, tmp_path):
        "Test that the encoding option functions."
        dest = tmp_path / "foo.txt"
        msg = "\u2603\u26C5\n"
        with _cephadm.write_new(dest, encoding='utf-8') as fh:
            fh.write(msg)
        with open(dest, "rb") as fh:
            b1 = fh.read()
        assert b1.decode('utf-8') == msg

        dest = tmp_path / "foo2.txt"
        with _cephadm.write_new(dest, encoding='utf-16le') as fh:
            fh.write(msg)
        with open(dest, "rb") as fh:
            b2 = fh.read()
        assert b2.decode('utf-16le') == msg

        # the binary data should differ due to the different encodings
        assert b1 != b2

    def test_cleanup(self, tmp_path):
        "Test that an exception during write leaves no file behind."
        dest = tmp_path / "foo.txt"
        with pytest.raises(ValueError):
            with _cephadm.write_new(dest) as fh:
                fh.write("hello\n")
                raise ValueError("foo")
                fh.write("world\n")
        assert not dest.exists()
        assert not dest.with_name(dest.name + ".new").exists()
        assert list(dest.parent.iterdir()) == []


class CompareContext1:
    cfg_data = {
        "name": "mane",
        "fsid": "foobar",
        "image": "fake.io/noway/nohow:gndn",
        "meta": {
            "fruit": "banana",
            "vegetable": "carrot",
        },
        "params": {
            "osd_fsid": "robble",
            "tcp_ports": [404, 9999],
        },
        "config_blobs": {
            "alpha": {"sloop": "John B"},
            "beta": {"forest": "birch"},
            "gamma": {"forest": "pine"},
        },
    }

    def check(self, ctx):
        assert ctx.name == 'mane'
        assert ctx.fsid == 'foobar'
        assert ctx.image == 'fake.io/noway/nohow:gndn'
        assert ctx.meta_properties == {"fruit": "banana", "vegetable": "carrot"}
        assert ctx.config_blobs == {
            "alpha": {"sloop": "John B"},
            "beta": {"forest": "birch"},
            "gamma": {"forest": "pine"},
        }
        assert ctx.osd_fsid == "robble"
        assert ctx.tcp_ports == [404, 9999]


class CompareContext2:
    cfg_data = {
        "name": "cc2",
        "fsid": "foobar",
        "meta": {
            "fruit": "banana",
            "vegetable": "carrot",
        },
        "params": {},
        "config_blobs": {
            "alpha": {"sloop": "John B"},
            "beta": {"forest": "birch"},
            "gamma": {"forest": "pine"},
        },
    }

    def check(self, ctx):
        assert ctx.name == 'cc2'
        assert ctx.fsid == 'foobar'
        assert ctx.image == 'quay.ceph.io/ceph-ci/ceph:main'
        assert ctx.meta_properties == {"fruit": "banana", "vegetable": "carrot"}
        assert ctx.config_blobs == {
            "alpha": {"sloop": "John B"},
            "beta": {"forest": "birch"},
            "gamma": {"forest": "pine"},
        }
        assert ctx.osd_fsid is None
        assert ctx.tcp_ports is None


@pytest.mark.parametrize(
    "cc",
    [
        CompareContext1(),
        CompareContext2(),
    ],
)
def test_apply_deploy_config_to_ctx(cc, monkeypatch):
    import logging

    monkeypatch.setattr("cephadm.logger", logging.getLogger())
    ctx = FakeContext()
    _cephadm.apply_deploy_config_to_ctx(cc.cfg_data, ctx)
    cc.check(ctx)
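# --- Annotation (added commentary, not part of the upstream test module) ---
# Sketch of the (enabled, state, installed) triple that the check_unit
# cases above encode, assuming systemctl-style subcommand results as faked
# by _mk_fake_call:
#
#   is-enabled rc=0              -> enabled=True,  installed=True
#   is-enabled "disabled" rc=1   -> enabled=False, installed=True
#   is-enabled other rc!=0       -> enabled=False, installed=False
#   is-active "active"           -> state="running"
#   is-active "inactive"         -> state="stopped"
#   is-active "failed"/"auto-restart" -> state="error"
#
# e.g. a healthy unit maps to (True, "running", True), matching the first
# parametrized case of test_check_unit.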
24599
29.407911
90
py
null
ceph-main/src/client/Client.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <[email protected]>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */


// unix-ey fs stuff
#include <unistd.h>
#include <sys/types.h>
#include <time.h>
#include <utime.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>
#include <sys/file.h>
#ifndef _WIN32
#include <sys/utsname.h>
#endif
#include <sys/uio.h>

#include <boost/lexical_cast.hpp>
#include <boost/fusion/include/std_pair.hpp>

#include "common/async/waiter.h"

#if defined(__FreeBSD__)
#define XATTR_CREATE  0x1
#define XATTR_REPLACE 0x2
#elif !defined(_WIN32)
#include <sys/xattr.h>
#endif

#if defined(__linux__)
#include <linux/falloc.h>
#endif

#include <sys/statvfs.h>

#include "common/config.h"
#include "common/version.h"
#include "common/async/blocked_completion.h"

#include "mon/MonClient.h"

#include "messages/MClientCaps.h"
#include "messages/MClientLease.h"
#include "messages/MClientQuota.h"
#include "messages/MClientReclaim.h"
#include "messages/MClientReclaimReply.h"
#include "messages/MClientReconnect.h"
#include "messages/MClientReply.h"
#include "messages/MClientRequest.h"
#include "messages/MClientRequestForward.h"
#include "messages/MClientSession.h"
#include "messages/MClientSnap.h"
#include "messages/MClientMetrics.h"
#include "messages/MCommandReply.h"
#include "messages/MFSMap.h"
#include "messages/MFSMapUser.h"
#include "messages/MMDSMap.h"
#include "messages/MOSDMap.h"

#include "mds/flock.h"
#include "mds/cephfs_features.h"
#include "mds/snap.h"
#include "osd/OSDMap.h"
#include "osdc/Filer.h"

#include "common/Cond.h"
#include "common/perf_counters.h"
#include "common/admin_socket.h"
#include "common/errno.h"
#include "include/str_list.h"

#define dout_subsys ceph_subsys_client

#include "include/lru.h"
#include "include/compat.h"
#include "include/stringify.h"
#include "include/random.h"

#include "Client.h"
#include "Inode.h"
#include "Dentry.h"
#include "Delegation.h"
#include "Dir.h"
#include "ClientSnapRealm.h"
#include "Fh.h"
#include "MetaSession.h"
#include "MetaRequest.h"
#include "ObjecterWriteback.h"
#include "posix_acl.h"

#include "include/ceph_assert.h"
#include "include/stat.h"

#include "include/cephfs/ceph_ll_client.h"

#if HAVE_GETGROUPLIST
#include <grp.h>
#include <pwd.h>
#include <unistd.h>
#endif

#undef dout_prefix
#define dout_prefix *_dout << "client." << whoami << " "

#define tout(cct) if (!cct->_conf->client_trace.empty()) traceout

// FreeBSD fails to define this
#ifndef O_DSYNC
#define O_DSYNC 0x0
#endif
// Darwin fails to define this
#ifndef O_RSYNC
#define O_RSYNC 0x0
#endif

#ifndef O_DIRECT
#define O_DIRECT 0x0
#endif

// Windows doesn't define those values. While the POSIX compatibility layer
// doesn't support those values, the Windows native functions do provide
// similar flags. Special care should be taken if we're going to use those
// flags in ceph-dokan. The current values are no-ops, while propagating
// them to the rest of the code might cause the Windows functions to reject
// them as invalid.
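// Illustrative note (added commentary, not upstream code): defining a
// missing flag as 0x0 makes bitmask checks compile everywhere while
// evaluating to false on platforms that lack the flag, e.g.:
//
//   if (flags & O_NOFOLLOW) { /* never taken where O_NOFOLLOW == 0x0 */ }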
#ifndef O_NOFOLLOW
#define O_NOFOLLOW 0x0
#endif

#ifndef O_SYNC
#define O_SYNC 0x0
#endif

#define DEBUG_GETATTR_CAPS (CEPH_CAP_XATTR_SHARED)

#ifndef S_IXUGO
#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH)
#endif

using std::dec;
using std::hex;
using std::list;
using std::oct;
using std::pair;
using std::string;
using std::vector;

using namespace TOPNSPC::common;

namespace bs = boost::system;
namespace ca = ceph::async;

void client_flush_set_callback(void *p, ObjectCacher::ObjectSet *oset)
{
  Client *client = static_cast<Client*>(p);
  client->flush_set_callback(oset);
}

bool Client::is_reserved_vino(vinodeno_t &vino) {
  if (MDS_IS_PRIVATE_INO(vino.ino)) {
    ldout(cct, -1) << __func__ << " attempt to access reserved inode number " << vino << dendl;
    return true;
  }
  return false;
}

// running average and standard deviation -- presented in
// Donald Knuth's TAoCP, Volume II.
double calc_average(double old_avg, double value, uint64_t count) {
  double new_avg;
  if (count == 1) {
    new_avg = value;
  } else {
    new_avg = old_avg + ((value - old_avg) / count);
  }

  return new_avg;
}

double calc_sq_sum(double old_sq_sum, double old_mean, double new_mean,
                   double value, uint64_t count) {
  double new_sq_sum;
  if (count == 1) {
    new_sq_sum = 0.0;
  } else {
    new_sq_sum = old_sq_sum + (value - old_mean)*(value - new_mean);
  }

  return new_sq_sum;
}

// -------------

Client::CommandHook::CommandHook(Client *client) :
  m_client(client)
{
}

int Client::CommandHook::call(
  std::string_view command,
  const cmdmap_t& cmdmap,
  const bufferlist&,
  Formatter *f,
  std::ostream& errss,
  bufferlist& out)
{
  f->open_object_section("result");
  {
    std::scoped_lock l{m_client->client_lock};
    if (command == "mds_requests")
      m_client->dump_mds_requests(f);
    else if (command == "mds_sessions") {
      bool cap_dump = false;
      cmd_getval(cmdmap, "cap_dump", cap_dump);
      m_client->dump_mds_sessions(f, cap_dump);
    } else if (command == "dump_cache")
      m_client->dump_cache(f);
    else if (command == "kick_stale_sessions")
      m_client->_kick_stale_sessions();
    else if (command == "status")
      m_client->dump_status(f);
    else
      ceph_abort_msg("bad command registered");
  }
  f->close_section();
  return 0;
}

// -------------

int Client::get_fd_inode(int fd, InodeRef *in) {
  int r = 0;
  if (fd == CEPHFS_AT_FDCWD) {
    *in = cwd;
  } else {
    Fh *f = get_filehandle(fd);
    if (!f) {
      r = -CEPHFS_EBADF;
    } else {
      *in = f->inode;
    }
  }
  return r;
}

dir_result_t::dir_result_t(Inode *in, const UserPerm& perms)
  : inode(in), offset(0), next_offset(2),
    release_count(0), ordered_count(0), cache_index(0), start_shared_gen(0),
    perms(perms)
{ }

void Client::_reset_faked_inos()
{
  ino_t start = 1024;
  free_faked_inos.clear();
  free_faked_inos.insert(start, (uint32_t)-1 - start + 1);
  last_used_faked_ino = 0;
  last_used_faked_root = 0;
#ifdef _WIN32
  // On Windows, sizeof(ino_t) is just 2. Despite that, most "native"
  // Windows structures, including Dokan ones, are using 64B identifiers.
  _use_faked_inos = false;
#else
  _use_faked_inos = sizeof(ino_t) < 8 || cct->_conf->client_use_faked_inos;
#endif
}

void Client::_assign_faked_ino(Inode *in)
{
  if (0 == last_used_faked_ino)
    last_used_faked_ino = last_used_faked_ino + 2048; // start(1024)~2048 reserved for _assign_faked_root
  interval_set<ino_t>::const_iterator it = free_faked_inos.lower_bound(last_used_faked_ino + 1);
  if (it == free_faked_inos.end() && last_used_faked_ino > 0) {
    last_used_faked_ino = 2048;
    it = free_faked_inos.lower_bound(last_used_faked_ino + 1);
  }
  ceph_assert(it != free_faked_inos.end());
  if (last_used_faked_ino < it.get_start()) {
    ceph_assert(it.get_len() > 0);
    last_used_faked_ino = it.get_start();
  } else {
    ++last_used_faked_ino;
    ceph_assert(it.get_start() + it.get_len() > last_used_faked_ino);
  }
  in->faked_ino = last_used_faked_ino;
  free_faked_inos.erase(in->faked_ino);
  faked_ino_map[in->faked_ino] = in->vino();
}

/*
 * In the faked mode, if you export multiple subdirectories,
 * you will see that the inode numbers of the exported subdirectories
 * are the same, so we distinguish the mount point by reserving
 * the "fake ids" between "1024~2048" and combining the last
 * 10 bits (0x3ff) of the "root inodes".
 */
void Client::_assign_faked_root(Inode *in)
{
  interval_set<ino_t>::const_iterator it = free_faked_inos.lower_bound(last_used_faked_root + 1);
  if (it == free_faked_inos.end() && last_used_faked_root > 0) {
    last_used_faked_root = 0;
    it = free_faked_inos.lower_bound(last_used_faked_root + 1);
  }
  ceph_assert(it != free_faked_inos.end());
  vinodeno_t inode_info = in->vino();
  uint64_t inode_num = (uint64_t)inode_info.ino;
  ldout(cct, 10) << "inode_num " << inode_num << " inode_num & 0x3ff=" << (inode_num & 0x3ff) << dendl;
  last_used_faked_root = it.get_start() + (inode_num & 0x3ff); // 0x3ff mask and get_start will not exceed 2048
  ceph_assert(it.get_start() + it.get_len() > last_used_faked_root);

  in->faked_ino = last_used_faked_root;
  free_faked_inos.erase(in->faked_ino);
  faked_ino_map[in->faked_ino] = in->vino();
}

void Client::_release_faked_ino(Inode *in)
{
  free_faked_inos.insert(in->faked_ino);
  faked_ino_map.erase(in->faked_ino);
}

vinodeno_t Client::_map_faked_ino(ino_t ino)
{
  vinodeno_t vino;
  if (ino == 1)
    vino = root->vino();
  else if (faked_ino_map.count(ino))
    vino = faked_ino_map[ino];
  else
    vino = vinodeno_t(0, CEPH_NOSNAP);
  ldout(cct, 10) << __func__ << " " << ino << " -> " << vino << dendl;
  return vino;
}

vinodeno_t Client::map_faked_ino(ino_t ino)
{
  std::scoped_lock lock(client_lock);
  return _map_faked_ino(ino);
}

// cons/des

Client::Client(Messenger *m, MonClient *mc, Objecter *objecter_)
  : Dispatcher(m->cct->get()),
    timer(m->cct, timer_lock, false),
    messenger(m),
    monclient(mc),
    objecter(objecter_),
    whoami(mc->get_global_id()),
    mount_state(CLIENT_UNMOUNTED, "Client::mountstate_lock"),
    initialize_state(CLIENT_NEW, "Client::initstate_lock"),
    cct_deleter{m->cct, [](CephContext *p) {p->put();}},
    async_ino_invalidator(m->cct),
    async_dentry_invalidator(m->cct),
    interrupt_finisher(m->cct),
    remount_finisher(m->cct),
    async_ino_releasor(m->cct),
    objecter_finisher(m->cct),
    m_command_hook(this),
    fscid(0)
{
  _reset_faked_inos();

  user_id = cct->_conf->client_mount_uid;
  group_id = cct->_conf->client_mount_gid;
  fuse_default_permissions = cct->_conf.get_val<bool>(
    "fuse_default_permissions");

  _collect_and_send_global_metrics = cct->_conf.get_val<bool>(
    "client_collect_and_send_global_metrics");

  mount_timeout = cct->_conf.get_val<std::chrono::seconds>(
    "client_mount_timeout");

  caps_release_delay = cct->_conf.get_val<std::chrono::seconds>(
    "client_caps_release_delay");

  if (cct->_conf->client_acl_type == "posix_acl")
    acl_type = POSIX_ACL;

  lru.lru_set_midpoint(cct->_conf->client_cache_mid);

  // file handles
  free_fd_set.insert(10, 1<<30);

  mdsmap.reset(new MDSMap);

  // osd interfaces
  writeback_handler.reset(new ObjecterWriteback(objecter, &objecter_finisher,
                                                &client_lock));
  objectcacher.reset(new ObjectCacher(cct, "libcephfs", *writeback_handler, client_lock,
                                      client_flush_set_callback,    // all commit callback
                                      (void*)this,
                                      cct->_conf->client_oc_size,
                                      cct->_conf->client_oc_max_objects,
                                      cct->_conf->client_oc_max_dirty,
                                      cct->_conf->client_oc_target_dirty,
                                      cct->_conf->client_oc_max_dirty_age,
                                      true));
}


Client::~Client()
{
  ceph_assert(ceph_mutex_is_not_locked(client_lock));

  // If the task is crashed or aborted and doesn't
  // get any chance to run the umount and shutdown.
  {
    std::scoped_lock l{client_lock};
    tick_thread_stopped = true;
    upkeep_cond.notify_one();
  }

  if (upkeeper.joinable())
    upkeeper.join();

  // It is necessary to hold client_lock, because any inode destruction
  // may call into ObjectCacher, which asserts that its lock (which is
  // client_lock) is held.
  std::scoped_lock l{client_lock};
  tear_down_cache();
}

void Client::tear_down_cache()
{
  // fd's
  for (auto &[fd, fh] : fd_map) {
    ldout(cct, 1) << __func__ << " forcing close of fh " << fd << " ino " << fh->inode->ino << dendl;
    _release_fh(fh);
  }
  fd_map.clear();

  while (!opened_dirs.empty()) {
    dir_result_t *dirp = *opened_dirs.begin();
    ldout(cct, 1) << __func__ << " forcing close of dir " << dirp << " ino " << dirp->inode->ino << dendl;
    _closedir(dirp);
  }

  // caps!
  // *** FIXME ***

  // empty lru
  trim_cache();
  ceph_assert(lru.lru_get_size() == 0);

  // close root ino
  ceph_assert(inode_map.size() <= 1 + root_parents.size());
  if (root && inode_map.size() == 1 + root_parents.size()) {
    root.reset();
  }

  ceph_assert(inode_map.empty());
}

inodeno_t Client::get_root_ino()
{
  std::scoped_lock l(client_lock);
  if (use_faked_inos())
    return root->faked_ino;
  else
    return root->ino;
}

Inode *Client::get_root()
{
  std::scoped_lock l(client_lock);
  root->ll_get();
  return root.get();
}


// debug crapola

void Client::dump_inode(Formatter *f, Inode *in, set<Inode*>& did, bool disconnected)
{
  filepath path;
  in->make_long_path(path);
  ldout(cct, 1) << "dump_inode: " << (disconnected ? "DISCONNECTED " : "")
                << "inode " << in->ino
                << " " << path
                << " ref " << in->get_nref()
                << " " << *in << dendl;

  if (f) {
    f->open_object_section("inode");
    f->dump_stream("path") << path;
    if (disconnected)
      f->dump_int("disconnected", 1);
    in->dump(f);
    f->close_section();
  }

  did.insert(in);
  if (in->dir) {
    ldout(cct, 1) << "  dir " << in->dir << " size " << in->dir->dentries.size() << dendl;
    for (ceph::unordered_map<string, Dentry*>::iterator it = in->dir->dentries.begin();
         it != in->dir->dentries.end();
         ++it) {
      ldout(cct, 1) << "   " << in->ino << " dn " << it->first << " " << it->second << " ref " << it->second->ref << dendl;
      if (f) {
        f->open_object_section("dentry");
        it->second->dump(f);
        f->close_section();
      }
      if (it->second->inode)
        dump_inode(f, it->second->inode.get(), did, false);
    }
  }
}

void Client::dump_cache(Formatter *f)
{
  set<Inode*> did;

  ldout(cct, 1) << __func__ << dendl;

  if (f)
    f->open_array_section("cache");

  if (root)
    dump_inode(f, root.get(), did, true);

  // make a second pass to catch anything disconnected
  for (ceph::unordered_map<vinodeno_t, Inode*>::iterator it = inode_map.begin();
       it != inode_map.end();
       ++it) {
    if (did.count(it->second))
      continue;
    dump_inode(f, it->second, did, true);
  }

  if (f)
    f->close_section();
}

void Client::dump_status(Formatter *f)
{
  ceph_assert(ceph_mutex_is_locked_by_me(client_lock));

  ldout(cct, 1) << __func__ << dendl;

  const epoch_t osd_epoch
    = objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch));

  if (f) {
    f->open_object_section("metadata");
    for (const auto& kv : metadata)
      f->dump_string(kv.first.c_str(), kv.second);
    f->close_section();

    f->dump_int("dentry_count", lru.lru_get_size());
    f->dump_int("dentry_pinned_count", lru.lru_get_num_pinned());
    f->dump_int("id", get_nodeid().v);
    entity_inst_t inst(messenger->get_myname(), messenger->get_myaddr_legacy());
    f->dump_object("inst", inst);
    f->dump_object("addr", inst.addr);
    f->dump_stream("inst_str") << inst.name << " " << inst.addr.get_legacy_str();
    f->dump_string("addr_str", inst.addr.get_legacy_str());
    f->dump_int("inode_count", inode_map.size());
    f->dump_int("mds_epoch", mdsmap->get_epoch());
    f->dump_int("osd_epoch", osd_epoch);
    f->dump_int("osd_epoch_barrier", cap_epoch_barrier);
    f->dump_bool("blocklisted", blocklisted);
    f->dump_string("fs_name", mdsmap->get_fs_name());
  }
}

void Client::_pre_init()
{
  timer.init();

  objecter_finisher.start();
  filer.reset(new Filer(objecter, &objecter_finisher));

  objectcacher->start();
}

int Client::init()
{
  RWRef_t iref_writer(initialize_state, CLIENT_INITIALIZING, false);
  ceph_assert(iref_writer.is_first_writer());

  _pre_init();
  {
    std::scoped_lock l{client_lock};
    messenger->add_dispatcher_tail(this);
  }
  _finish_init();
  iref_writer.update_state(CLIENT_INITIALIZED);
  return 0;
}

void Client::_finish_init()
{
  {
    std::scoped_lock l{client_lock};
    // logger
    PerfCountersBuilder plb(cct, "client", l_c_first, l_c_last);
    plb.add_time_avg(l_c_reply, "reply", "Latency of receiving a reply on metadata request");
    plb.add_time_avg(l_c_lat, "lat", "Latency of processing a metadata request");
    plb.add_time_avg(l_c_wrlat, "wrlat", "Latency of a file data write operation");
    plb.add_time_avg(l_c_read, "rdlat", "Latency of a file data read operation");
    plb.add_time_avg(l_c_fsync, "fsync", "Latency of a file sync operation");
    // average, standard deviation mds/r/w/ latencies
    plb.add_time(l_c_md_avg, "mdavg", "Average latency for processing metadata requests");
    plb.add_u64(l_c_md_sqsum, "mdsqsum", "Sum of squares (to calculate variability/stdev) for metadata requests");
    plb.add_u64(l_c_md_ops, "mdops", "Total metadata IO operations");
    plb.add_time(l_c_rd_avg, "readavg", "Average latency for processing read requests");
    plb.add_u64(l_c_rd_sqsum, "readsqsum", "Sum of squares (to calculate variability/stdev) for read requests");
    plb.add_u64(l_c_rd_ops, "rdops", "Total read IO operations");
    plb.add_time(l_c_wr_avg, "writeavg", "Average latency for processing write requests");
    plb.add_u64(l_c_wr_sqsum, "writesqsum", "Sum of squares (to calculate variability/stdev) for write requests");
    plb.add_u64(l_c_wr_ops, "wrops", "Total write IO operations");
    logger.reset(plb.create_perf_counters());
    cct->get_perfcounters_collection()->add(logger.get());
  }

  cct->_conf.add_observer(this);

  AdminSocket* admin_socket = cct->get_admin_socket();
  int ret = admin_socket->register_command("mds_requests",
                                           &m_command_hook,
                                           "show in-progress mds requests");
  if (ret < 0) {
    lderr(cct) << "error registering admin socket command: "
               << cpp_strerror(-ret) << dendl;
  }
  ret = admin_socket->register_command("mds_sessions "
                                       "name=cap_dump,type=CephBool,req=false",
                                       &m_command_hook,
                                       "show mds session state");
  if (ret < 0) {
    lderr(cct) << "error registering admin socket command: "
               << cpp_strerror(-ret) << dendl;
  }
  ret = admin_socket->register_command("dump_cache",
                                       &m_command_hook,
                                       "show in-memory metadata cache contents");
  if (ret < 0) {
    lderr(cct) << "error registering admin socket command: "
               << cpp_strerror(-ret) << dendl;
  }
  ret = admin_socket->register_command("kick_stale_sessions",
                                       &m_command_hook,
                                       "kick sessions that were remote reset");
  if (ret < 0) {
    lderr(cct) << "error registering admin socket command: "
               << cpp_strerror(-ret) << dendl;
  }
  ret = admin_socket->register_command("status",
                                       &m_command_hook,
                                       "show overall client status");
  if (ret < 0) {
    lderr(cct) << "error registering admin socket command: "
               << cpp_strerror(-ret) << dendl;
  }
}

void Client::shutdown()
{
  ldout(cct, 1) << __func__ << dendl;

  // If we were not mounted, but were being used for sending
  // MDS commands, we may have sessions that need closing.
  {
    std::scoped_lock l{client_lock};

    // To make sure the tick thread will be stopped before
    // destructing the Client, just in case the _mount()
    // failed but didn't get a chance to stop the tick
    // thread
    tick_thread_stopped = true;
    upkeep_cond.notify_one();

    _close_sessions();
  }
  cct->_conf.remove_observer(this);

  cct->get_admin_socket()->unregister_commands(&m_command_hook);

  if (ino_invalidate_cb) {
    ldout(cct, 10) << "shutdown stopping cache invalidator finisher" << dendl;
    async_ino_invalidator.wait_for_empty();
    async_ino_invalidator.stop();
  }

  if (dentry_invalidate_cb) {
    ldout(cct, 10) << "shutdown stopping dentry invalidator finisher" << dendl;
    async_dentry_invalidator.wait_for_empty();
    async_dentry_invalidator.stop();
  }

  if (switch_interrupt_cb) {
    ldout(cct, 10) << "shutdown stopping interrupt finisher" << dendl;
    interrupt_finisher.wait_for_empty();
    interrupt_finisher.stop();
  }

  if (remount_cb) {
    ldout(cct, 10) << "shutdown stopping remount finisher" << dendl;
    remount_finisher.wait_for_empty();
    remount_finisher.stop();
  }

  if (ino_release_cb) {
    ldout(cct, 10) << "shutdown stopping inode release finisher" << dendl;
    async_ino_releasor.wait_for_empty();
    async_ino_releasor.stop();
  }

  objectcacher->stop();  // outside of client_lock! this does a join.

  /*
   * We are shutting down the client.
   *
   * Just declare the state to CLIENT_NEW to block and fail any
   * new incoming "reader" and then try to wait for all the in-flight
   * "readers" to finish.
   */
  RWRef_t iref_writer(initialize_state, CLIENT_NEW, false);
  if (!iref_writer.is_first_writer())
    return;
  iref_writer.wait_readers_done();

  {
    std::scoped_lock l(timer_lock);
    timer.shutdown();
  }

  objecter_finisher.wait_for_empty();
  objecter_finisher.stop();

  if (logger) {
    cct->get_perfcounters_collection()->remove(logger.get());
    logger.reset();
  }
}

void Client::update_io_stat_metadata(utime_t latency) {
  auto lat_nsec = latency.to_nsec();
  // old values are used to compute new ones
  auto o_avg = logger->tget(l_c_md_avg).to_nsec();
  auto o_sqsum = logger->get(l_c_md_sqsum);

  auto n_avg = calc_average(o_avg, lat_nsec, nr_metadata_request);
  auto n_sqsum = calc_sq_sum(o_sqsum, o_avg, n_avg, lat_nsec,
                             nr_metadata_request);

  logger->tinc(l_c_lat, latency);
  logger->tinc(l_c_reply, latency);
  utime_t avg;
  avg.set_from_double(n_avg / 1000000000);
  logger->tset(l_c_md_avg, avg);
  logger->set(l_c_md_sqsum, n_sqsum);
  logger->set(l_c_md_ops, nr_metadata_request);
}

void Client::update_io_stat_read(utime_t latency) {
  auto lat_nsec = latency.to_nsec();
  // old values are used to compute new ones
  auto o_avg = logger->tget(l_c_rd_avg).to_nsec();
  auto o_sqsum = logger->get(l_c_rd_sqsum);

  auto n_avg = calc_average(o_avg, lat_nsec, nr_read_request);
  auto n_sqsum = calc_sq_sum(o_sqsum, o_avg, n_avg, lat_nsec,
                             nr_read_request);

  logger->tinc(l_c_read, latency);
  utime_t avg;
  avg.set_from_double(n_avg / 1000000000);
  logger->tset(l_c_rd_avg, avg);
  logger->set(l_c_rd_sqsum, n_sqsum);
  logger->set(l_c_rd_ops, nr_read_request);
}

void Client::update_io_stat_write(utime_t latency) {
  auto lat_nsec = latency.to_nsec();
  // old values are used to compute new ones
  auto o_avg = logger->tget(l_c_wr_avg).to_nsec();
  auto o_sqsum = logger->get(l_c_wr_sqsum);

  auto n_avg = calc_average(o_avg, lat_nsec, nr_write_request);
  auto n_sqsum = calc_sq_sum(o_sqsum, o_avg, n_avg, lat_nsec,
                             nr_write_request);

  logger->tinc(l_c_wrlat, latency);
  utime_t avg;
  avg.set_from_double(n_avg / 1000000000);
  logger->tset(l_c_wr_avg, avg);
  logger->set(l_c_wr_sqsum, n_sqsum);
  logger->set(l_c_wr_ops, nr_write_request);
}

// ===================
// metadata cache stuff

void Client::trim_cache(bool trim_kernel_dcache)
{
  uint64_t max = cct->_conf->client_cache_size;
  ldout(cct, 20) << "trim_cache size " << lru.lru_get_size() << " max " << max << dendl;
  unsigned last = 0;
  while (lru.lru_get_size() != last) {
    last = lru.lru_get_size();

    if (!is_unmounting() && lru.lru_get_size() <= max)
      break;

    // trim!
    Dentry *dn = static_cast<Dentry*>(lru.lru_get_next_expire());
    if (!dn)
      break;  // done

    trim_dentry(dn);
  }

  if (trim_kernel_dcache && lru.lru_get_size() > max)
    _invalidate_kernel_dcache();

  // hose root?
  if (lru.lru_get_size() == 0 && root && root->get_nref() == 1 &&
      inode_map.size() == 1 + root_parents.size()) {
    ldout(cct, 15) << "trim_cache trimmed root " << root << dendl;
    root.reset();
  }
}

void Client::trim_cache_for_reconnect(MetaSession *s)
{
  mds_rank_t mds = s->mds_num;
  ldout(cct, 20) << __func__ << " mds." << mds << dendl;

  int trimmed = 0;
  list<Dentry*> skipped;
  while (lru.lru_get_size() > 0) {
    Dentry *dn = static_cast<Dentry*>(lru.lru_expire());
    if (!dn)
      break;

    if ((dn->inode && dn->inode->caps.count(mds)) ||
        dn->dir->parent_inode->caps.count(mds)) {
      trim_dentry(dn);
      trimmed++;
    } else
      skipped.push_back(dn);
  }

  for (list<Dentry*>::iterator p = skipped.begin(); p != skipped.end(); ++p)
    lru.lru_insert_mid(*p);

  ldout(cct, 20) << __func__ << " mds." << mds
                 << " trimmed " << trimmed << " dentries" << dendl;

  if (s->caps.size() > 0)
    _invalidate_kernel_dcache();
}

void Client::trim_dentry(Dentry *dn)
{
  ldout(cct, 15) << "trim_dentry unlinking dn " << dn->name
                 << " in dir "
                 << std::hex << dn->dir->parent_inode->ino << std::dec
                 << dendl;
  if (dn->inode) {
    Inode *diri = dn->dir->parent_inode;
    clear_dir_complete_and_ordered(diri, true);
  }
  unlink(dn, false, false);  // drop dir, drop dentry
}


void Client::update_inode_file_size(Inode *in, int issued, uint64_t size,
                                    uint64_t truncate_seq, uint64_t truncate_size)
{
  uint64_t prior_size = in->size;

  if (truncate_seq > in->truncate_seq ||
      (truncate_seq == in->truncate_seq && size > in->size)) {
    ldout(cct, 10) << "size " << in->size << " -> " << size << dendl;
    in->size = size;
    in->reported_size = size;
    if (truncate_seq != in->truncate_seq) {
      ldout(cct, 10) << "truncate_seq " << in->truncate_seq << " -> "
                     << truncate_seq << dendl;
      in->truncate_seq = truncate_seq;
      in->oset.truncate_seq = truncate_seq;

      // truncate cached file data
      if (prior_size > size) {
        _invalidate_inode_cache(in, size, prior_size - size);
      }
    }

    // truncate inline data
    if (in->inline_version < CEPH_INLINE_NONE) {
      uint32_t len = in->inline_data.length();
      if (size < len)
        in->inline_data.splice(size, len - size);
    }
  }
  if (truncate_seq >= in->truncate_seq &&
      in->truncate_size != truncate_size) {
    if (in->is_file()) {
      ldout(cct, 10) << "truncate_size " << in->truncate_size << " -> "
                     << truncate_size << dendl;
      in->truncate_size = truncate_size;
      in->oset.truncate_size = truncate_size;
    } else {
      ldout(cct, 0) << "Hmmm, truncate_seq && truncate_size changed on non-file inode!" << dendl;
    }
  }
}

void Client::update_inode_file_time(Inode *in, int issued, uint64_t time_warp_seq,
                                    utime_t ctime, utime_t mtime, utime_t atime)
{
  ldout(cct, 10) << __func__ << " " << *in << " " << ccap_string(issued)
                 << " ctime " << ctime << " mtime " << mtime << dendl;

  if (time_warp_seq > in->time_warp_seq)
    ldout(cct, 10) << " mds time_warp_seq " << time_warp_seq
                   << " is higher than local time_warp_seq "
                   << in->time_warp_seq << dendl;

  int warn = false;
  // be careful with size, mtime, atime
  if (issued & (CEPH_CAP_FILE_EXCL|
                CEPH_CAP_FILE_WR|
                CEPH_CAP_FILE_BUFFER|
                CEPH_CAP_AUTH_EXCL|
                CEPH_CAP_XATTR_EXCL)) {
    ldout(cct, 30) << "Yay have enough caps to look at our times" << dendl;
    if (ctime > in->ctime)
      in->ctime = ctime;
    if (time_warp_seq > in->time_warp_seq) {
      // the mds updated times, so take those!
      in->mtime = mtime;
      in->atime = atime;
      in->time_warp_seq = time_warp_seq;
    } else if (time_warp_seq == in->time_warp_seq) {
      // take max times
      if (mtime > in->mtime)
        in->mtime = mtime;
      if (atime > in->atime)
        in->atime = atime;
    } else if (issued & CEPH_CAP_FILE_EXCL) {
      // ignore mds values as we have a higher seq
    } else
      warn = true;
  } else {
    ldout(cct, 30) << "Don't have enough caps, just taking mds' time values" << dendl;
    if (time_warp_seq >= in->time_warp_seq) {
      in->ctime = ctime;
      in->mtime = mtime;
      in->atime = atime;
      in->time_warp_seq = time_warp_seq;
    } else
      warn = true;
  }
  if (warn) {
    ldout(cct, 0) << "WARNING: " << *in << " mds time_warp_seq "
                  << time_warp_seq << " is lower than local time_warp_seq "
                  << in->time_warp_seq
                  << dendl;
  }
}

void Client::_fragmap_remove_non_leaves(Inode *in)
{
  for (map<frag_t,int>::iterator p = in->fragmap.begin(); p != in->fragmap.end(); )
    if (!in->dirfragtree.is_leaf(p->first))
      in->fragmap.erase(p++);
    else
      ++p;
}

void Client::_fragmap_remove_stopped_mds(Inode *in, mds_rank_t mds)
{
  for (auto p = in->fragmap.begin(); p != in->fragmap.end(); )
    if (p->second == mds)
      in->fragmap.erase(p++);
    else
      ++p;
}

Inode * Client::add_update_inode(InodeStat *st, utime_t from,
                                 MetaSession *session,
                                 const UserPerm& request_perms)
{
  Inode *in;
  bool was_new = false;
  if (inode_map.count(st->vino)) {
    in = inode_map[st->vino];
    ldout(cct, 12) << __func__ << " had " << *in << " caps " << ccap_string(st->cap.caps) << dendl;
  } else {
    in = new Inode(this, st->vino, &st->layout);
    inode_map[st->vino] = in;

    if (use_faked_inos())
      _assign_faked_ino(in);

    if (!root) {
      root = in;
      if (use_faked_inos())
        _assign_faked_root(root.get());
      root_ancestor = in;
      cwd = root;
    } else if (is_mounting()) {
      root_parents[root_ancestor] = in;
      root_ancestor = in;
    }

    // immutable bits
    in->ino = st->vino.ino;
    in->snapid = st->vino.snapid;
    in->mode = st->mode & S_IFMT;
    was_new = true;
  }

  in->rdev = st->rdev;
  if (in->is_symlink())
    in->symlink = st->symlink;

  // only update inode if mds info is strictly newer, or it is the same and projected (odd).
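  // Added commentary (illustrative, not upstream): the low bit of the local
  // version acts as a "projected" marker, so the check below masks it off
  // with `in->version & ~1` before comparing against the MDS version. E.g. a
  // local version of 5 (odd, projected) compares as 4 and is refreshed by an
  // MDS version of 5, while an even local version 4 requires st->version > 4.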
  bool new_version = false;
  if (in->version == 0 ||
      ((st->cap.flags & CEPH_CAP_FLAG_AUTH) &&
       (in->version & ~1) < st->version))
    new_version = true;

  int issued;
  in->caps_issued(&issued);
  issued |= in->caps_dirty();
  int new_issued = ~issued & (int)st->cap.caps;

  bool need_snapdir_attr_refresh = false;
  if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
      !(issued & CEPH_CAP_AUTH_EXCL)) {
    in->mode = st->mode;
    in->uid = st->uid;
    in->gid = st->gid;
    in->btime = st->btime;
    in->snap_btime = st->snap_btime;
    in->snap_metadata = st->snap_metadata;
    in->fscrypt_auth = st->fscrypt_auth;
    need_snapdir_attr_refresh = true;
  }

  if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
      !(issued & CEPH_CAP_LINK_EXCL)) {
    in->nlink = st->nlink;
  }

  if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
    need_snapdir_attr_refresh = true;
    update_inode_file_time(in, issued, st->time_warp_seq,
                           st->ctime, st->mtime, st->atime);
  }

  if (new_version ||
      (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
    in->layout = st->layout;
    in->fscrypt_file = st->fscrypt_file;
    update_inode_file_size(in, issued, st->size, st->truncate_seq, st->truncate_size);
  }

  if (in->is_dir()) {
    if (new_version || (new_issued & CEPH_CAP_FILE_SHARED)) {
      in->dirstat = st->dirstat;
    }
    // dir_layout/rstat/quota are not tracked by capability, update them only if
    // the inode stat is from auth mds
    if (new_version || (st->cap.flags & CEPH_CAP_FLAG_AUTH)) {
      in->dir_layout = st->dir_layout;
      ldout(cct, 20) << " dir hash is " << (int)in->dir_layout.dl_dir_hash << dendl;
      in->rstat = st->rstat;
      in->quota = st->quota;
      in->dir_pin = st->dir_pin;
    }
    // move me if/when version reflects fragtree changes.
    if (in->dirfragtree != st->dirfragtree) {
      in->dirfragtree = st->dirfragtree;
      _fragmap_remove_non_leaves(in);
    }
  }

  if ((in->xattr_version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
      st->xattrbl.length() &&
      st->xattr_version > in->xattr_version) {
    auto p = st->xattrbl.cbegin();
    decode(in->xattrs, p);
    in->xattr_version = st->xattr_version;
    need_snapdir_attr_refresh = true;
  }

  if (st->inline_version > in->inline_version) {
    in->inline_data = st->inline_data;
    in->inline_version = st->inline_version;
  }

  /* always take a newer change attr */
  ldout(cct, 12) << __func__ << " client inode change_attr: " << in->change_attr
                 << " , mds inodestat change_attr: " << st->change_attr << dendl;
  if (st->change_attr > in->change_attr)
    in->change_attr = st->change_attr;

  if (st->version > in->version)
    in->version = st->version;

  if (was_new)
    ldout(cct, 12) << __func__ << " adding " << *in << " caps " << ccap_string(st->cap.caps) << dendl;

  if (!st->cap.caps)
    return in;   // as with readdir returning inodes in different snaprealms (no caps!)

  if (in->snapid == CEPH_NOSNAP) {
    add_update_cap(in, session, st->cap.cap_id, st->cap.caps, st->cap.wanted,
                   st->cap.seq, st->cap.mseq, inodeno_t(st->cap.realm),
                   st->cap.flags, request_perms);
    if (in->auth_cap && in->auth_cap->session == session) {
      in->max_size = st->max_size;
      in->rstat = st->rstat;
    }

    // setting I_COMPLETE needs to happen after adding the cap
    if (in->is_dir() &&
        (st->cap.caps & CEPH_CAP_FILE_SHARED) &&
        (issued & CEPH_CAP_FILE_EXCL) == 0 &&
        in->dirstat.nfiles == 0 &&
        in->dirstat.nsubdirs == 0) {
      ldout(cct, 10) << " marking (I_COMPLETE|I_DIR_ORDERED) on empty dir " << *in << dendl;
      in->flags |= I_COMPLETE | I_DIR_ORDERED;
      if (in->dir) {
        ldout(cct, 10) << " dir is open on empty dir " << in->ino << " with "
                       << in->dir->dentries.size() << " entries, marking all dentries null" << dendl;
        in->dir->readdir_cache.clear();
        for (const auto& p : in->dir->dentries) {
          unlink(p.second, true, true);  // keep dir, keep dentry
        }
        if (in->dir->dentries.empty())
          close_dir(in->dir);
      }
    }
  } else {
    in->snap_caps |= st->cap.caps;
  }

  if (need_snapdir_attr_refresh && in->is_dir() && in->snapid == CEPH_NOSNAP) {
    vinodeno_t vino(in->ino, CEPH_SNAPDIR);
    if (inode_map.count(vino)) {
      refresh_snapdir_attrs(inode_map[vino], in);
    }
  }

  return in;
}


/*
 * insert_dentry_inode - insert + link a single dentry + inode into the metadata cache.
 */
Dentry *Client::insert_dentry_inode(Dir *dir, const string& dname, LeaseStat *dlease,
                                    Inode *in, utime_t from, MetaSession *session,
                                    Dentry *old_dentry)
{
  Dentry *dn = NULL;
  if (dir->dentries.count(dname))
    dn = dir->dentries[dname];

  ldout(cct, 12) << __func__ << " '" << dname << "' vino " << in->vino()
                 << " in dir " << dir->parent_inode->vino() << " dn " << dn
                 << dendl;

  if (dn && dn->inode) {
    if (dn->inode->vino() == in->vino()) {
      touch_dn(dn);
      ldout(cct, 12) << " had dentry " << dname
                     << " with correct vino " << dn->inode->vino()
                     << dendl;
    } else {
      ldout(cct, 12) << " had dentry " << dname
                     << " with WRONG vino " << dn->inode->vino()
                     << dendl;
      unlink(dn, true, true);  // keep dir, keep dentry
    }
  }

  if (!dn || !dn->inode) {
    InodeRef tmp_ref(in);
    if (old_dentry) {
      if (old_dentry->dir != dir) {
        Inode *old_diri = old_dentry->dir->parent_inode;
        clear_dir_complete_and_ordered(old_diri, false);
      }
      unlink(old_dentry, dir == old_dentry->dir, false);  // drop dentry, keep dir open if it's the same dir
    }
    Inode *diri = dir->parent_inode;
    clear_dir_complete_and_ordered(diri, false);
    dn = link(dir, dname, in, dn);

    if (old_dentry) {
      dn->is_renaming = false;
      signal_cond_list(waiting_for_rename);
    }
  }

  update_dentry_lease(dn, dlease, from, session);
  return dn;
}

void Client::update_dentry_lease(Dentry *dn, LeaseStat *dlease, utime_t from, MetaSession *session)
{
  utime_t dttl = from;
  dttl += (float)dlease->duration_ms / 1000.0;

  ldout(cct, 15) << __func__ << " " << *dn << " " << *dlease << " from " << from << dendl;

  ceph_assert(dn);

  if (dlease->mask & CEPH_LEASE_VALID) {
    if (dttl > dn->lease_ttl) {
      ldout(cct, 10) << "got dentry lease on " << dn->name
                     << " dur " << dlease->duration_ms << "ms ttl " << dttl << dendl;
      dn->lease_ttl = dttl;
      dn->lease_mds = session->mds_num;
      dn->lease_seq = dlease->seq;
      dn->lease_gen = session->cap_gen;
    }
  }
  dn->cap_shared_gen = dn->dir->parent_inode->shared_gen;
  if (dlease->mask & CEPH_LEASE_PRIMARY_LINK)
    dn->mark_primary();
  dn->alternate_name = std::move(dlease->alternate_name);
}


/*
 * update MDS location cache for a single inode
 */
void Client::update_dir_dist(Inode *in, DirStat *dst, mds_rank_t from)
{
  // auth
  ldout(cct, 20) << "got dirfrag map for " << in->ino << " frag " << dst->frag
                 << " to mds " << dst->auth << dendl;
  if (dst->auth >= 0) {
    in->fragmap[dst->frag] = dst->auth;
  } else {
    in->fragmap.erase(dst->frag);
  }
  if (!in->dirfragtree.is_leaf(dst->frag)) {
    in->dirfragtree.force_to_leaf(cct, dst->frag);
    _fragmap_remove_non_leaves(in);
  }

  // replicated, only update from auth mds reply
  if (from == dst->auth) {
    in->dir_replicated = !dst->dist.empty();
    if (!dst->dist.empty())
      in->frag_repmap[dst->frag].assign(dst->dist.begin(), dst->dist.end());
    else
      in->frag_repmap.erase(dst->frag);
  }
}

void Client::clear_dir_complete_and_ordered(Inode *diri, bool complete)
{
  if (complete)
    diri->dir_release_count++;
  else
    diri->dir_ordered_count++;
  if (diri->flags & I_COMPLETE) {
    if (complete) {
      ldout(cct, 10) << " clearing (I_COMPLETE|I_DIR_ORDERED) on "
                     << *diri << dendl;
      diri->flags &= ~(I_COMPLETE | I_DIR_ORDERED);
    } else {
      if (diri->flags & I_DIR_ORDERED) {
        ldout(cct, 10) << " clearing I_DIR_ORDERED on " << *diri << dendl;
        diri->flags &= ~I_DIR_ORDERED;
      }
    }
    if (diri->dir)
      diri->dir->readdir_cache.clear();
  }
}

/*
 * insert results from readdir or lssnap into the metadata cache.
 */
void Client::insert_readdir_results(MetaRequest *request, MetaSession *session,
                                    Inode *diri, Inode *diri_other)
{
  auto& reply = request->reply;
  ConnectionRef con = request->reply->get_connection();
  uint64_t features;
  if (session->mds_features.test(CEPHFS_FEATURE_REPLY_ENCODING)) {
    features = (uint64_t)-1;
  } else {
    features = con->get_features();
  }

  dir_result_t *dirp = request->dirp;
  ceph_assert(dirp);

  // the extra buffer list is only set for readdir, lssnap and
  // readdir_snapdiff replies
  auto p = reply->get_extra_bl().cbegin();
  if (!p.end()) {
    // snapdir?
    if (request->head.op == CEPH_MDS_OP_LSSNAP) {
      ceph_assert(diri);
      diri = open_snapdir(diri);
    }
    bool snapdiff_req = request->head.op == CEPH_MDS_OP_READDIR_SNAPDIFF;
    frag_t fg;
    unsigned offset_hash;
    if (snapdiff_req) {
      fg = (unsigned)request->head.args.snapdiff.frag;
      offset_hash = (unsigned)request->head.args.snapdiff.offset_hash;
    } else {
      fg = (unsigned)request->head.args.readdir.frag;
      offset_hash = (unsigned)request->head.args.readdir.offset_hash;
    }

    // only open dir if we're actually adding stuff to it!
    Dir *dir = diri->open_dir();
    ceph_assert(dir);
    // open opponent dir for snapdiff if any
    Dir *dir_other = nullptr;
    if (snapdiff_req) {
      ceph_assert(diri_other);
      dir_other = diri_other->open_dir();
      ceph_assert(dir_other);
    }

    // dirstat
    DirStat dst(p, features);
    __u32 numdn;
    __u16 flags;
    decode(numdn, p);
    decode(flags, p);

    bool end = ((unsigned)flags & CEPH_READDIR_FRAG_END);
    bool hash_order = ((unsigned)flags & CEPH_READDIR_HASH_ORDER);

    unsigned readdir_offset = dirp->next_offset;
    string readdir_start = dirp->last_name;
    ceph_assert(!readdir_start.empty() || readdir_offset == 2);

    unsigned last_hash = 0;
    if (hash_order) {
      if (!readdir_start.empty()) {
        last_hash = ceph_frag_value(diri->hash_dentry_name(readdir_start));
      } else if (flags & CEPH_READDIR_OFFSET_HASH) {
        /* mds understands offset_hash */
        last_hash = offset_hash;
      }
    }

    if (fg != dst.frag) {
      ldout(cct, 10) << "insert_trace got new frag " << fg << " -> " << dst.frag << dendl;
      fg = dst.frag;
      if (!hash_order) {
        readdir_offset = 2;
        readdir_start.clear();
        dirp->offset = dir_result_t::make_fpos(fg, readdir_offset, false);
      }
    }

    ldout(cct, 10) << __func__ << " " << numdn << " readdir items, end=" << end
                   << ", hash_order=" << hash_order
                   << ", readdir_start " << readdir_start
                   << ", last_hash " << last_hash
                   << ", next_offset " << readdir_offset << dendl;

    if (diri->snapid != CEPH_SNAPDIR &&
        fg.is_leftmost() && readdir_offset == 2 &&
        !(hash_order && last_hash)) {
      dirp->release_count = diri->dir_release_count;
      dirp->ordered_count = diri->dir_ordered_count;
      dirp->start_shared_gen = diri->shared_gen;
      dirp->cache_index = 0;
    }

    dirp->buffer_frag = fg;

    _readdir_drop_dirp_buffer(dirp);
    dirp->buffer.reserve(numdn);

    string dname;
    LeaseStat dlease;
    for (unsigned i=0; i<numdn; i++) {
      decode(dname, p);
      dlease.decode(p, features);
      InodeStat ist(p, features);

      ldout(cct, 15) << "" << i << ": '" << dname << "'" << dendl;

      Inode *in = add_update_inode(&ist, request->sent_stamp, session,
                                   request->perms);
      auto *effective_dir = dir;
      auto *effective_diri = diri;

      if (snapdiff_req && in->snapid != diri->snapid) {
        ceph_assert(diri_other);
        ceph_assert(dir_other);
        effective_diri = diri_other;
        effective_dir = dir_other;
      }
      Dentry *dn;
      if (effective_dir->dentries.count(dname)) {
        Dentry *olddn = effective_dir->dentries[dname];
        if (olddn->inode != in) {
          // replace incorrect dentry
          unlink(olddn, true, true);  // keep dir, dentry
          dn = link(effective_dir, dname, in, olddn);
          ceph_assert(dn == olddn);
        } else {
          // keep existing dn
          dn = olddn;
          touch_dn(dn);
        }
      } else {
        // new dn
        dn = link(effective_dir, dname, in, NULL);
      }
      dn->alternate_name = std::move(dlease.alternate_name);

      update_dentry_lease(dn, &dlease, request->sent_stamp, session);
      if (hash_order) {
        unsigned hash = ceph_frag_value(effective_diri->hash_dentry_name(dname));
        if (hash != last_hash)
          readdir_offset = 2;
        last_hash = hash;
        dn->offset = dir_result_t::make_fpos(hash, readdir_offset++, true);
      } else {
        dn->offset = dir_result_t::make_fpos(fg, readdir_offset++, false);
      }
      // add to readdir cache
      if (!snapdiff_req &&
          dirp->release_count == effective_diri->dir_release_count &&
          dirp->ordered_count == effective_diri->dir_ordered_count &&
          dirp->start_shared_gen == effective_diri->shared_gen) {
        if (dirp->cache_index == effective_dir->readdir_cache.size()) {
          if (i == 0) {
            ceph_assert(!dirp->inode->is_complete_and_ordered());
            dir->readdir_cache.reserve(dirp->cache_index + numdn);
          }
          effective_dir->readdir_cache.push_back(dn);
        } else if (dirp->cache_index < effective_dir->readdir_cache.size()) {
          if (dirp->inode->is_complete_and_ordered())
            ceph_assert(effective_dir->readdir_cache[dirp->cache_index] == dn);
          else
            effective_dir->readdir_cache[dirp->cache_index] = dn;
        } else {
          ceph_abort_msg("unexpected readdir buffer idx");
        }
        dirp->cache_index++;
      }
      // add to cached result list
      dirp->buffer.push_back(dir_result_t::dentry(dn->offset, dname, dn->alternate_name, in));
      ldout(cct, 15) << __func__ << "  " << hex << dn->offset << dec << ": '" << dname << "' -> " << in->ino << dendl;
    }

    if (numdn > 0)
      dirp->last_name = dname;
    if (end)
      dirp->next_offset = 2;
    else
      dirp->next_offset = readdir_offset;

    if (dir->is_empty())
      close_dir(dir);
    if (dir_other && dir_other->is_empty())
      close_dir(dir_other);
  }
}

/** insert_trace
 *
 * insert a trace from a MDS reply into the cache.
 */
Inode* Client::insert_trace(MetaRequest *request, MetaSession *session)
{
  auto& reply = request->reply;
  int op = request->get_op();

  ldout(cct, 10) << "insert_trace from " << request->sent_stamp << " mds." << session->mds_num
                 << " is_target=" << (int)reply->head.is_target
                 << " is_dentry=" << (int)reply->head.is_dentry
                 << dendl;

  auto p = reply->get_trace_bl().cbegin();
  if (request->got_unsafe) {
    ldout(cct, 10) << "insert_trace -- already got unsafe; ignoring" << dendl;
    ceph_assert(p.end());
    return NULL;
  }

  if (p.end()) {
    ldout(cct, 10) << "insert_trace -- no trace" << dendl;

    Dentry *d = request->dentry();
    if (d) {
      Inode *diri = d->dir->parent_inode;
      clear_dir_complete_and_ordered(diri, true);
    }

    if (d && reply->get_result() == 0) {
      if (op == CEPH_MDS_OP_RENAME) {
        // rename
        Dentry *od = request->old_dentry();
        ldout(cct, 10) << " unlinking rename src dn " << od << " for traceless reply" << dendl;
        ceph_assert(od);
        unlink(od, true, true);  // keep dir, dentry
      } else if (op == CEPH_MDS_OP_RMDIR ||
                 op == CEPH_MDS_OP_UNLINK) {
        // unlink, rmdir
        ldout(cct, 10) << " unlinking unlink/rmdir dn " << d << " for traceless reply" << dendl;
        unlink(d, true, true);  // keep dir, dentry
      }
    }
    return NULL;
  }

  ConnectionRef con = request->reply->get_connection();
  uint64_t features;
  if (session->mds_features.test(CEPHFS_FEATURE_REPLY_ENCODING)) {
    features = (uint64_t)-1;
  } else {
    features = con->get_features();
  }
  ldout(cct, 10) << " features 0x" << hex << features << dec << dendl;

  // snap trace
  SnapRealm *realm = NULL;
  if (reply->snapbl.length())
    update_snap_trace(session, reply->snapbl, &realm);

  ldout(cct, 10) << " hrm "
                 << " is_target=" << (int)reply->head.is_target
                 << " is_dentry=" << (int)reply->head.is_dentry
                 << dendl;

  InodeStat dirst;
  DirStat dst;
  string dname;
  LeaseStat dlease;
  InodeStat ist;

  if (reply->head.is_dentry) {
    dirst.decode(p, features);
    dst.decode(p, features);
    decode(dname, p);
    dlease.decode(p, features);
  }

  Inode *in = 0;
  if (reply->head.is_target) {
    ist.decode(p, features);
    if (cct->_conf->client_debug_getattr_caps) {
      unsigned wanted = 0;
      if (op == CEPH_MDS_OP_GETATTR || op == CEPH_MDS_OP_LOOKUP)
        wanted = request->head.args.getattr.mask;
      else if (op == CEPH_MDS_OP_OPEN || op == CEPH_MDS_OP_CREATE)
        wanted = request->head.args.open.mask;

      if ((wanted & CEPH_CAP_XATTR_SHARED) &&
          !(ist.xattr_version > 0 && ist.xattrbl.length() > 0))
        ceph_abort_msg("MDS reply does not contain xattrs");
    }

    in = add_update_inode(&ist, request->sent_stamp, session,
                          request->perms);
  }

  Inode *diri = NULL;
  if (reply->head.is_dentry) {
    diri = add_update_inode(&dirst, request->sent_stamp, session,
                            request->perms);
    mds_rank_t from_mds = mds_rank_t(reply->get_source().num());
    update_dir_dist(diri, &dst, from_mds);  // dir stat info is attached to ..

    if (in) {
      Dir *dir = diri->open_dir();
      insert_dentry_inode(dir, dname, &dlease, in, request->sent_stamp, session,
                          (op == CEPH_MDS_OP_RENAME) ? request->old_dentry() : NULL);
    } else {
      Dentry *dn = NULL;
      if (diri->dir && diri->dir->dentries.count(dname)) {
        dn = diri->dir->dentries[dname];
        if (dn->inode) {
          clear_dir_complete_and_ordered(diri, false);
          unlink(dn, true, true);  // keep dir, dentry
        }
      }
      if (dlease.duration_ms > 0) {
        if (!dn) {
          Dir *dir = diri->open_dir();
          dn = link(dir, dname, NULL, NULL);
        }
        update_dentry_lease(dn, &dlease, request->sent_stamp, session);
      }
    }
  } else if (op == CEPH_MDS_OP_LOOKUPSNAP ||
             op == CEPH_MDS_OP_MKSNAP) {
    ldout(cct, 10) << " faking snap lookup weirdness" << dendl;
    // fake it for snap lookup
    vinodeno_t vino = ist.vino;
    vino.snapid = CEPH_SNAPDIR;
    ceph_assert(inode_map.count(vino));
    diri = inode_map[vino];

    string dname = request->path.last_dentry();

    LeaseStat dlease;
    dlease.duration_ms = 0;

    if (in) {
      Dir *dir = diri->open_dir();
      insert_dentry_inode(dir, dname, &dlease, in, request->sent_stamp, session);
    } else {
      if (diri->dir && diri->dir->dentries.count(dname)) {
        Dentry *dn = diri->dir->dentries[dname];
        if (dn->inode)
          unlink(dn, true, true);  // keep dir, dentry
      }
    }
  }

  if (in) {
    if (op == CEPH_MDS_OP_READDIR ||
        op == CEPH_MDS_OP_LSSNAP) {
      insert_readdir_results(request, session, in, nullptr);
    } else if (op == CEPH_MDS_OP_LOOKUPNAME) {
      // hack: return parent inode instead
      in = diri;
    } else if (op == CEPH_MDS_OP_READDIR_SNAPDIFF) {
      // provide both request's inode (aka snapA) and traced one (snapB)
      // to properly match snapdiff results
      insert_readdir_results(request, session, request->inode(), in);
    }

    if (request->dentry() == NULL && in != request->inode()) {
      // pin the target inode if its parent dentry is not pinned
      request->set_other_inode(in);
    }
  }

  if (realm)
    put_snap_realm(realm);

  request->target = in;
  return in;
}

// -------

mds_rank_t Client::choose_target_mds(MetaRequest *req, Inode** phash_diri)
{
  mds_rank_t mds = MDS_RANK_NONE;
  __u32 hash = 0;
  bool is_hash = false;
  int issued = 0;

  Inode *in = NULL;
  Dentry *de = NULL;

  if (req->resend_mds >= 0) {
    mds = req->resend_mds;
    req->resend_mds = -1;
    ldout(cct, 10) << __func__ << " resend_mds specified as mds." << mds << dendl;
    goto out;
  }

  if (cct->_conf->client_use_random_mds)
    goto random_mds;

  in = req->inode();
  de = req->dentry();
  if (in) {
    ldout(cct, 20) << __func__ << " starting with req->inode " << *in << dendl;
    if (req->path.depth()) {
      hash = in->hash_dentry_name(req->path[0]);
      ldout(cct, 20) << __func__ << " inode dir hash is " << (int)in->dir_layout.dl_dir_hash
                     << " on " << req->path[0]
                     << " => " << hash << dendl;
      is_hash = true;
    }
  } else if (de) {
    if (de->inode) {
      in = de->inode.get();
      ldout(cct, 20) << __func__ << " starting with req->dentry inode " << *in << dendl;
    } else {
      in = de->dir->parent_inode;
      hash = in->hash_dentry_name(de->name);
      ldout(cct, 20) << __func__ << " dentry dir hash is " << (int)in->dir_layout.dl_dir_hash
                     << " on " << de->name
                     << " => " << hash << dendl;
      is_hash = true;
    }
  }
  if (in) {
    if (in->snapid != CEPH_NOSNAP) {
      ldout(cct, 10) << __func__ << " " << *in << " is snapped, using nonsnap parent" << dendl;
      while (in->snapid != CEPH_NOSNAP) {
        if (in->snapid == CEPH_SNAPDIR)
          in = in->snapdir_parent.get();
        else if (!in->dentries.empty())
          /* In most cases there will only be one dentry, so getting it
           * will be the correct action.
If there are multiple hard links, * I think the MDS should be able to redirect as needed*/ in = in->get_first_parent()->dir->parent_inode; else { ldout(cct, 10) << __func__ << "got unlinked inode, can't look at parent" << dendl; break; } } is_hash = false; } ldout(cct, 20) << __func__ << " " << *in << " is_hash=" << is_hash << " hash=" << hash << dendl; if (req->get_op() == CEPH_MDS_OP_GETATTR) issued = req->inode()->caps_issued(); if (is_hash && S_ISDIR(in->mode) && (!in->fragmap.empty() || !in->frag_repmap.empty())) { frag_t fg = in->dirfragtree[hash]; if (!req->auth_is_best(issued)) { auto repmapit = in->frag_repmap.find(fg); if (repmapit != in->frag_repmap.end()) { auto& repmap = repmapit->second; auto r = ceph::util::generate_random_number<uint64_t>(0, repmap.size()-1); mds = repmap.at(r); } } else if (in->fragmap.count(fg)) { mds = in->fragmap[fg]; if (phash_diri) *phash_diri = in; } else if (in->auth_cap) { req->send_to_auth = true; mds = in->auth_cap->session->mds_num; } if (mds >= 0) { ldout(cct, 10) << __func__ << " from dirfragtree hash" << dendl; goto out; } } if (in->auth_cap && req->auth_is_best(issued)) { mds = in->auth_cap->session->mds_num; } else if (!in->caps.empty()) { mds = in->caps.begin()->second.session->mds_num; } else { goto random_mds; } ldout(cct, 10) << __func__ << " from caps on inode " << *in << dendl; goto out; } random_mds: if (mds < 0) { mds = _get_random_up_mds(); ldout(cct, 10) << "did not get mds through better means, so chose random mds " << mds << dendl; } out: ldout(cct, 20) << "mds is " << mds << dendl; return mds; } void Client::connect_mds_targets(mds_rank_t mds) { ldout(cct, 10) << __func__ << " for mds." << mds << dendl; ceph_assert(mds_sessions.count(mds)); const MDSMap::mds_info_t& info = mdsmap->get_mds_info(mds); for (const auto &rank : info.export_targets) { if (mds_sessions.count(rank) == 0 && mdsmap->is_clientreplay_or_active_or_stopping(rank)) { ldout(cct, 10) << "check_mds_sessions opening mds." << mds << " export target mds." 
<< rank << dendl; auto session = _get_or_open_mds_session(rank); if (session->state == MetaSession::STATE_OPENING || session->state == MetaSession::STATE_OPEN) continue; _open_mds_session(rank); } } } void Client::dump_mds_sessions(Formatter *f, bool cap_dump) { f->dump_int("id", get_nodeid().v); entity_inst_t inst(messenger->get_myname(), messenger->get_myaddr_legacy()); f->dump_object("inst", inst); f->dump_stream("inst_str") << inst; f->dump_stream("addr_str") << inst.addr; f->open_array_section("sessions"); for (const auto &p : mds_sessions) { f->open_object_section("session"); p.second->dump(f, cap_dump); f->close_section(); } f->close_section(); f->dump_int("mdsmap_epoch", mdsmap->get_epoch()); } void Client::dump_mds_requests(Formatter *f) { for (map<ceph_tid_t, MetaRequest*>::iterator p = mds_requests.begin(); p != mds_requests.end(); ++p) { f->open_object_section("request"); p->second->dump(f); f->close_section(); } } int Client::verify_reply_trace(int r, MetaSession *session, MetaRequest *request, const MConstRef<MClientReply>& reply, InodeRef *ptarget, bool *pcreated, const UserPerm& perms) { // check whether this request actually did the create, and set created flag bufferlist extra_bl; inodeno_t created_ino; bool got_created_ino = false; ceph::unordered_map<vinodeno_t, Inode*>::iterator p; extra_bl = reply->get_extra_bl(); if (extra_bl.length() >= 8) { if (session->mds_features.test(CEPHFS_FEATURE_DELEG_INO)) { struct openc_response_t ocres; decode(ocres, extra_bl); created_ino = ocres.created_ino; /* * The userland cephfs client doesn't have a way to do an async create * (yet), so just discard delegated_inos for now. Eventually we should * store them and use them in create calls, even if they are synchronous, * if only for testing purposes. */ ldout(cct, 10) << "delegated_inos: " << ocres.delegated_inos << dendl; } else { // u64 containing number of created ino decode(created_ino, extra_bl); } ldout(cct, 10) << "make_request created ino " << created_ino << dendl; got_created_ino = true; } if (pcreated) *pcreated = got_created_ino; if (request->target) { *ptarget = request->target; ldout(cct, 20) << "make_request target is " << *ptarget->get() << dendl; } else { if (got_created_ino && (p = inode_map.find(vinodeno_t(created_ino, CEPH_NOSNAP))) != inode_map.end()) { (*ptarget) = p->second; ldout(cct, 20) << "make_request created, target is " << *ptarget->get() << dendl; } else { // we got a traceless reply, and need to look up what we just // created. for now, do this by name. someday, do this by the // ino... which we know! FIXME. InodeRef target; Dentry *d = request->dentry(); if (d) { if (d->dir) { ldout(cct, 10) << "make_request got traceless reply, looking up #" << d->dir->parent_inode->ino << "/" << d->name << " got_ino " << got_created_ino << " ino " << created_ino << dendl; r = _do_lookup(d->dir->parent_inode, d->name, request->regetattr_mask, &target, perms); } else { // if the dentry is not linked, just do our best. see #5021. ceph_abort_msg("how did this happen? i want logs!"); } } else { Inode *in = request->inode(); ldout(cct, 10) << "make_request got traceless reply, forcing getattr on #" << in->ino << dendl; r = _getattr(in, request->regetattr_mask, perms, true); target = in; } if (r >= 0) { // verify ino returned in reply and trace_dist are the same if (got_created_ino && created_ino.val != target->ino.val) { ldout(cct, 5) << "create got ino " << created_ino << " but then failed on lookup; EINTR?" 
<< dendl; r = -CEPHFS_EINTR; } if (ptarget) ptarget->swap(target); } } } return r; } /** * make a request * * Blocking helper to make an MDS request. * * If the ptarget flag is set, behavior changes slightly: the caller * expects to get a pointer to the inode we are creating or operating * on. As a result, we will follow up any traceless mutation reply * with a getattr or lookup to transparently handle a traceless reply * from the MDS (as when the MDS restarts and the client has to replay * a request). * * @param request the MetaRequest to execute * @param perms The user uid/gid to execute as (eventually, full group lists?) * @param ptarget [optional] address to store a pointer to the target inode we want to create or operate on * @param pcreated [optional; required if ptarget] where to store a bool of whether our create atomically created a file * @param use_mds [optional] prefer a specific mds (-1 for default) * @param pdirbl [optional; disallowed if ptarget] where to pass extra reply payload to the caller */ int Client::make_request(MetaRequest *request, const UserPerm& perms, InodeRef *ptarget, bool *pcreated, mds_rank_t use_mds, bufferlist *pdirbl, size_t feature_needed) { int r = 0; // assign a unique tid ceph_tid_t tid = ++last_tid; request->set_tid(tid); // and timestamp request->op_stamp = ceph_clock_now(); request->created = ceph::coarse_mono_clock::now(); // make note mds_requests[tid] = request->get(); if (oldest_tid == 0 && request->get_op() != CEPH_MDS_OP_SETFILELOCK) oldest_tid = tid; request->set_caller_perms(perms); if (cct->_conf->client_inject_fixed_oldest_tid) { ldout(cct, 20) << __func__ << " injecting fixed oldest_client_tid(1)" << dendl; request->set_oldest_client_tid(1); } else { request->set_oldest_client_tid(oldest_tid); } // hack target mds? if (use_mds >= 0) request->resend_mds = use_mds; MetaSessionRef session = NULL; while (1) { if (request->aborted()) break; if (blocklisted) { request->abort(-CEPHFS_EBLOCKLISTED); break; } // set up wait cond ceph::condition_variable caller_cond; request->caller_cond = &caller_cond; // choose mds Inode *hash_diri = NULL; mds_rank_t mds = choose_target_mds(request, &hash_diri); int mds_state = (mds == MDS_RANK_NONE) ? MDSMap::STATE_NULL : mdsmap->get_state(mds); if (mds_state != MDSMap::STATE_ACTIVE && mds_state != MDSMap::STATE_STOPPING) { if (mds_state == MDSMap::STATE_NULL && mds >= mdsmap->get_max_mds()) { if (hash_diri) { ldout(cct, 10) << " target mds." << mds << " has stopped, remove it from fragmap" << dendl; _fragmap_remove_stopped_mds(hash_diri, mds); } else { ldout(cct, 10) << " target mds." << mds << " has stopped, trying a random mds" << dendl; request->resend_mds = _get_random_up_mds(); } } else { ldout(cct, 10) << " target mds." << mds << " not active, waiting for new mdsmap" << dendl; wait_on_list(waiting_for_mdsmap); } continue; } // open a session? if (!have_open_session(mds)) { session = _get_or_open_mds_session(mds); if (session->state == MetaSession::STATE_REJECTED) { request->abort(-CEPHFS_EPERM); break; } // wait if (session->state == MetaSession::STATE_OPENING) { ldout(cct, 10) << "waiting for session to mds." << mds << " to open" << dendl; wait_on_context_list(session->waiting_for_open); continue; } if (!have_open_session(mds)) continue; } else { session = mds_sessions.at(mds); } if (feature_needed != ULONG_MAX && !session->mds_features.test(feature_needed)) { request->abort(-CEPHFS_EOPNOTSUPP); break; } // send request. 
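  // Editor's note (illustrative, not part of the original source): each pass
  // of this loop either settles on an MDS and falls through to the send
  // below, or loops back to wait for a usable mdsmap/session. After sending,
  // the caller blocks on caller_cond until one of three wake-ups arrives:
  //   - request->reply      set: an MDS answered, stop waiting
  //   - request->resend_mds set: the request was forwarded, retry the loop
  //   - request->kick       set: the session went away, retry the loop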
send_request(request, session.get()); // wait for signal ldout(cct, 20) << "awaiting reply|forward|kick on " << &caller_cond << dendl; request->kick = false; std::unique_lock l{client_lock, std::adopt_lock}; caller_cond.wait(l, [request] { return (request->reply || // reply request->resend_mds >= 0 || // forward request->kick); }); l.release(); request->caller_cond = nullptr; // did we get a reply? if (request->reply) break; } if (!request->reply) { ceph_assert(request->aborted()); ceph_assert(!request->got_unsafe); r = request->get_abort_code(); request->item.remove_myself(); unregister_request(request); put_request(request); return r; } // got it! auto reply = std::move(request->reply); r = reply->get_result(); if (r >= 0) request->success = true; // kick dispatcher (we've got it!) ceph_assert(request->dispatch_cond); request->dispatch_cond->notify_all(); ldout(cct, 20) << "sendrecv kickback on tid " << tid << " " << request->dispatch_cond << dendl; request->dispatch_cond = 0; if (r >= 0 && ptarget) r = verify_reply_trace(r, session.get(), request, reply, ptarget, pcreated, perms); if (pdirbl) *pdirbl = reply->get_extra_bl(); // -- log times -- utime_t lat = ceph_clock_now(); lat -= request->sent_stamp; ldout(cct, 20) << "lat " << lat << dendl; ++nr_metadata_request; update_io_stat_metadata(lat); put_request(request); return r; } void Client::unregister_request(MetaRequest *req) { mds_requests.erase(req->tid); if (req->tid == oldest_tid) { map<ceph_tid_t, MetaRequest*>::iterator p = mds_requests.upper_bound(oldest_tid); while (true) { if (p == mds_requests.end()) { oldest_tid = 0; break; } if (p->second->get_op() != CEPH_MDS_OP_SETFILELOCK) { oldest_tid = p->first; break; } ++p; } } put_request(req); } void Client::put_request(MetaRequest *request) { if (request->_put()) { int op = -1; if (request->success) op = request->get_op(); InodeRef other_in; request->take_other_inode(&other_in); delete request; if (other_in && (op == CEPH_MDS_OP_RMDIR || op == CEPH_MDS_OP_RENAME || op == CEPH_MDS_OP_RMSNAP)) { _try_to_trim_inode(other_in.get(), false); } } } int Client::encode_inode_release(Inode *in, MetaRequest *req, mds_rank_t mds, int drop, int unless, int force) { ldout(cct, 20) << __func__ << " enter(in:" << *in << ", req:" << req << " mds:" << mds << ", drop:" << ccap_string(drop) << ", unless:" << ccap_string(unless) << ", force:" << force << ")" << dendl; int released = 0; auto it = in->caps.find(mds); if (it != in->caps.end()) { Cap &cap = it->second; drop &= ~(in->dirty_caps | get_caps_used(in)); if ((drop & cap.issued) && !(unless & cap.issued)) { ldout(cct, 25) << "dropping caps " << ccap_string(drop) << dendl; cap.issued &= ~drop; cap.implemented &= ~drop; released = 1; } else { released = force; } if (released) { cap.wanted = in->caps_wanted(); if (&cap == in->auth_cap && !(cap.wanted & CEPH_CAP_ANY_FILE_WR)) { in->requested_max_size = 0; ldout(cct, 25) << "reset requested_max_size due to not wanting any file write cap" << dendl; } ceph_mds_request_release rel; rel.ino = in->ino; rel.cap_id = cap.cap_id; rel.seq = cap.seq; rel.issue_seq = cap.issue_seq; rel.mseq = cap.mseq; rel.caps = cap.implemented; rel.wanted = cap.wanted; rel.dname_len = 0; rel.dname_seq = 0; req->cap_releases.push_back(MClientRequest::Release(rel,"")); } } ldout(cct, 25) << __func__ << " exit(in:" << *in << ") released:" << released << dendl; return released; } void Client::encode_dentry_release(Dentry *dn, MetaRequest *req, mds_rank_t mds, int drop, int unless) { ldout(cct, 20) << __func__ << " enter(dn:" << 
dn << ")" << dendl; int released = 0; if (dn->dir) released = encode_inode_release(dn->dir->parent_inode, req, mds, drop, unless, 1); if (released && dn->lease_mds == mds) { ldout(cct, 25) << "preemptively releasing dn to mds" << dendl; auto& rel = req->cap_releases.back(); rel.item.dname_len = dn->name.length(); rel.item.dname_seq = dn->lease_seq; rel.dname = dn->name; dn->lease_mds = -1; } ldout(cct, 25) << __func__ << " exit(dn:" << dn << ")" << dendl; } /* * This requires the MClientRequest *request member to be set. * It will error out horribly without one. * Additionally, if you set any *drop member, you'd better have * set the corresponding dentry! */ void Client::encode_cap_releases(MetaRequest *req, mds_rank_t mds) { ldout(cct, 20) << __func__ << " enter (req: " << req << ", mds: " << mds << ")" << dendl; if (req->inode_drop && req->inode()) encode_inode_release(req->inode(), req, mds, req->inode_drop, req->inode_unless); if (req->old_inode_drop && req->old_inode()) encode_inode_release(req->old_inode(), req, mds, req->old_inode_drop, req->old_inode_unless); if (req->other_inode_drop && req->other_inode()) encode_inode_release(req->other_inode(), req, mds, req->other_inode_drop, req->other_inode_unless); if (req->dentry_drop && req->dentry()) encode_dentry_release(req->dentry(), req, mds, req->dentry_drop, req->dentry_unless); if (req->old_dentry_drop && req->old_dentry()) encode_dentry_release(req->old_dentry(), req, mds, req->old_dentry_drop, req->old_dentry_unless); ldout(cct, 25) << __func__ << " exit (req: " << req << ", mds " << mds <<dendl; } bool Client::have_open_session(mds_rank_t mds) { const auto &it = mds_sessions.find(mds); return it != mds_sessions.end() && (it->second->state == MetaSession::STATE_OPEN || it->second->state == MetaSession::STATE_STALE); } MetaSessionRef Client::_get_mds_session(mds_rank_t mds, Connection *con) { const auto &it = mds_sessions.find(mds); if (it == mds_sessions.end() || it->second->con != con) { return NULL; } else { return it->second; } } MetaSessionRef Client::_get_or_open_mds_session(mds_rank_t mds) { auto it = mds_sessions.find(mds); return it == mds_sessions.end() ? _open_mds_session(mds) : it->second; } /** * Populate a map of strings with client-identifying metadata, * such as the hostname. Call this once at initialization. 
*/ void Client::populate_metadata(const std::string &mount_root) { // Hostname #ifdef _WIN32 // TODO: move this to compat.h char hostname[64]; DWORD hostname_sz = 64; GetComputerNameA(hostname, &hostname_sz); metadata["hostname"] = hostname; #else struct utsname u; int r = uname(&u); if (r >= 0) { metadata["hostname"] = u.nodename; ldout(cct, 20) << __func__ << " read hostname '" << u.nodename << "'" << dendl; } else { ldout(cct, 1) << __func__ << " failed to read hostname (" << cpp_strerror(r) << ")" << dendl; } #endif metadata["pid"] = stringify(getpid()); // Ceph entity id (the '0' in "client.0") metadata["entity_id"] = cct->_conf->name.get_id(); // Our mount position if (!mount_root.empty()) { metadata["root"] = mount_root; } // Ceph version metadata["ceph_version"] = pretty_version_to_str(); metadata["ceph_sha1"] = git_version_to_str(); // Apply any metadata from the user's configured overrides std::vector<std::string> tokens; get_str_vec(cct->_conf->client_metadata, ",", tokens); for (const auto &i : tokens) { auto eqpos = i.find("="); // Throw out anything that isn't of the form "<str>=<str>" if (eqpos == 0 || eqpos == std::string::npos || eqpos == i.size()) { lderr(cct) << "Invalid metadata keyval pair: '" << i << "'" << dendl; continue; } metadata[i.substr(0, eqpos)] = i.substr(eqpos + 1); } } /** * Optionally add or override client metadata fields. */ void Client::update_metadata(std::string const &k, std::string const &v) { RWRef_t iref_reader(initialize_state, CLIENT_INITIALIZED); ceph_assert(iref_reader.is_state_satisfied()); std::scoped_lock l(client_lock); auto it = metadata.find(k); if (it != metadata.end()) { ldout(cct, 1) << __func__ << " warning, overriding metadata field '" << k << "' from '" << it->second << "' to '" << v << "'" << dendl; } metadata[k] = v; } MetaSessionRef Client::_open_mds_session(mds_rank_t mds) { ldout(cct, 10) << __func__ << " mds." << mds << dendl; auto addrs = mdsmap->get_addrs(mds); auto em = mds_sessions.emplace(std::piecewise_construct, std::forward_as_tuple(mds), std::forward_as_tuple(new MetaSession(mds, messenger->connect_to_mds(addrs), addrs))); ceph_assert(em.second); /* not already present */ auto session = em.first->second; auto m = make_message<MClientSession>(CEPH_SESSION_REQUEST_OPEN); m->metadata = metadata; m->supported_features = feature_bitset_t(CEPHFS_FEATURES_CLIENT_SUPPORTED); m->metric_spec = feature_bitset_t(CEPHFS_METRIC_FEATURES_ALL); session->con->send_message2(std::move(m)); return session; } void Client::_close_mds_session(MetaSession *s) { ldout(cct, 2) << __func__ << " mds." << s->mds_num << " seq " << s->seq << dendl; s->state = MetaSession::STATE_CLOSING; s->con->send_message2(make_message<MClientSession>(CEPH_SESSION_REQUEST_CLOSE, s->seq)); } void Client::_closed_mds_session(MetaSession *s, int err, bool rejected) { ldout(cct, 5) << __func__ << " mds." << s->mds_num << " seq " << s->seq << dendl; if (rejected && s->state != MetaSession::STATE_CLOSING) s->state = MetaSession::STATE_REJECTED; else s->state = MetaSession::STATE_CLOSED; s->con->mark_down(); signal_context_list(s->waiting_for_open); mount_cond.notify_all(); remove_session_caps(s, err); kick_requests_closed(s); mds_ranks_closing.erase(s->mds_num); if (s->state == MetaSession::STATE_CLOSED) mds_sessions.erase(s->mds_num); } void Client::handle_client_session(const MConstRef<MClientSession>& m) { mds_rank_t from = mds_rank_t(m->get_source().num()); ldout(cct, 10) << __func__ << " " << *m << " from mds." 
                 << from << dendl;

  std::scoped_lock cl(client_lock);
  auto session = _get_mds_session(from, m->get_connection().get());
  if (!session) {
    ldout(cct, 10) << " discarding session message from sessionless mds "
                   << m->get_source_inst() << dendl;
    return;
  }

  switch (m->get_op()) {
  case CEPH_SESSION_OPEN:
    {
      if (session->state == MetaSession::STATE_OPEN) {
        ldout(cct, 10) << "mds." << from << " already opened, ignore it"
                       << dendl;
        return;
      }
      /*
       * The connection may be broken and the session on the client side
       * may have been reinitialized, so update the seq anyway.
       */
      if (!session->seq && m->get_seq())
        session->seq = m->get_seq();

      session->mds_features = std::move(m->supported_features);
      session->mds_metric_flags = std::move(m->metric_spec.metric_flags);

      renew_caps(session.get());
      session->state = MetaSession::STATE_OPEN;
      if (is_unmounting())
        mount_cond.notify_all();
      else
        connect_mds_targets(from);
      signal_context_list(session->waiting_for_open);
      break;
    }

  case CEPH_SESSION_CLOSE:
    _closed_mds_session(session.get());
    break;

  case CEPH_SESSION_RENEWCAPS:
    if (session->cap_renew_seq == m->get_seq()) {
      bool was_stale = ceph_clock_now() >= session->cap_ttl;
      session->cap_ttl =
        session->last_cap_renew_request + mdsmap->get_session_timeout();
      if (was_stale)
        wake_up_session_caps(session.get(), false);
    }
    break;

  case CEPH_SESSION_STALE:
    // invalidate session caps/leases
    session->cap_gen++;
    session->cap_ttl = ceph_clock_now();
    session->cap_ttl -= 1;
    renew_caps(session.get());
    break;

  case CEPH_SESSION_RECALL_STATE:
    /*
     * Renew the caps and flush the cap releases just before trimming
     * the caps, in case tick() won't get a chance to run them, which
     * could cause the client to be blocklisted and the MDS daemons to
     * try recalling the caps again and again.
     *
     * In most cases this will do nothing, and the new cap releases
     * added by trim_caps() below will have their flushing deferred to
     * tick().
     */
    renew_and_flush_cap_releases();
    trim_caps(session.get(), m->get_max_caps());
    break;

  case CEPH_SESSION_FLUSHMSG:
    /* flush cap release */
    if (auto& m = session->release; m) {
      session->con->send_message2(std::move(m));
    }
    session->con->send_message2(make_message<MClientSession>(CEPH_SESSION_FLUSHMSG_ACK, m->get_seq()));
    break;

  case CEPH_SESSION_FORCE_RO:
    force_session_readonly(session.get());
    break;

  case CEPH_SESSION_REJECT:
    {
      std::string_view error_str;
      auto it = m->metadata.find("error_string");
      if (it != m->metadata.end())
        error_str = it->second;
      else
        error_str = "unknown error";
      lderr(cct) << "mds." << from << " rejected us (" << error_str << ")" << dendl;

      _closed_mds_session(session.get(), -CEPHFS_EPERM, true);
    }
    break;

  default:
    ceph_abort();
  }
}

bool Client::_any_stale_sessions() const
{
  ceph_assert(ceph_mutex_is_locked_by_me(client_lock));

  for (const auto &p : mds_sessions) {
    if (p.second->state == MetaSession::STATE_STALE) {
      return true;
    }
  }

  return false;
}

void Client::_kick_stale_sessions()
{
  ldout(cct, 1) << __func__ << dendl;

  for (auto it = mds_sessions.begin(); it != mds_sessions.end(); ) {
    auto s = it->second;
    if (s->state == MetaSession::STATE_REJECTED) {
      // erase via the iterator and advance it before it is invalidated;
      // erasing by key and then re-testing the stale iterator would never
      // advance the loop
      mds_sessions.erase(it++);
      continue;
    }
    ++it;
    if (s->state == MetaSession::STATE_STALE)
      _closed_mds_session(s.get());
  }
}

void Client::send_request(MetaRequest *request, MetaSession *session,
                          bool drop_cap_releases)
{
  // make the request
  mds_rank_t mds = session->mds_num;
  ldout(cct, 10) << __func__ << " rebuilding request " << request->get_tid()
                 << " for mds."
                 << mds << dendl;

  auto r = build_client_request(request, mds);
  if (!r)
    return;

  if (request->dentry()) {
    r->set_dentry_wanted();
  }
  if (request->got_unsafe) {
    r->set_replayed_op();
    if (request->target)
      r->head.ino = request->target->ino;
  } else {
    encode_cap_releases(request, mds);
    if (drop_cap_releases) // we haven't sent the cap reconnect yet, drop cap releases
      request->cap_releases.clear();
    else
      r->releases.swap(request->cap_releases);
  }
  r->set_mdsmap_epoch(mdsmap->get_epoch());
  if (r->head.op == CEPH_MDS_OP_SETXATTR) {
    objecter->with_osdmap([r](const OSDMap& o) {
        r->set_osdmap_epoch(o.get_epoch());
      });
  }

  if (request->mds == -1) {
    request->sent_stamp = ceph_clock_now();
    ldout(cct, 20) << __func__ << " set sent_stamp to " << request->sent_stamp << dendl;
  }
  request->mds = mds;

  Inode *in = request->inode();
  if (in) {
    auto it = in->caps.find(mds);
    if (it != in->caps.end()) {
      request->sent_on_mseq = it->second.mseq;
    }
  }

  session->requests.push_back(&request->item);

  ldout(cct, 10) << __func__ << " " << *r << " to mds." << mds << dendl;
  session->con->send_message2(std::move(r));
}

ref_t<MClientRequest> Client::build_client_request(MetaRequest *request, mds_rank_t mds)
{
  auto session = mds_sessions.at(mds);
  bool old_version = !session->mds_features.test(CEPHFS_FEATURE_32BITS_RETRY_FWD);

  /*
   * Avoid infinite retrying after overflow.
   *
   * The client increments the retry count on every resend; if the MDS
   * is an old version its on-wire counter is only 8 bits wide, so in
   * that case we limit it to at most 256 retries.
   */
  if (request->retry_attempt) {
    int old_max_retry = sizeof(((struct ceph_mds_request_head*)0)->num_retry);
    old_max_retry = 1 << (old_max_retry * CHAR_BIT);
    if ((old_version && request->retry_attempt >= old_max_retry) ||
        (uint32_t)request->retry_attempt >= UINT32_MAX) {
      request->abort(-CEPHFS_EMULTIHOP);
      request->caller_cond->notify_all();
      ldout(cct, 1) << __func__ << " request tid " << request->tid
                    << " retry seq overflow" << ", abort it" << dendl;
      return nullptr;
    }
  }

  auto req = make_message<MClientRequest>(request->get_op(), old_version);
  req->set_tid(request->tid);
  req->set_stamp(request->op_stamp);
  memcpy(&req->head, &request->head, sizeof(ceph_mds_request_head));

  // if the filepaths haven't been set, set them!
  if (request->path.empty()) {
    Inode *in = request->inode();
    Dentry *de = request->dentry();
    if (in)
      in->make_nosnap_relative_path(request->path);
    else if (de) {
      if (de->inode)
        de->inode->make_nosnap_relative_path(request->path);
      else if (de->dir) {
        de->dir->parent_inode->make_nosnap_relative_path(request->path);
        request->path.push_dentry(de->name);
      }
      else ldout(cct, 1) << "Warning -- unable to construct a filepath!"
                         << " No path, inode, or appropriately-endowed dentry given!"
                         << dendl;
    } else ldout(cct, 1) << "Warning -- unable to construct a filepath!"
                         << " No path, inode, or dentry given!"
                         << dendl;
  }
  req->set_filepath(request->get_filepath());
  req->set_filepath2(request->get_filepath2());
  req->set_alternate_name(request->alternate_name);
  req->set_data(request->data);
  req->fscrypt_auth = request->fscrypt_auth;
  req->fscrypt_file = request->fscrypt_file;
  req->set_retry_attempt(request->retry_attempt++);
  req->head.ext_num_fwd = request->num_fwd;
  const gid_t *_gids;
  int gid_count = request->perms.get_gids(&_gids);
  req->set_gid_list(gid_count, _gids);
  return req;
}

void Client::handle_client_request_forward(const MConstRef<MClientRequestForward>& fwd)
{
  mds_rank_t mds = mds_rank_t(fwd->get_source().num());

  std::scoped_lock cl(client_lock);
  auto session = _get_mds_session(mds, fwd->get_connection().get());
  if (!session) {
    return;
  }
  ceph_tid_t tid = fwd->get_tid();

  if (mds_requests.count(tid) == 0) {
    ldout(cct, 10) << __func__ << " no pending request on tid " << tid << dendl;
    return;
  }

  MetaRequest *request = mds_requests[tid];
  ceph_assert(request);

  /*
   * Avoid infinite retrying after overflow.
   *
   * The MDS increments the fwd count; if the incoming num_fwd is not
   * greater than the one saved in the request, the MDS is an old
   * version whose 8-bit counter has overflowed.
   */
  auto num_fwd = fwd->get_num_fwd();
  if (num_fwd <= request->num_fwd || (uint32_t)num_fwd >= UINT32_MAX) {
    request->abort(-CEPHFS_EMULTIHOP);
    request->caller_cond->notify_all();
    ldout(cct, 0) << __func__ << " request tid " << tid << " new num_fwd "
                  << num_fwd << " old num_fwd " << request->num_fwd
                  << ", fwd seq overflow" << ", abort it" << dendl;
    return;
  }

  // reset retry counter
  request->retry_attempt = 0;

  // request not forwarded, or dest mds has no session.
  // resend.
  ldout(cct, 10) << __func__ << " tid " << tid
                 << " fwd " << fwd->get_num_fwd()
                 << " to mds." << fwd->get_dest_mds()
                 << ", resending to " << fwd->get_dest_mds()
                 << dendl;

  request->mds = -1;
  request->item.remove_myself();
  request->num_fwd = num_fwd;
  request->resend_mds = fwd->get_dest_mds();
  request->caller_cond->notify_all();
}

bool Client::is_dir_operation(MetaRequest *req)
{
  int op = req->get_op();
  if (op == CEPH_MDS_OP_MKNOD || op == CEPH_MDS_OP_LINK ||
      op == CEPH_MDS_OP_UNLINK || op == CEPH_MDS_OP_RENAME ||
      op == CEPH_MDS_OP_MKDIR || op == CEPH_MDS_OP_RMDIR ||
      op == CEPH_MDS_OP_SYMLINK || op == CEPH_MDS_OP_CREATE)
    return true;
  return false;
}

void Client::handle_client_reply(const MConstRef<MClientReply>& reply)
{
  mds_rank_t mds_num = mds_rank_t(reply->get_source().num());

  std::scoped_lock cl(client_lock);
  auto session = _get_mds_session(mds_num, reply->get_connection().get());
  if (!session) {
    return;
  }

  ceph_tid_t tid = reply->get_tid();
  bool is_safe = reply->is_safe();

  if (mds_requests.count(tid) == 0) {
    lderr(cct) << __func__ << " no pending request on tid " << tid
               << " safe is:" << is_safe << dendl;
    return;
  }
  MetaRequest *request = mds_requests.at(tid);

  ldout(cct, 20) << __func__ << " got a reply. Safe:" << is_safe
                 << " tid " << tid << dendl;

  // correct session?
  if (request->mds != mds_num) {
    ldout(cct, 0) << "got a stale reply from mds." << mds_num << " instead of mds."
                  << request->mds << dendl;
    return;
  }

  if (request->got_unsafe && !is_safe) {
    // duplicate reply
    ldout(cct, 0) << "got a duplicate reply on tid " << tid << " from mds "
                  << mds_num << " safe:" << is_safe << dendl;
    return;
  }

  ceph_assert(!request->reply);
  request->reply = reply;
  insert_trace(request, session.get());

  // Handle unsafe reply
  if (!is_safe) {
    request->got_unsafe = true;
    session->unsafe_requests.push_back(&request->unsafe_item);
    if (is_dir_operation(request)) {
      Inode *dir = request->inode();
      ceph_assert(dir);
      dir->unsafe_ops.push_back(&request->unsafe_dir_item);
    }
    if (request->target) {
      InodeRef &in = request->target;
      in->unsafe_ops.push_back(&request->unsafe_target_item);
    }
  }

  // Only signal the caller once (on the first reply):
  // Either it's an unsafe reply, or it's a safe reply and no unsafe reply was sent.
  if (!is_safe || !request->got_unsafe) {
    ceph::condition_variable cond;
    request->dispatch_cond = &cond;

    // wake up waiter
    ldout(cct, 20) << __func__ << " signalling caller " << (void*)request->caller_cond << dendl;
    request->caller_cond->notify_all();

    // wake for kick back
    std::unique_lock l{client_lock, std::adopt_lock};
    cond.wait(l, [tid, request, &cond, this] {
      if (request->dispatch_cond) {
        ldout(cct, 20) << "handle_client_reply awaiting kickback on tid "
                       << tid << " " << &cond << dendl;
      }
      return !request->dispatch_cond;
    });
    l.release();
  }

  if (is_safe) {
    // the filesystem change is committed to disk
    // we're done, clean up
    if (request->got_unsafe) {
      request->unsafe_item.remove_myself();
      request->unsafe_dir_item.remove_myself();
      request->unsafe_target_item.remove_myself();
      signal_cond_list(request->waitfor_safe);
    }
    request->item.remove_myself();
    unregister_request(request);
  }
  if (is_unmounting())
    mount_cond.notify_all();
}

void Client::_handle_full_flag(int64_t pool)
{
  ldout(cct, 1) << __func__ << ": FULL: cancelling outstanding operations "
                << "on " << pool << dendl;
  // Cancel all outstanding ops in this pool with -CEPHFS_ENOSPC: it is necessary
  // to do this rather than blocking, because otherwise when we fill up we
  // potentially lock caps forever on files with dirty pages, and we need
  // to be able to release those caps to the MDS so that it can delete files
  // and free up space.
  epoch_t cancelled_epoch = objecter->op_cancel_writes(-CEPHFS_ENOSPC, pool);

  // For all inodes with layouts in this pool and a pending flush write op
  // (i.e. one of the ones we will cancel), we've got to purge_set their data
  // from ObjectCacher so that it doesn't re-issue the write in response to
  // the ENOSPC error.
  // Fortunately since we're cancelling everything in a given pool, we don't
  // need to know which ops belong to which ObjectSet, we can just blow all
  // the un-flushed cached data away and mark any dirty inodes' async_err
  // field with -CEPHFS_ENOSPC as long as we're sure all the ops we cancelled were
  // affecting this pool, and all the objectsets we're purging were also
  // in this pool.
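  // Editor's note (illustrative, not part of the original source):
  // purge_set() below drops the dirty/uncommitted buffers for the whole
  // ObjectSet without writing them back, so the only record of the lost
  // writes is the async_err stashed on the inode; a later fsync()/close()
  // on the file is then expected to surface -CEPHFS_ENOSPC to the
  // application.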
  for (unordered_map<vinodeno_t,Inode*>::iterator i = inode_map.begin();
       i != inode_map.end(); ++i)
  {
    Inode *inode = i->second;
    if (inode->oset.dirty_or_tx
        && (pool == -1 || inode->layout.pool_id == pool)) {
      ldout(cct, 4) << __func__ << ": FULL: inode 0x" << std::hex << i->first << std::dec
                    << " has dirty objects, purging and setting ENOSPC" << dendl;
      objectcacher->purge_set(&inode->oset);
      inode->set_async_err(-CEPHFS_ENOSPC);
    }
  }

  if (cancelled_epoch != (epoch_t)-1) {
    set_cap_epoch_barrier(cancelled_epoch);
  }
}

void Client::handle_osd_map(const MConstRef<MOSDMap>& m)
{
  std::scoped_lock cl(client_lock);

  const auto myaddrs = messenger->get_myaddrs();
  bool new_blocklist = objecter->with_osdmap(
    [&](const OSDMap& o) {
      return o.is_blocklisted(myaddrs);
    });

  if (new_blocklist && !blocklisted) {
    auto epoch = objecter->with_osdmap([](const OSDMap &o){
        return o.get_epoch();
      });
    lderr(cct) << "I was blocklisted at osd epoch " << epoch << dendl;
    blocklisted = true;

    _abort_mds_sessions(-CEPHFS_EBLOCKLISTED);

    // Since we know all our OSD ops will fail, cancel them all preemptively,
    // so that on an unhealthy cluster we can umount promptly even if e.g.
    // some PGs were inaccessible.
    objecter->op_cancel_writes(-CEPHFS_EBLOCKLISTED);
  }

  if (blocklisted) {
    // Handle case where we were blocklisted but no longer are
    blocklisted = objecter->with_osdmap([myaddrs](const OSDMap &o){
        return o.is_blocklisted(myaddrs);});
  }

  // Always subscribe to next osdmap for blocklisted client
  // until this client is not blocklisted.
  if (blocklisted) {
    objecter->maybe_request_map();
  }

  if (objecter->osdmap_full_flag()) {
    _handle_full_flag(-1);
  } else {
    // Accumulate local list of full pools so that I can drop
    // the objecter lock before re-entering objecter in
    // cancel_writes
    std::vector<int64_t> full_pools;

    objecter->with_osdmap([&full_pools](const OSDMap &o) {
        for (const auto& kv : o.get_pools()) {
          if (kv.second.has_flag(pg_pool_t::FLAG_FULL)) {
            full_pools.push_back(kv.first);
          }
        }
      });

    for (auto p : full_pools)
      _handle_full_flag(p);

    // Subscribe to subsequent maps to watch for the full flag going
    // away.  For the global full flag objecter does this for us, but
    // it pays no attention to the per-pool full flag so in this branch
    // we do it ourselves.
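    // Editor's note (illustrative, not part of the original source): the
    // check below only re-subscribes while some pool is actually full, so a
    // healthy cluster does not keep paying for continuous osdmap updates;
    // maybe_request_map() itself is expected to avoid issuing duplicate
    // subscription requests.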
if (!full_pools.empty()) { objecter->maybe_request_map(); } } } // ------------------------ // incoming messages bool Client::ms_dispatch2(const MessageRef &m) { RWRef_t iref_reader(initialize_state, CLIENT_INITIALIZED); if (!iref_reader.is_state_satisfied()) { ldout(cct, 10) << "inactive, discarding " << *m << dendl; return true; } switch (m->get_type()) { // mounting and mds sessions case CEPH_MSG_MDS_MAP: handle_mds_map(ref_cast<MMDSMap>(m)); break; case CEPH_MSG_FS_MAP: handle_fs_map(ref_cast<MFSMap>(m)); break; case CEPH_MSG_FS_MAP_USER: handle_fs_map_user(ref_cast<MFSMapUser>(m)); break; case CEPH_MSG_CLIENT_SESSION: handle_client_session(ref_cast<MClientSession>(m)); break; case CEPH_MSG_OSD_MAP: handle_osd_map(ref_cast<MOSDMap>(m)); break; // requests case CEPH_MSG_CLIENT_REQUEST_FORWARD: handle_client_request_forward(ref_cast<MClientRequestForward>(m)); break; case CEPH_MSG_CLIENT_REPLY: handle_client_reply(ref_cast<MClientReply>(m)); break; // reclaim reply case CEPH_MSG_CLIENT_RECLAIM_REPLY: handle_client_reclaim_reply(ref_cast<MClientReclaimReply>(m)); break; case CEPH_MSG_CLIENT_SNAP: handle_snap(ref_cast<MClientSnap>(m)); break; case CEPH_MSG_CLIENT_CAPS: handle_caps(ref_cast<MClientCaps>(m)); break; case CEPH_MSG_CLIENT_LEASE: handle_lease(ref_cast<MClientLease>(m)); break; case MSG_COMMAND_REPLY: if (m->get_source().type() == CEPH_ENTITY_TYPE_MDS) { handle_command_reply(ref_cast<MCommandReply>(m)); } else { return false; } break; case CEPH_MSG_CLIENT_QUOTA: handle_quota(ref_cast<MClientQuota>(m)); break; default: return false; } // unmounting? std::scoped_lock cl(client_lock); if (is_unmounting()) { ldout(cct, 10) << "unmounting: trim pass, size was " << lru.lru_get_size() << "+" << inode_map.size() << dendl; uint64_t size = lru.lru_get_size() + inode_map.size(); trim_cache(); if (size > lru.lru_get_size() + inode_map.size()) { ldout(cct, 10) << "unmounting: trim pass, cache shrank, poking unmount()" << dendl; mount_cond.notify_all(); } else { ldout(cct, 10) << "unmounting: trim pass, size still " << lru.lru_get_size() << "+" << inode_map.size() << dendl; } } return true; } void Client::handle_fs_map(const MConstRef<MFSMap>& m) { std::scoped_lock cl(client_lock); fsmap.reset(new FSMap(m->get_fsmap())); signal_cond_list(waiting_for_fsmap); monclient->sub_got("fsmap", fsmap->get_epoch()); } void Client::handle_fs_map_user(const MConstRef<MFSMapUser>& m) { std::scoped_lock cl(client_lock); fsmap_user.reset(new FSMapUser); *fsmap_user = m->get_fsmap(); monclient->sub_got("fsmap.user", fsmap_user->get_epoch()); signal_cond_list(waiting_for_fsmap); } // Cancel all the commands for missing or laggy GIDs void Client::cancel_commands(const MDSMap& newmap) { std::vector<ceph_tid_t> cancel_ops; std::scoped_lock cmd_lock(command_lock); auto &commands = command_table.get_commands(); for (const auto &[tid, op] : commands) { const mds_gid_t op_mds_gid = op.mds_gid; if (newmap.is_dne_gid(op_mds_gid) || newmap.is_laggy_gid(op_mds_gid)) { ldout(cct, 1) << __func__ << ": cancelling command op " << tid << dendl; cancel_ops.push_back(tid); if (op.outs) { std::ostringstream ss; ss << "MDS " << op_mds_gid << " went away"; *(op.outs) = ss.str(); } /* * No need to make the con->mark_down under * client_lock here, because the con will * has its own lock. 
*/ op.con->mark_down(); if (op.on_finish) op.on_finish->complete(-CEPHFS_ETIMEDOUT); } } for (const auto &tid : cancel_ops) command_table.erase(tid); } void Client::handle_mds_map(const MConstRef<MMDSMap>& m) { std::unique_lock cl(client_lock); if (m->get_epoch() <= mdsmap->get_epoch()) { ldout(cct, 1) << __func__ << " epoch " << m->get_epoch() << " is identical to or older than our " << mdsmap->get_epoch() << dendl; return; } cl.unlock(); ldout(cct, 1) << __func__ << " epoch " << m->get_epoch() << dendl; std::unique_ptr<MDSMap> _mdsmap(new MDSMap); _mdsmap->decode(m->get_encoded()); cancel_commands(*_mdsmap.get()); cl.lock(); _mdsmap.swap(mdsmap); // reset session for (auto p = mds_sessions.begin(); p != mds_sessions.end(); ) { mds_rank_t mds = p->first; MetaSessionRef session = p->second; ++p; int oldstate = _mdsmap->get_state(mds); int newstate = mdsmap->get_state(mds); if (!mdsmap->is_up(mds)) { session->con->mark_down(); } else if (mdsmap->get_addrs(mds) != session->addrs) { auto old_inc = _mdsmap->get_incarnation(mds); auto new_inc = mdsmap->get_incarnation(mds); if (old_inc != new_inc) { ldout(cct, 1) << "mds incarnation changed from " << old_inc << " to " << new_inc << dendl; oldstate = MDSMap::STATE_NULL; } session->con->mark_down(); session->addrs = mdsmap->get_addrs(mds); // When new MDS starts to take over, notify kernel to trim unused entries // in its dcache/icache. Hopefully, the kernel will release some unused // inodes before the new MDS enters reconnect state. trim_cache_for_reconnect(session.get()); } else if (oldstate == newstate) continue; // no change session->mds_state = newstate; if (newstate == MDSMap::STATE_RECONNECT) { session->con = messenger->connect_to_mds(session->addrs); send_reconnect(session.get()); } else if (newstate > MDSMap::STATE_RECONNECT) { if (oldstate < MDSMap::STATE_RECONNECT) { ldout(cct, 1) << "we may miss the MDSMap::RECONNECT, close mds session ... " << dendl; _closed_mds_session(session.get()); continue; } if (newstate >= MDSMap::STATE_ACTIVE) { if (oldstate < MDSMap::STATE_ACTIVE) { // kick new requests kick_requests(session.get()); kick_flushing_caps(session.get()); signal_context_list(session->waiting_for_open); wake_up_session_caps(session.get(), true); } connect_mds_targets(mds); } } else if (newstate == MDSMap::STATE_NULL && mds >= mdsmap->get_max_mds()) { _closed_mds_session(session.get()); } } // kick any waiting threads signal_cond_list(waiting_for_mdsmap); monclient->sub_got("mdsmap", mdsmap->get_epoch()); } void Client::send_reconnect(MetaSession *session) { mds_rank_t mds = session->mds_num; ldout(cct, 10) << __func__ << " to mds." << mds << dendl; // trim unused caps to reduce MDS's cache rejoin time trim_cache_for_reconnect(session); session->readonly = false; session->release.reset(); // reset my cap seq number session->seq = 0; //connect to the mds' offload targets connect_mds_targets(mds); //make sure unsafe requests get saved resend_unsafe_requests(session); early_kick_flushing_caps(session); auto m = make_message<MClientReconnect>(); bool allow_multi = session->mds_features.test(CEPHFS_FEATURE_MULTI_RECONNECT); // i have an open session. 
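  // Editor's note (illustrative, not part of the original source): the loop
  // below replays the cap state of every cached inode holding a cap from
  // this MDS into the reconnect message. When the MDS supports
  // CEPHFS_FEATURE_MULTI_RECONNECT, the payload is split once the message
  // grows past roughly half of INT_MAX bytes (see the m->get_approx_size()
  // check): the partial message is flushed with mark_more() and a fresh
  // MClientReconnect is started, so an arbitrarily large cap set never
  // produces a single unsendable message.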
ceph::unordered_set<inodeno_t> did_snaprealm; for (ceph::unordered_map<vinodeno_t, Inode*>::iterator p = inode_map.begin(); p != inode_map.end(); ++p) { Inode *in = p->second; auto it = in->caps.find(mds); if (it != in->caps.end()) { if (allow_multi && m->get_approx_size() >= static_cast<size_t>((std::numeric_limits<int>::max() >> 1))) { m->mark_more(); session->con->send_message2(std::move(m)); m = make_message<MClientReconnect>(); } Cap &cap = it->second; ldout(cct, 10) << " caps on " << p->first << " " << ccap_string(cap.issued) << " wants " << ccap_string(in->caps_wanted()) << dendl; filepath path; in->make_short_path(path); ldout(cct, 10) << " path " << path << dendl; bufferlist flockbl; _encode_filelocks(in, flockbl); cap.seq = 0; // reset seq. cap.issue_seq = 0; // reset seq. cap.mseq = 0; // reset seq. // cap gen should catch up with session cap_gen if (cap.gen < session->cap_gen) { cap.gen = session->cap_gen; cap.issued = cap.implemented = CEPH_CAP_PIN; } else { cap.issued = cap.implemented; } snapid_t snap_follows = 0; if (!in->cap_snaps.empty()) snap_follows = in->cap_snaps.begin()->first; m->add_cap(p->first.ino, cap.cap_id, path.get_ino(), path.get_path(), // ino in->caps_wanted(), // wanted cap.issued, // issued in->snaprealm->ino, snap_follows, flockbl); if (did_snaprealm.count(in->snaprealm->ino) == 0) { ldout(cct, 10) << " snaprealm " << *in->snaprealm << dendl; m->add_snaprealm(in->snaprealm->ino, in->snaprealm->seq, in->snaprealm->parent); did_snaprealm.insert(in->snaprealm->ino); } } } if (!allow_multi) m->set_encoding_version(0); // use connection features to choose encoding session->con->send_message2(std::move(m)); mount_cond.notify_all(); if (session->reclaim_state == MetaSession::RECLAIMING) signal_cond_list(waiting_for_reclaim); } void Client::kick_requests(MetaSession *session) { ldout(cct, 10) << __func__ << " for mds." << session->mds_num << dendl; for (map<ceph_tid_t, MetaRequest*>::iterator p = mds_requests.begin(); p != mds_requests.end(); ++p) { MetaRequest *req = p->second; if (req->got_unsafe) continue; if (req->aborted()) { if (req->caller_cond) { req->kick = true; req->caller_cond->notify_all(); } continue; } if (req->retry_attempt > 0) continue; // new requests only if (req->mds == session->mds_num) { send_request(p->second, session); } } } void Client::resend_unsafe_requests(MetaSession *session) { for (xlist<MetaRequest*>::iterator iter = session->unsafe_requests.begin(); !iter.end(); ++iter) send_request(*iter, session); // also re-send old requests when MDS enters reconnect stage. So that MDS can // process completed requests in clientreplay stage. for (map<ceph_tid_t, MetaRequest*>::iterator p = mds_requests.begin(); p != mds_requests.end(); ++p) { MetaRequest *req = p->second; if (req->got_unsafe) continue; if (req->aborted()) continue; if (req->retry_attempt == 0) continue; // old requests only if (req->mds == session->mds_num) send_request(req, session, true); } } void Client::wait_unsafe_requests() { list<MetaRequest*> last_unsafe_reqs; for (const auto &p : mds_sessions) { const auto s = p.second; if (!s->unsafe_requests.empty()) { MetaRequest *req = s->unsafe_requests.back(); req->get(); last_unsafe_reqs.push_back(req); } } for (list<MetaRequest*>::iterator p = last_unsafe_reqs.begin(); p != last_unsafe_reqs.end(); ++p) { MetaRequest *req = *p; if (req->unsafe_item.is_on_list()) wait_on_list(req->waitfor_safe); put_request(req); } } void Client::kick_requests_closed(MetaSession *session) { ldout(cct, 10) << __func__ << " for mds." 
<< session->mds_num << dendl; for (map<ceph_tid_t, MetaRequest*>::iterator p = mds_requests.begin(); p != mds_requests.end(); ) { MetaRequest *req = p->second; ++p; if (req->mds == session->mds_num) { if (req->caller_cond) { req->kick = true; req->caller_cond->notify_all(); } req->item.remove_myself(); if (req->got_unsafe) { lderr(cct) << __func__ << " removing unsafe request " << req->get_tid() << dendl; req->unsafe_item.remove_myself(); if (is_dir_operation(req)) { Inode *dir = req->inode(); ceph_assert(dir); dir->set_async_err(-CEPHFS_EIO); lderr(cct) << "kick_requests_closed drop req of inode(dir) : " << dir->ino << " " << req->get_tid() << dendl; req->unsafe_dir_item.remove_myself(); } if (req->target) { InodeRef &in = req->target; in->set_async_err(-CEPHFS_EIO); lderr(cct) << "kick_requests_closed drop req of inode : " << in->ino << " " << req->get_tid() << dendl; req->unsafe_target_item.remove_myself(); } signal_cond_list(req->waitfor_safe); unregister_request(req); } } } ceph_assert(session->requests.empty()); ceph_assert(session->unsafe_requests.empty()); } /************ * leases */ void Client::got_mds_push(MetaSession *s) { s->seq++; ldout(cct, 10) << " mds." << s->mds_num << " seq now " << s->seq << dendl; if (s->state == MetaSession::STATE_CLOSING) { s->con->send_message2(make_message<MClientSession>(CEPH_SESSION_REQUEST_CLOSE, s->seq)); } } void Client::handle_lease(const MConstRef<MClientLease>& m) { ldout(cct, 10) << __func__ << " " << *m << dendl; ceph_assert(m->get_action() == CEPH_MDS_LEASE_REVOKE); mds_rank_t mds = mds_rank_t(m->get_source().num()); std::scoped_lock cl(client_lock); auto session = _get_mds_session(mds, m->get_connection().get()); if (!session) { return; } got_mds_push(session.get()); ceph_seq_t seq = m->get_seq(); Inode *in; vinodeno_t vino(m->get_ino(), CEPH_NOSNAP); if (inode_map.count(vino) == 0) { ldout(cct, 10) << " don't have vino " << vino << dendl; goto revoke; } in = inode_map[vino]; if (m->get_mask() & CEPH_LEASE_VALID) { if (!in->dir || in->dir->dentries.count(m->dname) == 0) { ldout(cct, 10) << " don't have dir|dentry " << m->get_ino() << "/" << m->dname <<dendl; goto revoke; } Dentry *dn = in->dir->dentries[m->dname]; ldout(cct, 10) << " revoked DN lease on " << dn << dendl; dn->lease_mds = -1; } revoke: { auto reply = make_message<MClientLease>(CEPH_MDS_LEASE_RELEASE, seq, m->get_mask(), m->get_ino(), m->get_first(), m->get_last(), m->dname); m->get_connection()->send_message2(std::move(reply)); } } void Client::_put_inode(Inode *in, int n) { ldout(cct, 10) << __func__ << " on " << *in << " n = " << n << dendl; int left = in->get_nref(); ceph_assert(left >= n + 1); in->iput(n); left -= n; if (left == 1) { // the last one will be held by the inode_map // release any caps remove_all_caps(in); ldout(cct, 10) << __func__ << " deleting " << *in << dendl; bool unclean = objectcacher->release_set(&in->oset); ceph_assert(!unclean); inode_map.erase(in->vino()); if (use_faked_inos()) _release_faked_ino(in); if (root == nullptr) { root_ancestor = 0; while (!root_parents.empty()) root_parents.erase(root_parents.begin()); } in->iput(); } } void Client::delay_put_inodes(bool wakeup) { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); std::map<Inode*,int> release; { std::scoped_lock dl(delay_i_lock); release.swap(delay_i_release); } if (release.empty()) return; for (auto &[in, cnt] : release) _put_inode(in, cnt); if (wakeup) mount_cond.notify_all(); } void Client::put_inode(Inode *in, int n) { ldout(cct, 20) << __func__ << " on " << *in << " n = " << 
n << dendl; std::scoped_lock dl(delay_i_lock); delay_i_release[in] += n; } void Client::close_dir(Dir *dir) { Inode *in = dir->parent_inode; ldout(cct, 15) << __func__ << " dir " << dir << " on " << in << dendl; ceph_assert(dir->is_empty()); ceph_assert(in->dir == dir); ceph_assert(in->dentries.size() < 2); // dirs can't be hard-linked if (!in->dentries.empty()) in->get_first_parent()->put(); // unpin dentry delete in->dir; in->dir = 0; put_inode(in); // unpin inode } /** * Don't call this with in==NULL, use get_or_create for that * leave dn set to default NULL unless you're trying to add * a new inode to a pre-created Dentry */ Dentry* Client::link(Dir *dir, const string& name, Inode *in, Dentry *dn) { if (!dn) { // create a new Dentry dn = new Dentry(dir, name); lru.lru_insert_mid(dn); // mid or top? if(in) { ldout(cct, 15) << "link dir " << *dir->parent_inode << " '" << name << "' to inode " << *in << " dn " << *dn << " (new dn)" << dendl; } else { ldout(cct, 15) << "link dir " << *dir->parent_inode << " '" << name << "' " << " dn " << *dn << " (new dn)" << dendl; } } else { ceph_assert(!dn->inode); ldout(cct, 15) << "link dir " << *dir->parent_inode << " '" << name << "' to inode " << in << " dn " << *dn << " (old dn)" << dendl; } if (in) { // link to inode InodeRef tmp_ref; // only one parent for directories! if (in->is_dir() && !in->dentries.empty()) { tmp_ref = in; // prevent unlink below from freeing the inode. Dentry *olddn = in->get_first_parent(); ceph_assert(olddn->dir != dir || olddn->name != name); Inode *old_diri = olddn->dir->parent_inode; clear_dir_complete_and_ordered(old_diri, true); unlink(olddn, true, true); // keep dir, dentry } dn->link(in); inc_dentry_nr(); ldout(cct, 20) << "link inode " << in << " parents now " << in->dentries << dendl; } return dn; } void Client::unlink(Dentry *dn, bool keepdir, bool keepdentry) { InodeRef in(dn->inode); ldout(cct, 15) << "unlink dir " << dn->dir->parent_inode << " '" << dn->name << "' dn " << dn << " inode " << dn->inode << dendl; // unlink from inode if (dn->inode) { dn->unlink(); dec_dentry_nr(); ldout(cct, 20) << "unlink inode " << in << " parents now " << in->dentries << dendl; } if (keepdentry) { dn->lease_mds = -1; } else { ldout(cct, 15) << "unlink removing '" << dn->name << "' dn " << dn << dendl; // unlink from dir Dir *dir = dn->dir; dn->detach(); // delete den lru.lru_remove(dn); dn->put(); if (dir->is_empty() && !keepdir) close_dir(dir); } } /** * For asynchronous flushes, check for errors from the IO and * update the inode if necessary */ class C_Client_FlushComplete : public Context { private: Client *client; InodeRef inode; public: C_Client_FlushComplete(Client *c, Inode *in) : client(c), inode(in) { } void finish(int r) override { ceph_assert(ceph_mutex_is_locked_by_me(client->client_lock)); if (r != 0) { client_t const whoami = client->whoami; // For the benefit of ldout prefix ldout(client->cct, 1) << "I/O error from flush on inode " << inode << " 0x" << std::hex << inode->ino << std::dec << ": " << r << "(" << cpp_strerror(r) << ")" << dendl; inode->set_async_err(r); } } }; /**** * caps */ void Client::get_cap_ref(Inode *in, int cap) { if ((cap & CEPH_CAP_FILE_BUFFER) && in->cap_refs[CEPH_CAP_FILE_BUFFER] == 0) { ldout(cct, 5) << __func__ << " got first FILE_BUFFER ref on " << *in << dendl; in->iget(); } if ((cap & CEPH_CAP_FILE_CACHE) && in->cap_refs[CEPH_CAP_FILE_CACHE] == 0) { ldout(cct, 5) << __func__ << " got first FILE_CACHE ref on " << *in << dendl; in->iget(); } in->get_cap_ref(cap); } void 
Client::put_cap_ref(Inode *in, int cap) { int last = in->put_cap_ref(cap); if (last) { int put_nref = 0; int drop = last & ~in->caps_issued(); if (in->snapid == CEPH_NOSNAP) { if ((last & CEPH_CAP_FILE_WR) && !in->cap_snaps.empty() && in->cap_snaps.rbegin()->second.writing) { ldout(cct, 10) << __func__ << " finishing pending cap_snap on " << *in << dendl; in->cap_snaps.rbegin()->second.writing = 0; finish_cap_snap(in, in->cap_snaps.rbegin()->second, get_caps_used(in)); signal_cond_list(in->waitfor_caps); // wake up blocked sync writers } if (last & CEPH_CAP_FILE_BUFFER) { for (auto &p : in->cap_snaps) p.second.dirty_data = 0; signal_cond_list(in->waitfor_commit); ldout(cct, 5) << __func__ << " dropped last FILE_BUFFER ref on " << *in << dendl; ++put_nref; if (!in->cap_snaps.empty()) { flush_snaps(in); } } } if (last & CEPH_CAP_FILE_CACHE) { ldout(cct, 5) << __func__ << " dropped last FILE_CACHE ref on " << *in << dendl; ++put_nref; } if (drop) check_caps(in, 0); if (put_nref) put_inode(in, put_nref); } } // get caps for a given file handle -- the inode should have @need caps // issued by the mds and @want caps not revoked (or not under revocation). // this routine blocks till the cap requirement is satisfied. also account // (track) for capability hit when required (when cap requirement succeedes). int Client::get_caps(Fh *fh, int need, int want, int *phave, loff_t endoff) { Inode *in = fh->inode.get(); int r = check_pool_perm(in, need); if (r < 0) return r; while (1) { int file_wanted = in->caps_file_wanted(); if ((file_wanted & need) != need) { ldout(cct, 10) << "get_caps " << *in << " need " << ccap_string(need) << " file_wanted " << ccap_string(file_wanted) << ", EBADF " << dendl; return -CEPHFS_EBADF; } if ((fh->mode & CEPH_FILE_MODE_WR) && fh->gen != fd_gen) return -CEPHFS_EBADF; if ((in->flags & I_ERROR_FILELOCK) && fh->has_any_filelocks()) return -CEPHFS_EIO; int implemented; int have = in->caps_issued(&implemented); bool waitfor_caps = false; bool waitfor_commit = false; if (have & need & CEPH_CAP_FILE_WR) { if (endoff > 0) { if ((endoff >= (loff_t)in->max_size || endoff > (loff_t)(in->size << 1)) && endoff > (loff_t)in->wanted_max_size) { ldout(cct, 10) << "wanted_max_size " << in->wanted_max_size << " -> " << endoff << dendl; in->wanted_max_size = endoff; } if (in->wanted_max_size > in->max_size && in->wanted_max_size > in->requested_max_size) check_caps(in, 0); } if (endoff >= 0 && endoff > (loff_t)in->max_size) { ldout(cct, 10) << "waiting on max_size, endoff " << endoff << " max_size " << in->max_size << " on " << *in << dendl; waitfor_caps = true; } if (!in->cap_snaps.empty()) { if (in->cap_snaps.rbegin()->second.writing) { ldout(cct, 10) << "waiting on cap_snap write to complete" << dendl; waitfor_caps = true; } for (auto &p : in->cap_snaps) { if (p.second.dirty_data) { waitfor_commit = true; break; } } if (waitfor_commit) { _flush(in, new C_Client_FlushComplete(this, in)); ldout(cct, 10) << "waiting for WRBUFFER to get dropped" << dendl; } } } if (!waitfor_caps && !waitfor_commit) { if ((have & need) == need) { int revoking = implemented & ~have; ldout(cct, 10) << "get_caps " << *in << " have " << ccap_string(have) << " need " << ccap_string(need) << " want " << ccap_string(want) << " revoking " << ccap_string(revoking) << dendl; if ((revoking & want) == 0) { *phave = need | (have & want); in->get_cap_ref(need); cap_hit(); return 0; } } ldout(cct, 10) << "waiting for caps " << *in << " need " << ccap_string(need) << " want " << ccap_string(want) << dendl; waitfor_caps = 
true; } if ((need & CEPH_CAP_FILE_WR) && ((in->auth_cap && in->auth_cap->session->readonly) || // userland clients are only allowed to read if fscrypt enabled in->is_fscrypt_enabled())) return -CEPHFS_EROFS; if (in->flags & I_CAP_DROPPED) { int mds_wanted = in->caps_mds_wanted(); if ((mds_wanted & need) != need) { int ret = _renew_caps(in); if (ret < 0) return ret; continue; } if (!(file_wanted & ~mds_wanted)) in->flags &= ~I_CAP_DROPPED; } if (waitfor_caps) wait_on_list(in->waitfor_caps); else if (waitfor_commit) wait_on_list(in->waitfor_commit); } } int Client::get_caps_used(Inode *in) { unsigned used = in->caps_used(); if (!(used & CEPH_CAP_FILE_CACHE) && !objectcacher->set_is_empty(&in->oset)) used |= CEPH_CAP_FILE_CACHE; return used; } void Client::cap_delay_requeue(Inode *in) { ldout(cct, 10) << __func__ << " on " << *in << dendl; in->hold_caps_until = ceph::coarse_mono_clock::now() + caps_release_delay; delayed_list.push_back(&in->delay_cap_item); } void Client::send_cap(Inode *in, MetaSession *session, Cap *cap, int flags, int used, int want, int retain, int flush, ceph_tid_t flush_tid) { int held = cap->issued | cap->implemented; int revoking = cap->implemented & ~cap->issued; retain &= ~revoking; int dropping = cap->issued & ~retain; int op = CEPH_CAP_OP_UPDATE; ldout(cct, 10) << __func__ << " " << *in << " mds." << session->mds_num << " seq " << cap->seq << " used " << ccap_string(used) << " want " << ccap_string(want) << " flush " << ccap_string(flush) << " retain " << ccap_string(retain) << " held "<< ccap_string(held) << " revoking " << ccap_string(revoking) << " dropping " << ccap_string(dropping) << dendl; if (cct->_conf->client_inject_release_failure && revoking) { const int would_have_issued = cap->issued & retain; const int would_have_implemented = cap->implemented & (cap->issued | used); // Simulated bug: // - tell the server we think issued is whatever they issued plus whatever we implemented // - leave what we have implemented in place ldout(cct, 20) << __func__ << " injecting failure to release caps" << dendl; cap->issued = cap->issued | cap->implemented; // Make an exception for revoking xattr caps: we are injecting // failure to release other caps, but allow xattr because client // will block on xattr ops if it can't release these to MDS (#9800) const int xattr_mask = CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL; cap->issued ^= xattr_mask & revoking; cap->implemented ^= xattr_mask & revoking; ldout(cct, 20) << __func__ << " issued " << ccap_string(cap->issued) << " vs " << ccap_string(would_have_issued) << dendl; ldout(cct, 20) << __func__ << " implemented " << ccap_string(cap->implemented) << " vs " << ccap_string(would_have_implemented) << dendl; } else { // Normal behaviour cap->issued &= retain; cap->implemented &= cap->issued | used; } snapid_t follows = 0; if (flush) follows = in->snaprealm->get_snap_context().seq; auto m = make_message<MClientCaps>(op, in->ino, 0, cap->cap_id, cap->seq, cap->implemented, want, flush, cap->mseq, cap_epoch_barrier); m->caller_uid = in->cap_dirtier_uid; m->caller_gid = in->cap_dirtier_gid; m->head.issue_seq = cap->issue_seq; m->set_tid(flush_tid); m->head.uid = in->uid; m->head.gid = in->gid; m->head.mode = in->mode; m->head.nlink = in->nlink; if (flush & CEPH_CAP_XATTR_EXCL) { encode(in->xattrs, m->xattrbl); m->head.xattr_version = in->xattr_version; } m->size = in->size; m->max_size = in->max_size; m->truncate_seq = in->truncate_seq; m->truncate_size = in->truncate_size; m->mtime = in->mtime; m->atime = in->atime; m->ctime = 
in->ctime; m->btime = in->btime; m->time_warp_seq = in->time_warp_seq; m->change_attr = in->change_attr; m->fscrypt_auth = in->fscrypt_auth; m->fscrypt_file = in->fscrypt_file; if (!(flags & MClientCaps::FLAG_PENDING_CAPSNAP) && !in->cap_snaps.empty() && in->cap_snaps.rbegin()->second.flush_tid == 0) flags |= MClientCaps::FLAG_PENDING_CAPSNAP; m->flags = flags; if (flush & CEPH_CAP_FILE_WR) { m->inline_version = in->inline_version; m->inline_data = in->inline_data; } in->reported_size = in->size; m->set_snap_follows(follows); cap->wanted = want; if (cap == in->auth_cap) { if (want & CEPH_CAP_ANY_FILE_WR) { m->set_max_size(in->wanted_max_size); in->requested_max_size = in->wanted_max_size; ldout(cct, 15) << "auth cap, requesting max_size " << in->requested_max_size << dendl; } else { in->requested_max_size = 0; ldout(cct, 15) << "auth cap, reset requested_max_size due to not wanting any file write cap" << dendl; } } if (!session->flushing_caps_tids.empty()) m->set_oldest_flush_tid(*session->flushing_caps_tids.begin()); session->con->send_message2(std::move(m)); } static bool is_max_size_approaching(Inode *in) { /* mds will adjust max size according to the reported size */ if (in->flushing_caps & CEPH_CAP_FILE_WR) return false; if (in->size >= in->max_size) return true; /* half of previous max_size increment has been used */ if (in->max_size > in->reported_size && (in->size << 1) >= in->max_size + in->reported_size) return true; return false; } static int adjust_caps_used_for_lazyio(int used, int issued, int implemented) { if (!(used & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_BUFFER))) return used; if (!(implemented & CEPH_CAP_FILE_LAZYIO)) return used; if (issued & CEPH_CAP_FILE_LAZYIO) { if (!(issued & CEPH_CAP_FILE_CACHE)) { used &= ~CEPH_CAP_FILE_CACHE; used |= CEPH_CAP_FILE_LAZYIO; } if (!(issued & CEPH_CAP_FILE_BUFFER)) { used &= ~CEPH_CAP_FILE_BUFFER; used |= CEPH_CAP_FILE_LAZYIO; } } else { if (!(implemented & CEPH_CAP_FILE_CACHE)) { used &= ~CEPH_CAP_FILE_CACHE; used |= CEPH_CAP_FILE_LAZYIO; } if (!(implemented & CEPH_CAP_FILE_BUFFER)) { used &= ~CEPH_CAP_FILE_BUFFER; used |= CEPH_CAP_FILE_LAZYIO; } } return used; } /** * check_caps * * Examine currently used and wanted versus held caps. Release, flush or ack * revoked caps to the MDS as appropriate. * * @param in the inode to check * @param flags flags to apply to cap check */ void Client::check_caps(Inode *in, unsigned flags) { unsigned wanted = in->caps_wanted(); unsigned used = get_caps_used(in); unsigned cap_used; int implemented; int issued = in->caps_issued(&implemented); int revoking = implemented & ~issued; int orig_used = used; used = adjust_caps_used_for_lazyio(used, issued, implemented); int retain = wanted | used | CEPH_CAP_PIN; if (!is_unmounting() && in->nlink > 0) { if (wanted) { retain |= CEPH_CAP_ANY; } else if (in->is_dir() && (issued & CEPH_CAP_FILE_SHARED) && (in->flags & I_COMPLETE)) { // we do this here because we don't want to drop to Fs (and then // drop the Fs if we do a create!) if that alone makes us send lookups // to the MDS. Doing it in in->caps_wanted() has knock-on effects elsewhere wanted = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL; retain |= wanted; } else { retain |= CEPH_CAP_ANY_SHARED; // keep RD only if we didn't have the file open RW, // because then the mds would revoke it anyway to // journal max_size=0. 
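// Editorial note (illustrative, not from the original source): any inode
// that is still wanted keeps everything (retain == CEPH_CAP_ANY above), so
// the max_size == 0 test below only matters for files we no longer want
// and that were never open for write from the MDS' point of view.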
if (in->max_size == 0) retain |= CEPH_CAP_ANY_RD; } } ldout(cct, 10) << __func__ << " on " << *in << " wanted " << ccap_string(wanted) << " used " << ccap_string(used) << " issued " << ccap_string(issued) << " revoking " << ccap_string(revoking) << " flags=" << flags << dendl; if (in->snapid != CEPH_NOSNAP) return; //snap caps last forever, can't write if (in->caps.empty()) return; // guard if at end of func if (!(orig_used & CEPH_CAP_FILE_BUFFER) && (revoking & used & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO))) { if (_release(in)) used &= ~(CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO); } for (auto &[mds, cap] : in->caps) { auto session = mds_sessions.at(mds); cap_used = used; if (in->auth_cap && &cap != in->auth_cap) cap_used &= ~in->auth_cap->issued; revoking = cap.implemented & ~cap.issued; ldout(cct, 10) << " cap mds." << mds << " issued " << ccap_string(cap.issued) << " implemented " << ccap_string(cap.implemented) << " revoking " << ccap_string(revoking) << dendl; if (in->wanted_max_size > in->max_size && in->wanted_max_size > in->requested_max_size && &cap == in->auth_cap) goto ack; /* approaching file_max? */ if ((cap.issued & CEPH_CAP_FILE_WR) && &cap == in->auth_cap && is_max_size_approaching(in)) { ldout(cct, 10) << "size " << in->size << " approaching max_size " << in->max_size << ", reported " << in->reported_size << dendl; goto ack; } /* completed revocation? */ if (revoking && (revoking & cap_used) == 0) { ldout(cct, 10) << "completed revocation of " << ccap_string(cap.implemented & ~cap.issued) << dendl; goto ack; } /* want more caps from mds? */ if (wanted & ~(cap.wanted | cap.issued)) goto ack; if (!revoking && is_unmounting() && (cap_used == 0)) goto ack; if ((cap.issued & ~retain) == 0 && // and we don't have anything we wouldn't like !in->dirty_caps) // and we have no dirty caps continue; if (!(flags & CHECK_CAPS_NODELAY)) { ldout(cct, 10) << "delaying cap release" << dendl; cap_delay_requeue(in); continue; } ack: if (&cap == in->auth_cap) { if (in->flags & I_KICK_FLUSH) { ldout(cct, 20) << " reflushing caps (check_caps) on " << *in << " to mds." 
<< mds << dendl; kick_flushing_caps(in, session.get()); } if (!in->cap_snaps.empty() && in->cap_snaps.rbegin()->second.flush_tid == 0) flush_snaps(in); } int flushing; int msg_flags = 0; ceph_tid_t flush_tid; if (in->auth_cap == &cap && in->dirty_caps) { flushing = mark_caps_flushing(in, &flush_tid); if (flags & CHECK_CAPS_SYNCHRONOUS) msg_flags |= MClientCaps::FLAG_SYNC; } else { flushing = 0; flush_tid = 0; } in->delay_cap_item.remove_myself(); send_cap(in, session.get(), &cap, msg_flags, cap_used, wanted, retain, flushing, flush_tid); } } void Client::queue_cap_snap(Inode *in, SnapContext& old_snapc) { int used = get_caps_used(in); int dirty = in->caps_dirty(); ldout(cct, 10) << __func__ << " " << *in << " snapc " << old_snapc << " used " << ccap_string(used) << dendl; if (in->cap_snaps.size() && in->cap_snaps.rbegin()->second.writing) { ldout(cct, 10) << __func__ << " already have pending cap_snap on " << *in << dendl; return; } else if (dirty || (used & CEPH_CAP_FILE_WR)) { const auto &capsnapem = in->cap_snaps.emplace(std::piecewise_construct, std::make_tuple(old_snapc.seq), std::make_tuple(in)); ceph_assert(capsnapem.second); /* element inserted */ CapSnap &capsnap = capsnapem.first->second; capsnap.context = old_snapc; capsnap.issued = in->caps_issued(); capsnap.dirty = dirty; capsnap.dirty_data = (used & CEPH_CAP_FILE_BUFFER); capsnap.uid = in->uid; capsnap.gid = in->gid; capsnap.mode = in->mode; capsnap.btime = in->btime; capsnap.xattrs = in->xattrs; capsnap.xattr_version = in->xattr_version; capsnap.cap_dirtier_uid = in->cap_dirtier_uid; capsnap.cap_dirtier_gid = in->cap_dirtier_gid; if (used & CEPH_CAP_FILE_WR) { ldout(cct, 10) << __func__ << " WR used on " << *in << dendl; capsnap.writing = 1; } else { finish_cap_snap(in, capsnap, used); } } else { ldout(cct, 10) << __func__ << " not dirty|writing on " << *in << dendl; } } void Client::finish_cap_snap(Inode *in, CapSnap &capsnap, int used) { ldout(cct, 10) << __func__ << " " << *in << " capsnap " << (void *)&capsnap << " used " << ccap_string(used) << dendl; capsnap.size = in->size; capsnap.mtime = in->mtime; capsnap.atime = in->atime; capsnap.ctime = in->ctime; capsnap.time_warp_seq = in->time_warp_seq; capsnap.change_attr = in->change_attr; capsnap.dirty |= in->caps_dirty(); /* Only reset it if it wasn't set before */ if (capsnap.cap_dirtier_uid == -1) { capsnap.cap_dirtier_uid = in->cap_dirtier_uid; capsnap.cap_dirtier_gid = in->cap_dirtier_gid; } if (capsnap.dirty & CEPH_CAP_FILE_WR) { capsnap.inline_data = in->inline_data; capsnap.inline_version = in->inline_version; } if (used & CEPH_CAP_FILE_BUFFER) { ldout(cct, 10) << __func__ << " " << *in << " cap_snap " << &capsnap << " used " << used << " WRBUFFER, trigger to flush dirty buffer" << dendl; /* trigger to flush the buffer */ _flush(in, new C_Client_FlushComplete(this, in)); } else { capsnap.dirty_data = 0; flush_snaps(in); } } void Client::send_flush_snap(Inode *in, MetaSession *session, snapid_t follows, CapSnap& capsnap) { auto m = make_message<MClientCaps>(CEPH_CAP_OP_FLUSHSNAP, in->ino, in->snaprealm->ino, 0, in->auth_cap->mseq, cap_epoch_barrier); m->caller_uid = capsnap.cap_dirtier_uid; m->caller_gid = capsnap.cap_dirtier_gid; m->set_client_tid(capsnap.flush_tid); m->head.snap_follows = follows; m->head.caps = capsnap.issued; m->head.dirty = capsnap.dirty; m->head.uid = capsnap.uid; m->head.gid = capsnap.gid; m->head.mode = capsnap.mode; m->btime = capsnap.btime; m->size = capsnap.size; m->head.xattr_version = capsnap.xattr_version; encode(capsnap.xattrs, 
m->xattrbl); m->ctime = capsnap.ctime; m->btime = capsnap.btime; m->mtime = capsnap.mtime; m->atime = capsnap.atime; m->time_warp_seq = capsnap.time_warp_seq; m->change_attr = capsnap.change_attr; if (capsnap.dirty & CEPH_CAP_FILE_WR) { m->inline_version = in->inline_version; m->inline_data = in->inline_data; } ceph_assert(!session->flushing_caps_tids.empty()); m->set_oldest_flush_tid(*session->flushing_caps_tids.begin()); session->con->send_message2(std::move(m)); } void Client::flush_snaps(Inode *in) { ldout(cct, 10) << "flush_snaps on " << *in << dendl; ceph_assert(in->cap_snaps.size()); // pick auth mds ceph_assert(in->auth_cap); MetaSession *session = in->auth_cap->session; for (auto &p : in->cap_snaps) { CapSnap &capsnap = p.second; // only do new flush if (capsnap.flush_tid > 0) continue; ldout(cct, 10) << "flush_snaps mds." << session->mds_num << " follows " << p.first << " size " << capsnap.size << " mtime " << capsnap.mtime << " dirty_data=" << capsnap.dirty_data << " writing=" << capsnap.writing << " on " << *in << dendl; if (capsnap.dirty_data || capsnap.writing) break; capsnap.flush_tid = ++last_flush_tid; session->flushing_caps_tids.insert(capsnap.flush_tid); in->flushing_cap_tids[capsnap.flush_tid] = 0; if (!in->flushing_cap_item.is_on_list()) session->flushing_caps.push_back(&in->flushing_cap_item); send_flush_snap(in, session, p.first, capsnap); } } void Client::wait_on_list(list<ceph::condition_variable*>& ls) { ceph::condition_variable cond; ls.push_back(&cond); std::unique_lock l{client_lock, std::adopt_lock}; cond.wait(l); l.release(); ls.remove(&cond); } void Client::signal_cond_list(list<ceph::condition_variable*>& ls) { for (auto cond : ls) { cond->notify_all(); } } void Client::wait_on_context_list(list<Context*>& ls) { ceph::condition_variable cond; bool done = false; int r; ls.push_back(new C_Cond(cond, &done, &r)); std::unique_lock l{client_lock, std::adopt_lock}; cond.wait(l, [&done] { return done;}); l.release(); } void Client::signal_context_list(list<Context*>& ls) { while (!ls.empty()) { ls.front()->complete(0); ls.pop_front(); } } void Client::wake_up_session_caps(MetaSession *s, bool reconnect) { for (const auto &cap : s->caps) { auto &in = cap->inode; if (reconnect) { in.requested_max_size = 0; in.wanted_max_size = 0; } else { if (cap->gen < s->cap_gen) { // mds did not re-issue stale cap. cap->issued = cap->implemented = CEPH_CAP_PIN; // make sure mds knows what we want. if (in.caps_file_wanted() & ~cap->wanted) in.flags |= I_CAP_DROPPED; } } signal_cond_list(in.waitfor_caps); } } // flush dirty data (from objectcache) class C_Client_CacheInvalidate : public Context { private: Client *client; vinodeno_t ino; int64_t offset, length; public: C_Client_CacheInvalidate(Client *c, Inode *in, int64_t off, int64_t len) : client(c), offset(off), length(len) { if (client->use_faked_inos()) ino = vinodeno_t(in->faked_ino, CEPH_NOSNAP); else ino = in->vino(); } void finish(int r) override { // _async_invalidate takes the lock when it needs to, call this back from outside of lock. 
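    // Editorial note: this context is queued on the async_ino_invalidator
    // finisher thread (see _schedule_invalidate_callback), so by design
    // client_lock must not be held here; the assert below documents that
    // contract.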
ceph_assert(ceph_mutex_is_not_locked_by_me(client->client_lock)); client->_async_invalidate(ino, offset, length); } }; void Client::_async_invalidate(vinodeno_t ino, int64_t off, int64_t len) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return; ldout(cct, 10) << __func__ << " " << ino << " " << off << "~" << len << dendl; ino_invalidate_cb(callback_handle, ino, off, len); } void Client::_schedule_invalidate_callback(Inode *in, int64_t off, int64_t len) { if (ino_invalidate_cb) // we queue the invalidate, which calls the callback and decrements the ref async_ino_invalidator.queue(new C_Client_CacheInvalidate(this, in, off, len)); } void Client::_invalidate_inode_cache(Inode *in) { ldout(cct, 10) << __func__ << " " << *in << dendl; // invalidate our userspace inode cache if (cct->_conf->client_oc) { objectcacher->release_set(&in->oset); if (!objectcacher->set_is_empty(&in->oset)) lderr(cct) << "failed to invalidate cache for " << *in << dendl; } _schedule_invalidate_callback(in, 0, 0); } void Client::_invalidate_inode_cache(Inode *in, int64_t off, int64_t len) { ldout(cct, 10) << __func__ << " " << *in << " " << off << "~" << len << dendl; // invalidate our userspace inode cache if (cct->_conf->client_oc) { vector<ObjectExtent> ls; Striper::file_to_extents(cct, in->ino, &in->layout, off, len, in->truncate_size, ls); objectcacher->discard_writeback(&in->oset, ls, nullptr); } _schedule_invalidate_callback(in, off, len); } bool Client::_release(Inode *in) { ldout(cct, 20) << "_release " << *in << dendl; if (in->cap_refs[CEPH_CAP_FILE_CACHE] == 0) { _invalidate_inode_cache(in); return true; } return false; } bool Client::_flush(Inode *in, Context *onfinish) { ldout(cct, 10) << "_flush " << *in << dendl; if (!in->oset.dirty_or_tx) { ldout(cct, 10) << " nothing to flush" << dendl; onfinish->complete(0); return true; } if (objecter->osdmap_pool_full(in->layout.pool_id)) { ldout(cct, 8) << __func__ << ": FULL, purging for ENOSPC" << dendl; objectcacher->purge_set(&in->oset); if (onfinish) { onfinish->complete(-CEPHFS_ENOSPC); } return true; } return objectcacher->flush_set(&in->oset, onfinish); } void Client::_flush_range(Inode *in, int64_t offset, uint64_t size) { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); if (!in->oset.dirty_or_tx) { ldout(cct, 10) << " nothing to flush" << dendl; return; } C_SaferCond onflush("Client::_flush_range flock"); bool ret = objectcacher->file_flush(&in->oset, &in->layout, in->snaprealm->get_snap_context(), offset, size, &onflush); if (!ret) { // wait for flush client_lock.unlock(); onflush.wait(); client_lock.lock(); } } void Client::flush_set_callback(ObjectCacher::ObjectSet *oset) { // std::scoped_lock l(client_lock); ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); // will be called via dispatch() -> objecter -> ... 
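  // Editorial note (assumption based on how the oset is initialized):
  // ObjectSet::parent carries a back-pointer to the owning Inode, so the
  // downcast below recovers the inode whose buffers just finished flushing.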
Inode *in = static_cast<Inode *>(oset->parent); ceph_assert(in); _flushed(in); } void Client::_flushed(Inode *in) { ldout(cct, 10) << "_flushed " << *in << dendl; put_cap_ref(in, CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_BUFFER); } // checks common to add_update_cap, handle_cap_grant void Client::check_cap_issue(Inode *in, unsigned issued) { unsigned had = in->caps_issued(); if ((issued & CEPH_CAP_FILE_CACHE) && !(had & CEPH_CAP_FILE_CACHE)) in->cache_gen++; if ((issued & CEPH_CAP_FILE_SHARED) != (had & CEPH_CAP_FILE_SHARED)) { if (issued & CEPH_CAP_FILE_SHARED) in->shared_gen++; if (in->is_dir()) clear_dir_complete_and_ordered(in, true); } } void Client::add_update_cap(Inode *in, MetaSession *mds_session, uint64_t cap_id, unsigned issued, unsigned wanted, unsigned seq, unsigned mseq, inodeno_t realm, int flags, const UserPerm& cap_perms) { if (!in->is_any_caps()) { ceph_assert(in->snaprealm == 0); in->snaprealm = get_snap_realm(realm); in->snaprealm->inodes_with_caps.push_back(&in->snaprealm_item); ldout(cct, 15) << __func__ << " first one, opened snaprealm " << in->snaprealm << dendl; } else { ceph_assert(in->snaprealm); if ((flags & CEPH_CAP_FLAG_AUTH) && realm != inodeno_t(-1) && in->snaprealm->ino != realm) { in->snaprealm_item.remove_myself(); auto oldrealm = in->snaprealm; in->snaprealm = get_snap_realm(realm); in->snaprealm->inodes_with_caps.push_back(&in->snaprealm_item); put_snap_realm(oldrealm); } } mds_rank_t mds = mds_session->mds_num; const auto &capem = in->caps.emplace(std::piecewise_construct, std::forward_as_tuple(mds), std::forward_as_tuple(*in, mds_session)); Cap &cap = capem.first->second; if (!capem.second) { if (cap.gen < mds_session->cap_gen) cap.issued = cap.implemented = CEPH_CAP_PIN; /* * auth mds of the inode changed. we received the cap export * message, but still haven't received the cap import message. * handle_cap_export() updated the new auth MDS' cap. * * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing * a message that was send before the cap import message. So * don't remove caps. */ if (ceph_seq_cmp(seq, cap.seq) <= 0) { if (&cap != in->auth_cap) ldout(cct, 0) << "WARNING: " << "inode " << *in << " caps on mds." << mds << " != auth_cap." << dendl; ceph_assert(cap.cap_id == cap_id); seq = cap.seq; mseq = cap.mseq; issued |= cap.issued; flags |= CEPH_CAP_FLAG_AUTH; } } else { inc_pinned_icaps(); } check_cap_issue(in, issued); if (flags & CEPH_CAP_FLAG_AUTH) { if (in->auth_cap != &cap && (!in->auth_cap || ceph_seq_cmp(in->auth_cap->mseq, mseq) < 0)) { if (in->auth_cap && in->flushing_cap_item.is_on_list()) { ldout(cct, 10) << __func__ << " changing auth cap: " << "add myself to new auth MDS' flushing caps list" << dendl; adjust_session_flushing_caps(in, in->auth_cap->session, mds_session); } in->auth_cap = &cap; } } unsigned old_caps = cap.issued; cap.cap_id = cap_id; cap.issued = issued; cap.implemented |= issued; if (ceph_seq_cmp(mseq, cap.mseq) > 0) cap.wanted = wanted; else cap.wanted |= wanted; cap.seq = seq; cap.issue_seq = seq; cap.mseq = mseq; cap.gen = mds_session->cap_gen; cap.latest_perms = cap_perms; ldout(cct, 10) << __func__ << " issued " << ccap_string(old_caps) << " -> " << ccap_string(cap.issued) << " from mds." << mds << " on " << *in << dendl; if ((issued & ~old_caps) && in->auth_cap == &cap) { // non-auth MDS is revoking the newly grant caps ? 
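    // Editorial sketch of the check below (illustrative only): for each peer
    // cap, any bit that is still implemented but no longer issued and that
    // overlaps the newly granted set means another MDS is mid-revocation of
    // those caps, so we ask check_caps() to respond without delay.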
for (auto &p : in->caps) { if (&p.second == &cap) continue; if (p.second.implemented & ~p.second.issued & issued) { check_caps(in, CHECK_CAPS_NODELAY); break; } } } if (issued & ~old_caps) signal_cond_list(in->waitfor_caps); } void Client::remove_cap(Cap *cap, bool queue_release) { auto &in = cap->inode; MetaSession *session = cap->session; mds_rank_t mds = cap->session->mds_num; ldout(cct, 10) << __func__ << " mds." << mds << " on " << in << dendl; if (queue_release) { session->enqueue_cap_release( in.ino, cap->cap_id, cap->issue_seq, cap->mseq, cap_epoch_barrier); } else { dec_pinned_icaps(); } if (in.auth_cap == cap) { if (in.flushing_cap_item.is_on_list()) { ldout(cct, 10) << " removing myself from flushing_cap list" << dendl; in.flushing_cap_item.remove_myself(); } in.auth_cap = NULL; } size_t n = in.caps.erase(mds); ceph_assert(n == 1); cap = nullptr; if (!in.is_any_caps()) { ldout(cct, 15) << __func__ << " last one, closing snaprealm " << in.snaprealm << dendl; in.snaprealm_item.remove_myself(); put_snap_realm(in.snaprealm); in.snaprealm = 0; } } void Client::remove_all_caps(Inode *in) { while (!in->caps.empty()) remove_cap(&in->caps.begin()->second, true); } void Client::remove_session_caps(MetaSession *s, int err) { ldout(cct, 10) << __func__ << " mds." << s->mds_num << dendl; while (s->caps.size()) { Cap *cap = *s->caps.begin(); InodeRef in(&cap->inode); bool dirty_caps = false; if (in->auth_cap == cap) { dirty_caps = in->dirty_caps | in->flushing_caps; in->wanted_max_size = 0; in->requested_max_size = 0; if (in->has_any_filelocks()) in->flags |= I_ERROR_FILELOCK; } auto caps = cap->implemented; if (cap->wanted | cap->issued) in->flags |= I_CAP_DROPPED; remove_cap(cap, false); in->cap_snaps.clear(); if (dirty_caps) { lderr(cct) << __func__ << " still has dirty|flushing caps on " << *in << dendl; if (in->flushing_caps) { num_flushing_caps--; in->flushing_cap_tids.clear(); } in->flushing_caps = 0; in->mark_caps_clean(); put_inode(in.get()); } caps &= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_BUFFER; if (caps && !in->caps_issued_mask(caps, true)) { if (err == -CEPHFS_EBLOCKLISTED) { if (in->oset.dirty_or_tx) { lderr(cct) << __func__ << " still has dirty data on " << *in << dendl; in->set_async_err(err); } objectcacher->purge_set(&in->oset); } else { objectcacher->release_set(&in->oset); } _schedule_invalidate_callback(in.get(), 0, 0); } signal_cond_list(in->waitfor_caps); } s->flushing_caps_tids.clear(); sync_cond.notify_all(); } std::pair<int, bool> Client::_do_remount(bool retry_on_error) { uint64_t max_retries = cct->_conf.get_val<uint64_t>("client_max_retries_on_remount_failure"); bool abort_on_failure = false; errno = 0; int r = remount_cb(callback_handle); if (r == 0) { retries_on_invalidate = 0; } else { int e = errno; client_t whoami = get_nodeid(); if (r == -1) { lderr(cct) << "failed to remount (to trim kernel dentries): " "errno = " << e << " (" << strerror(e) << ")" << dendl; } else { lderr(cct) << "failed to remount (to trim kernel dentries): " "return code = " << r << dendl; } bool should_abort = (cct->_conf.get_val<bool>("client_die_on_failed_remount") || cct->_conf.get_val<bool>("client_die_on_failed_dentry_invalidate")) && !(retry_on_error && (++retries_on_invalidate < max_retries)); if (should_abort && !is_unmounting()) { lderr(cct) << "failed to remount for kernel dentry trimming; quitting!" 
<< dendl; abort_on_failure = true; } } return std::make_pair(r, abort_on_failure); } class C_Client_Remount : public Context { private: Client *client; public: explicit C_Client_Remount(Client *c) : client(c) {} void finish(int r) override { ceph_assert(r == 0); auto result = client->_do_remount(true); if (result.second) { ceph_abort(); } } }; void Client::_invalidate_kernel_dcache() { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return; if (can_invalidate_dentries) { if (dentry_invalidate_cb && root->dir) { for (ceph::unordered_map<string, Dentry*>::iterator p = root->dir->dentries.begin(); p != root->dir->dentries.end(); ++p) { if (p->second->inode) _schedule_invalidate_dentry_callback(p->second, false); } } } else if (remount_cb) { // Hacky: // when remounting a file system, linux kernel trims all unused dentries in the fs remount_finisher.queue(new C_Client_Remount(this)); } } void Client::_trim_negative_child_dentries(InodeRef& in) { if (!in->is_dir()) return; Dir* dir = in->dir; if (dir && dir->dentries.size() == dir->num_null_dentries) { for (auto p = dir->dentries.begin(); p != dir->dentries.end(); ) { Dentry *dn = p->second; ++p; ceph_assert(!dn->inode); if (dn->lru_is_expireable()) unlink(dn, true, false); // keep dir, drop dentry } if (dir->dentries.empty()) { close_dir(dir); } } if (in->flags & I_SNAPDIR_OPEN) { InodeRef snapdir = open_snapdir(in.get()); _trim_negative_child_dentries(snapdir); } } class C_Client_CacheRelease : public Context { private: Client *client; vinodeno_t ino; public: C_Client_CacheRelease(Client *c, Inode *in) : client(c) { if (client->use_faked_inos()) ino = vinodeno_t(in->faked_ino, CEPH_NOSNAP); else ino = in->vino(); } void finish(int r) override { ceph_assert(ceph_mutex_is_not_locked_by_me(client->client_lock)); client->_async_inode_release(ino); } }; void Client::_async_inode_release(vinodeno_t ino) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return; ldout(cct, 10) << __func__ << " " << ino << dendl; ino_release_cb(callback_handle, ino); } void Client::_schedule_ino_release_callback(Inode *in) { if (ino_release_cb) // we queue the invalidate, which calls the callback and decrements the ref async_ino_releasor.queue(new C_Client_CacheRelease(this, in)); } void Client::trim_caps(MetaSession *s, uint64_t max) { mds_rank_t mds = s->mds_num; size_t caps_size = s->caps.size(); ldout(cct, 10) << __func__ << " mds." << mds << " max " << max << " caps " << caps_size << dendl; uint64_t trimmed = 0; auto p = s->caps.begin(); std::set<Dentry *> to_trim; /* this avoids caps other than the one we're * looking at from getting deleted during traversal. */ while ((caps_size - trimmed) > max && !p.end()) { Cap *cap = *p; InodeRef in(&cap->inode); // Increment p early because it will be invalidated if cap // is deleted inside remove_cap ++p; if (in->caps.size() > 1 && cap != in->auth_cap) { int mine = cap->issued | cap->implemented; int oissued = in->auth_cap ? 
in->auth_cap->issued : 0; // disposable non-auth cap if (!(get_caps_used(in.get()) & ~oissued & mine)) { ldout(cct, 20) << " removing unused, unneeded non-auth cap on " << *in << dendl; cap = (remove_cap(cap, true), nullptr); trimmed++; } } else { ldout(cct, 20) << " trying to trim dentries for " << *in << dendl; _trim_negative_child_dentries(in); bool all = true; auto q = in->dentries.begin(); while (q != in->dentries.end()) { Dentry *dn = *q; ++q; if (dn->lru_is_expireable()) { if (can_invalidate_dentries && dn->dir->parent_inode->ino == CEPH_INO_ROOT) { // Only issue one of these per DN for inodes in root: handle // others more efficiently by calling for root-child DNs at // the end of this function. _schedule_invalidate_dentry_callback(dn, true); } ldout(cct, 20) << " queueing dentry for trimming: " << dn->name << dendl; to_trim.insert(dn); } else { ldout(cct, 20) << " not expirable: " << dn->name << dendl; all = false; } } if (in->ll_ref == 1 && in->ino != CEPH_INO_ROOT) { _schedule_ino_release_callback(in.get()); } if (all && in->ino != CEPH_INO_ROOT) { ldout(cct, 20) << __func__ << " counting as trimmed: " << *in << dendl; trimmed++; } } } ldout(cct, 20) << " trimming queued dentries: " << dendl; for (const auto &dn : to_trim) { trim_dentry(dn); } to_trim.clear(); caps_size = s->caps.size(); if (caps_size > (size_t)max) _invalidate_kernel_dcache(); } void Client::force_session_readonly(MetaSession *s) { s->readonly = true; for (xlist<Cap*>::iterator p = s->caps.begin(); !p.end(); ++p) { auto &in = (*p)->inode; if (in.caps_wanted() & CEPH_CAP_FILE_WR) signal_cond_list(in.waitfor_caps); } } int Client::mark_caps_flushing(Inode *in, ceph_tid_t* ptid) { MetaSession *session = in->auth_cap->session; int flushing = in->dirty_caps; ceph_assert(flushing); ceph_tid_t flush_tid = ++last_flush_tid; in->flushing_cap_tids[flush_tid] = flushing; if (!in->flushing_caps) { ldout(cct, 10) << __func__ << " " << ccap_string(flushing) << " " << *in << dendl; num_flushing_caps++; } else { ldout(cct, 10) << __func__ << " (more) " << ccap_string(flushing) << " " << *in << dendl; } in->flushing_caps |= flushing; in->mark_caps_clean(); if (!in->flushing_cap_item.is_on_list()) session->flushing_caps.push_back(&in->flushing_cap_item); session->flushing_caps_tids.insert(flush_tid); *ptid = flush_tid; return flushing; } void Client::adjust_session_flushing_caps(Inode *in, MetaSession *old_s, MetaSession *new_s) { for (auto &p : in->cap_snaps) { CapSnap &capsnap = p.second; if (capsnap.flush_tid > 0) { old_s->flushing_caps_tids.erase(capsnap.flush_tid); new_s->flushing_caps_tids.insert(capsnap.flush_tid); } } for (map<ceph_tid_t, int>::iterator it = in->flushing_cap_tids.begin(); it != in->flushing_cap_tids.end(); ++it) { old_s->flushing_caps_tids.erase(it->first); new_s->flushing_caps_tids.insert(it->first); } new_s->flushing_caps.push_back(&in->flushing_cap_item); } /* * Flush all the dirty caps back to the MDS. Because the callers * generally wait on the result of this function (syncfs and umount * cases), we set CHECK_CAPS_SYNCHRONOUS on the last check_caps call. 
*/ void Client::flush_caps_sync() { ldout(cct, 10) << __func__ << dendl; for (auto &q : mds_sessions) { auto s = q.second; xlist<Inode*>::iterator p = s->dirty_list.begin(); while (!p.end()) { unsigned flags = CHECK_CAPS_NODELAY; Inode *in = *p; ++p; if (p.end()) flags |= CHECK_CAPS_SYNCHRONOUS; check_caps(in, flags); } } } void Client::wait_sync_caps(Inode *in, ceph_tid_t want) { while (in->flushing_caps) { map<ceph_tid_t, int>::iterator it = in->flushing_cap_tids.begin(); ceph_assert(it != in->flushing_cap_tids.end()); if (it->first > want) break; ldout(cct, 10) << __func__ << " on " << *in << " flushing " << ccap_string(it->second) << " want " << want << " last " << it->first << dendl; wait_on_list(in->waitfor_caps); } } void Client::wait_sync_caps(ceph_tid_t want) { retry: ldout(cct, 10) << __func__ << " want " << want << " (last is " << last_flush_tid << ", " << num_flushing_caps << " total flushing)" << dendl; for (auto &p : mds_sessions) { auto s = p.second; if (s->flushing_caps_tids.empty()) continue; ceph_tid_t oldest_tid = *s->flushing_caps_tids.begin(); if (oldest_tid <= want) { ldout(cct, 10) << " waiting on mds." << p.first << " tid " << oldest_tid << " (want " << want << ")" << dendl; std::unique_lock l{client_lock, std::adopt_lock}; sync_cond.wait(l); l.release(); goto retry; } } } void Client::kick_flushing_caps(Inode *in, MetaSession *session) { in->flags &= ~I_KICK_FLUSH; Cap *cap = in->auth_cap; ceph_assert(cap->session == session); ceph_tid_t last_snap_flush = 0; for (auto p = in->flushing_cap_tids.rbegin(); p != in->flushing_cap_tids.rend(); ++p) { if (!p->second) { last_snap_flush = p->first; break; } } int wanted = in->caps_wanted(); int used = get_caps_used(in) | in->caps_dirty(); auto it = in->cap_snaps.begin(); for (auto& p : in->flushing_cap_tids) { if (p.second) { int msg_flags = p.first < last_snap_flush ? MClientCaps::FLAG_PENDING_CAPSNAP : 0; send_cap(in, session, cap, msg_flags, used, wanted, (cap->issued | cap->implemented), p.second, p.first); } else { ceph_assert(it != in->cap_snaps.end()); ceph_assert(it->second.flush_tid == p.first); send_flush_snap(in, session, it->first, it->second); ++it; } } } void Client::kick_flushing_caps(MetaSession *session) { mds_rank_t mds = session->mds_num; ldout(cct, 10) << __func__ << " mds." << mds << dendl; for (xlist<Inode*>::iterator p = session->flushing_caps.begin(); !p.end(); ++p) { Inode *in = *p; if (in->flags & I_KICK_FLUSH) { ldout(cct, 20) << " reflushing caps on " << *in << " to mds." << mds << dendl; kick_flushing_caps(in, session); } } } void Client::early_kick_flushing_caps(MetaSession *session) { for (xlist<Inode*>::iterator p = session->flushing_caps.begin(); !p.end(); ++p) { Inode *in = *p; Cap *cap = in->auth_cap; ceph_assert(cap); // if flushing caps were revoked, we re-send the cap flush in client reconnect // stage. This guarantees that MDS processes the cap flush message before issuing // the flushing caps to other client. if ((in->flushing_caps & in->auth_cap->issued) == in->flushing_caps) { in->flags |= I_KICK_FLUSH; continue; } ldout(cct, 20) << " reflushing caps (early_kick) on " << *in << " to mds." << session->mds_num << dendl; // send_reconnect() also will reset these sequence numbers. make sure // sequence numbers in cap flush message match later reconnect message. 
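    // Editorial note: zeroing the sequence numbers below mirrors what
    // send_reconnect() will do, so the cap flush resent here cannot be
    // rejected for a seq mismatch once the session is re-established.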
cap->seq = 0; cap->issue_seq = 0; cap->mseq = 0; cap->issued = cap->implemented; kick_flushing_caps(in, session); } } void Client::invalidate_snaprealm_and_children(SnapRealm *realm) { list<SnapRealm*> q; q.push_back(realm); while (!q.empty()) { realm = q.front(); q.pop_front(); ldout(cct, 10) << __func__ << " " << *realm << dendl; realm->invalidate_cache(); for (set<SnapRealm*>::iterator p = realm->pchildren.begin(); p != realm->pchildren.end(); ++p) q.push_back(*p); } } SnapRealm *Client::get_snap_realm(inodeno_t r) { SnapRealm *realm = snap_realms[r]; ldout(cct, 20) << __func__ << " " << r << " " << realm << ", nref was " << (realm ? realm->nref : 0) << dendl; if (!realm) { snap_realms[r] = realm = new SnapRealm(r); // Do not release the global snaprealm until unmounting. if (r == CEPH_INO_GLOBAL_SNAPREALM) realm->nref++; } realm->nref++; ldout(cct, 20) << __func__ << " " << r << " " << realm << ", nref now is " << realm->nref << dendl; return realm; } SnapRealm *Client::get_snap_realm_maybe(inodeno_t r) { if (snap_realms.count(r) == 0) { ldout(cct, 20) << __func__ << " " << r << " fail" << dendl; return NULL; } SnapRealm *realm = snap_realms[r]; ldout(cct, 20) << __func__ << " " << r << " " << realm << " " << realm->nref << " -> " << (realm->nref + 1) << dendl; realm->nref++; return realm; } void Client::put_snap_realm(SnapRealm *realm) { ldout(cct, 20) << __func__ << " " << realm->ino << " " << realm << " " << realm->nref << " -> " << (realm->nref - 1) << dendl; if (--realm->nref == 0) { snap_realms.erase(realm->ino); if (realm->pparent) { realm->pparent->pchildren.erase(realm); put_snap_realm(realm->pparent); } delete realm; } } bool Client::adjust_realm_parent(SnapRealm *realm, inodeno_t parent) { if (realm->parent != parent) { ldout(cct, 10) << __func__ << " " << *realm << " " << realm->parent << " -> " << parent << dendl; realm->parent = parent; if (realm->pparent) { realm->pparent->pchildren.erase(realm); put_snap_realm(realm->pparent); } realm->pparent = get_snap_realm(parent); realm->pparent->pchildren.insert(realm); return true; } return false; } static bool has_new_snaps(const SnapContext& old_snapc, const SnapContext& new_snapc) { return !new_snapc.snaps.empty() && new_snapc.snaps[0] > old_snapc.seq; } struct SnapRealmInfoMeta { SnapRealmInfoMeta(utime_t last_modified, uint64_t change_attr) : last_modified(last_modified), change_attr(change_attr) { } utime_t last_modified; uint64_t change_attr; }; static std::pair<SnapRealmInfo, std::optional<SnapRealmInfoMeta>> get_snap_realm_info( MetaSession *session, bufferlist::const_iterator &p) { if (session->mds_features.test(CEPHFS_FEATURE_NEW_SNAPREALM_INFO)) { SnapRealmInfoNew ninfo; decode(ninfo, p); return std::make_pair(ninfo.info, SnapRealmInfoMeta(ninfo.last_modified, ninfo.change_attr)); } else { SnapRealmInfo info; decode(info, p); return std::make_pair(info, std::nullopt); } } void Client::update_snap_trace(MetaSession *session, const bufferlist& bl, SnapRealm **realm_ret, bool flush) { SnapRealm *first_realm = NULL; ldout(cct, 10) << __func__ << " len " << bl.length() << dendl; map<SnapRealm*, SnapContext> dirty_realms; auto p = bl.cbegin(); while (!p.end()) { auto [info, realm_info_meta] = get_snap_realm_info(session, p); SnapRealm *realm = get_snap_realm(info.ino()); bool invalidate = false; if (info.seq() > realm->seq || (realm_info_meta && (*realm_info_meta).change_attr > realm->change_attr)) { ldout(cct, 10) << __func__ << " " << *realm << " seq " << info.seq() << " > " << realm->seq << dendl; if (flush) { // 
writeback any dirty caps _before_ updating snap list (i.e. with old snap info) // flush me + children list<SnapRealm*> q; q.push_back(realm); while (!q.empty()) { SnapRealm *realm = q.front(); q.pop_front(); for (set<SnapRealm*>::iterator p = realm->pchildren.begin(); p != realm->pchildren.end(); ++p) q.push_back(*p); if (dirty_realms.count(realm) == 0) { realm->nref++; dirty_realms[realm] = realm->get_snap_context(); } } } // update realm->seq = info.seq(); realm->created = info.created(); realm->parent_since = info.parent_since(); realm->prior_parent_snaps = info.prior_parent_snaps; if (realm_info_meta) { realm->last_modified = (*realm_info_meta).last_modified; realm->change_attr = (*realm_info_meta).change_attr; } realm->my_snaps = info.my_snaps; invalidate = true; } // _always_ verify parent if (adjust_realm_parent(realm, info.parent())) invalidate = true; if (invalidate) { invalidate_snaprealm_and_children(realm); ldout(cct, 15) << __func__ << " " << *realm << " self|parent updated" << dendl; ldout(cct, 15) << " snapc " << realm->get_snap_context() << dendl; } else { ldout(cct, 10) << __func__ << " " << *realm << " seq " << info.seq() << " <= " << realm->seq << " and same parent, SKIPPING" << dendl; } if (!first_realm) first_realm = realm; else put_snap_realm(realm); } for (auto &[realm, snapc] : dirty_realms) { // if there are new snaps ? if (has_new_snaps(snapc, realm->get_snap_context())) { ldout(cct, 10) << " flushing caps on " << *realm << dendl; for (auto&& in : realm->inodes_with_caps) { queue_cap_snap(in, snapc); } } else { ldout(cct, 10) << " no new snap on " << *realm << dendl; } put_snap_realm(realm); } if (realm_ret) *realm_ret = first_realm; else put_snap_realm(first_realm); } void Client::handle_snap(const MConstRef<MClientSnap>& m) { ldout(cct, 10) << __func__ << " " << *m << dendl; mds_rank_t mds = mds_rank_t(m->get_source().num()); std::scoped_lock cl(client_lock); auto session = _get_mds_session(mds, m->get_connection().get()); if (!session) { return; } got_mds_push(session.get()); map<Inode*, SnapContext> to_move; SnapRealm *realm = 0; if (m->head.op == CEPH_SNAP_OP_SPLIT) { ceph_assert(m->head.split); auto p = m->bl.cbegin(); auto [info, _] = get_snap_realm_info(session.get(), p); ceph_assert(info.ino() == m->head.split); // flush, then move, ino's. 
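  // Editorial summary of the split (assumption based on the code below): the
  // MDS names the new child realm in the payload; each inode listed in
  // m->split_inos is detached from its current realm and re-attached to that
  // child (after its cap snaps are queued against the old snap context), and
  // any realms in m->split_realms are re-parented under it.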
realm = get_snap_realm(info.ino()); ldout(cct, 10) << " splitting off " << *realm << dendl; for (auto& ino : m->split_inos) { vinodeno_t vino(ino, CEPH_NOSNAP); if (inode_map.count(vino)) { Inode *in = inode_map[vino]; if (!in->snaprealm || in->snaprealm == realm) continue; if (in->snaprealm->created > info.created()) { ldout(cct, 10) << " NOT moving " << *in << " from _newer_ realm " << *in->snaprealm << dendl; continue; } ldout(cct, 10) << " moving " << *in << " from " << *in->snaprealm << dendl; in->snaprealm_item.remove_myself(); to_move[in] = in->snaprealm->get_snap_context(); put_snap_realm(in->snaprealm); } } // move child snaprealms, too for (auto& child_realm : m->split_realms) { ldout(cct, 10) << "adjusting snaprealm " << child_realm << " parent" << dendl; SnapRealm *child = get_snap_realm_maybe(child_realm); if (!child) continue; adjust_realm_parent(child, realm->ino); put_snap_realm(child); } } update_snap_trace(session.get(), m->bl, NULL, m->head.op != CEPH_SNAP_OP_DESTROY); if (realm) { for (auto p = to_move.begin(); p != to_move.end(); ++p) { Inode *in = p->first; in->snaprealm = realm; realm->inodes_with_caps.push_back(&in->snaprealm_item); realm->nref++; // queue for snap writeback if (has_new_snaps(p->second, realm->get_snap_context())) queue_cap_snap(in, p->second); } put_snap_realm(realm); } } void Client::handle_quota(const MConstRef<MClientQuota>& m) { mds_rank_t mds = mds_rank_t(m->get_source().num()); std::scoped_lock cl(client_lock); auto session = _get_mds_session(mds, m->get_connection().get()); if (!session) { return; } got_mds_push(session.get()); ldout(cct, 10) << __func__ << " " << *m << " from mds." << mds << dendl; vinodeno_t vino(m->ino, CEPH_NOSNAP); if (inode_map.count(vino)) { Inode *in = NULL; in = inode_map[vino]; if (in) { in->quota = m->quota; in->rstat = m->rstat; } } } void Client::handle_caps(const MConstRef<MClientCaps>& m) { mds_rank_t mds = mds_rank_t(m->get_source().num()); std::scoped_lock cl(client_lock); auto session = _get_mds_session(mds, m->get_connection().get()); if (!session) { return; } if (m->osd_epoch_barrier && !objecter->have_map(m->osd_epoch_barrier)) { // Pause RADOS operations until we see the required epoch objecter->set_epoch_barrier(m->osd_epoch_barrier); } if (m->osd_epoch_barrier > cap_epoch_barrier) { // Record the barrier so that we will transmit it to MDS when releasing set_cap_epoch_barrier(m->osd_epoch_barrier); } got_mds_push(session.get()); bool do_cap_release = false; Inode *in; vinodeno_t vino(m->get_ino(), CEPH_NOSNAP); if (auto it = inode_map.find(vino); it != inode_map.end()) { in = it->second; /* MDS maybe waiting for cap release with increased seq */ switch (m->get_op()) { case CEPH_CAP_OP_REVOKE: case CEPH_CAP_OP_GRANT: if (!in->caps.count(mds)) { do_cap_release = true; ldout(cct, 5) << __func__ << " vino " << vino << " don't have cap " << m->get_cap_id() << " op " << m->get_op() << ", immediately releasing" << dendl; } } } else { /* MDS maybe waiting for cap release with increased seq */ switch (m->get_op()) { case CEPH_CAP_OP_IMPORT: case CEPH_CAP_OP_REVOKE: case CEPH_CAP_OP_GRANT: do_cap_release = true; ldout(cct, 5) << __func__ << " don't have vino " << vino << " op " << m->get_op() << ", immediately releasing" << dendl; break; default: ldout(cct, 5) << __func__ << " don't have vino " << vino << ", dropping" << dendl; return; } } // In case the mds is waiting on e.g. 
a revocation if (do_cap_release) { session->enqueue_cap_release( m->get_ino(), m->get_cap_id(), m->get_seq(), m->get_mseq(), cap_epoch_barrier); flush_cap_releases(); return; } switch (m->get_op()) { case CEPH_CAP_OP_EXPORT: return handle_cap_export(session.get(), in, m); case CEPH_CAP_OP_FLUSHSNAP_ACK: return handle_cap_flushsnap_ack(session.get(), in, m); case CEPH_CAP_OP_IMPORT: /* no return */ handle_cap_import(session.get(), in, m); } if (auto it = in->caps.find(mds); it != in->caps.end()) { Cap &cap = in->caps.at(mds); switch (m->get_op()) { case CEPH_CAP_OP_TRUNC: return handle_cap_trunc(session.get(), in, m); case CEPH_CAP_OP_IMPORT: case CEPH_CAP_OP_REVOKE: case CEPH_CAP_OP_GRANT: return handle_cap_grant(session.get(), in, &cap, m); case CEPH_CAP_OP_FLUSH_ACK: return handle_cap_flush_ack(session.get(), in, &cap, m); } } else { ldout(cct, 5) << __func__ << " don't have " << *in << " cap on mds." << mds << dendl; return; } } void Client::handle_cap_import(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m) { mds_rank_t mds = session->mds_num; ldout(cct, 5) << __func__ << " ino " << m->get_ino() << " mseq " << m->get_mseq() << " IMPORT from mds." << mds << dendl; const mds_rank_t peer_mds = mds_rank_t(m->peer.mds); Cap *cap = NULL; UserPerm cap_perms; if (auto it = in->caps.find(peer_mds); m->peer.cap_id && it != in->caps.end()) { cap = &it->second; cap_perms = cap->latest_perms; } // add/update it SnapRealm *realm = NULL; update_snap_trace(session, m->snapbl, &realm); int issued = m->get_caps(); int wanted = m->get_wanted(); add_update_cap(in, session, m->get_cap_id(), issued, wanted, m->get_seq(), m->get_mseq(), m->get_realm(), CEPH_CAP_FLAG_AUTH, cap_perms); if (cap && cap->cap_id == m->peer.cap_id) { remove_cap(cap, (m->peer.flags & CEPH_CAP_FLAG_RELEASE)); } if (realm) put_snap_realm(realm); if (in->auth_cap && in->auth_cap->session == session) { if (!(wanted & CEPH_CAP_ANY_FILE_WR) || in->requested_max_size > m->get_max_size()) { in->requested_max_size = 0; ldout(cct, 15) << "reset requested_max_size after cap import" << dendl; } // reflush any/all caps (if we are now the auth_cap) kick_flushing_caps(in, session); } } void Client::handle_cap_export(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m) { mds_rank_t mds = session->mds_num; ldout(cct, 5) << __func__ << " ino " << m->get_ino() << " mseq " << m->get_mseq() << " EXPORT from mds." << mds << dendl; auto it = in->caps.find(mds); if (it != in->caps.end()) { Cap &cap = it->second; if (cap.cap_id == m->get_cap_id()) { if (m->peer.cap_id) { const auto peer_mds = mds_rank_t(m->peer.mds); auto tsession = _get_or_open_mds_session(peer_mds); auto it = in->caps.find(peer_mds); if (it != in->caps.end()) { Cap &tcap = it->second; if (tcap.cap_id == m->peer.cap_id && ceph_seq_cmp(tcap.seq, m->peer.seq) < 0) { tcap.cap_id = m->peer.cap_id; tcap.seq = m->peer.seq - 1; tcap.issue_seq = tcap.seq; tcap.issued |= cap.issued; tcap.implemented |= cap.issued; if (&cap == in->auth_cap) in->auth_cap = &tcap; if (in->auth_cap == &tcap && in->flushing_cap_item.is_on_list()) adjust_session_flushing_caps(in, session, tsession.get()); } } else { add_update_cap(in, tsession.get(), m->peer.cap_id, cap.issued, 0, m->peer.seq - 1, m->peer.mseq, (uint64_t)-1, &cap == in->auth_cap ? 
CEPH_CAP_FLAG_AUTH : 0, cap.latest_perms); } } else { if (cap.wanted | cap.issued) in->flags |= I_CAP_DROPPED; } remove_cap(&cap, false); } } } void Client::handle_cap_trunc(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m) { mds_rank_t mds = session->mds_num; ceph_assert(in->caps.count(mds)); uint64_t size = m->get_size(); if (in->is_fscrypt_enabled()) { size = std::stoll(std::string(std::rbegin(m->fscrypt_file), std::rend(m->fscrypt_file))); } ldout(cct, 10) << __func__ << " on ino " << *in << " size " << in->size << " -> " << m->get_size() << dendl; int issued; in->caps_issued(&issued); issued |= in->caps_dirty(); update_inode_file_size(in, issued, size, m->get_truncate_seq(), m->get_truncate_size()); } void Client::handle_cap_flush_ack(MetaSession *session, Inode *in, Cap *cap, const MConstRef<MClientCaps>& m) { ceph_tid_t flush_ack_tid = m->get_client_tid(); int dirty = m->get_dirty(); int cleaned = 0; int flushed = 0; auto it = in->flushing_cap_tids.begin(); if (it->first < flush_ack_tid) { ldout(cct, 0) << __func__ << " mds." << session->mds_num << " got unexpected flush ack tid " << flush_ack_tid << " expected is " << it->first << dendl; } for (; it != in->flushing_cap_tids.end(); ) { if (!it->second) { // cap snap ++it; continue; } if (it->first == flush_ack_tid) cleaned = it->second; if (it->first <= flush_ack_tid) { session->flushing_caps_tids.erase(it->first); in->flushing_cap_tids.erase(it++); ++flushed; continue; } cleaned &= ~it->second; if (!cleaned) break; ++it; } ldout(cct, 5) << __func__ << " mds." << session->mds_num << " cleaned " << ccap_string(cleaned) << " on " << *in << " with " << ccap_string(dirty) << dendl; if (flushed) { signal_cond_list(in->waitfor_caps); if (session->flushing_caps_tids.empty() || *session->flushing_caps_tids.begin() > flush_ack_tid) sync_cond.notify_all(); } if (!dirty) { in->cap_dirtier_uid = -1; in->cap_dirtier_gid = -1; } if (!cleaned) { ldout(cct, 10) << " tid " << m->get_client_tid() << " != any cap bit tids" << dendl; } else { if (in->flushing_caps) { ldout(cct, 5) << " flushing_caps " << ccap_string(in->flushing_caps) << " -> " << ccap_string(in->flushing_caps & ~cleaned) << dendl; in->flushing_caps &= ~cleaned; if (in->flushing_caps == 0) { ldout(cct, 10) << " " << *in << " !flushing" << dendl; num_flushing_caps--; if (in->flushing_cap_tids.empty()) in->flushing_cap_item.remove_myself(); } if (!in->caps_dirty()) put_inode(in); } } } void Client::handle_cap_flushsnap_ack(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m) { ceph_tid_t flush_ack_tid = m->get_client_tid(); mds_rank_t mds = session->mds_num; ceph_assert(in->caps.count(mds)); snapid_t follows = m->get_snap_follows(); if (auto it = in->cap_snaps.find(follows); it != in->cap_snaps.end()) { auto& capsnap = it->second; if (flush_ack_tid != capsnap.flush_tid) { ldout(cct, 10) << " tid " << flush_ack_tid << " != " << capsnap.flush_tid << dendl; } else { InodeRef tmp_ref(in); ldout(cct, 5) << __func__ << " mds." << mds << " flushed snap follows " << follows << " on " << *in << dendl; session->flushing_caps_tids.erase(capsnap.flush_tid); in->flushing_cap_tids.erase(capsnap.flush_tid); if (in->flushing_caps == 0 && in->flushing_cap_tids.empty()) in->flushing_cap_item.remove_myself(); in->cap_snaps.erase(it); signal_cond_list(in->waitfor_caps); if (session->flushing_caps_tids.empty() || *session->flushing_caps_tids.begin() > flush_ack_tid) sync_cond.notify_all(); } } else { ldout(cct, 5) << __func__ << " DUP(?) mds." 
<< mds << " flushed snap follows " << follows << " on " << *in << dendl; // we may not have it if we send multiple FLUSHSNAP requests and (got multiple FLUSHEDSNAPs back) } } class C_Client_DentryInvalidate : public Context { private: Client *client; vinodeno_t dirino; vinodeno_t ino; string name; public: C_Client_DentryInvalidate(Client *c, Dentry *dn, bool del) : client(c), name(dn->name) { if (client->use_faked_inos()) { dirino.ino = dn->dir->parent_inode->faked_ino; if (del) ino.ino = dn->inode->faked_ino; } else { dirino = dn->dir->parent_inode->vino(); if (del) ino = dn->inode->vino(); } if (!del) ino.ino = inodeno_t(); } void finish(int r) override { // _async_dentry_invalidate is responsible for its own locking ceph_assert(ceph_mutex_is_not_locked_by_me(client->client_lock)); client->_async_dentry_invalidate(dirino, ino, name); } }; void Client::_async_dentry_invalidate(vinodeno_t dirino, vinodeno_t ino, string& name) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return; ldout(cct, 10) << __func__ << " '" << name << "' ino " << ino << " in dir " << dirino << dendl; dentry_invalidate_cb(callback_handle, dirino, ino, name.c_str(), name.length()); } void Client::_schedule_invalidate_dentry_callback(Dentry *dn, bool del) { if (dentry_invalidate_cb && dn->inode->ll_ref > 0) async_dentry_invalidator.queue(new C_Client_DentryInvalidate(this, dn, del)); } void Client::_try_to_trim_inode(Inode *in, bool sched_inval) { int ref = in->get_nref(); ldout(cct, 5) << __func__ << " in " << *in <<dendl; if (in->dir && !in->dir->dentries.empty()) { for (auto p = in->dir->dentries.begin(); p != in->dir->dentries.end(); ) { Dentry *dn = p->second; ++p; /* rmsnap removes whole subtree, need trim inodes recursively. * we don't need to invalidate dentries recursively. because * invalidating a directory dentry effectively invalidate * whole subtree */ if (in->snapid != CEPH_NOSNAP && dn->inode && dn->inode->is_dir()) _try_to_trim_inode(dn->inode.get(), false); if (dn->lru_is_expireable()) unlink(dn, true, false); // keep dir, drop dentry } if (in->dir->dentries.empty()) { close_dir(in->dir); --ref; } } if (ref > 1 && (in->flags & I_SNAPDIR_OPEN)) { InodeRef snapdir = open_snapdir(in); _try_to_trim_inode(snapdir.get(), false); --ref; } if (ref > 1) { auto q = in->dentries.begin(); while (q != in->dentries.end()) { Dentry *dn = *q; ++q; if( in->ll_ref > 0 && sched_inval) { // FIXME: we play lots of unlink/link tricks when handling MDS replies, // so in->dentries doesn't always reflect the state of kernel's dcache. _schedule_invalidate_dentry_callback(dn, true); } unlink(dn, true, true); } } } void Client::handle_cap_grant(MetaSession *session, Inode *in, Cap *cap, const MConstRef<MClientCaps>& m) { mds_rank_t mds = session->mds_num; int used = get_caps_used(in); int wanted = in->caps_wanted(); int flags = 0; const unsigned new_caps = m->get_caps(); const bool was_stale = session->cap_gen > cap->gen; ldout(cct, 5) << __func__ << " on in " << m->get_ino() << " mds." << mds << " seq " << m->get_seq() << " caps now " << ccap_string(new_caps) << " was " << ccap_string(cap->issued) << (was_stale ? 
" (stale)" : "") << dendl; if (was_stale) cap->issued = cap->implemented = CEPH_CAP_PIN; cap->seq = m->get_seq(); cap->gen = session->cap_gen; check_cap_issue(in, new_caps); // update inode int issued; in->caps_issued(&issued); issued |= in->caps_dirty(); if ((new_caps & CEPH_CAP_AUTH_SHARED) && !(issued & CEPH_CAP_AUTH_EXCL)) { in->mode = m->head.mode; in->uid = m->head.uid; in->gid = m->head.gid; in->btime = m->btime; } bool deleted_inode = false; if ((new_caps & CEPH_CAP_LINK_SHARED) && !(issued & CEPH_CAP_LINK_EXCL)) { in->nlink = m->head.nlink; if (in->nlink == 0) deleted_inode = true; } if (!(issued & CEPH_CAP_XATTR_EXCL) && m->xattrbl.length() && m->head.xattr_version > in->xattr_version) { auto p = m->xattrbl.cbegin(); decode(in->xattrs, p); in->xattr_version = m->head.xattr_version; } if ((new_caps & CEPH_CAP_FILE_SHARED) && m->dirstat_is_valid()) { in->dirstat.nfiles = m->get_nfiles(); in->dirstat.nsubdirs = m->get_nsubdirs(); } if (new_caps & CEPH_CAP_ANY_RD) { update_inode_file_time(in, issued, m->get_time_warp_seq(), m->get_ctime(), m->get_mtime(), m->get_atime()); } if (new_caps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) { in->layout = m->get_layout(); update_inode_file_size(in, issued, m->get_size(), m->get_truncate_seq(), m->get_truncate_size()); } if (m->inline_version > in->inline_version) { in->inline_data = m->inline_data; in->inline_version = m->inline_version; } /* always take a newer change attr */ if (m->get_change_attr() > in->change_attr) in->change_attr = m->get_change_attr(); // max_size if (cap == in->auth_cap && (new_caps & CEPH_CAP_ANY_FILE_WR) && (m->get_max_size() != in->max_size)) { ldout(cct, 10) << "max_size " << in->max_size << " -> " << m->get_max_size() << dendl; in->max_size = m->get_max_size(); if (in->max_size > in->wanted_max_size) { in->wanted_max_size = 0; in->requested_max_size = 0; } } bool check = false; if ((was_stale || m->get_op() == CEPH_CAP_OP_IMPORT) && (wanted & ~(cap->wanted | new_caps))) { // If mds is importing cap, prior cap messages that update 'wanted' // may get dropped by mds (migrate seq mismatch). // // We don't send cap message to update 'wanted' if what we want are // already issued. If mds revokes caps, cap message that releases caps // also tells mds what we want. But if caps got revoked by mds forcedly // (session stale). We may haven't told mds what we want. 
check = true; } // update caps auto revoked = cap->issued & ~new_caps; if (revoked) { ldout(cct, 10) << " revocation of " << ccap_string(revoked) << dendl; cap->issued = new_caps; cap->implemented |= new_caps; // recall delegations if we're losing caps necessary for them if (revoked & ceph_deleg_caps_for_type(CEPH_DELEGATION_RD)) in->recall_deleg(false); else if (revoked & ceph_deleg_caps_for_type(CEPH_DELEGATION_WR)) in->recall_deleg(true); used = adjust_caps_used_for_lazyio(used, cap->issued, cap->implemented); if ((used & revoked & (CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO)) && !_flush(in, new C_Client_FlushComplete(this, in))) { // waitin' for flush } else if (used & revoked & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) { if (_release(in)) { check = true; flags = CHECK_CAPS_NODELAY; } } else { cap->wanted = 0; // don't let check_caps skip sending a response to MDS check = true; flags = CHECK_CAPS_NODELAY; } } else if (cap->issued == new_caps) { ldout(cct, 10) << " caps unchanged at " << ccap_string(cap->issued) << dendl; } else { ldout(cct, 10) << " grant, new caps are " << ccap_string(new_caps & ~cap->issued) << dendl; cap->issued = new_caps; cap->implemented |= new_caps; if (cap == in->auth_cap) { // non-auth MDS is revoking the newly grant caps ? for (const auto &p : in->caps) { if (&p.second == cap) continue; if (p.second.implemented & ~p.second.issued & new_caps) { check = true; break; } } } } // just in case the caps was released just before we get the revoke msg if (!check && m->get_op() == CEPH_CAP_OP_REVOKE) { cap->wanted = 0; // don't let check_caps skip sending a response to MDS check = true; flags = CHECK_CAPS_NODELAY; } if (check) check_caps(in, flags); // wake up waiters if (new_caps) signal_cond_list(in->waitfor_caps); // may drop inode's last ref if (deleted_inode) _try_to_trim_inode(in, true); } int Client::inode_permission(Inode *in, const UserPerm& perms, unsigned want) { if (perms.uid() == 0) { // For directories, DACs are overridable. 

int Client::inode_permission(Inode *in, const UserPerm& perms, unsigned want)
{
  if (perms.uid() == 0) {
    // For directories, DACs are overridable.
    // For files, Read/write DACs are always overridable but executable DACs
    // are overridable when there is at least one exec bit set
    if (!S_ISDIR(in->mode) && (want & MAY_EXEC) && !(in->mode & S_IXUGO))
      return -CEPHFS_EACCES;
    return 0;
  }

  if (perms.uid() != in->uid && (in->mode & S_IRWXG)) {
    int ret = _posix_acl_permission(in, perms, want);
    if (ret != -CEPHFS_EAGAIN)
      return ret;
  }

  // check permissions before doing anything else
  if (!in->check_mode(perms, want))
    return -CEPHFS_EACCES;
  return 0;
}

int Client::xattr_permission(Inode *in, const char *name, unsigned want,
			     const UserPerm& perms)
{
  int r = _getattr_for_perm(in, perms);
  if (r < 0)
    goto out;

  r = 0;
  if (strncmp(name, "system.", 7) == 0) {
    if ((want & MAY_WRITE) && (perms.uid() != 0 && perms.uid() != in->uid))
      r = -CEPHFS_EPERM;
  } else {
    r = inode_permission(in, perms, want);
  }
out:
  ldout(cct, 5) << __func__ << " " << in << " = " << r << dendl;
  return r;
}

std::ostream& operator<<(std::ostream &out, const UserPerm& perm)
{
  out << "UserPerm(uid: " << perm.uid() << ", gid: " << perm.gid() << ")";
  return out;
}

int Client::may_setattr(Inode *in, struct ceph_statx *stx, int mask,
			const UserPerm& perms)
{
  ldout(cct, 20) << __func__ << " " << *in << "; " << perms << " stx_mode: "
		 << hex << stx->stx_mode << " mask:" << mask << dec << dendl;
  int r = _getattr_for_perm(in, perms);
  if (r < 0)
    goto out;

  if (mask & CEPH_SETATTR_SIZE) {
    r = inode_permission(in, perms, MAY_WRITE);
    if (r < 0)
      goto out;
  }

  r = -CEPHFS_EPERM;
  if (mask & CEPH_SETATTR_UID) {
    if (perms.uid() != 0 && (perms.uid() != in->uid || stx->stx_uid != in->uid))
      goto out;
  }
  if (mask & CEPH_SETATTR_GID) {
    if (perms.uid() != 0 && (perms.uid() != in->uid ||
			     (!perms.gid_in_groups(stx->stx_gid) && stx->stx_gid != in->gid)))
      goto out;
  }

  if (mask & CEPH_SETATTR_MODE) {
    uint32_t m = ~stx->stx_mode & in->mode; // mode bits removed
    ldout(cct, 20) << __func__ << " " << *in << " = " << hex << m << dec << dendl;
    if (perms.uid() != 0 && perms.uid() != in->uid &&
	/*
	 * Currently the kernel fuse and libfuse code is buggy and
	 * won't pass the ATTR_KILL_SUID/ATTR_KILL_SGID to ceph-fuse.
	 * But will just set the ATTR_MODE and at the same time by
	 * clearing the suid/sgid bits.
	 *
	 * Only allow unprivileged users to clear S_ISUID and S_ISGID.
	 */
	(m & ~(S_ISUID | S_ISGID)))
      goto out;

    gid_t i_gid = (mask & CEPH_SETATTR_GID) ?
      stx->stx_gid : in->gid;
    if (perms.uid() != 0 && !perms.gid_in_groups(i_gid))
      stx->stx_mode &= ~S_ISGID;
  }

  if (mask & (CEPH_SETATTR_CTIME | CEPH_SETATTR_BTIME |
	      CEPH_SETATTR_MTIME | CEPH_SETATTR_ATIME)) {
    if (perms.uid() != 0 && perms.uid() != in->uid) {
      int check_mask = CEPH_SETATTR_CTIME | CEPH_SETATTR_BTIME;
      if (!(mask & CEPH_SETATTR_MTIME_NOW))
	check_mask |= CEPH_SETATTR_MTIME;
      if (!(mask & CEPH_SETATTR_ATIME_NOW))
	check_mask |= CEPH_SETATTR_ATIME;
      if (check_mask & mask) {
	goto out;
      } else {
	r = inode_permission(in, perms, MAY_WRITE);
	if (r < 0)
	  goto out;
      }
    }
  }
  r = 0;
out:
  ldout(cct, 3) << __func__ << " " << in << " = " << r << dendl;
  return r;
}

int Client::may_open(Inode *in, int flags, const UserPerm& perms)
{
  ldout(cct, 20) << __func__ << " " << *in << "; " << perms << dendl;
  unsigned want = 0;

  if ((flags & O_ACCMODE) == O_WRONLY)
    want = MAY_WRITE;
  else if ((flags & O_ACCMODE) == O_RDWR)
    want = MAY_READ | MAY_WRITE;
  else if ((flags & O_ACCMODE) == O_RDONLY)
    want = MAY_READ;
  if (flags & O_TRUNC)
    want |= MAY_WRITE;

  int r = 0;
  switch (in->mode & S_IFMT) {
    case S_IFLNK:
      r = -CEPHFS_ELOOP;
      goto out;
    case S_IFDIR:
      if (want & MAY_WRITE) {
	r = -CEPHFS_EISDIR;
	goto out;
      }
      break;
  }

  r = _getattr_for_perm(in, perms);
  if (r < 0)
    goto out;

  r = inode_permission(in, perms, want);
out:
  ldout(cct, 3) << __func__ << " " << in << " = " << r << dendl;
  return r;
}

int Client::may_lookup(Inode *dir, const UserPerm& perms)
{
  ldout(cct, 20) << __func__ << " " << *dir << "; " << perms << dendl;
  int r = _getattr_for_perm(dir, perms);
  if (r < 0)
    goto out;

  r = inode_permission(dir, perms, MAY_EXEC);
out:
  ldout(cct, 3) << __func__ << " " << dir << " = " << r << dendl;
  return r;
}

int Client::may_create(Inode *dir, const UserPerm& perms)
{
  ldout(cct, 20) << __func__ << " " << *dir << "; " << perms << dendl;
  int r = _getattr_for_perm(dir, perms);
  if (r < 0)
    goto out;

  r = inode_permission(dir, perms, MAY_EXEC | MAY_WRITE);
out:
  ldout(cct, 3) << __func__ << " " << dir << " = " << r << dendl;
  return r;
}

int Client::may_delete(Inode *dir, const char *name, const UserPerm& perms)
{
  ldout(cct, 20) << __func__ << " " << *dir << "; name " << name << "; " << perms << dendl;
  int r = _getattr_for_perm(dir, perms);
  if (r < 0)
    goto out;

  r = inode_permission(dir, perms, MAY_EXEC | MAY_WRITE);
  if (r < 0)
    goto out;

  /* 'name == NULL' means rmsnap w/o permission checks */
  if (perms.uid() != 0 && name && (dir->mode & S_ISVTX)) {
    InodeRef otherin;
    r = _lookup(dir, name, CEPH_CAP_AUTH_SHARED, &otherin, perms);
    if (r < 0)
      goto out;
    if (dir->uid != perms.uid() && otherin->uid != perms.uid())
      r = -CEPHFS_EPERM;
  }
out:
  ldout(cct, 3) << __func__ << " " << dir << " = " << r << dendl;
  return r;
}

int Client::may_delete(const char *relpath, const UserPerm& perms)
{
  ldout(cct, 20) << __func__ << " " << relpath << "; " << perms << dendl;

  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  filepath path(relpath);
  string name = path.last_dentry();
  path.pop_dentry();
  InodeRef dir;

  std::scoped_lock lock(client_lock);
  int r = path_walk(path, &dir, perms);
  if (r < 0)
    return r;
  if (cct->_conf->client_permissions) {
    int r = may_delete(dir.get(), name.c_str(), perms);
    if (r < 0)
      return r;
  }

  return 0;
}
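
/*
 * Worked example for the sticky-bit rule in may_delete() above (all values
 * hypothetical, illustration only): with client_permissions enabled,
 * unlinking "victim" from a mode-01777 directory such as /tmp requires the
 * caller to own either the directory or the entry itself:
 *
 *   UserPerm perms(1000, 1000);                  // caller's uid/gid
 *   int r = may_delete(tmp_dir, "victim", perms);
 *   // r == -CEPHFS_EPERM unless tmp_dir->uid == 1000,
 *   // the victim inode's uid == 1000, or the caller is root.
 */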

int Client::may_hardlink(Inode *in, const UserPerm& perms)
{
  ldout(cct, 20) << __func__ << " " << *in << "; " << perms << dendl;
  int r = _getattr_for_perm(in, perms);
  if (r < 0)
    goto out;

  if (perms.uid() == 0 || perms.uid() == in->uid) {
    r = 0;
    goto out;
  }

  r = -CEPHFS_EPERM;
  if (!S_ISREG(in->mode))
    goto out;

  if (in->mode & S_ISUID)
    goto out;

  if ((in->mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
    goto out;

  r = inode_permission(in, perms, MAY_READ | MAY_WRITE);
out:
  ldout(cct, 3) << __func__ << " " << in << " = " << r << dendl;
  return r;
}

int Client::_getattr_for_perm(Inode *in, const UserPerm& perms)
{
  int mask = CEPH_STAT_CAP_MODE;
  bool force = false;
  if (acl_type != NO_ACL) {
    mask |= CEPH_STAT_CAP_XATTR;
    force = in->xattr_version == 0;
  }
  return _getattr(in, mask, perms, force);
}

vinodeno_t Client::_get_vino(Inode *in)
{
  /* The caller must hold the client lock */
  return vinodeno_t(in->ino, in->snapid);
}

/**
 * Resolve an MDS spec to a list of MDS daemon GIDs.
 *
 * The spec is a string representing a GID, rank, filesystem:rank, or name/id.
 * It may be '*' in which case it matches all GIDs.
 *
 * If no error is returned, the `targets` vector will be populated with at least
 * one MDS.
 */
int Client::resolve_mds(
    const std::string &mds_spec,
    std::vector<mds_gid_t> *targets)
{
  ceph_assert(fsmap);
  ceph_assert(targets != nullptr);

  mds_role_t role;
  CachedStackStringStream css;
  int role_r = fsmap->parse_role(mds_spec, &role, *css);
  if (role_r == 0) {
    // We got a role, resolve it to a GID
    auto& info = fsmap->get_filesystem(role.fscid)->mds_map.get_info(role.rank);
    ldout(cct, 10) << __func__ << ": resolved " << mds_spec << " to role '"
		   << role << "' aka " << info.human_name() << dendl;
    targets->push_back(info.global_id);
    return 0;
  }

  std::string strtol_err;
  long long rank_or_gid = strict_strtoll(mds_spec.c_str(), 10, &strtol_err);
  if (strtol_err.empty()) {
    // It is a possible GID
    const mds_gid_t mds_gid = mds_gid_t(rank_or_gid);
    if (fsmap->gid_exists(mds_gid)) {
      auto& info = fsmap->get_info_gid(mds_gid);
      ldout(cct, 10) << __func__ << ": validated gid " << mds_gid << " aka "
		     << info.human_name() << dendl;
      targets->push_back(mds_gid);
      return 0;
    } else {
      lderr(cct) << __func__ << ": gid " << mds_gid << " not in MDS map"
		 << dendl;
      lderr(cct) << "FSMap: " << *fsmap << dendl;
      return -CEPHFS_ENOENT;
    }
  } else if (mds_spec == "*") {
    // It is a wildcard: use all MDSs
    const auto& mds_info = fsmap->get_mds_info();
    ldout(cct, 10) << __func__ << ": resolving `*' to all MDS daemons"
		   << dendl;
    if (mds_info.empty()) {
      lderr(cct) << __func__ << ": no MDS daemons found" << dendl;
      lderr(cct) << "FSMap: " << *fsmap << dendl;
      return -CEPHFS_ENOENT;
    }
    for (const auto& [gid, info] : mds_info) {
      ldout(cct, 10) << __func__ << ": appending " << info.human_name()
		     << " to targets" << dendl;
      targets->push_back(gid);
    }
    return 0;
  } else {
    // It did not parse as an integer, it is not a wildcard, it must be a name
    const mds_gid_t mds_gid = fsmap->find_mds_gid_by_name(mds_spec);
    if (mds_gid == mds_gid_t{0}) {
      lderr(cct) << __func__ << ": no MDS daemons found by name `" << mds_spec
		 << "'" << dendl;
      lderr(cct) << "FSMap: " << *fsmap << dendl;
      return -CEPHFS_ENOENT;
    } else {
      auto& info = fsmap->get_info_gid(mds_gid);
      ldout(cct, 10) << __func__ << ": resolved name '" << mds_spec
		     << "' to " << info.human_name() << dendl;
      targets->push_back(mds_gid);
    }
    return 0;
  }
}

/**
 * Authenticate with mon and establish global ID
 */
int Client::authenticate()
{
  ceph_assert(ceph_mutex_is_locked_by_me(client_lock));

  if (monclient->is_authenticated()) {
    return 0;
  }

  client_lock.unlock();
  int r = monclient->authenticate(std::chrono::duration<double>(mount_timeout).count());
  client_lock.lock();
  if (r < 0) {
    return r;
  }

  whoami = monclient->get_global_id();
  messenger->set_myname(entity_name_t::CLIENT(whoami.v));

  return 0;
}
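
/*
 * Usage sketch for resolve_mds() above (hypothetical caller, illustration
 * only). Any of the accepted spellings may be passed:
 *
 *   std::vector<mds_gid_t> targets;
 *   int r = resolve_mds("cephfs:0", &targets);   // filesystem:rank
 *   // resolve_mds("4815", &targets);            // a raw GID
 *   // resolve_mds("a", &targets);               // a daemon name/id
 *   // resolve_mds("*", &targets);               // every daemon in the FSMap
 *
 * On success, targets holds at least one GID, per the contract documented
 * above; the example GID and names are made up.
 */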

int Client::fetch_fsmap(bool user)
{
  ceph_assert(ceph_mutex_is_locked_by_me(client_lock));

  // Retrieve FSMap to enable looking up daemon addresses. We need FSMap
  // rather than MDSMap because no one MDSMap contains all the daemons, and
  // a `tell` can address any daemon.
  version_t fsmap_latest;
  bs::error_code ec;
  do {
    client_lock.unlock();
    std::tie(fsmap_latest, std::ignore) =
      monclient->get_version("fsmap", ca::use_blocked[ec]);
    client_lock.lock();
  } while (ec == bs::errc::resource_unavailable_try_again);

  if (ec) {
    lderr(cct) << "Failed to learn FSMap version: " << ec << dendl;
    return ceph::from_error_code(ec);
  }

  ldout(cct, 10) << __func__ << " learned FSMap version " << fsmap_latest << dendl;

  if (user) {
    if (!fsmap_user || fsmap_user->get_epoch() < fsmap_latest) {
      monclient->sub_want("fsmap.user", fsmap_latest, CEPH_SUBSCRIBE_ONETIME);
      monclient->renew_subs();
      wait_on_list(waiting_for_fsmap);
    }
    ceph_assert(fsmap_user);
    ceph_assert(fsmap_user->get_epoch() >= fsmap_latest);
  } else {
    if (!fsmap || fsmap->get_epoch() < fsmap_latest) {
      monclient->sub_want("fsmap", fsmap_latest, CEPH_SUBSCRIBE_ONETIME);
      monclient->renew_subs();
      wait_on_list(waiting_for_fsmap);
    }
    ceph_assert(fsmap);
    ceph_assert(fsmap->get_epoch() >= fsmap_latest);
  }
  ldout(cct, 10) << __func__ << " finished waiting for FSMap version "
		 << fsmap_latest << dendl;
  return 0;
}

/**
 *
 * @mds_spec one of ID, rank, GID, "*"
 *
 */
int Client::mds_command(
    const std::string &mds_spec,
    const vector<string>& cmd,
    const bufferlist& inbl,
    bufferlist *outbl,
    string *outs,
    Context *onfinish)
{
  RWRef_t iref_reader(initialize_state, CLIENT_INITIALIZED);
  if (!iref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  std::unique_lock cl(client_lock);

  int r;
  r = authenticate();
  if (r < 0) {
    return r;
  }

  r = fetch_fsmap(false);
  if (r < 0) {
    return r;
  }

  // Look up MDS target(s) of the command
  std::vector<mds_gid_t> targets;
  r = resolve_mds(mds_spec, &targets);
  if (r < 0) {
    return r;
  }

  // If daemons are laggy, we won't send them commands. If all
  // are laggy then we fail.
  std::vector<mds_gid_t> non_laggy;
  for (const auto& gid : targets) {
    const auto info = fsmap->get_info_gid(gid);
    if (!info.laggy()) {
      non_laggy.push_back(gid);
    }
  }
  if (non_laggy.size() == 0) {
    *outs = "All targeted MDS daemons are laggy";
    return -CEPHFS_ENOENT;
  }

  if (metadata.empty()) {
    // We are called on an unmounted client, so metadata
    // won't be initialized yet.
populate_metadata(""); } // Send commands to targets C_GatherBuilder gather(cct, onfinish); for (const auto& target_gid : non_laggy) { const auto info = fsmap->get_info_gid(target_gid); // Open a connection to the target MDS ConnectionRef conn = messenger->connect_to_mds(info.get_addrs()); cl.unlock(); { std::scoped_lock cmd_lock(command_lock); // Generate MDSCommandOp state auto &op = command_table.start_command(); op.on_finish = gather.new_sub(); op.cmd = cmd; op.outbl = outbl; op.outs = outs; op.inbl = inbl; op.mds_gid = target_gid; op.con = conn; ldout(cct, 4) << __func__ << ": new command op to " << target_gid << " tid=" << op.tid << cmd << dendl; // Construct and send MCommand MessageRef m = op.get_message(monclient->get_fsid()); conn->send_message2(std::move(m)); } cl.lock(); } gather.activate(); return 0; } void Client::handle_command_reply(const MConstRef<MCommandReply>& m) { ceph_tid_t const tid = m->get_tid(); ldout(cct, 10) << __func__ << ": tid=" << m->get_tid() << dendl; std::scoped_lock cmd_lock(command_lock); if (!command_table.exists(tid)) { ldout(cct, 1) << __func__ << ": unknown tid " << tid << ", dropping" << dendl; return; } auto &op = command_table.get_command(tid); if (op.outbl) { *op.outbl = m->get_data(); } if (op.outs) { *op.outs = m->rs; } if (op.on_finish) { op.on_finish->complete(m->r); } command_table.erase(tid); } // ------------------- // MOUNT int Client::subscribe_mdsmap(const std::string &fs_name) { int r = authenticate(); if (r < 0) { lderr(cct) << "authentication failed: " << cpp_strerror(r) << dendl; return r; } std::string resolved_fs_name; if (fs_name.empty()) { resolved_fs_name = cct->_conf.get_val<std::string>("client_fs"); if (resolved_fs_name.empty()) // Try the backwards compatibility fs name option resolved_fs_name = cct->_conf.get_val<std::string>("client_mds_namespace"); } else { resolved_fs_name = fs_name; } std::string want = "mdsmap"; if (!resolved_fs_name.empty()) { r = fetch_fsmap(true); if (r < 0) return r; fscid = fsmap_user->get_fs_cid(resolved_fs_name); if (fscid == FS_CLUSTER_ID_NONE) { return -CEPHFS_ENOENT; } std::ostringstream oss; oss << want << "." << fscid; want = oss.str(); } ldout(cct, 10) << "Subscribing to map '" << want << "'" << dendl; monclient->sub_want(want, 0, 0); monclient->renew_subs(); return 0; } int Client::mount(const std::string &mount_root, const UserPerm& perms, bool require_mds, const std::string &fs_name) { ceph_assert(is_initialized()); /* * To make sure that the _unmount() must wait until the mount() * is done. */ RWRef_t mref_writer(mount_state, CLIENT_MOUNTING, false); if (!mref_writer.is_first_writer()) // already mounting or mounted return 0; std::unique_lock cl(client_lock); int r = subscribe_mdsmap(fs_name); if (r < 0) { lderr(cct) << "mdsmap subscription failed: " << cpp_strerror(r) << dendl; return r; } start_tick_thread(); // start tick thread if (require_mds) { while (1) { auto availability = mdsmap->is_cluster_available(); if (availability == MDSMap::STUCK_UNAVAILABLE) { // Error out ldout(cct, 10) << "mds cluster unavailable: epoch=" << mdsmap->get_epoch() << dendl; return CEPH_FUSE_NO_MDS_UP; } else if (availability == MDSMap::AVAILABLE) { // Continue to mount break; } else if (availability == MDSMap::TRANSIENT_UNAVAILABLE) { // Else, wait. MDSMonitor will update the map to bring // us to a conclusion eventually. wait_on_list(waiting_for_mdsmap); } else { // Unexpected value! 
	ceph_abort();
      }
    }
  }

  if (mdsmap->test_flag(CEPH_MDSMAP_REFUSE_CLIENT_SESSION)) {
    lderr(cct) << "connections cannot be made while"
		  " the flag refuse_client_session is set" << dendl;
    return -CEPHFS_EACCES;
  }

  populate_metadata(mount_root.empty() ? "/" : mount_root);

  filepath fp(CEPH_INO_ROOT);
  if (!mount_root.empty()) {
    fp = filepath(mount_root.c_str());
  }
  while (true) {
    MetaRequest *req = new MetaRequest(CEPH_MDS_OP_GETATTR);
    req->set_filepath(fp);
    req->head.args.getattr.mask = CEPH_STAT_CAP_INODE_ALL;
    int res = make_request(req, perms);
    if (res < 0) {
      if (res == -CEPHFS_EACCES && root) {
	ldout(cct, 1) << __func__ << " EACCES on parent of mount point; quotas may not work" << dendl;
	break;
      }
      return res;
    }

    if (fp.depth())
      fp.pop_dentry();
    else
      break;
  }

  ceph_assert(root);
  _ll_get(root.get());

  // trace?
  if (!cct->_conf->client_trace.empty()) {
    traceout.open(cct->_conf->client_trace.c_str());
    if (traceout.is_open()) {
      ldout(cct, 1) << "opened trace file '" << cct->_conf->client_trace << "'" << dendl;
    } else {
      ldout(cct, 1) << "FAILED to open trace file '" << cct->_conf->client_trace << "'" << dendl;
    }
  }

  /*
  ldout(cct, 3) << "op: // client trace data structs" << dendl;
  ldout(cct, 3) << "op: struct stat st;" << dendl;
  ldout(cct, 3) << "op: struct utimbuf utim;" << dendl;
  ldout(cct, 3) << "op: int readlinkbuf_len = 1000;" << dendl;
  ldout(cct, 3) << "op: char readlinkbuf[readlinkbuf_len];" << dendl;
  ldout(cct, 3) << "op: map<string, inode_t*> dir_contents;" << dendl;
  ldout(cct, 3) << "op: map<int, int> open_files;" << dendl;
  ldout(cct, 3) << "op: int fd;" << dendl;
  */

  mref_writer.update_state(CLIENT_MOUNTED);
  return 0;
}

// UNMOUNT

void Client::_close_sessions()
{
  for (auto it = mds_sessions.begin(); it != mds_sessions.end(); ) {
    if (it->second->state == MetaSession::STATE_REJECTED)
      mds_sessions.erase(it++);
    else
      ++it;
  }

  while (!mds_sessions.empty()) {
    // send session closes!
    for (auto &p : mds_sessions) {
      if (p.second->state != MetaSession::STATE_CLOSING) {
	_close_mds_session(p.second.get());
	mds_ranks_closing.insert(p.first);
      }
    }

    // wait for sessions to close
    double timo = cct->_conf.get_val<std::chrono::seconds>("client_shutdown_timeout").count();
    ldout(cct, 2) << "waiting for " << mds_ranks_closing.size()
		  << " mds session(s) to close (timeout: " << timo << "s)" << dendl;
    std::unique_lock l{client_lock, std::adopt_lock};
    if (!timo) {
      mount_cond.wait(l);
    } else if (!mount_cond.wait_for(l, ceph::make_timespan(timo), [this] { return mds_ranks_closing.empty(); })) {
      ldout(cct, 1) << mds_ranks_closing.size()
		    << " mds(s) did not respond to session close -- timing out." << dendl;
      while (!mds_ranks_closing.empty()) {
	auto session = mds_sessions.at(*mds_ranks_closing.begin());
	// this prunes entry from mds_sessions and mds_ranks_closing
	_closed_mds_session(session.get(), -CEPHFS_ETIMEDOUT);
      }
    }

    mds_ranks_closing.clear();
    l.release();
  }
}

void Client::flush_mdlog_sync(Inode *in)
{
  if (in->unsafe_ops.empty()) {
    return;
  }

  std::set<mds_rank_t> anchor;
  for (auto &&p : in->unsafe_ops) {
    anchor.emplace(p->mds);
  }
  if (in->auth_cap) {
    anchor.emplace(in->auth_cap->session->mds_num);
  }

  for (auto &rank : anchor) {
    auto session = &mds_sessions.at(rank);
    flush_mdlog(session->get());
  }
}

void Client::flush_mdlog_sync()
{
  if (mds_requests.empty())
    return;
  for (auto &p : mds_sessions) {
    flush_mdlog(p.second.get());
  }
}
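
/*
 * Configuration sketch for the session-close wait in _close_sessions()
 * above (value hypothetical): a zero timeout waits indefinitely, any other
 * value caps the wait and force-closes stragglers:
 *
 *   # ceph.conf (client section)
 *   # client_shutdown_timeout = 30   ; seconds; 0 = wait forever
 *
 * Once the timeout expires, each remaining session is torn down via
 * _closed_mds_session(..., -CEPHFS_ETIMEDOUT), as shown above.
 */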

void Client::flush_mdlog(MetaSession *session)
{
  // Only send this to Luminous or newer MDS daemons, older daemons
  // will crash if they see an unknown CEPH_SESSION_* value in this msg.
  const uint64_t features = session->con->get_features();
  if (HAVE_FEATURE(features, SERVER_LUMINOUS)) {
    auto m = make_message<MClientSession>(CEPH_SESSION_REQUEST_FLUSH_MDLOG);
    session->con->send_message2(std::move(m));
  }
}

void Client::_abort_mds_sessions(int err)
{
  for (auto p = mds_requests.begin(); p != mds_requests.end(); ) {
    auto req = p->second;
    ++p;
    // unsafe requests will be removed during close session below.
    if (req->got_unsafe)
      continue;

    req->abort(err);
    if (req->caller_cond) {
      req->kick = true;
      req->caller_cond->notify_all();
    }
  }

  // Process aborts on any requests that were on this waitlist.
  // Any requests that were on a waiting_for_open session waitlist
  // will get kicked during close session below.
  signal_cond_list(waiting_for_mdsmap);

  // Force-close all sessions
  while (!mds_sessions.empty()) {
    auto session = mds_sessions.begin()->second;
    _closed_mds_session(session.get(), err);
  }
}

void Client::_unmount(bool abort)
{
  /*
   * We are unmounting the client.
   *
   * Just declare the state to STATE_UNMOUNTING to block and fail
   * any newly coming "reader", and then try to wait for all the
   * in-flight "readers" to finish.
   */
  RWRef_t mref_writer(mount_state, CLIENT_UNMOUNTING, false);
  if (!mref_writer.is_first_writer())
    return;
  mref_writer.wait_readers_done();

  std::unique_lock lock{client_lock};

  if (abort || blocklisted) {
    ldout(cct, 2) << "unmounting (" << (abort ? "abort)" : "blocklisted)") << dendl;
  } else {
    ldout(cct, 2) << "unmounting" << dendl;
  }

  deleg_timeout = 0;

  if (abort) {
    mount_aborted = true;
    // Abort all mds sessions
    _abort_mds_sessions(-CEPHFS_ENOTCONN);

    objecter->op_cancel_writes(-CEPHFS_ENOTCONN);
  } else {
    // flush the mdlog for pending requests, if any
    flush_mdlog_sync();
  }

  mount_cond.wait(lock, [this] {
    // Only wait for write OPs
    for (auto& [tid, req] : mds_requests) {
      if (req->is_write()) {
	ldout(cct, 10) << "waiting for write request '" << tid
		       << "' to complete, currently there are "
		       << mds_requests.size()
		       << " outstanding read/write requests"
		       << dendl;
	return false;
      }
    }
    return true;
  });

  cwd.reset();
  root.reset();

  // clean up any unclosed files
  while (!fd_map.empty()) {
    Fh *fh = fd_map.begin()->second;
    fd_map.erase(fd_map.begin());
    ldout(cct, 0) << " destroyed lost open file " << fh << " on " << *fh->inode << dendl;
    _release_fh(fh);
  }

  while (!ll_unclosed_fh_set.empty()) {
    set<Fh*>::iterator it = ll_unclosed_fh_set.begin();
    Fh *fh = *it;
    ll_unclosed_fh_set.erase(fh);
    ldout(cct, 0) << " destroyed lost open file " << fh << " on " << *(fh->inode) << dendl;
    _release_fh(fh);
  }

  while (!opened_dirs.empty()) {
    dir_result_t *dirp = *opened_dirs.begin();
    ldout(cct, 0) << " destroyed lost open dir " << dirp << " on " << *dirp->inode << dendl;
    _closedir(dirp);
  }

  _ll_drop_pins();

  if (cct->_conf->client_oc) {
    // flush/release all buffered data
    std::list<InodeRef> anchor;
    for (auto& p : inode_map) {
      Inode *in = p.second;
      if (!in) {
	ldout(cct, 0) << "null inode_map entry ino " << p.first << dendl;
	ceph_assert(in);
      }

      // prevent inode from getting freed
      anchor.emplace_back(in);

      if (abort || blocklisted) {
	objectcacher->purge_set(&in->oset);
      } else if (!in->caps.empty()) {
	_release(in);
	_flush(in, new C_Client_FlushComplete(this, in));
      }
    }
  }

  if (abort || blocklisted) {
    for (auto &q : mds_sessions) {
      auto s = q.second;
      for (auto p = s->dirty_list.begin(); !p.end(); ) {
	Inode *in = *p;
	++p;
	if (in->dirty_caps) {
	  ldout(cct, 0) << " drop dirty caps on " << *in << dendl;
	  in->mark_caps_clean();
	  put_inode(in);
	}
      }
    }
  } else {
    flush_caps_sync();
    wait_sync_caps(last_flush_tid);
  }

  // empty lru cache
  trim_cache();

  delay_put_inodes();

  while (lru.lru_get_size() > 0 ||
	 !inode_map.empty()) {
    ldout(cct, 2) << "cache still has " << lru.lru_get_size()
		  << "+" << inode_map.size() << " items"
		  << ", waiting (for caps to release?)"
		  << dendl;

    if (auto r = mount_cond.wait_for(lock, ceph::make_timespan(5));
	r == std::cv_status::timeout) {
      dump_cache(NULL);
    }
  }
  ceph_assert(lru.lru_get_size() == 0);
  ceph_assert(inode_map.empty());

  // stop tracing
  if (!cct->_conf->client_trace.empty()) {
    ldout(cct, 1) << "closing trace file '" << cct->_conf->client_trace << "'" << dendl;
    traceout.close();
  }

  // stop the tick thread
  tick_thread_stopped = true;
  upkeep_cond.notify_one();

  _close_sessions();

  // release the global snapshot realm
  SnapRealm *global_realm = snap_realms[CEPH_INO_GLOBAL_SNAPREALM];
  if (global_realm) {
    ceph_assert(global_realm->nref == 1);
    put_snap_realm(global_realm);
  }

  mref_writer.update_state(CLIENT_UNMOUNTED);

  /*
   * Stop the remount_queue before clearing the mountpoint memory
   * to avoid possible use-after-free bug.
   */
  if (remount_cb) {
    ldout(cct, 10) << "unmount stopping remount finisher" << dendl;
    remount_finisher.wait_for_empty();
    remount_finisher.stop();
    remount_cb = nullptr;
  }

  ldout(cct, 2) << "unmounted." << dendl;
}

void Client::unmount()
{
  _unmount(false);
}

void Client::abort_conn()
{
  _unmount(true);
}

void Client::flush_cap_releases()
{
  uint64_t nr_caps = 0;

  // send any cap releases
  for (auto &p : mds_sessions) {
    auto session = p.second;
    if (session->release && mdsmap->is_clientreplay_or_active_or_stopping(
	  p.first)) {
      nr_caps += session->release->caps.size();
      if (cct->_conf->client_inject_release_failure) {
	ldout(cct, 20) << __func__ << " injecting failure to send cap release message" << dendl;
      } else {
	session->con->send_message2(std::move(session->release));
      }
      session->release.reset();
    }
  }

  if (nr_caps > 0) {
    dec_pinned_icaps(nr_caps);
  }
}
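
/*
 * Timing sketch for renew_and_flush_cap_releases() below (illustrative
 * arithmetic only): renewal fires once the time since the last renew
 * exceeds a third of the MDS session timeout. Assuming the common default
 * mds_session_timeout of 60 seconds:
 *
 *   60 / 3.0 = 20    // caps renewed roughly every 20 seconds
 *
 * so a client has several renew windows before the MDS could consider the
 * session stale.
 */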

void Client::renew_and_flush_cap_releases()
{
  ceph_assert(ceph_mutex_is_locked_by_me(client_lock));

  if (!mount_aborted && mdsmap->get_epoch()) {
    // renew caps?
    auto el = ceph::coarse_mono_clock::now() - last_cap_renew;
    if (unlikely(utime_t(el) > mdsmap->get_session_timeout() / 3.0))
      renew_caps();

    flush_cap_releases();
  }
}

void Client::tick()
{
  ldout(cct, 20) << "tick" << dendl;

  auto now = ceph::coarse_mono_clock::now();

  /*
   * If the mount() is not finished
   */
  if (is_mounting() && !mds_requests.empty()) {
    MetaRequest *req = mds_requests.begin()->second;

    if (req->created + mount_timeout < now) {
      req->abort(-CEPHFS_ETIMEDOUT);
      if (req->caller_cond) {
	req->kick = true;
	req->caller_cond->notify_all();
      }
      signal_cond_list(waiting_for_mdsmap);
      for (auto &p : mds_sessions) {
	signal_context_list(p.second->waiting_for_open);
      }
    }
  }

  renew_and_flush_cap_releases();

  // delayed caps
  xlist<Inode*>::iterator p = delayed_list.begin();
  while (!p.end()) {
    Inode *in = *p;
    ++p;
    if (!mount_aborted && in->hold_caps_until > now)
      break;
    delayed_list.pop_front();
    if (!mount_aborted)
      check_caps(in, CHECK_CAPS_NODELAY);
  }

  if (!mount_aborted)
    collect_and_send_metrics();

  delay_put_inodes(is_unmounting());
  trim_cache(true);

  if (blocklisted && (is_mounted() || is_unmounting()) &&
      last_auto_reconnect + std::chrono::seconds(30 * 60) < now &&
      cct->_conf.get_val<bool>("client_reconnect_stale")) {
    messenger->client_reset();
    fd_gen++; // invalidate open files
    blocklisted = false;
    _kick_stale_sessions();
    last_auto_reconnect = now;
  }
}

void Client::start_tick_thread()
{
  upkeeper = std::thread([this]() {
    using time = ceph::coarse_mono_time;
    using sec = std::chrono::seconds;

    auto last_tick = time::min();

    std::unique_lock cl(client_lock);
    while (!tick_thread_stopped) {
      auto now = clock::now();
      auto since = now - last_tick;

      auto t_interval = clock::duration(cct->_conf.get_val<sec>("client_tick_interval"));
      auto d_interval = clock::duration(cct->_conf.get_val<sec>("client_debug_inject_tick_delay"));

      auto interval = std::max(t_interval, d_interval);
      if (likely(since >= interval*.90)) {
	tick();
	last_tick = clock::now();
      } else {
	interval -= since;
      }

      ldout(cct, 20) << "upkeep thread waiting interval " << interval << dendl;
      if (!tick_thread_stopped)
	upkeep_cond.wait_for(cl, interval);
    }
  });
}

void Client::collect_and_send_metrics() {
  ldout(cct, 20) << __func__ << dendl;

  ceph_assert(ceph_mutex_is_locked_by_me(client_lock));

  // right now, we only track and send global metrics. it's sufficient
  // to send these metrics to MDS rank0.
  collect_and_send_global_metrics();
}

void Client::collect_and_send_global_metrics() {
  ldout(cct, 20) << __func__ << dendl;
  ceph_assert(ceph_mutex_is_locked_by_me(client_lock));

  /* Do not send the metrics until the MDS rank is ready */
  if (!mdsmap->is_active((mds_rank_t)0)) {
    ldout(cct, 5) << __func__ << " MDS rank 0 is not ready yet -- not sending metric"
		  << dendl;
    return;
  }

  if (!have_open_session((mds_rank_t)0)) {
    ldout(cct, 5) << __func__ << ": no session with rank=0 -- not sending metric"
		  << dendl;
    return;
  }
  auto session = _get_or_open_mds_session((mds_rank_t)0);
  if (!session->mds_features.test(CEPHFS_FEATURE_METRIC_COLLECT)) {
    ldout(cct, 5) << __func__ << ": rank=0 does not support metrics" << dendl;
    return;
  }

  ClientMetricMessage metric;
  std::vector<ClientMetricMessage> message;

  // read latency
  if (_collect_and_send_global_metrics ||
      session->mds_metric_flags.test(CLIENT_METRIC_TYPE_READ_LATENCY)) {
    metric = ClientMetricMessage(ReadLatencyPayload(logger->tget(l_c_read),
						    logger->tget(l_c_rd_avg),
						    logger->get(l_c_rd_sqsum),
						    nr_read_request));
    message.push_back(metric);
  }

  // write latency
  if (_collect_and_send_global_metrics ||
      session->mds_metric_flags.test(CLIENT_METRIC_TYPE_WRITE_LATENCY)) {
    metric = ClientMetricMessage(WriteLatencyPayload(logger->tget(l_c_wrlat),
						     logger->tget(l_c_wr_avg),
						     logger->get(l_c_wr_sqsum),
						     nr_write_request));
    message.push_back(metric);
  }

  // metadata latency
  if (_collect_and_send_global_metrics ||
      session->mds_metric_flags.test(CLIENT_METRIC_TYPE_METADATA_LATENCY)) {
    metric = ClientMetricMessage(MetadataLatencyPayload(logger->tget(l_c_lat),
							logger->tget(l_c_md_avg),
							logger->get(l_c_md_sqsum),
							nr_metadata_request));
    message.push_back(metric);
  }

  // cap hit ratio -- nr_caps is unused right now
  if (_collect_and_send_global_metrics ||
      session->mds_metric_flags.test(CLIENT_METRIC_TYPE_CAP_INFO)) {
    auto [cap_hits, cap_misses] = get_cap_hit_rates();
    metric = ClientMetricMessage(CapInfoPayload(cap_hits, cap_misses, 0));
    message.push_back(metric);
  }

  // dentry lease hit ratio
  if (_collect_and_send_global_metrics ||
      session->mds_metric_flags.test(CLIENT_METRIC_TYPE_DENTRY_LEASE)) {
    auto [dlease_hits, dlease_misses, nr] = get_dlease_hit_rates();
    metric = ClientMetricMessage(DentryLeasePayload(dlease_hits, dlease_misses, nr));
    message.push_back(metric);
  }

  // opened files
  if (_collect_and_send_global_metrics ||
      session->mds_metric_flags.test(CLIENT_METRIC_TYPE_OPENED_FILES)) {
    auto [opened_files, total_inodes] = get_opened_files_rates();
    metric = ClientMetricMessage(OpenedFilesPayload(opened_files, total_inodes));
    message.push_back(metric);
  }

  // pinned i_caps
  if (_collect_and_send_global_metrics ||
      session->mds_metric_flags.test(CLIENT_METRIC_TYPE_PINNED_ICAPS)) {
    auto [pinned_icaps, total_inodes] = get_pinned_icaps_rates();
    metric = ClientMetricMessage(PinnedIcapsPayload(pinned_icaps, total_inodes));
    message.push_back(metric);
  }

  // opened inodes
  if (_collect_and_send_global_metrics ||
      session->mds_metric_flags.test(CLIENT_METRIC_TYPE_OPENED_INODES)) {
    auto [opened_inodes, total_inodes] = get_opened_inodes_rates();
    metric = ClientMetricMessage(OpenedInodesPayload(opened_inodes, total_inodes));
    message.push_back(metric);
  }

  // read io sizes
  if (_collect_and_send_global_metrics ||
      session->mds_metric_flags.test(CLIENT_METRIC_TYPE_READ_IO_SIZES)) {
    metric = ClientMetricMessage(ReadIoSizesPayload(total_read_ops,
						    total_read_size));
    message.push_back(metric);
  }

  // write io sizes
  if (_collect_and_send_global_metrics ||
      session->mds_metric_flags.test(CLIENT_METRIC_TYPE_WRITE_IO_SIZES)) {
    metric = ClientMetricMessage(WriteIoSizesPayload(total_write_ops,
						     total_write_size));
    message.push_back(metric);
  }

  session->con->send_message2(make_message<MClientMetrics>(std::move(message)));
}

void Client::renew_caps()
{
  ldout(cct, 10) << "renew_caps()" << dendl;
  last_cap_renew = ceph::coarse_mono_clock::now();

  for (auto &p : mds_sessions) {
    ldout(cct, 15) << "renew_caps requesting from mds." << p.first << dendl;
    if (mdsmap->get_state(p.first) >= MDSMap::STATE_REJOIN)
      renew_caps(p.second.get());
  }
}

void Client::renew_caps(MetaSession *session)
{
  ldout(cct, 10) << "renew_caps mds." << session->mds_num << dendl;
  session->last_cap_renew_request = ceph_clock_now();
  uint64_t seq = ++session->cap_renew_seq;
  session->con->send_message2(make_message<MClientSession>(CEPH_SESSION_REQUEST_RENEWCAPS, seq));
}

// ===============================================================
// high level (POSIXy) interface

int Client::_do_lookup(Inode *dir, const string& name, int mask,
		       InodeRef *target, const UserPerm& perms)
{
  int op = dir->snapid == CEPH_SNAPDIR ? CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
  MetaRequest *req = new MetaRequest(op);
  filepath path;
  dir->make_nosnap_relative_path(path);
  path.push_dentry(name);
  req->set_filepath(path);
  req->set_inode(dir);
  if (cct->_conf->client_debug_getattr_caps && op == CEPH_MDS_OP_LOOKUP)
    mask |= DEBUG_GETATTR_CAPS;
  req->head.args.getattr.mask = mask;

  ldout(cct, 10) << __func__ << " on " << path << dendl;

  int r = make_request(req, perms, target);
  ldout(cct, 10) << __func__ << " res is " << r << dendl;
  return r;
}

bool Client::_dentry_valid(const Dentry *dn)
{
  ceph_assert(ceph_mutex_is_locked_by_me(client_lock));

  // is dn lease valid?
  utime_t now = ceph_clock_now();
  if (dn->lease_mds >= 0 && dn->lease_ttl > now &&
      mds_sessions.count(dn->lease_mds)) {
    auto s = mds_sessions.at(dn->lease_mds);
    if (s->cap_ttl > now && s->cap_gen == dn->lease_gen) {
      dlease_hit();
      return true;
    }

    ldout(cct, 20) << " bad lease, cap_ttl " << s->cap_ttl << ", cap_gen "
		   << s->cap_gen << " vs lease_gen " << dn->lease_gen << dendl;
  }

  dlease_miss();
  return false;
}

int Client::_lookup(Inode *dir, const string& dname, int mask, InodeRef *target,
		    const UserPerm& perms, std::string* alternate_name,
		    bool is_rename)
{
  int r = 0;
  Dentry *dn = NULL;
  bool did_lookup_request = false;
  // can only request shared caps
  mask &= CEPH_CAP_ANY_SHARED | CEPH_STAT_RSTAT;

  if (dname == "..") {
    if (dir->dentries.empty()) {
      MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPPARENT);
      filepath path(dir->ino);
      req->set_filepath(path);

      InodeRef tmptarget;
      int r = make_request(req, perms, &tmptarget, NULL, rand() % mdsmap->get_num_in_mds());

      if (r == 0) {
	*target = std::move(tmptarget);
	ldout(cct, 8) << __func__ << " found target " << (*target)->ino << dendl;
      } else {
	*target = dir;
      }
    } else
      *target = dir->get_first_parent()->dir->parent_inode; //dirs can't be hard-linked
    goto done;
  }

  if (dname == ".") {
    *target = dir;
    goto done;
  }

  if (!dir->is_dir()) {
    r = -CEPHFS_ENOTDIR;
    goto done;
  }

  if (dname.length() > NAME_MAX) {
    r = -CEPHFS_ENAMETOOLONG;
    goto done;
  }

  if (dname == cct->_conf->client_snapdir &&
      dir->snapid == CEPH_NOSNAP) {
    *target = open_snapdir(dir);
    goto done;
  }

relookup:
  if (dir->dir &&
      dir->dir->dentries.count(dname)) {
    dn = dir->dir->dentries[dname];

    ldout(cct, 20) << __func__ << " have " << *dn << " from mds."
		   << dn->lease_mds << " ttl " << dn->lease_ttl
		   << " seq " << dn->lease_seq << dendl;

    if (!dn->inode || dn->inode->caps_issued_mask(mask, true)) {
      if (_dentry_valid(dn)) {
	// touch this mds's dir cap too, even though we don't _explicitly_
	// use it here, to make trim_caps() behave.
	dir->try_touch_cap(dn->lease_mds);
	goto hit_dn;
      }
      // dir shared caps?
      if (dir->caps_issued_mask(CEPH_CAP_FILE_SHARED, true)) {
	if (dn->cap_shared_gen == dir->shared_gen &&
	    (!dn->inode || dn->inode->caps_issued_mask(mask, true)))
	  goto hit_dn;
	if (!dn->inode && (dir->flags & I_COMPLETE)) {
	  ldout(cct, 10) << __func__ << " concluded ENOENT locally for "
			 << *dir << " dn '" << dname << "'" << dendl;
	  return -CEPHFS_ENOENT;
	}
      }
    } else {
      ldout(cct, 20) << " no cap on " << dn->inode->vino() << dendl;
    }

    // In the rare case where another thread does a lookup of the dst dentry
    // during a rename, it may get an inconsistent result in which both the
    // src dentry and the dst dentry link to the same inode at the same time.
    // Wait for the rename to finish and then try again.
    if (!is_rename && dn->is_renaming) {
      ldout(cct, 1) << __func__ << " dir " << *dir
		    << " rename is on the way, will wait for dn '"
		    << dname << "'" << dendl;
      wait_on_list(waiting_for_rename);
      goto relookup;
    }
  } else {
    // can we conclude ENOENT locally?
    if (dir->caps_issued_mask(CEPH_CAP_FILE_SHARED, true) &&
	(dir->flags & I_COMPLETE)) {
      ldout(cct, 10) << __func__ << " concluded ENOENT locally for " << *dir
		     << " dn '" << dname << "'" << dendl;
      return -CEPHFS_ENOENT;
    }
  }

  if (did_lookup_request) {
    r = 0;
    goto done;
  }
  r = _do_lookup(dir, dname, mask, target, perms);
  did_lookup_request = true;
  if (r == 0) {
    /* complete lookup to get dentry for alternate_name */
    goto relookup;
  } else {
    goto done;
  }

hit_dn:
  if (dn->inode) {
    *target = dn->inode;
    if (alternate_name)
      *alternate_name = dn->alternate_name;
  } else {
    r = -CEPHFS_ENOENT;
  }
  touch_dn(dn);
  goto done;

done:
  if (r < 0)
    ldout(cct, 10) << __func__ << " " << *dir << " " << dname << " = " << r << dendl;
  else
    ldout(cct, 10) << __func__ << " " << *dir << " " << dname << " = " << **target << dendl;
  return r;
}

Dentry *Client::get_or_create(Inode *dir, const char* name)
{
  // lookup
  ldout(cct, 20) << __func__ << " " << *dir << " name " << name << dendl;
  dir->open_dir();
  if (dir->dir->dentries.count(name))
    return dir->dir->dentries[name];
  else // otherwise link up a new one
    return link(dir->dir, name, NULL, NULL);
}

int Client::walk(std::string_view path, walk_dentry_result* wdr, const UserPerm& perms, bool followsym)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  ldout(cct, 10) << __func__ << ": " << path << dendl;

  std::scoped_lock lock(client_lock);

  return path_walk(path, wdr, perms, followsym);
}

int Client::path_walk(const filepath& origpath, InodeRef *end,
		      const UserPerm& perms, bool followsym, int mask,
		      InodeRef dirinode)
{
  walk_dentry_result wdr;
  int rc = path_walk(origpath, &wdr, perms, followsym, mask, dirinode);
  *end = std::move(wdr.in);
  return rc;
}
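
/*
 * Behaviour sketch for the main path_walk() overload below (hypothetical
 * namespace layout, illustration only). Given /a/link -> "/b":
 *
 *   path_walk(filepath("/a/link/c"), &in, perms, false);
 *   // non-trailing symlinks are always followed ('directory' symlinks),
 *   // so this resolves to /b/c even with followsym == false
 *
 *   path_walk(filepath("/a/link"), &in, perms, false);
 *   // the trailing symlink is NOT followed; `in` is the link itself
 *
 * Exceeding MAXSYMLINKS expansions returns -CEPHFS_ELOOP.
 */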

int Client::path_walk(const filepath& origpath, walk_dentry_result* result,
		      const UserPerm& perms, bool followsym, int mask,
		      InodeRef dirinode)
{
  filepath path = origpath;
  InodeRef cur;
  std::string alternate_name;
  if (origpath.absolute())
    cur = root;
  else if (!dirinode)
    cur = cwd;
  else {
    cur = dirinode;
  }
  ceph_assert(cur);

  ldout(cct, 20) << __func__ << " cur=" << *cur << dendl;
  ldout(cct, 10) << __func__ << " " << path << dendl;

  int symlinks = 0;

  unsigned i=0;
  while (i < path.depth() && cur) {
    int caps = 0;
    const string &dname = path[i];
    ldout(cct, 10) << " " << i << " " << *cur << " " << dname << dendl;
    ldout(cct, 20) << " (path is " << path << ")" << dendl;
    InodeRef next;
    if (cct->_conf->client_permissions) {
      int r = may_lookup(cur.get(), perms);
      if (r < 0)
	return r;
      caps = CEPH_CAP_AUTH_SHARED;
    }

    /* Get extra requested caps on the last component */
    if (i == (path.depth() - 1))
      caps |= mask;
    int r = _lookup(cur.get(), dname, caps, &next, perms, &alternate_name);
    if (r < 0)
      return r;
    // only follow trailing symlink if followsym. always follow
    // 'directory' symlinks.
    if (next && next->is_symlink()) {
      symlinks++;
      ldout(cct, 20) << " symlink count " << symlinks << ", value is '" << next->symlink << "'" << dendl;
      if (symlinks > MAXSYMLINKS) {
	return -CEPHFS_ELOOP;
      }

      if (i < path.depth() - 1) {
	// dir symlink
	// replace consumed components of path with symlink dir target
	filepath resolved(next->symlink.c_str());
	resolved.append(path.postfixpath(i + 1));
	path = resolved;
	i = 0;
	if (next->symlink[0] == '/') {
	  cur = root;
	}
	continue;
      } else if (followsym) {
	if (next->symlink[0] == '/') {
	  path = next->symlink.c_str();
	  i = 0;
	  // reset position
	  cur = root;
	} else {
	  filepath more(next->symlink.c_str());
	  // we need to remove the symlink component from the path before
	  // adding the target that the symlink points to. remain at the
	  // same position in the path.
	  path.pop_dentry();
	  path.append(more);
	}
	continue;
      }
    }
    cur.swap(next);
    i++;
  }
  if (!cur)
    return -CEPHFS_ENOENT;
  if (result) {
    result->in = std::move(cur);
    result->alternate_name = std::move(alternate_name);
  }
  return 0;
}

// namespace ops

int Client::link(const char *relexisting, const char *relpath, const UserPerm& perm, std::string alternate_name)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  tout(cct) << "link" << std::endl;
  tout(cct) << relexisting << std::endl;
  tout(cct) << relpath << std::endl;

  filepath existing(relexisting);

  InodeRef in, dir;

  std::scoped_lock lock(client_lock);
  int r = path_walk(existing, &in, perm, true);
  if (r < 0)
    return r;
  if (std::string(relpath) == "/") {
    r = -CEPHFS_EEXIST;
    return r;
  }
  filepath path(relpath);
  string name = path.last_dentry();
  path.pop_dentry();

  r = path_walk(path, &dir, perm, true);
  if (r < 0)
    return r;
  if (cct->_conf->client_permissions) {
    if (S_ISDIR(in->mode)) {
      r = -CEPHFS_EPERM;
      return r;
    }
    r = may_hardlink(in.get(), perm);
    if (r < 0)
      return r;
    r = may_create(dir.get(), perm);
    if (r < 0)
      return r;
  }
  r = _link(in.get(), dir.get(), name.c_str(), perm, std::move(alternate_name));
  return r;
}

int Client::unlink(const char *relpath, const UserPerm& perm)
{
  return unlinkat(CEPHFS_AT_FDCWD, relpath, 0, perm);
}

int Client::unlinkat(int dirfd, const char *relpath, int flags, const UserPerm& perm)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied()) {
    return -CEPHFS_ENOTCONN;
  }

  tout(cct) << __func__ << std::endl;
  tout(cct) << dirfd << std::endl;
  tout(cct) << relpath << std::endl;
  tout(cct) << flags << std::endl;

  if (std::string(relpath) == "/") {
    return flags & AT_REMOVEDIR ?
      -CEPHFS_EBUSY : -CEPHFS_EISDIR;
  }

  filepath path(relpath);
  string name = path.last_dentry();
  path.pop_dentry();
  InodeRef dir;

  std::scoped_lock lock(client_lock);

  InodeRef dirinode;
  int r = get_fd_inode(dirfd, &dirinode);
  if (r < 0) {
    return r;
  }

  r = path_walk(path, &dir, perm, true, 0, dirinode);
  if (r < 0) {
    return r;
  }

  if (cct->_conf->client_permissions) {
    r = may_delete(dir.get(), name.c_str(), perm);
    if (r < 0) {
      return r;
    }
  }

  if (flags & AT_REMOVEDIR) {
    r = _rmdir(dir.get(), name.c_str(), perm);
  } else {
    r = _unlink(dir.get(), name.c_str(), perm);
  }

  return r;
}

int Client::rename(const char *relfrom, const char *relto, const UserPerm& perm,
		   std::string alternate_name)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  tout(cct) << __func__ << std::endl;
  tout(cct) << relfrom << std::endl;
  tout(cct) << relto << std::endl;

  if (std::string(relfrom) == "/" || std::string(relto) == "/")
    return -CEPHFS_EBUSY;

  filepath from(relfrom);
  filepath to(relto);
  string fromname = from.last_dentry();
  from.pop_dentry();
  string toname = to.last_dentry();
  to.pop_dentry();

  InodeRef fromdir, todir;

  std::scoped_lock lock(client_lock);

  int r = path_walk(from, &fromdir, perm);
  if (r < 0)
    goto out;
  r = path_walk(to, &todir, perm);
  if (r < 0)
    goto out;

  if (cct->_conf->client_permissions) {
    int r = may_delete(fromdir.get(), fromname.c_str(), perm);
    if (r < 0)
      return r;
    r = may_delete(todir.get(), toname.c_str(), perm);
    if (r < 0 && r != -CEPHFS_ENOENT)
      return r;
  }

  r = _rename(fromdir.get(), fromname.c_str(), todir.get(), toname.c_str(), perm, std::move(alternate_name));
out:
  return r;
}

// dirs

int Client::mkdir(const char *relpath, mode_t mode, const UserPerm& perm,
		  std::string alternate_name)
{
  return mkdirat(CEPHFS_AT_FDCWD, relpath, mode, perm, alternate_name);
}

int Client::mkdirat(int dirfd, const char *relpath, mode_t mode, const UserPerm& perm,
		    std::string alternate_name)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  tout(cct) << __func__ << std::endl;
  tout(cct) << dirfd << std::endl;
  tout(cct) << relpath << std::endl;
  tout(cct) << mode << std::endl;

  ldout(cct, 10) << __func__ << ": " << relpath << dendl;

  if (std::string(relpath) == "/") {
    return -CEPHFS_EEXIST;
  }

  filepath path(relpath);
  string name = path.last_dentry();
  path.pop_dentry();
  InodeRef dir;

  std::scoped_lock lock(client_lock);

  InodeRef dirinode;
  int r = get_fd_inode(dirfd, &dirinode);
  if (r < 0) {
    return r;
  }

  r = path_walk(path, &dir, perm, true, 0, dirinode);
  if (r < 0) {
    return r;
  }
  if (cct->_conf->client_permissions) {
    r = may_create(dir.get(), perm);
    if (r < 0) {
      return r;
    }
  }
  return _mkdir(dir.get(), name.c_str(), mode, perm, 0, {}, std::move(alternate_name));
}

int Client::mkdirs(const char *relpath, mode_t mode, const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  ldout(cct, 10) << "Client::mkdirs " << relpath << dendl;
  tout(cct) << __func__ << std::endl;
  tout(cct) << relpath << std::endl;
  tout(cct) << mode << std::endl;

  //get through existing parts of path
  filepath path(relpath);
  unsigned int i;
  int r = 0, caps = 0;
  InodeRef cur, next;

  std::scoped_lock lock(client_lock);
  cur = cwd;
  for (i=0; i<path.depth(); ++i) {
    if (cct->_conf->client_permissions) {
      r = may_lookup(cur.get(), perms);
      if (r < 0)
	break;
      caps = CEPH_CAP_AUTH_SHARED;
    }
    r = _lookup(cur.get(), path[i].c_str(), caps, &next, perms);
    if (r < 0)
      break;
    cur.swap(next);
  }
  if (r!=-CEPHFS_ENOENT) return r;
  ldout(cct, 20) << __func__ << " got through " << i << " directories on path " << relpath << dendl;
  //make new directory at each level
  for (; i<path.depth(); ++i) {
    if (cct->_conf->client_permissions) {
      r = may_create(cur.get(), perms);
      if (r < 0)
	return r;
    }
    //make new dir
    r = _mkdir(cur.get(), path[i].c_str(), mode, perms, &next);

    //check proper creation/existence
    if (-CEPHFS_EEXIST == r && i < path.depth() - 1) {
      r = _lookup(cur.get(), path[i].c_str(), CEPH_CAP_AUTH_SHARED, &next, perms);
    }
    if (r < 0)
      return r;
    //move to new dir and continue
    cur.swap(next);
    ldout(cct, 20) << __func__ << ": successfully created directory "
		   << filepath(cur->ino).get_path() << dendl;
  }
  return 0;
}

int Client::rmdir(const char *relpath, const UserPerm& perms)
{
  return unlinkat(CEPHFS_AT_FDCWD, relpath, AT_REMOVEDIR, perms);
}

int Client::mknod(const char *relpath, mode_t mode, const UserPerm& perms, dev_t rdev)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  tout(cct) << __func__ << std::endl;
  tout(cct) << relpath << std::endl;
  tout(cct) << mode << std::endl;
  tout(cct) << rdev << std::endl;

  if (std::string(relpath) == "/")
    return -CEPHFS_EEXIST;

  filepath path(relpath);
  string name = path.last_dentry();
  path.pop_dentry();
  InodeRef dir;

  std::scoped_lock lock(client_lock);
  int r = path_walk(path, &dir, perms);
  if (r < 0)
    return r;
  if (cct->_conf->client_permissions) {
    int r = may_create(dir.get(), perms);
    if (r < 0)
      return r;
  }
  return _mknod(dir.get(), name.c_str(), mode, rdev, perms);
}

// symlinks

int Client::symlink(const char *target, const char *relpath, const UserPerm& perms, std::string alternate_name)
{
  return symlinkat(target, CEPHFS_AT_FDCWD, relpath, perms, alternate_name);
}

int Client::symlinkat(const char *target, int dirfd, const char *relpath, const UserPerm& perms,
		      std::string alternate_name)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied()) {
    return -CEPHFS_ENOTCONN;
  }

  tout(cct) << __func__ << std::endl;
  tout(cct) << target << std::endl;
  tout(cct) << dirfd << std::endl;
  tout(cct) << relpath << std::endl;

  if (std::string(relpath) == "/") {
    return -CEPHFS_EEXIST;
  }

  filepath path(relpath);
  string name = path.last_dentry();
  path.pop_dentry();
  InodeRef dir;

  std::scoped_lock lock(client_lock);

  InodeRef dirinode;
  int r = get_fd_inode(dirfd, &dirinode);
  if (r < 0) {
    return r;
  }
  r = path_walk(path, &dir, perms, true, 0, dirinode);
  if (r < 0) {
    return r;
  }
  if (cct->_conf->client_permissions) {
    int r = may_create(dir.get(), perms);
    if (r < 0) {
      return r;
    }
  }
  return _symlink(dir.get(), name.c_str(), target, perms, std::move(alternate_name));
}

int Client::readlink(const char *relpath, char *buf, loff_t size, const UserPerm& perms)
{
  return readlinkat(CEPHFS_AT_FDCWD, relpath, buf, size, perms);
}

int Client::readlinkat(int dirfd, const char *relpath, char *buf, loff_t size, const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied()) {
    return -CEPHFS_ENOTCONN;
  }

  tout(cct) << __func__ << std::endl;
  tout(cct) << dirfd << std::endl;
  tout(cct) << relpath << std::endl;

  InodeRef dirinode;
  std::scoped_lock lock(client_lock);
  int r = get_fd_inode(dirfd, &dirinode);
  if (r < 0) {
    return r;
  }

  InodeRef in;
  filepath path(relpath);
  r = path_walk(path, &in, perms, false, 0, dirinode);
  if (r < 0) {
    return r;
  }

  return _readlink(in.get(), buf, size);
}
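
/*
 * Usage sketch for readlink()/readlinkat() above (hypothetical path,
 * illustration only):
 *
 *   char buf[PATH_MAX];
 *   int n = client->readlink("/dir/alink", buf, sizeof(buf), perms);
 *   // n < 0  : error, e.g. -CEPHFS_EINVAL when the target is not a symlink
 *   // n >= 0 : bytes copied into buf; as with POSIX readlink(2), buf is
 *   //          not NUL-terminated and n == sizeof(buf) may mean truncation
 */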

int Client::_readlink(Inode *in, char *buf, size_t size)
{
  if (!in->is_symlink())
    return -CEPHFS_EINVAL;

  // copy into buf (at most size bytes)
  int r = in->symlink.length();
  if (r > (int)size)
    r = size;
  memcpy(buf, in->symlink.c_str(), r);
  return r;
}


// inode stuff

int Client::_getattr(Inode *in, int mask, const UserPerm& perms, bool force)
{
  bool yes = in->caps_issued_mask(mask, true);

  ldout(cct, 10) << __func__ << " mask " << ccap_string(mask) << " issued=" << yes << dendl;
  if (yes && !force)
    return 0;

  MetaRequest *req = new MetaRequest(CEPH_MDS_OP_GETATTR);
  filepath path;
  in->make_nosnap_relative_path(path);
  req->set_filepath(path);
  req->set_inode(in);
  req->head.args.getattr.mask = mask;

  int res = make_request(req, perms);
  ldout(cct, 10) << __func__ << " result=" << res << dendl;
  return res;
}

int Client::_getvxattr(
  Inode *in,
  const UserPerm& perms,
  const char *xattr_name,
  ssize_t size,
  void *value,
  mds_rank_t rank)
{
  if (!xattr_name || strlen(xattr_name) <= 0 || strlen(xattr_name) > 255) {
    return -CEPHFS_ENODATA;
  }

  MetaRequest *req = new MetaRequest(CEPH_MDS_OP_GETVXATTR);
  filepath path;
  in->make_nosnap_relative_path(path);
  req->set_filepath(path);
  req->set_inode(in);
  req->set_string2(xattr_name);

  bufferlist bl;
  int res = make_request(req, perms, nullptr, nullptr, rank, &bl,
			 CEPHFS_FEATURE_OP_GETVXATTR);
  ldout(cct, 10) << __func__ << " result=" << res << dendl;

  if (res < 0) {
    if (res == -CEPHFS_EOPNOTSUPP) {
      return -CEPHFS_ENODATA;
    }
    return res;
  }

  std::string buf;
  auto p = bl.cbegin();

  DECODE_START(1, p);
  decode(buf, p);
  DECODE_FINISH(p);

  ssize_t len = buf.length();

  res = len; // refer to man getxattr(2) for output buffer size == 0

  if (size > 0) {
    if (len > size) {
      res = -CEPHFS_ERANGE; // insufficient output buffer space
    } else {
      memcpy(value, buf.c_str(), len);
    }
  }
  return res;
}

int Client::_do_setattr(Inode *in, struct ceph_statx *stx, int mask,
			const UserPerm& perms, InodeRef *inp,
			std::vector<uint8_t>* aux)
{
  int issued = in->caps_issued();
  union ceph_mds_request_args args;
  bool kill_sguid = false;
  int inode_drop = 0;
  size_t auxsize = 0;

  if (aux)
    auxsize = aux->size();

  ldout(cct, 10) << __func__ << " mask " << mask << " issued " <<
    ccap_string(issued) << " aux size " << auxsize << dendl;

  if (in->snapid != CEPH_NOSNAP) {
    return -CEPHFS_EROFS;
  }
  if ((mask & CEPH_SETATTR_SIZE) &&
      (uint64_t)stx->stx_size > in->size &&
      is_quota_bytes_exceeded(in, (uint64_t)stx->stx_size - in->size,
			      perms)) {
    return -CEPHFS_EDQUOT;
  }

  // Can't set fscrypt_auth and file at the same time!
  if ((mask & (CEPH_SETATTR_FSCRYPT_AUTH|CEPH_SETATTR_FSCRYPT_FILE)) ==
      (CEPH_SETATTR_FSCRYPT_AUTH|CEPH_SETATTR_FSCRYPT_FILE))
    return -CEPHFS_EINVAL;

  if (!aux && (mask & (CEPH_SETATTR_FSCRYPT_AUTH|CEPH_SETATTR_FSCRYPT_FILE)))
    return -CEPHFS_EINVAL;

  memset(&args, 0, sizeof(args));

  // make the change locally?
  if ((in->cap_dirtier_uid >= 0 && perms.uid() != in->cap_dirtier_uid) ||
      (in->cap_dirtier_gid >= 0 && perms.gid() != in->cap_dirtier_gid)) {
    ldout(cct, 10) << __func__ << " caller " << perms.uid() << ":" << perms.gid()
		   << " != cap dirtier " << in->cap_dirtier_uid << ":"
		   << in->cap_dirtier_gid << ", forcing sync setattr"
		   << dendl;
    /*
     * This works because we implicitly flush the caps as part of the
     * request, so the cap update check will happen with the writeback
     * cap context, and then the setattr check will happen with the
     * caller's context.
     *
     * In reality this pattern is likely pretty rare (different users
     * setattr'ing the same file). If that turns out not to be the
     * case later, we can build a more complex pipelined cap writeback
     * infrastructure...
     */
    mask |= CEPH_SETATTR_CTIME;
  }

  if (!mask) {
    // caller just needs us to bump the ctime
    in->ctime = ceph_clock_now();
    in->cap_dirtier_uid = perms.uid();
    in->cap_dirtier_gid = perms.gid();
    if (issued & CEPH_CAP_AUTH_EXCL)
      in->mark_caps_dirty(CEPH_CAP_AUTH_EXCL);
    else if (issued & CEPH_CAP_FILE_EXCL)
      in->mark_caps_dirty(CEPH_CAP_FILE_EXCL);
    else if (issued & CEPH_CAP_XATTR_EXCL)
      in->mark_caps_dirty(CEPH_CAP_XATTR_EXCL);
    else
      mask |= CEPH_SETATTR_CTIME;
  }

  if (in->caps_issued_mask(CEPH_CAP_AUTH_EXCL)) {
    kill_sguid = !!(mask & CEPH_SETATTR_KILL_SGUID);
  }

  if (mask & CEPH_SETATTR_UID) {
    ldout(cct,10) << "changing uid to " << stx->stx_uid << dendl;

    if (in->caps_issued_mask(CEPH_CAP_AUTH_EXCL)) {
      in->ctime = ceph_clock_now();
      in->cap_dirtier_uid = perms.uid();
      in->cap_dirtier_gid = perms.gid();
      in->uid = stx->stx_uid;
      in->mark_caps_dirty(CEPH_CAP_AUTH_EXCL);
      mask &= ~CEPH_SETATTR_UID;
      kill_sguid = true;
    } else if (!in->caps_issued_mask(CEPH_CAP_AUTH_SHARED) ||
	       in->uid != stx->stx_uid) {
      args.setattr.uid = stx->stx_uid;
      inode_drop |= CEPH_CAP_AUTH_SHARED;
    } else {
      mask &= ~CEPH_SETATTR_UID;
    }
  }

  if (mask & CEPH_SETATTR_GID) {
    ldout(cct,10) << "changing gid to " << stx->stx_gid << dendl;

    if (in->caps_issued_mask(CEPH_CAP_AUTH_EXCL)) {
      in->ctime = ceph_clock_now();
      in->cap_dirtier_uid = perms.uid();
      in->cap_dirtier_gid = perms.gid();
      in->gid = stx->stx_gid;
      in->mark_caps_dirty(CEPH_CAP_AUTH_EXCL);
      mask &= ~CEPH_SETATTR_GID;
      kill_sguid = true;
    } else if (!in->caps_issued_mask(CEPH_CAP_AUTH_SHARED) ||
	       in->gid != stx->stx_gid) {
      args.setattr.gid = stx->stx_gid;
      inode_drop |= CEPH_CAP_AUTH_SHARED;
    } else {
      mask &= ~CEPH_SETATTR_GID;
    }
  }

  if (mask & CEPH_SETATTR_MODE) {
    ldout(cct,10) << "changing mode to " << stx->stx_mode << dendl;

    if (in->caps_issued_mask(CEPH_CAP_AUTH_EXCL)) {
      in->ctime = ceph_clock_now();
      in->cap_dirtier_uid = perms.uid();
      in->cap_dirtier_gid = perms.gid();
      in->mode = (in->mode & ~07777) | (stx->stx_mode & 07777);
      in->mark_caps_dirty(CEPH_CAP_AUTH_EXCL);
      mask &= ~CEPH_SETATTR_MODE;
    } else if (!in->caps_issued_mask(CEPH_CAP_AUTH_SHARED) ||
	       in->mode != stx->stx_mode) {
      args.setattr.mode = stx->stx_mode;
      inode_drop |= CEPH_CAP_AUTH_SHARED;
    } else {
      mask &= ~CEPH_SETATTR_MODE;
    }
  } else if (in->caps_issued_mask(CEPH_CAP_AUTH_EXCL) &&
	     S_ISREG(in->mode)) {
    if (kill_sguid && (in->mode & (S_IXUSR|S_IXGRP|S_IXOTH))) {
      in->mode &= ~(S_ISUID|S_ISGID);
    } else {
      if (mask & CEPH_SETATTR_KILL_SUID) {
	in->mode &= ~S_ISUID;
      }
      if (mask & CEPH_SETATTR_KILL_SGID) {
	in->mode &= ~S_ISGID;
      }
    }
    mask &= ~(CEPH_SETATTR_KILL_SGUID|CEPH_SETATTR_KILL_SUID|CEPH_SETATTR_KILL_SGID);
    in->mark_caps_dirty(CEPH_CAP_AUTH_EXCL);
  }

  if (mask & CEPH_SETATTR_BTIME) {
    ldout(cct,10) << "changing btime to " << in->btime << dendl;

    if (in->caps_issued_mask(CEPH_CAP_AUTH_EXCL)) {
      in->ctime = ceph_clock_now();
      in->cap_dirtier_uid = perms.uid();
      in->cap_dirtier_gid = perms.gid();
      in->btime = utime_t(stx->stx_btime);
      in->mark_caps_dirty(CEPH_CAP_AUTH_EXCL);
      mask &= ~CEPH_SETATTR_BTIME;
    } else if (!in->caps_issued_mask(CEPH_CAP_AUTH_SHARED) ||
	       in->btime != utime_t(stx->stx_btime)) {
      args.setattr.btime = utime_t(stx->stx_btime);
      inode_drop |= CEPH_CAP_AUTH_SHARED;
    } else {
      mask &= ~CEPH_SETATTR_BTIME;
    }
  }

  if (mask & CEPH_SETATTR_FSCRYPT_AUTH) {
    ldout(cct,10) << "resetting cached fscrypt_auth field. size now "
		  << in->fscrypt_auth.size() << dendl;

    if (in->caps_issued_mask(CEPH_CAP_AUTH_EXCL)) {
      in->ctime = ceph_clock_now();
      in->cap_dirtier_uid = perms.uid();
      in->cap_dirtier_gid = perms.gid();
      in->fscrypt_auth = *aux;
      in->mark_caps_dirty(CEPH_CAP_AUTH_EXCL);
      mask &= ~CEPH_SETATTR_FSCRYPT_AUTH;
    } else if (!in->caps_issued_mask(CEPH_CAP_AUTH_SHARED) ||
	       in->fscrypt_auth != *aux) {
      inode_drop |= CEPH_CAP_AUTH_SHARED;
    } else {
      mask &= ~CEPH_SETATTR_FSCRYPT_AUTH;
    }
  }

  if (mask & CEPH_SETATTR_SIZE) {
    if ((uint64_t)stx->stx_size >= mdsmap->get_max_filesize()) {
      //too big!
      ldout(cct,10) << "unable to set size to " << stx->stx_size << ". Too large!" << dendl;
      return -CEPHFS_EFBIG;
    }

    ldout(cct,10) << "changing size to " << stx->stx_size << dendl;
    if (in->caps_issued_mask(CEPH_CAP_FILE_EXCL) &&
	!(mask & CEPH_SETATTR_KILL_SGUID) &&
	stx->stx_size >= in->size) {
      if (stx->stx_size > in->size) {
	in->size = in->reported_size = stx->stx_size;
	in->cap_dirtier_uid = perms.uid();
	in->cap_dirtier_gid = perms.gid();
	in->mark_caps_dirty(CEPH_CAP_FILE_EXCL);
	mask &= ~(CEPH_SETATTR_SIZE);
	mask |= CEPH_SETATTR_MTIME;
      } else {
	// ignore it when size doesn't change
	mask &= ~(CEPH_SETATTR_SIZE);
      }
    } else {
      args.setattr.size = stx->stx_size;
      inode_drop |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
		    CEPH_CAP_FILE_WR;
    }
  }

  if (mask & CEPH_SETATTR_FSCRYPT_FILE) {
    ldout(cct,10) << "resetting cached fscrypt_file field. size now "
		  << in->fscrypt_file.size() << dendl;

    if (in->caps_issued_mask(CEPH_CAP_FILE_EXCL)) {
      in->ctime = ceph_clock_now();
      in->cap_dirtier_uid = perms.uid();
      in->cap_dirtier_gid = perms.gid();
      in->fscrypt_file = *aux;
      in->mark_caps_dirty(CEPH_CAP_FILE_EXCL);
      mask &= ~CEPH_SETATTR_FSCRYPT_FILE;
    } else if (!in->caps_issued_mask(CEPH_CAP_FILE_SHARED) ||
	       in->fscrypt_file != *aux) {
      inode_drop |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
    } else {
      mask &= ~CEPH_SETATTR_FSCRYPT_FILE;
    }
  }

  if (mask & CEPH_SETATTR_MTIME) {
    if (in->caps_issued_mask(CEPH_CAP_FILE_EXCL)) {
      in->mtime = utime_t(stx->stx_mtime);
      in->ctime = ceph_clock_now();
      in->cap_dirtier_uid = perms.uid();
      in->cap_dirtier_gid = perms.gid();
      in->time_warp_seq++;
      in->mark_caps_dirty(CEPH_CAP_FILE_EXCL);
      mask &= ~CEPH_SETATTR_MTIME;
    } else if (in->caps_issued_mask(CEPH_CAP_FILE_WR) &&
	       utime_t(stx->stx_mtime) > in->mtime) {
      in->mtime = utime_t(stx->stx_mtime);
      in->ctime = ceph_clock_now();
      in->cap_dirtier_uid = perms.uid();
      in->cap_dirtier_gid = perms.gid();
      in->mark_caps_dirty(CEPH_CAP_FILE_WR);
      mask &= ~CEPH_SETATTR_MTIME;
    } else if (!in->caps_issued_mask(CEPH_CAP_FILE_SHARED) ||
	       in->mtime != utime_t(stx->stx_mtime)) {
      args.setattr.mtime = utime_t(stx->stx_mtime);
      inode_drop |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
		    CEPH_CAP_FILE_WR;
    } else {
      mask &= ~CEPH_SETATTR_MTIME;
    }
  }

  if (mask & CEPH_SETATTR_ATIME) {
    if (in->caps_issued_mask(CEPH_CAP_FILE_EXCL)) {
      in->atime = utime_t(stx->stx_atime);
      in->ctime = ceph_clock_now();
      in->cap_dirtier_uid = perms.uid();
      in->cap_dirtier_gid = perms.gid();
      in->time_warp_seq++;
      in->mark_caps_dirty(CEPH_CAP_FILE_EXCL);
      mask &= ~CEPH_SETATTR_ATIME;
    } else if (in->caps_issued_mask(CEPH_CAP_FILE_WR) &&
	       utime_t(stx->stx_atime) > in->atime) {
      in->atime = utime_t(stx->stx_atime);
      in->ctime = ceph_clock_now();
      in->cap_dirtier_uid = perms.uid();
      in->cap_dirtier_gid = perms.gid();
      in->mark_caps_dirty(CEPH_CAP_FILE_WR);
      mask &= ~CEPH_SETATTR_ATIME;
    } else if (!in->caps_issued_mask(CEPH_CAP_FILE_SHARED) ||
	       in->atime != utime_t(stx->stx_atime)) {
      args.setattr.atime = utime_t(stx->stx_atime);
      inode_drop |=
CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR; } else { mask &= ~CEPH_SETATTR_ATIME; } } if (!mask) { in->change_attr++; if (in->is_dir() && in->snapid == CEPH_NOSNAP) { vinodeno_t vino(in->ino, CEPH_SNAPDIR); if (inode_map.count(vino)) { refresh_snapdir_attrs(inode_map[vino], in); } } return 0; } MetaRequest *req = new MetaRequest(CEPH_MDS_OP_SETATTR); filepath path; in->make_nosnap_relative_path(path); req->set_filepath(path); req->set_inode(in); req->head.args = args; req->inode_drop = inode_drop; if (mask & CEPH_SETATTR_FSCRYPT_AUTH) { req->fscrypt_auth = *aux; } else if (mask & CEPH_SETATTR_FSCRYPT_FILE) { req->fscrypt_file = *aux; } req->head.args.setattr.mask = mask; req->regetattr_mask = mask; int res = make_request(req, perms, inp); ldout(cct, 10) << "_setattr result=" << res << dendl; return res; } /* Note that we only care about attrs that setattr cares about */ void Client::stat_to_statx(struct stat *st, struct ceph_statx *stx) { stx->stx_size = st->st_size; stx->stx_mode = st->st_mode; stx->stx_uid = st->st_uid; stx->stx_gid = st->st_gid; #ifdef __APPLE__ stx->stx_mtime = st->st_mtimespec; stx->stx_atime = st->st_atimespec; #elif __WIN32 stx->stx_mtime.tv_sec = st->st_mtime; stx->stx_mtime.tv_nsec = 0; stx->stx_atime.tv_sec = st->st_atime; stx->stx_atime.tv_nsec = 0; #else stx->stx_mtime = st->st_mtim; stx->stx_atime = st->st_atim; #endif } int Client::__setattrx(Inode *in, struct ceph_statx *stx, int mask, const UserPerm& perms, InodeRef *inp) { if (mask & CEPH_SETATTR_SIZE) { mask |= clear_suid_sgid(in, perms, true); } int ret = _do_setattr(in, stx, mask, perms, inp); if (ret < 0) return ret; if (mask & CEPH_SETATTR_MODE) ret = _posix_acl_chmod(in, stx->stx_mode, perms); return ret; } int Client::_setattrx(InodeRef &in, struct ceph_statx *stx, int mask, const UserPerm& perms) { mask &= (CEPH_SETATTR_MODE | CEPH_SETATTR_UID | CEPH_SETATTR_GID | CEPH_SETATTR_MTIME | CEPH_SETATTR_ATIME | CEPH_SETATTR_SIZE | CEPH_SETATTR_CTIME | CEPH_SETATTR_BTIME); if (cct->_conf->client_permissions) { int r = may_setattr(in.get(), stx, mask, perms); if (r < 0) return r; } return __setattrx(in.get(), stx, mask, perms); } int Client::_setattr(InodeRef &in, struct stat *attr, int mask, const UserPerm& perms) { struct ceph_statx stx; stat_to_statx(attr, &stx); mask &= ~CEPH_SETATTR_BTIME; if ((mask & CEPH_SETATTR_UID) && attr->st_uid == static_cast<uid_t>(-1)) { mask &= ~CEPH_SETATTR_UID; } if ((mask & CEPH_SETATTR_GID) && attr->st_gid == static_cast<uid_t>(-1)) { mask &= ~CEPH_SETATTR_GID; } return _setattrx(in, &stx, mask, perms); } int Client::setattr(const char *relpath, struct stat *attr, int mask, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << std::endl; tout(cct) << relpath << std::endl; tout(cct) << mask << std::endl; filepath path(relpath); InodeRef in; std::scoped_lock lock(client_lock); int r = path_walk(path, &in, perms); if (r < 0) return r; return _setattr(in, attr, mask, perms); } int Client::setattrx(const char *relpath, struct ceph_statx *stx, int mask, const UserPerm& perms, int flags) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << std::endl; tout(cct) << relpath << std::endl; tout(cct) << mask << std::endl; filepath path(relpath); InodeRef in; std::scoped_lock lock(client_lock); int r = path_walk(path, &in, perms, !(flags & AT_SYMLINK_NOFOLLOW)); if (r < 0) 
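// [Illustrative aside added for exposition; not part of the original
// Client.cc. A minimal sketch of driving the setattr family defined above,
// assuming a mounted Client instance `client`, a UserPerm `perms`, and an
// existing file "/some/file" (all hypothetical names):]
//
//   struct ceph_statx stx = {};
//   stx.stx_mode = 0640;                  // new permission bits
//   stx.stx_uid  = 1000;                  // new owner
//   int r = client->setattrx("/some/file", &stx,
//                            CEPH_SETATTR_MODE | CEPH_SETATTR_UID,
//                            perms, 0);
//   // r == 0 on success, otherwise a negative CEPHFS_* error code.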
return r; return _setattrx(in, stx, mask, perms); } int Client::fsetattr(int fd, struct stat *attr, int mask, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << std::endl; tout(cct) << fd << std::endl; tout(cct) << mask << std::endl; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; #if defined(__linux__) && defined(O_PATH) if (f->flags & O_PATH) return -CEPHFS_EBADF; #endif return _setattr(f->inode, attr, mask, perms); } int Client::fsetattrx(int fd, struct ceph_statx *stx, int mask, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << std::endl; tout(cct) << fd << std::endl; tout(cct) << mask << std::endl; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; #if defined(__linux__) && defined(O_PATH) if (f->flags & O_PATH) return -CEPHFS_EBADF; #endif return _setattrx(f->inode, stx, mask, perms); } int Client::stat(const char *relpath, struct stat *stbuf, const UserPerm& perms, frag_info_t *dirstat, int mask) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; ldout(cct, 3) << __func__ << " enter (relpath " << relpath << " mask " << mask << ")" << dendl; tout(cct) << "stat" << std::endl; tout(cct) << relpath << std::endl; filepath path(relpath); InodeRef in; std::scoped_lock lock(client_lock); int r = path_walk(path, &in, perms, true, mask); if (r < 0) return r; r = _getattr(in, mask, perms); if (r < 0) { ldout(cct, 3) << __func__ << " exit on error!" << dendl; return r; } fill_stat(in, stbuf, dirstat); ldout(cct, 3) << __func__ << " exit (relpath " << relpath << " mask " << mask << ")" << dendl; return r; } unsigned Client::statx_to_mask(unsigned int flags, unsigned int want) { unsigned mask = 0; /* AT_STATX_FORCE_SYNC always takes precedence over AT_STATX_DONT_SYNC. 
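 * (Worked example added for exposition, not in the original comment: with
 * the default sync type, want = CEPH_STATX_MODE|CEPH_STATX_SIZE yields
 * mask = CEPH_CAP_PIN|CEPH_CAP_AUTH_SHARED|CEPH_CAP_FILE_SHARED, per the
 * mapping below.)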
*/ if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_DONT_SYNC) goto out; /* Always set PIN to distinguish from AT_STATX_DONT_SYNC case */ mask |= CEPH_CAP_PIN; if (want & (CEPH_STATX_MODE|CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_BTIME|CEPH_STATX_CTIME|CEPH_STATX_VERSION)) mask |= CEPH_CAP_AUTH_SHARED; if (want & (CEPH_STATX_NLINK|CEPH_STATX_CTIME|CEPH_STATX_VERSION)) mask |= CEPH_CAP_LINK_SHARED; if (want & (CEPH_STATX_NLINK|CEPH_STATX_ATIME|CEPH_STATX_MTIME|CEPH_STATX_CTIME|CEPH_STATX_SIZE|CEPH_STATX_BLOCKS|CEPH_STATX_VERSION)) mask |= CEPH_CAP_FILE_SHARED; if (want & (CEPH_STATX_VERSION|CEPH_STATX_CTIME)) mask |= CEPH_CAP_XATTR_SHARED; out: return mask; } int Client::statx(const char *relpath, struct ceph_statx *stx, const UserPerm& perms, unsigned int want, unsigned int flags) { return statxat(CEPHFS_AT_FDCWD, relpath, stx, perms, want, flags); } int Client::lstat(const char *relpath, struct stat *stbuf, const UserPerm& perms, frag_info_t *dirstat, int mask) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; ldout(cct, 3) << __func__ << " enter (relpath " << relpath << " mask " << mask << ")" << dendl; tout(cct) << __func__ << std::endl; tout(cct) << relpath << std::endl; filepath path(relpath); InodeRef in; std::scoped_lock lock(client_lock); // don't follow symlinks int r = path_walk(path, &in, perms, false, mask); if (r < 0) return r; r = _getattr(in, mask, perms); if (r < 0) { ldout(cct, 3) << __func__ << " exit on error!" << dendl; return r; } fill_stat(in, stbuf, dirstat); ldout(cct, 3) << __func__ << " exit (relpath " << relpath << " mask " << mask << ")" << dendl; return r; } int Client::fill_stat(Inode *in, struct stat *st, frag_info_t *dirstat, nest_info_t *rstat) { ldout(cct, 10) << __func__ << " on " << in->ino << " snap/dev" << in->snapid << " mode 0" << oct << in->mode << dec << " mtime " << in->mtime << " ctime " << in->ctime << dendl; memset(st, 0, sizeof(struct stat)); if (use_faked_inos()) st->st_ino = in->faked_ino; else st->st_ino = in->ino; st->st_dev = in->snapid; st->st_mode = in->mode; st->st_rdev = in->rdev; if (in->is_dir()) { switch (in->nlink) { case 0: st->st_nlink = 0; /* dir is unlinked */ break; case 1: st->st_nlink = 1 /* parent dentry */ + 1 /* <dir>/. */ + in->dirstat.nsubdirs; /* include <dir>/. self-reference */ break; default: ceph_abort(); } } else { st->st_nlink = in->nlink; } st->st_uid = in->uid; st->st_gid = in->gid; if (in->ctime > in->mtime) { stat_set_ctime_sec(st, in->ctime.sec()); stat_set_ctime_nsec(st, in->ctime.nsec()); } else { stat_set_ctime_sec(st, in->mtime.sec()); stat_set_ctime_nsec(st, in->mtime.nsec()); } stat_set_atime_sec(st, in->atime.sec()); stat_set_atime_nsec(st, in->atime.nsec()); stat_set_mtime_sec(st, in->mtime.sec()); stat_set_mtime_nsec(st, in->mtime.nsec()); if (in->is_dir()) { if (cct->_conf->client_dirsize_rbytes) { st->st_size = in->rstat.rbytes; } else if (in->snapid == CEPH_SNAPDIR) { SnapRealm *realm = get_snap_realm_maybe(in->vino().ino); if (realm) { st->st_size = realm->my_snaps.size(); put_snap_realm(realm); } } else { st->st_size = in->dirstat.size(); } // The Windows "stat" structure provides just a subset of the fields that are // available on Linux. 
#ifndef _WIN32 st->st_blocks = 1; #endif } else { st->st_size = in->size; #ifndef _WIN32 st->st_blocks = (in->size + 511) >> 9; #endif } #ifndef _WIN32 st->st_blksize = std::max<uint32_t>(in->layout.stripe_unit, 4096); #endif if (dirstat) *dirstat = in->dirstat; if (rstat) *rstat = in->rstat; return in->caps_issued(); } void Client::fill_statx(Inode *in, unsigned int mask, struct ceph_statx *stx) { ldout(cct, 10) << __func__ << " on " << in->ino << " snap/dev" << in->snapid << " mode 0" << oct << in->mode << dec << " mtime " << in->mtime << " ctime " << in->ctime << " change_attr " << in->change_attr << dendl; memset(stx, 0, sizeof(struct ceph_statx)); /* * If mask is 0, then the caller set AT_STATX_DONT_SYNC. Reset the mask * so that all bits are set. */ if (!mask) mask = ~0; /* These are always considered to be available */ stx->stx_dev = in->snapid; stx->stx_blksize = std::max<uint32_t>(in->layout.stripe_unit, 4096); /* Type bits are always set, even when CEPH_STATX_MODE is not */ stx->stx_mode = S_IFMT & in->mode; stx->stx_ino = use_faked_inos() ? in->faked_ino : (uint64_t)in->ino; stx->stx_rdev = in->rdev; stx->stx_mask |= (CEPH_STATX_INO|CEPH_STATX_RDEV); if (mask & CEPH_CAP_AUTH_SHARED) { stx->stx_uid = in->uid; stx->stx_gid = in->gid; stx->stx_mode = in->mode; in->btime.to_timespec(&stx->stx_btime); stx->stx_mask |= (CEPH_STATX_MODE|CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_BTIME); } if (mask & CEPH_CAP_LINK_SHARED) { if (in->is_dir()) { switch (in->nlink) { case 0: stx->stx_nlink = 0; /* dir is unlinked */ break; case 1: stx->stx_nlink = 1 /* parent dentry */ + 1 /* <dir>/. */ + in->dirstat.nsubdirs; /* include <dir>/. self-reference */ break; default: ceph_abort(); } } else { stx->stx_nlink = in->nlink; } stx->stx_mask |= CEPH_STATX_NLINK; } if (mask & CEPH_CAP_FILE_SHARED) { in->atime.to_timespec(&stx->stx_atime); in->mtime.to_timespec(&stx->stx_mtime); if (in->is_dir()) { if (cct->_conf->client_dirsize_rbytes) { stx->stx_size = in->rstat.rbytes; } else if (in->snapid == CEPH_SNAPDIR) { SnapRealm *realm = get_snap_realm_maybe(in->vino().ino); if (realm) { stx->stx_size = realm->my_snaps.size(); put_snap_realm(realm); } } else { stx->stx_size = in->dirstat.size(); } stx->stx_blocks = 1; } else { stx->stx_size = in->size; stx->stx_blocks = (in->size + 511) >> 9; } stx->stx_mask |= (CEPH_STATX_ATIME|CEPH_STATX_MTIME| CEPH_STATX_SIZE|CEPH_STATX_BLOCKS); } /* Change time and change_attr both require all shared caps to view */ if ((mask & CEPH_STAT_CAP_INODE_ALL) == CEPH_STAT_CAP_INODE_ALL) { stx->stx_version = in->change_attr; if (in->ctime > in->mtime) in->ctime.to_timespec(&stx->stx_ctime); else in->mtime.to_timespec(&stx->stx_ctime); stx->stx_mask |= (CEPH_STATX_CTIME|CEPH_STATX_VERSION); } } void Client::touch_dn(Dentry *dn) { lru.lru_touch(dn); } int Client::chmod(const char *relpath, mode_t mode, const UserPerm& perms) { return chmodat(CEPHFS_AT_FDCWD, relpath, mode, 0, perms); } int Client::fchmod(int fd, mode_t mode, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << std::endl; tout(cct) << fd << std::endl; tout(cct) << mode << std::endl; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; #if defined(__linux__) && defined(O_PATH) if (f->flags & O_PATH) return -CEPHFS_EBADF; #endif struct stat attr; attr.st_mode = mode; return _setattr(f->inode, &attr, CEPH_SETATTR_MODE, perms); } int Client::chmodat(int dirfd, const char 
*relpath, mode_t mode, int flags, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) { return -CEPHFS_ENOTCONN; } tout(cct) << __func__ << std::endl; tout(cct) << dirfd << std::endl; tout(cct) << relpath << std::endl; tout(cct) << mode << std::endl; tout(cct) << flags << std::endl; filepath path(relpath); InodeRef in; InodeRef dirinode; std::scoped_lock lock(client_lock); int r = get_fd_inode(dirfd, &dirinode); if (r < 0) { return r; } r = path_walk(path, &in, perms, !(flags & AT_SYMLINK_NOFOLLOW), 0, dirinode); if (r < 0) { return r; } struct stat attr; attr.st_mode = mode; return _setattr(in, &attr, CEPH_SETATTR_MODE, perms); } int Client::lchmod(const char *relpath, mode_t mode, const UserPerm& perms) { return chmodat(CEPHFS_AT_FDCWD, relpath, mode, AT_SYMLINK_NOFOLLOW, perms); } int Client::chown(const char *relpath, uid_t new_uid, gid_t new_gid, const UserPerm& perms) { return chownat(CEPHFS_AT_FDCWD, relpath, new_uid, new_gid, 0, perms); } int Client::fchown(int fd, uid_t new_uid, gid_t new_gid, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << std::endl; tout(cct) << fd << std::endl; tout(cct) << new_uid << std::endl; tout(cct) << new_gid << std::endl; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; #if defined(__linux__) && defined(O_PATH) if (f->flags & O_PATH) return -CEPHFS_EBADF; #endif struct stat attr; attr.st_uid = new_uid; attr.st_gid = new_gid; int mask = 0; if (new_uid != static_cast<uid_t>(-1)) mask |= CEPH_SETATTR_UID; if (new_gid != static_cast<gid_t>(-1)) mask |= CEPH_SETATTR_GID; return _setattr(f->inode, &attr, mask, perms); } int Client::lchown(const char *relpath, uid_t new_uid, gid_t new_gid, const UserPerm& perms) { return chownat(CEPHFS_AT_FDCWD, relpath, new_uid, new_gid, AT_SYMLINK_NOFOLLOW, perms); } int Client::chownat(int dirfd, const char *relpath, uid_t new_uid, gid_t new_gid, int flags, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) { return -CEPHFS_ENOTCONN; } tout(cct) << __func__ << std::endl; tout(cct) << dirfd << std::endl; tout(cct) << relpath << std::endl; tout(cct) << new_uid << std::endl; tout(cct) << new_gid << std::endl; tout(cct) << flags << std::endl; filepath path(relpath); InodeRef in; InodeRef dirinode; std::scoped_lock lock(client_lock); int r = get_fd_inode(dirfd, &dirinode); if (r < 0) { return r; } r = path_walk(path, &in, perms, !(flags & AT_SYMLINK_NOFOLLOW), 0, dirinode); if (r < 0) { return r; } struct stat attr; attr.st_uid = new_uid; attr.st_gid = new_gid; return _setattr(in, &attr, CEPH_SETATTR_UID|CEPH_SETATTR_GID, perms); } // for [l]utime(), invoke the timeval variant as the timespec // variants are not yet implemented. for futime[s](), invoke // the timespec variant. 
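// [Illustrative aside added for exposition; not part of the original
// Client.cc. A usage sketch for the wrappers below, assuming a mounted
// Client instance `client` and a UserPerm `perms` (hypothetical names):]
//
//   struct timespec ts[2];
//   ts[0].tv_sec = 1700000000; ts[0].tv_nsec = 0;   // new atime
//   ts[1].tv_sec = 1700000000; ts[1].tv_nsec = 0;   // new mtime
//   int r = client->utimensat(CEPHFS_AT_FDCWD, "some/file", ts, 0, perms);
//   // All of these wrappers funnel into _setattrx() with
//   // CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME, as the bodies below show.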
int Client::utime(const char *relpath, struct utimbuf *buf, const UserPerm& perms) { struct timeval tv[2]; tv[0].tv_sec = buf->actime; tv[0].tv_usec = 0; tv[1].tv_sec = buf->modtime; tv[1].tv_usec = 0; return utimes(relpath, tv, perms); } int Client::lutime(const char *relpath, struct utimbuf *buf, const UserPerm& perms) { struct timeval tv[2]; tv[0].tv_sec = buf->actime; tv[0].tv_usec = 0; tv[1].tv_sec = buf->modtime; tv[1].tv_usec = 0; return lutimes(relpath, tv, perms); } int Client::futime(int fd, struct utimbuf *buf, const UserPerm& perms) { struct timespec ts[2]; ts[0].tv_sec = buf->actime; ts[0].tv_nsec = 0; ts[1].tv_sec = buf->modtime; ts[1].tv_nsec = 0; return futimens(fd, ts, perms); } int Client::utimes(const char *relpath, struct timeval times[2], const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << std::endl; tout(cct) << relpath << std::endl; tout(cct) << "atime: " << times[0].tv_sec << "." << times[0].tv_usec << std::endl; tout(cct) << "mtime: " << times[1].tv_sec << "." << times[1].tv_usec << std::endl; filepath path(relpath); InodeRef in; std::scoped_lock lock(client_lock); int r = path_walk(path, &in, perms); if (r < 0) return r; struct ceph_statx attr; utime_t(times[0]).to_timespec(&attr.stx_atime); utime_t(times[1]).to_timespec(&attr.stx_mtime); return _setattrx(in, &attr, CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME, perms); } int Client::lutimes(const char *relpath, struct timeval times[2], const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << std::endl; tout(cct) << relpath << std::endl; tout(cct) << "atime: " << times[0].tv_sec << "." << times[0].tv_usec << std::endl; tout(cct) << "mtime: " << times[1].tv_sec << "." << times[1].tv_usec << std::endl; filepath path(relpath); InodeRef in; std::scoped_lock lock(client_lock); int r = path_walk(path, &in, perms, false); if (r < 0) return r; struct ceph_statx attr; utime_t(times[0]).to_timespec(&attr.stx_atime); utime_t(times[1]).to_timespec(&attr.stx_mtime); return _setattrx(in, &attr, CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME, perms); } int Client::futimes(int fd, struct timeval times[2], const UserPerm& perms) { struct timespec ts[2]; ts[0].tv_sec = times[0].tv_sec; ts[0].tv_nsec = times[0].tv_usec * 1000; ts[1].tv_sec = times[1].tv_sec; ts[1].tv_nsec = times[1].tv_usec * 1000; return futimens(fd, ts, perms); } int Client::futimens(int fd, struct timespec times[2], const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << std::endl; tout(cct) << fd << std::endl; tout(cct) << "atime: " << times[0].tv_sec << "." << times[0].tv_nsec << std::endl; tout(cct) << "mtime: " << times[1].tv_sec << "." 
<< times[1].tv_nsec << std::endl; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; #if defined(__linux__) && defined(O_PATH) if (f->flags & O_PATH) return -CEPHFS_EBADF; #endif struct ceph_statx attr; utime_t(times[0]).to_timespec(&attr.stx_atime); utime_t(times[1]).to_timespec(&attr.stx_mtime); return _setattrx(f->inode, &attr, CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME, perms); } int Client::utimensat(int dirfd, const char *relpath, struct timespec times[2], int flags, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) { return -CEPHFS_ENOTCONN; } tout(cct) << __func__ << std::endl; tout(cct) << dirfd << std::endl; tout(cct) << relpath << std::endl; tout(cct) << "atime: " << times[0].tv_sec << "." << times[0].tv_nsec << std::endl; tout(cct) << "mtime: " << times[1].tv_sec << "." << times[1].tv_nsec << std::endl; tout(cct) << flags << std::endl; filepath path(relpath); InodeRef in; InodeRef dirinode; std::scoped_lock lock(client_lock); int r = get_fd_inode(dirfd, &dirinode); if (r < 0) { return r; } #if defined(__linux__) && defined(O_PATH) if (flags & O_PATH) { return -CEPHFS_EBADF; } #endif r = path_walk(path, &in, perms, !(flags & AT_SYMLINK_NOFOLLOW), 0, dirinode); if (r < 0) { return r; } struct ceph_statx attr; utime_t(times[0]).to_timespec(&attr.stx_atime); utime_t(times[1]).to_timespec(&attr.stx_mtime); return _setattrx(in, &attr, CEPH_SETATTR_MTIME|CEPH_SETATTR_ATIME, perms); } int Client::flock(int fd, int operation, uint64_t owner) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << std::endl; tout(cct) << fd << std::endl; tout(cct) << operation << std::endl; tout(cct) << owner << std::endl; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; return _flock(f, operation, owner); } int Client::opendir(const char *relpath, dir_result_t **dirpp, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << std::endl; tout(cct) << relpath << std::endl; filepath path(relpath); InodeRef in; std::scoped_lock lock(client_lock); int r = path_walk(path, &in, perms, true); if (r < 0) return r; if (cct->_conf->client_permissions) { int r = may_open(in.get(), O_RDONLY, perms); if (r < 0) return r; } r = _opendir(in.get(), dirpp, perms); /* if ENOTDIR, dirpp will be an uninitialized pointer and it's very dangerous to access its value */ if (r != -CEPHFS_ENOTDIR) tout(cct) << (uintptr_t)*dirpp << std::endl; return r; } int Client::fdopendir(int dirfd, dir_result_t **dirpp, const UserPerm &perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) { return -CEPHFS_ENOTCONN; } tout(cct) << __func__ << std::endl; tout(cct) << dirfd << std::endl; InodeRef dirinode; std::scoped_lock locker(client_lock); int r = get_fd_inode(dirfd, &dirinode); if (r < 0) { return r; } if (cct->_conf->client_permissions) { r = may_open(dirinode.get(), O_RDONLY, perms); if (r < 0) { return r; } } r = _opendir(dirinode.get(), dirpp, perms); /* if ENOTDIR, dirpp will be an uninitialized pointer and it's very dangerous to access its value */ if (r != -CEPHFS_ENOTDIR) { tout(cct) << (uintptr_t)*dirpp << std::endl; } return r; } int Client::_opendir(Inode *in, dir_result_t **dirpp, const UserPerm& perms) { if (!in->is_dir()) return -CEPHFS_ENOTDIR; *dirpp 
= new dir_result_t(in, perms); opened_dirs.insert(*dirpp); ldout(cct, 8) << __func__ << "(" << in->ino << ") = " << 0 << " (" << *dirpp << ")" << dendl; return 0; } int Client::closedir(dir_result_t *dir) { tout(cct) << __func__ << std::endl; tout(cct) << (uintptr_t)dir << std::endl; ldout(cct, 3) << __func__ << "(" << dir << ") = 0" << dendl; std::scoped_lock lock(client_lock); _closedir(dir); return 0; } void Client::_closedir(dir_result_t *dirp) { ldout(cct, 10) << __func__ << "(" << dirp << ")" << dendl; if (dirp->inode) { ldout(cct, 10) << __func__ << " detaching inode " << dirp->inode << dendl; dirp->inode.reset(); } _readdir_drop_dirp_buffer(dirp); opened_dirs.erase(dirp); delete dirp; } void Client::rewinddir(dir_result_t *dirp) { ldout(cct, 3) << __func__ << "(" << dirp << ")" << dendl; RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return; std::scoped_lock lock(client_lock); dir_result_t *d = static_cast<dir_result_t*>(dirp); _readdir_drop_dirp_buffer(d); d->reset(); } loff_t Client::telldir(dir_result_t *dirp) { dir_result_t *d = static_cast<dir_result_t*>(dirp); ldout(cct, 3) << __func__ << "(" << dirp << ") = " << d->offset << dendl; return d->offset; } void Client::seekdir(dir_result_t *dirp, loff_t offset) { ldout(cct, 3) << __func__ << "(" << dirp << ", " << offset << ")" << dendl; RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return; std::scoped_lock lock(client_lock); if (offset == dirp->offset) return; if (offset > dirp->offset) dirp->release_count = 0; // bump if we do a forward seek else dirp->ordered_count = 0; // disable filling readdir cache if (dirp->hash_order()) { if (dirp->offset > offset) { _readdir_drop_dirp_buffer(dirp); dirp->reset(); } } else { if (offset == 0 || dirp->buffer_frag != frag_t(dir_result_t::fpos_high(offset)) || dirp->offset_low() > dir_result_t::fpos_low(offset)) { _readdir_drop_dirp_buffer(dirp); dirp->reset(); } } dirp->offset = offset; } //struct dirent { // ino_t d_ino; /* inode number */ // off_t d_off; /* offset to the next dirent */ // unsigned short d_reclen; /* length of this record */ // unsigned char d_type; /* type of file */ // char d_name[256]; /* filename */ //}; void Client::fill_dirent(struct dirent *de, const char *name, int type, uint64_t ino, loff_t next_off) { strncpy(de->d_name, name, 255); de->d_name[255] = '\0'; #if !defined(__CYGWIN__) && !(defined(_WIN32)) de->d_ino = ino; #if !defined(__APPLE__) && !defined(__FreeBSD__) de->d_off = next_off; #endif de->d_reclen = 1; de->d_type = IFTODT(type); ldout(cct, 10) << __func__ << " '" << de->d_name << "' -> " << inodeno_t(de->d_ino) << " type " << (int)de->d_type << " w/ next_off " << hex << next_off << dec << dendl; #endif } void Client::_readdir_next_frag(dir_result_t *dirp) { frag_t fg = dirp->buffer_frag; if (fg.is_rightmost()) { ldout(cct, 10) << __func__ << " advance from " << fg << " to END" << dendl; dirp->set_end(); return; } // advance fg = fg.next(); ldout(cct, 10) << __func__ << " advance from " << dirp->buffer_frag << " to " << fg << dendl; if (dirp->hash_order()) { // keep last_name int64_t new_offset = dir_result_t::make_fpos(fg.value(), 2, true); if (dirp->offset < new_offset) // don't decrease offset dirp->offset = new_offset; } else { dirp->last_name.clear(); dirp->offset = dir_result_t::make_fpos(fg, 2, false); _readdir_rechoose_frag(dirp); } } void Client::_readdir_rechoose_frag(dir_result_t *dirp) { ceph_assert(dirp->inode); if (dirp->hash_order()) return; frag_t cur 
= frag_t(dirp->offset_high()); frag_t fg = dirp->inode->dirfragtree[cur.value()]; if (fg != cur) { ldout(cct, 10) << __func__ << " frag " << cur << " maps to " << fg << dendl; dirp->offset = dir_result_t::make_fpos(fg, 2, false); dirp->last_name.clear(); dirp->next_offset = 2; } } void Client::_readdir_drop_dirp_buffer(dir_result_t *dirp) { ldout(cct, 10) << __func__ << " " << dirp << dendl; dirp->buffer.clear(); } int Client::_readdir_get_frag(int op, dir_result_t* dirp, fill_readdir_args_cb_t fill_req_cb) { ceph_assert(dirp); ceph_assert(dirp->inode); // get the current frag. frag_t fg; if (dirp->hash_order()) fg = dirp->inode->dirfragtree[dirp->offset_high()]; else fg = frag_t(dirp->offset_high()); ldout(cct, 10) << __func__ << " " << dirp << " on " << dirp->inode->ino << " fg " << fg << " offset " << hex << dirp->offset << dec << dendl; InodeRef& diri = dirp->inode; MetaRequest *req = new MetaRequest(op); fill_req_cb(dirp, req, diri, fg); bufferlist dirbl; int res = make_request(req, dirp->perms, NULL, NULL, -1, &dirbl); if (res == -CEPHFS_EAGAIN) { ldout(cct, 10) << __func__ << " got EAGAIN, retrying" << dendl; _readdir_rechoose_frag(dirp); return _readdir_get_frag(op, dirp, fill_req_cb); } if (res == 0) { ldout(cct, 10) << __func__ << " " << dirp << " got frag " << dirp->buffer_frag << " size " << dirp->buffer.size() << dendl; } else { ldout(cct, 10) << __func__ << " got error " << res << ", setting end flag" << dendl; dirp->set_end(); } return res; } struct dentry_off_lt { bool operator()(const Dentry* dn, int64_t off) const { return dir_result_t::fpos_cmp(dn->offset, off) < 0; } }; int Client::_readdir_cache_cb(dir_result_t *dirp, add_dirent_cb_t cb, void *p, int caps, bool getref) { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); ldout(cct, 10) << __func__ << " " << dirp << " on " << dirp->inode->ino << " last_name " << dirp->last_name << " offset " << hex << dirp->offset << dec << dendl; Dir *dir = dirp->inode->dir; if (!dir) { ldout(cct, 10) << " dir is empty" << dendl; dirp->set_end(); return 0; } vector<Dentry*>::iterator pd = std::lower_bound(dir->readdir_cache.begin(), dir->readdir_cache.end(), dirp->offset, dentry_off_lt()); string dn_name; while (true) { int mask = caps; if (!dirp->inode->is_complete_and_ordered()) return -CEPHFS_EAGAIN; if (pd == dir->readdir_cache.end()) break; Dentry *dn = *pd; if (dn->inode == NULL) { ldout(cct, 15) << " skipping null '" << dn->name << "'" << dendl; ++pd; continue; } if (dn->cap_shared_gen != dir->parent_inode->shared_gen) { ldout(cct, 15) << " skipping mismatch shared gen '" << dn->name << "'" << dendl; ++pd; continue; } int idx = pd - dir->readdir_cache.begin(); if (dn->inode->is_dir() && cct->_conf->client_dirsize_rbytes) { mask |= CEPH_STAT_RSTAT; } int r = _getattr(dn->inode, mask, dirp->perms); if (r < 0) return r; // the content of readdir_cache may change after _getattr(), so pd may be invalid iterator pd = dir->readdir_cache.begin() + idx; if (pd >= dir->readdir_cache.end() || *pd != dn) return -CEPHFS_EAGAIN; struct ceph_statx stx; struct dirent de; fill_statx(dn->inode, caps, &stx); uint64_t next_off = dn->offset + 1; fill_dirent(&de, dn->name.c_str(), stx.stx_mode, stx.stx_ino, next_off); ++pd; if (pd == dir->readdir_cache.end()) next_off = dir_result_t::END; Inode *in = NULL; if (getref) { in = dn->inode.get(); _ll_get(in); } dn_name = dn->name; // fill in name while we have lock client_lock.unlock(); r = cb(p, &de, &stx, next_off, in); // _next_ offset client_lock.lock(); ldout(cct, 15) << " de " << de.d_name << " 
off " << hex << dn->offset << dec << " = " << r << dendl; if (r < 0) { return r; } dirp->offset = next_off; if (dirp->at_end()) dirp->next_offset = 2; else dirp->next_offset = dirp->offset_low(); dirp->last_name = dn_name; // we successfully returned this one; update! dirp->release_count = 0; // last_name no longer match cache index if (r > 0) return r; } ldout(cct, 10) << __func__ << " " << dirp << " on " << dirp->inode->ino << " at end" << dendl; dirp->set_end(); return 0; } int Client::readdir_r_cb(dir_result_t* d, add_dirent_cb_t cb, void* p, unsigned want, unsigned flags, bool getref) { auto fill_readdir_cb = [](dir_result_t* dirp, MetaRequest* req, InodeRef& diri, frag_t fg) { filepath path; diri->make_nosnap_relative_path(path); req->set_filepath(path); req->set_inode(diri.get()); req->head.args.readdir.frag = fg; req->head.args.readdir.flags = CEPH_READDIR_REPLY_BITFLAGS; if (dirp->last_name.length()) { req->path2.set_path(dirp->last_name); } else if (dirp->hash_order()) { req->head.args.readdir.offset_hash = dirp->offset_high(); } req->dirp = dirp; }; int op = CEPH_MDS_OP_READDIR; if (d->inode && d->inode->snapid == CEPH_SNAPDIR) op = CEPH_MDS_OP_LSSNAP; return _readdir_r_cb(op, d, cb, fill_readdir_cb, p, want, flags, getref, false); } // // NB: this is used for both readdir and readdir_snapdiff results processing // hence it should be request type agnostic // int Client::_readdir_r_cb(int op, dir_result_t *d, add_dirent_cb_t cb, fill_readdir_args_cb_t fill_cb, void *p, unsigned want, unsigned flags, bool getref, bool bypass_cache) { int caps = statx_to_mask(flags, want); int rstat_on_dir = cct->_conf->client_dirsize_rbytes ? CEPH_STAT_RSTAT : 0; RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::unique_lock cl(client_lock); dir_result_t *dirp = static_cast<dir_result_t*>(d); ldout(cct, 10) << __func__ << " " << *dirp->inode << " offset " << hex << dirp->offset << dec << " at_end=" << dirp->at_end() << " hash_order=" << dirp->hash_order() << dendl; struct dirent de; struct ceph_statx stx; memset(&de, 0, sizeof(de)); memset(&stx, 0, sizeof(stx)); InodeRef& diri = dirp->inode; if (dirp->at_end()) return 0; if (dirp->offset == 0) { ldout(cct, 15) << " including ." << dendl; ceph_assert(diri->dentries.size() < 2); // can't have multiple hard-links to a dir uint64_t next_off = 1; int r; r = _getattr(diri, caps | rstat_on_dir, dirp->perms); if (r < 0) return r; fill_statx(diri, caps, &stx); fill_dirent(&de, ".", S_IFDIR, stx.stx_ino, next_off); Inode *inode = NULL; if (getref) { inode = diri.get(); _ll_get(inode); } cl.unlock(); r = cb(p, &de, &stx, next_off, inode); cl.lock(); if (r < 0) return r; dirp->offset = next_off; if (r > 0) return r; } if (dirp->offset == 1) { ldout(cct, 15) << " including .." << dendl; uint64_t next_off = 2; InodeRef in; if (diri->dentries.empty()) in = diri; else in = diri->get_first_parent()->dir->parent_inode; int r; r = _getattr(in, caps | rstat_on_dir, dirp->perms); if (r < 0) return r; fill_statx(in, caps, &stx); fill_dirent(&de, "..", S_IFDIR, stx.stx_ino, next_off); Inode *inode = NULL; if (getref) { inode = in.get(); _ll_get(inode); } cl.unlock(); r = cb(p, &de, &stx, next_off, inode); cl.lock(); if (r < 0) return r; dirp->offset = next_off; if (r > 0) return r; } // can we read from our cache? 
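// [Explanatory note added for exposition; not in the original source.]
// The cached path below is only taken when (a) this is not a snapdir or
// snapdiff listing, (b) the local Dir is still complete and ordered (no
// dentry trimmed or created since it was populated), and (c) we hold
// CEPH_CAP_FILE_SHARED, so the MDS would revoke it before letting another
// client change the directory. Otherwise we fetch dirfrags from the MDS.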
ldout(cct, 10) << __func__ << " offset " << hex << dirp->offset << dec << " snapid " << dirp->inode->snapid << " (complete && ordered) " << dirp->inode->is_complete_and_ordered() << " issued " << ccap_string(dirp->inode->caps_issued()) << dendl; if (!bypass_cache && dirp->inode->snapid != CEPH_SNAPDIR && dirp->inode->is_complete_and_ordered() && dirp->inode->caps_issued_mask(CEPH_CAP_FILE_SHARED, true)) { int err = _readdir_cache_cb(dirp, cb, p, caps, getref); if (err != -CEPHFS_EAGAIN) return err; } while (1) { if (dirp->at_end()) return 0; bool check_caps = true; if (!dirp->is_cached()) { int r = _readdir_get_frag(op, dirp, fill_cb); if (r) return r; // _readdir_get_frag() may update dirp->offset if the replied dirfrag is // different from the requested one. (our dirfragtree was outdated) check_caps = false; } frag_t fg = dirp->buffer_frag; ldout(cct, 10) << __func__ << " frag " << fg << " buffer size " << dirp->buffer.size() << " offset " << hex << dirp->offset << dendl; for (auto it = std::lower_bound(dirp->buffer.begin(), dirp->buffer.end(), dirp->offset, dir_result_t::dentry_off_lt()); it != dirp->buffer.end(); ++it) { dir_result_t::dentry &entry = *it; uint64_t next_off = entry.offset + 1; int r; if (check_caps) { int mask = caps; if (entry.inode->is_dir()) { mask |= rstat_on_dir; } r = _getattr(entry.inode, mask, dirp->perms); if (r < 0) return r; } fill_statx(entry.inode, caps, &stx); fill_dirent(&de, entry.name.c_str(), stx.stx_mode, stx.stx_ino, next_off); Inode *inode = NULL; if (getref) { inode = entry.inode.get(); _ll_get(inode); } cl.unlock(); r = cb(p, &de, &stx, next_off, inode); // _next_ offset cl.lock(); ldout(cct, 15) << __func__ << " de " << de.d_name << " off " << hex << next_off - 1 << dec << " snap " << entry.inode->snapid << " = " << r << dendl; if (r < 0) return r; dirp->offset = next_off; if (r > 0) return r; } if (dirp->next_offset > 2) { ldout(cct, 10) << " fetching next chunk of this frag" << dendl; _readdir_drop_dirp_buffer(dirp); continue; // more! } if (!fg.is_rightmost()) { // next frag! 
_readdir_next_frag(dirp); continue; } if (!bypass_cache && diri->shared_gen == dirp->start_shared_gen && diri->dir_release_count == dirp->release_count) { if (diri->dir_ordered_count == dirp->ordered_count) { ldout(cct, 10) << " marking (I_COMPLETE|I_DIR_ORDERED) on " << *diri << dendl; if (diri->dir) { ceph_assert(diri->dir->readdir_cache.size() >= dirp->cache_index); diri->dir->readdir_cache.resize(dirp->cache_index); } diri->flags |= I_COMPLETE | I_DIR_ORDERED; } else { ldout(cct, 10) << " marking I_COMPLETE on " << *diri << dendl; diri->flags |= I_COMPLETE; } } dirp->set_end(); return 0; } ceph_abort(); return 0; } int Client::readdir_r(dir_result_t *d, struct dirent *de) { return readdirplus_r(d, de, 0, 0, 0, NULL); } /* * readdirplus_r * * returns * 1 if we got a dirent * 0 for end of directory * <0 on error */ struct single_readdir { struct dirent *de; struct ceph_statx *stx; Inode *inode; bool full; }; static int _readdir_single_dirent_cb(void *p, struct dirent *de, struct ceph_statx *stx, off_t off, Inode *in) { single_readdir *c = static_cast<single_readdir *>(p); if (c->full) return -1; // already filled this dirent *c->de = *de; if (c->stx) *c->stx = *stx; c->inode = in; c->full = true; return 1; } struct dirent *Client::readdir(dir_result_t *d) { int ret; auto& de = d->de; single_readdir sr; sr.de = &de; sr.stx = NULL; sr.inode = NULL; sr.full = false; // our callback fills the dirent and sets sr.full=true on first // call, and returns -1 the second time around. ret = readdir_r_cb(d, _readdir_single_dirent_cb, (void *)&sr); if (ret < -1) { errno = -ret; // this sucks. return (dirent *) NULL; } if (sr.full) { return &de; } return (dirent *) NULL; } int Client::readdirplus_r(dir_result_t *d, struct dirent *de, struct ceph_statx *stx, unsigned want, unsigned flags, Inode **out) { single_readdir sr; sr.de = de; sr.stx = stx; sr.inode = NULL; sr.full = false; // our callback fills the dirent and sets sr.full=true on first // call, and returns -1 the second time around. int r = readdir_r_cb(d, _readdir_single_dirent_cb, (void *)&sr, want, flags, out); if (r < -1) return r; if (out) *out = sr.inode; if (sr.full) return 1; return 0; } int Client::readdir_snapdiff(dir_result_t* d1, snapid_t snap2, struct dirent* out_de, snapid_t* out_snap) { if (!d1 || !d1->inode || d1->inode->snapid == snap2) { lderr(cct) << __func__ << " invalid parameters: " << " d1:" << d1 << " d1->inode:" << (d1 ? d1->inode : nullptr) << " snap2 id :" << snap2 << dendl; errno = EINVAL; return -errno; } auto& de = d1->de; ceph_statx stx; single_readdir sr; sr.de = &de; sr.stx = &stx; sr.inode = NULL; sr.full = false; auto fill_snapdiff_cb = [&](dir_result_t* dirp, MetaRequest* req, InodeRef& diri, frag_t fg) { filepath path; diri->make_nosnap_relative_path(path); req->set_filepath(path); req->set_inode(diri.get()); req->head.args.snapdiff.snap_other = snap2; req->head.args.snapdiff.frag = fg; req->head.args.snapdiff.flags = CEPH_READDIR_REPLY_BITFLAGS; if (dirp->last_name.length()) { req->path2.set_path(dirp->last_name); } else if (dirp->hash_order()) { req->head.args.snapdiff.offset_hash = dirp->offset_high(); } req->dirp = dirp; }; // our callback fills the dirent and sets sr.full=true on first // call, and returns -1 the second time around. int ret = _readdir_r_cb(CEPH_MDS_OP_READDIR_SNAPDIFF, d1, _readdir_single_dirent_cb, fill_snapdiff_cb, (void*)&sr, 0, AT_STATX_DONT_SYNC, false, true); if (ret < -1) { lderr(cct) << __func__ << " error: " << cpp_strerror(ret) << dendl; errno = -ret; // this sucks. 
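// [Explanatory note added for exposition; not in the original source:
// like the dirent-returning readdir() wrapper above, this entry point
// also reports failures through errno in addition to the return value,
// since some POSIX-style callers only look there.]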
return ret; } ldout(cct, 15) << __func__ << " " << ret << " " << sr.de->d_name << " " << stx.stx_dev << dendl; if (sr.full) { if (out_de) { *out_de = de; } if (out_snap) { *out_snap = stx.stx_dev; } return 1; } return 0; } /* getdents */ struct getdents_result { char *buf; int buflen; int pos; bool fullent; }; static int _readdir_getdent_cb(void *p, struct dirent *de, struct ceph_statx *stx, off_t off, Inode *in) { struct getdents_result *c = static_cast<getdents_result *>(p); int dlen; if (c->fullent) dlen = sizeof(*de); else dlen = strlen(de->d_name) + 1; if (c->pos + dlen > c->buflen) return -1; // doesn't fit if (c->fullent) { memcpy(c->buf + c->pos, de, sizeof(*de)); } else { memcpy(c->buf + c->pos, de->d_name, dlen); } c->pos += dlen; return 0; } int Client::_getdents(dir_result_t *dir, char *buf, int buflen, bool fullent) { getdents_result gr; gr.buf = buf; gr.buflen = buflen; gr.fullent = fullent; gr.pos = 0; int r = readdir_r_cb(dir, _readdir_getdent_cb, (void *)&gr); if (r < 0) { // some error if (r == -1) { // buffer ran out of space if (gr.pos) { // but we got some entries already! return gr.pos; } // or we need a larger buffer return -CEPHFS_ERANGE; } else { // actual error, return it return r; } } return gr.pos; } /* getdir */ struct getdir_result { list<string> *contents; int num; }; static int _getdir_cb(void *p, struct dirent *de, struct ceph_statx *stx, off_t off, Inode *in) { getdir_result *r = static_cast<getdir_result *>(p); r->contents->push_back(de->d_name); r->num++; return 0; } int Client::getdir(const char *relpath, list<string>& contents, const UserPerm& perms) { ldout(cct, 3) << "getdir(" << relpath << ")" << dendl; tout(cct) << "getdir" << std::endl; tout(cct) << relpath << std::endl; dir_result_t *d; int r = opendir(relpath, &d, perms); if (r < 0) return r; getdir_result gr; gr.contents = &contents; gr.num = 0; r = readdir_r_cb(d, _getdir_cb, (void *)&gr); closedir(d); if (r < 0) return r; return gr.num; } /****** file i/o **********/ // common parts for open and openat. call with client_lock locked. int Client::create_and_open(int dirfd, const char *relpath, int flags, const UserPerm& perms, mode_t mode, int stripe_unit, int stripe_count, int object_size, const char *data_pool, std::string alternate_name) { ceph_assert(ceph_mutex_is_locked(client_lock)); int cflags = ceph_flags_sys2wire(flags); tout(cct) << cflags << std::endl; Fh *fh = NULL; #if defined(__linux__) && defined(O_PATH) /* When O_PATH is specified, flags other than O_DIRECTORY * and O_NOFOLLOW are ignored. Please refer to the do_entry_open() function * in the kernel (fs/open.c). */ if (flags & O_PATH) flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH; #endif filepath path(relpath); InodeRef in; bool created = false; /* O_CREAT with O_EXCL enforces O_NOFOLLOW. 
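 * (Worked example added for exposition, not in the original comment:
 * open("lnk", O_CREAT|O_EXCL, ...) on an existing symlink, even a dangling
 * one, must fail with EEXIST on the link itself rather than follow it or
 * create its target, which is why followsym is forced off below whenever
 * both flags are present.)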
*/ bool followsym = !((flags & O_NOFOLLOW) || ((flags & O_CREAT) && (flags & O_EXCL))); int mask = ceph_caps_for_mode(ceph_flags_to_mode(cflags)); InodeRef dirinode = nullptr; int r = get_fd_inode(dirfd, &dirinode); if (r < 0) { return r; } r = path_walk(path, &in, perms, followsym, mask, dirinode); if (r == 0 && (flags & O_CREAT) && (flags & O_EXCL)) return -CEPHFS_EEXIST; #if defined(__linux__) && defined(O_PATH) if (r == 0 && in->is_symlink() && (flags & O_NOFOLLOW) && !(flags & O_PATH)) #else if (r == 0 && in->is_symlink() && (flags & O_NOFOLLOW)) #endif return -CEPHFS_ELOOP; if (r == -CEPHFS_ENOENT && (flags & O_CREAT)) { filepath dirpath = path; string dname = dirpath.last_dentry(); dirpath.pop_dentry(); InodeRef dir; r = path_walk(dirpath, &dir, perms, true, cct->_conf->client_permissions ? CEPH_CAP_AUTH_SHARED : 0, dirinode); if (r < 0) { goto out; } if (cct->_conf->client_permissions) { r = may_create(dir.get(), perms); if (r < 0) goto out; } r = _create(dir.get(), dname.c_str(), flags, mode, &in, &fh, stripe_unit, stripe_count, object_size, data_pool, &created, perms, std::move(alternate_name)); } if (r < 0) goto out; if (!created) { // POSIX says we can only check permissions of existing files if (cct->_conf->client_permissions) { r = may_open(in.get(), flags, perms); if (r < 0) goto out; } } if (!fh) r = _open(in.get(), flags, mode, &fh, perms); if (r >= 0) { // allocate an integer file descriptor ceph_assert(fh); r = get_fd(); ceph_assert(fd_map.count(r) == 0); fd_map[r] = fh; } out: return r; } int Client::open(const char *relpath, int flags, const UserPerm& perms, mode_t mode, int stripe_unit, int stripe_count, int object_size, const char *data_pool, std::string alternate_name) { return openat(CEPHFS_AT_FDCWD, relpath, flags, perms, mode, stripe_unit, stripe_count, object_size, data_pool, alternate_name); } int Client::openat(int dirfd, const char *relpath, int flags, const UserPerm& perms, mode_t mode, int stripe_unit, int stripe_count, int object_size, const char *data_pool, std::string alternate_name) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) { return -CEPHFS_ENOTCONN; } ldout(cct, 3) << "openat enter(" << relpath << ")" << dendl; tout(cct) << dirfd << std::endl; tout(cct) << relpath << std::endl; tout(cct) << flags << std::endl; tout(cct) << mode << std::endl; std::scoped_lock locker(client_lock); int r = create_and_open(dirfd, relpath, flags, perms, mode, stripe_unit, stripe_count, object_size, data_pool, alternate_name); tout(cct) << r << std::endl; ldout(cct, 3) << "openat exit(" << relpath << ")" << dendl; return r; } int Client::lookup_hash(inodeno_t ino, inodeno_t dirino, const char *name, const UserPerm& perms) { ldout(cct, 3) << __func__ << " enter(" << ino << ", #" << dirino << "/" << name << ")" << dendl; RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPHASH); filepath path(ino); req->set_filepath(path); uint32_t h = ceph_str_hash(CEPH_STR_HASH_RJENKINS, name, strlen(name)); char f[30]; sprintf(f, "%u", h); filepath path2(dirino); path2.push_dentry(string(f)); req->set_filepath2(path2); int r = make_request(req, perms, NULL, NULL, rand() % mdsmap->get_num_in_mds()); ldout(cct, 3) << __func__ << " exit(" << ino << ", #" << dirino << "/" << name << ") = " << r << dendl; return r; } /** * Load inode into local cache. 
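 *
 * [Usage sketch added for exposition; not part of the original comment.
 * Assumes a mounted Client instance `client` and a UserPerm `perms`,
 * both hypothetical; the reference behaviour it relies on is described
 * just below:]
 *
 *   Inode *in = nullptr;
 *   int r = client->lookup_ino(inodeno_t(0x10000000000), perms, &in);
 *   if (r == 0) {
 *     // ... use in ..., then drop the reference taken via _ll_get()
 *     // (e.g. through the low-level ll_forget()/ll_put() paths).
 *   }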
* * If the inode pointer is non-NULL, also take a reference on * the resulting Inode object in one operation, so that the caller * can safely assume the inode will still be there after return. */ int Client::_lookup_vino(vinodeno_t vino, const UserPerm& perms, Inode **inode) { ldout(cct, 8) << __func__ << " enter(" << vino << ")" << dendl; RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; if (is_reserved_vino(vino)) return -CEPHFS_ESTALE; MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPINO); filepath path(vino.ino); req->set_filepath(path); /* * The MDS expects either a "real" snapid here or 0. The special value * carveouts for the snapid are all at the end of the range so we can * just look for any snapid below this value. */ if (vino.snapid < CEPH_NOSNAP) req->head.args.lookupino.snapid = vino.snapid; int r = make_request(req, perms, NULL, NULL, rand() % mdsmap->get_num_in_mds()); if (r == 0 && inode != NULL) { unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino); ceph_assert(p != inode_map.end()); *inode = p->second; _ll_get(*inode); } ldout(cct, 8) << __func__ << " exit(" << vino << ") = " << r << dendl; return r; } int Client::lookup_ino(inodeno_t ino, const UserPerm& perms, Inode **inode) { vinodeno_t vino(ino, CEPH_NOSNAP); std::scoped_lock lock(client_lock); return _lookup_vino(vino, perms, inode); } /** * Find the parent inode of `ino` and insert it into * our cache. Conditionally also set `parent` to a referenced * Inode* if caller provides non-NULL value. */ int Client::_lookup_parent(Inode *ino, const UserPerm& perms, Inode **parent) { ldout(cct, 8) << __func__ << " enter(" << ino->ino << ")" << dendl; MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPPARENT); filepath path(ino->ino); req->set_filepath(path); InodeRef target; int r = make_request(req, perms, &target, NULL, rand() % mdsmap->get_num_in_mds()); // Give caller a reference to the parent ino if they provided a pointer. if (parent != NULL) { if (r == 0) { *parent = target.get(); _ll_get(*parent); ldout(cct, 8) << __func__ << " found parent " << (*parent)->ino << dendl; } else { *parent = NULL; } } ldout(cct, 8) << __func__ << " exit(" << ino->ino << ") = " << r << dendl; return r; } /** * Populate the parent dentry for `ino`, provided it is * a child of `parent`. 
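 *
 * [Note added for exposition: this is the internal building block behind
 * lookup_name() just below; together with _lookup_parent() it lets a
 * caller who only has an inode (e.g. obtained via lookup_ino()) rebuild
 * a usable dentry chain for it.]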
*/ int Client::_lookup_name(Inode *ino, Inode *parent, const UserPerm& perms) { ceph_assert(parent->is_dir()); ldout(cct, 3) << __func__ << " enter(" << ino->ino << ")" << dendl; RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPNAME); req->set_filepath2(filepath(parent->ino)); req->set_filepath(filepath(ino->ino)); req->set_inode(ino); int r = make_request(req, perms, NULL, NULL, rand() % mdsmap->get_num_in_mds()); ldout(cct, 3) << __func__ << " exit(" << ino->ino << ") = " << r << dendl; return r; } int Client::lookup_name(Inode *ino, Inode *parent, const UserPerm& perms) { std::scoped_lock lock(client_lock); return _lookup_name(ino, parent, perms); } Fh *Client::_create_fh(Inode *in, int flags, int cmode, const UserPerm& perms) { ceph_assert(in); Fh *f = new Fh(in, flags, cmode, fd_gen, perms); ldout(cct, 10) << __func__ << " " << in->ino << " mode " << cmode << dendl; if (in->snapid != CEPH_NOSNAP) { in->snap_cap_refs++; ldout(cct, 5) << "open success, fh is " << f << " combined IMMUTABLE SNAP caps " << ccap_string(in->caps_issued()) << dendl; } const auto& conf = cct->_conf; f->readahead.set_trigger_requests(1); f->readahead.set_min_readahead_size(conf->client_readahead_min); uint64_t max_readahead = Readahead::NO_LIMIT; if (conf->client_readahead_max_bytes) { max_readahead = std::min(max_readahead, (uint64_t)conf->client_readahead_max_bytes); } if (conf->client_readahead_max_periods) { max_readahead = std::min(max_readahead, in->layout.get_period()*(uint64_t)conf->client_readahead_max_periods); } f->readahead.set_max_readahead_size(max_readahead); vector<uint64_t> alignments; alignments.push_back(in->layout.get_period()); alignments.push_back(in->layout.stripe_unit); f->readahead.set_alignments(alignments); return f; } int Client::_release_fh(Fh *f) { //ldout(cct, 3) << "op: client->close(open_files[ " << fh << " ]);" << dendl; //ldout(cct, 3) << "op: open_files.erase( " << fh << " );" << dendl; Inode *in = f->inode.get(); ldout(cct, 8) << __func__ << " " << f << " mode " << f->mode << " on " << *in << dendl; in->unset_deleg(f); if (in->snapid == CEPH_NOSNAP) { if (in->put_open_ref(f->mode)) { _flush(in, new C_Client_FlushComplete(this, in)); check_caps(in, 0); } } else { ceph_assert(in->snap_cap_refs > 0); in->snap_cap_refs--; } _release_filelocks(f); // Finally, read any async err (i.e. from flushes) int err = f->take_async_err(); if (err != 0) { ldout(cct, 1) << __func__ << " " << f << " on inode " << *in << " caught async_err = " << cpp_strerror(err) << dendl; } else { ldout(cct, 10) << __func__ << " " << f << " on inode " << *in << " no async_err state" << dendl; } _put_fh(f); return err; } void Client::_put_fh(Fh *f) { int left = f->put(); if (!left) { delete f; } } int Client::_open(Inode *in, int flags, mode_t mode, Fh **fhp, const UserPerm& perms) { if (in->snapid != CEPH_NOSNAP && (flags & (O_WRONLY | O_RDWR | O_CREAT | O_TRUNC | O_APPEND))) { return -CEPHFS_EROFS; } // use normalized flags to generate cmode int cflags = ceph_flags_sys2wire(flags); if (cct->_conf.get_val<bool>("client_force_lazyio")) cflags |= CEPH_O_LAZY; int cmode = ceph_flags_to_mode(cflags); int want = ceph_caps_for_mode(cmode); int result = 0; in->get_open_ref(cmode); // make note of pending open, since it affects _wanted_ caps. if ((flags & O_TRUNC) == 0 && in->caps_issued_mask(want)) { // update wanted? 
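// [Explanatory note added for exposition; not in the original source.]
// Worked example of the mapping above: an O_RDWR open gives
// cmode == CEPH_FILE_MODE_RDWR, and ceph_caps_for_mode() then wants (among
// others) the FILE_RD|FILE_CACHE|FILE_WR|FILE_BUFFER caps. If everything
// wanted is already issued and we are not truncating, no OPEN request is
// sent to the MDS; we only nudge the cap wants via check_caps() below.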
check_caps(in, CHECK_CAPS_NODELAY); } else { MetaRequest *req = new MetaRequest(CEPH_MDS_OP_OPEN); filepath path; in->make_nosnap_relative_path(path); req->set_filepath(path); req->head.args.open.flags = cflags & ~CEPH_O_CREAT; req->head.args.open.mode = mode; req->head.args.open.pool = -1; if (cct->_conf->client_debug_getattr_caps) req->head.args.open.mask = DEBUG_GETATTR_CAPS; else req->head.args.open.mask = 0; req->head.args.open.old_size = in->size; // for O_TRUNC req->set_inode(in); result = make_request(req, perms); /* * NFS expects that delegations will be broken on a conflicting open, * not just when there is actual conflicting access to the file. SMB leases * and oplocks also have similar semantics. * * Ensure that clients that have delegations enabled will wait on minimal * caps during open, just to ensure that other clients holding delegations * return theirs first. */ if (deleg_timeout && result == 0) { int need = 0, have; if (cmode & CEPH_FILE_MODE_WR) need |= CEPH_CAP_FILE_WR; if (cmode & CEPH_FILE_MODE_RD) need |= CEPH_CAP_FILE_RD; Fh fh(in, flags, cmode, fd_gen, perms); result = get_caps(&fh, need, want, &have, -1); if (result < 0) { ldout(cct, 8) << "Unable to get caps after open of inode " << *in << " . Denying open: " << cpp_strerror(result) << dendl; } else { put_cap_ref(in, need); } } } // success? if (result >= 0) { if (fhp) *fhp = _create_fh(in, flags, cmode, perms); } else { in->put_open_ref(cmode); } trim_cache(); return result; } int Client::_renew_caps(Inode *in) { int wanted = in->caps_file_wanted(); if (in->is_any_caps() && ((wanted & CEPH_CAP_ANY_WR) == 0 || in->auth_cap)) { check_caps(in, CHECK_CAPS_NODELAY); return 0; } int flags = 0; if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR)) flags = O_RDWR; else if (wanted & CEPH_CAP_FILE_RD) flags = O_RDONLY; else if (wanted & CEPH_CAP_FILE_WR) flags = O_WRONLY; MetaRequest *req = new MetaRequest(CEPH_MDS_OP_OPEN); filepath path; in->make_nosnap_relative_path(path); req->set_filepath(path); req->head.args.open.flags = flags; req->head.args.open.pool = -1; if (cct->_conf->client_debug_getattr_caps) req->head.args.open.mask = DEBUG_GETATTR_CAPS; else req->head.args.open.mask = 0; req->set_inode(in); // duplicate in case Cap goes away; not sure if that race is a concern? 
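// [Explanatory note added for exposition; not in the original source.]
// get_best_perms() returns a pointer into credential state owned by the
// inode's caps; copying it into a local UserPerm keeps it valid even if
// the underlying Cap is dropped while the OPEN request is in flight.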
const UserPerm *pperm = in->get_best_perms(); UserPerm perms; if (pperm != NULL) perms = *pperm; int ret = make_request(req, perms); return ret; } int Client::_close(int fd) { ldout(cct, 3) << "close enter(" << fd << ")" << dendl; tout(cct) << "close" << std::endl; tout(cct) << fd << std::endl; Fh *fh = get_filehandle(fd); if (!fh) return -CEPHFS_EBADF; int err = _release_fh(fh); fd_map.erase(fd); put_fd(fd); ldout(cct, 3) << "close exit(" << fd << ")" << dendl; return err; } int Client::close(int fd) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); return _close(fd); } // ------------ // read, write loff_t Client::lseek(int fd, loff_t offset, int whence) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << "lseek" << std::endl; tout(cct) << fd << std::endl; tout(cct) << offset << std::endl; tout(cct) << whence << std::endl; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; #if defined(__linux__) && defined(O_PATH) if (f->flags & O_PATH) return -CEPHFS_EBADF; #endif return _lseek(f, offset, whence); } loff_t Client::_lseek(Fh *f, loff_t offset, int whence) { Inode *in = f->inode.get(); bool whence_check = false; loff_t pos = -1; switch (whence) { case SEEK_END: whence_check = true; break; #ifdef SEEK_DATA case SEEK_DATA: whence_check = true; break; #endif #ifdef SEEK_HOLE case SEEK_HOLE: whence_check = true; break; #endif } if (whence_check) { int r = _getattr(in, CEPH_STAT_CAP_SIZE, f->actor_perms); if (r < 0) return r; } switch (whence) { case SEEK_SET: pos = offset; break; case SEEK_CUR: pos = f->pos + offset; break; case SEEK_END: pos = in->size + offset; break; #ifdef SEEK_DATA case SEEK_DATA: if (offset < 0 || static_cast<uint64_t>(offset) >= in->size) return -CEPHFS_ENXIO; pos = offset; break; #endif #ifdef SEEK_HOLE case SEEK_HOLE: if (offset < 0 || static_cast<uint64_t>(offset) >= in->size) return -CEPHFS_ENXIO; pos = in->size; break; #endif default: ldout(cct, 1) << __func__ << ": invalid whence value " << whence << dendl; return -CEPHFS_EINVAL; } if (pos < 0) { return -CEPHFS_EINVAL; } else { f->pos = pos; } ldout(cct, 8) << "_lseek(" << f << ", " << offset << ", " << whence << ") = " << f->pos << dendl; return f->pos; } void Client::lock_fh_pos(Fh *f) { ldout(cct, 10) << __func__ << " " << f << dendl; if (f->pos_locked || !f->pos_waiters.empty()) { ceph::condition_variable cond; f->pos_waiters.push_back(&cond); ldout(cct, 10) << __func__ << " BLOCKING on " << f << dendl; std::unique_lock l{client_lock, std::adopt_lock}; cond.wait(l, [f, me=&cond] { return !f->pos_locked && f->pos_waiters.front() == me; }); l.release(); ldout(cct, 10) << __func__ << " UNBLOCKING on " << f << dendl; ceph_assert(f->pos_waiters.front() == &cond); f->pos_waiters.pop_front(); } f->pos_locked = true; } void Client::unlock_fh_pos(Fh *f) { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); ldout(cct, 10) << __func__ << " " << f << dendl; f->pos_locked = false; if (!f->pos_waiters.empty()) { // only wake up the oldest waiter auto cond = f->pos_waiters.front(); cond->notify_one(); } } int Client::uninline_data(Inode *in, Context *onfinish) { if (!in->inline_data.length()) { onfinish->complete(0); return 0; } char oid_buf[32]; snprintf(oid_buf, sizeof(oid_buf), "%llx.00000000", (long long unsigned)in->ino); object_t oid = oid_buf; ObjectOperation create_ops; 
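// [Explanatory note added for exposition; not in the original source.]
// Uninlining happens in two steps against the file's first object:
// (1) create the object if it does not exist yet (non-exclusive create),
// then (2) write the inline payload at offset 0, guarded by a cmpxattr on
// "inline_version" so that a racing uninliner carrying newer data cannot
// be clobbered by an older one.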
create_ops.create(false); objecter->mutate(oid, OSDMap::file_to_object_locator(in->layout), create_ops, in->snaprealm->get_snap_context(), ceph::real_clock::now(), 0, NULL); bufferlist inline_version_bl; encode(in->inline_version, inline_version_bl); ObjectOperation uninline_ops; uninline_ops.cmpxattr("inline_version", CEPH_OSD_CMPXATTR_OP_GT, CEPH_OSD_CMPXATTR_MODE_U64, inline_version_bl); bufferlist inline_data = in->inline_data; uninline_ops.write(0, inline_data, in->truncate_size, in->truncate_seq); uninline_ops.setxattr("inline_version", stringify(in->inline_version)); objecter->mutate(oid, OSDMap::file_to_object_locator(in->layout), uninline_ops, in->snaprealm->get_snap_context(), ceph::real_clock::now(), 0, onfinish); return 0; } // // blocking osd interface int Client::read(int fd, char *buf, loff_t size, loff_t offset) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << "read" << std::endl; tout(cct) << fd << std::endl; tout(cct) << size << std::endl; tout(cct) << offset << std::endl; std::unique_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; #if defined(__linux__) && defined(O_PATH) if (f->flags & O_PATH) return -CEPHFS_EBADF; #endif bufferlist bl; /* We can't return more bytes than INT_MAX, so clamp size to that */ size = std::min(size, (loff_t)INT_MAX); int r = _read(f, offset, size, &bl); ldout(cct, 3) << "read(" << fd << ", " << (void*)buf << ", " << size << ", " << offset << ") = " << r << dendl; if (r >= 0) { lock.unlock(); bl.begin().copy(bl.length(), buf); r = bl.length(); } return r; } int Client::preadv(int fd, const struct iovec *iov, int iovcnt, loff_t offset) { if (iovcnt < 0) return -CEPHFS_EINVAL; return _preadv_pwritev(fd, iov, iovcnt, offset, false); } int64_t Client::_read(Fh *f, int64_t offset, uint64_t size, bufferlist *bl) { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); int want, have = 0; bool movepos = false; int64_t rc = 0; const auto& conf = cct->_conf; Inode *in = f->inode.get(); utime_t lat; utime_t start = ceph_clock_now(); if ((f->mode & CEPH_FILE_MODE_RD) == 0) return -CEPHFS_EBADF; //bool lazy = f->mode == CEPH_FILE_MODE_LAZY; if (offset < 0) { lock_fh_pos(f); offset = f->pos; movepos = true; } loff_t start_pos = offset; if (in->inline_version == 0) { auto r = _getattr(in, CEPH_STAT_CAP_INLINE_DATA, f->actor_perms, true); if (r < 0) { rc = r; goto done; } ceph_assert(in->inline_version > 0); } retry: if (f->mode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; else want = CEPH_CAP_FILE_CACHE; { auto r = get_caps(f, CEPH_CAP_FILE_RD, want, &have, -1); if (r < 0) { rc = r; goto done; } } if (f->flags & O_DIRECT) have &= ~(CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO); if (in->inline_version < CEPH_INLINE_NONE) { uint32_t len = in->inline_data.length(); uint64_t endoff = offset + size; if (endoff > in->size) endoff = in->size; if (offset < len) { if (endoff <= len) { bl->substr_of(in->inline_data, offset, endoff - offset); } else { bl->substr_of(in->inline_data, offset, len - offset); bl->append_zero(endoff - len); } rc = endoff - offset; } else if ((uint64_t)offset < endoff) { bl->append_zero(endoff - offset); rc = endoff - offset; } else { rc = 0; } goto success; } if (!conf->client_debug_force_sync_read && conf->client_oc && (have & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO))) { if (f->flags & O_RSYNC) { _flush_range(in, offset, size); } rc = _read_async(f, offset, size, bl); if (rc < 0) goto done; 
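// [Explanatory note added for exposition; not in the original source.]
// The branch above is the buffered path: ObjectCacher may satisfy the
// read from memory and drive readahead. The else-branch below is the
// synchronous path, used when the object cache is disabled, sync reads
// are forced for debugging, or we do not hold Fc (FILE_CACHE) / LAZYIO.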
} else { if (f->flags & O_DIRECT) _flush_range(in, offset, size); bool checkeof = false; rc = _read_sync(f, offset, size, bl, &checkeof); if (rc < 0) goto done; if (checkeof) { offset += rc; size -= rc; put_cap_ref(in, CEPH_CAP_FILE_RD); have = 0; // reverify size { auto r = _getattr(in, CEPH_STAT_CAP_SIZE, f->actor_perms); if (r < 0) { rc = r; goto done; } } // eof? short read. if ((uint64_t)offset < in->size) goto retry; } } success: ceph_assert(rc >= 0); update_read_io_size(bl->length()); if (movepos) { // adjust fd pos f->pos = start_pos + rc; } lat = ceph_clock_now(); lat -= start; ++nr_read_request; update_io_stat_read(lat); done: // done! if (have) { put_cap_ref(in, CEPH_CAP_FILE_RD); } if (movepos) { unlock_fh_pos(f); } return rc; } Client::C_Readahead::C_Readahead(Client *c, Fh *f) : client(c), f(f) { f->get(); f->readahead.inc_pending(); } Client::C_Readahead::~C_Readahead() { f->readahead.dec_pending(); client->_put_fh(f); } void Client::C_Readahead::finish(int r) { lgeneric_subdout(client->cct, client, 20) << "client." << client->get_nodeid() << " " << "C_Readahead on " << f->inode << dendl; client->put_cap_ref(f->inode.get(), CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE); if (r > 0) { client->update_read_io_size(r); } } int Client::_read_async(Fh *f, uint64_t off, uint64_t len, bufferlist *bl) { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); const auto& conf = cct->_conf; Inode *in = f->inode.get(); ldout(cct, 10) << __func__ << " " << *in << " " << off << "~" << len << dendl; // trim read based on file size? if (off >= in->size) return 0; if (len == 0) return 0; if (off + len > in->size) { len = in->size - off; } ldout(cct, 10) << " min_bytes=" << f->readahead.get_min_readahead_size() << " max_bytes=" << f->readahead.get_max_readahead_size() << " max_periods=" << conf->client_readahead_max_periods << dendl; // read (and possibly block) int r = 0; C_SaferCond onfinish("Client::_read_async flock"); r = objectcacher->file_read(&in->oset, &in->layout, in->snapid, off, len, bl, 0, &onfinish); if (r == 0) { get_cap_ref(in, CEPH_CAP_FILE_CACHE); client_lock.unlock(); r = onfinish.wait(); client_lock.lock(); put_cap_ref(in, CEPH_CAP_FILE_CACHE); update_read_io_size(bl->length()); } if(f->readahead.get_min_readahead_size() > 0) { pair<uint64_t, uint64_t> readahead_extent = f->readahead.update(off, len, in->size); if (readahead_extent.second > 0) { ldout(cct, 20) << "readahead " << readahead_extent.first << "~" << readahead_extent.second << " (caller wants " << off << "~" << len << ")" << dendl; Context *onfinish2 = new C_Readahead(this, f); int r2 = objectcacher->file_read(&in->oset, &in->layout, in->snapid, readahead_extent.first, readahead_extent.second, NULL, 0, onfinish2); if (r2 == 0) { ldout(cct, 20) << "readahead initiated, c " << onfinish2 << dendl; get_cap_ref(in, CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE); } else { ldout(cct, 20) << "readahead was no-op, already cached" << dendl; delete onfinish2; } } } return r; } int Client::_read_sync(Fh *f, uint64_t off, uint64_t len, bufferlist *bl, bool *checkeof) { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); Inode *in = f->inode.get(); uint64_t pos = off; int left = len; int read = 0; ldout(cct, 10) << __func__ << " " << *in << " " << off << "~" << len << dendl; // 0 success, 1 continue and < 0 error happen. 
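  // (Added illustration of that convention: e.g. a read spanning two
  //  objects where the second object does not exist yet -- the OSD
  //  returns -ENOENT, which is treated as a hole and mapped to 0 bytes;
  //  the short read is then zero-filled up to the known EOF, and a
  //  return of 1 tells the loop below to issue the next
  //  filer->read_trunc().)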
auto wait_and_copy = [&](C_SaferCond &onfinish, bufferlist &tbl, int wanted) { int r = onfinish.wait(); // if we get ENOENT from OSD, assume 0 bytes returned if (r == -CEPHFS_ENOENT) r = 0; if (r < 0) return r; if (tbl.length()) { r = tbl.length(); read += r; pos += r; left -= r; bl->claim_append(tbl); } // short read? if (r >= 0 && r < wanted) { if (pos < in->size) { // zero up to known EOF int64_t some = in->size - pos; if (some > left) some = left; auto z = buffer::ptr_node::create(some); z->zero(); bl->push_back(std::move(z)); read += some; pos += some; left -= some; if (left == 0) return 0; } *checkeof = true; return 0; } return 1; }; while (left > 0) { C_SaferCond onfinish("Client::_read_sync flock"); bufferlist tbl; int wanted = left; filer->read_trunc(in->ino, &in->layout, in->snapid, pos, left, &tbl, 0, in->truncate_size, in->truncate_seq, &onfinish); client_lock.unlock(); int r = wait_and_copy(onfinish, tbl, wanted); client_lock.lock(); if (!r) return read; if (r < 0) return r; } return read; } int Client::write(int fd, const char *buf, loff_t size, loff_t offset) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << "write" << std::endl; tout(cct) << fd << std::endl; tout(cct) << size << std::endl; tout(cct) << offset << std::endl; std::scoped_lock lock(client_lock); Fh *fh = get_filehandle(fd); if (!fh) return -CEPHFS_EBADF; #if defined(__linux__) && defined(O_PATH) if (fh->flags & O_PATH) return -CEPHFS_EBADF; #endif /* We can't return bytes written larger than INT_MAX, clamp size to that */ size = std::min(size, (loff_t)INT_MAX); int r = _write(fh, offset, size, buf, NULL, false); ldout(cct, 3) << "write(" << fd << ", \"...\", " << size << ", " << offset << ") = " << r << dendl; return r; } int Client::pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) { if (iovcnt < 0) return -CEPHFS_EINVAL; return _preadv_pwritev(fd, iov, iovcnt, offset, true); } int64_t Client::_preadv_pwritev_locked(Fh *fh, const struct iovec *iov, unsigned iovcnt, int64_t offset, bool write, bool clamp_to_int) { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); #if defined(__linux__) && defined(O_PATH) if (fh->flags & O_PATH) return -CEPHFS_EBADF; #endif loff_t totallen = 0; for (unsigned i = 0; i < iovcnt; i++) { totallen += iov[i].iov_len; } /* * Some of the API functions take 64-bit size values, but only return * 32-bit signed integers. Clamp the I/O sizes in those functions so that * we don't do I/Os larger than the values we can return. 
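 *
 * For example (added illustration): a preadv() whose iovecs total
 * 3 GiB would be clamped to INT_MAX here, since the result has to fit
 * in the 32-bit return value; the caller sees a short read or write
 * and is expected to retry for the remainder.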
*/ if (clamp_to_int) { totallen = std::min(totallen, (loff_t)INT_MAX); } if (write) { int64_t w = _write(fh, offset, totallen, NULL, iov, iovcnt); ldout(cct, 3) << "pwritev(" << fh << ", \"...\", " << totallen << ", " << offset << ") = " << w << dendl; return w; } else { bufferlist bl; int64_t r = _read(fh, offset, totallen, &bl); ldout(cct, 3) << "preadv(" << fh << ", " << offset << ") = " << r << dendl; if (r <= 0) return r; client_lock.unlock(); auto iter = bl.cbegin(); for (unsigned j = 0, resid = r; j < iovcnt && resid > 0; j++) { /* * This piece of code aims to handle the case that bufferlist * does not have enough data to fill in the iov */ const auto round_size = std::min<unsigned>(resid, iov[j].iov_len); iter.copy(round_size, reinterpret_cast<char*>(iov[j].iov_base)); resid -= round_size; /* iter is self-updating */ } client_lock.lock(); return r; } } int Client::_preadv_pwritev(int fd, const struct iovec *iov, unsigned iovcnt, int64_t offset, bool write) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << fd << std::endl; tout(cct) << offset << std::endl; std::scoped_lock cl(client_lock); Fh *fh = get_filehandle(fd); if (!fh) return -CEPHFS_EBADF; return _preadv_pwritev_locked(fh, iov, iovcnt, offset, write, true); } int64_t Client::_write(Fh *f, int64_t offset, uint64_t size, const char *buf, const struct iovec *iov, int iovcnt) { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); uint64_t fpos = 0; Inode *in = f->inode.get(); if ( (uint64_t)(offset+size) > mdsmap->get_max_filesize() && //exceeds config (uint64_t)(offset+size) > in->size ) { //exceeds filesize return -CEPHFS_EFBIG; } //ldout(cct, 7) << "write fh " << fh << " size " << size << " offset " << offset << dendl; if (objecter->osdmap_pool_full(in->layout.pool_id)) { return -CEPHFS_ENOSPC; } ceph_assert(in->snapid == CEPH_NOSNAP); // was Fh opened as writeable? if ((f->mode & CEPH_FILE_MODE_WR) == 0) return -CEPHFS_EBADF; // use/adjust fd pos? if (offset < 0) { lock_fh_pos(f); /* * FIXME: this is racy in that we may block _after_ this point waiting for caps, and size may * change out from under us. */ if (f->flags & O_APPEND) { auto r = _lseek(f, 0, SEEK_END); if (r < 0) { unlock_fh_pos(f); return r; } } offset = f->pos; fpos = offset+size; unlock_fh_pos(f); } // check quota uint64_t endoff = offset + size; if (endoff > in->size && is_quota_bytes_exceeded(in, endoff - in->size, f->actor_perms)) { return -CEPHFS_EDQUOT; } //bool lazy = f->mode == CEPH_FILE_MODE_LAZY; ldout(cct, 10) << "cur file size is " << in->size << dendl; // time it. 
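  // (Added note: the start/lat pair measured here feeds
  //  nr_write_request and update_io_stat_write() at the end of this
  //  function, i.e. the per-client write latency statistics.)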
utime_t start = ceph_clock_now(); if (in->inline_version == 0) { int r = _getattr(in, CEPH_STAT_CAP_INLINE_DATA, f->actor_perms, true); if (r < 0) return r; ceph_assert(in->inline_version > 0); } // copy into fresh buffer (since our write may be resub, async) bufferlist bl; if (buf) { if (size > 0) bl.append(buf, size); } else if (iov){ for (int i = 0; i < iovcnt; i++) { if (iov[i].iov_len > 0) { bl.append((const char *)iov[i].iov_base, iov[i].iov_len); } } } utime_t lat; uint64_t totalwritten; int want, have; if (f->mode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; else want = CEPH_CAP_FILE_BUFFER; int r = get_caps(f, CEPH_CAP_FILE_WR|CEPH_CAP_AUTH_SHARED, want, &have, endoff); if (r < 0) return r; put_cap_ref(in, CEPH_CAP_AUTH_SHARED); if (size > 0) { r = clear_suid_sgid(in, f->actor_perms); if (r < 0) { put_cap_ref(in, CEPH_CAP_FILE_WR); return r; } } if (f->flags & O_DIRECT) have &= ~(CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO); ldout(cct, 10) << " snaprealm " << *in->snaprealm << dendl; std::unique_ptr<C_SaferCond> onuninline = nullptr; if (in->inline_version < CEPH_INLINE_NONE) { if (endoff > cct->_conf->client_max_inline_size || endoff > CEPH_INLINE_MAX_SIZE || !(have & CEPH_CAP_FILE_BUFFER)) { onuninline.reset(new C_SaferCond("Client::_write_uninline_data flock")); uninline_data(in, onuninline.get()); } else { get_cap_ref(in, CEPH_CAP_FILE_BUFFER); uint32_t len = in->inline_data.length(); if (endoff < len) in->inline_data.begin(endoff).copy(len - endoff, bl); // XXX if (offset < len) in->inline_data.splice(offset, len - offset); else if (offset > len) in->inline_data.append_zero(offset - len); in->inline_data.append(bl); in->inline_version++; put_cap_ref(in, CEPH_CAP_FILE_BUFFER); goto success; } } if (cct->_conf->client_oc && (have & (CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO))) { // do buffered write if (!in->oset.dirty_or_tx) get_cap_ref(in, CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_BUFFER); get_cap_ref(in, CEPH_CAP_FILE_BUFFER); // async, caching, non-blocking. r = objectcacher->file_write(&in->oset, &in->layout, in->snaprealm->get_snap_context(), offset, size, bl, ceph::real_clock::now(), 0); put_cap_ref(in, CEPH_CAP_FILE_BUFFER); if (r < 0) goto done; // flush cached write if O_SYNC is set on file fh // O_DSYNC == O_SYNC on linux < 2.6.33 // O_SYNC = __O_SYNC | O_DSYNC on linux >= 2.6.33 if ((f->flags & O_SYNC) || (f->flags & O_DSYNC)) { _flush_range(in, offset, size); } } else { if (f->flags & O_DIRECT) _flush_range(in, offset, size); // simple, non-atomic sync write C_SaferCond onfinish("Client::_write flock"); get_cap_ref(in, CEPH_CAP_FILE_BUFFER); filer->write_trunc(in->ino, &in->layout, in->snaprealm->get_snap_context(), offset, size, bl, ceph::real_clock::now(), 0, in->truncate_size, in->truncate_seq, &onfinish); client_lock.unlock(); r = onfinish.wait(); client_lock.lock(); put_cap_ref(in, CEPH_CAP_FILE_BUFFER); if (r < 0) goto done; } // if we get here, write was successful, update client metadata success: update_write_io_size(size); // time lat = ceph_clock_now(); lat -= start; ++nr_write_request; update_io_stat_write(lat); if (fpos) { lock_fh_pos(f); f->pos = fpos; unlock_fh_pos(f); } totalwritten = size; r = (int64_t)totalwritten; // extend file? 
  if (totalwritten + offset > in->size) {
    in->size = totalwritten + offset;
    in->mark_caps_dirty(CEPH_CAP_FILE_WR);

    if (is_quota_bytes_approaching(in, f->actor_perms)) {
      check_caps(in, CHECK_CAPS_NODELAY);
    } else if (is_max_size_approaching(in)) {
      check_caps(in, 0);
    }

    ldout(cct, 7) << "wrote to " << totalwritten+offset << ", extending file size" << dendl;
  } else {
    ldout(cct, 7) << "wrote to " << totalwritten+offset << ", leaving file size at " << in->size << dendl;
  }

  // mtime
  in->mtime = in->ctime = ceph_clock_now();
  in->change_attr++;
  in->mark_caps_dirty(CEPH_CAP_FILE_WR);

done:

  if (nullptr != onuninline) {
    client_lock.unlock();
    int uninline_ret = onuninline->wait();
    client_lock.lock();

    if (uninline_ret >= 0 || uninline_ret == -CEPHFS_ECANCELED) {
      in->inline_data.clear();
      in->inline_version = CEPH_INLINE_NONE;
      in->mark_caps_dirty(CEPH_CAP_FILE_WR);
      check_caps(in, 0);
    } else
      r = uninline_ret;
  }

  put_cap_ref(in, CEPH_CAP_FILE_WR);
  return r;
}

int Client::_flush(Fh *f)
{
  Inode *in = f->inode.get();
  int err = f->take_async_err();
  if (err != 0) {
    ldout(cct, 1) << __func__ << ": " << f << " on inode " << *in
                  << " caught async_err = " << cpp_strerror(err) << dendl;
  } else {
    ldout(cct, 10) << __func__ << ": " << f << " on inode " << *in
                   << " no async_err state" << dendl;
  }
  return err;
}

int Client::truncate(const char *relpath, loff_t length, const UserPerm& perms)
{
  struct ceph_statx stx;

  stx.stx_size = length;
  return setattrx(relpath, &stx, CEPH_SETATTR_SIZE, perms);
}

int Client::ftruncate(int fd, loff_t length, const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  tout(cct) << __func__ << std::endl;
  tout(cct) << fd << std::endl;
  tout(cct) << length << std::endl;

  std::scoped_lock lock(client_lock);
  Fh *f = get_filehandle(fd);
  if (!f)
    return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
  if (f->flags & O_PATH)
    return -CEPHFS_EBADF;
#endif
  if ((f->mode & CEPH_FILE_MODE_WR) == 0)
    return -CEPHFS_EBADF;
  struct stat attr;
  attr.st_size = length;
  return _setattr(f->inode, &attr, CEPH_SETATTR_SIZE, perms);
}

int Client::fsync(int fd, bool syncdataonly)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  tout(cct) << "fsync" << std::endl;
  tout(cct) << fd << std::endl;
  tout(cct) << syncdataonly << std::endl;

  std::scoped_lock lock(client_lock);
  Fh *f = get_filehandle(fd);
  if (!f)
    return -CEPHFS_EBADF;
#if defined(__linux__) && defined(O_PATH)
  if (f->flags & O_PATH)
    return -CEPHFS_EBADF;
#endif
  int r = _fsync(f, syncdataonly);
  if (r == 0) {
    // The IOs in this fsync were okay, but maybe something happened
    // in the background that we should be reporting?
    r = f->take_async_err();
    ldout(cct, 5) << "fsync(" << fd << ", " << syncdataonly
                  << ") = 0, async_err = " << r << dendl;
  } else {
    // Assume that an error we encountered during fsync, even reported
    // synchronously, would also have applied the error to the Fh, and we
    // should clear it here to avoid returning the same error again on next
    // call.
    ldout(cct, 5) << "fsync(" << fd << ", " << syncdataonly << ") = "
                  << r << dendl;
    f->take_async_err();
  }
  return r;
}

int Client::_fsync(Inode *in, bool syncdataonly)
{
  ceph_assert(ceph_mutex_is_locked_by_me(client_lock));

  int r = 0;
  std::unique_ptr<C_SaferCond> object_cacher_completion = nullptr;
  ceph_tid_t flush_tid = 0;
  InodeRef tmp_ref;
  utime_t lat;
  utime_t start = ceph_clock_now();

  ldout(cct, 8) << "_fsync on " << *in << " " << (syncdataonly ?
"(dataonly)":"(data+metadata)") << dendl; if (cct->_conf->client_oc) { object_cacher_completion.reset(new C_SaferCond("Client::_fsync::lock")); tmp_ref = in; // take a reference; C_SaferCond doesn't and _flush won't either _flush(in, object_cacher_completion.get()); ldout(cct, 15) << "using return-valued form of _fsync" << dendl; } if (!syncdataonly && in->dirty_caps) { check_caps(in, CHECK_CAPS_NODELAY|CHECK_CAPS_SYNCHRONOUS); if (in->flushing_caps) flush_tid = last_flush_tid; } else ldout(cct, 10) << "no metadata needs to commit" << dendl; if (!syncdataonly && !in->unsafe_ops.empty()) { flush_mdlog_sync(in); MetaRequest *req = in->unsafe_ops.back(); ldout(cct, 15) << "waiting on unsafe requests, last tid " << req->get_tid() << dendl; req->get(); wait_on_list(req->waitfor_safe); put_request(req); } if (nullptr != object_cacher_completion) { // wait on a real reply instead of guessing client_lock.unlock(); ldout(cct, 15) << "waiting on data to flush" << dendl; r = object_cacher_completion->wait(); client_lock.lock(); ldout(cct, 15) << "got " << r << " from flush writeback" << dendl; } else { // FIXME: this can starve while (in->cap_refs[CEPH_CAP_FILE_BUFFER] > 0) { ldout(cct, 10) << "ino " << in->ino << " has " << in->cap_refs[CEPH_CAP_FILE_BUFFER] << " uncommitted, waiting" << dendl; wait_on_list(in->waitfor_commit); } } if (!r) { if (flush_tid > 0) wait_sync_caps(in, flush_tid); ldout(cct, 10) << "ino " << in->ino << " has no uncommitted writes" << dendl; } else { ldout(cct, 8) << "ino " << in->ino << " failed to commit to disk! " << cpp_strerror(-r) << dendl; } lat = ceph_clock_now(); lat -= start; logger->tinc(l_c_fsync, lat); return r; } int Client::_fsync(Fh *f, bool syncdataonly) { ldout(cct, 8) << "_fsync(" << f << ", " << (syncdataonly ? "dataonly)":"data+metadata)") << dendl; return _fsync(f->inode.get(), syncdataonly); } int Client::fstat(int fd, struct stat *stbuf, const UserPerm& perms, int mask) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << "fstat mask " << hex << mask << dec << std::endl; tout(cct) << fd << std::endl; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; int r = _getattr(f->inode, mask, perms); if (r < 0) return r; fill_stat(f->inode, stbuf, NULL); ldout(cct, 5) << "fstat(" << fd << ", " << stbuf << ") = " << r << dendl; return r; } int Client::fstatx(int fd, struct ceph_statx *stx, const UserPerm& perms, unsigned int want, unsigned int flags) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << "fstatx flags " << hex << flags << " want " << want << dec << std::endl; tout(cct) << fd << std::endl; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; unsigned mask = statx_to_mask(flags, want); int r = 0; if (mask) { r = _getattr(f->inode, mask, perms); if (r < 0) { ldout(cct, 3) << "fstatx exit on error!" 
        << dendl;
      return r;
    }
  }

  fill_statx(f->inode, mask, stx);
  ldout(cct, 3) << "fstatx(" << fd << ", " << stx << ") = " << r << dendl;
  return r;
}

int Client::statxat(int dirfd, const char *relpath,
                    struct ceph_statx *stx, const UserPerm& perms,
                    unsigned int want, unsigned int flags) {
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied()) {
    return -CEPHFS_ENOTCONN;
  }

  tout(cct) << __func__ << " flags " << hex << flags << " want " << want << dec << std::endl;
  tout(cct) << dirfd << std::endl;
  tout(cct) << relpath << std::endl;

  unsigned mask = statx_to_mask(flags, want);

  InodeRef dirinode;
  std::scoped_lock lock(client_lock);
  int r = get_fd_inode(dirfd, &dirinode);
  if (r < 0) {
    return r;
  }

  InodeRef in;
  filepath path(relpath);
  r = path_walk(path, &in, perms, !(flags & AT_SYMLINK_NOFOLLOW), mask, dirinode);
  if (r < 0) {
    return r;
  }
  r = _getattr(in, mask, perms);
  if (r < 0) {
    ldout(cct, 3) << __func__ << " exit on error!" << dendl;
    return r;
  }

  fill_statx(in, mask, stx);
  ldout(cct, 3) << __func__ << " dirfd " << dirfd << ", r = " << r << dendl;
  return r;
}

// not written yet, but i want to link!

int Client::chdir(const char *relpath, std::string &new_cwd,
                  const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  tout(cct) << "chdir" << std::endl;
  tout(cct) << relpath << std::endl;

  filepath path(relpath);
  InodeRef in;

  std::scoped_lock lock(client_lock);
  int r = path_walk(path, &in, perms);
  if (r < 0)
    return r;

  if (!(in.get()->is_dir()))
    return -CEPHFS_ENOTDIR;

  if (cwd != in)
    cwd.swap(in);
  ldout(cct, 3) << "chdir(" << relpath << ") cwd now " << cwd->ino << dendl;

  _getcwd(new_cwd, perms);
  return 0;
}

void Client::_getcwd(string& dir, const UserPerm& perms)
{
  filepath path;
  ldout(cct, 10) << __func__ << " " << *cwd << dendl;

  Inode *in = cwd.get();
  while (in != root.get()) {
    ceph_assert(in->dentries.size() < 2); // dirs can't be hard-linked

    // A cwd or ancestor is unlinked
    if (in->dentries.empty()) {
      return;
    }

    Dentry *dn = in->get_first_parent();
    if (!dn) {
      // look it up
      ldout(cct, 10) << __func__ << " looking up parent for " << *in << dendl;
      MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LOOKUPNAME);
      filepath path(in->ino);
      req->set_filepath(path);
      req->set_inode(in);
      int res = make_request(req, perms);
      if (res < 0)
        break;

      // start over
      path = filepath();
      in = cwd.get();
      continue;
    }
    path.push_front_dentry(dn->name);
    in = dn->dir->parent_inode;
  }
  dir = "/";
  dir += path.get_path();
}

void Client::getcwd(string& dir, const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return;

  std::scoped_lock l(client_lock);

  _getcwd(dir, perms);
}

int Client::statfs(const char *path, struct statvfs *stbuf,
                   const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  tout(cct) << __func__ << std::endl;
  unsigned long int total_files_on_fs;

  ceph_statfs stats;
  C_SaferCond cond;

  std::unique_lock lock(client_lock);
  const vector<int64_t> &data_pools = mdsmap->get_data_pools();
  if (data_pools.size() == 1) {
    objecter->get_fs_stats(stats, data_pools[0], &cond);
  } else {
    objecter->get_fs_stats(stats, std::optional<int64_t>(), &cond);
  }

  lock.unlock();
  int rval = cond.wait();
  lock.lock();

  ceph_assert(root);
  total_files_on_fs = root->rstat.rfiles + root->rstat.rsubdirs;

  if (rval < 0) {
    ldout(cct, 1) << "underlying call to statfs returned error: "
                  << cpp_strerror(rval)
                  << dendl;
    return
rval; } memset(stbuf, 0, sizeof(*stbuf)); /* * we're going to set a block size of 4MB so we can represent larger * FSes without overflowing. Additionally convert the space * measurements from KB to bytes while making them in terms of * blocks. We use 4MB only because it is big enough, and because it * actually *is* the (ceph) default block size. */ stbuf->f_frsize = CEPH_4M_BLOCK_SIZE; stbuf->f_files = total_files_on_fs; stbuf->f_ffree = -1; stbuf->f_favail = -1; stbuf->f_fsid = -1; // ?? stbuf->f_flag = 0; // ?? stbuf->f_namemax = NAME_MAX; // Usually quota_root will == root_ancestor, but if the mount root has no // quota but we can see a parent of it that does have a quota, we'll // respect that one instead. ceph_assert(root != nullptr); InodeRef quota_root = root->quota.is_enabled(QUOTA_MAX_BYTES) ? root : get_quota_root(root.get(), perms, QUOTA_MAX_BYTES); // get_quota_root should always give us something if client quotas are // enabled ceph_assert(cct->_conf.get_val<bool>("client_quota") == false || quota_root != nullptr); /* If bytes quota is set on a directory and conf option "client quota df" * is also set, available space = quota limit - used space. Else, * available space = total space - used space. */ if (quota_root && cct->_conf->client_quota_df && quota_root->quota.max_bytes) { // Skip the getattr if any sessions are stale, as we don't want to // block `df` if this client has e.g. been evicted, or if the MDS cluster // is unhealthy. if (!_any_stale_sessions()) { int r = _getattr(quota_root, 0, perms, true); if (r != 0) { // Ignore return value: error getting latest inode metadata is not a good // reason to break "df". lderr(cct) << "Error in getattr on quota root 0x" << std::hex << quota_root->ino << std::dec << " statfs result may be outdated" << dendl; } } // Special case: if there is a size quota set on the Inode acting // as the root for this client mount, then report the quota status // as the filesystem statistics. fsblkcnt_t total = quota_root->quota.max_bytes >> CEPH_4M_BLOCK_SHIFT; const fsblkcnt_t used = quota_root->rstat.rbytes >> CEPH_4M_BLOCK_SHIFT; // It is possible for a quota to be exceeded: arithmetic here must // handle case where used > total. fsblkcnt_t free = total > used ? total - used : 0; // For quota size less than 4KB, report the total=used=4KB,free=0 // when quota is full and total=free=4KB, used=0 otherwise. if (!total) { total = 1; free = quota_root->quota.max_bytes > quota_root->rstat.rbytes ? 1 : 0; stbuf->f_frsize = CEPH_4K_BLOCK_SIZE; } stbuf->f_blocks = total; stbuf->f_bfree = free; stbuf->f_bavail = free; } else { // General case: report the cluster statistics returned from RADOS. Because // multiple pools may be used without one filesystem namespace via // layouts, this is the most correct thing we can do. stbuf->f_blocks = stats.kb >> CEPH_4K_BLOCK_SHIFT; stbuf->f_bfree = stats.kb_avail >> CEPH_4K_BLOCK_SHIFT; stbuf->f_bavail = stats.kb_avail >> CEPH_4K_BLOCK_SHIFT; } stbuf->f_bsize = stbuf->f_frsize; return rval; } int Client::_do_filelock(Inode *in, Fh *fh, int lock_type, int op, int sleep, struct flock *fl, uint64_t owner, bool removing) { ldout(cct, 10) << __func__ << " ino " << in->ino << (lock_type == CEPH_LOCK_FCNTL ? 
" fcntl" : " flock") << " type " << fl->l_type << " owner " << owner << " " << fl->l_start << "~" << fl->l_len << dendl; if (in->flags & I_ERROR_FILELOCK) return -CEPHFS_EIO; int lock_cmd; if (F_RDLCK == fl->l_type) lock_cmd = CEPH_LOCK_SHARED; else if (F_WRLCK == fl->l_type) lock_cmd = CEPH_LOCK_EXCL; else if (F_UNLCK == fl->l_type) lock_cmd = CEPH_LOCK_UNLOCK; else return -CEPHFS_EIO; if (op != CEPH_MDS_OP_SETFILELOCK || lock_cmd == CEPH_LOCK_UNLOCK) sleep = 0; /* * Set the most significant bit, so that MDS knows the 'owner' * is sufficient to identify the owner of lock. (old code uses * both 'owner' and 'pid') */ owner |= (1ULL << 63); MetaRequest *req = new MetaRequest(op); filepath path; in->make_nosnap_relative_path(path); req->set_filepath(path); req->set_inode(in); req->head.args.filelock_change.rule = lock_type; req->head.args.filelock_change.type = lock_cmd; req->head.args.filelock_change.owner = owner; req->head.args.filelock_change.pid = fl->l_pid; req->head.args.filelock_change.start = fl->l_start; req->head.args.filelock_change.length = fl->l_len; req->head.args.filelock_change.wait = sleep; int ret; bufferlist bl; if (sleep && switch_interrupt_cb) { // enable interrupt switch_interrupt_cb(callback_handle, req->get()); ret = make_request(req, fh->actor_perms, NULL, NULL, -1, &bl); // disable interrupt switch_interrupt_cb(callback_handle, NULL); if (ret == 0 && req->aborted()) { // effect of this lock request has been revoked by the 'lock intr' request ret = req->get_abort_code(); } put_request(req); } else { ret = make_request(req, fh->actor_perms, NULL, NULL, -1, &bl); } if (ret == 0) { if (op == CEPH_MDS_OP_GETFILELOCK) { ceph_filelock filelock; auto p = bl.cbegin(); decode(filelock, p); if (CEPH_LOCK_SHARED == filelock.type) fl->l_type = F_RDLCK; else if (CEPH_LOCK_EXCL == filelock.type) fl->l_type = F_WRLCK; else fl->l_type = F_UNLCK; fl->l_whence = SEEK_SET; fl->l_start = filelock.start; fl->l_len = filelock.length; fl->l_pid = filelock.pid; } else if (op == CEPH_MDS_OP_SETFILELOCK) { ceph_lock_state_t *lock_state; if (lock_type == CEPH_LOCK_FCNTL) { if (!in->fcntl_locks) in->fcntl_locks.reset(new ceph_lock_state_t(cct, CEPH_LOCK_FCNTL)); lock_state = in->fcntl_locks.get(); } else if (lock_type == CEPH_LOCK_FLOCK) { if (!in->flock_locks) in->flock_locks.reset(new ceph_lock_state_t(cct, CEPH_LOCK_FLOCK)); lock_state = in->flock_locks.get(); } else { ceph_abort(); return -CEPHFS_EINVAL; } _update_lock_state(fl, owner, lock_state); if (!removing) { if (lock_type == CEPH_LOCK_FCNTL) { if (!fh->fcntl_locks) fh->fcntl_locks.reset(new ceph_lock_state_t(cct, CEPH_LOCK_FCNTL)); lock_state = fh->fcntl_locks.get(); } else { if (!fh->flock_locks) fh->flock_locks.reset(new ceph_lock_state_t(cct, CEPH_LOCK_FLOCK)); lock_state = fh->flock_locks.get(); } _update_lock_state(fl, owner, lock_state); } } else ceph_abort(); } return ret; } int Client::_interrupt_filelock(MetaRequest *req) { // Set abort code, but do not kick. The abort code prevents the request // from being re-sent. 
  req->abort(-CEPHFS_EINTR);

  if (req->mds < 0)
    return 0; // haven't sent the request

  Inode *in = req->inode();

  int lock_type;
  if (req->head.args.filelock_change.rule == CEPH_LOCK_FLOCK)
    lock_type = CEPH_LOCK_FLOCK_INTR;
  else if (req->head.args.filelock_change.rule == CEPH_LOCK_FCNTL)
    lock_type = CEPH_LOCK_FCNTL_INTR;
  else {
    ceph_abort();
    return -CEPHFS_EINVAL;
  }

  MetaRequest *intr_req = new MetaRequest(CEPH_MDS_OP_SETFILELOCK);
  filepath path;
  in->make_nosnap_relative_path(path);
  intr_req->set_filepath(path);
  intr_req->set_inode(in);
  intr_req->head.args.filelock_change = req->head.args.filelock_change;
  intr_req->head.args.filelock_change.rule = lock_type;
  intr_req->head.args.filelock_change.type = CEPH_LOCK_UNLOCK;

  UserPerm perms(req->get_uid(), req->get_gid());
  return make_request(intr_req, perms, NULL, NULL, -1);
}

void Client::_encode_filelocks(Inode *in, bufferlist& bl)
{
  if (!in->fcntl_locks && !in->flock_locks)
    return;

  unsigned nr_fcntl_locks = in->fcntl_locks ? in->fcntl_locks->held_locks.size() : 0;
  encode(nr_fcntl_locks, bl);
  if (nr_fcntl_locks) {
    auto &lock_state = in->fcntl_locks;
    for(auto p = lock_state->held_locks.begin();
        p != lock_state->held_locks.end();
        ++p)
      encode(p->second, bl);
  }

  unsigned nr_flock_locks = in->flock_locks ? in->flock_locks->held_locks.size() : 0;
  encode(nr_flock_locks, bl);
  if (nr_flock_locks) {
    auto &lock_state = in->flock_locks;
    for(auto p = lock_state->held_locks.begin();
        p != lock_state->held_locks.end();
        ++p)
      encode(p->second, bl);
  }

  ldout(cct, 10) << __func__ << " ino " << in->ino << ", " << nr_fcntl_locks
                 << " fcntl locks, " << nr_flock_locks << " flock locks" << dendl;
}

void Client::_release_filelocks(Fh *fh)
{
  if (!fh->fcntl_locks && !fh->flock_locks)
    return;

  Inode *in = fh->inode.get();
  ldout(cct, 10) << __func__ << " " << fh << " ino " << in->ino << dendl;

  list<ceph_filelock> activated_locks;

  list<pair<int, ceph_filelock> > to_release;

  if (fh->fcntl_locks) {
    auto &lock_state = fh->fcntl_locks;
    for(auto p = lock_state->held_locks.begin(); p != lock_state->held_locks.end(); ) {
      auto q = p++;
      if (in->flags & I_ERROR_FILELOCK) {
        lock_state->remove_lock(q->second, activated_locks);
      } else {
        to_release.push_back(pair<int, ceph_filelock>(CEPH_LOCK_FCNTL, q->second));
      }
    }
    lock_state.reset();
  }
  if (fh->flock_locks) {
    auto &lock_state = fh->flock_locks;
    for(auto p = lock_state->held_locks.begin(); p != lock_state->held_locks.end(); ) {
      auto q = p++;
      if (in->flags & I_ERROR_FILELOCK) {
        lock_state->remove_lock(q->second, activated_locks);
      } else {
        to_release.push_back(pair<int, ceph_filelock>(CEPH_LOCK_FLOCK, q->second));
      }
    }
    lock_state.reset();
  }

  if ((in->flags & I_ERROR_FILELOCK) && !in->has_any_filelocks())
    in->flags &= ~I_ERROR_FILELOCK;

  if (to_release.empty())
    return;

  struct flock fl;
  memset(&fl, 0, sizeof(fl));
  fl.l_whence = SEEK_SET;
  fl.l_type = F_UNLCK;

  for (list<pair<int, ceph_filelock> >::iterator p = to_release.begin();
       p != to_release.end();
       ++p) {
    fl.l_start = p->second.start;
    fl.l_len = p->second.length;
    fl.l_pid = p->second.pid;
    _do_filelock(in, fh, p->first, CEPH_MDS_OP_SETFILELOCK, 0, &fl,
                 p->second.owner, true);
  }
}

void Client::_update_lock_state(struct flock *fl, uint64_t owner,
                                ceph_lock_state_t *lock_state)
{
  int lock_cmd;
  if (F_RDLCK == fl->l_type)
    lock_cmd = CEPH_LOCK_SHARED;
  else if (F_WRLCK == fl->l_type)
    lock_cmd = CEPH_LOCK_EXCL;
  else
    lock_cmd = CEPH_LOCK_UNLOCK;

  ceph_filelock filelock;
  filelock.start = fl->l_start;
  filelock.length = fl->l_len;
  filelock.client = 0;
  // see comment in _do_filelock()
  filelock.owner = owner | (1ULL
<< 63); filelock.pid = fl->l_pid; filelock.type = lock_cmd; if (filelock.type == CEPH_LOCK_UNLOCK) { list<ceph_filelock> activated_locks; lock_state->remove_lock(filelock, activated_locks); } else { bool r = lock_state->add_lock(filelock, false, false, NULL); ceph_assert(r); } } int Client::_getlk(Fh *fh, struct flock *fl, uint64_t owner) { Inode *in = fh->inode.get(); ldout(cct, 10) << "_getlk " << fh << " ino " << in->ino << dendl; int ret = _do_filelock(in, fh, CEPH_LOCK_FCNTL, CEPH_MDS_OP_GETFILELOCK, 0, fl, owner); return ret; } int Client::_setlk(Fh *fh, struct flock *fl, uint64_t owner, int sleep) { Inode *in = fh->inode.get(); ldout(cct, 10) << "_setlk " << fh << " ino " << in->ino << dendl; int ret = _do_filelock(in, fh, CEPH_LOCK_FCNTL, CEPH_MDS_OP_SETFILELOCK, sleep, fl, owner); ldout(cct, 10) << "_setlk " << fh << " ino " << in->ino << " result=" << ret << dendl; return ret; } int Client::_flock(Fh *fh, int cmd, uint64_t owner) { Inode *in = fh->inode.get(); ldout(cct, 10) << "_flock " << fh << " ino " << in->ino << dendl; int sleep = !(cmd & LOCK_NB); cmd &= ~LOCK_NB; int type; switch (cmd) { case LOCK_SH: type = F_RDLCK; break; case LOCK_EX: type = F_WRLCK; break; case LOCK_UN: type = F_UNLCK; break; default: return -CEPHFS_EINVAL; } struct flock fl; memset(&fl, 0, sizeof(fl)); fl.l_type = type; fl.l_whence = SEEK_SET; int ret = _do_filelock(in, fh, CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, sleep, &fl, owner); ldout(cct, 10) << "_flock " << fh << " ino " << in->ino << " result=" << ret << dendl; return ret; } int Client::get_snap_info(const char *path, const UserPerm &perms, SnapInfo *snap_info) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) { return -CEPHFS_ENOTCONN; } std::scoped_lock lock(client_lock); InodeRef in; int r = Client::path_walk(path, &in, perms, true); if (r < 0) { return r; } if (in->snapid == CEPH_NOSNAP) { return -CEPHFS_EINVAL; } snap_info->id = in->snapid; snap_info->metadata = in->snap_metadata; return 0; } int Client::ll_statfs(Inode *in, struct statvfs *stbuf, const UserPerm& perms) { /* Since the only thing this does is wrap a call to statfs, and statfs takes a lock, it doesn't seem we have a need to split it out. */ return statfs(0, stbuf, perms); } void Client::_ll_register_callbacks(struct ceph_client_callback_args *args) { if (!args) return; ldout(cct, 10) << __func__ << " cb " << args->handle << " invalidate_ino_cb " << args->ino_cb << " invalidate_dentry_cb " << args->dentry_cb << " switch_interrupt_cb " << args->switch_intr_cb << " remount_cb " << args->remount_cb << dendl; callback_handle = args->handle; if (args->ino_cb) { ino_invalidate_cb = args->ino_cb; async_ino_invalidator.start(); } if (args->dentry_cb) { dentry_invalidate_cb = args->dentry_cb; async_dentry_invalidator.start(); } if (args->switch_intr_cb) { switch_interrupt_cb = args->switch_intr_cb; interrupt_finisher.start(); } if (args->remount_cb) { remount_cb = args->remount_cb; remount_finisher.start(); } if (args->ino_release_cb) { ino_release_cb = args->ino_release_cb; async_ino_releasor.start(); } if (args->umask_cb) umask_cb = args->umask_cb; } // This is deprecated, use ll_register_callbacks2() instead. 
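// (Added note: callbacks have to be registered before mount.  A
//  hypothetical caller would do, illustratively:
//
//    struct ceph_client_callback_args args = {};
//    args.handle = user_data;                // names here are made up
//    args.ino_cb = my_ino_invalidate_cb;
//    int r = client->ll_register_callbacks2(&args);
//
//  ll_register_callbacks2() below returns -CEPHFS_EBUSY once the
//  client is mounting, mounted or unmounting, while this legacy
//  variant merely asserts.)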
void Client::ll_register_callbacks(struct ceph_client_callback_args *args) { ceph_assert(!is_mounting() && !is_mounted() && !is_unmounting()); _ll_register_callbacks(args); } int Client::ll_register_callbacks2(struct ceph_client_callback_args *args) { if (is_mounting() || is_mounted() || is_unmounting()) return -CEPHFS_EBUSY; _ll_register_callbacks(args); return 0; } std::pair<int, bool> Client::test_dentry_handling(bool can_invalidate) { std::pair <int, bool> r(0, false); RWRef_t iref_reader(initialize_state, CLIENT_INITIALIZED); if (!iref_reader.is_state_satisfied()) return std::make_pair(-CEPHFS_ENOTCONN, false); can_invalidate_dentries = can_invalidate; /* * Force to use the old and slow method to invalidate the dcache * if the euid is non-root, or the remount may fail with return * code 1 or 32. */ uid_t euid = geteuid(); ldout(cct, 10) << "euid: " << euid << dendl; if (euid != 0) { can_invalidate_dentries = true; } if (can_invalidate_dentries) { ceph_assert(dentry_invalidate_cb); ldout(cct, 1) << "using dentry_invalidate_cb" << dendl; } else { ceph_assert(remount_cb); ldout(cct, 1) << "using remount_cb" << dendl; r = _do_remount(false); } return r; } int Client::_sync_fs() { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); ldout(cct, 10) << __func__ << dendl; // flush file data std::unique_ptr<C_SaferCond> cond = nullptr; if (cct->_conf->client_oc) { cond.reset(new C_SaferCond("Client::_sync_fs:lock")); objectcacher->flush_all(cond.get()); } // flush caps flush_caps_sync(); ceph_tid_t flush_tid = last_flush_tid; // flush the mdlog before waiting for unsafe requests. flush_mdlog_sync(); // wait for unsafe mds requests wait_unsafe_requests(); wait_sync_caps(flush_tid); if (nullptr != cond) { client_lock.unlock(); ldout(cct, 15) << __func__ << " waiting on data to flush" << dendl; cond->wait(); ldout(cct, 15) << __func__ << " flush finished" << dendl; client_lock.lock(); } return 0; } int Client::sync_fs() { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock l(client_lock); return _sync_fs(); } int64_t Client::drop_caches() { std::scoped_lock l(client_lock); return objectcacher->release_all(); } int Client::_lazyio(Fh *fh, int enable) { Inode *in = fh->inode.get(); ldout(cct, 20) << __func__ << " " << *in << " " << !!enable << dendl; if (!!(fh->mode & CEPH_FILE_MODE_LAZY) == !!enable) return 0; int orig_mode = fh->mode; if (enable) { fh->mode |= CEPH_FILE_MODE_LAZY; in->get_open_ref(fh->mode); in->put_open_ref(orig_mode); check_caps(in, CHECK_CAPS_NODELAY); } else { fh->mode &= ~CEPH_FILE_MODE_LAZY; in->get_open_ref(fh->mode); in->put_open_ref(orig_mode); check_caps(in, 0); } return 0; } int Client::lazyio(int fd, int enable) { std::scoped_lock l(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; return _lazyio(f, enable); } int Client::ll_lazyio(Fh *fh, int enable) { ldout(cct, 3) << __func__ << " " << fh << " " << fh->inode->ino << " " << !!enable << dendl; tout(cct) << __func__ << std::endl; std::scoped_lock lock(client_lock); return _lazyio(fh, enable); } int Client::lazyio_propagate(int fd, loff_t offset, size_t count) { std::scoped_lock l(client_lock); ldout(cct, 3) << "op: client->lazyio_propagate(" << fd << ", " << offset << ", " << count << ")" << dendl; Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; // for now _fsync(f, true); return 0; } int Client::lazyio_synchronize(int fd, loff_t offset, size_t count) { std::scoped_lock l(client_lock); ldout(cct, 3) << "op: 
client->lazyio_synchronize(" << fd << ", " << offset << ", " << count << ")" << dendl; Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; Inode *in = f->inode.get(); _fsync(f, true); if (_release(in)) { int r =_getattr(in, CEPH_STAT_CAP_SIZE, f->actor_perms); if (r < 0) return r; } return 0; } // ============================= // snaps int Client::mksnap(const char *relpath, const char *name, const UserPerm& perm, mode_t mode, const std::map<std::string, std::string> &metadata) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock l(client_lock); filepath path(relpath); InodeRef in; int r = path_walk(path, &in, perm); if (r < 0) return r; if (cct->_conf->client_permissions) { r = may_create(in.get(), perm); if (r < 0) return r; } Inode *snapdir = open_snapdir(in.get()); return _mkdir(snapdir, name, mode, perm, nullptr, metadata); } int Client::rmsnap(const char *relpath, const char *name, const UserPerm& perms, bool check_perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock l(client_lock); filepath path(relpath); InodeRef in; int r = path_walk(path, &in, perms); if (r < 0) return r; Inode *snapdir = open_snapdir(in.get()); if (cct->_conf->client_permissions) { r = may_delete(snapdir, check_perms ? name : NULL, perms); if (r < 0) return r; } return _rmdir(snapdir, name, perms); } // ============================= // expose caps int Client::get_caps_issued(int fd) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; return f->inode->caps_issued(); } int Client::get_caps_issued(const char *path, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); filepath p(path); InodeRef in; int r = path_walk(p, &in, perms, true); if (r < 0) return r; return in->caps_issued(); } // ========================================= // low level void Client::refresh_snapdir_attrs(Inode *in, Inode *diri) { ldout(cct, 10) << __func__ << ": snapdir inode=" << *in << ", inode=" << *diri << dendl; in->ino = diri->ino; in->snapid = CEPH_SNAPDIR; in->mode = diri->mode; in->uid = diri->uid; in->gid = diri->gid; in->nlink = 1; in->mtime = diri->snaprealm->last_modified; in->ctime = in->mtime; in->change_attr = diri->snaprealm->change_attr; in->btime = diri->btime; in->atime = diri->atime; in->size = diri->size; in->dirfragtree.clear(); in->snapdir_parent = diri; // copy posix acls to snapshotted inode in->xattrs.clear(); for (auto &[xattr_key, xattr_value] : diri->xattrs) { if (xattr_key.rfind("system.", 0) == 0) { in->xattrs[xattr_key] = xattr_value; } } } Inode *Client::open_snapdir(Inode *diri) { Inode *in; vinodeno_t vino(diri->ino, CEPH_SNAPDIR); if (!inode_map.count(vino)) { in = new Inode(this, vino, &diri->layout); refresh_snapdir_attrs(in, diri); diri->flags |= I_SNAPDIR_OPEN; inode_map[vino] = in; if (use_faked_inos()) _assign_faked_ino(in); ldout(cct, 10) << "open_snapdir created snapshot inode " << *in << dendl; } else { in = inode_map[vino]; ldout(cct, 10) << "open_snapdir had snapshot inode " << *in << dendl; } return in; } int Client::ll_lookup(Inode *parent, const char *name, struct stat *attr, Inode **out, const UserPerm& perms) { RWRef_t mref_reader(mount_state, 
CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; vinodeno_t vparent = _get_vino(parent); ldout(cct, 3) << __func__ << " " << vparent << " " << name << dendl; tout(cct) << __func__ << std::endl; tout(cct) << name << std::endl; std::scoped_lock lock(client_lock); int r = 0; if (!fuse_default_permissions) { if (strcmp(name, ".") && strcmp(name, "..")) { r = may_lookup(parent, perms); if (r < 0) return r; } } string dname(name); InodeRef in; r = _lookup(parent, dname, CEPH_STAT_CAP_INODE_ALL, &in, perms); if (r < 0) { attr->st_ino = 0; goto out; } ceph_assert(in); fill_stat(in, attr); _ll_get(in.get()); out: ldout(cct, 3) << __func__ << " " << vparent << " " << name << " -> " << r << " (" << hex << attr->st_ino << dec << ")" << dendl; tout(cct) << attr->st_ino << std::endl; *out = in.get(); return r; } int Client::ll_lookup_vino( vinodeno_t vino, const UserPerm& perms, Inode **inode) { ceph_assert(inode != NULL); RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; if (is_reserved_vino(vino)) return -CEPHFS_ESTALE; std::scoped_lock lock(client_lock); ldout(cct, 3) << __func__ << " " << vino << dendl; // Check the cache first unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino); if (p != inode_map.end()) { *inode = p->second; _ll_get(*inode); return 0; } uint64_t snapid = vino.snapid; // for snapdir, find the non-snapped dir inode if (snapid == CEPH_SNAPDIR) vino.snapid = CEPH_NOSNAP; int r = _lookup_vino(vino, perms, inode); if (r) return r; ceph_assert(*inode != NULL); if (snapid == CEPH_SNAPDIR) { Inode *tmp = *inode; // open the snapdir and put the inode ref *inode = open_snapdir(tmp); _ll_forget(tmp, 1); _ll_get(*inode); } return 0; } int Client::ll_lookup_inode( struct inodeno_t ino, const UserPerm& perms, Inode **inode) { vinodeno_t vino(ino, CEPH_NOSNAP); return ll_lookup_vino(vino, perms, inode); } int Client::ll_lookupx(Inode *parent, const char *name, Inode **out, struct ceph_statx *stx, unsigned want, unsigned flags, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; vinodeno_t vparent = _get_vino(parent); ldout(cct, 3) << __func__ << " " << vparent << " " << name << dendl; tout(cct) << "ll_lookupx" << std::endl; tout(cct) << name << std::endl; std::scoped_lock lock(client_lock); int r = 0; if (!fuse_default_permissions) { r = may_lookup(parent, perms); if (r < 0) return r; } string dname(name); InodeRef in; unsigned mask = statx_to_mask(flags, want); r = _lookup(parent, dname, mask, &in, perms); if (r < 0) { stx->stx_ino = 0; stx->stx_mask = 0; } else { ceph_assert(in); fill_statx(in, mask, stx); _ll_get(in.get()); } ldout(cct, 3) << __func__ << " " << vparent << " " << name << " -> " << r << " (" << hex << stx->stx_ino << dec << ")" << dendl; tout(cct) << stx->stx_ino << std::endl; *out = in.get(); return r; } int Client::ll_walk(const char* name, Inode **out, struct ceph_statx *stx, unsigned int want, unsigned int flags, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; filepath fp(name, 0); InodeRef in; int rc; unsigned mask = statx_to_mask(flags, want); ldout(cct, 3) << __func__ << " " << name << dendl; tout(cct) << __func__ << std::endl; tout(cct) << name << std::endl; std::scoped_lock lock(client_lock); rc = path_walk(fp, &in, perms, !(flags & AT_SYMLINK_NOFOLLOW), mask); if (rc < 0) { /* 
zero out mask, just in case... */
    stx->stx_mask = 0;
    stx->stx_ino = 0;
    *out = NULL;
    return rc;
  } else {
    ceph_assert(in);
    fill_statx(in, mask, stx);
    _ll_get(in.get());
    *out = in.get();
    return 0;
  }
}

void Client::_ll_get(Inode *in)
{
  if (in->ll_ref == 0) {
    in->iget();
    if (in->is_dir() && !in->dentries.empty()) {
      ceph_assert(in->dentries.size() == 1); // dirs can't be hard-linked
      in->get_first_parent()->get(); // pin dentry
    }
    if (in->snapid != CEPH_NOSNAP)
      ll_snap_ref[in->snapid]++;
  }
  in->ll_get();
  ldout(cct, 20) << __func__ << " " << in << " " << in->ino << " -> " << in->ll_ref << dendl;
}

int Client::_ll_put(Inode *in, uint64_t num)
{
  in->ll_put(num);
  ldout(cct, 20) << __func__ << " " << in << " " << in->ino << " " << num << " -> " << in->ll_ref << dendl;
  if (in->ll_ref == 0) {
    if (in->is_dir() && !in->dentries.empty()) {
      ceph_assert(in->dentries.size() == 1); // dirs can't be hard-linked
      in->get_first_parent()->put(); // unpin dentry
    }
    if (in->snapid != CEPH_NOSNAP) {
      auto p = ll_snap_ref.find(in->snapid);
      ceph_assert(p != ll_snap_ref.end());
      ceph_assert(p->second > 0);
      if (--p->second == 0)
        ll_snap_ref.erase(p);
    }
    put_inode(in);
    return 0;
  } else {
    return in->ll_ref;
  }
}

void Client::_ll_drop_pins()
{
  ldout(cct, 10) << __func__ << dendl;
  std::set<InodeRef> to_be_put; // this set will be destructed item by item on exit
  ceph::unordered_map<vinodeno_t, Inode*>::iterator next;
  for (ceph::unordered_map<vinodeno_t, Inode*>::iterator it = inode_map.begin();
       it != inode_map.end();
       it = next) {
    Inode *in = it->second;
    next = it;
    ++next;
    if (in->ll_ref){
      to_be_put.insert(in);
      _ll_put(in, in->ll_ref);
    }
  }
}

bool Client::_ll_forget(Inode *in, uint64_t count)
{
  inodeno_t ino = in->ino;

  ldout(cct, 8) << __func__ << " " << ino << " " << count << dendl;
  tout(cct) << __func__ << std::endl;
  tout(cct) << ino.val << std::endl;
  tout(cct) << count << std::endl;

  // Ignore forget if we're no longer mounted
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return true;

  if (ino == 1) return true;  // ignore forget on root.
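  /*
   * (Reader aid, added commentary: under the FUSE lowlevel API each
   * successful ll_lookup()/ll_walk() takes one ll_ref, and the kernel
   * later returns those references in batches via forget(ino, count).
   * A count exceeding ll_ref -- warned about just below -- would point
   * at a refcounting bug on one side of that protocol.)
   */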
bool last = false; if (in->ll_ref < count) { ldout(cct, 1) << "WARNING: ll_forget on " << ino << " " << count << ", which only has ll_ref=" << in->ll_ref << dendl; _ll_put(in, in->ll_ref); last = true; } else { if (_ll_put(in, count) == 0) last = true; } return last; } bool Client::ll_forget(Inode *in, uint64_t count) { std::scoped_lock lock(client_lock); return _ll_forget(in, count); } bool Client::ll_put(Inode *in) { /* ll_forget already takes the lock */ return ll_forget(in, 1); } int Client::ll_get_snap_ref(snapid_t snap) { std::scoped_lock lock(client_lock); auto p = ll_snap_ref.find(snap); if (p != ll_snap_ref.end()) return p->second; return 0; } snapid_t Client::ll_get_snapid(Inode *in) { std::scoped_lock lock(client_lock); return in->snapid; } Inode *Client::ll_get_inode(ino_t ino) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return NULL; std::scoped_lock lock(client_lock); vinodeno_t vino = _map_faked_ino(ino); unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino); if (p == inode_map.end()) return NULL; Inode *in = p->second; _ll_get(in); return in; } Inode *Client::ll_get_inode(vinodeno_t vino) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return NULL; if (is_reserved_vino(vino)) return NULL; std::scoped_lock lock(client_lock); unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino); if (p == inode_map.end()) return NULL; Inode *in = p->second; _ll_get(in); return in; } int Client::_ll_getattr(Inode *in, int caps, const UserPerm& perms) { vinodeno_t vino = _get_vino(in); ldout(cct, 8) << __func__ << " " << vino << dendl; tout(cct) << __func__ << std::endl; tout(cct) << vino.ino.val << std::endl; if (vino.snapid < CEPH_NOSNAP) return 0; else return _getattr(in, caps, perms); } int Client::ll_getattr(Inode *in, struct stat *attr, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); int res = _ll_getattr(in, CEPH_STAT_CAP_INODE_ALL, perms); if (res == 0) fill_stat(in, attr); ldout(cct, 3) << __func__ << " " << _get_vino(in) << " = " << res << dendl; return res; } int Client::ll_getattrx(Inode *in, struct ceph_statx *stx, unsigned int want, unsigned int flags, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); int res = 0; unsigned mask = statx_to_mask(flags, want); if (mask && !in->caps_issued_mask(mask, true)) res = _ll_getattr(in, mask, perms); if (res == 0) fill_statx(in, mask, stx); ldout(cct, 3) << __func__ << " " << _get_vino(in) << " = " << res << dendl; return res; } int Client::_ll_setattrx(Inode *in, struct ceph_statx *stx, int mask, const UserPerm& perms, InodeRef *inp) { vinodeno_t vino = _get_vino(in); ldout(cct, 8) << __func__ << " " << vino << " mask " << hex << mask << dec << dendl; tout(cct) << __func__ << std::endl; tout(cct) << vino.ino.val << std::endl; tout(cct) << stx->stx_mode << std::endl; tout(cct) << stx->stx_uid << std::endl; tout(cct) << stx->stx_gid << std::endl; tout(cct) << stx->stx_size << std::endl; tout(cct) << stx->stx_mtime << std::endl; tout(cct) << stx->stx_atime << std::endl; tout(cct) << stx->stx_btime << std::endl; tout(cct) << mask << std::endl; if (!fuse_default_permissions) { int res = may_setattr(in, stx, mask, perms); if (res < 0) return res; } mask &= ~(CEPH_SETATTR_MTIME_NOW 
| CEPH_SETATTR_ATIME_NOW); return __setattrx(in, stx, mask, perms, inp); } int Client::ll_setattrx(Inode *in, struct ceph_statx *stx, int mask, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); InodeRef target(in); int res = _ll_setattrx(in, stx, mask, perms, &target); if (res == 0) { ceph_assert(in == target.get()); fill_statx(in, in->caps_issued(), stx); } ldout(cct, 3) << __func__ << " " << _get_vino(in) << " = " << res << dendl; return res; } int Client::ll_setattr(Inode *in, struct stat *attr, int mask, const UserPerm& perms) { struct ceph_statx stx; stat_to_statx(attr, &stx); RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); InodeRef target(in); int res = _ll_setattrx(in, &stx, mask, perms, &target); if (res == 0) { ceph_assert(in == target.get()); fill_stat(in, attr); } ldout(cct, 3) << __func__ << " " << _get_vino(in) << " = " << res << dendl; return res; } // ---------- // xattrs int Client::getxattr(const char *path, const char *name, void *value, size_t size, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); InodeRef in; int r = Client::path_walk(path, &in, perms, true, CEPH_STAT_CAP_XATTR); if (r < 0) return r; return _getxattr(in, name, value, size, perms); } int Client::lgetxattr(const char *path, const char *name, void *value, size_t size, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); InodeRef in; int r = Client::path_walk(path, &in, perms, false, CEPH_STAT_CAP_XATTR); if (r < 0) return r; return _getxattr(in, name, value, size, perms); } int Client::fgetxattr(int fd, const char *name, void *value, size_t size, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; return _getxattr(f->inode, name, value, size, perms); } int Client::listxattr(const char *path, char *list, size_t size, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); InodeRef in; int r = Client::path_walk(path, &in, perms, true, CEPH_STAT_CAP_XATTR); if (r < 0) return r; return Client::_listxattr(in.get(), list, size, perms); } int Client::llistxattr(const char *path, char *list, size_t size, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); InodeRef in; int r = Client::path_walk(path, &in, perms, false, CEPH_STAT_CAP_XATTR); if (r < 0) return r; return Client::_listxattr(in.get(), list, size, perms); } int Client::flistxattr(int fd, char *list, size_t size, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; return Client::_listxattr(f->inode.get(), list, size, perms); } int Client::removexattr(const char *path, const char *name, 
const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); InodeRef in; int r = Client::path_walk(path, &in, perms, true); if (r < 0) return r; return _removexattr(in, name, perms); } int Client::lremovexattr(const char *path, const char *name, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); InodeRef in; int r = Client::path_walk(path, &in, perms, false); if (r < 0) return r; return _removexattr(in, name, perms); } int Client::fremovexattr(int fd, const char *name, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; return _removexattr(f->inode, name, perms); } int Client::setxattr(const char *path, const char *name, const void *value, size_t size, int flags, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; _setxattr_maybe_wait_for_osdmap(name, value, size); std::scoped_lock lock(client_lock); InodeRef in; int r = Client::path_walk(path, &in, perms, true); if (r < 0) return r; return _setxattr(in, name, value, size, flags, perms); } int Client::lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; _setxattr_maybe_wait_for_osdmap(name, value, size); std::scoped_lock lock(client_lock); InodeRef in; int r = Client::path_walk(path, &in, perms, false); if (r < 0) return r; return _setxattr(in, name, value, size, flags, perms); } int Client::fsetxattr(int fd, const char *name, const void *value, size_t size, int flags, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; _setxattr_maybe_wait_for_osdmap(name, value, size); std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; return _setxattr(f->inode, name, value, size, flags, perms); } int Client::_getxattr(Inode *in, const char *name, void *value, size_t size, const UserPerm& perms) { int r; const VXattr *vxattr = nullptr; vxattr = _match_vxattr(in, name); if (vxattr) { r = -CEPHFS_ENODATA; // Do a force getattr to get the latest quota before returning // a value to userspace. int flags = 0; if (vxattr->flags & VXATTR_RSTAT) { flags |= CEPH_STAT_RSTAT; } if (vxattr->flags & VXATTR_DIRSTAT) { flags |= CEPH_CAP_FILE_SHARED; } r = _getattr(in, flags | CEPH_STAT_CAP_XATTR, perms, true); if (r != 0) { // Error from getattr! 
int Client::_getxattr(Inode *in, const char *name, void *value, size_t size,
		      const UserPerm& perms)
{
  int r;
  const VXattr *vxattr = nullptr;

  vxattr = _match_vxattr(in, name);
  if (vxattr) {
    r = -CEPHFS_ENODATA;

    // Do a force getattr to get the latest quota before returning
    // a value to userspace.
    int flags = 0;
    if (vxattr->flags & VXATTR_RSTAT) {
      flags |= CEPH_STAT_RSTAT;
    }
    if (vxattr->flags & VXATTR_DIRSTAT) {
      flags |= CEPH_CAP_FILE_SHARED;
    }
    r = _getattr(in, flags | CEPH_STAT_CAP_XATTR, perms, true);
    if (r != 0) {
      // Error from getattr!
      return r;
    }

    // call pointer-to-member function
    char buf[256];
    if (!(vxattr->exists_cb && !(this->*(vxattr->exists_cb))(in))) {
      r = (this->*(vxattr->getxattr_cb))(in, buf, sizeof(buf));
    } else {
      r = -CEPHFS_ENODATA;
    }

    if (size != 0) {
      if (r > (int)size) {
	r = -CEPHFS_ERANGE;
      } else if (r > 0) {
	memcpy(value, buf, r);
      }
    }
    goto out;
  }

  if (!strncmp(name, "ceph.", 5)) {
    r = _getvxattr(in, perms, name, size, value, MDS_RANK_NONE);
    goto out;
  }

  if (acl_type == NO_ACL && !strncmp(name, "system.", 7)) {
    r = -CEPHFS_EOPNOTSUPP;
    goto out;
  }

  r = _getattr(in, CEPH_STAT_CAP_XATTR, perms, in->xattr_version == 0);
  if (r == 0) {
    string n(name);
    r = -CEPHFS_ENODATA;
    if (in->xattrs.count(n)) {
      r = in->xattrs[n].length();
      if (r > 0 && size != 0) {
	if (size >= (unsigned)r)
	  memcpy(value, in->xattrs[n].c_str(), r);
	else
	  r = -CEPHFS_ERANGE;
      }
    }
  }
out:
  ldout(cct, 8) << "_getxattr(" << in->ino << ", \"" << name << "\", "
		<< size << ") = " << r << dendl;
  return r;
}

int Client::_getxattr(InodeRef &in, const char *name, void *value, size_t size,
		      const UserPerm& perms)
{
  if (cct->_conf->client_permissions) {
    int r = xattr_permission(in.get(), name, MAY_READ, perms);
    if (r < 0)
      return r;
  }
  return _getxattr(in.get(), name, value, size, perms);
}

int Client::ll_getxattr(Inode *in, const char *name, void *value,
			size_t size, const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vino = _get_vino(in);

  ldout(cct, 3) << __func__ << " " << vino << " " << name << " size "
		<< size << dendl;
  tout(cct) << __func__ << std::endl;
  tout(cct) << vino.ino.val << std::endl;
  tout(cct) << name << std::endl;

  std::scoped_lock lock(client_lock);
  if (!fuse_default_permissions) {
    int r = xattr_permission(in, name, MAY_READ, perms);
    if (r < 0)
      return r;
  }

  return _getxattr(in, name, value, size, perms);
}

int Client::_listxattr(Inode *in, char *name, size_t size,
		       const UserPerm& perms)
{
  bool len_only = (size == 0);
  int r = _getattr(in, CEPH_STAT_CAP_XATTR, perms, in->xattr_version == 0);
  if (r != 0) {
    goto out;
  }

  r = 0;
  for ([[maybe_unused]] const auto &[xattr_name, xattr_value_bl] : in->xattrs) {
    if (xattr_name.rfind("ceph.", 0) == 0) {
      continue;
    }

    size_t this_len = xattr_name.length() + 1;
    r += this_len;
    if (len_only)
      continue;

    if (this_len > size) {
      r = -CEPHFS_ERANGE;
      goto out;
    }

    memcpy(name, xattr_name.c_str(), this_len);
    name += this_len;
    size -= this_len;
  }
out:
  ldout(cct, 8) << __func__ << "(" << in->ino << ", " << size << ") = "
		<< r << dendl;
  return r;
}

int Client::ll_listxattr(Inode *in, char *names, size_t size,
			 const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vino = _get_vino(in);

  ldout(cct, 3) << __func__ << " " << vino << " size " << size << dendl;
  tout(cct) << __func__ << std::endl;
  tout(cct) << vino.ino.val << std::endl;
  tout(cct) << size << std::endl;

  std::scoped_lock lock(client_lock);
  return _listxattr(in, names, size, perms);
}
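/*
 * Illustration (not part of the original source): both _getxattr() and
 * _listxattr() above return the required buffer length when called with
 * size == 0, which enables the usual two-step probe-then-fetch pattern.
 * A hedged sketch against the libcephfs C API:
 *
 *   int len = ceph_listxattr(cmount, "/dir/file", NULL, 0);   // probe
 *   if (len > 0) {
 *     char *names = (char *)malloc(len);
 *     len = ceph_listxattr(cmount, "/dir/file", names, len);  // fetch
 *     // ... walk the NUL-separated list ...
 *     free(names);
 *   }
 */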
int Client::_do_setxattr(Inode *in, const char *name, const void *value,
			 size_t size, int flags, const UserPerm& perms)
{
  int xattr_flags = 0;
  if (!value)
    xattr_flags |= CEPH_XATTR_REMOVE;
  if (flags & XATTR_CREATE)
    xattr_flags |= CEPH_XATTR_CREATE;
  if (flags & XATTR_REPLACE)
    xattr_flags |= CEPH_XATTR_REPLACE;

  MetaRequest *req = new MetaRequest(CEPH_MDS_OP_SETXATTR);
  filepath path;
  in->make_nosnap_relative_path(path);
  req->set_filepath(path);
  req->set_string2(name);
  req->set_inode(in);
  req->head.args.setxattr.flags = xattr_flags;

  bufferlist bl;
  ceph_assert(value || size == 0);
  bl.append((const char*)value, size);
  req->set_data(bl);

  int res = make_request(req, perms);

  trim_cache();
  ldout(cct, 3) << __func__ << "(" << in->ino << ", \"" << name << "\") = "
		<< res << dendl;
  return res;
}

int Client::_setxattr(Inode *in, const char *name, const void *value,
		      size_t size, int flags, const UserPerm& perms)
{
  if (in->snapid != CEPH_NOSNAP) {
    return -CEPHFS_EROFS;
  }

  if (size == 0) {
    value = "";
  } else if (value == NULL) {
    return -CEPHFS_EINVAL;
  }

  bool posix_acl_xattr = false;
  if (acl_type == POSIX_ACL)
    posix_acl_xattr = !strncmp(name, "system.", 7);

  if (strncmp(name, "user.", 5) &&
      strncmp(name, "security.", 9) &&
      strncmp(name, "trusted.", 8) &&
      strncmp(name, "ceph.", 5) &&
      !posix_acl_xattr)
    return -CEPHFS_EOPNOTSUPP;

  bool check_realm = false;

  if (posix_acl_xattr) {
    if (!strcmp(name, ACL_EA_ACCESS)) {
      mode_t new_mode = in->mode;
      if (value) {
	int ret = posix_acl_equiv_mode(value, size, &new_mode);
	if (ret < 0)
	  return ret;
	if (ret == 0) {
	  value = NULL;
	  size = 0;
	}
	if (new_mode != in->mode) {
	  struct ceph_statx stx;
	  stx.stx_mode = new_mode;
	  ret = _do_setattr(in, &stx, CEPH_SETATTR_MODE, perms, nullptr);
	  if (ret < 0)
	    return ret;
	}
      }
    } else if (!strcmp(name, ACL_EA_DEFAULT)) {
      if (value) {
	if (!S_ISDIR(in->mode))
	  return -CEPHFS_EACCES;
	int ret = posix_acl_check(value, size);
	if (ret < 0)
	  return -CEPHFS_EINVAL;
	if (ret == 0) {
	  value = NULL;
	  size = 0;
	}
      }
    } else {
      return -CEPHFS_EOPNOTSUPP;
    }
  } else {
    const VXattr *vxattr = _match_vxattr(in, name);
    if (vxattr) {
      if (vxattr->readonly)
	return -CEPHFS_EOPNOTSUPP;
      if (vxattr->setxattr_cb)
	return (this->*(vxattr->setxattr_cb))(in, value, size, perms);
      if (vxattr->name.compare(0, 10, "ceph.quota") == 0 && value)
	check_realm = true;
    }
  }

  int ret = _do_setxattr(in, name, value, size, flags, perms);
  if (ret >= 0 && check_realm) {
    // check if snaprealm was created for quota inode
    if (in->quota.is_enabled() &&
	!(in->snaprealm && in->snaprealm->ino == in->ino))
      ret = -CEPHFS_EOPNOTSUPP;
  }

  return ret;
}

int Client::_setxattr(InodeRef &in, const char *name, const void *value,
		      size_t size, int flags, const UserPerm& perms)
{
  if (cct->_conf->client_permissions) {
    int r = xattr_permission(in.get(), name, MAY_WRITE, perms);
    if (r < 0)
      return r;
  }
  return _setxattr(in.get(), name, value, size, flags, perms);
}

int Client::_setxattr_check_data_pool(string& name, string& value,
				      const OSDMap *osdmap)
{
  string tmp;
  if (name == "layout") {
    string::iterator begin = value.begin();
    string::iterator end = value.end();
    keys_and_values<string::iterator> p;    // create instance of parser
    std::map<string, string> m;             // map to receive results
    if (!qi::parse(begin, end, p, m)) {     // returns true if successful
      return -CEPHFS_EINVAL;
    }
    if (begin != end)
      return -CEPHFS_EINVAL;
    for (map<string,string>::iterator q = m.begin(); q != m.end(); ++q) {
      if (q->first == "pool") {
	tmp = q->second;
	break;
      }
    }
  } else if (name == "layout.pool") {
    tmp = value;
  }

  if (tmp.length()) {
    int64_t pool;
    try {
      pool = boost::lexical_cast<unsigned>(tmp);
      if (!osdmap->have_pg_pool(pool))
	return -CEPHFS_ENOENT;
    } catch (boost::bad_lexical_cast const&) {
      pool = osdmap->lookup_pg_pool_name(tmp);
      if (pool < 0) {
	return -CEPHFS_ENOENT;
      }
    }
  }

  return 0;
}
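/*
 * Illustration (not part of the original source): _setxattr_check_data_pool()
 * accepts either a bare pool name/id (for "layout.pool") or a key/value
 * string covering the whole layout, from which only the "pool" key is
 * validated here. A hedged sketch of a composite value, assuming a mounted
 * cmount and an existing data pool named "cephfs_data_ssd":
 *
 *   const char v[] =
 *     "stripe_unit=1048576 stripe_count=4 object_size=4194304 "
 *     "pool=cephfs_data_ssd";
 *   ceph_setxattr(cmount, "/dir/file", "ceph.file.layout",
 *                 v, sizeof(v) - 1, 0);
 */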
void Client::_setxattr_maybe_wait_for_osdmap(const char *name,
					     const void *value, size_t size)
{
  // Setting the pool via a layout xattr requires the MetaRequest to carry
  // an osdmap epoch. There is a race where a freshly created data pool is
  // known to neither the client nor the MDS; fetch the latest osdmap so
  // the MDS can quickly judge whether it needs a newer one.
  ldout(cct, 15) << __func__ << ": name = " << name << dendl;

  if (strcmp(name, "ceph.file.layout.pool") == 0 ||
      strcmp(name, "ceph.dir.layout.pool") == 0 ||
      strcmp(name, "ceph.file.layout") == 0 ||
      strcmp(name, "ceph.dir.layout") == 0) {
    string rest(strstr(name, "layout"));
    string v((const char*)value, size);
    int r = objecter->with_osdmap([&](const OSDMap& o) {
      return _setxattr_check_data_pool(rest, v, &o);
    });

    if (r == -CEPHFS_ENOENT) {
      bs::error_code ec;
      ldout(cct, 20) << __func__ << ": waiting for latest osdmap" << dendl;
      objecter->wait_for_latest_osdmap(ca::use_blocked[ec]);
      ldout(cct, 20) << __func__ << ": got latest osdmap: " << ec << dendl;
    }
  }
}

int Client::ll_setxattr(Inode *in, const char *name, const void *value,
			size_t size, int flags, const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  _setxattr_maybe_wait_for_osdmap(name, value, size);

  vinodeno_t vino = _get_vino(in);

  ldout(cct, 3) << __func__ << " " << vino << " " << name << " size "
		<< size << dendl;
  tout(cct) << __func__ << std::endl;
  tout(cct) << vino.ino.val << std::endl;
  tout(cct) << name << std::endl;

  std::scoped_lock lock(client_lock);
  if (!fuse_default_permissions) {
    int r = xattr_permission(in, name, MAY_WRITE, perms);
    if (r < 0)
      return r;
  }
  return _setxattr(in, name, value, size, flags, perms);
}

int Client::_removexattr(Inode *in, const char *name, const UserPerm& perms)
{
  if (in->snapid != CEPH_NOSNAP) {
    return -CEPHFS_EROFS;
  }

  // same xattrs supported by kernel client
  if (strncmp(name, "user.", 5) &&
      strncmp(name, "system.", 7) &&
      strncmp(name, "security.", 9) &&
      strncmp(name, "trusted.", 8) &&
      strncmp(name, "ceph.", 5))
    return -CEPHFS_EOPNOTSUPP;

  const VXattr *vxattr = _match_vxattr(in, name);
  if (vxattr && vxattr->readonly)
    return -CEPHFS_EOPNOTSUPP;

  MetaRequest *req = new MetaRequest(CEPH_MDS_OP_RMXATTR);
  filepath path;
  in->make_nosnap_relative_path(path);
  req->set_filepath(path);
  req->set_filepath2(name);
  req->set_inode(in);

  int res = make_request(req, perms);

  trim_cache();
  ldout(cct, 8) << "_removexattr(" << in->ino << ", \"" << name << "\") = "
		<< res << dendl;
  return res;
}

int Client::_removexattr(InodeRef &in, const char *name, const UserPerm& perms)
{
  if (cct->_conf->client_permissions) {
    int r = xattr_permission(in.get(), name, MAY_WRITE, perms);
    if (r < 0)
      return r;
  }
  return _removexattr(in.get(), name, perms);
}

int Client::ll_removexattr(Inode *in, const char *name, const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vino = _get_vino(in);

  ldout(cct, 3) << "ll_removexattr " << vino << " " << name << dendl;
  tout(cct) << "ll_removexattr" << std::endl;
  tout(cct) << vino.ino.val << std::endl;
  tout(cct) << name << std::endl;

  std::scoped_lock lock(client_lock);
  if (!fuse_default_permissions) {
    int r = xattr_permission(in, name, MAY_WRITE, perms);
    if (r < 0)
      return r;
  }

  return _removexattr(in, name, perms);
}

bool Client::_vxattrcb_fscrypt_auth_exists(Inode *in)
{
  bool exists = !in->fscrypt_auth.empty();

  ldout(cct, 10) << "fscrypt_auth exists " << exists << dendl;
  return exists;
}

size_t Client::_vxattrcb_fscrypt_auth(Inode *in, char *val, size_t size)
{
  size_t count = in->fscrypt_auth.size();

  if (count <= size)
    memcpy(val, in->fscrypt_auth.data(), count);
  return count;
}
int Client::_vxattrcb_fscrypt_auth_set(Inode *in, const void *val, size_t size,
				       const UserPerm& perms)
{
  struct ceph_statx stx = { 0 };
  std::vector<uint8_t> aux;

  aux.resize(size);
  memcpy(aux.data(), val, size);

  return _do_setattr(in, &stx, CEPH_SETATTR_FSCRYPT_AUTH, perms, nullptr, &aux);
}

bool Client::_vxattrcb_fscrypt_file_exists(Inode *in)
{
  return !in->fscrypt_file.empty();
}

size_t Client::_vxattrcb_fscrypt_file(Inode *in, char *val, size_t size)
{
  size_t count = in->fscrypt_file.size();

  if (count <= size)
    memcpy(val, in->fscrypt_file.data(), count);
  return count;
}

int Client::_vxattrcb_fscrypt_file_set(Inode *in, const void *val, size_t size,
				       const UserPerm& perms)
{
  struct ceph_statx stx = { 0 };
  std::vector<uint8_t> aux;

  aux.resize(size);
  memcpy(aux.data(), val, size);

  return _do_setattr(in, &stx, CEPH_SETATTR_FSCRYPT_FILE, perms, nullptr, &aux);
}

bool Client::_vxattrcb_quota_exists(Inode *in)
{
  return in->quota.is_enabled() &&
   (in->snapid != CEPH_NOSNAP ||
    (in->snaprealm && in->snaprealm->ino == in->ino));
}

size_t Client::_vxattrcb_quota(Inode *in, char *val, size_t size)
{
  return snprintf(val, size,
                  "max_bytes=%lld max_files=%lld",
                  (long long int)in->quota.max_bytes,
                  (long long int)in->quota.max_files);
}

size_t Client::_vxattrcb_quota_max_bytes(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%lld", (long long int)in->quota.max_bytes);
}

size_t Client::_vxattrcb_quota_max_files(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%lld", (long long int)in->quota.max_files);
}

bool Client::_vxattrcb_layout_exists(Inode *in)
{
  return in->layout != file_layout_t();
}

size_t Client::_vxattrcb_layout(Inode *in, char *val, size_t size)
{
  int r = snprintf(val, size,
      "stripe_unit=%llu stripe_count=%llu object_size=%llu pool=",
      (unsigned long long)in->layout.stripe_unit,
      (unsigned long long)in->layout.stripe_count,
      (unsigned long long)in->layout.object_size);
  objecter->with_osdmap([&](const OSDMap& o) {
      if (o.have_pg_pool(in->layout.pool_id))
	r += snprintf(val + r, size - r, "%s",
		      o.get_pool_name(in->layout.pool_id).c_str());
      else
	r += snprintf(val + r, size - r, "%" PRIu64,
		      (uint64_t)in->layout.pool_id);
    });
  if (in->layout.pool_ns.length())
    r += snprintf(val + r, size - r, " pool_namespace=%s",
		  in->layout.pool_ns.c_str());
  return r;
}

size_t Client::_vxattrcb_layout_stripe_unit(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu",
		  (unsigned long long)in->layout.stripe_unit);
}

size_t Client::_vxattrcb_layout_stripe_count(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu",
		  (unsigned long long)in->layout.stripe_count);
}

size_t Client::_vxattrcb_layout_object_size(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu",
		  (unsigned long long)in->layout.object_size);
}

size_t Client::_vxattrcb_layout_pool(Inode *in, char *val, size_t size)
{
  size_t r;
  objecter->with_osdmap([&](const OSDMap& o) {
      if (o.have_pg_pool(in->layout.pool_id))
	r = snprintf(val, size, "%s", o.get_pool_name(
		       in->layout.pool_id).c_str());
      else
	r = snprintf(val, size, "%" PRIu64, (uint64_t)in->layout.pool_id);
    });
  return r;
}

size_t Client::_vxattrcb_layout_pool_namespace(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%s", in->layout.pool_ns.c_str());
}

size_t Client::_vxattrcb_dir_entries(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu",
		  (unsigned long long)(in->dirstat.nfiles + in->dirstat.nsubdirs));
}

size_t Client::_vxattrcb_dir_files(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu", (unsigned long long)in->dirstat.nfiles);
}
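/*
 * Illustration (not part of the original source): the quota callbacks above
 * back the virtual "ceph.quota.*" xattrs, so directory quotas can be read
 * and set with the plain xattr calls. A hedged sketch, assuming a mounted
 * cmount and credentials that are allowed to change quotas:
 *
 *   const char v[] = "10737418240";                    // 10 GiB
 *   ceph_setxattr(cmount, "/dir", "ceph.quota.max_bytes",
 *                 v, sizeof(v) - 1, 0);
 *
 *   char buf[32];
 *   int n = ceph_getxattr(cmount, "/dir", "ceph.quota.max_bytes",
 *                         buf, sizeof(buf));
 */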
size_t Client::_vxattrcb_dir_subdirs(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu", (unsigned long long)in->dirstat.nsubdirs);
}

size_t Client::_vxattrcb_dir_rentries(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu",
		  (unsigned long long)(in->rstat.rfiles + in->rstat.rsubdirs));
}

size_t Client::_vxattrcb_dir_rfiles(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu", (unsigned long long)in->rstat.rfiles);
}

size_t Client::_vxattrcb_dir_rsubdirs(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu", (unsigned long long)in->rstat.rsubdirs);
}

size_t Client::_vxattrcb_dir_rsnaps(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu", (unsigned long long)in->rstat.rsnaps);
}

size_t Client::_vxattrcb_dir_rbytes(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu", (unsigned long long)in->rstat.rbytes);
}

size_t Client::_vxattrcb_dir_rctime(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%ld.%09ld", (long)in->rstat.rctime.sec(),
		  (long)in->rstat.rctime.nsec());
}

bool Client::_vxattrcb_dir_pin_exists(Inode *in)
{
  return in->dir_pin != -CEPHFS_ENODATA;
}

size_t Client::_vxattrcb_dir_pin(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%ld", (long)in->dir_pin);
}

bool Client::_vxattrcb_snap_btime_exists(Inode *in)
{
  return !in->snap_btime.is_zero();
}

size_t Client::_vxattrcb_snap_btime(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%llu.%09lu",
      (long long unsigned)in->snap_btime.sec(),
      (long unsigned)in->snap_btime.nsec());
}

size_t Client::_vxattrcb_caps(Inode *in, char *val, size_t size)
{
  int issued;

  in->caps_issued(&issued);
  return snprintf(val, size, "%s/0x%x", ccap_string(issued).c_str(), issued);
}

bool Client::_vxattrcb_mirror_info_exists(Inode *in)
{
  // checking one of the xattrs would suffice
  return in->xattrs.count("ceph.mirror.info.cluster_id") != 0;
}

size_t Client::_vxattrcb_mirror_info(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "cluster_id=%.*s fs_id=%.*s",
                  in->xattrs["ceph.mirror.info.cluster_id"].length(),
                  in->xattrs["ceph.mirror.info.cluster_id"].c_str(),
                  in->xattrs["ceph.mirror.info.fs_id"].length(),
                  in->xattrs["ceph.mirror.info.fs_id"].c_str());
}

size_t Client::_vxattrcb_cluster_fsid(Inode *in, char *val, size_t size)
{
  return snprintf(val, size, "%s", monclient->get_fsid().to_string().c_str());
}

size_t Client::_vxattrcb_client_id(Inode *in, char *val, size_t size)
{
  auto name = messenger->get_myname();
  return snprintf(val, size, "%s%" PRId64, name.type_str(), name.num());
}
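/*
 * Illustration (not part of the original source): the rstat callbacks above
 * are surfaced as recursive-statistics vxattrs on directories, so a client
 * can size a whole subtree without walking it. A hedged sketch:
 *
 *   char buf[32];
 *   int n = ceph_getxattr(cmount, "/dir", "ceph.dir.rbytes",
 *                         buf, sizeof(buf));
 *   if (n > 0)
 *     printf("subtree uses %.*s bytes\n", n, buf);
 */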
#define CEPH_XATTR_NAME(_type, _name) "ceph." #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2) "ceph." #_type "." #_name "." #_name2

#define XATTR_NAME_CEPH(_type, _name, _flags)                 \
{                                                              \
  name: CEPH_XATTR_NAME(_type, _name),                         \
  getxattr_cb: &Client::_vxattrcb_ ## _type ## _ ## _name,     \
  readonly: true,                                              \
  exists_cb: NULL,                                             \
  flags: _flags,                                               \
}
#define XATTR_LAYOUT_FIELD(_type, _name, _field)               \
{                                                              \
  name: CEPH_XATTR_NAME2(_type, _name, _field),                \
  getxattr_cb: &Client::_vxattrcb_ ## _name ## _ ## _field,    \
  readonly: false,                                             \
  exists_cb: &Client::_vxattrcb_layout_exists,                 \
  flags: 0,                                                    \
}
#define XATTR_QUOTA_FIELD(_type, _name)                        \
{                                                              \
  name: CEPH_XATTR_NAME(_type, _name),                         \
  getxattr_cb: &Client::_vxattrcb_ ## _type ## _ ## _name,     \
  readonly: false,                                             \
  exists_cb: &Client::_vxattrcb_quota_exists,                  \
  flags: 0,                                                    \
}

const Client::VXattr Client::_dir_vxattrs[] = {
  {
    name: "ceph.dir.layout",
    getxattr_cb: &Client::_vxattrcb_layout,
    readonly: false,
    exists_cb: &Client::_vxattrcb_layout_exists,
    flags: 0,
  },
  // FIXME
  // Delete the following dir layout field definitions for release "S"
  XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
  XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
  XATTR_LAYOUT_FIELD(dir, layout, object_size),
  XATTR_LAYOUT_FIELD(dir, layout, pool),
  XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
  XATTR_NAME_CEPH(dir, entries, VXATTR_DIRSTAT),
  XATTR_NAME_CEPH(dir, files, VXATTR_DIRSTAT),
  XATTR_NAME_CEPH(dir, subdirs, VXATTR_DIRSTAT),
  XATTR_NAME_CEPH(dir, rentries, VXATTR_RSTAT),
  XATTR_NAME_CEPH(dir, rfiles, VXATTR_RSTAT),
  XATTR_NAME_CEPH(dir, rsubdirs, VXATTR_RSTAT),
  XATTR_NAME_CEPH(dir, rsnaps, VXATTR_RSTAT),
  XATTR_NAME_CEPH(dir, rbytes, VXATTR_RSTAT),
  XATTR_NAME_CEPH(dir, rctime, VXATTR_RSTAT),
  {
    name: "ceph.quota",
    getxattr_cb: &Client::_vxattrcb_quota,
    readonly: false,
    exists_cb: &Client::_vxattrcb_quota_exists,
    flags: 0,
  },
  XATTR_QUOTA_FIELD(quota, max_bytes),
  XATTR_QUOTA_FIELD(quota, max_files),
  // FIXME
  // Delete the following dir pin field definitions for release "S"
  {
    name: "ceph.dir.pin",
    getxattr_cb: &Client::_vxattrcb_dir_pin,
    readonly: false,
    exists_cb: &Client::_vxattrcb_dir_pin_exists,
    flags: 0,
  },
  {
    name: "ceph.snap.btime",
    getxattr_cb: &Client::_vxattrcb_snap_btime,
    readonly: true,
    exists_cb: &Client::_vxattrcb_snap_btime_exists,
    flags: 0,
  },
  {
    name: "ceph.mirror.info",
    getxattr_cb: &Client::_vxattrcb_mirror_info,
    readonly: false,
    exists_cb: &Client::_vxattrcb_mirror_info_exists,
    flags: 0,
  },
  {
    name: "ceph.caps",
    getxattr_cb: &Client::_vxattrcb_caps,
    readonly: true,
    exists_cb: NULL,
    flags: 0,
  },
  { name: "" }     /* Required table terminator */
};

const Client::VXattr Client::_file_vxattrs[] = {
  {
    name: "ceph.file.layout",
    getxattr_cb: &Client::_vxattrcb_layout,
    readonly: false,
    exists_cb: &Client::_vxattrcb_layout_exists,
    flags: 0,
  },
  XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
  XATTR_LAYOUT_FIELD(file, layout, stripe_count),
  XATTR_LAYOUT_FIELD(file, layout, object_size),
  XATTR_LAYOUT_FIELD(file, layout, pool),
  XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
  {
    name: "ceph.snap.btime",
    getxattr_cb: &Client::_vxattrcb_snap_btime,
    readonly: true,
    exists_cb: &Client::_vxattrcb_snap_btime_exists,
    flags: 0,
  },
  {
    name: "ceph.caps",
    getxattr_cb: &Client::_vxattrcb_caps,
    readonly: true,
    exists_cb: NULL,
    flags: 0,
  },
  { name: "" }     /* Required table terminator */
};
const Client::VXattr Client::_common_vxattrs[] = {
  {
    name: "ceph.cluster_fsid",
    getxattr_cb: &Client::_vxattrcb_cluster_fsid,
    readonly: true,
    exists_cb: nullptr,
    flags: 0,
  },
  {
    name: "ceph.client_id",
    getxattr_cb: &Client::_vxattrcb_client_id,
    readonly: true,
    exists_cb: nullptr,
    flags: 0,
  },
  {
    name: "ceph.fscrypt.auth",
    getxattr_cb: &Client::_vxattrcb_fscrypt_auth,
    setxattr_cb: &Client::_vxattrcb_fscrypt_auth_set,
    readonly: false,
    exists_cb: &Client::_vxattrcb_fscrypt_auth_exists,
    flags: 0,
  },
  {
    name: "ceph.fscrypt.file",
    getxattr_cb: &Client::_vxattrcb_fscrypt_file,
    setxattr_cb: &Client::_vxattrcb_fscrypt_file_set,
    readonly: false,
    exists_cb: &Client::_vxattrcb_fscrypt_file_exists,
    flags: 0,
  },
  { name: "" }     /* Required table terminator */
};

const Client::VXattr *Client::_get_vxattrs(Inode *in)
{
  if (in->is_dir())
    return _dir_vxattrs;
  else if (in->is_file())
    return _file_vxattrs;
  return NULL;
}

const Client::VXattr *Client::_match_vxattr(Inode *in, const char *name)
{
  if (strncmp(name, "ceph.", 5) == 0) {
    const VXattr *vxattr = _get_vxattrs(in);
    if (vxattr) {
      while (!vxattr->name.empty()) {
	if (vxattr->name == name)
	  return vxattr;
	vxattr++;
      }
    }

    // for common vxattrs
    vxattr = _common_vxattrs;
    while (!vxattr->name.empty()) {
      if (vxattr->name == name)
	return vxattr;
      vxattr++;
    }
  }

  return NULL;
}

int Client::ll_readlink(Inode *in, char *buf, size_t buflen, const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vino = _get_vino(in);

  ldout(cct, 3) << "ll_readlink " << vino << dendl;
  tout(cct) << "ll_readlink" << std::endl;
  tout(cct) << vino.ino.val << std::endl;

  std::scoped_lock lock(client_lock);
  for (auto dn : in->dentries) {
    touch_dn(dn);
  }

  int r = _readlink(in, buf, buflen); // FIXME: no permission checking!
  ldout(cct, 3) << "ll_readlink " << vino << " = " << r << dendl;
  return r;
}

int Client::_mknod(Inode *dir, const char *name, mode_t mode, dev_t rdev,
		   const UserPerm& perms, InodeRef *inp)
{
  ldout(cct, 8) << "_mknod(" << dir->ino << " " << name << ", 0" << oct
		<< mode << dec << ", " << rdev << ", uid " << perms.uid()
		<< ", gid " << perms.gid() << ")" << dendl;

  if (strlen(name) > NAME_MAX)
    return -CEPHFS_ENAMETOOLONG;

  if (dir->snapid != CEPH_NOSNAP) {
    return -CEPHFS_EROFS;
  }
  if (is_quota_files_exceeded(dir, perms)) {
    return -CEPHFS_EDQUOT;
  }

  MetaRequest *req = new MetaRequest(CEPH_MDS_OP_MKNOD);

  filepath path;
  dir->make_nosnap_relative_path(path);
  path.push_dentry(name);
  req->set_filepath(path);
  req->set_inode(dir);
  req->head.args.mknod.rdev = rdev;
  req->dentry_drop = CEPH_CAP_FILE_SHARED;
  req->dentry_unless = CEPH_CAP_FILE_EXCL;

  bufferlist xattrs_bl;
  int res = _posix_acl_create(dir, &mode, xattrs_bl, perms);
  if (res < 0) {
    put_request(req);
    return res;
  }
  req->head.args.mknod.mode = mode;
  if (xattrs_bl.length() > 0)
    req->set_data(xattrs_bl);

  Dentry *de = get_or_create(dir, name);
  req->set_dentry(de);

  res = make_request(req, perms, inp);

  trim_cache();

  ldout(cct, 8) << "mknod(" << path << ", 0" << oct << mode << dec << ") = "
		<< res << dendl;
  return res;
}
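/*
 * Illustration (not part of the original source): _mknod() above is where a
 * FIFO or device-node creation ends up. A hedged sketch via the libcephfs C
 * API, assuming a mounted cmount:
 *
 *   // create a FIFO; rdev is only meaningful for device nodes
 *   int r = ceph_mknod(cmount, "/dir/fifo", S_IFIFO | 0644, 0);
 */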
int Client::ll_mknod(Inode *parent, const char *name, mode_t mode,
		     dev_t rdev, struct stat *attr, Inode **out,
		     const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vparent = _get_vino(parent);

  ldout(cct, 3) << "ll_mknod " << vparent << " " << name << dendl;
  tout(cct) << "ll_mknod" << std::endl;
  tout(cct) << vparent.ino.val << std::endl;
  tout(cct) << name << std::endl;
  tout(cct) << mode << std::endl;
  tout(cct) << rdev << std::endl;

  std::scoped_lock lock(client_lock);
  if (!fuse_default_permissions) {
    int r = may_create(parent, perms);
    if (r < 0)
      return r;
  }

  InodeRef in;
  int r = _mknod(parent, name, mode, rdev, perms, &in);
  if (r == 0) {
    fill_stat(in, attr);
    _ll_get(in.get());
  }
  tout(cct) << attr->st_ino << std::endl;
  ldout(cct, 3) << "ll_mknod " << vparent << " " << name << " = " << r
		<< " (" << hex << attr->st_ino << dec << ")" << dendl;
  *out = in.get();
  return r;
}

int Client::ll_mknodx(Inode *parent, const char *name, mode_t mode,
		      dev_t rdev, Inode **out,
		      struct ceph_statx *stx, unsigned want, unsigned flags,
		      const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  unsigned caps = statx_to_mask(flags, want);

  vinodeno_t vparent = _get_vino(parent);

  ldout(cct, 3) << "ll_mknodx " << vparent << " " << name << dendl;
  tout(cct) << "ll_mknodx" << std::endl;
  tout(cct) << vparent.ino.val << std::endl;
  tout(cct) << name << std::endl;
  tout(cct) << mode << std::endl;
  tout(cct) << rdev << std::endl;

  std::scoped_lock lock(client_lock);

  if (!fuse_default_permissions) {
    int r = may_create(parent, perms);
    if (r < 0)
      return r;
  }

  InodeRef in;
  int r = _mknod(parent, name, mode, rdev, perms, &in);
  if (r == 0) {
    fill_statx(in, caps, stx);
    _ll_get(in.get());
  }
  tout(cct) << stx->stx_ino << std::endl;
  ldout(cct, 3) << "ll_mknodx " << vparent << " " << name << " = " << r
		<< " (" << hex << stx->stx_ino << dec << ")" << dendl;
  *out = in.get();
  return r;
}
int Client::_create(Inode *dir, const char *name, int flags, mode_t mode,
		    InodeRef *inp, Fh **fhp, int stripe_unit, int stripe_count,
		    int object_size, const char *data_pool, bool *created,
		    const UserPerm& perms, std::string alternate_name)
{
  ldout(cct, 8) << "_create(" << dir->ino << " " << name << ", 0" << oct
		<< mode << dec << ")" << dendl;

  if (strlen(name) > NAME_MAX)
    return -CEPHFS_ENAMETOOLONG;
  if (dir->snapid != CEPH_NOSNAP) {
    return -CEPHFS_EROFS;
  }
  if (is_quota_files_exceeded(dir, perms)) {
    return -CEPHFS_EDQUOT;
  }

  // use normalized flags to generate cmode
  int cflags = ceph_flags_sys2wire(flags);
  if (cct->_conf.get_val<bool>("client_force_lazyio"))
    cflags |= CEPH_O_LAZY;

  int cmode = ceph_flags_to_mode(cflags);

  int64_t pool_id = -1;
  if (data_pool && *data_pool) {
    pool_id = objecter->with_osdmap(
      std::mem_fn(&OSDMap::lookup_pg_pool_name), data_pool);
    if (pool_id < 0)
      return -CEPHFS_EINVAL;
    if (pool_id > 0xffffffffll)
      return -CEPHFS_ERANGE;  // bummer!
  }

  MetaRequest *req = new MetaRequest(CEPH_MDS_OP_CREATE);

  filepath path;
  dir->make_nosnap_relative_path(path);
  path.push_dentry(name);
  req->set_filepath(path);
  req->set_alternate_name(std::move(alternate_name));
  req->set_inode(dir);
  req->head.args.open.flags = cflags | CEPH_O_CREAT;

  req->head.args.open.stripe_unit = stripe_unit;
  req->head.args.open.stripe_count = stripe_count;
  req->head.args.open.object_size = object_size;
  if (cct->_conf->client_debug_getattr_caps)
    req->head.args.open.mask = DEBUG_GETATTR_CAPS;
  else
    req->head.args.open.mask = 0;
  req->head.args.open.pool = pool_id;
  req->dentry_drop = CEPH_CAP_FILE_SHARED;
  req->dentry_unless = CEPH_CAP_FILE_EXCL;

  mode |= S_IFREG;
  bufferlist xattrs_bl;
  int res = _posix_acl_create(dir, &mode, xattrs_bl, perms);
  if (res < 0) {
    put_request(req);
    return res;
  }
  req->head.args.open.mode = mode;
  if (xattrs_bl.length() > 0)
    req->set_data(xattrs_bl);

  Dentry *de = get_or_create(dir, name);
  req->set_dentry(de);

  res = make_request(req, perms, inp, created);
  if (res < 0) {
    goto reply_error;
  }

  /* If the caller passed a value in fhp, do the open */
  if(fhp) {
    (*inp)->get_open_ref(cmode);
    *fhp = _create_fh(inp->get(), flags, cmode, perms);
  }

 reply_error:
  trim_cache();

  ldout(cct, 8) << "create(" << path << ", 0" << oct << mode << dec
		<< " layout " << stripe_unit
		<< ' ' << stripe_count
		<< ' ' << object_size
		<<") = " << res << dendl;
  return res;
}
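/*
 * Illustration (not part of the original source): a hedged sketch of
 * exclusive create through the libcephfs C API, which funnels into
 * _create() above:
 *
 *   int fd = ceph_open(cmount, "/dir/new", O_CREAT | O_EXCL | O_WRONLY, 0644);
 *   if (fd == -EEXIST)
 *     ; // somebody else won the creation race
 */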
int Client::_mkdir(Inode *dir, const char *name, mode_t mode, const UserPerm& perm,
		   InodeRef *inp, const std::map<std::string, std::string> &metadata,
		   std::string alternate_name)
{
  ldout(cct, 8) << "_mkdir(" << dir->ino << " " << name << ", 0" << oct
		<< mode << dec << ", uid " << perm.uid()
		<< ", gid " << perm.gid() << ")" << dendl;

  if (strlen(name) > NAME_MAX)
    return -CEPHFS_ENAMETOOLONG;

  if (dir->snapid != CEPH_NOSNAP && dir->snapid != CEPH_SNAPDIR) {
    return -CEPHFS_EROFS;
  }
  if (is_quota_files_exceeded(dir, perm)) {
    return -CEPHFS_EDQUOT;
  }

  bool is_snap_op = dir->snapid == CEPH_SNAPDIR;
  MetaRequest *req = new MetaRequest(is_snap_op ?
				     CEPH_MDS_OP_MKSNAP : CEPH_MDS_OP_MKDIR);

  filepath path;
  dir->make_nosnap_relative_path(path);
  path.push_dentry(name);
  req->set_filepath(path);
  req->set_inode(dir);
  req->dentry_drop = CEPH_CAP_FILE_SHARED;
  req->dentry_unless = CEPH_CAP_FILE_EXCL;
  req->set_alternate_name(std::move(alternate_name));

  mode |= S_IFDIR;
  bufferlist bl;
  int res = _posix_acl_create(dir, &mode, bl, perm);
  if (res < 0) {
    put_request(req);
    return res;
  }
  req->head.args.mkdir.mode = mode;
  if (is_snap_op) {
    SnapPayload payload;
    // clear the bufferlist that may have been populated by the call
    // to _posix_acl_create(). MDS mksnap does not make use of it.
    // So, reuse it to pass metadata payload.
    bl.clear();
    payload.metadata = metadata;
    encode(payload, bl);
  }
  if (bl.length() > 0) {
    req->set_data(bl);
  }

  Dentry *de = get_or_create(dir, name);
  req->set_dentry(de);

  ldout(cct, 10) << "_mkdir: making request" << dendl;
  res = make_request(req, perm, inp);
  ldout(cct, 10) << "_mkdir result is " << res << dendl;

  trim_cache();

  ldout(cct, 8) << "_mkdir(" << path << ", 0" << oct << mode << dec << ") = "
		<< res << dendl;
  return res;
}

int Client::ll_mkdir(Inode *parent, const char *name, mode_t mode,
		     struct stat *attr, Inode **out, const UserPerm& perm)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vparent = _get_vino(parent);

  ldout(cct, 3) << "ll_mkdir " << vparent << " " << name << dendl;
  tout(cct) << "ll_mkdir" << std::endl;
  tout(cct) << vparent.ino.val << std::endl;
  tout(cct) << name << std::endl;
  tout(cct) << mode << std::endl;

  std::scoped_lock lock(client_lock);

  if (!fuse_default_permissions) {
    int r = may_create(parent, perm);
    if (r < 0)
      return r;
  }

  InodeRef in;
  int r = _mkdir(parent, name, mode, perm, &in);
  if (r == 0) {
    fill_stat(in, attr);
    _ll_get(in.get());
  }
  tout(cct) << attr->st_ino << std::endl;
  ldout(cct, 3) << "ll_mkdir " << vparent << " " << name << " = " << r
		<< " (" << hex << attr->st_ino << dec << ")" << dendl;
  *out = in.get();
  return r;
}

int Client::ll_mkdirx(Inode *parent, const char *name, mode_t mode, Inode **out,
		      struct ceph_statx *stx, unsigned want, unsigned flags,
		      const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vparent = _get_vino(parent);

  ldout(cct, 3) << "ll_mkdirx " << vparent << " " << name << dendl;
  tout(cct) << "ll_mkdirx" << std::endl;
  tout(cct) << vparent.ino.val << std::endl;
  tout(cct) << name << std::endl;
  tout(cct) << mode << std::endl;

  std::scoped_lock lock(client_lock);

  if (!fuse_default_permissions) {
    int r = may_create(parent, perms);
    if (r < 0)
      return r;
  }

  InodeRef in;
  int r = _mkdir(parent, name, mode, perms, &in);
  if (r == 0) {
    fill_statx(in, statx_to_mask(flags, want), stx);
    _ll_get(in.get());
  } else {
    stx->stx_ino = 0;
    stx->stx_mask = 0;
  }
  tout(cct) << stx->stx_ino << std::endl;
  ldout(cct, 3) << "ll_mkdirx " << vparent << " " << name << " = " << r
		<< " (" << hex << stx->stx_ino << dec << ")" << dendl;
  *out = in.get();
  return r;
}

int Client::_symlink(Inode *dir, const char *name, const char *target,
		     const UserPerm& perms, std::string alternate_name, InodeRef *inp)
{
  ldout(cct, 8) << "_symlink(" << dir->ino << " " << name << ", " << target
		<< ", uid " << perms.uid() << ", gid " << perms.gid() << ")"
		<< dendl;

  if (strlen(name) > NAME_MAX)
    return -CEPHFS_ENAMETOOLONG;

  if (dir->snapid != CEPH_NOSNAP) {
    return -CEPHFS_EROFS;
  }
  if (is_quota_files_exceeded(dir, perms)) {
    return -CEPHFS_EDQUOT;
  }

  MetaRequest *req = new MetaRequest(CEPH_MDS_OP_SYMLINK);

  filepath path;
  dir->make_nosnap_relative_path(path);
  path.push_dentry(name);
  req->set_filepath(path);
  req->set_alternate_name(std::move(alternate_name));
  req->set_inode(dir);
  req->set_string2(target);
  req->dentry_drop = CEPH_CAP_FILE_SHARED;
  req->dentry_unless = CEPH_CAP_FILE_EXCL;

  Dentry *de = get_or_create(dir, name);
  req->set_dentry(de);

  int res = make_request(req, perms, inp);

  trim_cache();
  ldout(cct, 8) << "_symlink(\"" << path << "\", \"" << target << "\") = "
		<< res << dendl;
  return res;
}
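/*
 * Illustration (not part of the original source): when the parent sits in
 * the magic snapshot directory, _mkdir() above issues CEPH_MDS_OP_MKSNAP
 * instead of MKDIR, so creating a snapshot is just a mkdir under ".snap"
 * (the default, configurable snapdir name). A hedged sketch:
 *
 *   ceph_mkdir(cmount, "/data/mydir/.snap/before-upgrade", 0755);
 */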
int Client::ll_symlink(Inode *parent, const char *name, const char *value,
		       struct stat *attr, Inode **out, const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vparent = _get_vino(parent);

  ldout(cct, 3) << "ll_symlink " << vparent << " " << name << " -> " << value
		<< dendl;
  tout(cct) << "ll_symlink" << std::endl;
  tout(cct) << vparent.ino.val << std::endl;
  tout(cct) << name << std::endl;
  tout(cct) << value << std::endl;

  std::scoped_lock lock(client_lock);

  if (!fuse_default_permissions) {
    int r = may_create(parent, perms);
    if (r < 0)
      return r;
  }

  InodeRef in;
  int r = _symlink(parent, name, value, perms, "", &in);
  if (r == 0) {
    fill_stat(in, attr);
    _ll_get(in.get());
  }
  tout(cct) << attr->st_ino << std::endl;
  ldout(cct, 3) << "ll_symlink " << vparent << " " << name << " = " << r
		<< " (" << hex << attr->st_ino << dec << ")" << dendl;
  *out = in.get();
  return r;
}

int Client::ll_symlinkx(Inode *parent, const char *name, const char *value,
			Inode **out, struct ceph_statx *stx, unsigned want,
			unsigned flags, const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vparent = _get_vino(parent);

  ldout(cct, 3) << "ll_symlinkx " << vparent << " " << name << " -> " << value
		<< dendl;
  tout(cct) << "ll_symlinkx" << std::endl;
  tout(cct) << vparent.ino.val << std::endl;
  tout(cct) << name << std::endl;
  tout(cct) << value << std::endl;

  std::scoped_lock lock(client_lock);

  if (!fuse_default_permissions) {
    int r = may_create(parent, perms);
    if (r < 0)
      return r;
  }

  InodeRef in;
  int r = _symlink(parent, name, value, perms, "", &in);
  if (r == 0) {
    fill_statx(in, statx_to_mask(flags, want), stx);
    _ll_get(in.get());
  }
  tout(cct) << stx->stx_ino << std::endl;
  ldout(cct, 3) << "ll_symlinkx " << vparent << " " << name << " = " << r
		<< " (" << hex << stx->stx_ino << dec << ")" << dendl;
  *out = in.get();
  return r;
}

int Client::_unlink(Inode *dir, const char *name, const UserPerm& perm)
{
  ldout(cct, 8) << "_unlink(" << dir->ino << " " << name
		<< " uid " << perm.uid() << " gid " << perm.gid()
		<< ")" << dendl;

  if (dir->snapid != CEPH_NOSNAP) {
    return -CEPHFS_EROFS;
  }

  MetaRequest *req = new MetaRequest(CEPH_MDS_OP_UNLINK);

  filepath path;
  dir->make_nosnap_relative_path(path);
  path.push_dentry(name);
  req->set_filepath(path);

  InodeRef otherin;
  Inode *in;
  Dentry *de = get_or_create(dir, name);
  req->set_dentry(de);
  req->dentry_drop = CEPH_CAP_FILE_SHARED;
  req->dentry_unless = CEPH_CAP_FILE_EXCL;

  int res = _lookup(dir, name, 0, &otherin, perm);
  if (res < 0) {
    put_request(req);
    return res;
  }

  in = otherin.get();
  req->set_other_inode(in);
  in->break_all_delegs();
  req->other_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

  req->set_inode(dir);

  res = make_request(req, perm);

  trim_cache();
  ldout(cct, 8) << "unlink(" << path << ") = " << res << dendl;
  return res;
}

int Client::ll_unlink(Inode *in, const char *name, const UserPerm& perm)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vino = _get_vino(in);

  ldout(cct, 3) << "ll_unlink " << vino << " " << name << dendl;
  tout(cct) << "ll_unlink" << std::endl;
  tout(cct) << vino.ino.val << std::endl;
  tout(cct) << name << std::endl;

  std::scoped_lock lock(client_lock);

  if (!fuse_default_permissions) {
    int r = may_delete(in, name, perm);
    if (r < 0)
      return r;
  }
  return _unlink(in, name, perm);
}
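/*
 * Illustration (not part of the original source): _symlink() above stores
 * the target verbatim (via set_string2) without checking that it resolves,
 * matching POSIX symlink semantics. A hedged sketch via the libcephfs C API:
 *
 *   ceph_symlink(cmount, "../shared/config.yaml", "/app/config.yaml");
 */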
int Client::_rmdir(Inode *dir, const char *name, const UserPerm& perms)
{
  ldout(cct, 8) << "_rmdir(" << dir->ino << " " << name << " uid "
		<< perms.uid() << " gid " << perms.gid() << ")" << dendl;

  if (dir->snapid != CEPH_NOSNAP && dir->snapid != CEPH_SNAPDIR) {
    return -CEPHFS_EROFS;
  }

  int op = dir->snapid == CEPH_SNAPDIR ? CEPH_MDS_OP_RMSNAP : CEPH_MDS_OP_RMDIR;
  MetaRequest *req = new MetaRequest(op);
  filepath path;
  dir->make_nosnap_relative_path(path);
  path.push_dentry(name);
  req->set_filepath(path);
  req->set_inode(dir);

  req->dentry_drop = CEPH_CAP_FILE_SHARED;
  req->dentry_unless = CEPH_CAP_FILE_EXCL;
  req->other_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

  InodeRef in;

  Dentry *de = get_or_create(dir, name);
  if (op == CEPH_MDS_OP_RMDIR)
    req->set_dentry(de);
  else
    de->get();

  int res = _lookup(dir, name, 0, &in, perms);
  if (res < 0) {
    put_request(req);
    return res;
  }

  if (op == CEPH_MDS_OP_RMSNAP) {
    unlink(de, true, true);
    de->put();
  }
  req->set_other_inode(in.get());

  res = make_request(req, perms);

  trim_cache();
  ldout(cct, 8) << "rmdir(" << path << ") = " << res << dendl;
  return res;
}

int Client::ll_rmdir(Inode *in, const char *name, const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vino = _get_vino(in);

  ldout(cct, 3) << "ll_rmdir " << vino << " " << name << dendl;
  tout(cct) << "ll_rmdir" << std::endl;
  tout(cct) << vino.ino.val << std::endl;
  tout(cct) << name << std::endl;

  std::scoped_lock lock(client_lock);

  if (!fuse_default_permissions) {
    int r = may_delete(in, name, perms);
    if (r < 0)
      return r;
  }

  return _rmdir(in, name, perms);
}
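/*
 * Illustration (not part of the original source): _rename() below refuses
 * to move files between different quota roots, surfacing the same error a
 * cross-device move would. A hedged sketch of what a caller observes (on
 * Linux, CEPHFS_EXDEV maps to EXDEV):
 *
 *   // /a and /b each carry their own ceph.quota.* settings
 *   int r = ceph_rename(cmount, "/a/big.bin", "/b/big.bin");
 *   // r == -EXDEV; fall back to copy + unlink, as mv(1) does
 */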
int Client::_rename(Inode *fromdir, const char *fromname, Inode *todir,
		    const char *toname, const UserPerm& perm,
		    std::string alternate_name)
{
  ldout(cct, 8) << "_rename(" << fromdir->ino << " " << fromname << " to "
		<< todir->ino << " " << toname
		<< " uid " << perm.uid() << " gid " << perm.gid() << ")"
		<< dendl;

  if (fromdir->snapid != todir->snapid)
    return -CEPHFS_EXDEV;

  int op = CEPH_MDS_OP_RENAME;
  if (fromdir->snapid != CEPH_NOSNAP) {
    if (fromdir == todir && fromdir->snapid == CEPH_SNAPDIR)
      op = CEPH_MDS_OP_RENAMESNAP;
    else
      return -CEPHFS_EROFS;
  }

  // don't allow cross-quota renames
  if (cct->_conf.get_val<bool>("client_quota") && fromdir != todir) {
    Inode *fromdir_root =
      fromdir->quota.is_enabled() ? fromdir : get_quota_root(fromdir, perm);
    Inode *todir_root =
      todir->quota.is_enabled() ? todir : get_quota_root(todir, perm);
    if (fromdir_root != todir_root) {
      return -CEPHFS_EXDEV;
    }
  }

  InodeRef target;
  MetaRequest *req = new MetaRequest(op);

  filepath from;
  fromdir->make_nosnap_relative_path(from);
  from.push_dentry(fromname);
  filepath to;
  todir->make_nosnap_relative_path(to);
  to.push_dentry(toname);
  req->set_filepath(to);
  req->set_filepath2(from);
  req->set_alternate_name(std::move(alternate_name));

  Dentry *oldde = get_or_create(fromdir, fromname);
  Dentry *de = get_or_create(todir, toname);

  int res;
  if (op == CEPH_MDS_OP_RENAME) {
    req->set_old_dentry(oldde);
    req->old_dentry_drop = CEPH_CAP_FILE_SHARED;
    req->old_dentry_unless = CEPH_CAP_FILE_EXCL;

    de->is_renaming = true;
    req->set_dentry(de);
    req->dentry_drop = CEPH_CAP_FILE_SHARED;
    req->dentry_unless = CEPH_CAP_FILE_EXCL;

    InodeRef oldin, otherin;
    res = _lookup(fromdir, fromname, 0, &oldin, perm, nullptr, true);
    if (res < 0)
      goto fail;

    Inode *oldinode = oldin.get();
    oldinode->break_all_delegs();
    req->set_old_inode(oldinode);
    req->old_inode_drop = CEPH_CAP_LINK_SHARED;

    res = _lookup(todir, toname, 0, &otherin, perm, nullptr, true);
    switch (res) {
    case 0:
      {
	Inode *in = otherin.get();
	req->set_other_inode(in);
	in->break_all_delegs();
      }
      req->other_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
      break;
    case -CEPHFS_ENOENT:
      break;
    default:
      goto fail;
    }

    req->set_inode(todir);
  } else {
    // renamesnap reply contains no tracedn, so we need to invalidate
    // dentry manually
    unlink(oldde, true, true);
    unlink(de, true, true);

    req->set_inode(todir);
  }

  res = make_request(req, perm, &target);
  ldout(cct, 10) << "rename result is " << res << dendl;

  // if rename fails it will miss waking up the waiters
  if (op == CEPH_MDS_OP_RENAME && de->is_renaming) {
    de->is_renaming = false;
    signal_cond_list(waiting_for_rename);
  }

  // renamed item from our cache
  trim_cache();

  ldout(cct, 8) << "_rename(" << from << ", " << to << ") = " << res << dendl;
  return res;

 fail:
  put_request(req);
  return res;
}

int Client::ll_rename(Inode *parent, const char *name, Inode *newparent,
		      const char *newname, const UserPerm& perm)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vparent = _get_vino(parent);
  vinodeno_t vnewparent = _get_vino(newparent);

  ldout(cct, 3) << "ll_rename " << vparent << " " << name << " to "
		<< vnewparent << " " << newname << dendl;
  tout(cct) << "ll_rename" << std::endl;
  tout(cct) << vparent.ino.val << std::endl;
  tout(cct) << name << std::endl;
  tout(cct) << vnewparent.ino.val << std::endl;
  tout(cct) << newname << std::endl;

  std::scoped_lock lock(client_lock);

  if (!fuse_default_permissions) {
    int r = may_delete(parent, name, perm);
    if (r < 0)
      return r;
    r = may_delete(newparent, newname, perm);
    if (r < 0 && r != -CEPHFS_ENOENT)
      return r;
  }

  return _rename(parent, name, newparent, newname, perm, "");
}
int Client::_link(Inode *in, Inode *dir, const char *newname,
		  const UserPerm& perm, std::string alternate_name, InodeRef *inp)
{
  ldout(cct, 8) << "_link(" << in->ino << " to " << dir->ino << " " << newname
		<< " uid " << perm.uid() << " gid " << perm.gid() << ")" << dendl;

  if (strlen(newname) > NAME_MAX)
    return -CEPHFS_ENAMETOOLONG;

  if (in->snapid != CEPH_NOSNAP || dir->snapid != CEPH_NOSNAP) {
    return -CEPHFS_EROFS;
  }
  if (is_quota_files_exceeded(dir, perm)) {
    return -CEPHFS_EDQUOT;
  }

  in->break_all_delegs();
  MetaRequest *req = new MetaRequest(CEPH_MDS_OP_LINK);

  filepath path(newname, dir->ino);
  req->set_filepath(path);
  req->set_alternate_name(std::move(alternate_name));
  filepath existing(in->ino);
  req->set_filepath2(existing);

  req->set_inode(dir);
  req->inode_drop = CEPH_CAP_FILE_SHARED;
  req->inode_unless = CEPH_CAP_FILE_EXCL;

  Dentry *de = get_or_create(dir, newname);
  req->set_dentry(de);

  int res = make_request(req, perm, inp);
  ldout(cct, 10) << "link result is " << res << dendl;

  trim_cache();
  ldout(cct, 8) << "link(" << existing << ", " << path << ") = " << res << dendl;
  return res;
}

int Client::ll_link(Inode *in, Inode *newparent, const char *newname,
		    const UserPerm& perm)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vino = _get_vino(in);
  vinodeno_t vnewparent = _get_vino(newparent);

  ldout(cct, 3) << "ll_link " << vino << " to " << vnewparent << " " << newname
		<< dendl;
  tout(cct) << "ll_link" << std::endl;
  tout(cct) << vino.ino.val << std::endl;
  tout(cct) << vnewparent << std::endl;
  tout(cct) << newname << std::endl;

  InodeRef target;

  std::scoped_lock lock(client_lock);

  if (!fuse_default_permissions) {
    if (S_ISDIR(in->mode))
      return -CEPHFS_EPERM;

    int r = may_hardlink(in, perm);
    if (r < 0)
      return r;

    r = may_create(newparent, perm);
    if (r < 0)
      return r;
  }

  return _link(in, newparent, newname, perm, "", &target);
}

int Client::ll_num_osds(void)
{
  std::scoped_lock lock(client_lock);
  return objecter->with_osdmap(std::mem_fn(&OSDMap::get_num_osds));
}

int Client::ll_osdaddr(int osd, uint32_t *addr)
{
  std::scoped_lock lock(client_lock);

  entity_addr_t g;
  bool exists = objecter->with_osdmap([&](const OSDMap& o) {
      if (!o.exists(osd))
	return false;
      g = o.get_addrs(osd).front();
      return true;
    });
  if (!exists)
    return -1;
  uint32_t nb_addr = (g.in4_addr()).sin_addr.s_addr;
  *addr = ntohl(nb_addr);
  return 0;
}

uint32_t Client::ll_stripe_unit(Inode *in)
{
  std::scoped_lock lock(client_lock);
  return in->layout.stripe_unit;
}

uint64_t Client::ll_snap_seq(Inode *in)
{
  std::scoped_lock lock(client_lock);
  return in->snaprealm->seq;
}

int Client::ll_file_layout(Inode *in, file_layout_t *layout)
{
  std::scoped_lock lock(client_lock);
  *layout = in->layout;
  return 0;
}

int Client::ll_file_layout(Fh *fh, file_layout_t *layout)
{
  return ll_file_layout(fh->inode.get(), layout);
}
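/*
 * Illustration (not part of the original source): ll_osdaddr() above hands
 * back the OSD's IPv4 address in host byte order. A hedged sketch of turning
 * it back into dotted-quad form with standard POSIX calls:
 *
 *   uint32_t addr;
 *   if (client->ll_osdaddr(osd_id, &addr) == 0) {
 *     struct in_addr a;
 *     a.s_addr = htonl(addr);                 // back to network order
 *     char str[INET_ADDRSTRLEN];
 *     inet_ntop(AF_INET, &a, str, sizeof(str));
 *   }
 */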
/* Currently we cannot take advantage of redundancy in reads, since we
   would have to go through all possible placement groups (a
   potentially quite large number determined by a hash), and use CRUSH
   to calculate the appropriate set of OSDs for each placement group,
   then index into that.  An array with one entry per OSD is much more
   tractable and works for demonstration purposes. */

int Client::ll_get_stripe_osd(Inode *in, uint64_t blockno,
			      file_layout_t* layout)
{
  std::scoped_lock lock(client_lock);

  inodeno_t ino = in->ino;
  uint32_t object_size = layout->object_size;
  uint32_t su = layout->stripe_unit;
  uint32_t stripe_count = layout->stripe_count;
  uint64_t stripes_per_object = object_size / su;
  uint64_t stripeno = 0, stripepos = 0;

  if(stripe_count) {
      stripeno = blockno / stripe_count;    // which horizontal stripe        (Y)
      stripepos = blockno % stripe_count;   // which object in the object set (X)
  }
  uint64_t objectsetno = stripeno / stripes_per_object;       // which object set
  uint64_t objectno = objectsetno * stripe_count + stripepos;  // object id

  object_t oid = file_object_t(ino, objectno);
  return objecter->with_osdmap([&](const OSDMap& o) {
      ceph_object_layout olayout =
	o.file_to_object_layout(oid, *layout);
      pg_t pg = (pg_t)olayout.ol_pgid;
      vector<int> osds;
      int primary;
      o.pg_to_acting_osds(pg, &osds, &primary);
      return primary;
    });
}

/* Return the offset of the block, internal to the object */

uint64_t Client::ll_get_internal_offset(Inode *in, uint64_t blockno)
{
  std::scoped_lock lock(client_lock);
  file_layout_t *layout=&(in->layout);
  uint32_t object_size = layout->object_size;
  uint32_t su = layout->stripe_unit;
  uint64_t stripes_per_object = object_size / su;

  return (blockno % stripes_per_object) * su;
}
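/*
 * Illustration (not part of the original source): a worked example of the
 * striping arithmetic above. With object_size = 4 MiB, stripe_unit = 1 MiB
 * and stripe_count = 2, stripes_per_object = 4. For blockno = 5:
 *
 *   stripeno    = 5 / 2 = 2      // third horizontal stripe
 *   stripepos   = 5 % 2 = 1      // second object in the object set
 *   objectsetno = 2 / 4 = 0
 *   objectno    = 0 * 2 + 1 = 1  // -> file_object_t(ino, 1)
 *
 * and ll_get_internal_offset() yields (5 % 4) * 1 MiB = 1 MiB within it.
 */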
int Client::ll_opendir(Inode *in, int flags, dir_result_t** dirpp,
		       const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vino = _get_vino(in);

  ldout(cct, 3) << "ll_opendir " << vino << dendl;
  tout(cct) << "ll_opendir" << std::endl;
  tout(cct) << vino.ino.val << std::endl;

  std::scoped_lock lock(client_lock);

  if (!fuse_default_permissions) {
    int r = may_open(in, flags, perms);
    if (r < 0)
      return r;
  }

  int r = _opendir(in, dirpp, perms);
  tout(cct) << (uintptr_t)*dirpp << std::endl;

  ldout(cct, 3) << "ll_opendir " << vino << " = " << r << " (" << *dirpp << ")"
		<< dendl;
  return r;
}

int Client::ll_releasedir(dir_result_t *dirp)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  ldout(cct, 3) << "ll_releasedir " << dirp << dendl;
  tout(cct) << "ll_releasedir" << std::endl;
  tout(cct) << (uintptr_t)dirp << std::endl;

  std::scoped_lock lock(client_lock);
  _closedir(dirp);
  return 0;
}

int Client::ll_fsyncdir(dir_result_t *dirp)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  ldout(cct, 3) << "ll_fsyncdir " << dirp << dendl;
  tout(cct) << "ll_fsyncdir" << std::endl;
  tout(cct) << (uintptr_t)dirp << std::endl;

  std::scoped_lock lock(client_lock);
  return _fsync(dirp->inode.get(), false);
}

int Client::ll_open(Inode *in, int flags, Fh **fhp, const UserPerm& perms)
{
  ceph_assert(!(flags & O_CREAT));

  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vino = _get_vino(in);

  ldout(cct, 3) << "ll_open " << vino << " " << ceph_flags_sys2wire(flags) << dendl;
  tout(cct) << "ll_open" << std::endl;
  tout(cct) << vino.ino.val << std::endl;
  tout(cct) << ceph_flags_sys2wire(flags) << std::endl;

  std::scoped_lock lock(client_lock);

  int r;
  if (!fuse_default_permissions) {
    r = may_open(in, flags, perms);
    if (r < 0)
      goto out;
  }

  r = _open(in, flags, 0, fhp /* may be NULL */, perms);

 out:
  Fh *fhptr = fhp ? *fhp : NULL;
  if (fhptr) {
    ll_unclosed_fh_set.insert(fhptr);
  }
  tout(cct) << (uintptr_t)fhptr << std::endl;
  ldout(cct, 3) << "ll_open " << vino << " " << ceph_flags_sys2wire(flags) <<
      " = " << r << " (" << fhptr << ")" << dendl;
  return r;
}

int Client::_ll_create(Inode *parent, const char *name, mode_t mode,
		      int flags, InodeRef *in, int caps, Fh **fhp,
		      const UserPerm& perms)
{
  *fhp = NULL;

  vinodeno_t vparent = _get_vino(parent);

  ldout(cct, 8) << "_ll_create " << vparent << " " << name << " 0" << oct <<
    mode << dec << " " << ceph_flags_sys2wire(flags) << ", uid " << perms.uid()
		<< ", gid " << perms.gid() << dendl;
  tout(cct) << "ll_create" << std::endl;
  tout(cct) << vparent.ino.val << std::endl;
  tout(cct) << name << std::endl;
  tout(cct) << mode << std::endl;
  tout(cct) << ceph_flags_sys2wire(flags) << std::endl;

  bool created = false;
  int r = _lookup(parent, name, caps, in, perms);

  if (r == 0 && (flags & O_CREAT) && (flags & O_EXCL))
    return -CEPHFS_EEXIST;

  if (r == -CEPHFS_ENOENT && (flags & O_CREAT)) {
    if (!fuse_default_permissions) {
      r = may_create(parent, perms);
      if (r < 0)
	goto out;
    }
    r = _create(parent, name, flags, mode, in, fhp, 0, 0, 0, NULL, &created,
		perms, "");
    if (r < 0)
      goto out;
  }

  if (r < 0)
    goto out;

  ceph_assert(*in);

  ldout(cct, 20) << "_ll_create created = " << created << dendl;
  if (!created) {
    if (!fuse_default_permissions) {
      r = may_open(in->get(), flags, perms);
      if (r < 0) {
	if (*fhp) {
	  int release_r = _release_fh(*fhp);
	  ceph_assert(release_r == 0);  // during create, no async data ops should have happened
	}
	goto out;
      }
    }
    if (*fhp == NULL) {
      r = _open(in->get(), flags, mode, fhp, perms);
      if (r < 0)
	goto out;
    }
  }

out:
  if (*fhp) {
    ll_unclosed_fh_set.insert(*fhp);
  }

#ifdef _WIN32
  uint64_t ino = 0;
#else
  ino_t ino = 0;
#endif
  if (r >= 0) {
    Inode *inode = in->get();
    if (use_faked_inos())
      ino = inode->faked_ino;
    else
      ino = inode->ino;
  }

  tout(cct) << (uintptr_t)*fhp << std::endl;
  tout(cct) << ino << std::endl;
  ldout(cct, 8) << "_ll_create " << vparent << " " << name << " 0" << oct <<
    mode << dec << " " << ceph_flags_sys2wire(flags) << " = " << r << " (" <<
    *fhp << " " << hex << ino << dec << ")" << dendl;

  return r;
}

int Client::ll_create(Inode *parent, const char *name, mode_t mode,
		      int flags, struct stat *attr, Inode **outp, Fh **fhp,
		      const UserPerm& perms)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  std::scoped_lock lock(client_lock);
  InodeRef in;

  int r = _ll_create(parent, name, mode, flags, &in, CEPH_STAT_CAP_INODE_ALL,
		     fhp, perms);
  if (r >= 0) {
    ceph_assert(in);

    // passing an Inode in outp requires an additional ref
    if (outp) {
      _ll_get(in.get());
      *outp = in.get();
    }
    fill_stat(in, attr);
  } else {
    attr->st_ino = 0;
  }

  return r;
}

int Client::ll_createx(Inode *parent, const char *name, mode_t mode,
			int oflags, Inode **outp, Fh **fhp,
			struct ceph_statx *stx, unsigned want, unsigned lflags,
			const UserPerm& perms)
{
  unsigned caps = statx_to_mask(lflags, want);
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  std::scoped_lock lock(client_lock);
  InodeRef in;

  int r = _ll_create(parent, name, mode, oflags, &in, caps, fhp, perms);
  if (r >= 0) {
    ceph_assert(in);

    // passing an Inode in outp requires an additional ref
    if (outp) {
      _ll_get(in.get());
      *outp = in.get();
    }
    fill_statx(in, caps, stx);
  } else {
    stx->stx_ino = 0;
    stx->stx_mask = 0;
  }

  return r;
}
loff_t Client::ll_lseek(Fh *fh, loff_t offset, int whence)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  tout(cct) << "ll_lseek" << std::endl;
  tout(cct) << offset << std::endl;
  tout(cct) << whence << std::endl;

  std::scoped_lock lock(client_lock);
  return _lseek(fh, offset, whence);
}

int Client::ll_read(Fh *fh, loff_t off, loff_t len, bufferlist *bl)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  ldout(cct, 3) << "ll_read " << fh << " " << fh->inode->ino << " " << off
		<< "~" << len << dendl;
  tout(cct) << "ll_read" << std::endl;
  tout(cct) << (uintptr_t)fh << std::endl;
  tout(cct) << off << std::endl;
  tout(cct) << len << std::endl;

  /* We can't return bytes read larger than INT_MAX, clamp len to that */
  len = std::min(len, (loff_t)INT_MAX);
  std::scoped_lock lock(client_lock);

  int r = _read(fh, off, len, bl);
  ldout(cct, 3) << "ll_read " << fh << " " << off << "~" << len << " = " << r
		<< dendl;
  return r;
}

int Client::ll_read_block(Inode *in, uint64_t blockid,
			  char *buf,
			  uint64_t offset,
			  uint64_t length,
			  file_layout_t* layout)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  vinodeno_t vino = _get_vino(in);
  object_t oid = file_object_t(vino.ino, blockid);
  C_SaferCond onfinish;
  bufferlist bl;

  objecter->read(oid,
		 object_locator_t(layout->pool_id),
		 offset,
		 length,   // get the full block.
		 vino.snapid,
		 &bl,
		 CEPH_OSD_FLAG_READ,
		 &onfinish);

  int r = onfinish.wait();
  if (r >= 0) {
      bl.begin().copy(bl.length(), buf);
      r = bl.length();
  }

  return r;
}
/* It appears that the OSD doesn't return success unless the entire
   buffer was written, so return the write length on success. */

int Client::ll_write_block(Inode *in, uint64_t blockid,
			   char* buf, uint64_t offset,
			   uint64_t length, file_layout_t* layout,
			   uint64_t snapseq, uint32_t sync)
{
  vinodeno_t vino = ll_get_vino(in);
  int r = 0;
  std::unique_ptr<C_SaferCond> onsafe = nullptr;

  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  if (length == 0) {
    return -CEPHFS_EINVAL;
  }
  if (true || sync) {
    /* if write is stable, the epilogue is waiting on
     * flock */
    onsafe.reset(new C_SaferCond("Client::ll_write_block flock"));
  }
  object_t oid = file_object_t(vino.ino, blockid);
  SnapContext fakesnap;
  ceph::bufferlist bl;
  if (length > 0) {
    bl.push_back(buffer::copy(buf, length));
  }

  ldout(cct, 1) << "ll_block_write for " << vino.ino << "." << blockid
		<< dendl;

  fakesnap.seq = snapseq;

  /* lock just in time */
  objecter->write(oid,
		  object_locator_t(layout->pool_id),
		  offset,
		  length,
		  fakesnap,
		  bl,
		  ceph::real_clock::now(),
		  0,
		  onsafe.get());

  if (nullptr != onsafe) {
    r = onsafe->wait();
  }

  if (r < 0) {
    return r;
  } else {
    return length;
  }
}

int Client::ll_commit_blocks(Inode *in,
			     uint64_t offset,
			     uint64_t length)
{
    /*
    BarrierContext *bctx;
    vinodeno_t vino = _get_vino(in);
    uint64_t ino = vino.ino;

    ldout(cct, 1) << "ll_commit_blocks for " << vino.ino << " from "
                  << offset << " to " << length << dendl;

    if (length == 0) {
      return -CEPHFS_EINVAL;
    }

    std::scoped_lock lock(client_lock);
    map<uint64_t, BarrierContext*>::iterator p = barriers.find(ino);
    if (p != barriers.end()) {
      barrier_interval civ(offset, offset + length);
      p->second->commit_barrier(civ);
    }
    */
  return 0;
}

int Client::ll_write(Fh *fh, loff_t off, loff_t len, const char *data)
{
  ldout(cct, 3) << "ll_write " << fh << " " << fh->inode->ino << " " << off <<
    "~" << len << dendl;
  tout(cct) << "ll_write" << std::endl;
  tout(cct) << (uintptr_t)fh << std::endl;
  tout(cct) << off << std::endl;
  tout(cct) << len << std::endl;

  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  /* We can't return bytes written larger than INT_MAX, clamp len to that */
  len = std::min(len, (loff_t)INT_MAX);
  std::scoped_lock lock(client_lock);

  int r = _write(fh, off, len, data, NULL, 0);
  ldout(cct, 3) << "ll_write " << fh << " " << off << "~" << len << " = " << r
		<< dendl;
  return r;
}

int64_t Client::ll_writev(struct Fh *fh, const struct iovec *iov, int iovcnt, int64_t off)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  std::scoped_lock cl(client_lock);
  return _preadv_pwritev_locked(fh, iov, iovcnt, off, true, false);
}

int64_t Client::ll_readv(struct Fh *fh, const struct iovec *iov, int iovcnt, int64_t off)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  std::scoped_lock cl(client_lock);
  return _preadv_pwritev_locked(fh, iov, iovcnt, off, false, false);
}

int Client::ll_flush(Fh *fh)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  ldout(cct, 3) << "ll_flush " << fh << " " << fh->inode->ino << " " << dendl;
  tout(cct) << "ll_flush" << std::endl;
  tout(cct) << (uintptr_t)fh << std::endl;

  std::scoped_lock lock(client_lock);
  return _flush(fh);
}

int Client::ll_fsync(Fh *fh, bool syncdataonly)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  ldout(cct, 3) << "ll_fsync " << fh << " " << fh->inode->ino << " " << dendl;
  tout(cct) << "ll_fsync" << std::endl;
  tout(cct) << (uintptr_t)fh << std::endl;

  std::scoped_lock lock(client_lock);
  int r = _fsync(fh, syncdataonly);
  if (r) {
    // If we're returning an error, clear it from the FH
    fh->take_async_err();
  }
  return r;
}

int Client::ll_sync_inode(Inode *in, bool syncdataonly)
{
  RWRef_t mref_reader(mount_state, CLIENT_MOUNTING);
  if (!mref_reader.is_state_satisfied())
    return -CEPHFS_ENOTCONN;

  ldout(cct, 3) << "ll_sync_inode " << *in << " " << dendl;
  tout(cct) << "ll_sync_inode" << std::endl;
  tout(cct) << (uintptr_t)in << std::endl;

  std::scoped_lock lock(client_lock);
  return _fsync(in, syncdataonly);
}
defer << dendl; if (!in->is_file()) { return 0; } if (likely(!(in->mode & (S_ISUID|S_ISGID)))) { return 0; } if (perms.uid() == 0 || perms.uid() == in->uid) { return 0; } int mask = 0; // always drop the suid if (unlikely(in->mode & S_ISUID)) { mask = CEPH_SETATTR_KILL_SUID; } // remove the sgid if S_IXUGO is set or the inode's gid // is not in the caller's group list. if ((in->mode & S_ISGID) && ((in->mode & S_IXUGO) || !perms.gid_in_groups(in->gid))) { mask |= CEPH_SETATTR_KILL_SGID; } ldout(cct, 20) << __func__ << " mask " << mask << dendl; if (defer) { return mask; } struct ceph_statx stx = { 0 }; return __setattrx(in, &stx, mask, perms); } int Client::_fallocate(Fh *fh, int mode, int64_t offset, int64_t length) { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); if (offset < 0 || length <= 0) return -CEPHFS_EINVAL; if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -CEPHFS_EOPNOTSUPP; if ((mode & FALLOC_FL_PUNCH_HOLE) && !(mode & FALLOC_FL_KEEP_SIZE)) return -CEPHFS_EOPNOTSUPP; Inode *in = fh->inode.get(); if (objecter->osdmap_pool_full(in->layout.pool_id) && !(mode & FALLOC_FL_PUNCH_HOLE)) { return -CEPHFS_ENOSPC; } if (in->snapid != CEPH_NOSNAP) return -CEPHFS_EROFS; if ((fh->mode & CEPH_FILE_MODE_WR) == 0) return -CEPHFS_EBADF; uint64_t size = offset + length; if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) && size > in->size && is_quota_bytes_exceeded(in, size - in->size, fh->actor_perms)) { return -CEPHFS_EDQUOT; } int have; int r = get_caps(fh, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER, &have, -1); if (r < 0) return r; r = clear_suid_sgid(in, fh->actor_perms); if (r < 0) { put_cap_ref(in, CEPH_CAP_FILE_WR); return r; } std::unique_ptr<C_SaferCond> onuninline = nullptr; if (mode & FALLOC_FL_PUNCH_HOLE) { if (in->inline_version < CEPH_INLINE_NONE && (have & CEPH_CAP_FILE_BUFFER)) { bufferlist bl; auto inline_iter = in->inline_data.cbegin(); int len = in->inline_data.length(); if (offset < len) { if (offset > 0) inline_iter.copy(offset, bl); int size = length; if (offset + size > len) size = len - offset; if (size > 0) bl.append_zero(size); if (offset + size < len) { inline_iter += size; inline_iter.copy(len - offset - size, bl); } in->inline_data = bl; in->inline_version++; } in->mtime = in->ctime = ceph_clock_now(); in->change_attr++; in->mark_caps_dirty(CEPH_CAP_FILE_WR); } else { if (in->inline_version < CEPH_INLINE_NONE) { onuninline.reset(new C_SaferCond("Client::_fallocate_uninline_data flock")); uninline_data(in, onuninline.get()); } C_SaferCond onfinish("Client::_punch_hole flock"); get_cap_ref(in, CEPH_CAP_FILE_BUFFER); _invalidate_inode_cache(in, offset, length); filer->zero(in->ino, &in->layout, in->snaprealm->get_snap_context(), offset, length, ceph::real_clock::now(), 0, true, &onfinish); in->mtime = in->ctime = ceph_clock_now(); in->change_attr++; in->mark_caps_dirty(CEPH_CAP_FILE_WR); client_lock.unlock(); onfinish.wait(); client_lock.lock(); put_cap_ref(in, CEPH_CAP_FILE_BUFFER); } } else if (!(mode & FALLOC_FL_KEEP_SIZE)) { uint64_t size = offset + length; if (size > in->size) { in->size = size; in->mtime = in->ctime = ceph_clock_now(); in->change_attr++; in->mark_caps_dirty(CEPH_CAP_FILE_WR); if (is_quota_bytes_approaching(in, fh->actor_perms)) { check_caps(in, CHECK_CAPS_NODELAY); } else if (is_max_size_approaching(in)) { check_caps(in, 0); } } } if (nullptr != onuninline) { client_lock.unlock(); int ret = onuninline->wait(); client_lock.lock(); if (ret >= 0 || ret == -CEPHFS_ECANCELED) { in->inline_data.clear(); in->inline_version =
CEPH_INLINE_NONE; in->mark_caps_dirty(CEPH_CAP_FILE_WR); check_caps(in, 0); } else r = ret; } put_cap_ref(in, CEPH_CAP_FILE_WR); return r; } int Client::ll_fallocate(Fh *fh, int mode, int64_t offset, int64_t length) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; ldout(cct, 3) << __func__ << " " << fh << " " << fh->inode->ino << " " << dendl; tout(cct) << __func__ << " " << mode << " " << offset << " " << length << std::endl; tout(cct) << (uintptr_t)fh << std::endl; std::scoped_lock lock(client_lock); return _fallocate(fh, mode, offset, length); } int Client::fallocate(int fd, int mode, loff_t offset, loff_t length) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; tout(cct) << __func__ << " " << fd << " " << mode << " " << offset << " " << length << std::endl; std::scoped_lock lock(client_lock); Fh *fh = get_filehandle(fd); if (!fh) return -CEPHFS_EBADF; #if defined(__linux__) && defined(O_PATH) if (fh->flags & O_PATH) return -CEPHFS_EBADF; #endif return _fallocate(fh, mode, offset, length); } int Client::ll_release(Fh *fh) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; ldout(cct, 3) << __func__ << " (fh)" << fh << " " << fh->inode->ino << " " << dendl; tout(cct) << __func__ << " (fh)" << std::endl; tout(cct) << (uintptr_t)fh << std::endl; std::scoped_lock lock(client_lock); if (ll_unclosed_fh_set.count(fh)) ll_unclosed_fh_set.erase(fh); return _release_fh(fh); } int Client::ll_getlk(Fh *fh, struct flock *fl, uint64_t owner) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; ldout(cct, 3) << "ll_getlk (fh)" << fh << " " << fh->inode->ino << dendl; tout(cct) << "ll_getlk (fh)" << (uintptr_t)fh << std::endl; std::scoped_lock lock(client_lock); return _getlk(fh, fl, owner); } int Client::ll_setlk(Fh *fh, struct flock *fl, uint64_t owner, int sleep) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; ldout(cct, 3) << __func__ << " (fh) " << fh << " " << fh->inode->ino << dendl; tout(cct) << __func__ << " (fh)" << (uintptr_t)fh << std::endl; std::scoped_lock lock(client_lock); return _setlk(fh, fl, owner, sleep); } int Client::ll_flock(Fh *fh, int cmd, uint64_t owner) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; ldout(cct, 3) << __func__ << " (fh) " << fh << " " << fh->inode->ino << dendl; tout(cct) << __func__ << " (fh)" << (uintptr_t)fh << std::endl; std::scoped_lock lock(client_lock); return _flock(fh, cmd, owner); } int Client::set_deleg_timeout(uint32_t timeout) { std::scoped_lock lock(client_lock); /* * The whole point is to prevent blocklisting so we must time out the * delegation before the session autoclose timeout kicks in.
*/ if (timeout >= mdsmap->get_session_autoclose()) return -CEPHFS_EINVAL; deleg_timeout = timeout; return 0; } int Client::ll_delegation(Fh *fh, unsigned cmd, ceph_deleg_cb_t cb, void *priv) { int ret = -CEPHFS_EINVAL; RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); Inode *inode = fh->inode.get(); switch(cmd) { case CEPH_DELEGATION_NONE: inode->unset_deleg(fh); ret = 0; break; default: try { ret = inode->set_deleg(fh, cmd, cb, priv); } catch (std::bad_alloc&) { ret = -CEPHFS_ENOMEM; } break; } return ret; } class C_Client_RequestInterrupt : public Context { private: Client *client; MetaRequest *req; public: C_Client_RequestInterrupt(Client *c, MetaRequest *r) : client(c), req(r) { req->get(); } void finish(int r) override { std::scoped_lock l(client->client_lock); ceph_assert(req->head.op == CEPH_MDS_OP_SETFILELOCK); client->_interrupt_filelock(req); client->put_request(req); } }; void Client::ll_interrupt(void *d) { MetaRequest *req = static_cast<MetaRequest*>(d); ldout(cct, 3) << __func__ << " tid " << req->get_tid() << dendl; tout(cct) << __func__ << " tid " << req->get_tid() << std::endl; interrupt_finisher.queue(new C_Client_RequestInterrupt(this, req)); } // ========================================= // layout // expose file layouts int Client::describe_layout(const char *relpath, file_layout_t *lp, const UserPerm& perms) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); filepath path(relpath); InodeRef in; int r = path_walk(path, &in, perms); if (r < 0) return r; *lp = in->layout; ldout(cct, 3) << __func__ << "(" << relpath << ") = 0" << dendl; return 0; } int Client::fdescribe_layout(int fd, file_layout_t *lp) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; Inode *in = f->inode.get(); *lp = in->layout; ldout(cct, 3) << __func__ << "(" << fd << ") = 0" << dendl; return 0; } int64_t Client::get_default_pool_id() { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); /* first data pool is the default */ return mdsmap->get_first_data_pool(); } // expose osdmap int64_t Client::get_pool_id(const char *pool_name) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); return objecter->with_osdmap(std::mem_fn(&OSDMap::lookup_pg_pool_name), pool_name); } string Client::get_pool_name(int64_t pool) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return string(); std::scoped_lock lock(client_lock); return objecter->with_osdmap([pool](const OSDMap& o) { return o.have_pg_pool(pool) ? o.get_pool_name(pool) : string(); }); } int Client::get_pool_replication(int64_t pool) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); return objecter->with_osdmap([pool](const OSDMap& o) { return o.have_pg_pool(pool) ? 
o.get_pg_pool(pool)->get_size() : -CEPHFS_ENOENT; }); } int Client::get_file_extent_osds(int fd, loff_t off, loff_t *len, vector<int>& osds) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; Inode *in = f->inode.get(); vector<ObjectExtent> extents; Striper::file_to_extents(cct, in->ino, &in->layout, off, 1, in->truncate_size, extents); ceph_assert(extents.size() == 1); objecter->with_osdmap([&](const OSDMap& o) { pg_t pg = o.object_locator_to_pg(extents[0].oid, extents[0].oloc); o.pg_to_acting_osds(pg, osds); }); if (osds.empty()) return -CEPHFS_EINVAL; /* * Return the remainder of the extent (stripe unit) * * If length = 1 is passed to Striper::file_to_extents we get a single * extent back, but its length is one so we still need to compute the length * to the end of the stripe unit. * * If length = su then we may get 1 or 2 objects back in the extents vector * which would have to be examined. Even then, the offsets are local to the * object, so matching up to the file offset is extra work. * * It seems simpler to stick with length = 1 and manually compute the * remainder. */ if (len) { uint64_t su = in->layout.stripe_unit; *len = su - (off % su); } return 0; } int Client::get_osd_crush_location(int id, vector<pair<string, string> >& path) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); if (id < 0) return -CEPHFS_EINVAL; return objecter->with_osdmap([&](const OSDMap& o) { return o.crush->get_full_location_ordered(id, path); }); } int Client::get_file_stripe_address(int fd, loff_t offset, vector<entity_addr_t>& address) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; Inode *in = f->inode.get(); // which object? 
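// Note: Striper::file_to_extents() with len == 1 resolves `offset` to exactly
// one object extent (the assert below relies on that single-extent guarantee);
// the extent's oid/oloc are then mapped to a PG and its acting OSD set.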
vector<ObjectExtent> extents; Striper::file_to_extents(cct, in->ino, &in->layout, offset, 1, in->truncate_size, extents); ceph_assert(extents.size() == 1); // now we have the object and its 'layout' return objecter->with_osdmap([&](const OSDMap& o) { pg_t pg = o.object_locator_to_pg(extents[0].oid, extents[0].oloc); vector<int> osds; o.pg_to_acting_osds(pg, osds); if (osds.empty()) return -CEPHFS_EINVAL; for (unsigned i = 0; i < osds.size(); i++) { entity_addr_t addr = o.get_addrs(osds[i]).front(); address.push_back(addr); } return 0; }); } int Client::get_osd_addr(int osd, entity_addr_t& addr) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); return objecter->with_osdmap([&](const OSDMap& o) { if (!o.exists(osd)) return -CEPHFS_ENOENT; addr = o.get_addrs(osd).front(); return 0; }); } int Client::enumerate_layout(int fd, vector<ObjectExtent>& result, loff_t length, loff_t offset) { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); Fh *f = get_filehandle(fd); if (!f) return -CEPHFS_EBADF; Inode *in = f->inode.get(); // map to a list of extents Striper::file_to_extents(cct, in->ino, &in->layout, offset, length, in->truncate_size, result); ldout(cct, 3) << __func__ << "(" << fd << ", " << length << ", " << offset << ") = 0" << dendl; return 0; } /* find an osd with the same ip. -CEPHFS_ENXIO if none. */ int Client::get_local_osd() { RWRef_t mref_reader(mount_state, CLIENT_MOUNTING); if (!mref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; std::scoped_lock lock(client_lock); objecter->with_osdmap([this](const OSDMap& o) { if (o.get_epoch() != local_osd_epoch) { local_osd = o.find_osd_on_ip(messenger->get_myaddrs().front()); local_osd_epoch = o.get_epoch(); } }); return local_osd; } // =============================== void Client::ms_handle_connect(Connection *con) { ldout(cct, 10) << __func__ << " on " << con->get_peer_addr() << dendl; } bool Client::ms_handle_reset(Connection *con) { ldout(cct, 0) << __func__ << " on " << con->get_peer_addr() << dendl; return false; } void Client::ms_handle_remote_reset(Connection *con) { std::scoped_lock lock(client_lock); ldout(cct, 0) << __func__ << " on " << con->get_peer_addr() << dendl; switch (con->get_peer_type()) { case CEPH_ENTITY_TYPE_MDS: { // kludge to figure out which mds this is; fixme with a Connection* state mds_rank_t mds = MDS_RANK_NONE; MetaSessionRef s = NULL; for (auto &p : mds_sessions) { if (mdsmap->have_inst(p.first) && mdsmap->get_addrs(p.first) == con->get_peer_addrs()) { mds = p.first; s = p.second; } } if (mds >= 0) { ceph_assert(s != NULL); switch (s->state) { case MetaSession::STATE_CLOSING: ldout(cct, 1) << "reset from mds we were closing; we'll call that closed" << dendl; _closed_mds_session(s.get()); break; case MetaSession::STATE_OPENING: { ldout(cct, 1) << "reset from mds we were opening; retrying" << dendl; list<Context*> waiters; waiters.swap(s->waiting_for_open); _closed_mds_session(s.get()); auto news = _get_or_open_mds_session(mds); news->waiting_for_open.swap(waiters); } break; case MetaSession::STATE_OPEN: { objecter->maybe_request_map(); /* to check if we are blocklisted */ if (cct->_conf.get_val<bool>("client_reconnect_stale")) { ldout(cct, 1) << "reset from mds we were open; close mds session for reconnect" << dendl; _closed_mds_session(s.get()); } else { ldout(cct, 1) << "reset from mds we were open; mark 
session as stale" << dendl; s->state = MetaSession::STATE_STALE; } } break; case MetaSession::STATE_NEW: case MetaSession::STATE_CLOSED: default: break; } } } break; } } bool Client::ms_handle_refused(Connection *con) { ldout(cct, 1) << __func__ << " on " << con->get_peer_addr() << dendl; return false; } Inode *Client::get_quota_root(Inode *in, const UserPerm& perms, quota_max_t type) { Inode *quota_in = root_ancestor; SnapRealm *realm = in->snaprealm; if (!cct->_conf.get_val<bool>("client_quota")) return NULL; while (realm) { ldout(cct, 10) << __func__ << " realm " << realm->ino << dendl; if (realm->ino != in->ino) { auto p = inode_map.find(vinodeno_t(realm->ino, CEPH_NOSNAP)); if (p == inode_map.end()) break; if (p->second->quota.is_enabled(type)) { quota_in = p->second; break; } } realm = realm->pparent; } ldout(cct, 10) << __func__ << " " << in->vino() << " -> " << quota_in->vino() << dendl; return quota_in; } /** * Traverse quota ancestors of the Inode, return true * if any of them passes the passed function */ bool Client::check_quota_condition(Inode *in, const UserPerm& perms, std::function<bool (const Inode &in)> test) { if (!cct->_conf.get_val<bool>("client_quota")) return false; while (true) { ceph_assert(in != NULL); if (test(*in)) { return true; } if (in == root_ancestor) { // We're done traversing, drop out return false; } else { // Continue up the tree in = get_quota_root(in, perms); } } return false; } bool Client::is_quota_files_exceeded(Inode *in, const UserPerm& perms) { return check_quota_condition(in, perms, [](const Inode &in) { return in.quota.max_files && in.rstat.rsize() >= in.quota.max_files; }); } bool Client::is_quota_bytes_exceeded(Inode *in, int64_t new_bytes, const UserPerm& perms) { return check_quota_condition(in, perms, [&new_bytes](const Inode &in) { return in.quota.max_bytes && (in.rstat.rbytes + new_bytes) > in.quota.max_bytes; }); } bool Client::is_quota_bytes_approaching(Inode *in, const UserPerm& perms) { ceph_assert(in->size >= in->reported_size); const uint64_t size = in->size - in->reported_size; return check_quota_condition(in, perms, [&size](const Inode &in) { if (in.quota.max_bytes) { if (in.rstat.rbytes >= in.quota.max_bytes) { return true; } const uint64_t space = in.quota.max_bytes - in.rstat.rbytes; return (space >> 4) < size; } else { return false; } }); } enum { POOL_CHECKED = 1, POOL_CHECKING = 2, POOL_READ = 4, POOL_WRITE = 8, }; int Client::check_pool_perm(Inode *in, int need) { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); if (!cct->_conf->client_check_pool_perm) return 0; /* Only need to do this for regular files */ if (!in->is_file()) return 0; int64_t pool_id = in->layout.pool_id; std::string pool_ns = in->layout.pool_ns; std::pair<int64_t, std::string> perm_key(pool_id, pool_ns); int have = 0; while (true) { auto it = pool_perms.find(perm_key); if (it == pool_perms.end()) break; if (it->second == POOL_CHECKING) { // avoid concurrent checkings wait_on_list(waiting_for_pool_perm); } else { have = it->second; ceph_assert(have & POOL_CHECKED); break; } } if (!have) { if (in->snapid != CEPH_NOSNAP) { // pool permission check needs to write to the first object. But for snapshot, // head of the first object may have already been deleted. To avoid creating // orphan object, skip the check for now. 
return 0; } pool_perms[perm_key] = POOL_CHECKING; char oid_buf[32]; snprintf(oid_buf, sizeof(oid_buf), "%llx.00000000", (unsigned long long)in->ino); object_t oid = oid_buf; SnapContext nullsnapc; C_SaferCond rd_cond; ObjectOperation rd_op; rd_op.stat(nullptr, nullptr, nullptr); objecter->mutate(oid, OSDMap::file_to_object_locator(in->layout), rd_op, nullsnapc, ceph::real_clock::now(), 0, &rd_cond); C_SaferCond wr_cond; ObjectOperation wr_op; wr_op.create(true); objecter->mutate(oid, OSDMap::file_to_object_locator(in->layout), wr_op, nullsnapc, ceph::real_clock::now(), 0, &wr_cond); client_lock.unlock(); int rd_ret = rd_cond.wait(); int wr_ret = wr_cond.wait(); client_lock.lock(); bool errored = false; if (rd_ret == 0 || rd_ret == -CEPHFS_ENOENT) have |= POOL_READ; else if (rd_ret != -CEPHFS_EPERM) { ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns << " rd_err = " << rd_ret << " wr_err = " << wr_ret << dendl; errored = true; } if (wr_ret == 0 || wr_ret == -CEPHFS_EEXIST) have |= POOL_WRITE; else if (wr_ret != -CEPHFS_EPERM) { ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns << " rd_err = " << rd_ret << " wr_err = " << wr_ret << dendl; errored = true; } if (errored) { // Indeterminate: erase CHECKING state so that subsequent calls re-check. // Raise EIO because actual error code might be misleading for // userspace filesystem user. pool_perms.erase(perm_key); signal_cond_list(waiting_for_pool_perm); return -CEPHFS_EIO; } pool_perms[perm_key] = have | POOL_CHECKED; signal_cond_list(waiting_for_pool_perm); } if ((need & CEPH_CAP_FILE_RD) && !(have & POOL_READ)) { ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns << " need " << ccap_string(need) << ", but no read perm" << dendl; return -CEPHFS_EPERM; } if ((need & CEPH_CAP_FILE_WR) && !(have & POOL_WRITE)) { ldout(cct, 10) << __func__ << " on pool " << pool_id << " ns " << pool_ns << " need " << ccap_string(need) << ", but no write perm" << dendl; return -CEPHFS_EPERM; } return 0; } int Client::_posix_acl_permission(Inode *in, const UserPerm& perms, unsigned want) { if (acl_type == POSIX_ACL) { if (in->xattrs.count(ACL_EA_ACCESS)) { const bufferptr& access_acl = in->xattrs[ACL_EA_ACCESS]; return posix_acl_permits(access_acl, in->uid, in->gid, perms, want); } } return -CEPHFS_EAGAIN; } int Client::_posix_acl_chmod(Inode *in, mode_t mode, const UserPerm& perms) { if (acl_type == NO_ACL) return 0; int r = _getattr(in, CEPH_STAT_CAP_XATTR, perms, in->xattr_version == 0); if (r < 0) goto out; if (acl_type == POSIX_ACL) { if (in->xattrs.count(ACL_EA_ACCESS)) { const bufferptr& access_acl = in->xattrs[ACL_EA_ACCESS]; bufferptr acl(access_acl.c_str(), access_acl.length()); r = posix_acl_access_chmod(acl, mode); if (r < 0) goto out; r = _do_setxattr(in, ACL_EA_ACCESS, acl.c_str(), acl.length(), 0, perms); } else { r = 0; } } out: ldout(cct, 10) << __func__ << " ino " << in->ino << " result=" << r << dendl; return r; } int Client::_posix_acl_create(Inode *dir, mode_t *mode, bufferlist& xattrs_bl, const UserPerm& perms) { if (acl_type == NO_ACL) return 0; if (S_ISLNK(*mode)) return 0; int r = _getattr(dir, CEPH_STAT_CAP_XATTR, perms, dir->xattr_version == 0); if (r < 0) goto out; if (acl_type == POSIX_ACL) { if (dir->xattrs.count(ACL_EA_DEFAULT)) { map<string, bufferptr> xattrs; const bufferptr& default_acl = dir->xattrs[ACL_EA_DEFAULT]; bufferptr acl(default_acl.c_str(), default_acl.length()); r = posix_acl_inherit_mode(acl, mode); if (r < 0) goto out; if (r > 0) { r = 
posix_acl_equiv_mode(acl.c_str(), acl.length(), mode); if (r < 0) goto out; if (r > 0) xattrs[ACL_EA_ACCESS] = acl; } if (S_ISDIR(*mode)) xattrs[ACL_EA_DEFAULT] = dir->xattrs[ACL_EA_DEFAULT]; r = xattrs.size(); if (r > 0) encode(xattrs, xattrs_bl); } else { if (umask_cb) *mode &= ~umask_cb(callback_handle); r = 0; } } out: ldout(cct, 10) << __func__ << " dir ino " << dir->ino << " result=" << r << dendl; return r; } void Client::set_filer_flags(int flags) { std::scoped_lock l(client_lock); ceph_assert(flags == 0 || flags == CEPH_OSD_FLAG_LOCALIZE_READS); objecter->add_global_op_flags(flags); } void Client::clear_filer_flags(int flags) { std::scoped_lock l(client_lock); ceph_assert(flags == CEPH_OSD_FLAG_LOCALIZE_READS); objecter->clear_global_op_flag(flags); } // called before mount void Client::set_uuid(const std::string& uuid) { RWRef_t iref_reader(initialize_state, CLIENT_INITIALIZED); ceph_assert(iref_reader.is_state_satisfied()); std::scoped_lock l(client_lock); ceph_assert(!uuid.empty()); metadata["uuid"] = uuid; _close_sessions(); } // called before mount. 0 means infinite void Client::set_session_timeout(unsigned timeout) { RWRef_t iref_reader(initialize_state, CLIENT_INITIALIZED); ceph_assert(iref_reader.is_state_satisfied()); std::scoped_lock l(client_lock); metadata["timeout"] = stringify(timeout); } // called before mount int Client::start_reclaim(const std::string& uuid, unsigned flags, const std::string& fs_name) { RWRef_t iref_reader(initialize_state, CLIENT_INITIALIZED); if (!iref_reader.is_state_satisfied()) return -CEPHFS_ENOTCONN; if (uuid.empty()) return -CEPHFS_EINVAL; std::unique_lock l(client_lock); { auto it = metadata.find("uuid"); if (it != metadata.end() && it->second == uuid) return -CEPHFS_EINVAL; } int r = subscribe_mdsmap(fs_name); if (r < 0) { lderr(cct) << "mdsmap subscription failed: " << cpp_strerror(r) << dendl; return r; } if (metadata.empty()) populate_metadata(""); while (mdsmap->get_epoch() == 0) wait_on_list(waiting_for_mdsmap); reclaim_errno = 0; for (unsigned mds = 0; mds < mdsmap->get_num_in_mds(); ) { if (!mdsmap->is_up(mds)) { ldout(cct, 10) << "mds." << mds << " not active, waiting for new mdsmap" << dendl; wait_on_list(waiting_for_mdsmap); continue; } MetaSessionRef session; if (!have_open_session(mds)) { session = _get_or_open_mds_session(mds); if (session->state == MetaSession::STATE_REJECTED) return -CEPHFS_EPERM; if (session->state != MetaSession::STATE_OPENING) { // umounting? return -CEPHFS_EINVAL; } ldout(cct, 10) << "waiting for session to mds." << mds << " to open" << dendl; wait_on_context_list(session->waiting_for_open); continue; } session = mds_sessions.at(mds); if (!session->mds_features.test(CEPHFS_FEATURE_RECLAIM_CLIENT)) return -CEPHFS_EOPNOTSUPP; if (session->reclaim_state == MetaSession::RECLAIM_NULL || session->reclaim_state == MetaSession::RECLAIMING) { session->reclaim_state = MetaSession::RECLAIMING; auto m = make_message<MClientReclaim>(uuid, flags); session->con->send_message2(std::move(m)); wait_on_list(waiting_for_reclaim); } else if (session->reclaim_state == MetaSession::RECLAIM_FAIL) { return reclaim_errno ? 
: -CEPHFS_ENOTRECOVERABLE; } else { mds++; } } // didn't find target session in any mds if (reclaim_target_addrs.empty()) { if (flags & CEPH_RECLAIM_RESET) return -CEPHFS_ENOENT; return -CEPHFS_ENOTRECOVERABLE; } if (flags & CEPH_RECLAIM_RESET) return 0; // use blocklist to check if target session was killed // (config option mds_session_blocklist_on_evict needs to be true) ldout(cct, 10) << __func__ << ": waiting for OSD epoch " << reclaim_osd_epoch << dendl; bs::error_code ec; l.unlock(); objecter->wait_for_map(reclaim_osd_epoch, ca::use_blocked[ec]); l.lock(); if (ec) return ceph::from_error_code(ec); bool blocklisted = objecter->with_osdmap( [this](const OSDMap &osd_map) -> bool { return osd_map.is_blocklisted(reclaim_target_addrs); }); if (blocklisted) return -CEPHFS_ENOTRECOVERABLE; metadata["reclaiming_uuid"] = uuid; return 0; } void Client::finish_reclaim() { auto it = metadata.find("reclaiming_uuid"); if (it == metadata.end()) { for (auto &p : mds_sessions) p.second->reclaim_state = MetaSession::RECLAIM_NULL; return; } for (auto &p : mds_sessions) { p.second->reclaim_state = MetaSession::RECLAIM_NULL; auto m = make_message<MClientReclaim>("", MClientReclaim::FLAG_FINISH); p.second->con->send_message2(std::move(m)); } metadata["uuid"] = it->second; metadata.erase(it); } void Client::handle_client_reclaim_reply(const MConstRef<MClientReclaimReply>& reply) { mds_rank_t from = mds_rank_t(reply->get_source().num()); ldout(cct, 10) << __func__ << " " << *reply << " from mds." << from << dendl; std::scoped_lock cl(client_lock); auto session = _get_mds_session(from, reply->get_connection().get()); if (!session) { ldout(cct, 10) << " discarding reclaim reply from sessionless mds." << from << dendl; return; } if (reply->get_result() >= 0) { session->reclaim_state = MetaSession::RECLAIM_OK; if (reply->get_epoch() > reclaim_osd_epoch) reclaim_osd_epoch = reply->get_epoch(); if (!reply->get_addrs().empty()) reclaim_target_addrs = reply->get_addrs(); } else { session->reclaim_state = MetaSession::RECLAIM_FAIL; reclaim_errno = reply->get_result(); } signal_cond_list(waiting_for_reclaim); } /** * This is included in cap release messages, to cause * the MDS to wait until this OSD map epoch. It is necessary * in corner cases where we cancel RADOS ops, so that * nobody else tries to do IO to the same objects in * the same epoch as the cancelled ops. 
*/ void Client::set_cap_epoch_barrier(epoch_t e) { ldout(cct, 5) << __func__ << " epoch = " << e << dendl; cap_epoch_barrier = e; } const char** Client::get_tracked_conf_keys() const { static const char* keys[] = { "client_cache_size", "client_cache_mid", "client_acl_type", "client_deleg_timeout", "client_deleg_break_on_open", "client_oc_size", "client_oc_max_objects", "client_oc_max_dirty", "client_oc_target_dirty", "client_oc_max_dirty_age", "client_caps_release_delay", "client_mount_timeout", "client_collect_and_send_global_metrics", NULL }; return keys; } void Client::handle_conf_change(const ConfigProxy& conf, const std::set <std::string> &changed) { std::scoped_lock lock(client_lock); if (changed.count("client_cache_mid")) { lru.lru_set_midpoint(cct->_conf->client_cache_mid); } if (changed.count("client_acl_type")) { acl_type = NO_ACL; if (cct->_conf->client_acl_type == "posix_acl") acl_type = POSIX_ACL; } if (changed.count("client_oc_size")) { objectcacher->set_max_size(cct->_conf->client_oc_size); } if (changed.count("client_oc_max_objects")) { objectcacher->set_max_objects(cct->_conf->client_oc_max_objects); } if (changed.count("client_oc_max_dirty")) { objectcacher->set_max_dirty(cct->_conf->client_oc_max_dirty); } if (changed.count("client_oc_target_dirty")) { objectcacher->set_target_dirty(cct->_conf->client_oc_target_dirty); } if (changed.count("client_oc_max_dirty_age")) { objectcacher->set_max_dirty_age(cct->_conf->client_oc_max_dirty_age); } if (changed.count("client_collect_and_send_global_metrics")) { _collect_and_send_global_metrics = cct->_conf.get_val<bool>( "client_collect_and_send_global_metrics"); } if (changed.count("client_caps_release_delay")) { caps_release_delay = cct->_conf.get_val<std::chrono::seconds>( "client_caps_release_delay"); } if (changed.count("client_mount_timeout")) { mount_timeout = cct->_conf.get_val<std::chrono::seconds>( "client_mount_timeout"); } } void intrusive_ptr_add_ref(Inode *in) { in->iget(); } void intrusive_ptr_release(Inode *in) { in->client->put_inode(in); } mds_rank_t Client::_get_random_up_mds() const { ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); std::set<mds_rank_t> up; mdsmap->get_up_mds_set(up); if (up.empty()) return MDS_RANK_NONE; std::set<mds_rank_t>::const_iterator p = up.begin(); for (int n = rand() % up.size(); n; n--) ++p; return *p; } StandaloneClient::StandaloneClient(Messenger *m, MonClient *mc, boost::asio::io_context& ictx) : Client(m, mc, new Objecter(m->cct, m, mc, ictx)) { monclient->set_messenger(m); objecter->set_client_incarnation(0); } StandaloneClient::~StandaloneClient() { delete objecter; objecter = nullptr; } int StandaloneClient::init() { RWRef_t iref_writer(initialize_state, CLIENT_INITIALIZING, false); ceph_assert(iref_writer.is_first_writer()); _pre_init(); objecter->init(); client_lock.lock(); messenger->add_dispatcher_tail(objecter); messenger->add_dispatcher_tail(this); monclient->set_want_keys(CEPH_ENTITY_TYPE_MDS | CEPH_ENTITY_TYPE_OSD); int r = monclient->init(); if (r < 0) { // need to do cleanup because we're in an intermediate init state { std::scoped_lock l(timer_lock); timer.shutdown(); } client_lock.unlock(); objecter->shutdown(); objectcacher->stop(); monclient->shutdown(); return r; } objecter->start(); client_lock.unlock(); _finish_init(); iref_writer.update_state(CLIENT_INITIALIZED); return 0; } void StandaloneClient::shutdown() { Client::shutdown(); objecter->shutdown(); monclient->shutdown(); }
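// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of Client.cc): roughly how a
// libcephfs-style consumer might drive the low-level ll_* API above. It
// assumes `client` is an initialized, mounted StandaloneClient and `ino` a
// known inode number; error handling is elided and `some_buffer` is a
// hypothetical destination buffer.
//
//   Inode *in = client->ll_get_inode(vinodeno_t(ino, CEPH_NOSNAP));
//   Fh *fh = nullptr;
//   UserPerm perms = Client::pick_my_perms(client->cct);
//   if (in && client->ll_open(in, O_RDONLY, &fh, perms) == 0) {
//     bufferlist bl;
//     int n = client->ll_read(fh, 0, 4096, &bl);  // read up to 4 KiB at offset 0
//     if (n >= 0)
//       bl.begin().copy(n, some_buffer);          // copy out the bytes read
//     client->ll_release(fh);                     // drop the file handle
//   }
//   if (in)
//     client->ll_put(in);                         // release the inode reference
// ---------------------------------------------------------------------------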
468914
27.441499
146
cc
null
ceph-main/src/client/Client.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <[email protected]> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_CLIENT_H #define CEPH_CLIENT_H #include "common/CommandTable.h" #include "common/Finisher.h" #include "common/Timer.h" #include "common/ceph_mutex.h" #include "common/cmdparse.h" #include "common/compiler_extensions.h" #include "include/common_fwd.h" #include "include/cephfs/ceph_ll_client.h" #include "include/filepath.h" #include "include/interval_set.h" #include "include/lru.h" #include "include/types.h" #include "include/unordered_map.h" #include "include/unordered_set.h" #include "include/cephfs/metrics/Types.h" #include "mds/mdstypes.h" #include "include/cephfs/types.h" #include "msg/Dispatcher.h" #include "msg/MessageRef.h" #include "msg/Messenger.h" #include "osdc/ObjectCacher.h" #include "RWRef.h" #include "InodeRef.h" #include "MetaSession.h" #include "UserPerm.h" #include <fstream> #include <map> #include <memory> #include <set> #include <string> #include <thread> using std::set; using std::map; using std::fstream; class FSMap; class FSMapUser; class MonClient; struct DirStat; struct LeaseStat; struct InodeStat; class Filer; class Objecter; class WritebackHandler; class MDSMap; class Message; class destructive_lock_ref_t; enum { l_c_first = 20000, l_c_reply, l_c_lat, l_c_wrlat, l_c_read, l_c_fsync, l_c_md_avg, l_c_md_sqsum, l_c_md_ops, l_c_rd_avg, l_c_rd_sqsum, l_c_rd_ops, l_c_wr_avg, l_c_wr_sqsum, l_c_wr_ops, l_c_last, }; class MDSCommandOp : public CommandOp { public: mds_gid_t mds_gid; explicit MDSCommandOp(ceph_tid_t t) : CommandOp(t) {} }; /* error code for ceph_fuse */ #define CEPH_FUSE_NO_MDS_UP -((1<<16)+0) /* no mds up detected in ceph_fuse */ #define CEPH_FUSE_LAST -((1<<16)+1) /* (unused) */ // ============================================ // types for my local metadata cache /* basic structure: - Dentries live in an LRU loop. they get expired based on last access. see include/lru.h. items can be bumped to "mid" or "top" of list, etc. - Inode has ref count for each Fh, Dir, or Dentry that points to it. - when Inode ref goes to 0, it's expired.
- when Dir is empty, it's removed (and it's Inode ref--) */ /* getdir result */ struct DirEntry { explicit DirEntry(const std::string &s) : d_name(s), stmask(0) {} DirEntry(const std::string &n, struct stat& s, int stm) : d_name(n), st(s), stmask(stm) {} std::string d_name; struct stat st; int stmask; }; struct Cap; class Dir; class Dentry; struct SnapRealm; struct Fh; struct CapSnap; struct MetaRequest; class ceph_lock_state_t; // ======================================================== // client interface struct dir_result_t { static const int SHIFT = 28; static const int64_t MASK = (1 << SHIFT) - 1; static const int64_t HASH = 0xFFULL << (SHIFT + 24); // impossible frag bits static const loff_t END = 1ULL << (SHIFT + 32); struct dentry { int64_t offset; std::string name; std::string alternate_name; InodeRef inode; explicit dentry(int64_t o) : offset(o) {} dentry(int64_t o, std::string n, std::string an, InodeRef in) : offset(o), name(std::move(n)), alternate_name(std::move(an)), inode(std::move(in)) {} }; struct dentry_off_lt { bool operator()(const dentry& d, int64_t off) const { return dir_result_t::fpos_cmp(d.offset, off) < 0; } }; explicit dir_result_t(Inode *in, const UserPerm& perms); static uint64_t make_fpos(unsigned h, unsigned l, bool hash) { uint64_t v = ((uint64_t)h<< SHIFT) | (uint64_t)l; if (hash) v |= HASH; else ceph_assert((v & HASH) != HASH); return v; } static unsigned fpos_high(uint64_t p) { unsigned v = (p & (END-1)) >> SHIFT; if ((p & HASH) == HASH) return ceph_frag_value(v); return v; } static unsigned fpos_low(uint64_t p) { return p & MASK; } static int fpos_cmp(uint64_t l, uint64_t r) { int c = ceph_frag_compare(fpos_high(l), fpos_high(r)); if (c) return c; if (fpos_low(l) == fpos_low(r)) return 0; return fpos_low(l) < fpos_low(r) ? 
-1 : 1; } unsigned offset_high() { return fpos_high(offset); } unsigned offset_low() { return fpos_low(offset); } void set_end() { offset |= END; } bool at_end() { return (offset & END); } void set_hash_order() { offset |= HASH; } bool hash_order() { return (offset & HASH) == HASH; } bool is_cached() { if (buffer.empty()) return false; if (hash_order()) { return buffer_frag.contains(offset_high()); } else { return buffer_frag == frag_t(offset_high()); } } void reset() { last_name.clear(); next_offset = 2; offset = 0; ordered_count = 0; cache_index = 0; buffer.clear(); } InodeRef inode; int64_t offset; // hash order: // (0xff << 52) | ((24 bits hash) << 28) | // (the nth entry has hash collision); // frag+name order; // ((frag value) << 28) | (the nth entry in frag); unsigned next_offset; // offset of next chunk (last_name's + 1) std::string last_name; // last entry in previous chunk uint64_t release_count; uint64_t ordered_count; unsigned cache_index; int start_shared_gen; // dir shared_gen at start of readdir UserPerm perms; frag_t buffer_frag; std::vector<dentry> buffer; struct dirent de; }; class Client : public Dispatcher, public md_config_obs_t { public: friend class C_Block_Sync; // Calls block map and protected helpers friend class C_Client_CacheInvalidate; // calls ino_invalidate_cb friend class C_Client_DentryInvalidate; // calls dentry_invalidate_cb friend class C_Client_FlushComplete; // calls put_inode() friend class C_Client_Remount; friend class C_Client_RequestInterrupt; friend class C_Deleg_Timeout; // Asserts on client_lock, called when a delegation is unreturned friend class C_Client_CacheRelease; // Asserts on client_lock friend class SyntheticClient; friend void intrusive_ptr_release(Inode *in); template <typename T> friend struct RWRefState; template <typename T> friend class RWRef; using Dispatcher::cct; using clock = ceph::coarse_mono_clock; typedef int (*add_dirent_cb_t)(void *p, struct dirent *de, struct ceph_statx *stx, off_t off, Inode *in); struct walk_dentry_result { InodeRef in; std::string alternate_name; }; class CommandHook : public AdminSocketHook { public: explicit CommandHook(Client *client); int call(std::string_view command, const cmdmap_t& cmdmap, const bufferlist&, Formatter *f, std::ostream& errss, bufferlist& out) override; private: Client *m_client; }; // snapshot info returned via get_snap_info(). nothing to do // with SnapInfo on the MDS. struct SnapInfo { snapid_t id; std::map<std::string, std::string> metadata; }; Client(Messenger *m, MonClient *mc, Objecter *objecter_); Client(const Client&) = delete; Client(const Client&&) = delete; virtual ~Client() override; static UserPerm pick_my_perms(CephContext *c) { uid_t uid = c->_conf->client_mount_uid >= 0 ? c->_conf->client_mount_uid : -1; gid_t gid = c->_conf->client_mount_gid >= 0 ? c->_conf->client_mount_gid : -1; return UserPerm(uid, gid); } UserPerm pick_my_perms() { uid_t uid = user_id >= 0 ? user_id : -1; gid_t gid = group_id >= 0 ? 
group_id : -1; return UserPerm(uid, gid); } int mount(const std::string &mount_root, const UserPerm& perms, bool require_mds=false, const std::string &fs_name=""); void unmount(); bool is_unmounting() const { return mount_state.check_current_state(CLIENT_UNMOUNTING); } bool is_mounted() const { return mount_state.check_current_state(CLIENT_MOUNTED); } bool is_mounting() const { return mount_state.check_current_state(CLIENT_MOUNTING); } bool is_initialized() const { return initialize_state.check_current_state(CLIENT_INITIALIZED); } void abort_conn(); void set_uuid(const std::string& uuid); void set_session_timeout(unsigned timeout); int start_reclaim(const std::string& uuid, unsigned flags, const std::string& fs_name); void finish_reclaim(); fs_cluster_id_t get_fs_cid() { return fscid; } int mds_command( const std::string &mds_spec, const std::vector<std::string>& cmd, const bufferlist& inbl, bufferlist *poutbl, std::string *prs, Context *onfinish); // these should (more or less) mirror the actual system calls. int statfs(const char *path, struct statvfs *stbuf, const UserPerm& perms); // crap int chdir(const char *s, std::string &new_cwd, const UserPerm& perms); void _getcwd(std::string& cwd, const UserPerm& perms); void getcwd(std::string& cwd, const UserPerm& perms); // namespace ops int opendir(const char *name, dir_result_t **dirpp, const UserPerm& perms); int fdopendir(int dirfd, dir_result_t **dirpp, const UserPerm& perms); int closedir(dir_result_t *dirp); /** * Fill a directory listing from dirp, invoking cb for each entry * with the given pointer, the dirent, the struct stat, the stmask, * and the offset. * * Returns 0 if it reached the end of the directory. * If @a cb returns a negative error code, stop and return that. */ int readdir_r_cb(dir_result_t *dirp, add_dirent_cb_t cb, void *p, unsigned want=0, unsigned flags=AT_STATX_DONT_SYNC, bool getref=false); struct dirent * readdir(dir_result_t *d); int readdir_r(dir_result_t *dirp, struct dirent *de); int readdirplus_r(dir_result_t *dirp, struct dirent *de, struct ceph_statx *stx, unsigned want, unsigned flags, Inode **out); /* * Get the next snapshot delta entry. * */ int readdir_snapdiff(dir_result_t* dir1, snapid_t snap2, struct dirent* out_de, snapid_t* out_snap); int getdir(const char *relpath, std::list<std::string>& names, const UserPerm& perms); // get the whole dir at once. /** * Returns the length of the buffer that got filled in, or -errno. * If it returns -CEPHFS_ERANGE you just need to increase the size of the * buffer and try again. 
*/ int _getdents(dir_result_t *dirp, char *buf, int buflen, bool ful); // get a bunch of dentries at once int getdents(dir_result_t *dirp, char *buf, int buflen) { return _getdents(dirp, buf, buflen, true); } int getdnames(dir_result_t *dirp, char *buf, int buflen) { return _getdents(dirp, buf, buflen, false); } void rewinddir(dir_result_t *dirp); loff_t telldir(dir_result_t *dirp); void seekdir(dir_result_t *dirp, loff_t offset); int may_delete(const char *relpath, const UserPerm& perms); int link(const char *existing, const char *newname, const UserPerm& perm, std::string alternate_name=""); int unlink(const char *path, const UserPerm& perm); int unlinkat(int dirfd, const char *relpath, int flags, const UserPerm& perm); int rename(const char *from, const char *to, const UserPerm& perm, std::string alternate_name=""); // dirs int mkdir(const char *path, mode_t mode, const UserPerm& perm, std::string alternate_name=""); int mkdirat(int dirfd, const char *relpath, mode_t mode, const UserPerm& perm, std::string alternate_name=""); int mkdirs(const char *path, mode_t mode, const UserPerm& perms); int rmdir(const char *path, const UserPerm& perms); // symlinks int readlink(const char *path, char *buf, loff_t size, const UserPerm& perms); int readlinkat(int dirfd, const char *relpath, char *buf, loff_t size, const UserPerm& perms); int symlink(const char *existing, const char *newname, const UserPerm& perms, std::string alternate_name=""); int symlinkat(const char *target, int dirfd, const char *relpath, const UserPerm& perms, std::string alternate_name=""); // path traversal for high-level interface int walk(std::string_view path, struct walk_dentry_result* result, const UserPerm& perms, bool followsym=true); // inode stuff unsigned statx_to_mask(unsigned int flags, unsigned int want); int stat(const char *path, struct stat *stbuf, const UserPerm& perms, frag_info_t *dirstat=0, int mask=CEPH_STAT_CAP_INODE_ALL); int statx(const char *path, struct ceph_statx *stx, const UserPerm& perms, unsigned int want, unsigned int flags); int lstat(const char *path, struct stat *stbuf, const UserPerm& perms, frag_info_t *dirstat=0, int mask=CEPH_STAT_CAP_INODE_ALL); int setattr(const char *relpath, struct stat *attr, int mask, const UserPerm& perms); int setattrx(const char *relpath, struct ceph_statx *stx, int mask, const UserPerm& perms, int flags=0); int fsetattr(int fd, struct stat *attr, int mask, const UserPerm& perms); int fsetattrx(int fd, struct ceph_statx *stx, int mask, const UserPerm& perms); int chmod(const char *path, mode_t mode, const UserPerm& perms); int fchmod(int fd, mode_t mode, const UserPerm& perms); int chmodat(int dirfd, const char *relpath, mode_t mode, int flags, const UserPerm& perms); int lchmod(const char *path, mode_t mode, const UserPerm& perms); int chown(const char *path, uid_t new_uid, gid_t new_gid, const UserPerm& perms); int fchown(int fd, uid_t new_uid, gid_t new_gid, const UserPerm& perms); int lchown(const char *path, uid_t new_uid, gid_t new_gid, const UserPerm& perms); int chownat(int dirfd, const char *relpath, uid_t new_uid, gid_t new_gid, int flags, const UserPerm& perms); int utime(const char *path, struct utimbuf *buf, const UserPerm& perms); int lutime(const char *path, struct utimbuf *buf, const UserPerm& perms); int futime(int fd, struct utimbuf *buf, const UserPerm& perms); int utimes(const char *relpath, struct timeval times[2], const UserPerm& perms); int lutimes(const char *relpath, struct timeval times[2], const UserPerm& perms); int futimes(int fd, 
struct timeval times[2], const UserPerm& perms); int futimens(int fd, struct timespec times[2], const UserPerm& perms); int utimensat(int dirfd, const char *relpath, struct timespec times[2], int flags, const UserPerm& perms); int flock(int fd, int operation, uint64_t owner); int truncate(const char *path, loff_t size, const UserPerm& perms); // file ops int mknod(const char *path, mode_t mode, const UserPerm& perms, dev_t rdev=0); int create_and_open(int dirfd, const char *relpath, int flags, const UserPerm& perms, mode_t mode, int stripe_unit, int stripe_count, int object_size, const char *data_pool, std::string alternate_name); int open(const char *path, int flags, const UserPerm& perms, mode_t mode=0, std::string alternate_name="") { return open(path, flags, perms, mode, 0, 0, 0, NULL, alternate_name); } int open(const char *path, int flags, const UserPerm& perms, mode_t mode, int stripe_unit, int stripe_count, int object_size, const char *data_pool, std::string alternate_name=""); int openat(int dirfd, const char *relpath, int flags, const UserPerm& perms, mode_t mode, int stripe_unit, int stripe_count, int object_size, const char *data_pool, std::string alternate_name); int openat(int dirfd, const char *path, int flags, const UserPerm& perms, mode_t mode=0, std::string alternate_name="") { return openat(dirfd, path, flags, perms, mode, 0, 0, 0, NULL, alternate_name); } int lookup_hash(inodeno_t ino, inodeno_t dirino, const char *name, const UserPerm& perms); int lookup_ino(inodeno_t ino, const UserPerm& perms, Inode **inode=NULL); int lookup_name(Inode *in, Inode *parent, const UserPerm& perms); int _close(int fd); int close(int fd); loff_t lseek(int fd, loff_t offset, int whence); int read(int fd, char *buf, loff_t size, loff_t offset=-1); int preadv(int fd, const struct iovec *iov, int iovcnt, loff_t offset=-1); int write(int fd, const char *buf, loff_t size, loff_t offset=-1); int pwritev(int fd, const struct iovec *iov, int iovcnt, loff_t offset=-1); int fake_write_size(int fd, loff_t size); int ftruncate(int fd, loff_t size, const UserPerm& perms); int fsync(int fd, bool syncdataonly); int fstat(int fd, struct stat *stbuf, const UserPerm& perms, int mask=CEPH_STAT_CAP_INODE_ALL); int fstatx(int fd, struct ceph_statx *stx, const UserPerm& perms, unsigned int want, unsigned int flags); int statxat(int dirfd, const char *relpath, struct ceph_statx *stx, const UserPerm& perms, unsigned int want, unsigned int flags); int fallocate(int fd, int mode, loff_t offset, loff_t length); // full path xattr ops int getxattr(const char *path, const char *name, void *value, size_t size, const UserPerm& perms); int lgetxattr(const char *path, const char *name, void *value, size_t size, const UserPerm& perms); int fgetxattr(int fd, const char *name, void *value, size_t size, const UserPerm& perms); int listxattr(const char *path, char *list, size_t size, const UserPerm& perms); int llistxattr(const char *path, char *list, size_t size, const UserPerm& perms); int flistxattr(int fd, char *list, size_t size, const UserPerm& perms); int removexattr(const char *path, const char *name, const UserPerm& perms); int lremovexattr(const char *path, const char *name, const UserPerm& perms); int fremovexattr(int fd, const char *name, const UserPerm& perms); int setxattr(const char *path, const char *name, const void *value, size_t size, int flags, const UserPerm& perms); int lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags, const UserPerm& perms); int fsetxattr(int fd, 
const char *name, const void *value, size_t size, int flags, const UserPerm& perms); int sync_fs(); int64_t drop_caches(); int get_snap_info(const char *path, const UserPerm &perms, SnapInfo *snap_info); // hpc lazyio int lazyio(int fd, int enable); int lazyio_propagate(int fd, loff_t offset, size_t count); int lazyio_synchronize(int fd, loff_t offset, size_t count); // expose file layout int describe_layout(const char *path, file_layout_t* layout, const UserPerm& perms); int fdescribe_layout(int fd, file_layout_t* layout); int get_file_stripe_address(int fd, loff_t offset, std::vector<entity_addr_t>& address); int get_file_extent_osds(int fd, loff_t off, loff_t *len, std::vector<int>& osds); int get_osd_addr(int osd, entity_addr_t& addr); // expose mdsmap int64_t get_default_pool_id(); // expose osdmap int get_local_osd(); int get_pool_replication(int64_t pool); int64_t get_pool_id(const char *pool_name); std::string get_pool_name(int64_t pool); int get_osd_crush_location(int id, std::vector<std::pair<std::string, std::string> >& path); int enumerate_layout(int fd, std::vector<ObjectExtent>& result, loff_t length, loff_t offset); int mksnap(const char *path, const char *name, const UserPerm& perm, mode_t mode=0, const std::map<std::string, std::string> &metadata={}); int rmsnap(const char *path, const char *name, const UserPerm& perm, bool check_perms=false); // Inode permission checking int inode_permission(Inode *in, const UserPerm& perms, unsigned want); // expose caps int get_caps_issued(int fd); int get_caps_issued(const char *path, const UserPerm& perms); snapid_t ll_get_snapid(Inode *in); vinodeno_t ll_get_vino(Inode *in) { std::lock_guard lock(client_lock); return _get_vino(in); } // get inode from faked ino Inode *ll_get_inode(ino_t ino); Inode *ll_get_inode(vinodeno_t vino); int ll_lookup(Inode *parent, const char *name, struct stat *attr, Inode **out, const UserPerm& perms); int ll_lookup_inode(struct inodeno_t ino, const UserPerm& perms, Inode **inode); int ll_lookup_vino(vinodeno_t vino, const UserPerm& perms, Inode **inode); int ll_lookupx(Inode *parent, const char *name, Inode **out, struct ceph_statx *stx, unsigned want, unsigned flags, const UserPerm& perms); bool ll_forget(Inode *in, uint64_t count); bool ll_put(Inode *in); int ll_get_snap_ref(snapid_t snap); int ll_getattr(Inode *in, struct stat *st, const UserPerm& perms); int ll_getattrx(Inode *in, struct ceph_statx *stx, unsigned int want, unsigned int flags, const UserPerm& perms); int ll_setattrx(Inode *in, struct ceph_statx *stx, int mask, const UserPerm& perms); int ll_setattr(Inode *in, struct stat *st, int mask, const UserPerm& perms); int ll_getxattr(Inode *in, const char *name, void *value, size_t size, const UserPerm& perms); int ll_setxattr(Inode *in, const char *name, const void *value, size_t size, int flags, const UserPerm& perms); int ll_removexattr(Inode *in, const char *name, const UserPerm& perms); int ll_listxattr(Inode *in, char *list, size_t size, const UserPerm& perms); int ll_opendir(Inode *in, int flags, dir_result_t **dirpp, const UserPerm& perms); int ll_releasedir(dir_result_t* dirp); int ll_fsyncdir(dir_result_t* dirp); int ll_readlink(Inode *in, char *buf, size_t bufsize, const UserPerm& perms); int ll_mknod(Inode *in, const char *name, mode_t mode, dev_t rdev, struct stat *attr, Inode **out, const UserPerm& perms); int ll_mknodx(Inode *parent, const char *name, mode_t mode, dev_t rdev, Inode **out, struct ceph_statx *stx, unsigned want, unsigned flags, const UserPerm& perms); int 
ll_mkdir(Inode *in, const char *name, mode_t mode, struct stat *attr, Inode **out, const UserPerm& perm); int ll_mkdirx(Inode *parent, const char *name, mode_t mode, Inode **out, struct ceph_statx *stx, unsigned want, unsigned flags, const UserPerm& perms); int ll_symlink(Inode *in, const char *name, const char *value, struct stat *attr, Inode **out, const UserPerm& perms); int ll_symlinkx(Inode *parent, const char *name, const char *value, Inode **out, struct ceph_statx *stx, unsigned want, unsigned flags, const UserPerm& perms); int ll_unlink(Inode *in, const char *name, const UserPerm& perm); int ll_rmdir(Inode *in, const char *name, const UserPerm& perms); int ll_rename(Inode *parent, const char *name, Inode *newparent, const char *newname, const UserPerm& perm); int ll_link(Inode *in, Inode *newparent, const char *newname, const UserPerm& perm); int ll_open(Inode *in, int flags, Fh **fh, const UserPerm& perms); int _ll_create(Inode *parent, const char *name, mode_t mode, int flags, InodeRef *in, int caps, Fh **fhp, const UserPerm& perms); int ll_create(Inode *parent, const char *name, mode_t mode, int flags, struct stat *attr, Inode **out, Fh **fhp, const UserPerm& perms); int ll_createx(Inode *parent, const char *name, mode_t mode, int oflags, Inode **outp, Fh **fhp, struct ceph_statx *stx, unsigned want, unsigned lflags, const UserPerm& perms); int ll_read_block(Inode *in, uint64_t blockid, char *buf, uint64_t offset, uint64_t length, file_layout_t* layout); int ll_write_block(Inode *in, uint64_t blockid, char* buf, uint64_t offset, uint64_t length, file_layout_t* layout, uint64_t snapseq, uint32_t sync); int ll_commit_blocks(Inode *in, uint64_t offset, uint64_t length); int ll_statfs(Inode *in, struct statvfs *stbuf, const UserPerm& perms); int ll_walk(const char* name, Inode **i, struct ceph_statx *stx, unsigned int want, unsigned int flags, const UserPerm& perms); uint32_t ll_stripe_unit(Inode *in); int ll_file_layout(Inode *in, file_layout_t *layout); uint64_t ll_snap_seq(Inode *in); int ll_read(Fh *fh, loff_t off, loff_t len, bufferlist *bl); int ll_write(Fh *fh, loff_t off, loff_t len, const char *data); int64_t ll_readv(struct Fh *fh, const struct iovec *iov, int iovcnt, int64_t off); int64_t ll_writev(struct Fh *fh, const struct iovec *iov, int iovcnt, int64_t off); loff_t ll_lseek(Fh *fh, loff_t offset, int whence); int ll_flush(Fh *fh); int ll_fsync(Fh *fh, bool syncdataonly); int ll_sync_inode(Inode *in, bool syncdataonly); int ll_fallocate(Fh *fh, int mode, int64_t offset, int64_t length); int ll_release(Fh *fh); int ll_getlk(Fh *fh, struct flock *fl, uint64_t owner); int ll_setlk(Fh *fh, struct flock *fl, uint64_t owner, int sleep); int ll_flock(Fh *fh, int cmd, uint64_t owner); int ll_lazyio(Fh *fh, int enable); int ll_file_layout(Fh *fh, file_layout_t *layout); void ll_interrupt(void *d); bool ll_handle_umask() { return acl_type != NO_ACL; } int ll_get_stripe_osd(struct Inode *in, uint64_t blockno, file_layout_t* layout); uint64_t ll_get_internal_offset(struct Inode *in, uint64_t blockno); int ll_num_osds(void); int ll_osdaddr(int osd, uint32_t *addr); int ll_osdaddr(int osd, char* buf, size_t size); void _ll_register_callbacks(struct ceph_client_callback_args *args); void ll_register_callbacks(struct ceph_client_callback_args *args); // deprecated int ll_register_callbacks2(struct ceph_client_callback_args *args); std::pair<int, bool> test_dentry_handling(bool can_invalidate); const char** get_tracked_conf_keys() const override; void handle_conf_change(const 
ConfigProxy& conf, const std::set <std::string> &changed) override; uint32_t get_deleg_timeout() { return deleg_timeout; } int set_deleg_timeout(uint32_t timeout); int ll_delegation(Fh *fh, unsigned cmd, ceph_deleg_cb_t cb, void *priv); entity_name_t get_myname() { return messenger->get_myname(); } void wait_on_list(std::list<ceph::condition_variable*>& ls); void signal_cond_list(std::list<ceph::condition_variable*>& ls); void set_filer_flags(int flags); void clear_filer_flags(int flags); void tear_down_cache(); void update_metadata(std::string const &k, std::string const &v); client_t get_nodeid() { return whoami; } inodeno_t get_root_ino(); Inode *get_root(); virtual int init(); virtual void shutdown(); // messaging void cancel_commands(const MDSMap& newmap); void handle_mds_map(const MConstRef<MMDSMap>& m); void handle_fs_map(const MConstRef<MFSMap>& m); void handle_fs_map_user(const MConstRef<MFSMapUser>& m); void handle_osd_map(const MConstRef<MOSDMap>& m); void handle_lease(const MConstRef<MClientLease>& m); // inline data int uninline_data(Inode *in, Context *onfinish); // file caps void check_cap_issue(Inode *in, unsigned issued); void add_update_cap(Inode *in, MetaSession *session, uint64_t cap_id, unsigned issued, unsigned wanted, unsigned seq, unsigned mseq, inodeno_t realm, int flags, const UserPerm& perms); void remove_cap(Cap *cap, bool queue_release); void remove_all_caps(Inode *in); void remove_session_caps(MetaSession *session, int err); int mark_caps_flushing(Inode *in, ceph_tid_t *ptid); void adjust_session_flushing_caps(Inode *in, MetaSession *old_s, MetaSession *new_s); void flush_caps_sync(); void kick_flushing_caps(Inode *in, MetaSession *session); void kick_flushing_caps(MetaSession *session); void early_kick_flushing_caps(MetaSession *session); int get_caps(Fh *fh, int need, int want, int *have, loff_t endoff); int get_caps_used(Inode *in); void maybe_update_snaprealm(SnapRealm *realm, snapid_t snap_created, snapid_t snap_highwater, std::vector<snapid_t>& snaps); void handle_quota(const MConstRef<MClientQuota>& m); void handle_snap(const MConstRef<MClientSnap>& m); void handle_caps(const MConstRef<MClientCaps>& m); void handle_cap_import(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m); void handle_cap_export(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m); void handle_cap_trunc(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m); void handle_cap_flush_ack(MetaSession *session, Inode *in, Cap *cap, const MConstRef<MClientCaps>& m); void handle_cap_flushsnap_ack(MetaSession *session, Inode *in, const MConstRef<MClientCaps>& m); void handle_cap_grant(MetaSession *session, Inode *in, Cap *cap, const MConstRef<MClientCaps>& m); void cap_delay_requeue(Inode *in); void send_cap(Inode *in, MetaSession *session, Cap *cap, int flags, int used, int want, int retain, int flush, ceph_tid_t flush_tid); void send_flush_snap(Inode *in, MetaSession *session, snapid_t follows, CapSnap& capsnap); void flush_snaps(Inode *in); void get_cap_ref(Inode *in, int cap); void put_cap_ref(Inode *in, int cap); void wait_sync_caps(Inode *in, ceph_tid_t want); void wait_sync_caps(ceph_tid_t want); void queue_cap_snap(Inode *in, SnapContext &old_snapc); void finish_cap_snap(Inode *in, CapSnap &capsnap, int used); void _schedule_invalidate_dentry_callback(Dentry *dn, bool del); void _async_dentry_invalidate(vinodeno_t dirino, vinodeno_t ino, std::string& name); void _try_to_trim_inode(Inode *in, bool sched_inval); void 
  void _schedule_invalidate_callback(Inode *in, int64_t off, int64_t len);
  void _invalidate_inode_cache(Inode *in);
  void _invalidate_inode_cache(Inode *in, int64_t off, int64_t len);
  void _async_invalidate(vinodeno_t ino, int64_t off, int64_t len);

  void _schedule_ino_release_callback(Inode *in);
  void _async_inode_release(vinodeno_t ino);

  bool _release(Inode *in);

  /**
   * Initiate a flush of the data associated with the given inode.
   * If you specify a Context, you are responsible for holding an inode
   * reference for the duration of the flush. If not, _flush() will
   * take the reference for you.
   * @param in The Inode whose data you wish to flush.
   * @param c The Context you wish us to complete once the data is
   * flushed. If already flushed, this will be called in-line.
   *
   * @returns true if the data was already flushed, false otherwise.
   */
  bool _flush(Inode *in, Context *c);
  void _flush_range(Inode *in, int64_t off, uint64_t size);
  void _flushed(Inode *in);
  void flush_set_callback(ObjectCacher::ObjectSet *oset);

  void close_release(Inode *in);
  void close_safe(Inode *in);

  void lock_fh_pos(Fh *f);
  void unlock_fh_pos(Fh *f);

  // metadata cache
  void update_dir_dist(Inode *in, DirStat *st, mds_rank_t from);

  void clear_dir_complete_and_ordered(Inode *diri, bool complete);
  void insert_readdir_results(MetaRequest *request, MetaSession *session,
                              Inode *diri, Inode *diri_other);
  Inode* insert_trace(MetaRequest *request, MetaSession *session);
  void update_inode_file_size(Inode *in, int issued, uint64_t size,
                              uint64_t truncate_seq, uint64_t truncate_size);
  void update_inode_file_time(Inode *in, int issued, uint64_t time_warp_seq,
                              utime_t ctime, utime_t mtime, utime_t atime);

  Inode *add_update_inode(InodeStat *st, utime_t ttl, MetaSession *session,
                          const UserPerm& request_perms);
  Dentry *insert_dentry_inode(Dir *dir, const std::string& dname, LeaseStat *dlease,
                              Inode *in, utime_t from, MetaSession *session,
                              Dentry *old_dentry = NULL);
  void update_dentry_lease(Dentry *dn, LeaseStat *dlease, utime_t from, MetaSession *session);

  bool use_faked_inos() { return _use_faked_inos; }
  vinodeno_t map_faked_ino(ino_t ino);

  // notify the mds to flush the mdlog
  void flush_mdlog_sync(Inode *in);
  void flush_mdlog_sync();
  void flush_mdlog(MetaSession *session);

  void renew_caps();
  void renew_caps(MetaSession *session);
  void flush_cap_releases();
  void renew_and_flush_cap_releases();
  void tick();
  void start_tick_thread();

  void update_read_io_size(size_t size) {
    total_read_ops++;
    total_read_size += size;
  }

  void update_write_io_size(size_t size) {
    total_write_ops++;
    total_write_size += size;
  }

  void inc_dentry_nr() { ++dentry_nr; }
  void dec_dentry_nr() { --dentry_nr; }
  void dlease_hit() { ++dlease_hits; }
  void dlease_miss() { ++dlease_misses; }
  std::tuple<uint64_t, uint64_t, uint64_t> get_dlease_hit_rates() {
    return std::make_tuple(dlease_hits, dlease_misses, dentry_nr);
  }
  void cap_hit() { ++cap_hits; }
  void cap_miss() { ++cap_misses; }
  std::pair<uint64_t, uint64_t> get_cap_hit_rates() {
    return std::make_pair(cap_hits, cap_misses);
  }
  void inc_opened_files() { ++opened_files; }
  void dec_opened_files() { --opened_files; }
  std::pair<uint64_t, uint64_t> get_opened_files_rates() {
    return std::make_pair(opened_files, inode_map.size());
  }
  void inc_pinned_icaps() { ++pinned_icaps; }
  void dec_pinned_icaps(uint64_t nr=1) { pinned_icaps -= nr; }
  std::pair<uint64_t, uint64_t> get_pinned_icaps_rates() {
    return std::make_pair(pinned_icaps, inode_map.size());
  }
  void inc_opened_inodes() { ++opened_inodes; }
  void dec_opened_inodes() { --opened_inodes; }
  std::pair<uint64_t, uint64_t> get_opened_inodes_rates() {
    return std::make_pair(opened_inodes, inode_map.size());
  }

  /* timer_lock for 'timer' */
  ceph::mutex timer_lock = ceph::make_mutex("Client::timer_lock");
  SafeTimer timer;

  /* tick thread */
  std::thread upkeeper;
  ceph::condition_variable upkeep_cond;
  bool tick_thread_stopped = false;

  std::unique_ptr<PerfCounters> logger;
  std::unique_ptr<MDSMap> mdsmap;

  bool fuse_default_permissions;
  bool _collect_and_send_global_metrics;

protected:
  std::list<ceph::condition_variable*> waiting_for_reclaim;
  /* Flags for check_caps() */
  static const unsigned CHECK_CAPS_NODELAY = 0x1;
  static const unsigned CHECK_CAPS_SYNCHRONOUS = 0x2;

  void check_caps(Inode *in, unsigned flags);

  void set_cap_epoch_barrier(epoch_t e);

  void handle_command_reply(const MConstRef<MCommandReply>& m);
  int fetch_fsmap(bool user);
  int resolve_mds(
      const std::string &mds_spec,
      std::vector<mds_gid_t> *targets);

  void get_session_metadata(std::map<std::string, std::string> *meta) const;
  bool have_open_session(mds_rank_t mds);
  void got_mds_push(MetaSession *s);
  MetaSessionRef _get_mds_session(mds_rank_t mds, Connection *con);  ///< return session for mds *and* con; null otherwise
  MetaSessionRef _get_or_open_mds_session(mds_rank_t mds);
  MetaSessionRef _open_mds_session(mds_rank_t mds);
  void _close_mds_session(MetaSession *s);
  void _closed_mds_session(MetaSession *s, int err=0, bool rejected=false);
  bool _any_stale_sessions() const;
  void _kick_stale_sessions();
  void handle_client_session(const MConstRef<MClientSession>& m);
  void send_reconnect(MetaSession *s);
  void resend_unsafe_requests(MetaSession *s);
  void wait_unsafe_requests();

  void dump_mds_requests(Formatter *f);
  void dump_mds_sessions(Formatter *f, bool cap_dump=false);

  int make_request(MetaRequest *req, const UserPerm& perms,
                   InodeRef *ptarget = 0, bool *pcreated = 0,
                   mds_rank_t use_mds=-1, bufferlist *pdirbl=0,
                   size_t feature_needed=ULONG_MAX);
  void put_request(MetaRequest *request);
  void unregister_request(MetaRequest *request);

  int verify_reply_trace(int r, MetaSession *session, MetaRequest *request,
                         const MConstRef<MClientReply>& reply,
                         InodeRef *ptarget, bool *pcreated,
                         const UserPerm& perms);
  void encode_cap_releases(MetaRequest *request, mds_rank_t mds);
  int encode_inode_release(Inode *in, MetaRequest *req,
                           mds_rank_t mds, int drop,
                           int unless, int force=0);
  void encode_dentry_release(Dentry *dn, MetaRequest *req,
                             mds_rank_t mds, int drop, int unless);
  mds_rank_t choose_target_mds(MetaRequest *req, Inode** phash_diri=NULL);
  void connect_mds_targets(mds_rank_t mds);
  void send_request(MetaRequest *request, MetaSession *session,
                    bool drop_cap_releases=false);
  MRef<MClientRequest> build_client_request(MetaRequest *request, mds_rank_t mds);
  void kick_requests(MetaSession *session);
  void kick_requests_closed(MetaSession *session);
  void handle_client_request_forward(const MConstRef<MClientRequestForward>& reply);
  void handle_client_reply(const MConstRef<MClientReply>& reply);
  bool is_dir_operation(MetaRequest *request);

  int path_walk(const filepath& fp, struct walk_dentry_result* result, const UserPerm& perms,
                bool followsym=true, int mask=0, InodeRef dirinode=nullptr);
  int path_walk(const filepath& fp, InodeRef *end, const UserPerm& perms,
                bool followsym=true, int mask=0, InodeRef dirinode=nullptr);

  // fake inode number for 32-bit ino_t
  void _assign_faked_ino(Inode *in);
  void _assign_faked_root(Inode *in);
  void _release_faked_ino(Inode *in);
  void _reset_faked_inos();
  vinodeno_t _map_faked_ino(ino_t ino);
  // Optional extra metadata about me to send to the MDS
  void populate_metadata(const std::string &mount_root);

  SnapRealm *get_snap_realm(inodeno_t r);
  SnapRealm *get_snap_realm_maybe(inodeno_t r);
  void put_snap_realm(SnapRealm *realm);
  bool adjust_realm_parent(SnapRealm *realm, inodeno_t parent);
  void update_snap_trace(MetaSession *session, const bufferlist& bl,
                         SnapRealm **realm_ret, bool must_flush=true);
  void invalidate_snaprealm_and_children(SnapRealm *realm);

  void refresh_snapdir_attrs(Inode *in, Inode *diri);
  Inode *open_snapdir(Inode *diri);

  int get_fd() {
    int fd = free_fd_set.range_start();
    free_fd_set.erase(fd, 1);
    return fd;
  }
  void put_fd(int fd) {
    free_fd_set.insert(fd, 1);
  }

  /*
   * Resolve file descriptor, or return NULL.
   */
  Fh *get_filehandle(int fd) {
    auto it = fd_map.find(fd);
    if (it == fd_map.end())
      return NULL;
    return it->second;
  }
  int get_fd_inode(int fd, InodeRef *in);

  // helpers
  void wake_up_session_caps(MetaSession *s, bool reconnect);

  void wait_on_context_list(std::list<Context*>& ls);
  void signal_context_list(std::list<Context*>& ls);

  // -- metadata cache stuff

  // decrease inode ref.  delete if dangling.
  void _put_inode(Inode *in, int n);
  void delay_put_inodes(bool wakeup=false);
  void put_inode(Inode *in, int n=1);
  void close_dir(Dir *dir);

  int subscribe_mdsmap(const std::string &fs_name="");

  void _abort_mds_sessions(int err);

  // same as unmount() but for when the client_lock is already held
  void _unmount(bool abort);

  //int get_cache_size() { return lru.lru_get_size(); }

  /**
   * Don't call this with in==NULL, use get_or_create for that
   * leave dn set to default NULL unless you're trying to add
   * a new inode to a pre-created Dentry
   */
  Dentry* link(Dir *dir, const std::string& name, Inode *in, Dentry *dn);
  void unlink(Dentry *dn, bool keepdir, bool keepdentry);

  int fill_stat(Inode *in, struct stat *st, frag_info_t *dirstat=0,
                nest_info_t *rstat=0);
  int fill_stat(InodeRef& in, struct stat *st, frag_info_t *dirstat=0,
                nest_info_t *rstat=0) {
    return fill_stat(in.get(), st, dirstat, rstat);
  }
  void fill_statx(Inode *in, unsigned int mask, struct ceph_statx *stx);
  void fill_statx(InodeRef& in, unsigned int mask, struct ceph_statx *stx) {
    return fill_statx(in.get(), mask, stx);
  }

  void touch_dn(Dentry *dn);
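  // Sketch of the fd lifecycle implied by get_fd()/put_fd() above, assuming
  // free_fd_set was seeded with the usable descriptor range at startup:
  //
  //   int fd = get_fd();   // carve the lowest free fd out of the interval set
  //   fd_map[fd] = fh;     // publish it so get_filehandle(fd) can resolve it
  //   /* ... */
  //   fd_map.erase(fd);
  //   put_fd(fd);          // return it to free_fd_set for reuse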
  // trim cache.
  void trim_cache(bool trim_kernel_dcache=false);
  void trim_cache_for_reconnect(MetaSession *s);
  void trim_dentry(Dentry *dn);
  void trim_caps(MetaSession *s, uint64_t max);
  void _invalidate_kernel_dcache();
  void _trim_negative_child_dentries(InodeRef& in);

  void dump_inode(Formatter *f, Inode *in, set<Inode*>& did, bool disconnected);
  void dump_cache(Formatter *f);  // debug

  // force read-only
  void force_session_readonly(MetaSession *s);

  void dump_status(Formatter *f);  // debug

  bool ms_dispatch2(const MessageRef& m) override;

  void ms_handle_connect(Connection *con) override;
  bool ms_handle_reset(Connection *con) override;
  void ms_handle_remote_reset(Connection *con) override;
  bool ms_handle_refused(Connection *con) override;

  int authenticate();

  Inode* get_quota_root(Inode *in, const UserPerm& perms, quota_max_t type=QUOTA_ANY);
  bool check_quota_condition(Inode *in, const UserPerm& perms,
                             std::function<bool (const Inode &)> test);
  bool is_quota_files_exceeded(Inode *in, const UserPerm& perms);
  bool is_quota_bytes_exceeded(Inode *in, int64_t new_bytes,
                               const UserPerm& perms);
  bool is_quota_bytes_approaching(Inode *in, const UserPerm& perms);

  int check_pool_perm(Inode *in, int need);

  void handle_client_reclaim_reply(const MConstRef<MClientReclaimReply>& reply);

  /**
   * Call this when an OSDMap is seen with a full flag (global or per pool)
   * set.
   *
   * @param pool the pool ID affected, or -1 if all.
   */
  void _handle_full_flag(int64_t pool);

  void _close_sessions();

  void _pre_init();

  /**
   * The basic housekeeping parts of init (perf counters, admin socket)
   * that is independent of how objecters/monclient/messengers are
   * being set up.
   */
  void _finish_init();

  // global client lock
  //  - protects Client and buffer cache both!
  ceph::mutex client_lock = ceph::make_mutex("Client::client_lock");

  std::map<snapid_t, int> ll_snap_ref;

  InodeRef root = nullptr;
  map<Inode*, InodeRef> root_parents;
  Inode* root_ancestor = nullptr;
  LRU lru;    // lru list of Dentry's in our local metadata cache.
  InodeRef cwd;

  std::unique_ptr<Filer> filer;
  std::unique_ptr<ObjectCacher> objectcacher;
  std::unique_ptr<WritebackHandler> writeback_handler;

  Messenger *messenger;
  MonClient *monclient;
  Objecter *objecter;

  client_t whoami;

  /* The state migration mechanism */
  enum _state {
    /* For the initialize_state */
    CLIENT_NEW, // The initial state for the initialize_state or after Client::shutdown()
    CLIENT_INITIALIZING, // At the beginning of Client::init()
    CLIENT_INITIALIZED, // At the end of Client::init()

    /* For the mount_state */
    CLIENT_UNMOUNTED, // The initial state for the mount_state or after unmounted
    CLIENT_MOUNTING, // At the beginning of Client::mount()
    CLIENT_MOUNTED, // At the end of Client::mount()
    CLIENT_UNMOUNTING, // At the beginning of Client::_unmount()
  };
  typedef enum _state state_t;
  using RWRef_t = RWRef<state_t>;

  struct mount_state_t : public RWRefState<state_t> {
    public:
      bool is_valid_state(state_t state) const override {
        switch (state) {
          case Client::CLIENT_MOUNTING:
          case Client::CLIENT_MOUNTED:
          case Client::CLIENT_UNMOUNTING:
          case Client::CLIENT_UNMOUNTED:
            return true;
          default:
            return false;
        }
      }

      int check_reader_state(state_t require) const override {
        if (require == Client::CLIENT_MOUNTING &&
            (state == Client::CLIENT_MOUNTING || state == Client::CLIENT_MOUNTED))
          return true;
        else
          return false;
      }

      /* The state migration check */
      int check_writer_state(state_t require) const override {
        if (require == Client::CLIENT_MOUNTING &&
            state == Client::CLIENT_UNMOUNTED)
          return true;
        else if (require == Client::CLIENT_MOUNTED &&
            state == Client::CLIENT_MOUNTING)
          return true;
        else if (require == Client::CLIENT_UNMOUNTING &&
            state == Client::CLIENT_MOUNTED)
          return true;
        else if (require == Client::CLIENT_UNMOUNTED &&
            state == Client::CLIENT_UNMOUNTING)
          return true;
        else
          return false;
      }

      mount_state_t(state_t state, const char *lockname, uint64_t reader_cnt=0)
        : RWRefState (state, lockname, reader_cnt) {}
      ~mount_state_t() {}
  };

  struct initialize_state_t : public RWRefState<state_t> {
    public:
      bool is_valid_state(state_t state) const override {
        switch (state) {
          case Client::CLIENT_NEW:
          case Client::CLIENT_INITIALIZING:
          case Client::CLIENT_INITIALIZED:
            return true;
          default:
            return false;
        }
      }

      int check_reader_state(state_t require) const override {
        if (require == Client::CLIENT_INITIALIZED &&
            state >= Client::CLIENT_INITIALIZED)
          return true;
        else
          return false;
      }

      /* The state migration check */
      int check_writer_state(state_t require) const override {
        if (require == Client::CLIENT_INITIALIZING &&
            (state == Client::CLIENT_NEW))
          return true;
        else if (require == Client::CLIENT_INITIALIZED &&
            (state == Client::CLIENT_INITIALIZING))
          return true;
        else if (require == Client::CLIENT_NEW &&
            (state == Client::CLIENT_INITIALIZED))
          return true;
        else
          return false;
      }

      initialize_state_t(state_t state, const char *lockname, uint64_t reader_cnt=0)
        : RWRefState (state, lockname, reader_cnt) {}
      ~initialize_state_t() {}
  };

  struct mount_state_t mount_state;
  struct initialize_state_t initialize_state;

private:
  struct C_Readahead : public Context {
    C_Readahead(Client *c, Fh *f);
    ~C_Readahead() override;
    void finish(int r) override;
    Client *client;
    Fh *f;
  };
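  // The writer-side checks above admit two small transition chains, roughly:
  //
  //   initialize_state: NEW -> INITIALIZING -> INITIALIZED -> NEW
  //   mount_state:      UNMOUNTED -> MOUNTING -> MOUNTED -> UNMOUNTING -> UNMOUNTED
  //
  // e.g. a writer ref requiring CLIENT_MOUNTING only succeeds while the
  // current state is CLIENT_UNMOUNTED.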
  /*
   * These define virtual xattrs exposing the recursive directory
   * statistics and layout metadata.
   */
  struct VXattr {
    const std::string name;
    size_t (Client::*getxattr_cb)(Inode *in, char *val, size_t size);
    int (Client::*setxattr_cb)(Inode *in, const void *val, size_t size,
                               const UserPerm& perms);
    bool readonly;
    bool (Client::*exists_cb)(Inode *in);
    unsigned int flags;
  };

  enum {
    NO_ACL = 0,
    POSIX_ACL,
  };

  enum {
    MAY_EXEC = 1,
    MAY_WRITE = 2,
    MAY_READ = 4,
  };

  typedef std::function<void(dir_result_t*, MetaRequest*, InodeRef&, frag_t)> fill_readdir_args_cb_t;

  std::unique_ptr<CephContext, std::function<void(CephContext*)>> cct_deleter;

  /* Flags for VXattr */
  static const unsigned VXATTR_RSTAT = 0x1;
  static const unsigned VXATTR_DIRSTAT = 0x2;

  static const VXattr _dir_vxattrs[];
  static const VXattr _file_vxattrs[];
  static const VXattr _common_vxattrs[];

  bool is_reserved_vino(vinodeno_t &vino);

  void fill_dirent(struct dirent *de, const char *name, int type, uint64_t ino, loff_t next_off);

  int _opendir(Inode *in, dir_result_t **dirpp, const UserPerm& perms);
  void _readdir_drop_dirp_buffer(dir_result_t *dirp);
  bool _readdir_have_frag(dir_result_t *dirp);
  void _readdir_next_frag(dir_result_t *dirp);
  void _readdir_rechoose_frag(dir_result_t *dirp);
  int _readdir_get_frag(int op, dir_result_t *dirp,
                        fill_readdir_args_cb_t fill_req_cb);
  int _readdir_cache_cb(dir_result_t *dirp, add_dirent_cb_t cb, void *p, int caps, bool getref);
  int _readdir_r_cb(int op, dir_result_t* d, add_dirent_cb_t cb,
                    fill_readdir_args_cb_t fill_cb, void* p, unsigned want,
                    unsigned flags, bool getref, bool bypass_cache);
  void _closedir(dir_result_t *dirp);

  // other helpers
  void _fragmap_remove_non_leaves(Inode *in);
  void _fragmap_remove_stopped_mds(Inode *in, mds_rank_t mds);

  void _ll_get(Inode *in);
  int _ll_put(Inode *in, uint64_t num);
  void _ll_drop_pins();

  Fh *_create_fh(Inode *in, int flags, int cmode, const UserPerm& perms);
  int _release_fh(Fh *fh);
  void _put_fh(Fh *fh);

  std::pair<int, bool> _do_remount(bool retry_on_error);

  int _read_sync(Fh *f, uint64_t off, uint64_t len, bufferlist *bl, bool *checkeof);
  int _read_async(Fh *f, uint64_t off, uint64_t len, bufferlist *bl);

  bool _dentry_valid(const Dentry *dn);

  // internal interface
  //   call these with client_lock held!
  int _do_lookup(Inode *dir, const std::string& name, int mask, InodeRef *target,
                 const UserPerm& perms);

  int _lookup(Inode *dir, const std::string& dname, int mask, InodeRef *target,
              const UserPerm& perm, std::string* alternate_name=nullptr,
              bool is_rename=false);

  int _link(Inode *in, Inode *dir, const char *name, const UserPerm& perm,
            std::string alternate_name, InodeRef *inp = 0);
  int _unlink(Inode *dir, const char *name, const UserPerm& perm);
  int _rename(Inode *olddir, const char *oname, Inode *ndir, const char *nname,
              const UserPerm& perm, std::string alternate_name);
  int _mkdir(Inode *dir, const char *name, mode_t mode, const UserPerm& perm,
             InodeRef *inp = 0, const std::map<std::string, std::string> &metadata={},
             std::string alternate_name="");
  int _rmdir(Inode *dir, const char *name, const UserPerm& perms);
  int _symlink(Inode *dir, const char *name, const char *target,
               const UserPerm& perms, std::string alternate_name, InodeRef *inp = 0);
  int _mknod(Inode *dir, const char *name, mode_t mode, dev_t rdev,
             const UserPerm& perms, InodeRef *inp = 0);
  int _do_setattr(Inode *in, struct ceph_statx *stx, int mask,
                  const UserPerm& perms, InodeRef *inp,
                  std::vector<uint8_t>* aux=nullptr);
  void stat_to_statx(struct stat *st, struct ceph_statx *stx);
  int __setattrx(Inode *in, struct ceph_statx *stx, int mask,
                 const UserPerm& perms, InodeRef *inp = 0);
  int _setattrx(InodeRef &in, struct ceph_statx *stx, int mask,
                const UserPerm& perms);
  int _setattr(InodeRef &in, struct stat *attr, int mask,
               const UserPerm& perms);
  int _ll_setattrx(Inode *in, struct ceph_statx *stx, int mask,
                   const UserPerm& perms, InodeRef *inp = 0);
  int _getattr(Inode *in, int mask, const UserPerm& perms, bool force=false);
  int _getattr(InodeRef &in, int mask, const UserPerm& perms, bool force=false) {
    return _getattr(in.get(), mask, perms, force);
  }
  int _readlink(Inode *in, char *buf, size_t size);
  int _getxattr(Inode *in, const char *name, void *value, size_t len,
                const UserPerm& perms);
  int _getxattr(InodeRef &in, const char *name, void *value, size_t len,
                const UserPerm& perms);
  int _getvxattr(Inode *in, const UserPerm& perms, const char *attr_name,
                 ssize_t size, void *value, mds_rank_t rank);
  int _listxattr(Inode *in, char *names, size_t len, const UserPerm& perms);
  int _do_setxattr(Inode *in, const char *name, const void *value, size_t len,
                   int flags, const UserPerm& perms);
  int _setxattr(Inode *in, const char *name, const void *value, size_t len,
                int flags, const UserPerm& perms);
  int _setxattr(InodeRef &in, const char *name, const void *value, size_t len,
                int flags, const UserPerm& perms);
  int _setxattr_check_data_pool(std::string& name, std::string& value, const OSDMap *osdmap);
  void _setxattr_maybe_wait_for_osdmap(const char *name, const void *value, size_t len);
  int _removexattr(Inode *in, const char *nm, const UserPerm& perms);
  int _removexattr(InodeRef &in, const char *nm, const UserPerm& perms);
  int _open(Inode *in, int flags, mode_t mode, Fh **fhp,
            const UserPerm& perms);
  int _renew_caps(Inode *in);
  int _create(Inode *in, const char *name, int flags, mode_t mode, InodeRef *inp,
              Fh **fhp, int stripe_unit, int stripe_count, int object_size,
              const char *data_pool, bool *created, const UserPerm &perms,
              std::string alternate_name);

  loff_t _lseek(Fh *fh, loff_t offset, int whence);
  int64_t _read(Fh *fh, int64_t offset, uint64_t size, bufferlist *bl);
  int64_t _write(Fh *fh, int64_t offset, uint64_t size, const char *buf,
                 const struct iovec *iov, int iovcnt);
  int64_t _preadv_pwritev_locked(Fh *fh, const struct iovec *iov,
                                 unsigned iovcnt, int64_t offset,
                                 bool write, bool clamp_to_int);
  int _preadv_pwritev(int fd, const struct iovec *iov, unsigned iovcnt,
                      int64_t offset, bool write);
  int _flush(Fh *fh);
  int _fsync(Fh *fh, bool syncdataonly);
  int _fsync(Inode *in, bool syncdataonly);
  int _sync_fs();
  int clear_suid_sgid(Inode *in, const UserPerm& perms, bool defer=false);
  int _fallocate(Fh *fh, int mode, int64_t offset, int64_t length);
  int _getlk(Fh *fh, struct flock *fl, uint64_t owner);
  int _setlk(Fh *fh, struct flock *fl, uint64_t owner, int sleep);
  int _flock(Fh *fh, int cmd, uint64_t owner);
  int _lazyio(Fh *fh, int enable);

  Dentry *get_or_create(Inode *dir, const char* name);

  int xattr_permission(Inode *in, const char *name, unsigned want,
                       const UserPerm& perms);
  int may_setattr(Inode *in, struct ceph_statx *stx, int mask,
                  const UserPerm& perms);
  int may_open(Inode *in, int flags, const UserPerm& perms);
  int may_lookup(Inode *dir, const UserPerm& perms);
  int may_create(Inode *dir, const UserPerm& perms);
  int may_delete(Inode *dir, const char *name, const UserPerm& perms);
  int may_hardlink(Inode *in, const UserPerm& perms);

  int _getattr_for_perm(Inode *in, const UserPerm& perms);

  vinodeno_t _get_vino(Inode *in);

  bool _vxattrcb_fscrypt_auth_exists(Inode *in);
  size_t _vxattrcb_fscrypt_auth(Inode *in, char *val, size_t size);
  int _vxattrcb_fscrypt_auth_set(Inode *in, const void *val, size_t size,
                                 const UserPerm& perms);
  bool _vxattrcb_fscrypt_file_exists(Inode *in);
  size_t _vxattrcb_fscrypt_file(Inode *in, char *val, size_t size);
  int _vxattrcb_fscrypt_file_set(Inode *in, const void *val, size_t size,
                                 const UserPerm& perms);
  bool _vxattrcb_quota_exists(Inode *in);
  size_t _vxattrcb_quota(Inode *in, char *val, size_t size);
  size_t _vxattrcb_quota_max_bytes(Inode *in, char *val, size_t size);
  size_t _vxattrcb_quota_max_files(Inode *in, char *val, size_t size);

  bool _vxattrcb_layout_exists(Inode *in);
  size_t _vxattrcb_layout(Inode *in, char *val, size_t size);
  size_t _vxattrcb_layout_stripe_unit(Inode *in, char *val, size_t size);
  size_t _vxattrcb_layout_stripe_count(Inode *in, char *val, size_t size);
  size_t _vxattrcb_layout_object_size(Inode *in, char *val, size_t size);
  size_t _vxattrcb_layout_pool(Inode *in, char *val, size_t size);
  size_t _vxattrcb_layout_pool_namespace(Inode *in, char *val, size_t size);
  size_t _vxattrcb_dir_entries(Inode *in, char *val, size_t size);
  size_t _vxattrcb_dir_files(Inode *in, char *val, size_t size);
  size_t _vxattrcb_dir_subdirs(Inode *in, char *val, size_t size);
  size_t _vxattrcb_dir_rentries(Inode *in, char *val, size_t size);
  size_t _vxattrcb_dir_rfiles(Inode *in, char *val, size_t size);
  size_t _vxattrcb_dir_rsubdirs(Inode *in, char *val, size_t size);
  size_t _vxattrcb_dir_rsnaps(Inode *in, char *val, size_t size);
  size_t _vxattrcb_dir_rbytes(Inode *in, char *val, size_t size);
  size_t _vxattrcb_dir_rctime(Inode *in, char *val, size_t size);

  bool _vxattrcb_dir_pin_exists(Inode *in);
  size_t _vxattrcb_dir_pin(Inode *in, char *val, size_t size);

  bool _vxattrcb_snap_btime_exists(Inode *in);
  size_t _vxattrcb_snap_btime(Inode *in, char *val, size_t size);

  size_t _vxattrcb_caps(Inode *in, char *val, size_t size);

  bool _vxattrcb_mirror_info_exists(Inode *in);
  size_t _vxattrcb_mirror_info(Inode *in, char *val, size_t size);

  size_t _vxattrcb_cluster_fsid(Inode *in, char *val, size_t size);
  size_t _vxattrcb_client_id(Inode *in, char *val, size_t size);

  static const VXattr *_get_vxattrs(Inode *in);
  static const VXattr *_match_vxattr(Inode *in, const char *name);
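  // Resolution sketch: a getxattr for a virtual name such as
  // "ceph.dir.rbytes" is routed through _get_vxattrs()/_match_vxattr() to the
  // matching _vxattrcb_* getter above, rather than to stored xattr data.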
  int _do_filelock(Inode *in, Fh *fh, int lock_type, int op, int sleep,
                   struct flock *fl, uint64_t owner, bool removing=false);
  int _interrupt_filelock(MetaRequest *req);
  void _encode_filelocks(Inode *in, bufferlist& bl);
  void _release_filelocks(Fh *fh);
  void _update_lock_state(struct flock *fl, uint64_t owner,
                          ceph_lock_state_t *lock_state);

  int _posix_acl_create(Inode *dir, mode_t *mode, bufferlist& xattrs_bl,
                        const UserPerm& perms);
  int _posix_acl_chmod(Inode *in, mode_t mode, const UserPerm& perms);
  int _posix_acl_permission(Inode *in, const UserPerm& perms, unsigned want);

  mds_rank_t _get_random_up_mds() const;

  int _ll_getattr(Inode *in, int caps, const UserPerm& perms);
  int _lookup_parent(Inode *in, const UserPerm& perms, Inode **parent=NULL);
  int _lookup_name(Inode *in, Inode *parent, const UserPerm& perms);
  int _lookup_vino(vinodeno_t ino, const UserPerm& perms, Inode **inode=NULL);
  bool _ll_forget(Inode *in, uint64_t count);

  void collect_and_send_metrics();
  void collect_and_send_global_metrics();
  void update_io_stat_metadata(utime_t latency);
  void update_io_stat_read(utime_t latency);
  void update_io_stat_write(utime_t latency);

  uint32_t deleg_timeout = 0;

  client_switch_interrupt_callback_t switch_interrupt_cb = nullptr;
  client_remount_callback_t remount_cb = nullptr;
  client_ino_callback_t ino_invalidate_cb = nullptr;
  client_dentry_callback_t dentry_invalidate_cb = nullptr;
  client_umask_callback_t umask_cb = nullptr;
  client_ino_release_t ino_release_cb = nullptr;
  void *callback_handle = nullptr;
  bool can_invalidate_dentries = false;

  Finisher async_ino_invalidator;
  Finisher async_dentry_invalidator;
  Finisher interrupt_finisher;
  Finisher remount_finisher;
  Finisher async_ino_releasor;
  Finisher objecter_finisher;

  ceph::coarse_mono_time last_cap_renew;

  CommandHook m_command_hook;

  int user_id, group_id;
  int acl_type = NO_ACL;

  epoch_t cap_epoch_barrier = 0;

  // mds sessions
  map<mds_rank_t, MetaSessionRef> mds_sessions;  // mds -> push seq
  std::set<mds_rank_t> mds_ranks_closing;  // mds ranks currently tearing down sessions
  std::list<ceph::condition_variable*> waiting_for_mdsmap;

  // FSMap, for when using mds_command
  std::list<ceph::condition_variable*> waiting_for_fsmap;
  std::unique_ptr<FSMap> fsmap;
  std::unique_ptr<FSMapUser> fsmap_user;

  // This mutex only protects command_table
  ceph::mutex command_lock = ceph::make_mutex("Client::command_lock");
  // MDS command state
  CommandTable<MDSCommandOp> command_table;

  bool _use_faked_inos;

  // Cluster fsid
  fs_cluster_id_t fscid;
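  // Note on deleg_timeout above: a value of 0 means delegations were never
  // enabled via set_deleg_timeout(), and Inode::set_deleg() rejects requests
  // with -CEPHFS_ETIME in that case.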
  // file handles, etc.
  interval_set<int> free_fd_set;  // unused fds
  ceph::unordered_map<int, Fh*> fd_map;
  set<Fh*> ll_unclosed_fh_set;
  ceph::unordered_set<dir_result_t*> opened_dirs;
  uint64_t fd_gen = 1;

  bool mount_aborted = false;
  bool blocklisted = false;

  ceph::unordered_map<vinodeno_t, Inode*> inode_map;
  ceph::unordered_map<ino_t, vinodeno_t> faked_ino_map;
  interval_set<ino_t> free_faked_inos;
  ino_t last_used_faked_ino;
  ino_t last_used_faked_root;

  int local_osd = -CEPHFS_ENXIO;
  epoch_t local_osd_epoch = 0;

  // mds requests
  ceph_tid_t last_tid = 0;
  ceph_tid_t oldest_tid = 0; // oldest incomplete mds request, excluding setfilelock requests
  map<ceph_tid_t, MetaRequest*> mds_requests;

  // cap flushing
  ceph_tid_t last_flush_tid = 1;

  xlist<Inode*> delayed_list;
  int num_flushing_caps = 0;
  ceph::unordered_map<inodeno_t,SnapRealm*> snap_realms;
  std::map<std::string, std::string> metadata;

  ceph::coarse_mono_time last_auto_reconnect;
  std::chrono::seconds caps_release_delay, mount_timeout;

  // trace generation
  std::ofstream traceout;

  ceph::condition_variable mount_cond, sync_cond;

  std::map<std::pair<int64_t,std::string>, int> pool_perms;
  std::list<ceph::condition_variable*> waiting_for_pool_perm;
  std::list<ceph::condition_variable*> waiting_for_rename;

  uint64_t retries_on_invalidate = 0;

  // state reclaim
  int reclaim_errno = 0;
  epoch_t reclaim_osd_epoch = 0;
  entity_addrvec_t reclaim_target_addrs;

  // dentry lease metrics
  uint64_t dentry_nr = 0;
  uint64_t dlease_hits = 0;
  uint64_t dlease_misses = 0;

  uint64_t cap_hits = 0;
  uint64_t cap_misses = 0;

  uint64_t opened_files = 0;
  uint64_t pinned_icaps = 0;
  uint64_t opened_inodes = 0;

  uint64_t total_read_ops = 0;
  uint64_t total_read_size = 0;

  uint64_t total_write_ops = 0;
  uint64_t total_write_size = 0;

  ceph::spinlock delay_i_lock;
  std::map<Inode*,int> delay_i_release;

  uint64_t nr_metadata_request = 0;
  uint64_t nr_read_request = 0;
  uint64_t nr_write_request = 0;
};

/**
 * Specialization of Client that manages its own Objecter instance
 * and handles init/shutdown of messenger/monclient
 */
class StandaloneClient : public Client {
public:
  StandaloneClient(Messenger *m, MonClient *mc, boost::asio::io_context& ictx);
  ~StandaloneClient() override;

  int init() override;
  void shutdown() override;
};

#endif
60,652
35.781686
130
h
null
ceph-main/src/client/ClientSnapRealm.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "ClientSnapRealm.h"
#include "common/Formatter.h"

using std::set;
using std::vector;

void SnapRealm::build_snap_context()
{
  set<snapid_t> snaps;
  snapid_t max_seq = seq;

  // start with prior_parents?
  for (unsigned i=0; i<prior_parent_snaps.size(); i++)
    snaps.insert(prior_parent_snaps[i]);

  // current parent's snaps
  if (pparent) {
    const SnapContext& psnapc = pparent->get_snap_context();
    for (unsigned i=0; i<psnapc.snaps.size(); i++)
      if (psnapc.snaps[i] >= parent_since)
        snaps.insert(psnapc.snaps[i]);
    if (psnapc.seq > max_seq)
      max_seq = psnapc.seq;
  }

  // my snaps
  for (unsigned i=0; i<my_snaps.size(); i++)
    snaps.insert(my_snaps[i]);

  // ok!
  cached_snap_context.seq = max_seq;
  cached_snap_context.snaps.resize(0);
  cached_snap_context.snaps.reserve(snaps.size());
  for (set<snapid_t>::reverse_iterator p = snaps.rbegin(); p != snaps.rend(); ++p)
    cached_snap_context.snaps.push_back(*p);
}

void SnapRealm::dump(Formatter *f) const
{
  f->dump_stream("ino") << ino;
  f->dump_int("nref", nref);
  f->dump_stream("created") << created;
  f->dump_stream("seq") << seq;

  f->dump_stream("parent_ino") << parent;
  f->dump_stream("parent_since") << parent_since;

  f->open_array_section("prior_parent_snaps");
  for (vector<snapid_t>::const_iterator p = prior_parent_snaps.begin(); p != prior_parent_snaps.end(); ++p)
    f->dump_stream("snapid") << *p;
  f->close_section();

  f->open_array_section("my_snaps");
  for (vector<snapid_t>::const_iterator p = my_snaps.begin(); p != my_snaps.end(); ++p)
    f->dump_stream("snapid") << *p;
  f->close_section();

  f->open_array_section("children");
  for (set<SnapRealm*>::const_iterator p = pchildren.begin(); p != pchildren.end(); ++p)
    f->dump_stream("child") << (*p)->ino;
  f->close_section();
}
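// Worked example with hypothetical values: for a realm with seq=8,
// my_snaps={8}, prior_parent_snaps={3}, and a live parent whose context has
// snaps={9,5}, seq=9, and parent_since=6, build_snap_context() keeps only the
// parent snaps >= 6, so the result is seq=9 and snaps=[9,8,3] (descending).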
1,923
29.0625
107
cc
null
ceph-main/src/client/ClientSnapRealm.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_CLIENT_SNAPREALM_H
#define CEPH_CLIENT_SNAPREALM_H

#include "include/types.h"
#include "common/snap_types.h"
#include "include/xlist.h"

struct Inode;

struct SnapRealm {
  inodeno_t ino;
  int nref;
  snapid_t created;
  snapid_t seq;

  inodeno_t parent;
  snapid_t parent_since;
  std::vector<snapid_t> prior_parent_snaps;  // snaps prior to parent_since
  std::vector<snapid_t> my_snaps;

  SnapRealm *pparent;
  std::set<SnapRealm*> pchildren;

  utime_t last_modified;
  uint64_t change_attr;

private:
  SnapContext cached_snap_context;  // my_snaps + parent snaps + past_parent_snaps
  friend std::ostream& operator<<(std::ostream& out, const SnapRealm& r);

public:
  xlist<Inode*> inodes_with_caps;

  explicit SnapRealm(inodeno_t i) :
    ino(i), nref(0), created(0), seq(0),
    pparent(NULL), last_modified(utime_t()), change_attr(0) { }

  void build_snap_context();
  void invalidate_cache() {
    cached_snap_context.clear();
  }

  const SnapContext& get_snap_context() {
    if (cached_snap_context.seq == 0)
      build_snap_context();
    return cached_snap_context;
  }

  void dump(Formatter *f) const;
};

inline std::ostream& operator<<(std::ostream& out, const SnapRealm& r) {
  return out << "snaprealm(" << r.ino
             << " nref=" << r.nref
             << " c=" << r.created
             << " seq=" << r.seq
             << " parent=" << r.parent
             << " my_snaps=" << r.my_snaps
             << " cached_snapc=" << r.cached_snap_context
             << " last_modified=" << r.last_modified
             << " change_attr=" << r.change_attr
             << ")";
}

#endif
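// Cache convention used above: invalidate_cache() clears cached_snap_context,
// leaving its seq at 0; get_snap_context() treats seq == 0 as "stale" and
// lazily rebuilds it via build_snap_context().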
1,649
24.384615
101
h
null
ceph-main/src/client/Delegation.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "common/Clock.h"
#include "common/Timer.h"

#include "Client.h"
#include "Inode.h"
#include "Fh.h"
#include "Delegation.h"

class C_Deleg_Timeout : public Context {
  Delegation *deleg;
public:
  explicit C_Deleg_Timeout(Delegation *d) : deleg(d) {}
  void finish(int r) override {
    Inode *in = deleg->get_fh()->inode.get();
    Client *client = in->client;

    lsubdout(client->cct, client, 0) << __func__ <<
          ": delegation return timeout for inode 0x" <<
          std::hex << in->ino << ". Forcibly unmounting client. "<<
          client << std::dec << dendl;
    client->_unmount(false);
  }
};

/**
 * ceph_deleg_caps_for_type - what caps are necessary for a delegation?
 * @type: delegation request type
 *
 * Determine what caps are necessary in order to grant a delegation of a given
 * type. For read delegations, we need whatever we require in order to do
 * cached reads, plus AsLs to cover metadata changes that should trigger a
 * recall. We also grab Xs since changing xattrs usually alters the mtime and
 * so would trigger a recall.
 *
 * For write delegations, we need whatever read delegations need plus the
 * caps to allow writing to the file (Fbwx).
 */
int ceph_deleg_caps_for_type(unsigned type)
{
  int caps = CEPH_CAP_PIN;

  switch (type) {
  case CEPH_DELEGATION_WR:
    caps |= CEPH_CAP_FILE_EXCL |
            CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER;
    /* Fallthrough */
  case CEPH_DELEGATION_RD:
    caps |= CEPH_CAP_FILE_SHARED |
            CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE |
            CEPH_CAP_XATTR_SHARED | CEPH_CAP_LINK_SHARED |
            CEPH_CAP_AUTH_SHARED;
    break;
  default:
    // Should never happen
    ceph_abort();
  }
  return caps;
}

/*
 * A delegation is a container for holding caps on behalf of a client that
 * wants to be able to rely on them until recalled.
 */
Delegation::Delegation(Fh *_fh, unsigned _type, ceph_deleg_cb_t _cb, void *_priv)
      : fh(_fh), priv(_priv), type(_type), recall_cb(_cb),
        recall_time(utime_t()), timeout_event(nullptr)
{
  Inode *inode = _fh->inode.get();
  inode->client->get_cap_ref(inode, ceph_deleg_caps_for_type(_type));
};

Delegation::~Delegation()
{
  disarm_timeout();
  Inode *inode = fh->inode.get();
  inode->client->put_cap_ref(inode, ceph_deleg_caps_for_type(type));
}

void Delegation::reinit(unsigned _type, ceph_deleg_cb_t _recall_cb, void *_priv)
{
  /* update cap refs -- note that we do a get first to avoid any going to 0 */
  if (type != _type) {
    Inode *inode = fh->inode.get();

    inode->client->get_cap_ref(inode, ceph_deleg_caps_for_type(_type));
    inode->client->put_cap_ref(inode, ceph_deleg_caps_for_type(type));
    type = _type;
  }

  recall_cb = _recall_cb;
  priv = _priv;
}

void Delegation::arm_timeout()
{
  Client *client = fh->inode.get()->client;

  std::scoped_lock l(client->timer_lock);
  if (timeout_event)
    return;

  timeout_event = new C_Deleg_Timeout(this);
  client->timer.add_event_after(client->get_deleg_timeout(), timeout_event);
}

void Delegation::disarm_timeout()
{
  Client *client = fh->inode.get()->client;

  std::scoped_lock l(client->timer_lock);
  if (!timeout_event)
    return;

  client->timer.cancel_event(timeout_event);
  timeout_event = nullptr;
}

void Delegation::recall(bool skip_read)
{
  /* If skip_read is true, don't break read delegations */
  if (skip_read && type == CEPH_DELEGATION_RD)
    return;

  if (!is_recalled()) {
    recall_cb(fh, priv);
    recall_time = ceph_clock_now();
    arm_timeout();
  }
}
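// Usage sketch (hedged): an application holding a delegation registers a
// recall callback and is expected to return the delegation from it, e.g. by
// queueing work that eventually requests CEPH_DELEGATION_NONE through
// ll_delegation(). If the delegation is never returned, C_Deleg_Timeout fires
// after get_deleg_timeout() seconds and forcibly unmounts the client.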
3,545
25.863636
81
cc
null
ceph-main/src/client/Delegation.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef _CEPH_CLIENT_DELEGATION_H
#define _CEPH_CLIENT_DELEGATION_H

#include "common/Clock.h"
#include "common/Timer.h"
#include "include/cephfs/ceph_ll_client.h"

/* Commands for manipulating delegation state */
#ifndef CEPH_DELEGATION_NONE
# define CEPH_DELEGATION_NONE	0
# define CEPH_DELEGATION_RD	1
# define CEPH_DELEGATION_WR	2
#endif

/* Converts CEPH_DELEGATION_* to cap mask */
int ceph_deleg_caps_for_type(unsigned type);

/*
 * A delegation is a container for holding caps on behalf of a client that
 * wants to be able to rely on them until recalled.
 */
class Delegation {
public:
  Delegation(Fh *_fh, unsigned _type, ceph_deleg_cb_t _cb, void *_priv);
  ~Delegation();

  Fh *get_fh() { return fh; }
  unsigned get_type() { return type; }

  bool is_recalled() { return !recall_time.is_zero(); }

  void reinit(unsigned _type, ceph_deleg_cb_t _recall_cb, void *_priv);

  void recall(bool skip_read);
private:
  // Filehandle against which it was acquired
  Fh *fh;

  // opaque token that will be passed to the callback
  void *priv;

  // CEPH_DELEGATION_* type
  unsigned type;

  // callback into application to recall delegation
  ceph_deleg_cb_t recall_cb;

  // time of first recall
  utime_t recall_time;

  // timer for unreturned delegations
  Context *timeout_event;

  void arm_timeout();
  void disarm_timeout();
};

#endif /* _CEPH_CLIENT_DELEGATION_H */
1,493
24.758621
74
h
null
ceph-main/src/client/Dentry.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "include/types.h"
#include "include/utime.h"

#include "Dentry.h"
#include "Dir.h"
#include "Inode.h"

#include "common/Formatter.h"

void Dentry::dump(Formatter *f) const
{
  f->dump_string("name", name);
  f->dump_stream("dir") << dir->parent_inode->ino;
  if (inode)
    f->dump_stream("ino") << inode->ino;
  f->dump_int("ref", ref);
  f->dump_int("offset", offset);
  if (lease_mds >= 0) {
    f->dump_int("lease_mds", lease_mds);
    f->dump_stream("lease_ttl") << lease_ttl;
    f->dump_unsigned("lease_gen", lease_gen);
    f->dump_unsigned("lease_seq", lease_seq);
  }
  f->dump_int("cap_shared_gen", cap_shared_gen);
}

std::ostream &operator<<(std::ostream &oss, const Dentry &dn)
{
  return oss << dn.dir->parent_inode->vino() << "[\"" << dn.name << "\"]";
}
879
24.882353
74
cc
null
ceph-main/src/client/Dentry.h
#ifndef CEPH_CLIENT_DENTRY_H
#define CEPH_CLIENT_DENTRY_H

#include "include/lru.h"
#include "include/xlist.h"

#include "mds/mdstypes.h"

#include "Inode.h"
#include "InodeRef.h"
#include "Dir.h"

class Dentry : public LRUObject {
public:
  explicit Dentry(Dir *_dir, const std::string &_name)
    : dir(_dir), name(_name), inode_xlist_link(this)
  {
    auto r = dir->dentries.insert(make_pair(name, this));
    ceph_assert(r.second);
    dir->num_null_dentries++;
  }
  ~Dentry()
  {
    ceph_assert(ref == 0);
    ceph_assert(dir == nullptr);
  }

  /*
   * ref==1 -> cached, unused
   * ref >1 -> pinned in lru
   */
  void get() {
    ceph_assert(ref > 0);
    if (++ref == 2)
      lru_pin();
    //cout << "dentry.get on " << this << " " << name << " now " << ref << std::endl;
  }
  void put() {
    ceph_assert(ref > 0);
    if (--ref == 1)
      lru_unpin();
    //cout << "dentry.put on " << this << " " << name << " now " << ref << std::endl;
    if (ref == 0)
      delete this;
  }
  void link(InodeRef in) {
    inode = in;
    inode->dentries.push_back(&inode_xlist_link);
    if (inode->is_dir()) {
      if (inode->dir)
        get(); // dir -> dn pin
      if (inode->ll_ref)
        get(); // ll_ref -> dn pin
    }
    dir->num_null_dentries--;
  }
  void unlink(void) {
    if (inode->is_dir()) {
      if (inode->dir)
        put(); // dir -> dn pin
      if (inode->ll_ref)
        put(); // ll_ref -> dn pin
    }
    ceph_assert(inode_xlist_link.get_list() == &inode->dentries);
    inode_xlist_link.remove_myself();
    inode.reset();
    dir->num_null_dentries++;
  }
  void mark_primary() {
    if (inode && inode->dentries.front() != this)
      inode->dentries.push_front(&inode_xlist_link);
  }
  void detach(void) {
    ceph_assert(!inode);
    auto p = dir->dentries.find(name);
    ceph_assert(p != dir->dentries.end());
    dir->dentries.erase(p);
    dir->num_null_dentries--;
    dir = nullptr;
  }

  void dump(Formatter *f) const;
  friend std::ostream &operator<<(std::ostream &oss, const Dentry &Dentry);

  Dir *dir;
  const std::string name;
  InodeRef inode;
  int ref = 1; // 1 if there's a dir beneath me.
  int64_t offset = 0;
  mds_rank_t lease_mds = -1;
  utime_t lease_ttl;
  uint64_t lease_gen = 0;
  ceph_seq_t lease_seq = 0;
  int cap_shared_gen = 0;
  std::string alternate_name;
  bool is_renaming = false;

private:
  xlist<Dentry *>::item inode_xlist_link;
};

#endif
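// Reference-count lifecycle implied by get()/put() above: a new Dentry starts
// at ref == 1 (cached but unused); get() to 2 pins it in the LRU, put() back
// to 1 unpins it, and a put() that reaches 0 deletes the dentry, so callers
// balance every get() with exactly one put().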
2,430
23.069307
85
h
null
ceph-main/src/client/Dir.h
#ifndef CEPH_CLIENT_DIR_H
#define CEPH_CLIENT_DIR_H

#include <string>
#include <vector>

class Dentry;
struct Inode;

class Dir {
public:
  Inode *parent_inode;  // my inode
  ceph::unordered_map<std::string, Dentry*> dentries;
  unsigned num_null_dentries = 0;
  std::vector<Dentry*> readdir_cache;

  explicit Dir(Inode* in) { parent_inode = in; }

  bool is_empty() { return dentries.empty(); }
};

#endif
416
16.375
53
h
null
ceph-main/src/client/Fh.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2017 Red Hat Inc
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#include "Inode.h"
#include "Fh.h"

Fh::Fh(InodeRef in, int flags, int cmode, uint64_t _gen, const UserPerm &perms) :
  inode(in), flags(flags), gen(_gen), actor_perms(perms), mode(cmode),
  readahead()
{
  inode->add_fh(this);
}

Fh::~Fh()
{
  inode->rm_fh(this);
}
678
19.575758
81
cc
null
ceph-main/src/client/Fh.h
#ifndef CEPH_CLIENT_FH_H
#define CEPH_CLIENT_FH_H

#include "common/Readahead.h"
#include "include/types.h"
#include "InodeRef.h"
#include "UserPerm.h"
#include "mds/flock.h"

class Inode;

// file handle for any open file state
struct Fh {
  InodeRef inode;
  int flags;
  uint64_t gen;
  UserPerm actor_perms; // perms I opened the file with

  // the members above, once initialized in the constructor,
  // won't change; putting them under the client_lock
  // makes no sense.

  int _ref = 1;
  loff_t pos = 0;
  int mode;  // the mode I opened the file with

  bool pos_locked = false;  // pos is currently in use
  std::list<ceph::condition_variable*> pos_waiters;  // waiters for pos

  Readahead readahead;

  // file lock
  std::unique_ptr<ceph_lock_state_t> fcntl_locks;
  std::unique_ptr<ceph_lock_state_t> flock_locks;

  bool has_any_filelocks() {
    return
      (fcntl_locks && !fcntl_locks->empty()) ||
      (flock_locks && !flock_locks->empty());
  }

  // IO error encountered by any writeback on this Inode while
  // this Fh existed (i.e. an fsync on another Fh will still show
  // up as an async_err here because it could have been the same
  // bytes we wrote via this Fh).
  int async_err = {0};

  int take_async_err()
  {
    int e = async_err;
    async_err = 0;
    return e;
  }

  Fh() = delete;
  Fh(InodeRef in, int flags, int cmode, uint64_t gen, const UserPerm &perms);
  ~Fh();

  void get() { ++_ref; }
  int put() { return --_ref; }
};

#endif
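// How async_err is typically consumed: writeback failures fan out to every
// open handle via Inode::set_async_err(), and a later fsync/close path can
// surface the error once per handle with take_async_err(), which also clears
// the latched value.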
1,539
22.333333
77
h
null
ceph-main/src/client/Inode.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "Client.h"
#include "Inode.h"
#include "Dentry.h"
#include "Dir.h"
#include "Fh.h"
#include "MetaSession.h"
#include "ClientSnapRealm.h"
#include "Delegation.h"

#include "mds/flock.h"

using std::dec;
using std::list;
using std::oct;
using std::ostream;
using std::string;

Inode::~Inode()
{
  delay_cap_item.remove_myself();
  dirty_cap_item.remove_myself();
  snaprealm_item.remove_myself();

  if (snapdir_parent) {
    snapdir_parent->flags &= ~I_SNAPDIR_OPEN;
    snapdir_parent.reset();
  }

  if (!oset.objects.empty()) {
    lsubdout(client->cct, client, 0) << __func__ << ": leftover objects on inode 0x"
      << std::hex << ino << std::dec << dendl;
    ceph_assert(oset.objects.empty());
  }

  if (!delegations.empty()) {
    lsubdout(client->cct, client, 0) << __func__ << ": leftover delegations on inode 0x"
      << std::hex << ino << std::dec << dendl;
    ceph_assert(delegations.empty());
  }
}

ostream& operator<<(ostream &out, const Inode &in)
{
  out << in.vino() << "("
      << "faked_ino=" << in.faked_ino
      << " nref=" << in.get_nref()
      << " ll_ref=" << in.ll_ref
      << " cap_refs=" << in.cap_refs
      << " open=" << in.open_by_mode
      << " mode=" << oct << in.mode << dec
      << " size=" << in.size << "/" << in.max_size
      << " nlink=" << in.nlink
      << " btime=" << in.btime
      << " mtime=" << in.mtime
      << " ctime=" << in.ctime
      << " change_attr=" << in.change_attr
      << " caps=" << ccap_string(in.caps_issued());
  if (!in.caps.empty()) {
    out << "(";
    bool first = true;
    for (const auto &pair : in.caps) {
      if (!first)
        out << ',';
      out << pair.first << '=' << ccap_string(pair.second.issued);
      first = false;
    }
    out << ")";
  }
  if (in.dirty_caps)
    out << " dirty_caps=" << ccap_string(in.dirty_caps);
  if (in.flushing_caps)
    out << " flushing_caps=" << ccap_string(in.flushing_caps);

  if (in.flags & I_COMPLETE)
    out << " COMPLETE";

  if (in.is_file())
    out << " " << in.oset;

  if (!in.dentries.empty())
    out << " parents=" << in.dentries;

  if (in.is_dir() && in.has_dir_layout())
    out << " has_dir_layout";

  if (in.quota.is_enabled())
    out << " " << in.quota;

  out << ' ' << &in << ")";
  return out;
}

void Inode::make_long_path(filepath& p)
{
  if (!dentries.empty()) {
    Dentry *dn = get_first_parent();
    ceph_assert(dn->dir && dn->dir->parent_inode);
    dn->dir->parent_inode->make_long_path(p);
    p.push_dentry(dn->name);
  } else if (snapdir_parent) {
    make_nosnap_relative_path(p);
  } else
    p = filepath(ino);
}

void Inode::make_short_path(filepath& p)
{
  if (!dentries.empty()) {
    Dentry *dn = get_first_parent();
    ceph_assert(dn->dir && dn->dir->parent_inode);
    p = filepath(dn->name, dn->dir->parent_inode->ino);
  } else if (snapdir_parent) {
    make_nosnap_relative_path(p);
  } else
    p = filepath(ino);
}

/*
 * make a filepath suitable for an mds request:
 *  - if we are non-snapped/live, the ino is sufficient, e.g. #1234
 *  - if we are snapped, make filepath relative to first non-snapped parent.
 */
void Inode::make_nosnap_relative_path(filepath& p)
{
  if (snapid == CEPH_NOSNAP) {
    p = filepath(ino);
  } else if (snapdir_parent) {
    snapdir_parent->make_nosnap_relative_path(p);
    string empty;
    p.push_dentry(empty);
  } else if (!dentries.empty()) {
    Dentry *dn = get_first_parent();
    ceph_assert(dn->dir && dn->dir->parent_inode);
    dn->dir->parent_inode->make_nosnap_relative_path(p);
    p.push_dentry(dn->name);
  } else {
    p = filepath(ino);
  }
}

void Inode::get_open_ref(int mode)
{
  client->inc_opened_files();
  if (open_by_mode[mode] == 0) {
    client->inc_opened_inodes();
  }
  open_by_mode[mode]++;
  break_deleg(!(mode & CEPH_FILE_MODE_WR));
}

bool Inode::put_open_ref(int mode)
{
  //cout << "open_by_mode[" << mode << "] " << open_by_mode[mode] << " -> " << (open_by_mode[mode]-1) << std::endl;
  auto& ref = open_by_mode.at(mode);
  ceph_assert(ref > 0);
  client->dec_opened_files();
  if (--ref == 0) {
    client->dec_opened_inodes();
    return true;
  }
  return false;
}

void Inode::get_cap_ref(int cap)
{
  int n = 0;
  while (cap) {
    if (cap & 1) {
      int c = 1 << n;
      cap_refs[c]++;
      //cout << "inode " << *this << " get " << cap_string(c) << " " << (cap_refs[c]-1) << " -> " << cap_refs[c] << std::endl;
    }
    cap >>= 1;
    n++;
  }
}

int Inode::put_cap_ref(int cap)
{
  int last = 0;
  int n = 0;
  while (cap) {
    if (cap & 1) {
      int c = 1 << n;
      if (cap_refs[c] <= 0) {
        lderr(client->cct) << "put_cap_ref " << ccap_string(c) << " went negative on " << *this << dendl;
        ceph_assert(cap_refs[c] > 0);
      }
      if (--cap_refs[c] == 0)
        last |= c;
      //cout << "inode " << *this << " put " << cap_string(c) << " " << (cap_refs[c]+1) << " -> " << cap_refs[c] << std::endl;
    }
    cap >>= 1;
    n++;
  }
  return last;
}

bool Inode::is_any_caps()
{
  return !caps.empty() || snap_caps;
}

bool Inode::cap_is_valid(const Cap &cap) const
{
  /*cout << "cap_gen     " << cap->session-> cap_gen << std::endl
    << "session gen " << cap->gen << std::endl
    << "cap expire  " << cap->session->cap_ttl << std::endl
    << "cur time    " << ceph_clock_now(cct) << std::endl;*/
  if ((cap.session->cap_gen <= cap.gen)
      && (ceph_clock_now() < cap.session->cap_ttl)) {
    return true;
  }
  return false;
}

int Inode::caps_issued(int *implemented) const
{
  int c = snap_caps;
  int i = 0;
  for (const auto &[mds, cap] : caps) {
    if (cap_is_valid(cap)) {
      c |= cap.issued;
      i |= cap.implemented;
    }
  }
  // exclude caps issued by a non-auth MDS but being revoked by
  // the auth MDS. The non-auth MDS should be revoking/exporting
  // these caps, but the message is delayed.
  if (auth_cap)
    c &= ~auth_cap->implemented | auth_cap->issued;
  if (implemented)
    *implemented = i;
  return c;
}

void Inode::try_touch_cap(mds_rank_t mds)
{
  auto it = caps.find(mds);
  if (it != caps.end()) {
    it->second.touch();
  }
}

/**
 * caps_issued_mask - check whether we have all of the caps in the mask
 * @mask: mask to check against
 * @allow_impl: whether the caller can also use caps that are implemented but not issued
 *
 * This is the bog standard "check whether we have the required caps" operation.
 * Typically, we only check against the capset that is currently "issued".
 * In other words, we ignore caps that have been revoked but not yet released.
 * Also account capability hit/miss stats.
 *
 * Some callers (particularly those doing attribute retrieval) can also make
 * use of the full set of "implemented" caps to satisfy requests from the
 * cache.
 *
 * Those callers should refrain from taking new references to implemented
 * caps!
 */
bool Inode::caps_issued_mask(unsigned mask, bool allow_impl)
{
  int c = snap_caps;
  int i = 0;

  if ((c & mask) == mask)
    return true;
  // prefer auth cap
  if (auth_cap &&
      cap_is_valid(*auth_cap) &&
      (auth_cap->issued & mask) == mask) {
    auth_cap->touch();
    client->cap_hit();
    return true;
  }
  // try any cap
  for (auto &pair : caps) {
    Cap &cap = pair.second;
    if (cap_is_valid(cap)) {
      if ((cap.issued & mask) == mask) {
        cap.touch();
        client->cap_hit();
        return true;
      }
      c |= cap.issued;
      i |= cap.implemented;
    }
  }

  if (allow_impl)
    c |= i;

  if ((c & mask) == mask) {
    // bah.. touch them all
    for (auto &pair : caps) {
      pair.second.touch();
    }
    client->cap_hit();
    return true;
  }

  client->cap_miss();
  return false;
}

int Inode::caps_used()
{
  int w = 0;
  for (const auto &[cap, cnt] : cap_refs)
    if (cnt)
      w |= cap;
  return w;
}

int Inode::caps_file_wanted()
{
  int want = 0;
  for (const auto &[mode, cnt] : open_by_mode)
    if (cnt)
      want |= ceph_caps_for_mode(mode);
  return want;
}

int Inode::caps_wanted()
{
  int want = caps_file_wanted() | caps_used();
  if (want & CEPH_CAP_FILE_BUFFER)
    want |= CEPH_CAP_FILE_EXCL;
  return want;
}

int Inode::caps_mds_wanted()
{
  int want = 0;
  for (const auto &pair : caps) {
    want |= pair.second.wanted;
  }
  return want;
}

int Inode::caps_dirty()
{
  return dirty_caps | flushing_caps;
}

const UserPerm* Inode::get_best_perms()
{
  const UserPerm *perms = NULL;
  for (const auto &pair : caps) {
    const UserPerm& iperm = pair.second.latest_perms;
    if (!perms) { // we don't have any, take what's present
      perms = &iperm;
    } else if (iperm.uid() == uid) {
      if (iperm.gid() == gid) { // we have the best possible, return
        return &iperm;
      }
      if (perms->uid() != uid) { // take uid > gid every time
        perms = &iperm;
      }
    } else if (perms->uid() != uid && iperm.gid() == gid) {
      perms = &iperm; // a matching gid is better than nothing
    }
  }
  return perms;
}

bool Inode::have_valid_size()
{
  // RD+RDCACHE or WR+WRBUFFER => valid size
  if (caps_issued() & (CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL))
    return true;
  return false;
}
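// Example of the widening in caps_wanted() above: a handle opened for
// read/write makes caps_file_wanted() request the caps for that mode via
// ceph_caps_for_mode(); if Fb (FILE_BUFFER) ends up wanted, caps_wanted()
// also asks for Fx (FILE_EXCL) so dirty buffered data can be retained.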
// open Dir for an inode.  if it's not open, allocate it (and pin dentry in memory).
Dir *Inode::open_dir()
{
  if (!dir) {
    dir = new Dir(this);
    lsubdout(client->cct, client, 15) << "open_dir " << dir << " on " << this << dendl;
    ceph_assert(dentries.size() < 2); // dirs can't be hard-linked
    if (!dentries.empty())
      get_first_parent()->get();      // pin dentry
    iget();                           // pin inode
  }
  return dir;
}

bool Inode::check_mode(const UserPerm& perms, unsigned want)
{
  if (uid == perms.uid()) {
    // if uid is owner, owner entry determines access
    want = want << 6;
  } else if (perms.gid_in_groups(gid)) {
    // if a gid or sgid matches the owning group, group entry determines access
    want = want << 3;
  }

  return (mode & want) == want;
}

void Inode::dump(Formatter *f) const
{
  f->dump_stream("ino") << ino;
  f->dump_stream("snapid") << snapid;
  if (rdev)
    f->dump_unsigned("rdev", rdev);
  f->dump_stream("ctime") << ctime;
  f->dump_stream("btime") << btime;
  f->dump_stream("mode") << '0' << std::oct << mode << std::dec;
  f->dump_unsigned("uid", uid);
  f->dump_unsigned("gid", gid);
  f->dump_int("nlink", nlink);

  f->dump_unsigned("size", size);
  f->dump_unsigned("max_size", max_size);
  f->dump_unsigned("truncate_seq", truncate_seq);
  f->dump_unsigned("truncate_size", truncate_size);
  f->dump_stream("mtime") << mtime;
  f->dump_stream("atime") << atime;
  f->dump_unsigned("time_warp_seq", time_warp_seq);
  f->dump_unsigned("change_attr", change_attr);

  f->dump_object("layout", layout);
  if (is_dir()) {
    f->open_object_section("dir_layout");
    ::dump(dir_layout, f);
    f->close_section();

    f->dump_bool("complete", flags & I_COMPLETE);
    f->dump_bool("ordered", flags & I_DIR_ORDERED);

    /* FIXME when wip-mds-encoding is merged ***
    f->open_object_section("dir_stat");
    dirstat.dump(f);
    f->close_section();

    f->open_object_section("rstat");
    rstat.dump(f);
    f->close_section();
    */
  }

  f->dump_unsigned("version", version);
  f->dump_unsigned("xattr_version", xattr_version);
  f->dump_unsigned("flags", flags);

  if (is_dir()) {
    f->dump_int("dir_hashed", (int)dir_hashed);
    f->dump_int("dir_replicated", (int)dir_replicated);
    if (dir_replicated) {
      f->open_array_section("dirfrags");
      for (const auto &frag : frag_repmap) {
        f->open_object_section("frags");
        CachedStackStringStream css;
        *css << std::hex << frag.first.value() << "/" << std::dec << frag.first.bits();
        f->dump_string("frag", css->strv());

        f->open_array_section("repmap");
        for (const auto &mds : frag.second) {
          f->dump_int("mds", mds);
        }
        f->close_section();

        f->close_section();
      }
      f->close_section();
    }
  }

  f->open_array_section("caps");
  for (const auto &pair : caps) {
    f->open_object_section("cap");
    if (&pair.second == auth_cap)
      f->dump_int("auth", 1);
    pair.second.dump(f);
    f->close_section();
  }
  f->close_section();
  if (auth_cap)
    f->dump_int("auth_cap", auth_cap->session->mds_num);

  f->dump_stream("dirty_caps") << ccap_string(dirty_caps);
  if (flushing_caps) {
    f->dump_stream("flushings_caps") << ccap_string(flushing_caps);
    f->open_object_section("flushing_cap_tid");
    for (map<ceph_tid_t, int>::const_iterator p = flushing_cap_tids.begin();
         p != flushing_cap_tids.end();
         ++p) {
      string n(ccap_string(p->second));
      f->dump_unsigned(n.c_str(), p->first);
    }
    f->close_section();
  }
  f->dump_int("shared_gen", shared_gen);
  f->dump_int("cache_gen", cache_gen);
  if (snap_caps) {
    f->dump_int("snap_caps", snap_caps);
    f->dump_int("snap_cap_refs", snap_cap_refs);
  }

  f->dump_stream("hold_caps_until") << hold_caps_until;

  if (snaprealm) {
    f->open_object_section("snaprealm");
    snaprealm->dump(f);
    f->close_section();
  }
  if (!cap_snaps.empty()) {
    for (const auto &p : cap_snaps) {
      f->open_object_section("cap_snap");
      f->dump_stream("follows") << p.first;
      p.second.dump(f);
      f->close_section();
    }
  }

  // open
  if (!open_by_mode.empty()) {
    f->open_array_section("open_by_mode");
    for (map<int,int>::const_iterator p = open_by_mode.begin(); p != open_by_mode.end(); ++p) {
      f->open_object_section("ref");
      f->dump_int("mode", p->first);
      f->dump_int("refs", p->second);
      f->close_section();
    }
    f->close_section();
  }

  if (!cap_refs.empty()) {
    f->open_array_section("cap_refs");
    for (map<int,int>::const_iterator p = cap_refs.begin(); p != cap_refs.end(); ++p) {
      f->open_object_section("cap_ref");
      f->dump_stream("cap") << ccap_string(p->first);
      f->dump_int("refs", p->second);
      f->close_section();
    }
    f->close_section();
  }

  f->dump_unsigned("reported_size", reported_size);
  if (wanted_max_size != max_size)
    f->dump_unsigned("wanted_max_size", wanted_max_size);
  if (requested_max_size != max_size)
    f->dump_unsigned("requested_max_size", requested_max_size);

  f->dump_int("nref", get_nref());
  f->dump_int("ll_ref", ll_ref);

  if (!dentries.empty()) {
    f->open_array_section("parents");
    for (const auto &&dn : dentries) {
      f->open_object_section("dentry");
      f->dump_stream("dir_ino") << dn->dir->parent_inode->ino;
      f->dump_string("name", dn->name);
      f->close_section();
    }
    f->close_section();
  }
}

void Cap::dump(Formatter *f) const
{
  f->dump_int("mds", session->mds_num);
  f->dump_stream("ino") << inode.ino;
  f->dump_unsigned("cap_id", cap_id);
  f->dump_stream("issued") << ccap_string(issued);
  if (implemented != issued)
    f->dump_stream("implemented") << ccap_string(implemented);
  f->dump_stream("wanted") << ccap_string(wanted);
  f->dump_unsigned("seq", seq);
  f->dump_unsigned("issue_seq", issue_seq);
  f->dump_unsigned("mseq", mseq);
  f->dump_unsigned("gen", gen);
}

void CapSnap::dump(Formatter *f) const
{
  f->dump_stream("ino") << in->ino;
  f->dump_stream("issued") << ccap_string(issued);
  f->dump_stream("dirty") << ccap_string(dirty);
  f->dump_unsigned("size", size);
  f->dump_stream("ctime") << ctime;
  f->dump_stream("mtime") << mtime;
  f->dump_stream("atime") << atime;
  f->dump_int("time_warp_seq", time_warp_seq);
  f->dump_stream("mode") << '0' << std::oct << mode << std::dec;
  f->dump_unsigned("uid", uid);
  f->dump_unsigned("gid", gid);
  if (!xattrs.empty()) {
    f->open_object_section("xattr_lens");
    for (map<string,bufferptr>::const_iterator p = xattrs.begin(); p != xattrs.end(); ++p)
      f->dump_int(p->first.c_str(), p->second.length());
    f->close_section();
  }
  f->dump_unsigned("xattr_version", xattr_version);
  f->dump_int("writing", (int)writing);
  f->dump_int("dirty_data", (int)dirty_data);
  f->dump_unsigned("flush_tid", flush_tid);
}

void Inode::set_async_err(int r)
{
  for (const auto &fh : fhs) {
    fh->async_err = r;
  }
}

bool Inode::has_recalled_deleg()
{
  if (delegations.empty())
    return false;

  // Either all delegations are recalled or none are. Just check the first.
Delegation& deleg = delegations.front(); return deleg.is_recalled(); } void Inode::recall_deleg(bool skip_read) { if (delegations.empty()) return; // Issue any recalls for (list<Delegation>::iterator d = delegations.begin(); d != delegations.end(); ++d) { Delegation& deleg = *d; deleg.recall(skip_read); } } bool Inode::delegations_broken(bool skip_read) { if (delegations.empty()) { lsubdout(client->cct, client, 10) << __func__ << ": delegations empty on " << *this << dendl; return true; } if (skip_read) { Delegation& deleg = delegations.front(); lsubdout(client->cct, client, 10) << __func__ << ": read delegs only on " << *this << dendl; if (deleg.get_type() == CEPH_FILE_MODE_RD) { return true; } } lsubdout(client->cct, client, 10) << __func__ << ": not broken" << *this << dendl; return false; } void Inode::break_deleg(bool skip_read) { lsubdout(client->cct, client, 10) << __func__ << ": breaking delegs on " << *this << dendl; recall_deleg(skip_read); while (!delegations_broken(skip_read)) client->wait_on_list(waitfor_deleg); } /** * set_deleg: request a delegation on an open Fh * @fh: filehandle on which to acquire it * @type: delegation request type * @cb: delegation recall callback function * @priv: private pointer to be passed to callback * * Attempt to acquire a delegation on an open file handle. If there are no * conflicts and we have the right caps, allocate a new delegation, fill it * out and return 0. Return an error if we can't get one for any reason. */ int Inode::set_deleg(Fh *fh, unsigned type, ceph_deleg_cb_t cb, void *priv) { lsubdout(client->cct, client, 10) << __func__ << ": inode " << *this << dendl; /* * 0 deleg timeout means that they haven't been explicitly enabled. Don't * allow it, with an unusual error to make it clear. */ if (!client->get_deleg_timeout()) return -CEPHFS_ETIME; // Just say no if we have any recalled delegs still outstanding if (has_recalled_deleg()) { lsubdout(client->cct, client, 10) << __func__ << ": has_recalled_deleg" << dendl; return -CEPHFS_EAGAIN; } // check vs. currently open files on this inode switch (type) { case CEPH_DELEGATION_RD: if (open_count_for_write()) { lsubdout(client->cct, client, 10) << __func__ << ": open for write" << dendl; return -CEPHFS_EAGAIN; } break; case CEPH_DELEGATION_WR: if (open_count() > 1) { lsubdout(client->cct, client, 10) << __func__ << ": open" << dendl; return -CEPHFS_EAGAIN; } break; default: return -CEPHFS_EINVAL; } /* * A delegation is essentially a long-held container for cap references that * we delegate to the client until recalled. The caps required depend on the * type of delegation (read vs. rw). This is entirely an opportunistic thing. * If we don't have the necessary caps for the delegation, then we just don't * grant one. * * In principle we could request the caps from the MDS, but a delegation is * usually requested just after an open. If we don't have the necessary caps * already, then it's likely that there is some sort of conflicting access. * * In the future, we may need to add a way to have this request caps more * aggressively -- for instance, to handle WANT_DELEGATION for NFSv4.1+. 
 */
  int need = ceph_deleg_caps_for_type(type);
  if (!caps_issued_mask(need)) {
    lsubdout(client->cct, client, 10) << __func__ << ": cap mismatch, have="
      << ccap_string(caps_issued()) << " need=" << ccap_string(need) << dendl;
    return -CEPHFS_EAGAIN;
  }

  for (list<Delegation>::iterator d = delegations.begin();
       d != delegations.end(); ++d) {
    Delegation& deleg = *d;
    if (deleg.get_fh() == fh) {
      deleg.reinit(type, cb, priv);
      return 0;
    }
  }

  delegations.emplace_back(fh, type, cb, priv);
  return 0;
}

/**
 * unset_deleg - remove a delegation that was previously set
 * @fh: file handle to clear delegation of
 *
 * Unlink delegation from the Inode (if there is one), put caps and free it.
 */
void Inode::unset_deleg(Fh *fh)
{
  for (list<Delegation>::iterator d = delegations.begin();
       d != delegations.end(); ++d) {
    Delegation& deleg = *d;
    if (deleg.get_fh() == fh) {
      delegations.erase(d);
      client->signal_cond_list(waitfor_deleg);
      break;
    }
  }
}

/**
 * mark_caps_dirty - mark some caps dirty
 * @caps: the dirty caps
 *
 * Note that if there are no dirty or flushing caps before, we need to pin
 * this inode. It will be unpinned by handle_cap_flush_ack when there are no
 * dirty or flushing caps left.
 */
void Inode::mark_caps_dirty(int caps)
{
  /*
   * If auth_cap is nullptr it means the reconnecting has not finished or
   * was already rejected.
   */
  if (!auth_cap) {
    ceph_assert(!dirty_caps);

    lsubdout(client->cct, client, 1) << __func__ << " " << *this
      << " dirty caps '" << ccap_string(caps) << "', but no auth cap." << dendl;
    return;
  }

  lsubdout(client->cct, client, 10) << __func__ << " " << *this << " "
    << ccap_string(dirty_caps) << " -> " << ccap_string(dirty_caps | caps)
    << dendl;

  if (caps && !caps_dirty())
    iget();

  dirty_caps |= caps;
  auth_cap->session->get_dirty_list().push_back(&dirty_cap_item);
  client->cap_delay_requeue(this);
}

/**
 * mark_caps_clean - only clean the dirty_caps and caller should start
 * flushing the dirty caps.
 */
void Inode::mark_caps_clean()
{
  lsubdout(client->cct, client, 10) << __func__ << " " << *this << dendl;
  dirty_caps = 0;
  dirty_cap_item.remove_myself();
}
21,956
25.941104
126
cc
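The permission check in Inode::check_mode() above relies on the POSIX mode layout: three rwx triplets, with the owner triplet 6 bits up and the group triplet 3 bits up from "other". A minimal standalone sketch of that shifting rule follows; the names are hypothetical and, unlike the real method, it compares against a single gid rather than consulting the caller's supplementary groups via UserPerm.

#include <cassert>

// Sketch of the check_mode() bit-shifting idea: shift the requested rwx
// bits into whichever triplet applies, then one mask test decides access.
static bool check_mode_sketch(unsigned mode, unsigned uid, unsigned gid,
                              unsigned req_uid, unsigned req_gid,
                              unsigned want /* rwx bits, 0..7 */) {
  if (req_uid == uid)
    want <<= 6;          // owner triplet decides
  else if (req_gid == gid)
    want <<= 3;          // group triplet decides
  return (mode & want) == want;
}

int main() {
  // 0754: owner rwx, group r-x, other r--
  assert( check_mode_sketch(0754, 1000, 100, 1000, 100, 07)); // owner: rwx ok
  assert( check_mode_sketch(0754, 1000, 100, 2000, 100, 05)); // group: r-x ok
  assert(!check_mode_sketch(0754, 1000, 100, 2000, 100, 02)); // group: w denied
  assert(!check_mode_sketch(0754, 1000, 100, 2000, 200, 02)); // other: w denied
  return 0;
}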
null
ceph-main/src/client/Inode.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_CLIENT_INODE_H #define CEPH_CLIENT_INODE_H #include <numeric> #include "include/compat.h" #include "include/ceph_assert.h" #include "include/types.h" #include "include/xlist.h" #include "mds/flock.h" #include "mds/mdstypes.h" // hrm #include "include/cephfs/types.h" #include "osdc/ObjectCacher.h" #include "InodeRef.h" #include "MetaSession.h" #include "UserPerm.h" #include "Delegation.h" class Client; class Dentry; class Dir; struct SnapRealm; struct Inode; class MetaRequest; class filepath; class Fh; class Cap { public: Cap() = delete; Cap(Inode &i, MetaSession *s) : inode(i), session(s), gen(s->cap_gen), cap_item(this) { s->caps.push_back(&cap_item); } ~Cap() { cap_item.remove_myself(); } void touch(void) { // move to back of LRU session->caps.push_back(&cap_item); } void dump(Formatter *f) const; Inode &inode; MetaSession *session; uint64_t cap_id = 0; unsigned issued = 0; unsigned implemented = 0; unsigned wanted = 0; // as known to mds. uint64_t seq = 0; uint64_t issue_seq = 0; __u32 mseq = 0; // migration seq __u32 gen; UserPerm latest_perms; private: /* Note that this Cap will not move (see Inode::caps): * * Section 23.1.2#8 * The insert members shall not affect the validity of iterators and * references to the container, and the erase members shall invalidate only * iterators and references to the erased elements. */ xlist<Cap *>::item cap_item; }; struct CapSnap { //snapid_t follows; // map key InodeRef in; SnapContext context; int issued = 0, dirty = 0; uint64_t size = 0; utime_t ctime, btime, mtime, atime; version_t time_warp_seq = 0; uint64_t change_attr = 0; uint32_t mode = 0; uid_t uid = 0; gid_t gid = 0; std::map<std::string,bufferptr> xattrs; version_t xattr_version = 0; bufferlist inline_data; version_t inline_version = 0; bool writing = false, dirty_data = false; uint64_t flush_tid = 0; int64_t cap_dirtier_uid = -1; int64_t cap_dirtier_gid = -1; explicit CapSnap(Inode *i) : in(i) {} void dump(Formatter *f) const; }; // inode flags #define I_COMPLETE (1 << 0) #define I_DIR_ORDERED (1 << 1) #define I_SNAPDIR_OPEN (1 << 2) #define I_KICK_FLUSH (1 << 3) #define I_CAP_DROPPED (1 << 4) #define I_ERROR_FILELOCK (1 << 5) struct Inode : RefCountedObject { ceph::coarse_mono_time hold_caps_until; Client *client; // -- the actual inode -- inodeno_t ino; // ORDER DEPENDENCY: oset snapid_t snapid; ino_t faked_ino = 0; uint32_t rdev = 0; // if special file // affected by any inode change... utime_t ctime; // inode change time utime_t btime; // birth time // perm (namespace permissions) uint32_t mode = 0; uid_t uid = 0; gid_t gid = 0; // nlink int32_t nlink = 0; // file (data access) ceph_dir_layout dir_layout{}; file_layout_t layout; uint64_t size = 0; // on directory, # dentries uint32_t truncate_seq = 1; uint64_t truncate_size = -1; utime_t mtime; // file data modify time. utime_t atime; // file data access time. 
uint32_t time_warp_seq = 0; // count of (potential) mtime/atime timewarps (i.e., utimes()) uint64_t change_attr = 0; uint64_t max_size = 0; // max size we can write to // dirfrag, recursive accountin frag_info_t dirstat; nest_info_t rstat; // special stuff version_t version = 0; // auth only version_t xattr_version = 0; utime_t snap_btime; // snapshot creation (birth) time std::map<std::string, std::string> snap_metadata; // inline data version_t inline_version = 0; bufferlist inline_data; std::vector<uint8_t> fscrypt_auth; std::vector<uint8_t> fscrypt_file; bool is_fscrypt_enabled() { return !!fscrypt_auth.size(); } bool is_root() const { return ino == CEPH_INO_ROOT; } bool is_symlink() const { return (mode & S_IFMT) == S_IFLNK; } bool is_dir() const { return (mode & S_IFMT) == S_IFDIR; } bool is_file() const { return (mode & S_IFMT) == S_IFREG; } bool has_dir_layout() const { return layout != file_layout_t(); } __u32 hash_dentry_name(const std::string &dn) { int which = dir_layout.dl_dir_hash; if (!which) which = CEPH_STR_HASH_LINUX; ceph_assert(ceph_str_hash_valid(which)); return ceph_str_hash(which, dn.data(), dn.length()); } unsigned flags = 0; quota_info_t quota; bool is_complete_and_ordered() { static const unsigned wants = I_COMPLETE | I_DIR_ORDERED; return (flags & wants) == wants; } // about the dir (if this is one!) Dir *dir = 0; // if i'm a dir. fragtree_t dirfragtree; uint64_t dir_release_count = 1; uint64_t dir_ordered_count = 1; bool dir_hashed = false; bool dir_replicated = false; // per-mds caps std::map<mds_rank_t, Cap> caps; // mds -> Cap Cap *auth_cap = 0; int64_t cap_dirtier_uid = -1; int64_t cap_dirtier_gid = -1; unsigned dirty_caps = 0; unsigned flushing_caps = 0; std::map<ceph_tid_t, int> flushing_cap_tids; int shared_gen = 0; int cache_gen = 0; int snap_caps = 0; int snap_cap_refs = 0; xlist<Inode*>::item delay_cap_item, dirty_cap_item, flushing_cap_item; SnapRealm *snaprealm = 0; xlist<Inode*>::item snaprealm_item; InodeRef snapdir_parent; // only if we are a snapdir inode std::map<snapid_t,CapSnap> cap_snaps; // pending flush to mds //int open_by_mode[CEPH_FILE_MODE_NUM]; std::map<int,int> open_by_mode; std::map<int,int> cap_refs; ObjectCacher::ObjectSet oset; // ORDER DEPENDENCY: ino uint64_t reported_size = 0; uint64_t wanted_max_size = 0; uint64_t requested_max_size = 0; uint64_t ll_ref = 0; // separate ref count for ll client xlist<Dentry *> dentries; // if i'm linked to a dentry. std::string symlink; // symlink content, if it's a symlink std::map<std::string,bufferptr> xattrs; std::map<frag_t,int> fragmap; // known frag -> mds mappings std::map<frag_t, std::vector<mds_rank_t>> frag_repmap; // non-auth mds mappings std::list<ceph::condition_variable*> waitfor_caps; std::list<ceph::condition_variable*> waitfor_commit; std::list<ceph::condition_variable*> waitfor_deleg; Dentry *get_first_parent() { ceph_assert(!dentries.empty()); return *dentries.begin(); } void make_long_path(filepath& p); void make_short_path(filepath& p); void make_nosnap_relative_path(filepath& p); // The ref count. 1 for each dentry, fh, inode_map, // cwd that links to me. 
void iget() { get(); } void iput(int n=1) { ceph_assert(n >= 0); while (n--) put(); } void ll_get() { ll_ref++; } void ll_put(uint64_t n=1) { ceph_assert(ll_ref >= n); ll_ref -= n; } // file locks std::unique_ptr<ceph_lock_state_t> fcntl_locks; std::unique_ptr<ceph_lock_state_t> flock_locks; bool has_any_filelocks() { return (fcntl_locks && !fcntl_locks->empty()) || (flock_locks && !flock_locks->empty()); } std::list<Delegation> delegations; xlist<MetaRequest*> unsafe_ops; std::set<Fh*> fhs; mds_rank_t dir_pin = MDS_RANK_NONE; Inode() = delete; Inode(Client *c, vinodeno_t vino, file_layout_t *newlayout) : client(c), ino(vino.ino), snapid(vino.snapid), delay_cap_item(this), dirty_cap_item(this), flushing_cap_item(this), snaprealm_item(this), oset((void *)this, newlayout->pool_id, this->ino) {} ~Inode(); vinodeno_t vino() const { return vinodeno_t(ino, snapid); } struct Compare { bool operator() (Inode* const & left, Inode* const & right) { if (left->ino.val < right->ino.val) { return (left->snapid.val < right->snapid.val); } return false; } }; bool check_mode(const UserPerm& perms, unsigned want); // CAPS -------- void get_open_ref(int mode); bool put_open_ref(int mode); void get_cap_ref(int cap); int put_cap_ref(int cap); bool is_any_caps(); bool cap_is_valid(const Cap &cap) const; int caps_issued(int *implemented = 0) const; void try_touch_cap(mds_rank_t mds); bool caps_issued_mask(unsigned mask, bool allow_impl=false); int caps_used(); int caps_file_wanted(); int caps_wanted(); int caps_mds_wanted(); int caps_dirty(); const UserPerm *get_best_perms(); bool have_valid_size(); Dir *open_dir(); void add_fh(Fh *f) {fhs.insert(f);} void rm_fh(Fh *f) {fhs.erase(f);} void set_async_err(int r); void dump(Formatter *f) const; void break_all_delegs() { break_deleg(false); }; void recall_deleg(bool skip_read); bool has_recalled_deleg(); int set_deleg(Fh *fh, unsigned type, ceph_deleg_cb_t cb, void *priv); void unset_deleg(Fh *fh); void mark_caps_dirty(int caps); void mark_caps_clean(); private: // how many opens for write on this Inode? long open_count_for_write() { return (long)(open_by_mode[CEPH_FILE_MODE_RDWR] + open_by_mode[CEPH_FILE_MODE_WR]); }; // how many opens of any sort on this inode? long open_count() { return (long) std::accumulate(open_by_mode.begin(), open_by_mode.end(), 0, [] (int value, const std::map<int, int>::value_type& p) { return value + p.second; }); }; void break_deleg(bool skip_read); bool delegations_broken(bool skip_read); }; std::ostream& operator<<(std::ostream &out, const Inode &in); #endif
9,590
25.133515
95
h
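Inode.h above keeps two separate reference counters: the intrusive count inherited from RefCountedObject (manipulated via iget()/iput()) pins the in-memory object for dentries, Fhs, the inode map and cwd, while ll_ref separately tracks references handed out through the low-level ll_* API. A toy sketch of that dual-counter lifetime rule, with hypothetical names:

#include <cstdint>
#include <cassert>

// Toy model: an inode may only be dropped once both counters reach zero.
struct ToyInode {
  int64_t nref = 1;     // internal pin count (RefCountedObject stand-in)
  uint64_t ll_ref = 0;  // low-level client references
  void iget() { ++nref; }
  void iput() { assert(nref > 0); --nref; }
  void ll_get() { ++ll_ref; }
  void ll_put(uint64_t n = 1) { assert(ll_ref >= n); ll_ref -= n; }
  bool droppable() const { return nref == 0 && ll_ref == 0; }
};

int main() {
  ToyInode in;
  in.ll_get();             // handed to a libcephfs caller
  in.iput();               // internal pin released
  assert(!in.droppable()); // still referenced by the ll client
  in.ll_put();
  assert(in.droppable());
  return 0;
}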
null
ceph-main/src/client/InodeRef.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_CLIENT_INODEREF_H
#define CEPH_CLIENT_INODEREF_H

#include <boost/intrusive_ptr.hpp>

class Inode;

void intrusive_ptr_add_ref(Inode *in);
void intrusive_ptr_release(Inode *in);

typedef boost::intrusive_ptr<Inode> InodeRef;

#endif
341
25.307692
70
h
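InodeRef works because boost::intrusive_ptr delegates retain/release to two free functions found by argument-dependent lookup; the pointee carries its own counter. A self-contained sketch of that contract with a stand-in type (Node is hypothetical; Inode's real hooks live elsewhere in the client):

#include <boost/intrusive_ptr.hpp>
#include <atomic>
#include <cassert>

struct Node {
  std::atomic<int> refs{0};
};

// The two hooks boost::intrusive_ptr requires for Node.
inline void intrusive_ptr_add_ref(Node *n) { n->refs.fetch_add(1); }
inline void intrusive_ptr_release(Node *n) {
  if (n->refs.fetch_sub(1) == 1)
    delete n;
}

int main() {
  boost::intrusive_ptr<Node> a(new Node); // refs: 1
  {
    boost::intrusive_ptr<Node> b = a;     // refs: 2
    assert(a->refs == 2);
  }                                       // b destroyed, refs: 1
  assert(a->refs == 1);
  return 0;                               // a destroyed, Node deleted
}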
null
ceph-main/src/client/MetaRequest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "include/types.h"
#include "client/MetaRequest.h"
#include "client/Dentry.h"
#include "client/Inode.h"
#include "messages/MClientReply.h"
#include "common/Formatter.h"

void MetaRequest::dump(Formatter *f) const
{
  auto age = std::chrono::duration<double>(ceph_clock_now() - op_stamp);
  f->dump_unsigned("tid", tid);
  f->dump_string("op", ceph_mds_op_name(head.op));
  f->dump_stream("path") << path;
  f->dump_stream("path2") << path2;
  if (_inode)
    f->dump_stream("ino") << _inode->ino;
  if (_old_inode)
    f->dump_stream("old_ino") << _old_inode->ino;
  if (_other_inode)
    f->dump_stream("other_ino") << _other_inode->ino;
  if (target)
    f->dump_stream("target_ino") << target->ino;
  if (_dentry)
    f->dump_string("dentry", _dentry->name);
  if (_old_dentry)
    f->dump_string("old_dentry", _old_dentry->name);
  f->dump_stream("hint_ino") << inodeno_t(head.ino);

  f->dump_stream("sent_stamp") << sent_stamp;
  f->dump_float("age", age.count());
  f->dump_int("mds", mds);
  f->dump_int("resend_mds", resend_mds);
  f->dump_int("send_to_auth", send_to_auth);
  f->dump_unsigned("sent_on_mseq", sent_on_mseq);
  f->dump_int("retry_attempt", retry_attempt);

  f->dump_int("got_unsafe", got_unsafe);

  f->dump_unsigned("uid", head.caller_uid);
  f->dump_unsigned("gid", head.caller_gid);

  f->dump_unsigned("oldest_client_tid", head.oldest_client_tid);
  f->dump_unsigned("mdsmap_epoch", head.mdsmap_epoch);
  f->dump_unsigned("flags", head.flags);
  f->dump_unsigned("num_retry", head.ext_num_retry);
  f->dump_unsigned("num_fwd", head.ext_num_fwd);
  f->dump_unsigned("num_releases", head.num_releases);

  f->dump_int("abort_rc", abort_rc);
}

MetaRequest::~MetaRequest()
{
  if (_dentry)
    _dentry->put();
  if (_old_dentry)
    _old_dentry->put();
}

void MetaRequest::set_dentry(Dentry *d)
{
  ceph_assert(_dentry == NULL);
  _dentry = d;
  _dentry->get();
}

Dentry *MetaRequest::dentry()
{
  return _dentry;
}

void MetaRequest::set_old_dentry(Dentry *d)
{
  ceph_assert(_old_dentry == NULL);
  _old_dentry = d;
  _old_dentry->get();
}

Dentry *MetaRequest::old_dentry()
{
  return _old_dentry;
}
2,238
26.641975
72
cc
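MetaRequest::set_dentry() and ~MetaRequest() above implement a simple pinning discipline: attaching a dentry takes a reference so it cannot be freed while an in-flight request still names it, and the request's destructor drops that reference. A toy sketch of the pattern (hypothetical ToyDentry/ToyRequest types):

#include <cassert>

struct ToyDentry {
  int ref = 1;
  void get() { ++ref; }
  void put() { assert(ref > 0); --ref; }
};

struct ToyRequest {
  ToyDentry *dn = nullptr;
  void set_dentry(ToyDentry *d) {
    assert(dn == nullptr); // set at most once, as in MetaRequest
    dn = d;
    dn->get();             // pin for the request's lifetime
  }
  ~ToyRequest() { if (dn) dn->put(); }
};

int main() {
  ToyDentry d;
  {
    ToyRequest req;
    req.set_dentry(&d);
    assert(d.ref == 2); // pinned while the request is alive
  }
  assert(d.ref == 1);   // unpinned by the destructor
  return 0;
}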
null
ceph-main/src/client/MetaRequest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_CLIENT_METAREQUEST_H #define CEPH_CLIENT_METAREQUEST_H #include "include/types.h" #include "include/xlist.h" #include "include/filepath.h" #include "mds/mdstypes.h" #include "InodeRef.h" #include "UserPerm.h" #include "messages/MClientRequest.h" #include "messages/MClientReply.h" class Dentry; class dir_result_t; struct MetaRequest { private: InodeRef _inode, _old_inode, _other_inode; Dentry *_dentry = NULL; //associated with path Dentry *_old_dentry = NULL; //associated with path2 int abort_rc = 0; public: ceph::coarse_mono_time created = ceph::coarse_mono_clock::zero(); uint64_t tid = 0; utime_t op_stamp; ceph_mds_request_head head; filepath path, path2; std::string alternate_name; std::vector<uint8_t> fscrypt_auth; std::vector<uint8_t> fscrypt_file; bufferlist data; int inode_drop = 0; //the inode caps this operation will drop int inode_unless = 0; //unless we have these caps already int old_inode_drop = 0, old_inode_unless = 0; int dentry_drop = 0, dentry_unless = 0; int old_dentry_drop = 0, old_dentry_unless = 0; int other_inode_drop = 0, other_inode_unless = 0; std::vector<MClientRequest::Release> cap_releases; int regetattr_mask = 0; // getattr mask if i need to re-stat after a traceless reply utime_t sent_stamp; mds_rank_t mds = -1; // who i am asking mds_rank_t resend_mds = -1; // someone wants you to (re)send the request here bool send_to_auth = false; // must send to auth mds __u32 sent_on_mseq = 0; // mseq at last submission of this request int num_fwd = 0; // # of times i've been forwarded int retry_attempt = 0; std::atomic<uint64_t> ref = { 1 }; ceph::cref_t<MClientReply> reply = NULL; // the reply bool kick = false; bool success = false; // readdir result dir_result_t *dirp = NULL; //possible responses bool got_unsafe = false; xlist<MetaRequest*>::item item; xlist<MetaRequest*>::item unsafe_item; xlist<MetaRequest*>::item unsafe_dir_item; xlist<MetaRequest*>::item unsafe_target_item; ceph::condition_variable *caller_cond = NULL; // who to take up ceph::condition_variable *dispatch_cond = NULL; // who to kick back std::list<ceph::condition_variable*> waitfor_safe; InodeRef target; UserPerm perms; explicit MetaRequest(int op) : item(this), unsafe_item(this), unsafe_dir_item(this), unsafe_target_item(this) { memset(&head, 0, sizeof(head)); head.op = op; } ~MetaRequest(); /** * Prematurely terminate the request, such that callers * to make_request will receive `rc` as their result. */ void abort(int rc) { ceph_assert(rc != 0); abort_rc = rc; } /** * Whether abort() has been called for this request */ inline bool aborted() const { return abort_rc != 0; } /** * Given that abort() has been called for this request, what `rc` was * passed into it? 
*/ int get_abort_code() const { return abort_rc; } void set_inode(Inode *in) { _inode = in; } Inode *inode() { return _inode.get(); } void take_inode(InodeRef *out) { out->swap(_inode); } void set_old_inode(Inode *in) { _old_inode = in; } Inode *old_inode() { return _old_inode.get(); } void take_old_inode(InodeRef *out) { out->swap(_old_inode); } void set_other_inode(Inode *in) { _other_inode = in; } Inode *other_inode() { return _other_inode.get(); } void take_other_inode(InodeRef *out) { out->swap(_other_inode); } void set_dentry(Dentry *d); Dentry *dentry(); void set_old_dentry(Dentry *d); Dentry *old_dentry(); MetaRequest* get() { ref++; return this; } /// psuedo-private put method; use Client::put_request() bool _put() { int v = --ref; return v == 0; } // normal fields void set_tid(ceph_tid_t t) { tid = t; } void set_oldest_client_tid(ceph_tid_t t) { head.oldest_client_tid = t; } void inc_num_fwd() { head.ext_num_fwd = head.ext_num_fwd + 1; } void set_retry_attempt(int a) { head.ext_num_retry = a; } void set_filepath(const filepath& fp) { path = fp; } void set_filepath2(const filepath& fp) { path2 = fp; } void set_alternate_name(std::string an) { alternate_name = an; } void set_string2(const char *s) { path2.set_path(std::string_view(s), 0); } void set_caller_perms(const UserPerm& _perms) { perms = _perms; head.caller_uid = perms.uid(); head.caller_gid = perms.gid(); } uid_t get_uid() { return perms.uid(); } uid_t get_gid() { return perms.gid(); } void set_data(const bufferlist &d) { data = d; } void set_dentry_wanted() { head.flags = head.flags | CEPH_MDS_FLAG_WANT_DENTRY; } int get_op() { return head.op; } ceph_tid_t get_tid() { return tid; } filepath& get_filepath() { return path; } filepath& get_filepath2() { return path2; } bool is_write() { return (head.op & CEPH_MDS_OP_WRITE) || (head.op == CEPH_MDS_OP_OPEN && (head.args.open.flags & (O_CREAT|O_TRUNC))); } bool can_forward() { if ((head.op & CEPH_MDS_OP_WRITE) || head.op == CEPH_MDS_OP_OPEN) // do not forward _any_ open request. return false; return true; } bool auth_is_best(int issued) { if (send_to_auth) return true; /* Any write op ? */ if (head.op & CEPH_MDS_OP_WRITE) return true; switch (head.op) { case CEPH_MDS_OP_OPEN: case CEPH_MDS_OP_READDIR: return true; case CEPH_MDS_OP_GETATTR: /* * If any 'x' caps is issued we can just choose the auth MDS * instead of the random replica MDSes. Because only when the * Locker is in LOCK_EXEC state will the loner client could * get the 'x' caps. And if we send the getattr requests to * any replica MDS it must auth pin and tries to rdlock from * the auth MDS, and then the auth MDS need to do the Locker * state transition to LOCK_SYNC. And after that the lock state * will change back. * * This cost much when doing the Locker state transition and * usually will need to revoke caps from clients. * * And for the 'Xs' caps for getxattr we will also choose the * auth MDS, because the MDS side code is buggy due to setxattr * won't notify the replica MDSes when the values changed and * the replica MDS will return the old values. Though we will * fix it in MDS code, but this still makes sense for old ceph. */ if (((head.args.getattr.mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL)) || (head.args.getattr.mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR))) return true; default: return false; } } void dump(Formatter *f) const; }; #endif
6,922
28.088235
95
h
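The routing rules in MetaRequest.h's can_forward() and auth_is_best() boil down to: writes and opens must go to the authoritative MDS, while plain reads may be served by a replica. An illustrative sketch of that decision follows; the flag values are stand-ins, not the real CEPH_MDS_OP_* encoding.

#include <cassert>

enum : unsigned { OP_WRITE_BIT = 0x1000 };               // stand-in write flag
enum : unsigned { OP_GETATTR = 0x0001, OP_OPEN = 0x0002,
                  OP_SETATTR = OP_WRITE_BIT | 0x0001 };  // stand-in ops

// Mirrors can_forward(): never forward writes, and never forward opens.
static bool can_forward_sketch(unsigned op) {
  return !(op & OP_WRITE_BIT) && op != OP_OPEN;
}

int main() {
  assert( can_forward_sketch(OP_GETATTR)); // read: a replica may serve it
  assert(!can_forward_sketch(OP_OPEN));    // open: auth MDS only
  assert(!can_forward_sketch(OP_SETATTR)); // write: auth MDS only
  return 0;
}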
null
ceph-main/src/client/MetaSession.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "include/types.h"
#include "messages/MClientCapRelease.h"
#include "MetaSession.h"
#include "Inode.h"

#include "common/Formatter.h"

const char *MetaSession::get_state_name() const
{
  switch (state) {
  case STATE_NEW: return "new";
  case STATE_OPENING: return "opening";
  case STATE_OPEN: return "open";
  case STATE_CLOSING: return "closing";
  case STATE_CLOSED: return "closed";
  case STATE_STALE: return "stale";
  default: return "unknown";
  }
}

void MetaSession::dump(Formatter *f, bool cap_dump) const
{
  f->dump_int("mds", mds_num);
  f->dump_object("addrs", addrs);
  f->dump_unsigned("seq", seq);
  f->dump_unsigned("cap_gen", cap_gen);
  f->dump_stream("cap_ttl") << cap_ttl;
  f->dump_stream("last_cap_renew_request") << last_cap_renew_request;
  f->dump_unsigned("cap_renew_seq", cap_renew_seq);
  f->dump_int("num_caps", caps.size());
  if (cap_dump) {
    f->open_array_section("caps");
    for (const auto& cap : caps) {
      f->dump_object("cap", *cap);
    }
    f->close_section();
  }
  f->dump_string("state", get_state_name());
}

void MetaSession::enqueue_cap_release(inodeno_t ino, uint64_t cap_id,
				      ceph_seq_t iseq, ceph_seq_t mseq,
				      epoch_t osd_barrier)
{
  if (!release) {
    release = ceph::make_message<MClientCapRelease>();
  }

  if (osd_barrier > release->osd_epoch_barrier) {
    release->osd_epoch_barrier = osd_barrier;
  }

  ceph_mds_cap_item i;
  i.ino = ino;
  i.cap_id = cap_id;
  i.seq = iseq;
  i.migrate_seq = mseq;
  release->caps.push_back(i);
}
1,613
24.619048
86
cc
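enqueue_cap_release() above batches many cap releases into a single pending message per session, and only ever raises the OSD epoch barrier. A sketch of the same pattern with toy types (all names here are hypothetical):

#include <vector>
#include <cstdint>
#include <cassert>

struct CapItem { uint64_t ino, cap_id, seq, mseq; };
struct ReleaseMsg {
  uint32_t osd_epoch_barrier = 0;
  std::vector<CapItem> caps;
};

struct ToySession {
  ReleaseMsg *release = nullptr;
  void enqueue_cap_release(uint64_t ino, uint64_t cap_id,
                           uint64_t seq, uint64_t mseq, uint32_t barrier) {
    if (!release)
      release = new ReleaseMsg;       // lazily create one pending message
    if (barrier > release->osd_epoch_barrier)
      release->osd_epoch_barrier = barrier; // barrier only moves forward
    release->caps.push_back({ino, cap_id, seq, mseq});
  }
  ~ToySession() { delete release; }
};

int main() {
  ToySession s;
  s.enqueue_cap_release(1, 10, 1, 0, 5);
  s.enqueue_cap_release(2, 11, 1, 0, 3); // lower barrier does not rewind
  assert(s.release->caps.size() == 2);   // both ride in one message
  assert(s.release->osd_epoch_barrier == 5);
  return 0;
}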
null
ceph-main/src/client/MetaSession.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_CLIENT_METASESSION_H
#define CEPH_CLIENT_METASESSION_H

#include "include/types.h"
#include "include/utime.h"
#include "include/xlist.h"
#include "mds/MDSMap.h"
#include "mds/mdstypes.h"
#include "messages/MClientCapRelease.h"

struct Cap;
struct Inode;
struct CapSnap;
struct MetaRequest;

struct MetaSession {
  mds_rank_t mds_num;
  ConnectionRef con;
  version_t seq = 0;
  uint64_t cap_gen = 0;
  utime_t cap_ttl, last_cap_renew_request;
  uint64_t cap_renew_seq = 0;
  entity_addrvec_t addrs;
  feature_bitset_t mds_features;
  feature_bitset_t mds_metric_flags;

  enum {
    STATE_NEW, // Unused
    STATE_OPENING,
    STATE_OPEN,
    STATE_CLOSING,
    STATE_CLOSED,
    STATE_STALE,
    STATE_REJECTED,
  } state = STATE_OPENING;

  enum {
    RECLAIM_NULL,
    RECLAIMING,
    RECLAIM_OK,
    RECLAIM_FAIL,
  } reclaim_state = RECLAIM_NULL;

  int mds_state = MDSMap::STATE_NULL;
  bool readonly = false;

  std::list<Context*> waiting_for_open;

  xlist<Cap*> caps;
  // dirty_list keeps all the dirty inodes before flushing in current session.
  xlist<Inode*> dirty_list;
  xlist<Inode*> flushing_caps;
  xlist<MetaRequest*> requests;
  xlist<MetaRequest*> unsafe_requests;
  std::set<ceph_tid_t> flushing_caps_tids;

  ceph::ref_t<MClientCapRelease> release;

  MetaSession(mds_rank_t mds_num, ConnectionRef con,
	      const entity_addrvec_t& addrs)
    : mds_num(mds_num), con(con), addrs(addrs) {
  }

  xlist<Inode*> &get_dirty_list() { return dirty_list; }

  const char *get_state_name() const;
  void dump(Formatter *f, bool cap_dump=false) const;

  void enqueue_cap_release(inodeno_t ino, uint64_t cap_id, ceph_seq_t iseq,
			   ceph_seq_t mseq, epoch_t osd_barrier);
};

using MetaSessionRef = std::shared_ptr<MetaSession>;

#endif
1,861
22.871795
83
h
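The cap_ttl / last_cap_renew_request fields above give the session a lease-style validity window: caps issued by a session are only trusted while the TTL has not elapsed, after which they must be renewed. A simplified sketch of that check, assuming a toy absolute-time clock (the real code uses utime_t and also considers the cap generation):

#include <cassert>

struct ToyClock { double now; };
struct ToySession {
  double cap_ttl = 0; // absolute expiry time of the current lease
  bool caps_valid(const ToyClock &c) const { return c.now < cap_ttl; }
  void renewed(double at, double lease) { cap_ttl = at + lease; }
};

int main() {
  ToySession s;
  s.renewed(100.0, 60.0);          // MDS granted a 60s lease at t=100
  assert( s.caps_valid({130.0}));  // still inside the lease
  assert(!s.caps_valid({161.0}));  // expired; must renew before trusting caps
  return 0;
}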
null
ceph-main/src/client/ObjecterWriteback.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_OSDC_OBJECTERWRITEBACKHANDLER_H
#define CEPH_OSDC_OBJECTERWRITEBACKHANDLER_H

#include "osdc/Objecter.h"
#include "osdc/WritebackHandler.h"

class ObjecterWriteback : public WritebackHandler {
 public:
  ObjecterWriteback(Objecter *o, Finisher *fin, ceph::mutex *lock)
    : m_objecter(o), m_finisher(fin), m_lock(lock) { }
  ~ObjecterWriteback() override {}

  void read(const object_t& oid, uint64_t object_no,
	    const object_locator_t& oloc, uint64_t off, uint64_t len,
	    snapid_t snapid, bufferlist *pbl, uint64_t trunc_size,
	    __u32 trunc_seq, int op_flags,
	    const ZTracer::Trace &parent_trace, Context *onfinish) override {
    m_objecter->read_trunc(oid, oloc, off, len, snapid, pbl, 0,
			   trunc_size, trunc_seq,
			   new C_OnFinisher(new C_Lock(m_lock, onfinish),
					    m_finisher));
  }

  bool may_copy_on_write(const object_t& oid, uint64_t read_off,
			 uint64_t read_len, snapid_t snapid) override {
    return false;
  }

  ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
		   uint64_t off, uint64_t len,
		   const SnapContext& snapc, const bufferlist &bl,
		   ceph::real_time mtime, uint64_t trunc_size,
		   __u32 trunc_seq, ceph_tid_t journal_tid,
		   const ZTracer::Trace &parent_trace,
		   Context *oncommit) override {
    return m_objecter->write_trunc(oid, oloc, off, len, snapc, bl, mtime, 0,
				   trunc_size, trunc_seq,
				   new C_OnFinisher(new C_Lock(m_lock, oncommit),
						    m_finisher));
  }

  bool can_scattered_write() override { return true; }

  using WritebackHandler::write;
  ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
		   std::vector<std::pair<uint64_t, bufferlist> >& io_vec,
		   const SnapContext& snapc, ceph::real_time mtime,
		   uint64_t trunc_size, __u32 trunc_seq,
		   Context *oncommit) override {
    ObjectOperation op;
    for (auto& [offset, bl] : io_vec)
      op.write(offset, bl, trunc_size, trunc_seq);

    return m_objecter->mutate(oid, oloc, op, snapc, mtime, 0,
			      new C_OnFinisher(new C_Lock(m_lock, oncommit),
					       m_finisher));
  }

 private:
  Objecter *m_objecter;
  Finisher *m_finisher;
  ceph::mutex *m_lock;
};

#endif
2,406
32.901408
81
h
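Because can_scattered_write() returns true, the cache may hand the scattered write() overload several (offset, data) extents at once, and they are folded into one compound object operation rather than one RPC per extent. A sketch of that coalescing with toy types (ToyOp stands in for ObjectOperation; plain strings stand in for bufferlists):

#include <vector>
#include <string>
#include <cstdint>
#include <cassert>

struct ToyOp {
  std::vector<std::pair<uint64_t, std::string>> writes;
  void write(uint64_t off, const std::string &data) {
    writes.emplace_back(off, data);
  }
};

static ToyOp build_scattered_write(
    const std::vector<std::pair<uint64_t, std::string>> &io_vec) {
  ToyOp op;
  for (const auto &[off, data] : io_vec)
    op.write(off, data); // all extents share one compound mutation
  return op;
}

int main() {
  ToyOp op = build_scattered_write({{0, "head"}, {4096, "tail"}});
  assert(op.writes.size() == 2); // two extents, a single round trip
  return 0;
}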
null
ceph-main/src/client/RWRef.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 * ============
 *
 * This is a common read/write reference framework, which works
 * similarly to a RW lock. The difference here is that the "readers"
 * won't hold any lock, but will instead increase a reference when the
 * "require" state is matched, or set a flag to tell the callers that
 * the "require" state is not matched. There is no wait mechanism for
 * "readers" to wait for the state to match; it is up to the callers
 * to determine what to do next.
 *
 * The usage, such as in libcephfs's client/Client.cc case:
 *
 * The Readers:
 *
 *  The ll_read()/ll_write(), etc. functions work as "readers". In the
 *  beginning they just need to define a RWRef object, and the RWRef
 *  constructor will check whether the state is MOUNTED or MOUNTING;
 *  if not, it will fail and return directly, doing nothing, otherwise
 *  it will increase the reference and continue. And when destructing
 *  the RWRef object, the RWRef destructor will decrease the reference
 *  and notify the "writers" who may be waiting.
 *
 * The Writers:
 *
 *  The _unmount() function, as a "writer", also just needs to define
 *  a RWRef object. The RWRef constructor will update the state to the
 *  next stage first, which will then fail all the newly arriving
 *  "readers", and then wait for all the in-flight "readers" to finish.
 *
 * With this we can get rid of the locks for all the "readers" and they
 * can run in parallel. And we won't have any potential deadlock issue
 * with RWRef, such as:
 *
 * With RWLock:
 *
 *     ThreadA:                              ThreadB:
 *
 *     write_lock<RWLock1>.lock();           another_lock.lock();
 *     state = NEXT_STATE;                   ...
 *     another_lock.lock();                  read_lock<RWLock1>.lock();
 *     ...                                   if (state == STATE) {
 *                                             ...
 *                                           }
 *                                           ...
 *
 * With RWRef:
 *
 *     ThreadA:                              ThreadB:
 *
 *     w = RWRef(myS, NEXT_STATE, false);    another_lock.lock();
 *     another_lock.lock();                  r = RWRef(myS, STATE);
 *     ...                                   if (r.is_state_satisfied()) {
 *                                             ...
 *                                           }
 *                                           ...
 *
 * And also in ThreadA, if it needs to do cond.wait(&another_lock), it
 * will go to sleep while holding write_lock<RWLock1> in the RWLock
 * case. If the ThreadBs are serving IOs, they may be stuck for a very
 * long time and time out in the upper layer, which may keep retrying.
 * With RWRef, ThreadB will fail or continue immediately without
 * getting stuck, and the upper layer will quickly know what to do
 * next.
 */

#ifndef CEPH_RWRef_Posix__H
#define CEPH_RWRef_Posix__H

#include <string>
#include "include/ceph_assert.h"
#include "common/ceph_mutex.h"

/* The status mechanism info */
template<typename T>
struct RWRefState {
public:
  template <typename T1> friend class RWRef;

  /*
   * This will be the status mechanism. Currently you need to define
   * it by yourself.
   */
  T state;

  /*
   * User defined method to check whether the "require" state
   * is in the proper range we need.
   *
   * For example for the client/Client.cc:
   *   In some reader operation cases we need to make sure the
   *   client state is in the mounting or mounted states, so it
   *   will set "require = mounting" in class RWRef's constructor.
   *   Then check_reader_state() should return true if the state
   *   is already in the mounting or mounted state.
   */
  virtual int check_reader_state(T require) const = 0;

  /*
   * User defined method to check whether the "require" state
   * is in the proper range we need.
   *
   * This will usually be the state migration check.
   */
  virtual int check_writer_state(T require) const = 0;

  /*
   * User defined method to check whether the "require"
   * state is valid or not.
   */
  virtual bool is_valid_state(T require) const = 0;

  int64_t get_state() const {
    std::scoped_lock l{lock};
    return state;
  }

  bool check_current_state(T require) const {
    ceph_assert(is_valid_state(require));

    std::scoped_lock l{lock};
    return state == require;
  }

  RWRefState(T init_state, const char *lockname, uint64_t _reader_cnt=0)
    : state(init_state), lock(ceph::make_mutex(lockname)),
      reader_cnt(_reader_cnt) {}
  virtual ~RWRefState() {}

private:
  mutable ceph::mutex lock;
  ceph::condition_variable cond;
  uint64_t reader_cnt = 0;
};

template<typename T>
class RWRef {
public:
  RWRef(const RWRef& other) = delete;
  const RWRef& operator=(const RWRef& other) = delete;

  RWRef(RWRefState<T> &s, T require, bool ir=true)
    : S(s), is_reader(ir) {
    ceph_assert(S.is_valid_state(require));

    std::scoped_lock l{S.lock};
    if (likely(is_reader)) { // Readers will update the reader_cnt
      if (S.check_reader_state(require)) {
        S.reader_cnt++;
        satisfied = true;
      }
    } else { // Writers will update the state
      is_reader = false;

      /*
       * If the current state is not the same as "require",
       * then update the state and we are the first writer.
       *
       * Otherwise there is already one writer running or
       * finished; the user can choose to continue or just
       * break.
       */
      if (S.check_writer_state(require)) {
        first_writer = true;
        S.state = require;
      }
      satisfied = true;
    }
  }

  /*
   * Whether the "require" state is in the proper range of
   * the states.
   */
  bool is_state_satisfied() const {
    return satisfied;
  }

  /*
   * Update the state; only the writer may do the update.
   */
  void update_state(T new_state) {
    ceph_assert(!is_reader);
    ceph_assert(S.is_valid_state(new_state));

    std::scoped_lock l{S.lock};
    S.state = new_state;
  }

  /*
   * For the current state, whether we are the first writer or not.
   */
  bool is_first_writer() const {
    return first_writer;
  }

  /*
   * Will wait for all the in-flight "readers" to finish.
   */
  void wait_readers_done() {
    // Only writers can wait
    ceph_assert(!is_reader);

    std::unique_lock l{S.lock};

    S.cond.wait(l, [this] {
      return !S.reader_cnt;
    });
  }

  ~RWRef() {
    std::scoped_lock l{S.lock};
    if (!is_reader)
      return;

    if (!satisfied)
      return;

    /*
     * Decrease the refcnt and notify the waiters
     */
    if (--S.reader_cnt == 0)
      S.cond.notify_all();
  }

private:
  RWRefState<T> &S;
  bool satisfied = false;
  bool first_writer = false;
  bool is_reader = true;
};

#endif // !CEPH_RWRef_Posix__H
7,286
28.742857
87
h
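A usage sketch for the RWRef framework above, modeled on the mount states its header comment describes. The state values and the ToyStateRef subclass are illustrative; this compiles against RWRef.h inside the ceph tree (it depends on ceph_mutex and the in-tree likely() macro), not as a fully standalone program.

#include "RWRef.h" // the header above
#include <cassert>

enum ToyState { UNMOUNTED, MOUNTING, MOUNTED, UNMOUNTING };

struct ToyStateRef : RWRefState<ToyState> {
  ToyStateRef() : RWRefState(UNMOUNTED, "ToyStateRef::lock") {}
  int check_reader_state(ToyState require) const override {
    // readers asking for MOUNTED are satisfied while mounting or mounted
    return require == MOUNTED && (state == MOUNTING || state == MOUNTED);
  }
  int check_writer_state(ToyState require) const override {
    // the first writer is the one that actually moves the state forward
    return state != require;
  }
  bool is_valid_state(ToyState require) const override {
    return require >= UNMOUNTED && require <= UNMOUNTING;
  }
};

int main() {
  ToyStateRef S;
  {
    RWRef<ToyState> w(S, MOUNTING, false); // writer: begin mounting
    assert(w.is_first_writer());
    w.update_state(MOUNTED);               // mount finished
  }
  {
    RWRef<ToyState> r(S, MOUNTED);         // reader: bumps reader_cnt, no lock held
    assert(r.is_state_satisfied());
  }                                        // reader drops its ref, writers notified
  RWRef<ToyState> u(S, UNMOUNTING, false); // writer: fails new readers from here on
  u.wait_readers_done();                   // no readers left; returns at once
  return 0;
}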
null
ceph-main/src/client/SyntheticClient.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <[email protected]> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "include/compat.h" #include <iostream> #include <sstream> #include "common/config.h" #include "SyntheticClient.h" #include "osdc/Objecter.h" #include "osdc/Filer.h" #include "include/filepath.h" #include "common/perf_counters.h" #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <utime.h> #include <math.h> #include <sys/statvfs.h> #include "common/errno.h" #include "include/ceph_assert.h" #include "include/cephfs/ceph_ll_client.h" #define dout_context g_ceph_context #define dout_subsys ceph_subsys_client #undef dout_prefix #define dout_prefix *_dout << "client." << (whoami >= 0 ? whoami:client->get_nodeid()) << " " using namespace std; // traces //void trace_include(SyntheticClient *syn, Client *cl, string& prefix); //void trace_openssh(SyntheticClient *syn, Client *cl, string& prefix); int num_client = 1; list<int> syn_modes; list<int> syn_iargs; list<string> syn_sargs; int syn_filer_flags = 0; void parse_syn_options(vector<const char*>& args) { vector<const char*> nargs; for (unsigned i=0; i<args.size(); i++) { if (strcmp(args[i],"--num-client") == 0) { num_client = atoi(args[++i]); continue; } if (strcmp(args[i],"--syn") == 0) { ++i; if (strcmp(args[i], "mksnap") == 0) { syn_modes.push_back(SYNCLIENT_MODE_MKSNAP); syn_sargs.push_back(args[++i]); // path syn_sargs.push_back(args[++i]); // name } else if (strcmp(args[i], "rmsnap") == 0) { syn_modes.push_back(SYNCLIENT_MODE_RMSNAP); syn_sargs.push_back(args[++i]); // path syn_sargs.push_back(args[++i]); // name } else if (strcmp(args[i], "mksnapfile") == 0) { syn_modes.push_back(SYNCLIENT_MODE_MKSNAPFILE); syn_sargs.push_back(args[++i]); // path } else if (strcmp(args[i],"rmfile") == 0) { syn_modes.push_back( SYNCLIENT_MODE_RMFILE ); } else if (strcmp(args[i],"writefile") == 0) { syn_modes.push_back( SYNCLIENT_MODE_WRITEFILE ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"wrshared") == 0) { syn_modes.push_back( SYNCLIENT_MODE_WRSHARED ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"writebatch") == 0) { syn_modes.push_back( SYNCLIENT_MODE_WRITEBATCH ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"readfile") == 0) { syn_modes.push_back( SYNCLIENT_MODE_READFILE ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"readwriterandom") == 0) { syn_modes.push_back( SYNCLIENT_MODE_RDWRRANDOM ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"readwriterandom_ex") == 0) { syn_modes.push_back( SYNCLIENT_MODE_RDWRRANDOM_EX ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"overloadosd0") == 0) { syn_modes.push_back( SYNCLIENT_MODE_OVERLOAD_OSD_0 ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"readshared") == 0) { 
syn_modes.push_back( SYNCLIENT_MODE_READSHARED ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"rw") == 0) { int a = atoi(args[++i]); int b = atoi(args[++i]); syn_modes.push_back( SYNCLIENT_MODE_WRITEFILE ); syn_iargs.push_back( a ); syn_iargs.push_back( b ); syn_modes.push_back( SYNCLIENT_MODE_READFILE ); syn_iargs.push_back( a ); syn_iargs.push_back( b ); } else if (strcmp(args[i],"dumpplacement") == 0) { syn_modes.push_back( SYNCLIENT_MODE_DUMP ); syn_sargs.push_back( args[++i] ); } else if (strcmp(args[i],"dropcache") == 0) { syn_modes.push_back( SYNCLIENT_MODE_DROPCACHE ); } else if (strcmp(args[i],"makedirs") == 0) { syn_modes.push_back( SYNCLIENT_MODE_MAKEDIRS ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"makedirmess") == 0) { syn_modes.push_back( SYNCLIENT_MODE_MAKEDIRMESS ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"statdirs") == 0) { syn_modes.push_back( SYNCLIENT_MODE_STATDIRS ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"readdirs") == 0) { syn_modes.push_back( SYNCLIENT_MODE_READDIRS ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"makefiles") == 0) { syn_modes.push_back( SYNCLIENT_MODE_MAKEFILES ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"makefiles2") == 0) { syn_modes.push_back( SYNCLIENT_MODE_MAKEFILES2 ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"linktest") == 0) { syn_modes.push_back( SYNCLIENT_MODE_LINKTEST ); } else if (strcmp(args[i],"createshared") == 0) { syn_modes.push_back( SYNCLIENT_MODE_CREATESHARED ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"openshared") == 0) { syn_modes.push_back( SYNCLIENT_MODE_OPENSHARED ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"createobjects") == 0) { syn_modes.push_back( SYNCLIENT_MODE_CREATEOBJECTS ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"objectrw") == 0) { syn_modes.push_back( SYNCLIENT_MODE_OBJECTRW ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"walk") == 0) { syn_modes.push_back( SYNCLIENT_MODE_FULLWALK ); //syn_sargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"randomwalk") == 0) { syn_modes.push_back( SYNCLIENT_MODE_RANDOMWALK ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"trace") == 0) { syn_modes.push_back( SYNCLIENT_MODE_TRACE ); syn_sargs.push_back( args[++i] ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back(1);// data } else if (strcmp(args[i],"mtrace") == 0) { syn_modes.push_back( SYNCLIENT_MODE_TRACE ); syn_sargs.push_back( args[++i] ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back(0);// no data } else if (strcmp(args[i],"thrashlinks") == 0) { 
syn_modes.push_back( SYNCLIENT_MODE_THRASHLINKS ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"foo") == 0) { syn_modes.push_back( SYNCLIENT_MODE_FOO ); } else if (strcmp(args[i],"until") == 0) { syn_modes.push_back( SYNCLIENT_MODE_UNTIL ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"sleepuntil") == 0) { syn_modes.push_back( SYNCLIENT_MODE_SLEEPUNTIL ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"only") == 0) { syn_modes.push_back( SYNCLIENT_MODE_ONLY ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"onlyrange") == 0) { syn_modes.push_back( SYNCLIENT_MODE_ONLYRANGE ); syn_iargs.push_back( atoi(args[++i]) ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"sleep") == 0) { syn_modes.push_back( SYNCLIENT_MODE_SLEEP ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"randomsleep") == 0) { syn_modes.push_back( SYNCLIENT_MODE_RANDOMSLEEP ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"opentest") == 0) { syn_modes.push_back( SYNCLIENT_MODE_OPENTEST ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"optest") == 0) { syn_modes.push_back( SYNCLIENT_MODE_OPTEST ); syn_iargs.push_back( atoi(args[++i]) ); } else if (strcmp(args[i],"truncate") == 0) { syn_modes.push_back( SYNCLIENT_MODE_TRUNCATE ); syn_sargs.push_back(args[++i]); syn_iargs.push_back(atoi(args[++i])); } else if (strcmp(args[i],"importfind") == 0) { syn_modes.push_back(SYNCLIENT_MODE_IMPORTFIND); syn_sargs.push_back(args[++i]); syn_sargs.push_back(args[++i]); syn_iargs.push_back(atoi(args[++i])); } else if (strcmp(args[i], "lookuphash") == 0) { syn_modes.push_back(SYNCLIENT_MODE_LOOKUPHASH); syn_sargs.push_back(args[++i]); syn_sargs.push_back(args[++i]); syn_sargs.push_back(args[++i]); } else if (strcmp(args[i], "lookupino") == 0) { syn_modes.push_back(SYNCLIENT_MODE_LOOKUPINO); syn_sargs.push_back(args[++i]); } else if (strcmp(args[i], "chunkfile") == 0) { syn_modes.push_back(SYNCLIENT_MODE_CHUNK); syn_sargs.push_back(args[++i]); } else { cerr << "unknown syn arg " << args[i] << std::endl; ceph_abort(); } } else if (strcmp(args[i], "localize_reads") == 0) { cerr << "set CEPH_OSD_FLAG_LOCALIZE_READS" << std::endl; syn_filer_flags |= CEPH_OSD_FLAG_LOCALIZE_READS; } else { nargs.push_back(args[i]); } } args = nargs; } SyntheticClient::SyntheticClient(StandaloneClient *client, int w) { this->client = client; whoami = w; thread_id = 0; did_readdir = false; run_only = -1; exclude = -1; this->modes = syn_modes; this->iargs = syn_iargs; this->sargs = syn_sargs; run_start = ceph_clock_now(); } #define DBL 2 void *synthetic_client_thread_entry(void *ptr) { SyntheticClient *sc = static_cast<SyntheticClient*>(ptr); //int r = sc->run(); return 0;//(void*)r; } string SyntheticClient::get_sarg(int seq) { string a; if (!sargs.empty()) { a = sargs.front(); sargs.pop_front(); } if (a.length() == 0 || a == "~") { char s[30]; snprintf(s, sizeof(s), "syn.%lld.%d", (long long)client->whoami.v, seq); a = s; } return a; } int SyntheticClient::run() { UserPerm perms = client->pick_my_perms(); dout(15) << "initing" << dendl; int err = client->init(); if (err < 0) { dout(0) << "failed to initialize: " << cpp_strerror(err) << dendl; return -1; } dout(15) << "mounting" << dendl; err = client->mount("", perms); if (err < 0) { dout(0) << "failed to mount: " << cpp_strerror(err) << dendl; 
client->shutdown(); return -1; } //run_start = ceph_clock_now(client->cct); run_until = utime_t(0,0); dout(5) << "run" << dendl; int seq = 0; for (list<int>::iterator it = modes.begin(); it != modes.end(); ++it) { int mode = *it; dout(3) << "mode " << mode << dendl; switch (mode) { // WHO? case SYNCLIENT_MODE_ONLY: { run_only = iargs.front(); iargs.pop_front(); if (run_only == client->get_nodeid()) dout(2) << "only " << run_only << dendl; } break; case SYNCLIENT_MODE_ONLYRANGE: { int first = iargs.front(); iargs.pop_front(); int last = iargs.front(); iargs.pop_front(); if (first <= client->get_nodeid() && last > client->get_nodeid()) { run_only = client->get_nodeid(); dout(2) << "onlyrange [" << first << ", " << last << ") includes me" << dendl; } else run_only = client->get_nodeid().v+1; // not me } break; case SYNCLIENT_MODE_EXCLUDE: { exclude = iargs.front(); iargs.pop_front(); if (exclude == client->get_nodeid()) { run_only = client->get_nodeid().v + 1; dout(2) << "not running " << exclude << dendl; } else run_only = -1; } break; // HOW LONG? case SYNCLIENT_MODE_UNTIL: { int iarg1 = iargs.front(); iargs.pop_front(); if (run_me()) { if (iarg1) { dout(2) << "until " << iarg1 << dendl; utime_t dur(iarg1,0); run_until = run_start + dur; } else { dout(2) << "until " << iarg1 << " (no limit)" << dendl; run_until = utime_t(0,0); } } } break; // ... case SYNCLIENT_MODE_FOO: if (run_me()) { foo(); } did_run_me(); break; case SYNCLIENT_MODE_RANDOMSLEEP: { int iarg1 = iargs.front(); iargs.pop_front(); if (run_me()) { srand(time(0) + getpid() + client->whoami.v); sleep(rand() % iarg1); } did_run_me(); } break; case SYNCLIENT_MODE_SLEEP: { int iarg1 = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "sleep " << iarg1 << dendl; sleep(iarg1); } did_run_me(); } break; case SYNCLIENT_MODE_SLEEPUNTIL: { int iarg1 = iargs.front(); iargs.pop_front(); if (iarg1 && run_me()) { dout(2) << "sleepuntil " << iarg1 << dendl; utime_t at = ceph_clock_now() - run_start; if (at.sec() < iarg1) sleep(iarg1 - at.sec()); } did_run_me(); } break; case SYNCLIENT_MODE_RANDOMWALK: { int iarg1 = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "randomwalk " << iarg1 << dendl; random_walk(iarg1); } did_run_me(); } break; case SYNCLIENT_MODE_DROPCACHE: { client->sync_fs(); client->drop_caches(); } break; case SYNCLIENT_MODE_DUMP: { string sarg1 = get_sarg(0); if (run_me()) { dout(2) << "placement dump " << sarg1 << dendl; dump_placement(sarg1); } did_run_me(); } break; case SYNCLIENT_MODE_MAKEDIRMESS: { string sarg1 = get_sarg(0); int iarg1 = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "makedirmess " << sarg1 << " " << iarg1 << dendl; make_dir_mess(sarg1.c_str(), iarg1); } did_run_me(); } break; case SYNCLIENT_MODE_MAKEDIRS: { string sarg1 = get_sarg(seq++); int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); int iarg3 = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "makedirs " << sarg1 << " " << iarg1 << " " << iarg2 << " " << iarg3 << dendl; make_dirs(sarg1.c_str(), iarg1, iarg2, iarg3); } did_run_me(); } break; case SYNCLIENT_MODE_STATDIRS: { string sarg1 = get_sarg(0); int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); int iarg3 = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "statdirs " << sarg1 << " " << iarg1 << " " << iarg2 << " " << iarg3 << dendl; stat_dirs(sarg1.c_str(), iarg1, iarg2, iarg3); } did_run_me(); } break; case SYNCLIENT_MODE_READDIRS: { string sarg1 = 
get_sarg(0); int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); int iarg3 = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "readdirs " << sarg1 << " " << iarg1 << " " << iarg2 << " " << iarg3 << dendl; read_dirs(sarg1.c_str(), iarg1, iarg2, iarg3); } did_run_me(); } break; case SYNCLIENT_MODE_THRASHLINKS: { string sarg1 = get_sarg(0); int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); int iarg3 = iargs.front(); iargs.pop_front(); int iarg4 = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "thrashlinks " << sarg1 << " " << iarg1 << " " << iarg2 << " " << iarg3 << dendl; thrash_links(sarg1.c_str(), iarg1, iarg2, iarg3, iarg4); } did_run_me(); } break; case SYNCLIENT_MODE_LINKTEST: { if (run_me()) { link_test(); } did_run_me(); } break; case SYNCLIENT_MODE_MAKEFILES: { int num = iargs.front(); iargs.pop_front(); int count = iargs.front(); iargs.pop_front(); int priv = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "makefiles " << num << " " << count << " " << priv << dendl; make_files(num, count, priv, false); } did_run_me(); } break; case SYNCLIENT_MODE_MAKEFILES2: { int num = iargs.front(); iargs.pop_front(); int count = iargs.front(); iargs.pop_front(); int priv = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "makefiles2 " << num << " " << count << " " << priv << dendl; make_files(num, count, priv, true); } did_run_me(); } break; case SYNCLIENT_MODE_CREATESHARED: { string sarg1 = get_sarg(0); int num = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "createshared " << num << dendl; create_shared(num); } did_run_me(); } break; case SYNCLIENT_MODE_OPENSHARED: { string sarg1 = get_sarg(0); int num = iargs.front(); iargs.pop_front(); int count = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "openshared " << num << dendl; open_shared(num, count); } did_run_me(); } break; case SYNCLIENT_MODE_CREATEOBJECTS: { int count = iargs.front(); iargs.pop_front(); int size = iargs.front(); iargs.pop_front(); int inflight = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "createobjects " << count << " of " << size << " bytes" << ", " << inflight << " in flight" << dendl; create_objects(count, size, inflight); } did_run_me(); } break; case SYNCLIENT_MODE_OBJECTRW: { int count = iargs.front(); iargs.pop_front(); int size = iargs.front(); iargs.pop_front(); int wrpc = iargs.front(); iargs.pop_front(); int overlap = iargs.front(); iargs.pop_front(); int rskew = iargs.front(); iargs.pop_front(); int wskew = iargs.front(); iargs.pop_front(); if (run_me()) { dout(2) << "objectrw " << count << " " << size << " " << wrpc << " " << overlap << " " << rskew << " " << wskew << dendl; object_rw(count, size, wrpc, overlap, rskew, wskew); } did_run_me(); } break; case SYNCLIENT_MODE_FULLWALK: { string sarg1;// = get_sarg(0); if (run_me()) { dout(2) << "fullwalk" << sarg1 << dendl; full_walk(sarg1); } did_run_me(); } break; case SYNCLIENT_MODE_REPEATWALK: { string sarg1 = get_sarg(0); if (run_me()) { dout(2) << "repeatwalk " << sarg1 << dendl; while (full_walk(sarg1) == 0) ; } did_run_me(); } break; case SYNCLIENT_MODE_RMFILE: { string sarg1 = get_sarg(0); if (run_me()) { rm_file(sarg1); } did_run_me(); } break; case SYNCLIENT_MODE_WRITEFILE: { string sarg1 = get_sarg(0); int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); dout(1) << "WRITING SYN CLIENT" << dendl; if (run_me()) { write_file(sarg1, iarg1, 
iarg2); } did_run_me(); } break; case SYNCLIENT_MODE_CHUNK: if (run_me()) { string sarg1 = get_sarg(0); chunk_file(sarg1); } did_run_me(); break; case SYNCLIENT_MODE_OVERLOAD_OSD_0: { dout(1) << "OVERLOADING OSD 0" << dendl; int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); int iarg3 = iargs.front(); iargs.pop_front(); if (run_me()) { overload_osd_0(iarg1, iarg2, iarg3); } did_run_me(); } break; case SYNCLIENT_MODE_WRSHARED: { string sarg1 = "shared"; int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); if (run_me()) { write_file(sarg1, iarg1, iarg2); } did_run_me(); } break; case SYNCLIENT_MODE_READSHARED: { string sarg1 = "shared"; int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); if (run_me()) { read_file(sarg1, iarg1, iarg2, true); } did_run_me(); } break; case SYNCLIENT_MODE_WRITEBATCH: { int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); int iarg3 = iargs.front(); iargs.pop_front(); if (run_me()) { write_batch(iarg1, iarg2, iarg3); } did_run_me(); } break; case SYNCLIENT_MODE_READFILE: { string sarg1 = get_sarg(0); int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); dout(1) << "READING SYN CLIENT" << dendl; if (run_me()) { read_file(sarg1, iarg1, iarg2); } did_run_me(); } break; case SYNCLIENT_MODE_RDWRRANDOM: { string sarg1 = get_sarg(0); int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); dout(1) << "RANDOM READ WRITE SYN CLIENT" << dendl; if (run_me()) { read_random(sarg1, iarg1, iarg2); } did_run_me(); } break; case SYNCLIENT_MODE_RDWRRANDOM_EX: { string sarg1 = get_sarg(0); int iarg1 = iargs.front(); iargs.pop_front(); int iarg2 = iargs.front(); iargs.pop_front(); dout(1) << "RANDOM READ WRITE SYN CLIENT" << dendl; if (run_me()) { read_random_ex(sarg1, iarg1, iarg2); } did_run_me(); } break; case SYNCLIENT_MODE_TRACE: { string tfile = get_sarg(0); sargs.push_front(string("~")); int iarg1 = iargs.front(); iargs.pop_front(); int playdata = iargs.front(); iargs.pop_front(); string prefix = get_sarg(0); char realtfile[100]; snprintf(realtfile, sizeof(realtfile), tfile.c_str(), (int)client->get_nodeid().v); if (run_me()) { dout(0) << "trace " << tfile << " prefix=" << prefix << " count=" << iarg1 << " data=" << playdata << dendl; Trace t(realtfile); if (iarg1 == 0) iarg1 = 1; // play trace at least once! for (int i=0; i<iarg1; i++) { utime_t start = ceph_clock_now(); if (time_to_stop()) break; play_trace(t, prefix, !playdata); if (time_to_stop()) break; if (iarg1 > 1) clean_dir(prefix); // clean only if repeat utime_t lat = ceph_clock_now(); lat -= start; dout(0) << " trace " << tfile << " loop " << (i+1) << "/" << iarg1 << " done in " << (double)lat << " seconds" << dendl; if (client->logger && i > 0 && i < iarg1-1 ) { //client->logger->finc("trsum", (double)lat); //client->logger->inc("trnum"); } } dout(1) << "done " << dendl; } did_run_me(); } break; case SYNCLIENT_MODE_OPENTEST: { int count = iargs.front(); iargs.pop_front(); if (run_me()) { for (int i=0; i<count; i++) { int fd = client->open("test", (rand()%2) ? 
(O_WRONLY|O_CREAT) : O_RDONLY, perms); if (fd > 0) client->close(fd); } } did_run_me(); } break; case SYNCLIENT_MODE_OPTEST: { int count = iargs.front(); iargs.pop_front(); if (run_me()) { client->mknod("test", 0777, perms); struct stat st; for (int i=0; i<count; i++) { client->lstat("test", &st, perms); client->chmod("test", 0777, perms); } } did_run_me(); } break; case SYNCLIENT_MODE_TRUNCATE: { string file = get_sarg(0); sargs.push_front(file); int iarg1 = iargs.front(); iargs.pop_front(); if (run_me()) { client->truncate(file.c_str(), iarg1, perms); } did_run_me(); } break; case SYNCLIENT_MODE_IMPORTFIND: { string base = get_sarg(0); string find = get_sarg(0); int data = get_iarg(); if (run_me()) { import_find(base.c_str(), find.c_str(), data); } did_run_me(); } break; case SYNCLIENT_MODE_LOOKUPHASH: { inodeno_t ino; string iname = get_sarg(0); sscanf(iname.c_str(), "%llx", (long long unsigned*)&ino.val); inodeno_t dirino; string diname = get_sarg(0); sscanf(diname.c_str(), "%llx", (long long unsigned*)&dirino.val); string name = get_sarg(0); if (run_me()) { lookup_hash(ino, dirino, name.c_str(), perms); } } break; case SYNCLIENT_MODE_LOOKUPINO: { inodeno_t ino; string iname = get_sarg(0); sscanf(iname.c_str(), "%llx", (long long unsigned*)&ino.val); if (run_me()) { lookup_ino(ino, perms); } } break; case SYNCLIENT_MODE_MKSNAP: { string base = get_sarg(0); string name = get_sarg(0); if (run_me()) mksnap(base.c_str(), name.c_str(), perms); did_run_me(); } break; case SYNCLIENT_MODE_RMSNAP: { string base = get_sarg(0); string name = get_sarg(0); if (run_me()) rmsnap(base.c_str(), name.c_str(), perms); did_run_me(); } break; case SYNCLIENT_MODE_MKSNAPFILE: { string base = get_sarg(0); if (run_me()) mksnapfile(base.c_str()); did_run_me(); } break; default: ceph_abort(); } } dout(1) << "syn done, unmounting " << dendl; client->unmount(); client->shutdown(); return 0; } int SyntheticClient::start_thread() { ceph_assert(!thread_id); pthread_create(&thread_id, NULL, synthetic_client_thread_entry, this); ceph_assert(thread_id); ceph_pthread_setname(thread_id, "client"); return 0; } int SyntheticClient::join_thread() { ceph_assert(thread_id); void *rv; pthread_join(thread_id, &rv); return 0; } bool roll_die(float p) { float r = (float)(rand() % 100000) / 100000.0; if (r < p) return true; else return false; } void SyntheticClient::init_op_dist() { op_dist.clear(); #if 0 op_dist.add( CEPH_MDS_OP_STAT, 610 ); op_dist.add( CEPH_MDS_OP_UTIME, 0 ); op_dist.add( CEPH_MDS_OP_CHMOD, 1 ); op_dist.add( CEPH_MDS_OP_CHOWN, 1 ); #endif op_dist.add( CEPH_MDS_OP_READDIR, 2 ); op_dist.add( CEPH_MDS_OP_MKNOD, 30 ); op_dist.add( CEPH_MDS_OP_LINK, 0 ); op_dist.add( CEPH_MDS_OP_UNLINK, 20 ); op_dist.add( CEPH_MDS_OP_RENAME, 40 ); op_dist.add( CEPH_MDS_OP_MKDIR, 10 ); op_dist.add( CEPH_MDS_OP_RMDIR, 20 ); op_dist.add( CEPH_MDS_OP_SYMLINK, 20 ); op_dist.add( CEPH_MDS_OP_OPEN, 200 ); //op_dist.add( CEPH_MDS_OP_READ, 0 ); //op_dist.add( CEPH_MDS_OP_WRITE, 0 ); //op_dist.add( CEPH_MDS_OP_TRUNCATE, 0 ); //op_dist.add( CEPH_MDS_OP_FSYNC, 0 ); //op_dist.add( CEPH_MDS_OP_RELEASE, 200 ); op_dist.normalize(); } void SyntheticClient::up() { cwd = cwd.prefixpath(cwd.depth()-1); dout(DBL) << "cd .. 
-> " << cwd << dendl; clear_dir(); } int SyntheticClient::play_trace(Trace& t, string& prefix, bool metadata_only) { dout(4) << "play trace prefix '" << prefix << "'" << dendl; UserPerm perms = client->pick_my_perms(); t.start(); string buf; string buf2; utime_t start = ceph_clock_now(); ceph::unordered_map<int64_t, int64_t> open_files; ceph::unordered_map<int64_t, dir_result_t*> open_dirs; ceph::unordered_map<int64_t, Fh*> ll_files; ceph::unordered_map<int64_t, dir_result_t*> ll_dirs; ceph::unordered_map<uint64_t, int64_t> ll_inos; Inode *i1, *i2; ll_inos[1] = 1; // root inode is known. // prefix? const char *p = prefix.c_str(); if (prefix.length()) { client->mkdir(prefix.c_str(), 0755, perms); struct ceph_statx stx; i1 = client->ll_get_inode(vinodeno_t(1, CEPH_NOSNAP)); if (client->ll_lookupx(i1, prefix.c_str(), &i2, &stx, CEPH_STATX_INO, 0, perms) == 0) { ll_inos[1] = stx.stx_ino; dout(5) << "'root' ino is " << inodeno_t(stx.stx_ino) << dendl; client->ll_put(i1); } else { dout(0) << "warning: play_trace couldn't lookup up my per-client directory" << dendl; } } else (void) client->ll_get_inode(vinodeno_t(1, CEPH_NOSNAP)); utime_t last_status = start; int n = 0; // for object traces ceph::mutex lock = ceph::make_mutex("synclient foo"); ceph::condition_variable cond; bool ack; while (!t.end()) { if (++n == 100) { n = 00; utime_t now = last_status; if (now - last_status > 1.0) { last_status = now; dout(1) << "play_trace at line " << t.get_line() << dendl; } } if (time_to_stop()) break; // op const char *op = t.get_string(buf, 0); dout(4) << (t.get_line()-1) << ": trace op " << op << dendl; if (op[0] == '@') { // timestamp... ignore it! t.get_int(); // sec t.get_int(); // usec op = t.get_string(buf, 0); } // high level ops --------------------- UserPerm perms = client->pick_my_perms(); if (strcmp(op, "link") == 0) { const char *a = t.get_string(buf, p); const char *b = t.get_string(buf2, p); client->link(a, b, perms); } else if (strcmp(op, "unlink") == 0) { const char *a = t.get_string(buf, p); client->unlink(a, perms); } else if (strcmp(op, "rename") == 0) { const char *a = t.get_string(buf, p); const char *b = t.get_string(buf2, p); client->rename(a,b, perms); } else if (strcmp(op, "mkdir") == 0) { const char *a = t.get_string(buf, p); int64_t b = t.get_int(); client->mkdir(a, b, perms); } else if (strcmp(op, "rmdir") == 0) { const char *a = t.get_string(buf, p); client->rmdir(a, perms); } else if (strcmp(op, "symlink") == 0) { const char *a = t.get_string(buf, p); const char *b = t.get_string(buf2, p); client->symlink(a, b, perms); } else if (strcmp(op, "readlink") == 0) { const char *a = t.get_string(buf, p); char buf[100]; client->readlink(a, buf, 100, perms); } else if (strcmp(op, "lstat") == 0) { struct stat st; const char *a = t.get_string(buf, p); if (strcmp(a, p) != 0 && strcmp(a, "/") != 0 && strcmp(a, "/lib") != 0 && // or /lib.. that would be a lookup. hack. 
a[0] != 0) // stop stating the root directory already client->lstat(a, &st, perms); } else if (strcmp(op, "chmod") == 0) { const char *a = t.get_string(buf, p); int64_t b = t.get_int(); client->chmod(a, b, perms); } else if (strcmp(op, "chown") == 0) { const char *a = t.get_string(buf, p); int64_t b = t.get_int(); int64_t c = t.get_int(); client->chown(a, b, c, perms); } else if (strcmp(op, "utime") == 0) { const char *a = t.get_string(buf, p); int64_t b = t.get_int(); int64_t c = t.get_int(); struct utimbuf u; u.actime = b; u.modtime = c; client->utime(a, &u, perms); } else if (strcmp(op, "mknod") == 0) { const char *a = t.get_string(buf, p); int64_t b = t.get_int(); int64_t c = t.get_int(); client->mknod(a, b, perms, c); } else if (strcmp(op, "oldmknod") == 0) { const char *a = t.get_string(buf, p); int64_t b = t.get_int(); client->mknod(a, b, perms, 0); } else if (strcmp(op, "getdir") == 0) { const char *a = t.get_string(buf, p); list<string> contents; int r = client->getdir(a, contents, perms); if (r < 0) { dout(1) << "getdir on " << a << " returns " << r << dendl; } } else if (strcmp(op, "opendir") == 0) { const char *a = t.get_string(buf, p); int64_t b = t.get_int(); dir_result_t *dirp; client->opendir(a, &dirp, perms); if (dirp) open_dirs[b] = dirp; } else if (strcmp(op, "closedir") == 0) { int64_t a = t.get_int(); client->closedir(open_dirs[a]); open_dirs.erase(a); } else if (strcmp(op, "open") == 0) { const char *a = t.get_string(buf, p); int64_t b = t.get_int(); int64_t c = t.get_int(); int64_t d = t.get_int(); int64_t fd = client->open(a, b, perms, c); if (fd > 0) open_files[d] = fd; } else if (strcmp(op, "oldopen") == 0) { const char *a = t.get_string(buf, p); int64_t b = t.get_int(); int64_t d = t.get_int(); int64_t fd = client->open(a, b, perms, 0755); if (fd > 0) open_files[d] = fd; } else if (strcmp(op, "close") == 0) { int64_t id = t.get_int(); int64_t fh = open_files[id]; if (fh > 0) client->close(fh); open_files.erase(id); } else if (strcmp(op, "lseek") == 0) { int64_t f = t.get_int(); int fd = open_files[f]; int64_t off = t.get_int(); int64_t whence = t.get_int(); client->lseek(fd, off, whence); } else if (strcmp(op, "read") == 0) { int64_t f = t.get_int(); int64_t size = t.get_int(); int64_t off = t.get_int(); int64_t fd = open_files[f]; if (!metadata_only) { char *b = new char[size]; client->read(fd, b, size, off); delete[] b; } } else if (strcmp(op, "write") == 0) { int64_t f = t.get_int(); int64_t fd = open_files[f]; int64_t size = t.get_int(); int64_t off = t.get_int(); if (!metadata_only) { char *b = new char[size]; memset(b, 1, size); // let's write 1's! client->write(fd, b, size, off); delete[] b; } else { client->write(fd, NULL, 0, size+off); } } else if (strcmp(op, "truncate") == 0) { const char *a = t.get_string(buf, p); int64_t l = t.get_int(); client->truncate(a, l, perms); } else if (strcmp(op, "ftruncate") == 0) { int64_t f = t.get_int(); int fd = open_files[f]; int64_t l = t.get_int(); client->ftruncate(fd, l, perms); } else if (strcmp(op, "fsync") == 0) { int64_t f = t.get_int(); int64_t b = t.get_int(); int fd = open_files[f]; client->fsync(fd, b); } else if (strcmp(op, "chdir") == 0) { const char *a = t.get_string(buf, p); // Client users should remember their path, but since this // is just a synthetic client we ignore it. 
std::string ignore; client->chdir(a, ignore, perms); } else if (strcmp(op, "statfs") == 0) { struct statvfs stbuf; client->statfs("/", &stbuf, perms); } // low level ops --------------------- else if (strcmp(op, "ll_lookup") == 0) { int64_t i = t.get_int(); const char *name = t.get_string(buf, p); int64_t r = t.get_int(); struct ceph_statx stx; if (ll_inos.count(i)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); if (client->ll_lookupx(i1, name, &i2, &stx, CEPH_STATX_INO, 0, perms) == 0) ll_inos[r] = stx.stx_ino; client->ll_put(i1); } } else if (strcmp(op, "ll_forget") == 0) { int64_t i = t.get_int(); int64_t n = t.get_int(); if (ll_inos.count(i) && client->ll_forget( client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)), n)) ll_inos.erase(i); } else if (strcmp(op, "ll_getattr") == 0) { int64_t i = t.get_int(); struct stat attr; if (ll_inos.count(i)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); client->ll_getattr(i1, &attr, perms); client->ll_put(i1); } } else if (strcmp(op, "ll_setattr") == 0) { int64_t i = t.get_int(); struct stat attr; memset(&attr, 0, sizeof(attr)); attr.st_mode = t.get_int(); attr.st_uid = t.get_int(); attr.st_gid = t.get_int(); attr.st_size = t.get_int(); attr.st_mtime = t.get_int(); attr.st_atime = t.get_int(); int mask = t.get_int(); if (ll_inos.count(i)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); client->ll_setattr(i1, &attr, mask, perms); client->ll_put(i1); } } else if (strcmp(op, "ll_readlink") == 0) { int64_t i = t.get_int(); if (ll_inos.count(i)) { char buf[PATH_MAX]; i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); client->ll_readlink(i1, buf, sizeof(buf), perms); client->ll_put(i1); } } else if (strcmp(op, "ll_mknod") == 0) { int64_t i = t.get_int(); const char *n = t.get_string(buf, p); int m = t.get_int(); int r = t.get_int(); int64_t ri = t.get_int(); struct stat attr; if (ll_inos.count(i)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); if (client->ll_mknod(i1, n, m, r, &attr, &i2, perms) == 0) ll_inos[ri] = attr.st_ino; client->ll_put(i1); } } else if (strcmp(op, "ll_mkdir") == 0) { int64_t i = t.get_int(); const char *n = t.get_string(buf, p); int m = t.get_int(); int64_t ri = t.get_int(); struct stat attr; if (ll_inos.count(i)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); if (client->ll_mkdir(i1, n, m, &attr, &i2, perms) == 0) ll_inos[ri] = attr.st_ino; client->ll_put(i1); } } else if (strcmp(op, "ll_symlink") == 0) { int64_t i = t.get_int(); const char *n = t.get_string(buf, p); const char *v = t.get_string(buf2, p); int64_t ri = t.get_int(); struct stat attr; if (ll_inos.count(i)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); if (client->ll_symlink(i1, n, v, &attr, &i2, perms) == 0) ll_inos[ri] = attr.st_ino; client->ll_put(i1); } } else if (strcmp(op, "ll_unlink") == 0) { int64_t i = t.get_int(); const char *n = t.get_string(buf, p); if (ll_inos.count(i)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); client->ll_unlink(i1, n, perms); client->ll_put(i1); } } else if (strcmp(op, "ll_rmdir") == 0) { int64_t i = t.get_int(); const char *n = t.get_string(buf, p); if (ll_inos.count(i)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); client->ll_rmdir(i1, n, perms); client->ll_put(i1); } } else if (strcmp(op, "ll_rename") == 0) { int64_t i = t.get_int(); const char *n = t.get_string(buf, p); int64_t ni = t.get_int(); const char *nn = t.get_string(buf2, p); if (ll_inos.count(i) && 
ll_inos.count(ni)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); i2 = client->ll_get_inode(vinodeno_t(ll_inos[ni],CEPH_NOSNAP)); client->ll_rename(i1, n, i2, nn, perms); client->ll_put(i1); client->ll_put(i2); } } else if (strcmp(op, "ll_link") == 0) { int64_t i = t.get_int(); int64_t ni = t.get_int(); const char *nn = t.get_string(buf, p); if (ll_inos.count(i) && ll_inos.count(ni)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); i2 = client->ll_get_inode(vinodeno_t(ll_inos[ni],CEPH_NOSNAP)); client->ll_link(i1, i2, nn, perms); client->ll_put(i1); client->ll_put(i2); } } else if (strcmp(op, "ll_opendir") == 0) { int64_t i = t.get_int(); int64_t r = t.get_int(); dir_result_t *dirp; if (ll_inos.count(i)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); if (client->ll_opendir(i1, O_RDONLY, &dirp, perms) == 0) ll_dirs[r] = dirp; client->ll_put(i1); } } else if (strcmp(op, "ll_releasedir") == 0) { int64_t f = t.get_int(); if (ll_dirs.count(f)) { client->ll_releasedir(ll_dirs[f]); ll_dirs.erase(f); } } else if (strcmp(op, "ll_open") == 0) { int64_t i = t.get_int(); int64_t f = t.get_int(); int64_t r = t.get_int(); Fh *fhp; if (ll_inos.count(i)) { i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); if (client->ll_open(i1, f, &fhp, perms) == 0) ll_files[r] = fhp; client->ll_put(i1); } } else if (strcmp(op, "ll_create") == 0) { int64_t i = t.get_int(); const char *n = t.get_string(buf, p); int64_t m = t.get_int(); int64_t f = t.get_int(); int64_t r = t.get_int(); int64_t ri = t.get_int(); struct stat attr; if (ll_inos.count(i)) { Fh *fhp; i1 = client->ll_get_inode(vinodeno_t(ll_inos[i],CEPH_NOSNAP)); if (client->ll_create(i1, n, m, f, &attr, NULL, &fhp, perms) == 0) { ll_inos[ri] = attr.st_ino; ll_files[r] = fhp; } client->ll_put(i1); } } else if (strcmp(op, "ll_read") == 0) { int64_t f = t.get_int(); int64_t off = t.get_int(); int64_t size = t.get_int(); if (ll_files.count(f) && !metadata_only) { bufferlist bl; client->ll_read(ll_files[f], off, size, &bl); } } else if (strcmp(op, "ll_write") == 0) { int64_t f = t.get_int(); int64_t off = t.get_int(); int64_t size = t.get_int(); if (ll_files.count(f)) { if (!metadata_only) { bufferlist bl; bufferptr bp(size); bl.push_back(bp); bp.zero(); client->ll_write(ll_files[f], off, size, bl.c_str()); } else { client->ll_write(ll_files[f], off+size, 0, NULL); } } } else if (strcmp(op, "ll_flush") == 0) { int64_t f = t.get_int(); if (!metadata_only && ll_files.count(f)) client->ll_flush(ll_files[f]); } else if (strcmp(op, "ll_fsync") == 0) { int64_t f = t.get_int(); if (!metadata_only && ll_files.count(f)) client->ll_fsync(ll_files[f], false); // FIXME dataonly param } else if (strcmp(op, "ll_release") == 0) { int64_t f = t.get_int(); if (ll_files.count(f)) { client->ll_release(ll_files[f]); ll_files.erase(f); } } else if (strcmp(op, "ll_statfs") == 0) { int64_t i = t.get_int(); if (ll_inos.count(i)) {} //client->ll_statfs(vinodeno_t(ll_inos[i],CEPH_NOSNAP), perms); } // object-level traces else if (strcmp(op, "o_stat") == 0) { int64_t oh = t.get_int(); int64_t ol = t.get_int(); object_t oid = file_object_t(oh, ol); std::unique_lock locker{lock}; object_locator_t oloc(SYNCLIENT_FIRST_POOL); uint64_t size; ceph::real_time mtime; client->objecter->stat(oid, oloc, CEPH_NOSNAP, &size, &mtime, 0, new C_SafeCond(lock, cond, &ack)); cond.wait(locker, [&ack] { return ack; }); } else if (strcmp(op, "o_read") == 0) { int64_t oh = t.get_int(); int64_t ol = t.get_int(); int64_t off = t.get_int(); int64_t len = 
t.get_int(); object_t oid = file_object_t(oh, ol); object_locator_t oloc(SYNCLIENT_FIRST_POOL); std::unique_lock locker{lock}; bufferlist bl; client->objecter->read(oid, oloc, off, len, CEPH_NOSNAP, &bl, 0, new C_SafeCond(lock, cond, &ack)); cond.wait(locker, [&ack] { return ack; }); } else if (strcmp(op, "o_write") == 0) { int64_t oh = t.get_int(); int64_t ol = t.get_int(); int64_t off = t.get_int(); int64_t len = t.get_int(); object_t oid = file_object_t(oh, ol); object_locator_t oloc(SYNCLIENT_FIRST_POOL); std::unique_lock locker{lock}; bufferptr bp(len); bufferlist bl; bl.push_back(bp); SnapContext snapc; client->objecter->write(oid, oloc, off, len, snapc, bl, ceph::real_clock::now(), 0, new C_SafeCond(lock, cond, &ack)); cond.wait(locker, [&ack] { return ack; }); } else if (strcmp(op, "o_zero") == 0) { int64_t oh = t.get_int(); int64_t ol = t.get_int(); int64_t off = t.get_int(); int64_t len = t.get_int(); object_t oid = file_object_t(oh, ol); object_locator_t oloc(SYNCLIENT_FIRST_POOL); std::unique_lock locker{lock}; SnapContext snapc; client->objecter->zero(oid, oloc, off, len, snapc, ceph::real_clock::now(), 0, new C_SafeCond(lock, cond, &ack)); cond.wait(locker, [&ack] { return ack; }); } else { dout(0) << (t.get_line()-1) << ": *** trace hit unrecognized symbol '" << op << "' " << dendl; ceph_abort(); } } dout(10) << "trace finished on line " << t.get_line() << dendl; // close open files for (ceph::unordered_map<int64_t, int64_t>::iterator fi = open_files.begin(); fi != open_files.end(); ++fi) { dout(1) << "leftover close " << fi->second << dendl; if (fi->second > 0) client->close(fi->second); } for (ceph::unordered_map<int64_t, dir_result_t*>::iterator fi = open_dirs.begin(); fi != open_dirs.end(); ++fi) { dout(1) << "leftover closedir " << fi->second << dendl; if (fi->second != 0) client->closedir(fi->second); } for (ceph::unordered_map<int64_t,Fh*>::iterator fi = ll_files.begin(); fi != ll_files.end(); ++fi) { dout(1) << "leftover ll_release " << fi->second << dendl; if (fi->second) client->ll_release(fi->second); } for (ceph::unordered_map<int64_t,dir_result_t*>::iterator fi = ll_dirs.begin(); fi != ll_dirs.end(); ++fi) { dout(1) << "leftover ll_releasedir " << fi->second << dendl; if (fi->second) client->ll_releasedir(fi->second); } return 0; } int SyntheticClient::clean_dir(string& basedir) { // read dir list<string> contents; UserPerm perms = client->pick_my_perms(); int r = client->getdir(basedir.c_str(), contents, perms); if (r < 0) { dout(1) << "getdir on " << basedir << " returns " << r << dendl; return r; } for (list<string>::iterator it = contents.begin(); it != contents.end(); ++it) { if (*it == ".") continue; if (*it == "..") continue; string file = basedir + "/" + *it; if (time_to_stop()) break; struct stat st; int r = client->lstat(file.c_str(), &st, perms); if (r < 0) { dout(1) << "stat error on " << file << " r=" << r << dendl; continue; } if ((st.st_mode & S_IFMT) == S_IFDIR) { clean_dir(file); client->rmdir(file.c_str(), perms); } else { client->unlink(file.c_str(), perms); } } return 0; } int SyntheticClient::full_walk(string& basedir) { if (time_to_stop()) return -1; list<string> dirq; list<frag_info_t> statq; dirq.push_back(basedir); frag_info_t empty; statq.push_back(empty); ceph::unordered_map<inodeno_t, int> nlink; ceph::unordered_map<inodeno_t, int> nlink_seen; UserPerm perms = client->pick_my_perms(); while (!dirq.empty()) { string dir = dirq.front(); frag_info_t expect = statq.front(); dirq.pop_front(); statq.pop_front(); frag_info_t actual = empty; 
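/* full_walk's loop below pairs each queued directory (dirq) with the
   frag_info_t its parent reported for it (statq).  Listing a directory
   tallies what is actually found into 'actual' (nfiles, nsubdirs) for
   comparison against that expectation, and the nlink / nlink_seen maps
   accumulate a hard-link cross-check that is evaluated once the walk
   completes. */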
// read dir list<string> contents; int r = client->getdir(dir.c_str(), contents, perms); if (r < 0) { dout(1) << "getdir on " << dir << " returns " << r << dendl; continue; } for (list<string>::iterator it = contents.begin(); it != contents.end(); ++it) { if (*it == "." || *it == "..") continue; string file = dir + "/" + *it; struct stat st; frag_info_t dirstat; int r = client->lstat(file.c_str(), &st, perms, &dirstat); if (r < 0) { dout(1) << "stat error on " << file << " r=" << r << dendl; continue; } nlink_seen[st.st_ino]++; nlink[st.st_ino] = st.st_nlink; if (S_ISDIR(st.st_mode)) actual.nsubdirs++; else actual.nfiles++; // print char *tm = ctime(&st.st_mtime); tm[strlen(tm)-1] = 0; printf("%llx %c%c%c%c%c%c%c%c%c%c %2d %5d %5d %8llu %12s %s\n", (long long)st.st_ino, S_ISDIR(st.st_mode) ? 'd':'-', (st.st_mode & 0400) ? 'r':'-', (st.st_mode & 0200) ? 'w':'-', (st.st_mode & 0100) ? 'x':'-', (st.st_mode & 040) ? 'r':'-', (st.st_mode & 020) ? 'w':'-', (st.st_mode & 010) ? 'x':'-', (st.st_mode & 04) ? 'r':'-', (st.st_mode & 02) ? 'w':'-', (st.st_mode & 01) ? 'x':'-', (int)st.st_nlink, (int)st.st_uid, (int)st.st_gid, (long long unsigned)st.st_size, tm, file.c_str()); if ((st.st_mode & S_IFMT) == S_IFDIR) { dirq.push_back(file); statq.push_back(dirstat); } } if (dir != "" && (actual.nsubdirs != expect.nsubdirs || actual.nfiles != expect.nfiles)) { dout(0) << dir << ": expected " << expect << dendl; dout(0) << dir << ": got " << actual << dendl; } } for (ceph::unordered_map<inodeno_t,int>::iterator p = nlink.begin(); p != nlink.end(); ++p) { if (nlink_seen[p->first] != p->second) dout(0) << p->first << " nlink " << p->second << " != " << nlink_seen[p->first] << "seen" << dendl; } return 0; } int SyntheticClient::dump_placement(string& fn) { UserPerm perms = client->pick_my_perms(); // open file int fd = client->open(fn.c_str(), O_RDONLY, perms); dout(5) << "reading from " << fn << " fd " << fd << dendl; if (fd < 0) return fd; // How big is it? struct stat stbuf; int lstat_result = client->lstat(fn.c_str(), &stbuf, perms); if (lstat_result < 0) { dout(0) << "lstat error for file " << fn << dendl; client->close(fd); return lstat_result; } off_t filesize = stbuf.st_size; // grab the placement info vector<ObjectExtent> extents; off_t offset = 0; client->enumerate_layout(fd, extents, filesize, offset); client->close(fd); // run through all the object extents dout(0) << "file size is " << filesize << dendl; dout(0) << "(osd, start, length) tuples for file " << fn << dendl; for (const auto& x : extents) { int osd = client->objecter->with_osdmap([&](const OSDMap& o) { return o.get_pg_acting_primary(o.object_locator_to_pg(x.oid, x.oloc)); }); // run through all the buffer extents for (const auto& be : x.buffer_extents) dout(0) << "OSD " << osd << ", offset " << be.first << ", length " << be.second << dendl; } return 0; } int SyntheticClient::make_dirs(const char *basedir, int dirs, int files, int depth) { if (time_to_stop()) return 0; UserPerm perms = client->pick_my_perms(); // make sure base dir exists int r = client->mkdir(basedir, 0755, perms); if (r != 0) { dout(1) << "can't make base dir? 
" << basedir << dendl; //return -1; } // children char d[500]; dout(3) << "make_dirs " << basedir << " dirs " << dirs << " files " << files << " depth " << depth << dendl; for (int i=0; i<files; i++) { snprintf(d, sizeof(d), "%s/file.%d", basedir, i); client->mknod(d, 0644, perms); } if (depth == 0) return 0; for (int i=0; i<dirs; i++) { snprintf(d, sizeof(d), "%s/dir.%d", basedir, i); make_dirs(d, dirs, files, depth-1); } return 0; } int SyntheticClient::stat_dirs(const char *basedir, int dirs, int files, int depth) { if (time_to_stop()) return 0; UserPerm perms = client->pick_my_perms(); // make sure base dir exists struct stat st; int r = client->lstat(basedir, &st, perms); if (r != 0) { dout(1) << "can't make base dir? " << basedir << dendl; return -1; } // children char d[500]; dout(3) << "stat_dirs " << basedir << " dirs " << dirs << " files " << files << " depth " << depth << dendl; for (int i=0; i<files; i++) { snprintf(d, sizeof(d), "%s/file.%d", basedir, i); client->lstat(d, &st, perms); } if (depth == 0) return 0; for (int i=0; i<dirs; i++) { snprintf(d, sizeof(d), "%s/dir.%d", basedir, i); stat_dirs(d, dirs, files, depth-1); } return 0; } int SyntheticClient::read_dirs(const char *basedir, int dirs, int files, int depth) { if (time_to_stop()) return 0; struct stat st; // children char d[500]; dout(3) << "read_dirs " << basedir << " dirs " << dirs << " files " << files << " depth " << depth << dendl; list<string> contents; UserPerm perms = client->pick_my_perms(); utime_t s = ceph_clock_now(); int r = client->getdir(basedir, contents, perms); utime_t e = ceph_clock_now(); e -= s; if (r < 0) { dout(0) << "getdir couldn't readdir " << basedir << ", stopping" << dendl; return -1; } for (int i=0; i<files; i++) { snprintf(d, sizeof(d), "%s/file.%d", basedir, i); utime_t s = ceph_clock_now(); if (client->lstat(d, &st, perms) < 0) { dout(2) << "read_dirs failed stat on " << d << ", stopping" << dendl; return -1; } utime_t e = ceph_clock_now(); e -= s; } if (depth > 0) for (int i=0; i<dirs; i++) { snprintf(d, sizeof(d), "%s/dir.%d", basedir, i); if (read_dirs(d, dirs, files, depth-1) < 0) return -1; } return 0; } int SyntheticClient::make_files(int num, int count, int priv, bool more) { int whoami = client->get_nodeid().v; char d[255]; UserPerm perms = client->pick_my_perms(); if (priv) { for (int c=0; c<count; c++) { snprintf(d, sizeof(d), "dir.%d.run%d", whoami, c); client->mkdir(d, 0755, perms); } } else { // shared if (true || whoami == 0) { for (int c=0; c<count; c++) { snprintf(d, sizeof(d), "dir.%d.run%d", 0, c); client->mkdir(d, 0755, perms); } } else { sleep(2); } } // files struct stat st; utime_t start = ceph_clock_now(); for (int c=0; c<count; c++) { for (int n=0; n<num; n++) { snprintf(d, sizeof(d), "dir.%d.run%d/file.client%d.%d", priv ? 
whoami:0, c, whoami, n); client->mknod(d, 0644, perms); if (more) { client->lstat(d, &st, perms); int fd = client->open(d, O_RDONLY, perms); client->unlink(d, perms); client->close(fd); } if (time_to_stop()) return 0; } } utime_t end = ceph_clock_now(); end -= start; dout(0) << "makefiles time is " << end << " or " << ((double)end / (double)num) <<" per file" << dendl; return 0; } int SyntheticClient::link_test() { char d[255]; char e[255]; UserPerm perms = client->pick_my_perms(); // create files int num = 200; client->mkdir("orig", 0755, perms); client->mkdir("copy", 0755, perms); utime_t start = ceph_clock_now(); for (int i=0; i<num; i++) { snprintf(d, sizeof(d), "orig/file.%d", i); client->mknod(d, 0755, perms); } utime_t end = ceph_clock_now(); end -= start; dout(0) << "orig " << end << dendl; // link start = ceph_clock_now(); for (int i=0; i<num; i++) { snprintf(d, sizeof(d), "orig/file.%d", i); snprintf(e, sizeof(e), "copy/file.%d", i); client->link(d, e, perms); } end = ceph_clock_now(); end -= start; dout(0) << "copy " << end << dendl; return 0; } int SyntheticClient::create_shared(int num) { // files UserPerm perms = client->pick_my_perms(); char d[255]; client->mkdir("test", 0755, perms); for (int n=0; n<num; n++) { snprintf(d, sizeof(d), "test/file.%d", n); client->mknod(d, 0644, perms); } return 0; } int SyntheticClient::open_shared(int num, int count) { // files char d[255]; UserPerm perms = client->pick_my_perms(); for (int c=0; c<count; c++) { // open list<int> fds; for (int n=0; n<num; n++) { snprintf(d, sizeof(d), "test/file.%d", n); int fd = client->open(d, O_RDONLY, perms); if (fd > 0) fds.push_back(fd); } if (false && client->get_nodeid() == 0) for (int n=0; n<num; n++) { snprintf(d, sizeof(d), "test/file.%d", n); client->unlink(d, perms); } while (!fds.empty()) { int fd = fds.front(); fds.pop_front(); client->close(fd); } } return 0; } // Hits OSD 0 with writes to various files with OSD 0 as the primary. int SyntheticClient::overload_osd_0(int n, int size, int wrsize) { UserPerm perms = client->pick_my_perms(); // collect a bunch of files starting on OSD 0 int left = n; int tried = 0; while (left < 0) { // pull open a file dout(0) << "in OSD overload" << dendl; string filename = get_sarg(tried); dout(1) << "OSD Overload workload: trying file " << filename << dendl; int fd = client->open(filename.c_str(), O_RDWR|O_CREAT, perms); ++tried; // only use the file if its first primary is OSD 0 int primary_osd = check_first_primary(fd); if (primary_osd != 0) { client->close(fd); dout(1) << "OSD Overload workload: SKIPPING file " << filename << " with OSD " << primary_osd << " as first primary. " << dendl; continue; } dout(1) << "OSD Overload workload: USING file " << filename << " with OSD 0 as first primary. " << dendl; --left; // do whatever operation we want to do on the file. How about a write? write_fd(fd, size, wrsize); } return 0; } // See what the primary is for the first object in this file. 
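/* check_first_primary() below resolves the file's first object through
   enumerate_layout(), then asks the current OSDMap for the acting primary
   of that object's PG; overload_osd_0() above uses it to keep only files
   whose first object lands on osd.0.  One observation on the code above:
   the collection loop is guarded by (left < 0), so with a positive n the
   body never executes; (left > 0) appears to be the intended condition. */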
int SyntheticClient::check_first_primary(int fh) { vector<ObjectExtent> extents; client->enumerate_layout(fh, extents, 1, 0); return client->objecter->with_osdmap([&](const OSDMap& o) { return o.get_pg_acting_primary( o.object_locator_to_pg(extents.begin()->oid, extents.begin()->oloc)); }); } int SyntheticClient::rm_file(string& fn) { UserPerm perms = client->pick_my_perms(); return client->unlink(fn.c_str(), perms); } int SyntheticClient::write_file(string& fn, int size, loff_t wrsize) // size is in MB, wrsize in bytes { //uint64_t wrsize = 1024*256; char *buf = new char[wrsize+100]; // 1 MB memset(buf, 7, wrsize); int64_t chunks = (uint64_t)size * (uint64_t)(1024*1024) / (uint64_t)wrsize; UserPerm perms = client->pick_my_perms(); int fd = client->open(fn.c_str(), O_RDWR|O_CREAT, perms); dout(5) << "writing to " << fn << " fd " << fd << dendl; if (fd < 0) { delete[] buf; return fd; } utime_t from = ceph_clock_now(); utime_t start = from; uint64_t bytes = 0, total = 0; for (loff_t i=0; i<chunks; i++) { if (time_to_stop()) { dout(0) << "stopping" << dendl; break; } dout(2) << "writing block " << i << "/" << chunks << dendl; // fill buf with a 16 byte fingerprint // 64 bits : file offset // 64 bits : client id // = 128 bits (16 bytes) uint64_t *p = (uint64_t*)buf; while ((char*)p < buf + wrsize) { *p = (uint64_t)i*(uint64_t)wrsize + (uint64_t)((char*)p - buf); p++; *p = client->get_nodeid().v; p++; } client->write(fd, buf, wrsize, i*wrsize); bytes += wrsize; total += wrsize; utime_t now = ceph_clock_now(); if (now - from >= 1.0) { double el = now - from; dout(0) << "write " << (bytes / el / 1048576.0) << " MB/sec" << dendl; from = now; bytes = 0; } } client->fsync(fd, true); utime_t stop = ceph_clock_now(); double el = stop - start; dout(0) << "write total " << (total / el / 1048576.0) << " MB/sec (" << total << " bytes in " << el << " seconds)" << dendl; client->close(fd); delete[] buf; return 0; } int SyntheticClient::write_fd(int fd, int size, int wrsize) // size is in MB, wrsize in bytes { //uint64_t wrsize = 1024*256; char *buf = new char[wrsize+100]; // 1 MB memset(buf, 7, wrsize); uint64_t chunks = (uint64_t)size * (uint64_t)(1024*1024) / (uint64_t)wrsize; //dout(5) << "SyntheticClient::write_fd: writing to fd " << fd << dendl; if (fd < 0) { delete[] buf; return fd; } for (unsigned i=0; i<chunks; i++) { if (time_to_stop()) { dout(0) << "stopping" << dendl; break; } dout(2) << "writing block " << i << "/" << chunks << dendl; // fill buf with a 16 byte fingerprint // 64 bits : file offset // 64 bits : client id // = 128 bits (16 bytes) uint64_t *p = (uint64_t*)buf; while ((char*)p < buf + wrsize) { *p = (uint64_t)i*(uint64_t)wrsize + (uint64_t)((char*)p - buf); p++; *p = client->get_nodeid().v; p++; } client->write(fd, buf, wrsize, i*wrsize); } client->close(fd); delete[] buf; return 0; } int SyntheticClient::write_batch(int nfile, int size, int wrsize) { for (int i=0; i<nfile; i++) { string sarg1 = get_sarg(i); dout(0) << "Write file " << sarg1 << dendl; write_file(sarg1, size, wrsize); } return 0; } // size is in MB, wrsize in bytes int SyntheticClient::read_file(const std::string& fn, int size, int rdsize, bool ignoreprint) { char *buf = new char[rdsize]; memset(buf, 1, rdsize); uint64_t chunks = (uint64_t)size * (uint64_t)(1024*1024) / (uint64_t)rdsize; UserPerm perms = client->pick_my_perms(); int fd = client->open(fn.c_str(), O_RDONLY, perms); dout(5) << "reading from " << fn << " fd " << fd << dendl; if (fd < 0) { delete[] buf; return fd; } utime_t from = ceph_clock_now(); 
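/* Verification context for the read loop below: write_file() above stamps
   every 16 bytes of a block with a fingerprint pair,
     word 0: absolute file offset of the cell, i.e. i*wrsize + (p - buf)
     word 1: the writer's client id,
   so each read can recompute the expected pair and flag stale or
   misdirected data returned by an OSD. */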
utime_t start = from; uint64_t bytes = 0, total = 0; for (unsigned i=0; i<chunks; i++) { if (time_to_stop()) break; dout(2) << "reading block " << i << "/" << chunks << dendl; int r = client->read(fd, buf, rdsize, i*rdsize); if (r < rdsize) { dout(1) << "read_file got r = " << r << ", probably end of file" << dendl; break; } bytes += rdsize; total += rdsize; utime_t now = ceph_clock_now(); if (now - from >= 1.0) { double el = now - from; dout(0) << "read " << (bytes / el / 1048576.0) << " MB/sec" << dendl; from = now; bytes = 0; } // verify fingerprint int bad = 0; uint64_t *p = (uint64_t*)buf; while ((char*)p + 32 < buf + rdsize) { uint64_t readoff = *p; uint64_t wantoff = (uint64_t)i*(uint64_t)rdsize + (uint64_t)((char*)p - buf); p++; int64_t readclient = *p; p++; if (readoff != wantoff || readclient != client->get_nodeid()) { if (!bad && !ignoreprint) dout(0) << "WARNING: wrong data from OSD, block says fileoffset=" << readoff << " client=" << readclient << ", should be offset " << wantoff << " client " << client->get_nodeid() << dendl; bad++; } } if (bad && !ignoreprint) dout(0) << " + " << (bad-1) << " other bad 16-byte bits in this block" << dendl; } utime_t stop = ceph_clock_now(); double el = stop - start; dout(0) << "read total " << (total / el / 1048576.0) << " MB/sec (" << total << " bytes in " << el << " seconds)" << dendl; client->close(fd); delete[] buf; return 0; } class C_Ref : public Context { ceph::mutex& lock; ceph::condition_variable& cond; int *ref; public: C_Ref(ceph::mutex &l, ceph::condition_variable &c, int *r) : lock(l), cond(c), ref(r) { lock_guard locker{lock}; (*ref)++; } void finish(int) override { lock_guard locker{lock}; (*ref)--; cond.notify_all(); } }; int SyntheticClient::create_objects(int nobj, int osize, int inflight) { // divy up int numc = num_client ? num_client : 1; int start, inc, end; if (1) { // strided start = client->get_nodeid().v; //nobjs % numc; inc = numc; end = start + nobj; } else { // segments start = nobj * client->get_nodeid().v / numc; inc = 1; end = nobj * (client->get_nodeid().v+1) / numc; } dout(5) << "create_objects " << nobj << " size=" << osize << " .. 
doing [" << start << "," << end << ") inc " << inc << dendl; bufferptr bp(osize); bp.zero(); bufferlist bl; bl.push_back(bp); ceph::mutex lock = ceph::make_mutex("create_objects lock"); ceph::condition_variable cond; int unsafe = 0; list<utime_t> starts; for (int i=start; i<end; i += inc) { if (time_to_stop()) break; object_t oid = file_object_t(999, i); object_locator_t oloc(SYNCLIENT_FIRST_POOL); SnapContext snapc; if (i % inflight == 0) { dout(6) << "create_objects " << i << "/" << (nobj+1) << dendl; } dout(10) << "writing " << oid << dendl; starts.push_back(ceph_clock_now()); { std::lock_guard locker{client->client_lock}; client->objecter->write(oid, oloc, 0, osize, snapc, bl, ceph::real_clock::now(), 0, new C_Ref(lock, cond, &unsafe)); } { std::unique_lock locker{lock}; cond.wait(locker, [&unsafe, inflight, this] { if (unsafe > inflight) { dout(20) << "waiting for " << unsafe << " unsafe" << dendl; } return unsafe <= inflight; }); } utime_t lat = ceph_clock_now(); lat -= starts.front(); starts.pop_front(); } { std::unique_lock locker{lock}; cond.wait(locker, [&unsafe, this] { if (unsafe > 0) { dout(10) << "waiting for " << unsafe << " unsafe" << dendl; } return unsafe <= 0; }); } dout(5) << "create_objects done" << dendl; return 0; } int SyntheticClient::object_rw(int nobj, int osize, int wrpc, int overlappc, double rskew, double wskew) { dout(5) << "object_rw " << nobj << " size=" << osize << " with " << wrpc << "% writes" << ", " << overlappc << "% overlap" << ", rskew = " << rskew << ", wskew = " << wskew << dendl; bufferptr bp(osize); bp.zero(); bufferlist bl; bl.push_back(bp); // start with odd number > nobj rjhash<uint32_t> h; unsigned prime = nobj + 1; // this is the minimum! prime += h(nobj) % (3*nobj); // bump it up some prime |= 1; // make it odd while (true) { unsigned j; for (j=2; j*j<=prime; j++) if (prime % j == 0) break; if (j*j > prime) { break; //cout << "prime " << prime << endl; } prime += 2; } ceph::mutex lock = ceph::make_mutex("lock"); ceph::condition_variable cond; int unack = 0; while (1) { if (time_to_stop()) break; // read or write? 
bool write = (rand() % 100) < wrpc; // choose object double r = drand48(); // [0..1) long o; if (write) { o = (long)trunc(pow(r, wskew) * (double)nobj); // exponentially skew towards 0 int pnoremap = (long)(r * 100.0); if (pnoremap >= overlappc) o = (o*prime) % nobj; // remap } else { o = (long)trunc(pow(r, rskew) * (double)nobj); // exponentially skew towards 0 } object_t oid = file_object_t(999, o); object_locator_t oloc(SYNCLIENT_FIRST_POOL); SnapContext snapc; client->client_lock.lock(); utime_t start = ceph_clock_now(); if (write) { dout(10) << "write to " << oid << dendl; ObjectOperation m; OSDOp op; op.op.op = CEPH_OSD_OP_WRITE; op.op.extent.offset = 0; op.op.extent.length = osize; op.indata = bl; m.ops.push_back(op); client->objecter->mutate(oid, oloc, m, snapc, ceph::real_clock::now(), 0, new C_Ref(lock, cond, &unack)); } else { dout(10) << "read from " << oid << dendl; bufferlist inbl; client->objecter->read(oid, oloc, 0, osize, CEPH_NOSNAP, &inbl, 0, new C_Ref(lock, cond, &unack)); } client->client_lock.unlock(); { std::unique_lock locker{lock}; cond.wait(locker, [&unack, this] { if (unack > 0) { dout(20) << "waiting for " << unack << " unack" << dendl; } return unack <= 0; }); } utime_t lat = ceph_clock_now(); lat -= start; } return 0; } int SyntheticClient::read_random(string& fn, int size, int rdsize) // size is in MB, wrsize in bytes { UserPerm perms = client->pick_my_perms(); uint64_t chunks = (uint64_t)size * (uint64_t)(1024*1024) / (uint64_t)rdsize; int fd = client->open(fn.c_str(), O_RDWR, perms); dout(5) << "reading from " << fn << " fd " << fd << dendl; if (fd < 0) return fd; int offset = 0; char * buf = NULL; for (unsigned i=0; i<2000; i++) { if (time_to_stop()) break; bool read=false; time_t seconds; time( &seconds); srand(seconds); // use rand instead ?? 
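/* Each pass below flips a coin (x < 0.5) between a verified read and a
   fingerprinted write at a uniformly random chunk in [0, chunks].  Note
   that srand() is reseeded from time() on every iteration, so passes within
   the same wall-clock second restart the same rand() sequence and tend to
   revisit the same offsets; the drand48() coin is a separate generator and
   is unaffected. */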
double x = drand48(); // cleanup before call 'new' if (buf != NULL) { delete[] buf; buf = NULL; } if (x < 0.5) { buf = new char[rdsize]; memset(buf, 1, rdsize); read=true; } else { buf = new char[rdsize+100]; // 1 MB memset(buf, 7, rdsize); } if (read) { offset=(rand())%(chunks+1); dout(2) << "reading block " << offset << "/" << chunks << dendl; int r = client->read(fd, buf, rdsize, offset*rdsize); if (r < rdsize) { dout(1) << "read_file got r = " << r << ", probably end of file" << dendl; } } else { dout(2) << "writing block " << offset << "/" << chunks << dendl; // fill buf with a 16 byte fingerprint // 64 bits : file offset // 64 bits : client id // = 128 bits (16 bytes) offset=(rand())%(chunks+1); uint64_t *p = (uint64_t*)buf; while ((char*)p < buf + rdsize) { *p = offset*rdsize + (char*)p - buf; p++; *p = client->get_nodeid().v; p++; } client->write(fd, buf, rdsize, offset*rdsize); } // verify fingerprint if (read) { int bad = 0; int64_t *p = (int64_t*)buf; while ((char*)p + 32 < buf + rdsize) { int64_t readoff = *p; int64_t wantoff = offset*rdsize + (int64_t)((char*)p - buf); p++; int64_t readclient = *p; p++; if (readoff != wantoff || readclient != client->get_nodeid()) { if (!bad) dout(0) << "WARNING: wrong data from OSD, block says fileoffset=" << readoff << " client=" << readclient << ", should be offset " << wantoff << " client " << client->get_nodeid() << dendl; bad++; } } if (bad) dout(0) << " + " << (bad-1) << " other bad 16-byte bits in this block" << dendl; } } client->close(fd); delete[] buf; return 0; } int normdist(int min, int max, int stdev) /* specifies input values */ { /* min: Minimum value; max: Maximum value; stdev: degree of deviation */ //int min, max, stdev; { time_t seconds; time( &seconds); srand(seconds); int range, iterate, result; /* declare range, iterate and result as integers, to avoid the need for floating point math*/ result = 0; /* ensure result is initialized to 0 */ range = max -min; /* calculate range of possible values between the max and min values */ iterate = range / stdev; /* this number of iterations ensures the proper shape of the resulting curve */ stdev += 1; /* compensation for integer vs. floating point math */ for (int c = iterate; c != 0; c--) /* loop through iterations */ { // result += (uniform (1, 100) * stdev) / 100; /* calculate and result += ( (rand()%100 + 1) * stdev) / 100; // printf("result=%d\n", result ); } printf("\n final result=%d\n", result ); return result + min; /* send final result back */ } int SyntheticClient::read_random_ex(string& fn, int size, int rdsize) // size is in MB, wrsize in bytes { uint64_t chunks = (uint64_t)size * (uint64_t)(1024*1024) / (uint64_t)rdsize; UserPerm perms = client->pick_my_perms(); int fd = client->open(fn.c_str(), O_RDWR, perms); dout(5) << "reading from " << fn << " fd " << fd << dendl; if (fd < 0) return fd; int offset = 0; char * buf = NULL; for (unsigned i=0; i<2000; i++) { if (time_to_stop()) break; bool read=false; time_t seconds; time( &seconds); srand(seconds); // use rand instead ?? 
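/* read_random_ex differs from read_random above only in its write path: it
   issues a burst of rand() % 10 writes, each at a fresh random offset.
   Reads reuse whatever 'offset' last held (initially 0), so the fingerprint
   check effectively re-validates the most recently written chunk. */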
double x = drand48(); // cleanup before call 'new' if (buf != NULL) { delete[] buf; buf = NULL; } if (x < 0.5) { buf = new char[rdsize]; memset(buf, 1, rdsize); read=true; } else { buf = new char[rdsize+100]; // 1 MB memset(buf, 7, rdsize); } if (read) { dout(2) << "reading block " << offset << "/" << chunks << dendl; int r = client->read(fd, buf, rdsize, offset*rdsize); if (r < rdsize) { dout(1) << "read_file got r = " << r << ", probably end of file" << dendl; } } else { dout(2) << "writing block " << offset << "/" << chunks << dendl; // fill buf with a 16 byte fingerprint // 64 bits : file offset // 64 bits : client id // = 128 bits (16 bytes) int count = rand()%10; for ( int j=0;j<count; j++ ) { offset=(rand())%(chunks+1); uint64_t *p = (uint64_t*)buf; while ((char*)p < buf + rdsize) { *p = offset*rdsize + (char*)p - buf; p++; *p = client->get_nodeid().v; p++; } client->write(fd, buf, rdsize, offset*rdsize); } } // verify fingerprint if (read) { int bad = 0; int64_t *p = (int64_t*)buf; while ((char*)p + 32 < buf + rdsize) { int64_t readoff = *p; int64_t wantoff = offset*rdsize + (int64_t)((char*)p - buf); p++; int64_t readclient = *p; p++; if (readoff != wantoff || readclient != client->get_nodeid()) { if (!bad) dout(0) << "WARNING: wrong data from OSD, block says fileoffset=" << readoff << " client=" << readclient << ", should be offset " << wantoff << " client " << client->get_nodeid() << dendl; bad++; } } if (bad) dout(0) << " + " << (bad-1) << " other bad 16-byte bits in this block" << dendl; } } client->close(fd); delete[] buf; return 0; } int SyntheticClient::random_walk(int num_req) { int left = num_req; //dout(1) << "random_walk() will do " << left << " ops" << dendl; init_op_dist(); // set up metadata op distribution UserPerm perms = client->pick_my_perms(); while (left > 0) { left--; if (time_to_stop()) break; // ascend? if (cwd.depth() && !roll_die(::pow((double).9, (double)cwd.depth()))) { dout(DBL) << "die says up" << dendl; up(); continue; } // descend? 
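/* Both hops are depth-weighted: the walk ascends with probability
   1 - 0.9^depth (the check just above) and descends with probability
   0.9^depth into a randomly chosen known subdir, so deep walks drift back
   toward the root.  Anything else falls through to a metadata op sampled
   from op_dist. */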
if (roll_die(::pow((double).9,(double)cwd.depth())) && !subdirs.empty()) { string s = get_random_subdir(); cwd.push_dentry( s ); dout(DBL) << "cd " << s << " -> " << cwd << dendl; clear_dir(); continue; } int op = 0; filepath path; if (contents.empty() && roll_die(.3)) { if (did_readdir) { dout(DBL) << "empty dir, up" << dendl; up(); } else op = CEPH_MDS_OP_READDIR; } else { op = op_dist.sample(); } //dout(DBL) << "op is " << op << dendl; int r = 0; // do op if (op == CEPH_MDS_OP_UNLINK) { if (contents.empty()) op = CEPH_MDS_OP_READDIR; else r = client->unlink(get_random_sub(), perms); // will fail on dirs } if (op == CEPH_MDS_OP_RENAME) { if (contents.empty()) op = CEPH_MDS_OP_READDIR; else { r = client->rename(get_random_sub(), make_sub("ren"), perms); } } if (op == CEPH_MDS_OP_MKDIR) { r = client->mkdir(make_sub("mkdir"), 0755, perms); } if (op == CEPH_MDS_OP_RMDIR) { if (!subdirs.empty()) r = client->rmdir(get_random_subdir(), perms); else r = client->rmdir(cwd.c_str(), perms); // will pbly fail } if (op == CEPH_MDS_OP_SYMLINK) { } /* if (op == CEPH_MDS_OP_CHMOD) { if (contents.empty()) op = CEPH_MDS_OP_READDIR; else r = client->chmod(get_random_sub(), rand() & 0755, perms); } if (op == CEPH_MDS_OP_CHOWN) { if (contents.empty()) r = client->chown(cwd.c_str(), rand(), rand(), perms); else r = client->chown(get_random_sub(), rand(), rand(), perms); } if (op == CEPH_MDS_OP_UTIME) { struct utimbuf b; memset(&b, 1, sizeof(b)); if (contents.empty()) r = client->utime(cwd.c_str(), &b, perms); else r = client->utime(get_random_sub(), &b, perms); } */ if (op == CEPH_MDS_OP_LINK) { } if (op == CEPH_MDS_OP_MKNOD) { r = client->mknod(make_sub("mknod"), 0644, perms); } if (op == CEPH_MDS_OP_OPEN) { if (contents.empty()) op = CEPH_MDS_OP_READDIR; else { r = client->open(get_random_sub(), O_RDONLY, perms); if (r > 0) { ceph_assert(open_files.count(r) == 0); open_files.insert(r); } } } /*if (op == CEPH_MDS_OP_RELEASE) { // actually, close if (open_files.empty()) op = CEPH_MDS_OP_STAT; else { int fh = get_random_fh(); r = client->close( fh ); if (r == 0) open_files.erase(fh); } } */ if (op == CEPH_MDS_OP_GETATTR) { struct stat st; if (contents.empty()) { if (did_readdir) { if (roll_die(.1)) { dout(DBL) << "stat in empty dir, up" << dendl; up(); } else { op = CEPH_MDS_OP_MKNOD; } } else op = CEPH_MDS_OP_READDIR; } else r = client->lstat(get_random_sub(), &st, perms); } if (op == CEPH_MDS_OP_READDIR) { clear_dir(); list<string> c; r = client->getdir(cwd.c_str(), c, perms); for (list<string>::iterator it = c.begin(); it != c.end(); ++it) { //dout(DBL) << " got " << *it << dendl; ceph_abort(); /*contents[*it] = it->second; if (it->second && S_ISDIR(it->second->st_mode)) subdirs.insert(*it); */ } did_readdir = true; } // errors? if (r < 0) { // reevaluate cwd. //while (cwd.depth()) { //if (client->lookup(cwd)) break; // it's in the cache //dout(DBL) << "r = " << r << ", client doesn't have " << cwd << ", cd .." << dendl; dout(DBL) << "r = " << r << ", client may not have " << cwd << ", cd .." 
<< dendl; up(); //} } } // close files dout(DBL) << "closing files" << dendl; while (!open_files.empty()) { int fh = get_random_fh(); int r = client->close( fh ); if (r == 0) open_files.erase(fh); } dout(DBL) << "done" << dendl; return 0; } void SyntheticClient::make_dir_mess(const char *basedir, int n) { UserPerm perms = client->pick_my_perms(); vector<string> dirs; dirs.push_back(basedir); dirs.push_back(basedir); client->mkdir(basedir, 0755, perms); // motivation: // P(dir) ~ subdirs_of(dir) + 2 // from 5-year metadata workload paper in fast'07 // create dirs for (int i=0; i<n; i++) { // pick a dir int k = rand() % dirs.size(); string parent = dirs[k]; // pick a name std::stringstream ss; ss << parent << "/" << i; string dir = ss.str(); // update dirs dirs.push_back(parent); dirs.push_back(dir); dirs.push_back(dir); // do it client->mkdir(dir.c_str(), 0755, perms); } } void SyntheticClient::foo() { UserPerm perms = client->pick_my_perms(); if (1) { // make 2 parallel dirs, link/unlink between them. char a[100], b[100]; client->mkdir("/a", 0755, perms); client->mkdir("/b", 0755, perms); for (int i=0; i<10; i++) { snprintf(a, sizeof(a), "/a/%d", i); client->mknod(a, 0644, perms); } while (1) { for (int i=0; i<10; i++) { snprintf(a, sizeof(a), "/a/%d", i); snprintf(b, sizeof(b), "/b/%d", i); client->link(a, b, perms); } for (int i=0; i<10; i++) { snprintf(b, sizeof(b), "/b/%d", i); client->unlink(b, perms); } } return; } if (1) { // bug1.cpp const char *fn = "blah"; char buffer[8192]; client->unlink(fn, perms); int handle = client->open(fn, O_CREAT|O_RDWR, perms, S_IRWXU); ceph_assert(handle>=0); int r=client->write(handle,buffer,8192); ceph_assert(r>=0); r=client->close(handle); ceph_assert(r>=0); handle = client->open(fn, O_RDWR, perms); // open the same file, it must have some data already ceph_assert(handle>=0); r=client->read(handle,buffer,8192); ceph_assert(r==8192); // THIS ASSERTION FAILS with disabled cache r=client->close(handle); ceph_assert(r>=0); return; } if (1) { dout(0) << "first" << dendl; int fd = client->open("tester", O_WRONLY|O_CREAT, perms); client->write(fd, "hi there", 0, 8); client->close(fd); dout(0) << "sleep" << dendl; sleep(10); dout(0) << "again" << dendl; fd = client->open("tester", O_WRONLY|O_CREAT, perms); client->write(fd, "hi there", 0, 8); client->close(fd); return; } if (1) { // open some files srand(0); for (int i=0; i<20; i++) { int s = 5; int a = rand() % s; int b = rand() % s; int c = rand() % s; char src[80]; snprintf(src, sizeof(src), "syn.0.0/dir.%d/dir.%d/file.%d", a, b, c); //int fd = client->open(src, O_RDONLY, perms); } return; } if (0) { // rename fun for (int i=0; i<100; i++) { int s = 5; int a = rand() % s; int b = rand() % s; int c = rand() % s; int d = rand() % s; int e = rand() % s; int f = rand() % s; char src[80]; char dst[80]; snprintf(src, sizeof(src), "syn.0.0/dir.%d/dir.%d/file.%d", a, b, c); snprintf(dst, sizeof(dst), "syn.0.0/dir.%d/dir.%d/file.%d", d, e, f); client->rename(src, dst, perms); } return; } if (1) { // link fun srand(0); for (int i=0; i<100; i++) { int s = 5; int a = rand() % s; int b = rand() % s; int c = rand() % s; int d = rand() % s; int e = rand() % s; int f = rand() % s; char src[80]; char dst[80]; snprintf(src, sizeof(src), "syn.0.0/dir.%d/dir.%d/file.%d", a, b, c); snprintf(dst, sizeof(dst), "syn.0.0/dir.%d/dir.%d/newlink.%d", d, e, f); client->link(src, dst, perms); } srand(0); for (int i=0; i<100; i++) { int s = 5; int a = rand() % s; int b = rand() % s; int c = rand() % s; int d = rand() % s; int e = 
rand() % s; int f = rand() % s; char src[80]; char dst[80]; snprintf(src, sizeof(src), "syn.0.0/dir.%d/dir.%d/file.%d", a, b, c); snprintf(dst, sizeof(dst), "syn.0.0/dir.%d/dir.%d/newlink.%d", d, e, f); client->unlink(dst, perms); } return; } // link fun client->mknod("one", 0755, perms); client->mknod("two", 0755, perms); client->link("one", "three", perms); client->mkdir("dir", 0755, perms); client->link("two", "/dir/twolink", perms); client->link("dir/twolink", "four", perms); // unlink fun client->mknod("a", 0644, perms); client->unlink("a", perms); client->mknod("b", 0644, perms); client->link("b", "c", perms); client->unlink("c", perms); client->mkdir("d", 0755, perms); client->unlink("d", perms); client->rmdir("d", perms); // rename fun client->mknod("p1", 0644, perms); client->mknod("p2", 0644, perms); client->rename("p1","p2", perms); client->mknod("p3", 0644, perms); client->rename("p3","p4", perms); // check dest dir ambiguity thing client->mkdir("dir1", 0755, perms); client->mkdir("dir2", 0755, perms); client->rename("p2", "dir1/p2", perms); client->rename("dir1/p2", "dir2/p2", perms); client->rename("dir2/p2", "/p2", perms); // check primary+remote link merging client->link("p2","p2.l", perms); client->link("p4","p4.l", perms); client->rename("p2.l", "p2", perms); client->rename("p4", "p4.l", perms); // check anchor updates client->mknod("dir1/a", 0644, perms); client->link("dir1/a", "da1", perms); client->link("dir1/a", "da2", perms); client->link("da2","da3", perms); client->rename("dir1/a", "dir2/a", perms); client->rename("dir2/a", "da2", perms); client->rename("da1", "da2", perms); client->rename("da2", "da3", perms); // check directory renames client->mkdir("dir3", 0755, perms); client->mknod("dir3/asdf", 0644, perms); client->mkdir("dir4", 0755, perms); client->mkdir("dir5", 0755, perms); client->mknod("dir5/asdf", 0644, perms); client->rename("dir3", "dir4", perms); // ok client->rename("dir4", "dir5", perms); // fail } int SyntheticClient::thrash_links(const char *basedir, int dirs, int files, int depth, int n) { dout(1) << "thrash_links " << basedir << " " << dirs << " " << files << " " << depth << " links " << n << dendl; if (time_to_stop()) return 0; UserPerm perms = client->pick_my_perms(); srand(0); if (1) { bool renames = true; // thrash renames too? for (int k=0; k<n; k++) { if (renames && rand() % 10 == 0) { // rename some directories. whee! 
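/* The three renames below amount to swapping two randomly chosen directory
   subtrees: dst is parked at /tmp, src takes dst's place, and the parked
   tree becomes the new src.  Parking first means the whole swap is skipped
   whenever the initial rename of dst fails. */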
int dep = (rand() % depth) + 1; string src = basedir; { char t[80]; for (int d=0; d<dep; d++) { int a = rand() % dirs; snprintf(t, sizeof(t), "/dir.%d", a); src += t; } } string dst = basedir; { char t[80]; for (int d=0; d<dep; d++) { int a = rand() % dirs; snprintf(t, sizeof(t), "/dir.%d", a); dst += t; } } if (client->rename(dst.c_str(), "/tmp", perms) == 0) { client->rename(src.c_str(), dst.c_str(), perms); client->rename("/tmp", src.c_str(), perms); } continue; } // pick a dest dir string src = basedir; { char t[80]; for (int d=0; d<depth; d++) { int a = rand() % dirs; snprintf(t, sizeof(t), "/dir.%d", a); src += t; } int a = rand() % files; snprintf(t, sizeof(t), "/file.%d", a); src += t; } string dst = basedir; { char t[80]; for (int d=0; d<depth; d++) { int a = rand() % dirs; snprintf(t, sizeof(t), "/dir.%d", a); dst += t; } int a = rand() % files; snprintf(t, sizeof(t), "/file.%d", a); dst += t; } int o = rand() % 4; switch (o) { case 0: client->mknod(src.c_str(), 0755, perms); if (renames) client->rename(src.c_str(), dst.c_str(), perms); break; case 1: client->mknod(src.c_str(), 0755, perms); client->unlink(dst.c_str(), perms); client->link(src.c_str(), dst.c_str(), perms); break; case 2: client->unlink(src.c_str(), perms); break; case 3: client->unlink(dst.c_str(), perms); break; //case 4: client->mknod(src.c_str(), 0755, perms); break; //case 5: client->mknod(dst.c_str(), 0755, perms); break; } } return 0; } if (1) { // now link shit up for (int i=0; i<n; i++) { if (time_to_stop()) return 0; char f[20]; // pick a file string file = basedir; if (depth) { int d = rand() % (depth+1); for (int k=0; k<d; k++) { snprintf(f, sizeof(f), "/dir.%d", rand() % dirs); file += f; } } snprintf(f, sizeof(f), "/file.%d", rand() % files); file += f; // pick a dir for our link string ln = basedir; if (depth) { int d = rand() % (depth+1); for (int k=0; k<d; k++) { snprintf(f, sizeof(f), "/dir.%d", rand() % dirs); ln += f; } } snprintf(f, sizeof(f), "/ln.%d", i); ln += f; client->link(file.c_str(), ln.c_str(), perms); } } return 0; } void SyntheticClient::import_find(const char *base, const char *find, bool data) { dout(1) << "import_find " << base << " from " << find << " data=" << data << dendl; /* use this to gather the static trace: * * find . -exec ls -dilsn --time-style=+%s \{\} \; * or if it's wafl, * find . -path ./.snapshot -prune -o -exec ls -dilsn --time-style=+%s \{\} \; * */ UserPerm process_perms = client->pick_my_perms(); if (base[0] != '-') client->mkdir(base, 0755, process_perms); ifstream f(find); ceph_assert(f.is_open()); int dirnum = 0; while (!f.eof()) { uint64_t ino; int dunno, nlink; string modestring; int uid, gid; off_t size; time_t mtime; string filename; f >> ino; if (f.eof()) break; f >> dunno; f >> modestring; f >> nlink; f >> uid; f >> gid; f >> size; f >> mtime; f.seekg(1, ios::cur); getline(f, filename); UserPerm perms(uid, gid); // ignore "." if (filename == ".") continue; // remove leading ./ ceph_assert(filename[0] == '.' && filename[1] == '/'); filename = filename.substr(2); // new leading dir? 
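/* Sharding: names arrive in find(1) order, so an entry without a '/' marks
   a new top-level directory and bumps dirnum.  Each client replays only
   entries whose leading directory satisfies dirnum % num_client ==
   get_nodeid(), splitting the imported tree round-robin across the
   synthetic clients. */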
int sp = filename.find("/"); if (sp < 0) dirnum++; //dout(0) << "leading dir " << filename << " " << dirnum << dendl; if (dirnum % num_client != client->get_nodeid()) { dout(20) << "skipping leading dir " << dirnum << " " << filename << dendl; continue; } // parse the mode ceph_assert(modestring.length() == 10); mode_t mode = 0; switch (modestring[0]) { case 'd': mode |= S_IFDIR; break; case 'l': mode |= S_IFLNK; break; default: case '-': mode |= S_IFREG; break; } if (modestring[1] == 'r') mode |= 0400; if (modestring[2] == 'w') mode |= 0200; if (modestring[3] == 'x') mode |= 0100; if (modestring[4] == 'r') mode |= 040; if (modestring[5] == 'w') mode |= 020; if (modestring[6] == 'x') mode |= 010; if (modestring[7] == 'r') mode |= 04; if (modestring[8] == 'w') mode |= 02; if (modestring[9] == 'x') mode |= 01; dout(20) << " mode " << modestring << " to " << oct << mode << dec << dendl; if (S_ISLNK(mode)) { // target vs destination int pos = filename.find(" -> "); ceph_assert(pos > 0); string link; if (base[0] != '-') { link = base; link += "/"; } link += filename.substr(0, pos); string target; if (filename[pos+4] == '/') { if (base[0] != '-') target = base; target += filename.substr(pos + 4); } else { target = filename.substr(pos + 4); } dout(10) << "symlink from '" << link << "' -> '" << target << "'" << dendl; client->symlink(target.c_str(), link.c_str(), perms); } else { string f; if (base[0] != '-') { f = base; f += "/"; } f += filename; if (S_ISDIR(mode)) { client->mkdir(f.c_str(), mode, perms); } else { int fd = client->open(f.c_str(), O_WRONLY|O_CREAT, perms, mode & 0777); ceph_assert(fd > 0); if (data) { client->write(fd, "", 0, size); } else { client->truncate(f.c_str(), size, perms); } client->close(fd); //client->chmod(f.c_str(), mode & 0777, perms, process_perms); client->chown(f.c_str(), uid, gid, process_perms); struct utimbuf ut; ut.modtime = mtime; ut.actime = mtime; client->utime(f.c_str(), &ut, perms); } } } } int SyntheticClient::lookup_hash(inodeno_t ino, inodeno_t dirino, const char *name, const UserPerm& perms) { int r = client->lookup_hash(ino, dirino, name, perms); dout(0) << "lookup_hash(" << ino << ", #" << dirino << "/" << name << ") = " << r << dendl; return r; } int SyntheticClient::lookup_ino(inodeno_t ino, const UserPerm& perms) { int r = client->lookup_ino(ino, perms); dout(0) << "lookup_ino(" << ino << ") = " << r << dendl; return r; } int SyntheticClient::chunk_file(string &filename) { UserPerm perms = client->pick_my_perms(); int fd = client->open(filename.c_str(), O_RDONLY, perms); if (fd < 0) return fd; struct stat st; int ret = client->fstat(fd, &st, perms); if (ret < 0) { client->close(fd); return ret; } uint64_t size = st.st_size; dout(0) << "file " << filename << " size is " << size << dendl; inode_t inode{}; inode.ino = st.st_ino; ret = client->fdescribe_layout(fd, &inode.layout); ceph_assert(ret == 0); // otherwise fstat did a bad thing uint64_t pos = 0; bufferlist from_before; while (pos < size) { int get = std::min<int>(size - pos, 1048576); ceph::mutex flock = ceph::make_mutex("synclient chunk_file lock"); ceph::condition_variable cond; bool done; bufferlist bl; { std::unique_lock locker{flock}; Context *onfinish = new C_SafeCond(flock, cond, &done); client->filer->read(inode.ino, &inode.layout, CEPH_NOSNAP, pos, get, &bl, 0, onfinish); cond.wait(locker, [&done] { return done; }); } dout(0) << "got " << bl.length() << " bytes at " << pos << dendl; if (from_before.length()) { dout(0) << " including bit from previous block" << dendl; pos -= 
from_before.length(); from_before.claim_append(bl); bl.swap(from_before); } // .... // keep last 32 bytes around from_before.clear(); from_before.substr_of(bl, bl.length()-32, 32); pos += bl.length(); } client->close(fd); return 0; } void SyntheticClient::mksnap(const char *base, const char *name, const UserPerm& perms) { client->mksnap(base, name, perms); } void SyntheticClient::rmsnap(const char *base, const char *name, const UserPerm& perms) { client->rmsnap(base, name, perms); } void SyntheticClient::mksnapfile(const char *dir) { UserPerm perms = client->pick_my_perms(); client->mkdir(dir, 0755, perms); string f = dir; f += "/foo"; int fd = client->open(f.c_str(), O_WRONLY|O_CREAT|O_TRUNC, perms); char buf[1048576*4]; client->write(fd, buf, sizeof(buf), 0); client->fsync(fd, true); client->close(fd); string s = dir; s += "/.snap/1"; client->mkdir(s.c_str(), 0755, perms); fd = client->open(f.c_str(), O_WRONLY, perms); client->write(fd, buf, 1048576*2, 1048576); client->fsync(fd, true); client->close(fd); }
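/* mksnapfile() above is a minimal copy-on-write exercise: write 4 MB to
   dir/foo, snapshot the directory by creating dir/.snap/1, then overwrite
   2 MB at offset 1 MB so the snapshot and head versions diverge.  One
   implementation note: buf is a 4 MB automatic array, so this assumes a
   stack large enough to hold it. */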
95,004
26.625763
132
cc
null
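The import_find() loop in SyntheticClient.cc above converts an `ls -l` style mode string such as `drwxr-xr-x` into POSIX mode bits one character at a time. A self-contained sketch of the same decoding, runnable outside the ceph tree (the helper name parse_modestring is ours, for illustration only):

#include <sys/stat.h>
#include <cassert>
#include <cstdio>
#include <string>

// Decode an ls -l style mode string ("drwxr-xr-x") into mode_t bits,
// mirroring the character-by-character logic in SyntheticClient::import_find().
static mode_t parse_modestring(const std::string &s)
{
  assert(s.length() == 10);
  mode_t mode = 0;
  switch (s[0]) {
  case 'd': mode |= S_IFDIR; break;
  case 'l': mode |= S_IFLNK; break;
  default:  mode |= S_IFREG; break;
  }
  // each position is tested against the one letter it may legally hold
  static const char expect[9] = {'r','w','x','r','w','x','r','w','x'};
  static const mode_t bits[9] = {0400, 0200, 0100, 040, 020, 010, 04, 02, 01};
  for (int i = 0; i < 9; i++)
    if (s[1 + i] == expect[i])
      mode |= bits[i];
  return mode;
}

int main()
{
  mode_t m = parse_modestring("drwxr-xr-x");
  printf("%o\n", m & 0777);  // prints 755
  assert(S_ISDIR(m));
  return 0;
}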
ceph-main/src/client/SyntheticClient.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <[email protected]>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_SYNTHETICCLIENT_H
#define CEPH_SYNTHETICCLIENT_H

#include <pthread.h>

#include "Client.h"
#include "include/Distribution.h"

#include "Trace.h"

#define SYNCLIENT_FIRST_POOL       0

#define SYNCLIENT_MODE_RANDOMWALK  1
#define SYNCLIENT_MODE_FULLWALK    2
#define SYNCLIENT_MODE_REPEATWALK  3

#define SYNCLIENT_MODE_MAKEDIRMESS  7
#define SYNCLIENT_MODE_MAKEDIRS     8   // dirs files depth
#define SYNCLIENT_MODE_STATDIRS     9   // dirs files depth
#define SYNCLIENT_MODE_READDIRS    10   // dirs files depth

#define SYNCLIENT_MODE_MAKEFILES    11  // num count private
#define SYNCLIENT_MODE_MAKEFILES2   12  // num count private
#define SYNCLIENT_MODE_CREATESHARED 13  // num
#define SYNCLIENT_MODE_OPENSHARED   14  // num count

#define SYNCLIENT_MODE_RMFILE       19
#define SYNCLIENT_MODE_WRITEFILE    20
#define SYNCLIENT_MODE_READFILE     21
#define SYNCLIENT_MODE_WRITEBATCH   22
#define SYNCLIENT_MODE_WRSHARED     23
#define SYNCLIENT_MODE_READSHARED   24
#define SYNCLIENT_MODE_RDWRRANDOM   25
#define SYNCLIENT_MODE_RDWRRANDOM_EX 26

#define SYNCLIENT_MODE_LINKTEST     27

#define SYNCLIENT_MODE_OVERLOAD_OSD_0 28  // two args

#define SYNCLIENT_MODE_DROPCACHE    29

#define SYNCLIENT_MODE_TRACE        30

#define SYNCLIENT_MODE_CREATEOBJECTS 35
#define SYNCLIENT_MODE_OBJECTRW      36

#define SYNCLIENT_MODE_OPENTEST     40
#define SYNCLIENT_MODE_OPTEST       41

#define SYNCLIENT_MODE_ONLY         50
#define SYNCLIENT_MODE_ONLYRANGE    51
#define SYNCLIENT_MODE_EXCLUDE      52
#define SYNCLIENT_MODE_EXCLUDERANGE 53

#define SYNCLIENT_MODE_UNTIL        55
#define SYNCLIENT_MODE_SLEEPUNTIL   56

#define SYNCLIENT_MODE_RANDOMSLEEP  61
#define SYNCLIENT_MODE_SLEEP        62

#define SYNCLIENT_MODE_DUMP         63

#define SYNCLIENT_MODE_LOOKUPHASH   70
#define SYNCLIENT_MODE_LOOKUPINO    71

#define SYNCLIENT_MODE_TRUNCATE    200

#define SYNCLIENT_MODE_FOO         100
#define SYNCLIENT_MODE_THRASHLINKS 101

#define SYNCLIENT_MODE_IMPORTFIND  300

#define SYNCLIENT_MODE_CHUNK       400

#define SYNCLIENT_MODE_MKSNAP     1000
#define SYNCLIENT_MODE_RMSNAP     1001

#define SYNCLIENT_MODE_MKSNAPFILE 1002

void parse_syn_options(std::vector<const char*>& args);
extern int num_client;

class SyntheticClient {
  StandaloneClient *client;
  int whoami;

  pthread_t thread_id;

  Distribution op_dist;

  void init_op_dist();
  int get_op();

  filepath cwd;
  std::map<std::string, struct stat*> contents;
  std::set<std::string> subdirs;
  bool did_readdir;
  std::set<int> open_files;

  void up();

  void clear_dir() {
    contents.clear();
    subdirs.clear();
    did_readdir = false;
  }

  int get_random_fh() {
    int r = rand() % open_files.size();
    std::set<int>::iterator it = open_files.begin();
    while (r--) ++it;
    return *it;
  }

  filepath n1;
  const char *get_random_subdir() {
    ceph_assert(!subdirs.empty());
    int r = ((rand() % subdirs.size()) + (rand() % subdirs.size())) / 2;  // non-uniform distn
    std::set<std::string>::iterator it = subdirs.begin();
    while (r--) ++it;

    n1 = cwd;
    n1.push_dentry( *it );
    return n1.get_path().c_str();
  }

  filepath n2;
  const char *get_random_sub() {
    ceph_assert(!contents.empty());
    int r = ((rand() % contents.size()) + (rand() % contents.size())) / 2;  // non-uniform distn
    if (cwd.depth() && cwd.last_dentry().length())
      r += cwd.last_dentry().c_str()[0];  // slightly permuted
    r %= contents.size();

    std::map<std::string,struct stat*>::iterator it = contents.begin();
    while (r--) ++it;

    n2 = cwd;
    n2.push_dentry( it->first );
    return n2.get_path().c_str();
  }

  filepath sub;
  char sub_s[50];
  const char *make_sub(const char *base) {
    snprintf(sub_s, sizeof(sub_s), "%s.%d", base, rand() % 100);
    std::string f = sub_s;
    sub = cwd;
    sub.push_dentry(f);
    return sub.c_str();
  }

 public:
  SyntheticClient(StandaloneClient *client, int w = -1);

  int start_thread();
  int join_thread();

  int run();

  bool run_me() {
    if (run_only >= 0) {
      if (run_only == client->get_nodeid())
        return true;
      else
        return false;
    }
    return true;
  }
  void did_run_me() {
    run_only = -1;
    run_until = utime_t();
  }

  // run() will do one of these things:
  std::list<int> modes;
  std::list<std::string> sargs;
  std::list<int> iargs;
  utime_t run_start;
  utime_t run_until;

  client_t run_only;
  client_t exclude;

  std::string get_sarg(int seq);
  int get_iarg() {
    int i = iargs.front();
    iargs.pop_front();
    return i;
  }

  bool time_to_stop() {
    utime_t now = ceph_clock_now();
    if (0) std::cout << "time_to_stop .. now " << now
                     << " until " << run_until
                     << " start " << run_start
                     << std::endl;
    if (run_until.sec() && now > run_until)
      return true;
    else
      return false;
  }

  std::string compose_path(std::string& prefix, char *rest) {
    return prefix + rest;
  }

  int full_walk(std::string& fromdir);
  int random_walk(int n);

  int dump_placement(std::string& fn);

  int make_dirs(const char *basedir, int dirs, int files, int depth);
  int stat_dirs(const char *basedir, int dirs, int files, int depth);
  int read_dirs(const char *basedir, int dirs, int files, int depth);
  int make_files(int num, int count, int priv, bool more);
  int link_test();

  int create_shared(int num);
  int open_shared(int num, int count);

  int rm_file(std::string& fn);
  int write_file(std::string& fn, int mb, loff_t chunk);
  int write_fd(int fd, int size, int wrsize);

  int write_batch(int nfile, int mb, int chunk);
  int read_file(const std::string& fn, int mb, int chunk, bool ignoreprint=false);

  int create_objects(int nobj, int osize, int inflight);
  int object_rw(int nobj, int osize, int wrpc, int overlap,
                double rskew, double wskew);

  int read_random(std::string& fn, int mb, int chunk);
  int read_random_ex(std::string& fn, int mb, int chunk);

  int overload_osd_0(int n, int sie, int wrsize);
  int check_first_primary(int fd);

  int clean_dir(std::string& basedir);

  int play_trace(Trace& t, std::string& prefix, bool metadata_only=false);

  void make_dir_mess(const char *basedir, int n);
  void foo();

  int thrash_links(const char *basedir, int dirs, int files, int depth, int n);

  void import_find(const char *basedir, const char *find, bool writedata);

  int lookup_hash(inodeno_t ino, inodeno_t dirino, const char *name,
                  const UserPerm& perms);
  int lookup_ino(inodeno_t ino, const UserPerm& perms);

  int chunk_file(std::string &filename);

  void mksnap(const char *base, const char *name, const UserPerm& perms);
  void rmsnap(const char *base, const char *name, const UserPerm& perms);
  void mksnapfile(const char *dir);

};

#endif
7,239
24.673759
101
h
null
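The get_random_subdir()/get_random_sub() helpers above deliberately skew their random index: averaging two uniform draws yields a roughly triangular distribution that favors entries near the middle of the set, so the synthetic workload revisits some paths more often than others. A standalone histogram (illustrative only) makes the shape visible:

#include <cstdio>
#include <cstdlib>

int main()
{
  const int n = 10;
  long hist[n] = {0};
  // Same trick as SyntheticClient::get_random_subdir(): the mean of two
  // uniform draws over [0, n) piles up around n/2 instead of being flat.
  for (int i = 0; i < 1000000; i++) {
    int r = ((rand() % n) + (rand() % n)) / 2;
    hist[r]++;
  }
  for (int i = 0; i < n; i++)
    printf("%d: %ld\n", i, hist[i]);
  return 0;
}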
ceph-main/src/client/Trace.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <[email protected]>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#include "Trace.h"
#include "common/debug.h"

#include <iostream>
#include <map>

#include "common/config.h"

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

void Trace::start()
{
  //cout << "start" << std::endl;
  delete fs;
  fs = new ifstream();
  fs->open(filename);
  if (!fs->is_open()) {
    //generic_dout(0) << "** unable to open trace file " << filename << dendl;
    ceph_abort();
  }
  //generic_dout(2) << "opened traced file '" << filename << "'" << dendl;

  // read first line
  getline(*fs, line);
  //cout << "first line is " << line << std::endl;

  _line = 1;
}

const char *Trace::peek_string(string &buf, const char *prefix)
{
  //if (prefix) cout << "prefix '" << prefix << "' line '" << line << "'" << std::endl;
  if (prefix && strstr(line.c_str(), "/prefix") == line.c_str()) {
    buf.clear();
    buf.append(prefix);
    buf.append(line.c_str() + strlen("/prefix"));
  } else {
    buf = line;
  }
  return buf.c_str();
}

const char *Trace::get_string(string &buf, const char *prefix)
{
  peek_string(buf, prefix);

  //cout << "buf is " << buf << std::endl;

  // read next line (and detect eof early)
  _line++;
  getline(*fs, line);
  //cout << "next line is " << line << std::endl;
  return buf.c_str();
}
1,702
20.2875
87
cc
null
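peek_string() above rewrites any trace line that begins with the literal token /prefix, splicing the caller-supplied prefix in front of the remainder so a recorded trace can be replayed against a different mount point. The same substitution in isolation (plain C string handling, no ceph types; the function name substitute_prefix is ours):

#include <cstring>
#include <cstdio>
#include <string>

// Mirror Trace::peek_string(): if `line` starts with "/prefix", replace
// that token with `prefix`; otherwise return the line unchanged.
static std::string substitute_prefix(const std::string &line, const char *prefix)
{
  if (prefix && strstr(line.c_str(), "/prefix") == line.c_str())
    return std::string(prefix) + (line.c_str() + strlen("/prefix"));
  return line;
}

int main()
{
  printf("%s\n", substitute_prefix("/prefix/dir/file", "/mnt/ceph").c_str());
  // -> /mnt/ceph/dir/file
  printf("%s\n", substitute_prefix("open", "/mnt/ceph").c_str());
  // -> open (no substitution)
  return 0;
}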
ceph-main/src/client/Trace.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <[email protected]>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_CLIENT_TRACE_H
#define CEPH_CLIENT_TRACE_H

#include <stdlib.h>

#include <list>
#include <string>
#include <fstream>

using std::list;
using std::string;
using std::ifstream;

/*
 this class is more like an iterator over a constant tokenlist (which
 is protected by a mutex, see Trace.cc)
 */

class Trace {
  int _line;
  const char *filename;
  ifstream *fs;
  string line;

 public:
  explicit Trace(const char* f) : _line(0), filename(f), fs(0) {}
  ~Trace() {
    delete fs;
  }

  Trace(const Trace& other);
  const Trace& operator=(const Trace& other);

  int get_line() { return _line; }

  void start();

  const char *peek_string(string &buf, const char *prefix);
  const char *get_string(string &buf, const char *prefix);

  int64_t get_int() {
    string buf;
    return atoll(get_string(buf, 0));
  }

  bool end() {
    return !fs || fs->eof();
    //return _cur == _end;
  }
};

#endif
1,348
18.838235
71
h
null
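Taken together with Trace.cc above, Trace is consumed as a forward-only token stream: start() opens the file and primes the first line, then each get_string()/get_int() call returns the current token and reads the next, with peek_string() available for lookahead. A usage sketch, assuming it is compiled inside the ceph tree alongside Trace.cc (the trace path here is made up):

#include "Trace.h"
#include <iostream>

int main()
{
  Trace t("/tmp/client.trace");  // hypothetical trace file, one token per line
  t.start();                     // aborts if the file cannot be opened
  std::string buf;
  while (!t.end()) {
    // nullptr prefix: no "/prefix" substitution is performed
    const char *tok = t.get_string(buf, nullptr);
    std::cout << t.get_line() << ": " << tok << std::endl;
  }
  return 0;
}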
ceph-main/src/client/UserPerm.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 Red Hat
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_CLIENT_USERPERM_H
#define CEPH_CLIENT_USERPERM_H

struct UserPerm
{
private:
  uid_t m_uid;
  gid_t m_gid;
  int gid_count;
  gid_t *gids;
  bool alloced_gids;

  void deep_copy_from(const UserPerm& b) {
    if (alloced_gids) {
      delete[] gids;
      alloced_gids = false;
    }

    m_uid = b.m_uid;
    m_gid = b.m_gid;
    gid_count = b.gid_count;
    if (gid_count > 0) {
      gids = new gid_t[gid_count];
      alloced_gids = true;
      for (int i = 0; i < gid_count; ++i) {
        gids[i] = b.gids[i];
      }
    }
  }

public:
  UserPerm() : m_uid(-1), m_gid(-1), gid_count(0),
               gids(NULL), alloced_gids(false) {}
  UserPerm(uid_t uid, gid_t gid, int ngids=0, gid_t *gidlist=NULL) :
               m_uid(uid), m_gid(gid), gid_count(ngids),
               gids(gidlist), alloced_gids(false) {}
  UserPerm(const UserPerm& o) : UserPerm() {
    deep_copy_from(o);
  }
  UserPerm(UserPerm && o) {
    m_uid = o.m_uid;
    m_gid = o.m_gid;
    gid_count = o.gid_count;
    gids = o.gids;
    alloced_gids = o.alloced_gids;
    o.gids = NULL;
    o.gid_count = 0;
  }
  ~UserPerm() {
    if (alloced_gids)
      delete[] gids;
  }
  UserPerm& operator=(const UserPerm& o) {
    // guard against self-assignment: deep_copy_from() frees gids before
    // copying, which would otherwise read from a freed buffer
    if (this != &o)
      deep_copy_from(o);
    return *this;
  }

  uid_t uid() const { return m_uid != (uid_t)-1 ? m_uid : ::geteuid(); }
  gid_t gid() const { return m_gid != (gid_t)-1 ? m_gid : ::getegid(); }
  bool gid_in_groups(gid_t id) const {
    if (id == gid()) return true;
    for (int i = 0; i < gid_count; ++i) {
      if (id == gids[i]) return true;
    }
    return false;
  }
  int get_gids(const gid_t **_gids) const { *_gids = gids; return gid_count; }
  void init_gids(gid_t* _gids, int count) {
    gids = _gids;
    gid_count = count;
    alloced_gids = true;
  }
  void shallow_copy(const UserPerm& o) {
    m_uid = o.m_uid;
    m_gid = o.m_gid;
    gid_count = o.gid_count;
    gids = o.gids;
    alloced_gids = false;
  }
};

#endif
2,311
23.595745
78
h
null
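UserPerm above bundles an effective uid/gid plus an optional supplementary group list; the four-argument constructor only borrows the array, while init_gids() transfers ownership (it sets alloced_gids, so the destructor will delete[] the list). A short usage sketch, assuming uid_t/gid_t are in scope via <sys/types.h> as they are throughout the ceph client:

#include <sys/types.h>
#include <unistd.h>
#include "UserPerm.h"
#include <cassert>

int main()
{
  // Borrowing constructor: the caller keeps ownership of `groups`.
  gid_t groups[2] = {100, 4};
  UserPerm borrowed(1000, 100, 2, groups);
  assert(borrowed.gid_in_groups(4));

  // init_gids() transfers ownership: the UserPerm destructor delete[]s it.
  UserPerm owning(1000, 100);
  gid_t *heap_groups = new gid_t[1];
  heap_groups[0] = 27;
  owning.init_gids(heap_groups, 1);
  assert(owning.gid_in_groups(27));
  return 0;
}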
ceph-main/src/client/barrier.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 *
 * Copyright (C) 2012 CohortFS, LLC.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#if defined(__FreeBSD__)
#include <sys/param.h>
#endif

#include "include/Context.h"
#include "Client.h"
#include "barrier.h"

#undef dout_prefix
#define dout_prefix *_dout << "client." << whoami << " "
#define dout_subsys ceph_subsys_client

#define cldout(cl, v)  dout_impl((cl)->cct, dout_subsys, v) \
  *_dout << "client." << cl->whoami << " "

/* C_Block_Sync */
class C_Block_Sync : public Context {
private:
  Client *cl;
  uint64_t ino;
  barrier_interval iv;
  enum CBlockSync_State state;
  Barrier *barrier;
  int *rval; /* see Cond.h */

public:
  boost::intrusive::list_member_hook<> intervals_hook;

  C_Block_Sync(Client *c, uint64_t i, barrier_interval iv, int *r);
  void finish(int rval);

  friend class Barrier;
  friend class BarrierContext;
};

C_Block_Sync::C_Block_Sync(Client *c, uint64_t i, barrier_interval iv,
                           int *r=0) :
  cl(c), ino(i), iv(iv), rval(r)
{
  state = CBlockSync_State_None;
  barrier = NULL;

  cldout(cl, 1) << "C_Block_Sync for " << ino << dendl;

  if (!cl->barriers[ino]) {
    cl->barriers[ino] = new BarrierContext(cl, ino);
  }
  /* XXX current semantics aren't commit-ordered */
  cl->barriers[ino]->write_nobarrier(*this);
}

void C_Block_Sync::finish(int r) {
  cldout(cl, 1) << "C_Block_Sync::finish() for " << ino << " "
                << iv << " r==" << r << dendl;
  if (rval)
    *rval = r;
  cl->barriers[ino]->complete(*this);
}

/* Barrier */
Barrier::Barrier()
{ }

Barrier::~Barrier()
{ }

/* BarrierContext */
BarrierContext::BarrierContext(Client *c, uint64_t ino) :
  cl(c), ino(ino)
{ };

void BarrierContext::write_nobarrier(C_Block_Sync &cbs)
{
  std::lock_guard locker(lock);
  cbs.state = CBlockSync_State_Unclaimed;
  outstanding_writes.push_back(cbs);
}

void BarrierContext::write_barrier(C_Block_Sync &cbs)
{
  std::unique_lock locker(lock);
  barrier_interval &iv = cbs.iv;

  { /* find blocking commit--intrusive no help here */
    BarrierList::iterator iter;
    bool done = false;
    for (iter = active_commits.begin();
         !done && (iter != active_commits.end());
         ++iter) {
      Barrier &barrier = *iter;
      while (boost::icl::intersects(barrier.span, iv)) {
        /* wait on this */
        barrier.cond.wait(locker);
        done = true;
      }
    }
  }

  cbs.state = CBlockSync_State_Unclaimed;
  outstanding_writes.push_back(cbs);

} /* write_barrier */

void BarrierContext::commit_barrier(barrier_interval &civ)
{
  std::unique_lock locker(lock);

  /* we commit outstanding writes--if none exist, we don't care */
  if (outstanding_writes.size() == 0)
    return;

  boost::icl::interval_set<uint64_t> cvs;
  cvs.insert(civ);

  Barrier *barrier = NULL;
  BlockSyncList::iterator iter, iter2;

  iter = outstanding_writes.begin();
  while (iter != outstanding_writes.end()) {
    barrier_interval &iv = iter->iv;
    if (boost::icl::intersects(cvs, iv)) {
      C_Block_Sync &a_write = *iter;
      if (! barrier)
        barrier = new Barrier();
      /* mark the callback */
      a_write.state = CBlockSync_State_Committing;
      a_write.barrier = barrier;
      iter2 = iter++;
      outstanding_writes.erase(iter2);
      barrier->write_list.push_back(a_write);
      barrier->span.insert(iv);
      /* avoid iter invalidate */
    } else {
      ++iter;
    }
  }

  if (barrier) {
    active_commits.push_back(*barrier);
    /* and wait on this */
    barrier->cond.wait(locker);
  }

} /* commit_barrier */

void BarrierContext::complete(C_Block_Sync &cbs)
{
  std::lock_guard locker(lock);

  BlockSyncList::iterator iter =
    BlockSyncList::s_iterator_to(cbs);

  switch (cbs.state) {
  case CBlockSync_State_Unclaimed:
    /* cool, no waiting */
    outstanding_writes.erase(iter);
    break;
  case CBlockSync_State_Committing:
  {
    Barrier *barrier = iter->barrier;
    barrier->write_list.erase(iter);
    /* signal waiters */
    barrier->cond.notify_all();
    /* dispose cleared barrier */
    if (barrier->write_list.size() == 0) {
      BarrierList::iterator iter2 =
        BarrierList::s_iterator_to(*barrier);
      active_commits.erase(iter2);
      delete barrier;
    }
  }
    break;
  default:
    ceph_abort();
    break;
  }

  cbs.state = CBlockSync_State_Completed;

} /* complete */

BarrierContext::~BarrierContext()
{ }
4,594
22.443878
70
cc
null
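commit_barrier() above decides which outstanding writes a commit must wait for purely by interval arithmetic: it builds an interval_set from the commit range and claims every write whose byte range intersects it. The underlying boost::icl behavior in isolation (a standalone sketch; the variable names are ours):

#include <boost/icl/interval_set.hpp>
#include <cstdint>
#include <iostream>

int main()
{
  namespace icl = boost::icl;
  // Same typedef as barrier.h
  typedef icl::interval<uint64_t>::type barrier_interval;

  icl::interval_set<uint64_t> commit_span;
  commit_span.insert(icl::interval<uint64_t>::right_open(0, 4096));  // commit covers [0, 4096)

  barrier_interval write_a = icl::interval<uint64_t>::right_open(1024, 2048);   // overlaps the commit
  barrier_interval write_b = icl::interval<uint64_t>::right_open(8192, 12288);  // does not

  std::cout << icl::intersects(commit_span, write_a) << "\n";  // 1: would be claimed
  std::cout << icl::intersects(commit_span, write_b) << "\n";  // 0: stays outstanding
  return 0;
}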
ceph-main/src/client/barrier.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 *
 * Copyright (C) 2012 CohortFS, LLC.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef BARRIER_H
#define BARRIER_H

#include "include/types.h"
#include <boost/intrusive/list.hpp>
#define BOOST_ICL_USE_STATIC_BOUNDED_INTERVALS
#include <boost/icl/interval_set.hpp>
#include "common/ceph_mutex.h"

class Client;

typedef boost::icl::interval<uint64_t>::type barrier_interval;

/*
 * we keep count of uncommitted writes on the inode, so that
 * ll_commit_blocks can do the right thing.
 *
 * This is just a hacked copy of Ceph's sync callback.
 */

enum CBlockSync_State
{
  CBlockSync_State_None, /* initial state */
  CBlockSync_State_Unclaimed, /* outstanding write */
  CBlockSync_State_Committing, /* commit in progress */
  CBlockSync_State_Completed,
};

class BarrierContext;

class C_Block_Sync;

typedef boost::intrusive::list< C_Block_Sync,
                                boost::intrusive::member_hook<
                                  C_Block_Sync,
                                  boost::intrusive::list_member_hook<>,
                                  &C_Block_Sync::intervals_hook >
                                > BlockSyncList;

class Barrier
{
private:
  ceph::condition_variable cond;
  boost::icl::interval_set<uint64_t> span;
  BlockSyncList write_list;

public:
  boost::intrusive::list_member_hook<> active_commits_hook;

  Barrier();
  ~Barrier();

  friend class BarrierContext;
};

typedef boost::intrusive::list< Barrier,
                                boost::intrusive::member_hook<
                                  Barrier,
                                  boost::intrusive::list_member_hook<>,
                                  &Barrier::active_commits_hook >
                                > BarrierList;

class BarrierContext
{
private:
  Client *cl;
  uint64_t ino;
  ceph::mutex lock = ceph::make_mutex("BarrierContext");

  // writes not claimed by a commit
  BlockSyncList outstanding_writes;

  // commits in progress, with their claimed writes
  BarrierList active_commits;

public:
  BarrierContext(Client *c, uint64_t ino);
  void write_nobarrier(C_Block_Sync &cbs);
  void write_barrier(C_Block_Sync &cbs);
  void commit_barrier(barrier_interval &civ);
  void complete(C_Block_Sync &cbs);
  ~BarrierContext();
};

#endif
2,273
21.74
70
h
null
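Both list typedefs above are intrusive: the hook lives inside the element, so insertion never allocates, and s_iterator_to() (used by BarrierContext::complete()) recovers an iterator from a bare element reference in O(1). The pattern in miniature (a standalone sketch; the Write type is ours):

#include <boost/intrusive/list.hpp>
#include <cassert>

struct Write {
  int id;
  boost::intrusive::list_member_hook<> hook;  // link storage lives inside the element
  explicit Write(int i) : id(i) {}
};

typedef boost::intrusive::list<
  Write,
  boost::intrusive::member_hook<Write, boost::intrusive::list_member_hook<>,
                                &Write::hook>
> WriteList;

int main()
{
  Write a(1), b(2);
  WriteList l;
  l.push_back(a);  // no allocation: links through a.hook
  l.push_back(b);

  // Same trick as BarrierContext::complete(): element -> iterator.
  WriteList::iterator it = WriteList::s_iterator_to(b);
  l.erase(it);
  assert(l.size() == 1 && l.front().id == 1);
  return 0;
}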
ceph-main/src/client/fuse_ll.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <[email protected]> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <sys/file.h> #include <sys/types.h> #include <sys/wait.h> #include <limits.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <fcntl.h> #include <unistd.h> #if defined(__linux__) #include <libgen.h> #include <sys/vfs.h> #include <sys/xattr.h> #include <linux/magic.h> #endif // ceph #include "common/errno.h" #include "common/safe_io.h" #include "include/types.h" #include "Client.h" #include "Fh.h" #include "ioctl.h" #include "common/config.h" #include "include/ceph_assert.h" #include "include/cephfs/ceph_ll_client.h" #include "include/ceph_fuse.h" #include "fuse_ll.h" #include <fuse_lowlevel.h> #define dout_context g_ceph_context #define FINO_INO(x) ((x) & ((1ull<<48)-1ull)) #define FINO_STAG(x) ((x) >> 48) #define MAKE_FINO(i,s) ((i) | ((int64_t)(s) << 48)) #define STAG_MASK 0xffff #define G_NOSNAP_STAG 0 // for all CEPH_NOSNAP #define G_SNAPDIR_STAG 1 // for all CEPH_SNAPDIR #define MINORBITS 20 #define MINORMASK ((1U << MINORBITS) - 1) #define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) #define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) #define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi)) #if defined(__linux__) #ifndef FUSE_SUPER_MAGIC #define FUSE_SUPER_MAGIC 0x65735546 #endif #define _CEPH_CLIENT_ID "ceph.client_id" #endif /* * The dedicated struct for snapid <-> stag map for each ceph * inode, and the stag is a number in range [2, 0xffff], and * the stag number 0 is reserved for CEPH_NOSNAP and 1 is * reserved for CEPH_SNAPDIR. 
*/ struct ceph_fuse_fake_inode_stag { ceph::unordered_map<uint64_t,int> snap_stag_map; // <snapid, stagid> ceph::unordered_map<int, uint64_t> stag_snap_map; // <stagid, snapid> int last_stag = 1; }; using namespace std; static const ceph::unordered_map<int,int> cephfs_errno_to_system_errno = { {CEPHFS_EBLOCKLISTED, ESHUTDOWN}, {CEPHFS_EPERM, EPERM}, {CEPHFS_ESTALE, ESTALE}, {CEPHFS_ENOSPC, ENOSPC}, {CEPHFS_ETIMEDOUT, ETIMEDOUT}, {CEPHFS_EIO, EIO}, {CEPHFS_ENOTCONN, ENOTCONN}, {CEPHFS_EEXIST, EEXIST}, {CEPHFS_EINTR, EINTR}, {CEPHFS_EINVAL, EINVAL}, {CEPHFS_EBADF, EBADF}, {CEPHFS_EROFS, EROFS}, {CEPHFS_EAGAIN, EAGAIN}, {CEPHFS_EACCES, EACCES}, {CEPHFS_ELOOP, ELOOP}, {CEPHFS_EISDIR, EISDIR}, {CEPHFS_ENOENT, ENOENT}, {CEPHFS_ENOTDIR, ENOTDIR}, {CEPHFS_ENAMETOOLONG, ENAMETOOLONG}, {CEPHFS_EBUSY, EBUSY}, {CEPHFS_EDQUOT, EDQUOT}, {CEPHFS_EFBIG, EFBIG}, {CEPHFS_ERANGE, ERANGE}, {CEPHFS_ENXIO, ENXIO}, {CEPHFS_ECANCELED, ECANCELED}, {CEPHFS_ENODATA, ENODATA}, {CEPHFS_EOPNOTSUPP, EOPNOTSUPP}, {CEPHFS_EXDEV, EXDEV}, {CEPHFS_ENOMEM, ENOMEM}, {CEPHFS_ENOTRECOVERABLE, ENOTRECOVERABLE}, {CEPHFS_ENOSYS, ENOSYS}, {CEPHFS_ENOTEMPTY, ENOTEMPTY}, {CEPHFS_EDEADLK, EDEADLK}, {CEPHFS_EDOM, EDOM}, {CEPHFS_EMLINK, EMLINK}, {CEPHFS_ETIME, ETIME}, {CEPHFS_EOLDSNAPC, EIO} // forcing to EIO for now }; /* Requirements: * cephfs_errno >= 0 */ static int get_sys_errno(int cephfs_errno) { if (cephfs_errno == 0) return 0; auto it = cephfs_errno_to_system_errno.find(cephfs_errno); if (it != cephfs_errno_to_system_errno.end()) return it->second; return EIO; } static uint32_t new_encode_dev(dev_t dev) { unsigned major = MAJOR(dev); unsigned minor = MINOR(dev); return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); } static dev_t new_decode_dev(uint32_t dev) { unsigned major = (dev & 0xfff00) >> 8; unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); return MKDEV(major, minor); } class CephFuse::Handle { public: Handle(Client *c, int fd); ~Handle(); int init(int argc, const char *argv[]); int start(); int loop(); void finalize(); uint64_t fino_snap(uint64_t fino); uint64_t make_fake_ino(inodeno_t ino, snapid_t snapid); Inode * iget(fuse_ino_t fino); void iput(Inode *in); int fd_on_success; Client *client; struct fuse_session *se = nullptr; #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) struct fuse_cmdline_opts opts; struct fuse_conn_info_opts *conn_opts; #else struct fuse_chan *ch = nullptr; char *mountpoint = nullptr; #endif ceph::mutex stag_lock = ceph::make_mutex("fuse_ll.cc stag_lock"); // a map of <ceph ino, fino stag/snapid map> ceph::unordered_map<uint64_t, struct ceph_fuse_fake_inode_stag> g_fino_maps; pthread_key_t fuse_req_key = 0; void set_fuse_req(fuse_req_t); fuse_req_t get_fuse_req(); struct fuse_args args; }; #if defined(__linux__) static int already_fuse_mounted(const char *path, bool &already_mounted) { struct statx path_statx; struct statx parent_statx; char path_copy[PATH_MAX] = {0}; char *parent_path = NULL; int err = 0; already_mounted = false; strncpy(path_copy, path, sizeof(path_copy)-1); parent_path = dirname(path_copy); // get stat information for original path if (-1 == statx(AT_FDCWD, path, AT_STATX_DONT_SYNC, STATX_INO, &path_statx)) { err = errno; derr << "fuse_ll: already_fuse_mounted: statx(" << path << ") failed with error " << cpp_strerror(err) << dendl; return err; } // if path isn't directory, then it can't be a mountpoint. 
if (!(path_statx.stx_mode & S_IFDIR)) { err = EINVAL; derr << "fuse_ll: already_fuse_mounted: " << path << " is not a directory" << dendl; return err; } // get stat information for parent path if (-1 == statx(AT_FDCWD, parent_path, AT_STATX_DONT_SYNC, STATX_INO, &parent_statx)) { err = errno; derr << "fuse_ll: already_fuse_mounted: statx(" << parent_path << ") failed with error " << cpp_strerror(err) << dendl; return err; } // if original path and parent have different device ids, // then the path is a mount point // or, if they refer to the same path, then it's probably // the root directory '/' and therefore path is a mountpoint if( path_statx.stx_dev_major != parent_statx.stx_dev_major || path_statx.stx_dev_minor != parent_statx.stx_dev_minor || ( path_statx.stx_dev_major == parent_statx.stx_dev_major && path_statx.stx_dev_minor == parent_statx.stx_dev_minor && path_statx.stx_ino == parent_statx.stx_ino ) ) { struct statfs path_statfs; if (-1 == statfs(path, &path_statfs)) { err = errno; derr << "fuse_ll: already_fuse_mounted: statfs(" << path << ") failed with error " << cpp_strerror(err) << dendl; return err; } if(FUSE_SUPER_MAGIC == path_statfs.f_type) { // if getxattr returns positive length means value exist for ceph.client_id // then ceph fuse is already mounted on path char client_id[128] = {0}; if (getxattr(path, _CEPH_CLIENT_ID, &client_id, sizeof(client_id)) > 0) { already_mounted = true; derr << path << " already mounted by " << client_id << dendl; } } } return err; } #else // non-linux platforms static int already_fuse_mounted(const char *path, bool &already_mounted) { already_mounted = false; return 0; } #endif static int getgroups(fuse_req_t req, gid_t **sgids) { #if FUSE_VERSION >= FUSE_MAKE_VERSION(2, 8) ceph_assert(sgids); int c = fuse_req_getgroups(req, 0, NULL); if (c < 0) { return c; } if (c == 0) { return 0; } gid_t *gids = new (std::nothrow) gid_t[c]; if (!gids) { return -get_sys_errno(CEPHFS_ENOMEM); } c = fuse_req_getgroups(req, c, gids); if (c < 0) { delete[] gids; } else { *sgids = gids; } return c; #endif return -get_sys_errno(CEPHFS_ENOSYS); } static void get_fuse_groups(UserPerm& perms, fuse_req_t req) { CephFuse::Handle *cfuse = (CephFuse::Handle *)fuse_req_userdata(req); if (cfuse->client->cct->_conf.get_val<bool>("fuse_set_user_groups")) { gid_t *gids = NULL; int count = getgroups(req, &gids); if (count > 0) { perms.init_gids(gids, count); } else if (count < 0) { derr << __func__ << ": getgroups failed: " << cpp_strerror(-count) << dendl; } } } static CephFuse::Handle *fuse_ll_req_prepare(fuse_req_t req) { CephFuse::Handle *cfuse = (CephFuse::Handle *)fuse_req_userdata(req); cfuse->set_fuse_req(req); return cfuse; } static void fuse_ll_lookup(fuse_req_t req, fuse_ino_t parent, const char *name) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); struct fuse_entry_param fe; Inode *i2, *i1 = cfuse->iget(parent); // see below int r; UserPerm perms(ctx->uid, ctx->gid); get_fuse_groups(perms, req); if (!i1) { r = cfuse->client->lookup_ino(parent, perms, &i1); if (r < 0) { fuse_reply_err(req, get_sys_errno(-r)); return; } } memset(&fe, 0, sizeof(fe)); r = cfuse->client->ll_lookup(i1, name, &fe.attr, &i2, perms); if (r >= 0) { fe.ino = cfuse->make_fake_ino(fe.attr.st_ino, fe.attr.st_dev); fe.attr.st_rdev = new_encode_dev(fe.attr.st_rdev); fuse_reply_entry(req, &fe); } else { fuse_reply_err(req, get_sys_errno(-r)); } // XXX NB, we dont iput(i2) because FUSE will do so in a matching // fuse_ll_forget() 
cfuse->iput(i1); } // fuse3 has changed forget function signature #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) static void fuse_ll_forget(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup) #else static void fuse_ll_forget(fuse_req_t req, fuse_ino_t ino, long unsigned nlookup) #endif { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); Inode *in = cfuse->iget(ino); if (in) cfuse->client->ll_forget(in, nlookup+1); fuse_reply_none(req); } static void fuse_ll_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); struct stat stbuf; UserPerm perms(ctx->uid, ctx->gid); Inode *in = cfuse->iget(ino); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); (void) fi; // XXX if (cfuse->client->ll_getattr(in, &stbuf, perms) == 0) { stbuf.st_ino = cfuse->make_fake_ino(stbuf.st_ino, stbuf.st_dev); stbuf.st_rdev = new_encode_dev(stbuf.st_rdev); fuse_reply_attr(req, &stbuf, 0); } else fuse_reply_err(req, ENOENT); cfuse->iput(in); // iput required } static void fuse_ll_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr, int to_set, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); UserPerm perms(ctx->uid, ctx->gid); Inode *in = cfuse->iget(ino); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); int mask = 0; if (to_set & FUSE_SET_ATTR_MODE) mask |= CEPH_SETATTR_MODE; if (to_set & FUSE_SET_ATTR_UID) mask |= CEPH_SETATTR_UID; if (to_set & FUSE_SET_ATTR_GID) mask |= CEPH_SETATTR_GID; if (to_set & FUSE_SET_ATTR_MTIME) mask |= CEPH_SETATTR_MTIME; if (to_set & FUSE_SET_ATTR_ATIME) mask |= CEPH_SETATTR_ATIME; if (to_set & FUSE_SET_ATTR_SIZE) mask |= CEPH_SETATTR_SIZE; #if !defined(__APPLE__) if (to_set & FUSE_SET_ATTR_MTIME_NOW) mask |= CEPH_SETATTR_MTIME_NOW; if (to_set & FUSE_SET_ATTR_ATIME_NOW) mask |= CEPH_SETATTR_ATIME_NOW; #endif int r = cfuse->client->ll_setattr(in, attr, mask, perms); if (r == 0) fuse_reply_attr(req, attr, 0); else fuse_reply_err(req, get_sys_errno(-r)); cfuse->iput(in); // iput required } // XATTRS static void fuse_ll_setxattr(fuse_req_t req, fuse_ino_t ino, const char *name, const char *value, size_t size, int flags #if defined(__APPLE__) ,uint32_t pos #endif ) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); UserPerm perms(ctx->uid, ctx->gid); Inode *in = cfuse->iget(ino); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); int r = cfuse->client->ll_setxattr(in, name, value, size, flags, perms); fuse_reply_err(req, get_sys_errno(-r)); cfuse->iput(in); // iput required } static void fuse_ll_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); char buf[size]; UserPerm perms(ctx->uid, ctx->gid); Inode *in = cfuse->iget(ino); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); int r = cfuse->client->ll_listxattr(in, buf, size, perms); if (size == 0 && r >= 0) fuse_reply_xattr(req, r); else if (r >= 0) fuse_reply_buf(req, buf, r); else fuse_reply_err(req, get_sys_errno(-r)); cfuse->iput(in); // iput required } static void fuse_ll_getxattr(fuse_req_t req, fuse_ino_t ino, const char *name, size_t size #if defined(__APPLE__) ,uint32_t position #endif ) 
{ CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); char buf[size]; UserPerm perms(ctx->uid, ctx->gid); Inode *in = cfuse->iget(ino); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); int r = cfuse->client->ll_getxattr(in, name, buf, size, perms); if (size == 0 && r >= 0) fuse_reply_xattr(req, r); else if (r >= 0) fuse_reply_buf(req, buf, r); else fuse_reply_err(req, get_sys_errno(-r)); cfuse->iput(in); // iput required } static void fuse_ll_removexattr(fuse_req_t req, fuse_ino_t ino, const char *name) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); UserPerm perms(ctx->uid, ctx->gid); Inode *in = cfuse->iget(ino); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); int r = cfuse->client->ll_removexattr(in, name, perms); fuse_reply_err(req, get_sys_errno(-r)); cfuse->iput(in); // iput required } static void fuse_ll_opendir(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); UserPerm perms(ctx->uid, ctx->gid); void *dirp; Inode *in = cfuse->iget(ino); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); int r = cfuse->client->ll_opendir(in, fi->flags, (dir_result_t **)&dirp, perms); if (r >= 0) { fi->fh = (uint64_t)dirp; fuse_reply_open(req, fi); } else { fuse_reply_err(req, get_sys_errno(-r)); } cfuse->iput(in); // iput required } static void fuse_ll_readlink(fuse_req_t req, fuse_ino_t ino) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); char buf[PATH_MAX + 1]; // leave room for a null terminator UserPerm perms(ctx->uid, ctx->gid); Inode *in = cfuse->iget(ino); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); int r = cfuse->client->ll_readlink(in, buf, sizeof(buf) - 1, perms); if (r >= 0) { buf[r] = '\0'; fuse_reply_readlink(req, buf); } else { fuse_reply_err(req, get_sys_errno(-r)); } cfuse->iput(in); // iput required } static void fuse_ll_mknod(fuse_req_t req, fuse_ino_t parent, const char *name, mode_t mode, dev_t rdev) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); struct fuse_entry_param fe; UserPerm perms(ctx->uid, ctx->gid); Inode *i2, *i1 = cfuse->iget(parent); if (!i1) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); memset(&fe, 0, sizeof(fe)); int r = cfuse->client->ll_mknod(i1, name, mode, new_decode_dev(rdev), &fe.attr, &i2, perms); if (r == 0) { fe.ino = cfuse->make_fake_ino(fe.attr.st_ino, fe.attr.st_dev); fe.attr.st_rdev = new_encode_dev(fe.attr.st_rdev); fuse_reply_entry(req, &fe); } else { fuse_reply_err(req, get_sys_errno(-r)); } // XXX NB, we dont iput(i2) because FUSE will do so in a matching // fuse_ll_forget() cfuse->iput(i1); // iput required } static void fuse_ll_mkdir(fuse_req_t req, fuse_ino_t parent, const char *name, mode_t mode) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); Inode *i2, *i1; struct fuse_entry_param fe; memset(&fe, 0, sizeof(fe)); UserPerm perm(ctx->uid, ctx->gid); get_fuse_groups(perm, req); #ifdef HAVE_SYS_SYNCFS auto fuse_multithreaded = cfuse->client->cct->_conf.get_val<bool>( "fuse_multithreaded"); auto fuse_syncfs_on_mksnap = 
cfuse->client->cct->_conf.get_val<bool>( "fuse_syncfs_on_mksnap"); if (cfuse->fino_snap(parent) == CEPH_SNAPDIR && fuse_multithreaded && fuse_syncfs_on_mksnap) { int err = 0; #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) int fd = ::open(cfuse->opts.mountpoint, O_RDONLY | O_DIRECTORY | O_CLOEXEC); #else int fd = ::open(cfuse->mountpoint, O_RDONLY | O_DIRECTORY | O_CLOEXEC); #endif if (fd < 0) { err = errno; } else { int r = ::syncfs(fd); if (r < 0) err = errno; ::close(fd); } if (err) { fuse_reply_err(req, err); return; } } #endif i1 = cfuse->iget(parent); if (!i1) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } int r = cfuse->client->ll_mkdir(i1, name, mode, &fe.attr, &i2, perm); if (r == 0) { fe.ino = cfuse->make_fake_ino(fe.attr.st_ino, fe.attr.st_dev); fe.attr.st_rdev = new_encode_dev(fe.attr.st_rdev); fuse_reply_entry(req, &fe); } else { fuse_reply_err(req, get_sys_errno(-r)); } // XXX NB, we dont iput(i2) because FUSE will do so in a matching // fuse_ll_forget() cfuse->iput(i1); // iput required } static void fuse_ll_unlink(fuse_req_t req, fuse_ino_t parent, const char *name) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); UserPerm perm(ctx->uid, ctx->gid); Inode *in = cfuse->iget(parent); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perm, req); int r = cfuse->client->ll_unlink(in, name, perm); fuse_reply_err(req, get_sys_errno(-r)); cfuse->iput(in); // iput required } static void fuse_ll_rmdir(fuse_req_t req, fuse_ino_t parent, const char *name) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); UserPerm perms(ctx->uid, ctx->gid); Inode *in = cfuse->iget(parent); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); int r = cfuse->client->ll_rmdir(in, name, perms); fuse_reply_err(req, get_sys_errno(-r)); cfuse->iput(in); // iput required } static void fuse_ll_symlink(fuse_req_t req, const char *existing, fuse_ino_t parent, const char *name) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); struct fuse_entry_param fe; UserPerm perms(ctx->uid, ctx->gid); Inode *i2, *i1 = cfuse->iget(parent); if (!i1) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); memset(&fe, 0, sizeof(fe)); int r = cfuse->client->ll_symlink(i1, name, existing, &fe.attr, &i2, perms); if (r == 0) { fe.ino = cfuse->make_fake_ino(fe.attr.st_ino, fe.attr.st_dev); fe.attr.st_rdev = new_encode_dev(fe.attr.st_rdev); fuse_reply_entry(req, &fe); } else { fuse_reply_err(req, get_sys_errno(-r)); } // XXX NB, we dont iput(i2) because FUSE will do so in a matching // fuse_ll_forget() cfuse->iput(i1); // iput required } static void fuse_ll_rename(fuse_req_t req, fuse_ino_t parent, const char *name, fuse_ino_t newparent, const char *newname #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) , unsigned int flags #endif ) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); UserPerm perm(ctx->uid, ctx->gid); Inode *in = cfuse->iget(parent); Inode *nin = cfuse->iget(newparent); if (!in || !nin) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perm, req); int r = cfuse->client->ll_rename(in, name, nin, newname, perm); fuse_reply_err(req, get_sys_errno(-r)); cfuse->iput(in); // iputs required cfuse->iput(nin); } static void fuse_ll_link(fuse_req_t req, fuse_ino_t ino, 
fuse_ino_t newparent, const char *newname) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); struct fuse_entry_param fe; Inode *in = cfuse->iget(ino); Inode *nin = cfuse->iget(newparent); if (!in || !nin) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } memset(&fe, 0, sizeof(fe)); UserPerm perm(ctx->uid, ctx->gid); get_fuse_groups(perm, req); /* * Note that we could successfully link, but then fail the subsequent * getattr and return an error. Perhaps we should ignore getattr errors, * but then how do we tell FUSE that the attrs are bogus? */ int r = cfuse->client->ll_link(in, nin, newname, perm); if (r == 0) { r = cfuse->client->ll_getattr(in, &fe.attr, perm); if (r == 0) { fe.ino = cfuse->make_fake_ino(fe.attr.st_ino, fe.attr.st_dev); fe.attr.st_rdev = new_encode_dev(fe.attr.st_rdev); fuse_reply_entry(req, &fe); } } if (r != 0) { /* * Many ll operations in libcephfs return an extra inode reference, but * ll_link currently does not. Still, FUSE needs one for the new dentry, * so we commandeer the reference taken earlier when ll_link is successful. * On error however, we must put that reference. */ cfuse->iput(in); fuse_reply_err(req, get_sys_errno(-r)); } cfuse->iput(nin); } static void fuse_ll_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); Fh *fh = NULL; UserPerm perms(ctx->uid, ctx->gid); Inode *in = cfuse->iget(ino); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); int r = cfuse->client->ll_open(in, fi->flags, &fh, perms); if (r == 0) { fi->fh = (uint64_t)fh; #if FUSE_VERSION >= FUSE_MAKE_VERSION(2, 8) auto fuse_disable_pagecache = cfuse->client->cct->_conf.get_val<bool>( "fuse_disable_pagecache"); auto fuse_use_invalidate_cb = cfuse->client->cct->_conf.get_val<bool>( "fuse_use_invalidate_cb"); if (fuse_disable_pagecache) fi->direct_io = 1; else if (fuse_use_invalidate_cb) fi->keep_cache = 1; #endif fuse_reply_open(req, fi); } else { fuse_reply_err(req, get_sys_errno(-r)); } cfuse->iput(in); // iput required } static void fuse_ll_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); Fh *fh = reinterpret_cast<Fh*>(fi->fh); bufferlist bl; int r = cfuse->client->ll_read(fh, off, size, &bl); if (r >= 0) { vector<iovec> iov; size_t len; struct fuse_bufvec *bufv; if (bl.get_num_buffers() > IOV_MAX) bl.rebuild(); bl.prepare_iov(&iov); len = sizeof(struct fuse_bufvec) + sizeof(struct fuse_buf) * (iov.size() - 1); bufv = (struct fuse_bufvec *)calloc(1, len); if (bufv) { int i = 0; bufv->count = iov.size(); for (auto &v: iov) { bufv->buf[i].mem = v.iov_base; bufv->buf[i++].size = v.iov_len; } fuse_reply_data(req, bufv, FUSE_BUF_SPLICE_MOVE); free(bufv); return; } iov.insert(iov.begin(), {0}); // the first one is reserved for fuse_out_header fuse_reply_iov(req, &iov[0], iov.size()); } else fuse_reply_err(req, get_sys_errno(-r)); } static void fuse_ll_write(fuse_req_t req, fuse_ino_t ino, const char *buf, size_t size, off_t off, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); Fh *fh = reinterpret_cast<Fh*>(fi->fh); int r = cfuse->client->ll_write(fh, off, size, buf); if (r >= 0) fuse_reply_write(req, r); else fuse_reply_err(req, get_sys_errno(-r)); } static void fuse_ll_flush(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { 
CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); Fh *fh = reinterpret_cast<Fh*>(fi->fh); int r = cfuse->client->ll_flush(fh); fuse_reply_err(req, get_sys_errno(-r)); } #ifdef FUSE_IOCTL_COMPAT static void fuse_ll_ioctl(fuse_req_t req, fuse_ino_t ino, #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 5) unsigned int cmd, #else int cmd, #endif void *arg, struct fuse_file_info *fi, unsigned flags, const void *in_buf, size_t in_bufsz, size_t out_bufsz) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); if (flags & FUSE_IOCTL_COMPAT) { fuse_reply_err(req, ENOSYS); return; } switch (static_cast<unsigned>(cmd)) { case CEPH_IOC_GET_LAYOUT: { file_layout_t layout; struct ceph_ioctl_layout l; Fh *fh = (Fh*)fi->fh; cfuse->client->ll_file_layout(fh, &layout); l.stripe_unit = layout.stripe_unit; l.stripe_count = layout.stripe_count; l.object_size = layout.object_size; l.data_pool = layout.pool_id; fuse_reply_ioctl(req, 0, &l, sizeof(struct ceph_ioctl_layout)); } break; default: fuse_reply_err(req, EINVAL); } } #endif #if FUSE_VERSION >= FUSE_MAKE_VERSION(2, 9) static void fuse_ll_fallocate(fuse_req_t req, fuse_ino_t ino, int mode, off_t offset, off_t length, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); Fh *fh = (Fh*)fi->fh; int r = cfuse->client->ll_fallocate(fh, mode, offset, length); fuse_reply_err(req, get_sys_errno(-r)); } #endif static void fuse_ll_release(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); Fh *fh = reinterpret_cast<Fh*>(fi->fh); int r = cfuse->client->ll_release(fh); fuse_reply_err(req, get_sys_errno(-r)); } static void fuse_ll_fsync(fuse_req_t req, fuse_ino_t ino, int datasync, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); Fh *fh = reinterpret_cast<Fh*>(fi->fh); int r = cfuse->client->ll_fsync(fh, datasync); fuse_reply_err(req, get_sys_errno(-r)); } struct readdir_context { fuse_req_t req; char *buf; size_t size; size_t pos; /* in buf */ uint64_t snap; }; /* * return 0 on success, -1 if out of space */ static int fuse_ll_add_dirent(void *p, struct dirent *de, struct ceph_statx *stx, off_t next_off, Inode *in) { struct readdir_context *c = (struct readdir_context *)p; CephFuse::Handle *cfuse = (CephFuse::Handle *)fuse_req_userdata(c->req); struct stat st; st.st_ino = cfuse->make_fake_ino(stx->stx_ino, c->snap); st.st_mode = stx->stx_mode; st.st_rdev = new_encode_dev(stx->stx_rdev); size_t room = c->size - c->pos; size_t entrysize = fuse_add_direntry(c->req, c->buf + c->pos, room, de->d_name, &st, next_off); if (entrysize > room) return -ENOSPC; /* success */ c->pos += entrysize; return 0; } static void fuse_ll_readdir(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); dir_result_t *dirp = reinterpret_cast<dir_result_t*>(fi->fh); cfuse->client->seekdir(dirp, off); struct readdir_context rc; rc.req = req; rc.snap = cfuse->fino_snap(ino); if (rc.snap == CEPH_MAXSNAP) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } rc.buf = new char[size]; rc.size = size; rc.pos = 0; int r = cfuse->client->readdir_r_cb(dirp, fuse_ll_add_dirent, &rc); if (r == 0 || r == -CEPHFS_ENOSPC) /* ignore ENOSPC from our callback */ fuse_reply_buf(req, rc.buf, rc.pos); else fuse_reply_err(req, get_sys_errno(-r)); delete[] rc.buf; } static void fuse_ll_releasedir(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); dir_result_t 
*dirp = reinterpret_cast<dir_result_t*>(fi->fh); cfuse->client->ll_releasedir(dirp); fuse_reply_err(req, 0); } static void fuse_ll_fsyncdir(fuse_req_t req, fuse_ino_t ino, int datasync, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); dir_result_t *dirp = reinterpret_cast<dir_result_t*>(fi->fh); int r = cfuse->client->ll_fsyncdir(dirp); fuse_reply_err(req, get_sys_errno(-r)); } static void fuse_ll_access(fuse_req_t req, fuse_ino_t ino, int mask) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); UserPerm perms(ctx->uid, ctx->gid); Inode *in = cfuse->iget(ino); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); int r = cfuse->client->inode_permission(in, perms, mask); fuse_reply_err(req, get_sys_errno(-r)); cfuse->iput(in); } static void fuse_ll_create(fuse_req_t req, fuse_ino_t parent, const char *name, mode_t mode, struct fuse_file_info *fi) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); struct fuse_entry_param fe; Fh *fh = NULL; UserPerm perms(ctx->uid, ctx->gid); Inode *i1 = cfuse->iget(parent), *i2; if (!i1) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); memset(&fe, 0, sizeof(fe)); // pass &i2 for the created inode so that ll_create takes an initial ll_ref int r = cfuse->client->ll_create(i1, name, mode, fi->flags, &fe.attr, &i2, &fh, perms); if (r == 0) { fi->fh = (uint64_t)fh; fe.ino = cfuse->make_fake_ino(fe.attr.st_ino, fe.attr.st_dev); #if FUSE_VERSION >= FUSE_MAKE_VERSION(2, 8) auto fuse_disable_pagecache = cfuse->client->cct->_conf.get_val<bool>( "fuse_disable_pagecache"); auto fuse_use_invalidate_cb = cfuse->client->cct->_conf.get_val<bool>( "fuse_use_invalidate_cb"); if (fuse_disable_pagecache) fi->direct_io = 1; else if (fuse_use_invalidate_cb) fi->keep_cache = 1; #endif fuse_reply_create(req, &fe, fi); } else fuse_reply_err(req, get_sys_errno(-r)); // XXX NB, we dont iput(i2) because FUSE will do so in a matching // fuse_ll_forget() cfuse->iput(i1); // iput required } static void fuse_ll_statfs(fuse_req_t req, fuse_ino_t ino) { struct statvfs stbuf; CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); const struct fuse_ctx *ctx = fuse_req_ctx(req); UserPerm perms(ctx->uid, ctx->gid); Inode *in = cfuse->iget(ino); if (!in) { fuse_reply_err(req, get_sys_errno(CEPHFS_EINVAL)); return; } get_fuse_groups(perms, req); int r = cfuse->client->ll_statfs(in, &stbuf, perms); if (r == 0) fuse_reply_statfs(req, &stbuf); else fuse_reply_err(req, get_sys_errno(-r)); cfuse->iput(in); // iput required } static void fuse_ll_getlk(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi, struct flock *lock) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); Fh *fh = reinterpret_cast<Fh*>(fi->fh); int r = cfuse->client->ll_getlk(fh, lock, fi->lock_owner); if (r == 0) fuse_reply_lock(req, lock); else fuse_reply_err(req, get_sys_errno(-r)); } static void fuse_ll_setlk(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi, struct flock *lock, int sleep) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); Fh *fh = reinterpret_cast<Fh*>(fi->fh); // must use multithread if operation may block auto fuse_multithreaded = cfuse->client->cct->_conf.get_val<bool>( "fuse_multithreaded"); if (!fuse_multithreaded && sleep && lock->l_type != F_UNLCK) { fuse_reply_err(req, EDEADLK); return; } int r = cfuse->client->ll_setlk(fh, lock, fi->lock_owner, sleep); 
fuse_reply_err(req, get_sys_errno(-r)); } static void fuse_ll_interrupt(fuse_req_t req, void* data) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); cfuse->client->ll_interrupt(data); } static void switch_interrupt_cb(void *handle, void* data) { CephFuse::Handle *cfuse = (CephFuse::Handle *)handle; fuse_req_t req = cfuse->get_fuse_req(); if (data) fuse_req_interrupt_func(req, fuse_ll_interrupt, data); else fuse_req_interrupt_func(req, NULL, NULL); } #if FUSE_VERSION >= FUSE_MAKE_VERSION(2, 9) static void fuse_ll_flock(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi, int cmd) { CephFuse::Handle *cfuse = fuse_ll_req_prepare(req); Fh *fh = (Fh*)fi->fh; // must use multithread if operation may block auto fuse_multithreaded = cfuse->client->cct->_conf.get_val<bool>( "fuse_multithreaded"); if (!fuse_multithreaded && !(cmd & (LOCK_NB | LOCK_UN))) { fuse_reply_err(req, EDEADLK); return; } int r = cfuse->client->ll_flock(fh, cmd, fi->lock_owner); fuse_reply_err(req, get_sys_errno(-r)); } #endif #if !defined(__APPLE__) static mode_t umask_cb(void *handle) { CephFuse::Handle *cfuse = (CephFuse::Handle *)handle; fuse_req_t req = cfuse->get_fuse_req(); const struct fuse_ctx *ctx = fuse_req_ctx(req); return ctx->umask; } #endif static void ino_invalidate_cb(void *handle, vinodeno_t vino, int64_t off, int64_t len) { #if FUSE_VERSION >= FUSE_MAKE_VERSION(2, 8) CephFuse::Handle *cfuse = (CephFuse::Handle *)handle; fuse_ino_t fino = cfuse->make_fake_ino(vino.ino, vino.snapid); #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) fuse_lowlevel_notify_inval_inode(cfuse->se, fino, off, len); #else fuse_lowlevel_notify_inval_inode(cfuse->ch, fino, off, len); #endif #endif } static void dentry_invalidate_cb(void *handle, vinodeno_t dirino, vinodeno_t ino, const char *name, size_t len) { CephFuse::Handle *cfuse = (CephFuse::Handle *)handle; fuse_ino_t fdirino = cfuse->make_fake_ino(dirino.ino, dirino.snapid); #if FUSE_VERSION >= FUSE_MAKE_VERSION(2, 9) fuse_ino_t fino = 0; if (ino.ino != inodeno_t()) fino = cfuse->make_fake_ino(ino.ino, ino.snapid); #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) fuse_lowlevel_notify_delete(cfuse->se, fdirino, fino, name, len); #else fuse_lowlevel_notify_delete(cfuse->ch, fdirino, fino, name, len); #endif #elif FUSE_VERSION >= FUSE_MAKE_VERSION(2, 8) fuse_lowlevel_notify_inval_entry(cfuse->ch, fdirino, name, len); #endif } static int remount_cb(void *handle) { // used for trimming kernel dcache. 
when remounting a file system, linux kernel // trims all unused dentries in the file system char cmd[128+PATH_MAX]; CephFuse::Handle *cfuse = (CephFuse::Handle *)handle; snprintf(cmd, sizeof(cmd), "LIBMOUNT_FSTAB=/dev/null mount -i -o remount %s", #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) cfuse->opts.mountpoint); #else cfuse->mountpoint); #endif int r = system(cmd); if (r != 0 && r != -1) { r = WEXITSTATUS(r); } return r; } static void do_init(void *data, fuse_conn_info *conn) { CephFuse::Handle *cfuse = (CephFuse::Handle *)data; Client *client = cfuse->client; #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) fuse_apply_conn_info_opts(cfuse->conn_opts, conn); #endif if(conn->capable & FUSE_CAP_SPLICE_MOVE) conn->want |= FUSE_CAP_SPLICE_MOVE; #if !defined(__APPLE__) if (!client->fuse_default_permissions && client->ll_handle_umask()) { // apply umask in userspace if posix acl is enabled if(conn->capable & FUSE_CAP_DONT_MASK) conn->want |= FUSE_CAP_DONT_MASK; } if(conn->capable & FUSE_CAP_EXPORT_SUPPORT) conn->want |= FUSE_CAP_EXPORT_SUPPORT; #endif if (cfuse->fd_on_success) { //cout << "fuse init signaling on fd " << fd_on_success << std::endl; // see Preforker::daemonize(), ceph-fuse's parent process expects a `-1` // from a daemonized child process. uint32_t r = -1; int err = safe_write(cfuse->fd_on_success, &r, sizeof(r)); if (err) { derr << "fuse_ll: do_init: safe_write failed with error " << cpp_strerror(err) << dendl; ceph_abort(); } //cout << "fuse init done signaling on fd " << fd_on_success << std::endl; // close stdout, etc. ::close(0); ::close(1); ::close(2); } } const static struct fuse_lowlevel_ops fuse_ll_oper = { init: do_init, destroy: 0, lookup: fuse_ll_lookup, forget: fuse_ll_forget, getattr: fuse_ll_getattr, setattr: fuse_ll_setattr, readlink: fuse_ll_readlink, mknod: fuse_ll_mknod, mkdir: fuse_ll_mkdir, unlink: fuse_ll_unlink, rmdir: fuse_ll_rmdir, symlink: fuse_ll_symlink, rename: fuse_ll_rename, link: fuse_ll_link, open: fuse_ll_open, read: fuse_ll_read, write: fuse_ll_write, flush: fuse_ll_flush, release: fuse_ll_release, fsync: fuse_ll_fsync, opendir: fuse_ll_opendir, readdir: fuse_ll_readdir, releasedir: fuse_ll_releasedir, fsyncdir: fuse_ll_fsyncdir, statfs: fuse_ll_statfs, setxattr: fuse_ll_setxattr, getxattr: fuse_ll_getxattr, listxattr: fuse_ll_listxattr, removexattr: fuse_ll_removexattr, access: fuse_ll_access, create: fuse_ll_create, getlk: fuse_ll_getlk, setlk: fuse_ll_setlk, bmap: 0, #if FUSE_VERSION >= FUSE_MAKE_VERSION(2, 8) #ifdef FUSE_IOCTL_COMPAT ioctl: fuse_ll_ioctl, #else ioctl: 0, #endif poll: 0, #endif #if FUSE_VERSION >= FUSE_MAKE_VERSION(2, 9) write_buf: 0, retrieve_reply: 0, forget_multi: 0, flock: fuse_ll_flock, #endif #if FUSE_VERSION >= FUSE_MAKE_VERSION(2, 9) fallocate: fuse_ll_fallocate #endif }; CephFuse::Handle::Handle(Client *c, int fd) : fd_on_success(fd), client(c) { memset(&args, 0, sizeof(args)); #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) memset(&opts, 0, sizeof(opts)); #endif } CephFuse::Handle::~Handle() { fuse_opt_free_args(&args); } void CephFuse::Handle::finalize() { #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) if (se) { fuse_remove_signal_handlers(se); fuse_session_unmount(se); fuse_session_destroy(se); } if (conn_opts) free(conn_opts); if (opts.mountpoint) free(opts.mountpoint); #else if (se) fuse_remove_signal_handlers(se); if (ch) fuse_session_remove_chan(ch); if (se) fuse_session_destroy(se); if (ch) fuse_unmount(mountpoint, ch); #endif pthread_key_delete(fuse_req_key); } int CephFuse::Handle::init(int argc, const char *argv[]) 
{ int r = pthread_key_create(&fuse_req_key, NULL); if (r) { derr << "pthread_key_create failed." << dendl; return r; } // set up fuse argc/argv int newargc = 0; const char **newargv = (const char **) malloc((argc + 17) * sizeof(char *)); if(!newargv) return ENOMEM; newargv[newargc++] = argv[0]; newargv[newargc++] = "-f"; // stay in foreground auto fuse_allow_other = client->cct->_conf.get_val<bool>( "fuse_allow_other"); auto fuse_default_permissions = client->cct->_conf.get_val<bool>( "fuse_default_permissions"); #if FUSE_VERSION < FUSE_MAKE_VERSION(3, 0) auto fuse_big_writes = client->cct->_conf.get_val<bool>( "fuse_big_writes"); #endif auto fuse_max_write = client->cct->_conf.get_val<Option::size_t>( "fuse_max_write"); auto fuse_atomic_o_trunc = client->cct->_conf.get_val<bool>( "fuse_atomic_o_trunc"); auto fuse_splice_read = client->cct->_conf.get_val<bool>( "fuse_splice_read"); auto fuse_splice_write = client->cct->_conf.get_val<bool>( "fuse_splice_write"); auto fuse_splice_move = client->cct->_conf.get_val<bool>( "fuse_splice_move"); auto fuse_debug = client->cct->_conf.get_val<bool>( "fuse_debug"); if (fuse_allow_other) { newargv[newargc++] = "-o"; newargv[newargc++] = "allow_other"; } if (fuse_default_permissions) { newargv[newargc++] = "-o"; newargv[newargc++] = "default_permissions"; } #if defined(__linux__) #if FUSE_VERSION < FUSE_MAKE_VERSION(3, 0) if (fuse_big_writes) { newargv[newargc++] = "-o"; newargv[newargc++] = "big_writes"; } #endif if (fuse_max_write > 0) { char strsplice[65]; newargv[newargc++] = "-o"; sprintf(strsplice, "max_write=%zu", (size_t)fuse_max_write); newargv[newargc++] = strsplice; } if (fuse_atomic_o_trunc) { newargv[newargc++] = "-o"; newargv[newargc++] = "atomic_o_trunc"; } if (fuse_splice_read) { newargv[newargc++] = "-o"; newargv[newargc++] = "splice_read"; } if (fuse_splice_write) { newargv[newargc++] = "-o"; newargv[newargc++] = "splice_write"; } if (fuse_splice_move) { newargv[newargc++] = "-o"; newargv[newargc++] = "splice_move"; } #endif if (fuse_debug) newargv[newargc++] = "-d"; for (int argctr = 1; argctr < argc; argctr++) newargv[newargc++] = argv[argctr]; derr << "init, newargv = " << newargv << " newargc=" << newargc << dendl; struct fuse_args a = FUSE_ARGS_INIT(newargc, (char**)newargv); args = a; // Roundabout construction b/c FUSE_ARGS_INIT is for initialization not assignment #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) if (fuse_parse_cmdline(&args, &opts) == -1) { #else if (fuse_parse_cmdline(&args, &mountpoint, NULL, NULL) == -1) { #endif derr << "fuse_parse_cmdline failed." << dendl; fuse_opt_free_args(&args); free(newargv); return EINVAL; } #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) derr << "init, args.argv = " << args.argv << " args.argc=" << args.argc << dendl; conn_opts = fuse_parse_conn_info_opts(&args); if (!conn_opts) { derr << "fuse_parse_conn_info_opts failed" << dendl; fuse_opt_free_args(&args); free(newargv); return EINVAL; } #endif ceph_assert(args.allocated); // Checking fuse has realloc'd args so we can free newargv free(newargv); struct ceph_client_callback_args cb_args = { handle: this, ino_cb: client->cct->_conf.get_val<bool>("fuse_use_invalidate_cb") ? 
ino_invalidate_cb : NULL, dentry_cb: dentry_invalidate_cb, switch_intr_cb: switch_interrupt_cb, #if defined(__linux__) remount_cb: remount_cb, #endif #if !defined(__APPLE__) umask_cb: umask_cb, #endif }; r = client->ll_register_callbacks2(&cb_args); if (r) { derr << "registering callbacks failed: " << r << dendl; return r; } return 0; } int CephFuse::Handle::start() { bool is_mounted = false; #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) int err = already_fuse_mounted(opts.mountpoint, is_mounted); #else int err = already_fuse_mounted(mountpoint, is_mounted); #endif if (err) { return err; } if (is_mounted) { return EBUSY; } #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) se = fuse_session_new(&args, &fuse_ll_oper, sizeof(fuse_ll_oper), this); if (!se) { derr << "fuse_session_new failed" << dendl; return EDOM; } #else ch = fuse_mount(mountpoint, &args); if (!ch) { derr << "fuse_mount(mountpoint=" << mountpoint << ") failed." << dendl; return EIO; } se = fuse_lowlevel_new(&args, &fuse_ll_oper, sizeof(fuse_ll_oper), this); if (!se) { derr << "fuse_lowlevel_new failed" << dendl; return EDOM; } #endif signal(SIGTERM, SIG_DFL); signal(SIGINT, SIG_DFL); if (fuse_set_signal_handlers(se) == -1) { derr << "fuse_set_signal_handlers failed" << dendl; return ENOSYS; } #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) if (fuse_session_mount(se, opts.mountpoint) != 0) { derr << "fuse_session_mount failed" << dendl; return ENOSYS; } #else fuse_session_add_chan(se, ch); #endif return 0; } int CephFuse::Handle::loop() { auto fuse_multithreaded = client->cct->_conf.get_val<bool>( "fuse_multithreaded"); if (fuse_multithreaded) { #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 12) { struct fuse_loop_config *conf = fuse_loop_cfg_create(); ceph_assert(conf != nullptr); fuse_loop_cfg_set_clone_fd(conf, opts.clone_fd); fuse_loop_cfg_set_idle_threads(conf, opts.max_idle_threads); fuse_loop_cfg_set_max_threads(conf, opts.max_threads); int r = fuse_session_loop_mt(se, conf); fuse_loop_cfg_destroy(conf); return r; } #elif FUSE_VERSION >= FUSE_MAKE_VERSION(3, 1) { struct fuse_loop_config conf = { clone_fd: opts.clone_fd, max_idle_threads: opts.max_idle_threads }; return fuse_session_loop_mt(se, &conf); } #elif FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) return fuse_session_loop_mt(se, opts.clone_fd); #else return fuse_session_loop_mt(se); #endif } else { return fuse_session_loop(se); } } uint64_t CephFuse::Handle::fino_snap(uint64_t fino) { if (fino == FUSE_ROOT_ID) return CEPH_NOSNAP; if (client->use_faked_inos()) { vinodeno_t vino = client->map_faked_ino(fino); return vino.snapid; } else { std::lock_guard l(stag_lock); uint64_t stag = FINO_STAG(fino); if (stag == 0) return CEPH_NOSNAP; else if (stag == 1) return CEPH_SNAPDIR; inodeno_t ino = FINO_INO(fino); // does the fino_maps for the ino exist ? if (!g_fino_maps.count(ino)) return CEPH_MAXSNAP; auto &fino_maps = g_fino_maps[ino]; // does the stagid <--> snapid map exist ? 
if (!fino_maps.stag_snap_map.count(stag)) return CEPH_MAXSNAP; // get the snapid return fino_maps.stag_snap_map[stag]; } } Inode * CephFuse::Handle::iget(fuse_ino_t fino) { if (fino == FUSE_ROOT_ID) return client->get_root(); if (client->use_faked_inos()) { return client->ll_get_inode((ino_t)fino); } else { uint64_t snap = fino_snap(fino); if (snap == CEPH_MAXSNAP) return NULL; vinodeno_t vino(FINO_INO(fino), snap); return client->ll_get_inode(vino); } } void CephFuse::Handle::iput(Inode *in) { client->ll_put(in); } uint64_t CephFuse::Handle::make_fake_ino(inodeno_t ino, snapid_t snapid) { if (client->use_faked_inos()) { // already faked by libcephfs if (ino == client->get_root_ino()) return FUSE_ROOT_ID; return ino; } else { if (snapid == CEPH_NOSNAP && ino == client->get_root_ino()) return FUSE_ROOT_ID; int stag; if (snapid == CEPH_NOSNAP) { stag = G_NOSNAP_STAG; } else if (snapid == CEPH_SNAPDIR) { stag = G_SNAPDIR_STAG; } else { std::lock_guard l(stag_lock); auto &fino_maps = g_fino_maps[ino]; // will insert it anyway if it does not exist // does it already exist? if (fino_maps.snap_stag_map.count(snapid)) { inodeno_t fino = MAKE_FINO(ino, fino_maps.snap_stag_map[snapid]); return fino; } // create a new snapid <--> stagid map int first = fino_maps.last_stag & STAG_MASK; stag = (++fino_maps.last_stag) & STAG_MASK; for (; stag != first; stag = (++fino_maps.last_stag) & STAG_MASK) { // stag 0 is reserved for CEPH_NOSNAP and 1 for CEPH_SNAPDIR if (stag == 0 || stag == 1) continue; // is the new stag unused? if (!fino_maps.stag_snap_map.count(stag)) { fino_maps.snap_stag_map[snapid] = stag; fino_maps.stag_snap_map[stag] = snapid; break; } // the stag is already used by a snapid, // try to free it auto _snapid = fino_maps.stag_snap_map[stag]; if (!client->ll_get_snap_ref(_snapid)) { fino_maps.snap_stag_map.erase(_snapid); fino_maps.snap_stag_map[snapid] = stag; fino_maps.stag_snap_map[stag] = snapid; break; } } if (stag == first) { /* * We should never get here: each directory allows at most 4K * snapshots, while around 64K (0xffff - 2) stags are available * per directory. * * For more detail, see the mds 'mds_max_snaps_per_dir' option. */ ceph_abort_msg("run out of stag"); } } inodeno_t fino = MAKE_FINO(ino, stag); //cout << "make_fake_ino " << ino << "." << snapid << " -> " << fino << std::endl; return fino; } } void CephFuse::Handle::set_fuse_req(fuse_req_t req) { pthread_setspecific(fuse_req_key, (void*)req); } fuse_req_t CephFuse::Handle::get_fuse_req() { return (fuse_req_t) pthread_getspecific(fuse_req_key); } CephFuse::CephFuse(Client *c, int fd) : _handle(new CephFuse::Handle(c, fd)) { } CephFuse::~CephFuse() { delete _handle; } int CephFuse::init(int argc, const char *argv[]) { return _handle->init(argc, argv); } int CephFuse::start() { return _handle->start(); } int CephFuse::loop() { return _handle->loop(); } void CephFuse::finalize() { return _handle->finalize(); } std::string CephFuse::get_mount_point() const { #if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0) if (_handle->opts.mountpoint) { return _handle->opts.mountpoint; #else if (_handle->mountpoint) { return _handle->mountpoint; #endif } else { return ""; } }
49,980
26.537741
95
cc
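The fake-inode scheme above packs an inode number and a per-inode snapshot tag ("stag") into the single 64-bit inode that FUSE sees, via the MAKE_FINO/FINO_INO/FINO_STAG macros defined earlier in fuse_ll.cc. Here is a minimal, self-contained sketch of that packing idea; the 48/16 bit split is an assumption for illustration only, and the reserved stag values mirror the code above.

#include <cassert>
#include <cstdint>

// Assumed split: low 48 bits carry the inode, high 16 bits carry the stag.
constexpr int STAG_SHIFT = 48;
constexpr uint64_t INO_MASK = (1ULL << STAG_SHIFT) - 1;

uint64_t make_fino(uint64_t ino, uint64_t stag) { return ino | (stag << STAG_SHIFT); }
uint64_t fino_ino(uint64_t fino) { return fino & INO_MASK; }
uint64_t fino_stag(uint64_t fino) { return fino >> STAG_SHIFT; }

int main() {
  // stag 0 and 1 are reserved (CEPH_NOSNAP / CEPH_SNAPDIR), so a real
  // snapshot gets a stag >= 2 from the snap_stag_map shown above.
  uint64_t fino = make_fino(0x12345, 2);
  assert(fino_ino(fino) == 0x12345);
  assert(fino_stag(fino) == 2);
  return 0;
}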
null
ceph-main/src/client/fuse_ll.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <[email protected]> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ class CephFuse { public: CephFuse(Client *c, int fd); ~CephFuse(); int init(int argc, const char *argv[]); int start(); int mount(); int loop(); void finalize(); class Handle; std::string get_mount_point() const; private: CephFuse::Handle *_handle; };
701
23.206897
71
h
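For orientation, this is the call order the CephFuse facade above implies, as a hedged sketch: obtaining a connected Client* is elided and error handling is minimal. The fd passed to the constructor is the one do_init() writes to on success (to unblock a daemonizing parent); the real ceph-fuse driver does considerably more.

int run_fuse(Client *client, int fd_on_success, int argc, const char *argv[]) {
  CephFuse fuse(client, fd_on_success); // fd is signaled from do_init() on success
  int r = fuse.init(argc, argv);        // parse FUSE options, register callbacks
  if (r != 0)
    return r;
  r = fuse.start();                     // create the session and mount
  if (r == 0)
    r = fuse.loop();                    // serve requests until unmounted
  fuse.finalize();                      // tear down session and signal handlers
  return r;
}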
null
ceph-main/src/client/ioctl.h
#ifndef FS_CEPH_IOCTL_H #define FS_CEPH_IOCTL_H #include "include/int_types.h" #if defined(__linux__) #include <linux/ioctl.h> #include <linux/types.h> #elif defined(__APPLE__) || defined(__FreeBSD__) #include <sys/ioctl.h> #include <sys/types.h> #endif #define CEPH_IOCTL_MAGIC 0x97 /* just use u64 to align sanely on all archs */ struct ceph_ioctl_layout { __u64 stripe_unit, stripe_count, object_size; __u64 data_pool; __s64 unused; }; #define CEPH_IOC_GET_LAYOUT _IOR(CEPH_IOCTL_MAGIC, 1, \ struct ceph_ioctl_layout) #define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2, \ struct ceph_ioctl_layout) #define CEPH_IOC_SET_LAYOUT_POLICY _IOW(CEPH_IOCTL_MAGIC, 5, \ struct ceph_ioctl_layout) /* * Extract identity, address of the OSD and object storing a given * file offset. */ struct ceph_ioctl_dataloc { __u64 file_offset; /* in+out: file offset */ __u64 object_offset; /* out: offset in object */ __u64 object_no; /* out: object # */ __u64 object_size; /* out: object size */ char object_name[64]; /* out: object name */ __u64 block_offset; /* out: offset in block */ __u64 block_size; /* out: block length */ __s64 osd; /* out: osd # */ struct sockaddr_storage osd_addr; /* out: osd address */ }; #define CEPH_IOC_GET_DATALOC _IOWR(CEPH_IOCTL_MAGIC, 3, \ struct ceph_ioctl_dataloc) #define CEPH_IOC_LAZYIO _IO(CEPH_IOCTL_MAGIC, 4) #endif
1,482
27.519231
66
h
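The dataloc ioctl above reports which object holds a given file offset. As a rough illustration of the striping arithmetic involved, the sketch below follows the usual Ceph striping rules for a stripe_unit/stripe_count/object_size layout; treat it as illustrative, not as the authoritative client implementation behind CEPH_IOC_GET_DATALOC.

#include <cstdint>
#include <cstdio>

// Map a file offset to an object number for a layout (su, sc, os).
uint64_t object_no(uint64_t off, uint64_t su, uint64_t sc, uint64_t os) {
  uint64_t blockno = off / su;            // which stripe unit
  uint64_t stripeno = blockno / sc;       // which stripe
  uint64_t stripepos = blockno % sc;      // object index within the stripe
  uint64_t setno = stripeno / (os / su);  // which object set
  return setno * sc + stripepos;
}

int main() {
  // e.g. stripe_unit = 1 MiB, stripe_count = 2, object_size = 4 MiB:
  // offset 5 MiB falls into block 5 -> stripe 2, position 1 -> object 1
  std::printf("object %llu\n",
              (unsigned long long)object_no(5ULL << 20, 1ULL << 20, 2, 4ULL << 20));
  return 0;
}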
null
ceph-main/src/client/posix_acl.cc
#include "include/compat.h" #include "include/types.h" #include "include/fs_types.h" #include <sys/stat.h> #include "posix_acl.h" #include "UserPerm.h" int posix_acl_check(const void *xattr, size_t size) { const acl_ea_header *header; if (size < sizeof(*header)) return -1; header = reinterpret_cast<const acl_ea_header*>(xattr); ceph_le32 expected_version; expected_version = ACL_EA_VERSION; if (header->a_version != expected_version) return -1; const acl_ea_entry *entry = header->a_entries; size -= sizeof(*header); if (size % sizeof(*entry)) return -1; int count = size / sizeof(*entry); if (count == 0) return 0; int state = ACL_USER_OBJ; int needs_mask = 0; for (int i = 0; i < count; ++i) { __u16 tag = entry->e_tag; switch(tag) { case ACL_USER_OBJ: if (state == ACL_USER_OBJ) { state = ACL_USER; break; } return -1; case ACL_USER: if (state != ACL_USER) return -1; needs_mask = 1; break; case ACL_GROUP_OBJ: if (state == ACL_USER) { state = ACL_GROUP; break; } return -1; case ACL_GROUP: if (state != ACL_GROUP) return -1; needs_mask = 1; break; case ACL_MASK: if (state != ACL_GROUP) return -1; state = ACL_OTHER; break; case ACL_OTHER: if (state == ACL_OTHER || (state == ACL_GROUP && !needs_mask)) { state = 0; break; } // fall-thru default: return -1; } ++entry; } return state == 0 ? count : -1; } int posix_acl_equiv_mode(const void *xattr, size_t size, mode_t *mode_p) { if (posix_acl_check(xattr, size) < 0) return -CEPHFS_EINVAL; int not_equiv = 0; mode_t mode = 0; const acl_ea_header *header = reinterpret_cast<const acl_ea_header*>(xattr); const acl_ea_entry *entry = header->a_entries; int count = (size - sizeof(*header)) / sizeof(*entry); for (int i = 0; i < count; ++i) { __u16 tag = entry->e_tag; __u16 perm = entry->e_perm; switch(tag) { case ACL_USER_OBJ: mode |= (perm & S_IRWXO) << 6; break; case ACL_GROUP_OBJ: mode |= (perm & S_IRWXO) << 3; break; case ACL_OTHER: mode |= perm & S_IRWXO; break; case ACL_MASK: mode = (mode & ~S_IRWXG) | ((perm & S_IRWXO) << 3); /* fall through */ case ACL_USER: case ACL_GROUP: not_equiv = 1; break; default: return -CEPHFS_EINVAL; } ++entry; } if (mode_p) *mode_p = (*mode_p & ~ACCESSPERMS) | mode; return not_equiv; } int posix_acl_inherit_mode(bufferptr& acl, mode_t *mode_p) { if (posix_acl_check(acl.c_str(), acl.length()) <= 0) return -CEPHFS_EIO; acl_ea_entry *group_entry = NULL, *mask_entry = NULL; mode_t mode = *mode_p; int not_equiv = 0; acl_ea_header *header = reinterpret_cast<acl_ea_header*>(acl.c_str()); acl_ea_entry *entry = header->a_entries; int count = (acl.length() - sizeof(*header)) / sizeof(*entry); for (int i = 0; i < count; ++i) { __u16 tag = entry->e_tag; __u16 perm = entry->e_perm; switch(tag) { case ACL_USER_OBJ: perm &= (mode >> 6) | ~S_IRWXO; mode &= (perm << 6) | ~S_IRWXU; entry->e_perm = perm; break; case ACL_USER: case ACL_GROUP: not_equiv = 1; break; case ACL_GROUP_OBJ: group_entry = entry; break; case ACL_OTHER: perm &= mode | ~S_IRWXO; mode &= perm | ~S_IRWXO; entry->e_perm = perm; break; case ACL_MASK: mask_entry = entry; not_equiv = 1; break; default: return -CEPHFS_EIO; } ++entry; } if (mask_entry) { __u16 perm = mask_entry->e_perm; perm &= (mode >> 3) | ~S_IRWXO; mode &= (perm << 3) | ~S_IRWXG; mask_entry->e_perm = perm; } else { if (!group_entry) return -CEPHFS_EIO; __u16 perm = group_entry->e_perm; perm &= (mode >> 3) | ~S_IRWXO; mode &= (perm << 3) | ~S_IRWXG; group_entry->e_perm = perm; } *mode_p = (*mode_p & ~ACCESSPERMS) | mode; return not_equiv; } int posix_acl_access_chmod(bufferptr& acl, mode_t mode) { if 
(posix_acl_check(acl.c_str(), acl.length()) <= 0) return -CEPHFS_EIO; acl_ea_entry *group_entry = NULL, *mask_entry = NULL; acl_ea_header *header = reinterpret_cast<acl_ea_header*>(acl.c_str()); acl_ea_entry *entry = header->a_entries; int count = (acl.length() - sizeof(*header)) / sizeof(*entry); for (int i = 0; i < count; ++i) { __u16 tag = entry->e_tag; switch(tag) { case ACL_USER_OBJ: entry->e_perm = (mode & S_IRWXU) >> 6; break; case ACL_GROUP_OBJ: group_entry = entry; break; case ACL_MASK: mask_entry = entry; break; case ACL_OTHER: entry->e_perm = mode & S_IRWXO; break; default: break; } ++entry; } if (mask_entry) { mask_entry->e_perm = (mode & S_IRWXG) >> 3; } else { if (!group_entry) return -CEPHFS_EIO; group_entry->e_perm = (mode & S_IRWXG) >> 3; } return 0; } int posix_acl_permits(const bufferptr& acl, uid_t i_uid, gid_t i_gid, const UserPerm& perms, unsigned want) { if (posix_acl_check(acl.c_str(), acl.length()) < 0) return -CEPHFS_EIO; const acl_ea_header *header = reinterpret_cast<const acl_ea_header*>(acl.c_str()); const acl_ea_entry *entry = header->a_entries; const acl_ea_entry *next_entry; __u16 perm, tag; __u32 id; int group_found = 0; int idx; int count = (acl.length() - sizeof(*header)) / sizeof(*entry); for (idx = 0; idx < count; ++idx) { tag = entry->e_tag; perm = entry->e_perm; switch(tag) { case ACL_USER_OBJ: if (i_uid == perms.uid()) goto check_perm; break; case ACL_USER: id = entry->e_id; if (id == perms.uid()) goto check_mask; break; case ACL_GROUP_OBJ: /* fall through */ case ACL_GROUP: id = (tag == ACL_GROUP_OBJ) ? i_gid : entry->e_id; if (perms.gid_in_groups(id)) { group_found = 1; if ((perm & want) == want) goto check_mask; } break; case ACL_MASK: break; case ACL_OTHER: if (group_found) return -CEPHFS_EACCES; else goto check_perm; break; default: return -CEPHFS_EIO; } ++entry; } return -CEPHFS_EIO; check_mask: next_entry = entry + 1; for (++idx; idx < count; ++idx) { tag = next_entry->e_tag; if (tag == ACL_MASK) { __u16 mask = next_entry->e_perm; if ((perm & mask & want) == want) return 0; return -CEPHFS_EACCES; } ++next_entry; } check_perm: if ((perm & want) == want) return 0; return -CEPHFS_EACCES; }
6,553
21.678201
84
cc
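The validation state machine in posix_acl_check() above expects entries ordered USER_OBJ, [USER...], GROUP_OBJ, [GROUP...], [MASK], OTHER. Below is a self-contained sketch of building a minimal, mask-less ACL xattr in that wire format; plain integer fields stand in for the ceph_le16/ceph_le32 types declared in posix_acl.h, so a little-endian host is assumed.

#include <cstdint>
#include <cstdio>

struct ea_entry { uint16_t e_tag; uint16_t e_perm; uint32_t e_id; };
struct ea_header { uint32_t a_version; };  // entries follow the header

int main() {
  constexpr uint16_t USER_OBJ = 0x01, GROUP_OBJ = 0x04, OTHER = 0x20;
  alignas(ea_header) unsigned char buf[sizeof(ea_header) + 3 * sizeof(ea_entry)];
  auto *hdr = reinterpret_cast<ea_header *>(buf);
  hdr->a_version = 0x0002;                 // ACL_EA_VERSION
  auto *ent = reinterpret_cast<ea_entry *>(hdr + 1);
  ent[0] = {USER_OBJ, 6, 0};               // rw- for the owner
  ent[1] = {GROUP_OBJ, 4, 0};              // r-- for the owning group
  ent[2] = {OTHER, 4, 0};                  // r-- for everyone else
  // posix_acl_check(buf, sizeof(buf)) accepts this ordering and would
  // return 3 (the entry count); no ACL_MASK is needed without named
  // ACL_USER/ACL_GROUP entries.
  std::printf("built a %zu-byte ACL xattr\n", sizeof(buf));
  return 0;
}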
null
ceph-main/src/client/posix_acl.h
#ifndef CEPH_POSIX_ACL #define CEPH_POSIX_ACL #define ACL_EA_VERSION 0x0002 #define ACL_USER_OBJ 0x01 #define ACL_USER 0x02 #define ACL_GROUP_OBJ 0x04 #define ACL_GROUP 0x08 #define ACL_MASK 0x10 #define ACL_OTHER 0x20 #define ACL_EA_ACCESS "system.posix_acl_access" #define ACL_EA_DEFAULT "system.posix_acl_default" typedef struct { ceph_le16 e_tag; ceph_le16 e_perm; ceph_le32 e_id; } acl_ea_entry; typedef struct { ceph_le32 a_version; acl_ea_entry a_entries[0]; } acl_ea_header; class UserPerm; int posix_acl_check(const void *xattr, size_t size); int posix_acl_equiv_mode(const void *xattr, size_t size, mode_t *mode_p); int posix_acl_inherit_mode(bufferptr& acl, mode_t *mode_p); int posix_acl_access_chmod(bufferptr& acl, mode_t mode); int posix_acl_permits(const bufferptr& acl, uid_t i_uid, gid_t i_gid, const UserPerm& groups, unsigned want); #endif
1,001
26.833333
73
h
null
ceph-main/src/client/test_ioctls.c
#include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <limits.h> #include <sys/ioctl.h> #include <netinet/in.h> #include <sys/socket.h> #include <netdb.h> #include "ioctl.h" char new_file_name[PATH_MAX]; int main(int argc, char **argv) { char *fn; int fd, err; struct ceph_ioctl_layout l; struct ceph_ioctl_dataloc dl; if (argc < 3) { printf("usage: ceph_test_ioctls <filename> <offset>\n"); return 1; } fn = argv[1]; fd = open(fn, O_CREAT|O_RDWR, 0644); if (fd < 0) { perror("couldn't open file"); return 1; } /* get layout */ err = ioctl(fd, CEPH_IOC_GET_LAYOUT, (unsigned long)&l); if (err < 0) { perror("ioctl IOC_GET_LAYOUT error"); return 1; } printf("layout:\n stripe_unit %lld\n stripe_count %lld\n object_size %lld\n data_pool %lld\n", (long long)l.stripe_unit, (long long)l.stripe_count, (long long)l.object_size, (long long)l.data_pool); /* set layout */ l.stripe_unit = 1048576; l.stripe_count = 2; err = ioctl(fd, CEPH_IOC_SET_LAYOUT, (unsigned long)&l); if (err < 0) { perror("ioctl IOC_SET_LAYOUT error"); return 1; } printf("set layout, writing to file\n"); printf("file %s\n", fn); /* get layout again */ err = ioctl(fd, CEPH_IOC_GET_LAYOUT, (unsigned long)&l); if (err < 0) { perror("ioctl IOC_GET_LAYOUT error"); return 1; } printf("layout:\n stripe_unit %lld\n stripe_count %lld\n object_size %lld\n data_pool %lld\n", (long long)l.stripe_unit, (long long)l.stripe_count, (long long)l.object_size, (long long)l.data_pool); /* dataloc */ dl.file_offset = atoll(argv[2]); err = ioctl(fd, CEPH_IOC_GET_DATALOC, (unsigned long)&dl); if (err < 0) { perror("ioctl IOC_GET_DATALOC error"); return 1; } printf("dataloc:\n"); printf(" file_offset %lld (of object start)\n", (long long)dl.file_offset); printf(" object '%s'\n object_offset %lld\n object_size %lld object_no %lld\n", dl.object_name, (long long)dl.object_offset, (long long)dl.object_size, (long long)dl.object_no); printf(" block_offset %lld\n block_size %lld\n", (long long)dl.block_offset, (long long)dl.block_size); char buf[80]; getnameinfo((struct sockaddr *)&dl.osd_addr, sizeof(dl.osd_addr), buf, sizeof(buf), 0, 0, NI_NUMERICHOST); printf(" osd%lld %s\n", (long long)dl.osd, buf); if (argc < 4) return 0; /* set dir default layout */ printf("testing dir policy setting\n"); fd = open(argv[3], O_RDONLY); if (fd < 0) { perror("couldn't open dir"); return 1; } l.object_size = 1048576; l.stripe_count = 1; err = ioctl(fd, CEPH_IOC_SET_LAYOUT_POLICY, (unsigned long)&l); if (err < 0) { perror("ioctl IOC_SET_LAYOUT_POLICY error"); return 1; } printf("set layout, creating file\n"); snprintf(new_file_name, sizeof(new_file_name), "%s/testfile", argv[3]); fd = open(new_file_name, O_CREAT | O_RDWR, 0644); if (fd < 0) { perror("couldn't open file"); return 1; } err = ioctl(fd, CEPH_IOC_GET_LAYOUT, (unsigned long)&l); if (err < 0) { perror("ioctl IOC_GET_LAYOUT error"); return 1; } printf("layout:\n stripe_unit %lld\n stripe_count %lld\n object_size %lld\n data_pool %lld\n", (long long)l.stripe_unit, (long long)l.stripe_count, (long long)l.object_size, (long long)l.data_pool); return 0; }
3,691
29.01626
118
c
null
ceph-main/src/client/hypertable/CephBroker.cc
/** -*- C++ -*- * Copyright (C) 2009-2011 New Dream Network * * This file is part of Hypertable. * * Hypertable is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or any later version. * * Hypertable is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Hypertable. If not, see <http://www.gnu.org/licenses/> * * Authors: * Gregory Farnum <[email protected]> * Colin McCabe <[email protected]> */ #include "Common/Compat.h" #include "CephBroker.h" #include "Common/Error.h" #include "Common/FileUtils.h" #include "Common/Filesystem.h" #include "Common/System.h" #include <cephfs/libcephfs.h> #include <dirent.h> #include <errno.h> #include <fcntl.h> #include <poll.h> #include <string> #include <sys/types.h> #include <sys/uio.h> #include <unistd.h> using namespace Hypertable; std::atomic<int> CephBroker::ms_next_fd{0}; /* A thread-safe version of strerror */ static std::string cpp_strerror(int err) { char buf[128]; if (err < 0) err = -err; std::ostringstream oss; oss << strerror_r(err, buf, sizeof(buf)); return oss.str(); } OpenFileDataCeph::OpenFileDataCeph(struct ceph_mount_info *cmount_, const String& fname, int _fd, int _flags) : cmount(cmount_), fd(_fd), flags(_flags), filename(fname) { } OpenFileDataCeph::~OpenFileDataCeph() { ceph_close(cmount, fd); } CephBroker::CephBroker(PropertiesPtr& cfg) : cmount(NULL) { int ret; String id(cfg->get_str("CephBroker.Id")); m_verbose = cfg->get_bool("Hypertable.Verbose"); m_root_dir = cfg->get_str("CephBroker.RootDir"); String mon_addr(cfg->get_str("CephBroker.MonAddr")); HT_INFO("Calling ceph_create"); ret = ceph_create(&cmount, id.empty() ? NULL : id.c_str()); if (ret) { throw Hypertable::Exception(ret, "ceph_create failed"); } ret = ceph_conf_set(cmount, "mon_host", mon_addr.c_str()); if (ret) { ceph_shutdown(cmount); throw Hypertable::Exception(ret, "ceph_conf_set(mon_addr) failed"); } // For Ceph debugging, uncomment these lines //ceph_conf_set(cmount, "debug_client", "1"); //ceph_conf_set(cmount, "debug_ms", "1"); HT_INFO("Calling ceph_mount"); ret = ceph_mount(cmount, m_root_dir.empty() ? 
NULL : m_root_dir.c_str()); if (ret) { ceph_shutdown(cmount); throw Hypertable::Exception(ret, "ceph_mount failed"); } HT_INFO("Mounted Ceph filesystem."); } CephBroker::~CephBroker() { ceph_shutdown(cmount); cmount = NULL; } void CephBroker::open(ResponseCallbackOpen *cb, const char *fname, uint32_t flags, uint32_t bufsz) { int fd, ceph_fd; String abspath; HT_DEBUGF("open file='%s' bufsz=%d", fname, bufsz); make_abs_path(fname, abspath); fd = atomic_inc_return(&ms_next_fd); if ((ceph_fd = ceph_open(cmount, abspath.c_str(), O_RDONLY, 0)) < 0) { report_error(cb, -ceph_fd); return; } HT_INFOF("open (%s) fd=%" PRIu32 " ceph_fd=%d", fname, fd, ceph_fd); { struct sockaddr_in addr; OpenFileDataCephPtr fdata(new OpenFileDataCeph(cmount, abspath, ceph_fd, O_RDONLY)); cb->get_address(addr); m_open_file_map.create(fd, addr, fdata); cb->response(fd); } } void CephBroker::create(ResponseCallbackOpen *cb, const char *fname, uint32_t flags, int32_t bufsz, int16_t replication, int64_t blksz){ int fd, ceph_fd; int oflags; String abspath; make_abs_path(fname, abspath); HT_DEBUGF("create file='%s' flags=%u bufsz=%d replication=%d blksz=%lld", fname, flags, bufsz, (int)replication, (Lld)blksz); fd = atomic_inc_return(&ms_next_fd); if (flags & Filesystem::OPEN_FLAG_OVERWRITE) oflags = O_WRONLY | O_CREAT | O_TRUNC; else oflags = O_WRONLY | O_CREAT | O_APPEND; //make sure the directories in the path exist String directory = abspath.substr(0, abspath.rfind('/')); int r; HT_INFOF("Calling mkdirs on %s", directory.c_str()); if((r=ceph_mkdirs(cmount, directory.c_str(), 0644)) < 0 && r!=-CEPHFS_EEXIST) { HT_ERRORF("create failed on mkdirs: dname='%s' - %d", directory.c_str(), -r); report_error(cb, -r); return; } //create file if ((ceph_fd = ceph_open(cmount, abspath.c_str(), oflags, 0644)) < 0) { std::string errs(cpp_strerror(-ceph_fd)); HT_ERRORF("open failed: file=%s - %s", abspath.c_str(), errs.c_str()); report_error(cb, ceph_fd); return; } HT_INFOF("create %s = %d", fname, ceph_fd); { struct sockaddr_in addr; OpenFileDataCephPtr fdata (new OpenFileDataCeph(cmount, fname, ceph_fd, O_WRONLY)); cb->get_address(addr); m_open_file_map.create(fd, addr, fdata); cb->response(fd); } } void CephBroker::close(ResponseCallback *cb, uint32_t fd) { if (m_verbose) { HT_INFOF("close fd=%" PRIu32, fd); } OpenFileDataCephPtr fdata; m_open_file_map.get(fd, fdata); m_open_file_map.remove(fd); cb->response_ok(); } void CephBroker::read(ResponseCallbackRead *cb, uint32_t fd, uint32_t amount) { OpenFileDataCephPtr fdata; ssize_t nread; int64_t offset; StaticBuffer buf(new uint8_t [amount], amount); HT_DEBUGF("read fd=%" PRIu32 " amount = %d", fd, amount); if (!m_open_file_map.get(fd, fdata)) { char errbuf[32]; sprintf(errbuf, "%" PRIu32, fd); cb->error(Error::DFSBROKER_BAD_FILE_HANDLE, errbuf); HT_ERRORF("bad file handle: %" PRIu32, fd); return; } if ((offset = ceph_lseek(cmount, fdata->fd, 0, SEEK_CUR)) < 0) { std::string errs(cpp_strerror(offset)); HT_ERRORF("lseek failed: fd=%" PRIu32 " ceph_fd=%d offset=0 SEEK_CUR - %s", fd, fdata->fd, errs.c_str()); report_error(cb, offset); return; } if ((nread = ceph_read(cmount, fdata->fd, (char *)buf.base, amount, 0)) < 0 ) { HT_ERRORF("read failed: fd=%" PRIu32 " ceph_fd=%d amount=%d", fd, fdata->fd, amount); report_error(cb, -nread); return; } buf.size = nread; cb->response((uint64_t)offset, buf); } void CephBroker::append(ResponseCallbackAppend *cb, uint32_t fd, uint32_t amount, const void *data, bool sync) { OpenFileDataCephPtr fdata; ssize_t nwritten; int64_t offset; HT_DEBUG_OUT << 
"append fd="<< fd <<" amount="<< amount <<" data='" << format_bytes(20, data, amount) <<" sync="<< sync << HT_END; if (!m_open_file_map.get(fd, fdata)) { char errbuf[32]; sprintf(errbuf, "%" PRIu32, fd); cb->error(Error::DFSBROKER_BAD_FILE_HANDLE, errbuf); return; } if ((offset = ceph_lseek(cmount, fdata->fd, 0, SEEK_CUR)) < 0) { std::string errs(cpp_strerror(offset)); HT_ERRORF("lseek failed: fd=%" PRIu32 " ceph_fd=%d offset=0 SEEK_CUR - %s", fd, fdata->fd, errs.c_str()); report_error(cb, offset); return; } if ((nwritten = ceph_write(cmount, fdata->fd, (const char *)data, amount, 0)) < 0) { std::string errs(cpp_strerror(nwritten)); HT_ERRORF("write failed: fd=%" PRIu32 " ceph_fd=%d amount=%d - %s", fd, fdata->fd, amount, errs.c_str()); report_error(cb, -nwritten); return; } int r; if (sync && ((r = ceph_fsync(cmount, fdata->fd, true)) != 0)) { std::string errs(cpp_strerror(errno)); HT_ERRORF("flush failed: fd=%" PRIu32 " ceph_fd=%d - %s", fd, fdata->fd, errs.c_str()); report_error(cb, r); return; } cb->response((uint64_t)offset, nwritten); } void CephBroker::seek(ResponseCallback *cb, uint32_t fd, uint64_t offset) { OpenFileDataCephPtr fdata; HT_DEBUGF("seek fd=%" PRIu32 " offset=%llu", fd, (Llu)offset); if (!m_open_file_map.get(fd, fdata)) { char errbuf[32]; sprintf(errbuf, "%" PRIu32, fd); cb->error(Error::DFSBROKER_BAD_FILE_HANDLE, errbuf); return; } loff_t res = ceph_lseek(cmount, fdata->fd, offset, SEEK_SET); if (res < 0) { std::string errs(cpp_strerror((int)res)); HT_ERRORF("lseek failed: fd=%" PRIu32 " ceph_fd=%d offset=%llu - %s", fd, fdata->fd, (Llu)offset, errs.c_str()); report_error(cb, offset); return; } cb->response_ok(); } void CephBroker::remove(ResponseCallback *cb, const char *fname) { String abspath; HT_DEBUGF("remove file='%s'", fname); make_abs_path(fname, abspath); int r; if ((r = ceph_unlink(cmount, abspath.c_str())) < 0) { std::string errs(cpp_strerror(r)); HT_ERRORF("unlink failed: file='%s' - %s", abspath.c_str(), errs.c_str()); report_error(cb, r); return; } cb->response_ok(); } void CephBroker::length(ResponseCallbackLength *cb, const char *fname, bool) { int r; struct ceph_statx stx; HT_DEBUGF("length file='%s'", fname); if ((r = ceph_statx(cmount, fname, &stx, CEPH_STATX_SIZE, AT_SYMLINK_NOFOLLOW)) < 0) { String abspath; make_abs_path(fname, abspath); std::string errs(cpp_strerror(r)); HT_ERRORF("length (stat) failed: file='%s' - %s", abspath.c_str(), errs.c_str()); report_error(cb,- r); return; } cb->response(stx.stx_size); } void CephBroker::pread(ResponseCallbackRead *cb, uint32_t fd, uint64_t offset, uint32_t amount, bool) { OpenFileDataCephPtr fdata; ssize_t nread; StaticBuffer buf(new uint8_t [amount], amount); HT_DEBUGF("pread fd=%" PRIu32 " offset=%llu amount=%d", fd, (Llu)offset, amount); if (!m_open_file_map.get(fd, fdata)) { char errbuf[32]; sprintf(errbuf, "%" PRIu32, fd); cb->error(Error::DFSBROKER_BAD_FILE_HANDLE, errbuf); return; } if ((nread = ceph_read(cmount, fdata->fd, (char *)buf.base, amount, offset)) < 0) { std::string errs(cpp_strerror(nread)); HT_ERRORF("pread failed: fd=%" PRIu32 " ceph_fd=%d amount=%d offset=%llu - %s", fd, fdata->fd, amount, (Llu)offset, errs.c_str()); report_error(cb, nread); return; } buf.size = nread; cb->response(offset, buf); } void CephBroker::mkdirs(ResponseCallback *cb, const char *dname) { String absdir; HT_DEBUGF("mkdirs dir='%s'", dname); make_abs_path(dname, absdir); int r; if((r=ceph_mkdirs(cmount, absdir.c_str(), 0644)) < 0 && r!=-CEPHFS_EEXIST) { HT_ERRORF("mkdirs failed: dname='%s' - %d", 
absdir.c_str(), -r); report_error(cb, -r); return; } cb->response_ok(); } void CephBroker::rmdir(ResponseCallback *cb, const char *dname) { String absdir; int r; make_abs_path(dname, absdir); if((r = rmdir_recursive(absdir.c_str())) < 0) { HT_ERRORF("failed to remove dir %s, got error %d", absdir.c_str(), r); report_error(cb, -r); return; } cb->response_ok(); } int CephBroker::rmdir_recursive(const char *directory) { struct ceph_dir_result *dirp; struct dirent de; struct ceph_statx stx; int r; if ((r = ceph_opendir(cmount, directory, &dirp)) < 0) return r; //failed to open while ((r = ceph_readdirplus_r(cmount, dirp, &de, &stx, CEPH_STATX_INO, AT_STATX_DONT_SYNC, NULL)) > 0) { String new_dir = de.d_name; if(!(new_dir.compare(".")==0 || new_dir.compare("..")==0)) { new_dir = directory; new_dir += '/'; new_dir += de.d_name; if (S_ISDIR(stx.stx_mode)) { //it's a dir, clear it out... if((r=rmdir_recursive(new_dir.c_str())) < 0) return r; } else { //delete this file if((r=ceph_unlink(cmount, new_dir.c_str())) < 0) return r; } } } if (r < 0) return r; //we got an error if ((r = ceph_closedir(cmount, dirp)) < 0) return r; return ceph_rmdir(cmount, directory); } void CephBroker::flush(ResponseCallback *cb, uint32_t fd) { OpenFileDataCephPtr fdata; HT_DEBUGF("flush fd=%" PRIu32, fd); if (!m_open_file_map.get(fd, fdata)) { char errbuf[32]; sprintf(errbuf, "%" PRIu32, fd); cb->error(Error::DFSBROKER_BAD_FILE_HANDLE, errbuf); return; } int r; if ((r = ceph_fsync(cmount, fdata->fd, true)) != 0) { std::string errs(cpp_strerror(r)); HT_ERRORF("flush failed: fd=%" PRIu32 " ceph_fd=%d - %s", fd, fdata->fd, errs.c_str()); report_error(cb, -r); return; } cb->response_ok(); } void CephBroker::status(ResponseCallback *cb) { cb->response_ok(); /*perhaps a total cheat, but both the local and Kosmos brokers included in Hypertable also do this. */ } void CephBroker::shutdown(ResponseCallback *cb) { m_open_file_map.remove_all(); cb->response_ok(); poll(0, 0, 2000); } void CephBroker::readdir(ResponseCallbackReaddir *cb, const char *dname) { std::vector<String> listing; String absdir; HT_DEBUGF("Readdir dir='%s'", dname); //get from ceph in a buffer make_abs_path(dname, absdir); struct ceph_dir_result *dirp; ceph_opendir(cmount, absdir.c_str(), &dirp); int r; int buflen = 100; //good default? 
char *buf = new char[buflen]; String *ent; int bufpos; while (1) { r = ceph_getdnames(cmount, dirp, buf, buflen); if (r==-CEPHFS_ERANGE) { //expand the buffer delete [] buf; buflen *= 2; buf = new char[buflen]; continue; } if (r<=0) break; //if we make it here, we got at least one name, maybe more bufpos = 0; while (bufpos<r) {//make new strings and add them to listing ent = new String(buf+bufpos); if (ent->compare(".") && ent->compare("..")) listing.push_back(*ent); bufpos+=ent->size()+1; delete ent; } } delete [] buf; ceph_closedir(cmount, dirp); if (r < 0) report_error(cb, -r); //Ceph shouldn't return r<0 on getdnames //(except for ERANGE) so if it happens this is bad cb->response(listing); } void CephBroker::exists(ResponseCallbackExists *cb, const char *fname) { String abspath; struct ceph_statx stx; HT_DEBUGF("exists file='%s'", fname); make_abs_path(fname, abspath); cb->response(ceph_statx(cmount, abspath.c_str(), &stx, 0, AT_SYMLINK_NOFOLLOW) == 0); } void CephBroker::rename(ResponseCallback *cb, const char *src, const char *dst) { String src_abs; String dest_abs; int r; make_abs_path(src, src_abs); make_abs_path(dst, dest_abs); if ((r = ceph_rename(cmount, src_abs.c_str(), dest_abs.c_str())) <0 ) { report_error(cb, r); return; } cb->response_ok(); } void CephBroker::debug(ResponseCallback *cb, int32_t command, StaticBuffer &serialized_parameters) { HT_ERROR("debug commands not implemented!"); cb->error(Error::NOT_IMPLEMENTED, format("Debug commands not supported")); } void CephBroker::report_error(ResponseCallback *cb, int error) { char errbuf[128]; errbuf[0] = 0; strerror_r(error, errbuf, 128); cb->error(Error::DFSBROKER_IO_ERROR, errbuf); }
14,707
26.908918
107
cc
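CephBroker::readdir() above illustrates a common libcephfs pattern: ceph_getdnames() fills a caller-supplied buffer with NUL-separated names and returns -CEPHFS_ERANGE when the buffer is too small. Here is that retry loop in isolation, as a hedged sketch with minimal error handling and an illustrative starting buffer size.

#include <cephfs/libcephfs.h>
#include <string>
#include <vector>

int list_names(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp,
               std::vector<std::string> &names) {
  std::vector<char> buf(128);  // deliberately small; grown on demand
  while (true) {
    int r = ceph_getdnames(cmount, dirp, buf.data(), static_cast<int>(buf.size()));
    if (r == -CEPHFS_ERANGE) { // names did not fit: double the buffer, retry
      buf.resize(buf.size() * 2);
      continue;
    }
    if (r <= 0)                // 0: end of directory, <0: a real error
      return r;
    for (int pos = 0; pos < r; ) {  // buffer now holds NUL-separated names
      std::string name(buf.data() + pos);
      pos += static_cast<int>(name.size()) + 1;
      if (name != "." && name != "..")
        names.push_back(std::move(name));
    }
  }
}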
null
ceph-main/src/client/hypertable/CephBroker.h
/** -*- C++ -*- * Copyright (C) 2009-2011 New Dream Network * * This file is part of Hypertable. * * Hypertable is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or any later version. * * Hypertable is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Hypertable. If not, see <http://www.gnu.org/licenses/> * * Authors: * Gregory Farnum <[email protected]> * Colin McCabe <[email protected]> */ #ifndef HYPERTABLE_CEPHBROKER_H #define HYPERTABLE_CEPHBROKER_H extern "C" { #include <unistd.h> } #include <atomic> #include "Common/String.h" #include "Common/Properties.h" #include "DfsBroker/Lib/Broker.h" #include <cephfs/libcephfs.h> namespace Hypertable { using namespace DfsBroker; /** * */ class OpenFileDataCeph : public OpenFileData { public: OpenFileDataCeph(struct ceph_mount_info *cmount_, const String& fname, int _fd, int _flags); virtual ~OpenFileDataCeph(); struct ceph_mount_info *cmount; int fd; int flags; String filename; }; /** * */ class OpenFileDataCephPtr : public OpenFileDataPtr { public: OpenFileDataCephPtr() : OpenFileDataPtr() { } explicit OpenFileDataCephPtr(OpenFileDataCeph *ofdl) : OpenFileDataPtr(ofdl, true) { } OpenFileDataCeph *operator->() const { return static_cast<OpenFileDataCeph *>(get()); } }; /** * */ class CephBroker : public DfsBroker::Broker { public: explicit CephBroker(PropertiesPtr& cfg); virtual ~CephBroker(); virtual void open(ResponseCallbackOpen *cb, const char *fname, uint32_t flags, uint32_t bufsz); virtual void create(ResponseCallbackOpen *cb, const char *fname, uint32_t flags, int32_t bufsz, int16_t replication, int64_t blksz); virtual void close(ResponseCallback *cb, uint32_t fd); virtual void read(ResponseCallbackRead *cb, uint32_t fd, uint32_t amount); virtual void append(ResponseCallbackAppend *cb, uint32_t fd, uint32_t amount, const void *data, bool sync); virtual void seek(ResponseCallback *cb, uint32_t fd, uint64_t offset); virtual void remove(ResponseCallback *cb, const char *fname); virtual void length(ResponseCallbackLength *cb, const char *fname, bool); virtual void pread(ResponseCallbackRead *cb, uint32_t fd, uint64_t offset, uint32_t amount, bool); virtual void mkdirs(ResponseCallback *cb, const char *dname); virtual void rmdir(ResponseCallback *cb, const char *dname); virtual void flush(ResponseCallback *cb, uint32_t fd); virtual void status(ResponseCallback *cb); virtual void shutdown(ResponseCallback *cb); virtual void readdir(ResponseCallbackReaddir *cb, const char *dname); virtual void exists(ResponseCallbackExists *cb, const char *fname); virtual void rename(ResponseCallback *cb, const char *src, const char *dst); virtual void debug(ResponseCallback *, int32_t command, StaticBuffer &serialized_parameters); private: struct ceph_mount_info *cmount; static std::atomic<int> ms_next_fd; virtual void report_error(ResponseCallback *cb, int error); void make_abs_path(const char *fname, String& abs) { if (fname[0] == '/') abs = fname; else abs = m_root_dir + "/" + fname; } int rmdir_recursive(const char *directory); bool m_verbose; String m_root_dir; }; } #endif //HYPERTABLE_CEPH_BROKER_H
3,854
31.669492
91
h
null
ceph-main/src/cls/2pc_queue/cls_2pc_queue.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "include/types.h" #include "cls/2pc_queue/cls_2pc_queue_types.h" #include "cls/2pc_queue/cls_2pc_queue_ops.h" #include "cls/2pc_queue/cls_2pc_queue_const.h" #include "cls/queue/cls_queue_ops.h" #include "cls/queue/cls_queue_src.h" #include "objclass/objclass.h" CLS_VER(1,0) CLS_NAME(2pc_queue) using ceph::bufferlist; using ceph::decode; using ceph::encode; constexpr auto CLS_QUEUE_URGENT_DATA_XATTR_NAME = "cls_queue_urgent_data"; static int cls_2pc_queue_init(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { auto in_iter = in->cbegin(); cls_queue_init_op op; try { decode(op, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_init: failed to decode entry: %s", err.what()); return -EINVAL; } cls_2pc_urgent_data urgent_data; cls_queue_init_op init_op; CLS_LOG(20, "INFO: cls_2pc_queue_init: max size is %lu (bytes)", op.queue_size); init_op.queue_size = op.queue_size; init_op.max_urgent_data_size = 23552; // overall head is 24KB, leaving room for ~1K pending reservation ops encode(urgent_data, init_op.bl_urgent_data); return queue_init(hctx, init_op); } static int cls_2pc_queue_get_capacity(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { cls_queue_get_capacity_ret op_ret; auto ret = queue_get_capacity(hctx, op_ret); if (ret < 0) { return ret; } encode(op_ret, *out); return 0; } static int cls_2pc_queue_get_topic_stats(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { cls_queue_get_stats_ret op_ret; // get head cls_queue_head head; auto ret = queue_read_head(hctx, head); if (ret < 0) { return ret; } const auto remaining_size = (head.tail.offset >= head.front.offset) ? (head.queue_size - head.tail.offset) + (head.front.offset - head.max_head_size) : head.front.offset - head.tail.offset; op_ret.queue_size = head.queue_size - head.max_head_size - remaining_size; cls_2pc_urgent_data urgent_data; try { auto in_iter = head.bl_urgent_data.cbegin(); decode(urgent_data, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_get_topic_stats: failed to decode header of queue: %s", err.what()); return -EINVAL; } op_ret.queue_entries = urgent_data.committed_entries; encode(op_ret, *out); return 0; } static int cls_2pc_queue_reserve(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { cls_2pc_queue_reserve_op res_op; try { auto in_iter = in->cbegin(); decode(res_op, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: failed to decode entry: %s", err.what()); return -EINVAL; } if (res_op.size == 0) { CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: cannot reserve zero bytes"); return -EINVAL; } if (res_op.entries == 0) { CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: cannot reserve zero entries"); return -EINVAL; } // get head cls_queue_head head; int ret = queue_read_head(hctx, head); if (ret < 0) { return ret; } cls_2pc_urgent_data urgent_data; try { auto in_iter = head.bl_urgent_data.cbegin(); decode(urgent_data, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: failed to decode entry: %s", err.what()); return -EINVAL; } const auto overhead = res_op.entries*QUEUE_ENTRY_OVERHEAD; const auto remaining_size = (head.tail.offset >= head.front.offset) ?
(head.queue_size - head.tail.offset) + (head.front.offset - head.max_head_size) : head.front.offset - head.tail.offset; if (res_op.size + urgent_data.reserved_size + overhead > remaining_size) { CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: reservations exceeded maximum capacity"); CLS_LOG(10, "INFO: cls_2pc_queue_reserve: remaining size: %lu (bytes)", remaining_size); CLS_LOG(10, "INFO: cls_2pc_queue_reserve: current reservations: %lu (bytes)", urgent_data.reserved_size); CLS_LOG(10, "INFO: cls_2pc_queue_reserve: requested size: %lu (bytes)", res_op.size); return -ENOSPC; } urgent_data.reserved_size += res_op.size + overhead; // note that the last id is incremented regardless of failures // to avoid "old reservation" issues below ++urgent_data.last_id; bool result; cls_2pc_reservations::iterator last_reservation; std::tie(last_reservation, result) = urgent_data.reservations.emplace(std::piecewise_construct, std::forward_as_tuple(urgent_data.last_id), std::forward_as_tuple(res_op.size, ceph::coarse_real_clock::now(), res_op.entries)); if (!result) { // an old reservation that was never committed or aborted is in the map // caller should try again assuming other IDs are ok CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: reservation id conflict after rollover: %u", urgent_data.last_id); return -EAGAIN; } // write back head head.bl_urgent_data.clear(); encode(urgent_data, head.bl_urgent_data); const uint64_t urgent_data_length = head.bl_urgent_data.length(); if (head.max_urgent_data_size < urgent_data_length) { CLS_LOG(10, "INFO: cls_2pc_queue_reserve: urgent data size: %lu exceeded maximum: %lu using xattrs", urgent_data_length, head.max_urgent_data_size); // add the last reservation to xattrs bufferlist bl_xattrs; auto ret = cls_cxx_getxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); if (ret < 0 && (ret != -ENOENT && ret != -ENODATA)) { CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: failed to read xattrs with: %d", ret); return ret; } cls_2pc_reservations xattr_reservations; if (ret >= 0) { // xattrs exist auto iter = bl_xattrs.cbegin(); try { decode(xattr_reservations, iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: failed to decode xattrs urgent data map"); return -EINVAL; } //end - catch } std::tie(std::ignore, result) = xattr_reservations.emplace(std::piecewise_construct, std::forward_as_tuple(urgent_data.last_id), std::forward_as_tuple(res_op.size, ceph::coarse_real_clock::now(), res_op.entries)); if (!result) { // an old reservation that was never committed or aborted is in the map // caller should try again assuming other IDs are ok CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: reservation id conflict inside xattrs after rollover: %u", urgent_data.last_id); return -EAGAIN; } bl_xattrs.clear(); encode(xattr_reservations, bl_xattrs); ret = cls_cxx_setxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); if (ret < 0) { CLS_LOG(1, "ERROR: cls_2pc_queue_reserve: failed to write xattrs with: %d", ret); return ret; } // remove the last reservation from the reservation list // and indicate that spillover happened urgent_data.has_xattrs = true; urgent_data.reservations.erase(last_reservation); head.bl_urgent_data.clear(); encode(urgent_data, head.bl_urgent_data); } ret = queue_write_head(hctx, head); if (ret < 0) { return ret; } CLS_LOG(20, "INFO: cls_2pc_queue_reserve: remaining size: %lu (bytes)", remaining_size); CLS_LOG(20, "INFO: cls_2pc_queue_reserve: current reservations: %lu (bytes)", urgent_data.reserved_size); CLS_LOG(20, "INFO: cls_2pc_queue_reserve:
requested size: %lu (bytes)", res_op.size); CLS_LOG(20, "INFO: cls_2pc_queue_reserve: urgent data size: %lu (bytes)", urgent_data_length); cls_2pc_queue_reserve_ret op_ret; op_ret.id = urgent_data.last_id; encode(op_ret, *out); return 0; } static int cls_2pc_queue_commit(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { cls_2pc_queue_commit_op commit_op; try { auto in_iter = in->cbegin(); decode(commit_op, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_commit: failed to decode entry: %s", err.what()); return -EINVAL; } // get head cls_queue_head head; int ret = queue_read_head(hctx, head); if (ret < 0) { return ret; } cls_2pc_urgent_data urgent_data; try { auto in_iter = head.bl_urgent_data.cbegin(); decode(urgent_data, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_commit: failed to decode entry: %s", err.what()); return -EINVAL; } auto it = urgent_data.reservations.find(commit_op.id); cls_2pc_reservations xattr_reservations; bufferlist bl_xattrs; if (it == urgent_data.reservations.end()) { if (!urgent_data.has_xattrs) { CLS_LOG(1, "ERROR: cls_2pc_queue_commit: reservation does not exist: %u", commit_op.id); return -ENOENT; } // try to look for the reservation in xattrs auto ret = cls_cxx_getxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); if (ret < 0) { if (ret == -ENOENT || ret == -ENODATA) { // no xattrs, reservation does not exist CLS_LOG(1, "ERROR: cls_2pc_queue_commit: reservation does not exist: %u", commit_op.id); return -ENOENT; } CLS_LOG(1, "ERROR: cls_2pc_queue_commit: failed to read xattrs with: %d", ret); return ret; } auto iter = bl_xattrs.cbegin(); try { decode(xattr_reservations, iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_commit: failed to decode xattrs urgent data map"); return -EINVAL; } //end - catch it = xattr_reservations.find(commit_op.id); if (it == xattr_reservations.end()) { CLS_LOG(1, "ERROR: cls_2pc_queue_commit: reservation does not exist: %u", commit_op.id); return -ENOENT; } } auto& res = it->second; const auto actual_size = std::accumulate(commit_op.bl_data_vec.begin(), commit_op.bl_data_vec.end(), 0UL, [] (uint64_t sum, const bufferlist& bl) { return sum + bl.length(); }); if (res.size < actual_size) { CLS_LOG(1, "ERROR: cls_2pc_queue_commit: trying to commit %lu bytes to a %lu bytes reservation", actual_size, res.size); return -EINVAL; } // commit the data to the queue cls_queue_enqueue_op enqueue_op; enqueue_op.bl_data_vec = std::move(commit_op.bl_data_vec); ret = queue_enqueue(hctx, enqueue_op, head); if (ret < 0) { return ret; } urgent_data.reserved_size -= res.size; urgent_data.committed_entries += res.entries; if (xattr_reservations.empty()) { // remove the reservation from urgent data urgent_data.reservations.erase(it); } else { // remove the reservation from xattrs xattr_reservations.erase(it); bl_xattrs.clear(); encode(xattr_reservations, bl_xattrs); ret = cls_cxx_setxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); if (ret < 0) { CLS_LOG(1, "ERROR: cls_2pc_queue_commit: failed to write xattrs with: %d", ret); return ret; } } CLS_LOG(20, "INFO: cls_2pc_queue_commit: current reservations: %lu (bytes)", urgent_data.reserved_size); CLS_LOG(20, "INFO: cls_2pc_queue_commit: current reservation entries: %lu", urgent_data.reservations.size() + xattr_reservations.size()); // write back head head.bl_urgent_data.clear(); encode(urgent_data, head.bl_urgent_data); return queue_write_head(hctx, head); } static int
cls_2pc_queue_abort(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { cls_2pc_queue_abort_op abort_op; try { auto in_iter = in->cbegin(); decode(abort_op, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_abort: failed to decode entry: %s", err.what()); return -EINVAL; } // get head cls_queue_head head; int ret = queue_read_head(hctx, head); if (ret < 0) { return ret; } cls_2pc_urgent_data urgent_data; try { auto in_iter = head.bl_urgent_data.cbegin(); decode(urgent_data, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_abort: failed to decode entry: %s", err.what()); return -EINVAL; } auto it = urgent_data.reservations.find(abort_op.id); uint64_t reservation_size; if (it == urgent_data.reservations.end()) { if (!urgent_data.has_xattrs) { CLS_LOG(20, "INFO: cls_2pc_queue_abort: reservation does not exist: %u", abort_op.id); return 0; } // try to look for the reservation in xattrs bufferlist bl_xattrs; auto ret = cls_cxx_getxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); if (ret < 0) { if (ret == -ENOENT || ret == -ENODATA) { // no xattrs, reservation does not exist CLS_LOG(20, "INFO: cls_2pc_queue_abort: reservation does not exist: %u", abort_op.id); return 0; } CLS_LOG(1, "ERROR: cls_2pc_queue_abort: failed to read xattrs with: %d", ret); return ret; } auto iter = bl_xattrs.cbegin(); cls_2pc_reservations xattr_reservations; try { decode(xattr_reservations, iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_abort: failed to decode xattrs urgent data map"); return -EINVAL; } //end - catch it = xattr_reservations.find(abort_op.id); if (it == xattr_reservations.end()) { CLS_LOG(20, "INFO: cls_2pc_queue_abort: reservation does not exist: %u", abort_op.id); return 0; } reservation_size = it->second.size; xattr_reservations.erase(it); bl_xattrs.clear(); encode(xattr_reservations, bl_xattrs); ret = cls_cxx_setxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); if (ret < 0) { CLS_LOG(1, "ERROR: cls_2pc_queue_abort: failed to write xattrs with: %d", ret); return ret; } } else { reservation_size = it->second.size; urgent_data.reservations.erase(it); } // remove the reservation urgent_data.reserved_size -= reservation_size; CLS_LOG(20, "INFO: cls_2pc_queue_abort: current reservations: %lu (bytes)", urgent_data.reserved_size); // write back head head.bl_urgent_data.clear(); encode(urgent_data, head.bl_urgent_data); return queue_write_head(hctx, head); } static int cls_2pc_queue_list_reservations(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { //get head cls_queue_head head; auto ret = queue_read_head(hctx, head); if (ret < 0) { return ret; } cls_2pc_urgent_data urgent_data; try { auto in_iter = head.bl_urgent_data.cbegin(); decode(urgent_data, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_list_reservations: failed to decode entry: %s", err.what()); return -EINVAL; } CLS_LOG(20, "INFO: cls_2pc_queue_list_reservations: %lu reservation entries found", urgent_data.reservations.size()); cls_2pc_queue_reservations_ret op_ret; op_ret.reservations = std::move(urgent_data.reservations); if (urgent_data.has_xattrs) { // try to look for the reservation in xattrs cls_2pc_reservations xattr_reservations; bufferlist bl_xattrs; ret = cls_cxx_getxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); if (ret < 0 && (ret != -ENOENT && ret != -ENODATA)) { CLS_LOG(1, "ERROR: cls_2pc_queue_list_reservations: failed to read xattrs with: %d", ret); return
ret; } if (ret >= 0) { auto iter = bl_xattrs.cbegin(); try { decode(xattr_reservations, iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_list_reservations: failed to decode xattrs urgent data map"); return -EINVAL; } //end - catch CLS_LOG(20, "INFO: cls_2pc_queue_list_reservations: %lu reservation entries found in xattrs", xattr_reservations.size()); op_ret.reservations.merge(xattr_reservations); } } encode(op_ret, *out); return 0; } static int cls_2pc_queue_expire_reservations(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { cls_2pc_queue_expire_op expire_op; try { auto in_iter = in->cbegin(); decode(expire_op, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_expire_reservations: failed to decode entry: %s", err.what()); return -EINVAL; } //get head cls_queue_head head; auto ret = queue_read_head(hctx, head); if (ret < 0) { return ret; } cls_2pc_urgent_data urgent_data; try { auto in_iter = head.bl_urgent_data.cbegin(); decode(urgent_data, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_expire_reservations: failed to decode entry: %s", err.what()); return -EINVAL; } CLS_LOG(20, "INFO: cls_2pc_queue_expire_reservations: %lu reservation entries found", urgent_data.reservations.size()); CLS_LOG(20, "INFO: cls_2pc_queue_expire_reservations: current reservations: %lu (bytes)", urgent_data.reserved_size); uint64_t reservation_size = 0U; auto stale_found = false; auto xattr_stale_found = false; for (auto it = urgent_data.reservations.begin(); it != urgent_data.reservations.end();) { if (it->second.timestamp < expire_op.stale_time) { CLS_LOG(5, "WARNING: cls_2pc_queue_expire_reservations: stale reservation %u will be removed", it->first); reservation_size += it->second.size; it = urgent_data.reservations.erase(it); stale_found = true; } else { ++it; } } if (urgent_data.has_xattrs) { // try to look for the reservation in xattrs cls_2pc_reservations xattr_reservations; bufferlist bl_xattrs; ret = cls_cxx_getxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); if (ret < 0 && (ret != -ENOENT && ret != -ENODATA)) { CLS_LOG(1, "ERROR: cls_2pc_queue_expire_reservations: failed to read xattrs with: %d", ret); return ret; } if (ret >= 0) { auto iter = bl_xattrs.cbegin(); try { decode(xattr_reservations, iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_expire_reservations: failed to decode xattrs urgent data map"); return -EINVAL; } //end - catch CLS_LOG(20, "INFO: cls_2pc_queue_expire_reservations: %lu reservation entries found in xattrs", xattr_reservations.size()); for (auto it = xattr_reservations.begin(); it != xattr_reservations.end();) { if (it->second.timestamp < expire_op.stale_time) { CLS_LOG(5, "WARNING: cls_2pc_queue_expire_reservations: stale reservation %u will be removed", it->first); reservation_size += it->second.size; it = xattr_reservations.erase(it); xattr_stale_found = true; } else { ++it; } } if (xattr_stale_found) { // write xattr back without stale reservations bl_xattrs.clear(); encode(xattr_reservations, bl_xattrs); ret = cls_cxx_setxattr(hctx, CLS_QUEUE_URGENT_DATA_XATTR_NAME, &bl_xattrs); if (ret < 0) { CLS_LOG(1, "ERROR: cls_2pc_queue_expire_reservations: failed to write xattrs with: %d", ret); return ret; } } } } if (stale_found || xattr_stale_found) { urgent_data.reserved_size -= reservation_size; CLS_LOG(20, "INFO: cls_2pc_queue_expire_reservations: reservations after cleanup: %lu (bytes)", urgent_data.reserved_size); // write back head
without stale reservations head.bl_urgent_data.clear(); encode(urgent_data, head.bl_urgent_data); return queue_write_head(hctx, head); } return 0; } static int cls_2pc_queue_list_entries(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { auto in_iter = in->cbegin(); cls_queue_list_op op; try { decode(op, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_list_entries: failed to decode entry: %s", err.what()); return -EINVAL; } cls_queue_head head; auto ret = queue_read_head(hctx, head); if (ret < 0) { return ret; } cls_queue_list_ret op_ret; ret = queue_list_entries(hctx, op, op_ret, head); if (ret < 0) { return ret; } encode(op_ret, *out); return 0; } static int cls_2pc_queue_remove_entries(cls_method_context_t hctx, bufferlist *in, bufferlist *out) { auto in_iter = in->cbegin(); cls_queue_remove_op op; try { decode(op, in_iter); } catch (ceph::buffer::error& err) { CLS_LOG(1, "ERROR: cls_2pc_queue_remove_entries: failed to decode entry: %s", err.what()); return -EINVAL; } cls_queue_head head; auto ret = queue_read_head(hctx, head); if (ret < 0) { return ret; } ret = queue_remove_entries(hctx, op, head); if (ret < 0) { return ret; } return queue_write_head(hctx, head); } CLS_INIT(2pc_queue) { CLS_LOG(1, "Loaded 2pc queue class!"); cls_handle_t h_class; cls_method_handle_t h_2pc_queue_init; cls_method_handle_t h_2pc_queue_get_capacity; cls_method_handle_t h_2pc_queue_get_topic_stats; cls_method_handle_t h_2pc_queue_reserve; cls_method_handle_t h_2pc_queue_commit; cls_method_handle_t h_2pc_queue_abort; cls_method_handle_t h_2pc_queue_list_reservations; cls_method_handle_t h_2pc_queue_list_entries; cls_method_handle_t h_2pc_queue_remove_entries; cls_method_handle_t h_2pc_queue_expire_reservations; cls_register(TPC_QUEUE_CLASS, &h_class); cls_register_cxx_method(h_class, TPC_QUEUE_INIT, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_init, &h_2pc_queue_init); cls_register_cxx_method(h_class, TPC_QUEUE_GET_CAPACITY, CLS_METHOD_RD, cls_2pc_queue_get_capacity, &h_2pc_queue_get_capacity); cls_register_cxx_method(h_class, TPC_QUEUE_GET_TOPIC_STATS, CLS_METHOD_RD, cls_2pc_queue_get_topic_stats, &h_2pc_queue_get_topic_stats); cls_register_cxx_method(h_class, TPC_QUEUE_RESERVE, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_reserve, &h_2pc_queue_reserve); cls_register_cxx_method(h_class, TPC_QUEUE_COMMIT, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_commit, &h_2pc_queue_commit); cls_register_cxx_method(h_class, TPC_QUEUE_ABORT, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_abort, &h_2pc_queue_abort); cls_register_cxx_method(h_class, TPC_QUEUE_LIST_RESERVATIONS, CLS_METHOD_RD, cls_2pc_queue_list_reservations, &h_2pc_queue_list_reservations); cls_register_cxx_method(h_class, TPC_QUEUE_LIST_ENTRIES, CLS_METHOD_RD, cls_2pc_queue_list_entries, &h_2pc_queue_list_entries); cls_register_cxx_method(h_class, TPC_QUEUE_REMOVE_ENTRIES, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_remove_entries, &h_2pc_queue_remove_entries); cls_register_cxx_method(h_class, TPC_QUEUE_EXPIRE_RESERVATIONS, CLS_METHOD_RD | CLS_METHOD_WR, cls_2pc_queue_expire_reservations, &h_2pc_queue_expire_reservations); return; }
22,789
34.833333
166
cc
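Both get_topic_stats and reserve above compute free space with the same ring arithmetic: the first max_head_size bytes of the object hold the head, and the data region wraps around it. Here is that computation pulled out as a standalone helper; the field names mirror cls_queue_head, but the helper itself is illustrative.

#include <cstdint>

uint64_t remaining_size(uint64_t queue_size, uint64_t max_head_size,
                        uint64_t front_off, uint64_t tail_off) {
  if (tail_off >= front_off)    // not wrapped: space after tail plus space before front
    return (queue_size - tail_off) + (front_off - max_head_size);
  return front_off - tail_off;  // wrapped: the single gap between tail and front
}
// e.g. queue_size=1024, max_head_size=64, front=tail=64 (empty queue)
// -> (1024 - 64) + (64 - 64) = 960 bytes available for entries.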
null
ceph-main/src/cls/2pc_queue/cls_2pc_queue_client.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "cls/2pc_queue/cls_2pc_queue_client.h" #include "cls/2pc_queue/cls_2pc_queue_ops.h" #include "cls/2pc_queue/cls_2pc_queue_const.h" #include "cls/queue/cls_queue_ops.h" #include "cls/queue/cls_queue_const.h" using namespace librados; void cls_2pc_queue_init(ObjectWriteOperation& op, const std::string& queue_name, uint64_t size) { bufferlist in; cls_queue_init_op call; call.queue_size = size; encode(call, in); op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_INIT, in); } int cls_2pc_queue_get_capacity_result(const bufferlist& bl, uint64_t& size) { cls_queue_get_capacity_ret op_ret; auto iter = bl.cbegin(); try { decode(op_ret, iter); } catch (buffer::error& err) { return -EIO; } size = op_ret.queue_capacity; return 0; } int cls_2pc_queue_get_topic_stats_result(const bufferlist& bl, uint32_t& committed_entries, uint64_t& size) { cls_queue_get_stats_ret op_ret; auto iter = bl.cbegin(); try { decode(op_ret, iter); } catch (buffer::error& err) { return -EIO; } committed_entries = op_ret.queue_entries; size = op_ret.queue_size; return 0; } #ifndef CLS_CLIENT_HIDE_IOCTX int cls_2pc_queue_get_capacity(IoCtx& io_ctx, const std::string& queue_name, uint64_t& size) { bufferlist in, out; const auto r = io_ctx.exec(queue_name, TPC_QUEUE_CLASS, TPC_QUEUE_GET_CAPACITY, in, out); if (r < 0 ) { return r; } return cls_2pc_queue_get_capacity_result(out, size); } #endif // optional async method for getting capacity (bytes) // after the answer is received, call cls_2pc_queue_get_capacity_result() to parse the results void cls_2pc_queue_get_capacity(ObjectReadOperation& op, bufferlist* obl, int* prval) { bufferlist in; op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_GET_CAPACITY, in, obl, prval); } #ifndef CLS_CLIENT_HIDE_IOCTX int cls_2pc_queue_get_topic_stats(IoCtx& io_ctx, const std::string& queue_name, uint32_t& committed_entries, uint64_t& size) { bufferlist in, out; const auto r = io_ctx.exec(queue_name, TPC_QUEUE_CLASS, TPC_QUEUE_GET_TOPIC_STATS, in, out); if (r < 0 ) { return r; } return cls_2pc_queue_get_topic_stats_result(out, committed_entries, size); } #endif // optional async method for getting the number of committed entries and size (bytes) // after the answer is received, call cls_2pc_queue_get_topic_stats_result() to parse the results void cls_2pc_queue_get_topic_stats(ObjectReadOperation& op, bufferlist* obl, int* prval) { bufferlist in; op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_GET_TOPIC_STATS, in, obl, prval); } int cls_2pc_queue_reserve_result(const bufferlist& bl, cls_2pc_reservation::id_t& res_id) { cls_2pc_queue_reserve_ret op_ret; auto iter = bl.cbegin(); try { decode(op_ret, iter); } catch (buffer::error& err) { return -EIO; } res_id = op_ret.id; return 0; } int cls_2pc_queue_reserve(IoCtx& io_ctx, const std::string& queue_name, uint64_t res_size, uint32_t entries, cls_2pc_reservation::id_t& res_id) { bufferlist in, out; cls_2pc_queue_reserve_op reserve_op; reserve_op.size = res_size; reserve_op.entries = entries; encode(reserve_op, in); int rval; ObjectWriteOperation op; op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_RESERVE, in, &out, &rval); const auto r = io_ctx.operate(queue_name, &op, librados::OPERATION_RETURNVEC); if (r < 0) { return r; } return cls_2pc_queue_reserve_result(out, res_id); } void cls_2pc_queue_reserve(ObjectWriteOperation& op, uint64_t res_size, uint32_t entries, bufferlist* obl, int* prval) { bufferlist in; cls_2pc_queue_reserve_op reserve_op; reserve_op.size = res_size; reserve_op.entries = entries;
encode(reserve_op, in); op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_RESERVE, in, obl, prval); } void cls_2pc_queue_commit(ObjectWriteOperation& op, std::vector<bufferlist> bl_data_vec, cls_2pc_reservation::id_t res_id) { bufferlist in; cls_2pc_queue_commit_op commit_op; commit_op.id = res_id; commit_op.bl_data_vec = std::move(bl_data_vec); encode(commit_op, in); op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_COMMIT, in); } void cls_2pc_queue_abort(ObjectWriteOperation& op, cls_2pc_reservation::id_t res_id) { bufferlist in; cls_2pc_queue_abort_op abort_op; abort_op.id = res_id; encode(abort_op, in); op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_ABORT, in); } int cls_2pc_queue_list_entries_result(const bufferlist& bl, std::vector<cls_queue_entry>& entries, bool *truncated, std::string& next_marker) { cls_queue_list_ret ret; auto iter = bl.cbegin(); try { decode(ret, iter); } catch (buffer::error& err) { return -EIO; } entries = std::move(ret.entries); *truncated = ret.is_truncated; next_marker = std::move(ret.next_marker); return 0; } #ifndef CLS_CLIENT_HIDE_IOCTX int cls_2pc_queue_list_entries(IoCtx& io_ctx, const std::string& queue_name, const std::string& marker, uint32_t max, std::vector<cls_queue_entry>& entries, bool *truncated, std::string& next_marker) { bufferlist in, out; cls_queue_list_op op; op.start_marker = marker; op.max = max; encode(op, in); const auto r = io_ctx.exec(queue_name, TPC_QUEUE_CLASS, TPC_QUEUE_LIST_ENTRIES, in, out); if (r < 0) { return r; } return cls_2pc_queue_list_entries_result(out, entries, truncated, next_marker); } #endif void cls_2pc_queue_list_entries(ObjectReadOperation& op, const std::string& marker, uint32_t max, bufferlist* obl, int* prval) { bufferlist in; cls_queue_list_op list_op; list_op.start_marker = marker; list_op.max = max; encode(list_op, in); op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_LIST_ENTRIES, in, obl, prval); } int cls_2pc_queue_list_reservations_result(const bufferlist& bl, cls_2pc_reservations& reservations) { cls_2pc_queue_reservations_ret ret; auto iter = bl.cbegin(); try { decode(ret, iter); } catch (buffer::error& err) { return -EIO; } reservations = std::move(ret.reservations); return 0; } #ifndef CLS_CLIENT_HIDE_IOCTX int cls_2pc_queue_list_reservations(IoCtx& io_ctx, const std::string& queue_name, cls_2pc_reservations& reservations) { bufferlist in, out; const auto r = io_ctx.exec(queue_name, TPC_QUEUE_CLASS, TPC_QUEUE_LIST_RESERVATIONS, in, out); if (r < 0) { return r; } return cls_2pc_queue_list_reservations_result(out, reservations); } #endif void cls_2pc_queue_list_reservations(ObjectReadOperation& op, bufferlist* obl, int* prval) { bufferlist in; op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_LIST_RESERVATIONS, in, obl, prval); } void cls_2pc_queue_remove_entries(ObjectWriteOperation& op, const std::string& end_marker) { bufferlist in; cls_queue_remove_op rem_op; rem_op.end_marker = end_marker; encode(rem_op, in); op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_REMOVE_ENTRIES, in); } void cls_2pc_queue_expire_reservations(librados::ObjectWriteOperation& op, ceph::coarse_real_time stale_time) { bufferlist in; cls_2pc_queue_expire_op expire_op; expire_op.stale_time = stale_time; encode(expire_op, in); op.exec(TPC_QUEUE_CLASS, TPC_QUEUE_EXPIRE_RESERVATIONS, in); }
7287
28.746939
128
cc
null
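The reserve/commit/abort calls above compose into a two-phase producer flow. The following sketch is not part of the original file: it assumes an open librados::IoCtx named io_ctx and a queue object named "my_queue" already created via cls_2pc_queue_init (both names are hypothetical), and it uses only the client functions defined above.

#include <string>
#include <vector>
#include "cls/2pc_queue/cls_2pc_queue_client.h"

// sketch: reserve space for two entries, then commit them under the
// reservation; abort the reservation if the commit cannot be submitted
int publish_two_entries(librados::IoCtx& io_ctx) {
  const std::string queue_name = "my_queue"; // hypothetical queue object
  std::vector<bufferlist> data(2);
  data[0].append("first entry");
  data[1].append("second entry");
  uint64_t total = 0;
  for (const auto& bl : data) {
    total += bl.length();
  }
  // phase 1: reserve; note that the synchronous overload passes
  // librados::OPERATION_RETURNVEC internally, as seen in the .cc above
  cls_2pc_reservation::id_t res_id;
  int r = cls_2pc_queue_reserve(io_ctx, queue_name, total,
                                static_cast<uint32_t>(data.size()), res_id);
  if (r < 0) {
    return r; // e.g. the queue may not have enough free space
  }
  // phase 2: commit the data; a reservation id may be committed or
  // aborted only once
  librados::ObjectWriteOperation op;
  cls_2pc_queue_commit(op, std::move(data), res_id);
  r = io_ctx.operate(queue_name, &op);
  if (r < 0) {
    // release the reserved space if the commit was not applied
    librados::ObjectWriteOperation abort_op;
    cls_2pc_queue_abort(abort_op, res_id);
    (void)io_ctx.operate(queue_name, &abort_op);
  }
  return r;
}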
ceph-main/src/cls/2pc_queue/cls_2pc_queue_client.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <string>
#include <vector>
#include "include/rados/librados.hpp"
#include "cls/queue/cls_queue_types.h"
#include "cls/2pc_queue/cls_2pc_queue_types.h"

// initialize the queue with a maximum size (bytes)
// note that the actual size of the queue will be larger, as 24K bytes will be allocated in the head object
// and more may be allocated as xattrs of the object (depending on the number of concurrent reservations)
void cls_2pc_queue_init(librados::ObjectWriteOperation& op, const std::string& queue_name, uint64_t size);

// these overloads, which call io_ctx.operate() or io_ctx.exec(), should not be called from within rgw.
// there, rgw_rados_operate() should be called after the overloads that make no io_ctx.operate()/exec() calls
#ifndef CLS_CLIENT_HIDE_IOCTX
// return capacity (bytes)
int cls_2pc_queue_get_capacity(librados::IoCtx& io_ctx, const std::string& queue_name, uint64_t& size);

// return the number of committed entries and size (bytes)
int cls_2pc_queue_get_topic_stats(librados::IoCtx& io_ctx, const std::string& queue_name,
                                  uint32_t& committed_entries, uint64_t& size);

// make a reservation on the queue (in bytes) with the number of expected entries (to calculate overhead)
// return a reservation id if the reservation is possible, 0 otherwise
int cls_2pc_queue_reserve(librados::IoCtx& io_ctx, const std::string& queue_name,
                          uint64_t res_size, uint32_t entries, cls_2pc_reservation::id_t& res_id);

// incremental listing of all entries in the queue
int cls_2pc_queue_list_entries(librados::IoCtx& io_ctx, const std::string& queue_name,
                               const std::string& marker, uint32_t max,
                               std::vector<cls_queue_entry>& entries,
                               bool *truncated, std::string& next_marker);

// list all pending reservations in the queue
int cls_2pc_queue_list_reservations(librados::IoCtx& io_ctx, const std::string& queue_name,
                                    cls_2pc_reservations& reservations);
#endif

// optional async method for getting capacity (bytes)
// after the answer is received, call cls_2pc_queue_get_capacity_result() to parse the results
void cls_2pc_queue_get_capacity(librados::ObjectReadOperation& op, bufferlist* obl, int* prval);

// optional async method for getting the number of committed entries and size (bytes)
// after the answer is received, call cls_2pc_queue_get_topic_stats_result() to parse the results
void cls_2pc_queue_get_topic_stats(librados::ObjectReadOperation& op, bufferlist* obl, int* prval);

int cls_2pc_queue_get_capacity_result(const bufferlist& bl, uint64_t& size);
int cls_2pc_queue_get_topic_stats_result(const bufferlist& bl, uint32_t& committed_entries, uint64_t& size);

// optional async method for making a reservation on the queue (in bytes) with the number of
// expected entries (to calculate overhead)
// notes:
// (1) make sure that librados::OPERATION_RETURNVEC is passed to the executing function
// (2) multiple operations cannot be executed in a batch (operations both read and write)
// after the answer is received, call cls_2pc_queue_reserve_result() to parse the results
void cls_2pc_queue_reserve(librados::ObjectWriteOperation& op, uint64_t res_size,
                           uint32_t entries, bufferlist* obl, int* prval);
int cls_2pc_queue_reserve_result(const bufferlist& bl, cls_2pc_reservation::id_t& res_id);

// commit data using a reservation done beforehand
// res_id must be allocated using cls_2pc_queue_reserve, and can be either committed or aborted only once
// the size of bl_data_vec must be equal to or smaller than the size reserved for the res_id
// note that the number of entries in bl_data_vec does not have to match the number of entries reserved
// only the size (including the overhead of the entries) is checked
void cls_2pc_queue_commit(librados::ObjectWriteOperation& op, std::vector<bufferlist> bl_data_vec,
                          cls_2pc_reservation::id_t res_id);

// abort a reservation
// res_id must be allocated using cls_2pc_queue_reserve
void cls_2pc_queue_abort(librados::ObjectWriteOperation& op, cls_2pc_reservation::id_t res_id);

// optional async incremental listing of all entries in the queue
// after the answer is received, call cls_2pc_queue_list_entries_result() to parse the results
void cls_2pc_queue_list_entries(librados::ObjectReadOperation& op, const std::string& marker,
                                uint32_t max, bufferlist* obl, int* prval);
int cls_2pc_queue_list_entries_result(const bufferlist& bl, std::vector<cls_queue_entry>& entries,
                                      bool *truncated, std::string& next_marker);

// optional async listing of all pending reservations in the queue
// after the answer is received, call cls_2pc_queue_list_reservations_result() to parse the results
void cls_2pc_queue_list_reservations(librados::ObjectReadOperation& op, bufferlist* obl, int* prval);
int cls_2pc_queue_list_reservations_result(const librados::bufferlist& bl, cls_2pc_reservations& reservations);

// expire stale reservations (older than the given time)
void cls_2pc_queue_expire_reservations(librados::ObjectWriteOperation& op, ceph::coarse_real_time stale_time);

// remove all entries up to the given marker
void cls_2pc_queue_remove_entries(librados::ObjectWriteOperation& op, const std::string& end_marker);
5223
55.782609
137
h
null
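On the consumer side, the listing and removal functions declared above pair into an incremental drain loop. A minimal sketch follows, again assuming the hypothetical io_ctx and "my_queue" from the producer sketch, plus the same includes; the cls_queue_entry fields are taken from cls_queue_types.h.

// sketch: repeatedly list committed entries, process them, and remove
// everything up to the returned marker
int drain_queue(librados::IoCtx& io_ctx) {
  const std::string queue_name = "my_queue"; // hypothetical queue object
  std::string marker; // an empty marker starts from the head of the queue
  bool truncated = true;
  while (truncated) {
    std::vector<cls_queue_entry> entries;
    std::string next_marker;
    // list up to 100 committed entries starting at the marker
    int r = cls_2pc_queue_list_entries(io_ctx, queue_name, marker, 100,
                                       entries, &truncated, next_marker);
    if (r < 0) {
      return r;
    }
    if (entries.empty()) {
      break; // nothing left to consume
    }
    for (const auto& entry : entries) {
      // process entry.data here (application-specific)
    }
    // remove all entries up to the given marker, as documented above
    librados::ObjectWriteOperation op;
    cls_2pc_queue_remove_entries(op, next_marker);
    r = io_ctx.operate(queue_name, &op);
    if (r < 0) {
      return r;
    }
    marker = next_marker;
  }
  return 0;
}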
ceph-main/src/cls/2pc_queue/cls_2pc_queue_const.h
#pragma once #define TPC_QUEUE_CLASS "2pc_queue" #define TPC_QUEUE_INIT "2pc_queue_init" #define TPC_QUEUE_GET_CAPACITY "2pc_queue_get_capacity" #define TPC_QUEUE_GET_TOPIC_STATS "2pc_queue_get_topic_stats" #define TPC_QUEUE_RESERVE "2pc_queue_reserve" #define TPC_QUEUE_COMMIT "2pc_queue_commit" #define TPC_QUEUE_ABORT "2pc_queue_abort" #define TPC_QUEUE_LIST_RESERVATIONS "2pc_queue_list_reservations" #define TPC_QUEUE_LIST_ENTRIES "2pc_queue_list_entries" #define TPC_QUEUE_REMOVE_ENTRIES "2pc_queue_remove_entries" #define TPC_QUEUE_EXPIRE_RESERVATIONS "2pc_queue_expire_reservations"
594
36.1875
69
h
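Among the operation names defined here is reservation expiry, which the client header above exposes via cls_2pc_queue_expire_reservations. As a final hedged sketch (same hypothetical io_ctx and "my_queue" as the earlier sketches), stale reservations left behind by crashed producers can be cleaned up periodically; the use of ceph::coarse_real_clock and the 30-second window are assumptions for illustration.

#include <chrono>

// sketch: expire reservations older than an arbitrary 30-second window
int cleanup_stale_reservations(librados::IoCtx& io_ctx) {
  const std::string queue_name = "my_queue"; // hypothetical queue object
  // inspect pending reservations first (optional, for visibility)
  cls_2pc_reservations reservations;
  int r = cls_2pc_queue_list_reservations(io_ctx, queue_name, reservations);
  if (r < 0) {
    return r;
  }
  if (reservations.empty()) {
    return 0; // nothing to clean up
  }
  librados::ObjectWriteOperation op;
  cls_2pc_queue_expire_reservations(
      op, ceph::coarse_real_clock::now() - std::chrono::seconds(30));
  return io_ctx.operate(queue_name, &op);
}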