ceph-main/qa/tasks/mgr/dashboard/test_rgw.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import base64
import logging
import time
from urllib import parse
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.hashes import SHA1
from cryptography.hazmat.primitives.twofactor.totp import TOTP
from .helper import DashboardTestCase, JLeaf, JList, JObj
logger = logging.getLogger(__name__)
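# Base class for the dashboard RGW API tests: it provisions a system
# 'admin' RGW account, points the dashboard at it, and optionally creates
# a test user with S3 and Swift subusers via radosgw-admin.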
class RgwTestCase(DashboardTestCase):
maxDiff = None
create_test_user = False
AUTH_ROLES = ['rgw-manager']
@classmethod
def setUpClass(cls):
super(RgwTestCase, cls).setUpClass()
# Create the administrator account.
cls._radosgw_admin_cmd([
'user', 'create', '--uid', 'admin', '--display-name', 'admin',
'--system', '--access-key', 'admin', '--secret', 'admin'
])
# Update the dashboard configuration.
cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin')
cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin')
# Create a test user if requested.
if cls.create_test_user:
cls._radosgw_admin_cmd([
'user', 'create', '--uid', 'teuth-test-user', '--display-name',
'teuth-test-user'
])
cls._radosgw_admin_cmd([
'caps', 'add', '--uid', 'teuth-test-user', '--caps',
'metadata=write'
])
cls._radosgw_admin_cmd([
'subuser', 'create', '--uid', 'teuth-test-user', '--subuser',
'teuth-test-subuser', '--access', 'full', '--key-type', 's3',
'--access-key', 'xyz123'
])
cls._radosgw_admin_cmd([
'subuser', 'create', '--uid', 'teuth-test-user', '--subuser',
'teuth-test-subuser2', '--access', 'full', '--key-type',
'swift'
])
@classmethod
def tearDownClass(cls):
# Delete administrator account.
cls._radosgw_admin_cmd(['user', 'rm', '--uid', 'admin'])
if cls.create_test_user:
cls._radosgw_admin_cmd(['user', 'rm', '--uid=teuth-test-user', '--purge-data'])
super(RgwTestCase, cls).tearDownClass()
def get_rgw_user(self, uid, stats=True):
return self._get('/api/rgw/user/{}?stats={}'.format(uid, stats))
class RgwApiCredentialsTest(RgwTestCase):
AUTH_ROLES = ['rgw-manager']
def test_invalid_credentials(self):
self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'invalid')
self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'invalid')
resp = self._get('/api/rgw/user')
self.assertStatus(404)
self.assertIn('detail', resp)
self.assertIn('component', resp)
self.assertIn('Error connecting to Object Gateway', resp['detail'])
self.assertEqual(resp['component'], 'rgw')
def test_success(self):
# Set the default credentials.
self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin')
self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin')
data = self._get('/ui-api/rgw/status')
self.assertStatus(200)
self.assertIn('available', data)
self.assertIn('message', data)
self.assertTrue(data['available'])
class RgwSiteTest(RgwTestCase):
AUTH_ROLES = ['rgw-manager']
def test_get_placement_targets(self):
data = self._get('/api/rgw/site?query=placement-targets')
self.assertStatus(200)
self.assertSchema(data, JObj({
'zonegroup': str,
'placement_targets': JList(JObj({
'name': str,
'data_pool': str
}))
}))
def test_get_realms(self):
data = self._get('/api/rgw/site?query=realms')
self.assertStatus(200)
self.assertSchema(data, JList(str))
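# Bucket CRUD tests, including tenanted buckets, MFA Delete (TOTP) and
# S3 object locking.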
class RgwBucketTest(RgwTestCase):
_mfa_token_serial = '1'
_mfa_token_seed = '23456723'
_mfa_token_time_step = 2
AUTH_ROLES = ['rgw-manager']
@classmethod
def setUpClass(cls):
cls.create_test_user = True
super(RgwBucketTest, cls).setUpClass()
# Create MFA TOTP token for test user.
cls._radosgw_admin_cmd([
'mfa', 'create', '--uid', 'teuth-test-user', '--totp-serial', cls._mfa_token_serial,
'--totp-seed', cls._mfa_token_seed, '--totp-seed-type', 'base32',
'--totp-seconds', str(cls._mfa_token_time_step), '--totp-window', '1'
])
# Create tenanted users.
cls._radosgw_admin_cmd([
'user', 'create', '--tenant', 'testx', '--uid', 'teuth-test-user',
'--display-name', 'tenanted teuth-test-user'
])
cls._radosgw_admin_cmd([
'user', 'create', '--tenant', 'testx2', '--uid', 'teuth-test-user2',
'--display-name', 'tenanted teuth-test-user 2'
])
@classmethod
def tearDownClass(cls):
cls._radosgw_admin_cmd(
['user', 'rm', '--tenant', 'testx', '--uid=teuth-test-user', '--purge-data'])
cls._radosgw_admin_cmd(
['user', 'rm', '--tenant', 'testx2', '--uid=teuth-test-user2', '--purge-data'])
super(RgwBucketTest, cls).tearDownClass()
def _get_mfa_token_pin(self):
totp_key = base64.b32decode(self._mfa_token_seed)
totp = TOTP(totp_key, 6, SHA1(), self._mfa_token_time_step, backend=default_backend(),
enforce_key_length=False)
time_value = int(time.time())
return totp.generate(time_value)
def test_all(self):
# Create a new bucket.
self._post(
'/api/rgw/bucket',
params={
'bucket': 'teuth-test-bucket',
'uid': 'admin',
'zonegroup': 'default',
'placement_target': 'default-placement'
})
self.assertStatus(201)
data = self.jsonBody()
self.assertSchema(data, JObj(sub_elems={
'bucket_info': JObj(sub_elems={
'bucket': JObj(allow_unknown=True, sub_elems={
'name': JLeaf(str),
'bucket_id': JLeaf(str),
'tenant': JLeaf(str)
}),
'quota': JObj(sub_elems={}, allow_unknown=True),
'creation_time': JLeaf(str)
}, allow_unknown=True)
}, allow_unknown=True))
data = data['bucket_info']['bucket']
self.assertEqual(data['name'], 'teuth-test-bucket')
self.assertEqual(data['tenant'], '')
# List all buckets.
data = self._get('/api/rgw/bucket', version='1.1')
self.assertStatus(200)
self.assertEqual(len(data), 1)
self.assertIn('teuth-test-bucket', data)
# List all buckets with stats.
data = self._get('/api/rgw/bucket?stats=true', version='1.1')
self.assertStatus(200)
self.assertEqual(len(data), 1)
self.assertSchema(data[0], JObj(sub_elems={
'bid': JLeaf(str),
'bucket': JLeaf(str),
'bucket_quota': JObj(sub_elems={}, allow_unknown=True),
'id': JLeaf(str),
'owner': JLeaf(str),
'usage': JObj(sub_elems={}, allow_unknown=True),
'tenant': JLeaf(str),
}, allow_unknown=True))
# List all bucket names without stats.
data = self._get('/api/rgw/bucket?stats=false', version='1.1')
self.assertStatus(200)
self.assertEqual(data, ['teuth-test-bucket'])
# Get the bucket.
data = self._get('/api/rgw/bucket/teuth-test-bucket')
self.assertStatus(200)
self.assertSchema(data, JObj(sub_elems={
'id': JLeaf(str),
'bid': JLeaf(str),
'tenant': JLeaf(str),
'bucket': JLeaf(str),
'bucket_quota': JObj(sub_elems={}, allow_unknown=True),
'owner': JLeaf(str),
'mfa_delete': JLeaf(str),
'usage': JObj(sub_elems={}, allow_unknown=True),
'versioning': JLeaf(str)
}, allow_unknown=True))
self.assertEqual(data['bucket'], 'teuth-test-bucket')
self.assertEqual(data['owner'], 'admin')
self.assertEqual(data['placement_rule'], 'default-placement')
self.assertEqual(data['versioning'], 'Suspended')
# Update bucket: change owner, enable versioning.
self._put(
'/api/rgw/bucket/teuth-test-bucket',
params={
'bucket_id': data['id'],
'uid': 'teuth-test-user',
'versioning_state': 'Enabled'
})
self.assertStatus(200)
data = self._get('/api/rgw/bucket/teuth-test-bucket')
self.assertStatus(200)
self.assertSchema(data, JObj(sub_elems={
'owner': JLeaf(str),
'bid': JLeaf(str),
'tenant': JLeaf(str)
}, allow_unknown=True))
self.assertEqual(data['owner'], 'teuth-test-user')
self.assertEqual(data['versioning'], 'Enabled')
# Update bucket: enable MFA Delete.
self._put(
'/api/rgw/bucket/teuth-test-bucket',
params={
'bucket_id': data['id'],
'uid': 'teuth-test-user',
'versioning_state': 'Enabled',
'mfa_delete': 'Enabled',
'mfa_token_serial': self._mfa_token_serial,
'mfa_token_pin': self._get_mfa_token_pin()
})
self.assertStatus(200)
data = self._get('/api/rgw/bucket/teuth-test-bucket')
self.assertStatus(200)
self.assertEqual(data['versioning'], 'Enabled')
self.assertEqual(data['mfa_delete'], 'Enabled')
# Update bucket: disable versioning & MFA Delete.
time.sleep(self._mfa_token_time_step * 3) # Required to get new TOTP pin.
self._put(
'/api/rgw/bucket/teuth-test-bucket',
params={
'bucket_id': data['id'],
'uid': 'teuth-test-user',
'versioning_state': 'Suspended',
'mfa_delete': 'Disabled',
'mfa_token_serial': self._mfa_token_serial,
'mfa_token_pin': self._get_mfa_token_pin()
})
self.assertStatus(200)
data = self._get('/api/rgw/bucket/teuth-test-bucket')
self.assertStatus(200)
self.assertEqual(data['versioning'], 'Suspended')
self.assertEqual(data['mfa_delete'], 'Disabled')
# Delete the bucket.
self._delete('/api/rgw/bucket/teuth-test-bucket')
self.assertStatus(204)
data = self._get('/api/rgw/bucket', version='1.1')
self.assertStatus(200)
self.assertEqual(len(data), 0)
def test_crud_w_tenant(self):
# Create a new bucket. The tenant of the user is used when
# the bucket is created.
self._post(
'/api/rgw/bucket',
params={
'bucket': 'teuth-test-bucket',
'uid': 'testx$teuth-test-user',
'zonegroup': 'default',
'placement_target': 'default-placement'
})
self.assertStatus(201)
# It's not possible to validate the result because the RGW Admin
# OPS API does not return a result object when a tenanted bucket
# is created.
data = self.jsonBody()
self.assertIsNone(data)
# List all buckets.
data = self._get('/api/rgw/bucket', version='1.1')
self.assertStatus(200)
self.assertEqual(len(data), 1)
self.assertIn('testx/teuth-test-bucket', data)
def _verify_tenant_bucket(bucket, tenant, uid):
full_bucket_name = '{}/{}'.format(tenant, bucket)
_data = self._get('/api/rgw/bucket/{}'.format(
parse.quote_plus(full_bucket_name)))
self.assertStatus(200)
self.assertSchema(_data, JObj(sub_elems={
'owner': JLeaf(str),
'bucket': JLeaf(str),
'tenant': JLeaf(str),
'bid': JLeaf(str)
}, allow_unknown=True))
self.assertEqual(_data['owner'], '{}${}'.format(tenant, uid))
self.assertEqual(_data['bucket'], bucket)
self.assertEqual(_data['tenant'], tenant)
self.assertEqual(_data['bid'], full_bucket_name)
return _data
# Get the bucket.
data = _verify_tenant_bucket('teuth-test-bucket', 'testx', 'teuth-test-user')
self.assertEqual(data['placement_rule'], 'default-placement')
self.assertEqual(data['versioning'], 'Suspended')
# Update bucket: different user with different tenant, enable versioning.
self._put(
'/api/rgw/bucket/{}'.format(
parse.quote_plus('testx/teuth-test-bucket')),
params={
'bucket_id': data['id'],
'uid': 'testx2$teuth-test-user2',
'versioning_state': 'Enabled'
})
data = _verify_tenant_bucket('teuth-test-bucket', 'testx2', 'teuth-test-user2')
self.assertEqual(data['versioning'], 'Enabled')
# Change owner to a non-tenanted user
self._put(
'/api/rgw/bucket/{}'.format(
parse.quote_plus('testx2/teuth-test-bucket')),
params={
'bucket_id': data['id'],
'uid': 'admin'
})
self.assertStatus(200)
data = self._get('/api/rgw/bucket/teuth-test-bucket')
self.assertStatus(200)
self.assertIn('owner', data)
self.assertEqual(data['owner'], 'admin')
self.assertEqual(data['tenant'], '')
self.assertEqual(data['bucket'], 'teuth-test-bucket')
self.assertEqual(data['bid'], 'teuth-test-bucket')
self.assertEqual(data['versioning'], 'Enabled')
# Change owner back to tenanted user, suspend versioning.
self._put(
'/api/rgw/bucket/teuth-test-bucket',
params={
'bucket_id': data['id'],
'uid': 'testx$teuth-test-user',
'versioning_state': 'Suspended'
})
self.assertStatus(200)
data = _verify_tenant_bucket('teuth-test-bucket', 'testx', 'teuth-test-user')
self.assertEqual(data['versioning'], 'Suspended')
# Delete the bucket.
self._delete('/api/rgw/bucket/{}'.format(
parse.quote_plus('testx/teuth-test-bucket')))
self.assertStatus(204)
data = self._get('/api/rgw/bucket', version='1.1')
self.assertStatus(200)
self.assertEqual(len(data), 0)
def test_crud_w_locking(self):
# Create
self._post('/api/rgw/bucket',
params={
'bucket': 'teuth-test-bucket',
'uid': 'teuth-test-user',
'zonegroup': 'default',
'placement_target': 'default-placement',
'lock_enabled': 'true',
'lock_mode': 'GOVERNANCE',
'lock_retention_period_days': '0',
'lock_retention_period_years': '1'
})
self.assertStatus(201)
# Read
data = self._get('/api/rgw/bucket/teuth-test-bucket')
self.assertStatus(200)
self.assertSchema(
data,
JObj(sub_elems={
'lock_enabled': JLeaf(bool),
'lock_mode': JLeaf(str),
'lock_retention_period_days': JLeaf(int),
'lock_retention_period_years': JLeaf(int)
},
allow_unknown=True))
self.assertTrue(data['lock_enabled'])
self.assertEqual(data['lock_mode'], 'GOVERNANCE')
self.assertEqual(data['lock_retention_period_days'], 0)
self.assertEqual(data['lock_retention_period_years'], 1)
# Update
self._put('/api/rgw/bucket/teuth-test-bucket',
params={
'bucket_id': data['id'],
'uid': 'teuth-test-user',
'lock_mode': 'COMPLIANCE',
'lock_retention_period_days': '15',
'lock_retention_period_years': '0'
})
self.assertStatus(200)
data = self._get('/api/rgw/bucket/teuth-test-bucket')
self.assertTrue(data['lock_enabled'])
self.assertEqual(data['lock_mode'], 'COMPLIANCE')
self.assertEqual(data['lock_retention_period_days'], 15)
self.assertEqual(data['lock_retention_period_years'], 0)
self.assertStatus(200)
# Update: Disabling bucket versioning should fail if object locking enabled
self._put('/api/rgw/bucket/teuth-test-bucket',
params={
'bucket_id': data['id'],
'uid': 'teuth-test-user',
'versioning_state': 'Suspended'
})
self.assertStatus(409)
# Delete
self._delete('/api/rgw/bucket/teuth-test-bucket')
self.assertStatus(204)
class RgwDaemonTest(RgwTestCase):
AUTH_ROLES = ['rgw-manager']
@DashboardTestCase.RunAs('test', 'test', [{
'rgw': ['create', 'update', 'delete']
}])
def test_read_access_permissions(self):
self._get('/api/rgw/daemon')
self.assertStatus(403)
self._get('/api/rgw/daemon/id')
self.assertStatus(403)
def test_list(self):
data = self._get('/api/rgw/daemon')
self.assertStatus(200)
self.assertEqual(len(data), 1)
data = data[0]
self.assertIn('id', data)
self.assertIn('version', data)
self.assertIn('server_hostname', data)
self.assertIn('zonegroup_name', data)
self.assertIn('zone_name', data)
self.assertIn('port', data)
def test_get(self):
data = self._get('/api/rgw/daemon')
self.assertStatus(200)
data = self._get('/api/rgw/daemon/{}'.format(data[0]['id']))
self.assertStatus(200)
self.assertIn('rgw_metadata', data)
self.assertIn('rgw_id', data)
self.assertIn('rgw_status', data)
self.assertTrue(data['rgw_metadata'])
def test_status(self):
data = self._get('/ui-api/rgw/status')
self.assertStatus(200)
self.assertIn('available', data)
self.assertIn('message', data)
self.assertTrue(data['available'])
class RgwUserTest(RgwTestCase):
AUTH_ROLES = ['rgw-manager']
@classmethod
def setUpClass(cls):
super(RgwUserTest, cls).setUpClass()
def _assert_user_data(self, data):
self.assertSchema(data, JObj(sub_elems={
'caps': JList(JObj(sub_elems={}, allow_unknown=True)),
'display_name': JLeaf(str),
'email': JLeaf(str),
'keys': JList(JObj(sub_elems={}, allow_unknown=True)),
'max_buckets': JLeaf(int),
'subusers': JList(JLeaf(str)),
'suspended': JLeaf(int),
'swift_keys': JList(JObj(sub_elems={}, allow_unknown=True)),
'tenant': JLeaf(str),
'user_id': JLeaf(str),
'uid': JLeaf(str)
}, allow_unknown=True))
self.assertGreaterEqual(len(data['keys']), 1)
def test_get(self):
data = self.get_rgw_user('admin')
self.assertStatus(200)
self._assert_user_data(data)
self.assertEqual(data['user_id'], 'admin')
self.assertTrue(data['stats'])
self.assertIsInstance(data['stats'], dict)
# Test without stats.
data = self.get_rgw_user('admin', False)
self.assertStatus(200)
self._assert_user_data(data)
self.assertEqual(data['user_id'], 'admin')
def test_list(self):
data = self._get('/api/rgw/user')
self.assertStatus(200)
self.assertGreaterEqual(len(data), 1)
self.assertIn('admin', data)
def test_get_emails(self):
data = self._get('/api/rgw/user/get_emails')
self.assertStatus(200)
self.assertSchema(data, JList(str))
def test_create_get_update_delete(self):
# Create a new user.
self._post('/api/rgw/user', params={
'uid': 'teuth-test-user',
'display_name': 'display name'
})
self.assertStatus(201)
data = self.jsonBody()
self._assert_user_data(data)
self.assertEqual(data['user_id'], 'teuth-test-user')
self.assertEqual(data['display_name'], 'display name')
# Get the user.
data = self.get_rgw_user('teuth-test-user')
self.assertStatus(200)
self._assert_user_data(data)
self.assertEqual(data['tenant'], '')
self.assertEqual(data['user_id'], 'teuth-test-user')
self.assertEqual(data['uid'], 'teuth-test-user')
# Update the user.
self._put(
'/api/rgw/user/teuth-test-user',
params={'display_name': 'new name'})
self.assertStatus(200)
data = self.jsonBody()
self._assert_user_data(data)
self.assertEqual(data['display_name'], 'new name')
# Delete the user.
self._delete('/api/rgw/user/teuth-test-user')
self.assertStatus(204)
self.get_rgw_user('teuth-test-user')
self.assertStatus(500)
resp = self.jsonBody()
self.assertIn('detail', resp)
self.assertIn('failed request with status code 404', resp['detail'])
self.assertIn('"Code":"NoSuchUser"', resp['detail'])
self.assertIn('"HostId"', resp['detail'])
self.assertIn('"RequestId"', resp['detail'])
def test_create_get_update_delete_w_tenant(self):
# Create a new user.
self._post(
'/api/rgw/user',
params={
'uid': 'test01$teuth-test-user',
'display_name': 'display name'
})
self.assertStatus(201)
data = self.jsonBody()
self._assert_user_data(data)
self.assertEqual(data['user_id'], 'teuth-test-user')
self.assertEqual(data['display_name'], 'display name')
# Get the user.
data = self.get_rgw_user('test01$teuth-test-user')
self.assertStatus(200)
self._assert_user_data(data)
self.assertEqual(data['tenant'], 'test01')
self.assertEqual(data['user_id'], 'teuth-test-user')
self.assertEqual(data['uid'], 'test01$teuth-test-user')
# Update the user.
self._put(
'/api/rgw/user/test01$teuth-test-user',
params={'display_name': 'new name'})
self.assertStatus(200)
data = self.jsonBody()
self._assert_user_data(data)
self.assertEqual(data['display_name'], 'new name')
# Delete the user.
self._delete('/api/rgw/user/test01$teuth-test-user')
self.assertStatus(204)
self.get_rgw_user('test01$teuth-test-user')
self.assertStatus(500)
resp = self.jsonBody()
self.assertIn('detail', resp)
self.assertIn('failed request with status code 404', resp['detail'])
self.assertIn('"Code":"NoSuchUser"', resp['detail'])
self.assertIn('"HostId"', resp['detail'])
self.assertIn('"RequestId"', resp['detail'])
class RgwUserCapabilityTest(RgwTestCase):
AUTH_ROLES = ['rgw-manager']
@classmethod
def setUpClass(cls):
cls.create_test_user = True
super(RgwUserCapabilityTest, cls).setUpClass()
def test_set(self):
self._post(
'/api/rgw/user/teuth-test-user/capability',
params={
'type': 'usage',
'perm': 'read'
})
self.assertStatus(201)
data = self.jsonBody()
self.assertEqual(len(data), 1)
data = data[0]
self.assertEqual(data['type'], 'usage')
self.assertEqual(data['perm'], 'read')
# Get the user data to validate the capabilities.
data = self.get_rgw_user('teuth-test-user')
self.assertStatus(200)
self.assertGreaterEqual(len(data['caps']), 1)
self.assertEqual(data['caps'][0]['type'], 'usage')
self.assertEqual(data['caps'][0]['perm'], 'read')
def test_delete(self):
self._delete(
'/api/rgw/user/teuth-test-user/capability',
params={
'type': 'metadata',
'perm': 'write'
})
self.assertStatus(204)
# Get the user data to validate the capabilities.
data = self.get_rgw_user('teuth-test-user')
self.assertStatus(200)
self.assertEqual(len(data['caps']), 0)
class RgwUserKeyTest(RgwTestCase):
AUTH_ROLES = ['rgw-manager']
@classmethod
def setUpClass(cls):
cls.create_test_user = True
super(RgwUserKeyTest, cls).setUpClass()
def test_create_s3(self):
self._post(
'/api/rgw/user/teuth-test-user/key',
params={
'key_type': 's3',
'generate_key': 'false',
'access_key': 'abc987',
'secret_key': 'aaabbbccc'
})
data = self.jsonBody()
self.assertStatus(201)
self.assertGreaterEqual(len(data), 3)
key = self.find_object_in_list('access_key', 'abc987', data)
self.assertIsInstance(key, object)
self.assertEqual(key['secret_key'], 'aaabbbccc')
def test_create_swift(self):
self._post(
'/api/rgw/user/teuth-test-user/key',
params={
'key_type': 'swift',
'subuser': 'teuth-test-subuser',
'generate_key': 'false',
'secret_key': 'xxxyyyzzz'
})
data = self.jsonBody()
self.assertStatus(201)
self.assertGreaterEqual(len(data), 2)
key = self.find_object_in_list('secret_key', 'xxxyyyzzz', data)
self.assertIsInstance(key, object)
def test_delete_s3(self):
self._delete(
'/api/rgw/user/teuth-test-user/key',
params={
'key_type': 's3',
'access_key': 'xyz123'
})
self.assertStatus(204)
def test_delete_swift(self):
self._delete(
'/api/rgw/user/teuth-test-user/key',
params={
'key_type': 'swift',
'subuser': 'teuth-test-user:teuth-test-subuser2'
})
self.assertStatus(204)
class RgwUserQuotaTest(RgwTestCase):
AUTH_ROLES = ['rgw-manager']
@classmethod
def setUpClass(cls):
cls.create_test_user = True
super(RgwUserQuotaTest, cls).setUpClass()
def _assert_quota(self, data):
self.assertIn('user_quota', data)
self.assertIn('max_objects', data['user_quota'])
self.assertIn('enabled', data['user_quota'])
self.assertIn('max_size_kb', data['user_quota'])
self.assertIn('max_size', data['user_quota'])
self.assertIn('bucket_quota', data)
self.assertIn('max_objects', data['bucket_quota'])
self.assertIn('enabled', data['bucket_quota'])
self.assertIn('max_size_kb', data['bucket_quota'])
self.assertIn('max_size', data['bucket_quota'])
def test_get_quota(self):
data = self._get('/api/rgw/user/teuth-test-user/quota')
self.assertStatus(200)
self._assert_quota(data)
def test_set_user_quota(self):
self._put(
'/api/rgw/user/teuth-test-user/quota',
params={
'quota_type': 'user',
'enabled': 'true',
'max_size_kb': 2048,
'max_objects': 101
})
self.assertStatus(200)
data = self._get('/api/rgw/user/teuth-test-user/quota')
self.assertStatus(200)
self._assert_quota(data)
self.assertEqual(data['user_quota']['max_objects'], 101)
self.assertTrue(data['user_quota']['enabled'])
self.assertEqual(data['user_quota']['max_size_kb'], 2048)
def test_set_bucket_quota(self):
self._put(
'/api/rgw/user/teuth-test-user/quota',
params={
'quota_type': 'bucket',
'enabled': 'false',
'max_size_kb': 4096,
'max_objects': 2000
})
self.assertStatus(200)
data = self._get('/api/rgw/user/teuth-test-user/quota')
self.assertStatus(200)
self._assert_quota(data)
self.assertEqual(data['bucket_quota']['max_objects'], 2000)
self.assertFalse(data['bucket_quota']['enabled'])
self.assertEqual(data['bucket_quota']['max_size_kb'], 4096)
class RgwUserSubuserTest(RgwTestCase):
AUTH_ROLES = ['rgw-manager']
@classmethod
def setUpClass(cls):
cls.create_test_user = True
super(RgwUserSubuserTest, cls).setUpClass()
def test_create_swift(self):
self._post(
'/api/rgw/user/teuth-test-user/subuser',
params={
'subuser': 'tux',
'access': 'readwrite',
'key_type': 'swift'
})
self.assertStatus(200)
data = self.jsonBody()
subuser = self.find_object_in_list('id', 'teuth-test-user:tux', data)
self.assertIsInstance(subuser, object)
self.assertEqual(subuser['permissions'], 'read-write')
# Get the user data to validate the keys.
data = self.get_rgw_user('teuth-test-user')
self.assertStatus(200)
key = self.find_object_in_list('user', 'teuth-test-user:tux',
data['swift_keys'])
self.assertIsInstance(key, object)
def test_create_s3(self):
self._post(
'/api/rgw/user/teuth-test-user/subuser',
params={
'subuser': 'hugo',
'access': 'write',
'generate_secret': 'false',
'access_key': 'yyy',
'secret_key': 'xxx'
})
self.assertStatus(200)
data = self.jsonBody()
subuser = self.find_object_in_list('id', 'teuth-test-user:hugo', data)
self.assertIsInstance(subuser, object)
self.assertEqual(subuser['permissions'], 'write')
# Get the user data to validate the keys.
data = self.get_rgw_user('teuth-test-user')
self.assertStatus(200)
key = self.find_object_in_list('user', 'teuth-test-user:hugo',
data['keys'])
self.assertIsInstance(key, object)
self.assertEqual(key['secret_key'], 'xxx')
def test_delete_w_purge(self):
self._delete(
'/api/rgw/user/teuth-test-user/subuser/teuth-test-subuser2')
self.assertStatus(204)
# Get the user data to check that the keys don't exist anymore.
data = self.get_rgw_user('teuth-test-user')
self.assertStatus(200)
key = self.find_object_in_list(
'user', 'teuth-test-user:teuth-test-subuser2', data['swift_keys'])
self.assertIsNone(key)
def test_delete_wo_purge(self):
self._delete(
'/api/rgw/user/teuth-test-user/subuser/teuth-test-subuser',
params={'purge_keys': 'false'})
self.assertStatus(204)
# Get the user data to check whether the keys still exist.
data = self.get_rgw_user('teuth-test-user')
self.assertStatus(200)
key = self.find_object_in_list(
'user', 'teuth-test-user:teuth-test-subuser', data['keys'])
self.assertIsInstance(key, object)
ceph-main/qa/tasks/mgr/dashboard/test_role.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .helper import DashboardTestCase
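# CRUD and validation tests for dashboard roles, including the protection
# of the built-in system roles.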
class RoleTest(DashboardTestCase):
@classmethod
def _create_role(cls, name=None, description=None, scopes_permissions=None):
data = {}
if name:
data['name'] = name
if description:
data['description'] = description
if scopes_permissions:
data['scopes_permissions'] = scopes_permissions
cls._post('/api/role', data)
def test_crud_role(self):
self._create_role(name='role1',
description='Description 1',
scopes_permissions={'osd': ['read']})
self.assertStatus(201)
self.assertJsonBody({
'name': 'role1',
'description': 'Description 1',
'scopes_permissions': {'osd': ['read']},
'system': False
})
self._get('/api/role/role1')
self.assertStatus(200)
self.assertJsonBody({
'name': 'role1',
'description': 'Description 1',
'scopes_permissions': {'osd': ['read']},
'system': False
})
self._put('/api/role/role1', {
'description': 'Description 2',
'scopes_permissions': {'osd': ['read', 'update']},
})
self.assertStatus(200)
self.assertJsonBody({
'name': 'role1',
'description': 'Description 2',
'scopes_permissions': {'osd': ['read', 'update']},
'system': False
})
self._delete('/api/role/role1')
self.assertStatus(204)
def test_list_roles(self):
roles = self._get('/api/role')
self.assertStatus(200)
self.assertGreaterEqual(len(roles), 1)
for role in roles:
self.assertIn('name', role)
self.assertIn('description', role)
self.assertIn('scopes_permissions', role)
self.assertIn('system', role)
def test_get_role_does_not_exist(self):
self._get('/api/role/role2')
self.assertStatus(404)
def test_create_role_already_exists(self):
self._create_role(name='read-only',
description='Description 1',
scopes_permissions={'osd': ['read']})
self.assertStatus(400)
self.assertError(code='role_already_exists',
component='role')
def test_create_role_no_name(self):
self._create_role(description='Description 1',
scopes_permissions={'osd': ['read']})
self.assertStatus(400)
self.assertError(code='name_required',
component='role')
def test_create_role_invalid_scope(self):
self._create_role(name='role1',
description='Description 1',
scopes_permissions={'invalid-scope': ['read']})
self.assertStatus(400)
self.assertError(code='invalid_scope',
component='role')
def test_create_role_invalid_permission(self):
self._create_role(name='role1',
description='Description 1',
scopes_permissions={'osd': ['invalid-permission']})
self.assertStatus(400)
self.assertError(code='invalid_permission',
component='role')
def test_delete_role_does_not_exist(self):
self._delete('/api/role/role2')
self.assertStatus(404)
def test_delete_system_role(self):
self._delete('/api/role/read-only')
self.assertStatus(400)
self.assertError(code='cannot_delete_system_role',
component='role')
def test_delete_role_associated_with_user(self):
self.create_user("user", "user", ['read-only'])
self._create_role(name='role1',
description='Description 1',
scopes_permissions={'user': ['create', 'read', 'update', 'delete']})
self.assertStatus(201)
self._put('/api/user/user', {'roles': ['role1']})
self.assertStatus(200)
self._delete('/api/role/role1')
self.assertStatus(400)
self.assertError(code='role_is_associated_with_user',
component='role')
self._put('/api/user/user', {'roles': ['administrator']})
self.assertStatus(200)
self._delete('/api/role/role1')
self.assertStatus(204)
self.delete_user("user")
def test_update_role_does_not_exist(self):
self._put('/api/role/role2', {})
self.assertStatus(404)
def test_update_system_role(self):
self._put('/api/role/read-only', {})
self.assertStatus(400)
self.assertError(code='cannot_update_system_role',
component='role')
def test_clone_role(self):
self._post('/api/role/read-only/clone', {'new_name': 'foo'})
self.assertStatus(201)
self._delete('/api/role/foo')
ceph-main/qa/tasks/mgr/dashboard/test_settings.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .helper import DashboardTestCase, JAny, JList, JObj
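# Dashboard settings API tests; setUp snapshots the current settings and
# tearDown restores them so other suites are not affected.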
class SettingsTest(DashboardTestCase):
def setUp(self):
super(SettingsTest, self).setUp()
self.settings = self._get('/api/settings')
def tearDown(self):
self._put(
'/api/settings',
{setting['name']: setting['value']
for setting in self.settings})
def test_list_settings(self):
settings = self._get('/api/settings')
self.assertGreater(len(settings), 10)
self.assertSchema(
settings,
JList(
JObj({
'default': JAny(none=False),
'name': str,
'type': str,
'value': JAny(none=False)
})))
self.assertStatus(200)
def test_get_setting(self):
setting = self._get('/api/settings/rgw-api-access-key')
self.assertSchema(
setting,
JObj({
'default': JAny(none=False),
'name': str,
'type': str,
'value': JAny(none=False)
}))
self.assertStatus(200)
def test_set_setting(self):
self._put('/api/settings/rgw-api-access-key', {'value': 'foo'})
self.assertStatus(200)
value = self._get('/api/settings/rgw-api-access-key')['value']
self.assertEqual('foo', value)
def test_bulk_set(self):
self._put('/api/settings', {
'RGW_API_ACCESS_KEY': 'dummy-key',
'RGW_API_SECRET_KEY': 'dummy-secret',
})
self.assertStatus(200)
access_key = self._get('/api/settings/rgw-api-access-key')['value']
self.assertStatus(200)
self.assertEqual('dummy-key', access_key)
secret_key = self._get('/api/settings/rgw-api-secret-key')['value']
self.assertStatus(200)
self.assertEqual('dummy-secret', secret_key)
ceph-main/qa/tasks/mgr/dashboard/test_summary.py
from __future__ import absolute_import
from .helper import DashboardTestCase
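# /api/summary tests: verify the payload for an administrator and that the
# rbd_mirroring section is omitted for a user without that permission.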
class SummaryTest(DashboardTestCase):
CEPHFS = True
def test_summary(self):
data = self._get("/api/summary")
self.assertStatus(200)
self.assertIn('health_status', data)
self.assertIn('mgr_id', data)
self.assertIn('have_mon_connection', data)
self.assertIn('rbd_mirroring', data)
self.assertIn('executing_tasks', data)
self.assertIn('finished_tasks', data)
self.assertIn('version', data)
self.assertIsNotNone(data['health_status'])
self.assertIsNotNone(data['mgr_id'])
self.assertIsNotNone(data['have_mon_connection'])
self.assertEqual(data['rbd_mirroring'], {'errors': 0, 'warnings': 0})
@DashboardTestCase.RunAs('test', 'test', ['pool-manager'])
def test_summary_permissions(self):
data = self._get("/api/summary")
self.assertStatus(200)
self.assertIn('health_status', data)
self.assertIn('mgr_id', data)
self.assertIn('have_mon_connection', data)
self.assertNotIn('rbd_mirroring', data)
self.assertIn('executing_tasks', data)
self.assertIn('finished_tasks', data)
self.assertIn('version', data)
self.assertIsNotNone(data['health_status'])
self.assertIsNotNone(data['mgr_id'])
self.assertIsNotNone(data['have_mon_connection'])
ceph-main/qa/tasks/mgr/dashboard/test_telemetry.py
from .helper import DashboardTestCase, JObj
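# Telemetry module tests: the original enabled/disabled state is restored in
# tearDownClass, and reports are tagged with the 'ceph-qa' organization so
# they can be filtered out server-side.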
class TelemetryTest(DashboardTestCase):
pre_enabled_status = True
@classmethod
def setUpClass(cls):
super(TelemetryTest, cls).setUpClass()
data = cls._get('/api/mgr/module/telemetry')
cls.pre_enabled_status = data['enabled']
# identify ourselves so we can filter these reports out on the server side
cls._put(
'/api/settings',
{
'mgr/telemetry/channel_ident': True,
'mgr/telemetry/organization': 'ceph-qa',
}
)
@classmethod
def tearDownClass(cls):
if cls.pre_enabled_status:
cls._enable_module()
else:
cls._disable_module()
super(TelemetryTest, cls).tearDownClass()
def test_disable_module(self):
self._enable_module()
self._check_telemetry_enabled(True)
self._disable_module()
self._check_telemetry_enabled(False)
def test_enable_module_correct_license(self):
self._disable_module()
self._check_telemetry_enabled(False)
self._put('/api/telemetry', {
'enable': True,
'license_name': 'sharing-1-0'
})
self.assertStatus(200)
self._check_telemetry_enabled(True)
def test_enable_module_empty_license(self):
self._disable_module()
self._check_telemetry_enabled(False)
self._put('/api/telemetry', {
'enable': True,
'license_name': ''
})
self.assertStatus(400)
self.assertError(code='telemetry_enable_license_missing')
self._check_telemetry_enabled(False)
def test_enable_module_invalid_license(self):
self._disable_module()
self._check_telemetry_enabled(False)
self._put('/api/telemetry', {
'enable': True,
'license_name': 'invalid-license'
})
self.assertStatus(400)
self.assertError(code='telemetry_enable_license_missing')
self._check_telemetry_enabled(False)
def test_get_report(self):
self._enable_module()
data = self._get('/api/telemetry/report')
self.assertStatus(200)
schema = JObj({
'report': JObj({}, allow_unknown=True),
'device_report': JObj({}, allow_unknown=True)
})
self.assertSchema(data, schema)
@classmethod
def _enable_module(cls):
cls._put('/api/telemetry', {
'enable': True,
'license_name': 'sharing-1-0'
})
@classmethod
def _disable_module(cls):
cls._put('/api/telemetry', {
'enable': False
})
def _check_telemetry_enabled(self, enabled):
data = self._get('/api/mgr/module/telemetry')
self.assertStatus(200)
self.assertEqual(data['enabled'], enabled)
ceph-main/qa/tasks/mgr/dashboard/test_user.py
# -*- coding: utf-8 -*-
# pylint: disable=too-many-public-methods
from __future__ import absolute_import
import time
from datetime import datetime, timedelta
from .helper import DashboardTestCase
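# Dashboard user management tests: user CRUD, password policy checks,
# password expiration and the pwdUpdateRequired workflow.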
class UserTest(DashboardTestCase):
@classmethod
def setUpClass(cls):
super(UserTest, cls).setUpClass()
cls._ceph_cmd(['dashboard', 'set-pwd-policy-enabled', 'true'])
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-length-enabled', 'true'])
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-oldpwd-enabled', 'true'])
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-username-enabled', 'true'])
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-exclusion-list-enabled', 'true'])
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-complexity-enabled', 'true'])
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-sequential-chars-enabled', 'true'])
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-repetitive-chars-enabled', 'true'])
@classmethod
def tearDownClass(cls):
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-username-enabled', 'false'])
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-exclusion-list-enabled', 'false'])
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-complexity-enabled', 'false'])
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-sequential-chars-enabled', 'false'])
cls._ceph_cmd(['dashboard', 'set-pwd-policy-check-repetitive-chars-enabled', 'false'])
super(UserTest, cls).tearDownClass()
@classmethod
def _create_user(cls, username=None, password=None, name=None, email=None, roles=None,
enabled=True, pwd_expiration_date=None, pwd_update_required=False):
data = {}
if username:
data['username'] = username
if password:
data['password'] = password
if name:
data['name'] = name
if email:
data['email'] = email
if roles:
data['roles'] = roles
if pwd_expiration_date:
data['pwdExpirationDate'] = pwd_expiration_date
data['pwdUpdateRequired'] = pwd_update_required
data['enabled'] = enabled
cls._post("/api/user", data)
@classmethod
def _reset_login_to_admin(cls, username=None):
cls.logout()
if username:
cls.delete_user(username)
cls.login('admin', 'admin')
def test_crud_user(self):
self._create_user(username='user1',
password='mypassword10#',
name='My Name',
email='[email protected]',
roles=['administrator'])
self.assertStatus(201)
user = self.jsonBody()
self._get('/api/user/user1')
self.assertStatus(200)
self.assertJsonBody({
'username': 'user1',
'name': 'My Name',
'email': '[email protected]',
'roles': ['administrator'],
'lastUpdate': user['lastUpdate'],
'enabled': True,
'pwdExpirationDate': None,
'pwdUpdateRequired': False
})
self._put('/api/user/user1', {
'name': 'My New Name',
'email': '[email protected]',
'roles': ['block-manager'],
})
self.assertStatus(200)
user = self.jsonBody()
self.assertJsonBody({
'username': 'user1',
'name': 'My New Name',
'email': '[email protected]',
'roles': ['block-manager'],
'lastUpdate': user['lastUpdate'],
'enabled': True,
'pwdExpirationDate': None,
'pwdUpdateRequired': False
})
self._delete('/api/user/user1')
self.assertStatus(204)
def test_crd_disabled_user(self):
self._create_user(username='klara',
password='mypassword10#',
name='Klara Musterfrau',
email='[email protected]',
roles=['administrator'],
enabled=False)
self.assertStatus(201)
user = self.jsonBody()
# Restart dashboard module.
self._unload_module('dashboard')
self._load_module('dashboard')
time.sleep(10)
self._get('/api/user/klara')
self.assertStatus(200)
self.assertJsonBody({
'username': 'klara',
'name': 'Klara Musterfrau',
'email': '[email protected]',
'roles': ['administrator'],
'lastUpdate': user['lastUpdate'],
'enabled': False,
'pwdExpirationDate': None,
'pwdUpdateRequired': False
})
self._delete('/api/user/klara')
self.assertStatus(204)
def test_list_users(self):
self._get('/api/user')
self.assertStatus(200)
user = self.jsonBody()
self.assertEqual(len(user), 1)
user = user[0]
self.assertJsonBody([{
'username': 'admin',
'name': None,
'email': None,
'roles': ['administrator'],
'lastUpdate': user['lastUpdate'],
'enabled': True,
'pwdExpirationDate': None,
'pwdUpdateRequired': False
}])
def test_create_user_already_exists(self):
self._create_user(username='admin',
password='mypassword10#',
name='administrator',
email='[email protected]',
roles=['administrator'])
self.assertStatus(400)
self.assertError(code='username_already_exists',
component='user')
def test_create_user_invalid_role(self):
self._create_user(username='user1',
password='mypassword10#',
name='My Name',
email='[email protected]',
roles=['invalid-role'])
self.assertStatus(400)
self.assertError(code='role_does_not_exist',
component='user')
def test_create_user_invalid_chars_in_name(self):
self._create_user(username='userö',
password='mypassword10#',
name='administrator',
email='[email protected]',
roles=['administrator'])
self.assertStatus(400)
self.assertError(code='ceph_type_not_valid',
component='user')
def test_delete_user_does_not_exist(self):
self._delete('/api/user/user2')
self.assertStatus(404)
@DashboardTestCase.RunAs('test', 'test', [{'user': ['create', 'read', 'update', 'delete']}])
def test_delete_current_user(self):
self._delete('/api/user/test')
self.assertStatus(400)
self.assertError(code='cannot_delete_current_user',
component='user')
@DashboardTestCase.RunAs('test', 'test', [{'user': ['create', 'read', 'update', 'delete']}])
def test_disable_current_user(self):
self._put('/api/user/test', {'enabled': False})
self.assertStatus(400)
self.assertError(code='cannot_disable_current_user',
component='user')
def test_update_user_does_not_exist(self):
self._put('/api/user/user2', {'name': 'My New Name'})
self.assertStatus(404)
def test_update_user_invalid_role(self):
self._put('/api/user/admin', {'roles': ['invalid-role']})
self.assertStatus(400)
self.assertError(code='role_does_not_exist',
component='user')
def test_change_password_from_other_user(self):
self._post('/api/user/test2/change_password', {
'old_password': 'abc',
'new_password': 'xyz'
})
self.assertStatus(400)
self.assertError(code='invalid_user_context', component='user')
def test_change_password_old_not_match(self):
self._post('/api/user/admin/change_password', {
'old_password': 'foo',
'new_password': 'bar'
})
self.assertStatus(400)
self.assertError(code='invalid_old_password', component='user')
def test_change_password_as_old_password(self):
self.create_user('test1', 'mypassword10#', ['read-only'], force_password=False)
self.login('test1', 'mypassword10#')
self._post('/api/user/test1/change_password', {
'old_password': 'mypassword10#',
'new_password': 'mypassword10#'
})
self.assertStatus(400)
self.assertError('password_policy_validation_failed', 'user',
'Password must not be the same as the previous one.')
self._reset_login_to_admin('test1')
def test_change_password_contains_username(self):
self.create_user('test1', 'mypassword10#', ['read-only'], force_password=False)
self.login('test1', 'mypassword10#')
self._post('/api/user/test1/change_password', {
'old_password': 'mypassword10#',
'new_password': 'mypasstest1@#'
})
self.assertStatus(400)
self.assertError('password_policy_validation_failed', 'user',
'Password must not contain username.')
self._reset_login_to_admin('test1')
def test_change_password_contains_forbidden_words(self):
self.create_user('test1', 'mypassword10#', ['read-only'], force_password=False)
self.login('test1', 'mypassword10#')
self._post('/api/user/test1/change_password', {
'old_password': 'mypassword10#',
'new_password': 'mypassOSD01'
})
self.assertStatus(400)
self.assertError('password_policy_validation_failed', 'user',
'Password must not contain the keyword "OSD".')
self._reset_login_to_admin('test1')
def test_change_password_contains_sequential_characters(self):
self.create_user('test1', 'mypassword10#', ['read-only'], force_password=False)
self.login('test1', 'mypassword10#')
self._post('/api/user/test1/change_password', {
'old_password': 'mypassword10#',
'new_password': 'mypass123456!@$'
})
self.assertStatus(400)
self.assertError('password_policy_validation_failed', 'user',
'Password must not contain sequential characters.')
self._reset_login_to_admin('test1')
def test_change_password_contains_repetetive_characters(self):
self.create_user('test1', 'mypassword10#', ['read-only'], force_password=False)
self.login('test1', 'mypassword10#')
self._post('/api/user/test1/change_password', {
'old_password': 'mypassword10#',
'new_password': 'aaaaA1@!#'
})
self.assertStatus(400)
self.assertError('password_policy_validation_failed', 'user',
'Password must not contain repetitive characters.')
self._reset_login_to_admin('test1')
@DashboardTestCase.RunAs('test1', 'mypassword10#', ['read-only'], False)
def test_change_password(self):
self._post('/api/user/test1/change_password', {
'old_password': 'mypassword10#',
'new_password': 'newpassword01#'
})
self.assertStatus(200)
self.logout()
self._post('/api/auth', {'username': 'test1', 'password': 'mypassword10#'})
self.assertStatus(400)
self.assertError(code='invalid_credentials', component='auth')
def test_create_user_password_cli(self):
exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-create',
'test1'],
'mypassword10#',
return_exit_code=True)
self.assertEqual(exitcode, 0)
self.delete_user('test1')
@DashboardTestCase.RunAs('test2', 'foo_bar_10#', force_password=False, login=False)
def test_change_user_password_cli(self):
exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password',
'test2'],
'foo_new-password01#',
return_exit_code=True)
self.assertEqual(exitcode, 0)
def test_create_user_password_force_cli(self):
exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-create',
'--force-password', 'test11'],
'bar',
return_exit_code=True)
self.assertEqual(exitcode, 0)
self.delete_user('test11')
@DashboardTestCase.RunAs('test22', 'foo_bar_10#', force_password=False, login=False)
def test_change_user_password_force_cli(self):
exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password',
'--force-password', 'test22'],
'bar',
return_exit_code=True)
self.assertEqual(exitcode, 0)
def test_create_user_password_cli_fail(self):
exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-create',
'test3'],
'foo',
return_exit_code=True)
self.assertNotEqual(exitcode, 0)
@DashboardTestCase.RunAs('test4', 'x1z_tst+_10#', force_password=False, login=False)
def test_change_user_password_cli_fail(self):
exitcode = self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password',
'test4'],
'bar',
return_exit_code=True)
self.assertNotEqual(exitcode, 0)
def test_create_user_with_pwd_expiration_date(self):
future_date = datetime.utcnow() + timedelta(days=10)
future_date = int(time.mktime(future_date.timetuple()))
self._create_user(username='user1',
password='mypassword10#',
name='My Name',
email='[email protected]',
roles=['administrator'],
pwd_expiration_date=future_date)
self.assertStatus(201)
user = self.jsonBody()
self._get('/api/user/user1')
self.assertStatus(200)
self.assertJsonBody({
'username': 'user1',
'name': 'My Name',
'email': '[email protected]',
'roles': ['administrator'],
'lastUpdate': user['lastUpdate'],
'enabled': True,
'pwdExpirationDate': future_date,
'pwdUpdateRequired': False
})
self._delete('/api/user/user1')
def test_create_with_pwd_expiration_date_not_valid(self):
past_date = datetime.utcnow() - timedelta(days=10)
past_date = int(time.mktime(past_date.timetuple()))
self._create_user(username='user1',
password='mypassword10#',
name='My Name',
email='[email protected]',
roles=['administrator'],
pwd_expiration_date=past_date)
self.assertStatus(400)
self.assertError(code='pwd_past_expiration_date', component='user')
def test_create_with_default_expiration_date(self):
future_date_1 = datetime.utcnow() + timedelta(days=9)
future_date_1 = int(time.mktime(future_date_1.timetuple()))
future_date_2 = datetime.utcnow() + timedelta(days=11)
future_date_2 = int(time.mktime(future_date_2.timetuple()))
self._ceph_cmd(['dashboard', 'set-user-pwd-expiration-span', '10'])
self._create_user(username='user1',
password='mypassword10#',
name='My Name',
email='[email protected]',
roles=['administrator'])
self.assertStatus(201)
user = self._get('/api/user/user1')
self.assertStatus(200)
self.assertIsNotNone(user['pwdExpirationDate'])
self.assertGreater(user['pwdExpirationDate'], future_date_1)
self.assertLess(user['pwdExpirationDate'], future_date_2)
self._delete('/api/user/user1')
self._ceph_cmd(['dashboard', 'set-user-pwd-expiration-span', '0'])
def test_pwd_expiration_date_update(self):
self._ceph_cmd(['dashboard', 'set-user-pwd-expiration-span', '10'])
self.create_user('user1', 'mypassword10#', ['administrator'])
user_1 = self._get('/api/user/user1')
self.assertStatus(200)
# Let's wait 1 s to ensure pwd expiration date is not the same
time.sleep(1)
self.login('user1', 'mypassword10#')
self._post('/api/user/user1/change_password', {
'old_password': 'mypassword10#',
'new_password': 'newpassword01#'
})
self.assertStatus(200)
# Compare password expiration dates.
self._reset_login_to_admin()
user_1_pwd_changed = self._get('/api/user/user1')
self.assertStatus(200)
self.assertLess(user_1['pwdExpirationDate'], user_1_pwd_changed['pwdExpirationDate'])
# Cleanup
self.delete_user('user1')
self._ceph_cmd(['dashboard', 'set-user-pwd-expiration-span', '0'])
def test_pwd_update_required(self):
self._create_user(username='user1',
password='mypassword10#',
name='My Name',
email='[email protected]',
roles=['administrator'],
pwd_update_required=True)
self.assertStatus(201)
user_1 = self._get('/api/user/user1')
self.assertStatus(200)
self.assertEqual(user_1['pwdUpdateRequired'], True)
self.login('user1', 'mypassword10#')
self.assertStatus(201)
self._get('/api/osd')
self.assertStatus(403)
self._reset_login_to_admin('user1')
def test_pwd_update_required_change_pwd(self):
self._create_user(username='user1',
password='mypassword10#',
name='My Name',
email='[email protected]',
roles=['administrator'],
pwd_update_required=True)
self.assertStatus(201)
self.login('user1', 'mypassword10#')
self._post('/api/user/user1/change_password', {
'old_password': 'mypassword10#',
'new_password': 'newpassword01#'
})
self.login('user1', 'newpassword01#')
user_1 = self._get('/api/user/user1')
self.assertStatus(200)
self.assertEqual(user_1['pwdUpdateRequired'], False)
self._get('/api/osd')
self.assertStatus(200)
self._reset_login_to_admin('user1')
def test_validate_password_weak(self):
self._post('/api/user/validate_password', {
'password': 'mypassword1'
})
self.assertStatus(200)
self.assertJsonBody({
'valid': True,
'credits': 11,
'valuation': 'Weak'
})
def test_validate_password_ok(self):
self._post('/api/user/validate_password', {
'password': 'mypassword1!@'
})
self.assertStatus(200)
self.assertJsonBody({
'valid': True,
'credits': 17,
'valuation': 'OK'
})
def test_validate_password_strong(self):
self._post('/api/user/validate_password', {
'password': 'testpassword0047!@'
})
self.assertStatus(200)
self.assertJsonBody({
'valid': True,
'credits': 22,
'valuation': 'Strong'
})
def test_validate_password_very_strong(self):
self._post('/api/user/validate_password', {
'password': 'testpassword#!$!@$'
})
self.assertStatus(200)
self.assertJsonBody({
'valid': True,
'credits': 30,
'valuation': 'Very strong'
})
def test_validate_password_fail(self):
self._post('/api/user/validate_password', {
'password': 'foo'
})
self.assertStatus(200)
self.assertJsonBody({
'valid': False,
'credits': 0,
'valuation': 'Password is too weak.'
})
def test_validate_password_fail_name(self):
self._post('/api/user/validate_password', {
'password': 'x1zhugo_10',
'username': 'hugo'
})
self.assertStatus(200)
self.assertJsonBody({
'valid': False,
'credits': 0,
'valuation': 'Password must not contain username.'
})
def test_validate_password_fail_oldpwd(self):
self._post('/api/user/validate_password', {
'password': 'x1zt-st10',
'old_password': 'x1zt-st10'
})
self.assertStatus(200)
self.assertJsonBody({
'valid': False,
'credits': 0,
'valuation': 'Password must not be the same as the previous one.'
})
def test_create_user_pwd_update_required(self):
self.create_user('foo', 'bar', cmd_args=['--pwd_update_required'])
self._get('/api/user/foo')
self.assertStatus(200)
self.assertJsonSubset({
'username': 'foo',
'pwdUpdateRequired': True
})
self.delete_user('foo')
ceph-main/qa/tasks/tests/__init__.py
(empty file)
ceph-main/qa/tasks/tests/conftest.py
import glob
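# Parametrizes any test that requests the 'yaml_file' fixture with every
# YAML file found (recursively) under the directory given by --suite-dir.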
def pytest_addoption(parser):
parser.addoption("--suite-dir", help="suite dir")
def pytest_generate_tests(metafunc):
if "yaml_file" in metafunc.fixturenames:
suite_dir = metafunc.config.getoption("--suite-dir")
files = glob.glob(f"{suite_dir}/**/*.yaml", recursive=True)
metafunc.parametrize("yaml_file", list(set(files)))
ceph-main/qa/tasks/tests/test_devstack.py
from textwrap import dedent
from tasks import devstack
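# parse_os_table() turns the ASCII table printed by OpenStack CLI commands
# into a plain dict; this test feeds it a representative volume table.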
class TestDevstack(object):
def test_parse_os_table(self):
table_str = dedent("""
+---------------------+--------------------------------------+
| Property | Value |
+---------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| created_at | 2014-02-21T17:14:47.548361 |
| display_description | None |
| display_name | NAME |
| id | ffdbd1bb-60dc-4d95-acfe-88774c09ad3e |
| metadata | {} |
| size | 1 |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| volume_type | None |
+---------------------+--------------------------------------+
""").strip()
expected = {
'Property': 'Value',
'attachments': '[]',
'availability_zone': 'nova',
'bootable': 'false',
'created_at': '2014-02-21T17:14:47.548361',
'display_description': 'None',
'display_name': 'NAME',
'id': 'ffdbd1bb-60dc-4d95-acfe-88774c09ad3e',
'metadata': '{}',
'size': '1',
'snapshot_id': 'None',
'source_volid': 'None',
'status': 'creating',
'volume_type': 'None'}
vol_info = devstack.parse_os_table(table_str)
assert vol_info == expected
ceph-main/qa/tasks/tests/test_import_yaml.py
import yaml
def test_load_yaml(yaml_file):
yaml.safe_load(open(yaml_file))
ceph-main/qa/tasks/tests/test_radosgw_admin.py
from unittest.mock import Mock
from tasks import radosgw_admin
acl_with_version = b"""<?xml version="1.0" encoding="UTF-8"?><AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>foo</ID><DisplayName>Foo</DisplayName></Owner><AccessControlList><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>foo</ID><DisplayName>Foo</DisplayName></Grantee><Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>
""" # noqa
acl_without_version = b"""<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>foo</ID><DisplayName>Foo</DisplayName></Owner><AccessControlList><Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>foo</ID><DisplayName>Foo</DisplayName></Grantee><Permission>FULL_CONTROL</Permission></Grant></AccessControlList></AccessControlPolicy>
""" # noqa
class TestGetAcl(object):
def setup(self):
self.key = Mock()
def test_removes_xml_version(self):
self.key.get_xml_acl = Mock(return_value=acl_with_version)
result = radosgw_admin.get_acl(self.key)
assert result.startswith('<AccessControlPolicy')
def test_xml_version_is_already_removed(self):
self.key.get_xml_acl = Mock(return_value=acl_without_version)
result = radosgw_admin.get_acl(self.key)
assert result.startswith('<AccessControlPolicy')
def test_newline_gets_trimmed(self):
self.key.get_xml_acl = Mock(return_value=acl_without_version)
result = radosgw_admin.get_acl(self.key)
assert result.endswith('\n') is False
ceph-main/qa/tasks/util/__init__.py
from teuthology import misc
def get_remote(ctx, cluster, service_type, service_id):
"""
Get the Remote for the host where a particular role runs.
:param cluster: name of the cluster the service is part of
:param service_type: e.g. 'mds', 'osd', 'client'
:param service_id: The third part of a role, e.g. '0' for
the role 'ceph.client.0'
:return: a Remote instance for the host where the
requested role is placed
"""
def _is_instance(role):
role_tuple = misc.split_role(role)
return role_tuple == (cluster, service_type, str(service_id))
try:
(remote,) = ctx.cluster.only(_is_instance).remotes.keys()
except ValueError:
raise KeyError("Service {0}.{1}.{2} not found".format(cluster,
service_type,
service_id))
return remote
def get_remote_for_role(ctx, role):
return get_remote(ctx, *misc.split_role(role))
ceph-main/qa/tasks/util/chacra.py
#!/usr/bin/env python3
import argparse
import logging
import requests
import sys
from pathlib import Path
from urllib.parse import urlparse
log = logging.getLogger(__name__)
SHAMAN_SEARCH_URL = 'https://shaman.ceph.com/api/search'
PROJECT = 'ceph'
DISTRO = 'ubuntu'
RELEASE = 'focal'
ARCH = 'x86_64'
BRANCH = 'main'
SHA1 = 'latest'
FLAVOR = 'default'
FILENAME = 'cephadm'
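# Flow: query shaman for a matching build, then construct the chacra
# binaries URL from the search result and download the requested file.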
def search(*args, **kwargs):
'''
Query shaman for a build result
'''
resp = requests.get(SHAMAN_SEARCH_URL, params=kwargs)
resp.raise_for_status()
return resp
def _get_distros(distro, release, arch=None):
ret = f'{distro}/{release}'
if arch:
ret = f'{ret}/{arch}'
return ret
def _get_binary_url(host, project, ref, sha1, distro, release, arch, flavor, filename):
return f'https://{host}/binaries/{project}/{ref}/{sha1}/{distro}/{release}/{arch}/flavors/{flavor}/{filename}'
def get_binary_url(
filename,
project=None,
distro=None,
release=None,
arch=None,
flavor=None,
branch=None,
sha1=None
):
'''
Return the chacra url for a build result
'''
# query shaman for the built binary
s = {}
if project:
s['project'] = project
if distro:
s['distros'] = _get_distros(distro, release, arch)
if flavor:
s['flavor'] = flavor
if branch:
s['ref'] = branch
if sha1:
s['sha1'] = sha1
resp = search(**s)
result = resp.json()
if len(result) == 0:
raise RuntimeError(f'no results found at {resp.url}')
# TODO: filter the result down to the correct arch etc.?
result = result[0]
status = result['status']
if status != 'ready':
raise RuntimeError(f'cannot pull file with status: {status}')
# build the chacra url
chacra_host = urlparse(result['url']).netloc
chacra_ref = result['ref']
chacra_sha1 = result['sha1']
log.info(f'got chacra host {chacra_host}, ref {chacra_ref}, sha1 {chacra_sha1} from {resp.url}')
# prefer codename if a release is not specified
if result.get('distro_codename'):
release = result.get('distro_codename')
elif result.get('distro_version'):
release = result.get('distro_version')
elif not release:
raise RuntimeError('cannot determine distro release!')
if not arch:
if ARCH in result['archs']:
arch = ARCH
elif len(result['archs']) > 0:
arch = result['archs'][0]
else:
raise RuntimeError('cannot determine the arch type!')
# build the url to the binary
url = _get_binary_url(
chacra_host,
result['project'],
chacra_ref,
chacra_sha1,
result['distro'],
release,
arch,
result['flavor'],
filename,
)
return url
def pull(
filename,
project=None,
distro=None,
release=None,
arch=None,
flavor=None,
branch=None,
sha1=None
):
'''
Pull a build result from chacra
'''
url = get_binary_url(
filename,
project=project,
distro=distro,
release=release,
arch=arch,
flavor=flavor,
branch=branch,
sha1=sha1
)
resp = requests.get(url, stream=True)
resp.raise_for_status()
log.info(f'got file from {resp.url}')
return resp
def main():
handler = logging.StreamHandler(sys.stdout)
log.addHandler(handler)
log.setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--project', default=PROJECT)
parser.add_argument('--distro', default=DISTRO)
parser.add_argument('--release', default=RELEASE)
parser.add_argument('--arch', default=ARCH)
parser.add_argument('--branch', default=BRANCH)
parser.add_argument('--sha1', default=SHA1)
parser.add_argument('--flavor', default=FLAVOR)
parser.add_argument('--src', default=FILENAME)
parser.add_argument('--dest', default=FILENAME)
args = parser.parse_args()
resp = pull(
args.src,
project=args.project,
distro=args.distro,
release=args.release,
arch=args.arch,
flavor=args.flavor,
branch=args.branch,
sha1=args.sha1
)
dest = Path(args.dest).absolute()
with open(dest, 'wb') as f:
for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
log.info('.',)
f.write(chunk)
log.info(f'wrote binary file: {dest}')
return 0
if __name__ == '__main__':
sys.exit(main())
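# Example usage of the helpers above (illustrative arguments; mirrors main()):
#
#   url = get_binary_url('cephadm', project=PROJECT, distro=DISTRO,
#                        release=RELEASE, arch=ARCH, flavor=FLAVOR,
#                        branch=BRANCH, sha1=SHA1)
#   resp = pull('cephadm', project=PROJECT, branch=BRANCH)
#   with open('cephadm', 'wb') as f:
#       f.write(resp.content)
#
# get_binary_url() asks shaman for a 'ready' build and assembles a chacra URL of
# the form https://<host>/binaries/<project>/<ref>/<sha1>/<distro>/<release>/<arch>/flavors/<flavor>/<filename>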
| 4,580 | 23.497326 | 114 |
py
|
null |
ceph-main/qa/tasks/util/rados.py
|
import logging
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def rados(ctx, remote, cmd, wait=True, check_status=False):
testdir = teuthology.get_testdir(ctx)
log.info("rados %s" % ' '.join(cmd))
pre = [
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rados',
        ]
pre.extend(cmd)
proc = remote.run(
args=pre,
check_status=check_status,
wait=wait,
)
if wait:
return proc.exitstatus
else:
return proc
def create_ec_pool(remote, name, profile_name, pgnum, profile={}, cluster_name="ceph", application=None):
remote.run(args=['sudo', 'ceph'] +
cmd_erasure_code_profile(profile_name, profile) + ['--cluster', cluster_name])
remote.run(args=[
'sudo', 'ceph', 'osd', 'pool', 'create', name,
str(pgnum), str(pgnum), 'erasure', profile_name, '--cluster', cluster_name
])
if application:
remote.run(args=[
'sudo', 'ceph', 'osd', 'pool', 'application', 'enable', name, application, '--cluster', cluster_name
], check_status=False) # may fail as EINVAL when run in jewel upgrade test
def create_replicated_pool(remote, name, pgnum, cluster_name="ceph", application=None):
remote.run(args=[
'sudo', 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum), '--cluster', cluster_name
])
if application:
remote.run(args=[
'sudo', 'ceph', 'osd', 'pool', 'application', 'enable', name, application, '--cluster', cluster_name
], check_status=False)
def create_cache_pool(remote, base_name, cache_name, pgnum, size, cluster_name="ceph"):
remote.run(args=[
'sudo', 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum), '--cluster', cluster_name
])
remote.run(args=[
'sudo', 'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name,
str(size), '--cluster', cluster_name
])
def cmd_erasure_code_profile(profile_name, profile):
"""
Return the shell command to run to create the erasure code profile
described by the profile parameter.
:param profile_name: a string matching [A-Za-z0-9-_.]+
    :param profile: a map whose semantics depend on the erasure code plugin
:returns: a shell command as an array suitable for Remote.run
If profile is {}, it is replaced with
{ 'k': '2', 'm': '1', 'crush-failure-domain': 'osd'}
for backward compatibility. In previous versions of teuthology,
these values were hardcoded as function arguments and some yaml
files were designed with these implicit values. The teuthology
code should not know anything about the erasure code profile
content or semantic. The valid values and parameters are outside
its scope.
"""
if profile == {}:
profile = {
'k': '2',
'm': '1',
'crush-failure-domain': 'osd'
}
return [
'osd', 'erasure-code-profile', 'set',
profile_name
] + [ str(key) + '=' + str(value) for key, value in profile.items() ]
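# Example (illustrative profile name): with the default profile substitution,
#   cmd_erasure_code_profile('myprofile', {})
# returns
#   ['osd', 'erasure-code-profile', 'set', 'myprofile',
#    'k=2', 'm=1', 'crush-failure-domain=osd']
# which create_ec_pool() prefixes with ['sudo', 'ceph'] (plus --cluster) before
# running it on the remote.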
| 3,160 | 34.920455 | 112 |
py
|
null |
ceph-main/qa/tasks/util/rgw.py
|
import logging
import json
import time
from io import StringIO
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False,
omit_sudo=False, omit_tdir=False, format='json', decode=True,
log_level=logging.DEBUG):
log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd))
testdir = teuthology.get_testdir(ctx)
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_id = daemon_type + '.' + client_id
pre = [
'adjust-ulimits',
'ceph-coverage']
if not omit_tdir:
pre.append(
'{tdir}/archive/coverage'.format(tdir=testdir))
pre.extend([
'radosgw-admin',
'--log-to-stderr',
'--format', format,
'-n', client_with_id,
'--cluster', cluster_name,
])
pre.extend(cmd)
log.log(log_level, 'rgwadmin: cmd=%s' % pre)
(remote,) = ctx.cluster.only(client).remotes.keys()
proc = remote.run(
args=pre,
check_status=check_status,
omit_sudo=omit_sudo,
stdout=StringIO(),
stderr=StringIO(),
stdin=stdin,
)
r = proc.exitstatus
out = proc.stdout.getvalue()
if not decode:
return (r, out)
j = None
if not r and out != '':
try:
j = json.loads(out)
log.log(log_level, ' json result: %s' % j)
except ValueError:
j = out
log.log(log_level, ' raw result: %s' % j)
return (r, j)
def get_user_summary(out, user):
"""Extract the summary for a given user"""
user_summary = None
for summary in out['summary']:
if summary.get('user') == user:
user_summary = summary
if not user_summary:
raise AssertionError('No summary info found for user: %s' % user)
return user_summary
def get_user_successful_ops(out, user):
summary = out['summary']
if len(summary) == 0:
return 0
return get_user_summary(out, user)['total']['successful_ops']
def wait_for_radosgw(url, remote):
""" poll the given url until it starts accepting connections
add_daemon() doesn't wait until radosgw finishes startup, so this is used
to avoid racing with later tasks that expect radosgw to be up and listening
"""
# TODO: use '--retry-connrefused --retry 8' when teuthology is running on
# Centos 8 and other OS's with an updated version of curl
curl_cmd = ['curl',
url]
exit_status = 0
num_retries = 8
for seconds in range(num_retries):
proc = remote.run(
args=curl_cmd,
check_status=False,
stdout=StringIO(),
stderr=StringIO(),
stdin=StringIO(),
)
exit_status = proc.exitstatus
if exit_status == 0:
break
time.sleep(2**seconds)
assert exit_status == 0
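# Example usage (illustrative client role and uid):
#
#   (rc, user_info) = rgwadmin(ctx, 'client.0',
#                              ['user', 'info', '--uid', 'foo'],
#                              check_status=True)
#   (rc, usage) = rgwadmin(ctx, 'client.0', ['usage', 'show'])
#   ops = get_user_successful_ops(usage, 'foo')
#
# rgwadmin() returns (exitstatus, output); the output is parsed JSON when
# possible unless decode=False is passed.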
| 2,967 | 28.68 | 79 |
py
|
null |
ceph-main/qa/tasks/util/workunit.py
|
import copy
from teuthology import misc
from teuthology.orchestra import run
class Refspec:
def __init__(self, refspec):
self.refspec = refspec
def __str__(self):
return self.refspec
def _clone(self, git_url, clonedir, opts=None):
if opts is None:
opts = []
return (['rm', '-rf', clonedir] +
[run.Raw('&&')] +
['git', 'clone'] + opts +
[git_url, clonedir])
def _cd(self, clonedir):
return ['cd', clonedir]
def _checkout(self):
return ['git', 'checkout', self.refspec]
def clone(self, git_url, clonedir):
return (self._clone(git_url, clonedir) +
[run.Raw('&&')] +
self._cd(clonedir) +
[run.Raw('&&')] +
self._checkout())
class Branch(Refspec):
def __init__(self, tag):
Refspec.__init__(self, tag)
def clone(self, git_url, clonedir):
opts = ['--depth', '1',
'--branch', self.refspec]
return (self._clone(git_url, clonedir, opts) +
[run.Raw('&&')] +
self._cd(clonedir))
class Head(Refspec):
def __init__(self):
Refspec.__init__(self, 'HEAD')
def clone(self, git_url, clonedir):
opts = ['--depth', '1']
return (self._clone(git_url, clonedir, opts) +
[run.Raw('&&')] +
self._cd(clonedir))
def get_refspec_after_overrides(config, overrides):
# mimic the behavior of the "install" task, where the "overrides" are
# actually the defaults of that task. in other words, if none of "sha1",
# "tag", or "branch" is specified by a "workunit" tasks, we will update
# it with the information in the "workunit" sub-task nested in "overrides".
overrides = copy.deepcopy(overrides.get('workunit', {}))
refspecs = {'suite_sha1': Refspec, 'suite_branch': Branch,
'sha1': Refspec, 'tag': Refspec, 'branch': Branch}
if any(map(lambda i: i in config, refspecs.keys())):
for i in refspecs.keys():
overrides.pop(i, None)
misc.deep_merge(config, overrides)
for spec, cls in refspecs.items():
refspec = config.get(spec)
if refspec:
refspec = cls(refspec)
break
if refspec is None:
refspec = Head()
return refspec
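# Example (illustrative branch name and paths): a workunit config that names a
# branch yields a Branch refspec, whose clone() builds a shallow-clone command
# list suitable for remote.run():
#
#   refspec = get_refspec_after_overrides({'branch': 'wip-foo'}, {})
#   args = refspec.clone('https://github.com/ceph/ceph.git', '/tmp/clonedir')
#
# With no sha1/tag/branch in either config or overrides, Head() is returned and
# HEAD of the default branch is cloned.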
| 2,378 | 29.113924 | 79 |
py
|
null |
ceph-main/qa/tasks/util/test/__init__.py
| 0 | 0 | 0 |
py
|
|
null |
ceph-main/qa/tasks/util/test/test_rados.py
|
#
# The MIT License
#
# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from tasks.util import rados
class TestRados(object):
def test_cmd_erasure_code_profile(self):
name = 'NAME'
cmd = rados.cmd_erasure_code_profile(name, {})
assert 'k=2' in cmd
assert name in cmd
cmd = rados.cmd_erasure_code_profile(name, { 'k': '88' })
assert 'k=88' in cmd
assert name in cmd
| 1,568 | 37.268293 | 68 |
py
|
null |
ceph-main/qa/timezone/eastern.yaml
|
tasks:
- exec:
all:
- echo America/New_York | sudo tee /etc/timezone
| 79 | 15 | 54 |
yaml
|
null |
ceph-main/qa/timezone/pacific.yaml
|
tasks:
- exec:
all:
- echo America/Los_Angeles | sudo tee /etc/timezone
| 82 | 15.6 | 57 |
yaml
|
null |
ceph-main/qa/timezone/random.yaml
|
tasks:
- exec:
all:
- echo America/Los_Angeles | sudo tee /etc/timezone
- [ $RANDOM -gt 32000 ] && echo America/New_York | sudo tee /etc/timezone
| 162 | 26.166667 | 79 |
yaml
|
null |
ceph-main/qa/workunits/ceph-helpers-root.sh
|
#!/usr/bin/env bash
#
# Copyright (C) 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
#######################################################################
function distro_id() {
source /etc/os-release
echo $ID
}
function distro_version() {
source /etc/os-release
echo $VERSION
}
function install() {
if [ $(distro_id) = "ubuntu" ]; then
sudo apt-get purge -y gcc
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
fi
for package in "$@" ; do
install_one $package
done
if [ $(distro_id) = "ubuntu" ]; then
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-11 11
sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-11 11
sudo update-alternatives --install /usr/bin/cc cc /usr/bin/gcc 11
sudo update-alternatives --set cc /usr/bin/gcc
sudo update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++ 11
sudo update-alternatives --set c++ /usr/bin/g++
fi
}
function install_one() {
case $(distro_id) in
ubuntu|debian|devuan|softiron)
sudo env DEBIAN_FRONTEND=noninteractive apt-get install -y "$@"
;;
centos|fedora|rhel)
sudo yum install -y "$@"
;;
opensuse*|suse|sles)
sudo zypper --non-interactive install "$@"
;;
*)
echo "$(distro_id) is unknown, $@ will have to be installed manually."
;;
esac
}
function install_pkg_on_ubuntu {
local project=$1
shift
local sha1=$1
shift
local codename=$1
shift
local force=$1
shift
local pkgs=$@
local missing_pkgs
if [ $force = "force" ]; then
missing_pkgs="$@"
else
for pkg in $pkgs; do
if ! dpkg -s $pkg &> /dev/null; then
missing_pkgs+=" $pkg"
fi
done
fi
if test -n "$missing_pkgs"; then
local shaman_url="https://shaman.ceph.com/api/repos/${project}/master/${sha1}/ubuntu/${codename}/repo"
sudo curl --silent --location $shaman_url --output /etc/apt/sources.list.d/$project.list
sudo env DEBIAN_FRONTEND=noninteractive apt-get update -y -o Acquire::Languages=none -o Acquire::Translation=none || true
sudo env DEBIAN_FRONTEND=noninteractive apt-get install --allow-unauthenticated -y $missing_pkgs
fi
}
#######################################################################
function control_osd() {
local action=$1
local id=$2
sudo systemctl $action ceph-osd@$id
return 0
}
#######################################################################
function pool_read_write() {
local size=${1:-1}
local dir=/tmp
local timeout=360
local test_pool=test_pool
ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1
ceph osd pool create $test_pool 4 || return 1
ceph osd pool set $test_pool size $size --yes-i-really-mean-it || return 1
ceph osd pool set $test_pool min_size $size || return 1
ceph osd pool application enable $test_pool rados
echo FOO > $dir/BAR
timeout $timeout rados --pool $test_pool put BAR $dir/BAR || return 1
timeout $timeout rados --pool $test_pool get BAR $dir/BAR.copy || return 1
diff $dir/BAR $dir/BAR.copy || return 1
ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1
}
#######################################################################
set -x
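# Usage note (examples only): the "$@" on the next line dispatches to one of
# the helper functions above, so callers invoke this file with a function name
# and its arguments, e.g.:
#
#   bash qa/workunits/ceph-helpers-root.sh install jq
#   bash qa/workunits/ceph-helpers-root.sh pool_read_write 2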
"$@"
| 3,944 | 29.346154 | 122 |
sh
|
null |
ceph-main/qa/workunits/false.sh
|
#!/bin/sh -ex
false
| 20 | 6 | 13 |
sh
|
null |
ceph-main/qa/workunits/kernel_untar_build.sh
|
#!/usr/bin/env bash
set -ex
wget -O linux.tar.gz http://download.ceph.com/qa/linux-5.4.tar.gz
mkdir t
cd t
tar xzf ../linux.tar.gz
cd linux*
make defconfig
make -j`grep -c processor /proc/cpuinfo`
cd ..
if ! rm -rv linux* ; then
echo "uh oh rm -r failed, it left behind:"
find .
exit 1
fi
cd ..
rm -rv t linux*
| 326 | 14.571429 | 65 |
sh
|
null |
ceph-main/qa/workunits/post-file.sh
|
#!/usr/bin/env bash
set -ex
what="$1"
[ -z "$what" ] && what=/etc/udev/rules.d
sudo ceph-post-file -d ceph-test-workunit $what
echo OK
| 137 | 14.333333 | 47 |
sh
|
null |
ceph-main/qa/workunits/test_telemetry_pacific.sh
|
#!/bin/bash -ex
# Set up ident details for cluster
ceph config set mgr mgr/telemetry/channel_ident true
ceph config set mgr mgr/telemetry/organization 'ceph-qa'
ceph config set mgr mgr/telemetry/description 'upgrade test cluster'
# Opt-in
ceph telemetry on --license sharing-1-0
# Check last_opt_revision
LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
if [ $LAST_OPT_REVISION -ne 3 ]; then
echo "last_opt_revision is incorrect."
exit 1
fi
# Check reports
ceph telemetry show
ceph telemetry show-device
ceph telemetry show-all
echo OK
| 573 | 22.916667 | 72 |
sh
|
null |
ceph-main/qa/workunits/test_telemetry_pacific_x.sh
|
#!/bin/bash -ex
# Assert that we're still opted in
LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
if [ $LAST_OPT_REVISION -ne 3 ]; then
echo "last_opt_revision is incorrect"
exit 1
fi
# Check the warning:
STATUS=$(ceph -s)
if ! [[ $STATUS == *"Telemetry requires re-opt-in"* ]]
then
echo "STATUS does not contain re-opt-in warning"
exit 1
fi
# Check new collections
COLLECTIONS=$(ceph telemetry collection ls)
NEW_COLLECTIONS=("perf_perf" "basic_mds_metadata" "basic_pool_usage" "basic_rook_v01" "perf_memory_metrics")
for col in ${NEW_COLLECTIONS[@]}; do
if ! [[ $COLLECTIONS == *$col* ]];
then
echo "COLLECTIONS does not contain" "'"$col"'."
exit 1
fi
done
# Run preview commands
ceph telemetry preview
ceph telemetry preview-device
ceph telemetry preview-all
# Opt in to new collections
ceph telemetry on --license sharing-1-0
ceph telemetry enable channel perf
# Check the warning:
timeout=60
STATUS=$(ceph -s)
until [[ $STATUS != *"Telemetry requires re-opt-in"* ]] || [ $timeout -le 0 ]; do
STATUS=$(ceph -s)
sleep 1
timeout=$(( timeout - 1 ))
done
if [ $timeout -le 0 ]; then
echo "STATUS should not contain re-opt-in warning at this point"
exit 1
fi
# Run show commands
ceph telemetry show
ceph telemetry show-device
ceph telemetry show-all
# Opt out
ceph telemetry off
echo OK
| 1,374 | 21.916667 | 108 |
sh
|
null |
ceph-main/qa/workunits/test_telemetry_quincy.sh
|
#!/bin/bash -ex
# Set up ident details for cluster
ceph config set mgr mgr/telemetry/channel_ident true
ceph config set mgr mgr/telemetry/organization 'ceph-qa'
ceph config set mgr mgr/telemetry/description 'upgrade test cluster'
#Run preview commands
ceph telemetry preview
ceph telemetry preview-device
ceph telemetry preview-all
# Assert that new collections are available
COLLECTIONS=$(ceph telemetry collection ls)
NEW_COLLECTIONS=("perf_perf" "basic_mds_metadata" "basic_pool_usage" "basic_rook_v01" "perf_memory_metrics")
for col in ${NEW_COLLECTIONS[@]}; do
if ! [[ $COLLECTIONS == *$col* ]];
then
echo "COLLECTIONS does not contain" "'"$col"'."
exit 1
fi
done
# Opt-in
ceph telemetry on --license sharing-1-0
# Enable perf channel
ceph telemetry enable channel perf
# For quincy, the last_opt_revision remains at 1 since last_opt_revision
# was phased out for fresh installs of quincy.
LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
if [ $LAST_OPT_REVISION -ne 1 ]; then
echo "last_opt_revision is incorrect"
exit 1
fi
# Run show commands
ceph telemetry show
ceph telemetry show-device
ceph telemetry show-all
echo OK
| 1,191 | 25.488889 | 108 |
sh
|
null |
ceph-main/qa/workunits/test_telemetry_quincy_x.sh
|
#!/bin/bash -ex
# For quincy, the last_opt_revision remains at 1 since last_opt_revision
# was phased out for fresh installs of quincy.
LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
if [ $LAST_OPT_REVISION -ne 1 ]; then
echo "last_opt_revision is incorrect"
exit 1
fi
# Check the warning:
ceph -s
COLLECTIONS=$(ceph telemetry collection ls)
NEW_COLLECTIONS=("perf_perf" "basic_mds_metadata" "basic_pool_usage" "basic_rook_v01" "perf_memory_metrics")
for col in ${NEW_COLLECTIONS[@]}; do
if ! [[ $COLLECTIONS == *$col* ]];
then
echo "COLLECTIONS does not contain" "'"$col"'."
exit 1
fi
done
#Run preview commands
ceph telemetry preview
ceph telemetry preview-device
ceph telemetry preview-all
# Opt in to new collections
# Currently, no new collections between latest quincy and reef (dev)
# Run show commands
ceph telemetry show
ceph telemetry show-device
ceph telemetry show-all
# Opt out
ceph telemetry off
echo OK
| 977 | 22.853659 | 108 |
sh
|
null |
ceph-main/qa/workunits/true.sh
|
#!/bin/sh -ex
true
| 20 | 4.25 | 13 |
sh
|
null |
ceph-main/qa/workunits/caps/mon_commands.sh
|
#!/bin/sh -ex
ceph-authtool --create-keyring k --gen-key -p --name client.xx
ceph auth add -i k client.xx mon "allow command foo; allow command bar *; allow command baz ...; allow command foo add * mon allow\\ rwx osd allow\\ *"
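# How the caps above are meant to read (commentary for the checks below):
# 'allow command foo' permits only the bare command, 'allow command bar *'
# requires exactly one extra argument, 'allow command baz ...' accepts any
# trailing arguments, and the final clause only matches 'foo add <one-arg>'
# with exactly those mon/osd cap values (the escaped spaces keep each value
# a single token).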
( ceph -k k -n client.xx foo || true ) | grep 'unrecog'
( ceph -k k -n client.xx foo ooo || true ) | grep 'Access denied'
( ceph -k k -n client.xx fo || true ) | grep 'Access denied'
( ceph -k k -n client.xx fooo || true ) | grep 'Access denied'
( ceph -k k -n client.xx bar || true ) | grep 'Access denied'
( ceph -k k -n client.xx bar a || true ) | grep 'unrecog'
( ceph -k k -n client.xx bar a b c || true ) | grep 'Access denied'
( ceph -k k -n client.xx ba || true ) | grep 'Access denied'
( ceph -k k -n client.xx barr || true ) | grep 'Access denied'
( ceph -k k -n client.xx baz || true ) | grep -v 'Access denied'
( ceph -k k -n client.xx baz a || true ) | grep -v 'Access denied'
( ceph -k k -n client.xx baz a b || true ) | grep -v 'Access denied'
( ceph -k k -n client.xx foo add osd.1 -i k mon 'allow rwx' osd 'allow *' || true ) | grep 'unrecog'
( ceph -k k -n client.xx foo add osd a b c -i k mon 'allow rwx' osd 'allow *' || true ) | grep 'Access denied'
( ceph -k k -n client.xx foo add osd a b c -i k mon 'allow *' || true ) | grep 'Access denied'
echo OK
| 1,352 | 53.12 | 151 |
sh
|
null |
ceph-main/qa/workunits/ceph-tests/ceph-admin-commands.sh
|
#!/bin/sh -ex
ceph -s
rados lspools
rbd ls
# check that the monitors work
ceph osd set nodown
ceph osd unset nodown
exit 0
| 125 | 10.454545 | 30 |
sh
|
null |
ceph-main/qa/workunits/cephadm/create_iscsi_disks.sh
|
#!/bin/bash -ex
# Create some file-backed iSCSI targets and attach them locally.
# Exit if it's not CentOS
if ! grep -q rhel /etc/*-release; then
echo "The script only supports CentOS."
exit 1
fi
[ -z "$SUDO" ] && SUDO=sudo
# 15 GiB
DISK_FILE_SIZE="16106127360"
$SUDO yum install -y targetcli iscsi-initiator-utils
TARGET_NAME="iqn.2003-01.org.linux-iscsi.$(hostname).x8664:sn.foobar"
$SUDO targetcli /iscsi create ${TARGET_NAME}
$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1/portals delete 0.0.0.0 3260
$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1/portals create 127.0.0.1 3260
$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1 set attribute generate_node_acls=1
$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1 set attribute demo_mode_write_protect=0
for i in $(seq 3); do
# Create truncated files, and add them as luns
DISK_FILE="/tmp/disk${i}"
$SUDO truncate --size ${DISK_FILE_SIZE} ${DISK_FILE}
$SUDO targetcli /backstores/fileio create "lun${i}" ${DISK_FILE}
# Workaround for https://tracker.ceph.com/issues/47758
$SUDO targetcli "/backstores/fileio/lun${i}" set attribute optimal_sectors=0
$SUDO targetcli /iscsi/${TARGET_NAME}/tpg1/luns create "/backstores/fileio/lun${i}"
done
$SUDO iscsiadm -m discovery -t sendtargets -p 127.0.0.1
$SUDO iscsiadm -m node -p 127.0.0.1 -T ${TARGET_NAME} -l
| 1,325 | 34.837838 | 87 |
sh
|
null |
ceph-main/qa/workunits/cephadm/test_adoption.sh
|
#!/bin/bash -ex
SCRIPT_NAME=$(basename ${BASH_SOURCE[0]})
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CEPHADM_SRC_DIR=${SCRIPT_DIR}/../../../src/cephadm
CORPUS_COMMIT=9cd9ad020d93b0b420924fec55da307aff8bd422
[ -z "$SUDO" ] && SUDO=sudo
[ -d "$TMPDIR" ] || TMPDIR=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
trap "$SUDO rm -rf $TMPDIR" EXIT
if [ -z "$CEPHADM" ]; then
CEPHADM=`mktemp -p $TMPDIR tmp.cephadm.XXXXXX`
${CEPHADM_SRC_DIR}/build.sh "$CEPHADM"
fi
# at this point, we need $CEPHADM set
if ! [ -x "$CEPHADM" ]; then
echo "cephadm not found. Please set \$CEPHADM"
exit 1
fi
# combine into a single var
CEPHADM_BIN="$CEPHADM"
CEPHADM="$SUDO $CEPHADM_BIN"
## adopt
CORPUS_GIT_SUBMOD="cephadm-adoption-corpus"
GIT_CLONE_DIR=${TMPDIR}/${CORPUS_GIT_SUBMOD}
git clone https://github.com/ceph/$CORPUS_GIT_SUBMOD $GIT_CLONE_DIR
git -C $GIT_CLONE_DIR checkout $CORPUS_COMMIT
CORPUS_DIR=${GIT_CLONE_DIR}/archive
for subdir in `ls ${CORPUS_DIR}`; do
for tarfile in `ls ${CORPUS_DIR}/${subdir} | grep .tgz`; do
tarball=${CORPUS_DIR}/${subdir}/${tarfile}
FSID_LEGACY=`echo "$tarfile" | cut -c 1-36`
TMP_TAR_DIR=`mktemp -d -p $TMPDIR`
$SUDO tar xzvf $tarball -C $TMP_TAR_DIR
NAMES=$($CEPHADM ls --legacy-dir $TMP_TAR_DIR | jq -r '.[].name')
for name in $NAMES; do
$CEPHADM adopt \
--style legacy \
--legacy-dir $TMP_TAR_DIR \
--name $name
# validate after adopt
out=$($CEPHADM ls | jq '.[]' \
| jq 'select(.name == "'$name'")')
echo $out | jq -r '.style' | grep 'cephadm'
echo $out | jq -r '.fsid' | grep $FSID_LEGACY
done
# clean-up before next iter
$CEPHADM rm-cluster --fsid $FSID_LEGACY --force
$SUDO rm -rf $TMP_TAR_DIR
done
done
echo "OK"
| 1,838 | 29.147541 | 67 |
sh
|
null |
ceph-main/qa/workunits/cephadm/test_cephadm.sh
|
#!/bin/bash -ex
SCRIPT_NAME=$(basename ${BASH_SOURCE[0]})
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# cleanup during exit
[ -z "$CLEANUP" ] && CLEANUP=true
FSID='00000000-0000-0000-0000-0000deadbeef'
# images that are used
IMAGE_MAIN=${IMAGE_MAIN:-'quay.ceph.io/ceph-ci/ceph:main'}
IMAGE_PACIFIC=${IMAGE_PACIFIC:-'quay.ceph.io/ceph-ci/ceph:pacific'}
#IMAGE_OCTOPUS=${IMAGE_OCTOPUS:-'quay.ceph.io/ceph-ci/ceph:octopus'}
IMAGE_DEFAULT=${IMAGE_MAIN}
OSD_IMAGE_NAME="${SCRIPT_NAME%.*}_osd.img"
OSD_IMAGE_SIZE='6G'
OSD_TO_CREATE=2
OSD_VG_NAME=${SCRIPT_NAME%.*}
OSD_LV_NAME=${SCRIPT_NAME%.*}
# TMPDIR for test data
[ -d "$TMPDIR" ] || TMPDIR=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
[ -d "$TMPDIR_TEST_MULTIPLE_MOUNTS" ] || TMPDIR_TEST_MULTIPLE_MOUNTS=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
CEPHADM_SRC_DIR=${SCRIPT_DIR}/../../../src/cephadm
CEPHADM_SAMPLES_DIR=${CEPHADM_SRC_DIR}/samples
[ -z "$SUDO" ] && SUDO=sudo
# If cephadm is already installed on the system, use that one and avoid
# building one if we can.
if [ -z "$CEPHADM" ] && command -v cephadm >/dev/null ; then
CEPHADM="$(command -v cephadm)"
fi
if [ -z "$CEPHADM" ]; then
CEPHADM=`mktemp -p $TMPDIR tmp.cephadm.XXXXXX`
${CEPHADM_SRC_DIR}/build.sh "$CEPHADM"
NO_BUILD_INFO=1
fi
# at this point, we need $CEPHADM set
if ! [ -x "$CEPHADM" ]; then
echo "cephadm not found. Please set \$CEPHADM"
exit 1
fi
# add image to args
CEPHADM_ARGS="$CEPHADM_ARGS --image $IMAGE_DEFAULT"
# combine into a single var
CEPHADM_BIN="$CEPHADM"
CEPHADM="$SUDO $CEPHADM_BIN $CEPHADM_ARGS"
# clean up previous run(s)?
$CEPHADM rm-cluster --fsid $FSID --force
$SUDO vgchange -an $OSD_VG_NAME || true
loopdev=$($SUDO losetup -a | grep $(basename $OSD_IMAGE_NAME) | awk -F : '{print $1}')
if ! [ "$loopdev" = "" ]; then
$SUDO losetup -d $loopdev
fi
function cleanup()
{
if [ $CLEANUP = false ]; then
# preserve the TMPDIR state
echo "========================"
echo "!!! CLEANUP=$CLEANUP !!!"
echo
echo "TMPDIR=$TMPDIR"
echo "========================"
return
fi
dump_all_logs $FSID
rm -rf $TMPDIR
}
trap cleanup EXIT
function expect_false()
{
set -x
if eval "$@"; then return 1; else return 0; fi
}
# expect_return_code $expected_code $command ...
function expect_return_code()
{
set -x
local expected_code="$1"
shift
local command="$@"
set +e
eval "$command"
local return_code="$?"
set -e
if [ ! "$return_code" -eq "$expected_code" ]; then return 1; else return 0; fi
}
function is_available()
{
local name="$1"
local condition="$2"
local tries="$3"
local num=0
while ! eval "$condition"; do
num=$(($num + 1))
if [ "$num" -ge $tries ]; then
echo "$name is not available"
false
fi
sleep 5
done
echo "$name is available"
true
}
function dump_log()
{
local fsid="$1"
local name="$2"
local num_lines="$3"
if [ -z $num_lines ]; then
num_lines=100
fi
echo '-------------------------'
echo 'dump daemon log:' $name
echo '-------------------------'
$CEPHADM logs --fsid $fsid --name $name -- --no-pager -n $num_lines
}
function dump_all_logs()
{
local fsid="$1"
local names=$($CEPHADM ls | jq -r '.[] | select(.fsid == "'$fsid'").name')
echo 'dumping logs for daemons: ' $names
for name in $names; do
dump_log $fsid $name
done
}
function nfs_stop()
{
# stop the running nfs server
local units="nfs-server nfs-kernel-server"
for unit in $units; do
if systemctl --no-pager status $unit > /dev/null; then
$SUDO systemctl stop $unit
fi
done
# ensure the NFS port is no longer in use
expect_false "$SUDO ss -tlnp '( sport = :nfs )' | grep LISTEN"
}
## prepare + check host
$SUDO $CEPHADM check-host
## run a gather-facts (output to stdout)
$SUDO $CEPHADM gather-facts
## NOTE: as of around May 2023, `cephadm version` no longer bases its output on
## the version of the containers. The version reported is that of the "binary"
## and is determined during the ceph build.
## `cephadm version` should NOT require sudo/root.
$CEPHADM_BIN version
$CEPHADM_BIN version | grep 'cephadm version'
# Typically cmake should be running the cephadm build script with CLI arguments
# that embed version info into the "binary". If not using a cephadm build via
# cmake you can set `NO_BUILD_INFO` to skip this check.
if [ -z "$NO_BUILD_INFO" ]; then
$CEPHADM_BIN version | grep -v 'UNSET'
$CEPHADM_BIN version | grep -v 'UNKNOWN'
fi
## test shell before bootstrap, when crash dir isn't (yet) present on this host
$CEPHADM shell --fsid $FSID -- ceph -v | grep 'ceph version'
$CEPHADM shell --fsid $FSID -e FOO=BAR -- printenv | grep FOO=BAR
# test stdin
echo foo | $CEPHADM shell -- cat | grep -q foo
# the shell commands a bit above this seem to cause the
# /var/lib/ceph/<fsid> directory to be made. Since we now
# check in bootstrap that there are no clusters with the same
# fsid based on the directory existing, we need to make sure
# this directory is gone before bootstrapping. We can
# accomplish this with another rm-cluster
$CEPHADM rm-cluster --fsid $FSID --force
## bootstrap
ORIG_CONFIG=`mktemp -p $TMPDIR`
CONFIG=`mktemp -p $TMPDIR`
MONCONFIG=`mktemp -p $TMPDIR`
KEYRING=`mktemp -p $TMPDIR`
IP=127.0.0.1
cat <<EOF > $ORIG_CONFIG
[global]
log to file = true
osd crush chooseleaf type = 0
EOF
$CEPHADM bootstrap \
--mon-id a \
--mgr-id x \
--mon-ip $IP \
--fsid $FSID \
--config $ORIG_CONFIG \
--output-config $CONFIG \
--output-keyring $KEYRING \
--output-pub-ssh-key $TMPDIR/ceph.pub \
--allow-overwrite \
--skip-mon-network \
--skip-monitoring-stack
test -e $CONFIG
test -e $KEYRING
rm -f $ORIG_CONFIG
$SUDO test -e /var/log/ceph/$FSID/ceph-mon.a.log
$SUDO test -e /var/log/ceph/$FSID/ceph-mgr.x.log
for u in ceph.target \
ceph-$FSID.target \
         ceph-$FSID@mon.a.service \
         ceph-$FSID@mgr.x.service; do
systemctl is-enabled $u
systemctl is-active $u
done
systemctl | grep system-ceph | grep -q .slice # naming is escaped and annoying
# check ceph -s works (via shell w/ passed config/keyring)
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph -s | grep $FSID
for t in mon mgr node-exporter prometheus grafana; do
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph orch apply $t --unmanaged
done
## ls
$CEPHADM ls | jq '.[]' | jq 'select(.name == "mon.a").fsid' \
| grep $FSID
$CEPHADM ls | jq '.[]' | jq 'select(.name == "mgr.x").fsid' \
| grep $FSID
# make sure the version is returned correctly
$CEPHADM ls | jq '.[]' | jq 'select(.name == "mon.a").version' | grep -q \\.
## deploy
# add mon.b
cp $CONFIG $MONCONFIG
echo "public addrv = [v2:$IP:3301,v1:$IP:6790]" >> $MONCONFIG
jq --null-input \
--arg fsid $FSID \
--arg name mon.b \
--arg keyring /var/lib/ceph/$FSID/mon.a/keyring \
--arg config "$MONCONFIG" \
'{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config}}' | \
$CEPHADM _orch deploy
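# Note on the spec above (commentary): '_orch deploy' reads a JSON spec from
# stdin -- "fsid" and "name" identify the daemon, and "params" carries
# per-daemon options such as the keyring and config paths. Later deployments
# in this script pass "config_blobs", "image", "tcp_ports" and "osd_fsid" the
# same way.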
for u in ceph-$FSID@mon.b.service; do
systemctl is-enabled $u
systemctl is-active $u
done
cond="$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph mon stat | grep '2 mons'"
is_available "mon.b" "$cond" 30
# add mgr.y
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph auth get-or-create mgr.y \
mon 'allow profile mgr' \
osd 'allow *' \
mds 'allow *' > $TMPDIR/keyring.mgr.y
jq --null-input \
--arg fsid $FSID \
--arg name mgr.y \
--arg keyring $TMPDIR/keyring.mgr.y \
--arg config "$CONFIG" \
'{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config}}' | \
$CEPHADM _orch deploy
for u in ceph-$FSID@mgr.y.service; do
systemctl is-enabled $u
systemctl is-active $u
done
for f in `seq 1 30`; do
if $CEPHADM shell --fsid $FSID \
--config $CONFIG --keyring $KEYRING -- \
ceph -s -f json-pretty \
| jq '.mgrmap.num_standbys' | grep -q 1 ; then break; fi
sleep 1
done
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph -s -f json-pretty \
| jq '.mgrmap.num_standbys' | grep -q 1
# add osd.{1,2,..}
dd if=/dev/zero of=$TMPDIR/$OSD_IMAGE_NAME bs=1 count=0 seek=$OSD_IMAGE_SIZE
loop_dev=$($SUDO losetup -f)
$SUDO vgremove -f $OSD_VG_NAME || true
$SUDO losetup $loop_dev $TMPDIR/$OSD_IMAGE_NAME
$SUDO pvcreate $loop_dev && $SUDO vgcreate $OSD_VG_NAME $loop_dev
# osd bootstrap keyring
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph auth get client.bootstrap-osd > $TMPDIR/keyring.bootstrap.osd
# create lvs first so ceph-volume doesn't overlap with lv creation
for id in `seq 0 $((--OSD_TO_CREATE))`; do
$SUDO lvcreate -l $((100/$OSD_TO_CREATE))%VG -n $OSD_LV_NAME.$id $OSD_VG_NAME
done
for id in `seq 0 $((--OSD_TO_CREATE))`; do
device_name=/dev/$OSD_VG_NAME/$OSD_LV_NAME.$id
CEPH_VOLUME="$CEPHADM ceph-volume \
--fsid $FSID \
--config $CONFIG \
--keyring $TMPDIR/keyring.bootstrap.osd --"
# prepare the osd
$CEPH_VOLUME lvm prepare --bluestore --data $device_name --no-systemd
$CEPH_VOLUME lvm batch --no-auto $device_name --yes --no-systemd
# osd id and osd fsid
$CEPH_VOLUME lvm list --format json $device_name > $TMPDIR/osd.map
osd_id=$($SUDO cat $TMPDIR/osd.map | jq -cr '.. | ."ceph.osd_id"? | select(.)')
osd_fsid=$($SUDO cat $TMPDIR/osd.map | jq -cr '.. | ."ceph.osd_fsid"? | select(.)')
# deploy the osd
jq --null-input \
--arg fsid $FSID \
--arg name osd.$osd_id \
--arg keyring $TMPDIR/keyring.bootstrap.osd \
--arg config "$CONFIG" \
--arg osd_fsid $osd_fsid \
'{"fsid": $fsid, "name": $name, "params":{"keyring": $keyring, "config": $config, "osd_fsid": $osd_fsid}}' | \
$CEPHADM _orch deploy
done
# add node-exporter
jq --null-input \
--arg fsid $FSID \
--arg name node-exporter.a \
'{"fsid": $fsid, "name": $name}' | \
${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
cond="curl 'http://localhost:9100' | grep -q 'Node Exporter'"
is_available "node-exporter" "$cond" 10
# add prometheus
jq --null-input \
--arg fsid $FSID \
--arg name prometheus.a \
--argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/prometheus.json)" \
'{"fsid": $fsid, "name": $name, "config_blobs": $config_blobs}' | \
${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
cond="curl 'localhost:9095/api/v1/query?query=up'"
is_available "prometheus" "$cond" 10
# add grafana
jq --null-input \
--arg fsid $FSID \
--arg name grafana.a \
--argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/grafana.json)" \
'{"fsid": $fsid, "name": $name, "config_blobs": $config_blobs}' | \
${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
cond="curl --insecure 'https://localhost:3000' | grep -q 'grafana'"
is_available "grafana" "$cond" 50
# add nfs-ganesha
nfs_stop
nfs_rados_pool=$(cat ${CEPHADM_SAMPLES_DIR}/nfs.json | jq -r '.["pool"]')
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph osd pool create $nfs_rados_pool 64
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
rados --pool nfs-ganesha --namespace nfs-ns create conf-nfs.a
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph orch pause
jq --null-input \
--arg fsid $FSID \
--arg name nfs.a \
--arg keyring "$KEYRING" \
--arg config "$CONFIG" \
--argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/nfs.json)" \
'{"fsid": $fsid, "name": $name, "params": {"keyring": $keyring, "config": $config}, "config_blobs": $config_blobs}' | \
${CEPHADM} _orch deploy
cond="$SUDO ss -tlnp '( sport = :nfs )' | grep 'ganesha.nfsd'"
is_available "nfs" "$cond" 10
$CEPHADM shell --fsid $FSID --config $CONFIG --keyring $KEYRING -- \
ceph orch resume
# add alertmanager via custom container
alertmanager_image=$(cat ${CEPHADM_SAMPLES_DIR}/custom_container.json | jq -r '.image')
tcp_ports=$(jq .ports ${CEPHADM_SAMPLES_DIR}/custom_container.json)
jq --null-input \
--arg fsid $FSID \
--arg name container.alertmanager.a \
--arg keyring $TMPDIR/keyring.bootstrap.osd \
--arg config "$CONFIG" \
--arg image "$alertmanager_image" \
--argjson tcp_ports "${tcp_ports}" \
--argjson config_blobs "$(cat ${CEPHADM_SAMPLES_DIR}/custom_container.json)" \
'{"fsid": $fsid, "name": $name, "image": $image, "params": {"keyring": $keyring, "config": $config, "tcp_ports": $tcp_ports}, "config_blobs": $config_blobs}' | \
${CEPHADM//--image $IMAGE_DEFAULT/} _orch deploy
cond="$CEPHADM enter --fsid $FSID --name container.alertmanager.a -- test -f \
/etc/alertmanager/alertmanager.yml"
is_available "alertmanager.yml" "$cond" 10
cond="curl 'http://localhost:9093' | grep -q 'Alertmanager'"
is_available "alertmanager" "$cond" 10
## run
# WRITE ME
## unit
$CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
$CEPHADM unit --fsid $FSID --name mon.a -- is-active
expect_false $CEPHADM unit --fsid $FSID --name mon.xyz -- is-active
$CEPHADM unit --fsid $FSID --name mon.a -- disable
expect_false $CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
$CEPHADM unit --fsid $FSID --name mon.a -- enable
$CEPHADM unit --fsid $FSID --name mon.a -- is-enabled
$CEPHADM unit --fsid $FSID --name mon.a -- status
$CEPHADM unit --fsid $FSID --name mon.a -- stop
expect_return_code 3 $CEPHADM unit --fsid $FSID --name mon.a -- status
$CEPHADM unit --fsid $FSID --name mon.a -- start
## shell
$CEPHADM shell --fsid $FSID -- true
$CEPHADM shell --fsid $FSID -- test -d /var/log/ceph
expect_false $CEPHADM --timeout 10 shell --fsid $FSID -- sleep 60
$CEPHADM --timeout 60 shell --fsid $FSID -- sleep 10
$CEPHADM shell --fsid $FSID --mount $TMPDIR $TMPDIR_TEST_MULTIPLE_MOUNTS -- stat /mnt/$(basename $TMPDIR)
## enter
expect_false $CEPHADM enter
$CEPHADM enter --fsid $FSID --name mon.a -- test -d /var/lib/ceph/mon/ceph-a
$CEPHADM enter --fsid $FSID --name mgr.x -- test -d /var/lib/ceph/mgr/ceph-x
$CEPHADM enter --fsid $FSID --name mon.a -- pidof ceph-mon
expect_false $CEPHADM enter --fsid $FSID --name mgr.x -- pidof ceph-mon
$CEPHADM enter --fsid $FSID --name mgr.x -- pidof ceph-mgr
# this triggers a bug in older versions of podman, including 18.04's 1.6.2
#expect_false $CEPHADM --timeout 5 enter --fsid $FSID --name mon.a -- sleep 30
$CEPHADM --timeout 60 enter --fsid $FSID --name mon.a -- sleep 10
## ceph-volume
$CEPHADM ceph-volume --fsid $FSID -- inventory --format=json \
| jq '.[]'
## preserve test state
[ $CLEANUP = false ] && exit 0
## rm-daemon
# mon and osd require --force
expect_false $CEPHADM rm-daemon --fsid $FSID --name mon.a
# mgr does not
$CEPHADM rm-daemon --fsid $FSID --name mgr.x
expect_false $CEPHADM zap-osds --fsid $FSID
$CEPHADM zap-osds --fsid $FSID --force
## rm-cluster
expect_false $CEPHADM rm-cluster --fsid $FSID --zap-osds
$CEPHADM rm-cluster --fsid $FSID --force --zap-osds
echo PASS
| 15,287 | 31.185263 | 165 |
sh
|
null |
ceph-main/qa/workunits/cephadm/test_dashboard_e2e.sh
|
#!/bin/bash -ex
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DASHBOARD_FRONTEND_DIR=${SCRIPT_DIR}/../../../src/pybind/mgr/dashboard/frontend
[ -z "$SUDO" ] && SUDO=sudo
install_common () {
NODEJS_VERSION="16"
if grep -q debian /etc/*-release; then
$SUDO apt-get update
# https://github.com/nodesource/distributions#manual-installation
$SUDO apt-get install curl gpg
KEYRING=/usr/share/keyrings/nodesource.gpg
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource.gpg.key | gpg --dearmor | $SUDO tee "$KEYRING" >/dev/null
DISTRO="$(source /etc/lsb-release; echo $DISTRIB_CODENAME)"
VERSION="node_$NODEJS_VERSION.x"
echo "deb [signed-by=$KEYRING] https://deb.nodesource.com/$VERSION $DISTRO main" | $SUDO tee /etc/apt/sources.list.d/nodesource.list
echo "deb-src [signed-by=$KEYRING] https://deb.nodesource.com/$VERSION $DISTRO main" | $SUDO tee -a /etc/apt/sources.list.d/nodesource.list
$SUDO apt-get update
$SUDO apt-get install nodejs
elif grep -q rhel /etc/*-release; then
$SUDO yum module -y enable nodejs:$NODEJS_VERSION
$SUDO yum install -y jq npm
else
echo "Unsupported distribution."
exit 1
fi
}
install_chrome () {
if grep -q debian /etc/*-release; then
$SUDO bash -c 'echo "deb [arch=amd64] https://dl.google.com/linux/chrome/deb/ stable main" > /etc/apt/sources.list.d/google-chrome.list'
curl -fsSL https://dl.google.com/linux/linux_signing_key.pub | $SUDO apt-key add -
$SUDO apt-get update
$SUDO apt-get install -y google-chrome-stable
$SUDO apt-get install -y xvfb
$SUDO rm /etc/apt/sources.list.d/google-chrome.list
elif grep -q rhel /etc/*-release; then
$SUDO dd of=/etc/yum.repos.d/google-chrome.repo status=none <<EOF
[google-chrome]
name=google-chrome
baseurl=https://dl.google.com/linux/chrome/rpm/stable/\$basearch
enabled=1
gpgcheck=1
gpgkey=https://dl-ssl.google.com/linux/linux_signing_key.pub
EOF
$SUDO yum install -y google-chrome-stable
$SUDO rm /etc/yum.repos.d/google-chrome.repo
# Cypress dependencies
$SUDO yum install -y xorg-x11-server-Xvfb gtk2-devel gtk3-devel libnotify-devel GConf2 nss.x86_64 libXScrnSaver alsa-lib
else
echo "Unsupported distribution."
exit 1
fi
}
cypress_run () {
local specs="$1"
local timeout="$2"
local override_config="excludeSpecPattern=*.po.ts,retries=0,specPattern=${specs}"
if [ x"$timeout" != "x" ]; then
override_config="${override_config},defaultCommandTimeout=${timeout}"
fi
npx cypress run --browser chrome --headless --config "$override_config"
}
install_common
install_chrome
CYPRESS_BASE_URL=$(ceph mgr services | jq -r .dashboard)
export CYPRESS_BASE_URL
cd $DASHBOARD_FRONTEND_DIR
# This is required for Cypress to understand typescript
npm ci --unsafe-perm
npx cypress verify
npx cypress info
# Take `orch device ls` and `orch ps` as ground truth.
ceph orch device ls --refresh
ceph orch ps --refresh
sleep 10 # the previous call is asynchronous
ceph orch device ls --format=json | tee cypress/fixtures/orchestrator/inventory.json
ceph orch ps --format=json | tee cypress/fixtures/orchestrator/services.json
DASHBOARD_ADMIN_SECRET_FILE="/tmp/dashboard-admin-secret.txt"
printf 'admin' > "${DASHBOARD_ADMIN_SECRET_FILE}"
ceph dashboard ac-user-set-password admin -i "${DASHBOARD_ADMIN_SECRET_FILE}" --force-password
# Run Dashboard e2e tests.
# These tests are designed with execution order in mind: since orchestrator operations
# are likely to change cluster state, we can't just run tests in arbitrary order.
# See /ceph/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/ folder.
find cypress # List all specs
cypress_run "cypress/e2e/orchestrator/01-hosts.e2e-spec.ts"
# Hosts are removed and added in the previous step. Do a refresh again.
ceph orch device ls --refresh
sleep 10
ceph orch device ls --format=json | tee cypress/fixtures/orchestrator/inventory.json
cypress_run "cypress/e2e/orchestrator/03-inventory.e2e-spec.ts"
cypress_run "cypress/e2e/orchestrator/04-osds.e2e-spec.ts" 300000
| 4,224 | 38.12037 | 147 |
sh
|
null |
ceph-main/qa/workunits/cephadm/test_iscsi_etc_hosts.sh
|
#!/bin/bash
# checks if the container and host's /etc/hosts files match
# Necessary to avoid potential bugs caused by podman making
# edits to the /etc/hosts file in the container
# exits with code 1 if host and iscsi container /etc/hosts do not match
set -ex
ISCSI_DAEMON=$(sudo /home/ubuntu/cephtest/cephadm ls | jq -r '.[] | select(.service_name == "iscsi.foo") | .name')
sudo /home/ubuntu/cephtest/cephadm enter --name $ISCSI_DAEMON -- cat /etc/hosts > iscsi_daemon_etc_hosts.txt
if cmp --silent /etc/hosts iscsi_daemon_etc_hosts.txt; then
echo "Daemon and host /etc/hosts files successfully matched"
else
echo "ERROR: /etc/hosts on host did not match /etc/hosts in the iscsi container!"
echo "Host /etc/hosts:"
cat /etc/hosts
echo "Iscsi container /etc/hosts:"
cat iscsi_daemon_etc_hosts.txt
exit 1
fi
| 819 | 36.272727 | 114 |
sh
|
null |
ceph-main/qa/workunits/cephadm/test_iscsi_pids_limit.sh
|
#!/bin/bash
# checks that the container's default pids-limit (4096) is removed and iSCSI
# containers continue to run
# exits 1 if fails
set -ex
ISCSI_CONT_IDS=$(sudo podman ps -qa --filter='name=iscsi')
CONT_COUNT=$(echo ${ISCSI_CONT_IDS} | wc -w)
test ${CONT_COUNT} -eq 2
for i in ${ISCSI_CONT_IDS}
do
test $(sudo podman exec ${i} cat /sys/fs/cgroup/pids/pids.max) == max
done
for i in ${ISCSI_CONT_IDS}
do
sudo podman exec ${i} /bin/sh -c 'for j in {0..20000}; do sleep 300 & done'
done
for i in ${ISCSI_CONT_IDS}
do
SLEEP_COUNT=$(sudo podman exec ${i} /bin/sh -c 'ps -ef | grep -c sleep')
test ${SLEEP_COUNT} -gt 20000
done
echo OK
| 648 | 20.633333 | 77 |
sh
|
null |
ceph-main/qa/workunits/cephadm/test_repos.sh
|
#!/bin/bash -ex
SCRIPT_NAME=$(basename ${BASH_SOURCE[0]})
SCRIPT_DIR=$(dirname ${BASH_SOURCE[0]})
CEPHADM_SRC_DIR=${SCRIPT_DIR}/../../../src/cephadm
[ -d "$TMPDIR" ] || TMPDIR=$(mktemp -d tmp.$SCRIPT_NAME.XXXXXX)
trap "$SUDO rm -rf $TMPDIR" EXIT
if [ -z "$CEPHADM" ]; then
CEPHADM=`mktemp -p $TMPDIR tmp.cephadm.XXXXXX`
${CEPHADM_SRC_DIR}/build.sh "$CEPHADM"
fi
# this is a pretty weak test, unfortunately, since the
# package may also be in the base OS.
function test_install_uninstall() {
( sudo apt update && \
sudo apt -y install cephadm && \
sudo $CEPHADM install && \
sudo apt -y remove cephadm ) || \
( sudo yum -y install cephadm && \
sudo $CEPHADM install && \
sudo yum -y remove cephadm ) || \
( sudo dnf -y install cephadm && \
sudo $CEPHADM install && \
sudo dnf -y remove cephadm ) || \
( sudo zypper -n install cephadm && \
sudo $CEPHADM install && \
sudo zypper -n remove cephadm )
}
sudo $CEPHADM -v add-repo --release octopus
test_install_uninstall
sudo $CEPHADM -v rm-repo
sudo $CEPHADM -v add-repo --dev main
test_install_uninstall
sudo $CEPHADM -v rm-repo
sudo $CEPHADM -v add-repo --release 15.2.7
test_install_uninstall
sudo $CEPHADM -v rm-repo
echo OK.
| 1,250 | 26.195652 | 63 |
sh
|
null |
ceph-main/qa/workunits/cephtool/test.sh
|
#!/usr/bin/env bash
# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*-
# vim: ts=8 sw=8 ft=bash smarttab
set -x
source $(dirname $0)/../../standalone/ceph-helpers.sh
set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}
export CEPH_DEV=1
function check_no_osd_down()
{
! ceph osd dump | grep ' down '
}
function wait_no_osd_down()
{
max_run=300
for i in $(seq 1 $max_run) ; do
if ! check_no_osd_down ; then
echo "waiting for osd(s) to come back up ($i/$max_run)"
sleep 1
else
break
fi
done
check_no_osd_down
}
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
function expect_true()
{
set -x
if ! "$@"; then return 1; else return 0; fi
}
TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0
TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
#
# retry_eagain max cmd args ...
#
# retry cmd args ... if it exits on error and its output contains the
# string EAGAIN, at most $max times
#
function retry_eagain()
{
local max=$1
shift
local status
local tmpfile=$TEMP_DIR/retry_eagain.$$
local count
for count in $(seq 1 $max) ; do
status=0
"$@" > $tmpfile 2>&1 || status=$?
if test $status = 0 ||
! grep --quiet EAGAIN $tmpfile ; then
break
fi
sleep 1
done
if test $count = $max ; then
echo retried with non zero exit status, $max times: "$@" >&2
fi
cat $tmpfile
rm $tmpfile
return $status
}
#
# map_enxio_to_eagain cmd arg ...
#
# add EAGAIN to the output of cmd arg ... if the output contains
# ENXIO.
#
function map_enxio_to_eagain()
{
local status=0
local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$
"$@" > $tmpfile 2>&1 || status=$?
if test $status != 0 &&
grep --quiet ENXIO $tmpfile ; then
echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile
fi
cat $tmpfile
rm $tmpfile
return $status
}
function check_response()
{
expected_string=$1
retcode=$2
expected_retcode=$3
if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then
echo "return code invalid: got $retcode, expected $expected_retcode" >&2
exit 1
fi
if ! grep --quiet -- "$expected_string" $TMPFILE ; then
echo "Didn't find $expected_string in output" >&2
cat $TMPFILE >&2
exit 1
fi
}
function get_config_value_or_die()
{
local target config_opt raw val
target=$1
config_opt=$2
raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`"
if [[ $? -ne 0 ]]; then
echo "error obtaining config opt '$config_opt' from '$target': $raw"
exit 1
fi
raw=`echo $raw | sed -e 's/[{} "]//g'`
val=`echo $raw | cut -f2 -d:`
echo "$val"
return 0
}
function expect_config_value()
{
local target config_opt expected_val val
target=$1
config_opt=$2
expected_val=$3
val=$(get_config_value_or_die $target $config_opt)
if [[ "$val" != "$expected_val" ]]; then
echo "expected '$expected_val', got '$val'"
exit 1
fi
}
function ceph_watch_start()
{
local whatch_opt=--watch
if [ -n "$1" ]; then
whatch_opt=--watch-$1
if [ -n "$2" ]; then
whatch_opt+=" --watch-channel $2"
fi
fi
CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$
ceph $whatch_opt > $CEPH_WATCH_FILE &
CEPH_WATCH_PID=$!
# wait until the "ceph" client is connected and receiving
# log messages from monitor
for i in `seq 3`; do
grep -q "cluster" $CEPH_WATCH_FILE && break
sleep 1
done
}
function ceph_watch_wait()
{
local regexp=$1
local timeout=30
if [ -n "$2" ]; then
timeout=$2
fi
for i in `seq ${timeout}`; do
grep -q "$regexp" $CEPH_WATCH_FILE && break
sleep 1
done
kill $CEPH_WATCH_PID
if ! grep "$regexp" $CEPH_WATCH_FILE; then
echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2
cat $CEPH_WATCH_FILE >&2
return 1
fi
}
function test_mon_injectargs()
{
ceph tell osd.0 injectargs --no-osd_enable_op_tracker
ceph tell osd.0 config get osd_enable_op_tracker | grep false
ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500'
ceph tell osd.0 config get osd_enable_op_tracker | grep true
ceph tell osd.0 config get osd_op_history_duration | grep 500
ceph tell osd.0 injectargs --no-osd_enable_op_tracker
ceph tell osd.0 config get osd_enable_op_tracker | grep false
ceph tell osd.0 injectargs -- --osd_enable_op_tracker
ceph tell osd.0 config get osd_enable_op_tracker | grep true
ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600'
ceph tell osd.0 config get osd_enable_op_tracker | grep true
ceph tell osd.0 config get osd_op_history_duration | grep 600
ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200'
ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200
ceph tell osd.0 injectargs -- '--mon_probe_timeout 2'
ceph tell osd.0 config get mon_probe_timeout | grep 2
ceph tell osd.0 injectargs -- '--mon-lease 6'
ceph tell osd.0 config get mon_lease | grep 6
# osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting
expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 2> $TMPFILE || return 1
check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs"
expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \
ceph tell osd.0 injectargs -- '--osd_op_history_duration'
}
function test_mon_injectargs_SI()
{
# Test SI units during injectargs and 'config set'
# We only aim at testing the units are parsed accordingly
# and don't intend to test whether the options being set
# actually expect SI units to be passed.
# Keep in mind that all integer based options that are not based on bytes
# (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed to
# base 10.
initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects")
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G
expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true
check_response "(22) Invalid argument"
# now test with injectargs
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10'
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K'
expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000
ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G'
expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000
expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F'
expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1'
$SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value
}
function test_mon_injectargs_IEC()
{
# Test IEC units during injectargs and 'config set'
# We only aim at testing the units are parsed accordingly
# and don't intend to test whether the options being set
# actually expect IEC units to be passed.
# Keep in mind that all integer based options that are based on bytes
# (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
# unit modifiers (for backwards compatibility and convenience) and be parsed
# to base 2.
initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
$SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
expect_config_value "mon.a" "mon_data_size_warn" 15000000000
$SUDO ceph daemon mon.a config set mon_data_size_warn 15G
expect_config_value "mon.a" "mon_data_size_warn" 16106127360
$SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi
expect_config_value "mon.a" "mon_data_size_warn" 17179869184
$SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true
check_response "(22) Invalid argument"
# now test with injectargs
ceph tell mon.a injectargs '--mon_data_size_warn 15000000000'
expect_config_value "mon.a" "mon_data_size_warn" 15000000000
ceph tell mon.a injectargs '--mon_data_size_warn 15G'
expect_config_value "mon.a" "mon_data_size_warn" 16106127360
ceph tell mon.a injectargs '--mon_data_size_warn 16Gi'
expect_config_value "mon.a" "mon_data_size_warn" 17179869184
expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F'
$SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value
}
function test_tiering_agent()
{
local slow=slow_eviction
local fast=fast_eviction
ceph osd pool create $slow 1 1
ceph osd pool application enable $slow rados
ceph osd pool create $fast 1 1
ceph osd tier add $slow $fast
ceph osd tier cache-mode $fast writeback
ceph osd tier set-overlay $slow $fast
ceph osd pool set $fast hit_set_type bloom
rados -p $slow put obj1 /etc/group
ceph osd pool set $fast target_max_objects 1
ceph osd pool set $fast hit_set_count 1
ceph osd pool set $fast hit_set_period 5
# wait for the object to be evicted from the cache
local evicted
evicted=false
for i in `seq 1 300` ; do
if ! rados -p $fast ls | grep obj1 ; then
evicted=true
break
fi
sleep 1
done
$evicted # assert
# the object is proxy read and promoted to the cache
rados -p $slow get obj1 - >/dev/null
# wait for the promoted object to be evicted again
evicted=false
for i in `seq 1 300` ; do
if ! rados -p $fast ls | grep obj1 ; then
evicted=true
break
fi
sleep 1
done
$evicted # assert
ceph osd tier remove-overlay $slow
ceph osd tier remove $slow $fast
ceph osd pool delete $fast $fast --yes-i-really-really-mean-it
ceph osd pool delete $slow $slow --yes-i-really-really-mean-it
}
function test_tiering_1()
{
# tiering
ceph osd pool create slow 2
ceph osd pool application enable slow rados
ceph osd pool create slow2 2
ceph osd pool application enable slow2 rados
ceph osd pool create cache 2
ceph osd pool create cache2 2
ceph osd tier add slow cache
ceph osd tier add slow cache2
expect_false ceph osd tier add slow2 cache
# application metadata should propagate to the tiers
ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow") | .application_metadata["rados"]' | grep '{}'
ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "slow2") | .application_metadata["rados"]' | grep '{}'
ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache") | .application_metadata["rados"]' | grep '{}'
ceph osd pool ls detail -f json | jq '.[] | select(.pool_name == "cache2") | .application_metadata["rados"]' | grep '{}'
# forward is removed/deprecated
expect_false ceph osd tier cache-mode cache forward
expect_false ceph osd tier cache-mode cache forward --yes-i-really-mean-it
# test some state transitions
ceph osd tier cache-mode cache writeback
expect_false ceph osd tier cache-mode cache readonly
expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
ceph osd tier cache-mode cache proxy
ceph osd tier cache-mode cache readproxy
ceph osd tier cache-mode cache none
ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
ceph osd tier cache-mode cache none
ceph osd tier cache-mode cache writeback
ceph osd tier cache-mode cache proxy
ceph osd tier cache-mode cache writeback
expect_false ceph osd tier cache-mode cache none
expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
# test with dirty objects in the tier pool
# tier pool currently set to 'writeback'
rados -p cache put /etc/passwd /etc/passwd
flush_pg_stats
# 1 dirty object in pool 'cache'
ceph osd tier cache-mode cache proxy
expect_false ceph osd tier cache-mode cache none
expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
ceph osd tier cache-mode cache writeback
# remove object from tier pool
rados -p cache rm /etc/passwd
rados -p cache cache-flush-evict-all
flush_pg_stats
# no dirty objects in pool 'cache'
ceph osd tier cache-mode cache proxy
ceph osd tier cache-mode cache none
ceph osd tier cache-mode cache readonly --yes-i-really-mean-it
TRIES=0
while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE
do
grep 'currently creating pgs' $TMPFILE
TRIES=$(( $TRIES + 1 ))
test $TRIES -ne 60
sleep 3
done
expect_false ceph osd pool set cache pg_num 4
ceph osd tier cache-mode cache none
ceph osd tier set-overlay slow cache
expect_false ceph osd tier set-overlay slow cache2
expect_false ceph osd tier remove slow cache
ceph osd tier remove-overlay slow
ceph osd tier set-overlay slow cache2
ceph osd tier remove-overlay slow
ceph osd tier remove slow cache
ceph osd tier add slow2 cache
expect_false ceph osd tier set-overlay slow cache
ceph osd tier set-overlay slow2 cache
ceph osd tier remove-overlay slow2
ceph osd tier remove slow2 cache
ceph osd tier remove slow cache2
# make sure a non-empty pool fails
rados -p cache2 put /etc/passwd /etc/passwd
while ! ceph df | grep cache2 | grep ' 1 ' ; do
echo waiting for pg stats to flush
sleep 2
done
expect_false ceph osd tier add slow cache2
ceph osd tier add slow cache2 --force-nonempty
ceph osd tier remove slow cache2
ceph osd pool ls | grep cache2
ceph osd pool ls -f json-pretty | grep cache2
ceph osd pool ls detail | grep cache2
ceph osd pool ls detail -f json-pretty | grep cache2
ceph osd pool delete slow slow --yes-i-really-really-mean-it
ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it
ceph osd pool delete cache cache --yes-i-really-really-mean-it
ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it
}
function test_tiering_2()
{
# make sure we can't clobber snapshot state
ceph osd pool create snap_base 2
ceph osd pool application enable snap_base rados
ceph osd pool create snap_cache 2
ceph osd pool mksnap snap_cache snapname
expect_false ceph osd tier add snap_base snap_cache
ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
}
function test_tiering_3()
{
# make sure we can't create snapshot on tier
ceph osd pool create basex 2
ceph osd pool application enable basex rados
ceph osd pool create cachex 2
ceph osd tier add basex cachex
expect_false ceph osd pool mksnap cachex snapname
ceph osd tier remove basex cachex
ceph osd pool delete basex basex --yes-i-really-really-mean-it
ceph osd pool delete cachex cachex --yes-i-really-really-mean-it
}
function test_tiering_4()
{
# make sure we can't create an ec pool tier
ceph osd pool create eccache 2 2 erasure
expect_false ceph osd set-require-min-compat-client bobtail
ceph osd pool create repbase 2
ceph osd pool application enable repbase rados
expect_false ceph osd tier add repbase eccache
ceph osd pool delete repbase repbase --yes-i-really-really-mean-it
ceph osd pool delete eccache eccache --yes-i-really-really-mean-it
}
function test_tiering_5()
{
# convenient add-cache command
ceph osd pool create slow 2
ceph osd pool application enable slow rados
ceph osd pool create cache3 2
ceph osd tier add-cache slow cache3 1024000
ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4'
ceph osd tier remove slow cache3 2> $TMPFILE || true
check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first"
ceph osd tier remove-overlay slow
ceph osd tier remove slow cache3
ceph osd pool ls | grep cache3
ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it
! ceph osd pool ls | grep cache3 || exit 1
ceph osd pool delete slow slow --yes-i-really-really-mean-it
}
function test_tiering_6()
{
# check that add-cache actually works
ceph osd pool create datapool 2
ceph osd pool application enable datapool rados
ceph osd pool create cachepool 2
ceph osd tier add-cache datapool cachepool 1024000
ceph osd tier cache-mode cachepool writeback
rados -p datapool put object /etc/passwd
rados -p cachepool stat object
rados -p cachepool cache-flush object
rados -p datapool stat object
ceph osd tier remove-overlay datapool
ceph osd tier remove datapool cachepool
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_7()
{
# protection against pool removal when used as tiers
ceph osd pool create datapool 2
ceph osd pool application enable datapool rados
ceph osd pool create cachepool 2
ceph osd tier add-cache datapool cachepool 1024000
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true
check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'"
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true
check_response "EBUSY: pool 'datapool' has tiers cachepool"
ceph osd tier remove-overlay datapool
ceph osd tier remove datapool cachepool
ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
}
function test_tiering_8()
{
## check health check
ceph osd set notieragent
ceph osd pool create datapool 2
ceph osd pool application enable datapool rados
ceph osd pool create cache4 2
ceph osd tier add-cache datapool cache4 1024000
ceph osd tier cache-mode cache4 writeback
tmpfile=$(mktemp|grep tmp)
dd if=/dev/zero of=$tmpfile bs=4K count=1
ceph osd pool set cache4 target_max_objects 200
ceph osd pool set cache4 target_max_bytes 1000000
rados -p cache4 put foo1 $tmpfile
rados -p cache4 put foo2 $tmpfile
rm -f $tmpfile
flush_pg_stats
ceph df | grep datapool | grep ' 2 '
ceph osd tier remove-overlay datapool
ceph osd tier remove datapool cache4
ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it
ceph osd pool delete datapool datapool --yes-i-really-really-mean-it
ceph osd unset notieragent
}
function test_tiering_9()
{
# make sure 'tier remove' behaves as we expect
# i.e., removing a tier from a pool that's not its base pool only
# results in a 'pool foo is now (or already was) not a tier of bar'
#
ceph osd pool create basepoolA 2
ceph osd pool application enable basepoolA rados
ceph osd pool create basepoolB 2
ceph osd pool application enable basepoolB rados
poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}')
poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}')
ceph osd pool create cache5 2
ceph osd pool create cache6 2
ceph osd tier add basepoolA cache5
ceph osd tier add basepoolB cache6
ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of'
ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id"
ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of'
ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id"
ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of'
! ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1
ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of'
! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1
! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1
! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1
ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it
ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it
ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it
ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it
}
function test_auth()
{
expect_false ceph auth add client.xx mon 'invalid' osd "allow *"
expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *"
ceph auth add client.xx mon 'allow *' osd "allow *"
ceph auth export client.xx >client.xx.keyring
ceph auth add client.xx -i client.xx.keyring
rm -f client.xx.keyring
ceph auth list | grep client.xx
ceph auth ls | grep client.xx
ceph auth get client.xx | grep caps | grep mon
ceph auth get client.xx | grep caps | grep osd
ceph auth get-key client.xx
ceph auth print-key client.xx
ceph auth print_key client.xx
ceph auth caps client.xx osd "allow rw"
expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon"
ceph auth get client.xx | grep osd | grep "allow rw"
ceph auth caps client.xx mon 'allow command "osd tree"'
ceph auth export | grep client.xx
ceph auth export -o authfile
ceph auth import -i authfile
ceph auth export -o authfile2
diff authfile authfile2
rm authfile authfile2
ceph auth del client.xx
expect_false ceph auth get client.xx
# (almost) interactive mode
echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph
ceph auth get client.xx
# script mode
echo 'auth del client.xx' | ceph
expect_false ceph auth get client.xx
}
function test_auth_profiles()
{
ceph auth add client.xx-profile-ro mon 'allow profile read-only' \
mgr 'allow profile read-only'
ceph auth add client.xx-profile-rw mon 'allow profile read-write' \
mgr 'allow profile read-write'
ceph auth add client.xx-profile-rd mon 'allow profile role-definer'
ceph auth export > client.xx.keyring
# read-only is allowed all read-only commands (auth excluded)
ceph -n client.xx-profile-ro -k client.xx.keyring status
ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
# read-only gets access denied for rw commands or auth commands
ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true
check_response "EACCES: access denied"
# read-write is allowed for all read-write commands (except auth)
ceph -n client.xx-profile-rw -k client.xx.keyring status
ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
ceph -n client.xx-profile-rw -k client.xx.keyring log foo
ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
# read-write gets access denied for auth commands
ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true
check_response "EACCES: access denied"
# role-definer is allowed RWX 'auth' commands and read-only 'mon' commands
ceph -n client.xx-profile-rd -k client.xx.keyring auth ls
ceph -n client.xx-profile-rd -k client.xx.keyring auth export
ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo
ceph -n client.xx-profile-rd -k client.xx.keyring status
ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true
check_response "EACCES: access denied"
# read-only 'mon' subsystem commands are allowed
ceph -n client.xx-profile-rd -k client.xx.keyring mon dump
# but read-write 'mon' commands are not
ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true
check_response "EACCES: access denied"
ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
# add a new role-definer with the existing role-definer
ceph -n client.xx-profile-rd -k client.xx.keyring \
auth add client.xx-profile-rd2 mon 'allow profile role-definer'
ceph -n client.xx-profile-rd -k client.xx.keyring \
auth export > client.xx.keyring.2
# remove old role-definer using the new role-definer
ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \
auth del client.xx-profile-rd
# remove the remaining role-definer with admin
ceph auth del client.xx-profile-rd2
rm -f client.xx.keyring client.xx.keyring.2
}
function test_mon_caps()
{
ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
chmod +r $TEMP_DIR/ceph.client.bug.keyring
ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
# pass --no-mon-config since we are looking for the permission denied error
rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
cat $TMPFILE
check_response "Permission denied"
rm -rf $TEMP_DIR/ceph.client.bug.keyring
ceph auth del client.bug
ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring
chmod +r $TEMP_DIR/ceph.client.bug.keyring
ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key
ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
check_response "Permission denied"
}
function test_mon_misc()
{
# with and without verbosity
ceph osd dump | grep '^epoch'
ceph --concise osd dump | grep '^epoch'
ceph osd df | grep 'MIN/MAX VAR'
# df
ceph df > $TMPFILE
grep RAW $TMPFILE
grep -v DIRTY $TMPFILE
ceph df detail > $TMPFILE
grep DIRTY $TMPFILE
ceph df --format json > $TMPFILE
grep 'total_bytes' $TMPFILE
grep -v 'dirty' $TMPFILE
ceph df detail --format json > $TMPFILE
grep 'rd_bytes' $TMPFILE
grep 'dirty' $TMPFILE
ceph df --format xml | grep '<total_bytes>'
ceph df detail --format xml | grep '<rd_bytes>'
ceph fsid
ceph health
ceph health detail
ceph health --format json-pretty
ceph health detail --format xml-pretty
ceph time-sync-status
ceph node ls
for t in mon osd mds mgr ; do
ceph node ls $t
done
ceph_watch_start
mymsg="this is a test log message $$.$(date)"
ceph log "$mymsg"
ceph log last | grep "$mymsg"
ceph log last 100 | grep "$mymsg"
ceph_watch_wait "$mymsg"
ceph mgr stat
ceph mgr dump
ceph mgr dump | jq -e '.active_clients[0].name'
ceph mgr module ls
ceph mgr module enable restful
expect_false ceph mgr module enable foodne
ceph mgr module enable foodne --force
ceph mgr module disable foodne
ceph mgr module disable foodnebizbangbash
ceph mon metadata a
ceph mon metadata
ceph mon count-metadata ceph_version
ceph mon versions
ceph mgr metadata
ceph mgr versions
ceph mgr count-metadata ceph_version
ceph versions
ceph node ls
}
function check_mds_active()
{
fs_name=$1
ceph fs get $fs_name | grep active
}
function wait_mds_active()
{
fs_name=$1
max_run=300
for i in $(seq 1 $max_run) ; do
if ! check_mds_active $fs_name ; then
echo "waiting for an active MDS daemon ($i/$max_run)"
sleep 5
else
break
fi
done
check_mds_active $fs_name
}
function get_mds_gids()
{
fs_name=$1
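# print the GIDs of all MDS daemons in the filesystem's mdsmap, space-separated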
ceph fs get $fs_name --format=json | python3 -c "import json; import sys; print(' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()]))"
}
function fail_all_mds()
{
fs_name=$1
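# mark the filesystem down and fail every MDS so that no daemon stays active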
ceph fs set $fs_name cluster_down true
mds_gids=$(get_mds_gids $fs_name)
for mds_gid in $mds_gids ; do
ceph mds fail $mds_gid
done
if check_mds_active $fs_name ; then
echo "An active MDS remains, something went wrong"
ceph fs get $fs_name
exit 1
fi
}
function remove_all_fs()
{
existing_fs=$(ceph fs ls --format=json | python3 -c "import json; import sys; print(' '.join([fs['name'] for fs in json.load(sys.stdin)]))")
for fs_name in $existing_fs ; do
echo "Removing fs ${fs_name}..."
fail_all_mds $fs_name
echo "Removing existing filesystem '${fs_name}'..."
ceph fs rm $fs_name --yes-i-really-mean-it
echo "Removed '${fs_name}'."
done
}
# So that tests requiring MDS can skip if one is not configured
# in the cluster at all
function mds_exists()
{
ceph auth ls | grep "^mds"
}
# some of the commands are just not idempotent.
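# When CEPH_CLI_TEST_DUP_COMMAND is set, ceph commands are issued a second
# time to check idempotency (see the osd create notes further below), so this
# helper temporarily unsets it around a single non-idempotent command, e.g.:
#   without_test_dup_command ceph tell mds.0 respawn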
function without_test_dup_command()
{
if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then
"$@"
else
local saved=${CEPH_CLI_TEST_DUP_COMMAND}
unset CEPH_CLI_TEST_DUP_COMMAND
"$@"
CEPH_CLI_TEST_DUP_COMMAND=$saved
fi
}
function test_mds_tell()
{
local FS_NAME=cephfs
if ! mds_exists ; then
echo "Skipping test, no MDS found"
return
fi
remove_all_fs
ceph osd pool create fs_data 16
ceph osd pool create fs_metadata 16
ceph fs new $FS_NAME fs_metadata fs_data
wait_mds_active $FS_NAME
# Test injectargs by GID
old_mds_gids=$(get_mds_gids $FS_NAME)
echo Old GIDs: $old_mds_gids
for mds_gid in $old_mds_gids ; do
ceph tell mds.$mds_gid injectargs "--debug-mds 20"
done
expect_false ceph tell mds.a injectargs mds_max_file_recover -1
# Test respawn by rank
without_test_dup_command ceph tell mds.0 respawn
new_mds_gids=$old_mds_gids
while [ "$new_mds_gids" = "$old_mds_gids" ] ; do
sleep 5
new_mds_gids=$(get_mds_gids $FS_NAME)
done
echo New GIDs: $new_mds_gids
# Test respawn by ID
without_test_dup_command ceph tell mds.a respawn
new_mds_gids=$old_mds_gids
while [ "$new_mds_gids" = "$old_mds_gids" ] ; do
sleep 5
new_mds_gids=$(get_mds_gids $FS_NAME)
done
echo New GIDs: $new_mds_gids
remove_all_fs
ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mon_mds()
{
local FS_NAME=cephfs
remove_all_fs
ceph osd pool create fs_data 16
ceph osd pool create fs_metadata 16
ceph fs new $FS_NAME fs_metadata fs_data
ceph fs set $FS_NAME cluster_down true
ceph fs set $FS_NAME cluster_down false
ceph mds compat rm_incompat 4
ceph mds compat rm_incompat 4
# We don't want any MDSs to be up, since their activity can interfere with
# the "current_epoch + 1" checking below if they're generating updates
fail_all_mds $FS_NAME
ceph mds compat show
ceph fs dump
ceph fs get $FS_NAME
for mds_gid in $(get_mds_gids $FS_NAME) ; do
ceph mds metadata $mds_gid
done
ceph mds metadata
ceph mds versions
ceph mds count-metadata os
# XXX mds fail, but how do you undo it?
mdsmapfile=$TEMP_DIR/mdsmap.$$
current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
[ -s $mdsmapfile ]
rm $mdsmapfile
ceph osd pool create data2 16
ceph osd pool create data3 16
data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
ceph fs add_data_pool cephfs $data2_pool
ceph fs add_data_pool cephfs $data3_pool
ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
check_response "Error ENOENT"
ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
check_response "Error ENOENT"
ceph fs rm_data_pool cephfs $data2_pool
ceph fs rm_data_pool cephfs $data3_pool
ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
ceph fs set cephfs max_mds 4
ceph fs set cephfs max_mds 3
ceph fs set cephfs max_mds 256
expect_false ceph fs set cephfs max_mds 257
ceph fs set cephfs max_mds 4
ceph fs set cephfs max_mds 256
expect_false ceph fs set cephfs max_mds 257
expect_false ceph fs set cephfs max_mds asdf
expect_false ceph fs set cephfs inline_data true
ceph fs set cephfs inline_data true --yes-i-really-really-mean-it
ceph fs set cephfs inline_data yes --yes-i-really-really-mean-it
ceph fs set cephfs inline_data 1 --yes-i-really-really-mean-it
expect_false ceph fs set cephfs inline_data --yes-i-really-really-mean-it
ceph fs set cephfs inline_data false
ceph fs set cephfs inline_data no
ceph fs set cephfs inline_data 0
expect_false ceph fs set cephfs inline_data asdf
ceph fs set cephfs max_file_size 1048576
expect_false ceph fs set cephfs max_file_size 123asdf
expect_false ceph fs set cephfs allow_new_snaps
ceph fs set cephfs allow_new_snaps true
ceph fs set cephfs allow_new_snaps 0
ceph fs set cephfs allow_new_snaps false
ceph fs set cephfs allow_new_snaps no
expect_false ceph fs set cephfs allow_new_snaps taco
# we should never be able to add EC pools as data or metadata pools
# create an ec-pool...
ceph osd pool create mds-ec-pool 16 16 erasure
set +e
ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
check_response 'erasure-code' $? 22
set -e
ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
fail_all_mds $FS_NAME
set +e
# Check that rmfailed requires confirmation
expect_false ceph mds rmfailed 0
ceph mds rmfailed 0 --yes-i-really-mean-it
set -e
# Check that `fs new` is no longer permitted
expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
# Check that 'fs reset' runs
ceph fs reset $FS_NAME --yes-i-really-mean-it
# Check that creating a second FS fails by default
ceph osd pool create fs_metadata2 16
ceph osd pool create fs_data2 16
set +e
expect_false ceph fs new cephfs2 fs_metadata2 fs_data2
set -e
# Check that setting enable_multiple enables creation of second fs
ceph fs flag set enable_multiple true --yes-i-really-mean-it
ceph fs new cephfs2 fs_metadata2 fs_data2
# Clean up multi-fs stuff
fail_all_mds cephfs2
ceph fs rm cephfs2 --yes-i-really-mean-it
ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it
ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it
fail_all_mds $FS_NAME
# Clean up to enable subsequent fs new tests
ceph fs rm $FS_NAME --yes-i-really-mean-it
set +e
ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
check_response 'erasure-code' $? 22
ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
check_response 'already used by filesystem' $? 22
ceph fs new $FS_NAME mds-ec-pool fs_data --force 2>$TMPFILE
check_response 'erasure-code' $? 22
ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE
check_response 'erasure-code' $? 22
set -e
# ... now create a cache tier in front of the EC pool...
ceph osd pool create mds-tier 2
ceph osd tier add mds-ec-pool mds-tier
ceph osd tier set-overlay mds-ec-pool mds-tier
tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}')
# Use of a readonly tier should be forbidden
ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it
set +e
ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
check_response 'has a write tier (mds-tier) that is configured to forward' $? 22
set -e
# Use of a writeback tier should enable FS creation
ceph osd tier cache-mode mds-tier writeback
ceph fs new $FS_NAME fs_metadata mds-ec-pool --force
# While a FS exists using the tiered pools, I should not be allowed
# to remove the tier
set +e
ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE
check_response 'in use by CephFS' $? 16
ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE
check_response 'in use by CephFS' $? 16
set -e
fail_all_mds $FS_NAME
ceph fs rm $FS_NAME --yes-i-really-mean-it
# ... but we should be forbidden from using the cache pool in the FS directly.
set +e
ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE
check_response 'in use as a cache tier' $? 22
ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE
check_response 'already used by filesystem' $? 22
ceph fs new $FS_NAME mds-tier fs_data --force 2>$TMPFILE
check_response 'in use as a cache tier' $? 22
ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE
check_response 'already used by filesystem' $? 22
ceph fs new $FS_NAME mds-tier mds-tier --force 2>$TMPFILE
check_response 'in use as a cache tier' $? 22
set -e
# Clean up tier + EC pools
ceph osd tier remove-overlay mds-ec-pool
ceph osd tier remove mds-ec-pool mds-tier
# Create a FS using the 'cache' pool now that it's no longer a tier
ceph fs new $FS_NAME fs_metadata mds-tier --force
# We should be forbidden from using this pool as a tier now that
# it's in use for CephFS
set +e
ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE
check_response 'in use by CephFS' $? 16
set -e
fail_all_mds $FS_NAME
ceph fs rm $FS_NAME --yes-i-really-mean-it
# We should be permitted to use an EC pool with overwrites enabled
# as the data pool...
ceph osd pool set mds-ec-pool allow_ec_overwrites true
ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE
fail_all_mds $FS_NAME
ceph fs rm $FS_NAME --yes-i-really-mean-it
# ...but not as the metadata pool
set +e
ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE
check_response 'already used by filesystem' $? 22
ceph fs new $FS_NAME mds-ec-pool fs_data --force 2>$TMPFILE
check_response 'erasure-code' $? 22
set -e
ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
# Create a FS and check that we can subsequently add a cache tier to it
ceph fs new $FS_NAME fs_metadata fs_data --force
# Adding overlay to FS pool should be permitted, RADOS clients handle this.
ceph osd tier add fs_metadata mds-tier
ceph osd tier cache-mode mds-tier writeback
ceph osd tier set-overlay fs_metadata mds-tier
# Removing tier should be permitted because the underlying pool is
# replicated (#11504 case)
ceph osd tier cache-mode mds-tier proxy
ceph osd tier remove-overlay fs_metadata
ceph osd tier remove fs_metadata mds-tier
ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
# Clean up FS
fail_all_mds $FS_NAME
ceph fs rm $FS_NAME --yes-i-really-mean-it
ceph mds stat
# ceph mds tell mds.a getmap
# ceph mds rm
# ceph mds rmfailed
# ceph mds set_state
ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mon_mds_metadata()
{
local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
test "$nmons" -gt 0
ceph fs dump |
sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
while read gid id rank; do
ceph mds metadata ${gid} | grep '"hostname":'
ceph mds metadata ${id} | grep '"hostname":'
ceph mds metadata ${rank} | grep '"hostname":'
local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":')
test "$n" -eq "$nmons"
done
expect_false ceph mds metadata UNKNOWN
}
function test_mon_mon()
{
# print help message
ceph --help mon
# -h works even when some arguments are passed
ceph osd dump -h | grep 'osd dump'
ceph osd dump 123 -h | grep 'osd dump'
# no mon add/remove
ceph mon dump
ceph mon getmap -o $TEMP_DIR/monmap.$$
[ -s $TEMP_DIR/monmap.$$ ]
# ceph mon tell
first=$(ceph mon dump -f json | jq -r '.mons[0].name')
ceph tell mon.$first mon_status
# test mon features
ceph mon feature ls
ceph mon feature set kraken --yes-i-really-mean-it
expect_false ceph mon feature set abcd
expect_false ceph mon feature set abcd --yes-i-really-mean-it
# test elector
expect_failure $TEMP_DIR ceph mon add disallowed_leader $first
ceph mon set election_strategy disallow
ceph mon add disallowed_leader $first
ceph mon set election_strategy connectivity
ceph mon rm disallowed_leader $first
ceph mon set election_strategy classic
expect_failure $TEMP_DIR ceph mon rm disallowed_leader $first
# test mon stat
# don't check output, just ensure it does not fail.
ceph mon stat
ceph mon stat -f json | jq '.'
}
function test_mon_priority_and_weight()
{
for i in 0 1 65535; do
ceph mon set-weight a $i
w=$(ceph mon dump --format=json-pretty 2>/dev/null | jq '.mons[0].weight')
[[ "$w" == "$i" ]]
done
for i in -1 65536; do
expect_false ceph mon set-weight a $i
done
}
function gen_secrets_file()
{
# let's assume we can have the following types
# all - generates both cephx and lockbox, with mock dm-crypt key
# cephx - only cephx
# no_cephx - lockbox and dm-crypt, no cephx
# no_lockbox - dm-crypt and cephx, no lockbox
# empty - empty file
# empty_json - correct json, empty map
# bad_json - bad json :)
#
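# e.g. an "all" file (keys generated with ceph-authtool --gen-print-key) ends
# up looking like:
#   {"cephx_secret": "...", "cephx_lockbox_secret": "...", "dmcrypt_key": "..."}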
local t=$1
if [[ -z "$t" ]]; then
t="all"
fi
fn=$(mktemp $TEMP_DIR/secret.XXXXXX)
echo $fn
if [[ "$t" == "empty" ]]; then
return 0
fi
echo "{" > $fn
if [[ "$t" == "bad_json" ]]; then
echo "asd: ; }" >> $fn
return 0
elif [[ "$t" == "empty_json" ]]; then
echo "}" >> $fn
return 0
fi
cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\""
lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\""
dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\""
if [[ "$t" == "all" ]]; then
echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn
elif [[ "$t" == "cephx" ]]; then
echo "$cephx_secret" >> $fn
elif [[ "$t" == "no_cephx" ]]; then
echo "$lb_secret,$dmcrypt_key" >> $fn
elif [[ "$t" == "no_lockbox" ]]; then
echo "$cephx_secret,$dmcrypt_key" >> $fn
else
echo "unknown gen_secrets_file() type \'$fn\'"
return 1
fi
echo "}" >> $fn
return 0
}
function test_mon_osd_create_destroy()
{
ceph osd new 2>&1 | grep 'EINVAL'
ceph osd new '' -1 2>&1 | grep 'EINVAL'
ceph osd new '' 10 2>&1 | grep 'EINVAL'
old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
old_osds=$(ceph osd ls)
num_osds=$(ceph osd ls | wc -l)
uuid=$(uuidgen)
id=$(ceph osd new $uuid 2>/dev/null)
for i in $old_osds; do
[[ "$i" != "$id" ]]
done
ceph osd find $id
id2=`ceph osd new $uuid 2>/dev/null`
[[ $id2 == $id ]]
ceph osd new $uuid $id
id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST
uuid2=$(uuidgen)
id2=$(ceph osd new $uuid2)
ceph osd find $id2
[[ "$id2" != "$id" ]]
ceph osd new $uuid $id2 2>&1 | grep EEXIST
ceph osd new $uuid2 $id2
# test with secrets
empty_secrets=$(gen_secrets_file "empty")
empty_json=$(gen_secrets_file "empty_json")
all_secrets=$(gen_secrets_file "all")
cephx_only=$(gen_secrets_file "cephx")
no_cephx=$(gen_secrets_file "no_cephx")
no_lockbox=$(gen_secrets_file "no_lockbox")
bad_json=$(gen_secrets_file "bad_json")
# empty secrets should be idempotent
new_id=$(ceph osd new $uuid $id -i $empty_secrets)
[[ "$new_id" == "$id" ]]
# empty json, thus empty secrets
new_id=$(ceph osd new $uuid $id -i $empty_json)
[[ "$new_id" == "$id" ]]
ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST'
ceph osd rm $id
ceph osd rm $id2
ceph osd setmaxosd $old_maxosd
ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL'
ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL'
osds=$(ceph osd ls)
id=$(ceph osd new $uuid -i $all_secrets)
for i in $osds; do
[[ "$i" != "$id" ]]
done
ceph osd find $id
# validate secrets and dm-crypt are set
k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
s=$(cat $all_secrets | jq '.cephx_secret')
[[ $k == $s ]]
k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \
jq '.key')
s=$(cat $all_secrets | jq '.cephx_lockbox_secret')
[[ $k == $s ]]
ceph config-key exists dm-crypt/osd/$uuid/luks
osds=$(ceph osd ls)
id2=$(ceph osd new $uuid2 -i $cephx_only)
for i in $osds; do
[[ "$i" != "$id2" ]]
done
ceph osd find $id2
k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key')
s=$(cat $all_secrets | jq '.cephx_secret')
[[ $k == $s ]]
expect_false ceph auth get-key client.osd-lockbox.$uuid2
expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks
ceph osd destroy osd.$id2 --yes-i-really-mean-it
ceph osd destroy $id2 --yes-i-really-mean-it
ceph osd find $id2
expect_false ceph auth get-key osd.$id2
ceph osd dump | grep osd.$id2 | grep destroyed
id3=$id2
uuid3=$(uuidgen)
ceph osd new $uuid3 $id3 -i $all_secrets
ceph osd dump | grep osd.$id3 | expect_false grep destroyed
ceph auth get-key client.osd-lockbox.$uuid3
ceph auth get-key osd.$id3
ceph config-key exists dm-crypt/osd/$uuid3/luks
ceph osd purge-new osd.$id3 --yes-i-really-mean-it
expect_false ceph osd find $id2
expect_false ceph auth get-key osd.$id2
expect_false ceph auth get-key client.osd-lockbox.$uuid3
expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks
ceph osd purge osd.$id3 --yes-i-really-mean-it
ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent
ceph osd purge osd.$id --yes-i-really-mean-it
ceph osd purge 123456 --yes-i-really-mean-it
expect_false ceph osd find $id
expect_false ceph auth get-key osd.$id
expect_false ceph auth get-key client.osd-lockbox.$uuid
expect_false ceph config-key exists dm-crypt/osd/$uuid/luks
rm $empty_secrets $empty_json $all_secrets $cephx_only \
$no_cephx $no_lockbox $bad_json
for i in $(ceph osd ls); do
[[ "$i" != "$id" ]]
[[ "$i" != "$id2" ]]
[[ "$i" != "$id3" ]]
done
[[ "$(ceph osd ls | wc -l)" == "$num_osds" ]]
ceph osd setmaxosd $old_maxosd
}
function test_mon_config_key()
{
key=asdfasdfqwerqwreasdfuniquesa123df
ceph config-key list | grep -c $key | grep 0
ceph config-key get $key | grep -c bar | grep 0
ceph config-key set $key bar
ceph config-key get $key | grep bar
ceph config-key list | grep -c $key | grep 1
ceph config-key dump | grep $key | grep bar
ceph config-key rm $key
expect_false ceph config-key get $key
ceph config-key list | grep -c $key | grep 0
ceph config-key dump | grep -c $key | grep 0
}
function test_mon_osd()
{
#
# osd blocklist
#
bl=192.168.0.1:0/1000
ceph osd blocklist add $bl
ceph osd blocklist ls | grep $bl
ceph osd blocklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl
ceph osd dump --format=json-pretty | grep $bl
ceph osd dump | grep $bl
ceph osd blocklist rm $bl
ceph osd blocklist ls | expect_false grep $bl
bl=192.168.0.1
# test without nonce, invalid nonce
ceph osd blocklist add $bl
ceph osd blocklist ls | grep $bl
ceph osd blocklist rm $bl
ceph osd blocklist ls | expect_false grep $bl
expect_false "ceph osd blocklist add $bl/-1"
expect_false "ceph osd blocklist add $bl/foo"
# test with invalid address
expect_false "ceph osd blocklist add 1234.56.78.90/100"
# test range blocklisting
bl=192.168.0.1:0/24
ceph osd blocklist range add $bl
ceph osd blocklist ls | grep $bl
ceph osd blocklist range rm $bl
ceph osd blocklist ls | expect_false grep $bl
bad_bl=192.168.0.1/33
expect_false ceph osd blocklist range add $bad_bl
# Test `clear`
ceph osd blocklist add $bl
ceph osd blocklist ls | grep $bl
ceph osd blocklist clear
ceph osd blocklist ls | expect_false grep $bl
# deprecated syntax?
ceph osd blacklist ls
#
# osd crush
#
ceph osd crush reweight-all
ceph osd crush tunables legacy
ceph osd crush show-tunables | grep argonaut
ceph osd crush tunables bobtail
ceph osd crush show-tunables | grep bobtail
ceph osd crush tunables firefly
ceph osd crush show-tunables | grep firefly
ceph osd crush set-tunable straw_calc_version 0
ceph osd crush get-tunable straw_calc_version | grep 0
ceph osd crush set-tunable straw_calc_version 1
ceph osd crush get-tunable straw_calc_version | grep 1
#
# require-min-compat-client
expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables
ceph osd get-require-min-compat-client | grep luminous
ceph osd dump | grep 'require_min_compat_client luminous'
#
# osd scrub
#
# blocking
ceph osd scrub 0 --block
ceph osd deep-scrub 0 --block
# how do I tell when these are done?
ceph osd scrub 0
ceph osd deep-scrub 0
ceph osd repair 0
# pool scrub, force-recovery/backfill
pool_names=`rados lspools`
for pool_name in $pool_names
do
ceph osd pool scrub $pool_name
ceph osd pool deep-scrub $pool_name
ceph osd pool repair $pool_name
ceph osd pool force-recovery $pool_name
ceph osd pool cancel-force-recovery $pool_name
ceph osd pool force-backfill $pool_name
ceph osd pool cancel-force-backfill $pool_name
done
for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \
norebalance norecover notieragent
do
ceph osd set $f
ceph osd unset $f
done
expect_false ceph osd set bogus
expect_false ceph osd unset bogus
for f in sortbitwise recover_deletes require_jewel_osds \
require_kraken_osds
do
expect_false ceph osd set $f
expect_false ceph osd unset $f
done
ceph osd require-osd-release reef
# can't lower
expect_false ceph osd require-osd-release quincy
expect_false ceph osd require-osd-release pacific
# these are no-ops but should succeed.
ceph osd set noup
ceph osd down 0
ceph osd dump | grep 'osd.0 down'
ceph osd unset noup
max_run=1000
for ((i=0; i < $max_run; i++)); do
if ! ceph osd dump | grep 'osd.0 up'; then
echo "waiting for osd.0 to come back up ($i/$max_run)"
sleep 1
else
break
fi
done
ceph osd dump | grep 'osd.0 up'
# ceph osd find expects the OsdName, so both ints and osd.n should work.
ceph osd find 1
ceph osd find osd.1
expect_false ceph osd find osd.xyz
expect_false ceph osd find xyz
expect_false ceph osd find 0.1
ceph --format plain osd find 1 # falls back to json-pretty
if [ `uname` == Linux ]; then
ceph osd metadata 1 | grep 'distro'
ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty
fi
ceph osd out 0
ceph osd dump | grep 'osd.0.*out'
ceph osd in 0
ceph osd dump | grep 'osd.0.*in'
ceph osd find 0
ceph osd info 0
ceph osd info osd.0
expect_false ceph osd info osd.xyz
expect_false ceph osd info xyz
expect_false ceph osd info 42
expect_false ceph osd info osd.42
ceph osd info
info_json=$(ceph osd info --format=json | jq -cM '.')
dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
if [[ "${info_json}" != "${dump_json}" ]]; then
echo "waiting for OSDs to settle"
sleep 10
info_json=$(ceph osd info --format=json | jq -cM '.')
dump_json=$(ceph osd dump --format=json | jq -cM '.osds')
[[ "${info_json}" == "${dump_json}" ]]
fi
info_json=$(ceph osd info 0 --format=json | jq -cM '.')
dump_json=$(ceph osd dump --format=json | \
jq -cM '.osds[] | select(.osd == 0)')
[[ "${info_json}" == "${dump_json}" ]]
info_plain="$(ceph osd info)"
dump_plain="$(ceph osd dump | grep '^osd')"
[[ "${info_plain}" == "${dump_plain}" ]]
info_plain="$(ceph osd info 0)"
dump_plain="$(ceph osd dump | grep '^osd.0')"
[[ "${info_plain}" == "${dump_plain}" ]]
ceph osd add-nodown 0 1
ceph health detail | grep 'NODOWN'
ceph osd rm-nodown 0 1
! ceph health detail | grep 'NODOWN'
ceph osd out 0 # so we can mark it as noin later
ceph osd add-noin 0
ceph health detail | grep 'NOIN'
ceph osd rm-noin 0
! ceph health detail | grep 'NOIN'
ceph osd in 0
ceph osd add-noout 0
ceph health detail | grep 'NOOUT'
ceph osd rm-noout 0
! ceph health detail | grep 'NOOUT'
# test osd id parse
expect_false ceph osd add-noup 797er
expect_false ceph osd add-nodown u9uwer
expect_false ceph osd add-noin 78~15
expect_false ceph osd rm-noup 1234567
expect_false ceph osd rm-nodown fsadf7
expect_false ceph osd rm-noout 790-fd
ids=`ceph osd ls-tree default`
for osd in $ids
do
ceph osd add-nodown $osd
ceph osd add-noout $osd
done
ceph -s | grep 'NODOWN'
ceph -s | grep 'NOOUT'
ceph osd rm-nodown any
ceph osd rm-noout all
! ceph -s | grep 'NODOWN'
! ceph -s | grep 'NOOUT'
# test crush node flags
ceph osd add-noup osd.0
ceph osd add-nodown osd.0
ceph osd add-noin osd.0
ceph osd add-noout osd.0
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
ceph osd rm-noup osd.0
ceph osd rm-nodown osd.0
ceph osd rm-noin osd.0
ceph osd rm-noout osd.0
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0"
ceph osd crush add-bucket foo host root=default
ceph osd add-noup foo
ceph osd add-nodown foo
ceph osd add-noin foo
ceph osd add-noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
ceph osd rm-noup foo
ceph osd rm-nodown foo
ceph osd rm-noin foo
ceph osd rm-noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
ceph osd add-noup foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo
ceph osd crush rm foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo
ceph osd set-group noup osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd set-group noup,nodown osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
ceph osd set-group noup,nodown,noin osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
ceph osd set-group noup,nodown,noin,noout osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
ceph osd unset-group noup osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
ceph osd unset-group noup,nodown osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
ceph osd unset-group noup,nodown,noin osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
ceph osd unset-group noup,nodown,noin,noout osd.0
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
ceph osd set-group noup,nodown,noin,noout osd.0 osd.1
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin'
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout'
ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup'
ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin'
ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout'
ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout'
ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout'
ceph osd set-group noup all
ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup'
ceph osd unset-group noup all
ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup'
# crush node flags
ceph osd crush add-bucket foo host root=default
ceph osd set-group noup foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
ceph osd set-group noup,nodown foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
ceph osd set-group noup,nodown,noin foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd set-group noup,nodown,noin,noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd unset-group noup foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd unset-group noup,nodown foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd unset-group noup,nodown,noin foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd unset-group noup,nodown,noin,noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout'
ceph osd set-group noin,noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd unset-group noin,noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
ceph osd set-group noup,nodown,noin,noout foo
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin'
ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout'
ceph osd crush rm foo
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo'
# test device class flags
osd_0_device_class=$(ceph osd crush get-device-class osd.0)
ceph osd set-group noup $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
ceph osd set-group noup,nodown $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
ceph osd set-group noup,nodown,noin $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
ceph osd set-group noup,nodown,noin,noout $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
ceph osd unset-group noup $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
ceph osd unset-group noup,nodown $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
ceph osd unset-group noup,nodown,noin $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout'
ceph osd set-group noin,noout $osd_0_device_class
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin'
ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout'
ceph osd unset-group noin,noout $osd_0_device_class
ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep $osd_0_device_class
# make sure mark out preserves weight
ceph osd reweight osd.0 .5
ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
ceph osd out 0
ceph osd in 0
ceph osd dump | grep ^osd.0 | grep 'weight 0.5'
f=$TEMP_DIR/map.$$
ceph osd getmap -o $f
[ -s $f ]
rm $f
save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
[ "$save" -gt 0 ]
ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY'
ceph osd setmaxosd 10
ceph osd getmaxosd | grep 'max_osd = 10'
ceph osd setmaxosd $save
ceph osd getmaxosd | grep "max_osd = $save"
for id in `ceph osd ls` ; do
retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version
done
ceph osd rm 0 2>&1 | grep 'EBUSY'
local old_osds=$(echo $(ceph osd ls))
id=`ceph osd create`
ceph osd find $id
ceph osd lost $id --yes-i-really-mean-it
expect_false ceph osd setmaxosd $id
local new_osds=$(echo $(ceph osd ls))
for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
ceph osd rm $id
done
uuid=`uuidgen`
id=`ceph osd create $uuid`
id2=`ceph osd create $uuid`
[ "$id" = "$id2" ]
ceph osd rm $id
ceph --help osd
# reset max_osd.
ceph osd setmaxosd $id
ceph osd getmaxosd | grep "max_osd = $save"
local max_osd=$save
ceph osd create $uuid 0 2>&1 | grep 'EINVAL'
ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL'
id=`ceph osd create $uuid $max_osd`
[ "$id" = "$max_osd" ]
ceph osd find $id
max_osd=$((max_osd + 1))
ceph osd getmaxosd | grep "max_osd = $max_osd"
ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST'
ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST'
id2=`ceph osd create $uuid`
[ "$id" = "$id2" ]
id2=`ceph osd create $uuid $id`
[ "$id" = "$id2" ]
uuid=`uuidgen`
local gap_start=$max_osd
id=`ceph osd create $uuid $((gap_start + 100))`
[ "$id" = "$((gap_start + 100))" ]
max_osd=$((id + 1))
ceph osd getmaxosd | grep "max_osd = $max_osd"
ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST'
#
# When CEPH_CLI_TEST_DUP_COMMAND is set, osd create
# is repeated and consumes two osd ids, not just one.
#
local next_osd=$gap_start
id=`ceph osd create $(uuidgen)`
[ "$id" = "$next_osd" ]
next_osd=$((id + 1))
id=`ceph osd create $(uuidgen) $next_osd`
[ "$id" = "$next_osd" ]
local new_osds=$(echo $(ceph osd ls))
for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do
[ $id -ge $save ]
ceph osd rm $id
done
ceph osd setmaxosd $save
ceph osd ls
ceph osd pool create data 16
ceph osd pool application enable data rados
ceph osd lspools | grep data
ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting'
ceph osd map data foo namespace | grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting'
ceph osd pool delete data data --yes-i-really-really-mean-it
ceph osd pause
ceph osd dump | grep 'flags.*pauserd,pausewr'
ceph osd unpause
ceph osd tree
ceph osd tree up
ceph osd tree down
ceph osd tree in
ceph osd tree out
ceph osd tree destroyed
ceph osd tree up in
ceph osd tree up out
ceph osd tree down in
ceph osd tree down out
ceph osd tree out down
expect_false ceph osd tree up down
expect_false ceph osd tree up destroyed
expect_false ceph osd tree down destroyed
expect_false ceph osd tree up down destroyed
expect_false ceph osd tree in out
expect_false ceph osd tree up foo
ceph osd metadata
ceph osd count-metadata os
ceph osd versions
ceph osd perf
ceph osd blocked-by
ceph osd stat | grep up
}
function test_mon_crush()
{
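# getcrushmap reports the osdmap epoch the crush map came from; setcrushmap
# takes that epoch as a guard and reports the epoch that now carries the new
# crush map, so a successful set should report epoch+1, and re-applying the
# same map is idempotent (the same epoch is reported again)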
f=$TEMP_DIR/map.$$
epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1)
[ -s $f ]
[ "$epoch" -gt 1 ]
nextepoch=$(( $epoch + 1 ))
echo epoch $epoch nextepoch $nextepoch
rm -f $f.epoch
expect_false ceph osd setcrushmap $nextepoch -i $f
gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
echo gotepoch $gotepoch
[ "$gotepoch" -eq "$nextepoch" ]
# should be idempotent
gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1)
echo gotepoch $gotepoch
[ "$gotepoch" -eq "$nextepoch" ]
rm $f
}
function test_mon_osd_pool()
{
#
# osd pool
#
ceph osd pool create data 16
ceph osd pool application enable data rados
ceph osd pool mksnap data datasnap
rados -p data lssnap | grep datasnap
ceph osd pool rmsnap data datasnap
expect_false ceph osd pool rmsnap pool_fake snapshot
ceph osd pool delete data data --yes-i-really-really-mean-it
ceph osd pool create data2 16
ceph osd pool application enable data2 rados
ceph osd pool rename data2 data3
ceph osd lspools | grep data3
ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
ceph osd pool create replicated 16 16 replicated
ceph osd pool create replicated 1 16 replicated
ceph osd pool create replicated 16 16 # default is replicated
ceph osd pool create replicated 16 # default is replicated, pgp_num = pg_num
ceph osd pool application enable replicated rados
# should fail because the type is not the same
expect_false ceph osd pool create replicated 16 16 erasure
ceph osd lspools | grep replicated
ceph osd pool create ec_test 1 1 erasure
ceph osd pool application enable ec_test rados
set +e
ceph osd count-metadata osd_objectstore | grep 'bluestore'
if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail
ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE
check_response "pool must only be stored on bluestore for scrubbing to work" $? 22
else
ceph osd pool set ec_test allow_ec_overwrites true || return 1
expect_false ceph osd pool set ec_test allow_ec_overwrites false
fi
set -e
ceph osd pool delete replicated replicated --yes-i-really-really-mean-it
ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it
# test create pool with rule
ceph osd erasure-code-profile set foo foo
ceph osd erasure-code-profile ls | grep foo
ceph osd crush rule create-erasure foo foo
ceph osd pool create erasure 16 16 erasure foo
expect_false ceph osd erasure-code-profile rm foo
ceph osd pool delete erasure erasure --yes-i-really-really-mean-it
ceph osd crush rule rm foo
ceph osd erasure-code-profile rm foo
# autoscale mode
ceph osd pool create modeon --autoscale-mode=on
ceph osd dump | grep modeon | grep 'autoscale_mode on'
ceph osd pool create modewarn --autoscale-mode=warn
ceph osd dump | grep modewarn | grep 'autoscale_mode warn'
ceph osd pool create modeoff --autoscale-mode=off
ceph osd dump | grep modeoff | grep 'autoscale_mode off'
ceph osd pool delete modeon modeon --yes-i-really-really-mean-it
ceph osd pool delete modewarn modewarn --yes-i-really-really-mean-it
ceph osd pool delete modeoff modeoff --yes-i-really-really-mean-it
}
function test_mon_osd_pool_quota()
{
#
# test osd pool set/get quota
#
# create tmp pool
ceph osd pool create tmp-quota-pool 32
ceph osd pool application enable tmp-quota-pool rados
#
# set erroneous quotas
#
expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10
expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1
expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa
#
# set valid quotas
#
ceph osd pool set-quota tmp-quota-pool max_bytes 10
ceph osd pool set-quota tmp-quota-pool max_objects 10M
#
# get quotas in json-pretty format
#
ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
grep '"quota_max_objects":.*10000000'
ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \
grep '"quota_max_bytes":.*10'
#
# get quotas
#
ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B'
ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects'
#
# set valid quotas with unit prefix
#
ceph osd pool set-quota tmp-quota-pool max_bytes 10K
#
# get quotas
#
ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
#
# set valid quotas with unit prefix
#
ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki
#
# get quotas
#
ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki'
#
#
# reset pool quotas
#
ceph osd pool set-quota tmp-quota-pool max_bytes 0
ceph osd pool set-quota tmp-quota-pool max_objects 0
#
# test N/A quotas
#
ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A'
ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A'
#
# cleanup tmp pool
ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it
}
function test_mon_pg()
{
# Make sure we start healthy.
wait_for_health_ok
ceph pg debug unfound_objects_exist
ceph pg debug degraded_pgs_exist
ceph pg deep-scrub 1.0
ceph pg dump
ceph pg dump pgs_brief --format=json
ceph pg dump pgs --format=json
ceph pg dump pools --format=json
ceph pg dump osds --format=json
ceph pg dump sum --format=json
ceph pg dump all --format=json
ceph pg dump pgs_brief osds --format=json
ceph pg dump pools osds pgs_brief --format=json
ceph pg dump_json
ceph pg dump_pools_json
ceph pg dump_stuck inactive
ceph pg dump_stuck unclean
ceph pg dump_stuck stale
ceph pg dump_stuck undersized
ceph pg dump_stuck degraded
ceph pg ls
ceph pg ls 1
ceph pg ls stale
expect_false ceph pg ls scrubq
ceph pg ls active stale repair recovering
ceph pg ls 1 active
ceph pg ls 1 active stale
ceph pg ls-by-primary osd.0
ceph pg ls-by-primary osd.0 1
ceph pg ls-by-primary osd.0 active
ceph pg ls-by-primary osd.0 active stale
ceph pg ls-by-primary osd.0 1 active stale
ceph pg ls-by-osd osd.0
ceph pg ls-by-osd osd.0 1
ceph pg ls-by-osd osd.0 active
ceph pg ls-by-osd osd.0 active stale
ceph pg ls-by-osd osd.0 1 active stale
ceph pg ls-by-pool rbd
ceph pg ls-by-pool rbd active stale
# can't test this...
# ceph pg force_create_pg
ceph pg getmap -o $TEMP_DIR/map.$$
[ -s $TEMP_DIR/map.$$ ]
ceph pg map 1.0 | grep acting
ceph pg repair 1.0
ceph pg scrub 1.0
ceph osd set-full-ratio .962
ceph osd dump | grep '^full_ratio 0.962'
ceph osd set-backfillfull-ratio .912
ceph osd dump | grep '^backfillfull_ratio 0.912'
ceph osd set-nearfull-ratio .892
ceph osd dump | grep '^nearfull_ratio 0.892'
# Check health status
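# the ratios are expected to satisfy nearfull < backfillfull < full; the two
# out-of-order settings below deliberately break that ordering to trigger OSD_OUT_OF_ORDER_FULL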
ceph osd set-nearfull-ratio .913
ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
ceph health detail | grep OSD_OUT_OF_ORDER_FULL
ceph osd set-nearfull-ratio .892
ceph osd set-backfillfull-ratio .963
ceph health -f json | grep OSD_OUT_OF_ORDER_FULL
ceph health detail | grep OSD_OUT_OF_ORDER_FULL
ceph osd set-backfillfull-ratio .912
# Check injected full results
$SUDO ceph tell osd.0 injectfull nearfull
wait_for_health "OSD_NEARFULL"
ceph health detail | grep "osd.0 is near full"
$SUDO ceph tell osd.0 injectfull none
wait_for_health_ok
$SUDO ceph tell osd.1 injectfull backfillfull
wait_for_health "OSD_BACKFILLFULL"
ceph health detail | grep "osd.1 is backfill full"
$SUDO ceph tell osd.1 injectfull none
wait_for_health_ok
$SUDO ceph tell osd.2 injectfull failsafe
# failsafe and full are the same as far as the monitor is concerned
wait_for_health "OSD_FULL"
ceph health detail | grep "osd.2 is full"
$SUDO ceph tell osd.2 injectfull none
wait_for_health_ok
$SUDO ceph tell osd.0 injectfull full
wait_for_health "OSD_FULL"
ceph health detail | grep "osd.0 is full"
$SUDO ceph tell osd.0 injectfull none
wait_for_health_ok
ceph pg stat | grep 'pgs:'
ceph pg 1.0 query
ceph tell 1.0 query
first=$(ceph mon dump -f json | jq -r '.mons[0].name')
ceph tell mon.$first quorum enter
ceph quorum_status
ceph report | grep osd_stats
ceph status
ceph -s
#
# tell osd version
#
ceph tell osd.0 version
expect_false ceph tell osd.9999 version
expect_false ceph tell osd.foo version
# back to pg stuff
ceph tell osd.0 dump_pg_recovery_stats | grep Started
ceph osd reweight 0 0.9
expect_false ceph osd reweight 0 -1
ceph osd reweight osd.0 1
ceph osd primary-affinity osd.0 .9
expect_false ceph osd primary-affinity osd.0 -2
expect_false ceph osd primary-affinity osd.9999 .5
ceph osd primary-affinity osd.0 1
ceph osd pool set rbd size 2
ceph osd pg-temp 1.0 0 1
ceph osd pg-temp 1.0 osd.1 osd.0
expect_false ceph osd pg-temp 1.0 0 1 2
expect_false ceph osd pg-temp asdf qwer
expect_false ceph osd pg-temp 1.0 asdf
ceph osd pg-temp 1.0 # cleanup pg-temp
ceph pg repeer 1.0
expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore
# don't test ceph osd primary-temp for now
}
function test_mon_osd_pool_set()
{
TEST_POOL_GETSET=pool_getset
expect_false ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio -0.3
expect_true ceph osd pool create $TEST_POOL_GETSET 1 --target_size_ratio 1
ceph osd pool application enable $TEST_POOL_GETSET rados
ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off
wait_for_clean
ceph osd pool get $TEST_POOL_GETSET all
for s in pg_num pgp_num size min_size crush_rule target_size_ratio; do
ceph osd pool get $TEST_POOL_GETSET $s
done
old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
(( new_size = old_size + 1 ))
ceph osd pool set $TEST_POOL_GETSET size $new_size --yes-i-really-mean-it
ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
ceph osd pool set $TEST_POOL_GETSET size $old_size --yes-i-really-mean-it
ceph osd pool create pool_erasure 1 1 erasure
ceph osd pool application enable pool_erasure rados
wait_for_clean
set +e
ceph osd pool set pool_erasure size 4444 2>$TMPFILE
check_response 'not change the size'
set -e
ceph osd pool get pool_erasure erasure_code_profile
ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it
for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub bulk; do
ceph osd pool set $TEST_POOL_GETSET $flag false
ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
ceph osd pool set $TEST_POOL_GETSET $flag true
ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
ceph osd pool set $TEST_POOL_GETSET $flag 1
ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true"
ceph osd pool set $TEST_POOL_GETSET $flag 0
ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false"
expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf
expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2
done
ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456
ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456'
ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0
ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456
ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456'
ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0
ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456
ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456'
ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0
ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'
ceph osd pool set $TEST_POOL_GETSET recovery_priority 0
ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11
expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11
ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio -3
expect_false ceph osd pool set $TEST_POOL_GETSET target_size_ratio abc
expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 0.1
expect_true ceph osd pool set $TEST_POOL_GETSET target_size_ratio 1
ceph osd pool get $TEST_POOL_GETSET target_size_ratio | grep 'target_size_ratio: 1'
ceph osd pool set $TEST_POOL_GETSET nopgchange 1
expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10
expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10
ceph osd pool set $TEST_POOL_GETSET nopgchange 0
ceph osd pool set $TEST_POOL_GETSET pg_num 10
wait_for_clean
ceph osd pool set $TEST_POOL_GETSET pgp_num 10
expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0
expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0
old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //')
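# grow by 32 PGs per OSD, e.g. with 3 OSDs this adds 96 to old_pgs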
new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32))
ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs
ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs
wait_for_clean
ceph osd pool set $TEST_POOL_GETSET nosizechange 1
expect_false ceph osd pool set $TEST_POOL_GETSET size 2
expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2
ceph osd pool set $TEST_POOL_GETSET nosizechange 0
ceph osd pool set $TEST_POOL_GETSET size 2
wait_for_clean
ceph osd pool set $TEST_POOL_GETSET min_size 2
expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it
ceph osd pool get rbd crush_rule | grep 'crush_rule: '
ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive
ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive'
ceph osd pool set $TEST_POOL_GETSET compression_mode unset
ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib
ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib'
ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset
ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1
expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2
ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2
ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2'
ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0
ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.'
ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET csum_type crc32c
ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c'
ceph osd pool set $TEST_POOL_GETSET csum_type unset
ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.'
for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
ceph osd pool set $TEST_POOL_GETSET $size 100
ceph osd pool get $TEST_POOL_GETSET $size | grep '100'
ceph osd pool set $TEST_POOL_GETSET $size 0
ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.'
done
ceph osd pool set $TEST_POOL_GETSET nodelete 1
expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
ceph osd pool set $TEST_POOL_GETSET nodelete 0
ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it
}
function test_mon_osd_tiered_pool_set()
{
# this is really a tier pool
ceph osd pool create real-tier 2
ceph osd tier add rbd real-tier
# expect us to be unable to set negative values for hit_set_*
for o in hit_set_period hit_set_count hit_set_fpp; do
expect_false ceph osd pool set real-tier $o -1
done
# and hit_set_fpp should be in range 0..1
expect_false ceph osd pool set real-tier hit_set_fpp 2
ceph osd pool set real-tier hit_set_type explicit_hash
ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash"
ceph osd pool set real-tier hit_set_type explicit_object
ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object"
ceph osd pool set real-tier hit_set_type bloom
ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom"
expect_false ceph osd pool set real-tier hit_set_type i_dont_exist
ceph osd pool set real-tier hit_set_period 123
ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123"
ceph osd pool set real-tier hit_set_count 12
ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12"
ceph osd pool set real-tier hit_set_fpp .01
ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01"
ceph osd pool set real-tier target_max_objects 123
ceph osd pool get real-tier target_max_objects | \
grep 'target_max_objects:[ \t]\+123'
ceph osd pool set real-tier target_max_bytes 123456
ceph osd pool get real-tier target_max_bytes | \
grep 'target_max_bytes:[ \t]\+123456'
ceph osd pool set real-tier cache_target_dirty_ratio .123
ceph osd pool get real-tier cache_target_dirty_ratio | \
grep 'cache_target_dirty_ratio:[ \t]\+0.123'
expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2
expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1
ceph osd pool set real-tier cache_target_dirty_high_ratio .123
ceph osd pool get real-tier cache_target_dirty_high_ratio | \
grep 'cache_target_dirty_high_ratio:[ \t]\+0.123'
expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2
expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1
ceph osd pool set real-tier cache_target_full_ratio .123
ceph osd pool get real-tier cache_target_full_ratio | \
grep 'cache_target_full_ratio:[ \t]\+0.123'
ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
ceph osd pool set real-tier cache_target_full_ratio 1.0
ceph osd pool set real-tier cache_target_full_ratio 0
expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1
ceph osd pool set real-tier cache_min_flush_age 123
ceph osd pool get real-tier cache_min_flush_age | \
grep 'cache_min_flush_age:[ \t]\+123'
ceph osd pool set real-tier cache_min_evict_age 234
ceph osd pool get real-tier cache_min_evict_age | \
grep 'cache_min_evict_age:[ \t]\+234'
# iec vs si units
ceph osd pool set real-tier target_max_objects 1K
ceph osd pool get real-tier target_max_objects | grep 1000
for o in target_max_bytes target_size_bytes compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do
ceph osd pool set real-tier $o 1Ki # with IEC 'i' suffix
val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
[[ $val == 1024 ]]
ceph osd pool set real-tier $o 1M # no 'i' suffix; byte-size options still parse as binary (see check below)
val=$(ceph osd pool get real-tier $o --format=json | jq -c ".$o")
[[ $val == 1048576 ]]
done
# this is not a tier pool
ceph osd pool create fake-tier 2
ceph osd pool application enable fake-tier rados
wait_for_clean
expect_false ceph osd pool set fake-tier hit_set_type explicit_hash
expect_false ceph osd pool get fake-tier hit_set_type
expect_false ceph osd pool set fake-tier hit_set_type explicit_object
expect_false ceph osd pool get fake-tier hit_set_type
expect_false ceph osd pool set fake-tier hit_set_type bloom
expect_false ceph osd pool get fake-tier hit_set_type
expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist
expect_false ceph osd pool set fake-tier hit_set_period 123
expect_false ceph osd pool get fake-tier hit_set_period
expect_false ceph osd pool set fake-tier hit_set_count 12
expect_false ceph osd pool get fake-tier hit_set_count
expect_false ceph osd pool set fake-tier hit_set_fpp .01
expect_false ceph osd pool get fake-tier hit_set_fpp
expect_false ceph osd pool set fake-tier target_max_objects 123
expect_false ceph osd pool get fake-tier target_max_objects
expect_false ceph osd pool set fake-tier target_max_bytes 123456
expect_false ceph osd pool get fake-tier target_max_bytes
expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123
expect_false ceph osd pool get fake-tier cache_target_dirty_ratio
expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2
expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1
expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123
expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio
expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2
expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1
expect_false ceph osd pool set fake-tier cache_target_full_ratio .123
expect_false ceph osd pool get fake-tier cache_target_full_ratio
expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0
expect_false ceph osd pool set fake-tier cache_target_full_ratio 0
expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1
expect_false ceph osd pool set fake-tier cache_min_flush_age 123
expect_false ceph osd pool get fake-tier cache_min_flush_age
expect_false ceph osd pool set fake-tier cache_min_evict_age 234
expect_false ceph osd pool get fake-tier cache_min_evict_age
ceph osd tier remove rbd real-tier
ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it
ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it
}
function test_mon_osd_erasure_code()
{
ceph osd erasure-code-profile set fooprofile a=b c=d
ceph osd erasure-code-profile set fooprofile a=b c=d
expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f
ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force
ceph osd erasure-code-profile set fooprofile a=b c=d e=f
expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h
# make sure rule-foo doesn't work anymore
expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host
ceph osd erasure-code-profile set barprofile crush-failure-domain=host
# clean up
ceph osd erasure-code-profile rm fooprofile
ceph osd erasure-code-profile rm barprofile
# try weird k and m values
expect_false ceph osd erasure-code-profile set badk k=1 m=1
expect_false ceph osd erasure-code-profile set badk k=1 m=2
expect_false ceph osd erasure-code-profile set badk k=0 m=2
expect_false ceph osd erasure-code-profile set badk k=-1 m=2
expect_false ceph osd erasure-code-profile set badm k=2 m=0
expect_false ceph osd erasure-code-profile set badm k=2 m=-1
ceph osd erasure-code-profile set good k=2 m=1
ceph osd erasure-code-profile rm good
}
function test_mon_osd_misc()
{
set +e
# expect error about missing 'pool' argument
ceph osd map 2>$TMPFILE; check_response 'pool' $? 22
# expect error about unused argument foo
ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
# expect "not in range" for invalid overload percentage
ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
set -e
local old_bytes_per_osd=$(ceph config get mgr mon_reweight_min_bytes_per_osd)
local old_pgs_per_osd=$(ceph config get mgr mon_reweight_min_pgs_per_osd)
# otherwise ceph-mgr complains like:
# Error EDOM: Refusing to reweight: we only have 5372 kb used across all osds!
# Error EDOM: Refusing to reweight: we only have 20 PGs across 3 osds!
ceph config set mgr mon_reweight_min_bytes_per_osd 0
ceph config set mgr mon_reweight_min_pgs_per_osd 0
ceph osd reweight-by-utilization 110
ceph osd reweight-by-utilization 110 .5
expect_false ceph osd reweight-by-utilization 110 0
expect_false ceph osd reweight-by-utilization 110 -0.1
ceph osd test-reweight-by-utilization 110 .5 --no-increasing
ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing
expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing
expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing
ceph osd reweight-by-pg 110
ceph osd test-reweight-by-pg 110 .5
ceph osd reweight-by-pg 110 rbd
ceph osd reweight-by-pg 110 .5 rbd
expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf
# restore the setting
ceph config set mgr mon_reweight_min_bytes_per_osd $old_bytes_per_osd
ceph config set mgr mon_reweight_min_pgs_per_osd $old_pgs_per_osd
}
function test_admin_heap_profiler()
{
do_test=1
set +e
# expect 'heap' commands to be correctly parsed
ceph tell osd.0 heap stats 2>$TMPFILE
if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then
echo "tcmalloc not enabled; skip heap profiler test"
do_test=0
fi
set -e
[[ $do_test -eq 0 ]] && return 0
$SUDO ceph tell osd.0 heap start_profiler
$SUDO ceph tell osd.0 heap dump
$SUDO ceph tell osd.0 heap stop_profiler
$SUDO ceph tell osd.0 heap release
}
function test_osd_bench()
{
# test osd bench limits
# As we should not rely on defaults (as they may change over time),
# lets inject some values and perform some simple tests
# max iops: 10 # 10 IOPS for small block sizes
# max throughput: 10485760 # 10MB/s
# max block size: 2097152 # 2MB
# duration: 10 # 10 seconds
local args="\
--osd-bench-duration 10 \
--osd-bench-max-block-size 2097152 \
--osd-bench-large-size-max-throughput 10485760 \
--osd-bench-small-size-max-iops 10"
ceph tell osd.0 injectargs ${args## }
# anything with a bs larger than 2097152 must fail
expect_false ceph tell osd.0 bench 1 2097153
# but a bs equal to the configured max block size (2097152) must succeed
ceph tell osd.0 bench 1 2097152
# we assume 1MB as a large bs; anything lower is a small bs
# for a 4096-byte bs over 10 seconds, we are limited by IOPS
# max count: 409600 (bytes)
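# (10 IOPS * 10 s * 4096 B per op = 409600 B)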
# more than max count must not be allowed
expect_false ceph tell osd.0 bench 409601 4096
# but 409600 must succeed
ceph tell osd.0 bench 409600 4096
# for a large bs, we are limited by throughput.
# for a 2MB block size for 10 seconds, assuming 10MB/s throughput,
# the max count will be (10MB * 10s) = 100MB
# max count: 104857600 (bytes)
# more than max count must not be allowed
expect_false ceph tell osd.0 bench 104857601 2097152
# up to max count must be allowed
ceph tell osd.0 bench 104857600 2097152
}
function test_osd_negative_filestore_merge_threshold()
{
$SUDO ceph daemon osd.0 config set filestore_merge_threshold -1
expect_config_value "osd.0" "filestore_merge_threshold" -1
}
function test_mon_tell()
{
for m in mon.a mon.b; do
ceph tell $m sessions
ceph_watch_start debug audit
ceph tell $m sessions
ceph_watch_wait "${m} \[DBG\] from.*cmd='sessions' args=\[\]: dispatch"
done
expect_false ceph tell mon.foo version
}
function test_mon_ping()
{
ceph ping mon.a
ceph ping mon.b
expect_false ceph ping mon.foo
ceph ping mon.\*
}
function test_mon_deprecated_commands()
{
# current DEPRECATED commands are marked with FLAG(DEPRECATED)
#
# Testing should be accomplished by setting
# 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for
# each one of these commands.
ceph tell mon.* injectargs '--mon-debug-deprecated-as-obsolete'
expect_false ceph config-key list 2> $TMPFILE
check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete"
ceph tell mon.* injectargs '--no-mon-debug-deprecated-as-obsolete'
}
function test_mon_cephdf_commands()
{
# ceph df detail:
# pool section:
# RAW USED: the approximate raw space used by each pool, out of the raw total
ceph osd pool create cephdf_for_test 1 1 replicated
ceph osd pool application enable cephdf_for_test rados
ceph osd pool set cephdf_for_test size 2
dd if=/dev/zero of=./cephdf_for_test bs=4k count=1
rados put cephdf_for_test cephdf_for_test -p cephdf_for_test
#wait for update
for i in `seq 1 10`; do
rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break
sleep 1
done
# "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need
# to sync mon with osd
flush_pg_stats
local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats'
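# with a size 2 pool, raw usage should be twice the logical usage, i.e. stored * 2 == stored_raw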
stored=`ceph df detail --format=json | jq "$jq_filter.stored * 2"`
stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"`
ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it
rm ./cephdf_for_test
expect_false test $stored != $stored_raw
}
function test_mon_pool_application()
{
ceph osd pool create app_for_test 16
ceph osd pool application enable app_for_test rbd
expect_false ceph osd pool application enable app_for_test rgw
ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it
ceph osd pool ls detail | grep "application rbd,rgw"
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
expect_false ceph osd pool application set app_for_test cephfs key value
ceph osd pool application set app_for_test rbd key1 value1
ceph osd pool application set app_for_test rbd key2 value2
ceph osd pool application set app_for_test rgw key1 value1
ceph osd pool application get app_for_test rbd key1 | grep 'value1'
ceph osd pool application get app_for_test rbd key2 | grep 'value2'
ceph osd pool application get app_for_test rgw key1 | grep 'value1'
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}'
ceph osd pool application rm app_for_test rgw key1
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}'
ceph osd pool application rm app_for_test rbd key2
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}'
ceph osd pool application rm app_for_test rbd key1
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}'
ceph osd pool application rm app_for_test rbd key1 # should be idempotent
expect_false ceph osd pool application disable app_for_test rgw
ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent
ceph osd pool ls detail | grep "application rbd"
ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}'
ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it
ceph osd pool ls detail | grep -v "application "
ceph osd pool ls detail --format=json | grep '"application_metadata":{}'
ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it
}
function test_mon_tell_help_command()
{
ceph tell mon.a help | grep sync_force
ceph tell mon.a -h | grep sync_force
ceph tell mon.a config -h | grep 'config diff get'
# wrong target
expect_false ceph tell mon.zzz help
}
function test_mon_stdin_stdout()
{
echo foo | ceph config-key set test_key -i -
ceph config-key get test_key -o - | grep -c foo | grep -q 1
}
function test_osd_tell_help_command()
{
ceph tell osd.1 help
expect_false ceph tell osd.100 help
}
function test_osd_compact()
{
ceph tell osd.1 compact
$SUDO ceph daemon osd.1 compact
}
function test_mds_tell_help_command()
{
local FS_NAME=cephfs
if ! mds_exists ; then
echo "Skipping test, no MDS found"
return
fi
remove_all_fs
ceph osd pool create fs_data 16
ceph osd pool create fs_metadata 16
ceph fs new $FS_NAME fs_metadata fs_data
wait_mds_active $FS_NAME
ceph tell mds.a help
expect_false ceph tell mds.z help
remove_all_fs
ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
}
function test_mgr_tell()
{
ceph tell mgr version
}
function test_mgr_devices()
{
ceph device ls
expect_false ceph device info doesnotexist
expect_false ceph device get-health-metrics doesnotexist
}
function test_per_pool_scrub_status()
{
ceph osd pool create noscrub_pool 16
ceph osd pool create noscrub_pool2 16
ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
ceph -s --format json | \
jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set"
ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail |
expect_false grep -q "Pool .* has .*scrub.* flag"
ceph health detail | \
expect_false grep -q "Pool .* has .*scrub.* flag"
ceph osd pool set noscrub_pool noscrub 1
ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
ceph -s --format json | \
jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
expect_true grep -q "Some pool(s) have the noscrub flag(s) set"
ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
expect_true grep -q "Pool noscrub_pool has noscrub flag"
ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
ceph osd pool set noscrub_pool nodeep-scrub 1
ceph osd pool set noscrub_pool2 nodeep-scrub 1
ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
ceph -s --format json | \
jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \
expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set"
ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
expect_true grep -q "Pool noscrub_pool has noscrub flag"
ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \
expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag"
ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag"
ceph health detail | expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag"
ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it
ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it
}
#
# New tests should be added to the TESTS array below
#
# Individual tests may be run using the '-t <testname>' argument
# The user can specify '-t <testname>' as many times as she wants
#
# Tests will be run in order presented in the TESTS array, or in
# the order specified by the '-t <testname>' options.
#
# '-l' will list all the available test names
# '-h' will show usage
#
# The test maintains backward compatibility: not specifying arguments
# will run all tests following the order they appear in the TESTS array.
#
set +x
MON_TESTS+=" mon_injectargs"
MON_TESTS+=" mon_injectargs_SI"
for i in `seq 9`; do
MON_TESTS+=" tiering_$i";
done
MON_TESTS+=" auth"
MON_TESTS+=" auth_profiles"
MON_TESTS+=" mon_misc"
MON_TESTS+=" mon_mon"
MON_TESTS+=" mon_osd"
MON_TESTS+=" mon_config_key"
MON_TESTS+=" mon_crush"
MON_TESTS+=" mon_osd_create_destroy"
MON_TESTS+=" mon_osd_pool"
MON_TESTS+=" mon_osd_pool_quota"
MON_TESTS+=" mon_pg"
MON_TESTS+=" mon_osd_pool_set"
MON_TESTS+=" mon_osd_tiered_pool_set"
MON_TESTS+=" mon_osd_erasure_code"
MON_TESTS+=" mon_osd_misc"
MON_TESTS+=" mon_tell"
MON_TESTS+=" mon_ping"
MON_TESTS+=" mon_deprecated_commands"
MON_TESTS+=" mon_caps"
MON_TESTS+=" mon_cephdf_commands"
MON_TESTS+=" mon_tell_help_command"
MON_TESTS+=" mon_stdin_stdout"
OSD_TESTS+=" osd_bench"
OSD_TESTS+=" osd_negative_filestore_merge_threshold"
OSD_TESTS+=" tiering_agent"
OSD_TESTS+=" admin_heap_profiler"
OSD_TESTS+=" osd_tell_help_command"
OSD_TESTS+=" osd_compact"
OSD_TESTS+=" per_pool_scrub_status"
MDS_TESTS+=" mds_tell"
MDS_TESTS+=" mon_mds"
MDS_TESTS+=" mon_mds_metadata"
MDS_TESTS+=" mds_tell_help_command"
MGR_TESTS+=" mgr_tell"
MGR_TESTS+=" mgr_devices"
TESTS+=$MON_TESTS
TESTS+=$OSD_TESTS
TESTS+=$MDS_TESTS
TESTS+=$MGR_TESTS
#
# "main" follows
#
function list_tests()
{
echo "AVAILABLE TESTS"
for i in $TESTS; do
echo " $i"
done
}
function usage()
{
echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]]"
}
tests_to_run=()
sanity_check=true
while [[ $# -gt 0 ]]; do
opt=$1
case "$opt" in
"-l" )
do_list=1
;;
"--asok-does-not-need-root" )
SUDO=""
;;
"--no-sanity-check" )
sanity_check=false
;;
"--test-mon" )
tests_to_run+="$MON_TESTS"
;;
"--test-osd" )
tests_to_run+="$OSD_TESTS"
;;
"--test-mds" )
tests_to_run+="$MDS_TESTS"
;;
"--test-mgr" )
tests_to_run+="$MGR_TESTS"
;;
"-t" )
shift
if [[ -z "$1" ]]; then
echo "missing argument to '-t'"
usage ;
exit 1
fi
tests_to_run+=" $1"
;;
"-h" )
usage ;
exit 0
;;
esac
shift
done
if [[ $do_list -eq 1 ]]; then
list_tests ;
exit 0
fi
ceph osd pool create rbd 16
if test -z "$tests_to_run" ; then
tests_to_run="$TESTS"
fi
if $sanity_check ; then
wait_no_osd_down
fi
for i in $tests_to_run; do
if $sanity_check ; then
check_no_osd_down
fi
set -x
test_${i}
set +x
done
if $sanity_check ; then
check_no_osd_down
fi
set -x
echo OK
| 106,096 | 34.460227 | 173 |
sh
|
null |
ceph-main/qa/workunits/cephtool/test_daemon.sh
|
#!/usr/bin/env bash
set -ex
expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
echo note: assuming mon.a is on the current host
# can set to 'sudo ./ceph' to execute tests from current dir for development
CEPH=${CEPH:-'sudo ceph'}
${CEPH} daemon mon.a version | grep version
# get debug_ms setting and strip it, painfully for reuse
old_ms=$(${CEPH} daemon mon.a config get debug_ms | \
grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
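# e.g. a config get line like '"debug_ms": "0/0"' is reduced to 0/0 by the sed above (illustrative)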
${CEPH} daemon mon.a config set debug_ms 13
new_ms=$(${CEPH} daemon mon.a config get debug_ms | \
grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
[ "$new_ms" = "13/13" ]
${CEPH} daemon mon.a config set debug_ms $old_ms
new_ms=$(${CEPH} daemon mon.a config get debug_ms | \
grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g')
[ "$new_ms" = "$old_ms" ]
# unregistered/non-existent command
expect_false ${CEPH} daemon mon.a bogus_command_blah foo
set +e
OUTPUT=$(${CEPH} -c /not/a/ceph.conf daemon mon.a help 2>&1)
# look for EINVAL
if [ $? != 22 ] ; then exit 1; fi
if ! echo "$OUTPUT" | grep -q '.*open.*/not/a/ceph.conf'; then
echo "didn't find expected error in bad conf search"
exit 1
fi
set -e
echo OK
| 1,184 | 25.931818 | 76 |
sh
|
null |
ceph-main/qa/workunits/cephtool/test_kvstore_tool.sh
|
#!/usr/bin/env bash
set -x
source $(dirname $0)/../../standalone/ceph-helpers.sh
set -e
set -o functrace
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
SUDO=${SUDO:-sudo}
export CEPH_DEV=1
echo note: test ceph_kvstore_tool with bluestore
expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
TEMP_DIR=$(mktemp -d ./cephtool.XXX)
trap "rm -fr $TEMP_DIR" 0
TEMP_FILE=$(mktemp $TEMP_DIR/test_invalid.XXX)
function test_ceph_kvstore_tool()
{
# create a data directory
ceph-objectstore-tool --data-path ${TEMP_DIR} --op mkfs --no-mon-config
# list
origin_kv_nums=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | wc -l`
# exists
prefix=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | head -n 1 | awk '{print $1}'`
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists ${prefix}
expect_false ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists ${prefix}notexist
# list-crc
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list-crc
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list-crc ${prefix}
# list with prefix
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list ${prefix}
# set
echo "helloworld" >> ${TEMP_FILE}
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} set TESTPREFIX TESTKEY in ${TEMP_FILE}
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists TESTPREFIX TESTKEY
# get
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} get TESTPREFIX TESTKEY out ${TEMP_FILE}.bak
diff ${TEMP_FILE} ${TEMP_FILE}.bak
# rm
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} rm TESTPREFIX TESTKEY
expect_false ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists TESTPREFIX TESTKEY
# compact
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} compact
# destructive-repair
ceph-kvstore-tool bluestore-kv ${TEMP_DIR} destructive-repair
current_kv_nums=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | wc -l`
test ${origin_kv_nums} -eq ${current_kv_nums}
}
test_ceph_kvstore_tool
echo OK
| 1,976 | 26.458333 | 101 |
sh
|
null |
ceph-main/qa/workunits/client/test.sh
|
#!/bin/sh
set -ex
ceph_test_client
| 37 | 5.333333 | 16 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_2pc_queue.sh
|
#!/bin/sh -e
ceph_test_cls_2pc_queue
exit 0
| 46 | 6.833333 | 23 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_cas.sh
|
#!/bin/sh -e
GTEST_FILTER=${CLS_CAS_GTEST_FILTER:-*}
ceph_test_cls_cas --gtest_filter=${GTEST_FILTER}
exit 0
| 111 | 15 | 48 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_cmpomap.sh
|
#!/bin/sh -e
ceph_test_cls_cmpomap
exit 0
| 44 | 6.5 | 21 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_hello.sh
|
#!/bin/sh -e
ceph_test_cls_hello
exit 0
| 42 | 6.166667 | 19 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_journal.sh
|
#!/bin/sh -e
GTEST_FILTER=${CLS_JOURNAL_GTEST_FILTER:-*}
ceph_test_cls_journal --gtest_filter=${GTEST_FILTER}
exit 0
| 119 | 16.142857 | 52 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_lock.sh
|
#!/bin/sh -e
ceph_test_cls_lock
exit 0
| 41 | 6 | 18 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_log.sh
|
#!/bin/sh -e
ceph_test_cls_log
exit 0
| 40 | 5.833333 | 17 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_numops.sh
|
#!/bin/sh -e
ceph_test_cls_numops
exit 0
| 43 | 6.333333 | 20 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_rbd.sh
|
#!/bin/sh -e
GTEST_FILTER=${CLS_RBD_GTEST_FILTER:-*}
ceph_test_cls_rbd --gtest_filter=${GTEST_FILTER}
exit 0
| 111 | 15 | 48 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_refcount.sh
|
#!/bin/sh -e
ceph_test_cls_refcount
exit 0
| 45 | 6.666667 | 22 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_rgw.sh
|
#!/bin/sh -e
ceph_test_cls_rgw
#ceph_test_cls_rgw_meta
#ceph_test_cls_rgw_log
#ceph_test_cls_rgw_opstate
exit 0
| 114 | 11.777778 | 26 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_rgw_gc.sh
|
#!/bin/sh -e
ceph_test_cls_rgw_gc
exit 0
| 43 | 6.333333 | 20 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_rgw_stats.sh
|
#!/bin/sh -e
ceph_test_cls_rgw_stats
exit 0
| 46 | 6.833333 | 23 |
sh
|
null |
ceph-main/qa/workunits/cls/test_cls_sdk.sh
|
#!/bin/sh -e
ceph_test_cls_sdk
exit 0
| 40 | 5.833333 | 17 |
sh
|
null |
ceph-main/qa/workunits/direct_io/big.sh
|
#!/bin/sh -ex
echo "test large (16MB) dio write"
dd if=/dev/zero of=foo.big bs=16M count=1 oflag=direct
echo OK
| 114 | 15.428571 | 54 |
sh
|
null |
ceph-main/qa/workunits/direct_io/direct_io_test.c
|
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include <inttypes.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
/*
* direct_io_test
*
* This test does some I/O using O_DIRECT.
*
* Semantics of O_DIRECT can be found at http://lwn.net/Articles/348739/
*
*/
static int g_num_pages = 100;
static int g_duration = 10;
struct chunk {
uint64_t offset;
uint64_t pad0;
uint64_t pad1;
uint64_t pad2;
uint64_t pad3;
uint64_t pad4;
uint64_t pad5;
uint64_t not_offset;
} __attribute__((packed));
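/* eight 8-byte fields: sizeof(struct chunk) == 64, so a 4 KiB page holds 64 chunks */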
static int page_size;
static char temp_file[] = "direct_io_temp_file_XXXXXX";
static int safe_write(int fd, const void *buf, signed int len)
{
const char *b = (const char*)buf;
/* Handle EINTR and short writes */
while (1) {
int res = write(fd, b, len);
if (res < 0) {
int err = errno;
if (err != EINTR) {
return err;
}
continue; /* interrupted before anything was written; retry */
}
len -= res;
b += res;
if (len <= 0)
return 0;
}
}
static int do_read(int fd, char *buf, int buf_sz)
{
/* We assume no short reads or EINTR. It's not really clear how
* those things interact with O_DIRECT. */
int ret = read(fd, buf, buf_sz);
if (ret < 0) {
int err = errno;
printf("do_read: error: %d (%s)\n", err, strerror(err));
return err;
}
if (ret != buf_sz) {
printf("do_read: short read\n");
return -EIO;
}
return 0;
}
static int setup_temp_file(void)
{
int fd;
int64_t num_chunks, i;
if (page_size % sizeof(struct chunk)) {
printf("setup_big_file: page_size doesn't divide evenly "
"into data blocks.\n");
return -EINVAL;
}
fd = mkstemp(temp_file);
if (fd < 0) {
int err = errno;
printf("setup_big_file: mkostemps failed with error %d\n", err);
return err;
}
num_chunks = g_num_pages * (page_size / sizeof(struct chunk));
for (i = 0; i < num_chunks; ++i) {
int ret;
struct chunk c;
memset(&c, 0, sizeof(c));
c.offset = i * sizeof(struct chunk);
c.pad0 = 0;
c.pad1 = 1;
c.pad2 = 2;
c.pad3 = 3;
c.pad4 = 4;
c.pad5 = 5;
c.not_offset = ~c.offset;
ret = safe_write(fd, &c, sizeof(struct chunk));
if (ret) {
printf("setup_big_file: safe_write failed with "
"error: %d\n", ret);
TEMP_FAILURE_RETRY(close(fd));
unlink(temp_file);
return ret;
}
}
TEMP_FAILURE_RETRY(close(fd));
return 0;
}
static int verify_chunk(const struct chunk *c, uint64_t offset)
{
if (c->offset != offset) {
printf("verify_chunk(%" PRId64 "): bad offset value (got: %"
PRId64 ", expected: %" PRId64 "\n", offset, c->offset, offset);
return EIO;
}
if (c->pad0 != 0) {
printf("verify_chunk(%" PRId64 "): bad pad0 value\n", offset);
return EIO;
}
if (c->pad1 != 1) {
printf("verify_chunk(%" PRId64 "): bad pad1 value\n", offset);
return EIO;
}
if (c->pad2 != 2) {
printf("verify_chunk(%" PRId64 "): bad pad2 value\n", offset);
return EIO;
}
if (c->pad3 != 3) {
printf("verify_chunk(%" PRId64 "): bad pad3 value\n", offset);
return EIO;
}
if (c->pad4 != 4) {
printf("verify_chunk(%" PRId64 "): bad pad4 value\n", offset);
return EIO;
}
if (c->pad5 != 5) {
printf("verify_chunk(%" PRId64 "): bad pad5 value\n", offset);
return EIO;
}
if (c->not_offset != ~offset) {
printf("verify_chunk(%" PRId64 "): bad not_offset value\n",
offset);
return EIO;
}
return 0;
}
static int do_o_direct_reads(void)
{
int fd, ret;
unsigned int i;
void *buf = 0;
time_t cur_time, end_time;
ret = posix_memalign(&buf, page_size, page_size);
if (ret) {
printf("do_o_direct_reads: posix_memalign returned %d\n", ret);
goto done;
}
fd = open(temp_file, O_RDONLY | O_DIRECT);
if (fd < 0) {
ret = errno;
printf("do_o_direct_reads: error opening fd: %d\n", ret);
goto free_buf;
}
// read the first chunk and see if it looks OK
ret = do_read(fd, buf, page_size);
if (ret)
goto close_fd;
ret = verify_chunk((struct chunk*)buf, 0);
if (ret)
goto close_fd;
// read some random chunks and see how they look
cur_time = time(NULL);
end_time = cur_time + g_duration;
i = 0;
do {
time_t next_time;
uint64_t offset;
int page;
unsigned int seed;
seed = i++;
page = rand_r(&seed) % g_num_pages;
offset = page;
offset *= page_size;
if (lseek64(fd, offset, SEEK_SET) == -1) {
int err = errno;
printf("lseek64(%" PRId64 ") failed: error %d (%s)\n",
offset, err, strerror(err));
goto close_fd;
}
ret = do_read(fd, buf, page_size);
if (ret)
goto close_fd;
ret = verify_chunk((struct chunk*)buf, offset);
if (ret)
goto close_fd;
next_time = time(NULL);
if (next_time > cur_time) {
printf(".");
}
cur_time = next_time;
} while (time(NULL) < end_time);
printf("\ndo_o_direct_reads: SUCCESS\n");
close_fd:
TEMP_FAILURE_RETRY(close(fd));
free_buf:
free(buf);
done:
return ret;
}
static void usage(char *argv0)
{
printf("%s: tests direct I/O\n", argv0);
printf("-d <seconds>: sets duration to <seconds>\n");
printf("-h: this help\n");
printf("-p <pages>: sets number of pages to allocate\n");
}
static void parse_args(int argc, char *argv[])
{
int c;
while ((c = getopt (argc, argv, "d:hp:")) != -1) {
switch (c) {
case 'd':
g_duration = atoi(optarg);
if (g_duration <= 0) {
printf("tried to set invalid value of "
"g_duration: %d\n", g_num_pages);
exit(1);
}
break;
case 'h':
usage(argv[0]);
exit(0);
break;
case 'p':
g_num_pages = atoi(optarg);
if (g_num_pages <= 0) {
printf("tried to set invalid value of "
"g_num_pages: %d\n", g_num_pages);
exit(1);
}
break;
case '?':
usage(argv[0]);
exit(1);
break;
default:
usage(argv[0]);
exit(1);
break;
}
}
}
int main(int argc, char *argv[])
{
int ret;
parse_args(argc, argv);
setvbuf(stdout, NULL, _IONBF, 0);
page_size = getpagesize();
ret = setup_temp_file();
if (ret) {
printf("setup_temp_file failed with error %d\n", ret);
goto done;
}
ret = do_o_direct_reads();
if (ret) {
printf("do_o_direct_reads failed with error %d\n", ret);
goto unlink_temp_file;
}
unlink_temp_file:
unlink(temp_file);
done:
return ret;
}
| 9,251 | 28.559105 | 86 |
c
|
null |
ceph-main/qa/workunits/direct_io/misc.sh
|
#!/bin/sh -ex
# a few test cases from henry
echo "test read from hole"
dd if=/dev/zero of=dd3 bs=1 seek=1048576 count=0
dd if=dd3 of=/tmp/ddout1 skip=8 bs=512 count=2 iflag=direct
dd if=/dev/zero of=/tmp/dd3 bs=512 count=2
cmp /tmp/dd3 /tmp/ddout1
echo "other thing"
dd if=/dev/urandom of=/tmp/dd10 bs=500 count=1
dd if=/tmp/dd10 of=dd10 bs=512 seek=8388 count=1
dd if=dd10 of=/tmp/dd10out bs=512 skip=8388 count=1 iflag=direct
cmp /tmp/dd10 /tmp/dd10out
echo OK
| 466 | 26.470588 | 64 |
sh
|
null |
ceph-main/qa/workunits/direct_io/test_short_dio_read.c
|
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
int main()
{
char buf[409600];
ssize_t r;
int err;
int fd = open("shortfile", O_WRONLY|O_CREAT, 0644);
if (fd < 0) {
err = errno;
printf("error: open() failed with: %d (%s)\n", err, strerror(err));
exit(err);
}
printf("writing first 3 bytes of 10k file\n");
r = write(fd, "foo", 3);
if (r == -1) {
err = errno;
printf("error: write() failed with: %d (%s)\n", err, strerror(err));
close(fd);
exit(err);
}
r = ftruncate(fd, 10000);
if (r == -1) {
err = errno;
printf("error: ftruncate() failed with: %d (%s)\n", err, strerror(err));
close(fd);
exit(err);
}
fsync(fd);
close(fd);
printf("reading O_DIRECT\n");
fd = open("shortfile", O_RDONLY|O_DIRECT);
if (fd < 0) {
err = errno;
printf("error: open() failed with: %d (%s)\n", err, strerror(err));
exit(err);
}
r = read(fd, buf, sizeof(buf));
close(fd);
printf("got %d\n", (int)r);
if (r != 10000)
return 1;
return 0;
}
| 1,168 | 19.155172 | 74 |
c
|
null |
ceph-main/qa/workunits/direct_io/test_sync_io.c
|
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <inttypes.h>
#include <linux/types.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <errno.h>
//#include "../client/ioctl.h"
#include <linux/ioctl.h>
#define CEPH_IOCTL_MAGIC 0x97
#define CEPH_IOC_SYNCIO _IO(CEPH_IOCTL_MAGIC, 5)
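/* CEPH_IOC_SYNCIO asks the ceph client to perform synchronous I/O on this fd
 * (as the name suggests); the test exercises that path alongside plain O_DIRECT. */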
void write_pattern()
{
printf("writing pattern\n");
uint64_t i;
int r;
int fd = open("foo", O_CREAT|O_WRONLY, 0644);
if (fd < 0) {
r = errno;
printf("write_pattern: error: open() failed with: %d (%s)\n", r, strerror(r));
exit(r);
}
for (i=0; i<1048576 * sizeof(i); i += sizeof(i)) {
r = write(fd, &i, sizeof(i));
if (r == -1) {
r = errno;
printf("write_pattern: error: write() failed with: %d (%s)\n", r, strerror(r));
break;
}
}
close(fd);
}
int verify_pattern(char *buf, size_t len, uint64_t off)
{
size_t i;
for (i = 0; i < len; i += sizeof(uint64_t)) {
uint64_t expected = i + off;
uint64_t actual = *(uint64_t*)(buf + i);
if (expected != actual) {
printf("error: offset %llu had %llu\n", (unsigned long long)expected,
(unsigned long long)actual);
exit(1);
}
}
return 0;
}
void generate_pattern(void *buf, size_t len, uint64_t offset)
{
uint64_t *v = buf;
size_t i;
for (i=0; i<len / sizeof(*v); i++)
v[i] = i * sizeof(*v) + offset;
verify_pattern(buf, len, offset);
}
int read_file(int buf_align, uint64_t offset, int len, int direct) {
printf("read_file buf_align %d offset %llu len %d\n", buf_align,
(unsigned long long)offset, len);
void *rawbuf;
int r;
int flags;
int err = 0;
if(direct)
flags = O_RDONLY|O_DIRECT;
else
flags = O_RDONLY;
int fd = open("foo", flags);
if (fd < 0) {
err = errno;
printf("read_file: error: open() failed with: %d (%s)\n", err, strerror(err));
exit(err);
}
if (!direct)
ioctl(fd, CEPH_IOC_SYNCIO);
if ((r = posix_memalign(&rawbuf, 4096, len + buf_align)) != 0) {
printf("read_file: error: posix_memalign failed with %d", r);
close(fd);
exit (r);
}
void *buf = (char *)rawbuf + buf_align;
memset(buf, 0, len);
r = pread(fd, buf, len, offset);
if (r == -1) {
err = errno;
printf("read_file: error: pread() failed with: %d (%s)\n", err, strerror(err));
goto out;
}
r = verify_pattern(buf, len, offset);
out:
close(fd);
free(rawbuf);
return r;
}
int read_direct(int buf_align, uint64_t offset, int len)
{
printf("read_direct buf_align %d offset %llu len %d\n", buf_align,
(unsigned long long)offset, len);
return read_file(buf_align, offset, len, 1);
}
int read_sync(int buf_align, uint64_t offset, int len)
{
printf("read_sync buf_align %d offset %llu len %d\n", buf_align,
(unsigned long long)offset, len);
return read_file(buf_align, offset, len, 0);
}
int write_file(int buf_align, uint64_t offset, int len, int direct)
{
printf("write_file buf_align %d offset %llu len %d\n", buf_align,
(unsigned long long)offset, len);
void *rawbuf;
int r;
int err = 0;
int flags;
if (direct)
flags = O_WRONLY|O_DIRECT|O_CREAT;
else
flags = O_WRONLY|O_CREAT;
int fd = open("foo", flags, 0644);
if (fd < 0) {
int err = errno;
printf("write_file: error: open() failed with: %d (%s)\n", err, strerror(err));
exit(err);
}
if ((r = posix_memalign(&rawbuf, 4096, len + buf_align)) != 0) {
printf("write_file: error: posix_memalign failed with %d", r);
err = r;
goto out_close;
}
if (!direct)
ioctl(fd, CEPH_IOC_SYNCIO);
void *buf = (char *)rawbuf + buf_align;
generate_pattern(buf, len, offset);
r = pwrite(fd, buf, len, offset);
close(fd);
fd = open("foo", O_RDONLY);
if (fd < 0) {
err = errno;
printf("write_file: error: open() failed with: %d (%s)\n", err, strerror(err));
free(rawbuf);
goto out_unlink;
}
void *buf2 = malloc(len);
if (!buf2) {
err = -ENOMEM;
printf("write_file: error: malloc failed\n");
goto out_free;
}
memset(buf2, 0, len);
r = pread(fd, buf2, len, offset);
if (r == -1) {
err = errno;
printf("write_file: error: pread() failed with: %d (%s)\n", err, strerror(err));
goto out_free_buf;
}
r = verify_pattern(buf2, len, offset);
out_free_buf:
free(buf2);
out_free:
free(rawbuf);
out_close:
close(fd);
out_unlink:
unlink("foo");
if (err)
exit(err);
return r;
}
int write_direct(int buf_align, uint64_t offset, int len)
{
printf("write_direct buf_align %d offset %llu len %d\n", buf_align,
(unsigned long long)offset, len);
return write_file (buf_align, offset, len, 1);
}
int write_sync(int buf_align, uint64_t offset, int len)
{
printf("write_sync buf_align %d offset %llu len %d\n", buf_align,
(unsigned long long)offset, len);
return write_file (buf_align, offset, len, 0);
}
int main(int argc, char **argv)
{
uint64_t i, j, k;
int read = 1;
int write = 1;
if (argc >= 2 && strcmp(argv[1], "read") == 0)
write = 0;
if (argc >= 2 && strcmp(argv[1], "write") == 0)
read = 0;
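/* offsets straddle the 4 MiB mark (presumably an object/stripe boundary) so the
 * reads and writes below cross it with varying buffer alignments and lengths */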
if (read) {
write_pattern();
for (i = 0; i < 4096; i += 512)
for (j = 4*1024*1024 - 4096; j < 4*1024*1024 + 4096; j += 512)
for (k = 1024; k <= 16384; k *= 2) {
read_direct(i, j, k);
read_sync(i, j, k);
}
}
unlink("foo");
if (write) {
for (i = 0; i < 4096; i += 512)
for (j = 4*1024*1024 - 4096 + 512; j < 4*1024*1024 + 4096; j += 512)
for (k = 1024; k <= 16384; k *= 2) {
write_direct(i, j, k);
write_sync(i, j, k);
}
}
return 0;
}
| 5,551 | 21.119522 | 84 |
c
|
null |
ceph-main/qa/workunits/erasure-code/bench.html
|
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd" >
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Erasure Code Plugins Benchmarks</title>
<link href="examples.css" rel="stylesheet" type="text/css">
<script language="javascript" type="text/javascript" src="jquery.js"></script>
<script language="javascript" type="text/javascript" src="jquery.flot.js"></script>
<script language="javascript" type="text/javascript" src="jquery.flot.categories.js"></script>
<script language="javascript" type="text/javascript" src="bench.js"></script>
<script language="javascript" type="text/javascript" src="plot.js"></script>
</head>
<body>
<div id="header">
<h2>Erasure Code Plugins Benchmarks</h2>
</div>
<div id="content">
<div class="demo-container">
<div id="encode" class="demo-placeholder"></div>
</div>
<p>encode: Y = GB/s, X = K/M</p>
<div class="demo-container">
<div id="decode" class="demo-placeholder"></div>
</div>
<p>decode: Y = GB/s, X = K/M/erasures</p>
</div>
</body>
</html>
| 1,168 | 32.4 | 98 |
html
|
null |
ceph-main/qa/workunits/erasure-code/bench.sh
|
#!/usr/bin/env bash
#
# Copyright (C) 2015 Red Hat <[email protected]>
# Copyright (C) 2013,2014 Cloudwatt <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
# Test that it works from sources with:
#
# CEPH_ERASURE_CODE_BENCHMARK=src/ceph_erasure_code_benchmark \
# PLUGIN_DIRECTORY=build/lib \
# qa/workunits/erasure-code/bench.sh fplot jerasure |
# tee qa/workunits/erasure-code/bench.js
#
# This should start immediately and display:
#
# ...
# [ '2/1', .48035538612887358583 ],
# [ '3/2', .21648470405675016626 ],
# etc.
#
# and complete within a few seconds. The result can then be displayed with:
#
# firefox qa/workunits/erasure-code/bench.html
#
# Once it is confirmed to work, it can be run with a more significant
# volume of data so that the measures are more reliable:
#
# TOTAL_SIZE=$((4 * 1024 * 1024 * 1024)) \
# CEPH_ERASURE_CODE_BENCHMARK=src/ceph_erasure_code_benchmark \
# PLUGIN_DIRECTORY=build/lib \
# qa/workunits/erasure-code/bench.sh fplot jerasure |
# tee qa/workunits/erasure-code/bench.js
#
set -e
export PATH=/sbin:$PATH
: ${VERBOSE:=false}
: ${CEPH_ERASURE_CODE_BENCHMARK:=ceph_erasure_code_benchmark}
: ${PLUGIN_DIRECTORY:=/usr/lib/ceph/erasure-code}
: ${PLUGINS:=isa jerasure}
: ${TECHNIQUES:=vandermonde cauchy}
: ${TOTAL_SIZE:=$((1024 * 1024))}
: ${SIZE:=4096}
: ${PARAMETERS:=--parameter jerasure-per-chunk-alignment=true}
function bench_header() {
echo -e "seconds\tKB\tplugin\tk\tm\twork.\titer.\tsize\teras.\tcommand."
}
function bench() {
local plugin=$1
shift
local k=$1
shift
local m=$1
shift
local workload=$1
shift
local iterations=$1
shift
local size=$1
shift
local erasures=$1
shift
command=$(echo $CEPH_ERASURE_CODE_BENCHMARK \
--plugin $plugin \
--workload $workload \
--iterations $iterations \
--size $size \
--erasures $erasures \
--parameter k=$k \
--parameter m=$m \
--erasure-code-dir $PLUGIN_DIRECTORY)
result=$($command "$@")
echo -e "$result\t$plugin\t$k\t$m\t$workload\t$iterations\t$size\t$erasures\t$command ""$@"
}
function packetsize() {
local k=$1
local w=$2
local vector_wordsize=$3
local size=$4
local p=$(( ($size / $k / $w / $vector_wordsize ) * $vector_wordsize))
if [ $p -gt 3100 ] ; then
p=3100
fi
echo $p
}
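# e.g. packetsize 2 8 16 4096 -> ((4096 / 2 / 8 / 16) * 16) = 256, well under the 3100 cap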
function bench_run() {
local plugin=jerasure
local w=8
local VECTOR_WORDSIZE=16
local ks="2 3 4 6 10"
declare -A k2ms
k2ms[2]="1"
k2ms[3]="2"
k2ms[4]="2 3"
k2ms[6]="2 3 4"
k2ms[10]="3 4"
local isa2technique_vandermonde='reed_sol_van'
local isa2technique_cauchy='cauchy'
local jerasure2technique_vandermonde='reed_sol_van'
local jerasure2technique_cauchy='cauchy_good'
for technique in ${TECHNIQUES} ; do
for plugin in ${PLUGINS} ; do
eval technique_parameter=\$${plugin}2technique_${technique}
echo "serie encode_${technique}_${plugin}"
for k in $ks ; do
for m in ${k2ms[$k]} ; do
bench $plugin $k $m encode $(($TOTAL_SIZE / $SIZE)) $SIZE 0 \
--parameter packetsize=$(packetsize $k $w $VECTOR_WORDSIZE $SIZE) \
${PARAMETERS} \
--parameter technique=$technique_parameter
done
done
done
done
for technique in ${TECHNIQUES} ; do
for plugin in ${PLUGINS} ; do
eval technique_parameter=\$${plugin}2technique_${technique}
echo "serie decode_${technique}_${plugin}"
for k in $ks ; do
for m in ${k2ms[$k]} ; do
echo
for erasures in $(seq 1 $m) ; do
bench $plugin $k $m decode $(($TOTAL_SIZE / $SIZE)) $SIZE $erasures \
--parameter packetsize=$(packetsize $k $w $VECTOR_WORDSIZE $SIZE) \
${PARAMETERS} \
--parameter technique=$technique_parameter
done
done
done
done
done
}
function fplot() {
local serie
bench_run | while read seconds total plugin k m workload iteration size erasures rest ; do
if [ -z $seconds ] ; then
echo null,
elif [ $seconds = serie ] ; then
if [ "$serie" ] ; then
echo '];'
fi
local serie=`echo $total | sed 's/cauchy_\([0-9]\)/cauchy_good_\1/g'`
echo "var $serie = ["
else
local x
if [ $workload = encode ] ; then
x=$k/$m
else
x=$k/$m/$erasures
fi
echo "[ '$x', " $(echo "( $total / 1024 / 1024 ) / $seconds" | bc -ql) " ], "
fi
done
echo '];'
}
function main() {
bench_header
bench_run
}
if [ "$1" = fplot ] ; then
"$@"
else
main
fi
# Local Variables:
# compile-command: "\
# CEPH_ERASURE_CODE_BENCHMARK=../../../src/ceph_erasure_code_benchmark \
# PLUGIN_DIRECTORY=../../../build/lib \
# ./bench.sh
# "
# End:
| 5,694 | 28.507772 | 96 |
sh
|
null |
ceph-main/qa/workunits/erasure-code/encode-decode-non-regression.sh
|
#!/usr/bin/env bash
#
# Copyright (C) 2014 Red Hat <[email protected]>
#
# Author: Loic Dachary <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
set -ex
: ${CORPUS:=https://github.com/ceph/ceph-erasure-code-corpus.git}
: ${DIRECTORY:=$CEPH_ROOT/ceph-erasure-code-corpus}
# when running from sources, the current directory must have precedence
export PATH=:$PATH
if ! test -d $DIRECTORY ; then
git clone $CORPUS $DIRECTORY
fi
my_version=v$(ceph --version | cut -f3 -d ' ')
all_versions=$((ls -d $DIRECTORY/v* ; echo $DIRECTORY/$my_version ) | sort)
for version in $all_versions ; do
if test -d $version ; then
$version/non-regression.sh
fi
if test $version = $DIRECTORY/$my_version ; then
break
fi
done
| 1,196 | 28.195122 | 75 |
sh
|
null |
ceph-main/qa/workunits/erasure-code/examples.css
|
* { padding: 0; margin: 0; vertical-align: top; }
body {
background: url(background.png) repeat-x;
font: 18px/1.5em "proxima-nova", Helvetica, Arial, sans-serif;
}
a { color: #069; }
a:hover { color: #28b; }
h2 {
margin-top: 15px;
font: normal 32px "omnes-pro", Helvetica, Arial, sans-serif;
}
h3 {
margin-left: 30px;
font: normal 26px "omnes-pro", Helvetica, Arial, sans-serif;
color: #666;
}
p {
margin-top: 10px;
}
button {
font-size: 18px;
padding: 1px 7px;
}
input {
font-size: 18px;
}
input[type=checkbox] {
margin: 7px;
}
#header {
position: relative;
width: 900px;
margin: auto;
}
#header h2 {
margin-left: 10px;
vertical-align: middle;
font-size: 42px;
font-weight: bold;
text-decoration: none;
color: #000;
}
#content {
width: 880px;
margin: 0 auto;
padding: 10px;
}
#footer {
margin-top: 25px;
margin-bottom: 10px;
text-align: center;
font-size: 12px;
color: #999;
}
.demo-container {
box-sizing: border-box;
width: 850px;
height: 450px;
padding: 20px 15px 15px 15px;
margin: 15px auto 30px auto;
border: 1px solid #ddd;
background: #fff;
background: linear-gradient(#f6f6f6 0, #fff 50px);
background: -o-linear-gradient(#f6f6f6 0, #fff 50px);
background: -ms-linear-gradient(#f6f6f6 0, #fff 50px);
background: -moz-linear-gradient(#f6f6f6 0, #fff 50px);
background: -webkit-linear-gradient(#f6f6f6 0, #fff 50px);
box-shadow: 0 3px 10px rgba(0,0,0,0.15);
-o-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
-ms-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
-moz-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
-webkit-box-shadow: 0 3px 10px rgba(0,0,0,0.1);
}
.demo-placeholder {
width: 100%;
height: 100%;
font-size: 14px;
line-height: 1.2em;
}
.legend table {
border-spacing: 5px;
}
| 1,738 | 16.927835 | 63 |
css
|
null |
ceph-main/qa/workunits/fs/cephfs_mirror_ha_gen.sh
|
#!/bin/bash -ex
#
# cephfs_mirror_ha_gen.sh - generate workload to synchronize
#
. $(dirname $0)/cephfs_mirror_helpers.sh
cleanup()
{
for i in `seq 1 $NR_DIRECTORIES`
do
local repo_name="${REPO_PATH_PFX}_$i"
for j in `seq 1 $NR_SNAPSHOTS`
do
snap_name=$repo_name/.snap/snap_$j
if test -d $snap_name; then
rmdir $snap_name
fi
done
done
exit 1
}
trap cleanup EXIT
configure_peer()
{
ceph mgr module enable mirroring
ceph fs snapshot mirror enable $PRIMARY_FS
ceph fs snapshot mirror peer_add $PRIMARY_FS client.mirror_remote@ceph $BACKUP_FS
for i in `seq 1 $NR_DIRECTORIES`
do
local repo_name="${REPO_PATH_PFX}_$i"
ceph fs snapshot mirror add $PRIMARY_FS "$MIRROR_SUBDIR/$repo_name"
done
}
create_snaps()
{
for i in `seq 1 $NR_DIRECTORIES`
do
local repo_name="${REPO_PATH_PFX}_$i"
for j in `seq 1 $NR_SNAPSHOTS`
do
snap_name=$repo_name/.snap/snap_$j
r=$(( $RANDOM % 100 + 5 ))
arr=($repo_name "reset" "--hard" "HEAD~$r")
exec_git_cmd "${arr[@]}"
mkdir $snap_name
store_checksum $snap_name
done
done
}
unset CEPH_CLI_TEST_DUP_COMMAND
echo "running generator on prmary file system..."
# setup git repos to be used as data set
setup_repos
# turn on mirroring, add peers...
configure_peer
# snapshots on primary
create_snaps
# do not cleanup when exiting on success..
trap - EXIT
| 1,541 | 21.028571 | 85 |
sh
|
null |
ceph-main/qa/workunits/fs/cephfs_mirror_ha_verify.sh
|
#!/bin/bash -ex
#
# cephfs_mirror_ha_verify.sh - verify synchronized snapshots
#
. $(dirname $0)/cephfs_mirror_helpers.sh
echo "running verifier on secondary file system..."
for i in `seq 1 $NR_DIRECTORIES`
do
repo_name="${REPO_PATH_PFX}_$i"
for j in `seq 1 $NR_SNAPSHOTS`
do
for s in 1 1 2 4 4 4 4 4 8 8 8 8 16 16 32 64 64 128 128
do
sleep $s
snap_name=$repo_name/.snap/snap_$j
if test -d $repo_name; then
echo "checking snapshot [$snap_name] in $repo_name"
if test -d $snap_name; then
echo "generating hash for $snap_name"
cksum=''
calc_checksum $snap_name cksum
ret=$(compare_checksum $cksum $snap_name)
if [ $ret -ne 0 ]; then
echo "checksum failed $snap_name ($cksum)"
return $ret
else
echo "checksum matched $snap_name ($cksum)"
break
fi
fi
fi
done
echo "couldn't complete verification for: $snap_name"
done
done
echo "verify done!"
| 1,223 | 28.853659 | 67 |
sh
|
null |
ceph-main/qa/workunits/fs/cephfs_mirror_helpers.sh
|
PRIMARY_FS='dc'
BACKUP_FS='dc-backup'
REPO=ceph-qa-suite
REPO_DIR=ceph_repo
REPO_PATH_PFX="$REPO_DIR/$REPO"
NR_DIRECTORIES=4
NR_SNAPSHOTS=4
MIRROR_SUBDIR='/mirror'
calc_checksum()
{
local path=$1
local -n ref=$2
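# md5 of the concatenated per-file md5sums under $path (symlinks followed via -L);
# the result is returned through the nameref in $2.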
ref=`find -L $path -type f -exec md5sum {} + | awk '{ print $1 }' | md5sum | awk '{ print $1 }'`
}
store_checksum()
{
local path=$1
local cksum='' #something invalid
local fhash=`echo -n $path | md5sum | awk '{ print $1 }'`
calc_checksum $path cksum
echo -n $cksum > "/tmp/primary-$fhash"
}
compare_checksum()
{
local ret=0
local cksum=$1
local path=$2
local fhash=`echo -n $path | md5sum | awk '{ print $1 }'`
local cksum_ondisk=`cat /tmp/primary-$fhash`
if [ $cksum != $cksum_ondisk ]; then
echo "$cksum <> $cksum_ondisk"
ret=1
fi
echo $ret
}
exec_git_cmd()
{
local arg=("$@")
local repo_name=${arg[0]}
local cmd=${arg[@]:1}
git --git-dir "$repo_name/.git" $cmd
}
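# Illustrative usage (the callers in cephfs_mirror_ha_gen.sh follow this pattern):
#   arr=($repo_name "reset" "--hard" "HEAD~3")
#   exec_git_cmd "${arr[@]}"
# i.e. the first element is the repository path and the rest is the git command line.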
clone_repo()
{
local repo_name=$1
git clone --branch giant "http://github.com/ceph/$REPO" $repo_name
}
setup_repos()
{
mkdir "$REPO_DIR"
for i in `seq 1 $NR_DIRECTORIES`
do
local repo_name="${REPO_PATH_PFX}_$i"
mkdir $repo_name
clone_repo $repo_name
done
}
| 1,289 | 18.253731 | 101 |
sh
|
null |
ceph-main/qa/workunits/fs/fscrypt.sh
|
#!/usr/bin/env bash
set -xe
mydir=`dirname $0`
if [ $# -ne 2 ]
then
echo "2 parameters are required!\n"
echo "Usage:"
echo " fscrypt.sh <type> <testdir>"
echo " type: should be any of 'none', 'unlocked' or 'locked'"
echo " testdir: the test direcotry name"
exit 1
fi
fscrypt=$1
testcase=$2
testdir=fscrypt_test_${fscrypt}_${testcase}
mkdir $testdir
XFSPROGS_DIR='xfprogs-dev-dir'
XFSTESTS_DIR='xfstest-dev-dir'
export XFS_IO_PROG="$(type -P xfs_io)"
# Setup the xfstests env
setup_xfstests_env()
{
git clone https://git.ceph.com/xfstests-dev.git $XFSTESTS_DIR --depth 1
pushd $XFSTESTS_DIR
. common/encrypt
popd
}
install_deps()
{
local system_value=$(sudo lsb_release -is | awk '{print tolower($0)}')
case $system_value in
"centos" | "centosstream" | "fedora")
sudo yum install -y inih-devel userspace-rcu-devel \
libblkid-devel gettext libedit-devel \
libattr-devel device-mapper-devel libicu-devel
;;
"ubuntu" | "debian")
sudo apt-get install -y libinih-dev liburcu-dev \
libblkid-dev gettext libedit-dev libattr1-dev \
libdevmapper-dev libicu-dev pkg-config
;;
*)
echo "Unsupported distro $system_value"
exit 1
;;
esac
}
# Install xfsprogs-dev from source to support "add_enckey" for xfs_io
install_xfsprogs()
{
local install_xfsprogs=0
xfs_io -c "help add_enckey" | grep -q 'not found' && install_xfsprogs=1
if [ $install_xfsprogs -eq 1 ]; then
install_deps
git clone https://git.ceph.com/xfsprogs-dev.git $XFSPROGS_DIR --depth 1
pushd $XFSPROGS_DIR
make
sudo make install
popd
fi
}
clean_up()
{
rm -rf $XFSPROGS_DIR
rm -rf $XFSTESTS_DIR
rm -rf $testdir
}
# For now will test the V2 encryption policy only as the
# V1 encryption policy is deprecated
install_xfsprogs
setup_xfstests_env
# Generate a fixed keying identifier
raw_key=$(_generate_raw_encryption_key)
keyid=$(_add_enckey $testdir "$raw_key" | awk '{print $NF}')
case ${fscrypt} in
"none")
# do nothing for the test directory and will test it
# as one non-encrypted directory.
pushd $testdir
${mydir}/../suites/${testcase}.sh
popd
clean_up
;;
"unlocked")
# set encrypt policy with the key provided and then
# the test directory will be encrypted & unlocked
_set_encpolicy $testdir $keyid
pushd $testdir
${mydir}/../suites/${testcase}.sh
popd
clean_up
;;
"locked")
# remove the key, then the test directory will be locked
# and any modification will be denied by requiring the key
_rm_enckey $testdir $keyid
clean_up
;;
*)
clean_up
echo "Unknown parameter $1"
exit 1
esac
| 2,581 | 20.516667 | 73 |
sh
|
null |
ceph-main/qa/workunits/fs/multiclient_sync_read_eof.py
|
#!/usr/bin/python3
import argparse
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument('mnt1')
parser.add_argument('mnt2')
parser.add_argument('fn')
args = parser.parse_args()
open(os.path.join(args.mnt1, args.fn), 'w')
f1 = open(os.path.join(args.mnt1, args.fn), 'r+')
f2 = open(os.path.join(args.mnt2, args.fn), 'r+')
f1.write('foo')
f1.flush()
a = f2.read(3)
print('got "%s"' % a)
assert a == 'foo'
f2.write('bar')
f2.flush()
a = f1.read(3)
print('got "%s"' % a)
assert a == 'bar'
## test short reads
f1.write('short')
f1.flush()
a = f2.read(100)
print('got "%s"' % a)
assert a == 'short'
f2.write('longer')
f2.flush()
a = f1.read(1000)
print('got "%s"' % a)
assert a == 'longer'
print('ok')
main()
| 854 | 18.883721 | 53 |
py
|
null |
ceph-main/qa/workunits/fs/snap-hierarchy.sh
|
#!/bin/sh
set -ex
if [ -d "$1" ]; then
mkdir -p -- "$1" && cd "$1"
fi
[ "$VERIFY" != verify ] && mkdir 1
[ "$VERIFY" != verify ] && mkdir 1/.snap/first
stat 1/.snap/first
[ "$VERIFY" != verify ] && mkdir 1/2
stat 1/.snap/first/2 && exit 1
[ "$VERIFY" != verify ] && mkdir 1/2/.snap/second
stat 1/2/.snap/second
[ "$VERIFY" != verify ] && touch 1/foo
stat 1/.snap/first/foo && exit 1
[ "$VERIFY" != verify ] && mkdir 1/.snap/third
stat 1/.snap/third/foo || exit 1
[ "$VERIFY" != verify ] && mkdir 1/2/3
[ "$VERIFY" != verify ] && mkdir 1/2/.snap/fourth
stat 1/2/.snap/fourth/3
exit 0
| 589 | 22.6 | 49 |
sh
|
null |
ceph-main/qa/workunits/fs/test_o_trunc.c
|
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
int main(int argc, char *argv[])
{
char obuf[32], ibuf[1024];
int n, max = 0;
if (argc > 2)
max = atoi(argv[2]);
if (!max)
max = 600;
memset(obuf, 0xff, sizeof(obuf));
for (n = 1; n <= max; ++n) {
int fd, ret;
fd = open(argv[1], O_RDWR | O_CREAT | O_TRUNC, 0644);
printf("%d/%d: open fd = %d\n", n, max, fd);
ret = write(fd, obuf, sizeof(obuf));
printf("write ret = %d\n", ret);
sleep(1);
ret = write(fd, obuf, sizeof(obuf));
printf("write ret = %d\n", ret);
ret = pread(fd, ibuf, sizeof(ibuf), 0);
printf("pread ret = %d\n", ret);
if (memcmp(obuf, ibuf, sizeof(obuf))) {
printf("mismatch\n");
close(fd);
break;
}
close(fd);
}
return 0;
}
| 851 | 17.521739 | 55 |
c
|
null |
ceph-main/qa/workunits/fs/test_o_trunc.sh
|
#!/bin/sh -ex
mydir=`dirname $0`
$mydir/test_o_trunc trunc.foo 600
echo OK
| 78 | 8.875 | 33 |
sh
|
null |
ceph-main/qa/workunits/fs/test_python.sh
|
#!/bin/sh -ex
# Running as root because the filesystem root directory will be
# owned by uid 0, and that's where we're writing.
sudo python3 -m nose -v $(dirname $0)/../../../src/test/pybind/test_cephfs.py
exit 0
| 214 | 29.714286 | 77 |
sh
|
null |
ceph-main/qa/workunits/fs/damage/test-first-damage.sh
|
#!/bin/bash
set -ex
FIRST_DAMAGE="first-damage.py"
FS=cephfs
METADATA_POOL=cephfs_meta
MOUNT=~/mnt/mnt.0
PYTHON=python3
function usage {
printf '%s: [--fs=<fs_name>] [--metadata-pool=<pool>] [--first-damage=</path/to/first-damage.py>]\n' "$0"
exit 1
}
function create {
ceph config set mds mds_bal_fragment_dirs 0
mkdir dir
DIR_INODE=$(stat -c '%i' dir)
touch dir/a
touch dir/"a space"
touch -- $(printf 'dir/\xff')
mkdir dir/.snap/1
mkdir dir/.snap/2
# two snaps
rm dir/a
mkdir dir/.snap/3
# not present in HEAD
touch dir/a
mkdir dir/.snap/4
# one snap
rm dir/a
touch dir/a
mkdir dir/.snap/5
# unlink then create
rm dir/a
touch dir/a
# unlink then create, HEAD not snapped
ls dir/.snap/*/
mkdir big
BIG_DIR_INODE=$(stat -c '%i' big)
for i in `seq 1 15000`; do
touch $(printf 'big/%08d' $i)
done
}
function flush {
ceph tell mds."$FS":0 flush journal
}
function damage {
local IS=$(printf '%llx.%08llx' "$DIR_INODE" 0)
local LS=$(ceph tell mds."$FS":0 dump snaps | jq .last_created)
local T=$(mktemp -p /tmp)
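# Each corruption below fetches a dentry's omap value, overwrites its first
# four bytes (presumably the start of the encoded dentry) with garbage and
# writes it back, so the MDS will later see it as damaged metadata.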
# nuke snap 1 version of "a"
rados --pool="$METADATA_POOL" getomapval "$IS" a_$(printf %x $((LS-4))) "$T"
printf '\xff\xff\xff\xf0' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
rados --pool="$METADATA_POOL" setomapval "$IS" a_$(printf %x $((LS-4))) --input-file="$T"
# nuke snap 4 version of "a"
rados --pool="$METADATA_POOL" getomapval "$IS" a_$(printf %x $((LS-1))) "$T"
printf '\xff\xff\xff\xff' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
rados --pool="$METADATA_POOL" setomapval "$IS" a_$(printf %x $((LS-1))) --input-file="$T"
# screw up HEAD
rados --pool="$METADATA_POOL" getomapval "$IS" a_head "$T"
printf '\xfe\xff\xff\xff' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
rados --pool="$METADATA_POOL" setomapval "$IS" a_head --input-file="$T"
# screw up HEAD on one dentry in big
IS=$(printf '%llx.%08llx' "$BIG_DIR_INODE" 0)
rados --pool="$METADATA_POOL" getomapval "$IS" 00009999_head "$T"
printf '\xfe\xff\xff\xff' | dd of="$T" count=4 bs=1 conv=notrunc,nocreat
rados --pool="$METADATA_POOL" setomapval "$IS" 00009999_head --input-file="$T"
rm -f "$T"
}
function recover {
flush
ceph fs fail "$FS"
sleep 5
cephfs-journal-tool --rank="$FS":0 event recover_dentries summary
cephfs-journal-tool --rank="$FS":0 journal reset
"$PYTHON" $FIRST_DAMAGE --debug /tmp/debug1 --memo /tmp/memo1 "$METADATA_POOL"
"$PYTHON" $FIRST_DAMAGE --debug /tmp/debug2 --memo /tmp/memo2 --repair-nosnap "$METADATA_POOL"
"$PYTHON" $FIRST_DAMAGE --debug /tmp/debug3 --memo /tmp/memo3 --remove "$METADATA_POOL"
ceph fs set "$FS" joinable true
}
function check {
stat dir || exit 1
stat dir/a || exit 1
for i in `seq 1 5`; do
stat dir/.snap/$i || exit 2
done
stat dir/.snap/2/a || exit 3
stat dir/.snap/5/a || exit 4
if stat dir/.snap/1/a; then
echo should be gone
exit 5
fi
if stat dir/.snap/3/a; then
echo should not ever exist
exit 6
fi
if stat dir/.snap/4/a; then
echo should be gone
exit 7
fi
}
function cleanup {
rmdir dir/.snap/*
find dir
rm -rf dir
}
function mount {
sudo --preserve-env=CEPH_CONF bin/mount.ceph :/ "$MOUNT" -o name=admin,noshare
df -h "$MOUNT"
}
function main {
eval set -- $(getopt --name "$0" --options '' --longoptions 'help,fs:,metadata-pool:,first-damage:,mount:,python:' -- "$@")
while [ "$#" -gt 0 ]; do
echo "$*"
echo "$1"
case "$1" in
-h|--help)
usage
;;
--fs)
FS="$2"
shift 2
;;
--metadata-pool)
METADATA_POOL="$2"
shift 2
;;
--mount)
MOUNT="$2"
shift 2
;;
--first-damage)
FIRST_DAMAGE="$2"
shift 2
;;
--python)
PYTHON="$2"
shift 2
;;
--)
shift
break
;;
*)
usage
;;
esac
done
mount
pushd "$MOUNT"
create
popd
sudo umount -f "$MOUNT"
# flush dentries/inodes to omap
flush
damage
recover
sleep 5 # for mds to join
mount
pushd "$MOUNT"
check
cleanup
popd
sudo umount -f "$MOUNT"
}
main "$@"
| 4,385 | 21.492308 | 125 |
sh
|
null |
ceph-main/qa/workunits/fs/full/subvolume_clone.sh
|
#!/usr/bin/env bash
set -ex
# This testcase tests the 'ceph fs subvolume snapshot clone' when the osd is full.
# The clone fails with 'MetadataMgrException: -28 (error in write)' and
# truncates the config file of the corresponding subvolume while updating it.
# Hence the subsequent subvolume commands on the clone fail with a
# 'MetadataMgrException: -2 (section 'GLOBAL' does not exist)' traceback.
# The osd is of size 1GB. The full-ratios are set so that the osd is treated as full
# at around 600MB. The subvolume is created and 100MB is written.
# The subvolume is snapshotted and cloned ten times. Since the clone delay is set to 15 seconds,
# all the clones are guaranteed to reach the pending state. Among the ten clones, only a few
# succeed and the rest fail with ENOSPC.
# At this stage, the ".meta" config file of each failed clone is checked for truncation,
# and the clone status command is checked for a traceback.
# Note that the failed clones stay in a retry loop and their state remains 'pending' or 'in-progress'.
# Their state is not updated to 'failed' because the config update gets ENOSPC too.
ignore_failure() {
if "$@"; then return 0; else return 0; fi
}
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
NUM_CLONES=10
ceph fs subvolume create cephfs sub_0
subvol_path_0=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
# For debugging
echo "Before ratios are set"
df $CEPH_MNT
ceph osd df
ceph osd set-full-ratio 0.6
ceph osd set-nearfull-ratio 0.50
ceph osd set-backfillfull-ratio 0.55
# For debugging
echo "After ratios are set"
df -h
ceph osd df
for i in {1..100};do sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path_0/1MB_file-$i status=progress bs=1M count=1 conv=fdatasync;done
# For debugging
echo "After subvolumes are written"
df -h $CEPH_MNT
ceph osd df
# snapshot
ceph fs subvolume snapshot create cephfs sub_0 snap_0
# Set clone snapshot delay
ceph config set mgr mgr/volumes/snapshot_clone_delay 15
# Schedule a few clones; some will fail with no space
for i in $(eval echo {1..$NUM_CLONES});do ceph fs subvolume snapshot clone cephfs sub_0 snap_0 clone_$i;done
# Wait for the osd to become full
timeout=90
while [ $timeout -gt 0 ]
do
health=$(ceph health detail)
[[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
echo "Wating for osd to be full: $timeout"
sleep 1
let "timeout-=1"
done
# For debugging
echo "After osd is full"
df -h $CEPH_MNT
ceph osd df
# Check clone status, this should not crash
for i in $(eval echo {1..$NUM_CLONES})
do
ignore_failure ceph fs clone status cephfs clone_$i >/tmp/out_${PID}_file 2>/tmp/error_${PID}_file
cat /tmp/error_${PID}_file
if grep "complete" /tmp/out_${PID}_file; then
echo "The clone_$i is completed"
else
#in-progress/pending clones, No traceback should be found in stderr
echo clone_$i in PENDING/IN-PROGRESS
expect_failure sudo grep "Traceback" /tmp/error_${PID}_file
#config file should not be truncated and GLOBAL section should be found
sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/clone_$i/.meta
fi
done
# Hard cleanup
ignore_failure sudo rm -rf $CEPH_MNT/_index/clone/*
ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/clone_*
ignore_failure sudo rmdir $CEPH_MNT/volumes/_nogroup/sub_0/.snap/snap_0
ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/sub_0
#Set the ratios back for other full tests to run
ceph osd set-full-ratio 0.95
ceph osd set-nearfull-ratio 0.95
ceph osd set-backfillfull-ratio 0.95
#After test
echo "After test"
df -h $CEPH_MNT
ceph osd df
echo OK
| 3,553 | 30.175439 | 133 |
sh
|
null |
ceph-main/qa/workunits/fs/full/subvolume_rm.sh
|
#!/usr/bin/env bash
set -ex
# This testcase tests the scenario of the 'ceph fs subvolume rm' mgr command
# when the osd is full. The command used to hang. The osd is of size 1GB.
# The subvolume is created and a 500MB file is written. The full-ratios are
# set below 500MB such that the osd is treated as full. Now the subvolume
# is removed. This should be successful with the introduction of the FULL
# capabilities which the mgr holds.
set -e
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
ceph fs subvolume create cephfs sub_0
subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
#For debugging
echo "Before write"
df -h
ceph osd df
sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/500MB_file-1 status=progress bs=1M count=500
ceph osd set-full-ratio 0.2
ceph osd set-nearfull-ratio 0.16
ceph osd set-backfillfull-ratio 0.18
timeout=30
while [ $timeout -gt 0 ]
do
health=$(ceph health detail)
[[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
echo "Wating for osd to be full: $timeout"
sleep 1
let "timeout-=1"
done
#For debugging
echo "After ratio set"
df -h
ceph osd df
#Delete subvolume
ceph fs subvolume rm cephfs sub_0
#Validate subvolume is deleted
expect_failure ceph fs subvolume info cephfs sub_0
#Wait for subvolume to delete data
trashdir=$CEPH_MNT/volumes/_deleting
timeout=30
while [ $timeout -gt 0 ]
do
[ -z "$(sudo ls -A $trashdir)" ] && echo "Trash directory $trashdir is empty" && break
echo "Wating for trash dir to be empty: $timeout"
sleep 1
let "timeout-=1"
done
#Set the ratios back for other full tests to run
ceph osd set-full-ratio 0.95
ceph osd set-nearfull-ratio 0.95
ceph osd set-backfillfull-ratio 0.95
#After test
echo "After test"
df -h
ceph osd df
echo OK
| 1,766 | 23.205479 | 93 |
sh
|
null |
ceph-main/qa/workunits/fs/full/subvolume_snapshot_rm.sh
|
#!/usr/bin/env bash
set -ex
# This testcase tests the 'ceph fs subvolume snapshot rm' when the osd is full.
# The snapshot rm fails with 'MetadataMgrException: -28 (error in write)' and
# truncates the config file of the corresponding subvolume. Hence the subsequent
# snapshot rm of the same snapshot fails with a 'MetadataMgrException: -2
# (section 'GLOBAL' does not exist)' traceback.
# The osd is of size 1GB. The subvolume is created and an 800MB file is written.
# Then the full-ratios are set below 500MB such that the osd is treated as full.
# The subvolume snapshot is taken, which succeeds as no extra space is required
# for a snapshot. Now the removal of the snapshot fails with ENOSPC as it
# fails to remove the snapshot metadata set. The snapshot removal fails
# but should not traceback or truncate the config file.
set -e
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
ignore_failure() {
if "$@"; then return 0; else return 0; fi
}
ceph fs subvolume create cephfs sub_0
subvol_path=$(ceph fs subvolume getpath cephfs sub_0 2>/dev/null)
#For debugging
echo "Before write"
df $CEPH_MNT
ceph osd df
# Write 800MB file and set full ratio to around 200MB
ignore_failure sudo dd if=/dev/urandom of=$CEPH_MNT$subvol_path/800MB_file-1 status=progress bs=1M count=800 conv=fdatasync
ceph osd set-full-ratio 0.2
ceph osd set-nearfull-ratio 0.16
ceph osd set-backfillfull-ratio 0.18
timeout=30
while [ $timeout -gt 0 ]
do
health=$(ceph health detail)
[[ $health = *"OSD_FULL"* ]] && echo "OSD is full" && break
echo "Wating for osd to be full: $timeout"
sleep 1
let "timeout-=1"
done
#Take snapshot
ceph fs subvolume snapshot create cephfs sub_0 snap_0
#Remove snapshot fails but should not throw traceback
expect_failure ceph fs subvolume snapshot rm cephfs sub_0 snap_0 2>/tmp/error_${PID}_file
cat /tmp/error_${PID}_file
# No traceback should be found
expect_failure grep "Traceback" /tmp/error_${PID}_file
# Validate config file is not truncated and GLOBAL section exists
sudo grep "GLOBAL" $CEPH_MNT/volumes/_nogroup/sub_0/.meta
#For debugging
echo "After write"
df $CEPH_MNT
ceph osd df
# Snapshot removal with force option should succeed
ceph fs subvolume snapshot rm cephfs sub_0 snap_0 --force
#Cleanup from backend
ignore_failure sudo rm -f /tmp/error_${PID}_file
ignore_failure sudo rm -rf $CEPH_MNT/volumes/_nogroup/sub_0
#Set the ratios back for other full tests to run
ceph osd set-full-ratio 0.95
ceph osd set-nearfull-ratio 0.95
ceph osd set-backfillfull-ratio 0.95
#After test
echo "After test"
df -h $CEPH_MNT
ceph osd df
echo OK
| 2,595 | 28.83908 | 123 |
sh
|
null |
ceph-main/qa/workunits/fs/maxentries/maxentries.sh
|
#!/usr/bin/env bash
set -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
function make_files()
{
set +x
temp_dir=`mktemp -d mkfile_test_XXXXXX`
for i in $(seq 1 $1)
do
echo -n | dd of="${temp_dir}/file_$i" conv=fsync || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function make_dirs()
{
set +x
temp_dir=`mktemp -d mkdir_test_XXXXXX`
for i in $(seq 1 $1)
do
mkdir -p ${temp_dir}/dir_${i} || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function make_nodes()
{
set +x
temp_dir=`mktemp -d mknod_test_XXXXXX`
for i in $(seq 1 $1)
do
mknod ${temp_dir}/fifo_${i} p || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function rename_files()
{
set +x
temp_dir=`mktemp -d rename_test_XXXXXX`
mkdir -p ${temp_dir}/rename
for i in $(seq 1 $1)
do
touch ${temp_dir}/file_${i} || return 1
mv ${temp_dir}/file_${i} ${temp_dir}/rename/ || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function make_symlinks()
{
set +x
temp_dir=`mktemp -d symlink_test_XXXXXX`
mkdir -p ${temp_dir}/symlink
touch ${temp_dir}/file
for i in $(seq 1 $1)
do
ln -s ../file ${temp_dir}/symlink/sym_${i} || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function make_links()
{
set +x
temp_dir=`mktemp -d link_test_XXXXXX`
mkdir -p ${temp_dir}/link
touch ${temp_dir}/file
for i in $(seq 1 $1)
do
ln ${temp_dir}/file ${temp_dir}/link/link_${i} || return 1
sync "${temp_dir}" || return 1
done
set -x
return 0
}
function cleanup()
{
rm -rf *
}
test_dir="max_entries"
mkdir -p $test_dir
pushd $test_dir
dir_max_entries=100
ceph config set mds mds_dir_max_entries $dir_max_entries
ok_dir_max_entries=$dir_max_entries
fail_dir_max_entries=$((dir_max_entries+1))
# make files test
make_files $ok_dir_max_entries
expect_false make_files $fail_dir_max_entries
# make dirs test
make_dirs $ok_dir_max_entries
expect_false make_dirs $fail_dir_max_entries
# make nodes test
make_nodes $ok_dir_max_entries
expect_false make_nodes $fail_dir_max_entries
# rename files test
rename_files $ok_dir_max_entries
expect_false rename_files $fail_dir_max_entries
# symlink files test
make_symlinks $ok_dir_max_entries
expect_false make_symlinks $fail_dir_max_entries
# link files test
make_links $ok_dir_max_entries
expect_false make_links $fail_dir_max_entries
# no limit (e.g., default value)
dir_max_entries=0
ceph config set mds mds_dir_max_entries $dir_max_entries
make_files 500
make_dirs 500
make_nodes 500
rename_files 500
make_symlinks 500
make_links 500
cleanup
popd # $test_dir
echo OK
| 2,730 | 16.50641 | 64 |
sh
|
null |
ceph-main/qa/workunits/fs/misc/acl.sh
|
#!/bin/sh -x
set -e
mkdir -p testdir
cd testdir
set +e
setfacl -d -m u:nobody:rw .
if test $? != 0; then
echo "Filesystem does not support ACL"
exit 0
fi
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
set -e
c=0
while [ $c -lt 100 ]
do
c=`expr $c + 1`
# inherited ACL from parent directory's default ACL
mkdir d1
c1=`getfacl d1 | grep -c "nobody:rw"`
echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null
c2=`getfacl d1 | grep -c "nobody:rw"`
rmdir d1
if [ $c1 -ne 2 ] || [ $c2 -ne 2 ]
then
echo "ERROR: incorrect ACLs"
exit 1
fi
done
mkdir d1
# The ACL xattr only contains ACL header. ACL should be removed
# in this case.
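# (0x02000000 is the 4-byte little-endian POSIX ACL xattr header -- version 2 --
# with no ACL entries following it.)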
setfattr -n system.posix_acl_access -v 0x02000000 d1
setfattr -n system.posix_acl_default -v 0x02000000 .
expect_failure getfattr -n system.posix_acl_access d1
expect_failure getfattr -n system.posix_acl_default .
rmdir d1
cd ..
rmdir testdir
echo OK
| 919 | 17.039216 | 63 |
sh
|
null |
ceph-main/qa/workunits/fs/misc/chmod.sh
|
#!/bin/sh -x
set -e
check_perms() {
file=$1
r=$(ls -la ${file})
if test $? != 0; then
echo "ERROR: File listing/stat failed"
exit 1
fi
perms=$2
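# ls may append '.' (SELinux security context) or '+' (extended ACLs) to the
# mode string, so accept all three spellings of the expected permissions.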
if test "${perms}" != $(echo ${r} | awk '{print $1}') && \
test "${perms}." != $(echo ${r} | awk '{print $1}') && \
test "${perms}+" != $(echo ${r} | awk '{print $1}'); then
echo "ERROR: Permissions should be ${perms}"
exit 1
fi
}
file=test_chmod.$$
echo "foo" > ${file}
if test $? != 0; then
echo "ERROR: Failed to create file ${file}"
exit 1
fi
chmod 400 ${file}
if test $? != 0; then
echo "ERROR: Failed to change mode of ${file}"
exit 1
fi
check_perms ${file} "-r--------"
set +e
echo "bar" >> ${file}
if test $? = 0; then
echo "ERROR: Write to read-only file should Fail"
exit 1
fi
set -e
chmod 600 ${file}
echo "bar" >> ${file}
if test $? != 0; then
echo "ERROR: Write to writeable file failed"
exit 1
fi
check_perms ${file} "-rw-------"
echo "foo" >> ${file}
if test $? != 0; then
echo "ERROR: Failed to write to file"
exit 1
fi
| 1,039 | 16.04918 | 68 |
sh
|
null |
ceph-main/qa/workunits/fs/misc/dac_override.sh
|
#!/bin/sh -x
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
set -e
mkdir -p testdir
file=test_chmod.$$
echo "foo" > testdir/${file}
sudo chmod 600 testdir
# only root can read
expect_failure cat testdir/${file}
# directory read/write DAC override for root should allow read
sudo cat testdir/${file}
| 322 | 15.15 | 62 |
sh
|
null |
ceph-main/qa/workunits/fs/misc/direct_io.py
|
#!/usr/bin/python3
import mmap
import os
import subprocess
def main():
path = "testfile"
fd = os.open(path, os.O_RDWR | os.O_CREAT | os.O_TRUNC | os.O_DIRECT, 0o644)
ino = os.fstat(fd).st_ino
obj_name = "{ino:x}.00000000".format(ino=ino)
pool_name = os.getxattr(path, "ceph.file.layout.pool")
buf = mmap.mmap(-1, 1)
buf.write(b'1')
os.write(fd, buf)
proc = subprocess.Popen(['rados', '-p', pool_name, 'get', obj_name, 'tmpfile'])
proc.wait()
with open('tmpfile', 'rb') as tmpf:
out = tmpf.read(1)
if out != b'1':
raise RuntimeError("data were not written to object store directly")
with open('tmpfile', 'wb') as tmpf:
tmpf.write(b'2')
proc = subprocess.Popen(['rados', '-p', pool_name, 'put', obj_name, 'tmpfile'])
proc.wait()
os.lseek(fd, 0, os.SEEK_SET)
out = os.read(fd, 1)
if out != b'2':
raise RuntimeError("data were not directly read from object store")
os.close(fd)
print('ok')
main()
| 1,025 | 22.860465 | 83 |
py
|
null |
ceph-main/qa/workunits/fs/misc/dirfrag.sh
|
#!/usr/bin/env bash
set -e
DEPTH=5
COUNT=10000
kill_jobs() {
jobs -p | xargs kill
}
trap kill_jobs INT
create_files() {
for i in `seq 1 $COUNT`
do
touch file$i
done
}
delete_files() {
for i in `ls -f`
do
if [[ ${i}a = file*a ]]
then
rm -f $i
fi
done
}
rm -rf testdir
mkdir testdir
cd testdir
echo "creating folder hierarchy"
for i in `seq 1 $DEPTH`; do
mkdir dir$i
cd dir$i
create_files &
done
wait
echo "created hierarchy, now cleaning up"
for i in `seq 1 $DEPTH`; do
delete_files &
cd ..
done
wait
echo "cleaned up hierarchy"
cd ..
rm -rf testdir
| 605 | 10.433962 | 41 |
sh
|
null |
ceph-main/qa/workunits/fs/misc/filelock_deadlock.py
|
#!/usr/bin/python3
import errno
import fcntl
import os
import signal
import struct
import time
def handler(signum, frame):
pass
def lock_two(f1, f2):
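# struct flock packed as (l_type, l_whence, l_start, l_len, ...):
# an exclusive write lock on bytes 0-9 of each file.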
lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 10, 0, 0)
fcntl.fcntl(f1, fcntl.F_SETLKW, lockdata)
time.sleep(10)
# don't wait forever
signal.signal(signal.SIGALRM, handler)
signal.alarm(10)
exitcode = 0
try:
fcntl.fcntl(f2, fcntl.F_SETLKW, lockdata)
except IOError as e:
if e.errno == errno.EDEADLK:
exitcode = 1
elif e.errno == errno.EINTR:
exitcode = 2
else:
exitcode = 3
os._exit(exitcode)
def main():
pid1 = os.fork()
if pid1 == 0:
f1 = open("testfile1", 'w')
f2 = open("testfile2", 'w')
lock_two(f1, f2)
pid2 = os.fork()
if pid2 == 0:
f1 = open("testfile2", 'w')
f2 = open("testfile3", 'w')
lock_two(f1, f2)
pid3 = os.fork()
if pid3 == 0:
f1 = open("testfile3", 'w')
f2 = open("testfile1", 'w')
lock_two(f1, f2)
deadlk_count = 0
i = 0
while i < 3:
pid, status = os.wait()
exitcode = status >> 8
if exitcode == 1:
deadlk_count += 1
elif exitcode != 0:
raise RuntimeError("unexpect exit code of child")
i += 1
if deadlk_count != 1:
raise RuntimeError("unexpect count of EDEADLK")
print('ok')
main()
| 1,475 | 19.219178 | 67 |
py
|
null |
ceph-main/qa/workunits/fs/misc/filelock_interrupt.py
|
#!/usr/bin/python3
from contextlib import contextmanager
import errno
import fcntl
import signal
import struct
@contextmanager
def timeout(seconds):
def timeout_handler(signum, frame):
raise InterruptedError
orig_handler = signal.signal(signal.SIGALRM, timeout_handler)
try:
signal.alarm(seconds)
yield
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, orig_handler)
"""
introduced by Linux 3.15
"""
setattr(fcntl, "F_OFD_GETLK", 36)
setattr(fcntl, "F_OFD_SETLK", 37)
setattr(fcntl, "F_OFD_SETLKW", 38)
def main():
f1 = open("testfile", 'w')
f2 = open("testfile", 'w')
fcntl.flock(f1, fcntl.LOCK_SH | fcntl.LOCK_NB)
"""
is flock interruptible?
"""
with timeout(5):
try:
fcntl.flock(f2, fcntl.LOCK_EX)
except InterruptedError:
pass
else:
raise RuntimeError("expect flock to block")
fcntl.flock(f1, fcntl.LOCK_UN)
lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 10, 0, 0)
try:
fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
except IOError as e:
if e.errno != errno.EINVAL:
raise
else:
print('kernel does not support fcntl.F_OFD_SETLK')
return
lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 10, 10, 0, 0)
fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata)
"""
is posix lock interruptible?
"""
with timeout(5):
try:
lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
fcntl.fcntl(f2, fcntl.F_OFD_SETLKW, lockdata)
except InterruptedError:
pass
else:
raise RuntimeError("expect posix lock to block")
"""
file handler 2 should still hold lock on 10~10
"""
try:
lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 10, 10, 0, 0)
fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
except IOError as e:
if e.errno == errno.EAGAIN:
pass
else:
raise RuntimeError("expect file handler 2 to hold lock on 10~10")
lockdata = struct.pack('hhllhh', fcntl.F_UNLCK, 0, 0, 0, 0, 0)
fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata)
fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata)
print('ok')
main()
| 2,299 | 23.210526 | 74 |
py
|
null |
ceph-main/qa/workunits/fs/misc/i_complete_vs_rename.sh
|
#!/bin/sh
set -e
mkdir x
cd x
touch a
touch b
touch c
touch d
ls
chmod 777 .
stat e || true
touch f
touch g
# over existing file
echo attempting rename over existing file...
touch ../xx
mv ../xx f
ls | grep f || false
echo rename over existing file is okay
# over negative dentry
echo attempting rename over negative dentry...
touch ../xx
mv ../xx e
ls | grep e || false
echo rename over negative dentry is ok
echo OK
| 423 | 12.25 | 46 |
sh
|
null |
ceph-main/qa/workunits/fs/misc/layout_vxattrs.sh
|
#!/usr/bin/env bash
set -ex
# detect data pool
datapool=
dir=.
while true ; do
echo $dir
datapool=$(getfattr -n ceph.dir.layout.pool $dir --only-values) && break
dir=$dir/..
done
# file
rm -f file file2
touch file file2
getfattr -n ceph.file.layout file
getfattr -n ceph.file.layout file | grep -q object_size=
getfattr -n ceph.file.layout file | grep -q stripe_count=
getfattr -n ceph.file.layout file | grep -q stripe_unit=
getfattr -n ceph.file.layout file | grep -q pool=
getfattr -n ceph.file.layout.pool file
getfattr -n ceph.file.layout.pool_namespace file
getfattr -n ceph.file.layout.stripe_unit file
getfattr -n ceph.file.layout.stripe_count file
getfattr -n ceph.file.layout.object_size file
getfattr -n ceph.file.layout.bogus file 2>&1 | grep -q 'No such attribute'
getfattr -n ceph.dir.layout file 2>&1 | grep -q 'No such attribute'
setfattr -n ceph.file.layout.stripe_unit -v 1048576 file2
setfattr -n ceph.file.layout.stripe_count -v 8 file2
setfattr -n ceph.file.layout.object_size -v 10485760 file2
setfattr -n ceph.file.layout.pool -v $datapool file2
getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
setfattr -n ceph.file.layout.pool_namespace -v foons file2
getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
setfattr -x ceph.file.layout.pool_namespace file2
getfattr -n ceph.file.layout.pool_namespace file2 | grep -q -v foons
getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 1048576
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 8
getfattr -n ceph.file.layout.object_size file2 | grep -q 10485760
setfattr -n ceph.file.layout -v "stripe_unit=4194304 stripe_count=16 object_size=41943040 pool=$datapool pool_namespace=foons" file2
getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 4194304
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
getfattr -n ceph.file.layout.object_size file2 | grep -q 41943040
getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
setfattr -n ceph.file.layout -v "stripe_unit=1048576" file2
getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 1048576
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
getfattr -n ceph.file.layout.object_size file2 | grep -q 41943040
getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons
setfattr -n ceph.file.layout -v "stripe_unit=2097152 stripe_count=4 object_size=2097152 pool=$datapool pool_namespace=barns" file2
getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 2097152
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 4
getfattr -n ceph.file.layout.object_size file2 | grep -q 2097152
getfattr -n ceph.file.layout.pool file2 | grep -q $datapool
getfattr -n ceph.file.layout.pool_namespace file2 | grep -q barns
# dir
rm -f dir/file || true
rmdir dir || true
mkdir -p dir
getfattr -d -m - dir | grep -q ceph.dir.layout && exit 1 || true
getfattr -d -m - dir | grep -q ceph.file.layout && exit 1 || true
getfattr -n ceph.dir.layout dir && exit 1 || true
setfattr -n ceph.dir.layout.stripe_unit -v 1048576 dir
setfattr -n ceph.dir.layout.stripe_count -v 8 dir
setfattr -n ceph.dir.layout.object_size -v 10485760 dir
setfattr -n ceph.dir.layout.pool -v $datapool dir
setfattr -n ceph.dir.layout.pool_namespace -v dirns dir
getfattr -n ceph.dir.layout dir
getfattr -n ceph.dir.layout dir | grep -q object_size=10485760
getfattr -n ceph.dir.layout dir | grep -q stripe_count=8
getfattr -n ceph.dir.layout dir | grep -q stripe_unit=1048576
getfattr -n ceph.dir.layout dir | grep -q pool=$datapool
getfattr -n ceph.dir.layout dir | grep -q pool_namespace=dirns
getfattr -n ceph.dir.layout.pool dir | grep -q $datapool
getfattr -n ceph.dir.layout.stripe_unit dir | grep -q 1048576
getfattr -n ceph.dir.layout.stripe_count dir | grep -q 8
getfattr -n ceph.dir.layout.object_size dir | grep -q 10485760
getfattr -n ceph.dir.layout.pool_namespace dir | grep -q dirns
setfattr -n ceph.file.layout -v "stripe_count=16" file2
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16
setfattr -n ceph.file.layout -v "object_size=10485760 stripe_count=8 stripe_unit=1048576 pool=$datapool pool_namespace=dirns" file2
getfattr -n ceph.file.layout.stripe_count file2 | grep -q 8
touch dir/file
getfattr -n ceph.file.layout.pool dir/file | grep -q $datapool
getfattr -n ceph.file.layout.stripe_unit dir/file | grep -q 1048576
getfattr -n ceph.file.layout.stripe_count dir/file | grep -q 8
getfattr -n ceph.file.layout.object_size dir/file | grep -q 10485760
getfattr -n ceph.file.layout.pool_namespace dir/file | grep -q dirns
setfattr -x ceph.dir.layout.pool_namespace dir
getfattr -n ceph.dir.layout dir | grep -q -v pool_namespace=dirns
setfattr -x ceph.dir.layout dir
getfattr -n ceph.dir.layout dir 2>&1 | grep -q 'No such attribute'
echo OK
| 4,935 | 41.551724 | 132 |
sh
|
null |
ceph-main/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh
|
#!/usr/bin/env bash
set -e
touch foo.$$
ceph osd pool create foo.$$ 8
ceph fs add_data_pool cephfs foo.$$
setfattr -n ceph.file.layout.pool -v foo.$$ foo.$$
# cleanup
rm foo.$$
ceph fs rm_data_pool cephfs foo.$$
ceph osd pool rm foo.$$ foo.$$ --yes-i-really-really-mean-it
echo OK
| 285 | 16.875 | 60 |
sh
|
null |
ceph-main/qa/workunits/fs/misc/multiple_rsync.sh
|
#!/bin/sh -ex
# Populate with some arbitrary files from the local system. Take
# a copy to protect against false fails from system updates during test.
export PAYLOAD=/tmp/multiple_rsync_payload.$$
sudo cp -r /usr/lib/ $PAYLOAD
set -e
sudo rsync -av $PAYLOAD payload.1
sudo rsync -av $PAYLOAD payload.2
# this shouldn't transfer any additional files
echo we should get 4 here if no additional files are transferred
sudo rsync -auv $PAYLOAD payload.1 | tee /tmp/$$
hexdump -C /tmp/$$
wc -l /tmp/$$ | grep 4
sudo rsync -auv $PAYLOAD payload.2 | tee /tmp/$$
hexdump -C /tmp/$$
wc -l /tmp/$$ | grep 4
echo OK
rm /tmp/$$
sudo rm -rf $PAYLOAD
| 644 | 23.807692 | 72 |
sh
|
null |
ceph-main/qa/workunits/fs/misc/rstats.sh
|
#!/usr/bin/env bash
set -x
timeout=30
old_value=""
new_value=""
wait_until_changed() {
name=$1
wait=0
while [ $wait -lt $timeout ]; do
new_value=`getfattr --only-value -n ceph.dir.$name .`
[ $new_value == $old_value ] || return 0
sleep 1
wait=$(($wait + 1))
done
return 1
}
check_rctime() {
old_sec=$(echo $old_value | cut -d. -f1)
old_nsec=$(echo $old_value | cut -d. -f2)
new_sec=$(echo $new_value | cut -d. -f1)
new_nsec=$(echo $new_value | cut -d. -f2)
[ "$old_sec" -lt "$new_sec" ] && return 0
[ "$old_sec" -gt "$new_sec" ] && return 1
[ "$old_nsec" -lt "$new_nsec" ] && return 0
return 1
}
# sync(3) does not make ceph-fuse flush dirty caps, because fuse kernel module
# does not notify ceph-fuse about it. Use fsync(3) instead.
fsync_path() {
cmd="import os; fd=os.open(\"$1\", os.O_RDONLY); os.fsync(fd); os.close(fd)"
python3 -c "$cmd"
}
set -e
mkdir -p rstats_testdir/d1/d2
cd rstats_testdir
# rfiles
old_value=`getfattr --only-value -n ceph.dir.rfiles .`
[ $old_value == 0 ] || false
touch d1/d2/f1
wait_until_changed rfiles
[ $new_value == $(($old_value + 1)) ] || false
# rsubdirs
old_value=`getfattr --only-value -n ceph.dir.rsubdirs .`
[ $old_value == 3 ] || false
mkdir d1/d2/d3
wait_until_changed rsubdirs
[ $new_value == $(($old_value + 1)) ] || false
# rbytes
old_value=`getfattr --only-value -n ceph.dir.rbytes .`
[ $old_value == 0 ] || false
echo hello > d1/d2/f2
fsync_path d1/d2/f2
wait_until_changed rbytes
[ $new_value == $(($old_value + 6)) ] || false
#rctime
old_value=`getfattr --only-value -n ceph.dir.rctime .`
touch d1/d2/d3 # touch existing file
fsync_path d1/d2/d3
wait_until_changed rctime
check_rctime
old_value=`getfattr --only-value -n ceph.dir.rctime .`
touch d1/d2/f3 # create new file
wait_until_changed rctime
check_rctime
cd ..
rm -rf rstats_testdir
echo OK
| 1,836 | 21.679012 | 78 |
sh
|
null |
ceph-main/qa/workunits/fs/misc/trivial_sync.sh
|
#!/usr/bin/env bash
set -e
mkdir foo
echo foo > bar
sync
| 59 | 6.5 | 19 |
sh
|
null |
ceph-main/qa/workunits/fs/misc/xattrs.sh
|
#!/bin/sh -x
set -e
touch file
setfattr -n user.foo -v foo file
setfattr -n user.bar -v bar file
setfattr -n user.empty file
getfattr -d file | grep foo
getfattr -d file | grep bar
getfattr -d file | grep empty
echo OK.
| 224 | 14 | 32 |
sh
|
null |
ceph-main/qa/workunits/fs/norstats/kernel_untar_tar.sh
|
#!/usr/bin/env bash
# check that no file changes while it is being archived
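# (tar exits non-zero with "file changed as we read it" in that case, and the
# "set -ex" below turns that into a test failure)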
set -ex
KERNEL=linux-4.0.5
wget -q http://download.ceph.com/qa/$KERNEL.tar.xz
mkdir untar_tar
cd untar_tar
tar Jxvf ../$KERNEL.tar.xz $KERNEL/Documentation/
tar cf doc.tar $KERNEL
tar xf doc.tar
sync
tar c $KERNEL >/dev/null
rm -rf $KERNEL
tar xf doc.tar
sync
tar c $KERNEL >/dev/null
echo Ok
| 376 | 12.962963 | 53 |
sh
|
null |
ceph-main/qa/workunits/fs/quota/quota.sh
|
#!/usr/bin/env bash
set -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
function write_file()
{
set +x
for ((i=1;i<=$2;i++))
do
dd if=/dev/zero of=$1 bs=1M count=1 conv=notrunc oflag=append 2>/dev/null >/dev/null
if [ $? != 0 ]; then
echo Try to write $(($i * 1048576))
set -x
return 1
fi
sleep 0.05
done
set -x
return 0
}
mkdir quota-test
cd quota-test
# bytes
setfattr . -n ceph.quota.max_bytes -v 100M
expect_false write_file big 1000 # 1g
expect_false write_file second 10
setfattr . -n ceph.quota.max_bytes -v 0
dd if=/dev/zero of=third bs=1M count=10
dd if=/dev/zero of=big2 bs=1M count=100
rm -rf *
# files
setfattr . -n ceph.quota.max_files -v 5
mkdir ok
touch ok/1
touch ok/2
touch 3
expect_false touch shouldbefail # 5 files will include the "."
expect_false touch ok/shouldbefail # 5 files will include the "."
setfattr . -n ceph.quota.max_files -v 0
touch shouldbecreated
touch shouldbecreated2
rm -rf *
# mix
mkdir bytes bytes/files
setfattr bytes -n ceph.quota.max_bytes -v 10M
setfattr bytes/files -n ceph.quota.max_files -v 5
dd if=/dev/zero of=bytes/files/1 bs=1M count=4
dd if=/dev/zero of=bytes/files/2 bs=1M count=4
expect_false write_file bytes/files/3 1000
expect_false write_file bytes/files/4 1000
expect_false write_file bytes/files/5 1000
stat --printf="%n %s\n" bytes/files/1 #4M
stat --printf="%n %s\n" bytes/files/2 #4M
stat --printf="%n %s\n" bytes/files/3 #bigger than 2M
stat --printf="%n %s\n" bytes/files/4 #should be zero
expect_false stat bytes/files/5 #shouldn't be exist
rm -rf *
#mv
mkdir files limit
truncate files/file -s 10G
setfattr limit -n ceph.quota.max_bytes -v 1M
expect_false mv files limit/
rm -rf *
#limit by ancestor
mkdir -p ancestor/p1/p2/parent/p3
setfattr ancestor -n ceph.quota.max_bytes -v 1M
setfattr ancestor/p1/p2/parent -n ceph.quota.max_bytes -v 1G
expect_false write_file ancestor/p1/p2/parent/p3/file1 900 #900m
stat --printf="%n %s\n" ancestor/p1/p2/parent/p3/file1
#get/set attribute
setfattr -n ceph.quota.max_bytes -v 0 .
setfattr -n ceph.quota.max_bytes -v 1 .
setfattr -n ceph.quota.max_bytes -v 9223372036854775807 .
expect_false setfattr -n ceph.quota.max_bytes -v 9223372036854775808 .
expect_false setfattr -n ceph.quota.max_bytes -v -1 .
expect_false setfattr -n ceph.quota.max_bytes -v -9223372036854775808 .
expect_false setfattr -n ceph.quota.max_bytes -v -9223372036854775809 .
setfattr -n ceph.quota.max_bytes -v 0 .
setfattr -n ceph.quota.max_bytes -v 1Ti .
setfattr -n ceph.quota.max_bytes -v 8388607Ti .
expect_false setfattr -n ceph.quota.max_bytes -v 8388608Ti .
expect_false setfattr -n ceph.quota.max_bytes -v -1Ti .
expect_false setfattr -n ceph.quota.max_bytes -v -8388609Ti .
expect_false setfattr -n ceph.quota.max_bytes -v -8388610Ti .
setfattr -n ceph.quota.max_files -v 0 .
setfattr -n ceph.quota.max_files -v 1 .
setfattr -n ceph.quota.max_files -v 9223372036854775807 .
expect_false setfattr -n ceph.quota.max_files -v 9223372036854775808 .
expect_false setfattr -n ceph.quota.max_files -v -1 .
expect_false setfattr -n ceph.quota.max_files -v -9223372036854775808 .
expect_false setfattr -n ceph.quota.max_files -v -9223372036854775809 .
setfattr -n ceph.quota -v "max_bytes=0 max_files=0" .
setfattr -n ceph.quota -v "max_bytes=1 max_files=0" .
setfattr -n ceph.quota -v "max_bytes=0 max_files=1" .
setfattr -n ceph.quota -v "max_bytes=1 max_files=1" .
expect_false setfattr -n ceph.quota -v "max_bytes=-1 max_files=0" .
expect_false setfattr -n ceph.quota -v "max_bytes=0 max_files=-1" .
expect_false setfattr -n ceph.quota -v "max_bytes=-1 max_files=-1" .
#addme
cd ..
rm -rf quota-test
echo OK
| 3,702 | 26.029197 | 86 |
sh
|
null |
ceph-main/qa/workunits/fs/snaps/snap-rm-diff.sh
|
#!/bin/sh -ex
wget -q http://download.ceph.com/qa/linux-2.6.33.tar.bz2
mkdir foo
cp linux* foo
mkdir foo/.snap/barsnap
rm foo/linux*
diff -q foo/.snap/barsnap/linux* linux* && echo "passed: files are identical"
rmdir foo/.snap/barsnap
echo OK
| 244 | 21.272727 | 77 |
sh
|
null |
ceph-main/qa/workunits/fs/snaps/snaptest-1.sh
|
#!/usr/bin/env bash
set -ex
echo 1 > file1
echo 2 > file2
echo 3 > file3
[ -e file4 ] && rm file4
mkdir .snap/snap1
echo 4 > file4
now=`ls`
then=`ls .snap/snap1`
rmdir .snap/snap1
if [ "$now" = "$then" ]; then
echo live and snap contents are identical?
false
fi
# do it again
echo 1 > file1
echo 2 > file2
echo 3 > file3
mkdir .snap/snap1
echo 4 > file4
rmdir .snap/snap1
rm file?
echo OK
| 402 | 12.433333 | 46 |
sh
|
null |
ceph-main/qa/workunits/fs/snaps/snaptest-2.sh
|
#!/usr/bin/env bash
echo "Create dir 100 to 199 ..."
for i in $(seq 100 199); do
echo " create dir $i"
mkdir "$i"
for y in $(seq 10 20); do
echo "This is a test file before any snapshot was taken." >"$i/$y"
done
done
echo "Take first snapshot .snap/test1"
mkdir .snap/test1
echo "Create dir 200 to 299 ..."
for i in $(seq 200 299); do
echo " create dir $i"
mkdir $i
for y in $(seq 20 29); do
echo "This is a test file. Created after .snap/test1" >"$i/$y"
done
done
echo "Create a snapshot in every first level dir ..."
for dir in $(ls); do
echo " create $dir/.snap/snap-subdir-test"
mkdir "$dir/.snap/snap-subdir-test"
for y in $(seq 30 39); do
echo " create $dir/$y file after the snapshot"
echo "This is a test file. Created after $dir/.snap/snap-subdir-test" >"$dir/$y"
done
done
echo "Take second snapshot .snap/test2"
mkdir .snap/test2
echo "Copy content of .snap/test1 to copyofsnap1 ..."
mkdir copyofsnap1
cp -Rv .snap/test1 copyofsnap1/
echo "Take third snapshot .snap/test3"
mkdir .snap/test3
echo "Delete the snapshots..."
find ./ -type d -print | \
xargs -I% -n1 find %/.snap -mindepth 1 -maxdepth 1 \
\( ! -name "_*" \) -print 2>/dev/null
find ./ -type d -print | \
xargs -I% -n1 find %/.snap -mindepth 1 -maxdepth 1 \
\( ! -name "_*" \) -print 2>/dev/null | \
xargs -n1 rmdir
echo "Delete all the files and directories ..."
rm -Rfv ./*
echo OK
| 1,522 | 24.383333 | 96 |
sh
|
null |
ceph-main/qa/workunits/fs/snaps/snaptest-authwb.sh
|
#!/bin/sh -x
set -e
touch foo
chmod +x foo
mkdir .snap/s
find .snap/s/foo -executable | grep foo
rmdir .snap/s
rm foo
echo OK
| 129 | 9 | 39 |
sh
|