ceph-main/src/pybind/mgr/rbd_support/perf.py
import errno
import json
import rados
import rbd
import time
import traceback
from datetime import datetime, timedelta
from threading import Condition, Lock, Thread
from typing import cast, Any, Callable, Dict, List, Optional, Set, Tuple, Union
from .common import (GLOBAL_POOL_KEY, authorize_request, extract_pool_key,
get_rbd_pools, PoolKeyT)
QUERY_POOL_ID = "pool_id"
QUERY_POOL_ID_MAP = "pool_id_map"
QUERY_IDS = "query_ids"
QUERY_SUM_POOL_COUNTERS = "pool_counters"
QUERY_RAW_POOL_COUNTERS = "raw_pool_counters"
QUERY_LAST_REQUEST = "last_request"
OSD_PERF_QUERY_REGEX_MATCH_ALL = '^(.*)$'
OSD_PERF_QUERY_COUNTERS = ['write_ops',
'read_ops',
'write_bytes',
'read_bytes',
'write_latency',
'read_latency']
OSD_PERF_QUERY_COUNTERS_INDICES = {
OSD_PERF_QUERY_COUNTERS[i]: i for i in range(len(OSD_PERF_QUERY_COUNTERS))}
OSD_PERF_QUERY_LATENCY_COUNTER_INDICES = [4, 5]
OSD_PERF_QUERY_MAX_RESULTS = 256
POOL_REFRESH_INTERVAL = timedelta(minutes=5)
QUERY_EXPIRE_INTERVAL = timedelta(minutes=1)
STATS_RATE_INTERVAL = timedelta(minutes=1)
REPORT_MAX_RESULTS = 64
# {(pool_id, namespace)...}
ResolveImageNamesT = Set[Tuple[int, str]]
# (time, [value,...])
PerfCounterT = Tuple[int, List[int]]
# current, previous
RawImageCounterT = Tuple[PerfCounterT, Optional[PerfCounterT]]
# image_id => perf_counter
RawImagesCounterT = Dict[str, RawImageCounterT]
# namespace => raw_images
RawNamespacesCountersT = Dict[str, RawImagesCounterT]
# pool_id => namespaces_counters
RawPoolCountersT = Dict[int, RawNamespacesCountersT]
SumImageCounterT = List[int]
# image_id => sum_image
SumImagesCounterT = Dict[str, SumImageCounterT]
# namespace => sum_images
SumNamespacesCountersT = Dict[str, SumImagesCounterT]
# pool_id => sum_namespaces
SumPoolCountersT = Dict[int, SumNamespacesCountersT]
ExtractDataFuncT = Callable[[int, Optional[RawImageCounterT], SumImageCounterT], float]
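# For orientation, a hedged sketch (values invented for illustration) of the
# nested layout the aliases above describe: a RawPoolCountersT maps
# pool_id -> namespace -> image_id -> (current, previous) samples, where each
# sample is (timestamp, [counter values in OSD_PERF_QUERY_COUNTERS order]):
#
#   example_raw: RawPoolCountersT = {
#       2: {                                   # pool_id
#           "": {                              # namespace ("" is the default)
#               "10abcdef": (
#                   (1700000060, [5, 3, 4096, 2048, 120, 80]),   # current
#                   (1700000000, [2, 1, 1024,  512,  60, 40]),   # previous (may be None)
#               ),
#           },
#       },
#   }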
class PerfHandler:
user_queries: Dict[PoolKeyT, Dict[str, Any]] = {}
image_cache: Dict[str, str] = {}
lock = Lock()
query_condition = Condition(lock)
refresh_condition = Condition(lock)
image_name_cache: Dict[Tuple[int, str], Dict[str, str]] = {}
image_name_refresh_time = datetime.fromtimestamp(0)
@classmethod
def prepare_regex(cls, value: Any) -> str:
return '^({})$'.format(value)
@classmethod
def prepare_osd_perf_query(cls,
pool_id: Optional[int],
namespace: Optional[str],
counter_type: str) -> Dict[str, Any]:
pool_id_regex = OSD_PERF_QUERY_REGEX_MATCH_ALL
namespace_regex = OSD_PERF_QUERY_REGEX_MATCH_ALL
if pool_id:
pool_id_regex = cls.prepare_regex(pool_id)
if namespace:
namespace_regex = cls.prepare_regex(namespace)
return {
'key_descriptor': [
{'type': 'pool_id', 'regex': pool_id_regex},
{'type': 'namespace', 'regex': namespace_regex},
{'type': 'object_name',
'regex': '^(?:rbd|journal)_data\\.(?:([0-9]+)\\.)?([^.]+)\\.'},
],
'performance_counter_descriptors': OSD_PERF_QUERY_COUNTERS,
'limit': {'order_by': counter_type,
'max_count': OSD_PERF_QUERY_MAX_RESULTS},
}
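# As a hedged illustration, prepare_osd_perf_query(2, None, 'write_ops') would
# build a descriptor along these lines (object_name regex abbreviated):
#
#   {
#       'key_descriptor': [
#           {'type': 'pool_id', 'regex': '^(2)$'},
#           {'type': 'namespace', 'regex': '^(.*)$'},
#           {'type': 'object_name', 'regex': '^(?:rbd|journal)_data\\. ...'},
#       ],
#       'performance_counter_descriptors': OSD_PERF_QUERY_COUNTERS,
#       'limit': {'order_by': 'write_ops', 'max_count': OSD_PERF_QUERY_MAX_RESULTS},
#   }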
@classmethod
def pool_spec_search_keys(cls, pool_key: str) -> List[str]:
return [pool_key[0:len(pool_key) - x]
for x in range(0, len(pool_key) + 1)]
@classmethod
def submatch_pool_key(cls, pool_key: PoolKeyT, search_key: str) -> bool:
return ((pool_key[1] == search_key[1] or not search_key[1])
and (pool_key[0] == search_key[0] or not search_key[0]))
def __init__(self, module: Any) -> None:
self.module = module
self.log = module.log
self.stop_thread = False
self.thread = Thread(target=self.run)
def setup(self) -> None:
self.thread.start()
def shutdown(self) -> None:
self.log.info("PerfHandler: shutting down")
self.stop_thread = True
if self.thread.is_alive():
self.log.debug("PerfHandler: joining thread")
self.thread.join()
self.log.info("PerfHandler: shut down")
def run(self) -> None:
try:
self.log.info("PerfHandler: starting")
while not self.stop_thread:
with self.lock:
self.scrub_expired_queries()
self.process_raw_osd_perf_counters()
self.refresh_condition.notify()
stats_period = self.module.get_ceph_option("mgr_stats_period")
self.query_condition.wait(stats_period)
self.log.debug("PerfHandler: tick")
except (rados.ConnectionShutdown, rbd.ConnectionShutdown):
self.log.exception("PerfHandler: client blocklisted")
self.module.client_blocklisted.set()
except Exception as ex:
self.log.fatal("Fatal runtime error: {}\n{}".format(
ex, traceback.format_exc()))
def merge_raw_osd_perf_counters(self,
pool_key: PoolKeyT,
query: Dict[str, Any],
now_ts: int,
resolve_image_names: ResolveImageNamesT) -> RawPoolCountersT:
pool_id_map = query[QUERY_POOL_ID_MAP]
# collect and combine the raw counters from all sort orders
raw_pool_counters: Dict[int, Dict[str, Dict[str, Any]]] = query.setdefault(QUERY_RAW_POOL_COUNTERS, {})
for query_id in query[QUERY_IDS]:
res = self.module.get_osd_perf_counters(query_id)
for counter in res['counters']:
# replace pool id from object name if it exists
k = counter['k']
pool_id = int(k[2][0]) if k[2][0] else int(k[0][0])
namespace = k[1][0]
image_id = k[2][1]
# ignore metrics from non-matching pools/namespaces
if pool_id not in pool_id_map:
continue
if pool_key[1] is not None and pool_key[1] != namespace:
continue
# flag the pool (and namespace) for refresh if we cannot find
# image name in the cache
resolve_image_key = (pool_id, namespace)
if image_id not in self.image_name_cache.get(resolve_image_key, {}):
resolve_image_names.add(resolve_image_key)
# copy the 'sum' counter values for each image (ignore count)
# if we haven't already processed it for this round
raw_namespaces = raw_pool_counters.setdefault(pool_id, {})
raw_images = raw_namespaces.setdefault(namespace, {})
raw_image = raw_images.get(image_id)
# save the last two perf counters for each image
new_current = (now_ts, [int(x[0]) for x in counter['c']])
if raw_image:
old_current, _ = raw_image
if old_current[0] < now_ts:
raw_images[image_id] = (new_current, old_current)
else:
raw_images[image_id] = (new_current, None)
self.log.debug("merge_raw_osd_perf_counters: {}".format(raw_pool_counters))
return raw_pool_counters
def sum_osd_perf_counters(self,
query: Dict[str, dict],
raw_pool_counters: RawPoolCountersT,
now_ts: int) -> SumPoolCountersT:
# update the cumulative counters for each image
sum_pool_counters = query.setdefault(QUERY_SUM_POOL_COUNTERS, {})
for pool_id, raw_namespaces in raw_pool_counters.items():
sum_namespaces = sum_pool_counters.setdefault(pool_id, {})
for namespace, raw_images in raw_namespaces.items():
sum_namespace = sum_namespaces.setdefault(namespace, {})
for image_id, raw_image in raw_images.items():
# zero-out non-updated raw counters
if not raw_image[0]:
continue
old_current, _ = raw_image
if old_current[0] < now_ts:
new_current = (now_ts, [0] * len(old_current[1]))
raw_images[image_id] = (new_current, old_current)
continue
counters = old_current[1]
# copy raw counters if this is a newly discovered image or
# increment existing counters
sum_image = sum_namespace.setdefault(image_id, None)
if sum_image:
for i in range(len(counters)):
sum_image[i] += counters[i]
else:
sum_namespace[image_id] = [x for x in counters]
self.log.debug("sum_osd_perf_counters: {}".format(sum_pool_counters))
return sum_pool_counters
def refresh_image_names(self, resolve_image_names: ResolveImageNamesT) -> None:
for pool_id, namespace in resolve_image_names:
image_key = (pool_id, namespace)
images = self.image_name_cache.setdefault(image_key, {})
with self.module.rados.open_ioctx2(int(pool_id)) as ioctx:
ioctx.set_namespace(namespace)
for image_meta in rbd.RBD().list2(ioctx):
images[image_meta['id']] = image_meta['name']
self.log.debug("resolve_image_names: {}={}".format(image_key, images))
def scrub_missing_images(self) -> None:
for pool_key, query in self.user_queries.items():
raw_pool_counters = query.get(QUERY_RAW_POOL_COUNTERS, {})
sum_pool_counters = query.get(QUERY_SUM_POOL_COUNTERS, {})
for pool_id, sum_namespaces in sum_pool_counters.items():
raw_namespaces = raw_pool_counters.get(pool_id, {})
for namespace, sum_images in sum_namespaces.items():
raw_images = raw_namespaces.get(namespace, {})
image_key = (pool_id, namespace)
image_names = self.image_name_cache.get(image_key, {})
for image_id in list(sum_images.keys()):
# scrub image counters if we failed to resolve image name
if image_id not in image_names:
self.log.debug("scrub_missing_images: dropping {}/{}".format(
image_key, image_id))
del sum_images[image_id]
if image_id in raw_images:
del raw_images[image_id]
def process_raw_osd_perf_counters(self) -> None:
now = datetime.now()
now_ts = int(now.strftime("%s"))
# clear the image name cache if we need to refresh all active pools
if self.image_name_cache and \
self.image_name_refresh_time + POOL_REFRESH_INTERVAL < now:
self.log.debug("process_raw_osd_perf_counters: expiring image name cache")
self.image_name_cache = {}
resolve_image_names: Set[Tuple[int, str]] = set()
for pool_key, query in self.user_queries.items():
if not query[QUERY_IDS]:
continue
raw_pool_counters = self.merge_raw_osd_perf_counters(
pool_key, query, now_ts, resolve_image_names)
self.sum_osd_perf_counters(query, raw_pool_counters, now_ts)
if resolve_image_names:
self.image_name_refresh_time = now
self.refresh_image_names(resolve_image_names)
self.scrub_missing_images()
elif not self.image_name_cache:
self.scrub_missing_images()
def resolve_pool_id(self, pool_name: str) -> int:
pool_id = self.module.rados.pool_lookup(pool_name)
if not pool_id:
raise rados.ObjectNotFound("Pool '{}' not found".format(pool_name),
errno.ENOENT)
return pool_id
def scrub_expired_queries(self) -> None:
# perf counters need to be periodically refreshed to continue
# to be registered
expire_time = datetime.now() - QUERY_EXPIRE_INTERVAL
for pool_key in list(self.user_queries.keys()):
user_query = self.user_queries[pool_key]
if user_query[QUERY_LAST_REQUEST] < expire_time:
self.unregister_osd_perf_queries(pool_key, user_query[QUERY_IDS])
del self.user_queries[pool_key]
def register_osd_perf_queries(self,
pool_id: Optional[int],
namespace: Optional[str]) -> List[int]:
query_ids = []
try:
for counter in OSD_PERF_QUERY_COUNTERS:
query = self.prepare_osd_perf_query(pool_id, namespace, counter)
self.log.debug("register_osd_perf_queries: {}".format(query))
query_id = self.module.add_osd_perf_query(query)
if query_id is None:
raise RuntimeError('Failed to add OSD perf query: {}'.format(query))
query_ids.append(query_id)
except Exception:
for query_id in query_ids:
self.module.remove_osd_perf_query(query_id)
raise
return query_ids
def unregister_osd_perf_queries(self, pool_key: PoolKeyT, query_ids: List[int]) -> None:
self.log.info("unregister_osd_perf_queries: pool_key={}, query_ids={}".format(
pool_key, query_ids))
for query_id in query_ids:
self.module.remove_osd_perf_query(query_id)
query_ids[:] = []
def register_query(self, pool_key: PoolKeyT) -> Dict[str, Any]:
if pool_key not in self.user_queries:
pool_name, namespace = pool_key
pool_id = None
if pool_name:
pool_id = self.resolve_pool_id(cast(str, pool_name))
user_query = {
QUERY_POOL_ID: pool_id,
QUERY_POOL_ID_MAP: {pool_id: pool_name},
QUERY_IDS: self.register_osd_perf_queries(pool_id, namespace),
QUERY_LAST_REQUEST: datetime.now()
}
self.user_queries[pool_key] = user_query
# force an immediate stat pull if this is a new query
self.query_condition.notify()
self.refresh_condition.wait(5)
else:
user_query = self.user_queries[pool_key]
# ensure query doesn't expire
user_query[QUERY_LAST_REQUEST] = datetime.now()
if pool_key == GLOBAL_POOL_KEY:
# refresh the global pool id -> name map upon each
# processing period
user_query[QUERY_POOL_ID_MAP] = {
pool_id: pool_name for pool_id, pool_name
in get_rbd_pools(self.module).items()}
self.log.debug("register_query: pool_key={}, query_ids={}".format(
pool_key, user_query[QUERY_IDS]))
return user_query
def extract_stat(self,
index: int,
raw_image: Optional[RawImageCounterT],
sum_image: Any) -> float:
# require two raw counters between a fixed time window
if not raw_image or not raw_image[0] or not raw_image[1]:
return 0
current_counter, previous_counter = cast(Tuple[PerfCounterT, PerfCounterT], raw_image)
current_time = current_counter[0]
previous_time = previous_counter[0]
if current_time <= previous_time or \
current_time - previous_time > STATS_RATE_INTERVAL.total_seconds():
return 0
current_value = current_counter[1][index]
instant_rate = float(current_value) / (current_time - previous_time)
# convert latencies from sum to average per op
ops_index = None
if OSD_PERF_QUERY_COUNTERS[index] == 'write_latency':
ops_index = OSD_PERF_QUERY_COUNTERS_INDICES['write_ops']
elif OSD_PERF_QUERY_COUNTERS[index] == 'read_latency':
ops_index = OSD_PERF_QUERY_COUNTERS_INDICES['read_ops']
if ops_index is not None:
ops = max(1, self.extract_stat(ops_index, raw_image, sum_image))
instant_rate /= ops
return instant_rate
def extract_counter(self,
index: int,
raw_image: Optional[RawImageCounterT],
sum_image: List[int]) -> int:
if sum_image:
return sum_image[index]
return 0
def generate_report(self,
query: Dict[str, Union[Dict[str, str],
Dict[int, Dict[str, dict]]]],
sort_by: str,
extract_data: ExtractDataFuncT) -> Tuple[Dict[int, str],
List[Dict[str, List[float]]]]:
pool_id_map = cast(Dict[int, str], query[QUERY_POOL_ID_MAP])
sum_pool_counters = cast(SumPoolCountersT,
query.setdefault(QUERY_SUM_POOL_COUNTERS,
cast(SumPoolCountersT, {})))
# pool_id => {namespace => {image_id => [counter..]}}
raw_pool_counters = cast(RawPoolCountersT,
query.setdefault(QUERY_RAW_POOL_COUNTERS,
cast(RawPoolCountersT, {})))
sort_by_index = OSD_PERF_QUERY_COUNTERS.index(sort_by)
# pre-sort and limit the response
results = []
for pool_id, sum_namespaces in sum_pool_counters.items():
if pool_id not in pool_id_map:
continue
raw_namespaces: RawNamespacesCountersT = raw_pool_counters.get(pool_id, {})
for namespace, sum_images in sum_namespaces.items():
raw_images = raw_namespaces.get(namespace, {})
for image_id, sum_image in sum_images.items():
raw_image = raw_images.get(image_id)
# always sort by recent IO activity
results.append(((pool_id, namespace, image_id),
self.extract_stat(sort_by_index, raw_image,
sum_image)))
results = sorted(results, key=lambda x: x[1], reverse=True)[:REPORT_MAX_RESULTS]
# build the report in sorted order
pool_descriptors: Dict[str, int] = {}
counters = []
for key, _ in results:
pool_id = key[0]
pool_name = pool_id_map[pool_id]
namespace = key[1]
image_id = key[2]
image_names = self.image_name_cache.get((pool_id, namespace), {})
image_name = image_names[image_id]
raw_namespaces = raw_pool_counters.get(pool_id, {})
raw_images = raw_namespaces.get(namespace, {})
raw_image = raw_images.get(image_id)
sum_namespaces = sum_pool_counters[pool_id]
sum_images = sum_namespaces[namespace]
sum_image = sum_images.get(image_id, [])
pool_descriptor = pool_name
if namespace:
pool_descriptor += "/{}".format(namespace)
pool_index = pool_descriptors.setdefault(pool_descriptor,
len(pool_descriptors))
image_descriptor = "{}/{}".format(pool_index, image_name)
data = [extract_data(i, raw_image, sum_image)
for i in range(len(OSD_PERF_QUERY_COUNTERS))]
# skip if no data to report
if data == [0 for i in range(len(OSD_PERF_QUERY_COUNTERS))]:
continue
counters.append({image_descriptor: data})
return {idx: descriptor for descriptor, idx
in pool_descriptors.items()}, \
counters
def get_perf_data(self,
report: str,
pool_spec: Optional[str],
sort_by: str,
extract_data: ExtractDataFuncT) -> Tuple[int, str, str]:
self.log.debug("get_perf_{}s: pool_spec={}, sort_by={}".format(
report, pool_spec, sort_by))
self.scrub_expired_queries()
pool_key = extract_pool_key(pool_spec)
authorize_request(self.module, pool_key[0], pool_key[1])
user_query = self.register_query(pool_key)
now = datetime.now()
pool_descriptors, counters = self.generate_report(
user_query, sort_by, extract_data)
report = {
'timestamp': time.mktime(now.timetuple()),
'{}_descriptors'.format(report): OSD_PERF_QUERY_COUNTERS,
'pool_descriptors': pool_descriptors,
'{}s'.format(report): counters
}
return 0, json.dumps(report), ""
def get_perf_stats(self,
pool_spec: Optional[str],
sort_by: str) -> Tuple[int, str, str]:
return self.get_perf_data(
"stat", pool_spec, sort_by, self.extract_stat)
def get_perf_counters(self,
pool_spec: Optional[str],
sort_by: str) -> Tuple[int, str, str]:
return self.get_perf_data(
"counter", pool_spec, sort_by, self.extract_counter)
ceph-main/src/pybind/mgr/rbd_support/schedule.py
import datetime
import json
import rados
import rbd
import re
from dateutil.parser import parse
from typing import cast, Any, Callable, Dict, List, Optional, Set, Tuple, TYPE_CHECKING
from .common import get_rbd_pools
if TYPE_CHECKING:
from .module import Module
SCHEDULE_INTERVAL = "interval"
SCHEDULE_START_TIME = "start_time"
class LevelSpec:
def __init__(self,
name: str,
id: str,
pool_id: Optional[str],
namespace: Optional[str],
image_id: Optional[str] = None) -> None:
self.name = name
self.id = id
self.pool_id = pool_id
self.namespace = namespace
self.image_id = image_id
def __eq__(self, level_spec: Any) -> bool:
return self.id == level_spec.id
def is_child_of(self, level_spec: 'LevelSpec') -> bool:
if level_spec.is_global():
return not self.is_global()
if level_spec.pool_id != self.pool_id:
return False
if level_spec.namespace is None:
return self.namespace is not None
if level_spec.namespace != self.namespace:
return False
if level_spec.image_id is None:
return self.image_id is not None
return False
def is_global(self) -> bool:
return self.pool_id is None
def get_pool_id(self) -> Optional[str]:
return self.pool_id
def matches(self,
pool_id: str,
namespace: str,
image_id: Optional[str] = None) -> bool:
if self.pool_id and self.pool_id != pool_id:
return False
if self.namespace and self.namespace != namespace:
return False
if self.image_id and self.image_id != image_id:
return False
return True
def intersects(self, level_spec: 'LevelSpec') -> bool:
if self.pool_id is None or level_spec.pool_id is None:
return True
if self.pool_id != level_spec.pool_id:
return False
if self.namespace is None or level_spec.namespace is None:
return True
if self.namespace != level_spec.namespace:
return False
if self.image_id is None or level_spec.image_id is None:
return True
if self.image_id != level_spec.image_id:
return False
return True
@classmethod
def make_global(cls) -> 'LevelSpec':
return LevelSpec("", "", None, None, None)
@classmethod
def from_pool_spec(cls,
pool_id: int,
pool_name: str,
namespace: Optional[str] = None) -> 'LevelSpec':
if namespace is None:
id = "{}".format(pool_id)
name = "{}/".format(pool_name)
else:
id = "{}/{}".format(pool_id, namespace)
name = "{}/{}/".format(pool_name, namespace)
return LevelSpec(name, id, str(pool_id), namespace, None)
@classmethod
def from_name(cls,
module: 'Module',
name: str,
namespace_validator: Optional[Callable] = None,
image_validator: Optional[Callable] = None,
allow_image_level: bool = True) -> 'LevelSpec':
# parse names like:
# '', 'rbd/', 'rbd/ns/', 'rbd//image', 'rbd/image', 'rbd/ns/image'
match = re.match(r'^(?:([^/]+)/(?:(?:([^/]*)/|)(?:([^/@]+))?)?)?$',
name)
if not match:
raise ValueError("failed to parse {}".format(name))
if match.group(3) and not allow_image_level:
raise ValueError(
"invalid name {}: image level is not allowed".format(name))
id = ""
pool_id = None
namespace = None
image_name = None
image_id = None
if match.group(1):
pool_name = match.group(1)
try:
pool_id = module.rados.pool_lookup(pool_name)
if pool_id is None:
raise ValueError("pool {} does not exist".format(pool_name))
if pool_id not in get_rbd_pools(module):
raise ValueError("{} is not an RBD pool".format(pool_name))
pool_id = str(pool_id)
id += pool_id
if match.group(2) is not None or match.group(3):
id += "/"
with module.rados.open_ioctx(pool_name) as ioctx:
namespace = match.group(2) or ""
if namespace:
namespaces = rbd.RBD().namespace_list(ioctx)
if namespace not in namespaces:
raise ValueError(
"namespace {} does not exist".format(
namespace))
id += namespace
ioctx.set_namespace(namespace)
if namespace_validator:
namespace_validator(ioctx)
if match.group(3):
image_name = match.group(3)
try:
with rbd.Image(ioctx, image_name,
read_only=True) as image:
image_id = image.id()
id += "/" + image_id
if image_validator:
image_validator(image)
except rbd.ImageNotFound:
raise ValueError("image {} does not exist".format(
image_name))
except rbd.InvalidArgument:
raise ValueError(
"image {} is not in snapshot mirror mode".format(
image_name))
except rados.ObjectNotFound:
raise ValueError("pool {} does not exist".format(pool_name))
# normalize possible input name like 'rbd//image'
if not namespace and image_name:
name = "{}/{}".format(pool_name, image_name)
return LevelSpec(name, id, pool_id, namespace, image_id)
@classmethod
def from_id(cls,
handler: Any,
id: str,
namespace_validator: Optional[Callable] = None,
image_validator: Optional[Callable] = None) -> 'LevelSpec':
# parse ids like:
# '', '123', '123/', '123/ns', '123//image_id', '123/ns/image_id'
match = re.match(r'^(?:(\d+)(?:/([^/]*)(?:/([^/@]+))?)?)?$', id)
if not match:
raise ValueError("failed to parse: {}".format(id))
name = ""
pool_id = None
namespace = None
image_id = None
if match.group(1):
pool_id = match.group(1)
try:
pool_name = handler.module.rados.pool_reverse_lookup(
int(pool_id))
if pool_name is None:
raise ValueError("pool {} does not exist".format(pool_name))
name += pool_name + "/"
if match.group(2) is not None or match.group(3):
with handler.module.rados.open_ioctx(pool_name) as ioctx:
namespace = match.group(2) or ""
if namespace:
namespaces = rbd.RBD().namespace_list(ioctx)
if namespace not in namespaces:
raise ValueError(
"namespace {} does not exist".format(
namespace))
name += namespace + "/"
if namespace_validator:
ioctx.set_namespace(namespace)
elif not match.group(3):
name += "/"
if match.group(3):
image_id = match.group(3)
try:
with rbd.Image(ioctx, image_id=image_id,
read_only=True) as image:
image_name = image.get_name()
name += image_name
if image_validator:
image_validator(image)
except rbd.ImageNotFound:
raise ValueError("image {} does not exist".format(
image_id))
except rbd.InvalidArgument:
raise ValueError(
"image {} is not in snapshot mirror mode".format(
image_id))
except rados.ObjectNotFound:
raise ValueError("pool {} does not exist".format(pool_id))
return LevelSpec(name, id, pool_id, namespace, image_id)
class Interval:
def __init__(self, minutes: int) -> None:
self.minutes = minutes
def __eq__(self, interval: Any) -> bool:
return self.minutes == interval.minutes
def __hash__(self) -> int:
return hash(self.minutes)
def to_string(self) -> str:
if self.minutes % (60 * 24) == 0:
interval = int(self.minutes / (60 * 24))
units = 'd'
elif self.minutes % 60 == 0:
interval = int(self.minutes / 60)
units = 'h'
else:
interval = int(self.minutes)
units = 'm'
return "{}{}".format(interval, units)
@classmethod
def from_string(cls, interval: str) -> 'Interval':
match = re.match(r'^(\d+)(d|h|m)?$', interval)
if not match:
raise ValueError("Invalid interval ({})".format(interval))
minutes = int(match.group(1))
if match.group(2) == 'd':
minutes *= 60 * 24
elif match.group(2) == 'h':
minutes *= 60
return Interval(minutes)
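# A few hedged examples of the interval string format handled above:
#   Interval.from_string("90")  -> 90 minutes,   to_string() == "90m"
#   Interval.from_string("2h")  -> 120 minutes,  to_string() == "2h"
#   Interval.from_string("1d")  -> 1440 minutes, to_string() == "1d"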
class StartTime:
def __init__(self,
hour: int,
minute: int,
tzinfo: Optional[datetime.tzinfo]) -> None:
self.time = datetime.time(hour, minute, tzinfo=tzinfo)
self.minutes = self.time.hour * 60 + self.time.minute
if self.time.tzinfo:
utcoffset = cast(datetime.timedelta, self.time.utcoffset())
self.minutes += int(utcoffset.seconds / 60)
def __eq__(self, start_time: Any) -> bool:
return self.minutes == start_time.minutes
def __hash__(self) -> int:
return hash(self.minutes)
def to_string(self) -> str:
return self.time.isoformat()
@classmethod
def from_string(cls, start_time: Optional[str]) -> Optional['StartTime']:
if not start_time:
return None
try:
t = parse(start_time).timetz()
except ValueError as e:
raise ValueError("Invalid start time {}: {}".format(start_time, e))
return StartTime(t.hour, t.minute, tzinfo=t.tzinfo)
class Schedule:
def __init__(self, name: str) -> None:
self.name = name
self.items: Set[Tuple[Interval, Optional[StartTime]]] = set()
def __len__(self) -> int:
return len(self.items)
def add(self,
interval: Interval,
start_time: Optional[StartTime] = None) -> None:
self.items.add((interval, start_time))
def remove(self,
interval: Interval,
start_time: Optional[StartTime] = None) -> None:
self.items.discard((interval, start_time))
def next_run(self, now: datetime.datetime) -> str:
schedule_time = None
for interval, opt_start in self.items:
period = datetime.timedelta(minutes=interval.minutes)
start_time = datetime.datetime(1970, 1, 1)
if opt_start:
start = cast(StartTime, opt_start)
start_time += datetime.timedelta(minutes=start.minutes)
time = start_time + \
(int((now - start_time) / period) + 1) * period
if schedule_time is None or time < schedule_time:
schedule_time = time
if schedule_time is None:
raise ValueError('no items added')
return datetime.datetime.strftime(schedule_time, "%Y-%m-%d %H:%M:00")
def to_list(self) -> List[Dict[str, Optional[str]]]:
def item_to_dict(interval: Interval,
start_time: Optional[StartTime]) -> Dict[str, Optional[str]]:
if start_time:
schedule_start_time: Optional[str] = start_time.to_string()
else:
schedule_start_time = None
return {SCHEDULE_INTERVAL: interval.to_string(),
SCHEDULE_START_TIME: schedule_start_time}
return [item_to_dict(interval, start_time)
for interval, start_time in self.items]
def to_json(self) -> str:
return json.dumps(self.to_list(), indent=4, sort_keys=True)
@classmethod
def from_json(cls, name: str, val: str) -> 'Schedule':
try:
items = json.loads(val)
schedule = Schedule(name)
for item in items:
interval = Interval.from_string(item[SCHEDULE_INTERVAL])
start_time = item[SCHEDULE_START_TIME] and \
StartTime.from_string(item[SCHEDULE_START_TIME]) or None
schedule.add(interval, start_time)
return schedule
except json.JSONDecodeError as e:
raise ValueError("Invalid JSON ({})".format(str(e)))
except KeyError as e:
raise ValueError(
"Invalid schedule format (missing key {})".format(str(e)))
except TypeError as e:
raise ValueError("Invalid schedule format ({})".format(str(e)))
class Schedules:
def __init__(self, handler: Any) -> None:
self.handler = handler
self.level_specs: Dict[str, LevelSpec] = {}
self.schedules: Dict[str, Schedule] = {}
# Previous versions incorrectly stored the global config in
# the localized module option. Check the config is here and fix it.
schedule_cfg = self.handler.module.get_module_option(
self.handler.MODULE_OPTION_NAME, '')
if not schedule_cfg:
schedule_cfg = self.handler.module.get_localized_module_option(
self.handler.MODULE_OPTION_NAME, '')
if schedule_cfg:
self.handler.module.set_module_option(
self.handler.MODULE_OPTION_NAME, schedule_cfg)
self.handler.module.set_localized_module_option(
self.handler.MODULE_OPTION_NAME, None)
def __len__(self) -> int:
return len(self.schedules)
def load(self,
namespace_validator: Optional[Callable] = None,
image_validator: Optional[Callable] = None) -> None:
self.level_specs = {}
self.schedules = {}
schedule_cfg = self.handler.module.get_module_option(
self.handler.MODULE_OPTION_NAME, '')
if schedule_cfg:
try:
level_spec = LevelSpec.make_global()
self.level_specs[level_spec.id] = level_spec
schedule = Schedule.from_json(level_spec.name, schedule_cfg)
self.schedules[level_spec.id] = schedule
except ValueError:
self.handler.log.error(
"Failed to decode configured schedule {}".format(
schedule_cfg))
for pool_id, pool_name in get_rbd_pools(self.handler.module).items():
try:
with self.handler.module.rados.open_ioctx2(int(pool_id)) as ioctx:
self.load_from_pool(ioctx, namespace_validator,
image_validator)
except rados.ConnectionShutdown:
raise
except rados.Error as e:
self.handler.log.error(
"Failed to load schedules for pool {}: {}".format(
pool_name, e))
def load_from_pool(self,
ioctx: rados.Ioctx,
namespace_validator: Optional[Callable],
image_validator: Optional[Callable]) -> None:
pool_name = ioctx.get_pool_name()
stale_keys = []
start_after = ''
try:
while True:
with rados.ReadOpCtx() as read_op:
self.handler.log.info(
"load_schedules: {}, start_after={}".format(
pool_name, start_after))
it, ret = ioctx.get_omap_vals(read_op, start_after, "", 128)
ioctx.operate_read_op(read_op, self.handler.SCHEDULE_OID)
it = list(it)
for k, v in it:
start_after = k
v = v.decode()
self.handler.log.info(
"load_schedule: {} {}".format(k, v))
try:
try:
level_spec = LevelSpec.from_id(
self.handler, k, namespace_validator,
image_validator)
except ValueError:
self.handler.log.debug(
"Stale schedule key %s in pool %s",
k, pool_name)
stale_keys.append(k)
continue
self.level_specs[level_spec.id] = level_spec
schedule = Schedule.from_json(level_spec.name, v)
self.schedules[level_spec.id] = schedule
except ValueError:
self.handler.log.error(
"Failed to decode schedule: pool={}, {} {}".format(
pool_name, k, v))
if not it:
break
except StopIteration:
pass
except rados.ObjectNotFound:
pass
if stale_keys:
with rados.WriteOpCtx() as write_op:
ioctx.remove_omap_keys(write_op, stale_keys)
ioctx.operate_write_op(write_op, self.handler.SCHEDULE_OID)
def save(self, level_spec: LevelSpec, schedule: Optional[Schedule]) -> None:
if level_spec.is_global():
schedule_cfg = schedule and schedule.to_json() or None
self.handler.module.set_module_option(
self.handler.MODULE_OPTION_NAME, schedule_cfg)
return
pool_id = level_spec.get_pool_id()
assert pool_id
with self.handler.module.rados.open_ioctx2(int(pool_id)) as ioctx:
with rados.WriteOpCtx() as write_op:
if schedule:
ioctx.set_omap(write_op, (level_spec.id, ),
(schedule.to_json(), ))
else:
ioctx.remove_omap_keys(write_op, (level_spec.id, ))
ioctx.operate_write_op(write_op, self.handler.SCHEDULE_OID)
def add(self,
level_spec: LevelSpec,
interval: str,
start_time: Optional[str]) -> None:
schedule = self.schedules.get(level_spec.id, Schedule(level_spec.name))
schedule.add(Interval.from_string(interval),
StartTime.from_string(start_time))
self.schedules[level_spec.id] = schedule
self.level_specs[level_spec.id] = level_spec
self.save(level_spec, schedule)
def remove(self,
level_spec: LevelSpec,
interval: Optional[str],
start_time: Optional[str]) -> None:
schedule = self.schedules.pop(level_spec.id, None)
if schedule:
if interval is None:
schedule = None
else:
try:
schedule.remove(Interval.from_string(interval),
StartTime.from_string(start_time))
finally:
if schedule:
self.schedules[level_spec.id] = schedule
if not schedule:
del self.level_specs[level_spec.id]
self.save(level_spec, schedule)
def find(self,
pool_id: str,
namespace: str,
image_id: Optional[str] = None) -> Optional['Schedule']:
levels = [pool_id, namespace]
if image_id:
levels.append(image_id)
nr_levels = len(levels)
while nr_levels >= 0:
# an empty spec id implies global schedule
level_spec_id = "/".join(levels[:nr_levels])
found = self.schedules.get(level_spec_id)
if found is not None:
return found
nr_levels -= 1
return None
def intersects(self, level_spec: LevelSpec) -> bool:
for ls in self.level_specs.values():
if ls.intersects(level_spec):
return True
return False
def to_list(self, level_spec: LevelSpec) -> Dict[str, dict]:
if level_spec.id in self.schedules:
parent: Optional[LevelSpec] = level_spec
else:
# try to find existing parent
parent = None
for level_spec_id in self.schedules:
ls = self.level_specs[level_spec_id]
if ls == level_spec:
parent = ls
break
if level_spec.is_child_of(ls) and \
(not parent or ls.is_child_of(parent)):
parent = ls
if not parent:
# set to non-existing parent so we still could list its children
parent = level_spec
result = {}
for level_spec_id, schedule in self.schedules.items():
ls = self.level_specs[level_spec_id]
if ls == parent or ls == level_spec or ls.is_child_of(level_spec):
result[level_spec_id] = {
'name': schedule.name,
'schedule': schedule.to_list(),
}
return result
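# A minimal, standalone sketch (not part of schedule.py) of how the helpers
# above round-trip their string and JSON forms; the schedule name and start
# time below are invented for illustration.
def _example_schedule_roundtrip() -> None:
    interval = Interval.from_string("1d")
    assert interval.minutes == 60 * 24
    assert interval.to_string() == "1d"
    schedule = Schedule("rbd/")
    schedule.add(interval, StartTime.from_string("14:00:00-05:00"))
    # serializes to a JSON list of {"interval": ..., "start_time": ...} items
    items = json.loads(schedule.to_json())
    assert items[0][SCHEDULE_INTERVAL] == "1d"
    # and Schedule.from_json() rebuilds an equivalent schedule
    assert len(Schedule.from_json("rbd/", schedule.to_json())) == 1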
ceph-main/src/pybind/mgr/rbd_support/task.py
import errno
import json
import rados
import rbd
import re
import traceback
import uuid
from contextlib import contextmanager
from datetime import datetime, timedelta
from functools import partial, wraps
from threading import Condition, Lock, Thread
from typing import cast, Any, Callable, Dict, Iterator, List, Optional, Tuple, TypeVar
from .common import (authorize_request, extract_pool_key, get_rbd_pools,
is_authorized, GLOBAL_POOL_KEY)
RBD_TASK_OID = "rbd_task"
TASK_SEQUENCE = "sequence"
TASK_ID = "id"
TASK_REFS = "refs"
TASK_MESSAGE = "message"
TASK_RETRY_ATTEMPTS = "retry_attempts"
TASK_RETRY_TIME = "retry_time"
TASK_RETRY_MESSAGE = "retry_message"
TASK_IN_PROGRESS = "in_progress"
TASK_PROGRESS = "progress"
TASK_CANCELED = "canceled"
TASK_REF_POOL_NAME = "pool_name"
TASK_REF_POOL_NAMESPACE = "pool_namespace"
TASK_REF_IMAGE_NAME = "image_name"
TASK_REF_IMAGE_ID = "image_id"
TASK_REF_ACTION = "action"
TASK_REF_ACTION_FLATTEN = "flatten"
TASK_REF_ACTION_REMOVE = "remove"
TASK_REF_ACTION_TRASH_REMOVE = "trash remove"
TASK_REF_ACTION_MIGRATION_EXECUTE = "migrate execute"
TASK_REF_ACTION_MIGRATION_COMMIT = "migrate commit"
TASK_REF_ACTION_MIGRATION_ABORT = "migrate abort"
VALID_TASK_ACTIONS = [TASK_REF_ACTION_FLATTEN,
TASK_REF_ACTION_REMOVE,
TASK_REF_ACTION_TRASH_REMOVE,
TASK_REF_ACTION_MIGRATION_EXECUTE,
TASK_REF_ACTION_MIGRATION_COMMIT,
TASK_REF_ACTION_MIGRATION_ABORT]
TASK_RETRY_INTERVAL = timedelta(seconds=30)
TASK_MAX_RETRY_INTERVAL = timedelta(seconds=300)
MAX_COMPLETED_TASKS = 50
T = TypeVar('T')
FuncT = TypeVar('FuncT', bound=Callable[..., Any])
class Throttle:
def __init__(self: Any, throttle_period: timedelta) -> None:
self.throttle_period = throttle_period
self.time_of_last_call = datetime.min
def __call__(self: 'Throttle', fn: FuncT) -> FuncT:
@wraps(fn)
def wrapper(*args: Any, **kwargs: Any) -> Any:
now = datetime.now()
if self.time_of_last_call + self.throttle_period <= now:
self.time_of_last_call = now
return fn(*args, **kwargs)
return cast(FuncT, wrapper)
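# As a hedged illustration of the decorator above: a function wrapped with
# @Throttle(timedelta(seconds=1)) runs at most once per second -- calls made
# inside the throttle period return None instead of invoking the function
# (see throttled_update_progress() below).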
TaskRefsT = Dict[str, str]
class Task:
def __init__(self, sequence: int, task_id: str, message: str, refs: TaskRefsT):
self.sequence = sequence
self.task_id = task_id
self.message = message
self.refs = refs
self.retry_message: Optional[str] = None
self.retry_attempts = 0
self.retry_time: Optional[datetime] = None
self.in_progress = False
self.progress = 0.0
self.canceled = False
self.failed = False
self.progress_posted = False
def __str__(self) -> str:
return self.to_json()
@property
def sequence_key(self) -> bytes:
return "{0:016X}".format(self.sequence).encode()
def cancel(self) -> None:
self.canceled = True
self.fail("Operation canceled")
def fail(self, message: str) -> None:
self.failed = True
self.failure_message = message
def to_dict(self) -> Dict[str, Any]:
d = {TASK_SEQUENCE: self.sequence,
TASK_ID: self.task_id,
TASK_MESSAGE: self.message,
TASK_REFS: self.refs
}
if self.retry_message:
d[TASK_RETRY_MESSAGE] = self.retry_message
if self.retry_attempts:
d[TASK_RETRY_ATTEMPTS] = self.retry_attempts
if self.retry_time:
d[TASK_RETRY_TIME] = self.retry_time.isoformat()
if self.in_progress:
d[TASK_IN_PROGRESS] = True
d[TASK_PROGRESS] = self.progress
if self.canceled:
d[TASK_CANCELED] = True
return d
def to_json(self) -> str:
return str(json.dumps(self.to_dict()))
@classmethod
def from_json(cls, val: str) -> 'Task':
try:
d = json.loads(val)
action = d.get(TASK_REFS, {}).get(TASK_REF_ACTION)
if action not in VALID_TASK_ACTIONS:
raise ValueError("Invalid task action: {}".format(action))
return Task(d[TASK_SEQUENCE], d[TASK_ID], d[TASK_MESSAGE], d[TASK_REFS])
except json.JSONDecodeError as e:
raise ValueError("Invalid JSON ({})".format(str(e)))
except KeyError as e:
raise ValueError("Invalid task format (missing key {})".format(str(e)))
# pool_name, namespace, image_name
ImageSpecT = Tuple[str, str, str]
# pool_name, namespace
PoolSpecT = Tuple[str, str]
MigrationStatusT = Dict[str, str]
class TaskHandler:
lock = Lock()
condition = Condition(lock)
in_progress_task = None
tasks_by_sequence: Dict[int, Task] = dict()
tasks_by_id: Dict[str, Task] = dict()
completed_tasks: List[Task] = []
sequence = 0
def __init__(self, module: Any) -> None:
self.module = module
self.log = module.log
self.stop_thread = False
self.thread = Thread(target=self.run)
def setup(self) -> None:
with self.lock:
self.init_task_queue()
self.thread.start()
@property
def default_pool_name(self) -> str:
return self.module.get_ceph_option("rbd_default_pool")
def extract_pool_spec(self, pool_spec: str) -> PoolSpecT:
pool_spec = extract_pool_key(pool_spec)
if pool_spec == GLOBAL_POOL_KEY:
pool_spec = (self.default_pool_name, '')
return cast(PoolSpecT, pool_spec)
def extract_image_spec(self, image_spec: str) -> ImageSpecT:
match = re.match(r'^(?:([^/]+)/(?:([^/]+)/)?)?([^/@]+)$',
image_spec or '')
if not match:
raise ValueError("Invalid image spec: {}".format(image_spec))
return (match.group(1) or self.default_pool_name, match.group(2) or '',
match.group(3))
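# A few hedged examples of the accepted image spec forms (pool and image
# names invented for illustration):
#   "rbd/ns1/image1" -> ("rbd", "ns1", "image1")
#   "rbd/image1"     -> ("rbd", "",    "image1")
#   "image1"         -> (<rbd_default_pool>, "", "image1")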
def shutdown(self) -> None:
self.log.info("TaskHandler: shutting down")
self.stop_thread = True
if self.thread.is_alive():
self.log.debug("TaskHandler: joining thread")
self.thread.join()
self.log.info("TaskHandler: shut down")
def run(self) -> None:
try:
self.log.info("TaskHandler: starting")
while not self.stop_thread:
with self.lock:
now = datetime.now()
for sequence in sorted([sequence for sequence, task
in self.tasks_by_sequence.items()
if not task.retry_time or task.retry_time <= now]):
self.execute_task(sequence)
self.condition.wait(5)
self.log.debug("TaskHandler: tick")
except (rados.ConnectionShutdown, rbd.ConnectionShutdown):
self.log.exception("TaskHandler: client blocklisted")
self.module.client_blocklisted.set()
except Exception as ex:
self.log.fatal("Fatal runtime error: {}\n{}".format(
ex, traceback.format_exc()))
@contextmanager
def open_ioctx(self, spec: PoolSpecT) -> Iterator[rados.Ioctx]:
try:
with self.module.rados.open_ioctx(spec[0]) as ioctx:
ioctx.set_namespace(spec[1])
yield ioctx
except rados.ObjectNotFound:
self.log.error("Failed to locate pool {}".format(spec[0]))
raise
@classmethod
def format_image_spec(cls, image_spec: ImageSpecT) -> str:
image = image_spec[2]
if image_spec[1]:
image = "{}/{}".format(image_spec[1], image)
if image_spec[0]:
image = "{}/{}".format(image_spec[0], image)
return image
def init_task_queue(self) -> None:
for pool_id, pool_name in get_rbd_pools(self.module).items():
try:
with self.module.rados.open_ioctx2(int(pool_id)) as ioctx:
self.load_task_queue(ioctx, pool_name)
try:
namespaces = rbd.RBD().namespace_list(ioctx)
except rbd.OperationNotSupported:
self.log.debug("Namespaces not supported")
continue
for namespace in namespaces:
ioctx.set_namespace(namespace)
self.load_task_queue(ioctx, pool_name)
except rados.ObjectNotFound:
# pool DNE
pass
if self.tasks_by_sequence:
self.sequence = list(sorted(self.tasks_by_sequence.keys()))[-1]
self.log.debug("sequence={}, tasks_by_sequence={}, tasks_by_id={}".format(
self.sequence, str(self.tasks_by_sequence), str(self.tasks_by_id)))
def load_task_queue(self, ioctx: rados.Ioctx, pool_name: str) -> None:
pool_spec = pool_name
if ioctx.nspace:
pool_spec += "/{}".format(ioctx.nspace)
start_after = ''
try:
while True:
with rados.ReadOpCtx() as read_op:
self.log.info("load_task_task: {}, start_after={}".format(
pool_spec, start_after))
it, ret = ioctx.get_omap_vals(read_op, start_after, "", 128)
ioctx.operate_read_op(read_op, RBD_TASK_OID)
it = list(it)
for k, v in it:
start_after = k
v = v.decode()
self.log.info("load_task_task: task={}".format(v))
try:
task = Task.from_json(v)
self.append_task(task)
except ValueError:
self.log.error("Failed to decode task: pool_spec={}, task={}".format(pool_spec, v))
if not it:
break
except StopIteration:
pass
except rados.ObjectNotFound:
# rbd_task DNE
pass
def append_task(self, task: Task) -> None:
self.tasks_by_sequence[task.sequence] = task
self.tasks_by_id[task.task_id] = task
def task_refs_match(self, task_refs: TaskRefsT, refs: TaskRefsT) -> bool:
if TASK_REF_IMAGE_ID not in refs and TASK_REF_IMAGE_ID in task_refs:
task_refs = task_refs.copy()
del task_refs[TASK_REF_IMAGE_ID]
self.log.debug("task_refs_match: ref1={}, ref2={}".format(task_refs, refs))
return task_refs == refs
def find_task(self, refs: TaskRefsT) -> Optional[Task]:
self.log.debug("find_task: refs={}".format(refs))
# search for dups and return the original
for task_id in reversed(sorted(self.tasks_by_id.keys())):
task = self.tasks_by_id[task_id]
if self.task_refs_match(task.refs, refs):
return task
# search for a completed task (message replay)
for task in reversed(self.completed_tasks):
if self.task_refs_match(task.refs, refs):
return task
else:
return None
def add_task(self,
ioctx: rados.Ioctx,
message: str,
refs: TaskRefsT) -> str:
self.log.debug("add_task: message={}, refs={}".format(message, refs))
# ensure unique uuid across all pools
while True:
task_id = str(uuid.uuid4())
if task_id not in self.tasks_by_id:
break
self.sequence += 1
task = Task(self.sequence, task_id, message, refs)
# add the task to the rbd_task omap
task_json = task.to_json()
omap_keys = (task.sequence_key, )
omap_vals = (str.encode(task_json), )
self.log.info("adding task: %s %s",
omap_keys[0].decode(),
omap_vals[0].decode())
with rados.WriteOpCtx() as write_op:
ioctx.set_omap(write_op, omap_keys, omap_vals)
ioctx.operate_write_op(write_op, RBD_TASK_OID)
self.append_task(task)
self.condition.notify()
return task_json
def remove_task(self,
ioctx: Optional[rados.Ioctx],
task: Task,
remove_in_memory: bool = True) -> None:
self.log.info("remove_task: task={}".format(str(task)))
if ioctx:
try:
with rados.WriteOpCtx() as write_op:
omap_keys = (task.sequence_key, )
ioctx.remove_omap_keys(write_op, omap_keys)
ioctx.operate_write_op(write_op, RBD_TASK_OID)
except rados.ObjectNotFound:
pass
if remove_in_memory:
try:
del self.tasks_by_id[task.task_id]
del self.tasks_by_sequence[task.sequence]
# keep a record of the last N tasks to help avoid command replay
# races
if not task.failed and not task.canceled:
self.log.debug("remove_task: moving to completed tasks")
self.completed_tasks.append(task)
self.completed_tasks = self.completed_tasks[-MAX_COMPLETED_TASKS:]
except KeyError:
pass
def execute_task(self, sequence: int) -> None:
task = self.tasks_by_sequence[sequence]
self.log.info("execute_task: task={}".format(str(task)))
pool_valid = False
try:
with self.open_ioctx((task.refs[TASK_REF_POOL_NAME],
task.refs[TASK_REF_POOL_NAMESPACE])) as ioctx:
pool_valid = True
action = task.refs[TASK_REF_ACTION]
execute_fn = {TASK_REF_ACTION_FLATTEN: self.execute_flatten,
TASK_REF_ACTION_REMOVE: self.execute_remove,
TASK_REF_ACTION_TRASH_REMOVE: self.execute_trash_remove,
TASK_REF_ACTION_MIGRATION_EXECUTE: self.execute_migration_execute,
TASK_REF_ACTION_MIGRATION_COMMIT: self.execute_migration_commit,
TASK_REF_ACTION_MIGRATION_ABORT: self.execute_migration_abort
}.get(action)
if not execute_fn:
self.log.error("Invalid task action: {}".format(action))
else:
task.in_progress = True
self.in_progress_task = task
self.lock.release()
try:
execute_fn(ioctx, task)
except rbd.OperationCanceled:
self.log.info("Operation canceled: task={}".format(
str(task)))
finally:
self.lock.acquire()
task.in_progress = False
self.in_progress_task = None
self.complete_progress(task)
self.remove_task(ioctx, task)
except rados.ObjectNotFound as e:
self.log.error("execute_task: {}".format(e))
if pool_valid:
task.retry_message = "{}".format(e)
self.update_progress(task, 0)
else:
# pool DNE -- remove in-memory task
self.complete_progress(task)
self.remove_task(None, task)
except (rados.ConnectionShutdown, rbd.ConnectionShutdown):
raise
except (rados.Error, rbd.Error) as e:
self.log.error("execute_task: {}".format(e))
task.retry_message = "{}".format(e)
self.update_progress(task, 0)
finally:
task.in_progress = False
task.retry_attempts += 1
task.retry_time = datetime.now() + min(
TASK_RETRY_INTERVAL * task.retry_attempts,
TASK_MAX_RETRY_INTERVAL)
def progress_callback(self, task: Task, current: int, total: int) -> int:
progress = float(current) / float(total)
self.log.debug("progress_callback: task={}, progress={}".format(
str(task), progress))
# avoid deadlocking when a new command comes in during a progress callback
if not self.lock.acquire(False):
return 0
try:
if not self.in_progress_task or self.in_progress_task.canceled:
return -rbd.ECANCELED
self.in_progress_task.progress = progress
finally:
self.lock.release()
if not task.progress_posted:
# delayed creation of progress event until first callback
self.post_progress(task, progress)
else:
self.throttled_update_progress(task, progress)
return 0
def execute_flatten(self, ioctx: rados.Ioctx, task: Task) -> None:
self.log.info("execute_flatten: task={}".format(str(task)))
try:
with rbd.Image(ioctx, task.refs[TASK_REF_IMAGE_NAME]) as image:
image.flatten(on_progress=partial(self.progress_callback, task))
except rbd.InvalidArgument:
task.fail("Image does not have parent")
self.log.info("{}: task={}".format(task.failure_message, str(task)))
except rbd.ImageNotFound:
task.fail("Image does not exist")
self.log.info("{}: task={}".format(task.failure_message, str(task)))
def execute_remove(self, ioctx: rados.Ioctx, task: Task) -> None:
self.log.info("execute_remove: task={}".format(str(task)))
try:
rbd.RBD().remove(ioctx, task.refs[TASK_REF_IMAGE_NAME],
on_progress=partial(self.progress_callback, task))
except rbd.ImageNotFound:
task.fail("Image does not exist")
self.log.info("{}: task={}".format(task.failure_message, str(task)))
def execute_trash_remove(self, ioctx: rados.Ioctx, task: Task) -> None:
self.log.info("execute_trash_remove: task={}".format(str(task)))
try:
rbd.RBD().trash_remove(ioctx, task.refs[TASK_REF_IMAGE_ID],
on_progress=partial(self.progress_callback, task))
except rbd.ImageNotFound:
task.fail("Image does not exist")
self.log.info("{}: task={}".format(task.failure_message, str(task)))
def execute_migration_execute(self, ioctx: rados.Ioctx, task: Task) -> None:
self.log.info("execute_migration_execute: task={}".format(str(task)))
try:
rbd.RBD().migration_execute(ioctx, task.refs[TASK_REF_IMAGE_NAME],
on_progress=partial(self.progress_callback, task))
except rbd.ImageNotFound:
task.fail("Image does not exist")
self.log.info("{}: task={}".format(task.failure_message, str(task)))
except rbd.InvalidArgument:
task.fail("Image is not migrating")
self.log.info("{}: task={}".format(task.failure_message, str(task)))
def execute_migration_commit(self, ioctx: rados.Ioctx, task: Task) -> None:
self.log.info("execute_migration_commit: task={}".format(str(task)))
try:
rbd.RBD().migration_commit(ioctx, task.refs[TASK_REF_IMAGE_NAME],
on_progress=partial(self.progress_callback, task))
except rbd.ImageNotFound:
task.fail("Image does not exist")
self.log.info("{}: task={}".format(task.failure_message, str(task)))
except rbd.InvalidArgument:
task.fail("Image is not migrating or migration not executed")
self.log.info("{}: task={}".format(task.failure_message, str(task)))
def execute_migration_abort(self, ioctx: rados.Ioctx, task: Task) -> None:
self.log.info("execute_migration_abort: task={}".format(str(task)))
try:
rbd.RBD().migration_abort(ioctx, task.refs[TASK_REF_IMAGE_NAME],
on_progress=partial(self.progress_callback, task))
except rbd.ImageNotFound:
task.fail("Image does not exist")
self.log.info("{}: task={}".format(task.failure_message, str(task)))
except rbd.InvalidArgument:
task.fail("Image is not migrating")
self.log.info("{}: task={}".format(task.failure_message, str(task)))
def complete_progress(self, task: Task) -> None:
if not task.progress_posted:
# ensure progress event exists before we complete/fail it
self.post_progress(task, 0)
self.log.debug("complete_progress: task={}".format(str(task)))
try:
if task.failed:
self.module.remote("progress", "fail", task.task_id,
task.failure_message)
else:
self.module.remote("progress", "complete", task.task_id)
except ImportError:
# progress module is disabled
pass
def _update_progress(self, task: Task, progress: float) -> None:
self.log.debug("update_progress: task={}, progress={}".format(str(task), progress))
try:
refs = {"origin": "rbd_support"}
refs.update(task.refs)
self.module.remote("progress", "update", task.task_id,
task.message, progress, refs)
except ImportError:
# progress module is disabled
pass
def post_progress(self, task: Task, progress: float) -> None:
self._update_progress(task, progress)
task.progress_posted = True
def update_progress(self, task: Task, progress: float) -> None:
if task.progress_posted:
self._update_progress(task, progress)
@Throttle(timedelta(seconds=1))
def throttled_update_progress(self, task: Task, progress: float) -> None:
self.update_progress(task, progress)
def queue_flatten(self, image_spec: str) -> Tuple[int, str, str]:
image_spec = self.extract_image_spec(image_spec)
authorize_request(self.module, image_spec[0], image_spec[1])
self.log.info("queue_flatten: {}".format(image_spec))
refs = {TASK_REF_ACTION: TASK_REF_ACTION_FLATTEN,
TASK_REF_POOL_NAME: image_spec[0],
TASK_REF_POOL_NAMESPACE: image_spec[1],
TASK_REF_IMAGE_NAME: image_spec[2]}
with self.open_ioctx(image_spec[:2]) as ioctx:
try:
with rbd.Image(ioctx, image_spec[2]) as image:
refs[TASK_REF_IMAGE_ID] = image.id()
try:
parent_image_id = image.parent_id()
except rbd.ImageNotFound:
parent_image_id = None
except rbd.ImageNotFound:
pass
task = self.find_task(refs)
if task:
return 0, task.to_json(), ''
if TASK_REF_IMAGE_ID not in refs:
raise rbd.ImageNotFound("Image {} does not exist".format(
self.format_image_spec(image_spec)), errno=errno.ENOENT)
if not parent_image_id:
raise rbd.ImageNotFound("Image {} does not have a parent".format(
self.format_image_spec(image_spec)), errno=errno.ENOENT)
return 0, self.add_task(ioctx,
"Flattening image {}".format(
self.format_image_spec(image_spec)),
refs), ""
def queue_remove(self, image_spec: str) -> Tuple[int, str, str]:
image_spec = self.extract_image_spec(image_spec)
authorize_request(self.module, image_spec[0], image_spec[1])
self.log.info("queue_remove: {}".format(image_spec))
refs = {TASK_REF_ACTION: TASK_REF_ACTION_REMOVE,
TASK_REF_POOL_NAME: image_spec[0],
TASK_REF_POOL_NAMESPACE: image_spec[1],
TASK_REF_IMAGE_NAME: image_spec[2]}
with self.open_ioctx(image_spec[:2]) as ioctx:
try:
with rbd.Image(ioctx, image_spec[2]) as image:
refs[TASK_REF_IMAGE_ID] = image.id()
snaps = list(image.list_snaps())
except rbd.ImageNotFound:
pass
task = self.find_task(refs)
if task:
return 0, task.to_json(), ''
if TASK_REF_IMAGE_ID not in refs:
raise rbd.ImageNotFound("Image {} does not exist".format(
self.format_image_spec(image_spec)), errno=errno.ENOENT)
if snaps:
raise rbd.ImageBusy("Image {} has snapshots".format(
self.format_image_spec(image_spec)), errno=errno.EBUSY)
return 0, self.add_task(ioctx,
"Removing image {}".format(
self.format_image_spec(image_spec)),
refs), ''
def queue_trash_remove(self, image_id_spec: str) -> Tuple[int, str, str]:
image_id_spec = self.extract_image_spec(image_id_spec)
authorize_request(self.module, image_id_spec[0], image_id_spec[1])
self.log.info("queue_trash_remove: {}".format(image_id_spec))
refs = {TASK_REF_ACTION: TASK_REF_ACTION_TRASH_REMOVE,
TASK_REF_POOL_NAME: image_id_spec[0],
TASK_REF_POOL_NAMESPACE: image_id_spec[1],
TASK_REF_IMAGE_ID: image_id_spec[2]}
task = self.find_task(refs)
if task:
return 0, task.to_json(), ''
# verify that image exists in trash
with self.open_ioctx(image_id_spec[:2]) as ioctx:
rbd.RBD().trash_get(ioctx, image_id_spec[2])
return 0, self.add_task(ioctx,
"Removing image {} from trash".format(
self.format_image_spec(image_id_spec)),
refs), ''
def get_migration_status(self,
ioctx: rados.Ioctx,
image_spec: ImageSpecT) -> Optional[MigrationStatusT]:
try:
return rbd.RBD().migration_status(ioctx, image_spec[2])
except (rbd.InvalidArgument, rbd.ImageNotFound):
return None
def validate_image_migrating(self,
image_spec: ImageSpecT,
migration_status: Optional[MigrationStatusT]) -> None:
if not migration_status:
raise rbd.InvalidArgument("Image {} is not migrating".format(
self.format_image_spec(image_spec)), errno=errno.EINVAL)
def resolve_pool_name(self, pool_id: str) -> str:
osd_map = self.module.get('osd_map')
for pool in osd_map['pools']:
if pool['pool'] == pool_id:
return pool['pool_name']
return '<unknown>'
def queue_migration_execute(self, image_spec: str) -> Tuple[int, str, str]:
image_spec = self.extract_image_spec(image_spec)
authorize_request(self.module, image_spec[0], image_spec[1])
self.log.info("queue_migration_execute: {}".format(image_spec))
refs = {TASK_REF_ACTION: TASK_REF_ACTION_MIGRATION_EXECUTE,
TASK_REF_POOL_NAME: image_spec[0],
TASK_REF_POOL_NAMESPACE: image_spec[1],
TASK_REF_IMAGE_NAME: image_spec[2]}
with self.open_ioctx(image_spec[:2]) as ioctx:
status = self.get_migration_status(ioctx, image_spec)
if status:
refs[TASK_REF_IMAGE_ID] = status['dest_image_id']
task = self.find_task(refs)
if task:
return 0, task.to_json(), ''
self.validate_image_migrating(image_spec, status)
assert status
if status['state'] not in [rbd.RBD_IMAGE_MIGRATION_STATE_PREPARED,
rbd.RBD_IMAGE_MIGRATION_STATE_EXECUTING]:
raise rbd.InvalidArgument("Image {} is not in ready state".format(
self.format_image_spec(image_spec)), errno=errno.EINVAL)
source_pool = self.resolve_pool_name(status['source_pool_id'])
dest_pool = self.resolve_pool_name(status['dest_pool_id'])
return 0, self.add_task(ioctx,
"Migrating image {} to {}".format(
self.format_image_spec((source_pool,
status['source_pool_namespace'],
status['source_image_name'])),
self.format_image_spec((dest_pool,
status['dest_pool_namespace'],
status['dest_image_name']))),
refs), ''
def queue_migration_commit(self, image_spec: str) -> Tuple[int, str, str]:
image_spec = self.extract_image_spec(image_spec)
authorize_request(self.module, image_spec[0], image_spec[1])
self.log.info("queue_migration_commit: {}".format(image_spec))
refs = {TASK_REF_ACTION: TASK_REF_ACTION_MIGRATION_COMMIT,
TASK_REF_POOL_NAME: image_spec[0],
TASK_REF_POOL_NAMESPACE: image_spec[1],
TASK_REF_IMAGE_NAME: image_spec[2]}
with self.open_ioctx(image_spec[:2]) as ioctx:
status = self.get_migration_status(ioctx, image_spec)
if status:
refs[TASK_REF_IMAGE_ID] = status['dest_image_id']
task = self.find_task(refs)
if task:
return 0, task.to_json(), ''
self.validate_image_migrating(image_spec, status)
assert status
if status['state'] != rbd.RBD_IMAGE_MIGRATION_STATE_EXECUTED:
raise rbd.InvalidArgument("Image {} has not completed migration".format(
self.format_image_spec(image_spec)), errno=errno.EINVAL)
return 0, self.add_task(ioctx,
"Committing image migration for {}".format(
self.format_image_spec(image_spec)),
refs), ''
def queue_migration_abort(self, image_spec: str) -> Tuple[int, str, str]:
image_spec = self.extract_image_spec(image_spec)
authorize_request(self.module, image_spec[0], image_spec[1])
self.log.info("queue_migration_abort: {}".format(image_spec))
refs = {TASK_REF_ACTION: TASK_REF_ACTION_MIGRATION_ABORT,
TASK_REF_POOL_NAME: image_spec[0],
TASK_REF_POOL_NAMESPACE: image_spec[1],
TASK_REF_IMAGE_NAME: image_spec[2]}
with self.open_ioctx(image_spec[:2]) as ioctx:
status = self.get_migration_status(ioctx, image_spec)
if status:
refs[TASK_REF_IMAGE_ID] = status['dest_image_id']
task = self.find_task(refs)
if task:
return 0, task.to_json(), ''
self.validate_image_migrating(image_spec, status)
return 0, self.add_task(ioctx,
"Aborting image migration for {}".format(
self.format_image_spec(image_spec)),
refs), ''
def task_cancel(self, task_id: str) -> Tuple[int, str, str]:
self.log.info("task_cancel: {}".format(task_id))
task = self.tasks_by_id.get(task_id)
if not task or not is_authorized(self.module,
task.refs[TASK_REF_POOL_NAME],
task.refs[TASK_REF_POOL_NAMESPACE]):
return -errno.ENOENT, '', "No such task {}".format(task_id)
task.cancel()
remove_in_memory = True
if self.in_progress_task and self.in_progress_task.task_id == task_id:
self.log.info("Attempting to cancel in-progress task: {}".format(str(self.in_progress_task)))
remove_in_memory = False
# complete any associated event in the progress module
self.complete_progress(task)
# remove from rbd_task omap
with self.open_ioctx((task.refs[TASK_REF_POOL_NAME],
task.refs[TASK_REF_POOL_NAMESPACE])) as ioctx:
self.remove_task(ioctx, task, remove_in_memory)
return 0, "", ""
def task_list(self, task_id: Optional[str]) -> Tuple[int, str, str]:
self.log.info("task_list: {}".format(task_id))
if task_id:
task = self.tasks_by_id.get(task_id)
if not task or not is_authorized(self.module,
task.refs[TASK_REF_POOL_NAME],
task.refs[TASK_REF_POOL_NAMESPACE]):
return -errno.ENOENT, '', "No such task {}".format(task_id)
return 0, json.dumps(task.to_dict(), indent=4, sort_keys=True), ""
else:
tasks = []
for sequence in sorted(self.tasks_by_sequence.keys()):
task = self.tasks_by_sequence[sequence]
if is_authorized(self.module,
task.refs[TASK_REF_POOL_NAME],
task.refs[TASK_REF_POOL_NAMESPACE]):
tasks.append(task.to_dict())
return 0, json.dumps(tasks, indent=4, sort_keys=True), ""
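if __name__ == "__main__":
    # Illustrative sketch only (not part of the upstream module): the queue_*
    # handlers above de-duplicate work by building a "refs" dict and returning
    # any task whose refs already match (see find_task()).  A minimal
    # standalone model of that idea:
    tasks_by_refs: dict = {}

    def queue(refs: dict) -> dict:
        key = tuple(sorted(refs.items()))
        return tasks_by_refs.setdefault(key, {"refs": refs,
                                               "sequence": len(tasks_by_refs) + 1})

    first = queue({"action": "migration commit", "pool_name": "rbd",
                   "image_name": "img1"})
    second = queue({"action": "migration commit", "pool_name": "rbd",
                    "image_name": "img1"})
    assert first is second  # identical refs -> the already-queued task is returned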
| 33,972 | 38.595571 | 111 |
py
|
null |
ceph-main/src/pybind/mgr/rbd_support/trash_purge_schedule.py
|
import json
import rados
import rbd
import traceback
from datetime import datetime
from threading import Condition, Lock, Thread
from typing import Any, Dict, List, Optional, Tuple
from .common import get_rbd_pools
from .schedule import LevelSpec, Schedules
class TrashPurgeScheduleHandler:
MODULE_OPTION_NAME = "trash_purge_schedule"
SCHEDULE_OID = "rbd_trash_purge_schedule"
REFRESH_DELAY_SECONDS = 60.0
lock = Lock()
condition = Condition(lock)
def __init__(self, module: Any) -> None:
self.module = module
self.log = module.log
self.last_refresh_pools = datetime(1970, 1, 1)
self.stop_thread = False
self.thread = Thread(target=self.run)
def setup(self) -> None:
self.init_schedule_queue()
self.thread.start()
def shutdown(self) -> None:
self.log.info("TrashPurgeScheduleHandler: shutting down")
self.stop_thread = True
if self.thread.is_alive():
self.log.debug("TrashPurgeScheduleHandler: joining thread")
self.thread.join()
self.log.info("TrashPurgeScheduleHandler: shut down")
def run(self) -> None:
try:
self.log.info("TrashPurgeScheduleHandler: starting")
while not self.stop_thread:
refresh_delay = self.refresh_pools()
with self.lock:
(ns_spec, wait_time) = self.dequeue()
if not ns_spec:
self.condition.wait(min(wait_time, refresh_delay))
continue
pool_id, namespace = ns_spec
self.trash_purge(pool_id, namespace)
with self.lock:
self.enqueue(datetime.now(), pool_id, namespace)
except (rados.ConnectionShutdown, rbd.ConnectionShutdown):
self.log.exception("TrashPurgeScheduleHandler: client blocklisted")
self.module.client_blocklisted.set()
except Exception as ex:
self.log.fatal("Fatal runtime error: {}\n{}".format(
ex, traceback.format_exc()))
def trash_purge(self, pool_id: str, namespace: str) -> None:
try:
with self.module.rados.open_ioctx2(int(pool_id)) as ioctx:
ioctx.set_namespace(namespace)
rbd.RBD().trash_purge(ioctx, datetime.now())
except (rados.ConnectionShutdown, rbd.ConnectionShutdown):
raise
except Exception as e:
self.log.error("exception when purging {}/{}: {}".format(
pool_id, namespace, e))
def init_schedule_queue(self) -> None:
self.queue: Dict[str, List[Tuple[str, str]]] = {}
# pool_id => {namespace => pool_name}
self.pools: Dict[str, Dict[str, str]] = {}
self.schedules = Schedules(self)
self.refresh_pools()
self.log.debug("TrashPurgeScheduleHandler: queue is initialized")
def load_schedules(self) -> None:
self.log.info("TrashPurgeScheduleHandler: load_schedules")
self.schedules.load()
def refresh_pools(self) -> float:
elapsed = (datetime.now() - self.last_refresh_pools).total_seconds()
if elapsed < self.REFRESH_DELAY_SECONDS:
return self.REFRESH_DELAY_SECONDS - elapsed
self.log.debug("TrashPurgeScheduleHandler: refresh_pools")
with self.lock:
self.load_schedules()
if not self.schedules:
self.log.debug("TrashPurgeScheduleHandler: no schedules")
self.pools = {}
self.queue = {}
self.last_refresh_pools = datetime.now()
return self.REFRESH_DELAY_SECONDS
pools: Dict[str, Dict[str, str]] = {}
for pool_id, pool_name in get_rbd_pools(self.module).items():
if not self.schedules.intersects(
LevelSpec.from_pool_spec(pool_id, pool_name)):
continue
with self.module.rados.open_ioctx2(int(pool_id)) as ioctx:
self.load_pool(ioctx, pools)
with self.lock:
self.refresh_queue(pools)
self.pools = pools
self.last_refresh_pools = datetime.now()
return self.REFRESH_DELAY_SECONDS
def load_pool(self, ioctx: rados.Ioctx, pools: Dict[str, Dict[str, str]]) -> None:
pool_id = str(ioctx.get_pool_id())
pool_name = ioctx.get_pool_name()
pools[pool_id] = {}
pool_namespaces = ['']
self.log.debug("load_pool: {}".format(pool_name))
try:
pool_namespaces += rbd.RBD().namespace_list(ioctx)
except rbd.OperationNotSupported:
self.log.debug("namespaces not supported")
except rbd.ConnectionShutdown:
raise
except Exception as e:
self.log.error("exception when scanning pool {}: {}".format(
pool_name, e))
for namespace in pool_namespaces:
pools[pool_id][namespace] = pool_name
def rebuild_queue(self) -> None:
now = datetime.now()
        # keep entries that are already due; future slots are dropped and rebuilt below
now_string = datetime.strftime(now, "%Y-%m-%d %H:%M:00")
for schedule_time in list(self.queue):
if schedule_time > now_string:
del self.queue[schedule_time]
if not self.schedules:
return
for pool_id, namespaces in self.pools.items():
for namespace in namespaces:
self.enqueue(now, pool_id, namespace)
self.condition.notify()
def refresh_queue(self, current_pools: Dict[str, Dict[str, str]]) -> None:
now = datetime.now()
for pool_id, namespaces in self.pools.items():
for namespace in namespaces:
if pool_id not in current_pools or \
namespace not in current_pools[pool_id]:
self.remove_from_queue(pool_id, namespace)
for pool_id, namespaces in current_pools.items():
for namespace in namespaces:
if pool_id not in self.pools or \
namespace not in self.pools[pool_id]:
self.enqueue(now, pool_id, namespace)
self.condition.notify()
def enqueue(self, now: datetime, pool_id: str, namespace: str) -> None:
schedule = self.schedules.find(pool_id, namespace)
if not schedule:
self.log.debug(
"TrashPurgeScheduleHandler: no schedule for {}/{}".format(
pool_id, namespace))
return
schedule_time = schedule.next_run(now)
if schedule_time not in self.queue:
self.queue[schedule_time] = []
self.log.debug(
"TrashPurgeScheduleHandler: scheduling {}/{} at {}".format(
pool_id, namespace, schedule_time))
ns_spec = (pool_id, namespace)
if ns_spec not in self.queue[schedule_time]:
self.queue[schedule_time].append((pool_id, namespace))
def dequeue(self) -> Tuple[Optional[Tuple[str, str]], float]:
if not self.queue:
return None, 1000.0
now = datetime.now()
schedule_time = sorted(self.queue)[0]
if datetime.strftime(now, "%Y-%m-%d %H:%M:%S") < schedule_time:
wait_time = (datetime.strptime(schedule_time,
"%Y-%m-%d %H:%M:%S") - now)
return None, wait_time.total_seconds()
namespaces = self.queue[schedule_time]
namespace = namespaces.pop(0)
if not namespaces:
del self.queue[schedule_time]
return namespace, 0.0
def remove_from_queue(self, pool_id: str, namespace: str) -> None:
self.log.debug(
"TrashPurgeScheduleHandler: descheduling {}/{}".format(
pool_id, namespace))
empty_slots = []
for schedule_time, namespaces in self.queue.items():
if (pool_id, namespace) in namespaces:
namespaces.remove((pool_id, namespace))
if not namespaces:
empty_slots.append(schedule_time)
for schedule_time in empty_slots:
del self.queue[schedule_time]
def add_schedule(self,
level_spec: LevelSpec,
interval: str,
start_time: Optional[str]) -> Tuple[int, str, str]:
self.log.debug(
"TrashPurgeScheduleHandler: add_schedule: level_spec={}, interval={}, start_time={}".format(
level_spec.name, interval, start_time))
# TODO: optimize to rebuild only affected part of the queue
with self.lock:
self.schedules.add(level_spec, interval, start_time)
self.rebuild_queue()
return 0, "", ""
def remove_schedule(self,
level_spec: LevelSpec,
interval: Optional[str],
start_time: Optional[str]) -> Tuple[int, str, str]:
self.log.debug(
"TrashPurgeScheduleHandler: remove_schedule: level_spec={}, interval={}, start_time={}".format(
level_spec.name, interval, start_time))
# TODO: optimize to rebuild only affected part of the queue
with self.lock:
self.schedules.remove(level_spec, interval, start_time)
self.rebuild_queue()
return 0, "", ""
def list(self, level_spec: LevelSpec) -> Tuple[int, str, str]:
self.log.debug(
"TrashPurgeScheduleHandler: list: level_spec={}".format(
level_spec.name))
with self.lock:
result = self.schedules.to_list(level_spec)
return 0, json.dumps(result, indent=4, sort_keys=True), ""
def status(self, level_spec: LevelSpec) -> Tuple[int, str, str]:
self.log.debug(
"TrashPurgeScheduleHandler: status: level_spec={}".format(
level_spec.name))
scheduled = []
with self.lock:
for schedule_time in sorted(self.queue):
for pool_id, namespace in self.queue[schedule_time]:
if not level_spec.matches(pool_id, namespace):
continue
pool_name = self.pools[pool_id][namespace]
scheduled.append({
'schedule_time': schedule_time,
'pool_id': pool_id,
'pool_name': pool_name,
'namespace': namespace
})
return 0, json.dumps({'scheduled': scheduled}, indent=4,
sort_keys=True), ""
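if __name__ == "__main__":
    # Illustrative sketch only (not part of the handler): a minimal standalone
    # model of the timestamp-keyed queue used above, showing the enqueue/dequeue
    # contract -- dequeue() returns (None, seconds_to_wait) until a slot is due.
    queue: Dict[str, List[Tuple[str, str]]] = {}

    def enqueue(when: datetime, pool_id: str, namespace: str) -> None:
        slot = when.strftime("%Y-%m-%d %H:%M:%S")
        queue.setdefault(slot, [])
        if (pool_id, namespace) not in queue[slot]:
            queue[slot].append((pool_id, namespace))

    def dequeue(now: datetime) -> Tuple[Optional[Tuple[str, str]], float]:
        if not queue:
            return None, 1000.0
        slot = sorted(queue)[0]
        if now.strftime("%Y-%m-%d %H:%M:%S") < slot:
            wait = datetime.strptime(slot, "%Y-%m-%d %H:%M:%S") - now
            return None, wait.total_seconds()
        entries = queue[slot]
        entry = entries.pop(0)
        if not entries:
            del queue[slot]
        return entry, 0.0

    enqueue(datetime(2030, 1, 1, 0, 0, 0), "1", "")
    assert dequeue(datetime(2029, 12, 31, 23, 59, 0)) == (None, 60.0)
    assert dequeue(datetime(2030, 1, 1, 0, 0, 0)) == (("1", ""), 0.0)
    assert not queue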
| 10,652 | 36.510563 | 107 |
py
|
null |
ceph-main/src/pybind/mgr/restful/__init__.py
|
from .module import Module
| 27 | 13 | 26 |
py
|
null |
ceph-main/src/pybind/mgr/restful/common.py
|
# List of valid osd flags
OSD_FLAGS = [
'pause', 'noup', 'nodown', 'noout', 'noin', 'nobackfill',
'norecover', 'noscrub', 'nodeep-scrub',
]
# Implemented osd commands
OSD_IMPLEMENTED_COMMANDS = [
'scrub', 'deep-scrub', 'repair'
]
# Valid values for the 'var' argument to 'ceph osd pool set'
POOL_PROPERTIES_1 = [
'size', 'min_size', 'pg_num',
'crush_rule', 'hashpspool',
]
POOL_PROPERTIES_2 = [
'pgp_num'
]
POOL_PROPERTIES = POOL_PROPERTIES_1 + POOL_PROPERTIES_2
# Valid values for the 'ceph osd pool set-quota' command
POOL_QUOTA_PROPERTIES = [
('quota_max_bytes', 'max_bytes'),
('quota_max_objects', 'max_objects'),
]
POOL_ARGS = POOL_PROPERTIES + [x for x,_ in POOL_QUOTA_PROPERTIES]
# Transform command to a human readable form
def humanify_command(command):
out = [command['prefix']]
for arg, val in command.items():
if arg != 'prefix':
out.append("%s=%s" % (str(arg), str(val)))
return " ".join(out)
def invalid_pool_args(args):
invalid = []
for arg in args:
if arg not in POOL_ARGS:
invalid.append(arg)
return invalid
def pool_update_commands(pool_name, args):
commands = [[], []]
    # Keep pgp_num in step with pg_num whenever pg_num is being reset
if 'pg_num' in args and 'pgp_num' not in args:
args['pgp_num'] = args['pg_num']
# Run the first pool set and quota properties in parallel
for var in POOL_PROPERTIES_1:
if var in args:
commands[0].append({
'prefix': 'osd pool set',
'pool': pool_name,
'var': var,
'val': args[var],
})
for (var, field) in POOL_QUOTA_PROPERTIES:
if var in args:
commands[0].append({
'prefix': 'osd pool set-quota',
'pool': pool_name,
'field': field,
'val': str(args[var]),
})
# The second pool set properties need to be run after the first wave
for var in POOL_PROPERTIES_2:
if var in args:
commands[1].append({
'prefix': 'osd pool set',
'pool': pool_name,
'var': var,
'val': args[var],
})
return commands
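# Worked example (illustrative): pool_update_commands('rbd', {'size': 3, 'pg_num': 128})
# first injects pgp_num=128, then returns two waves:
#   [[{'prefix': 'osd pool set', 'pool': 'rbd', 'var': 'size', 'val': 3},
#     {'prefix': 'osd pool set', 'pool': 'rbd', 'var': 'pg_num', 'val': 128}],
#    [{'prefix': 'osd pool set', 'pool': 'rbd', 'var': 'pgp_num', 'val': 128}]]
# The first inner list is run in parallel; the second runs only after it completes.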
def crush_rule_osds(node_buckets, rule):
nodes_by_id = dict((b['id'], b) for b in node_buckets)
def _gather_leaf_ids(node_id):
if node_id >= 0:
return set([node_id])
result = set()
for item in nodes_by_id[node_id]['items']:
result |= _gather_leaf_ids(item['id'])
return result
def _gather_descendent_ids(node, typ):
result = set()
for item in node['items']:
if item['id'] >= 0:
if typ == "osd":
result.add(item['id'])
else:
child_node = nodes_by_id[item['id']]
if child_node['type_name'] == typ:
result.add(child_node['id'])
elif 'items' in child_node:
result |= _gather_descendent_ids(child_node, typ)
return result
def _gather_osds(root, steps):
if root['id'] >= 0:
return set([root['id']])
osds = set()
step = steps[0]
if step['op'] == 'choose_firstn':
# Choose all descendents of the current node of type 'type'
descendent_ids = _gather_descendent_ids(root, step['type'])
for node_id in descendent_ids:
if node_id >= 0:
osds.add(node_id)
else:
osds |= _gather_osds(nodes_by_id[node_id], steps[1:])
elif step['op'] == 'chooseleaf_firstn':
# Choose all descendents of the current node of type 'type',
# and select all leaves beneath those
descendent_ids = _gather_descendent_ids(root, step['type'])
for node_id in descendent_ids:
if node_id >= 0:
osds.add(node_id)
else:
for desc_node in nodes_by_id[node_id]['items']:
# Short circuit another iteration to find the emit
# and assume anything we've done a chooseleaf on
# is going to be part of the selected set of osds
osds |= _gather_leaf_ids(desc_node['id'])
elif step['op'] == 'emit':
if root['id'] >= 0:
                osds.add(root['id'])
return osds
osds = set()
for i, step in enumerate(rule['steps']):
if step['op'] == 'take':
osds |= _gather_osds(nodes_by_id[step['item']], rule['steps'][i + 1:])
return osds
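if __name__ == "__main__":
    # Illustrative sketch only (not part of the upstream module): a tiny CRUSH
    # tree exercising crush_rule_osds() -- one root (-1) with two hosts (-2, -3)
    # holding OSDs 0, 1 and 2, and a rule that collects every OSD beneath each host.
    buckets = [
        {'id': -1, 'type_name': 'root', 'items': [{'id': -2}, {'id': -3}]},
        {'id': -2, 'type_name': 'host', 'items': [{'id': 0}, {'id': 1}]},
        {'id': -3, 'type_name': 'host', 'items': [{'id': 2}]},
    ]
    rule = {'steps': [
        {'op': 'take', 'item': -1},
        {'op': 'chooseleaf_firstn', 'type': 'host'},
        {'op': 'emit'},
    ]}
    assert crush_rule_osds(buckets, rule) == {0, 1, 2}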
| 4,740 | 29.197452 | 82 |
py
|
null |
ceph-main/src/pybind/mgr/restful/context.py
|
# Global instance to share
instance = None
| 43 | 13.666667 | 26 |
py
|
null |
ceph-main/src/pybind/mgr/restful/decorators.py
|
from pecan import request, response
from base64 import b64decode
from functools import wraps
import traceback
from . import context
# Handle authorization
def auth(f):
@wraps(f)
def decorated(*args, **kwargs):
if not context.instance.enable_auth:
return f(*args, **kwargs)
if not request.authorization:
response.status = 401
response.headers['WWW-Authenticate'] = 'Basic realm="Login Required"'
return {'message': 'auth: No HTTP username/password'}
username, password = b64decode(request.authorization[1]).decode('utf-8').split(':')
# Check that the username exists
if username not in context.instance.keys:
response.status = 401
response.headers['WWW-Authenticate'] = 'Basic realm="Login Required"'
return {'message': 'auth: No such user'}
# Check the password
if context.instance.keys[username] != password:
response.status = 401
response.headers['WWW-Authenticate'] = 'Basic realm="Login Required"'
return {'message': 'auth: Incorrect password'}
return f(*args, **kwargs)
return decorated
# Helper function to lock the function
def lock(f):
@wraps(f)
def decorated(*args, **kwargs):
with context.instance.requests_lock:
return f(*args, **kwargs)
return decorated
# Support ?page=N argument
def paginate(f):
@wraps(f)
def decorated(*args, **kwargs):
_out = f(*args, **kwargs)
# Do not modify anything without a specific request
        if 'page' not in kwargs:
return _out
# A pass-through for errors, etc
if not isinstance(_out, list):
return _out
# Parse the page argument
_page = kwargs['page']
try:
_page = int(_page)
except ValueError:
response.status = 500
return {'message': 'The requested page is not an integer'}
        # Shift _page by one so that 0 selects the first page and -1 selects the last
_page += 1
if _page > 0:
_page *= 100
else:
_page = len(_out) - (_page*100)
return _out[_page - 100: _page]
return decorated
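# Worked example (illustrative): for a listing endpoint that returns 250 items,
# ?page=0 yields items [0:100], ?page=1 yields items [100:200], and ?page=-1
# yields the final 100 items; a non-integer page produces the HTTP 500 above.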
| 2,270 | 26.695122 | 91 |
py
|
null |
ceph-main/src/pybind/mgr/restful/hooks.py
|
from pecan.hooks import PecanHook
import traceback
from . import context
class ErrorHook(PecanHook):
def on_error(self, stat, exc):
context.instance.log.error(str(traceback.format_exc()))
| 204 | 17.636364 | 63 |
py
|
null |
ceph-main/src/pybind/mgr/restful/module.py
|
"""
A RESTful API for Ceph
"""
import os
import json
import time
import errno
import inspect
import tempfile
import threading
import traceback
import socket
import fcntl
from . import common
from . import context
from uuid import uuid4
from pecan import jsonify, make_app
from OpenSSL import crypto
from pecan.rest import RestController
from werkzeug.serving import make_server, make_ssl_devcert
from .hooks import ErrorHook
from mgr_module import MgrModule, CommandResult, NotifyType
from mgr_util import build_url
class CannotServe(Exception):
pass
class CommandsRequest(object):
"""
This class handles parallel as well as sequential execution of
commands. The class accept a list of iterables that should be
executed sequentially. Each iterable can contain several commands
that can be executed in parallel.
Example:
[[c1,c2],[c3,c4]]
- run c1 and c2 in parallel
- wait for them to finish
- run c3 and c4 in parallel
- wait for them to finish
"""
def __init__(self, commands_arrays):
self.id = str(id(self))
# Filter out empty sub-requests
commands_arrays = [x for x in commands_arrays
if len(x) != 0]
self.running = []
self.waiting = commands_arrays[1:]
self.finished = []
self.failed = []
self.lock = threading.RLock()
        if not commands_arrays:
# Nothing to run
return
# Process first iteration of commands_arrays in parallel
results = self.run(commands_arrays[0])
self.running.extend(results)
def run(self, commands):
"""
        Execute the given list of commands in parallel and return the list
        of command results.
"""
# Gather the results (in parallel)
results = []
for index, command in enumerate(commands):
tag = '%s:%s:%d' % (__name__, self.id, index)
# Store the result
result = CommandResult(tag)
result.command = common.humanify_command(command)
results.append(result)
# Run the command
context.instance.send_command(result, 'mon', '', json.dumps(command), tag)
return results
def next(self):
with self.lock:
if not self.waiting:
# Nothing to run
return
# Run a next iteration of commands
commands = self.waiting[0]
self.waiting = self.waiting[1:]
self.running.extend(self.run(commands))
def finish(self, tag):
with self.lock:
for index in range(len(self.running)):
if self.running[index].tag == tag:
if self.running[index].r == 0:
self.finished.append(self.running.pop(index))
else:
self.failed.append(self.running.pop(index))
return True
# No such tag found
return False
def is_running(self, tag):
for result in self.running:
if result.tag == tag:
return True
return False
def is_ready(self):
with self.lock:
return not self.running and self.waiting
def is_waiting(self):
return bool(self.waiting)
def is_finished(self):
with self.lock:
return not self.running and not self.waiting
def has_failed(self):
return bool(self.failed)
def get_state(self):
with self.lock:
if not self.is_finished():
return "pending"
if self.has_failed():
return "failed"
return "success"
def __json__(self):
return {
'id': self.id,
'running': [
{
'command': x.command,
'outs': x.outs,
'outb': x.outb,
} for x in self.running
],
'finished': [
{
'command': x.command,
'outs': x.outs,
'outb': x.outb,
} for x in self.finished
],
'waiting': [
[common.humanify_command(y) for y in x]
for x in self.waiting
],
'failed': [
{
'command': x.command,
'outs': x.outs,
'outb': x.outb,
} for x in self.failed
],
'is_waiting': self.is_waiting(),
'is_finished': self.is_finished(),
'has_failed': self.has_failed(),
'state': self.get_state(),
}
class Module(MgrModule):
MODULE_OPTIONS = [
{'name': 'server_addr'},
{'name': 'server_port'},
{'name': 'key_file'},
{'name': 'enable_auth', 'type': 'bool', 'default': True},
]
COMMANDS = [
{
"cmd": "restful create-key name=key_name,type=CephString",
"desc": "Create an API key with this name",
"perm": "rw"
},
{
"cmd": "restful delete-key name=key_name,type=CephString",
"desc": "Delete an API key with this name",
"perm": "rw"
},
{
"cmd": "restful list-keys",
"desc": "List all API keys",
"perm": "r"
},
{
"cmd": "restful create-self-signed-cert",
"desc": "Create localized self signed certificate",
"perm": "rw"
},
{
"cmd": "restful restart",
"desc": "Restart API server",
"perm": "rw"
},
]
NOTIFY_TYPES = [NotifyType.command]
def __init__(self, *args, **kwargs):
super(Module, self).__init__(*args, **kwargs)
context.instance = self
self.requests = []
self.requests_lock = threading.RLock()
self.keys = {}
self.enable_auth = True
self.server = None
self.stop_server = False
self.serve_event = threading.Event()
def serve(self):
self.log.debug('serve enter')
while not self.stop_server:
try:
self._serve()
self.server.socket.close()
except CannotServe as cs:
self.log.warning("server not running: %s", cs)
except:
self.log.error(str(traceback.format_exc()))
# Wait and clear the threading event
self.serve_event.wait()
self.serve_event.clear()
self.log.debug('serve exit')
def refresh_keys(self):
self.keys = {}
rawkeys = self.get_store_prefix('keys/') or {}
for k, v in rawkeys.items():
self.keys[k[5:]] = v # strip of keys/ prefix
def _serve(self):
# Load stored authentication keys
self.refresh_keys()
jsonify._instance = jsonify.GenericJSON(
sort_keys=True,
indent=4,
separators=(',', ': '),
)
server_addr = self.get_localized_module_option('server_addr', '::')
if server_addr is None:
raise CannotServe('no server_addr configured; try "ceph config-key set mgr/restful/server_addr <ip>"')
server_port = int(self.get_localized_module_option('server_port', '8003'))
self.log.info('server_addr: %s server_port: %d',
server_addr, server_port)
cert = self.get_localized_store("crt")
if cert is not None:
cert_tmp = tempfile.NamedTemporaryFile()
cert_tmp.write(cert.encode('utf-8'))
cert_tmp.flush()
cert_fname = cert_tmp.name
else:
cert_fname = self.get_localized_store('crt_file')
pkey = self.get_localized_store("key")
if pkey is not None:
pkey_tmp = tempfile.NamedTemporaryFile()
pkey_tmp.write(pkey.encode('utf-8'))
pkey_tmp.flush()
pkey_fname = pkey_tmp.name
else:
pkey_fname = self.get_localized_module_option('key_file')
self.enable_auth = self.get_localized_module_option('enable_auth', True)
if not cert_fname or not pkey_fname:
raise CannotServe('no certificate configured')
if not os.path.isfile(cert_fname):
raise CannotServe('certificate %s does not exist' % cert_fname)
if not os.path.isfile(pkey_fname):
raise CannotServe('private key %s does not exist' % pkey_fname)
# Publish the URI that others may use to access the service we're
# about to start serving
addr = self.get_mgr_ip() if server_addr == "::" else server_addr
self.set_uri(build_url(scheme='https', host=addr, port=server_port, path='/'))
# Create the HTTPS werkzeug server serving pecan app
self.server = make_server(
host=server_addr,
port=server_port,
app=make_app(
root='restful.api.Root',
hooks = [ErrorHook()], # use a callable if pecan >= 0.3.2
),
ssl_context=(cert_fname, pkey_fname),
)
sock_fd_flag = fcntl.fcntl(self.server.socket.fileno(), fcntl.F_GETFD)
if not (sock_fd_flag & fcntl.FD_CLOEXEC):
self.log.debug("set server socket close-on-exec")
fcntl.fcntl(self.server.socket.fileno(), fcntl.F_SETFD, sock_fd_flag | fcntl.FD_CLOEXEC)
if self.stop_server:
self.log.debug('made server, but stop flag set')
else:
self.log.debug('made server, serving forever')
self.server.serve_forever()
def shutdown(self):
self.log.debug('shutdown enter')
try:
self.stop_server = True
if self.server:
self.log.debug('calling server.shutdown')
self.server.shutdown()
self.log.debug('called server.shutdown')
self.serve_event.set()
except:
self.log.error(str(traceback.format_exc()))
raise
self.log.debug('shutdown exit')
def restart(self):
try:
if self.server:
self.server.shutdown()
self.serve_event.set()
except:
self.log.error(str(traceback.format_exc()))
def notify(self, notify_type: NotifyType, tag: str):
try:
self._notify(notify_type, tag)
except:
self.log.error(str(traceback.format_exc()))
def _notify(self, notify_type: NotifyType, tag):
if notify_type != NotifyType.command:
self.log.debug("Unhandled notification type '%s'", notify_type)
return
# we can safely skip all the sequential commands
if tag == 'seq':
return
try:
with self.requests_lock:
request = next(x for x in self.requests if x.is_running(tag))
request.finish(tag)
if request.is_ready():
request.next()
except StopIteration:
# the command was not issued by me
pass
def config_notify(self):
self.enable_auth = self.get_localized_module_option('enable_auth', True)
def create_self_signed_cert(self):
# create a key pair
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)
# create a self-signed cert
cert = crypto.X509()
cert.get_subject().O = "IT"
cert.get_subject().CN = "ceph-restful"
cert.set_serial_number(int(uuid4()))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10*365*24*60*60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(pkey)
cert.sign(pkey, 'sha512')
return (
crypto.dump_certificate(crypto.FILETYPE_PEM, cert),
crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)
)
def handle_command(self, inbuf, command):
self.log.warning("Handling command: '%s'" % str(command))
if command['prefix'] == "restful create-key":
if command['key_name'] in self.keys:
return 0, self.keys[command['key_name']], ""
else:
key = str(uuid4())
self.keys[command['key_name']] = key
self.set_store('keys/' + command['key_name'], key)
return (
0,
self.keys[command['key_name']],
"",
)
elif command['prefix'] == "restful delete-key":
if command['key_name'] in self.keys:
del self.keys[command['key_name']]
self.set_store('keys/' + command['key_name'], None)
return (
0,
"",
"",
)
elif command['prefix'] == "restful list-keys":
self.refresh_keys()
return (
0,
json.dumps(self.keys, indent=4, sort_keys=True),
"",
)
elif command['prefix'] == "restful create-self-signed-cert":
cert, pkey = self.create_self_signed_cert()
self.set_store(self.get_mgr_id() + '/crt', cert.decode('utf-8'))
self.set_store(self.get_mgr_id() + '/key', pkey.decode('utf-8'))
self.restart()
return (
0,
"Restarting RESTful API server...",
""
)
elif command['prefix'] == 'restful restart':
            self.restart()
return (
0,
"Restarting RESTful API server...",
""
)
else:
return (
-errno.EINVAL,
"",
"Command not found '{0}'".format(command['prefix'])
)
def get_doc_api(self, root, prefix=''):
doc = {}
for _obj in dir(root):
obj = getattr(root, _obj)
if isinstance(obj, RestController):
doc.update(self.get_doc_api(obj, prefix + '/' + _obj))
if getattr(root, '_lookup', None) and isinstance(root._lookup('0')[0], RestController):
doc.update(self.get_doc_api(root._lookup('0')[0], prefix + '/<arg>'))
prefix = prefix or '/'
doc[prefix] = {}
for method in 'get', 'post', 'patch', 'delete':
if getattr(root, method, None):
doc[prefix][method.upper()] = inspect.getdoc(getattr(root, method)).split('\n')
if len(doc[prefix]) == 0:
del doc[prefix]
return doc
def get_mons(self):
mon_map_mons = self.get('mon_map')['mons']
mon_status = json.loads(self.get('mon_status')['json'])
# Add more information
for mon in mon_map_mons:
mon['in_quorum'] = mon['rank'] in mon_status['quorum']
mon['server'] = self.get_metadata("mon", mon['name'])['hostname']
mon['leader'] = mon['rank'] == mon_status['quorum'][0]
return mon_map_mons
def get_osd_pools(self):
osds = dict(map(lambda x: (x['osd'], []), self.get('osd_map')['osds']))
pools = dict(map(lambda x: (x['pool'], x), self.get('osd_map')['pools']))
crush = self.get('osd_map_crush')
crush_rules = crush['rules']
osds_by_pool = {}
for pool_id, pool in pools.items():
pool_osds = None
for rule in [r for r in crush_rules if r['rule_id'] == pool['crush_rule']]:
pool_osds = common.crush_rule_osds(crush['buckets'], rule)
osds_by_pool[pool_id] = pool_osds
for pool_id in pools.keys():
for in_pool_id in osds_by_pool[pool_id]:
osds[in_pool_id].append(pool_id)
return osds
def get_osds(self, pool_id=None, ids=None):
# Get data
osd_map = self.get('osd_map')
osd_metadata = self.get('osd_metadata')
# Update the data with the additional info from the osd map
osds = osd_map['osds']
# Filter by osd ids
if ids is not None:
osds = [x for x in osds if str(x['osd']) in ids]
# Get list of pools per osd node
pools_map = self.get_osd_pools()
# map osd IDs to reweight
reweight_map = dict([
(x.get('id'), x.get('reweight', None))
for x in self.get('osd_map_tree')['nodes']
])
# Build OSD data objects
for osd in osds:
osd['pools'] = pools_map[osd['osd']]
osd['server'] = osd_metadata.get(str(osd['osd']), {}).get('hostname', None)
osd['reweight'] = reweight_map.get(osd['osd'], 0.0)
if osd['up']:
osd['valid_commands'] = common.OSD_IMPLEMENTED_COMMANDS
else:
osd['valid_commands'] = []
# Filter by pool
if pool_id:
pool_id = int(pool_id)
osds = [x for x in osds if pool_id in x['pools']]
return osds
def get_osd_by_id(self, osd_id):
osd = [x for x in self.get('osd_map')['osds']
if x['osd'] == osd_id]
if len(osd) != 1:
return None
return osd[0]
def get_pool_by_id(self, pool_id):
pool = [x for x in self.get('osd_map')['pools']
if x['pool'] == pool_id]
if len(pool) != 1:
return None
return pool[0]
def submit_request(self, _request, **kwargs):
with self.requests_lock:
request = CommandsRequest(_request)
self.requests.append(request)
if kwargs.get('wait', 0):
while not request.is_finished():
time.sleep(0.001)
return request
def run_command(self, command):
# tag with 'seq' so that we can ignore these in notify function
result = CommandResult('seq')
self.send_command(result, 'mon', '', json.dumps(command), 'seq')
return result.wait()
| 18,203 | 28.648208 | 114 |
py
|
null |
ceph-main/src/pybind/mgr/restful/api/__init__.py
|
from pecan import expose
from pecan.rest import RestController
from .config import Config
from .crush import Crush
from .doc import Doc
from .mon import Mon
from .osd import Osd
from .pool import Pool
from .perf import Perf
from .request import Request
from .server import Server
class Root(RestController):
config = Config()
crush = Crush()
doc = Doc()
mon = Mon()
osd = Osd()
perf = Perf()
pool = Pool()
request = Request()
server = Server()
@expose(template='json')
def get(self, **kwargs):
"""
Show the basic information for the REST API
This includes values like api version or auth method
"""
return {
'api_version': 1,
'auth':
'Use "ceph restful create-key <key>" to create a key pair, '
'pass it as HTTP Basic auth to authenticate',
'doc': 'See /doc endpoint',
'info': "Ceph Manager RESTful API server",
}
| 990 | 23.775 | 76 |
py
|
null |
ceph-main/src/pybind/mgr/restful/api/config.py
|
from pecan import expose, request
from pecan.rest import RestController
from restful import common, context
from restful.decorators import auth
class ConfigOsd(RestController):
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show OSD configuration options
"""
flags = context.instance.get("osd_map")['flags']
# pause is a valid osd config command that sets pauserd,pausewr
flags = flags.replace('pauserd,pausewr', 'pause')
return flags.split(',')
@expose(template='json')
@auth
def patch(self, **kwargs):
"""
Modify OSD configuration options
"""
args = request.json
commands = []
valid_flags = set(args.keys()) & set(common.OSD_FLAGS)
invalid_flags = list(set(args.keys()) - valid_flags)
if invalid_flags:
context.instance.log.warning("%s not valid to set/unset", invalid_flags)
for flag in list(valid_flags):
if args[flag]:
mode = 'set'
else:
mode = 'unset'
commands.append({
'prefix': 'osd ' + mode,
'key': flag,
})
return context.instance.submit_request([commands], **kwargs)
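    # Illustrative example: PATCH /config/osd with body
    #   {"noout": true, "pause": false}
    # submits a single parallel batch containing
    #   {'prefix': 'osd set', 'key': 'noout'} and {'prefix': 'osd unset', 'key': 'pause'};
    # flags outside OSD_FLAGS are only logged and ignored.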
class ConfigClusterKey(RestController):
def __init__(self, key):
self.key = key
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show specific configuration option
"""
return context.instance.get("config").get(self.key, None)
class ConfigCluster(RestController):
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show all cluster configuration options
"""
return context.instance.get("config")
@expose()
def _lookup(self, key, *remainder):
return ConfigClusterKey(key), remainder
class Config(RestController):
cluster = ConfigCluster()
osd = ConfigOsd()
| 1,999 | 21.988506 | 84 |
py
|
null |
ceph-main/src/pybind/mgr/restful/api/crush.py
|
from pecan import expose
from pecan.rest import RestController
from restful import common, context
from restful.decorators import auth
class CrushRule(RestController):
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show crush rules
"""
crush = context.instance.get('osd_map_crush')
rules = crush['rules']
for rule in rules:
rule['osd_count'] = len(common.crush_rule_osds(crush['buckets'], rule))
return rules
class Crush(RestController):
rule = CrushRule()
| 561 | 20.615385 | 83 |
py
|
null |
ceph-main/src/pybind/mgr/restful/api/doc.py
|
from pecan import expose
from pecan.rest import RestController
from restful import context
import restful
class Doc(RestController):
@expose(template='json')
def get(self, **kwargs):
"""
Show documentation information
"""
return context.instance.get_doc_api(restful.api.Root)
| 320 | 19.0625 | 61 |
py
|
null |
ceph-main/src/pybind/mgr/restful/api/mon.py
|
from pecan import expose, response
from pecan.rest import RestController
from restful import context
from restful.decorators import auth
class MonName(RestController):
def __init__(self, name):
self.name = name
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show the information for the monitor name
"""
mon = [x for x in context.instance.get_mons()
if x['name'] == self.name]
if len(mon) != 1:
response.status = 500
return {'message': 'Failed to identify the monitor node "{}"'.format(self.name)}
return mon[0]
class Mon(RestController):
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show the information for all the monitors
"""
return context.instance.get_mons()
@expose()
def _lookup(self, name, *remainder):
return MonName(name), remainder
| 953 | 22.268293 | 92 |
py
|
null |
ceph-main/src/pybind/mgr/restful/api/osd.py
|
from pecan import expose, request, response
from pecan.rest import RestController
from restful import common, context
from restful.decorators import auth
class OsdIdCommand(RestController):
def __init__(self, osd_id):
self.osd_id = osd_id
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show implemented commands for the OSD id
"""
osd = context.instance.get_osd_by_id(self.osd_id)
if not osd:
response.status = 500
return {'message': 'Failed to identify the OSD id "{}"'.format(self.osd_id)}
if osd['up']:
return common.OSD_IMPLEMENTED_COMMANDS
else:
return []
@expose(template='json')
@auth
def post(self, **kwargs):
"""
Run the implemented command for the OSD id
"""
command = request.json.get('command', None)
osd = context.instance.get_osd_by_id(self.osd_id)
if not osd:
response.status = 500
return {'message': 'Failed to identify the OSD id "{}"'.format(self.osd_id)}
if not osd['up'] or command not in common.OSD_IMPLEMENTED_COMMANDS:
response.status = 500
return {'message': 'Command "{}" not available'.format(command)}
return context.instance.submit_request([[{
'prefix': 'osd ' + command,
'who': str(self.osd_id)
}]], **kwargs)
class OsdId(RestController):
def __init__(self, osd_id):
self.osd_id = osd_id
self.command = OsdIdCommand(osd_id)
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show the information for the OSD id
"""
osd = context.instance.get_osds(ids=[str(self.osd_id)])
if len(osd) != 1:
response.status = 500
return {'message': 'Failed to identify the OSD id "{}"'.format(self.osd_id)}
return osd[0]
@expose(template='json')
@auth
def patch(self, **kwargs):
"""
Modify the state (up, in) of the OSD id or reweight it
"""
args = request.json
commands = []
if 'in' in args:
if args['in']:
commands.append({
'prefix': 'osd in',
'ids': [str(self.osd_id)]
})
else:
commands.append({
'prefix': 'osd out',
'ids': [str(self.osd_id)]
})
if 'up' in args:
if args['up']:
response.status = 500
return {'message': "It is not valid to set a down OSD to be up"}
else:
commands.append({
'prefix': 'osd down',
'ids': [str(self.osd_id)]
})
if 'reweight' in args:
commands.append({
'prefix': 'osd reweight',
'id': self.osd_id,
'weight': args['reweight']
})
return context.instance.submit_request([commands], **kwargs)
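    # Illustrative example: PATCH /osd/3 with body
    #   {"in": true, "reweight": 0.8}
    # queues {'prefix': 'osd in', 'ids': ['3']} and
    # {'prefix': 'osd reweight', 'id': 3, 'weight': 0.8} as one parallel batch;
    # {"up": true} is rejected because an OSD cannot be forced up from here.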
class Osd(RestController):
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show the information for all the OSDs
"""
# Parse request args
# TODO Filter by ids
pool_id = kwargs.get('pool', None)
return context.instance.get_osds(pool_id)
@expose()
def _lookup(self, osd_id, *remainder):
return OsdId(int(osd_id)), remainder
| 3,542 | 25.051471 | 88 |
py
|
null |
ceph-main/src/pybind/mgr/restful/api/perf.py
|
from pecan import expose, request, response
from pecan.rest import RestController
from restful import context
from restful.decorators import auth, lock, paginate
import re
class Perf(RestController):
@expose(template='json')
@paginate
@auth
def get(self, **kwargs):
"""
List all the available performance counters
Options:
- 'daemon' -- filter by daemon, accepts Python regexp
"""
counters = context.instance.get_unlabeled_perf_counters()
if 'daemon' in kwargs:
_re = re.compile(kwargs['daemon'])
counters = {k: v for k, v in counters.items() if _re.match(k)}
return counters
| 688 | 23.607143 | 74 |
py
|
null |
ceph-main/src/pybind/mgr/restful/api/pool.py
|
from pecan import expose, request, response
from pecan.rest import RestController
from restful import common, context
from restful.decorators import auth
class PoolId(RestController):
def __init__(self, pool_id):
self.pool_id = pool_id
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show the information for the pool id
"""
pool = context.instance.get_pool_by_id(self.pool_id)
if not pool:
response.status = 500
return {'message': 'Failed to identify the pool id "{}"'.format(self.pool_id)}
# pgp_num is called pg_placement_num, deal with that
if 'pg_placement_num' in pool:
pool['pgp_num'] = pool.pop('pg_placement_num')
return pool
@expose(template='json')
@auth
def patch(self, **kwargs):
"""
Modify the information for the pool id
"""
try:
args = request.json
except ValueError:
response.status = 400
return {'message': 'Bad request: malformed JSON or wrong Content-Type'}
# Get the pool info for its name
pool = context.instance.get_pool_by_id(self.pool_id)
if not pool:
response.status = 500
return {'message': 'Failed to identify the pool id "{}"'.format(self.pool_id)}
# Check for invalid pool args
invalid = common.invalid_pool_args(args)
if invalid:
response.status = 500
return {'message': 'Invalid arguments found: "{}"'.format(invalid)}
# Schedule the update request
return context.instance.submit_request(common.pool_update_commands(pool['pool_name'], args), **kwargs)
@expose(template='json')
@auth
def delete(self, **kwargs):
"""
Remove the pool data for the pool id
"""
pool = context.instance.get_pool_by_id(self.pool_id)
if not pool:
response.status = 500
return {'message': 'Failed to identify the pool id "{}"'.format(self.pool_id)}
return context.instance.submit_request([[{
'prefix': 'osd pool delete',
'pool': pool['pool_name'],
'pool2': pool['pool_name'],
'yes_i_really_really_mean_it': True
}]], **kwargs)
class Pool(RestController):
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show the information for all the pools
"""
pools = context.instance.get('osd_map')['pools']
# pgp_num is called pg_placement_num, deal with that
for pool in pools:
if 'pg_placement_num' in pool:
pool['pgp_num'] = pool.pop('pg_placement_num')
return pools
@expose(template='json')
@auth
def post(self, **kwargs):
"""
Create a new pool
Requires name and pg_num dict arguments
"""
args = request.json
# Check for the required arguments
pool_name = args.pop('name', None)
if pool_name is None:
response.status = 500
return {'message': 'You need to specify the pool "name" argument'}
pg_num = args.pop('pg_num', None)
if pg_num is None:
response.status = 500
return {'message': 'You need to specify the "pg_num" argument'}
# Run the pool create command first
create_command = {
'prefix': 'osd pool create',
'pool': pool_name,
'pg_num': pg_num
}
# Check for invalid pool args
invalid = common.invalid_pool_args(args)
if invalid:
response.status = 500
return {'message': 'Invalid arguments found: "{}"'.format(invalid)}
# Schedule the creation and update requests
return context.instance.submit_request(
[[create_command]] +
common.pool_update_commands(pool_name, args),
**kwargs
)
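    # Illustrative example: POST /pool with body
    #   {"name": "mypool", "pg_num": 128, "size": 3}
    # schedules the 'osd pool create' command first, then the
    # pool_update_commands() wave(s) for the remaining properties (here 'size').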
@expose()
def _lookup(self, pool_id, *remainder):
return PoolId(int(pool_id)), remainder
| 4,111 | 28.163121 | 110 |
py
|
null |
ceph-main/src/pybind/mgr/restful/api/request.py
|
from pecan import expose, request, response
from pecan.rest import RestController
from restful import context
from restful.decorators import auth, lock, paginate
class RequestId(RestController):
def __init__(self, request_id):
self.request_id = request_id
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show the information for the request id
"""
request = [x for x in context.instance.requests
if x.id == self.request_id]
if len(request) != 1:
response.status = 500
return {'message': 'Unknown request id "{}"'.format(self.request_id)}
return request[0]
@expose(template='json')
@auth
@lock
def delete(self, **kwargs):
"""
Remove the request id from the database
"""
for index in range(len(context.instance.requests)):
if context.instance.requests[index].id == self.request_id:
return context.instance.requests.pop(index)
# Failed to find the job to cancel
response.status = 500
return {'message': 'No such request id'}
class Request(RestController):
@expose(template='json')
@paginate
@auth
def get(self, **kwargs):
"""
List all the available requests
"""
return context.instance.requests
@expose(template='json')
@auth
@lock
def delete(self, **kwargs):
"""
Remove all the finished requests
"""
num_requests = len(context.instance.requests)
context.instance.requests = [x for x in context.instance.requests
if not x.is_finished()]
remaining = len(context.instance.requests)
# Return the job statistics
return {
'cleaned': num_requests - remaining,
'remaining': remaining,
}
@expose(template='json')
@auth
def post(self, **kwargs):
"""
Pass through method to create any request
"""
if isinstance(request.json, list):
if all(isinstance(element, list) for element in request.json):
return context.instance.submit_request(request.json, **kwargs)
# The request.json has wrong format
response.status = 500
return {'message': 'The request format should be [[{c1},{c2}]]'}
return context.instance.submit_request([[request.json]], **kwargs)
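    # Illustrative example: POST /request with body
    #   [[{"prefix": "osd set", "key": "noout"}], [{"prefix": "osd unset", "key": "noout"}]]
    # runs the first inner list, waits for it to finish, then runs the second;
    # a single JSON object is wrapped as [[{...}]] before submission.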
@expose()
def _lookup(self, request_id, *remainder):
return RequestId(request_id), remainder
| 2,604 | 26.712766 | 81 |
py
|
null |
ceph-main/src/pybind/mgr/restful/api/server.py
|
from pecan import expose
from pecan.rest import RestController
from restful import context
from restful.decorators import auth
class ServerFqdn(RestController):
def __init__(self, fqdn):
self.fqdn = fqdn
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show the information for the server fqdn
"""
return context.instance.get_server(self.fqdn)
class Server(RestController):
@expose(template='json')
@auth
def get(self, **kwargs):
"""
Show the information for all the servers
"""
return context.instance.list_servers()
@expose()
def _lookup(self, fqdn, *remainder):
return ServerFqdn(fqdn), remainder
| 737 | 19.5 | 53 |
py
|
null |
ceph-main/src/pybind/mgr/rgw/__init__.py
|
# flake8: noqa
from .module import Module
| 42 | 13.333333 | 26 |
py
|
null |
ceph-main/src/pybind/mgr/rgw/module.py
|
import json
import threading
import yaml
import errno
import base64
import functools
import sys
from mgr_module import MgrModule, CLICommand, HandleCommandResult, Option
import orchestrator
from ceph.deployment.service_spec import RGWSpec, PlacementSpec, SpecValidationError
from typing import Any, Optional, Sequence, Iterator, List, Callable, TypeVar, cast, Dict, Tuple, Union, TYPE_CHECKING
from ceph.rgw.types import RGWAMException, RGWAMEnvMgr, RealmToken
from ceph.rgw.rgwam_core import EnvArgs, RGWAM
from orchestrator import OrchestratorClientMixin, OrchestratorError, DaemonDescription, OrchResult
FuncT = TypeVar('FuncT', bound=Callable[..., Any])
if TYPE_CHECKING:
# this uses a version check as opposed to a try/except because this
# form makes mypy happy and try/except doesn't.
if sys.version_info >= (3, 8):
from typing import Protocol
else:
from typing_extensions import Protocol
class MgrModuleProtocol(Protocol):
def tool_exec(self, args: List[str]) -> Tuple[int, str, str]:
...
def apply_rgw(self, spec: RGWSpec) -> OrchResult[str]:
...
def list_daemons(self, service_name: Optional[str] = None,
daemon_type: Optional[str] = None,
daemon_id: Optional[str] = None,
host: Optional[str] = None,
refresh: bool = False) -> OrchResult[List['DaemonDescription']]:
...
else:
class MgrModuleProtocol:
pass
class RGWSpecParsingError(Exception):
pass
class OrchestratorAPI(OrchestratorClientMixin):
def __init__(self, mgr: MgrModule):
super(OrchestratorAPI, self).__init__()
self.set_mgr(mgr)
def status(self) -> Dict[str, Union[str, bool]]:
try:
status, message, _module_details = super().available()
return dict(available=status, message=message)
except (RuntimeError, OrchestratorError, ImportError) as e:
return dict(available=False, message=f'Orchestrator is unavailable: {e}')
class RGWAMOrchMgr(RGWAMEnvMgr):
def __init__(self, mgr: MgrModuleProtocol):
self.mgr = mgr
def tool_exec(self, prog: str, args: List[str]) -> Tuple[List[str], int, str, str]:
cmd = [prog] + args
rc, stdout, stderr = self.mgr.tool_exec(args=cmd)
return cmd, rc, stdout, stderr
def apply_rgw(self, spec: RGWSpec) -> None:
completion = self.mgr.apply_rgw(spec)
orchestrator.raise_if_exception(completion)
def list_daemons(self, service_name: Optional[str] = None,
daemon_type: Optional[str] = None,
daemon_id: Optional[str] = None,
host: Optional[str] = None,
refresh: bool = True) -> List['DaemonDescription']:
completion = self.mgr.list_daemons(service_name,
daemon_type,
daemon_id=daemon_id,
host=host,
refresh=refresh)
return orchestrator.raise_if_exception(completion)
def check_orchestrator(func: FuncT) -> FuncT:
@functools.wraps(func)
def wrapper(self: Any, *args: Any, **kwargs: Any) -> HandleCommandResult:
available = self.api.status()['available']
if available:
return func(self, *args, **kwargs)
else:
err_msg = "Cephadm is not available. Please enable cephadm by 'ceph mgr module enable cephadm'."
return HandleCommandResult(retval=-errno.EINVAL, stdout='', stderr=err_msg)
return cast(FuncT, wrapper)
class Module(orchestrator.OrchestratorClientMixin, MgrModule):
MODULE_OPTIONS: List[Option] = []
# These are "native" Ceph options that this module cares about.
NATIVE_OPTIONS: List[Option] = []
def __init__(self, *args: Any, **kwargs: Any):
self.inited = False
self.lock = threading.Lock()
super(Module, self).__init__(*args, **kwargs)
self.api = OrchestratorAPI(self)
# ensure config options members are initialized; see config_notify()
self.config_notify()
with self.lock:
self.inited = True
self.env = EnvArgs(RGWAMOrchMgr(self))
# set up some members to enable the serve() method and shutdown()
self.run = True
self.event = threading.Event()
def config_notify(self) -> None:
"""
This method is called whenever one of our config options is changed.
"""
# This is some boilerplate that stores MODULE_OPTIONS in a class
# member, so that, for instance, the 'emphatic' option is always
# available as 'self.emphatic'.
for opt in self.MODULE_OPTIONS:
setattr(self,
opt['name'],
self.get_module_option(opt['name']))
self.log.debug(' mgr option %s = %s',
opt['name'], getattr(self, opt['name']))
# Do the same for the native options.
for opt in self.NATIVE_OPTIONS:
setattr(self,
opt, # type: ignore
self.get_ceph_option(opt))
self.log.debug(' native option %s = %s', opt, getattr(self, opt)) # type: ignore
@CLICommand('rgw admin', perm='rw')
def _cmd_rgw_admin(self, params: Sequence[str]) -> HandleCommandResult:
"""rgw admin"""
cmd, returncode, out, err = self.env.mgr.tool_exec('radosgw-admin', params or [])
self.log.error('retcode=%d' % returncode)
self.log.error('out=%s' % out)
self.log.error('err=%s' % err)
return HandleCommandResult(retval=returncode, stdout=out, stderr=err)
@CLICommand('rgw realm bootstrap', perm='rw')
@check_orchestrator
def _cmd_rgw_realm_bootstrap(self,
realm_name: Optional[str] = None,
zonegroup_name: Optional[str] = None,
zone_name: Optional[str] = None,
port: Optional[int] = None,
placement: Optional[str] = None,
zone_endpoints: Optional[str] = None,
start_radosgw: Optional[bool] = True,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Bootstrap new rgw realm, zonegroup, and zone"""
if inbuf:
try:
rgw_specs = self._parse_rgw_specs(inbuf)
except RGWSpecParsingError as e:
return HandleCommandResult(retval=-errno.EINVAL, stderr=f'{e}')
elif (realm_name and zonegroup_name and zone_name):
placement_spec = PlacementSpec.from_string(placement) if placement else None
rgw_specs = [RGWSpec(rgw_realm=realm_name,
rgw_zonegroup=zonegroup_name,
rgw_zone=zone_name,
rgw_frontend_port=port,
placement=placement_spec,
zone_endpoints=zone_endpoints)]
else:
err_msg = 'Invalid arguments: either pass a spec with -i or provide the realm, zonegroup and zone.'
return HandleCommandResult(retval=-errno.EINVAL, stdout='', stderr=err_msg)
try:
for spec in rgw_specs:
RGWAM(self.env).realm_bootstrap(spec, start_radosgw)
except RGWAMException as e:
self.log.error('cmd run exception: (%d) %s' % (e.retcode, e.message))
return HandleCommandResult(retval=e.retcode, stdout=e.stdout, stderr=e.stderr)
return HandleCommandResult(retval=0, stdout="Realm(s) created correctly. Please, use 'ceph rgw realm tokens' to get the token.", stderr='')
def _parse_rgw_specs(self, inbuf: str) -> List[RGWSpec]:
"""Parse RGW specs from a YAML file."""
# YAML '---' document separator with no content generates
# None entries in the output. Let's skip them silently.
yaml_objs: Iterator = yaml.safe_load_all(inbuf)
specs = [o for o in yaml_objs if o is not None]
rgw_specs = []
for spec in specs:
            # A secondary zone spec normally contains only the zone and the realm token;
            # since no rgw_realm is specified in this case, we extract it from the token.
if 'rgw_realm_token' in spec:
realm_token = RealmToken.from_base64_str(spec['rgw_realm_token'])
if realm_token is None:
raise RGWSpecParsingError(f"Invalid realm token: {spec['rgw_realm_token']}")
spec['rgw_realm'] = realm_token.realm_name
try:
rgw_spec = RGWSpec.from_json(spec)
rgw_spec.validate()
except SpecValidationError as e:
raise RGWSpecParsingError(f'RGW Spec parsing/validation error: {e}')
else:
rgw_specs.append(rgw_spec)
return rgw_specs
@CLICommand('rgw realm zone-creds create', perm='rw')
def _cmd_rgw_realm_new_zone_creds(self,
realm_name: Optional[str] = None,
endpoints: Optional[str] = None,
sys_uid: Optional[str] = None) -> HandleCommandResult:
"""Create credentials for new zone creation"""
try:
retval, out, err = RGWAM(self.env).realm_new_zone_creds(realm_name, endpoints, sys_uid)
except RGWAMException as e:
self.log.error('cmd run exception: (%d) %s' % (e.retcode, e.message))
return HandleCommandResult(retval=e.retcode, stdout=e.stdout, stderr=e.stderr)
return HandleCommandResult(retval=retval, stdout=out, stderr=err)
@CLICommand('rgw realm zone-creds remove', perm='rw')
def _cmd_rgw_realm_rm_zone_creds(self, realm_token: Optional[str] = None) -> HandleCommandResult:
"""Create credentials for new zone creation"""
try:
retval, out, err = RGWAM(self.env).realm_rm_zone_creds(realm_token)
except RGWAMException as e:
self.log.error('cmd run exception: (%d) %s' % (e.retcode, e.message))
return HandleCommandResult(retval=e.retcode, stdout=e.stdout, stderr=e.stderr)
return HandleCommandResult(retval=retval, stdout=out, stderr=err)
@CLICommand('rgw realm tokens', perm='r')
def list_realm_tokens(self) -> HandleCommandResult:
try:
realms_info = self.get_realm_tokens()
except RGWAMException as e:
self.log.error(f'cmd run exception: ({e.retcode}) {e.message}')
return HandleCommandResult(retval=e.retcode, stdout=e.stdout, stderr=e.stderr)
return HandleCommandResult(retval=0, stdout=json.dumps(realms_info, indent=4), stderr='')
def get_realm_tokens(self) -> List[Dict]:
realms_info = []
for realm_info in RGWAM(self.env).get_realms_info():
if not realm_info['master_zone_id']:
realms_info.append({'realm': realm_info['realm_name'], 'token': 'realm has no master zone'})
elif not realm_info['endpoint']:
realms_info.append({'realm': realm_info['realm_name'], 'token': 'master zone has no endpoint'})
elif not (realm_info['access_key'] and realm_info['secret']):
realms_info.append({'realm': realm_info['realm_name'], 'token': 'master zone has no access/secret keys'})
else:
keys = ['realm_name', 'realm_id', 'endpoint', 'access_key', 'secret']
realm_token = RealmToken(**{k: realm_info[k] for k in keys})
realm_token_b = realm_token.to_json().encode('utf-8')
realm_token_s = base64.b64encode(realm_token_b).decode('utf-8')
realms_info.append({'realm': realm_info['realm_name'], 'token': realm_token_s})
return realms_info
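    # Illustrative note: each token returned above is simply the base64-encoded
    # JSON of a RealmToken, so base64.b64decode(token) decodes back to the realm
    # name, realm id, endpoint and access/secret key fields packed just above.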
@CLICommand('rgw zone modify', perm='rw')
def update_zone_info(self, realm_name: str, zonegroup_name: str, zone_name: str, realm_token: str, zone_endpoints: List[str]) -> HandleCommandResult:
try:
retval, out, err = RGWAM(self.env).zone_modify(realm_name,
zonegroup_name,
zone_name,
zone_endpoints,
realm_token)
return HandleCommandResult(retval, 'Zone updated successfully', '')
except RGWAMException as e:
self.log.error('cmd run exception: (%d) %s' % (e.retcode, e.message))
return HandleCommandResult(retval=e.retcode, stdout=e.stdout, stderr=e.stderr)
@CLICommand('rgw zone create', perm='rw')
@check_orchestrator
def _cmd_rgw_zone_create(self,
zone_name: Optional[str] = None,
realm_token: Optional[str] = None,
port: Optional[int] = None,
placement: Optional[str] = None,
start_radosgw: Optional[bool] = True,
zone_endpoints: Optional[str] = None,
inbuf: Optional[str] = None) -> HandleCommandResult:
"""Bootstrap new rgw zone that syncs with zone on another cluster in the same realm"""
created_zones = self.rgw_zone_create(zone_name, realm_token, port, placement,
start_radosgw, zone_endpoints, inbuf)
return HandleCommandResult(retval=0, stdout=f"Zones {', '.join(created_zones)} created successfully")
def rgw_zone_create(self,
zone_name: Optional[str] = None,
realm_token: Optional[str] = None,
port: Optional[int] = None,
placement: Optional[str] = None,
start_radosgw: Optional[bool] = True,
zone_endpoints: Optional[str] = None,
inbuf: Optional[str] = None) -> Any:
if inbuf:
try:
rgw_specs = self._parse_rgw_specs(inbuf)
except RGWSpecParsingError as e:
return HandleCommandResult(retval=-errno.EINVAL, stderr=f'{e}')
elif (zone_name and realm_token):
token = RealmToken.from_base64_str(realm_token)
placement_spec = PlacementSpec.from_string(placement) if placement else None
rgw_specs = [RGWSpec(rgw_realm=token.realm_name,
rgw_zone=zone_name,
rgw_realm_token=realm_token,
rgw_frontend_port=port,
placement=placement_spec,
zone_endpoints=zone_endpoints)]
else:
err_msg = 'Invalid arguments: either pass a spec with -i or provide the zone_name and realm_token.'
return HandleCommandResult(retval=-errno.EINVAL, stdout='', stderr=err_msg)
try:
created_zones = []
for rgw_spec in rgw_specs:
RGWAM(self.env).zone_create(rgw_spec, start_radosgw)
if rgw_spec.rgw_zone is not None:
created_zones.append(rgw_spec.rgw_zone)
return created_zones
except RGWAMException as e:
self.log.error('cmd run exception: (%d) %s' % (e.retcode, e.message))
return HandleCommandResult(retval=e.retcode, stdout=e.stdout, stderr=e.stderr)
return created_zones
@CLICommand('rgw realm reconcile', perm='rw')
def _cmd_rgw_realm_reconcile(self,
realm_name: Optional[str] = None,
zonegroup_name: Optional[str] = None,
zone_name: Optional[str] = None,
update: Optional[bool] = False) -> HandleCommandResult:
"""Bootstrap new rgw zone that syncs with existing zone"""
try:
retval, out, err = RGWAM(self.env).realm_reconcile(realm_name, zonegroup_name,
zone_name, update)
except RGWAMException as e:
self.log.error('cmd run exception: (%d) %s' % (e.retcode, e.message))
return HandleCommandResult(retval=e.retcode, stdout=e.stdout, stderr=e.stderr)
return HandleCommandResult(retval=retval, stdout=out, stderr=err)
def shutdown(self) -> None:
"""
This method is called by the mgr when the module needs to shut
down (i.e., when the serve() function needs to exit).
"""
self.log.info('Stopping')
self.run = False
self.event.set()
def import_realm_token(self,
zone_name: Optional[str] = None,
realm_token: Optional[str] = None,
port: Optional[int] = None,
placement: Optional[str] = None,
start_radosgw: Optional[bool] = True,
zone_endpoints: Optional[str] = None) -> None:
self.rgw_zone_create(zone_name, realm_token, port, placement, start_radosgw,
zone_endpoints)
| 17,647 | 45.564644 | 153 |
py
|
null |
ceph-main/src/pybind/mgr/rook/__init__.py
|
import os
if 'UNITTEST' in os.environ:
import tests
from .module import RookOrchestrator
| 94 | 14.833333 | 36 |
py
|
null |
ceph-main/src/pybind/mgr/rook/generate_rook_ceph_client.sh
|
#!/bin/sh
set -e
script_location="$(dirname "$(readlink -f "$0")")"
cd "$script_location"
rm -rf rook_client
cp -r ./rook-client-python/rook_client .
rm -rf rook_client/cassandra
rm -rf rook_client/edgefs
rm -rf rook_client/tests
| 235 | 14.733333 | 50 |
sh
|
null |
ceph-main/src/pybind/mgr/rook/module.py
|
import datetime
import logging
import re
import threading
import functools
import os
import json
from ceph.deployment import inventory
from ceph.deployment.service_spec import ServiceSpec, NFSServiceSpec, RGWSpec, PlacementSpec
from ceph.utils import datetime_now
from typing import List, Dict, Optional, Callable, Any, TypeVar, Tuple, TYPE_CHECKING
try:
from ceph.deployment.drive_group import DriveGroupSpec
except ImportError:
pass # just for type checking
try:
from kubernetes import client, config
from kubernetes.client.rest import ApiException
kubernetes_imported = True
# https://github.com/kubernetes-client/python/issues/895
from kubernetes.client.models.v1_container_image import V1ContainerImage
def names(self: Any, names: Any) -> None:
self._names = names
V1ContainerImage.names = V1ContainerImage.names.setter(names)
except ImportError:
kubernetes_imported = False
client = None
config = None
from mgr_module import MgrModule, Option, NFS_POOL_NAME
import orchestrator
from orchestrator import handle_orch_error, OrchResult, raise_if_exception
from .rook_cluster import RookCluster
T = TypeVar('T')
FuncT = TypeVar('FuncT', bound=Callable)
ServiceSpecT = TypeVar('ServiceSpecT', bound=ServiceSpec)
class RookEnv(object):
def __init__(self) -> None:
# POD_NAMESPACE already exist for Rook 0.9
self.namespace = os.environ.get('POD_NAMESPACE', 'rook-ceph')
        # ROOK_CEPH_CLUSTER_CRD_NAME is new in Rook 1.0
self.cluster_name = os.environ.get('ROOK_CEPH_CLUSTER_CRD_NAME', self.namespace)
self.operator_namespace = os.environ.get('ROOK_OPERATOR_NAMESPACE', self.namespace)
self.crd_version = os.environ.get('ROOK_CEPH_CLUSTER_CRD_VERSION', 'v1')
self.api_name = "ceph.rook.io/" + self.crd_version
def api_version_match(self) -> bool:
return self.crd_version == 'v1'
def has_namespace(self) -> bool:
return 'POD_NAMESPACE' in os.environ
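# Illustrative values (a sketch, assuming the common case where the Rook
# operator injects POD_NAMESPACE=rook-ceph and leaves the other variables
# unset):
#
#   env = RookEnv()
#   env.namespace             # 'rook-ceph'
#   env.cluster_name          # 'rook-ceph' (falls back to the namespace)
#   env.operator_namespace    # 'rook-ceph'
#   env.api_name              # 'ceph.rook.io/v1'
#   env.api_version_match()   # True
#   env.has_namespace()       # True, because POD_NAMESPACE is set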
class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
"""
Writes are a two-phase thing, firstly sending
the write to the k8s API (fast) and then waiting
for the corresponding change to appear in the
Ceph cluster (slow)
Right now, we are calling the k8s API synchronously.
"""
MODULE_OPTIONS: List[Option] = [
# TODO: configure k8s API addr instead of assuming local
Option(
'storage_class',
type='str',
default='local',
desc='storage class name for LSO-discovered PVs',
),
Option(
'drive_group_interval',
type='float',
default=300.0,
desc='interval in seconds between re-application of applied drive_groups',
),
]
@staticmethod
def can_run() -> Tuple[bool, str]:
if not kubernetes_imported:
return False, "`kubernetes` python module not found"
if not RookEnv().api_version_match():
return False, "Rook version unsupported."
return True, ''
def available(self) -> Tuple[bool, str, Dict[str, Any]]:
if not kubernetes_imported:
return False, "`kubernetes` python module not found", {}
elif not self._rook_env.has_namespace():
return False, "ceph-mgr not running in Rook cluster", {}
try:
self.k8s.list_namespaced_pod(self._rook_env.namespace)
except ApiException as e:
return False, "Cannot reach Kubernetes API: {}".format(e), {}
else:
return True, "", {}
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(RookOrchestrator, self).__init__(*args, **kwargs)
self._initialized = threading.Event()
self._k8s_CoreV1_api: Optional[client.CoreV1Api] = None
self._k8s_BatchV1_api: Optional[client.BatchV1Api] = None
self._k8s_CustomObjects_api: Optional[client.CustomObjectsApi] = None
self._k8s_StorageV1_api: Optional[client.StorageV1Api] = None
self._rook_cluster: Optional[RookCluster] = None
self._rook_env = RookEnv()
self._k8s_AppsV1_api: Optional[client.AppsV1Api] = None
self.config_notify()
if TYPE_CHECKING:
self.storage_class = 'foo'
self.drive_group_interval = 10.0
self._load_drive_groups()
self._shutdown = threading.Event()
def config_notify(self) -> None:
"""
This method is called whenever one of our config options is changed.
TODO: this method should be moved into mgr_module.py
"""
for opt in self.MODULE_OPTIONS:
setattr(self,
opt['name'], # type: ignore
self.get_module_option(opt['name'])) # type: ignore
self.log.debug(' mgr option %s = %s',
opt['name'], getattr(self, opt['name'])) # type: ignore
assert isinstance(self.storage_class, str)
assert isinstance(self.drive_group_interval, float)
if self._rook_cluster:
self._rook_cluster.storage_class = self.storage_class
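    # Example flow (illustrative; 'local-storage' is a hypothetical storage
    # class name): running
    #   ceph config set mgr mgr/rook/storage_class local-storage
    # makes the mgr call config_notify(), which re-reads MODULE_OPTIONS and
    # updates both self.storage_class and the RookCluster's storage class.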
def shutdown(self) -> None:
self._shutdown.set()
@property
def k8s(self):
# type: () -> client.CoreV1Api
self._initialized.wait()
assert self._k8s_CoreV1_api is not None
return self._k8s_CoreV1_api
@property
def rook_cluster(self):
# type: () -> RookCluster
self._initialized.wait()
assert self._rook_cluster is not None
return self._rook_cluster
def serve(self) -> None:
# For deployed clusters, we should always be running inside
# a Rook cluster. For development convenience, also support
# running outside (reading ~/.kube config)
if self._rook_env.has_namespace():
config.load_incluster_config()
else:
self.log.warning("DEVELOPMENT ONLY: Reading kube config from ~")
config.load_kube_config()
# So that I can do port forwarding from my workstation - jcsp
from kubernetes.client import configuration
configuration.verify_ssl = False
self._k8s_CoreV1_api = client.CoreV1Api()
self._k8s_BatchV1_api = client.BatchV1Api()
self._k8s_CustomObjects_api = client.CustomObjectsApi()
self._k8s_StorageV1_api = client.StorageV1Api()
self._k8s_AppsV1_api = client.AppsV1Api()
try:
# XXX mystery hack -- I need to do an API call from
# this context, or subsequent API usage from handle_command
# fails with SSLError('bad handshake'). Suspect some kind of
# thread context setup in SSL lib?
self._k8s_CoreV1_api.list_namespaced_pod(self._rook_env.namespace)
except ApiException:
# Ignore here to make self.available() fail with a proper error message
pass
assert isinstance(self.storage_class, str)
self._rook_cluster = RookCluster(
self._k8s_CoreV1_api,
self._k8s_BatchV1_api,
self._k8s_CustomObjects_api,
self._k8s_StorageV1_api,
self._k8s_AppsV1_api,
self._rook_env,
self.storage_class)
self._initialized.set()
self.config_notify()
while not self._shutdown.is_set():
self._apply_drivegroups(list(self._drive_group_map.values()))
self._shutdown.wait(self.drive_group_interval)
@handle_orch_error
def get_inventory(self, host_filter: Optional[orchestrator.InventoryFilter] = None, refresh: bool = False) -> List[orchestrator.InventoryHost]:
host_list = None
if host_filter and host_filter.hosts:
# Explicit host list
host_list = host_filter.hosts
elif host_filter and host_filter.labels:
# TODO: query k8s API to resolve to host list, and pass
# it into RookCluster.get_discovered_devices
raise NotImplementedError()
discovered_devs = self.rook_cluster.get_discovered_devices(host_list)
result = []
for host_name, host_devs in discovered_devs.items():
devs = []
for d in host_devs:
devs.append(d)
result.append(orchestrator.InventoryHost(host_name, inventory.Devices(devs)))
return result
@handle_orch_error
def get_hosts(self):
# type: () -> List[orchestrator.HostSpec]
return self.rook_cluster.get_hosts()
@handle_orch_error
def describe_service(self,
service_type: Optional[str] = None,
service_name: Optional[str] = None,
refresh: bool = False) -> List[orchestrator.ServiceDescription]:
now = datetime_now()
# CephCluster
cl = self.rook_cluster.rook_api_get(
"cephclusters/{0}".format(self.rook_cluster.rook_env.cluster_name))
self.log.debug('CephCluster %s' % cl)
image_name = cl['spec'].get('cephVersion', {}).get('image', None)
num_nodes = len(self.rook_cluster.get_node_names())
spec = {}
if service_type == 'mon' or service_type is None:
spec['mon'] = orchestrator.ServiceDescription(
spec=ServiceSpec(
'mon',
placement=PlacementSpec(
count=cl['spec'].get('mon', {}).get('count', 1),
),
),
size=cl['spec'].get('mon', {}).get('count', 1),
container_image_name=image_name,
last_refresh=now,
)
if service_type == 'mgr' or service_type is None:
spec['mgr'] = orchestrator.ServiceDescription(
spec=ServiceSpec(
'mgr',
placement=PlacementSpec.from_string('count:1'),
),
size=1,
container_image_name=image_name,
last_refresh=now,
)
        if (
            (service_type == 'crash' or service_type is None)
            and not cl['spec'].get('crashCollector', {}).get('disable', False)
        ):
spec['crash'] = orchestrator.ServiceDescription(
spec=ServiceSpec(
'crash',
placement=PlacementSpec.from_string('*'),
),
size=num_nodes,
container_image_name=image_name,
last_refresh=now,
)
if service_type == 'mds' or service_type is None:
# CephFilesystems
all_fs = self.rook_cluster.get_resource("cephfilesystems")
for fs in all_fs:
svc = 'mds.' + fs['metadata']['name']
if svc in spec:
continue
# FIXME: we are conflating active (+ standby) with count
active = fs['spec'].get('metadataServer', {}).get('activeCount', 1)
total_mds = active
if fs['spec'].get('metadataServer', {}).get('activeStandby', False):
total_mds = active * 2
spec[svc] = orchestrator.ServiceDescription(
spec=ServiceSpec(
service_type='mds',
service_id=fs['metadata']['name'],
placement=PlacementSpec(count=active),
),
size=total_mds,
container_image_name=image_name,
last_refresh=now,
)
if service_type == 'rgw' or service_type is None:
# CephObjectstores
all_zones = self.rook_cluster.get_resource("cephobjectstores")
for zone in all_zones:
svc = 'rgw.' + zone['metadata']['name']
if svc in spec:
continue
                active = zone['spec']['gateway']['instances']
if 'securePort' in zone['spec']['gateway']:
ssl = True
port = zone['spec']['gateway']['securePort']
else:
ssl = False
port = zone['spec']['gateway']['port'] or 80
rgw_zone = zone['spec'].get('zone', {}).get('name') or None
spec[svc] = orchestrator.ServiceDescription(
spec=RGWSpec(
service_id=zone['metadata']['name'],
rgw_zone=rgw_zone,
ssl=ssl,
rgw_frontend_port=port,
placement=PlacementSpec(count=active),
),
size=active,
container_image_name=image_name,
last_refresh=now,
)
if service_type == 'nfs' or service_type is None:
# CephNFSes
all_nfs = self.rook_cluster.get_resource("cephnfses")
nfs_pods = self.rook_cluster.describe_pods('nfs', None, None)
for nfs in all_nfs:
                # Starting with v17.2.0, the 'rados' spec part in 'cephnfs' resources does not contain the 'pool' item
if 'pool' in nfs['spec']['rados']:
if nfs['spec']['rados']['pool'] != NFS_POOL_NAME:
continue
nfs_name = nfs['metadata']['name']
svc = 'nfs.' + nfs_name
if svc in spec:
continue
active = nfs['spec'].get('server', {}).get('active')
creation_timestamp = datetime.datetime.strptime(nfs['metadata']['creationTimestamp'], '%Y-%m-%dT%H:%M:%SZ')
spec[svc] = orchestrator.ServiceDescription(
spec=NFSServiceSpec(
service_id=nfs_name,
placement=PlacementSpec(count=active),
),
size=active,
last_refresh=now,
running=len([1 for pod in nfs_pods if pod['labels']['ceph_nfs'] == nfs_name]),
created=creation_timestamp.astimezone(tz=datetime.timezone.utc)
)
if service_type == 'osd' or service_type is None:
# OSDs
# FIXME: map running OSDs back to their respective services...
# the catch-all unmanaged
all_osds = self.rook_cluster.get_osds()
svc = 'osd'
spec[svc] = orchestrator.ServiceDescription(
spec=DriveGroupSpec(
unmanaged=True,
service_type='osd',
),
size=len(all_osds),
last_refresh=now,
running=sum(osd.status.phase == 'Running' for osd in all_osds)
)
# drivegroups
for name, dg in self._drive_group_map.items():
spec[f'osd.{name}'] = orchestrator.ServiceDescription(
spec=dg,
last_refresh=now,
size=0,
running=0,
)
if service_type == 'rbd-mirror' or service_type is None:
# rbd-mirrors
all_mirrors = self.rook_cluster.get_resource("cephrbdmirrors")
for mirror in all_mirrors:
                logging.warning(mirror)
mirror_name = mirror['metadata']['name']
svc = 'rbd-mirror.' + mirror_name
if svc in spec:
continue
spec[svc] = orchestrator.ServiceDescription(
spec=ServiceSpec(
service_id=mirror_name,
service_type="rbd-mirror",
placement=PlacementSpec(count=1),
),
size=1,
last_refresh=now,
)
for dd in self._list_daemons():
if dd.service_name() not in spec:
continue
service = spec[dd.service_name()]
service.running += 1
if not service.container_image_id:
service.container_image_id = dd.container_image_id
if not service.container_image_name:
service.container_image_name = dd.container_image_name
if service.last_refresh is None or not dd.last_refresh or dd.last_refresh < service.last_refresh:
service.last_refresh = dd.last_refresh
if service.created is None or dd.created is None or dd.created < service.created:
service.created = dd.created
return [v for k, v in spec.items()]
@handle_orch_error
def list_daemons(self,
service_name: Optional[str] = None,
daemon_type: Optional[str] = None,
daemon_id: Optional[str] = None,
host: Optional[str] = None,
refresh: bool = False) -> List[orchestrator.DaemonDescription]:
return self._list_daemons(service_name=service_name,
daemon_type=daemon_type,
daemon_id=daemon_id,
host=host,
refresh=refresh)
def _list_daemons(self,
service_name: Optional[str] = None,
daemon_type: Optional[str] = None,
daemon_id: Optional[str] = None,
host: Optional[str] = None,
refresh: bool = False) -> List[orchestrator.DaemonDescription]:
pods = self.rook_cluster.describe_pods(daemon_type, daemon_id, host)
self.log.debug('pods %s' % pods)
result = []
for p in pods:
sd = orchestrator.DaemonDescription()
sd.hostname = p['hostname']
sd.daemon_type = p['labels']['app'].replace('rook-ceph-', '')
status = {
'Pending': orchestrator.DaemonDescriptionStatus.starting,
'Running': orchestrator.DaemonDescriptionStatus.running,
'Succeeded': orchestrator.DaemonDescriptionStatus.stopped,
'Failed': orchestrator.DaemonDescriptionStatus.error,
'Unknown': orchestrator.DaemonDescriptionStatus.unknown,
}[p['phase']]
sd.status = status
if 'ceph_daemon_id' in p['labels']:
sd.daemon_id = p['labels']['ceph_daemon_id']
elif 'ceph-osd-id' in p['labels']:
sd.daemon_id = p['labels']['ceph-osd-id']
else:
# Unknown type -- skip it
continue
if service_name is not None and service_name != sd.service_name():
continue
sd.container_image_name = p['container_image_name']
sd.container_image_id = p['container_image_id']
sd.created = p['created']
sd.last_configured = p['created']
sd.last_deployed = p['created']
sd.started = p['started']
sd.last_refresh = p['refreshed']
result.append(sd)
return result
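    # Illustrative mapping (a sketch): a pod labelled app=rook-ceph-mgr with
    # ceph_daemon_id=a and phase=Running is reported as a DaemonDescription
    # with daemon_type='mgr', daemon_id='a' and status=running, i.e. what
    # `ceph orch ps` shows as 'mgr.a'.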
def _get_pool_params(self) -> Tuple[int, str]:
num_replicas = self.get_ceph_option('osd_pool_default_size')
assert type(num_replicas) is int
leaf_type_id = self.get_ceph_option('osd_crush_chooseleaf_type')
assert type(leaf_type_id) is int
crush = self.get('osd_map_crush')
leaf_type = 'host'
for t in crush['types']:
if t['type_id'] == leaf_type_id:
leaf_type = t['name']
break
return num_replicas, leaf_type
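    # Worked example (a sketch): with osd_pool_default_size=3,
    # osd_crush_chooseleaf_type=1 and a default CRUSH map where type_id 1 is
    # named 'host', this returns (3, 'host'): 3 replicas, per-host failure
    # domain.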
@handle_orch_error
def remove_service(self, service_name: str, force: bool = False) -> str:
if service_name == 'rbd-mirror':
return self.rook_cluster.rm_service('cephrbdmirrors', 'default-rbd-mirror')
service_type, service_id = service_name.split('.', 1)
if service_type == 'mds':
return self.rook_cluster.rm_service('cephfilesystems', service_id)
elif service_type == 'rgw':
return self.rook_cluster.rm_service('cephobjectstores', service_id)
elif service_type == 'nfs':
ret, out, err = self.mon_command({
'prefix': 'auth ls'
})
matches = re.findall(rf'client\.nfs-ganesha\.{service_id}\..*', out)
for match in matches:
self.check_mon_command({
'prefix': 'auth rm',
'entity': match
})
return self.rook_cluster.rm_service('cephnfses', service_id)
elif service_type == 'rbd-mirror':
return self.rook_cluster.rm_service('cephrbdmirrors', service_id)
elif service_type == 'osd':
if service_id in self._drive_group_map:
del self._drive_group_map[service_id]
self._save_drive_groups()
return f'Removed {service_name}'
elif service_type == 'ingress':
self.log.info("{0} service '{1}' does not exist".format('ingress', service_id))
return 'The Rook orchestrator does not currently support ingress'
else:
raise orchestrator.OrchestratorError(f'Service type {service_type} not supported')
def zap_device(self, host: str, path: str) -> OrchResult[str]:
try:
self.rook_cluster.create_zap_job(host, path)
except Exception as e:
logging.error(e)
return OrchResult(None, Exception("Unable to zap device: " + str(e.with_traceback(None))))
return OrchResult(f'{path} on {host} zapped')
@handle_orch_error
def apply_mon(self, spec):
# type: (ServiceSpec) -> str
if spec.placement.hosts or spec.placement.label:
raise RuntimeError("Host list or label is not supported by rook.")
return self.rook_cluster.update_mon_count(spec.placement.count)
def apply_rbd_mirror(self, spec: ServiceSpec) -> OrchResult[str]:
try:
self.rook_cluster.rbd_mirror(spec)
return OrchResult("Success")
except Exception as e:
return OrchResult(None, e)
@handle_orch_error
def apply_mds(self, spec):
# type: (ServiceSpec) -> str
num_replicas, leaf_type = self._get_pool_params()
return self.rook_cluster.apply_filesystem(spec, num_replicas, leaf_type)
@handle_orch_error
def apply_rgw(self, spec):
# type: (RGWSpec) -> str
num_replicas, leaf_type = self._get_pool_params()
return self.rook_cluster.apply_objectstore(spec, num_replicas, leaf_type)
@handle_orch_error
def apply_nfs(self, spec):
# type: (NFSServiceSpec) -> str
try:
return self.rook_cluster.apply_nfsgw(spec, self)
except Exception as e:
logging.error(e)
return "Unable to create NFS daemon, check logs for more traceback\n" + str(e.with_traceback(None))
@handle_orch_error
def remove_daemons(self, names: List[str]) -> List[str]:
return self.rook_cluster.remove_pods(names)
def apply_drivegroups(self, specs: List[DriveGroupSpec]) -> OrchResult[List[str]]:
for drive_group in specs:
self._drive_group_map[str(drive_group.service_id)] = drive_group
self._save_drive_groups()
return OrchResult(self._apply_drivegroups(specs))
def _apply_drivegroups(self, ls: List[DriveGroupSpec]) -> List[str]:
all_hosts = raise_if_exception(self.get_hosts())
result_list: List[str] = []
for drive_group in ls:
matching_hosts = drive_group.placement.filter_matching_hosts(
lambda label=None, as_hostspec=None: all_hosts
)
if not self.rook_cluster.node_exists(matching_hosts[0]):
raise RuntimeError("Node '{0}' is not in the Kubernetes "
"cluster".format(matching_hosts))
# Validate whether cluster CRD can accept individual OSD
# creations (i.e. not useAllDevices)
if not self.rook_cluster.can_create_osd():
raise RuntimeError("Rook cluster configuration does not "
"support OSD creation.")
result_list.append(self.rook_cluster.add_osds(drive_group, matching_hosts))
return result_list
def _load_drive_groups(self) -> None:
stored_drive_group = self.get_store("drive_group_map")
self._drive_group_map: Dict[str, DriveGroupSpec] = {}
if stored_drive_group:
for name, dg in json.loads(stored_drive_group).items():
try:
self._drive_group_map[name] = DriveGroupSpec.from_json(dg)
except ValueError as e:
self.log.error(f'Failed to load drive group {name} ({dg}): {e}')
def _save_drive_groups(self) -> None:
json_drive_group_map = {
name: dg.to_json() for name, dg in self._drive_group_map.items()
}
self.set_store("drive_group_map", json.dumps(json_drive_group_map))
def remove_osds(self,
osd_ids: List[str],
replace: bool = False,
force: bool = False,
zap: bool = False,
no_destroy: bool = False) -> OrchResult[str]:
assert self._rook_cluster is not None
if zap:
raise RuntimeError("Rook does not support zapping devices during OSD removal.")
res = self._rook_cluster.remove_osds(osd_ids, replace, force, self.mon_command)
return OrchResult(res)
def add_host_label(self, host: str, label: str) -> OrchResult[str]:
return self.rook_cluster.add_host_label(host, label)
def remove_host_label(self, host: str, label: str, force: bool = False) -> OrchResult[str]:
return self.rook_cluster.remove_host_label(host, label)
"""
@handle_orch_error
def create_osds(self, drive_group):
# type: (DriveGroupSpec) -> str
# Creates OSDs from a drive group specification.
# $: ceph orch osd create -i <dg.file>
# The drivegroup file must only contain one spec at a time.
#
targets = [] # type: List[str]
if drive_group.data_devices and drive_group.data_devices.paths:
targets += [d.path for d in drive_group.data_devices.paths]
if drive_group.data_directories:
targets += drive_group.data_directories
all_hosts = raise_if_exception(self.get_hosts())
matching_hosts = drive_group.placement.filter_matching_hosts(lambda label=None, as_hostspec=None: all_hosts)
assert len(matching_hosts) == 1
if not self.rook_cluster.node_exists(matching_hosts[0]):
raise RuntimeError("Node '{0}' is not in the Kubernetes "
"cluster".format(matching_hosts))
# Validate whether cluster CRD can accept individual OSD
# creations (i.e. not useAllDevices)
if not self.rook_cluster.can_create_osd():
raise RuntimeError("Rook cluster configuration does not "
"support OSD creation.")
return self.rook_cluster.add_osds(drive_group, matching_hosts)
# TODO: this was the code to update the progress reference:
@handle_orch_error
def has_osds(matching_hosts: List[str]) -> bool:
# Find OSD pods on this host
pod_osd_ids = set()
pods = self.k8s.list_namespaced_pod(self._rook_env.namespace,
label_selector="rook_cluster={},app=rook-ceph-osd".format(self._rook_env.cluster_name),
field_selector="spec.nodeName={0}".format(
matching_hosts[0]
)).items
for p in pods:
pod_osd_ids.add(int(p.metadata.labels['ceph-osd-id']))
self.log.debug('pod_osd_ids={0}'.format(pod_osd_ids))
found = []
osdmap = self.get("osd_map")
for osd in osdmap['osds']:
osd_id = osd['osd']
if osd_id not in pod_osd_ids:
continue
metadata = self.get_metadata('osd', "%s" % osd_id)
if metadata and metadata['devices'] in targets:
found.append(osd_id)
else:
self.log.info("ignoring osd {0} {1}".format(
osd_id, metadata['devices'] if metadata else 'DNE'
))
return found is not None
"""
@handle_orch_error
def blink_device_light(self, ident_fault: str, on: bool, locs: List[orchestrator.DeviceLightLoc]) -> List[str]:
return self.rook_cluster.blink_light(ident_fault, on, locs)
| 29,141 | 39.587744 | 147 |
py
|
null |
ceph-main/src/pybind/mgr/rook/rook_cluster.py
|
"""
This module wrap's Rook + Kubernetes APIs to expose the calls
needed to implement an orchestrator module. While the orchestrator
module exposes an async API, this module simply exposes blocking API
call methods.
This module is runnable outside of ceph-mgr, useful for testing.
"""
import datetime
import threading
import logging
from contextlib import contextmanager
from time import sleep
import re
from orchestrator import OrchResult
import jsonpatch
from urllib.parse import urljoin
import json
# Optional kubernetes imports to enable MgrModule.can_run
# to behave cleanly.
from urllib3.exceptions import ProtocolError
from ceph.deployment.inventory import Device
from ceph.deployment.drive_group import DriveGroupSpec
from ceph.deployment.service_spec import ServiceSpec, NFSServiceSpec, RGWSpec, PlacementSpec, HostPlacementSpec
from ceph.utils import datetime_now
from ceph.deployment.drive_selection.matchers import SizeMatcher
from nfs.cluster import create_ganesha_pool
from nfs.module import Module
from nfs.export import NFSRados
from mgr_module import NFS_POOL_NAME
from mgr_util import merge_dicts
from typing import Optional, Tuple, TypeVar, List, Callable, Any, cast, Generic, \
Iterable, Dict, Iterator, Type
try:
from kubernetes import client, watch
from kubernetes.client.rest import ApiException
except ImportError:
class ApiException(Exception): # type: ignore
status = 0
from .rook_client.ceph import cephfilesystem as cfs
from .rook_client.ceph import cephnfs as cnfs
from .rook_client.ceph import cephobjectstore as cos
from .rook_client.ceph import cephcluster as ccl
from .rook_client.ceph import cephrbdmirror as crbdm
from .rook_client._helper import CrdClass
import orchestrator
try:
from rook.module import RookEnv, RookOrchestrator
except ImportError:
pass # just used for type checking.
T = TypeVar('T')
FuncT = TypeVar('FuncT', bound=Callable)
CrdClassT = TypeVar('CrdClassT', bound=CrdClass)
log = logging.getLogger(__name__)
def __urllib3_supports_read_chunked() -> bool:
# There is a bug in CentOS 7 as it ships a urllib3 which is lower
# than required by kubernetes-client
try:
from urllib3.response import HTTPResponse
return hasattr(HTTPResponse, 'read_chunked')
except ImportError:
return False
_urllib3_supports_read_chunked = __urllib3_supports_read_chunked()
class ApplyException(orchestrator.OrchestratorError):
"""
For failures to update the Rook CRDs, usually indicating
some kind of interference between our attempted update
and other conflicting activity.
"""
def threaded(f: Callable[..., None]) -> Callable[..., threading.Thread]:
def wrapper(*args: Any, **kwargs: Any) -> threading.Thread:
t = threading.Thread(target=f, args=args, kwargs=kwargs)
t.start()
return t
return cast(Callable[..., threading.Thread], wrapper)
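# Minimal usage sketch for the decorator above ('_worker' is a hypothetical
# function name): the wrapped callable runs on its own thread and the started
# Thread object is returned to the caller.
#
#   @threaded
#   def _worker(msg: str) -> None:
#       log.info(msg)
#
#   t = _worker('hello')
#   t.join()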
class DefaultFetcher():
def __init__(self, storage_class: str, coreV1_api: 'client.CoreV1Api'):
self.storage_class = storage_class
self.coreV1_api = coreV1_api
def fetch(self) -> None:
self.inventory: KubernetesResource[client.V1PersistentVolumeList] = KubernetesResource(self.coreV1_api.list_persistent_volume)
self.pvs_in_sc = [i for i in self.inventory.items if i.spec.storage_class_name == self.storage_class]
def convert_size(self, size_str: str) -> int:
units = ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "", "K", "M", "G", "T", "P", "E")
        coeff_and_unit = re.search(r'(\d+)(\D+)', size_str)
assert coeff_and_unit is not None
coeff = int(coeff_and_unit[1])
unit = coeff_and_unit[2]
try:
factor = units.index(unit) % 7
except ValueError:
log.error("PV size format invalid")
raise
size = coeff * (2 ** (10 * factor))
return size
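    # Worked examples (illustrative): convert_size('10Gi') -> 10 * 2**30 =
    # 10737418240; convert_size('512Mi') -> 512 * 2**20 = 536870912. Note that
    # the decimal suffixes ('G', 'M', ...) map onto the same binary factors, so
    # '10G' is treated the same as '10Gi' here.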
def devices(self) -> Dict[str, List[Device]]:
nodename_to_devices: Dict[str, List[Device]] = {}
for i in self.pvs_in_sc:
node, device = self.device(i)
if node not in nodename_to_devices:
nodename_to_devices[node] = []
nodename_to_devices[node].append(device)
return nodename_to_devices
def device(self, i: 'client.V1PersistentVolume') -> Tuple[str, Device]:
node = 'N/A'
if i.spec.node_affinity:
terms = i.spec.node_affinity.required.node_selector_terms
if len(terms) == 1 and len(terms[0].match_expressions) == 1 and terms[0].match_expressions[0].key == 'kubernetes.io/hostname' and len(terms[0].match_expressions[0].values) == 1:
node = terms[0].match_expressions[0].values[0]
size = self.convert_size(i.spec.capacity['storage'])
path = i.spec.host_path.path if i.spec.host_path else i.spec.local.path if i.spec.local else ('/dev/' + i.metadata.annotations['storage.openshift.com/device-name']) if i.metadata.annotations and 'storage.openshift.com/device-name' in i.metadata.annotations else ''
state = i.spec.volume_mode == 'Block' and i.status.phase == 'Available'
pv_name = i.metadata.name
device = Device(
path = path,
sys_api = dict(
size = size,
node = node,
pv_name = pv_name
),
available = state,
)
return (node, device)
class LSOFetcher(DefaultFetcher):
def __init__(self, storage_class: 'str', coreV1_api: 'client.CoreV1Api', customObjects_api: 'client.CustomObjectsApi', nodenames: 'Optional[List[str]]' = None):
super().__init__(storage_class, coreV1_api)
self.customObjects_api = customObjects_api
self.nodenames = nodenames
def fetch(self) -> None:
super().fetch()
self.discovery: KubernetesCustomResource = KubernetesCustomResource(self.customObjects_api.list_cluster_custom_object,
group="local.storage.openshift.io",
version="v1alpha1",
plural="localvolumediscoveryresults")
def predicate(self, item: 'client.V1ConfigMapList') -> bool:
if self.nodenames is not None:
return item['spec']['nodeName'] in self.nodenames
else:
return True
def devices(self) -> Dict[str, List[Device]]:
try:
lso_discovery_results = [i for i in self.discovery.items if self.predicate(i)]
except ApiException as dummy_e:
log.error("Failed to fetch device metadata")
raise
self.lso_devices = {}
for i in lso_discovery_results:
drives = i['status']['discoveredDevices']
for drive in drives:
self.lso_devices[drive['deviceID'].split('/')[-1]] = drive
nodename_to_devices: Dict[str, List[Device]] = {}
for i in self.pvs_in_sc:
node, device = (None, None)
if (not i.metadata.annotations) or ('storage.openshift.com/device-id' not in i.metadata.annotations) or (i.metadata.annotations['storage.openshift.com/device-id'] not in self.lso_devices):
node, device = super().device(i)
else:
node, device = self.device(i)
if node not in nodename_to_devices:
nodename_to_devices[node] = []
nodename_to_devices[node].append(device)
return nodename_to_devices
def device(self, i: Any) -> Tuple[str, Device]:
node = i.metadata.labels['kubernetes.io/hostname']
device_discovery = self.lso_devices[i.metadata.annotations['storage.openshift.com/device-id']]
pv_name = i.metadata.name
vendor: str = device_discovery['model'].split()[0] if len(device_discovery['model'].split()) >= 1 else ''
model: str = ' '.join(device_discovery['model'].split()[1:]) if len(device_discovery['model'].split()) > 1 else ''
device = Device(
path = device_discovery['path'],
sys_api = dict(
size = device_discovery['size'],
rotational = '1' if device_discovery['property']=='Rotational' else '0',
node = node,
pv_name = pv_name,
model = model,
vendor = vendor
),
available = device_discovery['status']['state']=='Available',
device_id = device_discovery['deviceID'].split('/')[-1],
lsm_data = dict(
serialNum = device_discovery['serial']
)
)
return (node, device)
class PDFetcher(DefaultFetcher):
""" Physical Devices Fetcher"""
def __init__(self, coreV1_api: 'client.CoreV1Api'):
self.coreV1_api = coreV1_api
def fetch(self) -> None:
""" Collect the devices information from k8s configmaps"""
self.dev_cms: KubernetesResource = KubernetesResource(self.coreV1_api.list_namespaced_config_map,
namespace='rook-ceph',
label_selector='app=rook-discover')
def devices(self) -> Dict[str, List[Device]]:
""" Return the list of devices found"""
node_devices: Dict[str, List[Device]] = {}
for i in self.dev_cms.items:
devices_list: List[Device] = []
for d in json.loads(i.data['devices']):
devices_list.append(self.device(d)[1])
node_devices[i.metadata.labels['rook.io/node']] = devices_list
return node_devices
def device(self, devData: Dict[str,str]) -> Tuple[str, Device]:
""" Build an orchestrator device """
if 'cephVolumeData' in devData and devData['cephVolumeData']:
return "", Device.from_json(json.loads(devData['cephVolumeData']))
else:
return "", Device(
path='/dev/' + devData['name'],
sys_api=dict(
rotational='1' if devData['rotational'] else '0',
size=devData['size']
),
available=False,
rejected_reasons=['device data coming from ceph-volume not provided'],
)
class KubernetesResource(Generic[T]):
def __init__(self, api_func: Callable, **kwargs: Any) -> None:
"""
Generic kubernetes Resource parent class
The api fetch and watch methods should be common across resource types,
Exceptions in the runner thread are propagated to the caller.
:param api_func: kubernetes client api function that is passed to the watcher
:param filter_func: signature: ``(Item) -> bool``.
"""
self.kwargs = kwargs
self.api_func = api_func
# ``_items`` is accessed by different threads. I assume assignment is atomic.
self._items: Dict[str, T] = dict()
self.thread = None # type: Optional[threading.Thread]
self.exception: Optional[Exception] = None
if not _urllib3_supports_read_chunked:
logging.info('urllib3 is too old. Fallback to full fetches')
def _fetch(self) -> str:
""" Execute the requested api method as a one-off fetch"""
response = self.api_func(**self.kwargs)
metadata = response.metadata
self._items = {item.metadata.name: item for item in response.items}
log.info('Full fetch of {}. result: {}'.format(self.api_func, len(self._items)))
return metadata.resource_version
@property
def items(self) -> Iterable[T]:
"""
Returns the items of the request.
Creates the watcher as a side effect.
:return:
"""
if self.exception:
e = self.exception
self.exception = None
raise e # Propagate the exception to the user.
if not self.thread or not self.thread.is_alive():
resource_version = self._fetch()
if _urllib3_supports_read_chunked:
# Start a thread which will use the kubernetes watch client against a resource
log.debug("Attaching resource watcher for k8s {}".format(self.api_func))
self.thread = self._watch(resource_version)
return self._items.values()
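    # Typical usage (a sketch, assuming a configured kubernetes client):
    #
    #   pods = KubernetesResource(core_v1.list_namespaced_pod,
    #                             namespace='rook-ceph',
    #                             label_selector='app=rook-ceph-osd')
    #   for pod in pods.items:   # first access fetches, then attaches a watch
    #       print(pod.metadata.name)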
def get_item_name(self, item: Any) -> Any:
try:
return item.metadata.name
except AttributeError:
raise AttributeError(
"{} doesn't contain a metadata.name. Unable to track changes".format(
self.api_func))
@threaded
def _watch(self, res_ver: Optional[str]) -> None:
""" worker thread that runs the kubernetes watch """
self.exception = None
w = watch.Watch()
try:
# execute generator to continually watch resource for changes
for event in w.stream(self.api_func, resource_version=res_ver, watch=True,
**self.kwargs):
self.health = ''
item = event['object']
name = self.get_item_name(item)
log.info('{} event: {}'.format(event['type'], name))
if event['type'] in ('ADDED', 'MODIFIED'):
self._items = merge_dicts(self._items, {name: item})
elif event['type'] == 'DELETED':
self._items = {k:v for k,v in self._items.items() if k != name}
elif event['type'] == 'BOOKMARK':
pass
elif event['type'] == 'ERROR':
raise ApiException(str(event))
else:
raise KeyError('Unknown watch event {}'.format(event['type']))
except ProtocolError as e:
if 'Connection broken' in str(e):
log.info('Connection reset.')
return
raise
except ApiException as e:
log.exception('K8s API failed. {}'.format(self.api_func))
self.exception = e
raise
except Exception as e:
log.exception("Watcher failed. ({})".format(self.api_func))
self.exception = e
raise
class KubernetesCustomResource(KubernetesResource):
def _fetch(self) -> str:
response = self.api_func(**self.kwargs)
metadata = response['metadata']
self._items = {item['metadata']['name']: item for item in response['items']}
log.info('Full fetch of {}. result: {}'.format(self.api_func, len(self._items)))
return metadata['resourceVersion']
def get_item_name(self, item: Any) -> Any:
try:
return item['metadata']['name']
except AttributeError:
raise AttributeError(
"{} doesn't contain a metadata.name. Unable to track changes".format(
self.api_func))
class DefaultCreator():
def __init__(self, inventory: 'Dict[str, List[Device]]', coreV1_api: 'client.CoreV1Api', storage_class: 'str'):
self.coreV1_api = coreV1_api
self.storage_class = storage_class
self.inventory = inventory
def device_to_device_set(self, drive_group: DriveGroupSpec, d: Device) -> ccl.StorageClassDeviceSetsItem:
device_set = ccl.StorageClassDeviceSetsItem(
name=d.sys_api['pv_name'],
volumeClaimTemplates= ccl.VolumeClaimTemplatesList(),
count=1,
encrypted=drive_group.encrypted,
portable=False
)
device_set.volumeClaimTemplates.append(
ccl.VolumeClaimTemplatesItem(
metadata=ccl.Metadata(
name="data"
),
spec=ccl.Spec(
storageClassName=self.storage_class,
volumeMode="Block",
accessModes=ccl.CrdObjectList(["ReadWriteOnce"]),
resources={
"requests":{
"storage": 1
}
},
volumeName=d.sys_api['pv_name']
)
)
)
return device_set
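    # In effect (a sketch of the intent): each selected PV becomes a one-OSD
    # StorageClassDeviceSet named after the PV, whose PVC template requests a
    # Block-mode volume from the configured storage class and pins volumeName
    # to that PV, so Rook places exactly one OSD on that device.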
def filter_devices(self, rook_pods: KubernetesResource, drive_group: DriveGroupSpec, matching_hosts: List[str]) -> List[Device]:
device_list = []
assert drive_group.data_devices is not None
sizematcher: Optional[SizeMatcher] = None
if drive_group.data_devices.size:
sizematcher = SizeMatcher('size', drive_group.data_devices.size)
limit = getattr(drive_group.data_devices, 'limit', None)
count = 0
all = getattr(drive_group.data_devices, 'all', None)
paths = [device.path for device in drive_group.data_devices.paths]
osd_list = []
for pod in rook_pods.items:
if (
hasattr(pod, 'metadata')
and hasattr(pod.metadata, 'labels')
and 'osd' in pod.metadata.labels
and 'ceph.rook.io/DeviceSet' in pod.metadata.labels
):
osd_list.append(pod.metadata.labels['ceph.rook.io/DeviceSet'])
for _, node in self.inventory.items():
for device in node:
if device.sys_api['pv_name'] in osd_list:
count += 1
for _, node in self.inventory.items():
for device in node:
if not limit or (count < limit):
if device.available:
if (
all
or (
device.sys_api['node'] in matching_hosts
                                and (sizematcher is None or sizematcher.compare(device))
and (
not drive_group.data_devices.paths
or (device.path in paths)
)
)
):
device_list.append(device)
count += 1
return device_list
def add_osds(self, rook_pods: KubernetesResource, drive_group: DriveGroupSpec, matching_hosts: List[str]) -> Any:
to_create = self.filter_devices(rook_pods, drive_group,matching_hosts)
assert drive_group.data_devices is not None
def _add_osds(current_cluster, new_cluster):
# type: (ccl.CephCluster, ccl.CephCluster) -> ccl.CephCluster
if not hasattr(new_cluster.spec, 'storage') or not new_cluster.spec.storage:
new_cluster.spec.storage = ccl.Storage()
if not hasattr(new_cluster.spec.storage, 'storageClassDeviceSets') or not new_cluster.spec.storage.storageClassDeviceSets:
new_cluster.spec.storage.storageClassDeviceSets = ccl.StorageClassDeviceSetsList()
existing_scds = [
scds.name for scds in new_cluster.spec.storage.storageClassDeviceSets
]
for device in to_create:
new_scds = self.device_to_device_set(drive_group, device)
if new_scds.name not in existing_scds:
new_cluster.spec.storage.storageClassDeviceSets.append(new_scds)
return new_cluster
return _add_osds
class LSOCreator(DefaultCreator):
def filter_devices(self, rook_pods: KubernetesResource, drive_group: DriveGroupSpec, matching_hosts: List[str]) -> List[Device]:
device_list = []
assert drive_group.data_devices is not None
sizematcher = None
if drive_group.data_devices.size:
sizematcher = SizeMatcher('size', drive_group.data_devices.size)
limit = getattr(drive_group.data_devices, 'limit', None)
all = getattr(drive_group.data_devices, 'all', None)
paths = [device.path for device in drive_group.data_devices.paths]
vendor = getattr(drive_group.data_devices, 'vendor', None)
model = getattr(drive_group.data_devices, 'model', None)
count = 0
osd_list = []
for pod in rook_pods.items:
if (
hasattr(pod, 'metadata')
and hasattr(pod.metadata, 'labels')
and 'osd' in pod.metadata.labels
and 'ceph.rook.io/DeviceSet' in pod.metadata.labels
):
osd_list.append(pod.metadata.labels['ceph.rook.io/DeviceSet'])
for _, node in self.inventory.items():
for device in node:
if device.sys_api['pv_name'] in osd_list:
count += 1
for _, node in self.inventory.items():
for device in node:
if not limit or (count < limit):
if device.available:
if (
all
or (
device.sys_api['node'] in matching_hosts
                                and (sizematcher is None or sizematcher.compare(device))
and (
not drive_group.data_devices.paths
or device.path in paths
)
and (
not vendor
or device.sys_api['vendor'] == vendor
)
and (
not model
                                    or device.sys_api['model'].startswith(model)
)
)
):
device_list.append(device)
count += 1
return device_list
class DefaultRemover():
def __init__(
self,
coreV1_api: 'client.CoreV1Api',
batchV1_api: 'client.BatchV1Api',
appsV1_api: 'client.AppsV1Api',
osd_ids: List[str],
replace_flag: bool,
force_flag: bool,
mon_command: Callable,
patch: Callable,
rook_env: 'RookEnv',
inventory: Dict[str, List[Device]]
):
self.batchV1_api = batchV1_api
self.appsV1_api = appsV1_api
self.coreV1_api = coreV1_api
self.osd_ids = osd_ids
self.replace_flag = replace_flag
self.force_flag = force_flag
self.mon_command = mon_command
self.patch = patch
self.rook_env = rook_env
self.inventory = inventory
self.osd_pods: KubernetesResource = KubernetesResource(self.coreV1_api.list_namespaced_pod,
namespace=self.rook_env.namespace,
label_selector='app=rook-ceph-osd')
self.jobs: KubernetesResource = KubernetesResource(self.batchV1_api.list_namespaced_job,
namespace=self.rook_env.namespace,
label_selector='app=rook-ceph-osd-prepare')
self.pvcs: KubernetesResource = KubernetesResource(self.coreV1_api.list_namespaced_persistent_volume_claim,
namespace=self.rook_env.namespace)
def remove_device_sets(self) -> str:
self.to_remove: Dict[str, int] = {}
self.pvc_to_remove: List[str] = []
for pod in self.osd_pods.items:
if (
hasattr(pod, 'metadata')
and hasattr(pod.metadata, 'labels')
and 'osd' in pod.metadata.labels
and pod.metadata.labels['osd'] in self.osd_ids
):
if pod.metadata.labels['ceph.rook.io/DeviceSet'] in self.to_remove:
self.to_remove[pod.metadata.labels['ceph.rook.io/DeviceSet']] = self.to_remove[pod.metadata.labels['ceph.rook.io/DeviceSet']] + 1
else:
self.to_remove[pod.metadata.labels['ceph.rook.io/DeviceSet']] = 1
self.pvc_to_remove.append(pod.metadata.labels['ceph.rook.io/pvc'])
def _remove_osds(current_cluster, new_cluster):
# type: (ccl.CephCluster, ccl.CephCluster) -> ccl.CephCluster
assert new_cluster.spec.storage is not None and new_cluster.spec.storage.storageClassDeviceSets is not None
for _set in new_cluster.spec.storage.storageClassDeviceSets:
if _set.name in self.to_remove:
if _set.count == self.to_remove[_set.name]:
new_cluster.spec.storage.storageClassDeviceSets.remove(_set)
else:
_set.count = _set.count - self.to_remove[_set.name]
return new_cluster
return self.patch(ccl.CephCluster, 'cephclusters', self.rook_env.cluster_name, _remove_osds)
def check_force(self) -> None:
if not self.force_flag:
safe_args = {'prefix': 'osd safe-to-destroy',
'ids': [str(x) for x in self.osd_ids]}
ret, out, err = self.mon_command(safe_args)
if ret != 0:
raise RuntimeError(err)
def set_osds_down(self) -> None:
down_flag_args = {
'prefix': 'osd down',
'ids': [str(x) for x in self.osd_ids]
}
ret, out, err = self.mon_command(down_flag_args)
if ret != 0:
raise RuntimeError(err)
def scale_deployments(self) -> None:
for osd_id in self.osd_ids:
self.appsV1_api.patch_namespaced_deployment_scale(namespace=self.rook_env.namespace,
name='rook-ceph-osd-{}'.format(osd_id),
body=client.V1Scale(spec=client.V1ScaleSpec(replicas=0)))
def set_osds_out(self) -> None:
out_flag_args = {
'prefix': 'osd out',
'ids': [str(x) for x in self.osd_ids]
}
ret, out, err = self.mon_command(out_flag_args)
if ret != 0:
raise RuntimeError(err)
def delete_deployments(self) -> None:
for osd_id in self.osd_ids:
self.appsV1_api.delete_namespaced_deployment(namespace=self.rook_env.namespace,
name='rook-ceph-osd-{}'.format(osd_id),
propagation_policy='Foreground')
def clean_up_prepare_jobs_and_pvc(self) -> None:
for job in self.jobs.items:
if job.metadata.labels['ceph.rook.io/pvc'] in self.pvc_to_remove:
self.batchV1_api.delete_namespaced_job(name=job.metadata.name, namespace=self.rook_env.namespace,
propagation_policy='Foreground')
self.coreV1_api.delete_namespaced_persistent_volume_claim(name=job.metadata.labels['ceph.rook.io/pvc'],
namespace=self.rook_env.namespace,
propagation_policy='Foreground')
def purge_osds(self) -> None:
for id in self.osd_ids:
purge_args = {
'prefix': 'osd purge-actual',
'id': int(id),
'yes_i_really_mean_it': True
}
ret, out, err = self.mon_command(purge_args)
if ret != 0:
raise RuntimeError(err)
def destroy_osds(self) -> None:
for id in self.osd_ids:
destroy_args = {
'prefix': 'osd destroy-actual',
'id': int(id),
'yes_i_really_mean_it': True
}
ret, out, err = self.mon_command(destroy_args)
if ret != 0:
raise RuntimeError(err)
def remove(self) -> str:
try:
self.check_force()
except Exception as e:
log.exception("Error checking if OSDs are safe to destroy")
return f"OSDs not safe to destroy or unable to check if they are safe to destroy: {e}"
try:
remove_result = self.remove_device_sets()
except Exception as e:
log.exception("Error patching ceph cluster CRD")
return f"Not possible to modify Ceph cluster CRD: {e}"
try:
self.scale_deployments()
self.delete_deployments()
self.clean_up_prepare_jobs_and_pvc()
except Exception as e:
log.exception("Ceph cluster CRD patched, but error cleaning environment")
return f"Error cleaning environment after removing OSDs from Ceph cluster CRD: {e}"
try:
self.set_osds_down()
self.set_osds_out()
if self.replace_flag:
self.destroy_osds()
else:
self.purge_osds()
except Exception as e:
log.exception("OSDs removed from environment, but not able to remove OSDs from Ceph cluster")
return f"Error removing OSDs from Ceph cluster: {e}"
return remove_result
class RookCluster(object):
# import of client.CoreV1Api must be optional at import time.
# Instead allow mgr/rook to be imported anyway.
def __init__(
self,
coreV1_api: 'client.CoreV1Api',
batchV1_api: 'client.BatchV1Api',
customObjects_api: 'client.CustomObjectsApi',
storageV1_api: 'client.StorageV1Api',
appsV1_api: 'client.AppsV1Api',
rook_env: 'RookEnv',
storage_class: 'str'
):
self.rook_env = rook_env # type: RookEnv
self.coreV1_api = coreV1_api # client.CoreV1Api
self.batchV1_api = batchV1_api
self.customObjects_api = customObjects_api
self.storageV1_api = storageV1_api # client.StorageV1Api
self.appsV1_api = appsV1_api # client.AppsV1Api
self.storage_class = storage_class # type: str
# TODO: replace direct k8s calls with Rook API calls
self.storage_classes : KubernetesResource = KubernetesResource(self.storageV1_api.list_storage_class)
self.rook_pods: KubernetesResource[client.V1Pod] = KubernetesResource(self.coreV1_api.list_namespaced_pod,
namespace=self.rook_env.namespace,
label_selector="rook_cluster={0}".format(
self.rook_env.namespace))
self.nodes: KubernetesResource[client.V1Node] = KubernetesResource(self.coreV1_api.list_node)
def rook_url(self, path: str) -> str:
prefix = "/apis/ceph.rook.io/%s/namespaces/%s/" % (
self.rook_env.crd_version, self.rook_env.namespace)
return urljoin(prefix, path)
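    # Worked example (illustrative, with the default crd_version 'v1' and
    # namespace 'rook-ceph'):
    #   rook_url('cephclusters/rook-ceph')
    #   -> '/apis/ceph.rook.io/v1/namespaces/rook-ceph/cephclusters/rook-ceph'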
def rook_api_call(self, verb: str, path: str, **kwargs: Any) -> Any:
full_path = self.rook_url(path)
log.debug("[%s] %s" % (verb, full_path))
return self.coreV1_api.api_client.call_api(
full_path,
verb,
auth_settings=['BearerToken'],
response_type="object",
_return_http_data_only=True,
_preload_content=True,
**kwargs)
def rook_api_get(self, path: str, **kwargs: Any) -> Any:
return self.rook_api_call("GET", path, **kwargs)
def rook_api_delete(self, path: str) -> Any:
return self.rook_api_call("DELETE", path)
def rook_api_patch(self, path: str, **kwargs: Any) -> Any:
return self.rook_api_call("PATCH", path,
header_params={"Content-Type": "application/json-patch+json"},
**kwargs)
def rook_api_post(self, path: str, **kwargs: Any) -> Any:
return self.rook_api_call("POST", path, **kwargs)
def get_storage_class(self) -> 'client.V1StorageClass':
matching_sc = [i for i in self.storage_classes.items if self.storage_class == i.metadata.name]
if len(matching_sc) == 0:
log.error(f"No storage class exists matching configured Rook orchestrator storage class which currently is <{self.storage_class}>. This storage class can be set in ceph config (mgr/rook/storage_class)")
raise Exception('No storage class exists matching name provided in ceph config at mgr/rook/storage_class')
return matching_sc[0]
def get_discovered_devices(self, nodenames: Optional[List[str]] = None) -> Dict[str, List[Device]]:
self.fetcher: Optional[DefaultFetcher] = None
op_settings = self.coreV1_api.read_namespaced_config_map(name="rook-ceph-operator-config", namespace='rook-ceph').data
if op_settings.get('ROOK_ENABLE_DISCOVERY_DAEMON', 'false').lower() == 'true':
self.fetcher = PDFetcher(self.coreV1_api)
else:
storage_class = self.get_storage_class()
if storage_class.metadata.labels and ('local.storage.openshift.io/owner-name' in storage_class.metadata.labels):
self.fetcher = LSOFetcher(self.storage_class, self.coreV1_api, self.customObjects_api, nodenames)
else:
self.fetcher = DefaultFetcher(self.storage_class, self.coreV1_api)
self.fetcher.fetch()
return self.fetcher.devices()
def get_osds(self) -> List:
osd_pods: KubernetesResource = KubernetesResource(self.coreV1_api.list_namespaced_pod,
namespace=self.rook_env.namespace,
label_selector='app=rook-ceph-osd')
return list(osd_pods.items)
def get_nfs_conf_url(self, nfs_cluster: str, instance: str) -> Optional[str]:
#
# Fetch cephnfs object for "nfs_cluster" and then return a rados://
# URL for the instance within that cluster. If the fetch fails, just
# return None.
#
try:
ceph_nfs = self.rook_api_get("cephnfses/{0}".format(nfs_cluster))
except ApiException as e:
log.info("Unable to fetch cephnfs object: {}".format(e.status))
return None
pool = ceph_nfs['spec']['rados']['pool']
namespace = ceph_nfs['spec']['rados'].get('namespace', None)
        if namespace is None:
url = "rados://{0}/conf-{1}.{2}".format(pool, nfs_cluster, instance)
else:
url = "rados://{0}/{1}/conf-{2}.{3}".format(pool, namespace, nfs_cluster, instance)
return url
def describe_pods(self,
service_type: Optional[str],
service_id: Optional[str],
nodename: Optional[str]) -> List[Dict[str, Any]]:
"""
Go query the k8s API about deployment, containers related to this
filesystem
Example Rook Pod labels for a mgr daemon:
Labels: app=rook-ceph-mgr
pod-template-hash=2171958073
rook_cluster=rook
And MDS containers additionally have `rook_filesystem` label
Label filter is rook_cluster=<cluster namespace>
rook_file_system=<self.fs_name>
"""
def predicate(item):
# type: (client.V1Pod) -> bool
metadata = item.metadata
if service_type is not None:
if metadata.labels['app'] != "rook-ceph-{0}".format(service_type):
return False
if service_id is not None:
try:
k, v = {
"mds": ("rook_file_system", service_id),
"osd": ("ceph-osd-id", service_id),
"mon": ("mon", service_id),
"mgr": ("mgr", service_id),
"nfs": ("nfs", service_id),
"rgw": ("ceph_rgw", service_id),
}[service_type]
except KeyError:
raise orchestrator.OrchestratorValidationError(
'{} not supported'.format(service_type))
if metadata.labels[k] != v:
return False
if nodename is not None:
if item.spec.node_name != nodename:
return False
return True
refreshed = datetime_now()
pods = [i for i in self.rook_pods.items if predicate(i)]
pods_summary = []
prefix = 'sha256:'
for p in pods:
d = p.to_dict()
image_name = None
for c in d['spec']['containers']:
# look at the first listed container in the pod...
image_name = c['image']
break
ls = d['status'].get('container_statuses')
if not ls:
# ignore pods with no containers
continue
image_id = ls[0]['image_id']
image_id = image_id.split(prefix)[1] if prefix in image_id else image_id
s = {
"name": d['metadata']['name'],
"hostname": d['spec']['node_name'],
"labels": d['metadata']['labels'],
'phase': d['status']['phase'],
'container_image_name': image_name,
'container_image_id': image_id,
'refreshed': refreshed,
# these may get set below...
'started': None,
'created': None,
}
# note: we want UTC
if d['metadata'].get('creation_timestamp', None):
s['created'] = d['metadata']['creation_timestamp'].astimezone(
tz=datetime.timezone.utc)
if d['status'].get('start_time', None):
s['started'] = d['status']['start_time'].astimezone(
tz=datetime.timezone.utc)
pods_summary.append(s)
return pods_summary
def remove_pods(self, names: List[str]) -> List[str]:
pods = [i for i in self.rook_pods.items]
for p in pods:
d = p.to_dict()
daemon_type = d['metadata']['labels']['app'].replace('rook-ceph-','')
daemon_id = d['metadata']['labels']['ceph_daemon_id']
name = daemon_type + '.' + daemon_id
if name in names:
self.coreV1_api.delete_namespaced_pod(
d['metadata']['name'],
self.rook_env.namespace,
body=client.V1DeleteOptions()
)
return [f'Removed Pod {n}' for n in names]
def get_node_names(self) -> List[str]:
return [i.metadata.name for i in self.nodes.items]
@contextmanager
def ignore_409(self, what: str) -> Iterator[None]:
try:
yield
except ApiException as e:
if e.status == 409:
# Idempotent, succeed.
log.info("{} already exists".format(what))
else:
raise
def apply_filesystem(self, spec: ServiceSpec, num_replicas: int,
leaf_type: str) -> str:
# TODO use spec.placement
        # TODO warn if spec.extended has entries we don't know how
# to action.
all_hosts = self.get_hosts()
def _update_fs(new: cfs.CephFilesystem) -> cfs.CephFilesystem:
new.spec.metadataServer.activeCount = spec.placement.count or 1
new.spec.metadataServer.placement = cfs.Placement(
nodeAffinity=cfs.NodeAffinity(
requiredDuringSchedulingIgnoredDuringExecution=cfs.RequiredDuringSchedulingIgnoredDuringExecution(
nodeSelectorTerms=cfs.NodeSelectorTermsList(
[placement_spec_to_node_selector(spec.placement, all_hosts)]
)
)
)
)
return new
def _create_fs() -> cfs.CephFilesystem:
fs = cfs.CephFilesystem(
apiVersion=self.rook_env.api_name,
metadata=dict(
name=spec.service_id,
namespace=self.rook_env.namespace,
),
spec=cfs.Spec(
dataPools=cfs.DataPoolsList(
{
cfs.DataPoolsItem(
failureDomain=leaf_type,
replicated=cfs.Replicated(
size=num_replicas
)
)
}
),
metadataPool=cfs.MetadataPool(
failureDomain=leaf_type,
replicated=cfs.Replicated(
size=num_replicas
)
),
metadataServer=cfs.MetadataServer(
activeCount=spec.placement.count or 1,
activeStandby=True,
                        placement=cfs.Placement(
nodeAffinity=cfs.NodeAffinity(
requiredDuringSchedulingIgnoredDuringExecution=cfs.RequiredDuringSchedulingIgnoredDuringExecution(
nodeSelectorTerms=cfs.NodeSelectorTermsList(
[placement_spec_to_node_selector(spec.placement, all_hosts)]
)
)
)
)
)
)
)
return fs
assert spec.service_id is not None
return self._create_or_patch(
cfs.CephFilesystem, 'cephfilesystems', spec.service_id,
_update_fs, _create_fs)
def get_matching_node(self, host: str) -> Any:
matching_node = None
for node in self.nodes.items:
if node.metadata.labels['kubernetes.io/hostname'] == host:
matching_node = node
return matching_node
def add_host_label(self, host: str, label: str) -> OrchResult[str]:
matching_node = self.get_matching_node(host)
        if matching_node is None:
return OrchResult(None, RuntimeError(f"Cannot add {label} label to {host}: host not found in cluster"))
matching_node.metadata.labels['ceph-label/'+ label] = ""
self.coreV1_api.patch_node(host, matching_node)
return OrchResult(f'Added {label} label to {host}')
def remove_host_label(self, host: str, label: str) -> OrchResult[str]:
matching_node = self.get_matching_node(host)
        if matching_node is None:
return OrchResult(None, RuntimeError(f"Cannot remove {label} label from {host}: host not found in cluster"))
matching_node.metadata.labels.pop('ceph-label/' + label, None)
self.coreV1_api.patch_node(host, matching_node)
return OrchResult(f'Removed {label} label from {host}')
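    # Label handling sketch: `ceph orch host label add <host> mylabel` lands
    # here and sets the Kubernetes node label 'ceph-label/mylabel'; get_hosts()
    # strips the 'ceph-label/' prefix again, so the orchestrator reports the
    # host as carrying 'mylabel'. ('mylabel' is just an example name.)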
def apply_objectstore(self, spec: RGWSpec, num_replicas: int, leaf_type: str) -> str:
assert spec.service_id is not None
name = spec.service_id
if '.' in spec.service_id:
            # Rook does not like '.' in the name. It could be
            # there because this is a legacy rgw spec that was named
# like $realm.$zone, except that I doubt there were any
# users of this code. Instead, focus on future users and
# translate . to - (fingers crossed!) instead.
name = spec.service_id.replace('.', '-')
all_hosts = self.get_hosts()
def _create_zone() -> cos.CephObjectStore:
port = None
secure_port = None
if spec.ssl:
secure_port = spec.get_port()
else:
port = spec.get_port()
object_store = cos.CephObjectStore(
apiVersion=self.rook_env.api_name,
metadata=dict(
name=name,
namespace=self.rook_env.namespace
),
spec=cos.Spec(
gateway=cos.Gateway(
port=port,
securePort=secure_port,
instances=spec.placement.count or 1,
placement=cos.Placement(
cos.NodeAffinity(
requiredDuringSchedulingIgnoredDuringExecution=cos.RequiredDuringSchedulingIgnoredDuringExecution(
nodeSelectorTerms=cos.NodeSelectorTermsList(
[
placement_spec_to_node_selector(spec.placement, all_hosts)
]
)
)
)
)
),
dataPool=cos.DataPool(
failureDomain=leaf_type,
replicated=cos.Replicated(
size=num_replicas
)
),
metadataPool=cos.MetadataPool(
failureDomain=leaf_type,
replicated=cos.Replicated(
size=num_replicas
)
)
)
)
if spec.rgw_zone:
object_store.spec.zone=cos.Zone(
name=spec.rgw_zone
)
return object_store
def _update_zone(new: cos.CephObjectStore) -> cos.CephObjectStore:
if new.spec.gateway:
new.spec.gateway.instances = spec.placement.count or 1
else:
new.spec.gateway=cos.Gateway(
instances=spec.placement.count or 1
)
return new
return self._create_or_patch(
cos.CephObjectStore, 'cephobjectstores', name,
_update_zone, _create_zone)
def apply_nfsgw(self, spec: NFSServiceSpec, mgr: 'RookOrchestrator') -> str:
# TODO use spec.placement
        # TODO warn if spec.extended has entries we don't know how
# to action.
# TODO Number of pods should be based on the list of hosts in the
# PlacementSpec.
assert spec.service_id, "service id in NFS service spec cannot be an empty string or None " # for mypy typing
service_id = spec.service_id
mgr_module = cast(Module, mgr)
count = spec.placement.count or 1
def _update_nfs(new: cnfs.CephNFS) -> cnfs.CephNFS:
new.spec.server.active = count
return new
def _create_nfs() -> cnfs.CephNFS:
rook_nfsgw = cnfs.CephNFS(
apiVersion=self.rook_env.api_name,
metadata=dict(
name=spec.service_id,
namespace=self.rook_env.namespace,
),
spec=cnfs.Spec(
rados=cnfs.Rados(
namespace=service_id,
pool=NFS_POOL_NAME,
),
server=cnfs.Server(
active=count
)
)
)
return rook_nfsgw
create_ganesha_pool(mgr)
NFSRados(mgr_module.rados, service_id).write_obj('', f'conf-nfs.{spec.service_id}')
return self._create_or_patch(cnfs.CephNFS, 'cephnfses', service_id,
_update_nfs, _create_nfs)
def rm_service(self, rooktype: str, service_id: str) -> str:
self.customObjects_api.delete_namespaced_custom_object(group="ceph.rook.io", version="v1",
namespace=self.rook_env.namespace,
plural=rooktype, name=service_id)
objpath = "{0}/{1}".format(rooktype, service_id)
return f'Removed {objpath}'
def get_resource(self, resource_type: str) -> Iterable:
custom_objects: KubernetesCustomResource = KubernetesCustomResource(self.customObjects_api.list_namespaced_custom_object,
group="ceph.rook.io",
version="v1",
namespace=self.rook_env.namespace,
plural=resource_type)
return custom_objects.items
def can_create_osd(self) -> bool:
current_cluster = self.rook_api_get(
"cephclusters/{0}".format(self.rook_env.cluster_name))
use_all_nodes = current_cluster['spec'].get('useAllNodes', False)
# If useAllNodes is set, then Rook will not be paying attention
# to anything we put in 'nodes', so can't do OSD creation.
return not use_all_nodes
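    # Illustrative: if the CephCluster CR sets spec.useAllNodes: true, Rook
    # already consumes devices on every node itself, so this returns False and
    # drive-group based OSD creation is refused by the orchestrator module.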
def node_exists(self, node_name: str) -> bool:
return node_name in self.get_node_names()
def update_mon_count(self, newcount: Optional[int]) -> str:
def _update_mon_count(current, new):
# type: (ccl.CephCluster, ccl.CephCluster) -> ccl.CephCluster
if newcount is None:
raise orchestrator.OrchestratorError('unable to set mon count to None')
if not new.spec.mon:
raise orchestrator.OrchestratorError("mon attribute not specified in new spec")
new.spec.mon.count = newcount
return new
return self._patch(ccl.CephCluster, 'cephclusters', self.rook_env.cluster_name, _update_mon_count)
def add_osds(self, drive_group, matching_hosts):
# type: (DriveGroupSpec, List[str]) -> str
assert drive_group.objectstore in ("bluestore", "filestore")
assert drive_group.service_id
storage_class = self.get_storage_class()
inventory = self.get_discovered_devices()
creator: Optional[DefaultCreator] = None
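        # storage classes provisioned by the Local Storage Operator (LSO) carry
        # an 'owner-name' label; use the LSO-aware creator for those, otherwise
        # fall back to the default creator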
if (
storage_class.metadata.labels
and 'local.storage.openshift.io/owner-name' in storage_class.metadata.labels
):
creator = LSOCreator(inventory, self.coreV1_api, self.storage_class)
else:
creator = DefaultCreator(inventory, self.coreV1_api, self.storage_class)
return self._patch(
ccl.CephCluster,
'cephclusters',
self.rook_env.cluster_name,
creator.add_osds(self.rook_pods, drive_group, matching_hosts)
)
def remove_osds(self, osd_ids: List[str], replace: bool, force: bool, mon_command: Callable) -> str:
inventory = self.get_discovered_devices()
self.remover = DefaultRemover(
self.coreV1_api,
self.batchV1_api,
self.appsV1_api,
osd_ids,
replace,
force,
mon_command,
self._patch,
self.rook_env,
inventory
)
return self.remover.remove()
def get_hosts(self) -> List[orchestrator.HostSpec]:
ret = []
for node in self.nodes.items:
spec = orchestrator.HostSpec(
node.metadata.name,
addr='/'.join([addr.address for addr in node.status.addresses]),
labels=[label.split('/')[1] for label in node.metadata.labels if label.startswith('ceph-label')],
)
ret.append(spec)
return ret
def create_zap_job(self, host: str, path: str) -> None:
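        # one-shot Job pinned to the target host: try a raw wipe first
        # (ceph-volume raw list + dd), fall back to ceph-volume lvm zap --destroy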
body = client.V1Job(
api_version="batch/v1",
metadata=client.V1ObjectMeta(
name="rook-ceph-device-zap",
namespace=self.rook_env.namespace
),
spec=client.V1JobSpec(
template=client.V1PodTemplateSpec(
spec=client.V1PodSpec(
containers=[
client.V1Container(
name="device-zap",
image="rook/ceph:master",
command=["bash"],
args=["-c", f"ceph-volume raw list {path} && dd if=/dev/zero of=\"{path}\" bs=1M count=1 oflag=direct,dsync || ceph-volume lvm zap --destroy {path}"],
env=[
client.V1EnvVar(
name="ROOK_CEPH_USERNAME",
value_from=client.V1EnvVarSource(
secret_key_ref=client.V1SecretKeySelector(
key="ceph-username",
name="rook-ceph-mon"
)
)
),
client.V1EnvVar(
name="ROOK_CEPH_SECRET",
value_from=client.V1EnvVarSource(
secret_key_ref=client.V1SecretKeySelector(
key="ceph-secret",
name="rook-ceph-mon"
)
)
)
],
security_context=client.V1SecurityContext(
run_as_user=0,
privileged=True
),
volume_mounts=[
client.V1VolumeMount(
mount_path="/etc/ceph",
name="ceph-conf-emptydir"
),
client.V1VolumeMount(
mount_path="/etc/rook",
name="rook-config"
),
client.V1VolumeMount(
mount_path="/dev",
name="devices"
)
]
)
],
volumes=[
client.V1Volume(
name="ceph-conf-emptydir",
empty_dir=client.V1EmptyDirVolumeSource()
),
client.V1Volume(
name="rook-config",
empty_dir=client.V1EmptyDirVolumeSource()
),
client.V1Volume(
name="devices",
host_path=client.V1HostPathVolumeSource(
path="/dev"
)
),
],
node_selector={
"kubernetes.io/hostname": host
},
restart_policy="Never"
)
)
)
)
self.batchV1_api.create_namespaced_job(self.rook_env.namespace, body)
def rbd_mirror(self, spec: ServiceSpec) -> None:
service_id = spec.service_id or "default-rbd-mirror"
all_hosts = self.get_hosts()
def _create_rbd_mirror() -> crbdm.CephRBDMirror:
return crbdm.CephRBDMirror(
apiVersion=self.rook_env.api_name,
metadata=dict(
name=service_id,
namespace=self.rook_env.namespace,
),
spec=crbdm.Spec(
count=spec.placement.count or 1,
placement=crbdm.Placement(
nodeAffinity=crbdm.NodeAffinity(
requiredDuringSchedulingIgnoredDuringExecution=crbdm.RequiredDuringSchedulingIgnoredDuringExecution(
nodeSelectorTerms=crbdm.NodeSelectorTermsList(
[
placement_spec_to_node_selector(spec.placement, all_hosts)
]
)
)
)
)
)
)
def _update_rbd_mirror(new: crbdm.CephRBDMirror) -> crbdm.CephRBDMirror:
new.spec.count = spec.placement.count or 1
new.spec.placement = crbdm.Placement(
nodeAffinity=crbdm.NodeAffinity(
requiredDuringSchedulingIgnoredDuringExecution=crbdm.RequiredDuringSchedulingIgnoredDuringExecution(
nodeSelectorTerms=crbdm.NodeSelectorTermsList(
[
placement_spec_to_node_selector(spec.placement, all_hosts)
]
)
)
)
)
return new
self._create_or_patch(crbdm.CephRBDMirror, 'cephrbdmirrors', service_id, _update_rbd_mirror, _create_rbd_mirror)
def _patch(self, crd: Type, crd_name: str, cr_name: str, func: Callable[[CrdClassT, CrdClassT], CrdClassT]) -> str:
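        # read-modify-write: fetch the CR, let `func` mutate a freshly parsed
        # copy, then send the resulting JSON patch (if any) back to the API server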
current_json = self.rook_api_get(
"{}/{}".format(crd_name, cr_name)
)
current = crd.from_json(current_json)
        new = crd.from_json(current_json)  # re-parse instead of deepcopy so mutations to 'new' leave 'current' untouched
new = func(current, new)
patch = list(jsonpatch.make_patch(current_json, new.to_json()))
log.info('patch for {}/{}: \n{}'.format(crd_name, cr_name, patch))
if len(patch) == 0:
return "No change"
try:
self.rook_api_patch(
"{}/{}".format(crd_name, cr_name),
body=patch)
except ApiException as e:
log.exception("API exception: {0}".format(e))
raise ApplyException(
"Failed to update {}/{}: {}".format(crd_name, cr_name, e))
return "Success"
def _create_or_patch(self,
crd: Type,
crd_name: str,
cr_name: str,
update_func: Callable[[CrdClassT], CrdClassT],
create_func: Callable[[], CrdClassT]) -> str:
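        # like _patch(), but falls back to creating the CR with create_func()
        # when it does not exist yet (HTTP 404)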
try:
current_json = self.rook_api_get(
"{}/{}".format(crd_name, cr_name)
)
except ApiException as e:
if e.status == 404:
current_json = None
else:
raise
if current_json:
new = crd.from_json(current_json) # no deepcopy.
new = update_func(new)
patch = list(jsonpatch.make_patch(current_json, new.to_json()))
log.info('patch for {}/{}: \n{}'.format(crd_name, cr_name, patch))
if len(patch) == 0:
return "No change"
try:
self.rook_api_patch(
"{}/{}".format(crd_name, cr_name),
body=patch)
except ApiException as e:
log.exception("API exception: {0}".format(e))
raise ApplyException(
"Failed to update {}/{}: {}".format(crd_name, cr_name, e))
return "Updated"
else:
new = create_func()
with self.ignore_409("{} {} already exists".format(crd_name,
cr_name)):
self.rook_api_post("{}/".format(crd_name),
body=new.to_json())
return "Created"
def get_ceph_image(self) -> str:
try:
api_response = self.coreV1_api.list_namespaced_pod(self.rook_env.namespace,
label_selector="app=rook-ceph-mon",
timeout_seconds=10)
if api_response.items:
return api_response.items[-1].spec.containers[0].image
else:
raise orchestrator.OrchestratorError(
"Error getting ceph image. Cluster without monitors")
except ApiException as e:
raise orchestrator.OrchestratorError("Error getting ceph image: {}".format(e))
def _execute_blight_job(self, ident_fault: str, on: bool, loc: orchestrator.DeviceLightLoc) -> str:
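        # run a short-lived Job on the target host that invokes lsmcli to
        # toggle the ident/fault LED, then return the pod log as the result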
operation_id = str(hash(loc))
message = ""
# job definition
job_metadata = client.V1ObjectMeta(name=operation_id,
namespace= self.rook_env.namespace,
labels={"ident": operation_id})
pod_metadata = client.V1ObjectMeta(labels={"ident": operation_id})
pod_container = client.V1Container(name="ceph-lsmcli-command",
security_context=client.V1SecurityContext(privileged=True),
image=self.get_ceph_image(),
command=["lsmcli",],
args=['local-disk-%s-led-%s' % (ident_fault,'on' if on else 'off'),
'--path', loc.path or loc.dev,],
volume_mounts=[client.V1VolumeMount(name="devices", mount_path="/dev"),
client.V1VolumeMount(name="run-udev", mount_path="/run/udev")])
pod_spec = client.V1PodSpec(containers=[pod_container],
active_deadline_seconds=30, # Max time to terminate pod
restart_policy="Never",
node_selector= {"kubernetes.io/hostname": loc.host},
volumes=[client.V1Volume(name="devices",
host_path=client.V1HostPathVolumeSource(path="/dev")),
client.V1Volume(name="run-udev",
host_path=client.V1HostPathVolumeSource(path="/run/udev"))])
pod_template = client.V1PodTemplateSpec(metadata=pod_metadata,
spec=pod_spec)
job_spec = client.V1JobSpec(active_deadline_seconds=60, # Max time to terminate job
                                    ttl_seconds_after_finished=10,  # Alpha. Lifetime after finishing (either Complete or Failed)
backoff_limit=0,
template=pod_template)
job = client.V1Job(api_version="batch/v1",
kind="Job",
metadata=job_metadata,
spec=job_spec)
# delete previous job if it exists
try:
try:
api_response = self.batchV1_api.delete_namespaced_job(operation_id,
self.rook_env.namespace,
propagation_policy="Background")
except ApiException as e:
if e.status != 404: # No problem if the job does not exist
raise
# wait until the job is not present
deleted = False
retries = 0
while not deleted and retries < 10:
api_response = self.batchV1_api.list_namespaced_job(self.rook_env.namespace,
label_selector="ident=%s" % operation_id,
timeout_seconds=10)
deleted = not api_response.items
if retries > 5:
sleep(0.1)
retries += 1
if retries == 10 and not deleted:
raise orchestrator.OrchestratorError(
"Light <{}> in <{}:{}> cannot be executed. Cannot delete previous job <{}>".format(
on, loc.host, loc.path or loc.dev, operation_id))
# create the job
api_response = self.batchV1_api.create_namespaced_job(self.rook_env.namespace, job)
# get the result
finished = False
while not finished:
api_response = self.batchV1_api.read_namespaced_job(operation_id,
self.rook_env.namespace)
finished = api_response.status.succeeded or api_response.status.failed
if finished:
message = api_response.status.conditions[-1].message
# get the result of the lsmcli command
api_response=self.coreV1_api.list_namespaced_pod(self.rook_env.namespace,
label_selector="ident=%s" % operation_id,
timeout_seconds=10)
if api_response.items:
pod_name = api_response.items[-1].metadata.name
message = self.coreV1_api.read_namespaced_pod_log(pod_name,
self.rook_env.namespace)
except ApiException as e:
log.exception('K8s API failed. {}'.format(e))
raise
# Finally, delete the job.
        # The job sets <ttl_seconds_after_finished>, so the TTL controller should delete it automatically.
        # That feature is still in Alpha state, however, so explicit delete operations are issued here as well.
try:
api_response = self.batchV1_api.delete_namespaced_job(operation_id,
self.rook_env.namespace,
propagation_policy="Background")
except ApiException as e:
if e.status != 404: # No problem if the job does not exist
raise
return message
def blink_light(self, ident_fault, on, locs):
# type: (str, bool, List[orchestrator.DeviceLightLoc]) -> List[str]
return [self._execute_blight_job(ident_fault, on, loc) for loc in locs]
def placement_spec_to_node_selector(spec: PlacementSpec, all_hosts: List) -> ccl.NodeSelectorTermsItem:
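    # Translate an orchestrator PlacementSpec (label, explicit hosts or
    # host_pattern) into an equivalent Kubernetes nodeSelectorTerm.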
all_hostnames = [hs.hostname for hs in all_hosts]
res = ccl.NodeSelectorTermsItem(matchExpressions=ccl.MatchExpressionsList())
if spec.host_pattern and spec.host_pattern != "*":
raise RuntimeError("The Rook orchestrator only supports a host_pattern of * for placements")
if spec.label:
res.matchExpressions.append(
ccl.MatchExpressionsItem(
key="ceph-label/" + spec.label,
operator="Exists"
)
)
if spec.hosts:
host_list = [h.hostname for h in spec.hosts if h.hostname in all_hostnames]
res.matchExpressions.append(
ccl.MatchExpressionsItem(
key="kubernetes.io/hostname",
operator="In",
values=ccl.CrdObjectList(host_list)
)
)
if spec.host_pattern == "*" or (not spec.label and not spec.hosts and not spec.host_pattern):
res.matchExpressions.append(
ccl.MatchExpressionsItem(
key="kubernetes.io/hostname",
operator="Exists",
)
)
return res
def node_selector_to_placement_spec(node_selector: ccl.NodeSelectorTermsItem) -> PlacementSpec:
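    # Inverse of placement_spec_to_node_selector(): recover a PlacementSpec
    # from a nodeSelectorTerm's matchExpressions.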
res = PlacementSpec()
for expression in node_selector.matchExpressions:
if expression.key.startswith("ceph-label/"):
res.label = expression.key.split('/')[1]
elif expression.key == "kubernetes.io/hostname":
if expression.operator == "Exists":
res.host_pattern = "*"
elif expression.operator == "In":
                res.hosts = [HostPlacementSpec(hostname=value, network='', name='') for value in expression.values]
return res
| 69,901 | 43.637292 | 272 |
py
|
null |
ceph-main/src/pybind/mgr/rook/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
null |
ceph-main/src/pybind/mgr/rook/tests/test_placement.py
|
# flake8: noqa
from rook.rook_cluster import placement_spec_to_node_selector, node_selector_to_placement_spec
from rook.rook_client.ceph.cephcluster import MatchExpressionsItem, MatchExpressionsList, NodeSelectorTermsItem
import pytest
from orchestrator import HostSpec
from ceph.deployment.service_spec import PlacementSpec
@pytest.mark.parametrize("hosts",
[ # noqa: E128
[
HostSpec(
hostname="node1",
labels=["label1"]
),
HostSpec(
hostname="node2",
labels=[]
),
HostSpec(
hostname="node3",
labels=["label1"]
)
]
])
@pytest.mark.parametrize("expected_placement_spec, expected_node_selector",
[ # noqa: E128
(
PlacementSpec(
label="label1"
),
NodeSelectorTermsItem(
matchExpressions=MatchExpressionsList(
[
MatchExpressionsItem(
key="ceph-label/label1",
operator="Exists"
)
]
)
)
),
(
PlacementSpec(
label="label1",
host_pattern="*"
),
NodeSelectorTermsItem(
matchExpressions=MatchExpressionsList(
[
MatchExpressionsItem(
key="ceph-label/label1",
operator="Exists"
),
MatchExpressionsItem(
key="kubernetes.io/hostname",
operator="Exists",
)
]
)
)
),
(
PlacementSpec(
host_pattern="*"
),
NodeSelectorTermsItem(
matchExpressions=MatchExpressionsList(
[
MatchExpressionsItem(
key="kubernetes.io/hostname",
operator="Exists",
)
]
)
)
),
(
PlacementSpec(
hosts=["node1", "node2", "node3"]
),
NodeSelectorTermsItem(
matchExpressions=MatchExpressionsList(
[
MatchExpressionsItem(
key="kubernetes.io/hostname",
operator="In",
values=["node1", "node2", "node3"]
)
]
)
)
),
])
def test_placement_spec_translate(hosts, expected_placement_spec, expected_node_selector):
node_selector = placement_spec_to_node_selector(expected_placement_spec, hosts)
assert [(getattr(expression, 'key', None), getattr(expression, 'operator', None), getattr(expression, 'values', None)) for expression in node_selector.matchExpressions] == [(getattr(expression, 'key', None), getattr(expression, 'operator', None), getattr(expression, 'values', None)) for expression in expected_node_selector.matchExpressions]
placement_spec = node_selector_to_placement_spec(expected_node_selector)
assert placement_spec == expected_placement_spec
assert (getattr(placement_spec, 'label', None), getattr(placement_spec, 'hosts', None), getattr(placement_spec, 'host_pattern', None)) == (getattr(expected_placement_spec, 'label', None), getattr(expected_placement_spec, 'hosts', None), getattr(expected_placement_spec, 'host_pattern', None))
| 3,800 | 36.633663 | 346 |
py
|
null |
ceph-main/src/pybind/mgr/selftest/__init__.py
|
# flake8: noqa
from .module import Module
| 42 | 13.333333 | 26 |
py
|
null |
ceph-main/src/pybind/mgr/selftest/module.py
|
from mgr_module import MgrModule, CommandResult, HandleCommandResult, CLICommand, Option
import enum
import json
import random
import sys
import threading
from code import InteractiveInterpreter
from contextlib import redirect_stderr, redirect_stdout
from io import StringIO
from typing import Any, Dict, List, Optional, Tuple
# These workloads are things that can be requested to run inside the
# serve() function
class Workload(enum.Enum):
COMMAND_SPAM = 'command_spam'
THROW_EXCEPTION = 'throw_exception'
SHUTDOWN = 'shutdown'
class Module(MgrModule):
"""
This module is for testing the ceph-mgr python interface from within
a running ceph-mgr daemon.
    It implements a synchronous self-test command for calling the functions
in the MgrModule interface one by one, and a background "workload"
command for causing the module to perform some thrashing-type
activities in its serve() thread.
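
    For example, from the Ceph CLI:

      ceph mgr self-test run
      ceph mgr self-test background start command_spam
      ceph mgr self-test background stop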
"""
# The test code in qa/ relies on these options existing -- they
# are of course not really used for anything in the module
MODULE_OPTIONS = [
Option(name='testkey'),
Option(name='testlkey'),
Option(name='testnewline'),
Option(name='roption1'),
Option(name='roption2',
type='str',
default='xyz'),
Option(name='rwoption1'),
Option(name='rwoption2',
type='int'),
Option(name='rwoption3',
type='float'),
Option(name='rwoption4',
type='str'),
Option(name='rwoption5',
type='bool'),
Option(name='rwoption6',
type='bool',
default=True),
Option(name='rwoption7',
type='int',
min=1,
max=42),
]
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(Module, self).__init__(*args, **kwargs)
self._event = threading.Event()
self._workload: Optional[Workload] = None
self._health: Dict[str, Dict[str, Any]] = {}
self._repl = InteractiveInterpreter(dict(mgr=self))
@CLICommand('mgr self-test python-version', perm='r')
def python_version(self) -> Tuple[int, str, str]:
'''
Query the version of the embedded Python runtime
'''
major = sys.version_info.major
minor = sys.version_info.minor
micro = sys.version_info.micro
return 0, f'{major}.{minor}.{micro}', ''
@CLICommand('mgr self-test run')
def run(self) -> Tuple[int, str, str]:
'''
Run mgr python interface tests
'''
self._self_test()
return 0, '', 'Self-test succeeded'
@CLICommand('mgr self-test background start')
    def background_start(self, workload: Workload) -> Tuple[int, str, str]:
'''
Activate a background workload (one of command_spam, throw_exception)
'''
self._workload = workload
self._event.set()
return 0, '', 'Running `{0}` in background'.format(self._workload)
@CLICommand('mgr self-test background stop')
def background_stop(self) -> Tuple[int, str, str]:
'''
Stop background workload if any is running
'''
if self._workload:
was_running = self._workload
self._workload = None
self._event.set()
return 0, '', 'Stopping background workload `{0}`'.format(
was_running)
else:
return 0, '', 'No background workload was running'
@CLICommand('mgr self-test config get')
def config_get(self, key: str) -> Tuple[int, str, str]:
'''
Peek at a configuration value
'''
return 0, str(self.get_module_option(key)), ''
@CLICommand('mgr self-test config get_localized')
def config_get_localized(self, key: str) -> Tuple[int, str, str]:
'''
Peek at a configuration value (localized variant)
'''
return 0, str(self.get_localized_module_option(key)), ''
@CLICommand('mgr self-test remote')
def test_remote(self) -> Tuple[int, str, str]:
'''
Test inter-module calls
'''
self._test_remote_calls()
return 0, '', 'Successfully called'
@CLICommand('mgr self-test module')
def module(self, module: str) -> Tuple[int, str, str]:
'''
Run another module's self_test() method
'''
try:
r = self.remote(module, "self_test")
except RuntimeError as e:
return -1, '', "Test failed: {0}".format(e)
else:
return 0, str(r), "Self-test OK"
@CLICommand('mgr self-test cluster-log')
def do_cluster_log(self,
channel: str,
priority: str,
message: str) -> Tuple[int, str, str]:
'''
Create an audit log record.
'''
priority_map = {
'info': self.ClusterLogPrio.INFO,
'security': self.ClusterLogPrio.SEC,
'warning': self.ClusterLogPrio.WARN,
'error': self.ClusterLogPrio.ERROR
}
self.cluster_log(channel,
priority_map[priority],
message)
return 0, '', 'Successfully called'
@CLICommand('mgr self-test health set')
def health_set(self, checks: str) -> Tuple[int, str, str]:
'''
Set a health check from a JSON-formatted description.
'''
try:
health_check = json.loads(checks)
except Exception as e:
return -1, "", "Failed to decode JSON input: {}".format(e)
try:
for check, info in health_check.items():
self._health[check] = {
"severity": str(info["severity"]),
"summary": str(info["summary"]),
"count": 123,
"detail": [str(m) for m in info["detail"]]
}
except Exception as e:
return -1, "", "Invalid health check format: {}".format(e)
self.set_health_checks(self._health)
return 0, "", ""
@CLICommand('mgr self-test health clear')
def health_clear(self, checks: Optional[List[str]] = None) -> Tuple[int, str, str]:
'''
Clear health checks by name. If no names provided, clear all.
'''
if checks is not None:
for check in checks:
if check in self._health:
del self._health[check]
else:
self._health = dict()
self.set_health_checks(self._health)
return 0, "", ""
@CLICommand('mgr self-test insights_set_now_offset')
def insights_set_now_offset(self, hours: int) -> Tuple[int, str, str]:
'''
Set the now time for the insights module.
'''
self.remote("insights", "testing_set_now_time_offset", hours)
return 0, "", ""
def _self_test(self) -> None:
self.log.info("Running self-test procedure...")
self._self_test_osdmap()
self._self_test_getters()
self._self_test_config()
self._self_test_store()
self._self_test_misc()
self._self_test_perf_counters()
def _self_test_getters(self) -> None:
self.version
self.get_context()
self.get_mgr_id()
# In this function, we will assume that the system is in a steady
# state, i.e. if a server/service appears in one call, it will
# not have gone by the time we call another function referring to it
objects = [
"fs_map",
"osdmap_crush_map_text",
"osd_map",
"config",
"mon_map",
"service_map",
"osd_metadata",
"pg_summary",
"pg_status",
"pg_dump",
"pg_ready",
"df",
"pg_stats",
"pool_stats",
"osd_stats",
"osd_ping_times",
"health",
"mon_status",
"mgr_map"
]
for obj in objects:
assert self.get(obj) is not None
assert self.get("__OBJ_DNE__") is None
servers = self.list_servers()
for server in servers:
self.get_server(server['hostname']) # type: ignore
osdmap = self.get('osd_map')
for o in osdmap['osds']:
osd_id = o['osd']
self.get_metadata("osd", str(osd_id))
self.get_daemon_status("osd", "0")
def _self_test_config(self) -> None:
# This is not a strong test (can't tell if values really
# persisted), it's just for the python interface bit.
self.set_module_option("testkey", "testvalue")
assert self.get_module_option("testkey") == "testvalue"
self.set_localized_module_option("testkey", "foo")
assert self.get_localized_module_option("testkey") == "foo"
# Must return the default value defined in MODULE_OPTIONS.
value = self.get_localized_module_option("rwoption6")
assert isinstance(value, bool)
assert value is True
# Use default value.
assert self.get_module_option("roption1") is None
assert self.get_module_option("roption1", "foobar") == "foobar"
assert self.get_module_option("roption2") == "xyz"
assert self.get_module_option("roption2", "foobar") == "xyz"
# Option type is not defined => return as string.
self.set_module_option("rwoption1", 8080)
value = self.get_module_option("rwoption1")
assert isinstance(value, str)
assert value == "8080"
# Option type is defined => return as integer.
self.set_module_option("rwoption2", 10)
value = self.get_module_option("rwoption2")
assert isinstance(value, int)
assert value == 10
# Option type is defined => return as float.
self.set_module_option("rwoption3", 1.5)
value = self.get_module_option("rwoption3")
assert isinstance(value, float)
assert value == 1.5
# Option type is defined => return as string.
self.set_module_option("rwoption4", "foo")
value = self.get_module_option("rwoption4")
assert isinstance(value, str)
assert value == "foo"
# Option type is defined => return as bool.
self.set_module_option("rwoption5", False)
value = self.get_module_option("rwoption5")
assert isinstance(value, bool)
assert value is False
# Option value range is specified
try:
self.set_module_option("rwoption7", 43)
except Exception as e:
assert isinstance(e, ValueError)
else:
message = "should raise if value is not in specified range"
assert False, message
# Specified module does not exist => return None.
assert self.get_module_option_ex("foo", "bar") is None
# Specified key does not exist => return None.
assert self.get_module_option_ex("dashboard", "bar") is None
self.set_module_option_ex("telemetry", "contact", "[email protected]")
assert self.get_module_option_ex("telemetry", "contact") == "[email protected]"
# No option default value, so use the specified one.
assert self.get_module_option_ex("dashboard", "password") is None
assert self.get_module_option_ex("dashboard", "password", "foobar") == "foobar"
# Option type is not defined => return as string.
self.set_module_option_ex("selftest", "rwoption1", 1234)
value = self.get_module_option_ex("selftest", "rwoption1")
assert isinstance(value, str)
assert value == "1234"
# Option type is defined => return as integer.
self.set_module_option_ex("telemetry", "interval", 60)
value = self.get_module_option_ex("telemetry", "interval")
assert isinstance(value, int)
assert value == 60
# Option type is defined => return as bool.
self.set_module_option_ex("telemetry", "leaderboard", True)
value = self.get_module_option_ex("telemetry", "leaderboard")
assert isinstance(value, bool)
assert value is True
def _self_test_store(self) -> None:
existing_keys = set(self.get_store_prefix("test").keys())
self.set_store("testkey", "testvalue")
assert self.get_store("testkey") == "testvalue"
assert (set(self.get_store_prefix("test").keys())
== {"testkey"} | existing_keys)
def _self_test_perf_counters(self) -> None:
self.get_perf_schema("osd", "0")
self.get_counter("osd", "0", "osd.op")
# get_counter
        # get_all_perf_counters
def _self_test_misc(self) -> None:
self.set_uri("http://this.is.a.test.com")
self.set_health_checks({})
def _self_test_osdmap(self) -> None:
osdmap = self.get_osdmap()
osdmap.get_epoch()
osdmap.get_crush_version()
osdmap.dump()
inc = osdmap.new_incremental()
osdmap.apply_incremental(inc)
inc.get_epoch()
inc.dump()
crush = osdmap.get_crush()
crush.dump()
crush.get_item_name(-1)
crush.get_item_weight(-1)
crush.find_takes()
crush.get_take_weight_osd_map(-1)
# osdmap.get_pools_by_take()
# osdmap.calc_pg_upmaps()
# osdmap.map_pools_pgs_up()
# inc.set_osd_reweights
# inc.set_crush_compat_weight_set_weights
self.log.info("Finished self-test procedure.")
def _test_remote_calls(self) -> None:
# Test making valid call
self.remote("influx", "self_test")
# Test calling module that exists but isn't enabled
# (arbitrarily pick a non-always-on module to use)
disabled_module = "telegraf"
mgr_map = self.get("mgr_map")
assert disabled_module not in mgr_map['modules']
# (This works until the Z release in about 2027)
latest_release = sorted(mgr_map['always_on_modules'].keys())[-1]
assert disabled_module not in mgr_map['always_on_modules'][latest_release]
try:
self.remote(disabled_module, "handle_command", {"prefix": "influx self-test"})
except ImportError:
pass
else:
raise RuntimeError("ImportError not raised for disabled module")
# Test calling module that doesn't exist
try:
self.remote("idontexist", "self_test")
except ImportError:
pass
else:
raise RuntimeError("ImportError not raised for nonexistent module")
# Test calling method that doesn't exist
try:
self.remote("influx", "idontexist")
except NameError:
pass
else:
raise RuntimeError("KeyError not raised")
def remote_from_orchestrator_cli_self_test(self, what: str) -> Any:
import orchestrator
if what == 'OrchestratorError':
return orchestrator.OrchResult(result=None, exception=orchestrator.OrchestratorError('hello, world'))
elif what == "ZeroDivisionError":
return orchestrator.OrchResult(result=None, exception=ZeroDivisionError('hello, world'))
assert False, repr(what)
def shutdown(self) -> None:
self._workload = Workload.SHUTDOWN
self._event.set()
def _command_spam(self) -> None:
self.log.info("Starting command_spam workload...")
while not self._event.is_set():
osdmap = self.get_osdmap()
dump = osdmap.dump()
count = len(dump['osds'])
i = int(random.random() * count)
w = random.random()
result = CommandResult('')
self.send_command(result, 'mon', '', json.dumps({
'prefix': 'osd reweight',
'id': i,
'weight': w}), '')
_ = osdmap.get_crush().dump()
r, outb, outs = result.wait()
self._event.clear()
self.log.info("Ended command_spam workload...")
@CLICommand('mgr self-test eval')
def eval(self,
s: Optional[str] = None,
inbuf: Optional[str] = None) -> HandleCommandResult:
'''
eval given source
'''
source = s or inbuf
if source is None:
return HandleCommandResult(-1, '', 'source is not specified')
err = StringIO()
out = StringIO()
with redirect_stderr(err), redirect_stdout(out):
needs_more = self._repl.runsource(source)
if needs_more:
retval = 2
stdout = ''
stderr = ''
else:
retval = 0
stdout = out.getvalue()
stderr = err.getvalue()
return HandleCommandResult(retval, stdout, stderr)
def serve(self) -> None:
while True:
if self._workload == Workload.COMMAND_SPAM:
self._command_spam()
elif self._workload == Workload.SHUTDOWN:
self.log.info("Shutting down...")
break
elif self._workload == Workload.THROW_EXCEPTION:
raise RuntimeError("Synthetic exception in serve")
else:
self.log.info("Waiting for workload request...")
self._event.wait()
self._event.clear()
| 17,529 | 33.440079 | 113 |
py
|
null |
ceph-main/src/pybind/mgr/snap_schedule/__init__.py
|
# -*- coding: utf-8 -*-
from os import environ
if 'SNAP_SCHED_UNITTEST' in environ:
import tests
elif 'UNITTEST' in environ:
import tests
from .module import Module
else:
from .module import Module
| 216 | 17.083333 | 36 |
py
|
null |
ceph-main/src/pybind/mgr/snap_schedule/module.py
|
"""
Copyright (C) 2019 SUSE
LGPL2.1. See file COPYING.
"""
import errno
import json
import sqlite3
from typing import Any, Dict, Optional, Tuple
from .fs.schedule_client import SnapSchedClient
from mgr_module import MgrModule, CLIReadCommand, CLIWriteCommand, Option
from mgr_util import CephfsConnectionException
from threading import Event
class Module(MgrModule):
MODULE_OPTIONS = [
Option(
'allow_m_granularity',
type='bool',
default=False,
desc='allow minute scheduled snapshots',
runtime=True,
),
Option(
'dump_on_update',
type='bool',
default=False,
desc='dump database to debug log on update',
runtime=True,
),
]
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(Module, self).__init__(*args, **kwargs)
self._initialized = Event()
self.client = SnapSchedClient(self)
@property
def default_fs(self) -> str:
fs_map = self.get('fs_map')
if fs_map['filesystems']:
return fs_map['filesystems'][0]['mdsmap']['fs_name']
else:
self.log.error('No filesystem instance could be found.')
raise CephfsConnectionException(
-errno.ENOENT, "no filesystem found")
def has_fs(self, fs_name: str) -> bool:
return fs_name in self.client.get_all_filesystems()
def serve(self) -> None:
self._initialized.set()
def handle_command(self, inbuf: str, cmd: Dict[str, str]) -> Tuple[int, str, str]:
self._initialized.wait()
return -errno.EINVAL, "", "Unknown command"
@CLIReadCommand('fs snap-schedule status')
def snap_schedule_get(self,
path: str = '/',
fs: Optional[str] = None,
format: Optional[str] = 'plain') -> Tuple[int, str, str]:
'''
List current snapshot schedules
'''
use_fs = fs if fs else self.default_fs
if not self.has_fs(use_fs):
return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
try:
ret_scheds = self.client.get_snap_schedules(use_fs, path)
except CephfsConnectionException as e:
return e.to_tuple()
except Exception as e:
return -errno.EIO, '', str(e)
if format == 'json':
json_report = ','.join([ret_sched.report_json() for ret_sched in ret_scheds])
return 0, f'[{json_report}]', ''
return 0, '\n===\n'.join([ret_sched.report() for ret_sched in ret_scheds]), ''
@CLIReadCommand('fs snap-schedule list')
def snap_schedule_list(self, path: str,
recursive: bool = False,
fs: Optional[str] = None,
format: Optional[str] = 'plain') -> Tuple[int, str, str]:
'''
Get current snapshot schedule for <path>
'''
try:
use_fs = fs if fs else self.default_fs
if not self.has_fs(use_fs):
return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
scheds = self.client.list_snap_schedules(use_fs, path, recursive)
self.log.debug(f'recursive is {recursive}')
except CephfsConnectionException as e:
return e.to_tuple()
except Exception as e:
return -errno.EIO, '', str(e)
if not scheds:
if format == 'json':
output: Dict[str, str] = {}
return 0, json.dumps(output), ''
return -errno.ENOENT, '', f'SnapSchedule for {path} not found'
if format == 'json':
# json_list = ','.join([sched.json_list() for sched in scheds])
schedule_list = [sched.schedule for sched in scheds]
retention_list = [sched.retention for sched in scheds]
out = {'path': path, 'schedule': schedule_list, 'retention': retention_list}
return 0, json.dumps(out), ''
return 0, '\n'.join([str(sched) for sched in scheds]), ''
@CLIWriteCommand('fs snap-schedule add')
def snap_schedule_add(self,
path: str,
snap_schedule: str,
start: Optional[str] = None,
fs: Optional[str] = None) -> Tuple[int, str, str]:
'''
Set a snapshot schedule for <path>
'''
try:
use_fs = fs if fs else self.default_fs
if not self.has_fs(use_fs):
return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
subvol = None
self.client.store_snap_schedule(use_fs,
abs_path,
(abs_path, snap_schedule,
use_fs, path, start, subvol))
suc_msg = f'Schedule set for path {path}'
except sqlite3.IntegrityError:
existing_scheds = self.client.get_snap_schedules(use_fs, path)
report = [s.report() for s in existing_scheds]
error_msg = f'Found existing schedule {report}'
self.log.error(error_msg)
return -errno.EEXIST, '', error_msg
except ValueError as e:
return -errno.ENOENT, '', str(e)
except CephfsConnectionException as e:
return e.to_tuple()
except Exception as e:
return -errno.EIO, '', str(e)
return 0, suc_msg, ''
@CLIWriteCommand('fs snap-schedule remove')
def snap_schedule_rm(self,
path: str,
repeat: Optional[str] = None,
start: Optional[str] = None,
fs: Optional[str] = None) -> Tuple[int, str, str]:
'''
Remove a snapshot schedule for <path>
'''
try:
use_fs = fs if fs else self.default_fs
if not self.has_fs(use_fs):
return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
self.client.rm_snap_schedule(use_fs, abs_path, repeat, start)
except ValueError as e:
return -errno.ENOENT, '', str(e)
except CephfsConnectionException as e:
return e.to_tuple()
except Exception as e:
return -errno.EIO, '', str(e)
return 0, 'Schedule removed for path {}'.format(path), ''
@CLIWriteCommand('fs snap-schedule retention add')
def snap_schedule_retention_add(self,
path: str,
retention_spec_or_period: str,
retention_count: Optional[str] = None,
fs: Optional[str] = None) -> Tuple[int, str, str]:
'''
Set a retention specification for <path>
'''
try:
use_fs = fs if fs else self.default_fs
if not self.has_fs(use_fs):
return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
self.client.add_retention_spec(use_fs, abs_path,
retention_spec_or_period,
retention_count)
except ValueError as e:
return -errno.ENOENT, '', str(e)
except CephfsConnectionException as e:
return e.to_tuple()
except Exception as e:
return -errno.EIO, '', str(e)
return 0, 'Retention added to path {}'.format(path), ''
@CLIWriteCommand('fs snap-schedule retention remove')
def snap_schedule_retention_rm(self,
path: str,
retention_spec_or_period: str,
retention_count: Optional[str] = None,
fs: Optional[str] = None) -> Tuple[int, str, str]:
'''
Remove a retention specification for <path>
'''
try:
use_fs = fs if fs else self.default_fs
if not self.has_fs(use_fs):
return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
self.client.rm_retention_spec(use_fs, abs_path,
retention_spec_or_period,
retention_count)
except ValueError as e:
return -errno.ENOENT, '', str(e)
except CephfsConnectionException as e:
return e.to_tuple()
except Exception as e:
return -errno.EIO, '', str(e)
return 0, 'Retention removed from path {}'.format(path), ''
@CLIWriteCommand('fs snap-schedule activate')
def snap_schedule_activate(self,
path: str,
repeat: Optional[str] = None,
start: Optional[str] = None,
fs: Optional[str] = None) -> Tuple[int, str, str]:
'''
Activate a snapshot schedule for <path>
'''
try:
use_fs = fs if fs else self.default_fs
if not self.has_fs(use_fs):
return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
self.client.activate_snap_schedule(use_fs, abs_path, repeat, start)
except ValueError as e:
return -errno.ENOENT, '', str(e)
except CephfsConnectionException as e:
return e.to_tuple()
except Exception as e:
return -errno.EIO, '', str(e)
return 0, 'Schedule activated for path {}'.format(path), ''
@CLIWriteCommand('fs snap-schedule deactivate')
def snap_schedule_deactivate(self,
path: str,
repeat: Optional[str] = None,
start: Optional[str] = None,
fs: Optional[str] = None) -> Tuple[int, str, str]:
'''
Deactivate a snapshot schedule for <path>
'''
try:
use_fs = fs if fs else self.default_fs
if not self.has_fs(use_fs):
return -errno.EINVAL, '', f"no such filesystem: {use_fs}"
abs_path = path
self.client.deactivate_snap_schedule(use_fs, abs_path, repeat, start)
except ValueError as e:
return -errno.ENOENT, '', str(e)
except CephfsConnectionException as e:
return e.to_tuple()
except Exception as e:
return -errno.EIO, '', str(e)
return 0, 'Schedule deactivated for path {}'.format(path), ''
| 10,809 | 39.792453 | 89 |
py
|
null |
ceph-main/src/pybind/mgr/snap_schedule/fs/__init__.py
| 0 | 0 | 0 |
py
|
|
null |
ceph-main/src/pybind/mgr/snap_schedule/fs/schedule.py
|
"""
Copyright (C) 2020 SUSE
LGPL2.1. See file COPYING.
"""
from datetime import datetime, timezone
import json
import logging
import re
import sqlite3
from typing import cast, Any, Dict, List, Tuple, Optional, Union
log = logging.getLogger(__name__)
# Work around missing datetime.fromisoformat for < python3.7
SNAP_DB_TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
try:
from backports.datetime_fromisoformat import MonkeyPatch
MonkeyPatch.patch_fromisoformat()
except ImportError:
log.debug('backports.datetime_fromisoformat not found')
try:
# have mypy ignore this line. We use the attribute error to detect if we
# have fromisoformat or not
ts_parser = datetime.fromisoformat # type: ignore
log.debug('found datetime.fromisoformat')
except AttributeError:
log.info(('Couldn\'t find datetime.fromisoformat, falling back to '
              f'static timestamp parsing ({SNAP_DB_TS_FORMAT})'))
def ts_parser(data_string: str) -> datetime: # type: ignore
try:
date = datetime.strptime(data_string, SNAP_DB_TS_FORMAT)
return date
except ValueError:
msg = f'''The date string {data_string} does not match the required format
            {SNAP_DB_TS_FORMAT}. For more flexible date parsing upgrade to
python3.7 or install
https://github.com/movermeyer/backports.datetime_fromisoformat'''
log.error(msg)
raise ValueError(msg)
def parse_timestamp(ts: str) -> datetime:
date = ts_parser(ts)
    # normalize any non-UTC timezone to UTC. If no tzinfo is supplied, assume
    # it's already UTC
if date.tzinfo is not timezone.utc and date.tzinfo is not None:
date = date.astimezone(timezone.utc)
return date
def parse_retention(retention: str) -> Dict[str, int]:
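    # e.g. '7d3w' -> {'d': 7, 'w': 3}; both lower- and upper-case period
    # suffixes are accepted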
ret = {}
log.debug(f'parse_retention({retention})')
matches = re.findall(r'\d+[a-z]', retention)
for m in matches:
ret[m[-1]] = int(m[0:-1])
matches = re.findall(r'\d+[A-Z]', retention)
for m in matches:
ret[m[-1]] = int(m[0:-1])
log.debug(f'parse_retention({retention}) -> {ret}')
return ret
RETENTION_MULTIPLIERS = ['n', 'M', 'h', 'd', 'w', 'm', 'y']
TableRowT = Dict[str, Union[int, str]]
def dump_retention(retention: Dict[str, str]) -> str:
ret = ''
for mult in RETENTION_MULTIPLIERS:
if mult in retention:
ret += str(retention[mult]) + mult
return ret
class Schedule(object):
'''
Wrapper to work with schedules stored in sqlite
'''
def __init__(self,
path: str,
schedule: str,
fs_name: str,
rel_path: str,
start: Optional[str] = None,
subvol: Optional[str] = None,
retention_policy: str = '{}',
created: Optional[str] = None,
first: Optional[str] = None,
last: Optional[str] = None,
last_pruned: Optional[str] = None,
created_count: int = 0,
pruned_count: int = 0,
active: bool = True,
) -> None:
self.fs = fs_name
self.subvol = subvol
self.path = path
self.rel_path = rel_path
self.schedule = schedule
self.retention = json.loads(retention_policy)
if start is None:
now = datetime.now(timezone.utc)
self.start = datetime(now.year,
now.month,
now.day,
tzinfo=now.tzinfo)
else:
self.start = parse_timestamp(start)
if created is None:
self.created: Optional[datetime] = datetime.now(timezone.utc)
else:
self.created = parse_timestamp(created)
if first:
self.first: Optional[datetime] = parse_timestamp(first)
else:
self.first = None
if last:
self.last: Optional[datetime] = parse_timestamp(last)
else:
self.last = None
if last_pruned:
self.last_pruned: Optional[datetime] = parse_timestamp(last_pruned)
else:
self.last_pruned = None
self.created_count = created_count
self.pruned_count = pruned_count
self.active = bool(active)
@classmethod
def _from_db_row(cls, table_row: TableRowT, fs: str) -> 'Schedule':
return cls(cast(str, table_row['path']),
cast(str, table_row['schedule']),
fs,
cast(str, table_row['rel_path']),
cast(str, table_row['start']),
cast(str, table_row['subvol']),
cast(str, table_row['retention']),
cast(str, table_row['created']),
cast(str, table_row['first']),
cast(str, table_row['last']),
cast(str, table_row['last_pruned']),
cast(int, table_row['created_count']),
cast(int, table_row['pruned_count']),
cast(bool, table_row['active']),
)
def __str__(self) -> str:
return f'{self.path} {self.schedule} {dump_retention(self.retention)}'
def json_list(self) -> str:
return json.dumps({'path': self.path, 'schedule': self.schedule,
'retention': dump_retention(self.retention)})
CREATE_TABLES = '''CREATE TABLE IF NOT EXISTS schedules(
id INTEGER PRIMARY KEY ASC,
path TEXT NOT NULL UNIQUE,
subvol TEXT,
retention TEXT DEFAULT '{}',
rel_path TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS schedules_meta(
id INTEGER PRIMARY KEY ASC,
schedule_id INT,
start TEXT NOT NULL,
first TEXT,
last TEXT,
last_pruned TEXT,
created TEXT NOT NULL,
repeat INT NOT NULL,
schedule TEXT NOT NULL,
created_count INT DEFAULT 0,
pruned_count INT DEFAULT 0,
active INT NOT NULL,
FOREIGN KEY(schedule_id) REFERENCES schedules(id) ON DELETE CASCADE,
UNIQUE (schedule_id, start, repeat)
);'''
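    # For every active schedule on a path whose start lies in the past, compute
    # the seconds remaining until the next snapshot is due ("until"), soonest first.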
EXEC_QUERY = '''SELECT
s.retention,
sm.repeat - (strftime("%s", "now") - strftime("%s", sm.start)) %
sm.repeat "until",
sm.start, sm.repeat, sm.schedule
FROM schedules s
INNER JOIN schedules_meta sm ON sm.schedule_id = s.id
WHERE
s.path = ? AND
strftime("%s", "now") - strftime("%s", sm.start) > 0 AND
sm.active = 1
ORDER BY until;'''
PROTO_GET_SCHEDULES = '''SELECT
s.path, s.subvol, s.rel_path, sm.active,
sm.schedule, s.retention, sm.start, sm.first, sm.last,
sm.last_pruned, sm.created, sm.created_count, sm.pruned_count
FROM schedules s
INNER JOIN schedules_meta sm ON sm.schedule_id = s.id
WHERE'''
GET_SCHEDULES = PROTO_GET_SCHEDULES + ' s.path = ?'
@classmethod
def get_db_schedules(cls,
path: str,
db: sqlite3.Connection,
fs: str,
schedule: Optional[str] = None,
start: Optional[str] = None,
repeat: Optional[str] = None) -> List['Schedule']:
query = cls.GET_SCHEDULES
data: Tuple[Any, ...] = (path,)
if repeat:
query += ' AND sm.repeat = ?'
data += (repeat,)
if schedule:
query += ' AND sm.schedule = ?'
data += (schedule,)
if start:
query += ' AND sm.start = ?'
data += (start,)
with db:
c = db.execute(query, data)
return [cls._from_db_row(row, fs) for row in c.fetchall()]
@classmethod
def list_schedules(cls,
path: str,
db: sqlite3.Connection,
fs: str, recursive: bool) -> List['Schedule']:
with db:
if recursive:
c = db.execute(cls.PROTO_GET_SCHEDULES + ' path LIKE ?',
(f'{path}%',))
else:
c = db.execute(cls.PROTO_GET_SCHEDULES + ' path = ?',
(f'{path}',))
return [cls._from_db_row(row, fs) for row in c.fetchall()]
@classmethod
def list_all_schedules(cls,
db: sqlite3.Connection,
fs: str) -> List['Schedule']:
with db:
c = db.execute(cls.PROTO_GET_SCHEDULES + " path LIKE '%'")
return [cls._from_db_row(row, fs) for row in c.fetchall()]
INSERT_SCHEDULE = '''INSERT INTO
schedules(path, subvol, retention, rel_path)
Values(?, ?, ?, ?);'''
INSERT_SCHEDULE_META = '''INSERT INTO
schedules_meta(schedule_id, start, created, repeat, schedule,
active)
SELECT ?, ?, ?, ?, ?, ?'''
def store_schedule(self, db: sqlite3.Connection) -> None:
sched_id = None
with db:
try:
log.debug(f'schedule with retention {self.retention}')
c = db.execute(self.INSERT_SCHEDULE,
(self.path,
self.subvol,
json.dumps(self.retention),
self.rel_path,))
sched_id = c.lastrowid
except sqlite3.IntegrityError:
# might be adding another schedule, retrieve sched id
log.debug((f'found schedule entry for {self.path}, '
'trying to add meta'))
c = db.execute('SELECT id FROM schedules where path = ?',
(self.path,))
sched_id = c.fetchone()[0]
pass
assert self.created, "self.created should be set"
db.execute(self.INSERT_SCHEDULE_META,
(sched_id,
self.start.strftime(SNAP_DB_TS_FORMAT),
self.created.strftime(SNAP_DB_TS_FORMAT),
self.repeat,
self.schedule,
1))
@classmethod
def rm_schedule(cls,
db: sqlite3.Connection,
path: str,
repeat: Optional[str],
start: Optional[str]) -> None:
with db:
cur = db.execute('SELECT id FROM schedules WHERE path = ?',
(path,))
row = cur.fetchone()
if row is None:
log.info(f'no schedule for {path} found')
raise ValueError('SnapSchedule for {} not found'.format(path))
id_ = tuple(row)
if repeat or start:
meta_delete = ('DELETE FROM schedules_meta '
'WHERE schedule_id = ?')
delete_param = id_
if repeat:
meta_delete += ' AND schedule = ?'
delete_param += (repeat,)
if start:
meta_delete += ' AND start = ?'
delete_param += (start,)
# maybe only delete meta entry
log.debug(f'executing {meta_delete}, {delete_param}')
res = db.execute(meta_delete + ';', delete_param).rowcount
if res < 1:
raise ValueError(f'No schedule found for {repeat} {start}')
db.execute('COMMIT;')
# now check if we have schedules in meta left, if not delete
# the schedule as well
meta_count = db.execute(
'SELECT COUNT() FROM schedules_meta WHERE schedule_id = ?',
id_)
if meta_count.fetchone() == (0,):
log.debug(
'no more schedules left, cleaning up schedules table')
db.execute('DELETE FROM schedules WHERE id = ?;', id_)
else:
                # just delete the schedule; ON DELETE CASCADE takes care of
                # the rest
db.execute('DELETE FROM schedules WHERE id = ?;', id_)
GET_RETENTION = '''SELECT retention FROM schedules
WHERE path = ?'''
UPDATE_RETENTION = '''UPDATE schedules
SET retention = ?
WHERE path = ?'''
@classmethod
def add_retention(cls,
db: sqlite3.Connection,
path: str,
retention_spec: str) -> None:
with db:
row = db.execute(cls.GET_RETENTION, (path,)).fetchone()
if row is None:
raise ValueError(f'No schedule found for {path}')
retention = parse_retention(retention_spec)
if not retention:
raise ValueError(f'Retention spec {retention_spec} is invalid')
log.debug(f'db result is {tuple(row)}')
current = row['retention']
current_retention = json.loads(current)
for r, v in retention.items():
if r in current_retention:
msg = (f'Retention for {r} is already present with value'
f'{current_retention[r]}. Please remove first')
raise ValueError(msg)
current_retention.update(retention)
db.execute(cls.UPDATE_RETENTION,
(json.dumps(current_retention), path))
@classmethod
def rm_retention(cls,
db: sqlite3.Connection,
path: str,
retention_spec: str) -> None:
with db:
row = db.execute(cls.GET_RETENTION, (path,)).fetchone()
if row is None:
raise ValueError(f'No schedule found for {path}')
retention = parse_retention(retention_spec)
current = row['retention']
current_retention = json.loads(current)
for r, v in retention.items():
if r not in current_retention or current_retention[r] != v:
msg = (f'Retention for {r}: {v} was not set for {path} '
'can\'t remove')
raise ValueError(msg)
current_retention.pop(r)
db.execute(cls.UPDATE_RETENTION,
(json.dumps(current_retention), path))
def report(self) -> str:
return self.report_json()
def report_json(self) -> str:
return json.dumps(dict(self.__dict__),
default=lambda o: o.strftime(SNAP_DB_TS_FORMAT))
@classmethod
def parse_schedule(cls, schedule: str) -> Tuple[int, str]:
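        # e.g. '24h' -> (24, 'h')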
return int(schedule[0:-1]), schedule[-1]
@property
def repeat(self) -> int:
period, mult = self.parse_schedule(self.schedule)
if mult == 'M':
return period * 60
elif mult == 'h':
return period * 60 * 60
elif mult == 'd':
return period * 60 * 60 * 24
elif mult == 'w':
return period * 60 * 60 * 24 * 7
else:
raise ValueError(f'schedule multiplier "{mult}" not recognized')
UPDATE_LAST = '''UPDATE schedules_meta
SET
last = ?,
created_count = created_count + 1,
first = CASE WHEN first IS NULL THEN ? ELSE first END
WHERE EXISTS(
SELECT id
FROM schedules s
WHERE s.id = schedules_meta.schedule_id
AND s.path = ?
AND schedules_meta.start = ?
AND schedules_meta.repeat = ?);'''
def update_last(self, time: datetime, db: sqlite3.Connection) -> None:
with db:
db.execute(self.UPDATE_LAST,
(time.strftime(SNAP_DB_TS_FORMAT),
time.strftime(SNAP_DB_TS_FORMAT),
self.path,
self.start.strftime(SNAP_DB_TS_FORMAT),
self.repeat))
self.created_count += 1
self.last = time
if not self.first:
self.first = time
UPDATE_INACTIVE = '''UPDATE schedules_meta
SET
active = 0
WHERE EXISTS(
SELECT id
FROM schedules s
WHERE s.id = schedules_meta.schedule_id
AND s.path = ?
AND schedules_meta.start = ?
AND schedules_meta.repeat = ?);'''
def set_inactive(self, db: sqlite3.Connection) -> None:
with db:
log.debug((f'Deactivating schedule ({self.repeat}, '
f'{self.start}) on path {self.path}'))
db.execute(self.UPDATE_INACTIVE,
(self.path,
self.start.strftime(SNAP_DB_TS_FORMAT),
self.repeat))
self.active = False
UPDATE_ACTIVE = '''UPDATE schedules_meta
SET
active = 1
WHERE EXISTS(
SELECT id
FROM schedules s
WHERE s.id = schedules_meta.schedule_id
AND s.path = ?
AND schedules_meta.start = ?
AND schedules_meta.repeat = ?);'''
def set_active(self, db: sqlite3.Connection) -> None:
with db:
log.debug(f'Activating schedule ({self.repeat}, {self.start}) '
f'on path {self.path}')
db.execute(self.UPDATE_ACTIVE,
(self.path,
self.start.strftime(SNAP_DB_TS_FORMAT),
self.repeat))
self.active = True
UPDATE_PRUNED = '''UPDATE schedules_meta
SET
last_pruned = ?,
pruned_count = pruned_count + ?
WHERE EXISTS(
SELECT id
FROM schedules s
WHERE s.id = schedules_meta.schedule_id
AND s.path = ?
AND schedules_meta.start = ?
AND schedules_meta.repeat = ?);'''
def update_pruned(self,
time: datetime,
db: sqlite3.Connection,
pruned: int) -> None:
with db:
db.execute(self.UPDATE_PRUNED,
(time.strftime(SNAP_DB_TS_FORMAT), pruned,
self.path,
self.start.strftime(SNAP_DB_TS_FORMAT),
self.repeat))
self.pruned_count += pruned
self.last_pruned = time
| 18,445 | 35.671968 | 86 |
py
|
null |
ceph-main/src/pybind/mgr/snap_schedule/fs/schedule_client.py
|
"""
Copyright (C) 2020 SUSE
LGPL2.1. See file COPYING.
"""
import cephfs
import rados
from contextlib import contextmanager
from mgr_util import CephfsClient, open_filesystem
from collections import OrderedDict
from datetime import datetime, timezone
import logging
from threading import Timer, Lock
from typing import cast, Any, Callable, Dict, Iterator, List, Set, Optional, \
Tuple, TypeVar, Union, Type
from types import TracebackType
import sqlite3
from .schedule import Schedule
import traceback
SNAP_SCHEDULE_NAMESPACE = 'cephfs-snap-schedule'
SNAP_DB_PREFIX = 'snap_db'
# increment this every time the db schema changes and provide upgrade code
SNAP_DB_VERSION = '0'
SNAP_DB_OBJECT_NAME = f'{SNAP_DB_PREFIX}_v{SNAP_DB_VERSION}'
# scheduled snapshots are tz suffixed
SNAPSHOT_TS_FORMAT_TZ = '%Y-%m-%d-%H_%M_%S_%Z'
# for backward compat snapshot name parsing
SNAPSHOT_TS_FORMAT = '%Y-%m-%d-%H_%M_%S'
# length of timestamp format (without tz suffix)
# e.g.: scheduled-2022-04-19-05_39_00_UTC (len = "2022-04-19-05_39_00")
SNAPSHOT_TS_FORMAT_LEN = 19
SNAPSHOT_PREFIX = 'scheduled'
log = logging.getLogger(__name__)
CephfsClientT = TypeVar('CephfsClientT', bound=CephfsClient)
@contextmanager
def open_ioctx(self: CephfsClientT,
pool: Union[int, str]) -> Iterator[rados.Ioctx]:
try:
if type(pool) is int:
with self.mgr.rados.open_ioctx2(pool) as ioctx:
ioctx.set_namespace(SNAP_SCHEDULE_NAMESPACE)
yield ioctx
else:
with self.mgr.rados.open_ioctx(pool) as ioctx:
ioctx.set_namespace(SNAP_SCHEDULE_NAMESPACE)
yield ioctx
except rados.ObjectNotFound:
log.error("Failed to locate pool {}".format(pool))
raise
FuncT = TypeVar('FuncT', bound=Callable[..., None])
def updates_schedule_db(func: FuncT) -> FuncT:
def f(self: 'SnapSchedClient', fs: str, schedule_or_path: str, *args: Any) -> None:
ret = func(self, fs, schedule_or_path, *args)
path = schedule_or_path
if isinstance(schedule_or_path, Schedule):
path = schedule_or_path.path
self.refresh_snap_timers(fs, path)
return ret
return cast(FuncT, f)
def get_prune_set(candidates: Set[Tuple[cephfs.DirEntry, datetime]],
retention: Dict[str, int],
max_snaps_to_retain: int) -> Set:
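    # Walk the retention periods from finest to coarsest granularity and keep
    # the newest snapshot per period bucket; whatever is not kept (capped at
    # max_snaps_to_retain) is returned as the set of snapshots to prune.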
PRUNING_PATTERNS = OrderedDict([
# n is for keep last n snapshots, uses the snapshot name timestamp
# format for lowest granularity
# NOTE: prune set has tz suffix stripped out.
("n", SNAPSHOT_TS_FORMAT),
# TODO remove M for release
("M", '%Y-%m-%d-%H_%M'),
("h", '%Y-%m-%d-%H'),
("d", '%Y-%m-%d'),
("w", '%G-%V'),
("m", '%Y-%m'),
("y", '%Y'),
])
keep = []
if not retention:
log.info(f'no retention set, assuming n: {max_snaps_to_retain}')
retention = {'n': max_snaps_to_retain}
for period, date_pattern in PRUNING_PATTERNS.items():
log.debug(f'compiling keep set for period {period}')
period_count = retention.get(period, 0)
if not period_count:
continue
last = None
kept_for_this_period = 0
for snap in sorted(candidates, key=lambda x: x[0].d_name,
reverse=True):
snap_ts = snap[1].strftime(date_pattern)
if snap_ts != last:
last = snap_ts
if snap not in keep:
log.debug((f'keeping {snap[0].d_name} due to '
f'{period_count}{period}'))
keep.append(snap)
kept_for_this_period += 1
if kept_for_this_period == period_count:
log.debug(('found enough snapshots for '
f'{period_count}{period}'))
break
if len(keep) > max_snaps_to_retain:
log.info(f'Pruning keep set; would retain first {max_snaps_to_retain}'
f' out of {len(keep)} snaps')
keep = keep[:max_snaps_to_retain]
return candidates - set(keep)
def snap_name_to_timestamp(scheduled_snap_name: str) -> str:
""" extract timestamp from a schedule snapshot with tz suffix stripped out """
ts = scheduled_snap_name.lstrip(f'{SNAPSHOT_PREFIX}-')
return ts[0:SNAPSHOT_TS_FORMAT_LEN]
class DBInfo():
def __init__(self, fs: str, db: sqlite3.Connection):
self.fs: str = fs
self.lock: Lock = Lock()
self.db: sqlite3.Connection = db
# context manager for serializing db connection usage
class DBConnectionManager():
def __init__(self, info: DBInfo):
self.dbinfo: DBInfo = info
# using string as return type hint since __future__.annotations is not
    # available with Python 3.6; it's available starting from Python 3.7
def __enter__(self) -> 'DBConnectionManager':
log.debug(f'locking db connection for {self.dbinfo.fs}')
self.dbinfo.lock.acquire()
log.debug(f'locked db connection for {self.dbinfo.fs}')
return self
def __exit__(self,
exception_type: Optional[Type[BaseException]],
exception_value: Optional[BaseException],
traceback: Optional[TracebackType]) -> None:
log.debug(f'unlocking db connection for {self.dbinfo.fs}')
self.dbinfo.lock.release()
log.debug(f'unlocked db connection for {self.dbinfo.fs}')
class SnapSchedClient(CephfsClient):
def __init__(self, mgr: Any) -> None:
super(SnapSchedClient, self).__init__(mgr)
# Each db connection is now guarded by a Lock; this is required to
        # avoid concurrent DB transactions when more than one path in a
# file-system are scheduled at the same interval eg. 1h; without the
# lock, there are races to use the same connection, causing nested
# transactions to be aborted
self.sqlite_connections: Dict[str, DBInfo] = {}
self.active_timers: Dict[Tuple[str, str], List[Timer]] = {}
self.conn_lock: Lock = Lock() # lock to protect add/lookup db connections
# restart old schedules
for fs_name in self.get_all_filesystems():
with self.get_schedule_db(fs_name) as conn_mgr:
db = conn_mgr.dbinfo.db
sched_list = Schedule.list_all_schedules(db, fs_name)
for sched in sched_list:
self.refresh_snap_timers(fs_name, sched.path, db)
@property
def allow_minute_snaps(self) -> None:
return self.mgr.get_module_option('allow_m_granularity')
@property
def dump_on_update(self) -> None:
return self.mgr.get_module_option('dump_on_update')
def get_schedule_db(self, fs: str) -> DBConnectionManager:
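        # lazily open (and cache) one SQLite connection per file system; the DB
        # lives in the fs metadata pool via the 'ceph' sqlite VFS, and a legacy
        # RADOS dump object is imported and removed on first open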
dbinfo = None
self.conn_lock.acquire()
if fs not in self.sqlite_connections:
poolid = self.get_metadata_pool(fs)
assert poolid, f'fs "{fs}" not found'
uri = f"file:///*{poolid}:/{SNAP_DB_OBJECT_NAME}.db?vfs=ceph"
log.debug(f"using uri {uri}")
db = sqlite3.connect(uri, check_same_thread=False, uri=True)
db.execute('PRAGMA FOREIGN_KEYS = 1')
db.execute('PRAGMA JOURNAL_MODE = PERSIST')
db.execute('PRAGMA PAGE_SIZE = 65536')
db.execute('PRAGMA CACHE_SIZE = 256')
db.execute('PRAGMA TEMP_STORE = memory')
db.row_factory = sqlite3.Row
# check for legacy dump store
pool_param = cast(Union[int, str], poolid)
with open_ioctx(self, pool_param) as ioctx:
try:
size, _mtime = ioctx.stat(SNAP_DB_OBJECT_NAME)
dump = ioctx.read(SNAP_DB_OBJECT_NAME, size).decode('utf-8')
db.executescript(dump)
ioctx.remove_object(SNAP_DB_OBJECT_NAME)
except rados.ObjectNotFound:
log.debug(f'No legacy schedule DB found in {fs}')
db.executescript(Schedule.CREATE_TABLES)
self.sqlite_connections[fs] = DBInfo(fs, db)
dbinfo = self.sqlite_connections[fs]
self.conn_lock.release()
return DBConnectionManager(dbinfo)
def _is_allowed_repeat(self, exec_row: Dict[str, str], path: str) -> bool:
if Schedule.parse_schedule(exec_row['schedule'])[1] == 'M':
if self.allow_minute_snaps:
log.debug(('Minute repeats allowed, '
f'scheduling snapshot on path {path}'))
return True
else:
log.info(('Minute repeats disabled, '
f'skipping snapshot on path {path}'))
return False
else:
return True
def fetch_schedules(self, db: sqlite3.Connection, path: str) -> List[sqlite3.Row]:
with db:
if self.dump_on_update:
dump = [line for line in db.iterdump()]
dump = "\n".join(dump)
log.debug(f"db dump:\n{dump}")
cur = db.execute(Schedule.EXEC_QUERY, (path,))
all_rows = cur.fetchall()
rows = [r for r in all_rows
if self._is_allowed_repeat(r, path)][0:1]
return rows
def refresh_snap_timers(self, fs: str, path: str, olddb: Optional[sqlite3.Connection] = None) -> None:
try:
log.debug((f'SnapDB on {fs} changed for {path}, '
'updating next Timer'))
rows = []
            # olddb is passed in the case where we land here without a timer;
            # the lock on the db connection has already been taken by the caller
if olddb:
rows = self.fetch_schedules(olddb, path)
else:
with self.get_schedule_db(fs) as conn_mgr:
db = conn_mgr.dbinfo.db
rows = self.fetch_schedules(db, path)
timers = self.active_timers.get((fs, path), [])
for timer in timers:
timer.cancel()
timers = []
for row in rows:
log.debug(f'Creating new snapshot timer for {path}')
t = Timer(row[1],
self.create_scheduled_snapshot,
args=[fs, path, row[0], row[2], row[3]])
t.start()
timers.append(t)
log.debug(f'Will snapshot {path} in fs {fs} in {row[1]}s')
self.active_timers[(fs, path)] = timers
except Exception:
self._log_exception('refresh_snap_timers')
def _log_exception(self, fct: str) -> None:
log.error(f'{fct} raised an exception:')
log.error(traceback.format_exc())
def create_scheduled_snapshot(self,
fs_name: str,
path: str,
retention: str,
start: str,
repeat: str) -> None:
log.debug(f'Scheduled snapshot of {path} triggered')
set_schedule_to_inactive = False
try:
with self.get_schedule_db(fs_name) as conn_mgr:
db = conn_mgr.dbinfo.db
try:
sched = Schedule.get_db_schedules(path,
db,
fs_name,
repeat=repeat,
start=start)[0]
time = datetime.now(timezone.utc)
with open_filesystem(self, fs_name) as fs_handle:
snap_ts = time.strftime(SNAPSHOT_TS_FORMAT_TZ)
snap_dir = self.mgr.rados.conf_get('client_snapdir')
snap_name = f'{path}/{snap_dir}/{SNAPSHOT_PREFIX}-{snap_ts}'
fs_handle.mkdir(snap_name, 0o755)
log.info(f'created scheduled snapshot of {path}')
log.debug(f'created scheduled snapshot {snap_name}')
sched.update_last(time, db)
except cephfs.ObjectNotFound:
# maybe path is missing or wrong
self._log_exception('create_scheduled_snapshot')
log.debug(f'path {path} is probably missing or wrong; '
'remember to strip off the mount point path '
'prefix to provide the correct path')
set_schedule_to_inactive = True
except cephfs.Error:
self._log_exception('create_scheduled_snapshot')
except Exception:
            # catch all exceptions because otherwise we'll never know, since this
# is running in a thread
self._log_exception('create_scheduled_snapshot')
finally:
if set_schedule_to_inactive:
sched.set_inactive(db)
finally:
with self.get_schedule_db(fs_name) as conn_mgr:
db = conn_mgr.dbinfo.db
self.refresh_snap_timers(fs_name, path, db)
self.prune_snapshots(sched)
def prune_snapshots(self, sched: Schedule) -> None:
try:
log.debug('Pruning snapshots')
ret = sched.retention
path = sched.path
prune_candidates = set()
time = datetime.now(timezone.utc)
mds_max_snaps_per_dir = self.mgr.get_ceph_option('mds_max_snaps_per_dir')
with open_filesystem(self, sched.fs) as fs_handle:
snap_dir = self.mgr.rados.conf_get('client_snapdir')
with fs_handle.opendir(f'{path}/{snap_dir}') as d_handle:
dir_ = fs_handle.readdir(d_handle)
while dir_:
if dir_.d_name.decode('utf-8').startswith(f'{SNAPSHOT_PREFIX}-'):
log.debug(f'add {dir_.d_name} to pruning')
ts = datetime.strptime(
snap_name_to_timestamp(dir_.d_name.decode('utf-8')), SNAPSHOT_TS_FORMAT)
prune_candidates.add((dir_, ts))
else:
log.debug(f'skipping dir entry {dir_.d_name}')
dir_ = fs_handle.readdir(d_handle)
# Limit ourselves to one snapshot less than allowed by config to allow for
# snapshot creation before pruning
to_prune = get_prune_set(prune_candidates, ret, mds_max_snaps_per_dir - 1)
for k in to_prune:
dirname = k[0].d_name.decode('utf-8')
log.debug(f'rmdir on {dirname}')
fs_handle.rmdir(f'{path}/{snap_dir}/{dirname}')
if to_prune:
with self.get_schedule_db(sched.fs) as conn_mgr:
db = conn_mgr.dbinfo.db
sched.update_pruned(time, db, len(to_prune))
except Exception:
self._log_exception('prune_snapshots')
def get_snap_schedules(self, fs: str, path: str) -> List[Schedule]:
with self.get_schedule_db(fs) as conn_mgr:
db = conn_mgr.dbinfo.db
return Schedule.get_db_schedules(path, db, fs)
def list_snap_schedules(self,
fs: str,
path: str,
recursive: bool) -> List[Schedule]:
with self.get_schedule_db(fs) as conn_mgr:
db = conn_mgr.dbinfo.db
return Schedule.list_schedules(path, db, fs, recursive)
@updates_schedule_db
# TODO improve interface
def store_snap_schedule(self,
fs: str, path_: str,
args: Tuple[str, str, str, str,
Optional[str], Optional[str]]) -> None:
sched = Schedule(*args)
log.debug(f'repeat is {sched.repeat}')
if sched.parse_schedule(sched.schedule)[1] == 'M' and not self.allow_minute_snaps:
log.error('not allowed')
raise ValueError('no minute snaps allowed')
log.debug(f'attempting to add schedule {sched}')
with self.get_schedule_db(fs) as conn_mgr:
db = conn_mgr.dbinfo.db
sched.store_schedule(db)
@updates_schedule_db
def rm_snap_schedule(self,
fs: str, path: str,
schedule: Optional[str],
start: Optional[str]) -> None:
with self.get_schedule_db(fs) as conn_mgr:
db = conn_mgr.dbinfo.db
Schedule.rm_schedule(db, path, schedule, start)
@updates_schedule_db
def add_retention_spec(self,
fs: str,
path: str,
retention_spec_or_period: str,
retention_count: Optional[str]) -> None:
retention_spec = retention_spec_or_period
if retention_count:
retention_spec = retention_count + retention_spec
with self.get_schedule_db(fs) as conn_mgr:
db = conn_mgr.dbinfo.db
Schedule.add_retention(db, path, retention_spec)
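    # Illustrative example (made-up arguments): add_retention_spec(fs, path,
    # 'h', '24') stores the retention spec '24h', i.e. retain 24 snapshots at
    # hourly granularity.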
@updates_schedule_db
def rm_retention_spec(self,
fs: str,
path: str,
retention_spec_or_period: str,
retention_count: Optional[str]) -> None:
retention_spec = retention_spec_or_period
if retention_count:
retention_spec = retention_count + retention_spec
with self.get_schedule_db(fs) as conn_mgr:
db = conn_mgr.dbinfo.db
Schedule.rm_retention(db, path, retention_spec)
@updates_schedule_db
def activate_snap_schedule(self,
fs: str,
path: str,
schedule: Optional[str],
start: Optional[str]) -> None:
with self.get_schedule_db(fs) as conn_mgr:
db = conn_mgr.dbinfo.db
schedules = Schedule.get_db_schedules(path, db, fs,
schedule=schedule,
start=start)
for s in schedules:
s.set_active(db)
@updates_schedule_db
def deactivate_snap_schedule(self,
fs: str, path: str,
schedule: Optional[str],
start: Optional[str]) -> None:
with self.get_schedule_db(fs) as conn_mgr:
db = conn_mgr.dbinfo.db
schedules = Schedule.get_db_schedules(path, db, fs,
schedule=schedule,
start=start)
for s in schedules:
s.set_inactive(db)
ceph-main/src/pybind/mgr/snap_schedule/tests/__init__.py
ceph-main/src/pybind/mgr/snap_schedule/tests/conftest.py
import pytest
import sqlite3
from ..fs.schedule import Schedule
# simple_schedule fixture returns schedules without any timing arguments
# the tuple values correspond to ctor args for Schedule
_simple_schedules = [
('/foo', '6h', 'fs_name', '/foo'),
('/foo', '24h', 'fs_name', '/foo'),
('/bar', '1d', 'fs_name', '/bar'),
('/fnord', '1w', 'fs_name', '/fnord'),
]
@pytest.fixture(params=_simple_schedules)
def simple_schedule(request):
return Schedule(*request.param)
@pytest.fixture
def simple_schedules():
return [Schedule(*s) for s in _simple_schedules]
@pytest.fixture
def db():
db = sqlite3.connect(':memory:',
check_same_thread=False)
with db:
db.row_factory = sqlite3.Row
db.execute("PRAGMA FOREIGN_KEYS = 1")
db.executescript(Schedule.CREATE_TABLES)
return db
ceph-main/src/pybind/mgr/snap_schedule/tests/fs/__init__.py
ceph-main/src/pybind/mgr/snap_schedule/tests/fs/test_schedule.py
import datetime
import json
import pytest
import random
import sqlite3
from ...fs.schedule import Schedule, parse_retention
SELECT_ALL = ('select * from schedules s'
' INNER JOIN schedules_meta sm'
' ON sm.schedule_id = s.id')
def assert_updated(new, old, update_expected={}):
    '''
    Assert that `new` has been updated in exactly the attributes listed in
    update_expected AND has not changed in any other attribute compared to
    old.
    If update_expected is the empty dict, plain equality is checked.
    '''
for var in vars(new):
if var in update_expected:
expected_val = update_expected.get(var)
new_val = getattr(new, var)
if isinstance(expected_val, datetime.datetime):
assert new_val.year == expected_val.year
assert new_val.month == expected_val.month
assert new_val.day == expected_val.day
assert new_val.hour == expected_val.hour
assert new_val.minute == expected_val.minute
assert new_val.second == expected_val.second
else:
assert new_val == expected_val, f'new did not update value for {var}'
else:
expected_val = getattr(old, var)
new_val = getattr(new, var)
if isinstance(expected_val, datetime.datetime):
assert new_val.year == expected_val.year
assert new_val.month == expected_val.month
assert new_val.day == expected_val.day
assert new_val.hour == expected_val.hour
assert new_val.minute == expected_val.minute
assert new_val.second == expected_val.second
else:
assert new_val == expected_val, f'new changed unexpectedly in value for {var}'
class TestSchedule(object):
'''
Test the schedule class basics and that its methods update self as expected
'''
def test_start_default_midnight(self, simple_schedule):
now = datetime.datetime.now(datetime.timezone.utc)
assert simple_schedule.start.second == 0
assert simple_schedule.start.minute == 0
assert simple_schedule.start.hour == 0
assert simple_schedule.start.day == now.day
assert simple_schedule.start.month == now.month
assert simple_schedule.start.year == now.year
assert simple_schedule.start.tzinfo == now.tzinfo
def test_created_now(self, simple_schedule):
now = datetime.datetime.now(datetime.timezone.utc)
assert simple_schedule.created.minute == now.minute
assert simple_schedule.created.hour == now.hour
assert simple_schedule.created.day == now.day
assert simple_schedule.created.month == now.month
assert simple_schedule.created.year == now.year
assert simple_schedule.created.tzinfo == now.tzinfo
def test_repeat_valid(self, simple_schedule):
repeat = simple_schedule.repeat
assert isinstance(repeat, int)
def test_store_single(self, db, simple_schedule):
simple_schedule.store_schedule(db)
row = ()
with db:
row = db.execute(SELECT_ALL).fetchone()
db_schedule = Schedule._from_db_row(row, simple_schedule.fs)
assert_updated(db_schedule, simple_schedule)
def test_store_multiple(self, db, simple_schedules):
[s.store_schedule(db) for s in simple_schedules]
rows = []
with db:
rows = db.execute(SELECT_ALL).fetchall()
assert len(rows) == len(simple_schedules)
def test_update_last(self, db, simple_schedule):
simple_schedule.store_schedule(db)
with db:
_ = db.execute(SELECT_ALL).fetchone()
first_time = datetime.datetime.now(datetime.timezone.utc)
simple_schedule.update_last(first_time, db)
with db:
after = db.execute(SELECT_ALL).fetchone()
assert_updated(Schedule._from_db_row(after, simple_schedule.fs),
simple_schedule)
second_time = datetime.datetime.now(datetime.timezone.utc)
simple_schedule.update_last(second_time, db)
with db:
after2 = db.execute(SELECT_ALL).fetchone()
assert_updated(Schedule._from_db_row(after2, simple_schedule.fs),
simple_schedule)
def test_set_inactive_active(self, db, simple_schedule):
simple_schedule.store_schedule(db)
with db:
_ = db.execute(SELECT_ALL).fetchone()
simple_schedule.set_inactive(db)
with db:
after = db.execute(SELECT_ALL).fetchone()
assert_updated(Schedule._from_db_row(after, simple_schedule.fs),
simple_schedule)
simple_schedule.set_active(db)
with db:
after2 = db.execute(SELECT_ALL).fetchone()
assert_updated(Schedule._from_db_row(after2, simple_schedule.fs),
simple_schedule)
def test_update_pruned(self, db, simple_schedule):
simple_schedule.store_schedule(db)
with db:
_ = db.execute(SELECT_ALL).fetchone()
now = datetime.datetime.now(datetime.timezone.utc)
pruned_count = random.randint(1, 1000)
simple_schedule.update_pruned(now, db, pruned_count)
with db:
after = db.execute(SELECT_ALL).fetchone()
assert_updated(Schedule._from_db_row(after, simple_schedule.fs),
simple_schedule)
# TODO test get_schedules and list_schedules
class TestScheduleDB(object):
'''
This class tests that Schedules methods update the DB correctly
'''
def test_update_last(self, db, simple_schedule):
simple_schedule.store_schedule(db)
with db:
before = db.execute(SELECT_ALL).fetchone()
first_time = datetime.datetime.now(datetime.timezone.utc)
simple_schedule.update_last(first_time, db)
with db:
after = db.execute(SELECT_ALL).fetchone()
assert_updated(Schedule._from_db_row(after, simple_schedule.fs),
Schedule._from_db_row(before, simple_schedule.fs),
{'created_count': 1,
'last': first_time,
'first': first_time})
second_time = datetime.datetime.now(datetime.timezone.utc)
simple_schedule.update_last(second_time, db)
with db:
after2 = db.execute(SELECT_ALL).fetchone()
assert_updated(Schedule._from_db_row(after2, simple_schedule.fs),
Schedule._from_db_row(after, simple_schedule.fs),
{'created_count': 2, 'last': second_time})
def test_set_inactive_active(self, db, simple_schedule):
simple_schedule.store_schedule(db)
with db:
before = db.execute(SELECT_ALL).fetchone()
simple_schedule.set_inactive(db)
with db:
after = db.execute(SELECT_ALL).fetchone()
assert_updated(Schedule._from_db_row(after, simple_schedule.fs),
Schedule._from_db_row(before, simple_schedule.fs),
{'active': 0})
simple_schedule.set_active(db)
with db:
after2 = db.execute(SELECT_ALL).fetchone()
assert_updated(Schedule._from_db_row(after2, simple_schedule.fs),
Schedule._from_db_row(after, simple_schedule.fs),
{'active': 1})
def test_update_pruned(self, db, simple_schedule):
simple_schedule.store_schedule(db)
with db:
before = db.execute(SELECT_ALL).fetchone()
now = datetime.datetime.now(datetime.timezone.utc)
pruned_count = random.randint(1, 1000)
simple_schedule.update_pruned(now, db, pruned_count)
with db:
after = db.execute(SELECT_ALL).fetchone()
assert_updated(Schedule._from_db_row(after, simple_schedule.fs),
Schedule._from_db_row(before, simple_schedule.fs),
{'last_pruned': now, 'pruned_count': pruned_count})
def test_add_retention(self, db, simple_schedule):
simple_schedule.store_schedule(db)
with db:
before = db.execute(SELECT_ALL).fetchone()
retention = "7d12m"
simple_schedule.add_retention(db, simple_schedule.path, retention)
with db:
after = db.execute(SELECT_ALL).fetchone()
assert after['retention'] == json.dumps(parse_retention(retention))
retention2 = "4w"
simple_schedule.add_retention(db, simple_schedule.path, retention2)
with db:
after = db.execute(SELECT_ALL).fetchone()
assert after['retention'] == json.dumps(parse_retention(retention + retention2))
    def test_per_path_and_repeat_uniqueness(self, db):
s1 = Schedule(*('/foo', '24h', 'fs_name', '/foo'))
s2 = Schedule(*('/foo', '1d', 'fs_name', '/foo'))
s1.store_schedule(db)
with pytest.raises(sqlite3.IntegrityError):
s2.store_schedule(db)
ceph-main/src/pybind/mgr/snap_schedule/tests/fs/test_schedule_client.py
from datetime import datetime, timedelta
from unittest.mock import MagicMock
import pytest
from ...fs.schedule_client import get_prune_set, SNAPSHOT_TS_FORMAT
class TestScheduleClient(object):
def test_get_prune_set_empty_retention_no_prune(self):
now = datetime.now()
candidates = set()
for i in range(10):
ts = now - timedelta(minutes=i*5)
fake_dir = MagicMock()
fake_dir.d_name = f'scheduled-{ts.strftime(SNAPSHOT_TS_FORMAT)}'
candidates.add((fake_dir, ts))
ret = {}
prune_set = get_prune_set(candidates, ret, 99)
assert prune_set == set(), 'candidates are pruned despite empty retention'
def test_get_prune_set_two_retention_specs(self):
now = datetime.now()
candidates = set()
for i in range(10):
ts = now - timedelta(hours=i*1)
fake_dir = MagicMock()
fake_dir.d_name = f'scheduled-{ts.strftime(SNAPSHOT_TS_FORMAT)}'
candidates.add((fake_dir, ts))
for i in range(10):
ts = now - timedelta(days=i*1)
fake_dir = MagicMock()
fake_dir.d_name = f'scheduled-{ts.strftime(SNAPSHOT_TS_FORMAT)}'
candidates.add((fake_dir, ts))
# should keep 8 snapshots
ret = {'h': 6, 'd': 2}
prune_set = get_prune_set(candidates, ret, 99)
assert len(prune_set) == len(candidates) - 8, 'wrong size of prune set'
ceph-main/src/pybind/mgr/stats/__init__.py
from .module import Module
ceph-main/src/pybind/mgr/stats/module.py
"""
performance stats for ceph filesystem (for now...)
"""
import json
from typing import List, Dict
from mgr_module import MgrModule, Option, NotifyType
from .fs.perf_stats import FSPerfStats
class Module(MgrModule):
COMMANDS = [
{
"cmd": "fs perf stats "
"name=mds_rank,type=CephString,req=false "
"name=client_id,type=CephString,req=false "
"name=client_ip,type=CephString,req=false ",
"desc": "retrieve ceph fs performance stats",
"perm": "r"
},
]
MODULE_OPTIONS: List[Option] = []
NOTIFY_TYPES = [NotifyType.command, NotifyType.fs_map]
def __init__(self, *args, **kwargs):
super(Module, self).__init__(*args, **kwargs)
self.fs_perf_stats = FSPerfStats(self)
def notify(self, notify_type: NotifyType, notify_id):
if notify_type == NotifyType.command:
self.fs_perf_stats.notify_cmd(notify_id)
elif notify_type == NotifyType.fs_map:
self.fs_perf_stats.notify_fsmap()
def handle_command(self, inbuf, cmd):
prefix = cmd['prefix']
# only supported command is `fs perf stats` right now
if prefix.startswith('fs perf stats'):
return self.fs_perf_stats.get_perf_data(cmd)
raise NotImplementedError(cmd['prefix'])
ceph-main/src/pybind/mgr/stats/fs/__init__.py
ceph-main/src/pybind/mgr/stats/fs/perf_stats.py
import re
import json
import time
import uuid
import errno
import traceback
import logging
from collections import OrderedDict
from typing import List, Dict, Set
from mgr_module import CommandResult
from datetime import datetime, timedelta
from threading import Lock, Condition, Thread, Timer
from ipaddress import ip_address
PERF_STATS_VERSION = 2
QUERY_IDS = "query_ids"
GLOBAL_QUERY_ID = "global_query_id"
QUERY_LAST_REQUEST = "last_time_stamp"
QUERY_RAW_COUNTERS = "query_raw_counters"
QUERY_RAW_COUNTERS_GLOBAL = "query_raw_counters_global"
MDS_RANK_ALL = (-1,)
CLIENT_ID_ALL = r"\d*"
CLIENT_IP_ALL = ".*"
fs_list = [] # type: List[str]
MDS_PERF_QUERY_REGEX_MATCH_ALL_RANKS = '^(.*)$'
MDS_PERF_QUERY_REGEX_MATCH_CLIENTS = r'^(client.{0}\s+{1}):.*'
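# Illustrative note: with the CLIENT_ID_ALL/CLIENT_IP_ALL defaults the client
# regex above formats to '^(client.\d*\s+.*):.*'; with a concrete filter such
# as client_id='4305' and client_ip='10.0.0.1' (made-up values) it becomes
# '^(client.4305\s+10.0.0.1):.*'.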
MDS_PERF_QUERY_COUNTERS_MAP = OrderedDict({'cap_hit': 0,
'read_latency': 1,
'write_latency': 2,
'metadata_latency': 3,
'dentry_lease': 4,
'opened_files': 5,
'pinned_icaps': 6,
'opened_inodes': 7,
'read_io_sizes': 8,
'write_io_sizes': 9,
'avg_read_latency': 10,
'stdev_read_latency': 11,
'avg_write_latency': 12,
'stdev_write_latency': 13,
'avg_metadata_latency': 14,
'stdev_metadata_latency': 15})
MDS_PERF_QUERY_COUNTERS = [] # type: List[str]
MDS_GLOBAL_PERF_QUERY_COUNTERS = list(MDS_PERF_QUERY_COUNTERS_MAP.keys())
QUERY_EXPIRE_INTERVAL = timedelta(minutes=1)
REREGISTER_TIMER_INTERVAL = 1
CLIENT_METADATA_KEY = "client_metadata"
CLIENT_METADATA_SUBKEYS = ["hostname", "root"]
CLIENT_METADATA_SUBKEYS_OPTIONAL = ["mount_point"]
NON_EXISTENT_KEY_STR = "N/A"
logger = logging.getLogger(__name__)
class FilterSpec(object):
"""
query filters encapsulated and used as key for query map
"""
def __init__(self, mds_ranks, client_id, client_ip):
self.mds_ranks = mds_ranks
self.client_id = client_id
self.client_ip = client_ip
def __hash__(self):
return hash((self.mds_ranks, self.client_id, self.client_ip))
def __eq__(self, other):
        return (self.mds_ranks, self.client_id, self.client_ip) == (other.mds_ranks, other.client_id, other.client_ip)
def __ne__(self, other):
return not(self == other)
def extract_mds_ranks_from_spec(mds_rank_spec):
if not mds_rank_spec:
return MDS_RANK_ALL
match = re.match(r'^\d+(,\d+)*$', mds_rank_spec)
if not match:
raise ValueError("invalid mds filter spec: {}".format(mds_rank_spec))
return tuple(int(mds_rank) for mds_rank in match.group(0).split(','))
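# Illustrative examples (not executed): extract_mds_ranks_from_spec('0,1')
# returns (0, 1), extract_mds_ranks_from_spec(None) returns MDS_RANK_ALL, and
# a malformed spec such as '0,x' raises ValueError.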
def extract_client_id_from_spec(client_id_spec):
if not client_id_spec:
return CLIENT_ID_ALL
# the client id is the spec itself since it'll be a part
# of client filter regex.
if not client_id_spec.isdigit():
raise ValueError('invalid client_id filter spec: {}'.format(client_id_spec))
return client_id_spec
def extract_client_ip_from_spec(client_ip_spec):
if not client_ip_spec:
return CLIENT_IP_ALL
client_ip = client_ip_spec
if client_ip.startswith('v1:'):
client_ip = client_ip.replace('v1:', '')
elif client_ip.startswith('v2:'):
client_ip = client_ip.replace('v2:', '')
try:
ip_address(client_ip)
return client_ip_spec
except ValueError:
raise ValueError('invalid client_ip filter spec: {}'.format(client_ip_spec))
def extract_mds_ranks_from_report(mds_ranks_str):
if not mds_ranks_str:
return []
return [int(x) for x in mds_ranks_str.split(',')]
def extract_client_id_and_ip(client):
match = re.match(r'^(client\.\d+)\s(.*)', client)
if match:
return match.group(1), match.group(2)
return None, None
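# Illustrative example (made-up address): extract_client_id_and_ip(
# 'client.4305 v1:192.168.1.10:0/1234') returns
# ('client.4305', 'v1:192.168.1.10:0/1234'); input that does not start with
# 'client.<numeric id> ' yields (None, None).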
class FSPerfStats(object):
lock = Lock()
q_cv = Condition(lock)
r_cv = Condition(lock)
user_queries = {} # type: Dict[str, Dict]
meta_lock = Lock()
rqtimer = None
client_metadata = {
'metadata' : {},
'to_purge' : set(),
'in_progress' : {},
} # type: Dict
def __init__(self, module):
self.module = module
self.log = module.log
self.prev_rank0_gid = None
# report processor thread
self.report_processor = Thread(target=self.run)
self.report_processor.start()
def set_client_metadata(self, fs_name, client_id, key, meta):
result = (self.client_metadata['metadata'].setdefault(
fs_name, {})).setdefault(client_id, {})
if not key in result or not result[key] == meta:
result[key] = meta
def notify_cmd(self, cmdtag):
self.log.debug("cmdtag={0}".format(cmdtag))
with self.meta_lock:
try:
result = self.client_metadata['in_progress'].pop(cmdtag)
except KeyError:
self.log.warn(f"cmdtag {cmdtag} not found in client metadata")
return
fs_name = result[0]
client_meta = result[2].wait()
if client_meta[0] != 0:
self.log.warn("failed to fetch client metadata from gid {0}, err={1}".format(
result[1], client_meta[2]))
return
self.log.debug("notify: client metadata={0}".format(json.loads(client_meta[1])))
for metadata in json.loads(client_meta[1]):
client_id = "client.{0}".format(metadata['id'])
result = (self.client_metadata['metadata'].setdefault(fs_name, {})).setdefault(client_id, {})
for subkey in CLIENT_METADATA_SUBKEYS:
self.set_client_metadata(fs_name, client_id, subkey, metadata[CLIENT_METADATA_KEY][subkey])
for subkey in CLIENT_METADATA_SUBKEYS_OPTIONAL:
self.set_client_metadata(fs_name, client_id, subkey,
metadata[CLIENT_METADATA_KEY].get(subkey, NON_EXISTENT_KEY_STR))
metric_features = int(metadata[CLIENT_METADATA_KEY]["metric_spec"]["metric_flags"]["feature_bits"], 16)
supported_metrics = [metric for metric, bit in MDS_PERF_QUERY_COUNTERS_MAP.items() if metric_features & (1 << bit)]
self.set_client_metadata(fs_name, client_id, "valid_metrics", supported_metrics)
kver = metadata[CLIENT_METADATA_KEY].get("kernel_version", None)
if kver:
self.set_client_metadata(fs_name, client_id, "kernel_version", kver)
# when all async requests are done, purge clients metadata if any.
if not self.client_metadata['in_progress']:
global fs_list
for fs_name in fs_list:
for client in self.client_metadata['to_purge']:
try:
if client in self.client_metadata['metadata'][fs_name]:
self.log.info("purge client metadata for {0}".format(client))
self.client_metadata['metadata'][fs_name].pop(client)
                        except KeyError:
                            pass
if fs_name in self.client_metadata['metadata'] and not bool(self.client_metadata['metadata'][fs_name]):
self.client_metadata['metadata'].pop(fs_name)
self.client_metadata['to_purge'].clear()
self.log.debug("client_metadata={0}, to_purge={1}".format(
self.client_metadata['metadata'], self.client_metadata['to_purge']))
def notify_fsmap(self):
#Reregister the user queries when there is a new rank0 mds
with self.lock:
gid_state = FSPerfStats.get_rank0_mds_gid_state(self.module.get('fs_map'))
if not gid_state:
return
for value in gid_state:
rank0_gid, state = value
if (rank0_gid and rank0_gid != self.prev_rank0_gid and state == 'up:active'):
#the new rank0 MDS is up:active
ua_last_updated = time.monotonic()
if (self.rqtimer and self.rqtimer.is_alive()):
self.rqtimer.cancel()
self.rqtimer = Timer(REREGISTER_TIMER_INTERVAL,
self.re_register_queries,
args=(rank0_gid, ua_last_updated,))
self.rqtimer.start()
def re_register_queries(self, rank0_gid, ua_last_updated):
#reregister queries if the metrics are the latest. Otherwise reschedule the timer and
#wait for the empty metrics
with self.lock:
if self.mx_last_updated >= ua_last_updated:
self.log.debug("reregistering queries...")
self.module.reregister_mds_perf_queries()
self.prev_rank0_gid = rank0_gid
else:
#reschedule the timer
self.rqtimer = Timer(REREGISTER_TIMER_INTERVAL,
self.re_register_queries, args=(rank0_gid, ua_last_updated,))
self.rqtimer.start()
@staticmethod
def get_rank0_mds_gid_state(fsmap):
gid_state = []
for fs in fsmap['filesystems']:
mds_map = fs['mdsmap']
if mds_map is not None:
for mds_id, mds_status in mds_map['info'].items():
if mds_status['rank'] == 0:
gid_state.append([mds_status['gid'], mds_status['state']])
if gid_state:
return gid_state
logger.warn("No rank0 mds in the fsmap")
def update_client_meta(self):
new_updates = {}
pending_updates = [v[0] for v in self.client_metadata['in_progress'].values()]
global fs_list
fs_list.clear()
with self.meta_lock:
fsmap = self.module.get('fs_map')
for fs in fsmap['filesystems']:
mds_map = fs['mdsmap']
if mds_map is not None:
fsname = mds_map['fs_name']
for mds_id, mds_status in mds_map['info'].items():
if mds_status['rank'] == 0:
fs_list.append(fsname)
rank0_gid = mds_status['gid']
tag = str(uuid.uuid4())
result = CommandResult(tag)
new_updates[tag] = (fsname, rank0_gid, result)
self.client_metadata['in_progress'].update(new_updates)
self.log.debug(f"updating client metadata from {new_updates}")
cmd_dict = {'prefix': 'client ls'}
for tag,val in new_updates.items():
self.module.send_command(val[2], "mds", str(val[1]), json.dumps(cmd_dict), tag)
def run(self):
try:
self.log.info("FSPerfStats::report_processor starting...")
while True:
with self.lock:
self.scrub_expired_queries()
self.process_mds_reports()
self.r_cv.notify()
stats_period = int(self.module.get_ceph_option("mgr_stats_period"))
self.q_cv.wait(stats_period)
self.log.debug("FSPerfStats::tick")
except Exception as e:
self.log.fatal("fatal error: {}".format(traceback.format_exc()))
def cull_mds_entries(self, raw_perf_counters, incoming_metrics, missing_clients):
        # this is pretty straightforward -- find which MDSs are missing from
# what is tracked vs what we received in incoming report and purge
# the whole bunch.
tracked_ranks = raw_perf_counters.keys()
available_ranks = [int(counter['k'][0][0]) for counter in incoming_metrics]
for rank in set(tracked_ranks) - set(available_ranks):
culled = raw_perf_counters.pop(rank)
self.log.info("culled {0} client entries from rank {1} (laggy: {2})".format(
len(culled[1]), rank, "yes" if culled[0] else "no"))
missing_clients.update(list(culled[1].keys()))
def cull_client_entries(self, raw_perf_counters, incoming_metrics, missing_clients):
# this is a bit more involved -- for each rank figure out what clients
# are missing in incoming report and purge them from our tracked map.
# but, if this is invoked after cull_mds_entries(), the rank set
        # is the same, so we can loop based on that assumption.
ranks = raw_perf_counters.keys()
for rank in ranks:
tracked_clients = raw_perf_counters[rank][1].keys()
available_clients = [extract_client_id_and_ip(counter['k'][1][0]) for counter in incoming_metrics]
for client in set(tracked_clients) - set([c[0] for c in available_clients if c[0] is not None]):
raw_perf_counters[rank][1].pop(client)
self.log.info("culled {0} from rank {1}".format(client, rank))
missing_clients.add(client)
def cull_missing_entries(self, raw_perf_counters, incoming_metrics):
missing_clients = set() # type: Set[str]
self.cull_mds_entries(raw_perf_counters, incoming_metrics, missing_clients)
self.cull_client_entries(raw_perf_counters, incoming_metrics, missing_clients)
self.log.debug("missing_clients={0}".format(missing_clients))
with self.meta_lock:
if self.client_metadata['in_progress']:
self.client_metadata['to_purge'].update(missing_clients)
self.log.info("deferring client metadata purge (now {0} client(s))".format(
len(self.client_metadata['to_purge'])))
else:
global fs_list
for fs_name in fs_list:
for client in missing_clients:
try:
self.log.info("purge client metadata for {0}".format(client))
if client in self.client_metadata['metadata'][fs_name]:
self.client_metadata['metadata'][fs_name].pop(client)
except KeyError:
pass
self.log.debug("client_metadata={0}".format(self.client_metadata['metadata']))
def cull_global_metrics(self, raw_perf_counters, incoming_metrics):
tracked_clients = raw_perf_counters.keys()
available_clients = [counter['k'][0][0] for counter in incoming_metrics]
for client in set(tracked_clients) - set(available_clients):
raw_perf_counters.pop(client)
def get_raw_perf_counters(self, query):
raw_perf_counters = query.setdefault(QUERY_RAW_COUNTERS, {})
for query_id in query[QUERY_IDS]:
result = self.module.get_mds_perf_counters(query_id)
self.log.debug("raw_perf_counters={}".format(raw_perf_counters))
self.log.debug("get_raw_perf_counters={}".format(result))
# extract passed in delayed ranks. metrics for delayed ranks are tagged
# as stale.
delayed_ranks = extract_mds_ranks_from_report(result['metrics'][0][0])
# what's received from MDS
incoming_metrics = result['metrics'][1]
# metrics updated (monotonic) time
self.mx_last_updated = result['metrics'][2][0]
# cull missing MDSs and clients
self.cull_missing_entries(raw_perf_counters, incoming_metrics)
# iterate over metrics list and update our copy (note that we have
# already culled the differences).
global fs_list
for fs_name in fs_list:
for counter in incoming_metrics:
mds_rank = int(counter['k'][0][0])
client_id, client_ip = extract_client_id_and_ip(counter['k'][1][0])
if self.client_metadata['metadata'].get(fs_name):
if (client_id is not None or not client_ip) and\
self.client_metadata["metadata"][fs_name].get(client_id): # client_id _could_ be 0
with self.meta_lock:
self.set_client_metadata(fs_name, client_id, "IP", client_ip)
else:
self.log.warn(f"client metadata for client_id={client_id} might be unavailable")
else:
self.log.warn(f"client metadata for filesystem={fs_name} might be unavailable")
raw_counters = raw_perf_counters.setdefault(mds_rank, [False, {}])
raw_counters[0] = True if mds_rank in delayed_ranks else False
raw_client_counters = raw_counters[1].setdefault(client_id, [])
del raw_client_counters[:]
raw_client_counters.extend(counter['c'])
# send an asynchronous client metadata refresh
self.update_client_meta()
def get_raw_perf_counters_global(self, query):
raw_perf_counters = query.setdefault(QUERY_RAW_COUNTERS_GLOBAL, {})
result = self.module.get_mds_perf_counters(query[GLOBAL_QUERY_ID])
self.log.debug("raw_perf_counters_global={}".format(raw_perf_counters))
self.log.debug("get_raw_perf_counters_global={}".format(result))
global_metrics = result['metrics'][1]
self.cull_global_metrics(raw_perf_counters, global_metrics)
for counter in global_metrics:
client_id, _ = extract_client_id_and_ip(counter['k'][0][0])
raw_client_counters = raw_perf_counters.setdefault(client_id, [])
del raw_client_counters[:]
raw_client_counters.extend(counter['c'])
def process_mds_reports(self):
for query in self.user_queries.values():
self.get_raw_perf_counters(query)
self.get_raw_perf_counters_global(query)
def scrub_expired_queries(self):
expire_time = datetime.now() - QUERY_EXPIRE_INTERVAL
for filter_spec in list(self.user_queries.keys()):
user_query = self.user_queries[filter_spec]
self.log.debug("scrubbing query={}".format(user_query))
if user_query[QUERY_LAST_REQUEST] < expire_time:
expired_query_ids = user_query[QUERY_IDS].copy()
expired_query_ids.append(user_query[GLOBAL_QUERY_ID])
self.log.debug("unregistering query={} ids={}".format(user_query, expired_query_ids))
self.unregister_mds_perf_queries(filter_spec, expired_query_ids)
del self.user_queries[filter_spec]
def prepare_mds_perf_query(self, rank, client_id, client_ip):
mds_rank_regex = MDS_PERF_QUERY_REGEX_MATCH_ALL_RANKS
if not rank == -1:
mds_rank_regex = '^({})$'.format(rank)
client_regex = MDS_PERF_QUERY_REGEX_MATCH_CLIENTS.format(client_id, client_ip)
return {
'key_descriptor' : [
{'type' : 'mds_rank', 'regex' : mds_rank_regex},
{'type' : 'client_id', 'regex' : client_regex},
],
'performance_counter_descriptors' : MDS_PERF_QUERY_COUNTERS,
}
def prepare_global_perf_query(self, client_id, client_ip):
client_regex = MDS_PERF_QUERY_REGEX_MATCH_CLIENTS.format(client_id, client_ip)
return {
'key_descriptor' : [
{'type' : 'client_id', 'regex' : client_regex},
],
'performance_counter_descriptors' : MDS_GLOBAL_PERF_QUERY_COUNTERS,
}
def unregister_mds_perf_queries(self, filter_spec, query_ids):
self.log.info("unregister_mds_perf_queries: filter_spec={0}, query_id={1}".format(
filter_spec, query_ids))
for query_id in query_ids:
self.module.remove_mds_perf_query(query_id)
def register_mds_perf_query(self, filter_spec):
mds_ranks = filter_spec.mds_ranks
client_id = filter_spec.client_id
client_ip = filter_spec.client_ip
query_ids = []
try:
# register per-mds perf query
for rank in mds_ranks:
query = self.prepare_mds_perf_query(rank, client_id, client_ip)
self.log.info("register_mds_perf_query: {}".format(query))
query_id = self.module.add_mds_perf_query(query)
if query_id is None: # query id can be 0
raise RuntimeError("failed to add MDS perf query: {}".format(query))
query_ids.append(query_id)
except Exception:
for query_id in query_ids:
self.module.remove_mds_perf_query(query_id)
raise
return query_ids
def register_global_perf_query(self, filter_spec):
client_id = filter_spec.client_id
client_ip = filter_spec.client_ip
# register a global perf query for metrics
query = self.prepare_global_perf_query(client_id, client_ip)
self.log.info("register_global_perf_query: {}".format(query))
query_id = self.module.add_mds_perf_query(query)
if query_id is None: # query id can be 0
raise RuntimeError("failed to add global perf query: {}".format(query))
return query_id
def register_query(self, filter_spec):
user_query = self.user_queries.get(filter_spec, None)
if not user_query:
user_query = {
QUERY_IDS : self.register_mds_perf_query(filter_spec),
GLOBAL_QUERY_ID : self.register_global_perf_query(filter_spec),
QUERY_LAST_REQUEST : datetime.now(),
}
self.user_queries[filter_spec] = user_query
self.q_cv.notify()
self.r_cv.wait(5)
else:
user_query[QUERY_LAST_REQUEST] = datetime.now()
return user_query
def generate_report(self, user_query):
result = {} # type: Dict
global fs_list
# start with counter info -- metrics that are global and per mds
result["version"] = PERF_STATS_VERSION
result["global_counters"] = MDS_GLOBAL_PERF_QUERY_COUNTERS
result["counters"] = MDS_PERF_QUERY_COUNTERS
# fill in client metadata
raw_perfs_global = user_query.setdefault(QUERY_RAW_COUNTERS_GLOBAL, {})
raw_perfs = user_query.setdefault(QUERY_RAW_COUNTERS, {})
with self.meta_lock:
raw_counters_clients = []
for val in raw_perfs.values():
raw_counters_clients.extend(list(val[1]))
result_meta = result.setdefault("client_metadata", {})
for fs_name in fs_list:
meta = self.client_metadata["metadata"]
if fs_name in meta and len(meta[fs_name]):
for client_id in raw_perfs_global.keys():
if client_id in meta[fs_name] and client_id in raw_counters_clients:
client_meta = (result_meta.setdefault(fs_name, {})).setdefault(client_id, {})
client_meta.update(meta[fs_name][client_id])
# start populating global perf metrics w/ client metadata
metrics = result.setdefault("global_metrics", {})
for fs_name in fs_list:
if fs_name in meta and len(meta[fs_name]):
for client_id, counters in raw_perfs_global.items():
if client_id in meta[fs_name] and client_id in raw_counters_clients:
global_client_metrics = (metrics.setdefault(fs_name, {})).setdefault(client_id, [])
del global_client_metrics[:]
global_client_metrics.extend(counters)
# and, now per-mds metrics keyed by mds rank along with delayed ranks
metrics = result.setdefault("metrics", {})
metrics["delayed_ranks"] = [rank for rank, counters in raw_perfs.items() if counters[0]]
for rank, counters in raw_perfs.items():
mds_key = "mds.{}".format(rank)
mds_metrics = metrics.setdefault(mds_key, {})
mds_metrics.update(counters[1])
return result
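    # Rough shape of the report assembled above (illustrative only; the fs and
    # client names are made up):
    #
    #   {"version": 2,
    #    "global_counters": [...], "counters": [...],
    #    "client_metadata": {"a_fs": {"client.4305": {...}}},
    #    "global_metrics": {"a_fs": {"client.4305": [...]}},
    #    "metrics": {"delayed_ranks": [...], "mds.0": {"client.4305": [...]}}}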
def extract_query_filters(self, cmd):
mds_rank_spec = cmd.get('mds_rank', None)
client_id_spec = cmd.get('client_id', None)
client_ip_spec = cmd.get('client_ip', None)
self.log.debug("mds_rank_spec={0}, client_id_spec={1}, client_ip_spec={2}".format(
mds_rank_spec, client_id_spec, client_ip_spec))
mds_ranks = extract_mds_ranks_from_spec(mds_rank_spec)
client_id = extract_client_id_from_spec(client_id_spec)
client_ip = extract_client_ip_from_spec(client_ip_spec)
return FilterSpec(mds_ranks, client_id, client_ip)
def get_perf_data(self, cmd):
try:
filter_spec = self.extract_query_filters(cmd)
except ValueError as e:
return -errno.EINVAL, "", str(e)
counters = {}
with self.lock:
user_query = self.register_query(filter_spec)
result = self.generate_report(user_query)
return 0, json.dumps(result), ""
ceph-main/src/pybind/mgr/status/__init__.py
from .module import Module
ceph-main/src/pybind/mgr/status/module.py
"""
High level status display commands
"""
from collections import defaultdict
from prettytable import PrettyTable
from typing import Any, Dict, List, Optional, Tuple, Union
import errno
import fnmatch
import mgr_util
import json
from mgr_module import CLIReadCommand, MgrModule, HandleCommandResult
class Module(MgrModule):
def get_latest(self, daemon_type: str, daemon_name: str, stat: str) -> int:
data = self.get_counter(daemon_type, daemon_name, stat)[stat]
if data:
return data[-1][1]
else:
return 0
def get_rate(self, daemon_type: str, daemon_name: str, stat: str) -> int:
data = self.get_counter(daemon_type, daemon_name, stat)[stat]
if data and len(data) > 1 and (int(data[-1][0] - data[-2][0]) != 0):
return (data[-1][1] - data[-2][1]) // int(data[-1][0] - data[-2][0])
else:
return 0
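    # Illustrative example: if the last two samples of a counter are the
    # (timestamp, value) pairs (1000, 50) and (1010, 150), get_rate() above
    # returns (150 - 50) // (1010 - 1000) = 10 per second.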
@CLIReadCommand("fs status")
def handle_fs_status(self,
fs: Optional[str] = None,
format: str = 'plain') -> Tuple[int, str, str]:
"""
Show the status of a CephFS filesystem
"""
output = ""
json_output: Dict[str, List[Dict[str, Union[int, str, List[str]]]]] = \
dict(mdsmap=[],
pools=[],
clients=[],
mds_version=[])
output_format = format
fs_filter = fs
mds_versions = defaultdict(list)
fsmap = self.get("fs_map")
for filesystem in fsmap['filesystems']:
if fs_filter and filesystem['mdsmap']['fs_name'] != fs_filter:
continue
rank_table = PrettyTable(
("RANK", "STATE", "MDS", "ACTIVITY", "DNS", "INOS", "DIRS", "CAPS"),
border=False,
)
rank_table.left_padding_width = 0
rank_table.right_padding_width = 2
mdsmap = filesystem['mdsmap']
client_count = 0
for rank in mdsmap["in"]:
up = "mds_{0}".format(rank) in mdsmap["up"]
if up:
gid = mdsmap['up']["mds_{0}".format(rank)]
info = mdsmap['info']['gid_{0}'.format(gid)]
dns = self.get_latest("mds", info['name'], "mds_mem.dn")
inos = self.get_latest("mds", info['name'], "mds_mem.ino")
dirs = self.get_latest("mds", info['name'], "mds_mem.dir")
caps = self.get_latest("mds", info['name'], "mds_mem.cap")
if rank == 0:
client_count = self.get_latest("mds", info['name'],
"mds_sessions.session_count")
elif client_count == 0:
# In case rank 0 was down, look at another rank's
# sessionmap to get an indication of clients.
client_count = self.get_latest("mds", info['name'],
"mds_sessions.session_count")
laggy = "laggy_since" in info
state = info['state'].split(":")[1]
if laggy:
state += "(laggy)"
if state == "active" and not laggy:
c_state = mgr_util.colorize(state, mgr_util.GREEN)
else:
c_state = mgr_util.colorize(state, mgr_util.YELLOW)
# Populate based on context of state, e.g. client
# ops for an active daemon, replay progress, reconnect
# progress
activity = ""
if state == "active":
rate = self.get_rate("mds", info['name'],
"mds_server.handle_client_request")
if output_format not in ('json', 'json-pretty'):
activity = "Reqs: " + mgr_util.format_dimless(rate, 5) + "/s"
metadata = self.get_metadata('mds', info['name'],
default=defaultdict(lambda: 'unknown'))
assert metadata
mds_versions[metadata['ceph_version']].append(info['name'])
if output_format in ('json', 'json-pretty'):
json_output['mdsmap'].append({
'rank': rank,
'name': info['name'],
'state': state,
'rate': rate if state == "active" else "0",
'dns': dns,
'inos': inos,
'dirs': dirs,
'caps': caps
})
else:
rank_table.add_row([
mgr_util.bold(rank.__str__()), c_state, info['name'],
activity,
mgr_util.format_dimless(dns, 5),
mgr_util.format_dimless(inos, 5),
mgr_util.format_dimless(dirs, 5),
mgr_util.format_dimless(caps, 5)
])
else:
if output_format in ('json', 'json-pretty'):
json_output['mdsmap'].append({
'rank': rank,
'state': "failed"
})
else:
rank_table.add_row([
rank, "failed", "", "", "", "", "", ""
])
# Find the standby replays
for gid_str, daemon_info in mdsmap['info'].items():
if daemon_info['state'] != "up:standby-replay":
continue
inos = self.get_latest("mds", daemon_info['name'], "mds_mem.ino")
dns = self.get_latest("mds", daemon_info['name'], "mds_mem.dn")
dirs = self.get_latest("mds", daemon_info['name'], "mds_mem.dir")
caps = self.get_latest("mds", daemon_info['name'], "mds_mem.cap")
events = self.get_rate("mds", daemon_info['name'], "mds_log.replayed")
if output_format not in ('json', 'json-pretty'):
activity = "Evts: " + mgr_util.format_dimless(events, 5) + "/s"
metadata = self.get_metadata('mds', daemon_info['name'],
default=defaultdict(lambda: 'unknown'))
assert metadata
mds_versions[metadata['ceph_version']].append(daemon_info['name'])
if output_format in ('json', 'json-pretty'):
json_output['mdsmap'].append({
                        'rank': daemon_info['rank'],
                        'name': daemon_info['name'],
                        'state': 'standby-replay',
                        'events': events,
                        'dns': dns,
                        'inos': inos,
                        'dirs': dirs,
                        'caps': caps
})
else:
rank_table.add_row([
"{0}-s".format(daemon_info['rank']), "standby-replay",
daemon_info['name'], activity,
mgr_util.format_dimless(dns, 5),
mgr_util.format_dimless(inos, 5),
mgr_util.format_dimless(dirs, 5),
mgr_util.format_dimless(caps, 5)
])
df = self.get("df")
pool_stats = dict([(p['id'], p['stats']) for p in df['pools']])
osdmap = self.get("osd_map")
pools = dict([(p['pool'], p) for p in osdmap['pools']])
metadata_pool_id = mdsmap['metadata_pool']
data_pool_ids = mdsmap['data_pools']
pools_table = PrettyTable(["POOL", "TYPE", "USED", "AVAIL"],
border=False)
pools_table.left_padding_width = 0
pools_table.right_padding_width = 2
for pool_id in [metadata_pool_id] + data_pool_ids:
pool_type = "metadata" if pool_id == metadata_pool_id else "data"
stats = pool_stats[pool_id]
if output_format in ('json', 'json-pretty'):
json_output['pools'].append({
'id': pool_id,
'name': pools[pool_id]['pool_name'],
'type': pool_type,
'used': stats['bytes_used'],
'avail': stats['max_avail']
})
else:
pools_table.add_row([
pools[pool_id]['pool_name'], pool_type,
mgr_util.format_bytes(stats['bytes_used'], 5),
mgr_util.format_bytes(stats['max_avail'], 5)
])
if output_format in ('json', 'json-pretty'):
json_output['clients'].append({
'fs': mdsmap['fs_name'],
'clients': client_count,
})
else:
output += "{0} - {1} clients\n".format(
mdsmap['fs_name'], client_count)
output += "=" * len(mdsmap['fs_name']) + "\n"
output += rank_table.get_string()
output += "\n" + pools_table.get_string() + "\n"
if not output and not json_output and fs_filter is not None:
return errno.EINVAL, "", "Invalid filesystem: " + fs_filter
standby_table = PrettyTable(["STANDBY MDS"], border=False)
standby_table.left_padding_width = 0
standby_table.right_padding_width = 2
for standby in fsmap['standbys']:
metadata = self.get_metadata('mds', standby['name'],
default=defaultdict(lambda: 'unknown'))
assert metadata
mds_versions[metadata['ceph_version']].append(standby['name'])
if output_format in ('json', 'json-pretty'):
json_output['mdsmap'].append({
'name': standby['name'],
'state': "standby"
})
else:
standby_table.add_row([standby['name']])
if output_format not in ('json', 'json-pretty'):
output += "\n" + standby_table.get_string() + "\n"
if len(mds_versions) == 1:
if output_format in ('json', 'json-pretty'):
json_output['mds_version'] = [dict(version=k, daemon=v)
for k, v in mds_versions.items()]
else:
output += "MDS version: {0}".format([*mds_versions][0])
else:
version_table = PrettyTable(["VERSION", "DAEMONS"],
border=False)
version_table.left_padding_width = 0
version_table.right_padding_width = 2
for version, daemons in mds_versions.items():
if output_format in ('json', 'json-pretty'):
json_output['mds_version'].append({
'version': version,
'daemons': daemons
})
else:
version_table.add_row([
version,
", ".join(daemons)
])
if output_format not in ('json', 'json-pretty'):
output += version_table.get_string() + "\n"
if output_format == "json":
return HandleCommandResult(stdout=json.dumps(json_output, sort_keys=True))
elif output_format == "json-pretty":
return HandleCommandResult(stdout=json.dumps(json_output, sort_keys=True, indent=4,
separators=(',', ': ')))
else:
return HandleCommandResult(stdout=output)
@CLIReadCommand("osd status")
def handle_osd_status(self, bucket: Optional[str] = None, format: str = 'plain') -> Tuple[int, str, str]:
"""
Show the status of OSDs within a bucket, or all
"""
json_output: Dict[str, List[Any]] = \
dict(OSDs=[])
output_format = format
osd_table = PrettyTable(['ID', 'HOST', 'USED', 'AVAIL', 'WR OPS',
'WR DATA', 'RD OPS', 'RD DATA', 'STATE'],
border=False)
osd_table.align['ID'] = 'r'
osd_table.align['HOST'] = 'l'
osd_table.align['USED'] = 'r'
osd_table.align['AVAIL'] = 'r'
osd_table.align['WR OPS'] = 'r'
osd_table.align['WR DATA'] = 'r'
osd_table.align['RD OPS'] = 'r'
osd_table.align['RD DATA'] = 'r'
osd_table.align['STATE'] = 'l'
osd_table.left_padding_width = 0
osd_table.right_padding_width = 2
osdmap = self.get("osd_map")
filter_osds = set()
bucket_filter = None
if bucket is not None:
self.log.debug(f"Filtering to bucket '{bucket}'")
bucket_filter = bucket
crush = self.get("osd_map_crush")
found = False
for bucket_ in crush['buckets']:
if fnmatch.fnmatch(bucket_['name'], bucket_filter):
found = True
filter_osds.update([i['id'] for i in bucket_['items']])
if not found:
msg = "Bucket '{0}' not found".format(bucket_filter)
                return errno.ENOENT, "", msg
# Build dict of OSD ID to stats
osd_stats = dict([(o['osd'], o) for o in self.get("osd_stats")['osd_stats']])
for osd in osdmap['osds']:
osd_id = osd['osd']
if bucket_filter and osd_id not in filter_osds:
continue
hostname = ""
kb_used = 0
kb_avail = 0
if osd_id in osd_stats:
metadata = self.get_metadata('osd', str(osd_id), default=defaultdict(str))
stats = osd_stats[osd_id]
assert metadata
hostname = metadata['hostname']
kb_used = stats['kb_used'] * 1024
kb_avail = stats['kb_avail'] * 1024
wr_ops_rate = (self.get_rate("osd", osd_id.__str__(), "osd.op_w") +
self.get_rate("osd", osd_id.__str__(), "osd.op_rw"))
wr_byte_rate = self.get_rate("osd", osd_id.__str__(), "osd.op_in_bytes")
rd_ops_rate = self.get_rate("osd", osd_id.__str__(), "osd.op_r")
rd_byte_rate = self.get_rate("osd", osd_id.__str__(), "osd.op_out_bytes")
osd_table.add_row([osd_id, hostname,
mgr_util.format_bytes(kb_used, 5),
mgr_util.format_bytes(kb_avail, 5),
mgr_util.format_dimless(wr_ops_rate, 5),
mgr_util.format_bytes(wr_byte_rate, 5),
mgr_util.format_dimless(rd_ops_rate, 5),
mgr_util.format_bytes(rd_byte_rate, 5),
','.join(osd['state']),
])
if output_format in ('json', 'json-pretty'):
json_output['OSDs'].append({
'id': osd_id,
'host name': hostname,
'kb used' : kb_used,
'kb available':kb_avail,
'write ops rate': wr_ops_rate,
'write byte rate': wr_byte_rate,
'read ops rate': rd_ops_rate,
'read byte rate': rd_byte_rate,
'state': osd['state']
})
if output_format == "json":
return 0, json.dumps(json_output, sort_keys=True) , ""
elif output_format == "json-pretty":
return 0, json.dumps(json_output, sort_keys=True,indent=4,separators=(',', ': ')) , ""
else:
return 0, osd_table.get_string(), ""
ceph-main/src/pybind/mgr/telegraf/__init__.py
from .module import Module
ceph-main/src/pybind/mgr/telegraf/basesocket.py
import socket
from urllib.parse import ParseResult
from typing import Any, Dict, Optional, Tuple, Union
class BaseSocket(object):
schemes = {
'unixgram': (socket.AF_UNIX, socket.SOCK_DGRAM),
'unix': (socket.AF_UNIX, socket.SOCK_STREAM),
'tcp': (socket.AF_INET, socket.SOCK_STREAM),
'tcp6': (socket.AF_INET6, socket.SOCK_STREAM),
'udp': (socket.AF_INET, socket.SOCK_DGRAM),
'udp6': (socket.AF_INET6, socket.SOCK_DGRAM),
}
def __init__(self, url: ParseResult) -> None:
self.url = url
try:
socket_family, socket_type = self.schemes[self.url.scheme]
except KeyError:
raise RuntimeError('Unsupported socket type: %s', self.url.scheme)
self.sock = socket.socket(family=socket_family, type=socket_type)
if self.sock.family == socket.AF_UNIX:
self.address: Union[str, Tuple[str, int]] = self.url.path
else:
assert self.url.hostname
assert self.url.port
self.address = (self.url.hostname, self.url.port)
def connect(self) -> None:
return self.sock.connect(self.address)
def close(self) -> None:
self.sock.close()
def send(self, data: str, flags: int = 0) -> int:
return self.sock.send(data.encode('utf-8') + b'\n', flags)
def __del__(self) -> None:
self.sock.close()
def __enter__(self) -> 'BaseSocket':
self.connect()
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.close()
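# Minimal usage sketch (illustrative only; the address is made up):
#
#     from urllib.parse import urlparse
#     with BaseSocket(urlparse('udp://127.0.0.1:8094')) as sock:
#         sock.send('ceph_cluster_stats,type_instance=health value=0i')
#
# 'udp' maps to an AF_INET/SOCK_DGRAM socket via the `schemes` table above.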
ceph-main/src/pybind/mgr/telegraf/module.py
import errno
import json
import itertools
import socket
import time
from threading import Event
from telegraf.basesocket import BaseSocket
from telegraf.protocol import Line
from mgr_module import CLICommand, CLIReadCommand, MgrModule, Option, OptionValue, PG_STATES
from typing import cast, Any, Dict, Iterable, Optional, Tuple
from urllib.parse import urlparse
class Module(MgrModule):
MODULE_OPTIONS = [
Option(name='address',
default='unixgram:///tmp/telegraf.sock'),
Option(name='interval',
type='secs',
default=15)]
ceph_health_mapping = {'HEALTH_OK': 0, 'HEALTH_WARN': 1, 'HEALTH_ERR': 2}
@property
def config_keys(self) -> Dict[str, OptionValue]:
return dict((o['name'], o.get('default', None)) for o in self.MODULE_OPTIONS)
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(Module, self).__init__(*args, **kwargs)
self.event = Event()
self.run = True
self.fsid: Optional[str] = None
self.config: Dict[str, OptionValue] = dict()
def get_fsid(self) -> str:
if not self.fsid:
self.fsid = self.get('mon_map')['fsid']
assert self.fsid is not None
return self.fsid
def get_pool_stats(self) -> Iterable[Dict[str, Any]]:
df = self.get('df')
df_types = [
'bytes_used',
'kb_used',
'dirty',
'rd',
'rd_bytes',
'stored_raw',
'wr',
'wr_bytes',
'objects',
'max_avail',
'quota_objects',
'quota_bytes'
]
for df_type in df_types:
for pool in df['pools']:
yield {
'measurement': 'ceph_pool_stats',
'tags': {
'pool_name': pool['name'],
'pool_id': pool['id'],
'type_instance': df_type,
'fsid': self.get_fsid()
},
'value': pool['stats'][df_type],
}
def get_daemon_stats(self) -> Iterable[Dict[str, Any]]:
for daemon, counters in self.get_unlabeled_perf_counters().items():
svc_type, svc_id = daemon.split('.', 1)
metadata = self.get_metadata(svc_type, svc_id)
if not metadata:
continue
for path, counter_info in counters.items():
if counter_info['type'] & self.PERFCOUNTER_HISTOGRAM:
continue
yield {
'measurement': 'ceph_daemon_stats',
'tags': {
'ceph_daemon': daemon,
'type_instance': path,
'host': metadata['hostname'],
'fsid': self.get_fsid()
},
'value': counter_info['value']
}
def get_pg_stats(self) -> Dict[str, int]:
stats = dict()
pg_status = self.get('pg_status')
for key in ['bytes_total', 'data_bytes', 'bytes_used', 'bytes_avail',
'num_pgs', 'num_objects', 'num_pools']:
stats[key] = pg_status[key]
for state in PG_STATES:
stats['num_pgs_{0}'.format(state)] = 0
stats['num_pgs'] = pg_status['num_pgs']
for state in pg_status['pgs_by_state']:
states = state['state_name'].split('+')
for s in PG_STATES:
key = 'num_pgs_{0}'.format(s)
if s in states:
stats[key] += state['count']
return stats
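    # Illustrative note: a PG reported as 'active+clean' increments both
    # num_pgs_active and num_pgs_clean above, because the combined state name
    # is split on '+' and matched against every known PG state.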
def get_cluster_stats(self) -> Iterable[Dict[str, Any]]:
stats = dict()
health = json.loads(self.get('health')['json'])
stats['health'] = self.ceph_health_mapping.get(health['status'])
mon_status = json.loads(self.get('mon_status')['json'])
stats['num_mon'] = len(mon_status['monmap']['mons'])
stats['mon_election_epoch'] = mon_status['election_epoch']
stats['mon_outside_quorum'] = len(mon_status['outside_quorum'])
stats['mon_quorum'] = len(mon_status['quorum'])
osd_map = self.get('osd_map')
stats['num_osd'] = len(osd_map['osds'])
stats['num_pg_temp'] = len(osd_map['pg_temp'])
stats['osd_epoch'] = osd_map['epoch']
mgr_map = self.get('mgr_map')
stats['mgr_available'] = int(mgr_map['available'])
stats['num_mgr_standby'] = len(mgr_map['standbys'])
stats['mgr_epoch'] = mgr_map['epoch']
num_up = 0
num_in = 0
for osd in osd_map['osds']:
if osd['up'] == 1:
num_up += 1
if osd['in'] == 1:
num_in += 1
stats['num_osd_up'] = num_up
stats['num_osd_in'] = num_in
fs_map = self.get('fs_map')
stats['num_mds_standby'] = len(fs_map['standbys'])
stats['num_fs'] = len(fs_map['filesystems'])
stats['mds_epoch'] = fs_map['epoch']
num_mds_up = 0
for fs in fs_map['filesystems']:
num_mds_up += len(fs['mdsmap']['up'])
stats['num_mds_up'] = num_mds_up
stats['num_mds'] = num_mds_up + cast(int, stats['num_mds_standby'])
stats.update(self.get_pg_stats())
for key, value in stats.items():
assert value is not None
yield {
'measurement': 'ceph_cluster_stats',
'tags': {
'type_instance': key,
'fsid': self.get_fsid()
},
'value': int(value)
}
def set_config_option(self, option: str, value: str) -> None:
if option not in self.config_keys.keys():
            raise RuntimeError('{0} is an unknown configuration '
'option'.format(option))
if option == 'interval':
try:
interval = int(value)
except (ValueError, TypeError):
raise RuntimeError('invalid {0} configured. Please specify '
'a valid integer'.format(option))
if interval < 5:
raise RuntimeError('interval should be set to at least 5 seconds')
self.config[option] = interval
else:
self.config[option] = value
def init_module_config(self) -> None:
self.config['address'] = \
self.get_module_option("address", default=self.config_keys['address'])
interval = self.get_module_option("interval",
default=self.config_keys['interval'])
assert interval
self.config['interval'] = int(interval)
def now(self) -> int:
return int(round(time.time() * 1000000000))
def gather_measurements(self) -> Iterable[Dict[str, Any]]:
return itertools.chain(
self.get_pool_stats(),
self.get_daemon_stats(),
self.get_cluster_stats()
)
def send_to_telegraf(self) -> None:
url = urlparse(cast(str, self.config['address']))
sock = BaseSocket(url)
self.log.debug('Sending data to Telegraf at %s', sock.address)
now = self.now()
try:
with sock as s:
for measurement in self.gather_measurements():
self.log.debug(measurement)
line = Line(measurement['measurement'],
measurement['value'],
measurement['tags'], now)
self.log.debug(line.to_line_protocol())
s.send(line.to_line_protocol())
except (socket.error, RuntimeError, IOError, OSError):
self.log.exception('Failed to send statistics to Telegraf:')
except FileNotFoundError:
self.log.exception('Failed to open Telegraf at: %s', url.geturl())
def shutdown(self) -> None:
self.log.info('Stopping Telegraf module')
self.run = False
self.event.set()
@CLIReadCommand('telegraf config-show')
def config_show(self) -> Tuple[int, str, str]:
"""
Show current configuration
"""
return 0, json.dumps(self.config), ''
@CLICommand('telegraf config-set')
def config_set(self, key: str, value: str) -> Tuple[int, str, str]:
"""
Set a configuration value
"""
if not value:
return -errno.EINVAL, '', 'Value should not be empty or None'
self.log.debug('Setting configuration option %s to %s', key, value)
self.set_config_option(key, value)
self.set_module_option(key, value)
return 0, 'Configuration option {0} updated'.format(key), ''
@CLICommand('telegraf send')
def send(self) -> Tuple[int, str, str]:
"""
Force sending data to Telegraf
"""
self.send_to_telegraf()
return 0, 'Sending data to Telegraf', ''
def self_test(self) -> None:
measurements = list(self.gather_measurements())
if len(measurements) == 0:
raise RuntimeError('No measurements found')
def serve(self) -> None:
self.log.info('Starting Telegraf module')
self.init_module_config()
self.run = True
self.log.debug('Waiting 10 seconds before starting')
self.event.wait(10)
while self.run:
start = self.now()
self.send_to_telegraf()
runtime = (self.now() - start) / 1000000
self.log.debug('Sending data to Telegraf took %d ms', runtime)
self.log.debug("Sleeping for %d seconds", self.config['interval'])
self.event.wait(cast(int, self.config['interval']))
| 9,814 | 33.559859 | 92 |
py
|
null |
ceph-main/src/pybind/mgr/telegraf/protocol.py
|
from typing import Dict, Optional, Union
from telegraf.utils import format_string, format_value, ValueType
class Line(object):
def __init__(self,
measurement: ValueType,
values: Union[Dict[str, ValueType], ValueType],
tags: Optional[Dict[str, str]] = None,
timestamp: Optional[int] = None) -> None:
self.measurement = measurement
self.values = values
self.tags = tags
self.timestamp = timestamp
def get_output_measurement(self) -> str:
return format_string(self.measurement)
def get_output_values(self) -> str:
if not isinstance(self.values, dict):
metric_values = {'value': self.values}
else:
metric_values = self.values
sorted_values = sorted(metric_values.items())
sorted_values = [(k, v) for k, v in sorted_values if v is not None]
return ','.join('{0}={1}'.format(format_string(k), format_value(v)) for k, v in sorted_values)
def get_output_tags(self) -> str:
if not self.tags:
self.tags = dict()
sorted_tags = sorted(self.tags.items())
return ','.join('{0}={1}'.format(format_string(k), format_string(v)) for k, v in sorted_tags)
def get_output_timestamp(self) -> str:
return ' {0}'.format(self.timestamp) if self.timestamp else ''
def to_line_protocol(self) -> str:
tags = self.get_output_tags()
return '{0}{1} {2}{3}'.format(
self.get_output_measurement(),
"," + tags if tags else '',
self.get_output_values(),
self.get_output_timestamp()
)
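# Illustration only, not part of the original module: a minimal, hedged sketch
# of how Line is typically used and what to_line_protocol() yields, based on
# the methods above. The measurement name, tag values and timestamp below are
# made-up placeholders.
if __name__ == '__main__':
    demo = Line('ceph_pool_stats',
                {'read_bytes': 0, 'write_bytes': 1024},
                {'pool_name': 'rbd', 'fsid': '00000000-demo'},
                1600000000000000000)
    # Expected shape (tags and fields are emitted in sorted key order):
    # ceph_pool_stats,fsid=00000000-demo,pool_name=rbd read_bytes=0i,write_bytes=1024i 1600000000000000000
    print(demo.to_line_protocol())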
| 1,674 | 31.843137 | 102 |
py
|
null |
ceph-main/src/pybind/mgr/telegraf/utils.py
|
from typing import Union
ValueType = Union[str, bool, int, float]
def format_string(key: ValueType) -> str:
if isinstance(key, str):
return key.replace(',', r'\,') \
.replace(' ', r'\ ') \
.replace('=', r'\=')
else:
return str(key)
def format_value(value: ValueType) -> str:
if isinstance(value, str):
        # escape embedded double quotes; note that '\"' is the same string as
        # '"', so the replacement target must be a literal backslash + quote
        value = value.replace('"', '\\"')
return f'"{value}"'
elif isinstance(value, bool):
return str(value)
elif isinstance(value, int):
return f"{value}i"
elif isinstance(value, float):
return str(value)
else:
raise ValueError()
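# Illustration only, not part of the original module: a small runnable sketch
# of the escaping rules above, handy when debugging malformed line-protocol
# output. The sample inputs are arbitrary.
if __name__ == '__main__':
    # spaces, commas and '=' in identifiers are backslash-escaped
    print(format_string('pool name,with=chars'))  # pool\ name\,with\=chars
    # strings are quoted, ints get an 'i' suffix, floats/bools pass through
    print(format_value('up'))   # "up"
    print(format_value(42))     # 42i
    print(format_value(0.25))   # 0.25
    print(format_value(True))   # True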
| 658 | 23.407407 | 42 |
py
|
null |
ceph-main/src/pybind/mgr/telemetry/__init__.py
|
import os
if 'UNITTEST' in os.environ:
import tests
try:
from .module import Module
except ImportError:
pass
| 123 | 11.4 | 30 |
py
|
null |
ceph-main/src/pybind/mgr/telemetry/module.py
|
"""
Telemetry module for ceph-mgr
Collect statistics from Ceph cluster and send this back to the Ceph project
when user has opted-in
"""
import logging
import numbers
import enum
import errno
import hashlib
import json
import rbd
import requests
import uuid
import time
from datetime import datetime, timedelta
from prettytable import PrettyTable
from threading import Event, Lock
from collections import defaultdict
from typing import cast, Any, DefaultDict, Dict, List, Optional, Tuple, TypeVar, TYPE_CHECKING, Union
from mgr_module import CLICommand, CLIReadCommand, MgrModule, Option, OptionValue, ServiceInfoT
ALL_CHANNELS = ['basic', 'ident', 'crash', 'device', 'perf']
LICENSE = 'sharing-1-0'
LICENSE_NAME = 'Community Data License Agreement - Sharing - Version 1.0'
LICENSE_URL = 'https://cdla.io/sharing-1-0/'
NO_SALT_CNT = 0
# Latest revision of the telemetry report. Bump this each time we make
# *any* change.
REVISION = 3
# History of revisions
# --------------------
#
# Version 1:
# Mimic and/or nautilus are lumped together here, since
# we didn't track revisions yet.
#
# Version 2:
# - added revision tracking, nagging, etc.
# - added config option changes
# - added channels
# - added explicit license acknowledgement to the opt-in process
#
# Version 3:
# - added device health metrics (i.e., SMART data, minus serial number)
# - remove crush_rule
# - added CephFS metadata (how many MDSs, fs features, how many data pools,
# how much metadata is cached, rfiles, rbytes, rsnapshots)
# - added more pool metadata (rep vs ec, cache tiering mode, ec profile)
# - added host count, and counts for hosts with each of (mon, osd, mds, mgr)
# - whether an OSD cluster network is in use
# - rbd pool and image count, and rbd mirror mode (pool-level)
# - rgw daemons, zones, zonegroups; which rgw frontends
# - crush map stats
class Collection(str, enum.Enum):
basic_base = 'basic_base'
device_base = 'device_base'
crash_base = 'crash_base'
ident_base = 'ident_base'
perf_perf = 'perf_perf'
basic_mds_metadata = 'basic_mds_metadata'
basic_pool_usage = 'basic_pool_usage'
basic_usage_by_class = 'basic_usage_by_class'
basic_rook_v01 = 'basic_rook_v01'
perf_memory_metrics = 'perf_memory_metrics'
basic_pool_options_bluestore = 'basic_pool_options_bluestore'
MODULE_COLLECTION : List[Dict] = [
{
"name": Collection.basic_base,
"description": "Basic information about the cluster (capacity, number and type of daemons, version, etc.)",
"channel": "basic",
"nag": False
},
{
"name": Collection.device_base,
"description": "Information about device health metrics",
"channel": "device",
"nag": False
},
{
"name": Collection.crash_base,
"description": "Information about daemon crashes (daemon type and version, backtrace, etc.)",
"channel": "crash",
"nag": False
},
{
"name": Collection.ident_base,
"description": "User-provided identifying information about the cluster",
"channel": "ident",
"nag": False
},
{
"name": Collection.perf_perf,
"description": "Information about performance counters of the cluster",
"channel": "perf",
"nag": True
},
{
"name": Collection.basic_mds_metadata,
"description": "MDS metadata",
"channel": "basic",
"nag": False
},
{
"name": Collection.basic_pool_usage,
"description": "Default pool application and usage statistics",
"channel": "basic",
"nag": False
},
{
"name": Collection.basic_usage_by_class,
"description": "Default device class usage statistics",
"channel": "basic",
"nag": False
},
{
"name": Collection.basic_rook_v01,
"description": "Basic Rook deployment data",
"channel": "basic",
"nag": True
},
{
"name": Collection.perf_memory_metrics,
"description": "Heap stats and mempools for mon and mds",
"channel": "perf",
"nag": False
},
{
"name": Collection.basic_pool_options_bluestore,
"description": "Per-pool bluestore config options",
"channel": "basic",
"nag": False
},
]
ROOK_KEYS_BY_COLLECTION : List[Tuple[str, Collection]] = [
# Note: a key cannot be both a node and a leaf, e.g.
# "rook/a/b"
# "rook/a/b/c"
("rook/version", Collection.basic_rook_v01),
("rook/kubernetes/version", Collection.basic_rook_v01),
("rook/csi/version", Collection.basic_rook_v01),
("rook/node/count/kubernetes-total", Collection.basic_rook_v01),
("rook/node/count/with-ceph-daemons", Collection.basic_rook_v01),
("rook/node/count/with-csi-rbd-plugin", Collection.basic_rook_v01),
("rook/node/count/with-csi-cephfs-plugin", Collection.basic_rook_v01),
("rook/node/count/with-csi-nfs-plugin", Collection.basic_rook_v01),
("rook/usage/storage-class/count/total", Collection.basic_rook_v01),
("rook/usage/storage-class/count/rbd", Collection.basic_rook_v01),
("rook/usage/storage-class/count/cephfs", Collection.basic_rook_v01),
("rook/usage/storage-class/count/nfs", Collection.basic_rook_v01),
("rook/usage/storage-class/count/bucket", Collection.basic_rook_v01),
("rook/cluster/storage/device-set/count/total", Collection.basic_rook_v01),
("rook/cluster/storage/device-set/count/portable", Collection.basic_rook_v01),
("rook/cluster/storage/device-set/count/non-portable", Collection.basic_rook_v01),
("rook/cluster/mon/count", Collection.basic_rook_v01),
("rook/cluster/mon/allow-multiple-per-node", Collection.basic_rook_v01),
("rook/cluster/mon/max-id", Collection.basic_rook_v01),
("rook/cluster/mon/pvc/enabled", Collection.basic_rook_v01),
("rook/cluster/mon/stretch/enabled", Collection.basic_rook_v01),
("rook/cluster/network/provider", Collection.basic_rook_v01),
("rook/cluster/external-mode", Collection.basic_rook_v01),
]
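# Illustration, not part of the original module: add_kv_to_report() below
# expands each slash-separated key into nested dicts on the report, which is
# why a key can never be both a node and a leaf. For example, assuming the
# config-key "rook/node/count/with-ceph-daemons" holds "3", the report ends up
# containing roughly:
#
#   report['rook']['node']['count']['with-ceph-daemons'] = "3"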
class Module(MgrModule):
metadata_keys = [
"arch",
"ceph_version",
"os",
"cpu",
"kernel_description",
"kernel_version",
"distro_description",
"distro"
]
MODULE_OPTIONS = [
Option(name='url',
type='str',
default='https://telemetry.ceph.com/report'),
Option(name='device_url',
type='str',
default='https://telemetry.ceph.com/device'),
Option(name='enabled',
type='bool',
default=False),
Option(name='last_opt_revision',
type='int',
default=1),
Option(name='leaderboard',
type='bool',
default=False),
Option(name='leaderboard_description',
type='str',
default=None),
Option(name='description',
type='str',
default=None),
Option(name='contact',
type='str',
default=None),
Option(name='organization',
type='str',
default=None),
Option(name='proxy',
type='str',
default=None),
Option(name='interval',
type='int',
default=24,
min=8),
Option(name='channel_basic',
type='bool',
default=True,
desc='Share basic cluster information (size, version)'),
Option(name='channel_ident',
type='bool',
default=False,
desc='Share a user-provided description and/or contact email for the cluster'),
Option(name='channel_crash',
type='bool',
default=True,
               desc='Share metadata about Ceph daemon crashes (version, stack traces, etc)'),
Option(name='channel_device',
type='bool',
default=True,
desc=('Share device health metrics '
'(e.g., SMART data, minus potentially identifying info like serial numbers)')),
Option(name='channel_perf',
type='bool',
default=False,
desc='Share various performance metrics of a cluster'),
]
@property
def config_keys(self) -> Dict[str, OptionValue]:
return dict((o['name'], o.get('default', None)) for o in self.MODULE_OPTIONS)
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(Module, self).__init__(*args, **kwargs)
self.event = Event()
self.run = False
self.db_collection: Optional[List[str]] = None
self.last_opted_in_ceph_version: Optional[int] = None
self.last_opted_out_ceph_version: Optional[int] = None
self.last_upload: Optional[int] = None
self.last_report: Dict[str, Any] = dict()
self.report_id: Optional[str] = None
self.salt: Optional[str] = None
self.get_report_lock = Lock()
self.config_update_module_option()
# for mypy which does not run the code
if TYPE_CHECKING:
self.url = ''
self.device_url = ''
self.enabled = False
self.last_opt_revision = 0
self.leaderboard = ''
self.leaderboard_description = ''
self.interval = 0
self.proxy = ''
self.channel_basic = True
self.channel_ident = False
self.channel_crash = True
self.channel_device = True
self.channel_perf = False
self.db_collection = ['basic_base', 'device_base']
self.last_opted_in_ceph_version = 17
self.last_opted_out_ceph_version = 0
def config_update_module_option(self) -> None:
for opt in self.MODULE_OPTIONS:
setattr(self,
opt['name'],
self.get_module_option(opt['name']))
self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))
def config_notify(self) -> None:
self.config_update_module_option()
# wake up serve() thread
self.event.set()
def load(self) -> None:
last_upload = self.get_store('last_upload', None)
if last_upload is None:
self.last_upload = None
else:
self.last_upload = int(last_upload)
report_id = self.get_store('report_id', None)
if report_id is None:
self.report_id = str(uuid.uuid4())
self.set_store('report_id', self.report_id)
else:
self.report_id = report_id
salt = self.get_store('salt', None)
if salt is None:
self.salt = str(uuid.uuid4())
self.set_store('salt', self.salt)
else:
self.salt = salt
self.init_collection()
last_opted_in_ceph_version = self.get_store('last_opted_in_ceph_version', None)
if last_opted_in_ceph_version is None:
self.last_opted_in_ceph_version = None
else:
self.last_opted_in_ceph_version = int(last_opted_in_ceph_version)
last_opted_out_ceph_version = self.get_store('last_opted_out_ceph_version', None)
if last_opted_out_ceph_version is None:
self.last_opted_out_ceph_version = None
else:
self.last_opted_out_ceph_version = int(last_opted_out_ceph_version)
def gather_osd_metadata(self,
osd_map: Dict[str, List[Dict[str, int]]]) -> Dict[str, Dict[str, int]]:
keys = ["osd_objectstore", "rotational"]
keys += self.metadata_keys
metadata: Dict[str, Dict[str, int]] = dict()
for key in keys:
metadata[key] = defaultdict(int)
for osd in osd_map['osds']:
res = self.get_metadata('osd', str(osd['osd']))
if res is None:
self.log.debug('Could not get metadata for osd.%s' % str(osd['osd']))
continue
for k, v in res.items():
if k not in keys:
continue
metadata[k][v] += 1
return metadata
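    # Illustration, not part of the original module: the returned mapping
    # counts OSDs per metadata value, e.g. (values made up):
    #
    #   {'osd_objectstore': {'bluestore': 12},
    #    'rotational': {'0': 4, '1': 8},
    #    'ceph_version': {'ceph version 17.2.x ...': 12},
    #    ...}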
def gather_mon_metadata(self,
mon_map: Dict[str, List[Dict[str, str]]]) -> Dict[str, Dict[str, int]]:
keys = list()
keys += self.metadata_keys
metadata: Dict[str, Dict[str, int]] = dict()
for key in keys:
metadata[key] = defaultdict(int)
for mon in mon_map['mons']:
res = self.get_metadata('mon', mon['name'])
if res is None:
self.log.debug('Could not get metadata for mon.%s' % (mon['name']))
continue
for k, v in res.items():
if k not in keys:
continue
metadata[k][v] += 1
return metadata
def gather_mds_metadata(self) -> Dict[str, Dict[str, int]]:
metadata: Dict[str, Dict[str, int]] = dict()
res = self.get('mds_metadata') # metadata of *all* mds daemons
if res is None or not res:
self.log.debug('Could not get metadata for mds daemons')
return metadata
keys = list()
keys += self.metadata_keys
for key in keys:
metadata[key] = defaultdict(int)
for mds in res.values():
for k, v in mds.items():
if k not in keys:
continue
metadata[k][v] += 1
return metadata
def gather_crush_info(self) -> Dict[str, Union[int,
bool,
List[int],
Dict[str, int],
Dict[int, int]]]:
osdmap = self.get_osdmap()
crush_raw = osdmap.get_crush()
crush = crush_raw.dump()
BucketKeyT = TypeVar('BucketKeyT', int, str)
def inc(d: Dict[BucketKeyT, int], k: BucketKeyT) -> None:
if k in d:
d[k] += 1
else:
d[k] = 1
device_classes: Dict[str, int] = {}
for dev in crush['devices']:
inc(device_classes, dev.get('class', ''))
bucket_algs: Dict[str, int] = {}
bucket_types: Dict[str, int] = {}
bucket_sizes: Dict[int, int] = {}
for bucket in crush['buckets']:
if '~' in bucket['name']: # ignore shadow buckets
continue
inc(bucket_algs, bucket['alg'])
inc(bucket_types, bucket['type_id'])
inc(bucket_sizes, len(bucket['items']))
return {
'num_devices': len(crush['devices']),
'num_types': len(crush['types']),
'num_buckets': len(crush['buckets']),
'num_rules': len(crush['rules']),
'device_classes': list(device_classes.values()),
'tunables': crush['tunables'],
'compat_weight_set': '-1' in crush['choose_args'],
'num_weight_sets': len(crush['choose_args']),
'bucket_algs': bucket_algs,
'bucket_sizes': bucket_sizes,
'bucket_types': bucket_types,
}
def gather_configs(self) -> Dict[str, List[str]]:
# cluster config options
cluster = set()
r, outb, outs = self.mon_command({
'prefix': 'config dump',
'format': 'json'
})
if r != 0:
return {}
try:
dump = json.loads(outb)
except json.decoder.JSONDecodeError:
return {}
for opt in dump:
name = opt.get('name')
if name:
cluster.add(name)
# daemon-reported options (which may include ceph.conf)
active = set()
ls = self.get("modified_config_options")
for opt in ls.get('options', {}):
active.add(opt)
return {
'cluster_changed': sorted(list(cluster)),
'active_changed': sorted(list(active)),
}
    def anonymize_entity_name(self, entity_name: str) -> str:
        # NO_SALT_CNT is a module-level counter; declare it global so the
        # fallback branch below can increment it without raising
        # UnboundLocalError.
        global NO_SALT_CNT
if '.' not in entity_name:
self.log.debug(f"Cannot split entity name ({entity_name}), no '.' is found")
return entity_name
(etype, eid) = entity_name.split('.', 1)
m = hashlib.sha1()
salt = ''
if self.salt is not None:
salt = self.salt
# avoid asserting that salt exists
if not self.salt:
# do not set self.salt to a temp value
salt = f"no_salt_found_{NO_SALT_CNT}"
NO_SALT_CNT += 1
self.log.debug(f"No salt found, created a temp one: {salt}")
m.update(salt.encode('utf-8'))
m.update(eid.encode('utf-8'))
m.update(salt.encode('utf-8'))
return etype + '.' + m.hexdigest()
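    # Illustration, not part of the original module: the daemon type stays in
    # the clear and only the id is hashed together with the cluster salt, so
    # 'mon.a' becomes 'mon.<40-hex-char sha1 of salt + "a" + salt>'. The perf
    # collectors below skip this helper for OSD daemons.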
def get_heap_stats(self) -> Dict[str, dict]:
result: Dict[str, dict] = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
anonymized_daemons = {}
osd_map = self.get('osd_map')
# Combine available daemons
daemons = []
for osd in osd_map['osds']:
daemons.append('osd'+'.'+str(osd['osd']))
# perf_memory_metrics collection (1/2)
if self.is_enabled_collection(Collection.perf_memory_metrics):
mon_map = self.get('mon_map')
mds_metadata = self.get('mds_metadata')
for mon in mon_map['mons']:
daemons.append('mon'+'.'+mon['name'])
for mds in mds_metadata:
daemons.append('mds'+'.'+mds)
# Grab output from the "daemon.x heap stats" command
for daemon in daemons:
daemon_type, daemon_id = daemon.split('.', 1)
heap_stats = self.parse_heap_stats(daemon_type, daemon_id)
if heap_stats:
if (daemon_type != 'osd'):
# Anonymize mon and mds
anonymized_daemons[daemon] = self.anonymize_entity_name(daemon)
daemon = anonymized_daemons[daemon]
result[daemon_type][daemon] = heap_stats
else:
continue
if anonymized_daemons:
# for debugging purposes only, this data is never reported
self.log.debug('Anonymized daemon mapping for telemetry heap_stats (anonymized: real): {}'.format(anonymized_daemons))
return result
def parse_heap_stats(self, daemon_type: str, daemon_id: Any) -> Dict[str, int]:
parsed_output = {}
cmd_dict = {
'prefix': 'heap',
'heapcmd': 'stats'
}
r, outb, outs = self.tell_command(daemon_type, str(daemon_id), cmd_dict)
if r != 0:
self.log.error("Invalid command dictionary: {}".format(cmd_dict))
else:
if 'tcmalloc heap stats' in outb:
values = [int(i) for i in outb.split() if i.isdigit()]
# `categories` must be ordered this way for the correct output to be parsed
categories = ['use_by_application',
'page_heap_freelist',
'central_cache_freelist',
'transfer_cache_freelist',
'thread_cache_freelists',
'malloc_metadata',
'actual_memory_used',
'released_to_os',
'virtual_address_space_used',
'spans_in_use',
'thread_heaps_in_use',
'tcmalloc_page_size']
if len(values) != len(categories):
                    self.log.error('Received unexpected output from {}.{}; ' \
                                   'number of values should match the number ' \
                                   'of expected categories:\n values: len={} {} ' \
                                   '~ categories: len={} {} ~ outs: {}'.format(daemon_type, daemon_id, len(values), values, len(categories), categories, outs))
else:
parsed_output = dict(zip(categories, values))
else:
self.log.error('No heap stats available on {}.{}: {}'.format(daemon_type, daemon_id, outs))
return parsed_output
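    # Illustration, not part of the original module: parsing simply pulls
    # every integer token out of the tcmalloc stats text and zips it with
    # `categories`, so values = [2936012, 0, 34913, ...] (numbers made up)
    # becomes {'use_by_application': 2936012, 'page_heap_freelist': 0,
    # 'central_cache_freelist': 34913, ...}. The length check above catches a
    # changed number of fields, but not a reordering of them.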
def get_mempool(self, mode: str = 'separated') -> Dict[str, dict]:
result: Dict[str, dict] = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
anonymized_daemons = {}
osd_map = self.get('osd_map')
# Combine available daemons
daemons = []
for osd in osd_map['osds']:
daemons.append('osd'+'.'+str(osd['osd']))
# perf_memory_metrics collection (2/2)
if self.is_enabled_collection(Collection.perf_memory_metrics):
mon_map = self.get('mon_map')
mds_metadata = self.get('mds_metadata')
for mon in mon_map['mons']:
daemons.append('mon'+'.'+mon['name'])
for mds in mds_metadata:
daemons.append('mds'+'.'+mds)
# Grab output from the "dump_mempools" command
for daemon in daemons:
daemon_type, daemon_id = daemon.split('.', 1)
cmd_dict = {
'prefix': 'dump_mempools',
'format': 'json'
}
r, outb, outs = self.tell_command(daemon_type, daemon_id, cmd_dict)
if r != 0:
self.log.error("Invalid command dictionary: {}".format(cmd_dict))
continue
else:
try:
# This is where the mempool will land.
dump = json.loads(outb)
if mode == 'separated':
# Anonymize mon and mds
if daemon_type != 'osd':
anonymized_daemons[daemon] = self.anonymize_entity_name(daemon)
daemon = anonymized_daemons[daemon]
result[daemon_type][daemon] = dump['mempool']['by_pool']
elif mode == 'aggregated':
for mem_type in dump['mempool']['by_pool']:
result[daemon_type][mem_type]['bytes'] += dump['mempool']['by_pool'][mem_type]['bytes']
result[daemon_type][mem_type]['items'] += dump['mempool']['by_pool'][mem_type]['items']
else:
self.log.error("Incorrect mode specified in get_mempool: {}".format(mode))
except (json.decoder.JSONDecodeError, KeyError) as e:
self.log.exception("Error caught on {}.{}: {}".format(daemon_type, daemon_id, e))
continue
if anonymized_daemons:
# for debugging purposes only, this data is never reported
self.log.debug('Anonymized daemon mapping for telemetry mempool (anonymized: real): {}'.format(anonymized_daemons))
return result
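    # Illustration, not part of the original module: with mode='separated' the
    # result keeps one entry per (anonymized) daemon, while mode='aggregated'
    # sums bytes/items per daemon type, e.g. (pool name and values illustrative):
    #
    #   separated:  {'osd': {'osd.0': {'bluestore_cache_data': {'bytes': 1024, 'items': 2}}}}
    #   aggregated: {'osd': {'bluestore_cache_data': {'bytes': 4096, 'items': 8}}}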
def get_osd_histograms(self, mode: str = 'separated') -> List[Dict[str, dict]]:
# Initialize result dict
result: Dict[str, dict] = defaultdict(lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(
lambda: defaultdict(int))))))
# Get list of osd ids from the metadata
osd_metadata = self.get('osd_metadata')
# Grab output from the "osd.x perf histogram dump" command
for osd_id in osd_metadata:
cmd_dict = {
'prefix': 'perf histogram dump',
'id': str(osd_id),
'format': 'json'
}
r, outb, outs = self.osd_command(cmd_dict)
# Check for invalid calls
if r != 0:
self.log.error("Invalid command dictionary: {}".format(cmd_dict))
continue
else:
try:
# This is where the histograms will land if there are any.
dump = json.loads(outb)
for histogram in dump['osd']:
# Log axis information. There are two axes, each represented
# as a dictionary. Both dictionaries are contained inside a
# list called 'axes'.
axes = []
for axis in dump['osd'][histogram]['axes']:
# This is the dict that contains information for an individual
# axis. It will be appended to the 'axes' list at the end.
axis_dict: Dict[str, Any] = defaultdict()
# Collecting information for buckets, min, name, etc.
axis_dict['buckets'] = axis['buckets']
axis_dict['min'] = axis['min']
axis_dict['name'] = axis['name']
axis_dict['quant_size'] = axis['quant_size']
axis_dict['scale_type'] = axis['scale_type']
# Collecting ranges; placing them in lists to
# improve readability later on.
ranges = []
for _range in axis['ranges']:
_max, _min = None, None
if 'max' in _range:
_max = _range['max']
if 'min' in _range:
_min = _range['min']
ranges.append([_min, _max])
axis_dict['ranges'] = ranges
# Now that 'axis_dict' contains all the appropriate
# information for the current axis, append it to the 'axes' list.
# There will end up being two axes in the 'axes' list, since the
# histograms are 2D.
axes.append(axis_dict)
# Add the 'axes' list, containing both axes, to result.
# At this point, you will see that the name of the key is the string
# form of our axes list (str(axes)). This is there so that histograms
# with different axis configs will not be combined.
# These key names are later dropped when only the values are returned.
result[str(axes)][histogram]['axes'] = axes
# Collect current values and make sure they are in
# integer form.
values = []
for value_list in dump['osd'][histogram]['values']:
values.append([int(v) for v in value_list])
if mode == 'separated':
if 'osds' not in result[str(axes)][histogram]:
result[str(axes)][histogram]['osds'] = []
result[str(axes)][histogram]['osds'].append({'osd_id': int(osd_id), 'values': values})
elif mode == 'aggregated':
# Aggregate values. If 'values' have already been initialized,
# we can safely add.
if 'values' in result[str(axes)][histogram]:
for i in range (0, len(values)):
for j in range (0, len(values[i])):
values[i][j] += result[str(axes)][histogram]['values'][i][j]
# Add the values to result.
result[str(axes)][histogram]['values'] = values
# Update num_combined_osds
if 'num_combined_osds' not in result[str(axes)][histogram]:
result[str(axes)][histogram]['num_combined_osds'] = 1
else:
result[str(axes)][histogram]['num_combined_osds'] += 1
else:
self.log.error('Incorrect mode specified in get_osd_histograms: {}'.format(mode))
return list()
# Sometimes, json errors occur if you give it an empty string.
# I am also putting in a catch for a KeyError since it could
# happen where the code is assuming that a key exists in the
# schema when it doesn't. In either case, we'll handle that
# by continuing and collecting what we can from other osds.
except (json.decoder.JSONDecodeError, KeyError) as e:
self.log.exception("Error caught on osd.{}: {}".format(osd_id, e))
continue
return list(result.values())
def get_io_rate(self) -> dict:
return self.get('io_rate')
def get_stats_per_pool(self) -> dict:
result = self.get('pg_dump')['pool_stats']
# collect application metadata from osd_map
osd_map = self.get('osd_map')
application_metadata = {pool['pool']: pool['application_metadata'] for pool in osd_map['pools']}
# add application to each pool from pg_dump
for pool in result:
pool['application'] = []
# Only include default applications
for application in application_metadata[pool['poolid']]:
if application in ['cephfs', 'mgr', 'rbd', 'rgw']:
pool['application'].append(application)
return result
def get_stats_per_pg(self) -> dict:
return self.get('pg_dump')['pg_stats']
def get_rocksdb_stats(self) -> Dict[str, str]:
        # Initializers
result: Dict[str, str] = defaultdict()
version = self.get_rocksdb_version()
# Update result
result['version'] = version
return result
def gather_crashinfo(self) -> List[Dict[str, str]]:
crashlist: List[Dict[str, str]] = list()
errno, crashids, err = self.remote('crash', 'ls')
if errno:
return crashlist
for crashid in crashids.split():
errno, crashinfo, err = self.remote('crash', 'do_info', crashid)
if errno:
continue
c = json.loads(crashinfo)
# redact hostname
del c['utsname_hostname']
# entity_name might have more than one '.', beware
(etype, eid) = c.get('entity_name', '').split('.', 1)
m = hashlib.sha1()
assert self.salt
m.update(self.salt.encode('utf-8'))
m.update(eid.encode('utf-8'))
m.update(self.salt.encode('utf-8'))
c['entity_name'] = etype + '.' + m.hexdigest()
# redact final line of python tracebacks, as the exception
# payload may contain identifying information
if 'mgr_module' in c and 'backtrace' in c:
# backtrace might be empty
if len(c['backtrace']) > 0:
c['backtrace'][-1] = '<redacted>'
crashlist.append(c)
return crashlist
def gather_perf_counters(self, mode: str = 'separated') -> Dict[str, dict]:
# Extract perf counter data with get_unlabeled_perf_counters(), a method
# from mgr/mgr_module.py. This method returns a nested dictionary that
# looks a lot like perf schema, except with some additional fields.
#
# Example of output, a snapshot of a mon daemon:
# "mon.b": {
# "bluestore.kv_flush_lat": {
# "count": 2431,
# "description": "Average kv_thread flush latency",
# "nick": "fl_l",
# "priority": 8,
# "type": 5,
# "units": 1,
# "value": 88814109
# },
# },
perf_counters = self.get_unlabeled_perf_counters()
# Initialize 'result' dict
result: Dict[str, dict] = defaultdict(lambda: defaultdict(
lambda: defaultdict(lambda: defaultdict(int))))
# 'separated' mode
anonymized_daemon_dict = {}
for daemon, perf_counters_by_daemon in perf_counters.items():
daemon_type = daemon[0:3] # i.e. 'mds', 'osd', 'rgw'
if mode == 'separated':
# anonymize individual daemon names except osds
if (daemon_type != 'osd'):
anonymized_daemon = self.anonymize_entity_name(daemon)
anonymized_daemon_dict[anonymized_daemon] = daemon
daemon = anonymized_daemon
# Calculate num combined daemon types if in aggregated mode
if mode == 'aggregated':
if 'num_combined_daemons' not in result[daemon_type]:
result[daemon_type]['num_combined_daemons'] = 1
else:
result[daemon_type]['num_combined_daemons'] += 1
for collection in perf_counters_by_daemon:
# Split the collection to avoid redundancy in final report; i.e.:
# bluestore.kv_flush_lat, bluestore.kv_final_lat -->
# bluestore: kv_flush_lat, kv_final_lat
col_0, col_1 = collection.split('.')
# Debug log for empty keys. This initially was a problem for prioritycache
# perf counters, where the col_0 was empty for certain mon counters:
#
# "mon.a": { instead of "mon.a": {
# "": { "prioritycache": {
# "cache_bytes": {...}, "cache_bytes": {...},
#
# This log is here to detect any future instances of a similar issue.
if (daemon == "") or (col_0 == "") or (col_1 == ""):
self.log.debug("Instance of an empty key: {}{}".format(daemon, collection))
if mode == 'separated':
# Add value to result
result[daemon][col_0][col_1]['value'] = \
perf_counters_by_daemon[collection]['value']
# Check that 'count' exists, as not all counters have a count field.
if 'count' in perf_counters_by_daemon[collection]:
result[daemon][col_0][col_1]['count'] = \
perf_counters_by_daemon[collection]['count']
elif mode == 'aggregated':
# Not every rgw daemon has the same schema. Specifically, each rgw daemon
# has a uniquely-named collection that starts off identically (i.e.
# "objecter-0x...") then diverges (i.e. "...55f4e778e140.op_rmw").
# This bit of code combines these unique counters all under one rgw instance.
                    # Without this check, the schema would remain separated out in the final report.
if col_0[0:11] == "objecter-0x":
col_0 = "objecter-0x"
# Check that the value can be incremented. In some cases,
# the files are of type 'pair' (real-integer-pair, integer-integer pair).
# In those cases, the value is a dictionary, and not a number.
# i.e. throttle-msgr_dispatch_throttler-hbserver["wait"]
if isinstance(perf_counters_by_daemon[collection]['value'], numbers.Number):
result[daemon_type][col_0][col_1]['value'] += \
perf_counters_by_daemon[collection]['value']
# Check that 'count' exists, as not all counters have a count field.
if 'count' in perf_counters_by_daemon[collection]:
result[daemon_type][col_0][col_1]['count'] += \
perf_counters_by_daemon[collection]['count']
else:
self.log.error('Incorrect mode specified in gather_perf_counters: {}'.format(mode))
return {}
if mode == 'separated':
# for debugging purposes only, this data is never reported
self.log.debug('Anonymized daemon mapping for telemetry perf_counters (anonymized: real): {}'.format(anonymized_daemon_dict))
return result
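    # Illustration, not part of the original module: in 'separated' mode the
    # result is keyed by (anonymized) daemon name and the split collection
    # name, e.g. (values made up):
    #
    #   {'mon.<anon>': {'bluestore': {'kv_flush_lat': {'value': 88814109,
    #                                                  'count': 2431}}}}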
def get_active_channels(self) -> List[str]:
r = []
if self.channel_basic:
r.append('basic')
if self.channel_crash:
r.append('crash')
if self.channel_device:
r.append('device')
if self.channel_ident:
r.append('ident')
if self.channel_perf:
r.append('perf')
return r
def gather_device_report(self) -> Dict[str, Dict[str, Dict[str, str]]]:
try:
time_format = self.remote('devicehealth', 'get_time_format')
except Exception as e:
self.log.debug('Unable to format time: {}'.format(e))
return {}
cutoff = datetime.utcnow() - timedelta(hours=self.interval * 2)
min_sample = cutoff.strftime(time_format)
devices = self.get('devices')['devices']
if not devices:
self.log.debug('Unable to get device info from the mgr.')
return {}
# anon-host-id -> anon-devid -> { timestamp -> record }
res: Dict[str, Dict[str, Dict[str, str]]] = {}
for d in devices:
devid = d['devid']
try:
# this is a map of stamp -> {device info}
m = self.remote('devicehealth', 'get_recent_device_metrics',
devid, min_sample)
except Exception as e:
self.log.error('Unable to get recent metrics from device with id "{}": {}'.format(devid, e))
continue
# anonymize host id
try:
host = d['location'][0]['host']
except (KeyError, IndexError) as e:
self.log.exception('Unable to get host from device with id "{}": {}'.format(devid, e))
continue
anon_host = self.get_store('host-id/%s' % host)
if not anon_host:
anon_host = str(uuid.uuid1())
self.set_store('host-id/%s' % host, anon_host)
serial = None
for dev, rep in m.items():
rep['host_id'] = anon_host
if serial is None and 'serial_number' in rep:
serial = rep['serial_number']
# anonymize device id
anon_devid = self.get_store('devid-id/%s' % devid)
if not anon_devid:
# ideally devid is 'vendor_model_serial',
# but can also be 'model_serial', 'serial'
if '_' in devid:
anon_devid = f"{devid.rsplit('_', 1)[0]}_{uuid.uuid1()}"
else:
anon_devid = str(uuid.uuid1())
self.set_store('devid-id/%s' % devid, anon_devid)
self.log.info('devid %s / %s, host %s / %s' % (devid, anon_devid,
host, anon_host))
# anonymize the smartctl report itself
if serial:
m_str = json.dumps(m)
m = json.loads(m_str.replace(serial, 'deleted'))
if anon_host not in res:
res[anon_host] = {}
res[anon_host][anon_devid] = m
return res
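    # Illustration, not part of the original module: both ids are replaced by
    # stable, locally stored UUID aliases, e.g. (uuids made up):
    #
    #   host 'node-1'                  -> '2c9705e4-...'
    #   devid 'VENDOR_MODEL_SERIAL123' -> 'VENDOR_MODEL_<uuid>'
    #
    # so a device keeps the same anonymous id across reports, while the serial
    # number itself is replaced with 'deleted' in the smartctl payload.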
def get_latest(self, daemon_type: str, daemon_name: str, stat: str) -> int:
data = self.get_counter(daemon_type, daemon_name, stat)[stat]
if data:
return data[-1][1]
else:
return 0
def compile_report(self, channels: Optional[List[str]] = None) -> Dict[str, Any]:
if not channels:
channels = self.get_active_channels()
report = {
'leaderboard': self.leaderboard,
'leaderboard_description': self.leaderboard_description,
'report_version': 1,
'report_timestamp': datetime.utcnow().isoformat(),
'report_id': self.report_id,
'channels': channels,
'channels_available': ALL_CHANNELS,
'license': LICENSE,
'collections_available': [c['name'].name for c in MODULE_COLLECTION],
'collections_opted_in': [c['name'].name for c in MODULE_COLLECTION if self.is_enabled_collection(c['name'])],
}
if 'ident' in channels:
for option in ['description', 'contact', 'organization']:
report[option] = getattr(self, option)
if 'basic' in channels:
mon_map = self.get('mon_map')
osd_map = self.get('osd_map')
service_map = self.get('service_map')
fs_map = self.get('fs_map')
df = self.get('df')
df_pools = {pool['id']: pool for pool in df['pools']}
report['created'] = mon_map['created']
# mons
v1_mons = 0
v2_mons = 0
ipv4_mons = 0
ipv6_mons = 0
for mon in mon_map['mons']:
for a in mon['public_addrs']['addrvec']:
if a['type'] == 'v2':
v2_mons += 1
elif a['type'] == 'v1':
v1_mons += 1
if a['addr'].startswith('['):
ipv6_mons += 1
else:
ipv4_mons += 1
report['mon'] = {
'count': len(mon_map['mons']),
'features': mon_map['features'],
'min_mon_release': mon_map['min_mon_release'],
'v1_addr_mons': v1_mons,
'v2_addr_mons': v2_mons,
'ipv4_addr_mons': ipv4_mons,
'ipv6_addr_mons': ipv6_mons,
}
report['config'] = self.gather_configs()
# pools
rbd_num_pools = 0
rbd_num_images_by_pool = []
rbd_mirroring_by_pool = []
num_pg = 0
report['pools'] = list()
for pool in osd_map['pools']:
num_pg += pool['pg_num']
ec_profile = {}
if pool['erasure_code_profile']:
orig = osd_map['erasure_code_profiles'].get(
pool['erasure_code_profile'], {})
ec_profile = {
k: orig[k] for k in orig.keys()
if k in ['k', 'm', 'plugin', 'technique',
'crush-failure-domain', 'l']
}
pool_data = {
'pool': pool['pool'],
'pg_num': pool['pg_num'],
'pgp_num': pool['pg_placement_num'],
'size': pool['size'],
'min_size': pool['min_size'],
'pg_autoscale_mode': pool['pg_autoscale_mode'],
'target_max_bytes': pool['target_max_bytes'],
'target_max_objects': pool['target_max_objects'],
'type': ['', 'replicated', '', 'erasure'][pool['type']],
'erasure_code_profile': ec_profile,
'cache_mode': pool['cache_mode'],
}
# basic_pool_usage collection
if self.is_enabled_collection(Collection.basic_pool_usage):
pool_data['application'] = []
for application in pool['application_metadata']:
# Only include default applications
if application in ['cephfs', 'mgr', 'rbd', 'rgw']:
pool_data['application'].append(application)
pool_stats = df_pools[pool['pool']]['stats']
pool_data['stats'] = { # filter out kb_used
'avail_raw': pool_stats['avail_raw'],
'bytes_used': pool_stats['bytes_used'],
'compress_bytes_used': pool_stats['compress_bytes_used'],
'compress_under_bytes': pool_stats['compress_under_bytes'],
'data_bytes_used': pool_stats['data_bytes_used'],
'dirty': pool_stats['dirty'],
'max_avail': pool_stats['max_avail'],
'objects': pool_stats['objects'],
'omap_bytes_used': pool_stats['omap_bytes_used'],
'percent_used': pool_stats['percent_used'],
'quota_bytes': pool_stats['quota_bytes'],
'quota_objects': pool_stats['quota_objects'],
'rd': pool_stats['rd'],
'rd_bytes': pool_stats['rd_bytes'],
'stored': pool_stats['stored'],
'stored_data': pool_stats['stored_data'],
'stored_omap': pool_stats['stored_omap'],
'stored_raw': pool_stats['stored_raw'],
'wr': pool_stats['wr'],
'wr_bytes': pool_stats['wr_bytes']
}
pool_data['options'] = {}
# basic_pool_options_bluestore collection
if self.is_enabled_collection(Collection.basic_pool_options_bluestore):
bluestore_options = ['compression_algorithm',
'compression_mode',
'compression_required_ratio',
'compression_min_blob_size',
'compression_max_blob_size']
for option in bluestore_options:
if option in pool['options']:
pool_data['options'][option] = pool['options'][option]
cast(List[Dict[str, Any]], report['pools']).append(pool_data)
if 'rbd' in pool['application_metadata']:
rbd_num_pools += 1
ioctx = self.rados.open_ioctx(pool['pool_name'])
rbd_num_images_by_pool.append(
sum(1 for _ in rbd.RBD().list2(ioctx)))
rbd_mirroring_by_pool.append(
rbd.RBD().mirror_mode_get(ioctx) != rbd.RBD_MIRROR_MODE_DISABLED)
report['rbd'] = {
'num_pools': rbd_num_pools,
'num_images_by_pool': rbd_num_images_by_pool,
'mirroring_by_pool': rbd_mirroring_by_pool}
# osds
cluster_network = False
for osd in osd_map['osds']:
if osd['up'] and not cluster_network:
front_ip = osd['public_addrs']['addrvec'][0]['addr'].split(':')[0]
back_ip = osd['cluster_addrs']['addrvec'][0]['addr'].split(':')[0]
if front_ip != back_ip:
cluster_network = True
report['osd'] = {
'count': len(osd_map['osds']),
'require_osd_release': osd_map['require_osd_release'],
'require_min_compat_client': osd_map['require_min_compat_client'],
'cluster_network': cluster_network,
}
# crush
report['crush'] = self.gather_crush_info()
# cephfs
report['fs'] = {
'count': len(fs_map['filesystems']),
'feature_flags': fs_map['feature_flags'],
'num_standby_mds': len(fs_map['standbys']),
'filesystems': [],
}
num_mds = len(fs_map['standbys'])
for fsm in fs_map['filesystems']:
fs = fsm['mdsmap']
num_sessions = 0
cached_ino = 0
cached_dn = 0
cached_cap = 0
subtrees = 0
rfiles = 0
rbytes = 0
rsnaps = 0
for gid, mds in fs['info'].items():
num_sessions += self.get_latest('mds', mds['name'],
'mds_sessions.session_count')
cached_ino += self.get_latest('mds', mds['name'],
'mds_mem.ino')
cached_dn += self.get_latest('mds', mds['name'],
'mds_mem.dn')
cached_cap += self.get_latest('mds', mds['name'],
'mds_mem.cap')
subtrees += self.get_latest('mds', mds['name'],
'mds.subtrees')
if mds['rank'] == 0:
rfiles = self.get_latest('mds', mds['name'],
'mds.root_rfiles')
rbytes = self.get_latest('mds', mds['name'],
'mds.root_rbytes')
rsnaps = self.get_latest('mds', mds['name'],
'mds.root_rsnaps')
report['fs']['filesystems'].append({ # type: ignore
'max_mds': fs['max_mds'],
'ever_allowed_features': fs['ever_allowed_features'],
'explicitly_allowed_features': fs['explicitly_allowed_features'],
'num_in': len(fs['in']),
'num_up': len(fs['up']),
'num_standby_replay': len(
[mds for gid, mds in fs['info'].items()
if mds['state'] == 'up:standby-replay']),
'num_mds': len(fs['info']),
'num_sessions': num_sessions,
'cached_inos': cached_ino,
'cached_dns': cached_dn,
'cached_caps': cached_cap,
'cached_subtrees': subtrees,
'balancer_enabled': len(fs['balancer']) > 0,
'num_data_pools': len(fs['data_pools']),
'standby_count_wanted': fs['standby_count_wanted'],
'approx_ctime': fs['created'][0:7],
'files': rfiles,
'bytes': rbytes,
'snaps': rsnaps,
})
num_mds += len(fs['info'])
report['fs']['total_num_mds'] = num_mds # type: ignore
# daemons
report['metadata'] = dict(osd=self.gather_osd_metadata(osd_map),
mon=self.gather_mon_metadata(mon_map))
if self.is_enabled_collection(Collection.basic_mds_metadata):
report['metadata']['mds'] = self.gather_mds_metadata() # type: ignore
# host counts
servers = self.list_servers()
self.log.debug('servers %s' % servers)
hosts = {
'num': len([h for h in servers if h['hostname']]),
}
for t in ['mon', 'mds', 'osd', 'mgr']:
nr_services = sum(1 for host in servers if
any(service for service in cast(List[ServiceInfoT],
host['services'])
if service['type'] == t))
hosts['num_with_' + t] = nr_services
report['hosts'] = hosts
report['usage'] = {
'pools': len(df['pools']),
'pg_num': num_pg,
'total_used_bytes': df['stats']['total_used_bytes'],
'total_bytes': df['stats']['total_bytes'],
'total_avail_bytes': df['stats']['total_avail_bytes']
}
# basic_usage_by_class collection
if self.is_enabled_collection(Collection.basic_usage_by_class):
report['usage']['stats_by_class'] = {} # type: ignore
for device_class in df['stats_by_class']:
if device_class in ['hdd', 'ssd', 'nvme']:
report['usage']['stats_by_class'][device_class] = df['stats_by_class'][device_class] # type: ignore
services: DefaultDict[str, int] = defaultdict(int)
for key, value in service_map['services'].items():
services[key] += 1
if key == 'rgw':
rgw = {}
zones = set()
zonegroups = set()
frontends = set()
count = 0
d = value.get('daemons', dict())
for k, v in d.items():
if k == 'summary' and v:
rgw[k] = v
elif isinstance(v, dict) and 'metadata' in v:
count += 1
zones.add(v['metadata']['zone_id'])
zonegroups.add(v['metadata']['zonegroup_id'])
frontends.add(v['metadata']['frontend_type#0'])
# we could actually iterate over all the keys of
# the dict and check for how many frontends there
# are, but it is unlikely that one would be running
# more than 2 supported ones
f2 = v['metadata'].get('frontend_type#1', None)
if f2:
frontends.add(f2)
rgw['count'] = count
rgw['zones'] = len(zones)
rgw['zonegroups'] = len(zonegroups)
rgw['frontends'] = list(frontends) # sets aren't json-serializable
report['rgw'] = rgw
report['services'] = services
try:
report['balancer'] = self.remote('balancer', 'gather_telemetry')
except ImportError:
report['balancer'] = {
'active': False
}
# Rook
self.get_rook_data(report)
if 'crash' in channels:
report['crashes'] = self.gather_crashinfo()
if 'perf' in channels:
if self.is_enabled_collection(Collection.perf_perf):
report['perf_counters'] = self.gather_perf_counters('separated')
report['stats_per_pool'] = self.get_stats_per_pool()
report['stats_per_pg'] = self.get_stats_per_pg()
report['io_rate'] = self.get_io_rate()
report['osd_perf_histograms'] = self.get_osd_histograms('separated')
report['mempool'] = self.get_mempool('separated')
report['heap_stats'] = self.get_heap_stats()
report['rocksdb_stats'] = self.get_rocksdb_stats()
# NOTE: We do not include the 'device' channel in this report; it is
# sent to a different endpoint.
return report
def get_rook_data(self, report: Dict[str, object]) -> None:
r, outb, outs = self.mon_command({
'prefix': 'config-key dump',
'format': 'json'
})
if r != 0:
return
try:
config_kv_dump = json.loads(outb)
except json.decoder.JSONDecodeError:
return
for elem in ROOK_KEYS_BY_COLLECTION:
# elem[0] is the full key path (e.g. "rook/node/count/with-csi-nfs-plugin")
# elem[1] is the Collection this key belongs to
if self.is_enabled_collection(elem[1]):
self.add_kv_to_report(report, elem[0], config_kv_dump.get(elem[0]))
def add_kv_to_report(self, report: Dict[str, object], key_path: str, value: Any) -> None:
last_node = key_path.split('/')[-1]
for node in key_path.split('/')[0:-1]:
if node not in report:
report[node] = {}
report = report[node] # type: ignore
# sanity check of keys correctness
if not isinstance(report, dict):
self.log.error(f"'{key_path}' is an invalid key, expected type 'dict' but got {type(report)}")
return
if last_node in report:
self.log.error(f"'{key_path}' is an invalid key, last part must not exist at this point")
return
report[last_node] = value
def _try_post(self, what: str, url: str, report: Dict[str, Dict[str, str]]) -> Optional[str]:
self.log.info('Sending %s to: %s' % (what, url))
proxies = dict()
if self.proxy:
self.log.info('Send using HTTP(S) proxy: %s', self.proxy)
proxies['http'] = self.proxy
proxies['https'] = self.proxy
try:
resp = requests.put(url=url, json=report, proxies=proxies)
resp.raise_for_status()
except Exception as e:
fail_reason = 'Failed to send %s to %s: %s' % (what, url, str(e))
self.log.error(fail_reason)
return fail_reason
return None
class EndPoint(enum.Enum):
ceph = 'ceph'
device = 'device'
def collection_delta(self, channels: Optional[List[str]] = None) -> Optional[List[Collection]]:
'''
Find collections that are available in the module, but are not in the db
'''
if self.db_collection is None:
return None
if not channels:
channels = ALL_CHANNELS
else:
for ch in channels:
if ch not in ALL_CHANNELS:
self.log.debug(f"invalid channel name: {ch}")
return None
new_collection : List[Collection] = []
for c in MODULE_COLLECTION:
if c['name'].name not in self.db_collection:
if c['channel'] in channels:
new_collection.append(c['name'])
return new_collection
def is_major_upgrade(self) -> bool:
'''
        Returns True only if the user last opted-in to an older major release
'''
if self.last_opted_in_ceph_version is None or self.last_opted_in_ceph_version == 0:
# we do not know what Ceph version was when the user last opted-in,
# thus we do not wish to nag in case of a major upgrade
return False
mon_map = self.get('mon_map')
mon_min = mon_map.get("min_mon_release", 0)
if mon_min - self.last_opted_in_ceph_version > 0:
self.log.debug(f"major upgrade: mon_min is: {mon_min} and user last opted-in in {self.last_opted_in_ceph_version}")
return True
return False
def is_opted_in(self) -> bool:
        # A length of 0 means the user is opted out: either they never opted
        # in, they ran `telemetry off`, or they upgraded from telemetry
        # revision 1 or 2, which requires re-opting in to revision 3.
if self.db_collection is None:
return False
return len(self.db_collection) > 0
def should_nag(self) -> bool:
# Find delta between opted-in collections and module collections;
# nag only if module has a collection which is not in db, and nag == True.
# We currently do not nag if the user is opted-out (or never opted-in).
# If we wish to do this in the future, we need to have a tri-mode state
# (opted in, opted out, no action yet), and it needs to be guarded by a
# config option (so that nagging can be turned off via config).
# We also need to add a last_opted_out_ceph_version variable, for the
# major upgrade check.
# check if there are collections the user is not opt-in to
# that we should nag about
if self.db_collection is not None:
for c in MODULE_COLLECTION:
if c['name'].name not in self.db_collection:
if c['nag'] == True:
self.log.debug(f"The collection: {c['name']} is not reported")
return True
# user might be opted-in to the most recent collection, or there is no
# new collection which requires nagging about; thus nag in case it's a
# major upgrade and there are new collections
# (which their own nag == False):
new_collections = False
col_delta = self.collection_delta()
if col_delta is not None and len(col_delta) > 0:
new_collections = True
return self.is_major_upgrade() and new_collections
def init_collection(self) -> None:
# We fetch from db the collections the user had already opted-in to.
# During the transition the results will be empty, but the user might
# be opted-in to an older version (e.g. revision = 3)
collection = self.get_store('collection')
if collection is not None:
self.db_collection = json.loads(collection)
if self.db_collection is None:
# happens once on upgrade
if not self.enabled:
# user is not opted-in
self.set_store('collection', json.dumps([]))
self.log.debug("user is not opted-in")
else:
# user is opted-in, verify the revision:
if self.last_opt_revision == REVISION:
self.log.debug(f"telemetry revision is {REVISION}")
base_collection = [Collection.basic_base.name, Collection.device_base.name, Collection.crash_base.name, Collection.ident_base.name]
self.set_store('collection', json.dumps(base_collection))
else:
# user is opted-in to an older version, meaning they need
# to re-opt in regardless
self.set_store('collection', json.dumps([]))
self.log.debug(f"user is opted-in but revision is old ({self.last_opt_revision}), needs to re-opt-in")
# reload collection after setting
collection = self.get_store('collection')
if collection is not None:
self.db_collection = json.loads(collection)
else:
raise RuntimeError('collection is None after initial setting')
else:
# user has already upgraded
self.log.debug(f"user has upgraded already: collection: {self.db_collection}")
def is_enabled_collection(self, collection: Collection) -> bool:
if self.db_collection is None:
return False
return collection.name in self.db_collection
def opt_in_all_collections(self) -> None:
"""
Opt-in to all collections; Update db with the currently available collections in the module
"""
if self.db_collection is None:
raise RuntimeError('db_collection is None after initial setting')
for c in MODULE_COLLECTION:
if c['name'].name not in self.db_collection:
self.db_collection.append(c['name'])
self.set_store('collection', json.dumps(self.db_collection))
def send(self,
report: Dict[str, Dict[str, str]],
endpoint: Optional[List[EndPoint]] = None) -> Tuple[int, str, str]:
if not endpoint:
endpoint = [self.EndPoint.ceph, self.EndPoint.device]
failed = []
success = []
self.log.debug('Send endpoints %s' % endpoint)
for e in endpoint:
if e == self.EndPoint.ceph:
fail_reason = self._try_post('ceph report', self.url, report)
if fail_reason:
failed.append(fail_reason)
else:
now = int(time.time())
self.last_upload = now
self.set_store('last_upload', str(now))
success.append('Ceph report sent to {0}'.format(self.url))
self.log.info('Sent report to {0}'.format(self.url))
elif e == self.EndPoint.device:
if 'device' in self.get_active_channels():
devices = self.gather_device_report()
if devices:
num_devs = 0
num_hosts = 0
for host, ls in devices.items():
self.log.debug('host %s devices %s' % (host, ls))
if not len(ls):
continue
fail_reason = self._try_post('devices', self.device_url,
ls)
if fail_reason:
failed.append(fail_reason)
else:
num_devs += len(ls)
num_hosts += 1
if num_devs:
success.append('Reported %d devices from %d hosts across a total of %d hosts' % (
num_devs, num_hosts, len(devices)))
else:
fail_reason = 'Unable to send device report: Device channel is on, but the generated report was empty.'
failed.append(fail_reason)
self.log.error(fail_reason)
if failed:
return 1, '', '\n'.join(success + failed)
return 0, '', '\n'.join(success)
def format_perf_histogram(self, report: Dict[str, Any]) -> None:
# Formatting the perf histograms so they are human-readable. This will change the
# ranges and values, which are currently in list form, into strings so that
# they are displayed horizontally instead of vertically.
if 'report' in report:
report = report['report']
try:
# Formatting ranges and values in osd_perf_histograms
mode = 'osd_perf_histograms'
for config in report[mode]:
for histogram in config:
# Adjust ranges by converting lists into strings
for axis in config[histogram]['axes']:
for i in range(0, len(axis['ranges'])):
axis['ranges'][i] = str(axis['ranges'][i])
for osd in config[histogram]['osds']:
for i in range(0, len(osd['values'])):
osd['values'][i] = str(osd['values'][i])
except KeyError:
# If the perf channel is not enabled, there should be a KeyError since
# 'osd_perf_histograms' would not be present in the report. In that case,
# the show function should pass as usual without trying to format the
# histograms.
pass
def toggle_channel(self, action: str, channels: Optional[List[str]] = None) -> Tuple[int, str, str]:
'''
Enable or disable a list of channels
'''
if not self.enabled:
# telemetry should be on for channels to be toggled
msg = 'Telemetry is off. Please consider opting-in with `ceph telemetry on`.\n' \
'Preview sample reports with `ceph telemetry preview`.'
return 0, msg, ''
if channels is None:
msg = f'Please provide a channel name. Available channels: {ALL_CHANNELS}.'
return 0, msg, ''
state = action == 'enable'
msg = ''
for c in channels:
if c not in ALL_CHANNELS:
msg = f"{msg}{c} is not a valid channel name. "\
f"Available channels: {ALL_CHANNELS}.\n"
else:
self.set_module_option(f"channel_{c}", state)
setattr(self,
f"channel_{c}",
state)
msg = f"{msg}channel_{c} is {action}d\n"
return 0, msg, ''
@CLIReadCommand('telemetry status')
def status(self) -> Tuple[int, str, str]:
'''
Show current configuration
'''
r = {}
for opt in self.MODULE_OPTIONS:
r[opt['name']] = getattr(self, opt['name'])
r['last_upload'] = (time.ctime(self.last_upload)
if self.last_upload else self.last_upload)
return 0, json.dumps(r, indent=4, sort_keys=True), ''
@CLIReadCommand('telemetry diff')
def diff(self) -> Tuple[int, str, str]:
'''
Show the diff between opted-in collection and available collection
'''
diff = []
keys = ['nag']
for c in MODULE_COLLECTION:
if not self.is_enabled_collection(c['name']):
diff.append({key: val for key, val in c.items() if key not in keys})
r = None
if diff == []:
r = "Telemetry is up to date"
else:
r = json.dumps(diff, indent=4, sort_keys=True)
return 0, r, ''
@CLICommand('telemetry on')
def on(self, license: Optional[str] = None) -> Tuple[int, str, str]:
'''
Enable telemetry reports from this cluster
'''
if license != LICENSE:
return -errno.EPERM, '', f'''Telemetry data is licensed under the {LICENSE_NAME} ({LICENSE_URL}).
To enable, add '--license {LICENSE}' to the 'ceph telemetry on' command.'''
else:
self.set_module_option('enabled', True)
self.enabled = True
self.opt_in_all_collections()
# for major releases upgrade nagging
mon_map = self.get('mon_map')
mon_min = mon_map.get("min_mon_release", 0)
self.set_store('last_opted_in_ceph_version', str(mon_min))
self.last_opted_in_ceph_version = mon_min
msg = 'Telemetry is on.'
disabled_channels = ''
active_channels = self.get_active_channels()
for c in ALL_CHANNELS:
if c not in active_channels and c != 'ident':
disabled_channels = f"{disabled_channels} {c}"
if len(disabled_channels) > 0:
msg = f"{msg}\nSome channels are disabled, please enable with:\n"\
f"`ceph telemetry enable channel{disabled_channels}`"
# wake up serve() to reset health warning
self.event.set()
return 0, msg, ''
@CLICommand('telemetry off')
def off(self) -> Tuple[int, str, str]:
'''
Disable telemetry reports from this cluster
'''
if not self.enabled:
# telemetry is already off
msg = 'Telemetry is currently not enabled, nothing to turn off. '\
'Please consider opting-in with `ceph telemetry on`.\n' \
'Preview sample reports with `ceph telemetry preview`.'
return 0, msg, ''
self.set_module_option('enabled', False)
self.enabled = False
self.set_store('collection', json.dumps([]))
self.db_collection = []
# we might need this info in the future, in case
# of nagging when user is opted-out
mon_map = self.get('mon_map')
mon_min = mon_map.get("min_mon_release", 0)
self.set_store('last_opted_out_ceph_version', str(mon_min))
self.last_opted_out_ceph_version = mon_min
msg = 'Telemetry is now disabled.'
return 0, msg, ''
@CLIReadCommand('telemetry enable channel all')
def enable_channel_all(self, channels: List[str] = ALL_CHANNELS) -> Tuple[int, str, str]:
'''
Enable all channels
'''
return self.toggle_channel('enable', channels)
@CLIReadCommand('telemetry enable channel')
def enable_channel(self, channels: Optional[List[str]] = None) -> Tuple[int, str, str]:
'''
Enable a list of channels
'''
return self.toggle_channel('enable', channels)
@CLIReadCommand('telemetry disable channel all')
def disable_channel_all(self, channels: List[str] = ALL_CHANNELS) -> Tuple[int, str, str]:
'''
Disable all channels
'''
return self.toggle_channel('disable', channels)
@CLIReadCommand('telemetry disable channel')
def disable_channel(self, channels: Optional[List[str]] = None) -> Tuple[int, str, str]:
'''
Disable a list of channels
'''
return self.toggle_channel('disable', channels)
@CLIReadCommand('telemetry channel ls')
def channel_ls(self) -> Tuple[int, str, str]:
'''
List all channels
'''
table = PrettyTable(
[
'NAME', 'ENABLED', 'DEFAULT', 'DESC',
],
border=False)
table.align['NAME'] = 'l'
table.align['ENABLED'] = 'l'
table.align['DEFAULT'] = 'l'
table.align['DESC'] = 'l'
table.left_padding_width = 0
table.right_padding_width = 4
for c in ALL_CHANNELS:
enabled = "ON" if getattr(self, f"channel_{c}") else "OFF"
for o in self.MODULE_OPTIONS:
if o['name'] == f"channel_{c}":
default = "ON" if o.get('default', None) else "OFF"
desc = o.get('desc', None)
table.add_row((
c,
enabled,
default,
desc,
))
return 0, table.get_string(sortby="NAME"), ''
@CLIReadCommand('telemetry collection ls')
def collection_ls(self) -> Tuple[int, str, str]:
'''
List all collections
'''
col_delta = self.collection_delta()
msg = ''
if col_delta is not None and len(col_delta) > 0:
msg = f"New collections are available:\n" \
f"{sorted([c.name for c in col_delta])}\n" \
f"Run `ceph telemetry on` to opt-in to these collections.\n"
table = PrettyTable(
[
'NAME', 'STATUS', 'DESC',
],
border=False)
table.align['NAME'] = 'l'
table.align['STATUS'] = 'l'
table.align['DESC'] = 'l'
table.left_padding_width = 0
table.right_padding_width = 4
for c in MODULE_COLLECTION:
name = c['name']
opted_in = self.is_enabled_collection(name)
channel_enabled = getattr(self, f"channel_{c['channel']}")
status = ''
if channel_enabled and opted_in:
status = "REPORTING"
else:
why = ''
delimiter = ''
if not opted_in:
why += "NOT OPTED-IN"
delimiter = ', '
if not channel_enabled:
why += f"{delimiter}CHANNEL {c['channel']} IS OFF"
status = f"NOT REPORTING: {why}"
desc = c['description']
table.add_row((
name,
status,
desc,
))
if len(msg):
# add a new line between message and table output
msg = f"{msg} \n"
return 0, f'{msg}{table.get_string(sortby="NAME")}', ''
@CLICommand('telemetry send')
def do_send(self,
endpoint: Optional[List[EndPoint]] = None,
license: Optional[str] = None) -> Tuple[int, str, str]:
'''
Send a sample report
'''
if not self.is_opted_in() and license != LICENSE:
self.log.debug(('A telemetry send attempt while opted-out. '
'Asking for license agreement'))
return -errno.EPERM, '', f'''Telemetry data is licensed under the {LICENSE_NAME} ({LICENSE_URL}).
To manually send telemetry data, add '--license {LICENSE}' to the 'ceph telemetry send' command.
Please consider enabling the telemetry module with 'ceph telemetry on'.'''
else:
self.last_report = self.compile_report()
return self.send(self.last_report, endpoint)
@CLIReadCommand('telemetry show')
def show(self, channels: Optional[List[str]] = None) -> Tuple[int, str, str]:
'''
Show a sample report of opted-in collections (except for 'device')
'''
if not self.enabled:
# if telemetry is off, no report is being sent, hence nothing to show
msg = 'Telemetry is off. Please consider opting-in with `ceph telemetry on`.\n' \
'Preview sample reports with `ceph telemetry preview`.'
return 0, msg, ''
report = self.get_report_locked(channels=channels)
self.format_perf_histogram(report)
report = json.dumps(report, indent=4, sort_keys=True)
if self.channel_device:
report += '''\nDevice report is generated separately. To see it run 'ceph telemetry show-device'.'''
return 0, report, ''
@CLIReadCommand('telemetry preview')
def preview(self, channels: Optional[List[str]] = None) -> Tuple[int, str, str]:
'''
Preview a sample report of the most recent collections available (except for 'device')
'''
report = {}
# We use a lock to prevent a scenario where the user wishes to preview
# the report, and at the same time the module hits the interval of
# sending a report with the opted-in collection, which has less data
# than in the preview report.
col_delta = self.collection_delta()
with self.get_report_lock:
if col_delta is not None and len(col_delta) == 0:
# user is already opted-in to the most recent collection
msg = 'Telemetry is up to date, see report with `ceph telemetry show`.'
return 0, msg, ''
else:
# there are collections the user is not opted-in to
next_collection = []
for c in MODULE_COLLECTION:
next_collection.append(c['name'].name)
opted_in_collection = self.db_collection
self.db_collection = next_collection
report = self.get_report(channels=channels)
self.db_collection = opted_in_collection
self.format_perf_histogram(report)
report = json.dumps(report, indent=4, sort_keys=True)
if self.channel_device:
report += '''\nDevice report is generated separately. To see it run 'ceph telemetry preview-device'.'''
return 0, report, ''
@CLIReadCommand('telemetry show-device')
def show_device(self) -> Tuple[int, str, str]:
'''
Show a sample device report
'''
if not self.enabled:
# if telemetry is off, no report is being sent, hence nothing to show
msg = 'Telemetry is off. Please consider opting-in with `ceph telemetry on`.\n' \
'Preview sample device reports with `ceph telemetry preview-device`.'
return 0, msg, ''
if not self.channel_device:
# if device channel is off, device report is not being sent, hence nothing to show
msg = 'device channel is off. Please enable with `ceph telemetry enable channel device`.\n' \
'Preview sample device reports with `ceph telemetry preview-device`.'
return 0, msg, ''
return 0, json.dumps(self.get_report_locked('device'), indent=4, sort_keys=True), ''
@CLIReadCommand('telemetry preview-device')
def preview_device(self) -> Tuple[int, str, str]:
'''
Preview a sample device report of the most recent device collection
'''
report = {}
device_col_delta = self.collection_delta(['device'])
with self.get_report_lock:
if device_col_delta is not None and len(device_col_delta) == 0 and self.channel_device:
# user is already opted-in to the most recent device collection,
# and device channel is on, thus `show-device` should be called
msg = 'device channel is on and up to date, see report with `ceph telemetry show-device`.'
return 0, msg, ''
# either the user is not opted-in at all, or there are collections
# they are not opted-in to
next_collection = []
for c in MODULE_COLLECTION:
next_collection.append(c['name'].name)
opted_in_collection = self.db_collection
self.db_collection = next_collection
report = self.get_report('device')
self.db_collection = opted_in_collection
report = json.dumps(report, indent=4, sort_keys=True)
return 0, report, ''
@CLIReadCommand('telemetry show-all')
def show_all(self) -> Tuple[int, str, str]:
'''
Show a sample report of all enabled channels (including 'device' channel)
'''
if not self.enabled:
# if telemetry is off, no report is being sent, hence nothing to show
msg = 'Telemetry is off. Please consider opting-in with `ceph telemetry on`.\n' \
'Preview sample reports with `ceph telemetry preview`.'
return 0, msg, ''
if not self.channel_device:
# device channel is off, no need to display its report
report = self.get_report_locked('default')
else:
# telemetry is on and device channel is enabled, show both
report = self.get_report_locked('all')
self.format_perf_histogram(report)
return 0, json.dumps(report, indent=4, sort_keys=True), ''
@CLIReadCommand('telemetry preview-all')
def preview_all(self) -> Tuple[int, str, str]:
'''
Preview a sample report of the most recent collections available of all channels (including 'device')
'''
report = {}
col_delta = self.collection_delta()
with self.get_report_lock:
if col_delta is not None and len(col_delta) == 0:
# user is already opted-in to the most recent collection
msg = 'Telemetry is up to date, see report with `ceph telemetry show`.'
return 0, msg, ''
# there are collections the user is not opted-in to
next_collection = []
for c in MODULE_COLLECTION:
next_collection.append(c['name'].name)
opted_in_collection = self.db_collection
self.db_collection = next_collection
report = self.get_report('all')
self.db_collection = opted_in_collection
self.format_perf_histogram(report)
report = json.dumps(report, indent=4, sort_keys=True)
return 0, report, ''
def get_report_locked(self,
report_type: str = 'default',
channels: Optional[List[str]] = None) -> Dict[str, Any]:
'''
A wrapper around get_report that holds get_report_lock while the report is compiled
'''
with self.get_report_lock:
return self.get_report(report_type, channels)
def get_report(self,
report_type: str = 'default',
channels: Optional[List[str]] = None) -> Dict[str, Any]:
if report_type == 'default':
return self.compile_report(channels=channels)
elif report_type == 'device':
return self.gather_device_report()
elif report_type == 'all':
return {'report': self.compile_report(channels=channels),
'device_report': self.gather_device_report()}
return {}
def self_test(self) -> None:
self.opt_in_all_collections()
report = self.compile_report(channels=ALL_CHANNELS)
if len(report) == 0:
raise RuntimeError('Report is empty')
if 'report_id' not in report:
raise RuntimeError('report_id not found in report')
def shutdown(self) -> None:
self.run = False
self.event.set()
def refresh_health_checks(self) -> None:
health_checks = {}
# TODO do we want to nag also in case the user is not opted-in?
if self.enabled and self.should_nag():
health_checks['TELEMETRY_CHANGED'] = {
'severity': 'warning',
'summary': 'Telemetry requires re-opt-in',
'detail': [
'telemetry module includes new collections; please re-opt-in to new collections with `ceph telemetry on`'
]
}
self.set_health_checks(health_checks)
def serve(self) -> None:
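# Main loop: load persisted state, wake up periodically, refresh health checks,
# and (while opted in and enabled) send a report once every `interval` hours.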
self.load()
self.run = True
self.log.debug('Waiting for mgr to warm up')
time.sleep(10)
while self.run:
self.event.clear()
self.refresh_health_checks()
if not self.is_opted_in():
self.log.debug('Not sending report until user re-opts-in')
self.event.wait(1800)
continue
if not self.enabled:
self.log.debug('Not sending report until configured to do so')
self.event.wait(1800)
continue
now = int(time.time())
if not self.last_upload or \
(now - self.last_upload) > self.interval * 3600:
self.log.info('Compiling and sending report to %s',
self.url)
try:
self.last_report = self.compile_report()
except Exception:
self.log.exception('Exception while compiling report:')
self.send(self.last_report)
else:
self.log.debug('Interval for sending new report has not expired')
sleep = 3600
self.log.debug('Sleeping for %d seconds', sleep)
self.event.wait(sleep)
@staticmethod
def can_run() -> Tuple[bool, str]:
return True, ''
| 87,530 | 41.183614 | 159 |
py
|
null |
ceph-main/src/pybind/mgr/telemetry/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
null |
ceph-main/src/pybind/mgr/telemetry/tests/test_telemetry.py
|
import json
import pytest
import unittest
from unittest import mock
import telemetry
from typing import cast, Any, DefaultDict, Dict, List, Optional, Tuple, TypeVar, TYPE_CHECKING, Union
OptionValue = Optional[Union[bool, int, float, str]]
Collection = telemetry.module.Collection
ALL_CHANNELS = telemetry.module.ALL_CHANNELS
MODULE_COLLECTION = telemetry.module.MODULE_COLLECTION
COLLECTION_BASE = ["basic_base", "device_base", "crash_base", "ident_base"]
class TestTelemetry:
@pytest.mark.parametrize("preconfig,postconfig,prestore,poststore,expected",
[
(
# user is not opted-in
{
'last_opt_revision': 1,
'enabled': False,
},
{
'last_opt_revision': 1,
'enabled': False,
},
{
# None
},
{
'collection': []
},
{
'is_opted_in': False,
'is_enabled_collection':
{
'basic_base': False,
'basic_mds_metadata': False,
},
},
),
(
# user is opted-in to an old revision
{
'last_opt_revision': 2,
'enabled': True,
},
{
'last_opt_revision': 2,
'enabled': True,
},
{
# None
},
{
'collection': []
},
{
'is_opted_in': False,
'is_enabled_collection':
{
'basic_base': False,
'basic_mds_metadata': False,
},
},
),
(
# user is opted-in to the latest revision
{
'last_opt_revision': 3,
'enabled': True,
},
{
'last_opt_revision': 3,
'enabled': True,
},
{
# None
},
{
'collection': COLLECTION_BASE
},
{
'is_opted_in': True,
'is_enabled_collection':
{
'basic_base': True,
'basic_mds_metadata': False,
},
},
),
])
def test_upgrade(self,
preconfig: Dict[str, Any], \
postconfig: Dict[str, Any], \
prestore: Dict[str, Any], \
poststore: Dict[str, Any], \
expected: Dict[str, Any]) -> None:
m = telemetry.Module('telemetry', '', '')
if preconfig is not None:
for k, v in preconfig.items():
# no need to mock.patch since _ceph_set_module_option() which
# is called from set_module_option() is already mocked for
# tests, and provides setting default values for all module
# options
m.set_module_option(k, v)
m.config_update_module_option()
m.load()
collection = json.loads(m.get_store('collection'))
assert collection == poststore['collection']
assert m.is_opted_in() == expected['is_opted_in']
assert m.is_enabled_collection(Collection.basic_base) == expected['is_enabled_collection']['basic_base']
assert m.is_enabled_collection(Collection.basic_mds_metadata) == expected['is_enabled_collection']['basic_mds_metadata']
| 4,223 | 33.622951 | 128 |
py
|
null |
ceph-main/src/pybind/mgr/test_orchestrator/README.md
|
# Activate module
You can activate the Ceph Manager module by running:
```
$ ceph mgr module enable test_orchestrator
$ ceph orch set backend test_orchestrator
```
# Check status
```
ceph orch status
```
# Import dummy data
```
$ ceph test_orchestrator load_data -i ./dummy_data.json
```
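The loader feeds the JSON file into the module's `_init_data`, which reads the optional top-level lists `inventory`, `services` and `daemons`. As a minimal sketch (a hypothetical helper script, not part of the repo), an accepted, empty data file could be generated like this:
```
import json

# empty lists are valid; populate them with InventoryHost / ServiceDescription /
# DaemonDescription JSON objects to simulate a cluster
data = {"inventory": [], "services": [], "daemons": []}

with open("dummy_data.json", "w") as f:
    json.dump(data, f, indent=2)
```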
| 290 | 16.117647 | 55 |
md
|
null |
ceph-main/src/pybind/mgr/test_orchestrator/__init__.py
|
from .module import TestOrchestrator
| 37 | 18 | 36 |
py
|
null |
ceph-main/src/pybind/mgr/test_orchestrator/module.py
|
import errno
import json
import re
import os
import threading
import functools
import itertools
from subprocess import check_output, CalledProcessError
from ceph.deployment.service_spec import ServiceSpec, NFSServiceSpec, IscsiServiceSpec
try:
from typing import Callable, List, Sequence, Tuple
except ImportError:
pass # type checking
from ceph.deployment import inventory
from ceph.deployment.drive_group import DriveGroupSpec
from mgr_module import CLICommand, HandleCommandResult
from mgr_module import MgrModule
import orchestrator
from orchestrator import handle_orch_error, raise_if_exception
class TestOrchestrator(MgrModule, orchestrator.Orchestrator):
"""
This is an orchestrator implementation used for internal testing. It's meant for
development environments and integration testing.
It does not actually do anything.
The implementation is similar to the Rook orchestrator, but simpler.
"""
@CLICommand('test_orchestrator load_data', perm='w')
def _load_data(self, inbuf):
"""
load dummy data into test orchestrator
"""
try:
data = json.loads(inbuf)
self._init_data(data)
return HandleCommandResult()
except json.decoder.JSONDecodeError as e:
msg = 'Invalid JSON file: {}'.format(e)
return HandleCommandResult(retval=-errno.EINVAL, stderr=msg)
except orchestrator.OrchestratorValidationError as e:
return HandleCommandResult(retval=-errno.EINVAL, stderr=str(e))
def available(self):
return True, "", {}
def __init__(self, *args, **kwargs):
super(TestOrchestrator, self).__init__(*args, **kwargs)
self._initialized = threading.Event()
self._shutdown = threading.Event()
self._init_data({})
def shutdown(self):
self._shutdown.set()
def serve(self):
self._initialized.set()
while not self._shutdown.is_set():
self._shutdown.wait(5)
def _init_data(self, data=None):
self._inventory = [orchestrator.InventoryHost.from_json(inventory_host)
for inventory_host in data.get('inventory', [])]
self._services = [orchestrator.ServiceDescription.from_json(service)
for service in data.get('services', [])]
self._daemons = [orchestrator.DaemonDescription.from_json(daemon)
for daemon in data.get('daemons', [])]
@handle_orch_error
def get_inventory(self, host_filter=None, refresh=False):
"""
There is no guarantee which devices are returned by get_inventory.
"""
if host_filter and host_filter.hosts is not None:
assert isinstance(host_filter.hosts, list)
if self._inventory:
if host_filter:
return list(filter(lambda host: host.name in host_filter.hosts,
self._inventory))
return self._inventory
try:
c_v_out = check_output(['ceph-volume', 'inventory', '--format', 'json'])
except OSError:
cmd = """
. {tmpdir}/ceph-volume-virtualenv/bin/activate
ceph-volume inventory --format json
"""
try:
c_v_out = check_output(cmd.format(tmpdir=os.environ.get('TMPDIR', '/tmp')), shell=True)
except (OSError, CalledProcessError):
c_v_out = check_output(cmd.format(tmpdir='.'), shell=True)
for out in c_v_out.splitlines():
self.log.error(out)
devs = inventory.Devices.from_json(json.loads(out))
return [orchestrator.InventoryHost('localhost', devs)]
self.log.error('c-v failed: ' + str(c_v_out))
raise Exception('c-v failed')
def _get_ceph_daemons(self):
# type: () -> List[orchestrator.DaemonDescription]
""" Return ceph daemons on the running host."""
types = ("mds", "osd", "mon", "rgw", "mgr", "nfs", "iscsi")
out = map(str, check_output(['ps', 'aux']).splitlines())
processes = [p for p in out if any(
[('ceph-{} '.format(t) in p) for t in types])]
daemons = []
for p in processes:
# parse daemon type
m = re.search('ceph-([^ ]+)', p)
if m:
_daemon_type = m.group(1)
else:
raise AssertionError('Fail to determine daemon type from {}'.format(p))
# parse daemon ID. Possible options: `-i <id>`, `--id=<id>`, `--id <id>`
patterns = [r'-i\s(\w+)', r'--id[\s=](\w+)']
for pattern in patterns:
m = re.search(pattern, p)
if m:
daemon_id = m.group(1)
break
else:
raise AssertionError('Fail to determine daemon ID from {}'.format(p))
daemon = orchestrator.DaemonDescription(
daemon_type=_daemon_type, daemon_id=daemon_id, hostname='localhost')
daemons.append(daemon)
return daemons
@handle_orch_error
def describe_service(self, service_type=None, service_name=None, refresh=False):
if self._services:
# Dummy data
services = self._services
if service_type is not None:
services = list(filter(lambda s: s.spec.service_type == service_type, services))
else:
# Deduce services from daemons running on localhost
all_daemons = self._get_ceph_daemons()
services = []
for daemon_type, daemons in itertools.groupby(all_daemons, key=lambda d: d.daemon_type):
if service_type is not None and service_type != daemon_type:
continue
daemon_size = len(list(daemons))
services.append(orchestrator.ServiceDescription(
spec=ServiceSpec(
service_type=daemon_type, # type: ignore
),
size=daemon_size, running=daemon_size))
def _filter_func(svc):
if service_name is not None and service_name != svc.spec.service_name():
return False
return True
return list(filter(_filter_func, services))
@handle_orch_error
def list_daemons(self, service_name=None, daemon_type=None, daemon_id=None, host=None, refresh=False):
"""
There is no guarantee which daemons are returned by describe_service, except that
it returns the mgr we're running in.
"""
if daemon_type:
daemon_types = ("mds", "osd", "mon", "rgw", "mgr", "iscsi", "crash", "nfs")
assert daemon_type in daemon_types, daemon_type + " unsupported"
daemons = self._daemons if self._daemons else self._get_ceph_daemons()
def _filter_func(d):
if service_name is not None and service_name != d.service_name():
return False
if daemon_type is not None and daemon_type != d.daemon_type:
return False
if daemon_id is not None and daemon_id != d.daemon_id:
return False
if host is not None and host != d.hostname:
return False
return True
return list(filter(_filter_func, daemons))
def preview_drivegroups(self, drive_group_name=None, dg_specs=None):
return [{}]
@handle_orch_error
def create_osds(self, drive_group):
# type: (DriveGroupSpec) -> str
""" Creates OSDs from a drive group specification.
$: ceph orch osd create -i <dg.file>
The drivegroup file must only contain one spec at a time.
"""
return self._create_osds(drive_group)
def _create_osds(self, drive_group):
# type: (DriveGroupSpec) -> str
drive_group.validate()
all_hosts = raise_if_exception(self.get_hosts())
if not drive_group.placement.filter_matching_hostspecs(all_hosts):
raise orchestrator.OrchestratorValidationError('failed to match')
return ''
@handle_orch_error
def apply_drivegroups(self, specs):
# type: (List[DriveGroupSpec]) -> List[str]
return [self._create_osds(dg) for dg in specs]
@handle_orch_error
def remove_daemons(self, names):
assert isinstance(names, list)
return 'done'
@handle_orch_error
def remove_service(self, service_name, force = False):
assert isinstance(service_name, str)
return 'done'
@handle_orch_error
def blink_device_light(self, ident_fault, on, locations):
assert ident_fault in ("ident", "fault")
assert len(locations)
return ''
@handle_orch_error
def service_action(self, action, service_name):
return 'done'
@handle_orch_error
def daemon_action(self, action, daemon_name, image=None):
return 'done'
@handle_orch_error
def add_daemon(self, spec: ServiceSpec):
return [spec.one_line_str()]
@handle_orch_error
def apply_nfs(self, spec):
return spec.one_line_str()
@handle_orch_error
def apply_iscsi(self, spec):
# type: (IscsiServiceSpec) -> str
return spec.one_line_str()
@handle_orch_error
def get_hosts(self):
if self._inventory:
return [orchestrator.HostSpec(i.name, i.addr, i.labels) for i in self._inventory]
return [orchestrator.HostSpec('localhost')]
@handle_orch_error
def add_host(self, spec):
# type: (orchestrator.HostSpec) -> str
host = spec.hostname
if host == 'raise_validation_error':
raise orchestrator.OrchestratorValidationError("MON count must be either 1, 3 or 5")
if host == 'raise_error':
raise orchestrator.OrchestratorError("host address is empty")
if host == 'raise_bug':
raise ZeroDivisionError()
if host == 'raise_not_implemented':
raise NotImplementedError()
if host == 'raise_no_orchestrator':
raise orchestrator.NoOrchestrator()
if host == 'raise_import_error':
raise ImportError("test_orchestrator not enabled")
assert isinstance(host, str)
return ''
@handle_orch_error
def remove_host(self, host, force: bool, offline: bool):
assert isinstance(host, str)
return 'done'
@handle_orch_error
def apply_mgr(self, spec):
# type: (ServiceSpec) -> str
assert not spec.placement.hosts or len(spec.placement.hosts) == spec.placement.count
assert all([isinstance(h, str) for h in spec.placement.hosts])
return spec.one_line_str()
@handle_orch_error
def apply_mon(self, spec):
# type: (ServiceSpec) -> str
assert not spec.placement.hosts or len(spec.placement.hosts) == spec.placement.count
assert all([isinstance(h[0], str) for h in spec.placement.hosts])
assert all([isinstance(h[1], str) or h[1] is None for h in spec.placement.hosts])
return spec.one_line_str()
| 11,160 | 35.355049 | 106 |
py
|
null |
ceph-main/src/pybind/mgr/tests/__init__.py
|
# type: ignore
import json
import logging
import os
if 'UNITTEST' in os.environ:
# Mock ceph_module. Otherwise every module that is involved in a testcase and imports it will
# raise an ImportError
import sys
try:
from unittest import mock
except ImportError:
import mock
M_classes = set()
class M(object):
"""
Note that:
* self.set_store() populates self._store
* self.set_module_option() populates self._store[module_name]
* self.get(thing) comes from self._store['_ceph_get' + thing]
"""
def mock_store_get(self, kind, key, default):
if not hasattr(self, '_store'):
self._store = {}
return self._store.get(f'mock_store/{kind}/{key}', default)
def mock_store_set(self, kind, key, value):
if not hasattr(self, '_store'):
self._store = {}
k = f'mock_store/{kind}/{key}'
if value is None:
if k in self._store:
del self._store[k]
else:
self._store[k] = value
def mock_store_prefix(self, kind, prefix):
if not hasattr(self, '_store'):
self._store = {}
full_prefix = f'mock_store/{kind}/{prefix}'
kind_len = len(f'mock_store/{kind}/')
return {
k[kind_len:]: v for k, v in self._store.items()
if k.startswith(full_prefix)
}
def _ceph_get_store(self, k):
return self.mock_store_get('store', k, None)
def _ceph_set_store(self, k, v):
self.mock_store_set('store', k, v)
def _ceph_get_store_prefix(self, prefix):
return self.mock_store_prefix('store', prefix)
def _ceph_get_module_option(self, module, key, localized_prefix=None):
try:
_, val, _ = self.check_mon_command({
'prefix': 'config get',
'who': 'mgr',
'key': f'mgr/{module}/{key}'
})
except FileNotFoundError:
val = None
mo = [o for o in self.MODULE_OPTIONS if o['name'] == key]
if len(mo) >= 1:  # >= 1 because MODULE_OPTIONS may contain duplicate entries
# when multiple modules are imported in the same test run.
if 'default' in mo[0] and val is None:
val = mo[0]['default']
if val is not None:
cls = {
'str': str,
'secs': int,
'bool': lambda s: bool(s) and s != 'false' and s != 'False',
'int': int,
}[mo[0].get('type', 'str')]
return cls(val)
return val
else:
return val if val is not None else ''
def _ceph_set_module_option(self, module, key, val):
_, _, _ = self.check_mon_command({
'prefix': 'config set',
'who': 'mgr',
'name': f'mgr/{module}/{key}',
'value': val
})
return val
def _ceph_get(self, data_name):
return self.mock_store_get('_ceph_get', data_name, mock.MagicMock())
def _ceph_send_command(self, res, svc_type, svc_id, command, tag, inbuf):
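# Minimal mon-command mock: 'config get/set/rm/dump' are served from the mock
# config store; anything else is dispatched to an optional per-test
# _mon_command_mock_<prefix> method, and the result is completed with retval 0.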
cmd = json.loads(command)
getattr(self, '_mon_commands_sent', []).append(cmd)
# Mocking the config store is handy sometimes:
def config_get():
who = cmd['who'].split('.')
whos = ['global'] + ['.'.join(who[:i + 1]) for i in range(len(who))]
for attempt in reversed(whos):
val = self.mock_store_get('config', f'{attempt}/{cmd["key"]}', None)
if val is not None:
return val
return None
def config_set():
self.mock_store_set('config', f'{cmd["who"]}/{cmd["name"]}', cmd['value'])
return ''
def config_rm():
self.mock_store_set('config', f'{cmd["who"]}/{cmd["name"]}', None)
return ''
def config_dump():
r = []
for prefix, value in self.mock_store_prefix('config', '').items():
section, name = prefix.split('/', 1)
r.append({
'name': name,
'section': section,
'value': value
})
return json.dumps(r)
outb = ''
if cmd['prefix'] == 'config get':
outb = config_get()
elif cmd['prefix'] == 'config set':
outb = config_set()
elif cmd['prefix'] == 'config dump':
outb = config_dump()
elif cmd['prefix'] == 'config rm':
outb = config_rm()
elif hasattr(self, '_mon_command_mock_' + cmd['prefix'].replace(' ', '_')):
a = getattr(self, '_mon_command_mock_' + cmd['prefix'].replace(' ', '_'))
outb = a(cmd)
res.complete(0, outb, '')
def _ceph_get_foreign_option(self, entity, name):
who = entity.split('.')
whos = ['global'] + ['.'.join(who[:i + 1]) for i in range(len(who))]
for attempt in reversed(whos):
val = self.mock_store_get('config', f'{attempt}/{name}', None)
if val is not None:
return val
return None
def assert_issued_mon_command(self, command):
assert command in self._mon_commands_sent, self._mon_commands_sent
@property
def _logger(self):
return logging.getLogger(__name__)
@_logger.setter
def _logger(self, _):
pass
def __init__(self, *args):
self._mon_commands_sent = []
if not hasattr(self, '_store'):
self._store = {}
if self.__class__ not in M_classes:
# call those only once.
self._register_commands('')
self._register_options('')
M_classes.add(self.__class__)
super(M, self).__init__()
self._ceph_get_version = mock.Mock()
self._ceph_get_ceph_conf_path = mock.MagicMock()
self._ceph_get_option = mock.MagicMock()
self._ceph_get_context = mock.MagicMock()
self._ceph_register_client = mock.MagicMock()
self._ceph_set_health_checks = mock.MagicMock()
self._configure_logging = lambda *_: None
self._unconfigure_logging = mock.MagicMock()
self._ceph_log = mock.MagicMock()
self._ceph_dispatch_remote = lambda *_: None
self._ceph_get_mgr_id = mock.MagicMock()
cm = mock.Mock()
cm.BaseMgrModule = M
cm.BaseMgrStandbyModule = M
sys.modules['ceph_module'] = cm
def mock_ceph_modules():
class MockRadosError(Exception):
def __init__(self, message, errno=None):
super(MockRadosError, self).__init__(message)
self.errno = errno
def __str__(self):
msg = super(MockRadosError, self).__str__()
if self.errno is None:
return msg
return '[errno {0}] {1}'.format(self.errno, msg)
class MockObjectNotFound(Exception):
pass
sys.modules.update({
'rados': mock.MagicMock(
Error=MockRadosError,
OSError=MockRadosError,
ObjectNotFound=MockObjectNotFound),
'rbd': mock.Mock(),
'cephfs': mock.Mock(),
})
# Unconditionally mock the rados objects when we're imported
mock_ceph_modules() # type: ignore
| 7,991 | 34.207048 | 97 |
py
|
null |
ceph-main/src/pybind/mgr/tests/test_mgr_util.py
|
import datetime
import mgr_util
import pytest
@pytest.mark.parametrize(
"delta, out",
[
(datetime.timedelta(minutes=90), '90m'),
(datetime.timedelta(minutes=190), '3h'),
(datetime.timedelta(days=3), '3d'),
(datetime.timedelta(hours=3), '3h'),
(datetime.timedelta(days=365 * 3.1), '3y'),
(datetime.timedelta(minutes=90), '90m'),
]
)
def test_pretty_timedelta(delta: datetime.timedelta, out: str):
assert mgr_util.to_pretty_timedelta(delta) == out
| 513 | 24.7 | 63 |
py
|
null |
ceph-main/src/pybind/mgr/tests/test_object_format.py
|
import errno
from typing import (
Any,
Dict,
Optional,
Tuple,
Type,
TypeVar,
)
import pytest
from mgr_module import CLICommand
import object_format
T = TypeVar("T", bound="Parent")
class Simpler:
def __init__(self, name, val=None):
self.name = name
self.val = val or {}
self.version = 1
def to_simplified(self) -> Dict[str, Any]:
return {
"version": self.version,
"name": self.name,
"value": self.val,
}
class JSONer(Simpler):
def to_json(self) -> Dict[str, Any]:
d = self.to_simplified()
d["_json"] = True
return d
@classmethod
def from_json(cls: Type[T], data) -> T:
o = cls(data.get("name", ""), data.get("value"))
o.version = data.get("version", 1) + 1
return o
class YAMLer(Simpler):
def to_yaml(self) -> Dict[str, Any]:
d = self.to_simplified()
d["_yaml"] = True
return d
@pytest.mark.parametrize(
"obj, compatible, json_val",
[
({}, False, "{}"),
({"name": "foobar"}, False, '{"name": "foobar"}'),
([1, 2, 3], False, "[1, 2, 3]"),
(JSONer("bob"), False, '{"name": "bob", "value": {}, "version": 1}'),
(
JSONer("betty", 77),
False,
'{"name": "betty", "value": 77, "version": 1}',
),
({}, True, "{}"),
({"name": "foobar"}, True, '{"name": "foobar"}'),
(
JSONer("bob"),
True,
'{"_json": true, "name": "bob", "value": {}, "version": 1}',
),
],
)
def test_format_json(obj: Any, compatible: bool, json_val: str):
assert (
object_format.ObjectFormatAdapter(
obj, compatible=compatible, json_indent=None
).format_json()
== json_val
)
@pytest.mark.parametrize(
"obj, compatible, yaml_val",
[
({}, False, "{}\n"),
({"name": "foobar"}, False, "name: foobar\n"),
(
{"stuff": [1, 88, 909, 32]},
False,
"stuff:\n- 1\n- 88\n- 909\n- 32\n",
),
(
JSONer("zebulon", "999"),
False,
"name: zebulon\nvalue: '999'\nversion: 1\n",
),
({}, True, "{}\n"),
({"name": "foobar"}, True, "name: foobar\n"),
(
YAMLer("thingy", "404"),
True,
"_yaml: true\nname: thingy\nvalue: '404'\nversion: 1\n",
),
],
)
def test_format_yaml(obj: Any, compatible: bool, yaml_val: str):
assert (
object_format.ObjectFormatAdapter(
obj, compatible=compatible
).format_yaml()
== yaml_val
)
class Retty:
def __init__(self, v) -> None:
self.value = v
def mgr_return_value(self) -> int:
return self.value
@pytest.mark.parametrize(
"obj, ret",
[
({}, 0),
({"fish": "sticks"}, 0),
(-55, 0),
(Retty(0), 0),
(Retty(-55), -55),
],
)
def test_return_value(obj: Any, ret: int):
rva = object_format.ReturnValueAdapter(obj)
# a ReturnValueAdapter instance meets the ReturnValueProvider protocol.
assert object_format._is_return_value_provider(rva)
assert rva.mgr_return_value() == ret
def test_valid_formats():
ofa = object_format.ObjectFormatAdapter({"fred": "wilma"})
vf = ofa.valid_formats()
assert "json" in vf
assert "yaml" in vf
assert "xml" in vf
assert "plain" in vf
def test_error_response_exceptions():
err = object_format.ErrorResponseBase()
with pytest.raises(NotImplementedError):
err.format_response()
err = object_format.UnsupportedFormat("cheese")
assert err.format_response() == (-22, "", "Unsupported format: cheese")
err = object_format.UnknownFormat("chocolate")
assert err.format_response() == (-22, "", "Unknown format name: chocolate")
@pytest.mark.parametrize(
"value, format, result",
[
({}, None, (0, "{}", "")),
({"blat": True}, "json", (0, '{\n "blat": true\n}', "")),
({"blat": True}, "yaml", (0, "blat: true\n", "")),
({"blat": True}, "toml", (-22, "", "Unknown format name: toml")),
({"blat": True}, "xml", (-22, "", "Unsupported format: xml")),
(
JSONer("hoop", "303"),
"yaml",
(0, "name: hoop\nvalue: '303'\nversion: 1\n", ""),
),
],
)
def test_responder_decorator_default(
value: Any, format: Optional[str], result: Tuple[int, str, str]
) -> None:
@object_format.Responder()
def orf_value(format: Optional[str] = None):
return value
assert orf_value(format=format) == result
class PhonyMultiYAMLFormatAdapter(object_format.ObjectFormatAdapter):
"""This adapter puts a yaml document/directive separator line
before all output. It doesn't actually support multiple documents.
"""
def format_yaml(self):
yml = super().format_yaml()
return "---\n{}".format(yml)
@pytest.mark.parametrize(
"value, format, result",
[
({}, None, (0, "{}", "")),
({"blat": True}, "json", (0, '{\n "blat": true\n}', "")),
({"blat": True}, "yaml", (0, "---\nblat: true\n", "")),
({"blat": True}, "toml", (-22, "", "Unknown format name: toml")),
({"blat": True}, "xml", (-22, "", "Unsupported format: xml")),
(
JSONer("hoop", "303"),
"yaml",
(0, "---\nname: hoop\nvalue: '303'\nversion: 1\n", ""),
),
],
)
def test_responder_decorator_custom(
value: Any, format: Optional[str], result: Tuple[int, str, str]
) -> None:
@object_format.Responder(PhonyMultiYAMLFormatAdapter)
def orf_value(format: Optional[str] = None):
return value
assert orf_value(format=format) == result
class FancyDemoAdapter(PhonyMultiYAMLFormatAdapter):
"""This adapter demonstrates adding formatting for other formats
like xml and plain text.
"""
def format_xml(self) -> str:
name = self.obj.get("name")
size = self.obj.get("size")
return f'<object name="{name}" size="{size}" />'
def format_plain(self) -> str:
name = self.obj.get("name")
size = self.obj.get("size")
es = 'es' if size != 1 else ''
return f"{size} box{es} of {name}"
class DecoDemo:
"""Class to stand in for a mgr module, used to test CLICommand integration."""
@CLICommand("alpha one", perm="rw")
@object_format.Responder()
def alpha_one(self, name: str = "default") -> Dict[str, str]:
return {
"alpha": "one",
"name": name,
"weight": 300,
}
@CLICommand("beta two", perm="r")
@object_format.Responder()
def beta_two(
self, name: str = "default", format: Optional[str] = None
) -> Dict[str, str]:
return {
"beta": "two",
"name": name,
"weight": 72,
}
@CLICommand("gamma three", perm="rw")
@object_format.Responder(FancyDemoAdapter)
def gamma_three(self, size: int = 0) -> Dict[str, Any]:
return {"name": "funnystuff", "size": size}
@CLICommand("z_err", perm="rw")
@object_format.ErrorResponseHandler()
def z_err(self, name: str = "default") -> Tuple[int, str, str]:
if "z" in name:
raise object_format.ErrorResponse(f"{name} bad")
return 0, name, ""
@CLICommand("empty one", perm="rw")
@object_format.EmptyResponder()
def empty_one(self, name: str = "default", retval: Optional[int] = None) -> None:
# in real code, this would be making some sort of state change
# but we need to handle errors still
if retval is None:
retval = -5
if name in ["pow"]:
raise object_format.ErrorResponse(name, return_value=retval)
return
@CLICommand("empty bad", perm="rw")
@object_format.EmptyResponder()
def empty_bad(self, name: str = "default") -> int:
# in real code, this would be making some sort of state change
return 5
@pytest.mark.parametrize(
"prefix, can_format, args, response",
[
(
"alpha one",
True,
{"name": "moonbase"},
(
0,
'{\n "alpha": "one",\n "name": "moonbase",\n "weight": 300\n}',
"",
),
),
# ---
(
"alpha one",
True,
{"name": "moonbase2", "format": "yaml"},
(
0,
"alpha: one\nname: moonbase2\nweight: 300\n",
"",
),
),
# ---
(
"alpha one",
True,
{"name": "moonbase2", "format": "chocolate"},
(
-22,
"",
"Unknown format name: chocolate",
),
),
# ---
(
"beta two",
True,
{"name": "blocker"},
(
0,
'{\n "beta": "two",\n "name": "blocker",\n "weight": 72\n}',
"",
),
),
# ---
(
"beta two",
True,
{"name": "test", "format": "yaml"},
(
0,
"beta: two\nname: test\nweight: 72\n",
"",
),
),
# ---
(
"beta two",
True,
{"name": "test", "format": "plain"},
(
-22,
"",
"Unsupported format: plain",
),
),
# ---
(
"gamma three",
True,
{},
(
0,
'{\n "name": "funnystuff",\n "size": 0\n}',
"",
),
),
# ---
(
"gamma three",
True,
{"size": 1, "format": "json"},
(
0,
'{\n "name": "funnystuff",\n "size": 1\n}',
"",
),
),
# ---
(
"gamma three",
True,
{"size": 1, "format": "plain"},
(
0,
"1 box of funnystuff",
"",
),
),
# ---
(
"gamma three",
True,
{"size": 2, "format": "plain"},
(
0,
"2 boxes of funnystuff",
"",
),
),
# ---
(
"gamma three",
True,
{"size": 2, "format": "xml"},
(
0,
'<object name="funnystuff" size="2" />',
"",
),
),
# ---
(
"gamma three",
True,
{"size": 2, "format": "toml"},
(
-22,
"",
"Unknown format name: toml",
),
),
# ---
(
"z_err",
False,
{"name": "foobar"},
(
0,
"foobar",
"",
),
),
# ---
(
"z_err",
False,
{"name": "zamboni"},
(
-22,
"",
"zamboni bad",
),
),
# ---
(
"empty one",
False,
{"name": "zucchini"},
(
0,
"",
"",
),
),
# ---
(
"empty one",
False,
{"name": "pow"},
(
-5,
"",
"pow",
),
),
# Ensure setting return_value to zero even on an exception is honored
(
"empty one",
False,
{"name": "pow", "retval": 0},
(
0,
"",
"pow",
),
),
],
)
def test_cli_with_decorators(prefix, can_format, args, response):
dd = DecoDemo()
cmd = CLICommand.COMMANDS[prefix]
assert cmd.call(dd, args, None) == response
# slightly hacky way to check that the CLI "knows" about a --format option:
# check the extra_args feature of the decorators (Responder) that provide it
if can_format:
assert 'name=format,' in cmd.args
def test_error_response():
e1 = object_format.ErrorResponse("nope")
assert e1.format_response() == (-22, "", "nope")
assert e1.return_value == -22
assert e1.errno == 22
assert "ErrorResponse" in repr(e1)
assert "nope" in repr(e1)
assert e1.mgr_return_value() == -22
try:
open("/this/is_/extremely_/unlikely/_to/exist.txt")
except Exception as e:
e2 = object_format.ErrorResponse.wrap(e)
r = e2.format_response()
assert r[0] == -errno.ENOENT
assert r[1] == ""
assert "No such file or directory" in r[2]
assert "ErrorResponse" in repr(e2)
assert "No such file or directory" in repr(e2)
assert r[0] == e2.mgr_return_value()
e3 = object_format.ErrorResponse.wrap(RuntimeError("blat"))
r = e3.format_response()
assert r[0] == -errno.EINVAL
assert r[1] == ""
assert "blat" in r[2]
assert r[0] == e3.mgr_return_value()
# A custom exception type with an errno property
class MyCoolException(Exception):
def __init__(self, err_msg: str, errno: int = 0) -> None:
super().__init__(errno, err_msg)
self.errno = errno
self.err_msg = err_msg
def __str__(self) -> str:
return self.err_msg
e4 = object_format.ErrorResponse.wrap(MyCoolException("beep", -17))
r = e4.format_response()
assert r[0] == -17
assert r[1] == ""
assert r[2] == "beep"
assert e4.mgr_return_value() == -17
e5 = object_format.ErrorResponse.wrap(MyCoolException("ok, fine", 0))
r = e5.format_response()
assert r[0] == 0
assert r[1] == ""
assert r[2] == "ok, fine"
e5 = object_format.ErrorResponse.wrap(MyCoolException("no can do", 8))
r = e5.format_response()
assert r[0] == -8
assert r[1] == ""
assert r[2] == "no can do"
# A custom exception type that inherits from ErrorResponseBase
class MyErrorResponse(object_format.ErrorResponseBase):
def __init__(self, err_msg: str, return_value: int):
super().__init__(err_msg)
self.msg = err_msg
self.return_value = return_value
def format_response(self):
return self.return_value, "", self.msg
e6 = object_format.ErrorResponse.wrap(MyErrorResponse("yeah, sure", 0))
r = e6.format_response()
assert r[0] == 0
assert r[1] == ""
assert r[2] == "yeah, sure"
assert isinstance(e5, object_format.ErrorResponseBase)
assert isinstance(e6, MyErrorResponse)
e7 = object_format.ErrorResponse.wrap(MyErrorResponse("no can do", -8))
r = e7.format_response()
assert r[0] == -8
assert r[1] == ""
assert r[2] == "no can do"
assert isinstance(e7, object_format.ErrorResponseBase)
assert isinstance(e7, MyErrorResponse)
def test_empty_responder_return_check():
dd = DecoDemo()
with pytest.raises(ValueError):
CLICommand.COMMANDS["empty bad"].call(dd, {}, None)
| 15,575 | 25.716981 | 85 |
py
|
null |
ceph-main/src/pybind/mgr/tests/test_tls.py
|
from mgr_util import create_self_signed_cert, verify_tls, ServerConfigException, get_cert_issuer_info
from OpenSSL import crypto, SSL
import unittest
valid_ceph_cert = """-----BEGIN CERTIFICATE-----\nMIICxjCCAa4CEQCpHIQuSYhCII1J0SVGYnT1MA0GCSqGSIb3DQEBDQUAMCExDTAL\nBgNVBAoMBENlcGgxEDAOBgNVBAMMB2NlcGhhZG0wHhcNMjIwNzA2MTE1MjUyWhcN\nMzIwNzAzMTE1MjUyWjAhMQ0wCwYDVQQKDARDZXBoMRAwDgYDVQQDDAdjZXBoYWRt\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAn2ApFna2CVYE7RDtjJVk\ncJTcJQrjzDOlCoZtxb1QMCQZMXjx/7d6bseQP+dkkeA0hZxnjJZWeu6c/YnQ1JiT\n2aDuDpWoJAaiinHRJyZuY5tqG+ggn95RdToZVbeC+0uALzYi4UFacC3sfpkyIKBR\nic43+2fQNz0PZ+8INSTtm75Y53gbWuGF7Dv95200AmAN2/u8LKWZIvdhbRborxOF\nlK2T40qbj9eH3ewIN/6Eibxrvg4va3pIoOaq0XdJHAL/MjDGJAtahPIenwcjuega\n4PSlB0h3qiyFXz7BG8P0QsPP6slyD58ZJtCGtJiWPOhlq47DlnWlJzRGDEFLLryf\n8wIDAQABMA0GCSqGSIb3DQEBDQUAA4IBAQBixd7RZawlYiTZaCmv3Vy7X/hhabac\nE/YiuFt1YMe0C9+D8IcCQN/IRww/Bi7Af6tm+ncHT9GsOGWX6hahXDKTw3b9nSDi\nETvjkUTYOayZGfhYpRA6m6e/2ypcUYsiXRDY9zneDKCdPREIA1D6L2fROHetFX9r\nX9rSry01xrYwNlYA1e6GLMXm2NaGsLT3JJlRBtT3P7f1jtRGXcwkc7ns0AtW0uNj\nGqRLHfJazdgWJFsj8vBdMs7Ci0C/b5/f7J/DLpPCvUA3Fqwn9MzHl01UwlDsKy1a\nROi4cfQNOLbWX8g3PfIlqtdGYNA77UPxvy1SUimmtdopZaEVWKkqeWYK\n-----END CERTIFICATE-----\n
"""
invalid_cert = """-----BEGIN CERTIFICATE-----\nMIICxjCCAa4CEQCpHIQuSYhCII1J0SVGYnT1MA0GCSqGSIb3DQEBDQUAMCExDTAL\nBgNVBAoMBENlcGgxEDAOBgNVBAMMB2NlcGhhZG0wHhcNMjIwNzA2MTE1MjUyWhcN\nMzIwNzAzMTE1MjUyWjAhMQ0wCwYDVQQKDARDZXBoMRAwDgYDVQQDDAdjZXBoYWRt\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEBn2ApFna2CVYE7RDtjJVk\ncJTcJQrjzDOlCoZtxb1QMCQZMXjx/7d6bseQP+dkkeA0hZxnjJZWeu6c/YnQ1JiT\n2aDuDpWoJAaiinHRJyZuY5tqG+ggn95RdToZVbeC+0uALzYi4UFacC3sfpkyIKBR\nic43+2fQNz0PZ+8INSTtm75Y53gbWuGF7Dv95200AmAN2/u8LKWZIvdhbRborxOF\nlK2T40qbj9eH3ewIN/6Eibxrvg4va3pIoOaq0XdJHAL/MjDGJAtahPIenwcjuega\n4PSlB0h3qiyFXz7BG8P0QsPP6slyD58ZJtCGtJiWPOhlq47DlnWlJzRGDEFLLryf\n8wIDAQABMA0GCSqGSIb3DQEBDQUAA4IBAQBixd7RZawlYiTZaCmv3Vy7X/hhabac\nE/YiuFt1YMe0C9+D8IcCQN/IRww/Bi7Af6tm+ncHT9GsOGWX6hahXDKTw3b9nSDi\nETvjkUTYOayZGfhYpRA6m6e/2ypcUYsiXRDY9zneDKCdPREIA1D6L2fROHetFX9r\nX9rSry01xrYwNlYA1e6GLMXm2NaGsLT3JJlRBtT3P7f1jtRGXcwkc7ns0AtW0uNj\nGqRLHfJazdgWJFsj8vBdMs7Ci0C/b5/f7J/DLpPCvUA3Fqwn9MzHl01UwlDsKy1a\nROi4cfQNOLbWX8g3PfIlqtdGYNA77UPxvy1SUimmtdopZa\n-----END CERTIFICATE-----\n
"""
class TLSchecks(unittest.TestCase):
def test_defaults(self):
crt, key = create_self_signed_cert()
verify_tls(crt, key)
def test_specific_dname(self):
crt, key = create_self_signed_cert(dname={'O': 'Ceph', 'OU': 'testsuite'})
verify_tls(crt, key)
def test_invalid_RDN(self):
self.assertRaises(ValueError, create_self_signed_cert,
dname={'O': 'Ceph', 'Bogus': 'testsuite'})
def test_invalid_key(self):
crt, key = create_self_signed_cert()
# fudge the key, to force an error to be detected during verify_tls
fudged = f"{key[:-35]}c0ffee==\n{key[-25:]}".encode('utf-8')
self.assertRaises(ServerConfigException, verify_tls, crt, fudged)
def test_mismatched_tls(self):
crt, _ = create_self_signed_cert()
# generate another key
new_key = crypto.PKey()
new_key.generate_key(crypto.TYPE_RSA, 2048)
new_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, new_key).decode('utf-8')
self.assertRaises(ServerConfigException, verify_tls, crt, new_key)
def test_get_cert_issuer_info(self):
# valid certificate
org, cn = get_cert_issuer_info(valid_ceph_cert)
assert org == 'Ceph'
assert cn == 'cephadm'
# empty certificate
self.assertRaises(ServerConfigException, get_cert_issuer_info, '')
# invalid certificate
self.assertRaises(ServerConfigException, get_cert_issuer_info, invalid_cert)
| 3,786 | 66.625 | 1,059 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/__init__.py
|
from .module import Module
| 28 | 8.666667 | 26 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/module.py
|
import errno
import logging
import traceback
import threading
from mgr_module import MgrModule, Option
import orchestrator
from .fs.volume import VolumeClient
log = logging.getLogger(__name__)
goodchars = '[A-Za-z0-9-_.]'
class VolumesInfoWrapper():
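# Context manager that logs the start, successful finish, or failure (with
# traceback) of a volumes operation described by `context`.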
def __init__(self, f, context):
self.f = f
self.context = context
def __enter__(self):
log.info("Starting {}".format(self.context))
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None:
log.error("Failed {}:\n{}".format(self.context, "".join(traceback.format_exception(exc_type, exc_value, tb))))
else:
log.info("Finishing {}".format(self.context))
def mgr_cmd_wrap(f):
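# Decorator: wraps each handler so every invocation (its arguments plus inbuf)
# is logged via VolumesInfoWrapper on entry, exit and failure.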
def wrap(self, inbuf, cmd):
astr = []
for k in cmd:
astr.append("{}:{}".format(k, cmd[k]))
context = "{}({}) < \"{}\"".format(f.__name__, ", ".join(astr), inbuf)
with VolumesInfoWrapper(f, context):
return f(self, inbuf, cmd)
return wrap
class Module(orchestrator.OrchestratorClientMixin, MgrModule):
COMMANDS = [
{
'cmd': 'fs volume ls',
'desc': "List volumes",
'perm': 'r'
},
{
'cmd': 'fs volume create '
f'name=name,type=CephString,goodchars={goodchars} '
'name=placement,type=CephString,req=false ',
'desc': "Create a CephFS volume",
'perm': 'rw'
},
{
'cmd': 'fs volume rm '
'name=vol_name,type=CephString '
'name=yes-i-really-mean-it,type=CephString,req=false ',
'desc': "Delete a FS volume by passing --yes-i-really-mean-it flag",
'perm': 'rw'
},
{
'cmd': 'fs volume rename '
f'name=vol_name,type=CephString,goodchars={goodchars} '
f'name=new_vol_name,type=CephString,goodchars={goodchars} '
'name=yes_i_really_mean_it,type=CephBool,req=false ',
'desc': "Rename a CephFS volume by passing --yes-i-really-mean-it flag",
'perm': 'rw'
},
{
'cmd': 'fs volume info '
'name=vol_name,type=CephString '
'name=human_readable,type=CephBool,req=false ',
'desc': "Get the information of a CephFS volume",
'perm': 'r'
},
{
'cmd': 'fs subvolumegroup ls '
'name=vol_name,type=CephString ',
'desc': "List subvolumegroups",
'perm': 'r'
},
{
'cmd': 'fs subvolumegroup create '
'name=vol_name,type=CephString '
f'name=group_name,type=CephString,goodchars={goodchars} '
'name=size,type=CephInt,req=false '
'name=pool_layout,type=CephString,req=false '
'name=uid,type=CephInt,req=false '
'name=gid,type=CephInt,req=false '
'name=mode,type=CephString,req=false ',
'desc': "Create a CephFS subvolume group in a volume, and optionally, "
"with a specific data pool layout, and a specific numeric mode",
'perm': 'rw'
},
{
'cmd': 'fs subvolumegroup rm '
'name=vol_name,type=CephString '
'name=group_name,type=CephString '
'name=force,type=CephBool,req=false ',
'desc': "Delete a CephFS subvolume group in a volume",
'perm': 'rw'
},
{
'cmd': 'fs subvolumegroup info '
'name=vol_name,type=CephString '
'name=group_name,type=CephString ',
'desc': "Get the metadata of a CephFS subvolume group in a volume, ",
'perm': 'r'
},
{
'cmd': 'fs subvolumegroup resize '
'name=vol_name,type=CephString '
'name=group_name,type=CephString '
'name=new_size,type=CephString,req=true '
'name=no_shrink,type=CephBool,req=false ',
'desc': "Resize a CephFS subvolume group",
'perm': 'rw'
},
{
'cmd': 'fs subvolumegroup exist '
'name=vol_name,type=CephString ',
'desc': "Check a volume for the existence of subvolumegroup",
'perm': 'r'
},
{
'cmd': 'fs subvolume ls '
'name=vol_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "List subvolumes",
'perm': 'r'
},
{
'cmd': 'fs subvolume create '
'name=vol_name,type=CephString '
f'name=sub_name,type=CephString,goodchars={goodchars} '
'name=size,type=CephInt,req=false '
'name=group_name,type=CephString,req=false '
'name=pool_layout,type=CephString,req=false '
'name=uid,type=CephInt,req=false '
'name=gid,type=CephInt,req=false '
'name=mode,type=CephString,req=false '
'name=namespace_isolated,type=CephBool,req=false ',
'desc': "Create a CephFS subvolume in a volume, and optionally, "
"with a specific size (in bytes), a specific data pool layout, "
"a specific mode, in a specific subvolume group and in separate "
"RADOS namespace",
'perm': 'rw'
},
{
'cmd': 'fs subvolume rm '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=group_name,type=CephString,req=false '
'name=force,type=CephBool,req=false '
'name=retain_snapshots,type=CephBool,req=false ',
'desc': "Delete a CephFS subvolume in a volume, and optionally, "
"in a specific subvolume group, force deleting a cancelled or failed "
"clone, and retaining existing subvolume snapshots",
'perm': 'rw'
},
{
'cmd': 'fs subvolume authorize '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=auth_id,type=CephString '
'name=group_name,type=CephString,req=false '
'name=access_level,type=CephString,req=false '
'name=tenant_id,type=CephString,req=false '
'name=allow_existing_id,type=CephBool,req=false ',
'desc': "Allow a cephx auth ID access to a subvolume",
'perm': 'rw'
},
{
'cmd': 'fs subvolume deauthorize '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=auth_id,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Deny a cephx auth ID access to a subvolume",
'perm': 'rw'
},
{
'cmd': 'fs subvolume authorized_list '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "List auth IDs that have access to a subvolume",
'perm': 'r'
},
{
'cmd': 'fs subvolume evict '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=auth_id,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Evict clients based on auth IDs and subvolume mounted",
'perm': 'rw'
},
{
'cmd': 'fs subvolumegroup getpath '
'name=vol_name,type=CephString '
'name=group_name,type=CephString ',
'desc': "Get the mountpath of a CephFS subvolume group in a volume",
'perm': 'r'
},
{
'cmd': 'fs subvolume getpath '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Get the mountpath of a CephFS subvolume in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'rw'
},
{
'cmd': 'fs subvolume info '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Get the information of a CephFS subvolume in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'r'
},
{
'cmd': 'fs subvolume exist '
'name=vol_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Check a volume for the existence of a subvolume, "
"optionally in a specified subvolume group",
'perm': 'r'
},
{
'cmd': 'fs subvolume metadata set '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=key_name,type=CephString '
'name=value,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Set custom metadata (key-value) for a CephFS subvolume in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'rw'
},
{
'cmd': 'fs subvolume metadata get '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=key_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Get custom metadata associated with the key of a CephFS subvolume in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'r'
},
{
'cmd': 'fs subvolume metadata ls '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "List custom metadata (key-value pairs) of a CephFS subvolume in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'r'
},
{
'cmd': 'fs subvolume metadata rm '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=key_name,type=CephString '
'name=group_name,type=CephString,req=false '
'name=force,type=CephBool,req=false ',
'desc': "Remove custom metadata (key-value) associated with the key of a CephFS subvolume in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'rw'
},
{
'cmd': 'fs subvolumegroup pin'
' name=vol_name,type=CephString'
' name=group_name,type=CephString,req=true'
' name=pin_type,type=CephChoices,strings=export|distributed|random'
' name=pin_setting,type=CephString,req=true',
'desc': "Set MDS pinning policy for subvolumegroup",
'perm': 'rw'
},
{
'cmd': 'fs subvolumegroup snapshot ls '
'name=vol_name,type=CephString '
'name=group_name,type=CephString ',
'desc': "List subvolumegroup snapshots",
'perm': 'r'
},
{
'cmd': 'fs subvolumegroup snapshot create '
'name=vol_name,type=CephString '
'name=group_name,type=CephString '
'name=snap_name,type=CephString ',
'desc': "Create a snapshot of a CephFS subvolume group in a volume",
'perm': 'rw'
},
{
'cmd': 'fs subvolumegroup snapshot rm '
'name=vol_name,type=CephString '
'name=group_name,type=CephString '
'name=snap_name,type=CephString '
'name=force,type=CephBool,req=false ',
'desc': "Delete a snapshot of a CephFS subvolume group in a volume",
'perm': 'rw'
},
{
'cmd': 'fs subvolume snapshot ls '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "List subvolume snapshots",
'perm': 'r'
},
{
'cmd': 'fs subvolume snapshot create '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=snap_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Create a snapshot of a CephFS subvolume in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'rw'
},
{
'cmd': 'fs subvolume snapshot info '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=snap_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Get the information of a CephFS subvolume snapshot "
"and optionally, in a specific subvolume group",
'perm': 'r'
},
{
'cmd': 'fs subvolume snapshot metadata set '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=snap_name,type=CephString '
'name=key_name,type=CephString '
'name=value,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Set custom metadata (key-value) for a CephFS subvolume snapshot in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'rw'
},
{
'cmd': 'fs subvolume snapshot metadata get '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=snap_name,type=CephString '
'name=key_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Get custom metadata associated with the key of a CephFS subvolume snapshot in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'r'
},
{
'cmd': 'fs subvolume snapshot metadata ls '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=snap_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "List custom metadata (key-value pairs) of a CephFS subvolume snapshot in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'r'
},
{
'cmd': 'fs subvolume snapshot metadata rm '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=snap_name,type=CephString '
'name=key_name,type=CephString '
'name=group_name,type=CephString,req=false '
'name=force,type=CephBool,req=false ',
'desc': "Remove custom metadata (key-value) associated with the key of a CephFS subvolume snapshot in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'rw'
},
{
'cmd': 'fs subvolume snapshot rm '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=snap_name,type=CephString '
'name=group_name,type=CephString,req=false '
'name=force,type=CephBool,req=false ',
'desc': "Delete a snapshot of a CephFS subvolume in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'rw'
},
{
'cmd': 'fs subvolume resize '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=new_size,type=CephString,req=true '
'name=group_name,type=CephString,req=false '
'name=no_shrink,type=CephBool,req=false ',
'desc': "Resize a CephFS subvolume",
'perm': 'rw'
},
{
'cmd': 'fs subvolume pin'
' name=vol_name,type=CephString'
' name=sub_name,type=CephString'
' name=pin_type,type=CephChoices,strings=export|distributed|random'
' name=pin_setting,type=CephString,req=true'
' name=group_name,type=CephString,req=false',
'desc': "Set MDS pinning policy for subvolume",
'perm': 'rw'
},
{
'cmd': 'fs subvolume snapshot protect '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=snap_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "(deprecated) Protect snapshot of a CephFS subvolume in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'rw'
},
{
'cmd': 'fs subvolume snapshot unprotect '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=snap_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "(deprecated) Unprotect a snapshot of a CephFS subvolume in a volume, "
"and optionally, in a specific subvolume group",
'perm': 'rw'
},
{
'cmd': 'fs subvolume snapshot clone '
'name=vol_name,type=CephString '
'name=sub_name,type=CephString '
'name=snap_name,type=CephString '
'name=target_sub_name,type=CephString '
'name=pool_layout,type=CephString,req=false '
'name=group_name,type=CephString,req=false '
'name=target_group_name,type=CephString,req=false ',
'desc': "Clone a snapshot to target subvolume",
'perm': 'rw'
},
{
'cmd': 'fs clone status '
'name=vol_name,type=CephString '
'name=clone_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Get status on a cloned subvolume.",
'perm': 'r'
},
{
'cmd': 'fs clone cancel '
'name=vol_name,type=CephString '
'name=clone_name,type=CephString '
'name=group_name,type=CephString,req=false ',
'desc': "Cancel an pending or ongoing clone operation.",
'perm': 'r'
},
# volume ls [recursive]
# subvolume ls <volume>
# volume authorize/deauthorize
# subvolume authorize/deauthorize
# volume describe (free space, etc)
# volume auth list (vc.get_authorized_ids)
# snapshots?
# FIXME: we're doing CephFSVolumeClient.recover on every
# path where we instantiate and connect a client. Perhaps
# keep clients alive longer, or just pass a "don't recover"
# flag in if it's the >1st time we connected a particular
# volume in the lifetime of this module instance.
]
MODULE_OPTIONS = [
Option(
'max_concurrent_clones',
type='int',
default=4,
desc='Number of asynchronous cloner threads'),
Option(
'snapshot_clone_delay',
type='int',
default=0,
desc='Delay clone begin operation by snapshot_clone_delay seconds')
]
def __init__(self, *args, **kwargs):
self.inited = False
# for mypy
self.max_concurrent_clones = None
self.snapshot_clone_delay = None
self.lock = threading.Lock()
super(Module, self).__init__(*args, **kwargs)
# Initialize config option members
self.config_notify()
with self.lock:
self.vc = VolumeClient(self)
self.inited = True
def __del__(self):
self.vc.shutdown()
def shutdown(self):
self.vc.shutdown()
def config_notify(self):
"""
This method is called whenever one of our config options is changed.
"""
with self.lock:
for opt in self.MODULE_OPTIONS:
setattr(self,
opt['name'], # type: ignore
self.get_module_option(opt['name'])) # type: ignore
self.log.debug(' mgr option %s = %s',
opt['name'], getattr(self, opt['name'])) # type: ignore
if self.inited:
if opt['name'] == "max_concurrent_clones":
self.vc.cloner.reconfigure_max_concurrent_clones(self.max_concurrent_clones)
elif opt['name'] == "snapshot_clone_delay":
self.vc.cloner.reconfigure_snapshot_clone_delay(self.snapshot_clone_delay)
def handle_command(self, inbuf, cmd):
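# Dispatch on the command prefix, e.g. "fs subvolume create" is handled by
# self._cmd_fs_subvolume_create(inbuf, cmd).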
handler_name = "_cmd_" + cmd['prefix'].replace(" ", "_")
try:
handler = getattr(self, handler_name)
except AttributeError:
return -errno.EINVAL, "", "Unknown command"
return handler(inbuf, cmd)
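    # Dispatch sketch: a request whose 'prefix' is "fs subvolume create" is
    # routed to _cmd_fs_subvolume_create below; every handler returns the
    # usual (retcode, stdout, stderr) 3-tuple.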
@mgr_cmd_wrap
def _cmd_fs_volume_create(self, inbuf, cmd):
vol_id = cmd['name']
placement = cmd.get('placement', '')
return self.vc.create_fs_volume(vol_id, placement)
@mgr_cmd_wrap
def _cmd_fs_volume_rm(self, inbuf, cmd):
vol_name = cmd['vol_name']
confirm = cmd.get('yes-i-really-mean-it', None)
return self.vc.delete_fs_volume(vol_name, confirm)
@mgr_cmd_wrap
def _cmd_fs_volume_ls(self, inbuf, cmd):
return self.vc.list_fs_volumes()
@mgr_cmd_wrap
def _cmd_fs_volume_rename(self, inbuf, cmd):
return self.vc.rename_fs_volume(cmd['vol_name'],
cmd['new_vol_name'],
cmd.get('yes_i_really_mean_it', False))
@mgr_cmd_wrap
def _cmd_fs_volume_info(self, inbuf, cmd):
return self.vc.volume_info(vol_name=cmd['vol_name'],
human_readable=cmd.get('human_readable', False))
@mgr_cmd_wrap
def _cmd_fs_subvolumegroup_create(self, inbuf, cmd):
"""
:return: a 3-tuple of return code(int), empty string(str), error message (str)
"""
return self.vc.create_subvolume_group(
vol_name=cmd['vol_name'], group_name=cmd['group_name'], size=cmd.get('size', None),
pool_layout=cmd.get('pool_layout', None), mode=cmd.get('mode', '755'),
uid=cmd.get('uid', None), gid=cmd.get('gid', None))
@mgr_cmd_wrap
def _cmd_fs_subvolumegroup_rm(self, inbuf, cmd):
"""
:return: a 3-tuple of return code(int), empty string(str), error message (str)
"""
return self.vc.remove_subvolume_group(vol_name=cmd['vol_name'],
group_name=cmd['group_name'],
force=cmd.get('force', False))
@mgr_cmd_wrap
def _cmd_fs_subvolumegroup_info(self, inbuf, cmd):
return self.vc.subvolumegroup_info(vol_name=cmd['vol_name'],
group_name=cmd['group_name'])
@mgr_cmd_wrap
def _cmd_fs_subvolumegroup_resize(self, inbuf, cmd):
return self.vc.resize_subvolume_group(vol_name=cmd['vol_name'],
group_name=cmd['group_name'],
new_size=cmd['new_size'],
no_shrink=cmd.get('no_shrink', False))
@mgr_cmd_wrap
def _cmd_fs_subvolumegroup_ls(self, inbuf, cmd):
return self.vc.list_subvolume_groups(vol_name=cmd['vol_name'])
@mgr_cmd_wrap
def _cmd_fs_subvolumegroup_exist(self, inbuf, cmd):
return self.vc.subvolume_group_exists(vol_name=cmd['vol_name'])
@mgr_cmd_wrap
def _cmd_fs_subvolume_create(self, inbuf, cmd):
"""
:return: a 3-tuple of return code(int), empty string(str), error message (str)
"""
return self.vc.create_subvolume(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
group_name=cmd.get('group_name', None),
size=cmd.get('size', None),
pool_layout=cmd.get('pool_layout', None),
uid=cmd.get('uid', None),
gid=cmd.get('gid', None),
mode=cmd.get('mode', '755'),
namespace_isolated=cmd.get('namespace_isolated', False))
@mgr_cmd_wrap
def _cmd_fs_subvolume_rm(self, inbuf, cmd):
"""
:return: a 3-tuple of return code(int), empty string(str), error message (str)
"""
return self.vc.remove_subvolume(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
group_name=cmd.get('group_name', None),
force=cmd.get('force', False),
retain_snapshots=cmd.get('retain_snapshots', False))
@mgr_cmd_wrap
def _cmd_fs_subvolume_authorize(self, inbuf, cmd):
"""
:return: a 3-tuple of return code(int), secret key(str), error message (str)
"""
return self.vc.authorize_subvolume(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
auth_id=cmd['auth_id'],
group_name=cmd.get('group_name', None),
access_level=cmd.get('access_level', 'rw'),
tenant_id=cmd.get('tenant_id', None),
allow_existing_id=cmd.get('allow_existing_id', False))
@mgr_cmd_wrap
def _cmd_fs_subvolume_deauthorize(self, inbuf, cmd):
"""
:return: a 3-tuple of return code(int), empty string(str), error message (str)
"""
return self.vc.deauthorize_subvolume(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
auth_id=cmd['auth_id'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_authorized_list(self, inbuf, cmd):
"""
:return: a 3-tuple of return code(int), list of authids(json), error message (str)
"""
return self.vc.authorized_list(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_evict(self, inbuf, cmd):
"""
        :return: a 3-tuple of return code(int), empty string(str), error message (str)
"""
return self.vc.evict(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
auth_id=cmd['auth_id'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_ls(self, inbuf, cmd):
return self.vc.list_subvolumes(vol_name=cmd['vol_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolumegroup_getpath(self, inbuf, cmd):
return self.vc.getpath_subvolume_group(
vol_name=cmd['vol_name'], group_name=cmd['group_name'])
@mgr_cmd_wrap
def _cmd_fs_subvolume_getpath(self, inbuf, cmd):
return self.vc.subvolume_getpath(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_info(self, inbuf, cmd):
return self.vc.subvolume_info(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_exist(self, inbuf, cmd):
return self.vc.subvolume_exists(vol_name=cmd['vol_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_metadata_set(self, inbuf, cmd):
return self.vc.set_user_metadata(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
key_name=cmd['key_name'],
value=cmd['value'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_metadata_get(self, inbuf, cmd):
return self.vc.get_user_metadata(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
key_name=cmd['key_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_metadata_ls(self, inbuf, cmd):
return self.vc.list_user_metadata(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_metadata_rm(self, inbuf, cmd):
return self.vc.remove_user_metadata(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
key_name=cmd['key_name'],
group_name=cmd.get('group_name', None),
force=cmd.get('force', False))
@mgr_cmd_wrap
def _cmd_fs_subvolumegroup_pin(self, inbuf, cmd):
return self.vc.pin_subvolume_group(vol_name=cmd['vol_name'],
group_name=cmd['group_name'], pin_type=cmd['pin_type'],
pin_setting=cmd['pin_setting'])
@mgr_cmd_wrap
def _cmd_fs_subvolumegroup_snapshot_create(self, inbuf, cmd):
return self.vc.create_subvolume_group_snapshot(vol_name=cmd['vol_name'],
group_name=cmd['group_name'],
snap_name=cmd['snap_name'])
@mgr_cmd_wrap
def _cmd_fs_subvolumegroup_snapshot_rm(self, inbuf, cmd):
return self.vc.remove_subvolume_group_snapshot(vol_name=cmd['vol_name'],
group_name=cmd['group_name'],
snap_name=cmd['snap_name'],
force=cmd.get('force', False))
@mgr_cmd_wrap
def _cmd_fs_subvolumegroup_snapshot_ls(self, inbuf, cmd):
return self.vc.list_subvolume_group_snapshots(vol_name=cmd['vol_name'],
group_name=cmd['group_name'])
@mgr_cmd_wrap
def _cmd_fs_subvolume_snapshot_create(self, inbuf, cmd):
return self.vc.create_subvolume_snapshot(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
snap_name=cmd['snap_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_snapshot_rm(self, inbuf, cmd):
return self.vc.remove_subvolume_snapshot(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
snap_name=cmd['snap_name'],
group_name=cmd.get('group_name', None),
force=cmd.get('force', False))
@mgr_cmd_wrap
def _cmd_fs_subvolume_snapshot_info(self, inbuf, cmd):
return self.vc.subvolume_snapshot_info(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
snap_name=cmd['snap_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_snapshot_metadata_set(self, inbuf, cmd):
return self.vc.set_subvolume_snapshot_metadata(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
snap_name=cmd['snap_name'],
key_name=cmd['key_name'],
value=cmd['value'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_snapshot_metadata_get(self, inbuf, cmd):
return self.vc.get_subvolume_snapshot_metadata(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
snap_name=cmd['snap_name'],
key_name=cmd['key_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_snapshot_metadata_ls(self, inbuf, cmd):
return self.vc.list_subvolume_snapshot_metadata(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
snap_name=cmd['snap_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_snapshot_metadata_rm(self, inbuf, cmd):
return self.vc.remove_subvolume_snapshot_metadata(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
snap_name=cmd['snap_name'],
key_name=cmd['key_name'],
group_name=cmd.get('group_name', None),
force=cmd.get('force', False))
@mgr_cmd_wrap
def _cmd_fs_subvolume_snapshot_ls(self, inbuf, cmd):
return self.vc.list_subvolume_snapshots(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_resize(self, inbuf, cmd):
return self.vc.resize_subvolume(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
new_size=cmd['new_size'], group_name=cmd.get('group_name', None),
no_shrink=cmd.get('no_shrink', False))
@mgr_cmd_wrap
def _cmd_fs_subvolume_pin(self, inbuf, cmd):
return self.vc.subvolume_pin(vol_name=cmd['vol_name'],
sub_name=cmd['sub_name'], pin_type=cmd['pin_type'],
pin_setting=cmd['pin_setting'],
group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_snapshot_protect(self, inbuf, cmd):
return self.vc.protect_subvolume_snapshot(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
snap_name=cmd['snap_name'], group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_snapshot_unprotect(self, inbuf, cmd):
return self.vc.unprotect_subvolume_snapshot(vol_name=cmd['vol_name'], sub_name=cmd['sub_name'],
snap_name=cmd['snap_name'], group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_subvolume_snapshot_clone(self, inbuf, cmd):
return self.vc.clone_subvolume_snapshot(
vol_name=cmd['vol_name'], sub_name=cmd['sub_name'], snap_name=cmd['snap_name'],
group_name=cmd.get('group_name', None), pool_layout=cmd.get('pool_layout', None),
target_sub_name=cmd['target_sub_name'], target_group_name=cmd.get('target_group_name', None))
@mgr_cmd_wrap
def _cmd_fs_clone_status(self, inbuf, cmd):
return self.vc.clone_status(
vol_name=cmd['vol_name'], clone_name=cmd['clone_name'], group_name=cmd.get('group_name', None))
@mgr_cmd_wrap
def _cmd_fs_clone_cancel(self, inbuf, cmd):
return self.vc.clone_cancel(
vol_name=cmd['vol_name'], clone_name=cmd['clone_name'], group_name=cmd.get('group_name', None))
ceph-main/src/pybind/mgr/volumes/fs/__init__.py
ceph-main/src/pybind/mgr/volumes/fs/async_cloner.py
import os
import stat
import time
import errno
import logging
from contextlib import contextmanager
from typing import Optional
import cephfs
from mgr_util import lock_timeout_log
from .async_job import AsyncJobs
from .exception import IndexException, MetadataMgrException, OpSmException, VolumeException
from .fs_util import copy_file
from .operations.versions.op_sm import SubvolumeOpSm
from .operations.versions.subvolume_attrs import SubvolumeTypes, SubvolumeStates, SubvolumeActions
from .operations.resolver import resolve
from .operations.volume import open_volume, open_volume_lockless
from .operations.group import open_group
from .operations.subvolume import open_subvol
from .operations.clone_index import open_clone_index
from .operations.template import SubvolumeOpType
log = logging.getLogger(__name__)
# helper for fetching a clone entry for a given volume
def get_next_clone_entry(fs_client, volspec, volname, running_jobs):
log.debug("fetching clone entry for volume '{0}'".format(volname))
try:
with open_volume_lockless(fs_client, volname) as fs_handle:
try:
with open_clone_index(fs_handle, volspec) as clone_index:
job = clone_index.get_oldest_clone_entry(running_jobs)
return 0, job
except IndexException as ve:
if ve.errno == -errno.ENOENT:
return 0, None
raise ve
except VolumeException as ve:
log.error("error fetching clone entry for volume '{0}' ({1})".format(volname, ve))
return ve.errno, None
@contextmanager
def open_at_volume(fs_client, volspec, volname, groupname, subvolname, op_type):
with open_volume(fs_client, volname) as fs_handle:
with open_group(fs_handle, volspec, groupname) as group:
with open_subvol(fs_client.mgr, fs_handle, volspec, group, subvolname, op_type) as subvolume:
yield subvolume
@contextmanager
def open_at_group(fs_client, fs_handle, volspec, groupname, subvolname, op_type):
with open_group(fs_handle, volspec, groupname) as group:
with open_subvol(fs_client.mgr, fs_handle, volspec, group, subvolname, op_type) as subvolume:
yield subvolume
@contextmanager
def open_at_group_unique(fs_client, fs_handle, volspec, s_groupname, s_subvolname, c_subvolume, c_groupname, c_subvolname, op_type):
# if a snapshot of a retained subvolume is being cloned to recreate the same subvolume, return
# the clone subvolume as the source subvolume
if s_groupname == c_groupname and s_subvolname == c_subvolname:
yield c_subvolume
else:
with open_at_group(fs_client, fs_handle, volspec, s_groupname, s_subvolname, op_type) as s_subvolume:
yield s_subvolume
@contextmanager
def open_clone_subvolume_pair(fs_client, fs_handle, volspec, volname, groupname, subvolname):
with open_at_group(fs_client, fs_handle, volspec, groupname, subvolname, SubvolumeOpType.CLONE_INTERNAL) as clone_subvolume:
s_volname, s_groupname, s_subvolname, s_snapname = get_clone_source(clone_subvolume)
if groupname == s_groupname and subvolname == s_subvolname:
# use the same subvolume to avoid metadata overwrites
yield (clone_subvolume, clone_subvolume, s_snapname)
else:
with open_at_group(fs_client, fs_handle, volspec, s_groupname, s_subvolname, SubvolumeOpType.CLONE_SOURCE) as source_subvolume:
yield (clone_subvolume, source_subvolume, s_snapname)
def get_clone_state(fs_client, volspec, volname, groupname, subvolname):
with open_at_volume(fs_client, volspec, volname, groupname, subvolname, SubvolumeOpType.CLONE_INTERNAL) as subvolume:
return subvolume.state
def set_clone_state(fs_client, volspec, volname, groupname, subvolname, state):
with open_at_volume(fs_client, volspec, volname, groupname, subvolname, SubvolumeOpType.CLONE_INTERNAL) as subvolume:
subvolume.state = (state, True)
def get_clone_source(clone_subvolume):
source = clone_subvolume._get_clone_source()
return (source['volume'], source.get('group', None), source['subvolume'], source['snapshot'])
def get_next_state_on_error(errnum):
if errnum == -errno.EINTR:
next_state = SubvolumeOpSm.transition(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_INPROGRESS,
SubvolumeActions.ACTION_CANCELLED)
else:
# jump to failed state, on all other errors
next_state = SubvolumeOpSm.transition(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_INPROGRESS,
SubvolumeActions.ACTION_FAILED)
return next_state
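# Rough flow of the clone state machine driven by the handlers below (the
# actual transition targets live in SubvolumeOpSm, not shown here):
#   STATE_PENDING    -> handle_clone_pending():     proceed, or cancel on request
#   STATE_INPROGRESS -> handle_clone_in_progress(): copy data, then complete,
#                       fail or cancel
#   STATE_COMPLETE / STATE_FAILED / STATE_CANCELED: terminal handlers detach the
#                       source snapshot (and, on success, drop the clone source
#                       metadata).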
def handle_clone_pending(fs_client, volspec, volname, index, groupname, subvolname, should_cancel):
try:
if should_cancel():
next_state = SubvolumeOpSm.transition(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_PENDING,
SubvolumeActions.ACTION_CANCELLED)
update_clone_failure_status(fs_client, volspec, volname, groupname, subvolname,
VolumeException(-errno.EINTR, "user interrupted clone operation"))
else:
next_state = SubvolumeOpSm.transition(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_PENDING,
SubvolumeActions.ACTION_SUCCESS)
except OpSmException as oe:
raise VolumeException(oe.errno, oe.error_str)
return (next_state, False)
def sync_attrs(fs_handle, target_path, source_statx):
try:
fs_handle.lchown(target_path, source_statx["uid"], source_statx["gid"])
fs_handle.lutimes(target_path, (time.mktime(source_statx["atime"].timetuple()),
time.mktime(source_statx["mtime"].timetuple())))
fs_handle.lchmod(target_path, source_statx["mode"])
except cephfs.Error as e:
log.warning("error synchronizing attrs for {0} ({1})".format(target_path, e))
raise e
def bulk_copy(fs_handle, source_path, dst_path, should_cancel):
"""
bulk copy data from source to destination -- only directories, symlinks
and regular files are synced.
"""
log.info("copying data from {0} to {1}".format(source_path, dst_path))
def cptree(src_root_path, dst_root_path):
log.debug("cptree: {0} -> {1}".format(src_root_path, dst_root_path))
try:
with fs_handle.opendir(src_root_path) as dir_handle:
d = fs_handle.readdir(dir_handle)
while d and not should_cancel():
if d.d_name not in (b".", b".."):
log.debug("d={0}".format(d))
d_full_src = os.path.join(src_root_path, d.d_name)
d_full_dst = os.path.join(dst_root_path, d.d_name)
stx = fs_handle.statx(d_full_src, cephfs.CEPH_STATX_MODE |
cephfs.CEPH_STATX_UID |
cephfs.CEPH_STATX_GID |
cephfs.CEPH_STATX_ATIME |
cephfs.CEPH_STATX_MTIME |
cephfs.CEPH_STATX_SIZE,
cephfs.AT_SYMLINK_NOFOLLOW)
handled = True
mo = stx["mode"] & ~stat.S_IFMT(stx["mode"])
if stat.S_ISDIR(stx["mode"]):
log.debug("cptree: (DIR) {0}".format(d_full_src))
try:
fs_handle.mkdir(d_full_dst, mo)
except cephfs.Error as e:
if not e.args[0] == errno.EEXIST:
raise
cptree(d_full_src, d_full_dst)
elif stat.S_ISLNK(stx["mode"]):
log.debug("cptree: (SYMLINK) {0}".format(d_full_src))
target = fs_handle.readlink(d_full_src, 4096)
try:
fs_handle.symlink(target[:stx["size"]], d_full_dst)
except cephfs.Error as e:
if not e.args[0] == errno.EEXIST:
raise
elif stat.S_ISREG(stx["mode"]):
log.debug("cptree: (REG) {0}".format(d_full_src))
copy_file(fs_handle, d_full_src, d_full_dst, mo, cancel_check=should_cancel)
else:
handled = False
log.warning("cptree: (IGNORE) {0}".format(d_full_src))
if handled:
sync_attrs(fs_handle, d_full_dst, stx)
d = fs_handle.readdir(dir_handle)
stx_root = fs_handle.statx(src_root_path, cephfs.CEPH_STATX_ATIME |
cephfs.CEPH_STATX_MTIME,
cephfs.AT_SYMLINK_NOFOLLOW)
fs_handle.lutimes(dst_root_path, (time.mktime(stx_root["atime"].timetuple()),
time.mktime(stx_root["mtime"].timetuple())))
except cephfs.Error as e:
if not e.args[0] == errno.ENOENT:
raise VolumeException(-e.args[0], e.args[1])
cptree(source_path, dst_path)
if should_cancel():
raise VolumeException(-errno.EINTR, "user interrupted clone operation")
def set_quota_on_clone(fs_handle, clone_volumes_pair):
src_path = clone_volumes_pair[1].snapshot_data_path(clone_volumes_pair[2])
dst_path = clone_volumes_pair[0].path
quota = None # type: Optional[int]
try:
quota = int(fs_handle.getxattr(src_path, 'ceph.quota.max_bytes').decode('utf-8'))
except cephfs.NoData:
pass
if quota is not None:
try:
fs_handle.setxattr(dst_path, 'ceph.quota.max_bytes', str(quota).encode('utf-8'), 0)
except cephfs.InvalidValue:
raise VolumeException(-errno.EINVAL, "invalid size specified: '{0}'".format(quota))
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
quota_files = None # type: Optional[int]
try:
quota_files = int(fs_handle.getxattr(src_path, 'ceph.quota.max_files').decode('utf-8'))
except cephfs.NoData:
pass
if quota_files is not None:
try:
fs_handle.setxattr(dst_path, 'ceph.quota.max_files', str(quota_files).encode('utf-8'), 0)
except cephfs.InvalidValue:
raise VolumeException(-errno.EINVAL, "invalid file count specified: '{0}'".format(quota_files))
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def do_clone(fs_client, volspec, volname, groupname, subvolname, should_cancel):
with open_volume_lockless(fs_client, volname) as fs_handle:
with open_clone_subvolume_pair(fs_client, fs_handle, volspec, volname, groupname, subvolname) as clone_volumes:
src_path = clone_volumes[1].snapshot_data_path(clone_volumes[2])
dst_path = clone_volumes[0].path
bulk_copy(fs_handle, src_path, dst_path, should_cancel)
set_quota_on_clone(fs_handle, clone_volumes)
def update_clone_failure_status(fs_client, volspec, volname, groupname, subvolname, ve):
with open_volume_lockless(fs_client, volname) as fs_handle:
with open_clone_subvolume_pair(fs_client, fs_handle, volspec, volname, groupname, subvolname) as clone_volumes:
if ve.errno == -errno.EINTR:
clone_volumes[0].add_clone_failure(-ve.errno, "user interrupted clone operation")
else:
clone_volumes[0].add_clone_failure(-ve.errno, ve.error_str)
def log_clone_failure(volname, groupname, subvolname, ve):
if ve.errno == -errno.EINTR:
log.info("Clone cancelled: ({0}, {1}, {2})".format(volname, groupname, subvolname))
elif ve.errno == -errno.EDQUOT:
log.error("Clone failed: ({0}, {1}, {2}, reason -> Disk quota exceeded)".format(volname, groupname, subvolname))
else:
log.error("Clone failed: ({0}, {1}, {2}, reason -> {3})".format(volname, groupname, subvolname, ve))
def handle_clone_in_progress(fs_client, volspec, volname, index, groupname, subvolname, should_cancel):
try:
do_clone(fs_client, volspec, volname, groupname, subvolname, should_cancel)
next_state = SubvolumeOpSm.transition(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_INPROGRESS,
SubvolumeActions.ACTION_SUCCESS)
except VolumeException as ve:
update_clone_failure_status(fs_client, volspec, volname, groupname, subvolname, ve)
log_clone_failure(volname, groupname, subvolname, ve)
next_state = get_next_state_on_error(ve.errno)
except OpSmException as oe:
raise VolumeException(oe.errno, oe.error_str)
return (next_state, False)
def handle_clone_failed(fs_client, volspec, volname, index, groupname, subvolname, should_cancel):
try:
with open_volume(fs_client, volname) as fs_handle:
# detach source but leave the clone section intact for later inspection
with open_clone_subvolume_pair(fs_client, fs_handle, volspec, volname, groupname, subvolname) as clone_volumes:
clone_volumes[1].detach_snapshot(clone_volumes[2], index)
except (MetadataMgrException, VolumeException) as e:
log.error("failed to detach clone from snapshot: {0}".format(e))
return (None, True)
def handle_clone_complete(fs_client, volspec, volname, index, groupname, subvolname, should_cancel):
try:
with open_volume(fs_client, volname) as fs_handle:
with open_clone_subvolume_pair(fs_client, fs_handle, volspec, volname, groupname, subvolname) as clone_volumes:
clone_volumes[1].detach_snapshot(clone_volumes[2], index)
clone_volumes[0].remove_clone_source(flush=True)
except (MetadataMgrException, VolumeException) as e:
log.error("failed to detach clone from snapshot: {0}".format(e))
return (None, True)
def start_clone_sm(fs_client, volspec, volname, index, groupname, subvolname, state_table, should_cancel, snapshot_clone_delay):
finished = False
current_state = None
try:
current_state = get_clone_state(fs_client, volspec, volname, groupname, subvolname)
log.debug("cloning ({0}, {1}, {2}) -- starting state \"{3}\"".format(volname, groupname, subvolname, current_state))
if current_state == SubvolumeStates.STATE_PENDING:
time.sleep(snapshot_clone_delay)
log.info("Delayed cloning ({0}, {1}, {2}) -- by {3} seconds".format(volname, groupname, subvolname, snapshot_clone_delay))
while not finished:
handler = state_table.get(current_state, None)
if not handler:
raise VolumeException(-errno.EINVAL, "invalid clone state: \"{0}\"".format(current_state))
(next_state, finished) = handler(fs_client, volspec, volname, index, groupname, subvolname, should_cancel)
if next_state:
log.debug("({0}, {1}, {2}) transition state [\"{3}\" => \"{4}\"]".format(volname, groupname, subvolname,\
current_state, next_state))
set_clone_state(fs_client, volspec, volname, groupname, subvolname, next_state)
current_state = next_state
except (MetadataMgrException, VolumeException) as e:
log.error(f"clone failed for ({volname}, {groupname}, {subvolname}) "
f"(current_state: {current_state}, reason: {e} {os.strerror(-e.args[0])})")
raise
def clone(fs_client, volspec, volname, index, clone_path, state_table, should_cancel, snapshot_clone_delay):
log.info("cloning to subvolume path: {0}".format(clone_path))
resolved = resolve(volspec, clone_path)
groupname = resolved[0]
subvolname = resolved[1]
log.debug("resolved to [group: {0}, subvolume: {1}]".format(groupname, subvolname))
try:
log.info("starting clone: ({0}, {1}, {2})".format(volname, groupname, subvolname))
start_clone_sm(fs_client, volspec, volname, index, groupname, subvolname, state_table, should_cancel, snapshot_clone_delay)
log.info("finished clone: ({0}, {1}, {2})".format(volname, groupname, subvolname))
except (MetadataMgrException, VolumeException) as e:
log.error(f"clone failed for ({volname}, {groupname}, {subvolname}), reason: {e} {os.strerror(-e.args[0])}")
class Cloner(AsyncJobs):
"""
Asynchronous cloner: pool of threads to copy data from a snapshot to a subvolume.
this relies on a simple state machine (which mimics states from SubvolumeOpSm class) as
the driver. file types supported are directories, symbolic links and regular files.
"""
def __init__(self, volume_client, tp_size, snapshot_clone_delay):
self.vc = volume_client
self.snapshot_clone_delay = snapshot_clone_delay
self.state_table = {
SubvolumeStates.STATE_PENDING : handle_clone_pending,
SubvolumeStates.STATE_INPROGRESS : handle_clone_in_progress,
SubvolumeStates.STATE_COMPLETE : handle_clone_complete,
SubvolumeStates.STATE_FAILED : handle_clone_failed,
SubvolumeStates.STATE_CANCELED : handle_clone_failed,
}
super(Cloner, self).__init__(volume_client, "cloner", tp_size)
def reconfigure_max_concurrent_clones(self, tp_size):
return super(Cloner, self).reconfigure_max_async_threads(tp_size)
def reconfigure_snapshot_clone_delay(self, timeout):
self.snapshot_clone_delay = timeout
def is_clone_cancelable(self, clone_state):
return not (SubvolumeOpSm.is_complete_state(clone_state) or SubvolumeOpSm.is_failed_state(clone_state))
def get_clone_tracking_index(self, fs_handle, clone_subvolume):
with open_clone_index(fs_handle, self.vc.volspec) as index:
return index.find_clone_entry_index(clone_subvolume.base_path)
def _cancel_pending_clone(self, fs_handle, clone_subvolume, clone_subvolname, clone_groupname, status, track_idx):
clone_state = SubvolumeStates.from_value(status['state'])
assert self.is_clone_cancelable(clone_state)
s_groupname = status['source'].get('group', None)
s_subvolname = status['source']['subvolume']
s_snapname = status['source']['snapshot']
with open_at_group_unique(self.fs_client, fs_handle, self.vc.volspec, s_groupname, s_subvolname, clone_subvolume,
clone_groupname, clone_subvolname, SubvolumeOpType.CLONE_SOURCE) as s_subvolume:
next_state = SubvolumeOpSm.transition(SubvolumeTypes.TYPE_CLONE,
clone_state,
SubvolumeActions.ACTION_CANCELLED)
clone_subvolume.state = (next_state, True)
clone_subvolume.add_clone_failure(errno.EINTR, "user interrupted clone operation")
s_subvolume.detach_snapshot(s_snapname, track_idx.decode('utf-8'))
def cancel_job(self, volname, job):
"""
override base class `cancel_job`. interpret @job as (clone, group) tuple.
"""
clonename = job[0]
groupname = job[1]
track_idx = None
try:
with open_volume(self.fs_client, volname) as fs_handle:
with open_group(fs_handle, self.vc.volspec, groupname) as group:
with open_subvol(self.fs_client.mgr, fs_handle, self.vc.volspec, group, clonename, SubvolumeOpType.CLONE_CANCEL) as clone_subvolume:
status = clone_subvolume.status
clone_state = SubvolumeStates.from_value(status['state'])
if not self.is_clone_cancelable(clone_state):
raise VolumeException(-errno.EINVAL, "cannot cancel -- clone finished (check clone status)")
track_idx = self.get_clone_tracking_index(fs_handle, clone_subvolume)
if not track_idx:
log.warning("cannot lookup clone tracking index for {0}".format(clone_subvolume.base_path))
raise VolumeException(-errno.EINVAL, "error canceling clone")
clone_job = (track_idx, clone_subvolume.base_path)
jobs = [j[0] for j in self.jobs[volname]]
with lock_timeout_log(self.lock):
if SubvolumeOpSm.is_init_state(SubvolumeTypes.TYPE_CLONE, clone_state) and not clone_job in jobs:
logging.debug("Cancelling pending job {0}".format(clone_job))
# clone has not started yet -- cancel right away.
self._cancel_pending_clone(fs_handle, clone_subvolume, clonename, groupname, status, track_idx)
return
# cancelling an on-going clone would persist "canceled" state in subvolume metadata.
# to persist the new state, async cloner accesses the volume in exclusive mode.
# accessing the volume in exclusive mode here would lead to deadlock.
assert track_idx is not None
with lock_timeout_log(self.lock):
with open_volume_lockless(self.fs_client, volname) as fs_handle:
with open_group(fs_handle, self.vc.volspec, groupname) as group:
with open_subvol(self.fs_client.mgr, fs_handle, self.vc.volspec, group, clonename, SubvolumeOpType.CLONE_CANCEL) as clone_subvolume:
if not self._cancel_job(volname, (track_idx, clone_subvolume.base_path)):
raise VolumeException(-errno.EINVAL, "cannot cancel -- clone finished (check clone status)")
except (IndexException, MetadataMgrException) as e:
log.error("error cancelling clone {0}: ({1})".format(job, e))
raise VolumeException(-errno.EINVAL, "error canceling clone")
def get_next_job(self, volname, running_jobs):
return get_next_clone_entry(self.fs_client, self.vc.volspec, volname, running_jobs)
def execute_job(self, volname, job, should_cancel):
clone(self.fs_client, self.vc.volspec, volname, job[0].decode('utf-8'), job[1].decode('utf-8'), self.state_table, should_cancel, self.snapshot_clone_delay)
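# Wiring note (see volume.py below): VolumeClient creates a single
# Cloner(self, mgr.max_concurrent_clones, mgr.snapshot_clone_delay) and calls
# cloner.queue_job(<fs_name>) per filesystem so pending clone index entries get
# picked up by the worker threads.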
ceph-main/src/pybind/mgr/volumes/fs/async_job.py
import sys
import time
import logging
import threading
import traceback
from collections import deque
from mgr_util import lock_timeout_log, CephfsClient
from .exception import NotImplementedException
log = logging.getLogger(__name__)
class JobThread(threading.Thread):
# this is "not" configurable and there is no need for it to be
# configurable. if a thread encounters an exception, we retry
# until it hits this many consecutive exceptions.
MAX_RETRIES_ON_EXCEPTION = 10
def __init__(self, async_job, volume_client, name):
self.vc = volume_client
self.async_job = async_job
# event object to cancel jobs
self.cancel_event = threading.Event()
threading.Thread.__init__(self, name=name)
def run(self):
retries = 0
thread_id = threading.currentThread()
assert isinstance(thread_id, JobThread)
thread_name = thread_id.getName()
log.debug("thread [{0}] starting".format(thread_name))
while retries < JobThread.MAX_RETRIES_ON_EXCEPTION:
vol_job = None
try:
# fetch next job to execute
with self.async_job.lock:
while True:
if self.should_reconfigure_num_threads():
log.info("thread [{0}] terminating due to reconfigure".format(thread_name))
self.async_job.threads.remove(self)
return
vol_job = self.async_job.get_job()
if vol_job:
break
self.async_job.cv.wait()
self.async_job.register_async_job(vol_job[0], vol_job[1], thread_id)
# execute the job (outside lock)
self.async_job.execute_job(vol_job[0], vol_job[1], should_cancel=lambda: thread_id.should_cancel())
retries = 0
except NotImplementedException:
raise
except Exception:
# unless the jobs fetching and execution routines are not implemented
# retry till we hit cap limit.
retries += 1
log.warning("thread [{0}] encountered fatal error: (attempt#"
" {1}/{2})".format(thread_name, retries, JobThread.MAX_RETRIES_ON_EXCEPTION))
exc_type, exc_value, exc_traceback = sys.exc_info()
log.warning("traceback: {0}".format("".join(
traceback.format_exception(exc_type, exc_value, exc_traceback))))
finally:
# when done, unregister the job
if vol_job:
with self.async_job.lock:
self.async_job.unregister_async_job(vol_job[0], vol_job[1], thread_id)
time.sleep(1)
log.error("thread [{0}] reached exception limit, bailing out...".format(thread_name))
self.vc.cluster_log("thread {0} bailing out due to exception".format(thread_name))
with self.async_job.lock:
self.async_job.threads.remove(self)
def should_reconfigure_num_threads(self):
# reconfigure of max_concurrent_clones
return len(self.async_job.threads) > self.async_job.nr_concurrent_jobs
def cancel_job(self):
self.cancel_event.set()
def should_cancel(self):
return self.cancel_event.is_set()
def reset_cancel(self):
self.cancel_event.clear()
class AsyncJobs(threading.Thread):
"""
Class providing asynchronous execution of jobs via worker threads.
`jobs` are grouped by `volume`, so a `volume` can have N number of
`jobs` executing concurrently (capped by number of concurrent jobs).
Usability is simple: subclass this and implement the following:
- get_next_job(volname, running_jobs)
- execute_job(volname, job, should_cancel)
... and do not forget to invoke base class constructor.
Job cancelation is for a volume as a whole, i.e., all executing jobs
for a volume are canceled. Cancelation is poll based -- jobs need to
periodically check if cancelation is requested, after which the job
should return as soon as possible. Cancelation check is provided
via `should_cancel()` lambda passed to `execute_job()`.
"""
def __init__(self, volume_client, name_pfx, nr_concurrent_jobs):
threading.Thread.__init__(self, name="{0}.tick".format(name_pfx))
self.vc = volume_client
# queue of volumes for starting async jobs
self.q = deque() # type: deque
# volume => job tracking
self.jobs = {}
# lock, cv for kickstarting jobs
self.lock = threading.Lock()
self.cv = threading.Condition(self.lock)
# cv for job cancelation
self.waiting = False
self.stopping = threading.Event()
self.cancel_cv = threading.Condition(self.lock)
self.nr_concurrent_jobs = nr_concurrent_jobs
self.name_pfx = name_pfx
# each async job group uses its own libcephfs connection (pool)
self.fs_client = CephfsClient(self.vc.mgr)
self.threads = []
for i in range(self.nr_concurrent_jobs):
self.threads.append(JobThread(self, volume_client, name="{0}.{1}".format(self.name_pfx, i)))
self.threads[-1].start()
self.start()
def run(self):
log.debug("tick thread {} starting".format(self.name))
with lock_timeout_log(self.lock):
while not self.stopping.is_set():
c = len(self.threads)
if c > self.nr_concurrent_jobs:
# Decrease concurrency: notify threads which are waiting for a job to terminate.
log.debug("waking threads to terminate due to job reduction")
self.cv.notifyAll()
elif c < self.nr_concurrent_jobs:
# Increase concurrency: create more threads.
log.debug("creating new threads to job increase")
for i in range(c, self.nr_concurrent_jobs):
self.threads.append(JobThread(self, self.vc, name="{0}.{1}.{2}".format(self.name_pfx, time.time(), i)))
self.threads[-1].start()
self.cv.wait(timeout=5)
def shutdown(self):
self.stopping.set()
self.cancel_all_jobs()
with self.lock:
self.cv.notifyAll()
self.join()
def reconfigure_max_async_threads(self, nr_concurrent_jobs):
"""
reconfigure number of cloner threads
"""
self.nr_concurrent_jobs = nr_concurrent_jobs
def get_job(self):
log.debug("processing {0} volume entries".format(len(self.q)))
nr_vols = len(self.q)
to_remove = []
next_job = None
while nr_vols > 0:
volname = self.q[0]
# do this now so that the other thread pick up jobs for other volumes
self.q.rotate(1)
running_jobs = [j[0] for j in self.jobs[volname]]
(ret, job) = self.get_next_job(volname, running_jobs)
if job:
next_job = (volname, job)
break
            # this is an optimization for when a given volume has no more
# jobs and no jobs are in progress. in such cases we remove the volume
# from the tracking list so as to:
#
# a. not query the filesystem for jobs over and over again
# b. keep the filesystem connection idle so that it can be freed
# from the connection pool
#
# if at all there are jobs for a volume, the volume gets added again
# to the tracking list and the jobs get kickstarted.
            # note that we do not iterate the volume list fully if there are
            # jobs to process (that will take place eventually).
if ret == 0 and not job and not running_jobs:
to_remove.append(volname)
nr_vols -= 1
for vol in to_remove:
log.debug("auto removing volume '{0}' from tracked volumes".format(vol))
self.q.remove(vol)
self.jobs.pop(vol)
return next_job
def register_async_job(self, volname, job, thread_id):
log.debug("registering async job {0}.{1} with thread {2}".format(volname, job, thread_id))
self.jobs[volname].append((job, thread_id))
def unregister_async_job(self, volname, job, thread_id):
log.debug("unregistering async job {0}.{1} from thread {2}".format(volname, job, thread_id))
self.jobs[volname].remove((job, thread_id))
cancelled = thread_id.should_cancel()
thread_id.reset_cancel()
# wake up cancellation waiters if needed
if cancelled:
            log.info("waking up cancellation waiters")
self.cancel_cv.notifyAll()
def queue_job(self, volname):
"""
queue a volume for asynchronous job execution.
"""
log.info("queuing job for volume '{0}'".format(volname))
with lock_timeout_log(self.lock):
if volname not in self.q:
self.q.append(volname)
self.jobs[volname] = []
self.cv.notifyAll()
def _cancel_jobs(self, volname):
"""
        cancel all jobs for the volume. do nothing if no jobs are
executing for the given volume. this would wait until all jobs
get interrupted and finish execution.
"""
log.info("cancelling jobs for volume '{0}'".format(volname))
try:
if volname not in self.q and volname not in self.jobs:
return
self.q.remove(volname)
# cancel in-progress operation and wait until complete
for j in self.jobs[volname]:
j[1].cancel_job()
# wait for cancellation to complete
while self.jobs[volname]:
log.debug("waiting for {0} in-progress jobs for volume '{1}' to "
"cancel".format(len(self.jobs[volname]), volname))
self.cancel_cv.wait()
self.jobs.pop(volname)
except (KeyError, ValueError):
pass
def _cancel_job(self, volname, job):
"""
        cancel an executing job for a given volume. return True if canceled, False
otherwise (volume/job not found).
"""
canceled = False
log.info("canceling job {0} for volume {1}".format(job, volname))
try:
vol_jobs = [j[0] for j in self.jobs.get(volname, [])]
if volname not in self.q and job not in vol_jobs:
return canceled
for j in self.jobs[volname]:
if j[0] == job:
j[1].cancel_job()
# be safe against _cancel_jobs() running concurrently
while j in self.jobs.get(volname, []):
self.cancel_cv.wait()
canceled = True
break
except (KeyError, ValueError):
pass
return canceled
def cancel_job(self, volname, job):
with lock_timeout_log(self.lock):
return self._cancel_job(volname, job)
def cancel_jobs(self, volname):
"""
cancel all executing jobs for a given volume.
"""
with lock_timeout_log(self.lock):
self._cancel_jobs(volname)
def cancel_all_jobs(self):
"""
        cancel all executing jobs for all volumes.
"""
with lock_timeout_log(self.lock):
for volname in list(self.q):
self._cancel_jobs(volname)
def get_next_job(self, volname, running_jobs):
"""
get the next job for asynchronous execution as (retcode, job) tuple. if no
jobs are available return (0, None) else return (0, job). on error return
(-ret, None). called under `self.lock`.
"""
raise NotImplementedException()
def execute_job(self, volname, job, should_cancel):
"""
execute a job for a volume. the job can block on I/O operations, sleep for long
hours and do all kinds of synchronous work. called outside `self.lock`.
"""
raise NotImplementedException()
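# A minimal subclass sketch (illustrative only; the real users are Cloner in
# async_cloner.py and ThreadPoolPurgeQueueMixin in purge_queue.py):
#
#   class NoopJobs(AsyncJobs):
#       def __init__(self, volume_client):
#           super().__init__(volume_client, "noop", nr_concurrent_jobs=2)
#
#       def get_next_job(self, volname, running_jobs):
#           return 0, None          # (retcode, job); None means nothing to do
#
#       def execute_job(self, volname, job, should_cancel):
#           # long-running work goes here; poll should_cancel() periodically
#           # and return early once it turns True
#           pass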
ceph-main/src/pybind/mgr/volumes/fs/exception.py
class VolumeException(Exception):
def __init__(self, error_code, error_message):
self.errno = error_code
self.error_str = error_message
def to_tuple(self):
return self.errno, "", self.error_str
def __str__(self):
return "{0} ({1})".format(self.errno, self.error_str)
class MetadataMgrException(Exception):
def __init__(self, error_code, error_message):
self.errno = error_code
self.error_str = error_message
def __str__(self):
return "{0} ({1})".format(self.errno, self.error_str)
class IndexException(Exception):
def __init__(self, error_code, error_message):
self.errno = error_code
self.error_str = error_message
def __str__(self):
return "{0} ({1})".format(self.errno, self.error_str)
class OpSmException(Exception):
def __init__(self, error_code, error_message):
self.errno = error_code
self.error_str = error_message
def __str__(self):
return "{0} ({1})".format(self.errno, self.error_str)
class NotImplementedException(Exception):
pass
class ClusterTimeout(Exception):
"""
Exception indicating that we timed out trying to talk to the Ceph cluster,
either to the mons, or to any individual daemon that the mons indicate ought
to be up but isn't responding to us.
"""
pass
class ClusterError(Exception):
"""
Exception indicating that the cluster returned an error to a command that
we thought should be successful based on our last knowledge of the cluster
state.
"""
def __init__(self, action, result_code, result_str):
self._action = action
self._result_code = result_code
self._result_str = result_str
def __str__(self):
return "Error {0} (\"{1}\") while {2}".format(
self._result_code, self._result_str, self._action)
class EvictionError(Exception):
pass
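# Usage note: callers in volume.py turn a VolumeException into the mgr command
# result via to_tuple(), e.g. (values illustrative)
#   VolumeException(-errno.ENOENT, "subvolume 'sv1' does not exist").to_tuple()
#   == (-errno.ENOENT, "", "subvolume 'sv1' does not exist")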
ceph-main/src/pybind/mgr/volumes/fs/fs_util.py
import os
import errno
import logging
from ceph.deployment.service_spec import ServiceSpec, PlacementSpec
import cephfs
import orchestrator
from .exception import VolumeException
log = logging.getLogger(__name__)
def create_pool(mgr, pool_name):
# create the given pool
command = {'prefix': 'osd pool create', 'pool': pool_name}
return mgr.mon_command(command)
def remove_pool(mgr, pool_name):
command = {'prefix': 'osd pool rm', 'pool': pool_name, 'pool2': pool_name,
'yes_i_really_really_mean_it': True}
return mgr.mon_command(command)
def rename_pool(mgr, pool_name, new_pool_name):
command = {'prefix': 'osd pool rename', 'srcpool': pool_name,
'destpool': new_pool_name}
return mgr.mon_command(command)
def create_filesystem(mgr, fs_name, metadata_pool, data_pool):
command = {'prefix': 'fs new', 'fs_name': fs_name, 'metadata': metadata_pool,
'data': data_pool}
return mgr.mon_command(command)
def remove_filesystem(mgr, fs_name):
command = {'prefix': 'fs fail', 'fs_name': fs_name}
r, outb, outs = mgr.mon_command(command)
if r != 0:
return r, outb, outs
command = {'prefix': 'fs rm', 'fs_name': fs_name, 'yes_i_really_mean_it': True}
return mgr.mon_command(command)
def rename_filesystem(mgr, fs_name, new_fs_name):
command = {'prefix': 'fs rename', 'fs_name': fs_name, 'new_fs_name': new_fs_name,
'yes_i_really_mean_it': True}
return mgr.mon_command(command)
def create_mds(mgr, fs_name, placement):
spec = ServiceSpec(service_type='mds',
service_id=fs_name,
placement=PlacementSpec.from_string(placement))
try:
completion = mgr.apply([spec], no_overwrite=True)
orchestrator.raise_if_exception(completion)
except (ImportError, orchestrator.OrchestratorError):
return 0, "", "Volume created successfully (no MDS daemons created)"
except Exception as e:
# Don't let detailed orchestrator exceptions (python backtraces)
# bubble out to the user
log.exception("Failed to create MDS daemons")
return -errno.EINVAL, "", str(e)
return 0, "", ""
def volume_exists(mgr, fs_name):
fs_map = mgr.get('fs_map')
for fs in fs_map['filesystems']:
if fs['mdsmap']['fs_name'] == fs_name:
return True
return False
def listdir(fs, dirpath, filter_entries=None):
"""
Get the directory names (only dirs) for a given path
"""
dirs = []
if filter_entries is None:
filter_entries = [b".", b".."]
else:
filter_entries.extend([b".", b".."])
try:
with fs.opendir(dirpath) as dir_handle:
d = fs.readdir(dir_handle)
while d:
if (d.d_name not in filter_entries) and d.is_dir():
dirs.append(d.d_name)
d = fs.readdir(dir_handle)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
return dirs
def has_subdir(fs, dirpath, filter_entries=None):
"""
Check the presence of directory (only dirs) for a given path
"""
res = False
if filter_entries is None:
filter_entries = [b".", b".."]
else:
filter_entries.extend([b".", b".."])
try:
with fs.opendir(dirpath) as dir_handle:
d = fs.readdir(dir_handle)
while d:
if (d.d_name not in filter_entries) and d.is_dir():
res = True
break
d = fs.readdir(dir_handle)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
return res
def is_inherited_snap(snapname):
"""
Returns True if the snapname is inherited else False
"""
return snapname.startswith("_")
def listsnaps(fs, volspec, snapdirpath, filter_inherited_snaps=False):
"""
Get the snap names from a given snap directory path
"""
if os.path.basename(snapdirpath) != volspec.snapshot_prefix.encode('utf-8'):
raise VolumeException(-errno.EINVAL, "Not a snap directory: {0}".format(snapdirpath))
snaps = []
try:
with fs.opendir(snapdirpath) as dir_handle:
d = fs.readdir(dir_handle)
while d:
if (d.d_name not in (b".", b"..")) and d.is_dir():
d_name = d.d_name.decode('utf-8')
if not is_inherited_snap(d_name):
snaps.append(d.d_name)
elif is_inherited_snap(d_name) and not filter_inherited_snaps:
snaps.append(d.d_name)
d = fs.readdir(dir_handle)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
return snaps
def list_one_entry_at_a_time(fs, dirpath):
"""
    Get a directory entry (one entry at a time)
"""
try:
with fs.opendir(dirpath) as dir_handle:
d = fs.readdir(dir_handle)
while d:
if d.d_name not in (b".", b".."):
yield d
d = fs.readdir(dir_handle)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def copy_file(fs, src, dst, mode, cancel_check=None):
"""
Copy a regular file from @src to @dst. @dst is overwritten if it exists.
"""
src_fd = dst_fd = None
try:
src_fd = fs.open(src, os.O_RDONLY)
dst_fd = fs.open(dst, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, mode)
except cephfs.Error as e:
if src_fd is not None:
fs.close(src_fd)
if dst_fd is not None:
fs.close(dst_fd)
raise VolumeException(-e.args[0], e.args[1])
IO_SIZE = 8 * 1024 * 1024
try:
while True:
if cancel_check and cancel_check():
raise VolumeException(-errno.EINTR, "copy operation interrupted")
data = fs.read(src_fd, -1, IO_SIZE)
if not len(data):
break
written = 0
while written < len(data):
written += fs.write(dst_fd, data[written:], -1)
fs.fsync(dst_fd, 0)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
finally:
fs.close(src_fd)
fs.close(dst_fd)
def get_ancestor_xattr(fs, path, attr):
"""
Helper for reading layout information: if this xattr is missing
on the requested path, keep checking parents until we find it.
"""
try:
return fs.getxattr(path, attr).decode('utf-8')
except cephfs.NoData as e:
if path == "/":
raise VolumeException(-e.args[0], e.args[1])
else:
return get_ancestor_xattr(fs, os.path.split(path)[0], attr)
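# Example (illustrative): querying a layout xattr such as
# 'ceph.dir.layout.pool' on a newly created subvolume directory falls back to
# the nearest ancestor that has it set, stopping at "/".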
def create_base_dir(fs, path, mode):
"""
Create volspec base/group directory if it doesn't exist
"""
try:
fs.stat(path)
except cephfs.Error as e:
if e.args[0] == errno.ENOENT:
fs.mkdirs(path, mode)
else:
raise VolumeException(-e.args[0], e.args[1])
ceph-main/src/pybind/mgr/volumes/fs/purge_queue.py
import errno
import logging
import os
import stat
import cephfs
from .async_job import AsyncJobs
from .exception import VolumeException
from .operations.resolver import resolve_trash
from .operations.template import SubvolumeOpType
from .operations.group import open_group
from .operations.subvolume import open_subvol
from .operations.volume import open_volume, open_volume_lockless
from .operations.trash import open_trashcan
log = logging.getLogger(__name__)
# helper for fetching a trash entry for a given volume
def get_trash_entry_for_volume(fs_client, volspec, volname, running_jobs):
log.debug("fetching trash entry for volume '{0}'".format(volname))
try:
with open_volume_lockless(fs_client, volname) as fs_handle:
try:
with open_trashcan(fs_handle, volspec) as trashcan:
path = trashcan.get_trash_entry(running_jobs)
return 0, path
except VolumeException as ve:
if ve.errno == -errno.ENOENT:
return 0, None
raise ve
except VolumeException as ve:
log.error("error fetching trash entry for volume '{0}' ({1})".format(volname, ve))
return ve.errno, None
def subvolume_purge(fs_client, volspec, volname, trashcan, subvolume_trash_entry, should_cancel):
groupname, subvolname = resolve_trash(volspec, subvolume_trash_entry.decode('utf-8'))
log.debug("subvolume resolved to {0}/{1}".format(groupname, subvolname))
try:
with open_volume(fs_client, volname) as fs_handle:
with open_group(fs_handle, volspec, groupname) as group:
with open_subvol(fs_client.mgr, fs_handle, volspec, group, subvolname, SubvolumeOpType.REMOVE) as subvolume:
log.debug("subvolume.path={0}, purgeable={1}".format(subvolume.path, subvolume.purgeable))
if not subvolume.purgeable:
return
# this is fine under the global lock -- there are just a handful
# of entries in the subvolume to purge. moreover, the purge needs
# to be guarded since a create request might sneak in.
trashcan.purge(subvolume.base_path, should_cancel)
except VolumeException as ve:
if not ve.errno == -errno.ENOENT:
raise
# helper for starting a purge operation on a trash entry
def purge_trash_entry_for_volume(fs_client, volspec, volname, purge_entry, should_cancel):
log.debug("purging trash entry '{0}' for volume '{1}'".format(purge_entry, volname))
ret = 0
try:
with open_volume_lockless(fs_client, volname) as fs_handle:
with open_trashcan(fs_handle, volspec) as trashcan:
try:
pth = os.path.join(trashcan.path, purge_entry)
stx = fs_handle.statx(pth, cephfs.CEPH_STATX_MODE | cephfs.CEPH_STATX_SIZE,
cephfs.AT_SYMLINK_NOFOLLOW)
if stat.S_ISLNK(stx['mode']):
tgt = fs_handle.readlink(pth, 4096)
tgt = tgt[:stx['size']]
log.debug("purging entry pointing to subvolume trash: {0}".format(tgt))
delink = True
try:
trashcan.purge(tgt, should_cancel)
except VolumeException as ve:
if not ve.errno == -errno.ENOENT:
delink = False
return ve.errno
finally:
if delink:
subvolume_purge(fs_client, volspec, volname, trashcan, tgt, should_cancel)
log.debug("purging trash link: {0}".format(purge_entry))
trashcan.delink(purge_entry)
else:
log.debug("purging entry pointing to trash: {0}".format(pth))
trashcan.purge(pth, should_cancel)
except cephfs.Error as e:
                    log.warning("failed to remove trash entry: {0}".format(e))
except VolumeException as ve:
ret = ve.errno
return ret
class ThreadPoolPurgeQueueMixin(AsyncJobs):
"""
Purge queue mixin class maintaining a pool of threads for purging trash entries.
Subvolumes are chosen from volumes in a round robin fashion. If some of the purge
    entries (belonging to a set of volumes) have huge directory trees (such as lots
of small files in a directory w/ deep directory trees), this model may lead to
_all_ threads purging entries for one volume (starving other volumes).
"""
def __init__(self, volume_client, tp_size):
self.vc = volume_client
super(ThreadPoolPurgeQueueMixin, self).__init__(volume_client, "purgejob", tp_size)
def get_next_job(self, volname, running_jobs):
return get_trash_entry_for_volume(self.fs_client, self.vc.volspec, volname, running_jobs)
def execute_job(self, volname, job, should_cancel):
purge_trash_entry_for_volume(self.fs_client, self.vc.volspec, volname, job, should_cancel)
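# Wiring note (see volume.py below): VolumeClient instantiates
# ThreadPoolPurgeQueueMixin(self, 4) and queues a purge job per filesystem at
# startup, so leftover trash entries are reaped without operator intervention.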
ceph-main/src/pybind/mgr/volumes/fs/vol_spec.py
from .operations.index import Index
from .operations.group import Group
from .operations.trash import Trash
from .operations.versions.subvolume_base import SubvolumeBase
class VolSpec(object):
"""
specification of a "volume" -- base directory and various prefixes.
"""
# where shall we (by default) create subvolumes
DEFAULT_SUBVOL_PREFIX = "/volumes"
# and the default namespace
DEFAULT_NS_PREFIX = "fsvolumens_"
# default mode for subvol prefix and group
DEFAULT_MODE = 0o755
# internal directories
INTERNAL_DIRS = [Group.NO_GROUP_NAME, Index.GROUP_NAME, Trash.GROUP_NAME, SubvolumeBase.LEGACY_CONF_DIR]
def __init__(self, snapshot_prefix, subvolume_prefix=None, pool_ns_prefix=None):
self.snapshot_prefix = snapshot_prefix
self.subvolume_prefix = subvolume_prefix if subvolume_prefix else VolSpec.DEFAULT_SUBVOL_PREFIX
self.pool_ns_prefix = pool_ns_prefix if pool_ns_prefix else VolSpec.DEFAULT_NS_PREFIX
@property
def snapshot_dir_prefix(self):
"""
Return the snapshot directory prefix
"""
return self.snapshot_prefix
@property
def base_dir(self):
"""
Return the top level directory under which subvolumes/groups are created
"""
return self.subvolume_prefix
@property
def fs_namespace(self):
"""
return a filesystem namespace by stashing pool namespace prefix and subvolume-id
"""
return self.pool_ns_prefix
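# Layout sketch (illustrative): with the defaults above a subvolume lives under
# "/volumes/<group>/<subvolume>/...", an isolated subvolume gets a RADOS pool
# namespace of the form "fsvolumens_<subvolume>", and the snapshot directory
# name (snapshot_prefix) is supplied by the caller -- see volume.py, which
# passes the client_snapdir setting.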
ceph-main/src/pybind/mgr/volumes/fs/volume.py
import json
import errno
import logging
import os
import mgr_util
from typing import TYPE_CHECKING
import cephfs
from mgr_util import CephfsClient
from .fs_util import listdir, has_subdir
from .operations.group import open_group, create_group, remove_group, \
open_group_unique, set_group_attrs
from .operations.volume import create_volume, delete_volume, rename_volume, \
list_volumes, open_volume, get_pool_names, get_pool_ids, get_pending_subvol_deletions_count
from .operations.subvolume import open_subvol, create_subvol, remove_subvol, \
create_clone
from .operations.trash import Trash
from .vol_spec import VolSpec
from .exception import VolumeException, ClusterError, ClusterTimeout, EvictionError
from .async_cloner import Cloner
from .purge_queue import ThreadPoolPurgeQueueMixin
from .operations.template import SubvolumeOpType
if TYPE_CHECKING:
from volumes import Module
log = logging.getLogger(__name__)
ALLOWED_ACCESS_LEVELS = ('r', 'rw')
def octal_str_to_decimal_int(mode):
try:
return int(mode, 8)
except ValueError:
raise VolumeException(-errno.EINVAL, "Invalid mode '{0}'".format(mode))
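# e.g. octal_str_to_decimal_int('755') == 493 (0o755)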
def name_to_json(names):
"""
convert the list of names to json
"""
namedict = []
for i in range(len(names)):
namedict.append({'name': names[i].decode('utf-8')})
return json.dumps(namedict, indent=4, sort_keys=True)
class VolumeClient(CephfsClient["Module"]):
def __init__(self, mgr):
super().__init__(mgr)
# volume specification
self.volspec = VolSpec(mgr.rados.conf_get('client_snapdir'))
self.cloner = Cloner(self, self.mgr.max_concurrent_clones, self.mgr.snapshot_clone_delay)
self.purge_queue = ThreadPoolPurgeQueueMixin(self, 4)
# on startup, queue purge job for available volumes to kickstart
# purge for leftover subvolume entries in trash. note that, if the
# trash directory does not exist or if there are no purge entries
# available for a volume, the volume is removed from the purge
# job list.
fs_map = self.mgr.get('fs_map')
for fs in fs_map['filesystems']:
self.cloner.queue_job(fs['mdsmap']['fs_name'])
self.purge_queue.queue_job(fs['mdsmap']['fs_name'])
def shutdown(self):
# Overrides CephfsClient.shutdown()
log.info("shutting down")
# stop clones
self.cloner.shutdown()
# stop purge threads
self.purge_queue.shutdown()
# last, delete all libcephfs handles from connection pool
self.connection_pool.del_all_connections()
def cluster_log(self, msg, lvl=None):
"""
log to cluster log with default log level as WARN.
"""
if not lvl:
lvl = self.mgr.ClusterLogPrio.WARN
self.mgr.cluster_log("cluster", lvl, msg)
def volume_exception_to_retval(self, ve):
"""
return a tuple representation from a volume exception
"""
return ve.to_tuple()
### volume operations -- create, rm, ls
def create_fs_volume(self, volname, placement):
return create_volume(self.mgr, volname, placement)
def delete_fs_volume(self, volname, confirm):
if confirm != "--yes-i-really-mean-it":
return -errno.EPERM, "", "WARNING: this will *PERMANENTLY DESTROY* all data " \
"stored in the filesystem '{0}'. If you are *ABSOLUTELY CERTAIN* " \
"that is what you want, re-issue the command followed by " \
"--yes-i-really-mean-it.".format(volname)
ret, out, err = self.mgr.check_mon_command({
'prefix': 'config get',
'key': 'mon_allow_pool_delete',
'who': 'mon',
'format': 'json',
})
mon_allow_pool_delete = json.loads(out)
if not mon_allow_pool_delete:
return -errno.EPERM, "", "pool deletion is disabled; you must first " \
"set the mon_allow_pool_delete config option to true before volumes " \
"can be deleted"
metadata_pool, data_pools = get_pool_names(self.mgr, volname)
if not metadata_pool:
return -errno.ENOENT, "", "volume {0} doesn't exist".format(volname)
self.purge_queue.cancel_jobs(volname)
self.connection_pool.del_connections(volname, wait=True)
return delete_volume(self.mgr, volname, metadata_pool, data_pools)
def list_fs_volumes(self):
volumes = list_volumes(self.mgr)
return 0, json.dumps(volumes, indent=4, sort_keys=True), ""
def rename_fs_volume(self, volname, newvolname, sure):
if not sure:
return (
-errno.EPERM, "",
"WARNING: This will rename the filesystem and possibly its "
"pools. It is a potentially disruptive operation, clients' "
"cephx credentials need reauthorized to access the file system "
"and its pools with the new name. Add --yes-i-really-mean-it "
"if you are sure you wish to continue.")
return rename_volume(self.mgr, volname, newvolname)
def volume_info(self, **kwargs):
ret = None
volname = kwargs['vol_name']
human_readable = kwargs['human_readable']
try:
with open_volume(self, volname) as fs_handle:
path = self.volspec.base_dir
vol_info_dict = {}
try:
st = fs_handle.statx(path.encode('utf-8'), cephfs.CEPH_STATX_SIZE,
cephfs.AT_SYMLINK_NOFOLLOW)
usedbytes = st['size']
vol_info_dict = get_pending_subvol_deletions_count(path)
if human_readable:
vol_info_dict['used_size'] = mgr_util.format_bytes(int(usedbytes), 5)
else:
vol_info_dict['used_size'] = int(usedbytes)
except cephfs.Error as e:
if e.args[0] == errno.ENOENT:
pass
df = self.mgr.get("df")
pool_stats = dict([(p['id'], p['stats']) for p in df['pools']])
osdmap = self.mgr.get("osd_map")
pools = dict([(p['pool'], p) for p in osdmap['pools']])
metadata_pool_id, data_pool_ids = get_pool_ids(self.mgr, volname)
vol_info_dict["pools"] = {"metadata": [], "data": []}
for pool_id in [metadata_pool_id] + data_pool_ids:
if pool_id == metadata_pool_id:
pool_type = "metadata"
else:
pool_type = "data"
if human_readable:
vol_info_dict["pools"][pool_type].append({
'name': pools[pool_id]['pool_name'],
'used': mgr_util.format_bytes(pool_stats[pool_id]['bytes_used'], 5),
'avail': mgr_util.format_bytes(pool_stats[pool_id]['max_avail'], 5)})
else:
vol_info_dict["pools"][pool_type].append({
'name': pools[pool_id]['pool_name'],
'used': pool_stats[pool_id]['bytes_used'],
'avail': pool_stats[pool_id]['max_avail']})
mon_addr_lst = []
mon_map_mons = self.mgr.get('mon_map')['mons']
for mon in mon_map_mons:
ip_port = mon['addr'].split("/")[0]
mon_addr_lst.append(ip_port)
vol_info_dict["mon_addrs"] = mon_addr_lst
ret = 0, json.dumps(vol_info_dict, indent=4, sort_keys=True), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
### subvolume operations
def _create_subvolume(self, fs_handle, volname, group, subvolname, **kwargs):
size = kwargs['size']
pool = kwargs['pool_layout']
uid = kwargs['uid']
gid = kwargs['gid']
mode = kwargs['mode']
isolate_nspace = kwargs['namespace_isolated']
oct_mode = octal_str_to_decimal_int(mode)
try:
create_subvol(
self.mgr, fs_handle, self.volspec, group, subvolname, size, isolate_nspace, pool, oct_mode, uid, gid)
except VolumeException as ve:
# kick the purge threads for async removal -- note that this
# assumes that the subvolume is moved to trashcan for cleanup on error.
self.purge_queue.queue_job(volname)
raise ve
def create_subvolume(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
size = kwargs['size']
pool = kwargs['pool_layout']
uid = kwargs['uid']
gid = kwargs['gid']
mode = kwargs['mode']
isolate_nspace = kwargs['namespace_isolated']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
try:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.CREATE) as subvolume:
                            # idempotent creation -- valid. Setting attributes on the existing subvolume is supported.
attrs = {
'uid': uid if uid else subvolume.uid,
'gid': gid if gid else subvolume.gid,
'mode': octal_str_to_decimal_int(mode),
'data_pool': pool,
'pool_namespace': subvolume.namespace if isolate_nspace else None,
'quota': size
}
subvolume.set_attrs(subvolume.path, attrs)
except VolumeException as ve:
if ve.errno == -errno.ENOENT:
self._create_subvolume(fs_handle, volname, group, subvolname, **kwargs)
else:
raise
except VolumeException as ve:
# volume/group does not exist or subvolume creation failed
ret = self.volume_exception_to_retval(ve)
return ret
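    # Hedged usage sketch (not part of the class): the keyword arguments this
    # handler expects when wired up by the mgr command layer. "vc" stands for a
    # VolumeClient instance and the names/sizes are hypothetical.
    #
    #   vc.create_subvolume(vol_name="cephfs", sub_name="sv1", group_name=None,
    #                       size=1073741824, pool_layout=None, uid=None,
    #                       gid=None, mode="755", namespace_isolated=False)
    #   # -> (0, "", "") on success, i.e. a (retcode, stdout, stderr) tuple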
def remove_subvolume(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
force = kwargs['force']
retainsnaps = kwargs['retain_snapshots']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
remove_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, force, retainsnaps)
# kick the purge threads for async removal -- note that this
# assumes that the subvolume is moved to trash can.
                    # TODO: make the purge queue a singleton so that the
                    # trashcan kicks the purge threads on dump.
self.purge_queue.queue_job(volname)
except VolumeException as ve:
if ve.errno == -errno.EAGAIN and not force:
ve = VolumeException(ve.errno, ve.error_str + " (use --force to override)")
ret = self.volume_exception_to_retval(ve)
elif not (ve.errno == -errno.ENOENT and force):
ret = self.volume_exception_to_retval(ve)
return ret
def authorize_subvolume(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
authid = kwargs['auth_id']
groupname = kwargs['group_name']
accesslevel = kwargs['access_level']
tenant_id = kwargs['tenant_id']
allow_existing_id = kwargs['allow_existing_id']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.ALLOW_ACCESS) as subvolume:
key = subvolume.authorize(authid, accesslevel, tenant_id, allow_existing_id)
ret = 0, key, ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def deauthorize_subvolume(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
authid = kwargs['auth_id']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.DENY_ACCESS) as subvolume:
subvolume.deauthorize(authid)
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def authorized_list(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.AUTH_LIST) as subvolume:
auths = subvolume.authorized_list()
ret = 0, json.dumps(auths, indent=4, sort_keys=True), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def evict(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
authid = kwargs['auth_id']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.EVICT) as subvolume:
key = subvolume.evict(volname, authid)
ret = 0, "", ""
except (VolumeException, ClusterTimeout, ClusterError, EvictionError) as e:
if isinstance(e, VolumeException):
ret = self.volume_exception_to_retval(e)
elif isinstance(e, ClusterTimeout):
                ret = -errno.ETIMEDOUT, "", "Timed out trying to talk to ceph cluster"
            elif isinstance(e, ClusterError):
                ret = e._result_code, "", e._result_str
elif isinstance(e, EvictionError):
ret = -errno.EINVAL, "", str(e)
return ret
def resize_subvolume(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
newsize = kwargs['new_size']
noshrink = kwargs['no_shrink']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.RESIZE) as subvolume:
nsize, usedbytes = subvolume.resize(newsize, noshrink)
ret = 0, json.dumps(
[{'bytes_used': usedbytes},{'bytes_quota': nsize},
{'bytes_pcent': "undefined" if nsize == 0 else '{0:.2f}'.format((float(usedbytes) / nsize) * 100.0)}],
indent=4, sort_keys=True), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def subvolume_pin(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
pin_type = kwargs['pin_type']
pin_setting = kwargs['pin_setting']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.PIN) as subvolume:
subvolume.pin(pin_type, pin_setting)
ret = 0, json.dumps({}), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def subvolume_getpath(self, **kwargs):
ret = None
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.GETPATH) as subvolume:
subvolpath = subvolume.path
ret = 0, subvolpath.decode("utf-8"), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def subvolume_info(self, **kwargs):
ret = None
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.INFO) as subvolume:
mon_addr_lst = []
mon_map_mons = self.mgr.get('mon_map')['mons']
for mon in mon_map_mons:
ip_port = mon['addr'].split("/")[0]
mon_addr_lst.append(ip_port)
subvol_info_dict = subvolume.info()
subvol_info_dict["mon_addrs"] = mon_addr_lst
ret = 0, json.dumps(subvol_info_dict, indent=4, sort_keys=True), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def set_user_metadata(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
keyname = kwargs['key_name']
value = kwargs['value']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.USER_METADATA_SET) as subvolume:
subvolume.set_user_metadata(keyname, value)
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def get_user_metadata(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
keyname = kwargs['key_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.USER_METADATA_GET) as subvolume:
value = subvolume.get_user_metadata(keyname)
ret = 0, value, ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def list_user_metadata(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.USER_METADATA_LIST) as subvolume:
subvol_metadata_dict = subvolume.list_user_metadata()
ret = 0, json.dumps(subvol_metadata_dict, indent=4, sort_keys=True), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def remove_user_metadata(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
keyname = kwargs['key_name']
force = kwargs['force']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.USER_METADATA_REMOVE) as subvolume:
subvolume.remove_user_metadata(keyname)
except VolumeException as ve:
if not (ve.errno == -errno.ENOENT and force):
ret = self.volume_exception_to_retval(ve)
return ret
def list_subvolumes(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
subvolumes = group.list_subvolumes()
ret = 0, name_to_json(subvolumes), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def subvolume_exists(self, **kwargs):
volname = kwargs['vol_name']
groupname = kwargs['group_name']
ret = 0, "", ""
volume_exists = False
try:
with open_volume(self, volname) as fs_handle:
volume_exists = True
with open_group(fs_handle, self.volspec, groupname) as group:
res = group.has_subvolumes()
if res:
ret = 0, "subvolume exists", ""
else:
ret = 0, "no subvolume exists", ""
except VolumeException as ve:
if volume_exists and ve.errno == -errno.ENOENT:
ret = 0, "no subvolume exists", ""
else:
ret = self.volume_exception_to_retval(ve)
return ret
### subvolume snapshot
def create_subvolume_snapshot(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
snapname = kwargs['snap_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_CREATE) as subvolume:
subvolume.create_snapshot(snapname)
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def remove_subvolume_snapshot(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
snapname = kwargs['snap_name']
groupname = kwargs['group_name']
force = kwargs['force']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_REMOVE) as subvolume:
subvolume.remove_snapshot(snapname, force)
except VolumeException as ve:
            # ESTALE indicates that the subvolume is currently stale due to an
            # internal removal in progress; kick the purge jobs so that the
            # stale entry gets purged
if ve.errno == -errno.ESTALE:
self.purge_queue.queue_job(volname)
elif not (ve.errno == -errno.ENOENT and force):
ret = self.volume_exception_to_retval(ve)
return ret
def subvolume_snapshot_info(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
snapname = kwargs['snap_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_INFO) as subvolume:
snap_info_dict = subvolume.snapshot_info(snapname)
ret = 0, json.dumps(snap_info_dict, indent=4, sort_keys=True), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def set_subvolume_snapshot_metadata(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
snapname = kwargs['snap_name']
groupname = kwargs['group_name']
keyname = kwargs['key_name']
value = kwargs['value']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_METADATA_SET) as subvolume:
if not snapname.encode('utf-8') in subvolume.list_snapshots():
raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(snapname))
subvolume.set_snapshot_metadata(snapname, keyname, value)
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def get_subvolume_snapshot_metadata(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
snapname = kwargs['snap_name']
groupname = kwargs['group_name']
keyname = kwargs['key_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_METADATA_GET) as subvolume:
if not snapname.encode('utf-8') in subvolume.list_snapshots():
raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(snapname))
value = subvolume.get_snapshot_metadata(snapname, keyname)
ret = 0, value, ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def list_subvolume_snapshot_metadata(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
snapname = kwargs['snap_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_METADATA_LIST) as subvolume:
if not snapname.encode('utf-8') in subvolume.list_snapshots():
raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(snapname))
snap_metadata_dict = subvolume.list_snapshot_metadata(snapname)
ret = 0, json.dumps(snap_metadata_dict, indent=4, sort_keys=True), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def remove_subvolume_snapshot_metadata(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
snapname = kwargs['snap_name']
groupname = kwargs['group_name']
keyname = kwargs['key_name']
force = kwargs['force']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_METADATA_REMOVE) as subvolume:
if not snapname.encode('utf-8') in subvolume.list_snapshots():
raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(snapname))
subvolume.remove_snapshot_metadata(snapname, keyname)
except VolumeException as ve:
if not (ve.errno == -errno.ENOENT and force):
ret = self.volume_exception_to_retval(ve)
return ret
def list_subvolume_snapshots(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_LIST) as subvolume:
snapshots = subvolume.list_snapshots()
ret = 0, name_to_json(snapshots), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def protect_subvolume_snapshot(self, **kwargs):
ret = 0, "", "Deprecation warning: 'snapshot protect' call is deprecated and will be removed in a future release"
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_PROTECT) as subvolume:
log.warning("snapshot protect call is deprecated and will be removed in a future release")
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def unprotect_subvolume_snapshot(self, **kwargs):
ret = 0, "", "Deprecation warning: 'snapshot unprotect' call is deprecated and will be removed in a future release"
volname = kwargs['vol_name']
subvolname = kwargs['sub_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_UNPROTECT) as subvolume:
log.warning("snapshot unprotect call is deprecated and will be removed in a future release")
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def _prepare_clone_subvolume(self, fs_handle, volname, s_subvolume, s_snapname, t_group, t_subvolname, **kwargs):
t_pool = kwargs['pool_layout']
s_subvolname = kwargs['sub_name']
s_groupname = kwargs['group_name']
t_groupname = kwargs['target_group_name']
create_clone(self.mgr, fs_handle, self.volspec, t_group, t_subvolname, t_pool, volname, s_subvolume, s_snapname)
with open_subvol(self.mgr, fs_handle, self.volspec, t_group, t_subvolname, SubvolumeOpType.CLONE_INTERNAL) as t_subvolume:
try:
if t_groupname == s_groupname and t_subvolname == s_subvolname:
t_subvolume.attach_snapshot(s_snapname, t_subvolume)
else:
s_subvolume.attach_snapshot(s_snapname, t_subvolume)
self.cloner.queue_job(volname)
except VolumeException as ve:
try:
t_subvolume.remove()
self.purge_queue.queue_job(volname)
except Exception as e:
log.warning("failed to cleanup clone subvolume '{0}' ({1})".format(t_subvolname, e))
raise ve
def _clone_subvolume_snapshot(self, fs_handle, volname, s_group, s_subvolume, **kwargs):
s_snapname = kwargs['snap_name']
target_subvolname = kwargs['target_sub_name']
target_groupname = kwargs['target_group_name']
s_groupname = kwargs['group_name']
if not s_snapname.encode('utf-8') in s_subvolume.list_snapshots():
raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(s_snapname))
with open_group_unique(fs_handle, self.volspec, target_groupname, s_group, s_groupname) as target_group:
try:
with open_subvol(self.mgr, fs_handle, self.volspec, target_group, target_subvolname, SubvolumeOpType.CLONE_CREATE):
raise VolumeException(-errno.EEXIST, "subvolume '{0}' exists".format(target_subvolname))
except VolumeException as ve:
if ve.errno == -errno.ENOENT:
self._prepare_clone_subvolume(fs_handle, volname, s_subvolume, s_snapname,
target_group, target_subvolname, **kwargs)
else:
raise
def clone_subvolume_snapshot(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
s_subvolname = kwargs['sub_name']
s_groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, s_groupname) as s_group:
with open_subvol(self.mgr, fs_handle, self.volspec, s_group, s_subvolname, SubvolumeOpType.CLONE_SOURCE) as s_subvolume:
self._clone_subvolume_snapshot(fs_handle, volname, s_group, s_subvolume, **kwargs)
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def clone_status(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
clonename = kwargs['clone_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
with open_subvol(self.mgr, fs_handle, self.volspec, group, clonename, SubvolumeOpType.CLONE_STATUS) as subvolume:
ret = 0, json.dumps({'status' : subvolume.status}, indent=2), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def clone_cancel(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
clonename = kwargs['clone_name']
groupname = kwargs['group_name']
try:
self.cloner.cancel_job(volname, (clonename, groupname))
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
### group operations
def create_subvolume_group(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
groupname = kwargs['group_name']
size = kwargs['size']
pool = kwargs['pool_layout']
uid = kwargs['uid']
gid = kwargs['gid']
mode = kwargs['mode']
try:
with open_volume(self, volname) as fs_handle:
try:
with open_group(fs_handle, self.volspec, groupname) as group:
# idempotent creation -- valid.
attrs = {
'uid': uid,
'gid': gid,
'mode': octal_str_to_decimal_int(mode),
'data_pool': pool,
'quota': size
}
set_group_attrs(fs_handle, group.path, attrs)
except VolumeException as ve:
if ve.errno == -errno.ENOENT:
oct_mode = octal_str_to_decimal_int(mode)
create_group(fs_handle, self.volspec, groupname, size, pool, oct_mode, uid, gid)
else:
raise
except VolumeException as ve:
# volume does not exist or subvolume group creation failed
ret = self.volume_exception_to_retval(ve)
return ret
def remove_subvolume_group(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
groupname = kwargs['group_name']
force = kwargs['force']
try:
with open_volume(self, volname) as fs_handle:
remove_group(fs_handle, self.volspec, groupname)
except VolumeException as ve:
if not (ve.errno == -errno.ENOENT and force):
ret = self.volume_exception_to_retval(ve)
return ret
def subvolumegroup_info(self, **kwargs):
ret = None
volname = kwargs['vol_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
mon_addr_lst = []
mon_map_mons = self.mgr.get('mon_map')['mons']
for mon in mon_map_mons:
ip_port = mon['addr'].split("/")[0]
mon_addr_lst.append(ip_port)
group_info_dict = group.info()
group_info_dict["mon_addrs"] = mon_addr_lst
ret = 0, json.dumps(group_info_dict, indent=4, sort_keys=True), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def resize_subvolume_group(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
groupname = kwargs['group_name']
newsize = kwargs['new_size']
noshrink = kwargs['no_shrink']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
nsize, usedbytes = group.resize(newsize, noshrink)
ret = 0, json.dumps(
[{'bytes_used': usedbytes},{'bytes_quota': nsize},
{'bytes_pcent': "undefined" if nsize == 0 else '{0:.2f}'.format((float(usedbytes) / nsize) * 100.0)}],
indent=4, sort_keys=True), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def getpath_subvolume_group(self, **kwargs):
volname = kwargs['vol_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
return 0, group.path.decode('utf-8'), ""
except VolumeException as ve:
return self.volume_exception_to_retval(ve)
def list_subvolume_groups(self, **kwargs):
volname = kwargs['vol_name']
ret = 0, '[]', ""
volume_exists = False
try:
with open_volume(self, volname) as fs_handle:
volume_exists = True
groups = listdir(fs_handle, self.volspec.base_dir, filter_entries=[dir.encode('utf-8') for dir in self.volspec.INTERNAL_DIRS])
ret = 0, name_to_json(groups), ""
except VolumeException as ve:
if not ve.errno == -errno.ENOENT or not volume_exists:
ret = self.volume_exception_to_retval(ve)
return ret
def pin_subvolume_group(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
groupname = kwargs['group_name']
pin_type = kwargs['pin_type']
pin_setting = kwargs['pin_setting']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
group.pin(pin_type, pin_setting)
ret = 0, json.dumps({}), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def subvolume_group_exists(self, **kwargs):
volname = kwargs['vol_name']
ret = 0, "", ""
volume_exists = False
try:
with open_volume(self, volname) as fs_handle:
volume_exists = True
res = has_subdir(fs_handle, self.volspec.base_dir, filter_entries=[
dir.encode('utf-8') for dir in self.volspec.INTERNAL_DIRS])
if res:
ret = 0, "subvolumegroup exists", ""
else:
ret = 0, "no subvolumegroup exists", ""
except VolumeException as ve:
if volume_exists and ve.errno == -errno.ENOENT:
ret = 0, "no subvolumegroup exists", ""
else:
ret = self.volume_exception_to_retval(ve)
return ret
### group snapshot
def create_subvolume_group_snapshot(self, **kwargs):
ret = -errno.ENOSYS, "", "subvolume group snapshots are not supported"
volname = kwargs['vol_name']
groupname = kwargs['group_name']
# snapname = kwargs['snap_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
                    # as subvolumes are marked with the vxattr ceph.dir.subvolume,
                    # snapshots are denied at the subvolume group level
                    # (see: https://tracker.ceph.com/issues/46074)
# group.create_snapshot(snapname)
pass
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
def remove_subvolume_group_snapshot(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
groupname = kwargs['group_name']
snapname = kwargs['snap_name']
force = kwargs['force']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
group.remove_snapshot(snapname)
except VolumeException as ve:
if not (ve.errno == -errno.ENOENT and force):
ret = self.volume_exception_to_retval(ve)
return ret
def list_subvolume_group_snapshots(self, **kwargs):
ret = 0, "", ""
volname = kwargs['vol_name']
groupname = kwargs['group_name']
try:
with open_volume(self, volname) as fs_handle:
with open_group(fs_handle, self.volspec, groupname) as group:
snapshots = group.list_snapshots()
ret = 0, name_to_json(snapshots), ""
except VolumeException as ve:
ret = self.volume_exception_to_retval(ve)
return ret
ceph-main/src/pybind/mgr/volumes/fs/operations/__init__.py (empty file)
ceph-main/src/pybind/mgr/volumes/fs/operations/access.py
import errno
import json
from typing import List
def prepare_updated_caps_list(existing_caps, mds_cap_str, osd_cap_str, authorize=True):
caps_list = [] # type: List[str]
for k, v in existing_caps['caps'].items():
if k == 'mds' or k == 'osd':
continue
elif k == 'mon':
if not authorize and v == 'allow r':
continue
caps_list.extend((k, v))
if mds_cap_str:
caps_list.extend(('mds', mds_cap_str))
if osd_cap_str:
caps_list.extend(('osd', osd_cap_str))
if authorize and 'mon' not in caps_list:
caps_list.extend(('mon', 'allow r'))
return caps_list
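# Hedged example (not part of the module): merging newly computed mds/osd cap
# strings with an entity's existing caps. The cap strings are hypothetical.
#
#   existing = {'caps': {'mds': 'allow rw path=/volumes/_nogroup/old',
#                        'mon': 'allow r',
#                        'osd': 'allow rw pool=cephfs_data'}}
#   prepare_updated_caps_list(existing,
#                             'allow rw path=/volumes/_nogroup/sv1',
#                             'allow rw pool=cephfs_data namespace=ns1')
#   # -> ['mon', 'allow r',
#   #     'mds', 'allow rw path=/volumes/_nogroup/sv1',
#   #     'osd', 'allow rw pool=cephfs_data namespace=ns1']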
def allow_access(mgr, client_entity, want_mds_cap, want_osd_cap,
unwanted_mds_cap, unwanted_osd_cap, existing_caps):
if existing_caps is None:
ret, out, err = mgr.mon_command({
"prefix": "auth get-or-create",
"entity": client_entity,
"caps": ['mds', want_mds_cap, 'osd', want_osd_cap, 'mon', 'allow r'],
"format": "json"})
else:
cap = existing_caps[0]
def cap_update(
orig_mds_caps, orig_osd_caps, want_mds_cap,
want_osd_cap, unwanted_mds_cap, unwanted_osd_cap):
if not orig_mds_caps:
return want_mds_cap, want_osd_cap
mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")]
osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")]
if want_mds_cap in mds_cap_tokens:
return orig_mds_caps, orig_osd_caps
if unwanted_mds_cap in mds_cap_tokens:
mds_cap_tokens.remove(unwanted_mds_cap)
osd_cap_tokens.remove(unwanted_osd_cap)
mds_cap_tokens.append(want_mds_cap)
osd_cap_tokens.append(want_osd_cap)
return ",".join(mds_cap_tokens), ",".join(osd_cap_tokens)
orig_mds_caps = cap['caps'].get('mds', "")
orig_osd_caps = cap['caps'].get('osd', "")
mds_cap_str, osd_cap_str = cap_update(
orig_mds_caps, orig_osd_caps, want_mds_cap, want_osd_cap,
unwanted_mds_cap, unwanted_osd_cap)
caps_list = prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str)
mgr.mon_command(
{
"prefix": "auth caps",
'entity': client_entity,
'caps': caps_list
})
ret, out, err = mgr.mon_command(
{
'prefix': 'auth get',
'entity': client_entity,
'format': 'json'
})
    # The expected result looks like this:
# [
# {
# "entity": "client.foobar",
# "key": "AQBY0\/pViX\/wBBAAUpPs9swy7rey1qPhzmDVGQ==",
# "caps": {
# "mds": "allow *",
# "mon": "allow *"
# }
# }
# ]
caps = json.loads(out)
assert len(caps) == 1
assert caps[0]['entity'] == client_entity
return caps[0]['key']
def deny_access(mgr, client_entity, want_mds_caps, want_osd_caps):
ret, out, err = mgr.mon_command({
"prefix": "auth get",
"entity": client_entity,
"format": "json",
})
if ret == -errno.ENOENT:
# Already gone, great.
return
def cap_remove(orig_mds_caps, orig_osd_caps, want_mds_caps, want_osd_caps):
mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")]
osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")]
for want_mds_cap, want_osd_cap in zip(want_mds_caps, want_osd_caps):
if want_mds_cap in mds_cap_tokens:
mds_cap_tokens.remove(want_mds_cap)
osd_cap_tokens.remove(want_osd_cap)
break
return ",".join(mds_cap_tokens), ",".join(osd_cap_tokens)
cap = json.loads(out)[0]
orig_mds_caps = cap['caps'].get('mds', "")
orig_osd_caps = cap['caps'].get('osd', "")
mds_cap_str, osd_cap_str = cap_remove(orig_mds_caps, orig_osd_caps,
want_mds_caps, want_osd_caps)
caps_list = prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str, authorize=False)
if not caps_list:
mgr.mon_command(
{
'prefix': 'auth rm',
'entity': client_entity
})
else:
mgr.mon_command(
{
"prefix": "auth caps",
'entity': client_entity,
'caps': caps_list
})
ceph-main/src/pybind/mgr/volumes/fs/operations/clone_index.py
import os
import uuid
import stat
import logging
from contextlib import contextmanager
import cephfs
from .index import Index
from ..exception import IndexException, VolumeException
from ..fs_util import list_one_entry_at_a_time
log = logging.getLogger(__name__)
class CloneIndex(Index):
SUB_GROUP_NAME = "clone"
PATH_MAX = 4096
@property
def path(self):
return os.path.join(super(CloneIndex, self).path, CloneIndex.SUB_GROUP_NAME.encode('utf-8'))
def _track(self, sink_path):
tracking_id = str(uuid.uuid4())
source_path = os.path.join(self.path, tracking_id.encode('utf-8'))
log.info("tracking-id {0} for path {1}".format(tracking_id, sink_path))
self.fs.symlink(sink_path, source_path)
return tracking_id
def track(self, sink_path):
try:
return self._track(sink_path)
except (VolumeException, cephfs.Error) as e:
if isinstance(e, cephfs.Error):
e = IndexException(-e.args[0], e.args[1])
elif isinstance(e, VolumeException):
e = IndexException(e.errno, e.error_str)
raise e
def untrack(self, tracking_id):
log.info("untracking {0}".format(tracking_id))
source_path = os.path.join(self.path, tracking_id.encode('utf-8'))
try:
self.fs.unlink(source_path)
except cephfs.Error as e:
raise IndexException(-e.args[0], e.args[1])
def get_oldest_clone_entry(self, exclude=[]):
min_ctime_entry = None
exclude_tracking_ids = [v[0] for v in exclude]
log.debug("excluded tracking ids: {0}".format(exclude_tracking_ids))
for entry in list_one_entry_at_a_time(self.fs, self.path):
dname = entry.d_name
dpath = os.path.join(self.path, dname)
st = self.fs.lstat(dpath)
if dname not in exclude_tracking_ids and stat.S_ISLNK(st.st_mode):
if min_ctime_entry is None or st.st_ctime < min_ctime_entry[1].st_ctime:
min_ctime_entry = (dname, st)
if min_ctime_entry:
try:
linklen = min_ctime_entry[1].st_size
sink_path = self.fs.readlink(os.path.join(self.path, min_ctime_entry[0]), CloneIndex.PATH_MAX)
return (min_ctime_entry[0], sink_path[:linklen])
except cephfs.Error as e:
raise IndexException(-e.args[0], e.args[1])
return None
def find_clone_entry_index(self, sink_path):
try:
for entry in list_one_entry_at_a_time(self.fs, self.path):
dname = entry.d_name
dpath = os.path.join(self.path, dname)
st = self.fs.lstat(dpath)
if stat.S_ISLNK(st.st_mode):
target_path = self.fs.readlink(dpath, CloneIndex.PATH_MAX)
if sink_path == target_path[:st.st_size]:
return dname
return None
except cephfs.Error as e:
raise IndexException(-e.args[0], e.args[1])
def create_clone_index(fs, vol_spec):
clone_index = CloneIndex(fs, vol_spec)
try:
fs.mkdirs(clone_index.path, 0o700)
except cephfs.Error as e:
raise IndexException(-e.args[0], e.args[1])
@contextmanager
def open_clone_index(fs, vol_spec):
clone_index = CloneIndex(fs, vol_spec)
try:
fs.stat(clone_index.path)
except cephfs.Error as e:
raise IndexException(-e.args[0], e.args[1])
yield clone_index
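# Hedged usage sketch (not part of the module): tracking a clone's sink path in
# the clone index. "fs" and "vol_spec" are assumed to be an open cephfs handle
# and a VolSpec; the sink path is hypothetical.
#
#   create_clone_index(fs, vol_spec)        # creates <base>/_index/clone, or raises IndexException
#   with open_clone_index(fs, vol_spec) as ci:
#       tid = ci.track(b"/volumes/grp1/clone1")   # returns a uuid tracking-id
#       ...
#       ci.untrack(tid)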
ceph-main/src/pybind/mgr/volumes/fs/operations/group.py
import os
import errno
import logging
from contextlib import contextmanager
import cephfs
from .snapshot_util import mksnap, rmsnap
from .pin_util import pin
from .template import GroupTemplate
from ..fs_util import listdir, listsnaps, get_ancestor_xattr, create_base_dir, has_subdir
from ..exception import VolumeException
log = logging.getLogger(__name__)
class Group(GroupTemplate):
# Reserved subvolume group name which we use in paths for subvolumes
# that are not assigned to a group (i.e. created with group=None)
NO_GROUP_NAME = "_nogroup"
def __init__(self, fs, vol_spec, groupname):
if groupname == Group.NO_GROUP_NAME:
raise VolumeException(-errno.EPERM, "Operation not permitted for group '{0}' as it is an internal group.".format(groupname))
if groupname in vol_spec.INTERNAL_DIRS:
raise VolumeException(-errno.EINVAL, "'{0}' is an internal directory and not a valid group name.".format(groupname))
self.fs = fs
self.user_id = None
self.group_id = None
self.vol_spec = vol_spec
self.groupname = groupname if groupname else Group.NO_GROUP_NAME
@property
def path(self):
return os.path.join(self.vol_spec.base_dir.encode('utf-8'), self.groupname.encode('utf-8'))
@property
def group_name(self):
return self.groupname
@property
def uid(self):
return self.user_id
@uid.setter
def uid(self, val):
self.user_id = val
@property
def gid(self):
return self.group_id
@gid.setter
def gid(self, val):
self.group_id = val
def is_default_group(self):
return self.groupname == Group.NO_GROUP_NAME
def list_subvolumes(self):
try:
return listdir(self.fs, self.path)
except VolumeException as ve:
# listing a default group when it's not yet created
if ve.errno == -errno.ENOENT and self.is_default_group():
return []
raise
def has_subvolumes(self):
try:
return has_subdir(self.fs, self.path)
except VolumeException as ve:
# listing a default group when it's not yet created
if ve.errno == -errno.ENOENT and self.is_default_group():
return False
raise
def pin(self, pin_type, pin_setting):
return pin(self.fs, self.path, pin_type, pin_setting)
def create_snapshot(self, snapname):
snappath = os.path.join(self.path,
self.vol_spec.snapshot_dir_prefix.encode('utf-8'),
snapname.encode('utf-8'))
mksnap(self.fs, snappath)
def remove_snapshot(self, snapname):
snappath = os.path.join(self.path,
self.vol_spec.snapshot_dir_prefix.encode('utf-8'),
snapname.encode('utf-8'))
rmsnap(self.fs, snappath)
def list_snapshots(self):
try:
dirpath = os.path.join(self.path,
self.vol_spec.snapshot_dir_prefix.encode('utf-8'))
return listsnaps(self.fs, self.vol_spec, dirpath, filter_inherited_snaps=True)
except VolumeException as ve:
if ve.errno == -errno.ENOENT:
return []
raise
def info(self):
st = self.fs.statx(self.path, cephfs.CEPH_STATX_BTIME | cephfs.CEPH_STATX_SIZE
| cephfs.CEPH_STATX_UID | cephfs.CEPH_STATX_GID | cephfs.CEPH_STATX_MODE
| cephfs.CEPH_STATX_ATIME | cephfs.CEPH_STATX_MTIME | cephfs.CEPH_STATX_CTIME,
cephfs.AT_SYMLINK_NOFOLLOW)
usedbytes = st["size"]
try:
nsize = int(self.fs.getxattr(self.path, 'ceph.quota.max_bytes').decode('utf-8'))
except cephfs.NoData:
nsize = 0
try:
data_pool = self.fs.getxattr(self.path, 'ceph.dir.layout.pool').decode('utf-8')
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
return {'uid': int(st["uid"]),
'gid': int(st["gid"]),
'atime': str(st["atime"]),
'mtime': str(st["mtime"]),
'ctime': str(st["ctime"]),
'mode': int(st["mode"]),
'data_pool': data_pool,
'created_at': str(st["btime"]),
'bytes_quota': "infinite" if nsize == 0 else nsize,
'bytes_used': int(usedbytes),
'bytes_pcent': "undefined" if nsize == 0 else '{0:.2f}'.format((float(usedbytes) / nsize) * 100.0)}
def resize(self, newsize, noshrink):
try:
newsize = int(newsize)
if newsize <= 0:
raise VolumeException(-errno.EINVAL, "Invalid subvolume group size")
except ValueError:
newsize = newsize.lower()
if not (newsize == "inf" or newsize == "infinite"):
raise (VolumeException(-errno.EINVAL, "invalid size option '{0}'".format(newsize)))
newsize = 0
noshrink = False
try:
maxbytes = int(self.fs.getxattr(self.path, 'ceph.quota.max_bytes').decode('utf-8'))
except cephfs.NoData:
maxbytes = 0
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
group_stat = self.fs.stat(self.path)
if newsize > 0 and newsize < group_stat.st_size:
if noshrink:
raise VolumeException(-errno.EINVAL, "Can't resize the subvolume group. The new size"
" '{0}' would be lesser than the current used size '{1}'"
.format(newsize, group_stat.st_size))
if not newsize == maxbytes:
try:
self.fs.setxattr(self.path, 'ceph.quota.max_bytes', str(newsize).encode('utf-8'), 0)
except cephfs.Error as e:
raise (VolumeException(-e.args[0],
"Cannot set new size for the subvolume group. '{0}'".format(e.args[1])))
return newsize, group_stat.st_size
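    # Hedged note (not part of the class): resize() accepts either a byte count
    # or the strings "inf"/"infinite", which clear the quota. E.g., assuming an
    # open group object "grp":
    #
    #   grp.resize("infinite", noshrink=False)   # sets ceph.quota.max_bytes to 0
    #   grp.resize(10737418240, noshrink=True)   # raises if this would shrink below the used size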
def set_group_attrs(fs, path, attrs):
# set subvolume group attrs
# set size
quota = attrs.get("quota")
if quota is not None:
try:
fs.setxattr(path, 'ceph.quota.max_bytes', str(quota).encode('utf-8'), 0)
except cephfs.InvalidValue:
raise VolumeException(-errno.EINVAL, "invalid size specified: '{0}'".format(quota))
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
# set pool layout
pool = attrs.get("data_pool")
if not pool:
pool = get_ancestor_xattr(fs, path, "ceph.dir.layout.pool")
try:
fs.setxattr(path, 'ceph.dir.layout.pool', pool.encode('utf-8'), 0)
except cephfs.InvalidValue:
raise VolumeException(-errno.EINVAL,
"Invalid pool layout '{0}'. It must be a valid data pool".format(pool))
# set uid/gid
uid = attrs.get("uid")
if uid is None:
uid = 0
else:
try:
uid = int(uid)
if uid < 0:
raise ValueError
except ValueError:
raise VolumeException(-errno.EINVAL, "invalid UID")
gid = attrs.get("gid")
if gid is None:
gid = 0
else:
try:
gid = int(gid)
if gid < 0:
raise ValueError
except ValueError:
raise VolumeException(-errno.EINVAL, "invalid GID")
fs.chown(path, uid, gid)
# set mode
mode = attrs.get("mode", None)
if mode is not None:
fs.lchmod(path, mode)
def create_group(fs, vol_spec, groupname, size, pool, mode, uid, gid):
"""
create a subvolume group.
:param fs: ceph filesystem handle
:param vol_spec: volume specification
:param groupname: subvolume group name
:param size: In bytes, or None for no size limit
:param pool: the RADOS pool where the data objects of the subvolumes will be stored
:param mode: the user permissions
:param uid: the user identifier
:param gid: the group identifier
:return: None
"""
group = Group(fs, vol_spec, groupname)
path = group.path
vol_spec_base_dir = group.vol_spec.base_dir.encode('utf-8')
# create vol_spec base directory with default mode(0o755) if it doesn't exist
create_base_dir(fs, vol_spec_base_dir, vol_spec.DEFAULT_MODE)
fs.mkdir(path, mode)
try:
attrs = {
'uid': uid,
'gid': gid,
'data_pool': pool,
'quota': size
}
set_group_attrs(fs, path, attrs)
except (cephfs.Error, VolumeException) as e:
try:
# cleanup group path on best effort basis
log.debug("cleaning up subvolume group path: {0}".format(path))
fs.rmdir(path)
except cephfs.Error as ce:
log.debug("failed to clean up subvolume group {0} with path: {1} ({2})".format(groupname, path, ce))
if isinstance(e, cephfs.Error):
e = VolumeException(-e.args[0], e.args[1])
raise e
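# Hedged usage sketch (not part of the module): creating a group with default
# attributes. "fs_handle" and "vol_spec" are assumed; the group name is
# hypothetical.
#
#   create_group(fs_handle, vol_spec, "grp1", size=None, pool=None,
#                mode=0o755, uid=0, gid=0)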
def remove_group(fs, vol_spec, groupname):
"""
remove a subvolume group.
:param fs: ceph filesystem handle
:param vol_spec: volume specification
:param groupname: subvolume group name
:return: None
"""
group = Group(fs, vol_spec, groupname)
try:
fs.rmdir(group.path)
except cephfs.Error as e:
if e.args[0] == errno.ENOENT:
raise VolumeException(-errno.ENOENT, "subvolume group '{0}' does not exist".format(groupname))
raise VolumeException(-e.args[0], e.args[1])
@contextmanager
def open_group(fs, vol_spec, groupname):
"""
open a subvolume group. This API is to be used as a context manager.
:param fs: ceph filesystem handle
:param vol_spec: volume specification
:param groupname: subvolume group name
:return: yields a group object (subclass of GroupTemplate)
"""
group = Group(fs, vol_spec, groupname)
try:
st = fs.stat(group.path)
group.uid = int(st.st_uid)
group.gid = int(st.st_gid)
except cephfs.Error as e:
if e.args[0] == errno.ENOENT:
if not group.is_default_group():
raise VolumeException(-errno.ENOENT, "subvolume group '{0}' does not exist".format(groupname))
else:
raise VolumeException(-e.args[0], e.args[1])
yield group
@contextmanager
def open_group_unique(fs, vol_spec, groupname, c_group, c_groupname):
if groupname == c_groupname:
yield c_group
else:
with open_group(fs, vol_spec, groupname) as group:
yield group
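# Hedged usage sketch (not part of the module): open_group() is a context
# manager; "fs_handle" and "vol_spec" are assumed to be available.
#
#   with open_group(fs_handle, vol_spec, "grp1") as group:
#       print(group.path, group.uid, group.gid)
#   with open_group(fs_handle, vol_spec, None) as group:   # default (_nogroup) group
#       subvols = group.list_subvolumes()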
ceph-main/src/pybind/mgr/volumes/fs/operations/index.py
import errno
import os
from ..exception import VolumeException
from .template import GroupTemplate
class Index(GroupTemplate):
GROUP_NAME = "_index"
def __init__(self, fs, vol_spec):
self.fs = fs
self.vol_spec = vol_spec
self.groupname = Index.GROUP_NAME
@property
def path(self):
return os.path.join(self.vol_spec.base_dir.encode('utf-8'), self.groupname.encode('utf-8'))
def track(self, *args):
raise VolumeException(-errno.EINVAL, "operation not supported.")
def untrack(self, tracking_id):
raise VolumeException(-errno.EINVAL, "operation not supported.")
ceph-main/src/pybind/mgr/volumes/fs/operations/lock.py
from contextlib import contextmanager
import logging
from threading import Lock
from typing import Dict
log = logging.getLogger(__name__)
# singleton design pattern taken from http://www.aleax.it/5ep.html
class GlobalLock(object):
"""
Global lock to serialize operations in mgr/volumes. This lock
is currently held when accessing (opening) a volume to perform
group/subvolume operations. Since this is a big lock, it's rather
inefficient -- but right now it's ok since mgr/volumes does not
expect concurrent operations via its APIs.
As and when features get added (such as clone, where mgr/volumes
would maintain subvolume states in the filesystem), there might
be a need to allow concurrent operations. In that case it would
be nice to implement an efficient path based locking mechanism.
See: https://people.eecs.berkeley.edu/~kubitron/courses/cs262a-F14/projects/reports/project6_report.pdf
"""
_shared_state = {
'lock' : Lock(),
'init' : False
} # type: Dict
def __init__(self):
with self._shared_state['lock']:
if not self._shared_state['init']:
self._shared_state['init'] = True
# share this state among all instances
self.__dict__ = self._shared_state
@contextmanager
def lock_op(self):
log.debug("entering global lock")
with self._shared_state['lock']:
log.debug("acquired global lock")
yield
log.debug("exited global lock")
ceph-main/src/pybind/mgr/volumes/fs/operations/pin_util.py
import os
import errno
import cephfs
from ..exception import VolumeException
from distutils.util import strtobool
_pin_value = {
"export": lambda x: int(x),
"distributed": lambda x: int(strtobool(x)),
"random": lambda x: float(x),
}
_pin_xattr = {
"export": "ceph.dir.pin",
"distributed": "ceph.dir.pin.distributed",
"random": "ceph.dir.pin.random",
}
def pin(fs, path, pin_type, pin_setting):
"""
Set a pin on a directory.
"""
assert pin_type in _pin_xattr
try:
pin_setting = _pin_value[pin_type](pin_setting)
except ValueError as e:
raise VolumeException(-errno.EINVAL, f"pin value wrong type: {pin_setting}")
try:
fs.setxattr(path, _pin_xattr[pin_type], str(pin_setting).encode('utf-8'), 0)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
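# Hedged examples (not part of the module) of how pin() maps its arguments to
# xattrs; "fs" is an assumed cephfs handle and the path is hypothetical.
#
#   pin(fs, b"/volumes/grp1", "export", "2")          # ceph.dir.pin = "2"
#   pin(fs, b"/volumes/grp1", "distributed", "True")  # ceph.dir.pin.distributed = "1"
#   pin(fs, b"/volumes/grp1", "random", "0.01")       # ceph.dir.pin.random = "0.01"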
ceph-main/src/pybind/mgr/volumes/fs/operations/rankevicter.py
import errno
import json
import logging
import threading
import time
from .volume import get_mds_map
from ..exception import ClusterTimeout, ClusterError
log = logging.getLogger(__name__)
class RankEvicter(threading.Thread):
"""
Thread for evicting client(s) from a particular MDS daemon instance.
This is more complex than simply sending a command, because we have to
handle cases where MDS daemons might not be fully up yet, and/or might
be transiently unresponsive to commands.
"""
class GidGone(Exception):
pass
POLL_PERIOD = 5
def __init__(self, mgr, fs, client_spec, volname, rank, gid, mds_map, ready_timeout):
"""
:param client_spec: list of strings, used as filter arguments to "session evict"
pass ["id=123"] to evict a single client with session id 123.
"""
self.volname = volname
self.rank = rank
self.gid = gid
self._mds_map = mds_map
self._client_spec = client_spec
self._fs = fs
self._ready_timeout = ready_timeout
self._ready_waited = 0
self.mgr = mgr
self.success = False
self.exception = None
super(RankEvicter, self).__init__()
def _ready_to_evict(self):
if self._mds_map['up'].get("mds_{0}".format(self.rank), None) != self.gid:
log.info("Evicting {0} from {1}/{2}: rank no longer associated with gid, done.".format(
self._client_spec, self.rank, self.gid
))
raise RankEvicter.GidGone()
info = self._mds_map['info']["gid_{0}".format(self.gid)]
log.debug("_ready_to_evict: state={0}".format(info['state']))
return info['state'] in ["up:active", "up:clientreplay"]
def _wait_for_ready(self):
"""
Wait for that MDS rank to reach an active or clientreplay state, and
not be laggy.
"""
while not self._ready_to_evict():
if self._ready_waited > self._ready_timeout:
raise ClusterTimeout()
time.sleep(self.POLL_PERIOD)
self._ready_waited += self.POLL_PERIOD
self._mds_map = get_mds_map(self.mgr, self.volname)
def _evict(self):
"""
Run the eviction procedure. Return true on success, false on errors.
"""
        # Wait until the MDS is believed by the mon to be available for commands
try:
self._wait_for_ready()
except self.GidGone:
return True
# Then send it an evict
ret = -errno.ETIMEDOUT
while ret == -errno.ETIMEDOUT:
log.debug("mds_command: {0}, {1}".format(
"%s" % self.gid, ["session", "evict"] + self._client_spec
))
ret, outb, outs = self._fs.mds_command(
"%s" % self.gid,
json.dumps({
"prefix": "session evict",
"filters": self._client_spec
}), "")
log.debug("mds_command: complete {0} {1}".format(ret, outs))
# If we get a clean response, great, it's gone from that rank.
if ret == 0:
return True
elif ret == -errno.ETIMEDOUT:
# Oh no, the MDS went laggy (that's how libcephfs knows to emit this error)
self._mds_map = get_mds_map(self.mgr, self.volname)
try:
self._wait_for_ready()
except self.GidGone:
return True
else:
raise ClusterError("Sending evict to mds.{0}".format(self.gid), ret, outs)
def run(self):
try:
self._evict()
except Exception as e:
self.success = False
self.exception = e
else:
self.success = True
| 3,879 | 32.73913 | 99 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/fs/operations/resolver.py
|
import os
from .group import Group
def splitall(path):
if path == "/":
return ["/"]
s = os.path.split(path)
return splitall(s[0]) + [s[1]]
def resolve(vol_spec, path):
parts = splitall(path)
if len(parts) != 4 or os.path.join(parts[0], parts[1]) != vol_spec.subvolume_prefix:
return None
groupname = None if parts[2] == Group.NO_GROUP_NAME else parts[2]
subvolname = parts[3]
return (groupname, subvolname)
def resolve_trash(vol_spec, path):
parts = splitall(path)
if len(parts) != 6 or os.path.join(parts[0], parts[1]) != vol_spec.subvolume_prefix or \
parts[4] != '.trash':
return None
groupname = None if parts[2] == Group.NO_GROUP_NAME else parts[2]
subvolname = parts[3]
return (groupname, subvolname)
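# Hedged examples (not part of the module), assuming vol_spec.subvolume_prefix
# is "/volumes"; the group/subvolume names are hypothetical.
#
#   resolve(vol_spec, "/volumes/grp1/sv1")        # -> ("grp1", "sv1")
#   resolve(vol_spec, "/volumes/_nogroup/sv1")    # -> (None, "sv1")
#   resolve(vol_spec, "/volumes/grp1")            # -> None (not a subvolume path)
#   resolve_trash(vol_spec, "/volumes/grp1/sv1/.trash/some-uuid")   # -> ("grp1", "sv1")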
ceph-main/src/pybind/mgr/volumes/fs/operations/snapshot_util.py
import os
import errno
import cephfs
from ..exception import VolumeException
def mksnap(fs, snappath):
"""
Create a snapshot, or do nothing if it already exists.
"""
try:
# snap create does not accept mode -- use default
fs.mkdir(snappath, 0o755)
except cephfs.ObjectExists:
return
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def rmsnap(fs, snappath):
"""
Remove a snapshot
"""
try:
fs.stat(snappath)
fs.rmdir(snappath)
except cephfs.ObjectNotFound:
raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(os.path.basename(snappath)))
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
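# Hedged usage sketch (not part of the module); the snapshot path below is
# hypothetical and "fs" is an assumed cephfs handle.
#
#   snappath = b"/volumes/grp1/sv1/.snap/snap1"
#   mksnap(fs, snappath)   # no-op if the snapshot already exists
#   rmsnap(fs, snappath)   # raises VolumeException(-errno.ENOENT, ...) if missing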
ceph-main/src/pybind/mgr/volumes/fs/operations/subvolume.py
from contextlib import contextmanager
from .template import SubvolumeOpType
from .versions import loaded_subvolumes
def create_subvol(mgr, fs, vol_spec, group, subvolname, size, isolate_nspace, pool, mode, uid, gid):
"""
    create a subvolume (with the maximum known subvolume version).
:param fs: ceph filesystem handle
:param vol_spec: volume specification
:param group: group object for the subvolume
:param size: In bytes, or None for no size limit
:param isolate_nspace: If true, use separate RADOS namespace for this subvolume
:param pool: the RADOS pool where the data objects of the subvolumes will be stored
:param mode: the user permissions
:param uid: the user identifier
:param gid: the group identifier
:return: None
"""
subvolume = loaded_subvolumes.get_subvolume_object_max(mgr, fs, vol_spec, group, subvolname)
subvolume.create(size, isolate_nspace, pool, mode, uid, gid)
def create_clone(mgr, fs, vol_spec, group, subvolname, pool, source_volume, source_subvolume, snapname):
"""
create a cloned subvolume.
:param fs: ceph filesystem handle
:param vol_spec: volume specification
:param group: group object for the clone
    :param subvolname: clone subvolume name
:param pool: the RADOS pool where the data objects of the cloned subvolume will be stored
:param source_volume: source subvolumes volume name
:param source_subvolume: source (parent) subvolume object
:param snapname: source subvolume snapshot
    :return: None
"""
subvolume = loaded_subvolumes.get_subvolume_object_max(mgr, fs, vol_spec, group, subvolname)
subvolume.create_clone(pool, source_volume, source_subvolume, snapname)
def remove_subvol(mgr, fs, vol_spec, group, subvolname, force=False, retainsnaps=False):
"""
remove a subvolume.
:param fs: ceph filesystem handle
:param vol_spec: volume specification
:param group: group object for the subvolume
:param subvolname: subvolume name
    :param force: force remove subvolumes
    :param retainsnaps: retain the subvolume's snapshots instead of removing them
    :return: None
"""
op_type = SubvolumeOpType.REMOVE if not force else SubvolumeOpType.REMOVE_FORCE
with open_subvol(mgr, fs, vol_spec, group, subvolname, op_type) as subvolume:
subvolume.remove(retainsnaps)
@contextmanager
def open_subvol(mgr, fs, vol_spec, group, subvolname, op_type):
"""
open a subvolume. This API is to be used as a context manager.
:param fs: ceph filesystem handle
:param vol_spec: volume specification
:param group: group object for the subvolume
:param subvolname: subvolume name
:param op_type: operation type for which subvolume is being opened
:return: yields a subvolume object (subclass of SubvolumeTemplate)
"""
subvolume = loaded_subvolumes.get_subvolume_object(mgr, fs, vol_spec, group, subvolname)
subvolume.open(op_type)
yield subvolume
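# Hedged usage sketch (not part of the module): open_subvol() is a context
# manager gated by the SubvolumeOpType being performed. "fs_handle", "vol_spec"
# and "group" are assumed to come from open_volume()/open_group().
#
#   with open_subvol(mgr, fs_handle, vol_spec, group, "sv1",
#                    SubvolumeOpType.GETPATH) as subvolume:
#       print(subvolume.path.decode("utf-8"))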
ceph-main/src/pybind/mgr/volumes/fs/operations/template.py
import errno
from enum import Enum, unique
from ..exception import VolumeException
class GroupTemplate(object):
def list_subvolumes(self):
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def create_snapshot(self, snapname):
"""
create a subvolume group snapshot.
:param: group snapshot name
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def remove_snapshot(self, snapname):
"""
remove a subvolume group snapshot.
:param: group snapshot name
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def list_snapshots(self):
"""
list all subvolume group snapshots.
:param: None
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
@unique
class SubvolumeOpType(Enum):
CREATE = 'create'
REMOVE = 'rm'
REMOVE_FORCE = 'rm-force'
PIN = 'pin'
LIST = 'ls'
GETPATH = 'getpath'
INFO = 'info'
RESIZE = 'resize'
SNAP_CREATE = 'snap-create'
SNAP_REMOVE = 'snap-rm'
SNAP_LIST = 'snap-ls'
SNAP_INFO = 'snap-info'
SNAP_PROTECT = 'snap-protect'
SNAP_UNPROTECT = 'snap-unprotect'
CLONE_SOURCE = 'clone-source'
CLONE_CREATE = 'clone-create'
CLONE_STATUS = 'clone-status'
CLONE_CANCEL = 'clone-cancel'
CLONE_INTERNAL = 'clone_internal'
ALLOW_ACCESS = 'allow-access'
DENY_ACCESS = 'deny-access'
AUTH_LIST = 'auth-list'
EVICT = 'evict'
USER_METADATA_SET = 'user-metadata-set'
USER_METADATA_GET = 'user-metadata-get'
USER_METADATA_LIST = 'user-metadata-ls'
USER_METADATA_REMOVE = 'user-metadata-rm'
SNAP_METADATA_SET = 'snap-metadata-set'
SNAP_METADATA_GET = 'snap-metadata-get'
SNAP_METADATA_LIST = 'snap-metadata-ls'
SNAP_METADATA_REMOVE = 'snap-metadata-rm'
class SubvolumeTemplate(object):
VERSION = None # type: int
@staticmethod
def version():
return SubvolumeTemplate.VERSION
def open(self, op_type):
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def status(self):
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def create(self, size, isolate_nspace, pool, mode, uid, gid):
"""
set up metadata, pools and auth for a subvolume.
This function is idempotent. It is safe to call this again
for an already-created subvolume, even if it is in use.
:param size: In bytes, or None for no size limit
:param isolate_nspace: If true, use separate RADOS namespace for this subvolume
:param pool: the RADOS pool where the data objects of the subvolumes will be stored
:param mode: the user permissions
:param uid: the user identifier
:param gid: the group identifier
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def create_clone(self, pool, source_volname, source_subvolume, snapname):
"""
prepare a subvolume to be cloned.
:param pool: the RADOS pool where the data objects of the cloned subvolume will be stored
:param source_volname: source volume of snapshot
:param source_subvolume: source subvolume of snapshot
:param snapname: snapshot name to be cloned from
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def remove(self):
"""
make a subvolume inaccessible to guests.
        This function is idempotent. It is safe to call this again for an already-removed subvolume.
:param: None
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def resize(self, newsize, nshrink):
"""
resize a subvolume
        :param newsize: new size in bytes (or inf/infinite)
        :param nshrink: if True, do not allow shrinking below the current used size
:return: new quota size and used bytes as a tuple
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def pin(self, pin_type, pin_setting):
"""
pin a subvolume
:param pin_type: type of pin
:param pin_setting: setting for pin
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def create_snapshot(self, snapname):
"""
snapshot a subvolume.
:param: subvolume snapshot name
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def remove_snapshot(self, snapname):
"""
remove a subvolume snapshot.
:param: subvolume snapshot name
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def list_snapshots(self):
"""
list all subvolume snapshots.
:param: None
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def attach_snapshot(self, snapname, tgt_subvolume):
"""
attach a snapshot to a target cloned subvolume. the target subvolume
should be an empty subvolume (type "clone") in "pending" state.
:param: snapname: snapshot to attach to a clone
:param: tgt_subvolume: target clone subvolume
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
def detach_snapshot(self, snapname, tgt_subvolume):
"""
detach a snapshot from a target cloned subvolume. the target subvolume
should either be in "failed" or "completed" state.
:param: snapname: snapshot to detach from a clone
:param: tgt_subvolume: target clone subvolume
:return: None
"""
raise VolumeException(-errno.ENOTSUP, "operation not supported.")
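# Illustrative sketch, not part of the module: concrete subvolume versions
# (e.g. SubvolumeV1/SubvolumeV2 elsewhere in this package) subclass
# SubvolumeTemplate and override the hooks above; any hook left untouched keeps
# raising -ENOTSUP. The minimal override below is hypothetical.
class _ExampleSubvolume(SubvolumeTemplate):
    VERSION = 0 # hypothetical version number; shipped versions start at 1
    def open(self, op_type):
        # a real implementation would also validate on-disk state here
        if not isinstance(op_type, SubvolumeOpType):
            raise VolumeException(-errno.EINVAL,
                                  "invalid operation type '{0}'".format(op_type))
    def remove(self):
        # nothing to tear down in this sketch
        return None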
| 6,234 | 31.473958 | 97 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/fs/operations/trash.py
|
import os
import uuid
import logging
from contextlib import contextmanager
import cephfs
from .template import GroupTemplate
from ..fs_util import listdir
from ..exception import VolumeException
log = logging.getLogger(__name__)
class Trash(GroupTemplate):
GROUP_NAME = "_deleting"
def __init__(self, fs, vol_spec):
self.fs = fs
self.vol_spec = vol_spec
self.groupname = Trash.GROUP_NAME
@property
def path(self):
return os.path.join(self.vol_spec.base_dir.encode('utf-8'), self.groupname.encode('utf-8'))
@property
def unique_trash_path(self):
"""
return a unique trash directory entry path
"""
return os.path.join(self.path, str(uuid.uuid4()).encode('utf-8'))
    def _get_single_dir_entry(self, exclude_list=None):
        # copy to avoid mutating the caller's list or a shared default argument
        exclude_list = list(exclude_list) if exclude_list else []
        exclude_list.extend((b".", b".."))
try:
with self.fs.opendir(self.path) as d:
entry = self.fs.readdir(d)
while entry:
if entry.d_name not in exclude_list:
return entry.d_name
entry = self.fs.readdir(d)
return None
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def get_trash_entry(self, exclude_list):
"""
get a trash entry excluding entries provided.
        :param exclude_list: entries to exclude
:return: trash entry
"""
return self._get_single_dir_entry(exclude_list)
def purge(self, trashpath, should_cancel):
"""
purge a trash entry.
        :param trashpath: path of the trash entry to purge
        :param should_cancel: callback to check if the purge should be aborted
:return: None
"""
def rmtree(root_path):
log.debug("rmtree {0}".format(root_path))
try:
with self.fs.opendir(root_path) as dir_handle:
d = self.fs.readdir(dir_handle)
while d and not should_cancel():
if d.d_name not in (b".", b".."):
d_full = os.path.join(root_path, d.d_name)
if d.is_dir():
rmtree(d_full)
else:
self.fs.unlink(d_full)
d = self.fs.readdir(dir_handle)
except cephfs.ObjectNotFound:
return
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
# remove the directory only if we were not asked to cancel
# (else we would fail to remove this anyway)
if not should_cancel():
self.fs.rmdir(root_path)
# catch any unlink errors
try:
rmtree(trashpath)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def dump(self, path):
"""
        move a filesystem entity to the trash can.
        :param path: the filesystem path to be moved
:return: None
"""
try:
self.fs.rename(path, self.unique_trash_path)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def link(self, path, bname):
pth = os.path.join(self.path, bname)
try:
self.fs.symlink(path, pth)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def delink(self, bname):
pth = os.path.join(self.path, bname)
try:
self.fs.unlink(pth)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def create_trashcan(fs, vol_spec):
"""
create a trash can.
:param fs: ceph filesystem handle
:param vol_spec: volume specification
:return: None
"""
trashcan = Trash(fs, vol_spec)
try:
fs.mkdirs(trashcan.path, 0o700)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
@contextmanager
def open_trashcan(fs, vol_spec):
"""
open a trash can. This API is to be used as a context manager.
:param fs: ceph filesystem handle
:param vol_spec: volume specification
:return: yields a trash can object (subclass of GroupTemplate)
"""
trashcan = Trash(fs, vol_spec)
try:
fs.stat(trashcan.path)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
yield trashcan
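# Illustrative sketch, not part of the module: typical trash can usage -- make
# sure it exists, move a directory into it, then purge a pending entry. The
# fs/vol_spec handles and 'subvol_path' (bytes) are assumed inputs; the helper
# name is hypothetical.
def example_trash_and_purge(fs, vol_spec, subvol_path):
    create_trashcan(fs, vol_spec)
    with open_trashcan(fs, vol_spec) as trashcan:
        trashcan.dump(subvol_path)
        entry = trashcan.get_trash_entry([])
        if entry:
            trashcan.purge(os.path.join(trashcan.path, entry),
                           should_cancel=lambda: False)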
| 4,512 | 29.910959 | 99 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/fs/operations/volume.py
|
import errno
import logging
import os
from typing import List, Tuple
from contextlib import contextmanager
import orchestrator
from .lock import GlobalLock
from ..exception import VolumeException
from ..fs_util import create_pool, remove_pool, rename_pool, create_filesystem, \
remove_filesystem, rename_filesystem, create_mds, volume_exists
from .trash import Trash
from mgr_util import open_filesystem, CephfsConnectionException
log = logging.getLogger(__name__)
def gen_pool_names(volname):
"""
return metadata and data pool name (from a filesystem/volume name) as a tuple
"""
return "cephfs.{}.meta".format(volname), "cephfs.{}.data".format(volname)
def get_mds_map(mgr, volname):
"""
return mdsmap for a volname
"""
mds_map = None
fs_map = mgr.get("fs_map")
for f in fs_map['filesystems']:
if volname == f['mdsmap']['fs_name']:
return f['mdsmap']
return mds_map
def get_pool_names(mgr, volname):
"""
return metadata and data pools (list) names of volume as a tuple
"""
fs_map = mgr.get("fs_map")
metadata_pool_id = None
data_pool_ids = [] # type: List[int]
for f in fs_map['filesystems']:
if volname == f['mdsmap']['fs_name']:
metadata_pool_id = f['mdsmap']['metadata_pool']
data_pool_ids = f['mdsmap']['data_pools']
break
if metadata_pool_id is None:
return None, None
osdmap = mgr.get("osd_map")
pools = dict([(p['pool'], p['pool_name']) for p in osdmap['pools']])
metadata_pool = pools[metadata_pool_id]
data_pools = [pools[id] for id in data_pool_ids]
return metadata_pool, data_pools
def get_pool_ids(mgr, volname):
"""
return metadata and data pools (list) id of volume as a tuple
"""
fs_map = mgr.get("fs_map")
metadata_pool_id = None
data_pool_ids = [] # type: List[int]
for f in fs_map['filesystems']:
if volname == f['mdsmap']['fs_name']:
metadata_pool_id = f['mdsmap']['metadata_pool']
data_pool_ids = f['mdsmap']['data_pools']
break
if metadata_pool_id is None:
return None, None
return metadata_pool_id, data_pool_ids
def create_volume(mgr, volname, placement):
"""
create volume (pool, filesystem and mds)
"""
metadata_pool, data_pool = gen_pool_names(volname)
# create pools
r, outb, outs = create_pool(mgr, metadata_pool)
if r != 0:
return r, outb, outs
r, outb, outs = create_pool(mgr, data_pool)
if r != 0:
#cleanup
remove_pool(mgr, metadata_pool)
return r, outb, outs
# create filesystem
r, outb, outs = create_filesystem(mgr, volname, metadata_pool, data_pool)
if r != 0:
log.error("Filesystem creation error: {0} {1} {2}".format(r, outb, outs))
#cleanup
remove_pool(mgr, data_pool)
remove_pool(mgr, metadata_pool)
return r, outb, outs
return create_mds(mgr, volname, placement)
def delete_volume(mgr, volname, metadata_pool, data_pools):
"""
    delete the given volume (tear down MDS daemons, remove the filesystem and its pools)
"""
# Tear down MDS daemons
try:
completion = mgr.remove_service('mds.' + volname)
orchestrator.raise_if_exception(completion)
except (ImportError, orchestrator.OrchestratorError):
log.warning("OrchestratorError, not tearing down MDS daemons")
except Exception as e:
# Don't let detailed orchestrator exceptions (python backtraces)
# bubble out to the user
log.exception("Failed to tear down MDS daemons")
return -errno.EINVAL, "", str(e)
# In case orchestrator didn't tear down MDS daemons cleanly, or
# there was no orchestrator, we force the daemons down.
if volume_exists(mgr, volname):
r, outb, outs = remove_filesystem(mgr, volname)
if r != 0:
return r, outb, outs
else:
err = "Filesystem not found for volume '{0}'".format(volname)
log.warning(err)
return -errno.ENOENT, "", err
r, outb, outs = remove_pool(mgr, metadata_pool)
if r != 0:
return r, outb, outs
for data_pool in data_pools:
r, outb, outs = remove_pool(mgr, data_pool)
if r != 0:
return r, outb, outs
result_str = "metadata pool: {0} data pool: {1} removed".format(metadata_pool, str(data_pools))
return r, result_str, ""
def rename_volume(mgr, volname: str, newvolname: str) -> Tuple[int, str, str]:
"""
rename volume (orch MDS service, file system, pools)
"""
# To allow volume rename to be idempotent, check whether orch managed MDS
# service is already renamed. If so, skip renaming MDS service.
completion = None
rename_mds_service = True
try:
completion = mgr.describe_service(
service_type='mds', service_name=f"mds.{newvolname}", refresh=True)
orchestrator.raise_if_exception(completion)
except (ImportError, orchestrator.OrchestratorError):
log.warning("Failed to fetch orch service mds.%s", newvolname)
except Exception as e:
# Don't let detailed orchestrator exceptions (python backtraces)
# bubble out to the user
log.exception("Failed to fetch orch service mds.%s", newvolname)
return -errno.EINVAL, "", str(e)
if completion and completion.result:
rename_mds_service = False
# Launch new MDS service matching newvolname
completion = None
remove_mds_service = False
if rename_mds_service:
try:
completion = mgr.describe_service(
service_type='mds', service_name=f"mds.{volname}", refresh=True)
orchestrator.raise_if_exception(completion)
except (ImportError, orchestrator.OrchestratorError):
log.warning("Failed to fetch orch service mds.%s", volname)
except Exception as e:
# Don't let detailed orchestrator exceptions (python backtraces)
# bubble out to the user
log.exception("Failed to fetch orch service mds.%s", volname)
return -errno.EINVAL, "", str(e)
if completion and completion.result:
svc = completion.result[0]
placement = svc.spec.placement.pretty_str()
create_mds(mgr, newvolname, placement)
remove_mds_service = True
    # rename_filesystem is idempotent
r, outb, outs = rename_filesystem(mgr, volname, newvolname)
if r != 0:
errmsg = f"Failed to rename file system '{volname}' to '{newvolname}'"
log.error("Failed to rename file system '%s' to '%s'", volname, newvolname)
outs = f'{errmsg}; {outs}'
return r, outb, outs
# Rename file system's metadata and data pools
metadata_pool, data_pools = get_pool_names(mgr, newvolname)
new_metadata_pool, new_data_pool = gen_pool_names(newvolname)
if metadata_pool != new_metadata_pool:
r, outb, outs = rename_pool(mgr, metadata_pool, new_metadata_pool)
if r != 0:
errmsg = f"Failed to rename metadata pool '{metadata_pool}' to '{new_metadata_pool}'"
log.error("Failed to rename metadata pool '%s' to '%s'", metadata_pool, new_metadata_pool)
outs = f'{errmsg}; {outs}'
return r, outb, outs
data_pool_rename_failed = False
# If file system has more than one data pool, then skip renaming
# the data pools, and proceed to remove the old MDS service.
if len(data_pools) > 1:
data_pool_rename_failed = True
else:
data_pool = data_pools[0]
if data_pool != new_data_pool:
r, outb, outs = rename_pool(mgr, data_pool, new_data_pool)
if r != 0:
errmsg = f"Failed to rename data pool '{data_pool}' to '{new_data_pool}'"
log.error("Failed to rename data pool '%s' to '%s'", data_pool, new_data_pool)
outs = f'{errmsg}; {outs}'
return r, outb, outs
# Tear down old MDS service
if remove_mds_service:
try:
completion = mgr.remove_service('mds.' + volname)
orchestrator.raise_if_exception(completion)
except (ImportError, orchestrator.OrchestratorError):
log.warning("Failed to tear down orch service mds.%s", volname)
except Exception as e:
# Don't let detailed orchestrator exceptions (python backtraces)
# bubble out to the user
log.exception("Failed to tear down orch service mds.%s", volname)
return -errno.EINVAL, "", str(e)
outb = f"FS volume '{volname}' renamed to '{newvolname}'"
if data_pool_rename_failed:
outb += ". But failed to rename data pools as more than one data pool was found."
return r, outb, ""
def list_volumes(mgr):
"""
list all filesystem volumes.
:param: None
:return: None
"""
result = []
fs_map = mgr.get("fs_map")
for f in fs_map['filesystems']:
result.append({'name': f['mdsmap']['fs_name']})
return result
def get_pending_subvol_deletions_count(path):
"""
Get the number of pending subvolumes deletions.
"""
trashdir = os.path.join(path, Trash.GROUP_NAME)
try:
num_pending_subvol_del = len(os.listdir(trashdir))
except OSError as e:
        if e.errno == errno.ENOENT:
            num_pending_subvol_del = 0
        else:
            raise
return {'pending_subvolume_deletions': num_pending_subvol_del}
@contextmanager
def open_volume(vc, volname):
"""
    open a volume for exclusive access. This API is to be used as a context
manager.
:param vc: volume client instance
:param volname: volume name
:return: yields a volume handle (ceph filesystem handle)
"""
g_lock = GlobalLock()
with g_lock.lock_op():
try:
with open_filesystem(vc, volname) as fs_handle:
yield fs_handle
except CephfsConnectionException as ce:
raise VolumeException(ce.errno, ce.error_str)
@contextmanager
def open_volume_lockless(vc, volname):
"""
open a volume with shared access. This API is to be used as a context
manager.
:param vc: volume client instance
:param volname: volume name
:return: yields a volume handle (ceph filesystem handle)
"""
try:
with open_filesystem(vc, volname) as fs_handle:
yield fs_handle
except CephfsConnectionException as ce:
raise VolumeException(ce.errno, ce.error_str)
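# Illustrative sketch, not part of the module: open_volume() serialises
# mutating operations behind the global volume lock, while
# open_volume_lockless() suits read-mostly callers. 'vc' is assumed to be the
# volume client instance; the helper name and 'path' argument are hypothetical.
def example_stat_path(vc, volname, path):
    with open_volume_lockless(vc, volname) as fs_handle:
        # any libcephfs call can be issued on the yielded handle
        return fs_handle.stat(path)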
| 10,509 | 34.627119 | 102 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/fs/operations/versions/__init__.py
|
import errno
import logging
import importlib
import cephfs
from .subvolume_base import SubvolumeBase
from .subvolume_attrs import SubvolumeTypes
from .subvolume_v1 import SubvolumeV1
from .subvolume_v2 import SubvolumeV2
from .metadata_manager import MetadataManager
from .op_sm import SubvolumeOpSm
from ..template import SubvolumeOpType
from ...exception import MetadataMgrException, OpSmException, VolumeException
log = logging.getLogger(__name__)
class SubvolumeLoader(object):
INVALID_VERSION = -1
SUPPORTED_MODULES = ['subvolume_v1.SubvolumeV1', 'subvolume_v2.SubvolumeV2']
def __init__(self):
self.max_version = SubvolumeLoader.INVALID_VERSION
self.versions = {}
def _load_module(self, mod_cls):
mod_name, cls_name = mod_cls.split('.')
mod = importlib.import_module('.versions.{0}'.format(mod_name), package='volumes.fs.operations')
return getattr(mod, cls_name)
def _load_supported_versions(self):
for mod_cls in SubvolumeLoader.SUPPORTED_MODULES:
cls = self._load_module(mod_cls)
log.info("loaded v{0} subvolume".format(cls.version()))
            # register every supported version and keep track of the newest one
            self.versions[cls.version()] = cls
            if self.max_version == SubvolumeLoader.INVALID_VERSION or cls.version() > self.max_version:
                self.max_version = cls.version()
if self.max_version == SubvolumeLoader.INVALID_VERSION:
raise VolumeException(-errno.EINVAL, "no subvolume version available")
log.info("max subvolume version is v{0}".format(self.max_version))
def _get_subvolume_version(self, version):
try:
return self.versions[version]
except KeyError:
raise VolumeException(-errno.EINVAL, "subvolume class v{0} does not exist".format(version))
def get_subvolume_object_max(self, mgr, fs, vol_spec, group, subvolname):
return self._get_subvolume_version(self.max_version)(mgr, fs, vol_spec, group, subvolname)
def upgrade_to_v2_subvolume(self, subvolume):
# legacy mode subvolumes cannot be upgraded to v2
if subvolume.legacy_mode:
return
version = int(subvolume.metadata_mgr.get_global_option('version'))
if version >= SubvolumeV2.version():
return
v1_subvolume = self._get_subvolume_version(version)(subvolume.mgr, subvolume.fs, subvolume.vol_spec, subvolume.group, subvolume.subvolname)
try:
v1_subvolume.open(SubvolumeOpType.SNAP_LIST)
except VolumeException as ve:
# if volume is not ready for snapshot listing, do not upgrade at present
if ve.errno == -errno.EAGAIN:
return
raise
# v1 subvolumes with snapshots cannot be upgraded to v2
if v1_subvolume.list_snapshots():
return
subvolume.metadata_mgr.update_global_section(MetadataManager.GLOBAL_META_KEY_VERSION, SubvolumeV2.version())
subvolume.metadata_mgr.flush()
def upgrade_legacy_subvolume(self, fs, subvolume):
assert subvolume.legacy_mode
try:
fs.mkdirs(subvolume.legacy_dir, 0o700)
except cephfs.Error as e:
raise VolumeException(-e.args[0], "error accessing subvolume")
subvolume_type = SubvolumeTypes.TYPE_NORMAL
try:
initial_state = SubvolumeOpSm.get_init_state(subvolume_type)
except OpSmException as oe:
raise VolumeException(-errno.EINVAL, "subvolume creation failed: internal error")
qpath = subvolume.base_path.decode('utf-8')
# legacy is only upgradable to v1
subvolume.init_config(SubvolumeV1.version(), subvolume_type, qpath, initial_state)
def get_subvolume_object(self, mgr, fs, vol_spec, group, subvolname, upgrade=True):
subvolume = SubvolumeBase(mgr, fs, vol_spec, group, subvolname)
try:
subvolume.discover()
self.upgrade_to_v2_subvolume(subvolume)
version = int(subvolume.metadata_mgr.get_global_option('version'))
subvolume_version_object = self._get_subvolume_version(version)(mgr, fs, vol_spec, group, subvolname, legacy=subvolume.legacy_mode)
subvolume_version_object.metadata_mgr.refresh()
subvolume_version_object.clean_stale_snapshot_metadata()
return subvolume_version_object
except MetadataMgrException as me:
if me.errno == -errno.ENOENT and upgrade:
self.upgrade_legacy_subvolume(fs, subvolume)
return self.get_subvolume_object(mgr, fs, vol_spec, group, subvolname, upgrade=False)
else:
# log the actual error and generalize error string returned to user
log.error("error accessing subvolume metadata for '{0}' ({1})".format(subvolname, me))
raise VolumeException(-errno.EINVAL, "error accessing subvolume metadata")
loaded_subvolumes = SubvolumeLoader()
loaded_subvolumes._load_supported_versions()
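# Illustrative sketch, not part of the module: callers normally go through the
# module-level 'loaded_subvolumes' singleton, which picks the right versioned
# class and transparently upgrades legacy/v1 metadata where possible. The
# mgr/fs/vol_spec/group arguments are assumed to come from an open volume
# context; the helper name is hypothetical.
def example_get_subvolume_path(mgr, fs, vol_spec, group, subvolname):
    subvolume = loaded_subvolumes.get_subvolume_object(mgr, fs, vol_spec, group, subvolname)
    subvolume.open(SubvolumeOpType.GETPATH)
    return subvolume.path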
| 4,982 | 43.097345 | 147 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/fs/operations/versions/auth_metadata.py
|
from contextlib import contextmanager
import os
import fcntl
import json
import logging
import struct
import uuid
import cephfs
from ..group import Group
log = logging.getLogger(__name__)
class AuthMetadataError(Exception):
pass
class AuthMetadataManager(object):
# Current version
version = 6
# Filename extensions for meta files.
META_FILE_EXT = ".meta"
DEFAULT_VOL_PREFIX = "/volumes"
def __init__(self, fs):
self.fs = fs
self._id = struct.unpack(">Q", uuid.uuid1().bytes[0:8])[0]
self.volume_prefix = self.DEFAULT_VOL_PREFIX
def _to_bytes(self, param):
'''
Helper method that returns byte representation of the given parameter.
'''
if isinstance(param, str):
return param.encode('utf-8')
elif param is None:
return param
else:
return str(param).encode('utf-8')
def _subvolume_metadata_path(self, group_name, subvol_name):
return os.path.join(self.volume_prefix, "_{0}:{1}{2}".format(
group_name if group_name != Group.NO_GROUP_NAME else "",
subvol_name,
self.META_FILE_EXT))
def _check_compat_version(self, compat_version):
if self.version < compat_version:
msg = ("The current version of AuthMetadataManager, version {0} "
"does not support the required feature. Need version {1} "
"or greater".format(self.version, compat_version)
)
log.error(msg)
raise AuthMetadataError(msg)
def _metadata_get(self, path):
"""
Return a deserialized JSON object, or None
"""
fd = self.fs.open(path, "r")
# TODO iterate instead of assuming file < 4MB
read_bytes = self.fs.read(fd, 0, 4096 * 1024)
self.fs.close(fd)
if read_bytes:
return json.loads(read_bytes.decode())
else:
return None
def _metadata_set(self, path, data):
serialized = json.dumps(data)
fd = self.fs.open(path, "w")
try:
self.fs.write(fd, self._to_bytes(serialized), 0)
self.fs.fsync(fd, 0)
finally:
self.fs.close(fd)
def _lock(self, path):
@contextmanager
def fn():
            while True:
fd = self.fs.open(path, os.O_CREAT, 0o755)
self.fs.flock(fd, fcntl.LOCK_EX, self._id)
# The locked file will be cleaned up sometime. It could be
                # unlinked by a consumer, e.g., another manila-share service
# instance, before lock was applied on it. Perform checks to
# ensure that this does not happen.
try:
statbuf = self.fs.stat(path)
except cephfs.ObjectNotFound:
self.fs.close(fd)
continue
fstatbuf = self.fs.fstat(fd)
if statbuf.st_ino == fstatbuf.st_ino:
break
try:
yield
finally:
self.fs.flock(fd, fcntl.LOCK_UN, self._id)
self.fs.close(fd)
return fn()
def _auth_metadata_path(self, auth_id):
return os.path.join(self.volume_prefix, "${0}{1}".format(
auth_id, self.META_FILE_EXT))
def auth_lock(self, auth_id):
return self._lock(self._auth_metadata_path(auth_id))
def auth_metadata_get(self, auth_id):
"""
Call me with the metadata locked!
Check whether a auth metadata structure can be decoded by the current
version of AuthMetadataManager.
Return auth metadata that the current version of AuthMetadataManager
can decode.
"""
auth_metadata = self._metadata_get(self._auth_metadata_path(auth_id))
if auth_metadata:
self._check_compat_version(auth_metadata['compat_version'])
return auth_metadata
def auth_metadata_set(self, auth_id, data):
"""
Call me with the metadata locked!
Fsync the auth metadata.
Add two version attributes to the auth metadata,
'compat_version', the minimum AuthMetadataManager version that can
decode the metadata, and 'version', the AuthMetadataManager version
that encoded the metadata.
"""
data['compat_version'] = 6
data['version'] = self.version
return self._metadata_set(self._auth_metadata_path(auth_id), data)
def create_subvolume_metadata_file(self, group_name, subvol_name):
"""
Create a subvolume metadata file, if it does not already exist, to store
data about auth ids having access to the subvolume
"""
fd = self.fs.open(self._subvolume_metadata_path(group_name, subvol_name),
os.O_CREAT, 0o755)
self.fs.close(fd)
def delete_subvolume_metadata_file(self, group_name, subvol_name):
vol_meta_path = self._subvolume_metadata_path(group_name, subvol_name)
try:
self.fs.unlink(vol_meta_path)
except cephfs.ObjectNotFound:
pass
def subvol_metadata_lock(self, group_name, subvol_name):
"""
Return a ContextManager which locks the authorization metadata for
a particular subvolume, and persists a flag to the metadata indicating
that it is currently locked, so that we can detect dirty situations
during recovery.
This lock isn't just to make access to the metadata safe: it's also
designed to be used over the two-step process of checking the
metadata and then responding to an authorization request, to
ensure that at the point we respond the metadata hasn't changed
in the background. It's key to how we avoid security holes
        resulting from races during that process.
"""
return self._lock(self._subvolume_metadata_path(group_name, subvol_name))
def subvol_metadata_get(self, group_name, subvol_name):
"""
Call me with the metadata locked!
Check whether a subvolume metadata structure can be decoded by the current
version of AuthMetadataManager.
Return a subvolume_metadata structure that the current version of
AuthMetadataManager can decode.
"""
subvolume_metadata = self._metadata_get(self._subvolume_metadata_path(group_name, subvol_name))
if subvolume_metadata:
self._check_compat_version(subvolume_metadata['compat_version'])
return subvolume_metadata
def subvol_metadata_set(self, group_name, subvol_name, data):
"""
Call me with the metadata locked!
Add two version attributes to the subvolume metadata,
'compat_version', the minimum AuthMetadataManager version that can
decode the metadata and 'version', the AuthMetadataManager version
that encoded the metadata.
"""
data['compat_version'] = 1
data['version'] = self.version
return self._metadata_set(self._subvolume_metadata_path(group_name, subvol_name), data)
| 7,199 | 33.123223 | 103 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/fs/operations/versions/metadata_manager.py
|
import os
import errno
import logging
import sys
import threading
import configparser
import re
import cephfs
from ...exception import MetadataMgrException
log = logging.getLogger(__name__)
# _lock needs to be shared across all instances of MetadataManager.
# that is why we have a file level instance
_lock = threading.Lock()
def _conf_reader(fs, fd, offset=0, length=4096):
while True:
buf = fs.read(fd, offset, length)
offset += len(buf)
if not buf:
return
yield buf.decode('utf-8')
class _ConfigWriter:
def __init__(self, fs, fd):
self._fs = fs
self._fd = fd
self._wrote = 0
def write(self, value):
buf = value.encode('utf-8')
wrote = self._fs.write(self._fd, buf, -1)
self._wrote += wrote
return wrote
def fsync(self):
self._fs.fsync(self._fd, 0)
@property
def wrote(self):
return self._wrote
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self._fs.close(self._fd)
class MetadataManager(object):
GLOBAL_SECTION = "GLOBAL"
USER_METADATA_SECTION = "USER_METADATA"
GLOBAL_META_KEY_VERSION = "version"
GLOBAL_META_KEY_TYPE = "type"
GLOBAL_META_KEY_PATH = "path"
GLOBAL_META_KEY_STATE = "state"
CLONE_FAILURE_SECTION = "CLONE_FAILURE"
CLONE_FAILURE_META_KEY_ERRNO = "errno"
CLONE_FAILURE_META_KEY_ERROR_MSG = "error_msg"
def __init__(self, fs, config_path, mode):
self.fs = fs
self.mode = mode
self.config_path = config_path
self.config = configparser.ConfigParser()
def refresh(self):
fd = None
try:
log.debug("opening config {0}".format(self.config_path))
with _lock:
fd = self.fs.open(self.config_path, os.O_RDONLY)
cfg = ''.join(_conf_reader(self.fs, fd))
self.config.read_string(cfg, source=self.config_path)
except UnicodeDecodeError:
raise MetadataMgrException(-errno.EINVAL,
"failed to decode, erroneous metadata config '{0}'".format(self.config_path))
except cephfs.ObjectNotFound:
raise MetadataMgrException(-errno.ENOENT, "metadata config '{0}' not found".format(self.config_path))
except cephfs.Error as e:
raise MetadataMgrException(-e.args[0], e.args[1])
except configparser.Error:
raise MetadataMgrException(-errno.EINVAL, "failed to parse, erroneous metadata config "
"'{0}'".format(self.config_path))
finally:
if fd is not None:
self.fs.close(fd)
def flush(self):
# cull empty sections
for section in list(self.config.sections()):
if len(self.config.items(section)) == 0:
self.config.remove_section(section)
try:
with _lock:
tmp_config_path = self.config_path + b'.tmp'
fd = self.fs.open(tmp_config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, self.mode)
with _ConfigWriter(self.fs, fd) as cfg_writer:
self.config.write(cfg_writer)
cfg_writer.fsync()
self.fs.rename(tmp_config_path, self.config_path)
log.info(f"wrote {cfg_writer.wrote} bytes to config {tmp_config_path}")
log.info(f"Renamed {tmp_config_path} to config {self.config_path}")
except cephfs.Error as e:
raise MetadataMgrException(-e.args[0], e.args[1])
def init(self, version, typ, path, state):
# you may init just once before refresh (helps to overwrite conf)
if self.config.has_section(MetadataManager.GLOBAL_SECTION):
raise MetadataMgrException(-errno.EINVAL, "init called on an existing config")
self.add_section(MetadataManager.GLOBAL_SECTION)
self.update_section_multi(
MetadataManager.GLOBAL_SECTION, {MetadataManager.GLOBAL_META_KEY_VERSION : str(version),
MetadataManager.GLOBAL_META_KEY_TYPE : str(typ),
MetadataManager.GLOBAL_META_KEY_PATH : str(path),
MetadataManager.GLOBAL_META_KEY_STATE : str(state)
})
def add_section(self, section):
try:
self.config.add_section(section)
except configparser.DuplicateSectionError:
return
        except Exception:
raise MetadataMgrException(-errno.EINVAL, "error adding section to config")
def remove_option(self, section, key):
if not self.config.has_section(section):
raise MetadataMgrException(-errno.ENOENT, "section '{0}' does not exist".format(section))
return self.config.remove_option(section, key)
def remove_section(self, section):
self.config.remove_section(section)
def update_section(self, section, key, value):
if not self.config.has_section(section):
raise MetadataMgrException(-errno.ENOENT, "section '{0}' does not exist".format(section))
self.config.set(section, key, str(value))
def update_section_multi(self, section, dct):
if not self.config.has_section(section):
raise MetadataMgrException(-errno.ENOENT, "section '{0}' does not exist".format(section))
        for key, value in dct.items():
self.config.set(section, key, str(value))
def update_global_section(self, key, value):
self.update_section(MetadataManager.GLOBAL_SECTION, key, str(value))
def get_option(self, section, key):
if not self.config.has_section(section):
raise MetadataMgrException(-errno.ENOENT, "section '{0}' does not exist".format(section))
if not self.config.has_option(section, key):
raise MetadataMgrException(-errno.ENOENT, "no config '{0}' in section '{1}'".format(key, section))
return self.config.get(section, key)
def get_global_option(self, key):
return self.get_option(MetadataManager.GLOBAL_SECTION, key)
def list_all_options_from_section(self, section):
metadata_dict = {}
if self.config.has_section(section):
options = self.config.options(section)
for option in options:
metadata_dict[option] = self.config.get(section,option)
return metadata_dict
def list_all_keys_with_specified_values_from_section(self, section, value):
keys = []
if self.config.has_section(section):
options = self.config.options(section)
for option in options:
                if value == self.config.get(section, option):
keys.append(option)
return keys
def section_has_item(self, section, item):
if not self.config.has_section(section):
raise MetadataMgrException(-errno.ENOENT, "section '{0}' does not exist".format(section))
return item in [v[1] for v in self.config.items(section)]
def has_snap_metadata_section(self):
sections = self.config.sections()
r = re.compile('SNAP_METADATA_.*')
for section in sections:
if r.match(section):
return True
return False
def list_snaps_with_metadata(self):
sections = self.config.sections()
r = re.compile('SNAP_METADATA_.*')
return [section[len("SNAP_METADATA_"):] for section in sections if r.match(section)]
| 7,554 | 36.587065 | 113 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/fs/operations/versions/op_sm.py
|
import errno
from typing import Dict
from ...exception import OpSmException
from .subvolume_attrs import SubvolumeTypes, SubvolumeStates, SubvolumeActions
class TransitionKey(object):
def __init__(self, subvol_type, state, action_type):
self.transition_key = [subvol_type, state, action_type]
def __hash__(self):
return hash(tuple(self.transition_key))
def __eq__(self, other):
return self.transition_key == other.transition_key
    def __ne__(self, other):
        return not (self == other)
class SubvolumeOpSm(object):
transition_table = {} # type: Dict
@staticmethod
def is_complete_state(state):
if not isinstance(state, SubvolumeStates):
raise OpSmException(-errno.EINVAL, "unknown state '{0}'".format(state))
return state == SubvolumeStates.STATE_COMPLETE
@staticmethod
def is_failed_state(state):
if not isinstance(state, SubvolumeStates):
raise OpSmException(-errno.EINVAL, "unknown state '{0}'".format(state))
return state == SubvolumeStates.STATE_FAILED or state == SubvolumeStates.STATE_CANCELED
@staticmethod
def is_init_state(stm_type, state):
if not isinstance(state, SubvolumeStates):
raise OpSmException(-errno.EINVAL, "unknown state '{0}'".format(state))
return state == SubvolumeOpSm.get_init_state(stm_type)
@staticmethod
def get_init_state(stm_type):
if not isinstance(stm_type, SubvolumeTypes):
raise OpSmException(-errno.EINVAL, "unknown state machine '{0}'".format(stm_type))
init_state = SubvolumeOpSm.transition_table[TransitionKey(stm_type,
SubvolumeStates.STATE_INIT,
SubvolumeActions.ACTION_NONE)]
if not init_state:
raise OpSmException(-errno.ENOENT, "initial state for state machine '{0}' not found".format(stm_type))
return init_state
@staticmethod
def transition(stm_type, current_state, action):
if not isinstance(stm_type, SubvolumeTypes):
raise OpSmException(-errno.EINVAL, "unknown state machine '{0}'".format(stm_type))
if not isinstance(current_state, SubvolumeStates):
raise OpSmException(-errno.EINVAL, "unknown state '{0}'".format(current_state))
if not isinstance(action, SubvolumeActions):
raise OpSmException(-errno.EINVAL, "unknown action '{0}'".format(action))
transition = SubvolumeOpSm.transition_table[TransitionKey(stm_type, current_state, action)]
if not transition:
raise OpSmException(-errno.EINVAL, "invalid action '{0}' on current state {1} for state machine '{2}'".format(action, current_state, stm_type))
return transition
SubvolumeOpSm.transition_table = {
# state transitions for state machine type TYPE_NORMAL
TransitionKey(SubvolumeTypes.TYPE_NORMAL,
SubvolumeStates.STATE_INIT,
SubvolumeActions.ACTION_NONE) : SubvolumeStates.STATE_COMPLETE,
TransitionKey(SubvolumeTypes.TYPE_NORMAL,
SubvolumeStates.STATE_COMPLETE,
SubvolumeActions.ACTION_RETAINED) : SubvolumeStates.STATE_RETAINED,
# state transitions for state machine type TYPE_CLONE
TransitionKey(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_INIT,
SubvolumeActions.ACTION_NONE) : SubvolumeStates.STATE_PENDING,
TransitionKey(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_PENDING,
SubvolumeActions.ACTION_SUCCESS) : SubvolumeStates.STATE_INPROGRESS,
TransitionKey(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_PENDING,
SubvolumeActions.ACTION_CANCELLED) : SubvolumeStates.STATE_CANCELED,
TransitionKey(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_INPROGRESS,
SubvolumeActions.ACTION_SUCCESS) : SubvolumeStates.STATE_COMPLETE,
TransitionKey(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_INPROGRESS,
SubvolumeActions.ACTION_CANCELLED) : SubvolumeStates.STATE_CANCELED,
TransitionKey(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_INPROGRESS,
SubvolumeActions.ACTION_FAILED) : SubvolumeStates.STATE_FAILED,
TransitionKey(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_COMPLETE,
SubvolumeActions.ACTION_RETAINED) : SubvolumeStates.STATE_RETAINED,
TransitionKey(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_CANCELED,
SubvolumeActions.ACTION_RETAINED) : SubvolumeStates.STATE_RETAINED,
TransitionKey(SubvolumeTypes.TYPE_CLONE,
SubvolumeStates.STATE_FAILED,
SubvolumeActions.ACTION_RETAINED) : SubvolumeStates.STATE_RETAINED,
}
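# Illustrative example, not part of the module: walking a clone through its
# happy path purely via the transition table above, mirroring how the cloner
# advances subvolume state. Uses only names defined in this file.
def example_clone_happy_path():
    state = SubvolumeOpSm.get_init_state(SubvolumeTypes.TYPE_CLONE) # pending
    state = SubvolumeOpSm.transition(SubvolumeTypes.TYPE_CLONE, state,
                                     SubvolumeActions.ACTION_SUCCESS) # in-progress
    state = SubvolumeOpSm.transition(SubvolumeTypes.TYPE_CLONE, state,
                                     SubvolumeActions.ACTION_SUCCESS) # complete
    return SubvolumeOpSm.is_complete_state(state)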
| 4,959 | 42.130435 | 155 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/fs/operations/versions/subvolume_attrs.py
|
import errno
from enum import Enum, unique
from ...exception import VolumeException
@unique
class SubvolumeTypes(Enum):
TYPE_NORMAL = "subvolume"
TYPE_CLONE = "clone"
@staticmethod
def from_value(value):
if value == "subvolume":
return SubvolumeTypes.TYPE_NORMAL
if value == "clone":
return SubvolumeTypes.TYPE_CLONE
raise VolumeException(-errno.EINVAL, "invalid subvolume type '{0}'".format(value))
@unique
class SubvolumeStates(Enum):
STATE_INIT = 'init'
STATE_PENDING = 'pending'
STATE_INPROGRESS = 'in-progress'
STATE_FAILED = 'failed'
STATE_COMPLETE = 'complete'
STATE_CANCELED = 'canceled'
STATE_RETAINED = 'snapshot-retained'
@staticmethod
def from_value(value):
if value == "init":
return SubvolumeStates.STATE_INIT
if value == "pending":
return SubvolumeStates.STATE_PENDING
if value == "in-progress":
return SubvolumeStates.STATE_INPROGRESS
if value == "failed":
return SubvolumeStates.STATE_FAILED
if value == "complete":
return SubvolumeStates.STATE_COMPLETE
if value == "canceled":
return SubvolumeStates.STATE_CANCELED
if value == "snapshot-retained":
return SubvolumeStates.STATE_RETAINED
raise VolumeException(-errno.EINVAL, "invalid state '{0}'".format(value))
@unique
class SubvolumeActions(Enum):
ACTION_NONE = 0
ACTION_SUCCESS = 1
ACTION_FAILED = 2
ACTION_CANCELLED = 3
ACTION_RETAINED = 4
@unique
class SubvolumeFeatures(Enum):
FEATURE_SNAPSHOT_CLONE = "snapshot-clone"
FEATURE_SNAPSHOT_RETENTION = "snapshot-retention"
FEATURE_SNAPSHOT_AUTOPROTECT = "snapshot-autoprotect"
| 1,788 | 26.106061 | 90 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py
|
import os
import stat
import errno
import logging
import hashlib
from typing import Dict, Union
from pathlib import Path
import cephfs
from ..pin_util import pin
from .subvolume_attrs import SubvolumeTypes
from .metadata_manager import MetadataManager
from ..trash import create_trashcan, open_trashcan
from ...fs_util import get_ancestor_xattr
from ...exception import MetadataMgrException, VolumeException
from .auth_metadata import AuthMetadataManager
from .subvolume_attrs import SubvolumeStates
log = logging.getLogger(__name__)
class SubvolumeBase(object):
LEGACY_CONF_DIR = "_legacy"
def __init__(self, mgr, fs, vol_spec, group, subvolname, legacy=False):
self.mgr = mgr
self.fs = fs
self.auth_mdata_mgr = AuthMetadataManager(fs)
self.cmode = None
self.user_id = None
self.group_id = None
self.vol_spec = vol_spec
self.group = group
self.subvolname = subvolname
self.legacy_mode = legacy
self.load_config()
@property
def uid(self):
return self.user_id
@uid.setter
def uid(self, val):
self.user_id = val
@property
def gid(self):
return self.group_id
@gid.setter
def gid(self, val):
self.group_id = val
@property
def mode(self):
return self.cmode
@mode.setter
def mode(self, val):
self.cmode = val
@property
def base_path(self):
return os.path.join(self.group.path, self.subvolname.encode('utf-8'))
@property
def config_path(self):
return os.path.join(self.base_path, b".meta")
@property
def legacy_dir(self):
return (os.path.join(self.vol_spec.base_dir.encode('utf-8'),
SubvolumeBase.LEGACY_CONF_DIR.encode('utf-8')))
@property
def legacy_config_path(self):
try:
m = hashlib.md5(self.base_path)
except ValueError:
try:
m = hashlib.md5(self.base_path, usedforsecurity=False) # type: ignore
except TypeError:
raise VolumeException(-errno.EINVAL,
"require python's hashlib library to support usedforsecurity flag in FIPS enabled systems")
meta_config = "{0}.meta".format(m.hexdigest())
return os.path.join(self.legacy_dir, meta_config.encode('utf-8'))
@property
def namespace(self):
return "{0}{1}".format(self.vol_spec.fs_namespace, self.subvolname)
@property
def group_name(self):
return self.group.group_name
@property
def subvol_name(self):
return self.subvolname
@property
def legacy_mode(self):
return self.legacy
@legacy_mode.setter
def legacy_mode(self, mode):
self.legacy = mode
@property
def path(self):
""" Path to subvolume data directory """
raise NotImplementedError
@property
def features(self):
"""
List of features supported by the subvolume,
containing items from SubvolumeFeatures
"""
raise NotImplementedError
@property
def state(self):
""" Subvolume state, one of SubvolumeStates """
return SubvolumeStates.from_value(self.metadata_mgr.get_global_option(MetadataManager.GLOBAL_META_KEY_STATE))
@property
def subvol_type(self):
return (SubvolumeTypes.from_value(self.metadata_mgr.get_global_option
(MetadataManager.GLOBAL_META_KEY_TYPE)))
@property
def purgeable(self):
""" Boolean declaring if subvolume can be purged """
raise NotImplementedError
def clean_stale_snapshot_metadata(self):
""" Clean up stale snapshot metadata """
raise NotImplementedError
def load_config(self):
try:
self.fs.stat(self.legacy_config_path)
self.legacy_mode = True
except cephfs.Error as e:
pass
log.debug("loading config "
"'{0}' [mode: {1}]".format(self.subvolname, "legacy"
if self.legacy_mode else "new"))
if self.legacy_mode:
self.metadata_mgr = MetadataManager(self.fs,
self.legacy_config_path,
0o640)
else:
self.metadata_mgr = MetadataManager(self.fs,
self.config_path, 0o640)
def get_attrs(self, pathname):
# get subvolume attributes
attrs = {} # type: Dict[str, Union[int, str, None]]
stx = self.fs.statx(pathname,
cephfs.CEPH_STATX_UID | cephfs.CEPH_STATX_GID
| cephfs.CEPH_STATX_MODE,
cephfs.AT_SYMLINK_NOFOLLOW)
attrs["uid"] = int(stx["uid"])
attrs["gid"] = int(stx["gid"])
attrs["mode"] = int(int(stx["mode"]) & ~stat.S_IFMT(stx["mode"]))
try:
attrs["data_pool"] = self.fs.getxattr(pathname,
'ceph.dir.layout.pool'
).decode('utf-8')
except cephfs.NoData:
attrs["data_pool"] = None
try:
attrs["pool_namespace"] = self.fs.getxattr(pathname,
'ceph.dir.layout'
'.pool_namespace'
).decode('utf-8')
except cephfs.NoData:
attrs["pool_namespace"] = None
try:
attrs["quota"] = int(self.fs.getxattr(pathname,
'ceph.quota.max_bytes'
).decode('utf-8'))
except cephfs.NoData:
attrs["quota"] = None
return attrs
def set_attrs(self, path, attrs):
# set subvolume attributes
# set size
quota = attrs.get("quota")
if quota is not None:
try:
self.fs.setxattr(path, 'ceph.quota.max_bytes',
str(quota).encode('utf-8'), 0)
except cephfs.InvalidValue:
raise VolumeException(-errno.EINVAL,
"invalid size specified: '{0}'".format(quota))
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
# set pool layout
data_pool = attrs.get("data_pool")
if data_pool is not None:
try:
self.fs.setxattr(path, 'ceph.dir.layout.pool',
data_pool.encode('utf-8'), 0)
except cephfs.InvalidValue:
raise VolumeException(-errno.EINVAL,
"invalid pool layout '{0}'"
"--need a valid data pool"
.format(data_pool))
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
# isolate namespace
xattr_key = xattr_val = None
pool_namespace = attrs.get("pool_namespace")
if pool_namespace is not None:
# enforce security isolation, use separate namespace
# for this subvolume
xattr_key = 'ceph.dir.layout.pool_namespace'
xattr_val = pool_namespace
elif not data_pool:
# If subvolume's namespace layout is not set,
# then the subvolume's pool
# layout remains unset and will undesirably change with ancestor's
# pool layout changes.
xattr_key = 'ceph.dir.layout.pool'
xattr_val = None
try:
self.fs.getxattr(path, 'ceph.dir.layout.pool').decode('utf-8')
except cephfs.NoData:
xattr_val = get_ancestor_xattr(self.fs, os.path.split(path)[0],
"ceph.dir.layout.pool")
if xattr_key and xattr_val:
try:
self.fs.setxattr(path, xattr_key, xattr_val.encode('utf-8'), 0)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
# set uid/gid
uid = attrs.get("uid")
if uid is None:
uid = self.group.uid
else:
try:
if uid < 0:
raise ValueError
except ValueError:
raise VolumeException(-errno.EINVAL, "invalid UID")
gid = attrs.get("gid")
if gid is None:
gid = self.group.gid
else:
try:
if gid < 0:
raise ValueError
except ValueError:
raise VolumeException(-errno.EINVAL, "invalid GID")
if uid is not None and gid is not None:
self.fs.chown(path, uid, gid)
# set mode
mode = attrs.get("mode", None)
if mode is not None:
self.fs.lchmod(path, mode)
def _resize(self, path, newsize, noshrink):
try:
newsize = int(newsize)
if newsize <= 0:
raise VolumeException(-errno.EINVAL,
"Invalid subvolume size")
except ValueError:
newsize = newsize.lower()
if not (newsize == "inf" or newsize == "infinite"):
raise (VolumeException(-errno.EINVAL,
"invalid size option '{0}'"
.format(newsize)))
newsize = 0
noshrink = False
try:
maxbytes = int(self.fs.getxattr(path,
'ceph.quota.max_bytes'
).decode('utf-8'))
except cephfs.NoData:
maxbytes = 0
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
subvolstat = self.fs.stat(path)
if newsize > 0 and newsize < subvolstat.st_size:
if noshrink:
raise VolumeException(-errno.EINVAL,
"Can't resize the subvolume. "
"The new size '{0}' would be "
"lesser than the current "
"used size '{1}'"
.format(newsize,
subvolstat.st_size))
if not newsize == maxbytes:
try:
self.fs.setxattr(path, 'ceph.quota.max_bytes',
str(newsize).encode('utf-8'), 0)
except cephfs.Error as e:
raise (VolumeException(-e.args[0],
"Cannot set new size"
"for the subvolume. '{0}'"
.format(e.args[1])))
return newsize, subvolstat.st_size
def pin(self, pin_type, pin_setting):
return pin(self.fs, self.base_path, pin_type, pin_setting)
def init_config(self, version, subvolume_type,
subvolume_path, subvolume_state):
self.metadata_mgr.init(version, subvolume_type.value,
subvolume_path, subvolume_state.value)
self.metadata_mgr.flush()
def discover(self):
log.debug("discovering subvolume "
"'{0}' [mode: {1}]".format(self.subvolname, "legacy"
if self.legacy_mode else "new"))
try:
self.fs.stat(self.base_path)
self.metadata_mgr.refresh()
log.debug("loaded subvolume '{0}'".format(self.subvolname))
subvolpath = self.metadata_mgr.get_global_option(MetadataManager.GLOBAL_META_KEY_PATH)
# subvolume with retained snapshots has empty path, don't mistake it for
# fabricated metadata.
if (not self.legacy_mode and self.state != SubvolumeStates.STATE_RETAINED and
self.base_path.decode('utf-8') != str(Path(subvolpath).parent)):
raise MetadataMgrException(-errno.ENOENT, 'fabricated .meta')
except MetadataMgrException as me:
if me.errno in (-errno.ENOENT, -errno.EINVAL) and not self.legacy_mode:
log.warn("subvolume '{0}', {1}, "
"assuming legacy_mode".format(self.subvolname, me.error_str))
self.legacy_mode = True
self.load_config()
self.discover()
else:
raise
except cephfs.Error as e:
if e.args[0] == errno.ENOENT:
raise (VolumeException(-errno.ENOENT,
"subvolume '{0}' "
"does not exist"
.format(self.subvolname)))
raise VolumeException(-e.args[0],
"error accessing subvolume '{0}'"
.format(self.subvolname))
def _trash_dir(self, path):
create_trashcan(self.fs, self.vol_spec)
with open_trashcan(self.fs, self.vol_spec) as trashcan:
trashcan.dump(path)
log.info("subvolume path '{0}' moved to trashcan".format(path))
def _link_dir(self, path, bname):
create_trashcan(self.fs, self.vol_spec)
with open_trashcan(self.fs, self.vol_spec) as trashcan:
trashcan.link(path, bname)
log.info("subvolume path '{0}' "
"linked in trashcan bname {1}".format(path, bname))
def trash_base_dir(self):
if self.legacy_mode:
self.fs.unlink(self.legacy_config_path)
self._trash_dir(self.base_path)
def create_base_dir(self, mode):
try:
self.fs.mkdirs(self.base_path, mode)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def info(self):
subvolpath = (self.metadata_mgr.get_global_option(
MetadataManager.GLOBAL_META_KEY_PATH))
etype = self.subvol_type
st = self.fs.statx(subvolpath, cephfs.CEPH_STATX_BTIME
| cephfs.CEPH_STATX_SIZE
| cephfs.CEPH_STATX_UID | cephfs.CEPH_STATX_GID
| cephfs.CEPH_STATX_MODE | cephfs.CEPH_STATX_ATIME
| cephfs.CEPH_STATX_MTIME
| cephfs.CEPH_STATX_CTIME,
cephfs.AT_SYMLINK_NOFOLLOW)
usedbytes = st["size"]
try:
nsize = int(self.fs.getxattr(subvolpath,
'ceph.quota.max_bytes'
).decode('utf-8'))
except cephfs.NoData:
nsize = 0
try:
data_pool = self.fs.getxattr(subvolpath,
'ceph.dir.layout.pool'
).decode('utf-8')
pool_namespace = self.fs.getxattr(subvolpath,
'ceph.dir.layout.pool_namespace'
).decode('utf-8')
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
return {'path': subvolpath,
'type': etype.value,
'uid': int(st["uid"]),
'gid': int(st["gid"]),
'atime': str(st["atime"]),
'mtime': str(st["mtime"]),
'ctime': str(st["ctime"]),
'mode': int(st["mode"]),
'data_pool': data_pool,
'created_at': str(st["btime"]),
'bytes_quota': "infinite" if nsize == 0 else nsize,
'bytes_used': int(usedbytes),
'bytes_pcent': "undefined"
if nsize == 0
else '{0:.2f}'.format((float(usedbytes) / nsize) * 100.0),
'pool_namespace': pool_namespace,
'features': self.features, 'state': self.state.value}
def set_user_metadata(self, keyname, value):
try:
self.metadata_mgr.add_section(MetadataManager.USER_METADATA_SECTION)
self.metadata_mgr.update_section(MetadataManager.USER_METADATA_SECTION, keyname, str(value))
self.metadata_mgr.flush()
except MetadataMgrException as me:
log.error(f"Failed to set user metadata key={keyname} value={value} on subvolume={self.subvol_name} "
f"group={self.group_name} reason={me.args[1]}, errno:{-me.args[0]}, {os.strerror(-me.args[0])}")
raise VolumeException(-me.args[0], me.args[1])
def get_user_metadata(self, keyname):
try:
value = self.metadata_mgr.get_option(MetadataManager.USER_METADATA_SECTION, keyname)
except MetadataMgrException as me:
if me.errno == -errno.ENOENT:
raise VolumeException(-errno.ENOENT, "key '{0}' does not exist.".format(keyname))
raise VolumeException(-me.args[0], me.args[1])
return value
def list_user_metadata(self):
return self.metadata_mgr.list_all_options_from_section(MetadataManager.USER_METADATA_SECTION)
def remove_user_metadata(self, keyname):
try:
ret = self.metadata_mgr.remove_option(MetadataManager.USER_METADATA_SECTION, keyname)
if not ret:
raise VolumeException(-errno.ENOENT, "key '{0}' does not exist.".format(keyname))
self.metadata_mgr.flush()
except MetadataMgrException as me:
if me.errno == -errno.ENOENT:
raise VolumeException(-errno.ENOENT, "subvolume metadata does not exist")
log.error(f"Failed to remove user metadata key={keyname} on subvolume={self.subvol_name} "
f"group={self.group_name} reason={me.args[1]}, errno:{-me.args[0]}, {os.strerror(-me.args[0])}")
raise VolumeException(-me.args[0], me.args[1])
def get_snap_section_name(self, snapname):
section = "SNAP_METADATA" + "_" + snapname;
return section;
def set_snapshot_metadata(self, snapname, keyname, value):
try:
section = self.get_snap_section_name(snapname)
self.metadata_mgr.add_section(section)
self.metadata_mgr.update_section(section, keyname, str(value))
self.metadata_mgr.flush()
except MetadataMgrException as me:
log.error(f"Failed to set snapshot metadata key={keyname} value={value} on snap={snapname} "
f"subvolume={self.subvol_name} group={self.group_name} "
f"reason={me.args[1]}, errno:{-me.args[0]}, {os.strerror(-me.args[0])}")
raise VolumeException(-me.args[0], me.args[1])
def get_snapshot_metadata(self, snapname, keyname):
try:
value = self.metadata_mgr.get_option(self.get_snap_section_name(snapname), keyname)
except MetadataMgrException as me:
if me.errno == -errno.ENOENT:
raise VolumeException(-errno.ENOENT, "key '{0}' does not exist.".format(keyname))
log.error(f"Failed to get snapshot metadata key={keyname} on snap={snapname} "
f"subvolume={self.subvol_name} group={self.group_name} "
f"reason={me.args[1]}, errno:{-me.args[0]}, {os.strerror(-me.args[0])}")
raise VolumeException(-me.args[0], me.args[1])
return value
def list_snapshot_metadata(self, snapname):
return self.metadata_mgr.list_all_options_from_section(self.get_snap_section_name(snapname))
def remove_snapshot_metadata(self, snapname, keyname):
try:
ret = self.metadata_mgr.remove_option(self.get_snap_section_name(snapname), keyname)
if not ret:
raise VolumeException(-errno.ENOENT, "key '{0}' does not exist.".format(keyname))
self.metadata_mgr.flush()
except MetadataMgrException as me:
if me.errno == -errno.ENOENT:
raise VolumeException(-errno.ENOENT, "snapshot metadata not does not exist")
log.error(f"Failed to remove snapshot metadata key={keyname} on snap={snapname} "
f"subvolume={self.subvol_name} group={self.group_name} "
f"reason={me.args[1]}, errno:{-me.args[0]}, {os.strerror(-me.args[0])}")
raise VolumeException(-me.args[0], me.args[1])
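# Illustrative sketch, not part of the module: user metadata keys live in the
# USER_METADATA section of the subvolume's .meta file. 'subvolume' is assumed
# to be an already-opened concrete subvolume (v1/v2); the key/value and helper
# name are hypothetical.
def example_tag_subvolume(subvolume):
    subvolume.set_user_metadata("owner", "alice")
    return subvolume.get_user_metadata("owner")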
| 20,763 | 39.084942 | 129 |
py
|
null |
ceph-main/src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py
|
import os
import sys
import stat
import uuid
import errno
import logging
import json
from datetime import datetime
from typing import Any, List, Dict
from pathlib import Path
import cephfs
from .metadata_manager import MetadataManager
from .subvolume_attrs import SubvolumeTypes, SubvolumeStates, SubvolumeFeatures
from .op_sm import SubvolumeOpSm
from .subvolume_base import SubvolumeBase
from ..template import SubvolumeTemplate
from ..snapshot_util import mksnap, rmsnap
from ..access import allow_access, deny_access
from ...exception import IndexException, OpSmException, VolumeException, MetadataMgrException, EvictionError
from ...fs_util import listsnaps, is_inherited_snap, create_base_dir
from ..template import SubvolumeOpType
from ..group import Group
from ..rankevicter import RankEvicter
from ..volume import get_mds_map
from ..clone_index import open_clone_index, create_clone_index
log = logging.getLogger(__name__)
class SubvolumeV1(SubvolumeBase, SubvolumeTemplate):
"""
Version 1 subvolumes creates a subvolume with path as follows,
volumes/<group-name>/<subvolume-name>/<uuid>/
- The directory under which user data resides is <uuid>
- Snapshots of the subvolume are taken within the <uuid> directory
- A meta file is maintained under the <subvolume-name> directory as a metadata store, typically storing,
- global information about the subvolume (version, path, type, state)
- snapshots attached to an ongoing clone operation
- clone snapshot source if subvolume is a clone of a snapshot
    - It retains backward compatibility with legacy subvolumes by creating the meta file for legacy subvolumes under
/volumes/_legacy/ (see legacy_config_path), thus allowing cloning of older legacy volumes that lack the <uuid>
component in the path.
"""
VERSION = 1
@staticmethod
def version():
return SubvolumeV1.VERSION
@property
def path(self):
try:
# no need to stat the path -- open() does that
return self.metadata_mgr.get_global_option(MetadataManager.GLOBAL_META_KEY_PATH).encode('utf-8')
except MetadataMgrException as me:
raise VolumeException(-errno.EINVAL, "error fetching subvolume metadata")
@property
def features(self):
return [SubvolumeFeatures.FEATURE_SNAPSHOT_CLONE.value, SubvolumeFeatures.FEATURE_SNAPSHOT_AUTOPROTECT.value]
def mark_subvolume(self):
# set subvolume attr, on subvolume root, marking it as a CephFS subvolume
# subvolume root is where snapshots would be taken, and hence is the <uuid> dir for v1 subvolumes
try:
# MDS treats this as a noop for already marked subvolume
self.fs.setxattr(self.path, 'ceph.dir.subvolume', b'1', 0)
except cephfs.InvalidValue as e:
raise VolumeException(-errno.EINVAL, "invalid value specified for ceph.dir.subvolume")
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def snapshot_base_path(self):
""" Base path for all snapshots """
return os.path.join(self.path, self.vol_spec.snapshot_dir_prefix.encode('utf-8'))
def snapshot_path(self, snapname):
""" Path to a specific snapshot named 'snapname' """
return os.path.join(self.snapshot_base_path(), snapname.encode('utf-8'))
def snapshot_data_path(self, snapname):
""" Path to user data directory within a subvolume snapshot named 'snapname' """
return self.snapshot_path(snapname)
def create(self, size, isolate_nspace, pool, mode, uid, gid):
subvolume_type = SubvolumeTypes.TYPE_NORMAL
try:
initial_state = SubvolumeOpSm.get_init_state(subvolume_type)
except OpSmException as oe:
raise VolumeException(-errno.EINVAL, "subvolume creation failed: internal error")
subvol_path = os.path.join(self.base_path, str(uuid.uuid4()).encode('utf-8'))
try:
# create group directory with default mode(0o755) if it doesn't exist.
create_base_dir(self.fs, self.group.path, self.vol_spec.DEFAULT_MODE)
# create directory and set attributes
self.fs.mkdirs(subvol_path, mode)
self.mark_subvolume()
attrs = {
'uid': uid,
'gid': gid,
'data_pool': pool,
'pool_namespace': self.namespace if isolate_nspace else None,
'quota': size
}
self.set_attrs(subvol_path, attrs)
# persist subvolume metadata
qpath = subvol_path.decode('utf-8')
self.init_config(SubvolumeV1.VERSION, subvolume_type, qpath, initial_state)
except (VolumeException, MetadataMgrException, cephfs.Error) as e:
try:
log.info("cleaning up subvolume with path: {0}".format(self.subvolname))
self.remove()
except VolumeException as ve:
log.info("failed to cleanup subvolume '{0}' ({1})".format(self.subvolname, ve))
if isinstance(e, MetadataMgrException):
log.error("metadata manager exception: {0}".format(e))
e = VolumeException(-errno.EINVAL, f"exception in subvolume metadata: {os.strerror(-e.args[0])}")
elif isinstance(e, cephfs.Error):
e = VolumeException(-e.args[0], e.args[1])
raise e
def add_clone_source(self, volname, subvolume, snapname, flush=False):
self.metadata_mgr.add_section("source")
self.metadata_mgr.update_section("source", "volume", volname)
if not subvolume.group.is_default_group():
self.metadata_mgr.update_section("source", "group", subvolume.group_name)
self.metadata_mgr.update_section("source", "subvolume", subvolume.subvol_name)
self.metadata_mgr.update_section("source", "snapshot", snapname)
if flush:
self.metadata_mgr.flush()
def remove_clone_source(self, flush=False):
self.metadata_mgr.remove_section("source")
if flush:
self.metadata_mgr.flush()
def add_clone_failure(self, errno, error_msg):
try:
self.metadata_mgr.add_section(MetadataManager.CLONE_FAILURE_SECTION)
self.metadata_mgr.update_section(MetadataManager.CLONE_FAILURE_SECTION,
MetadataManager.CLONE_FAILURE_META_KEY_ERRNO, errno)
self.metadata_mgr.update_section(MetadataManager.CLONE_FAILURE_SECTION,
MetadataManager.CLONE_FAILURE_META_KEY_ERROR_MSG, error_msg)
self.metadata_mgr.flush()
except MetadataMgrException as me:
log.error(f"Failed to add clone failure status clone={self.subvol_name} group={self.group_name} "
f"reason={me.args[1]}, errno:{-me.args[0]}, {os.strerror(-me.args[0])}")
def create_clone(self, pool, source_volname, source_subvolume, snapname):
subvolume_type = SubvolumeTypes.TYPE_CLONE
try:
initial_state = SubvolumeOpSm.get_init_state(subvolume_type)
except OpSmException as oe:
raise VolumeException(-errno.EINVAL, "clone failed: internal error")
subvol_path = os.path.join(self.base_path, str(uuid.uuid4()).encode('utf-8'))
try:
# source snapshot attrs are used to create clone subvolume.
            # attributes of the subvolume's content, though, are synced during the cloning process.
attrs = source_subvolume.get_attrs(source_subvolume.snapshot_data_path(snapname))
# The source of the clone may have exceeded its quota limit as
# CephFS quotas are imprecise. Cloning such a source may fail if
# the quota on the destination is set before starting the clone
# copy. So always set the quota on destination after cloning is
# successful.
attrs["quota"] = None
# override snapshot pool setting, if one is provided for the clone
if pool is not None:
attrs["data_pool"] = pool
attrs["pool_namespace"] = None
# create directory and set attributes
self.fs.mkdirs(subvol_path, attrs.get("mode"))
self.mark_subvolume()
self.set_attrs(subvol_path, attrs)
# persist subvolume metadata and clone source
qpath = subvol_path.decode('utf-8')
self.metadata_mgr.init(SubvolumeV1.VERSION, subvolume_type.value, qpath, initial_state.value)
self.add_clone_source(source_volname, source_subvolume, snapname)
self.metadata_mgr.flush()
except (VolumeException, MetadataMgrException, cephfs.Error) as e:
try:
log.info("cleaning up subvolume with path: {0}".format(self.subvolname))
self.remove()
except VolumeException as ve:
log.info("failed to cleanup subvolume '{0}' ({1})".format(self.subvolname, ve))
if isinstance(e, MetadataMgrException):
log.error("metadata manager exception: {0}".format(e))
e = VolumeException(-errno.EINVAL, f"exception in subvolume metadata: {os.strerror(-e.args[0])}")
elif isinstance(e, cephfs.Error):
e = VolumeException(-e.args[0], e.args[1])
raise e
def allowed_ops_by_type(self, vol_type):
if vol_type == SubvolumeTypes.TYPE_CLONE:
return {op_type for op_type in SubvolumeOpType}
if vol_type == SubvolumeTypes.TYPE_NORMAL:
return {op_type for op_type in SubvolumeOpType} - {SubvolumeOpType.CLONE_STATUS,
SubvolumeOpType.CLONE_CANCEL,
SubvolumeOpType.CLONE_INTERNAL}
return {}
def allowed_ops_by_state(self, vol_state):
if vol_state == SubvolumeStates.STATE_COMPLETE:
return {op_type for op_type in SubvolumeOpType}
return {SubvolumeOpType.REMOVE_FORCE,
SubvolumeOpType.CLONE_CREATE,
SubvolumeOpType.CLONE_STATUS,
SubvolumeOpType.CLONE_CANCEL,
SubvolumeOpType.CLONE_INTERNAL}
def open(self, op_type):
if not isinstance(op_type, SubvolumeOpType):
raise VolumeException(-errno.ENOTSUP, "operation {0} not supported on subvolume '{1}'".format(
op_type.value, self.subvolname))
try:
self.metadata_mgr.refresh()
etype = self.subvol_type
if op_type not in self.allowed_ops_by_type(etype):
raise VolumeException(-errno.ENOTSUP, "operation '{0}' is not allowed on subvolume '{1}' of type {2}".format(
op_type.value, self.subvolname, etype.value))
estate = self.state
if op_type not in self.allowed_ops_by_state(estate):
raise VolumeException(-errno.EAGAIN, "subvolume '{0}' is not ready for operation {1}".format(
self.subvolname, op_type.value))
subvol_path = self.path
log.debug("refreshed metadata, checking subvolume path '{0}'".format(subvol_path))
st = self.fs.stat(subvol_path)
# unconditionally mark as subvolume, to handle pre-existing subvolumes without the mark
self.mark_subvolume()
self.uid = int(st.st_uid)
self.gid = int(st.st_gid)
self.mode = int(st.st_mode & ~stat.S_IFMT(st.st_mode))
except MetadataMgrException as me:
if me.errno == -errno.ENOENT:
raise VolumeException(-errno.ENOENT, "subvolume '{0}' does not exist".format(self.subvolname))
raise VolumeException(me.args[0], me.args[1])
except cephfs.ObjectNotFound:
log.debug("missing subvolume path '{0}' for subvolume '{1}'".format(subvol_path, self.subvolname))
raise VolumeException(-errno.ENOENT, "mount path missing for subvolume '{0}'".format(self.subvolname))
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def _recover_auth_meta(self, auth_id, auth_meta):
"""
Call me after locking the auth meta file.
"""
remove_subvolumes = []
for subvol, subvol_data in auth_meta['subvolumes'].items():
if not subvol_data['dirty']:
continue
(group_name, subvol_name) = subvol.split('/')
group_name = group_name if group_name != 'None' else Group.NO_GROUP_NAME
access_level = subvol_data['access_level']
with self.auth_mdata_mgr.subvol_metadata_lock(group_name, subvol_name):
subvol_meta = self.auth_mdata_mgr.subvol_metadata_get(group_name, subvol_name)
# No SVMeta update indicates that there was no auth update
# in Ceph either. So it's safe to remove corresponding
# partial update in AMeta.
if not subvol_meta or auth_id not in subvol_meta['auths']:
remove_subvolumes.append(subvol)
continue
want_auth = {
'access_level': access_level,
'dirty': False,
}
# SVMeta update looks clean. Ceph auth update must have been
# clean. Update the dirty flag and continue
if subvol_meta['auths'][auth_id] == want_auth:
auth_meta['subvolumes'][subvol]['dirty'] = False
self.auth_mdata_mgr.auth_metadata_set(auth_id, auth_meta)
continue
client_entity = "client.{0}".format(auth_id)
ret, out, err = self.mgr.mon_command(
{
'prefix': 'auth get',
'entity': client_entity,
'format': 'json'
})
if ret == 0:
existing_caps = json.loads(out)
elif ret == -errno.ENOENT:
existing_caps = None
else:
log.error(err)
raise VolumeException(ret, err)
self._authorize_subvolume(auth_id, access_level, existing_caps)
# Recovered from partial auth updates for the auth ID's access
# to a subvolume.
auth_meta['subvolumes'][subvol]['dirty'] = False
self.auth_mdata_mgr.auth_metadata_set(auth_id, auth_meta)
for subvol in remove_subvolumes:
del auth_meta['subvolumes'][subvol]
if not auth_meta['subvolumes']:
# Clean up auth meta file
self.fs.unlink(self.auth_mdata_mgr._auth_metadata_path(auth_id))
return
# Recovered from all partial auth updates for the auth ID.
auth_meta['dirty'] = False
self.auth_mdata_mgr.auth_metadata_set(auth_id, auth_meta)
def authorize(self, auth_id, access_level, tenant_id=None, allow_existing_id=False):
"""
        Get-or-create a Ceph auth identity for `auth_id` and grant them access
        to the subvolume.
:param auth_id:
:param access_level:
:param tenant_id: Optionally provide a stringizable object to
restrict any created cephx IDs to other callers
passing the same tenant ID.
        :param allow_existing_id: Optionally authorize existing auth-ids not
created by ceph_volume_client.
:return:
"""
with self.auth_mdata_mgr.auth_lock(auth_id):
client_entity = "client.{0}".format(auth_id)
ret, out, err = self.mgr.mon_command(
{
'prefix': 'auth get',
'entity': client_entity,
'format': 'json'
})
if ret == 0:
existing_caps = json.loads(out)
elif ret == -errno.ENOENT:
existing_caps = None
else:
log.error(err)
raise VolumeException(ret, err)
# Existing meta, or None, to be updated
auth_meta = self.auth_mdata_mgr.auth_metadata_get(auth_id)
# subvolume data to be inserted
group_name = self.group.groupname if self.group.groupname != Group.NO_GROUP_NAME else None
group_subvol_id = "{0}/{1}".format(group_name, self.subvolname)
subvolume = {
group_subvol_id : {
# The access level at which the auth_id is authorized to
# access the volume.
'access_level': access_level,
'dirty': True,
}
}
if auth_meta is None:
if not allow_existing_id and existing_caps is not None:
msg = "auth ID: {0} exists and not created by mgr plugin. Not allowed to modify".format(auth_id)
log.error(msg)
raise VolumeException(-errno.EPERM, msg)
# non-existent auth IDs
sys.stderr.write("Creating meta for ID {0} with tenant {1}\n".format(
auth_id, tenant_id
))
log.debug("Authorize: no existing meta")
auth_meta = {
'dirty': True,
'tenant_id': str(tenant_id) if tenant_id else None,
'subvolumes': subvolume
}
else:
# Update 'volumes' key (old style auth metadata file) to 'subvolumes' key
if 'volumes' in auth_meta:
auth_meta['subvolumes'] = auth_meta.pop('volumes')
# Disallow tenants to share auth IDs
if str(auth_meta['tenant_id']) != str(tenant_id):
msg = "auth ID: {0} is already in use".format(auth_id)
log.error(msg)
raise VolumeException(-errno.EPERM, msg)
if auth_meta['dirty']:
self._recover_auth_meta(auth_id, auth_meta)
log.debug("Authorize: existing tenant {tenant}".format(
tenant=auth_meta['tenant_id']
))
auth_meta['dirty'] = True
auth_meta['subvolumes'].update(subvolume)
self.auth_mdata_mgr.auth_metadata_set(auth_id, auth_meta)
with self.auth_mdata_mgr.subvol_metadata_lock(self.group.groupname, self.subvolname):
key = self._authorize_subvolume(auth_id, access_level, existing_caps)
auth_meta['dirty'] = False
auth_meta['subvolumes'][group_subvol_id]['dirty'] = False
self.auth_mdata_mgr.auth_metadata_set(auth_id, auth_meta)
if tenant_id:
return key
else:
# Caller wasn't multi-tenant aware: be safe and don't give
# them a key
return ""
def _authorize_subvolume(self, auth_id, access_level, existing_caps):
subvol_meta = self.auth_mdata_mgr.subvol_metadata_get(self.group.groupname, self.subvolname)
auth = {
auth_id: {
'access_level': access_level,
'dirty': True,
}
}
if subvol_meta is None:
subvol_meta = {
'auths': auth
}
else:
subvol_meta['auths'].update(auth)
self.auth_mdata_mgr.subvol_metadata_set(self.group.groupname, self.subvolname, subvol_meta)
key = self._authorize(auth_id, access_level, existing_caps)
subvol_meta['auths'][auth_id]['dirty'] = False
self.auth_mdata_mgr.subvol_metadata_set(self.group.groupname, self.subvolname, subvol_meta)
return key
def _authorize(self, auth_id, access_level, existing_caps):
subvol_path = self.path
log.debug("Authorizing Ceph id '{0}' for path '{1}'".format(auth_id, subvol_path))
# First I need to work out what the data pool is for this share:
# read the layout
try:
pool = self.fs.getxattr(subvol_path, 'ceph.dir.layout.pool').decode('utf-8')
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
try:
namespace = self.fs.getxattr(subvol_path, 'ceph.dir.layout.pool_namespace').decode('utf-8')
except cephfs.NoData:
namespace = None
# Now construct auth capabilities that give the guest just enough
# permissions to access the share
client_entity = "client.{0}".format(auth_id)
want_mds_cap = "allow {0} path={1}".format(access_level, subvol_path.decode('utf-8'))
want_osd_cap = "allow {0} pool={1}{2}".format(
access_level, pool, " namespace={0}".format(namespace) if namespace else "")
# Construct auth caps that if present might conflict with the desired
# auth caps.
unwanted_access_level = 'r' if access_level == 'rw' else 'rw'
unwanted_mds_cap = 'allow {0} path={1}'.format(unwanted_access_level, subvol_path.decode('utf-8'))
unwanted_osd_cap = "allow {0} pool={1}{2}".format(
unwanted_access_level, pool, " namespace={0}".format(namespace) if namespace else "")
return allow_access(self.mgr, client_entity, want_mds_cap, want_osd_cap,
unwanted_mds_cap, unwanted_osd_cap, existing_caps)
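    # Illustrative caps produced by _authorize() above, assuming
    # access_level='rw', a subvolume path of '/volumes/_nogroup/sv/<uuid>',
    # data pool 'cephfs_data' and no pool namespace (all assumed values):
    #   want_mds_cap     = "allow rw path=/volumes/_nogroup/sv/<uuid>"
    #   want_osd_cap     = "allow rw pool=cephfs_data"
    #   unwanted_mds_cap = "allow r path=/volumes/_nogroup/sv/<uuid>"
    #   unwanted_osd_cap = "allow r pool=cephfs_data"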
def deauthorize(self, auth_id):
with self.auth_mdata_mgr.auth_lock(auth_id):
# Existing meta, or None, to be updated
auth_meta = self.auth_mdata_mgr.auth_metadata_get(auth_id)
if auth_meta is None:
msg = "auth ID: {0} doesn't exist".format(auth_id)
log.error(msg)
raise VolumeException(-errno.ENOENT, msg)
# Update 'volumes' key (old style auth metadata file) to 'subvolumes' key
if 'volumes' in auth_meta:
auth_meta['subvolumes'] = auth_meta.pop('volumes')
group_name = self.group.groupname if self.group.groupname != Group.NO_GROUP_NAME else None
group_subvol_id = "{0}/{1}".format(group_name, self.subvolname)
if (auth_meta is None) or (not auth_meta['subvolumes']):
log.warning("deauthorized called for already-removed auth"
"ID '{auth_id}' for subvolume '{subvolume}'".format(
auth_id=auth_id, subvolume=self.subvolname
))
# Clean up the auth meta file of an auth ID
self.fs.unlink(self.auth_mdata_mgr._auth_metadata_path(auth_id))
return
if group_subvol_id not in auth_meta['subvolumes']:
log.warning("deauthorized called for already-removed auth"
"ID '{auth_id}' for subvolume '{subvolume}'".format(
auth_id=auth_id, subvolume=self.subvolname
))
return
if auth_meta['dirty']:
self._recover_auth_meta(auth_id, auth_meta)
auth_meta['dirty'] = True
auth_meta['subvolumes'][group_subvol_id]['dirty'] = True
self.auth_mdata_mgr.auth_metadata_set(auth_id, auth_meta)
self._deauthorize_subvolume(auth_id)
# Filter out the volume we're deauthorizing
del auth_meta['subvolumes'][group_subvol_id]
# Clean up auth meta file
if not auth_meta['subvolumes']:
self.fs.unlink(self.auth_mdata_mgr._auth_metadata_path(auth_id))
return
auth_meta['dirty'] = False
self.auth_mdata_mgr.auth_metadata_set(auth_id, auth_meta)
def _deauthorize_subvolume(self, auth_id):
with self.auth_mdata_mgr.subvol_metadata_lock(self.group.groupname, self.subvolname):
subvol_meta = self.auth_mdata_mgr.subvol_metadata_get(self.group.groupname, self.subvolname)
if (subvol_meta is None) or (auth_id not in subvol_meta['auths']):
log.warning("deauthorized called for already-removed auth"
"ID '{auth_id}' for subvolume '{subvolume}'".format(
auth_id=auth_id, subvolume=self.subvolname
))
return
subvol_meta['auths'][auth_id]['dirty'] = True
self.auth_mdata_mgr.subvol_metadata_set(self.group.groupname, self.subvolname, subvol_meta)
self._deauthorize(auth_id)
# Remove the auth_id from the metadata *after* removing it
# from ceph, so that if we crashed here, we would actually
# recreate the auth ID during recovery (i.e. end up with
# a consistent state).
# Filter out the auth we're removing
del subvol_meta['auths'][auth_id]
self.auth_mdata_mgr.subvol_metadata_set(self.group.groupname, self.subvolname, subvol_meta)
def _deauthorize(self, auth_id):
"""
The volume must still exist.
"""
client_entity = "client.{0}".format(auth_id)
subvol_path = self.path
try:
pool_name = self.fs.getxattr(subvol_path, 'ceph.dir.layout.pool').decode('utf-8')
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
try:
namespace = self.fs.getxattr(subvol_path, 'ceph.dir.layout.pool_namespace').decode('utf-8')
except cephfs.NoData:
namespace = None
# The auth_id might have read-only or read-write mount access for the
# subvolume path.
access_levels = ('r', 'rw')
want_mds_caps = ['allow {0} path={1}'.format(access_level, subvol_path.decode('utf-8'))
for access_level in access_levels]
want_osd_caps = ['allow {0} pool={1}{2}'.format(
access_level, pool_name, " namespace={0}".format(namespace) if namespace else "")
for access_level in access_levels]
deny_access(self.mgr, client_entity, want_mds_caps, want_osd_caps)
def authorized_list(self):
"""
Expose a list of auth IDs that have access to a subvolume.
        return: a list of {auth_id: access_level} entries, where
                the access_level can be 'r' or 'rw'.
                An empty list is returned if no auth ID has access to the subvolume.
"""
with self.auth_mdata_mgr.subvol_metadata_lock(self.group.groupname, self.subvolname):
meta = self.auth_mdata_mgr.subvol_metadata_get(self.group.groupname, self.subvolname)
auths = [] # type: List[Dict[str,str]]
if not meta or not meta['auths']:
return auths
for auth, auth_data in meta['auths'].items():
# Skip partial auth updates.
if not auth_data['dirty']:
auths.append({auth: auth_data['access_level']})
return auths
def evict(self, volname, auth_id, timeout=30):
"""
Evict all clients based on the authorization ID and the subvolume path mounted.
Assumes that the authorization key has been revoked prior to calling this function.
This operation can throw an exception if the mon cluster is unresponsive, or
any individual MDS daemon is unresponsive for longer than the timeout passed in.
"""
client_spec = ["auth_name={0}".format(auth_id), ]
client_spec.append("client_metadata.root={0}".
format(self.path.decode('utf-8')))
log.info("evict clients with {0}".format(', '.join(client_spec)))
mds_map = get_mds_map(self.mgr, volname)
if not mds_map:
raise VolumeException(-errno.ENOENT, "mdsmap for volume {0} not found".format(volname))
up = {}
for name, gid in mds_map['up'].items():
# Quirk of the MDSMap JSON dump: keys in the up dict are like "mds_0"
assert name.startswith("mds_")
up[int(name[4:])] = gid
# For all MDS ranks held by a daemon
# Do the parallelism in python instead of using "tell mds.*", because
# the latter doesn't give us per-mds output
threads = []
for rank, gid in up.items():
thread = RankEvicter(self.mgr, self.fs, client_spec, volname, rank, gid, mds_map, timeout)
thread.start()
threads.append(thread)
for t in threads:
t.join()
log.info("evict: joined all")
for t in threads:
if not t.success:
msg = ("Failed to evict client with {0} from mds {1}/{2}: {3}".
format(', '.join(client_spec), t.rank, t.gid, t.exception)
)
log.error(msg)
raise EvictionError(msg)
def _get_clone_source(self):
try:
clone_source = {
'volume' : self.metadata_mgr.get_option("source", "volume"),
'subvolume': self.metadata_mgr.get_option("source", "subvolume"),
'snapshot' : self.metadata_mgr.get_option("source", "snapshot"),
}
try:
clone_source["group"] = self.metadata_mgr.get_option("source", "group")
except MetadataMgrException as me:
if me.errno == -errno.ENOENT:
pass
else:
raise
except MetadataMgrException as me:
raise VolumeException(-errno.EINVAL, "error fetching subvolume metadata")
return clone_source
def _get_clone_failure(self):
clone_failure = {
'errno' : self.metadata_mgr.get_option(MetadataManager.CLONE_FAILURE_SECTION, MetadataManager.CLONE_FAILURE_META_KEY_ERRNO),
'error_msg' : self.metadata_mgr.get_option(MetadataManager.CLONE_FAILURE_SECTION, MetadataManager.CLONE_FAILURE_META_KEY_ERROR_MSG),
}
return clone_failure
@property
def status(self):
state = SubvolumeStates.from_value(self.metadata_mgr.get_global_option(MetadataManager.GLOBAL_META_KEY_STATE))
subvolume_type = self.subvol_type
subvolume_status = {
'state' : state.value
}
if not SubvolumeOpSm.is_complete_state(state) and subvolume_type == SubvolumeTypes.TYPE_CLONE:
subvolume_status["source"] = self._get_clone_source()
if SubvolumeOpSm.is_failed_state(state) and subvolume_type == SubvolumeTypes.TYPE_CLONE:
try:
subvolume_status["failure"] = self._get_clone_failure()
except MetadataMgrException:
pass
return subvolume_status
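    # Illustrative shape of the status dict for an in-flight clone (all
    # values below are assumptions for illustration):
    #   {'state': 'in-progress',
    #    'source': {'volume': 'vol1', 'group': 'grp1',
    #               'subvolume': 'sv1', 'snapshot': 'snap1'}}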
@property
def state(self):
return super(SubvolumeV1, self).state
@state.setter
def state(self, val):
state = val[0].value
flush = val[1]
self.metadata_mgr.update_global_section(MetadataManager.GLOBAL_META_KEY_STATE, state)
if flush:
self.metadata_mgr.flush()
def remove(self, retainsnaps=False):
if retainsnaps:
raise VolumeException(-errno.EINVAL, "subvolume '{0}' does not support snapshot retention on delete".format(self.subvolname))
if self.list_snapshots():
raise VolumeException(-errno.ENOTEMPTY, "subvolume '{0}' has snapshots".format(self.subvolname))
self.trash_base_dir()
def resize(self, newsize, noshrink):
subvol_path = self.path
return self._resize(subvol_path, newsize, noshrink)
def create_snapshot(self, snapname):
try:
group_snapshot_path = os.path.join(self.group.path,
self.vol_spec.snapshot_dir_prefix.encode('utf-8'),
snapname.encode('utf-8'))
self.fs.stat(group_snapshot_path)
except cephfs.Error as e:
if e.args[0] == errno.ENOENT:
snappath = self.snapshot_path(snapname)
mksnap(self.fs, snappath)
else:
raise VolumeException(-e.args[0], e.args[1])
else:
raise VolumeException(-errno.EINVAL, "subvolumegroup and subvolume snapshot name can't be same")
def has_pending_clones(self, snapname):
try:
return self.metadata_mgr.section_has_item('clone snaps', snapname)
except MetadataMgrException as me:
if me.errno == -errno.ENOENT:
return False
raise
def get_pending_clones(self, snapname):
pending_clones_info = {"has_pending_clones": "no"} # type: Dict[str, Any]
pending_track_id_list = []
pending_clone_list = []
index_path = ""
orphan_clones_count = 0
try:
if self.has_pending_clones(snapname):
pending_track_id_list = self.metadata_mgr.list_all_keys_with_specified_values_from_section('clone snaps', snapname)
else:
return pending_clones_info
except MetadataMgrException as me:
if me.errno != -errno.ENOENT:
raise VolumeException(-me.args[0], me.args[1])
try:
with open_clone_index(self.fs, self.vol_spec) as index:
index_path = index.path.decode('utf-8')
except IndexException as e:
log.warning("failed to open clone index '{0}' for snapshot '{1}'".format(e, snapname))
raise VolumeException(-errno.EINVAL, "failed to open clone index")
for track_id in pending_track_id_list:
try:
link_path = self.fs.readlink(os.path.join(index_path, track_id), 4096)
except cephfs.Error as e:
if e.errno != errno.ENOENT:
raise VolumeException(-e.args[0], e.args[1])
else:
try:
                        # If the clone completes between the 'list_all_keys_with_specified_values_from_section'
                        # call and the readlink(track_id_path) call, then readlink fails with ENOENT (2).
                        # Hence we double-check whether the track_id still exists in the .meta file.
value = self.metadata_mgr.get_option('clone snaps', track_id)
                        # Edge case scenario:
                        # if the track_id for the clone exists but the path /volumes/_index/clone/{track_id}
                        # is not found, then the clone is orphaned.
orphan_clones_count += 1
continue
except MetadataMgrException as me:
if me.errno != -errno.ENOENT:
raise VolumeException(-me.args[0], me.args[1])
path = Path(link_path.decode('utf-8'))
clone_name = os.path.basename(link_path).decode('utf-8')
group_name = os.path.basename(path.parent.absolute())
details = {"name": clone_name} # type: Dict[str, str]
if group_name != Group.NO_GROUP_NAME:
details["target_group"] = group_name
pending_clone_list.append(details)
if len(pending_clone_list) != 0:
pending_clones_info["has_pending_clones"] = "yes"
pending_clones_info["pending_clones"] = pending_clone_list
else:
pending_clones_info["has_pending_clones"] = "no"
if orphan_clones_count > 0:
pending_clones_info["orphan_clones_count"] = orphan_clones_count
return pending_clones_info
def remove_snapshot(self, snapname, force=False):
if self.has_pending_clones(snapname):
raise VolumeException(-errno.EAGAIN, "snapshot '{0}' has pending clones".format(snapname))
snappath = self.snapshot_path(snapname)
try:
self.metadata_mgr.remove_section(self.get_snap_section_name(snapname))
self.metadata_mgr.flush()
except MetadataMgrException as me:
if force:
log.info(f"Allowing snapshot removal on failure of it's metadata removal with force on "
f"snap={snapname} subvol={self.subvol_name} group={self.group_name} reason={me.args[1]}, "
f"errno:{-me.args[0]}, {os.strerror(-me.args[0])}")
pass
else:
log.error(f"Failed to remove snapshot metadata on snap={snapname} subvol={self.subvol_name} "
f"group={self.group_name} reason={me.args[1]}, errno:{-me.args[0]}, {os.strerror(-me.args[0])}")
raise VolumeException(-errno.EAGAIN,
f"failed to remove snapshot metadata on snap={snapname} reason={me.args[0]} {me.args[1]}")
rmsnap(self.fs, snappath)
def snapshot_info(self, snapname):
if is_inherited_snap(snapname):
raise VolumeException(-errno.EINVAL,
"snapshot name '{0}' is invalid".format(snapname))
snappath = self.snapshot_data_path(snapname)
snap_info = {}
try:
snap_attrs = {'created_at':'ceph.snap.btime',
'data_pool':'ceph.dir.layout.pool'}
for key, val in snap_attrs.items():
snap_info[key] = self.fs.getxattr(snappath, val)
pending_clones_info = self.get_pending_clones(snapname)
info_dict = {'created_at': str(datetime.fromtimestamp(float(snap_info['created_at']))),
'data_pool': snap_info['data_pool'].decode('utf-8')} # type: Dict[str, Any]
            info_dict.update(pending_clones_info)
return info_dict
except cephfs.Error as e:
if e.errno == errno.ENOENT:
raise VolumeException(-errno.ENOENT,
"snapshot '{0}' does not exist".format(snapname))
raise VolumeException(-e.args[0], e.args[1])
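    # Illustrative shape of the snapshot_info() result (values are assumed):
    #   {'created_at': '2023-01-01 12:00:00.000000',
    #    'data_pool': 'cephfs_data',
    #    'has_pending_clones': 'no'}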
def list_snapshots(self):
try:
dirpath = self.snapshot_base_path()
return listsnaps(self.fs, self.vol_spec, dirpath, filter_inherited_snaps=True)
except VolumeException as ve:
if ve.errno == -errno.ENOENT:
return []
raise
def clean_stale_snapshot_metadata(self):
""" Clean up stale snapshot metadata """
if self.metadata_mgr.has_snap_metadata_section():
snap_list = self.list_snapshots()
snaps_with_metadata_list = self.metadata_mgr.list_snaps_with_metadata()
for snap_with_metadata in snaps_with_metadata_list:
if snap_with_metadata.encode('utf-8') not in snap_list:
try:
self.metadata_mgr.remove_section(self.get_snap_section_name(snap_with_metadata))
self.metadata_mgr.flush()
except MetadataMgrException as me:
log.error(f"Failed to remove stale snap metadata on snap={snap_with_metadata} "
f"subvol={self.subvol_name} group={self.group_name} reason={me.args[1]}, "
f"errno:{-me.args[0]}, {os.strerror(-me.args[0])}")
pass
def _add_snap_clone(self, track_id, snapname):
self.metadata_mgr.add_section("clone snaps")
self.metadata_mgr.update_section("clone snaps", track_id, snapname)
self.metadata_mgr.flush()
def _remove_snap_clone(self, track_id):
self.metadata_mgr.remove_option("clone snaps", track_id)
self.metadata_mgr.flush()
def attach_snapshot(self, snapname, tgt_subvolume):
if not snapname.encode('utf-8') in self.list_snapshots():
raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(snapname))
try:
create_clone_index(self.fs, self.vol_spec)
with open_clone_index(self.fs, self.vol_spec) as index:
track_idx = index.track(tgt_subvolume.base_path)
self._add_snap_clone(track_idx, snapname)
except (IndexException, MetadataMgrException) as e:
log.warning("error creating clone index: {0}".format(e))
raise VolumeException(-errno.EINVAL, "error cloning subvolume")
def detach_snapshot(self, snapname, track_id):
try:
with open_clone_index(self.fs, self.vol_spec) as index:
index.untrack(track_id)
self._remove_snap_clone(track_id)
except (IndexException, MetadataMgrException) as e:
log.warning("error delining snapshot from clone: {0}".format(e))
raise VolumeException(-errno.EINVAL, "error delinking snapshot from clone")
ceph-main/src/pybind/mgr/volumes/fs/operations/versions/subvolume_v2.py
import os
import stat
import uuid
import errno
import logging
import cephfs
from .metadata_manager import MetadataManager
from .subvolume_attrs import SubvolumeTypes, SubvolumeStates, SubvolumeFeatures
from .op_sm import SubvolumeOpSm
from .subvolume_v1 import SubvolumeV1
from ..template import SubvolumeTemplate
from ...exception import OpSmException, VolumeException, MetadataMgrException
from ...fs_util import listdir, create_base_dir
from ..template import SubvolumeOpType
log = logging.getLogger(__name__)
class SubvolumeV2(SubvolumeV1):
"""
    Version 2 subvolumes create a subvolume with a path as follows,
        volumes/<group-name>/<subvolume-name>/<uuid>/
    The distinguishing feature of a V2 subvolume, as compared to a V1 subvolume, is its ability to retain snapshots
    of a subvolume on removal. This is done by creating snapshots under the <subvolume-name> directory,
    rather than under the <uuid> directory, as is the case with V1 subvolumes.
- The directory under which user data resides is <uuid>
- Snapshots of the subvolume are taken within the <subvolume-name> directory
- A meta file is maintained under the <subvolume-name> directory as a metadata store, storing information similar
to V1 subvolumes
- On a request to remove subvolume but retain its snapshots, only the <uuid> directory is moved to trash, retaining
the rest of the subvolume and its meta file.
- The <uuid> directory, when present, is the current incarnation of the subvolume, which may have snapshots of
older incarnations of the same subvolume.
- V1 subvolumes that currently do not have any snapshots are upgraded to V2 subvolumes automatically, to support the
snapshot retention feature
"""
VERSION = 2
@staticmethod
def version():
return SubvolumeV2.VERSION
@property
def features(self):
return [SubvolumeFeatures.FEATURE_SNAPSHOT_CLONE.value,
SubvolumeFeatures.FEATURE_SNAPSHOT_AUTOPROTECT.value,
SubvolumeFeatures.FEATURE_SNAPSHOT_RETENTION.value]
@property
def retained(self):
try:
self.metadata_mgr.refresh()
if self.state == SubvolumeStates.STATE_RETAINED:
return True
return False
except MetadataMgrException as me:
if me.errno != -errno.ENOENT:
raise VolumeException(me.errno, "internal error while processing subvolume '{0}'".format(self.subvolname))
return False
@property
def purgeable(self):
if not self.retained or self.list_snapshots() or self.has_pending_purges:
return False
return True
@property
def has_pending_purges(self):
try:
return not listdir(self.fs, self.trash_dir) == []
except VolumeException as ve:
if ve.errno == -errno.ENOENT:
return False
raise
@property
def trash_dir(self):
return os.path.join(self.base_path, b".trash")
def create_trashcan(self):
"""per subvolume trash directory"""
try:
self.fs.stat(self.trash_dir)
except cephfs.Error as e:
if e.args[0] == errno.ENOENT:
try:
self.fs.mkdir(self.trash_dir, 0o700)
except cephfs.Error as ce:
raise VolumeException(-ce.args[0], ce.args[1])
else:
raise VolumeException(-e.args[0], e.args[1])
def mark_subvolume(self):
# set subvolume attr, on subvolume root, marking it as a CephFS subvolume
# subvolume root is where snapshots would be taken, and hence is the base_path for v2 subvolumes
try:
# MDS treats this as a noop for already marked subvolume
self.fs.setxattr(self.base_path, 'ceph.dir.subvolume', b'1', 0)
except cephfs.InvalidValue as e:
raise VolumeException(-errno.EINVAL, "invalid value specified for ceph.dir.subvolume")
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
@staticmethod
def is_valid_uuid(uuid_str):
try:
uuid.UUID(uuid_str)
return True
except ValueError:
return False
def snapshot_base_path(self):
return os.path.join(self.base_path, self.vol_spec.snapshot_dir_prefix.encode('utf-8'))
def snapshot_data_path(self, snapname):
snap_base_path = self.snapshot_path(snapname)
uuid_str = None
try:
with self.fs.opendir(snap_base_path) as dir_handle:
d = self.fs.readdir(dir_handle)
while d:
if d.d_name not in (b".", b".."):
d_full_path = os.path.join(snap_base_path, d.d_name)
stx = self.fs.statx(d_full_path, cephfs.CEPH_STATX_MODE, cephfs.AT_SYMLINK_NOFOLLOW)
if stat.S_ISDIR(stx.get('mode')):
if self.is_valid_uuid(d.d_name.decode('utf-8')):
uuid_str = d.d_name
d = self.fs.readdir(dir_handle)
except cephfs.Error as e:
if e.errno == errno.ENOENT:
raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(snapname))
raise VolumeException(-e.args[0], e.args[1])
if not uuid_str:
raise VolumeException(-errno.ENOENT, "snapshot '{0}' does not exist".format(snapname))
return os.path.join(snap_base_path, uuid_str)
def _remove_on_failure(self, subvol_path, retained):
if retained:
log.info("cleaning up subvolume incarnation with path: {0}".format(subvol_path))
try:
self.fs.rmdir(subvol_path)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
else:
log.info("cleaning up subvolume with path: {0}".format(self.subvolname))
self.remove(internal_cleanup=True)
def _set_incarnation_metadata(self, subvolume_type, qpath, initial_state):
self.metadata_mgr.update_global_section(MetadataManager.GLOBAL_META_KEY_TYPE, subvolume_type.value)
self.metadata_mgr.update_global_section(MetadataManager.GLOBAL_META_KEY_PATH, qpath)
self.metadata_mgr.update_global_section(MetadataManager.GLOBAL_META_KEY_STATE, initial_state.value)
def create(self, size, isolate_nspace, pool, mode, uid, gid):
subvolume_type = SubvolumeTypes.TYPE_NORMAL
try:
initial_state = SubvolumeOpSm.get_init_state(subvolume_type)
except OpSmException as oe:
raise VolumeException(-errno.EINVAL, "subvolume creation failed: internal error")
retained = self.retained
if retained and self.has_pending_purges:
raise VolumeException(-errno.EAGAIN, "asynchronous purge of subvolume in progress")
subvol_path = os.path.join(self.base_path, str(uuid.uuid4()).encode('utf-8'))
try:
# create group directory with default mode(0o755) if it doesn't exist.
create_base_dir(self.fs, self.group.path, self.vol_spec.DEFAULT_MODE)
self.fs.mkdirs(subvol_path, mode)
self.mark_subvolume()
attrs = {
'uid': uid,
'gid': gid,
'data_pool': pool,
'pool_namespace': self.namespace if isolate_nspace else None,
'quota': size
}
self.set_attrs(subvol_path, attrs)
# persist subvolume metadata
qpath = subvol_path.decode('utf-8')
if retained:
self._set_incarnation_metadata(subvolume_type, qpath, initial_state)
self.metadata_mgr.flush()
else:
self.init_config(SubvolumeV2.VERSION, subvolume_type, qpath, initial_state)
# Create the subvolume metadata file which manages auth-ids if it doesn't exist
self.auth_mdata_mgr.create_subvolume_metadata_file(self.group.groupname, self.subvolname)
except (VolumeException, MetadataMgrException, cephfs.Error) as e:
try:
self._remove_on_failure(subvol_path, retained)
except VolumeException as ve:
log.info("failed to cleanup subvolume '{0}' ({1})".format(self.subvolname, ve))
if isinstance(e, MetadataMgrException):
log.error("metadata manager exception: {0}".format(e))
e = VolumeException(-errno.EINVAL, f"exception in subvolume metadata: {os.strerror(-e.args[0])}")
elif isinstance(e, cephfs.Error):
e = VolumeException(-e.args[0], e.args[1])
raise e
def create_clone(self, pool, source_volname, source_subvolume, snapname):
subvolume_type = SubvolumeTypes.TYPE_CLONE
try:
initial_state = SubvolumeOpSm.get_init_state(subvolume_type)
except OpSmException as oe:
raise VolumeException(-errno.EINVAL, "clone failed: internal error")
retained = self.retained
if retained and self.has_pending_purges:
raise VolumeException(-errno.EAGAIN, "asynchronous purge of subvolume in progress")
subvol_path = os.path.join(self.base_path, str(uuid.uuid4()).encode('utf-8'))
try:
# source snapshot attrs are used to create clone subvolume
            # attributes of the subvolume's content, though, are synced during the cloning process.
attrs = source_subvolume.get_attrs(source_subvolume.snapshot_data_path(snapname))
# The source of the clone may have exceeded its quota limit as
# CephFS quotas are imprecise. Cloning such a source may fail if
# the quota on the destination is set before starting the clone
# copy. So always set the quota on destination after cloning is
# successful.
attrs["quota"] = None
# override snapshot pool setting, if one is provided for the clone
if pool is not None:
attrs["data_pool"] = pool
attrs["pool_namespace"] = None
# create directory and set attributes
self.fs.mkdirs(subvol_path, attrs.get("mode"))
self.mark_subvolume()
self.set_attrs(subvol_path, attrs)
# persist subvolume metadata and clone source
qpath = subvol_path.decode('utf-8')
if retained:
self._set_incarnation_metadata(subvolume_type, qpath, initial_state)
else:
self.metadata_mgr.init(SubvolumeV2.VERSION, subvolume_type.value, qpath, initial_state.value)
self.add_clone_source(source_volname, source_subvolume, snapname)
self.metadata_mgr.flush()
except (VolumeException, MetadataMgrException, cephfs.Error) as e:
try:
self._remove_on_failure(subvol_path, retained)
except VolumeException as ve:
log.info("failed to cleanup subvolume '{0}' ({1})".format(self.subvolname, ve))
if isinstance(e, MetadataMgrException):
log.error("metadata manager exception: {0}".format(e))
e = VolumeException(-errno.EINVAL, f"exception in subvolume metadata: {os.strerror(-e.args[0])}")
elif isinstance(e, cephfs.Error):
e = VolumeException(-e.args[0], e.args[1])
raise e
def allowed_ops_by_type(self, vol_type):
if vol_type == SubvolumeTypes.TYPE_CLONE:
return {op_type for op_type in SubvolumeOpType}
if vol_type == SubvolumeTypes.TYPE_NORMAL:
return {op_type for op_type in SubvolumeOpType} - {SubvolumeOpType.CLONE_STATUS,
SubvolumeOpType.CLONE_CANCEL,
SubvolumeOpType.CLONE_INTERNAL}
return {}
def allowed_ops_by_state(self, vol_state):
if vol_state == SubvolumeStates.STATE_COMPLETE:
return {op_type for op_type in SubvolumeOpType}
if vol_state == SubvolumeStates.STATE_RETAINED:
return {
SubvolumeOpType.REMOVE,
SubvolumeOpType.REMOVE_FORCE,
SubvolumeOpType.LIST,
SubvolumeOpType.INFO,
SubvolumeOpType.SNAP_REMOVE,
SubvolumeOpType.SNAP_LIST,
SubvolumeOpType.SNAP_INFO,
SubvolumeOpType.SNAP_PROTECT,
SubvolumeOpType.SNAP_UNPROTECT,
SubvolumeOpType.CLONE_SOURCE
}
return {SubvolumeOpType.REMOVE_FORCE,
SubvolumeOpType.CLONE_CREATE,
SubvolumeOpType.CLONE_STATUS,
SubvolumeOpType.CLONE_CANCEL,
SubvolumeOpType.CLONE_INTERNAL,
SubvolumeOpType.CLONE_SOURCE}
def open(self, op_type):
if not isinstance(op_type, SubvolumeOpType):
raise VolumeException(-errno.ENOTSUP, "operation {0} not supported on subvolume '{1}'".format(
op_type.value, self.subvolname))
try:
self.metadata_mgr.refresh()
# unconditionally mark as subvolume, to handle pre-existing subvolumes without the mark
self.mark_subvolume()
etype = self.subvol_type
if op_type not in self.allowed_ops_by_type(etype):
raise VolumeException(-errno.ENOTSUP, "operation '{0}' is not allowed on subvolume '{1}' of type {2}".format(
op_type.value, self.subvolname, etype.value))
estate = self.state
if op_type not in self.allowed_ops_by_state(estate) and estate == SubvolumeStates.STATE_RETAINED:
raise VolumeException(-errno.ENOENT, "subvolume '{0}' is removed and has only snapshots retained".format(
self.subvolname))
if op_type not in self.allowed_ops_by_state(estate) and estate != SubvolumeStates.STATE_RETAINED:
raise VolumeException(-errno.EAGAIN, "subvolume '{0}' is not ready for operation {1}".format(
self.subvolname, op_type.value))
if estate != SubvolumeStates.STATE_RETAINED:
subvol_path = self.path
log.debug("refreshed metadata, checking subvolume path '{0}'".format(subvol_path))
st = self.fs.stat(subvol_path)
self.uid = int(st.st_uid)
self.gid = int(st.st_gid)
self.mode = int(st.st_mode & ~stat.S_IFMT(st.st_mode))
except MetadataMgrException as me:
if me.errno == -errno.ENOENT:
raise VolumeException(-errno.ENOENT, "subvolume '{0}' does not exist".format(self.subvolname))
raise VolumeException(me.args[0], me.args[1])
except cephfs.ObjectNotFound:
log.debug("missing subvolume path '{0}' for subvolume '{1}'".format(subvol_path, self.subvolname))
raise VolumeException(-errno.ENOENT, "mount path missing for subvolume '{0}'".format(self.subvolname))
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
def trash_incarnation_dir(self):
"""rename subvolume (uuid component) to trash"""
self.create_trashcan()
try:
bname = os.path.basename(self.path)
tpath = os.path.join(self.trash_dir, bname)
log.debug("trash: {0} -> {1}".format(self.path, tpath))
self.fs.rename(self.path, tpath)
self._link_dir(tpath, bname)
except cephfs.Error as e:
raise VolumeException(-e.args[0], e.args[1])
@staticmethod
def safe_to_remove_subvolume_clone(subvol_state):
# Both the STATE_FAILED and STATE_CANCELED are handled by 'handle_clone_failed' in the state
        # machine which removes the entry from the index. Hence, it's safe to remove the clone with
        # the force option for both.
acceptable_rm_clone_states = [SubvolumeStates.STATE_COMPLETE, SubvolumeStates.STATE_CANCELED,
SubvolumeStates.STATE_FAILED, SubvolumeStates.STATE_RETAINED]
if subvol_state not in acceptable_rm_clone_states:
return False
return True
def remove(self, retainsnaps=False, internal_cleanup=False):
if self.list_snapshots():
if not retainsnaps:
raise VolumeException(-errno.ENOTEMPTY, "subvolume '{0}' has snapshots".format(self.subvolname))
else:
if not internal_cleanup and not self.safe_to_remove_subvolume_clone(self.state):
raise VolumeException(-errno.EAGAIN,
"{0} clone in-progress -- please cancel the clone and retry".format(self.subvolname))
if not self.has_pending_purges:
self.trash_base_dir()
# Delete the volume meta file, if it's not already deleted
self.auth_mdata_mgr.delete_subvolume_metadata_file(self.group.groupname, self.subvolname)
return
if self.state != SubvolumeStates.STATE_RETAINED:
self.trash_incarnation_dir()
self.metadata_mgr.remove_section(MetadataManager.USER_METADATA_SECTION)
self.metadata_mgr.update_global_section(MetadataManager.GLOBAL_META_KEY_PATH, "")
self.metadata_mgr.update_global_section(MetadataManager.GLOBAL_META_KEY_STATE, SubvolumeStates.STATE_RETAINED.value)
self.metadata_mgr.flush()
# Delete the volume meta file, if it's not already deleted
self.auth_mdata_mgr.delete_subvolume_metadata_file(self.group.groupname, self.subvolname)
def info(self):
if self.state != SubvolumeStates.STATE_RETAINED:
return super(SubvolumeV2, self).info()
return {'type': self.subvol_type.value, 'features': self.features, 'state': SubvolumeStates.STATE_RETAINED.value}
def remove_snapshot(self, snapname, force=False):
super(SubvolumeV2, self).remove_snapshot(snapname, force)
if self.purgeable:
self.trash_base_dir()
# tickle the volume purge job to purge this entry, using ESTALE
raise VolumeException(-errno.ESTALE, "subvolume '{0}' has been removed as the last retained snapshot is removed".format(self.subvolname))
# if not purgeable, subvol is not retained, or has snapshots, or already has purge jobs that will garbage collect this subvol
ceph-main/src/pybind/mgr/zabbix/__init__.py
from .module import Module
ceph-main/src/pybind/mgr/zabbix/module.py
"""
Zabbix module for ceph-mgr
Collect statistics from the Ceph cluster and, every X seconds, send the data to a
Zabbix server using the zabbix_sender executable.
"""
import logging
import json
import errno
import re
from subprocess import Popen, PIPE
from threading import Event
from mgr_module import CLIReadCommand, CLIWriteCommand, MgrModule, Option, OptionValue
from typing import cast, Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
def avg(data: Sequence[Union[int, float]]) -> float:
if len(data):
return sum(data) / float(len(data))
else:
return 0
class ZabbixSender(object):
def __init__(self, sender: str, host: str, port: int, log: logging.Logger) -> None:
self.sender = sender
self.host = host
self.port = port
self.log = log
def send(self, hostname: str, data: Mapping[str, Union[int, float, str]]) -> None:
if len(data) == 0:
return
cmd = [self.sender, '-z', self.host, '-p', str(self.port), '-s',
hostname, '-vv', '-i', '-']
self.log.debug('Executing: %s', cmd)
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, encoding='utf-8')
for key, value in data.items():
assert proc.stdin
proc.stdin.write('{0} ceph.{1} {2}\n'.format(hostname, key, value))
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise RuntimeError('%s exited non-zero: %s' % (self.sender,
stderr))
self.log.debug('Zabbix Sender: %s', stdout.rstrip())
class Module(MgrModule):
run = False
config: Dict[str, OptionValue] = {}
ceph_health_mapping = {'HEALTH_OK': 0, 'HEALTH_WARN': 1, 'HEALTH_ERR': 2}
_zabbix_hosts: List[Dict[str, Union[str, int]]] = list()
@property
def config_keys(self) -> Dict[str, OptionValue]:
return dict((o['name'], o.get('default', None))
for o in self.MODULE_OPTIONS)
MODULE_OPTIONS = [
Option(
name='zabbix_sender',
default='/usr/bin/zabbix_sender'),
Option(
name='zabbix_host',
type='str',
default=None),
Option(
name='zabbix_port',
type='int',
default=10051),
Option(
name='identifier',
default=""),
Option(
name='interval',
type='secs',
default=60),
Option(
name='discovery_interval',
type='uint',
default=100)
]
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(Module, self).__init__(*args, **kwargs)
self.event = Event()
def init_module_config(self) -> None:
self.fsid = self.get('mon_map')['fsid']
self.log.debug('Found Ceph fsid %s', self.fsid)
for key, default in self.config_keys.items():
self.set_config_option(key, self.get_module_option(key, default))
if self.config['zabbix_host']:
self._parse_zabbix_hosts()
def set_config_option(self, option: str, value: OptionValue) -> bool:
if option not in self.config_keys.keys():
            raise RuntimeError('{0} is an unknown configuration '
'option'.format(option))
if option in ['zabbix_port', 'interval', 'discovery_interval']:
try:
int_value = int(value) # type: ignore
except (ValueError, TypeError):
raise RuntimeError('invalid {0} configured. Please specify '
'a valid integer'.format(option))
if option == 'interval' and int_value < 10:
raise RuntimeError('interval should be set to at least 10 seconds')
if option == 'discovery_interval' and int_value < 10:
raise RuntimeError(
"discovery_interval should not be more frequent "
"than once in 10 regular data collection"
)
self.log.debug('Setting in-memory config option %s to: %s', option,
value)
self.config[option] = value
return True
def _parse_zabbix_hosts(self) -> None:
self._zabbix_hosts = list()
servers = cast(str, self.config['zabbix_host']).split(",")
for server in servers:
uri = re.match("(?:(?:\[?)([a-z0-9-\.]+|[a-f0-9:\.]+)(?:\]?))(?:((?::))([0-9]{1,5}))?$", server)
if uri:
zabbix_host, sep, opt_zabbix_port = uri.groups()
if sep == ':':
zabbix_port = int(opt_zabbix_port)
else:
zabbix_port = cast(int, self.config['zabbix_port'])
self._zabbix_hosts.append({'zabbix_host': zabbix_host, 'zabbix_port': zabbix_port})
else:
self.log.error('Zabbix host "%s" is not valid', server)
self.log.error('Parsed Zabbix hosts: %s', self._zabbix_hosts)
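    # Illustrative zabbix_host values accepted by _parse_zabbix_hosts()
    # (host names and addresses are assumptions):
    #   "zbx1.example.com"                  -> uses the configured zabbix_port
    #   "zbx1.example.com:10051,zbx2.local" -> two servers, one explicit port
    #   "[2001:db8::1]:10051"               -> bracketed IPv6 address with port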
def get_pg_stats(self) -> Dict[str, int]:
stats = dict()
pg_states = ['active', 'peering', 'clean', 'scrubbing', 'undersized',
'backfilling', 'recovering', 'degraded', 'inconsistent',
'remapped', 'backfill_toofull', 'backfill_wait',
'recovery_wait']
for state in pg_states:
stats['num_pg_{0}'.format(state)] = 0
pg_status = self.get('pg_status')
stats['num_pg'] = pg_status['num_pgs']
for state in pg_status['pgs_by_state']:
states = state['state_name'].split('+')
for s in pg_states:
key = 'num_pg_{0}'.format(s)
if s in states:
stats[key] += state['count']
return stats
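    # Illustrative shape of the returned PG stats (counts are assumed values):
    #   {'num_pg': 128, 'num_pg_active': 128, 'num_pg_clean': 128,
    #    'num_pg_degraded': 0, ...}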
def get_data(self) -> Dict[str, Union[int, float]]:
data = dict()
health = json.loads(self.get('health')['json'])
# 'status' is luminous+, 'overall_status' is legacy mode.
data['overall_status'] = health.get('status',
health.get('overall_status'))
data['overall_status_int'] = \
self.ceph_health_mapping.get(data['overall_status'])
mon_status = json.loads(self.get('mon_status')['json'])
data['num_mon'] = len(mon_status['monmap']['mons'])
df = self.get('df')
data['num_pools'] = len(df['pools'])
data['total_used_bytes'] = df['stats']['total_used_bytes']
data['total_bytes'] = df['stats']['total_bytes']
data['total_avail_bytes'] = df['stats']['total_avail_bytes']
wr_ops = 0
rd_ops = 0
wr_bytes = 0
rd_bytes = 0
for pool in df['pools']:
wr_ops += pool['stats']['wr']
rd_ops += pool['stats']['rd']
wr_bytes += pool['stats']['wr_bytes']
rd_bytes += pool['stats']['rd_bytes']
data['[{0},rd_bytes]'.format(pool['name'])] = pool['stats']['rd_bytes']
data['[{0},wr_bytes]'.format(pool['name'])] = pool['stats']['wr_bytes']
data['[{0},rd_ops]'.format(pool['name'])] = pool['stats']['rd']
data['[{0},wr_ops]'.format(pool['name'])] = pool['stats']['wr']
data['[{0},bytes_used]'.format(pool['name'])] = pool['stats']['bytes_used']
data['[{0},stored_raw]'.format(pool['name'])] = pool['stats']['stored_raw']
data['[{0},percent_used]'.format(pool['name'])] = pool['stats']['percent_used'] * 100
data['wr_ops'] = wr_ops
data['rd_ops'] = rd_ops
data['wr_bytes'] = wr_bytes
data['rd_bytes'] = rd_bytes
osd_map = self.get('osd_map')
data['num_osd'] = len(osd_map['osds'])
data['osd_nearfull_ratio'] = osd_map['nearfull_ratio']
data['osd_full_ratio'] = osd_map['full_ratio']
data['osd_backfillfull_ratio'] = osd_map['backfillfull_ratio']
data['num_pg_temp'] = len(osd_map['pg_temp'])
num_up = 0
num_in = 0
for osd in osd_map['osds']:
data['[osd.{0},up]'.format(int(osd['osd']))] = osd['up']
if osd['up'] == 1:
num_up += 1
data['[osd.{0},in]'.format(int(osd['osd']))] = osd['in']
if osd['in'] == 1:
num_in += 1
data['num_osd_up'] = num_up
data['num_osd_in'] = num_in
osd_fill = list()
osd_pgs = list()
osd_apply_latency_ns = list()
osd_commit_latency_ns = list()
osd_stats = self.get('osd_stats')
for osd in osd_stats['osd_stats']:
try:
osd_fill.append((float(osd['kb_used']) / float(osd['kb'])) * 100)
data['[osd.{0},osd_fill]'.format(osd['osd'])] = (
float(osd['kb_used']) / float(osd['kb'])) * 100
except ZeroDivisionError:
continue
osd_pgs.append(osd['num_pgs'])
osd_apply_latency_ns.append(osd['perf_stat']['apply_latency_ns'])
osd_commit_latency_ns.append(osd['perf_stat']['commit_latency_ns'])
data['[osd.{0},num_pgs]'.format(osd['osd'])] = osd['num_pgs']
data[
'[osd.{0},osd_latency_apply]'.format(osd['osd'])
] = osd['perf_stat']['apply_latency_ns'] / 1000000.0 # ns -> ms
data[
'[osd.{0},osd_latency_commit]'.format(osd['osd'])
] = osd['perf_stat']['commit_latency_ns'] / 1000000.0 # ns -> ms
try:
data['osd_max_fill'] = max(osd_fill)
data['osd_min_fill'] = min(osd_fill)
data['osd_avg_fill'] = avg(osd_fill)
data['osd_max_pgs'] = max(osd_pgs)
data['osd_min_pgs'] = min(osd_pgs)
data['osd_avg_pgs'] = avg(osd_pgs)
except ValueError:
pass
try:
data['osd_latency_apply_max'] = max(osd_apply_latency_ns) / 1000000.0 # ns -> ms
data['osd_latency_apply_min'] = min(osd_apply_latency_ns) / 1000000.0 # ns -> ms
data['osd_latency_apply_avg'] = avg(osd_apply_latency_ns) / 1000000.0 # ns -> ms
data['osd_latency_commit_max'] = max(osd_commit_latency_ns) / 1000000.0 # ns -> ms
data['osd_latency_commit_min'] = min(osd_commit_latency_ns) / 1000000.0 # ns -> ms
data['osd_latency_commit_avg'] = avg(osd_commit_latency_ns) / 1000000.0 # ns -> ms
except ValueError:
pass
data.update(self.get_pg_stats())
return data
def send(self, data: Mapping[str, Union[int, float, str]]) -> bool:
identifier = cast(Optional[str], self.config['identifier'])
if identifier is None or len(identifier) == 0:
identifier = 'ceph-{0}'.format(self.fsid)
if not self.config['zabbix_host'] or not self._zabbix_hosts:
self.log.error('Zabbix server not set, please configure using: '
'ceph zabbix config-set zabbix_host <zabbix_host>')
self.set_health_checks({
'MGR_ZABBIX_NO_SERVER': {
'severity': 'warning',
'summary': 'No Zabbix server configured',
'detail': ['Configuration value zabbix_host not configured']
}
})
return False
result = True
for server in self._zabbix_hosts:
self.log.info(
'Sending data to Zabbix server %s, port %s as host/identifier %s',
server['zabbix_host'], server['zabbix_port'], identifier)
self.log.debug(data)
try:
zabbix = ZabbixSender(cast(str, self.config['zabbix_sender']),
cast(str, server['zabbix_host']),
cast(int, server['zabbix_port']), self.log)
zabbix.send(identifier, data)
except Exception as exc:
self.log.exception('Failed to send.')
self.set_health_checks({
'MGR_ZABBIX_SEND_FAILED': {
'severity': 'warning',
'summary': 'Failed to send data to Zabbix',
'detail': [str(exc)]
}
})
result = False
self.set_health_checks(dict())
return result
def discovery(self) -> bool:
osd_map = self.get('osd_map')
osd_map_crush = self.get('osd_map_crush')
# Discovering ceph pools
pool_discovery = {
pool['pool_name']: step['item_name']
for pool in osd_map['pools']
for rule in osd_map_crush['rules'] if rule['rule_id'] == pool['crush_rule']
for step in rule['steps'] if step['op'] == "take"
}
pools_discovery_data = {"data": [
{
"{#POOL}": pool,
"{#CRUSH_RULE}": rule
}
for pool, rule in pool_discovery.items()
]}
# Discovering OSDs
# Getting hosts for found crush rules
osd_roots = {
step['item_name']: [
item['id']
for item in root_bucket['items']
]
for rule in osd_map_crush['rules']
for step in rule['steps'] if step['op'] == "take"
for root_bucket in osd_map_crush['buckets']
if root_bucket['id'] == step['item']
}
# Getting osds for hosts with map to crush_rule
osd_discovery = {
item['id']: crush_rule
for crush_rule, roots in osd_roots.items()
for root in roots
for bucket in osd_map_crush['buckets']
if bucket['id'] == root
for item in bucket['items']
}
osd_discovery_data = {"data": [
{
"{#OSD}": osd,
"{#CRUSH_RULE}": rule
}
for osd, rule in osd_discovery.items()
]}
        # Preparing received data for sending
data = {
"zabbix.pool.discovery": json.dumps(pools_discovery_data),
"zabbix.osd.discovery": json.dumps(osd_discovery_data)
}
return bool(self.send(data))
@CLIReadCommand('zabbix config-show')
def config_show(self) -> Tuple[int, str, str]:
"""
Show current configuration
"""
return 0, json.dumps(self.config, indent=4, sort_keys=True), ''
@CLIWriteCommand('zabbix config-set')
def config_set(self, key: str, value: str) -> Tuple[int, str, str]:
"""
Set a configuration value
"""
if not value:
return -errno.EINVAL, '', 'Value should not be empty or None'
self.log.debug('Setting configuration option %s to %s', key, value)
if self.set_config_option(key, value):
self.set_module_option(key, value)
if key == 'zabbix_host' or key == 'zabbix_port':
self._parse_zabbix_hosts()
return 0, 'Configuration option {0} updated'.format(key), ''
return 1,\
'Failed to update configuration option {0}'.format(key), ''
@CLIReadCommand('zabbix send')
def do_send(self) -> Tuple[int, str, str]:
"""
Force sending data to Zabbix
"""
data = self.get_data()
if self.send(data):
return 0, 'Sending data to Zabbix', ''
return 1, 'Failed to send data to Zabbix', ''
@CLIReadCommand('zabbix discovery')
def do_discovery(self) -> Tuple[int, str, str]:
"""
Discovering Zabbix data
"""
if self.discovery():
return 0, 'Sending discovery data to Zabbix', ''
return 1, 'Failed to send discovery data to Zabbix', ''
def shutdown(self) -> None:
self.log.info('Stopping zabbix')
self.run = False
self.event.set()
def serve(self) -> None:
self.log.info('Zabbix module starting up')
self.run = True
self.init_module_config()
discovery_interval = self.config['discovery_interval']
        # We send discovery data once the plugin is loaded
discovery_counter = cast(int, discovery_interval)
while self.run:
self.log.debug('Waking up for new iteration')
if discovery_counter == discovery_interval:
try:
self.discovery()
except Exception:
# Shouldn't happen, but let's log it and retry next interval,
# rather than dying completely.
self.log.exception("Unexpected error during discovery():")
finally:
discovery_counter = 0
try:
data = self.get_data()
self.send(data)
except Exception:
# Shouldn't happen, but let's log it and retry next interval,
# rather than dying completely.
self.log.exception("Unexpected error during send():")
interval = cast(float, self.config['interval'])
self.log.debug('Sleeping for %d seconds', interval)
discovery_counter += 1
self.event.wait(interval)
def self_test(self) -> None:
data = self.get_data()
if data['overall_status'] not in self.ceph_health_mapping:
raise RuntimeError('No valid overall_status found in data')
int(data['overall_status_int'])
if data['num_mon'] < 1:
raise RuntimeError('num_mon is smaller than 1')
ceph-main/src/pybind/rados/setup.py
import pkgutil
if not pkgutil.find_loader('setuptools'):
from distutils.core import setup
from distutils.extension import Extension
else:
from setuptools import setup
from setuptools.extension import Extension
import distutils.sysconfig
from distutils.errors import CompileError, LinkError
from distutils.ccompiler import new_compiler
from itertools import filterfalse, takewhile
import os
import shutil
import sys
import tempfile
import textwrap
def filter_unsupported_flags(compiler, flags):
args = takewhile(lambda argv: not argv.startswith('-'), [compiler] + flags)
if any('clang' in arg for arg in args):
return list(filterfalse(lambda f:
f in ('-mcet',
'-fstack-clash-protection',
'-fno-var-tracking-assignments',
'-Wno-deprecated-register',
'-Wno-gnu-designator') or
f.startswith('-fcf-protection'),
flags))
else:
return flags
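# Illustrative behaviour (flags below are assumed examples): with a clang
# compiler the GCC-only flags are dropped, everything else passes through.
#
#   filter_unsupported_flags('clang', ['-O2', '-fstack-clash-protection'])
#   # -> ['-O2']
#   filter_unsupported_flags('gcc', ['-O2', '-fstack-clash-protection'])
#   # -> ['-O2', '-fstack-clash-protection']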
def monkey_with_compiler(customize):
def patched(compiler):
customize(compiler)
if compiler.compiler_type != 'unix':
return
compiler.compiler[1:] = \
filter_unsupported_flags(compiler.compiler[0],
compiler.compiler[1:])
compiler.compiler_so[1:] = \
filter_unsupported_flags(compiler.compiler_so[0],
compiler.compiler_so[1:])
return patched
distutils.sysconfig.customize_compiler = \
monkey_with_compiler(distutils.sysconfig.customize_compiler)
# PEP 440 versioning of the Rados package on PyPI
# Bump this version, after every changeset
__version__ = '2.0.0'
def get_python_flags(libs):
py_libs = sum((libs.split() for libs in
distutils.sysconfig.get_config_vars('LIBS', 'SYSLIBS')), [])
ldflags = list(filterfalse(lambda lib: lib.startswith('-l'), py_libs))
py_libs = [lib.replace('-l', '') for lib in
filter(lambda lib: lib.startswith('-l'), py_libs)]
compiler = new_compiler()
distutils.sysconfig.customize_compiler(compiler)
return dict(
include_dirs=[distutils.sysconfig.get_python_inc()],
library_dirs=distutils.sysconfig.get_config_vars('LIBDIR', 'LIBPL'),
libraries=libs + py_libs,
extra_compile_args=filter_unsupported_flags(
compiler.compiler[0],
compiler.compiler[1:] + distutils.sysconfig.get_config_var('CFLAGS').split()),
extra_link_args=(distutils.sysconfig.get_config_var('LDFLAGS').split() +
ldflags))
def check_sanity():
"""
Test if development headers and library for rados is available by compiling a dummy C program.
"""
CEPH_SRC_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..',
'..'
)
tmp_dir = tempfile.mkdtemp(dir=os.environ.get('TMPDIR', os.path.dirname(__file__)))
tmp_file = os.path.join(tmp_dir, 'rados_dummy.c')
with open(tmp_file, 'w') as fp:
dummy_prog = textwrap.dedent("""
#include <rados/librados.h>
int main(void) {
rados_t cluster;
rados_create(&cluster, NULL);
return 0;
}
""")
fp.write(dummy_prog)
compiler = new_compiler()
distutils.sysconfig.customize_compiler(compiler)
if 'CEPH_LIBDIR' in os.environ:
# The setup.py has been invoked by a top-level Ceph make.
# Set the appropriate CFLAGS and LDFLAGS
compiler.set_include_dirs([os.path.join(CEPH_SRC_DIR, 'include')])
compiler.set_library_dirs([os.environ.get('CEPH_LIBDIR')])
try:
link_objects = compiler.compile(
sources=[tmp_file],
output_dir=tmp_dir
)
compiler.link_executable(
objects=link_objects,
output_progname=os.path.join(tmp_dir, 'rados_dummy'),
libraries=['rados'],
output_dir=tmp_dir,
)
except CompileError:
print('\nCompile Error: RADOS development headers not found', file=sys.stderr)
return False
except LinkError:
print('\nLink Error: RADOS library not found', file=sys.stderr)
return False
else:
return True
finally:
shutil.rmtree(tmp_dir)
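# Pick the build flavour: documentation builds need no native libraries, a
# regular build requires the librados sanity check to pass, anything else
# aborts.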
if 'BUILD_DOC' in os.environ or 'READTHEDOCS' in os.environ:
ext_args = {}
cython_constants = dict(BUILD_DOC=True)
elif check_sanity():
ext_args = get_python_flags(['rados'])
cython_constants = dict(BUILD_DOC=False)
else:
sys.exit(1)
cmdclass = {}
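# Prefer Cython when it is installed; otherwise fall back to a pre-generated
# rados.c and turn cythonize() into a no-op.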
try:
from Cython.Build import cythonize
from Cython.Distutils import build_ext
cmdclass = {'build_ext': build_ext}
except ImportError:
print("WARNING: Cython is not installed.")
if not os.path.isfile('rados.c'):
print('ERROR: Cannot find Cythonized file rados.c', file=sys.stderr)
sys.exit(1)
else:
def cythonize(x, **kwargs):
return x
source = "rados.c"
else:
source = "rados.pyx"
# Disable cythonification if we're not really building anything
if (len(sys.argv) >= 2 and
any(i in sys.argv[1:] for i in ('--help', 'clean', 'egg_info', '--version')
)):
def cythonize(x, **kwargs):
return x
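# Illustrative out-of-tree invocation (assumes the librados development files
# and Cython are available):
#   python3 setup.py build_ext --inplace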
setup(
name='rados',
version=__version__,
description="Python bindings for the Ceph librados library",
long_description=(
"This package contains Python bindings for interacting with Ceph's "
"RADOS library. RADOS is a reliable, autonomic distributed object "
"storage cluster developed as part of the Ceph distributed storage "
"system. This is a shared library allowing applications to access "
"the distributed object store using a simple file-like interface."
),
url='https://github.com/ceph/ceph/tree/master/src/pybind/rados',
license='LGPLv2+',
platforms='Linux',
ext_modules=cythonize(
[
Extension(
"rados",
[source],
**ext_args
)
],
# use "3str" when Cython 3.0 is available
compiler_directives={'language_level': sys.version_info.major},
compile_time_env=cython_constants,
build_dir=os.environ.get("CYTHON_BUILD_DIR", None),
),
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Cython',
'Programming Language :: Python :: 3'
],
cmdclass=cmdclass,
)
| 6,854 | 32.115942 | 98 | py |
| null | ceph-main/src/pybind/rbd/setup.py |
import os
import pkgutil
import shutil
import subprocess
import sys
import tempfile
import textwrap
if not pkgutil.find_loader('setuptools'):
from distutils.core import setup
from distutils.extension import Extension
else:
from setuptools import setup
from setuptools.extension import Extension
from distutils.ccompiler import new_compiler
from distutils.errors import CompileError, LinkError
from itertools import filterfalse, takewhile
import distutils.sysconfig
def filter_unsupported_flags(compiler, flags):
args = takewhile(lambda argv: not argv.startswith('-'), [compiler] + flags)
if any('clang' in arg for arg in args):
return list(filterfalse(lambda f:
f in ('-mcet',
'-fstack-clash-protection',
'-fno-var-tracking-assignments',
'-Wno-deprecated-register',
'-Wno-gnu-designator') or
f.startswith('-fcf-protection'),
flags))
else:
return flags
def monkey_with_compiler(customize):
def patched(compiler):
customize(compiler)
if compiler.compiler_type != 'unix':
return
compiler.compiler[1:] = \
filter_unsupported_flags(compiler.compiler[0],
compiler.compiler[1:])
compiler.compiler_so[1:] = \
filter_unsupported_flags(compiler.compiler_so[0],
compiler.compiler_so[1:])
return patched
distutils.sysconfig.customize_compiler = \
monkey_with_compiler(distutils.sysconfig.customize_compiler)
# PEP 440 versioning of the RBD package on PyPI
# Bump this version, after every changeset
__version__ = '2.0.0'
def get_python_flags(libs):
py_libs = sum((libs.split() for libs in
distutils.sysconfig.get_config_vars('LIBS', 'SYSLIBS')), [])
ldflags = list(filterfalse(lambda lib: lib.startswith('-l'), py_libs))
py_libs = [lib.replace('-l', '') for lib in
filter(lambda lib: lib.startswith('-l'), py_libs)]
compiler = new_compiler()
distutils.sysconfig.customize_compiler(compiler)
return dict(
include_dirs=[distutils.sysconfig.get_python_inc()],
library_dirs=distutils.sysconfig.get_config_vars('LIBDIR', 'LIBPL'),
libraries=libs + py_libs,
extra_compile_args=filter_unsupported_flags(
compiler.compiler[0],
compiler.compiler[1:] + distutils.sysconfig.get_config_var('CFLAGS').split()),
extra_link_args=(distutils.sysconfig.get_config_var('LDFLAGS').split() +
ldflags))
def check_sanity():
"""
Test if development headers and library for rbd is available by compiling a dummy C program.
"""
CEPH_SRC_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..',
'..'
)
tmp_dir = tempfile.mkdtemp(dir=os.environ.get('TMPDIR', os.path.dirname(__file__)))
tmp_file = os.path.join(tmp_dir, 'rbd_dummy.c')
with open(tmp_file, 'w') as fp:
dummy_prog = textwrap.dedent("""
#include <stddef.h>
#include <rbd/librbd.h>
int main(void) {
rados_t cluster;
rados_create(&cluster, NULL);
return 0;
}
""")
fp.write(dummy_prog)
compiler = new_compiler()
distutils.sysconfig.customize_compiler(compiler)
if 'CEPH_LIBDIR' in os.environ:
# The setup.py has been invoked by a top-level Ceph make.
# Set the appropriate CFLAGS and LDFLAGS
compiler.set_include_dirs([os.path.join(CEPH_SRC_DIR, 'include')])
compiler.set_library_dirs([os.environ.get('CEPH_LIBDIR')])
try:
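        # Compile the probe with 64-bit file offsets (librbd works with image
        # offsets well past 2 GiB).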
compiler.define_macro('_FILE_OFFSET_BITS', '64')
link_objects = compiler.compile(
sources=[tmp_file],
output_dir=tmp_dir
)
compiler.link_executable(
objects=link_objects,
output_progname=os.path.join(tmp_dir, 'rbd_dummy'),
libraries=['rbd', 'rados'],
output_dir=tmp_dir,
)
except CompileError:
print('\nCompile Error: RBD development headers not found', file=sys.stderr)
return False
except LinkError:
print('\nLink Error: RBD library not found', file=sys.stderr)
return False
else:
return True
finally:
shutil.rmtree(tmp_dir)
if 'BUILD_DOC' in os.environ or 'READTHEDOCS' in os.environ:
ext_args = {}
cython_constants = dict(BUILD_DOC=True)
cythonize_args = dict(compile_time_env=cython_constants)
elif check_sanity():
ext_args = get_python_flags(['rados', 'rbd'])
cython_constants = dict(BUILD_DOC=False)
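    # The rados binding directory is added to Cython's include path so that
    # rbd.pyx can pick up the shared declarations kept there.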
include_path = [os.path.join(os.path.dirname(__file__), "..", "rados")]
cythonize_args = dict(compile_time_env=cython_constants,
include_path=include_path)
else:
sys.exit(1)
cmdclass = {}
try:
from Cython.Build import cythonize
from Cython.Distutils import build_ext
cmdclass = {'build_ext': build_ext}
except ImportError:
print("WARNING: Cython is not installed.")
if not os.path.isfile('rbd.c'):
print('ERROR: Cannot find Cythonized file rbd.c', file=sys.stderr)
sys.exit(1)
else:
def cythonize(x, **kwargs):
return x
source = "rbd.c"
else:
source = "rbd.pyx"
# Disable cythonification if we're not really building anything
if (len(sys.argv) >= 2 and
any(i in sys.argv[1:] for i in ('--help', 'clean', 'egg_info', '--version')
)):
def cythonize(x, **kwargs):
return x
setup(
name='rbd',
version=__version__,
description="Python bindings for the RBD library",
long_description=(
"This package contains Python bindings for interacting with the "
"RADOS Block Device (RBD) library. rbd is a utility for manipulating "
"rados block device images, used by the Linux rbd driver and the rbd "
"storage driver for QEMU/KVM. RBD images are simple block devices that "
"are striped over objects and stored in a RADOS object store. The size "
"of the objects the image is striped over must be a power of two."
),
url='https://github.com/ceph/ceph/tree/master/src/pybind/rbd',
license='LGPLv2+',
platforms='Linux',
ext_modules=cythonize(
[
Extension(
"rbd",
[source],
**ext_args
)
],
compiler_directives={'language_level': sys.version_info.major},
build_dir=os.environ.get("CYTHON_BUILD_DIR", None),
**cythonize_args
),
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Cython',
'Programming Language :: Python :: 3'
],
cmdclass=cmdclass,
)
| 7,202 | 32.658879 | 96 | py |
| null | ceph-main/src/pybind/rgw/setup.py |
import pkgutil
if not pkgutil.find_loader('setuptools'):
from distutils.core import setup
from distutils.extension import Extension
else:
from setuptools import setup
from setuptools.extension import Extension
import distutils.core
import os
import shutil
import sys
import tempfile
import textwrap
from distutils.ccompiler import new_compiler
from distutils.errors import CompileError, LinkError
from itertools import filterfalse, takewhile
import distutils.sysconfig
def filter_unsupported_flags(compiler, flags):
args = takewhile(lambda argv: not argv.startswith('-'), [compiler] + flags)
if any('clang' in arg for arg in args):
return list(filterfalse(lambda f:
f in ('-mcet',
'-fstack-clash-protection',
'-fno-var-tracking-assignments',
'-Wno-deprecated-register',
'-Wno-gnu-designator') or
f.startswith('-fcf-protection'),
flags))
else:
return flags
def monkey_with_compiler(customize):
def patched(compiler):
customize(compiler)
if compiler.compiler_type != 'unix':
return
compiler.compiler[1:] = \
filter_unsupported_flags(compiler.compiler[0],
compiler.compiler[1:])
compiler.compiler_so[1:] = \
filter_unsupported_flags(compiler.compiler_so[0],
compiler.compiler_so[1:])
return patched
distutils.sysconfig.customize_compiler = \
monkey_with_compiler(distutils.sysconfig.customize_compiler)
# PEP 440 versioning of the RGW package on PyPI
# Bump this version, after every changeset
__version__ = '2.0.0'
def get_python_flags(libs):
py_libs = sum((libs.split() for libs in
distutils.sysconfig.get_config_vars('LIBS', 'SYSLIBS')), [])
ldflags = list(filterfalse(lambda lib: lib.startswith('-l'), py_libs))
py_libs = [lib.replace('-l', '') for lib in
filter(lambda lib: lib.startswith('-l'), py_libs)]
compiler = new_compiler()
distutils.sysconfig.customize_compiler(compiler)
return dict(
include_dirs=[distutils.sysconfig.get_python_inc()],
library_dirs=distutils.sysconfig.get_config_vars('LIBDIR', 'LIBPL'),
libraries=libs + py_libs,
extra_compile_args=filter_unsupported_flags(
compiler.compiler[0],
compiler.compiler[1:] + distutils.sysconfig.get_config_var('CFLAGS').split()),
extra_link_args=(distutils.sysconfig.get_config_var('LDFLAGS').split() +
ldflags))
def check_sanity():
"""
Test if development headers and library for rgw is available by compiling a dummy C program.
"""
CEPH_SRC_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..',
'..'
)
tmp_dir = tempfile.mkdtemp(dir=os.environ.get('TMPDIR', os.path.dirname(__file__)))
tmp_file = os.path.join(tmp_dir, 'rgw_dummy.c')
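    # The probe only needs to compile against rados/rgw_file.h and link with
    # librgw/librados; rgwfile_version() serves purely as a link-time check.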
with open(tmp_file, 'w') as fp:
dummy_prog = textwrap.dedent("""
#include <stddef.h>
#include "rados/rgw_file.h"
int main(void) {
rgwfile_version(NULL, NULL, NULL);
return 0;
}
""")
fp.write(dummy_prog)
compiler = new_compiler()
distutils.sysconfig.customize_compiler(compiler)
if 'CEPH_LIBDIR' in os.environ:
# The setup.py has been invoked by a top-level Ceph make.
# Set the appropriate CFLAGS and LDFLAGS
compiler.set_include_dirs([os.path.join(CEPH_SRC_DIR, 'include')])
compiler.set_library_dirs([os.environ.get('CEPH_LIBDIR')])
try:
compiler.define_macro('_FILE_OFFSET_BITS', '64')
link_objects = compiler.compile(
sources=[tmp_file],
output_dir=tmp_dir,
)
compiler.link_executable(
objects=link_objects,
output_progname=os.path.join(tmp_dir, 'rgw_dummy'),
libraries=['rgw', 'rados'],
output_dir=tmp_dir,
)
except CompileError:
print('\nCompile Error: RGW development headers not found', file=sys.stderr)
return False
except LinkError:
print('\nLink Error: RGW library not found', file=sys.stderr)
return False
else:
return True
finally:
shutil.rmtree(tmp_dir)
if 'BUILD_DOC' in os.environ or 'READTHEDOCS' in os.environ:
ext_args = {}
cython_constants = dict(BUILD_DOC=True)
cythonize_args = dict(compile_time_env=cython_constants)
elif check_sanity():
ext_args = get_python_flags(['rados', 'rgw'])
cython_constants = dict(BUILD_DOC=False)
include_path = [os.path.join(os.path.dirname(__file__), "..", "rados")]
cythonize_args = dict(compile_time_env=cython_constants,
include_path=include_path)
else:
sys.exit(1)
cmdclass = {}
try:
from Cython.Build import cythonize
from Cython.Distutils import build_ext
cmdclass = {'build_ext': build_ext}
except ImportError:
print("WARNING: Cython is not installed.")
if not os.path.isfile('rgw.c'):
print('ERROR: Cannot find Cythonized file rgw.c', file=sys.stderr)
sys.exit(1)
else:
def cythonize(x, **kwargs):
return x
source = "rgw.c"
else:
source = "rgw.pyx"
# Disable cythonification if we're not really building anything
if (len(sys.argv) >= 2 and
any(i in sys.argv[1:] for i in ('--help', 'clean', 'egg_info', '--version')
)):
def cythonize(x, **kwargs):
return x
setup(
name='rgw',
version=__version__,
description="Python bindings for the RGW library",
long_description=(
"This package contains Python bindings for interacting with the "
"RGW library. RGW is a Object Storage Gateway "
"that uses a Ceph Storage Cluster to store its data. The "
"Ceph Object Storage support S3 and Swift APIs, "
"and file operations."
),
url='https://github.com/ceph/ceph/tree/master/src/pybind/rgw',
license='LGPLv2+',
platforms='Linux',
ext_modules=cythonize(
[
Extension(
"rgw",
[source],
**ext_args
)
],
compiler_directives={'language_level': sys.version_info.major},
build_dir=os.environ.get("CYTHON_BUILD_DIR", None),
**cythonize_args
),
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
],
cmdclass=cmdclass,
)
| 7,007 | 31.747664 | 96 | py |
| null | ceph-main/src/python-common/setup.py |
from setuptools import setup, find_packages
with open("README.rst", "r") as fh:
long_description = fh.read()
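# Plain setuptools packaging (no C extensions): this just publishes the shared
# `ceph` Python library described in README.rst.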
setup(
name='ceph',
version='1.0.0',
packages=find_packages(),
author='',
author_email='[email protected]',
description='Ceph common library',
long_description=long_description,
license='LGPLv2+',
keywords='ceph',
url="https://github.com/ceph/ceph",
    zip_safe=False,
install_requires=(
'pyyaml',
),
    classifiers=[
        'Intended Audience :: Developers',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
| 872 | 25.454545 | 93 | py |
No community queries yet
The top public SQL queries from the community will appear here once available.