Search is not available for this dataset
repo
stringlengths 2
152
⌀ | file
stringlengths 15
239
| code
stringlengths 0
58.4M
| file_length
int64 0
58.4M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 364
values |
---|---|---|---|---|---|---|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_base_controller.py
|
import inspect
import json
import logging
from functools import wraps
from typing import ClassVar, List, Optional, Type
from urllib.parse import unquote
import cherrypy
from ..plugins import PLUGIN_MANAGER
from ..services.auth import AuthManager, JwtManager
from ..tools import get_request_body_params
from ._helpers import _get_function_params
from ._version import APIVersion
logger = logging.getLogger(__name__)
class BaseController:
    """
    Base class for all controllers providing API endpoints.
    """
    # Global registry of every subclass (unless created with
    # ``skip_registry=True``); consumed by ``load_controllers()``.
    _registry: ClassVar[List[Type['BaseController']]] = []
    # Flipped to True by the router decorators; subclasses that were never
    # routed are filtered out in ``load_controllers()``.
    _routed = False

    def __init_subclass__(cls, skip_registry: bool = False, **kwargs) -> None:
        # Auto-register every controller subclass at class-creation time.
        super().__init_subclass__(**kwargs)  # type: ignore
        if not skip_registry:
            BaseController._registry.append(cls)

    @classmethod
    def load_controllers(cls):
        """Import every non-private sibling module (which registers its
        controllers as a side effect) and return all routed controllers,
        including controllers contributed by plugins."""
        import importlib
        from pathlib import Path
        path = Path(__file__).parent
        logger.debug('Controller import path: %s', path)
        modules = [
            f.stem for f in path.glob('*.py') if
            not f.name.startswith('_') and f.is_file() and not f.is_symlink()]
        logger.debug('Controller files found: %r', modules)

        for module in modules:
            importlib.import_module(f'{__package__}.{module}')

        # pylint: disable=protected-access
        controllers = [
            controller for controller in BaseController._registry if
            controller._routed
        ]

        for clist in PLUGIN_MANAGER.hook.get_controllers() or []:
            controllers.extend(clist)

        return controllers

    class Endpoint:
        """
        An instance of this class represents an endpoint.
        """

        def __init__(self, ctrl, func):
            self.ctrl = ctrl
            self.inst = None
            self.func = func

            # Non-proxy endpoints get the wrapped function bound onto the
            # controller class under the original function's name.
            if not self.config['proxy']:
                setattr(self.ctrl, func.__name__, self.function)

        @property
        def config(self):
            # Walk the decorator chain (via functools.wraps' __wrapped__)
            # until the function carrying the ``_endpoint`` metadata is
            # found; returns None if the chain has no endpoint metadata.
            func = self.func
            while not hasattr(func, '_endpoint'):
                if hasattr(func, "__wrapped__"):
                    func = func.__wrapped__
                else:
                    return None
            return func._endpoint  # pylint: disable=protected-access

        @property
        def function(self):
            # The cherrypy-facing callable: the handler wrapped with
            # version negotiation and response serialization.
            # pylint: disable=protected-access
            return self.ctrl._request_wrapper(self.func, self.method,
                                              self.config['json_response'],
                                              self.config['xml'],
                                              self.config['version'])

        @property
        def method(self):
            return self.config['method']

        @property
        def proxy(self):
            return self.config['proxy']

        @property
        def url(self):
            # Build the full URL: controller path + endpoint path (or the
            # function name when no explicit path was given) + any path
            # parameters not already present in the controller path.
            ctrl_path = self.ctrl.get_path()
            if ctrl_path == "/":
                ctrl_path = ""
            if self.config['path'] is not None:
                url = "{}{}".format(ctrl_path, self.config['path'])
            else:
                url = "{}/{}".format(ctrl_path, self.func.__name__)

            ctrl_path_params = self.ctrl.get_path_param_names(
                self.config['path'])
            path_params = [p['name'] for p in self.path_params
                           if p['name'] not in ctrl_path_params]
            path_params = ["{{{}}}".format(p) for p in path_params]
            if path_params:
                url += "/{}".format("/".join(path_params))

            return url

        @property
        def action(self):
            return self.func.__name__

        @property
        def path_params(self):
            # For GET/DELETE every required, non-query function parameter
            # is treated as a path parameter; for POST/PUT only the ones
            # explicitly listed in the endpoint's ``path_params``.
            ctrl_path_params = self.ctrl.get_path_param_names(
                self.config['path'])
            func_params = _get_function_params(self.func)

            if self.method in ['GET', 'DELETE']:
                assert self.config['path_params'] is None

                return [p for p in func_params if p['name'] in ctrl_path_params
                        or (p['name'] not in self.config['query_params']
                            and p['required'])]

            # elif self.method in ['POST', 'PUT']:
            return [p for p in func_params if p['name'] in ctrl_path_params
                    or p['name'] in self.config['path_params']]

        @property
        def query_params(self):
            # GET/DELETE: everything that is not a path parameter;
            # POST/PUT: only parameters explicitly declared as query params.
            if self.method in ['GET', 'DELETE']:
                func_params = _get_function_params(self.func)
                path_params = [p['name'] for p in self.path_params]
                return [p for p in func_params if p['name'] not in path_params]

            # elif self.method in ['POST', 'PUT']:
            func_params = _get_function_params(self.func)
            return [p for p in func_params
                    if p['name'] in self.config['query_params']]

        @property
        def body_params(self):
            # Whatever is neither a path nor a query parameter is expected
            # in the request body.
            func_params = _get_function_params(self.func)
            path_params = [p['name'] for p in self.path_params]
            query_params = [p['name'] for p in self.query_params]
            return [p for p in func_params
                    if p['name'] not in path_params
                    and p['name'] not in query_params]

        @property
        def group(self):
            return self.ctrl.__name__

        @property
        def is_api(self):
            # changed from hasattr to getattr: some ui-based api inherit _api_endpoint
            return getattr(self.ctrl, '_api_endpoint', False)

        @property
        def is_secure(self):
            return self.ctrl._cp_config['tools.authenticate.on']  # pylint: disable=protected-access

        def __repr__(self):
            return "Endpoint({}, {}, {})".format(self.url, self.method,
                                                 self.action)

    def __init__(self):
        logger.info('Initializing controller: %s -> %s',
                    self.__class__.__name__, self._cp_path_)  # type: ignore
        super().__init__()

    def _has_permissions(self, permissions, scope=None):
        """Return True if the authenticated user holds all ``permissions``
        on ``scope`` (defaults to the controller's security scope).

        :raises Exception: on unsecured controllers or when no scope can
            be determined.
        """
        if not self._cp_config['tools.authenticate.on']:  # type: ignore
            raise Exception("Cannot verify permission in non secured "
                            "controllers")

        if not isinstance(permissions, list):
            permissions = [permissions]

        if scope is None:
            scope = getattr(self, '_security_scope', None)
        if scope is None:
            raise Exception("Cannot verify permissions without scope security"
                            " defined")
        username = JwtManager.LOCAL_USER.username
        return AuthManager.authorize(username, scope, permissions)

    @classmethod
    def get_path_param_names(cls, path_extension=None):
        """Extract parameter names from the controller path plus an optional
        extension; supports both ``:param`` and ``{param}``/``{param:regex}``
        syntaxes."""
        if path_extension is None:
            path_extension = ""
        full_path = cls._cp_path_[1:] + path_extension  # type: ignore
        path_params = []
        for step in full_path.split('/'):
            param = None
            if not step:
                continue
            if step[0] == ':':
                param = step[1:]
            elif step[0] == '{' and step[-1] == '}':
                param, _, _ = step[1:-1].partition(':')
            if param:
                path_params.append(param)
        return path_params

    @classmethod
    def get_path(cls):
        return cls._cp_path_  # type: ignore

    @classmethod
    def endpoints(cls):
        """
        This method iterates over all the methods decorated with ``@endpoint``
        and creates an Endpoint object for each one of the methods.

        :return: A list of endpoint objects
        :rtype: list[BaseController.Endpoint]
        """
        result = []
        for _, func in inspect.getmembers(cls, predicate=callable):
            if hasattr(func, '_endpoint'):
                result.append(cls.Endpoint(cls, func))
        return result

    @staticmethod
    def get_client_version():
        """Parse the requested API version out of the Accept header.

        :raises cherrypy.HTTPError: 415 when the header cannot be parsed.
        """
        try:
            client_version = APIVersion.from_mime_type(
                cherrypy.request.headers['Accept'])
        except Exception:
            raise cherrypy.HTTPError(
                415, "Unable to find version in request header")
        return client_version

    @staticmethod
    def _request_wrapper(func, method, json_response, xml,  # pylint: disable=unused-argument
                         version: Optional[APIVersion]):
        """Wrap an endpoint handler with URL-decoding of kwargs, request-body
        merging, API version negotiation and JSON/XML response encoding."""
        # pylint: disable=too-many-branches
        @wraps(func)
        def inner(*args, **kwargs):
            client_version = None
            # URL-decode every string keyword argument (path/query params).
            for key, value in kwargs.items():
                if isinstance(value, str):
                    kwargs[key] = unquote(value)

            # Process method arguments.
            params = get_request_body_params(cherrypy.request)
            kwargs.update(params)

            if version is not None:
                client_version = BaseController.get_client_version()

                if version.supports(client_version):
                    ret = func(*args, **kwargs)
                else:
                    raise cherrypy.HTTPError(
                        415,
                        f"Incorrect version: endpoint is '{version!s}', "
                        f"client requested '{client_version!s}'"
                    )

            else:
                ret = func(*args, **kwargs)
            if isinstance(ret, bytes):
                ret = ret.decode('utf-8')
            if xml:
                cherrypy.response.headers['Content-Type'] = (version.to_mime_type(subtype='xml')
                                                             if version else 'application/xml')
                return ret.encode('utf8')
            if json_response:
                cherrypy.response.headers['Content-Type'] = (version.to_mime_type(subtype='json')
                                                             if version else 'application/json')
                ret = json.dumps(ret).encode('utf8')
            return ret
        return inner

    @property
    def _request(self):
        return self.Request(cherrypy.request)

    class Request(object):
        """Thin convenience wrapper around a cherrypy request object."""

        def __init__(self, cherrypy_req):
            self._creq = cherrypy_req

        @property
        def scheme(self):
            return self._creq.scheme

        @property
        def host(self):
            # ``base`` looks like "scheme://host[:port]"; strip "scheme://"
            # (the +3 accounts for "://"), then drop an optional port.
            base = self._creq.base
            base = base[len(self.scheme)+3:]
            return base[:base.find(":")] if ":" in base else base

        @property
        def port(self):
            base = self._creq.base
            base = base[len(self.scheme)+3:]
            default_port = 443 if self.scheme == 'https' else 80
            return int(base[base.find(":")+1:]) if ":" in base else default_port

        @property
        def path_info(self):
            return self._creq.path_info
| 10,918 | 33.553797 | 100 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_crud.py
|
from enum import Enum
from functools import wraps
from inspect import isclass
from typing import Any, Callable, Dict, Generator, Iterable, Iterator, List, \
NamedTuple, Optional, Tuple, Union, get_type_hints
from ._api_router import APIRouter
from ._docs import APIDoc, EndpointDoc
from ._rest_controller import RESTController
from ._ui_router import UIRouter
class SecretStr(str):
    """Marker type for secret values: fields annotated as SecretStr are
    masked by ``serialize`` instead of being returned to the client."""
    pass
class MethodType(Enum):
    """HTTP write methods a generated form may submit with."""
    POST = 'post'
    PUT = 'put'
def isnamedtuple(o):
    """Duck-typed check for ``collections.namedtuple`` / ``typing.NamedTuple``
    instances: a tuple exposing both ``_asdict`` and ``_fields``."""
    if not isinstance(o, tuple):
        return False
    return all(hasattr(o, marker) for marker in ('_asdict', '_fields'))
class SerializableClass:
    """Mixin that exposes an object's public instance attributes through the
    iteration/containment/len protocols, so ``dict(obj)`` and ``serialize``
    can turn it into a plain mapping."""

    def __iter__(self):
        public_names = (a for a in self.__dict__ if not a.startswith("__"))
        for name in public_names:
            yield name, getattr(self, name)

    def __contains__(self, value):
        return value in self.__dict__

    def __len__(self):
        return len(self.__dict__)
def serialize(o, expected_type=None):
    """
    Recursively convert *o* into JSON-serializable primitives.

    ``expected_type`` carries the declared type of the value (taken from the
    type hints of an enclosing NamedTuple) so that fields declared as
    ``SecretStr`` are masked even though their runtime type is plain ``str``.
    """
    # pylint: disable=R1705,W1116
    if isnamedtuple(o):
        hints = get_type_hints(o)
        return {k: serialize(v, hints[k]) for k, v in zip(o._fields, o)}
    elif isinstance(o, (list, tuple, set)):
        # json serializes list and tuples to arrays, hence we also serialize
        # sets to lists.
        # NOTE: we could add a metadata value in a list to identify tuples and
        # sets if we wanted but for now let's go for lists.
        return [serialize(i) for i in o]
    elif isinstance(o, SerializableClass):
        return {serialize(k): serialize(v) for k, v in o}
    elif isinstance(o, (Iterator, Generator)):
        return [serialize(i) for i in o]
    elif expected_type and isclass(expected_type) and issubclass(expected_type, SecretStr):
        # Mask secrets instead of leaking them to the client.
        return "***********"
    else:
        return o
class TableColumn(NamedTuple):
    """Column definition for the frontend datatable (camelCase field names
    mirror the frontend component's inputs)."""
    prop: str  # row attribute rendered in this column
    cellTemplate: str = ''  # named cell template; '' = default rendering
    isHidden: bool = False
    filterable: bool = True
    flexGrow: int = 1
class TableAction(NamedTuple):
    """Definition of a table toolbar action button."""
    name: str
    permission: str
    icon: str
    routerLink: str = ''  # redirect to...
    click: str = ''
    disable: bool = False  # disable without selection
class SelectionType(Enum):
    """Row-selection mode of the frontend table."""
    NONE = ''
    SINGLE = 'single'
    MULTI = 'multiClick'
class TableComponent(SerializableClass):
    """Serializable configuration of the frontend table widget."""

    def __init__(self) -> None:
        self.columns: List[TableColumn] = []
        self.columnMode: str = 'flex'
        self.toolHeader: bool = True
        self.selectionType: str = SelectionType.SINGLE.value

    def set_selection_type(self, type_: SelectionType):
        """Set the row-selection behaviour (none/single/multi)."""
        self.selectionType = type_.value
class Icon(Enum):
    """Font Awesome CSS classes used for table action icons."""
    ADD = 'fa fa-plus'
    DESTROY = 'fa fa-times'
    IMPORT = 'fa fa-upload'
    EXPORT = 'fa fa-download'
    EDIT = 'fa fa-pencil'
class Validator(Enum):
    """Client-side validators that can be attached to a FormField."""
    JSON = 'json'
    RGW_ROLE_NAME = 'rgwRoleName'
    RGW_ROLE_PATH = 'rgwRolePath'
    FILE = 'file'
class FormField(NamedTuple):
    """
    The key of a FormField is then used to send the data related to that key into the
    POST and PUT endpoints. It is imperative for the developer to map keys of fields and containers
    to the input of the POST and PUT endpoints.
    """
    name: str
    key: str
    field_type: Any = str
    default_value: Optional[Any] = None
    optional: bool = False
    readonly: bool = False
    help: str = ''
    # NOTE(review): this default list is shared by every FormField that does
    # not pass its own validators — safe only while nothing mutates it in
    # place.
    validators: List[Validator] = []

    def get_type(self):
        """Map ``field_type`` to the type string understood by the frontend
        form renderer.

        :raises NotImplementedError: for unsupported field types.
        """
        _type = ''
        if self.field_type == str:
            _type = 'string'
        elif self.field_type == int:
            _type = 'int'
        elif self.field_type == bool:
            _type = 'boolean'
        elif self.field_type == 'textarea':
            _type = 'textarea'
        elif self.field_type == "file":
            _type = 'file'
        else:
            raise NotImplementedError(f'Unimplemented type {self.field_type}')
        return _type
class Container:
    """Base class for a group of form fields; subclasses fix the layout
    direction (row/column) and whether the group is an object or an array."""

    def __init__(self, name: str, key: str, fields: List[Union[FormField, "Container"]],
                 optional: bool = False, readonly: bool = False, min_items=1):
        self.name = name
        self.key = key
        self.fields = fields
        self.optional = optional
        self.readonly = readonly
        self.min_items = min_items  # only meaningful for array containers

    def layout_type(self):
        # 'row' or 'column'; provided by subclasses.
        raise NotImplementedError

    def _property_type(self):
        # 'object' or 'array'; provided by subclasses.
        raise NotImplementedError

    def to_dict(self, key=''):
        """Build the JSON-schema-like ``control_schema`` and the frontend
        ``ui_schema`` for this container and everything nested in it.

        ``key`` is the dotted path of this container within the root form
        ('' at the root); array containers insert '[]' into child keys.
        """
        # initialize the schema of this container
        ui_schemas = []
        control_schema = {
            'type': self._property_type(),
            'title': self.name
        }
        items = None  # layout items alias as it depends on the type of container
        properties = None  # control schema properties alias
        required = None
        if self._property_type() == 'array':
            control_schema['required'] = []
            control_schema['minItems'] = self.min_items
            control_schema['items'] = {
                'type': 'object',
                'properties': {},
                'required': []
            }
            properties = control_schema['items']['properties']
            required = control_schema['required']
            # NOTE: items.required aliases the same list object as
            # control_schema['required'] — both views stay in sync below.
            control_schema['items']['required'] = required

            ui_schemas.append({
                'key': key,
                'templateOptions': {
                    'objectTemplateOptions': {
                        'layoutType': self.layout_type()
                    }
                },
                'items': []
            })
            items = ui_schemas[-1]['items']
        else:
            control_schema['properties'] = {}
            control_schema['required'] = []
            required = control_schema['required']
            properties = control_schema['properties']
            ui_schemas.append({
                'templateOptions': {
                    'layoutType': self.layout_type()
                },
                'key': key,
                'items': []
            })
            if key:
                items = ui_schemas[-1]['items']
            else:
                # Root container: child UI schemas are appended at top level.
                items = ui_schemas

        assert items is not None
        assert properties is not None
        assert required is not None

        # include fields in this container's schema
        for field in self.fields:
            field_ui_schema: Dict[str, Any] = {}
            properties[field.key] = {}
            field_key = field.key
            if key:
                # Array children use '[].' so the frontend can index rows.
                if self._property_type() == 'array':
                    field_key = key + '[].' + field.key
                else:
                    field_key = key + '.' + field.key

            if isinstance(field, FormField):
                _type = field.get_type()
                properties[field.key]['type'] = _type
                properties[field.key]['title'] = field.name
                field_ui_schema['key'] = field_key
                field_ui_schema['readonly'] = field.readonly
                field_ui_schema['help'] = f'{field.help}'
                field_ui_schema['validators'] = [i.value for i in field.validators]
                items.append(field_ui_schema)
            elif isinstance(field, Container):
                # Recurse: nested container contributes its control schema
                # under this container's properties and its UI schemas at
                # the top level of the result.
                container_schema = field.to_dict(key+'.'+field.key if key else field.key)
                properties[field.key] = container_schema['control_schema']
                ui_schemas.extend(container_schema['ui_schema'])
            if not field.optional:
                required.append(field.key)
        return {
            'ui_schema': ui_schemas,
            'control_schema': control_schema,
        }
class VerticalContainer(Container):
    """Object container laid out as a column."""

    def layout_type(self):
        return 'column'

    def _property_type(self):
        return 'object'
class HorizontalContainer(Container):
    """Object container laid out as a row."""

    def layout_type(self):
        return 'row'

    def _property_type(self):
        return 'object'
class ArrayVerticalContainer(Container):
    """Array container laid out as a column."""

    def layout_type(self):
        return 'column'

    def _property_type(self):
        return 'array'
class ArrayHorizontalContainer(Container):
    """Array container laid out as a row."""

    def layout_type(self):
        return 'row'

    def _property_type(self):
        return 'array'
class FormTaskInfo:
    """Metadata of the background task triggered by a form submission."""

    def __init__(self, message: str, metadata_fields: List[str]) -> None:
        self.message = message
        self.metadata_fields = metadata_fields

    def to_dict(self):
        """Serialize for the frontend (which expects camelCase keys)."""
        return dict(message=self.message, metadataFields=self.metadata_fields)
class Form:
    """Declarative description of a UI form bound to a POST/PUT endpoint."""

    def __init__(self, path, root_container, method_type='',
                 task_info: FormTaskInfo = FormTaskInfo("Unknown task", []),
                 model_callback=None):
        # NOTE(review): the default ``task_info`` is a single shared instance
        # created at import time — fine while nothing mutates it in place.
        self.path = path
        self.root_container: Container = root_container
        self.method_type = method_type
        self.task_info = task_info
        # Optional callable (model_key -> dict) used to pre-populate the
        # form's model when editing an existing object.
        self.model_callback = model_callback

    def to_dict(self):
        """Serialize the form (schemas, method, task info, endpoint path)."""
        res = self.root_container.to_dict()
        res['method_type'] = self.method_type
        res['task_info'] = self.task_info.to_dict()
        res['path'] = self.path
        # NOTE(review): 'ask' duplicates 'path' and looks like a typo —
        # confirm whether the frontend reads this key before removing it.
        res['ask'] = self.path
        return res
class CRUDMeta(SerializableClass):
    """Aggregated UI metadata (table config, permissions, actions, forms)
    served to the frontend by the generated metadata controller."""

    def __init__(self):
        self.table = TableComponent()
        self.permissions = []
        self.actions = []
        self.forms = []
        self.columnKey = ''
        self.detail_columns = []
class CRUDCollectionMethod(NamedTuple):
    """Handler + API documentation for a collection-level CRUD operation."""
    func: Callable[..., Iterable[Any]]
    doc: EndpointDoc
class CRUDResourceMethod(NamedTuple):
    """Handler + API documentation for a single-resource CRUD operation."""
    func: Callable[..., Any]
    doc: EndpointDoc
# pylint: disable=R0902
class CRUDEndpoint:
    """
    Class decorator that, applied to a NamedTuple-like model class, generates
    a REST CRUD controller plus a UI metadata controller describing the
    table/forms/actions the frontend should render for that model.
    """
    # for testing purposes
    CRUDClass: Optional[RESTController] = None
    CRUDClassMetadata: Optional[RESTController] = None

    def __init__(self, router: APIRouter, doc: APIDoc,
                 set_column: Optional[Dict[str, Dict[str, str]]] = None,
                 actions: Optional[List[TableAction]] = None,
                 permissions: Optional[List[str]] = None, forms: Optional[List[Form]] = None,
                 column_key: Optional[str] = None,
                 meta: Optional[CRUDMeta] = None, get_all: Optional[CRUDCollectionMethod] = None,
                 create: Optional[CRUDCollectionMethod] = None,
                 delete: Optional[CRUDCollectionMethod] = None,
                 selection_type: SelectionType = SelectionType.SINGLE,
                 extra_endpoints: Optional[List[Tuple[str, CRUDCollectionMethod]]] = None,
                 edit: Optional[CRUDCollectionMethod] = None,
                 detail_columns: Optional[List[str]] = None):
        self.router = router
        self.doc = doc
        self.set_column = set_column
        self.actions = actions if actions is not None else []
        self.forms = forms if forms is not None else []
        # BUGFIX: ``meta`` previously defaulted to a single shared
        # ``CRUDMeta()`` instance (mutable default argument). Since
        # ``__call__`` mutates ``self.meta.table.columns`` in place, every
        # CRUDEndpoint that did not pass its own meta would accumulate the
        # columns/actions/forms of all other endpoints. Create a fresh
        # CRUDMeta per instance instead.
        self.meta = meta if meta is not None else CRUDMeta()
        self.get_all = get_all
        self.create = create
        self.delete = delete
        self.edit = edit
        self.permissions = permissions if permissions is not None else []
        self.column_key = column_key if column_key is not None else ''
        self.detail_columns = detail_columns if detail_columns is not None else []
        self.extra_endpoints = extra_endpoints if extra_endpoints is not None else []
        self.selection_type = selection_type

    def __call__(self, cls: Any):
        """Decorate ``cls``: generate both controllers and register one table
        column per model field."""
        self.create_crud_class(cls)

        self.meta.table.columns.extend(TableColumn(prop=field) for field in cls._fields)
        self.create_meta_class(cls)
        return cls

    def create_crud_class(self, cls):
        """Build and register the REST controller exposing the configured
        CRUD handlers (list/create/delete/singleton_set + extras)."""
        outer_self: CRUDEndpoint = self

        funcs = {}
        if self.get_all:
            @self.get_all.doc
            @wraps(self.get_all.func)
            def _list(self, *args, **kwargs):
                # Wrap each raw item in the model class so SecretStr fields
                # and nested structures are serialized consistently.
                items = []
                for item in outer_self.get_all.func(self, *args, **kwargs):  # type: ignore
                    items.append(serialize(cls(**item)))
                return items
            funcs['list'] = _list

        if self.create:
            @self.create.doc
            @wraps(self.create.func)
            def _create(self, *args, **kwargs):
                return outer_self.create.func(self, *args, **kwargs)  # type: ignore
            funcs['create'] = _create

        if self.delete:
            @self.delete.doc
            @wraps(self.delete.func)
            def delete(self, *args, **kwargs):
                return outer_self.delete.func(self, *args, **kwargs)  # type: ignore
            funcs['delete'] = delete

        if self.edit:
            @self.edit.doc
            @wraps(self.edit.func)
            def singleton_set(self, *args, **kwargs):
                return outer_self.edit.func(self, *args, **kwargs)  # type: ignore
            funcs['singleton_set'] = singleton_set

        for extra_endpoint in self.extra_endpoints:
            funcs[extra_endpoint[0]] = extra_endpoint[1].doc(extra_endpoint[1].func)

        class_name = self.router.path.replace('/', '')
        crud_class = type(f'{class_name}_CRUDClass',
                          (RESTController,),
                          {
                              **funcs,
                              'outer_self': self,
                          })
        self.router(self.doc(crud_class))
        cls.CRUDClass = crud_class

    def create_meta_class(self, cls):
        """Build and register the UI controller that serves this endpoint's
        table/form metadata (refreshed on every ``list`` call)."""
        def _list(self, model_key: str = ''):
            self.update_columns()
            self.generate_actions()
            self.generate_forms(model_key)
            self.set_permissions()
            self.set_column_key()
            self.get_detail_columns()
            selection_type = self.__class__.outer_self.selection_type
            self.__class__.outer_self.meta.table.set_selection_type(selection_type)
            return serialize(self.__class__.outer_self.meta)

        def get_detail_columns(self):
            columns = self.__class__.outer_self.detail_columns
            self.__class__.outer_self.meta.detail_columns = columns

        def update_columns(self):
            # Apply the per-column overrides from ``set_column`` (template,
            # visibility, flexGrow) on top of the auto-generated columns.
            if self.__class__.outer_self.set_column:
                for i, column in enumerate(self.__class__.outer_self.meta.table.columns):
                    if column.prop in dict(self.__class__.outer_self.set_column):
                        prop = self.__class__.outer_self.set_column[column.prop]
                        new_template = ""
                        if "cellTemplate" in prop:
                            new_template = prop["cellTemplate"]
                        hidden = prop['isHidden'] if 'isHidden' in prop else False
                        flex_grow = prop['flexGrow'] if 'flexGrow' in prop else column.flexGrow
                        new_column = TableColumn(column.prop,
                                                 new_template,
                                                 hidden,
                                                 column.filterable,
                                                 flex_grow)
                        self.__class__.outer_self.meta.table.columns[i] = new_column

        def generate_actions(self):
            self.__class__.outer_self.meta.actions.clear()
            for action in self.__class__.outer_self.actions:
                self.__class__.outer_self.meta.actions.append(action._asdict())

        def generate_forms(self, model_key):
            self.__class__.outer_self.meta.forms.clear()
            for form in self.__class__.outer_self.forms:
                form_as_dict = form.to_dict()
                model = {}
                if form.model_callback and model_key:
                    model = form.model_callback(model_key)
                form_as_dict['model'] = model
                self.__class__.outer_self.meta.forms.append(form_as_dict)

        def set_permissions(self):
            self.__class__.outer_self.meta.permissions.clear()
            if self.__class__.outer_self.permissions:
                # Consistency fix: use self.__class__.outer_self like every
                # other helper here (same object, uniform access pattern).
                self.__class__.outer_self.meta.permissions.extend(
                    self.__class__.outer_self.permissions)

        def set_column_key(self):
            if self.__class__.outer_self.column_key:
                self.__class__.outer_self.meta.columnKey = self.__class__.outer_self.column_key

        class_name = self.router.path.replace('/', '')
        meta_class = type(f'{class_name}_CRUDClassMetadata',
                          (RESTController,),
                          {
                              'list': _list,
                              'update_columns': update_columns,
                              'generate_actions': generate_actions,
                              'generate_forms': generate_forms,
                              'set_permissions': set_permissions,
                              'set_column_key': set_column_key,
                              'get_detail_columns': get_detail_columns,
                              'outer_self': self,
                          })
        UIRouter(self.router.path, self.router.security_scope)(meta_class)
        cls.CRUDClassMetadata = meta_class
| 16,887 | 33.748971 | 99 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_docs.py
|
from typing import Any, Dict, List, Optional, Tuple, Union
from ..api.doc import SchemaInput, SchemaType
class EndpointDoc:  # noqa: N802
    """
    Decorator that attaches OpenAPI-style documentation (summary, tag,
    parameter and response descriptions) to an endpoint handler by setting
    its ``doc_info`` attribute.
    """
    DICT_TYPE = Union[Dict[str, Any], Dict[int, Any]]

    def __init__(self, description: str = "", group: str = "",
                 parameters: Optional[Union[DICT_TYPE, List[Any], Tuple[Any, ...]]] = None,
                 responses: Optional[DICT_TYPE] = None) -> None:
        self.description = description
        self.group = group
        self.parameters = parameters
        self.responses = responses

        self.validate_args()

        if not self.parameters:
            self.parameters = {}  # type: ignore

        # Response schemas keyed by HTTP status code (as string).
        self.resp = {}
        if self.responses:
            for status_code, response_body in self.responses.items():
                schema_input = SchemaInput()
                schema_input.type = SchemaType.ARRAY if \
                    isinstance(response_body, list) else SchemaType.OBJECT
                schema_input.params = self._split_parameters(response_body)

                self.resp[str(status_code)] = schema_input

    def validate_args(self) -> None:
        """Raise if any constructor argument has an unexpected type."""
        if not isinstance(self.description, str):
            raise Exception("%s has been called with a description that is not a string: %s"
                            % (EndpointDoc.__name__, self.description))
        if not isinstance(self.group, str):
            raise Exception("%s has been called with a groupname that is not a string: %s"
                            % (EndpointDoc.__name__, self.group))
        if self.parameters and not isinstance(self.parameters, dict):
            raise Exception("%s has been called with parameters that is not a dict: %s"
                            % (EndpointDoc.__name__, self.parameters))
        if self.responses and not isinstance(self.responses, dict):
            raise Exception("%s has been called with responses that is not a dict: %s"
                            % (EndpointDoc.__name__, self.responses))

    def _split_param(self, name: str, p_type: Union[type, DICT_TYPE, List[Any], Tuple[Any, ...]],
                     description: str, optional: bool = False, default_value: Any = None,
                     nested: bool = False) -> Dict[str, Any]:
        """Build the dict describing one parameter; nested dict/list types
        are recursively expanded into ``nested_params``."""
        param = {
            'name': name,
            'description': description,
            'required': not optional,
            'nested': nested,
        }
        if default_value:
            param['default'] = default_value
        if isinstance(p_type, type):
            param['type'] = p_type
        else:
            nested_params = self._split_parameters(p_type, nested=True)
            if nested_params:
                param['type'] = type(p_type)
                param['nested_params'] = nested_params
            else:
                param['type'] = p_type
        return param

    # Optional must be set to True in order to set default value and parameters format must be:
    # 'name: (type or nested parameters, description, [optional], [default value])'
    def _split_dict(self, data: DICT_TYPE, nested: bool) -> List[Any]:
        """Split a ``{name: (type, description, ...)}`` mapping into a list
        of parameter dicts.

        BUGFIX: the 4-element case was a stand-alone ``if`` after the
        2/3-element ``if``/``elif`` chain, so a props tuple of any other
        length left ``param`` unbound and raised UnboundLocalError instead
        of the intended format error. Wrong-length tuples now raise the
        same descriptive Exception as non-tuple values.
        """
        splitted = []
        for name, props in data.items():
            if isinstance(name, str) and isinstance(props, tuple) and len(props) in (2, 3, 4):
                if len(props) == 2:
                    param = self._split_param(name, props[0], props[1], nested=nested)
                elif len(props) == 3:
                    param = self._split_param(
                        name, props[0], props[1], optional=props[2], nested=nested)
                else:  # len(props) == 4
                    param = self._split_param(name, props[0], props[1], props[2], props[3], nested)
                splitted.append(param)
            else:
                raise Exception(
                    """Parameter %s in %s has not correct format. Valid formats are:
                    <name>: (<type>, <description>, [optional], [default value])
                    <name>: (<[type]>, <description>, [optional], [default value])
                    <name>: (<[nested parameters]>, <description>, [optional], [default value])
                    <name>: (<{nested parameters}>, <description>, [optional], [default value])"""
                    % (name, EndpointDoc.__name__))
        return splitted

    def _split_list(self, data: Union[List[Any], Tuple[Any, ...]], nested: bool) -> List[Any]:
        """Flatten a list/tuple of parameter descriptions."""
        splitted = []  # type: List[Any]
        for item in data:
            splitted.extend(self._split_parameters(item, nested))
        return splitted

    # nested = True means parameters are inside a dict or array
    def _split_parameters(self, data: Optional[Union[DICT_TYPE, List[Any], Tuple[Any, ...]]],
                          nested: bool = False) -> List[Any]:
        """Dispatch on the container type; anything else yields no params."""
        param_list = []  # type: List[Any]
        if isinstance(data, dict):
            param_list.extend(self._split_dict(data, nested))
        elif isinstance(data, (list, tuple)):
            param_list.extend(self._split_list(data, True))
        return param_list

    def __call__(self, func: Any) -> Any:
        """Attach the assembled documentation to ``func`` and return it."""
        func.doc_info = {
            'summary': self.description,
            'tag': self.group,
            'parameters': self._split_parameters(self.parameters),
            'response': self.resp
        }
        return func
class APIDoc(object):
    """Class decorator attaching group-level API documentation (tag name and
    tag description) to a controller class via ``doc_info``."""

    def __init__(self, description="", group=""):
        # The group name becomes the OpenAPI tag; description explains it.
        self.tag = group
        self.tag_descr = description

    def __call__(self, cls):
        """Decorate *cls* with its ``doc_info`` dict and return it."""
        cls.doc_info = dict(tag=self.tag, tag_descr=self.tag_descr)
        return cls
| 5,587 | 42.317829 | 99 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_endpoint.py
|
from typing import Optional
from ._helpers import _get_function_params
from ._version import APIVersion
class Endpoint:
    """
    Decorator that marks a handler function as an HTTP endpoint by attaching
    an ``_endpoint`` metadata dict (method, normalized path, parameter
    classification, response format, API version) consumed by the routing
    layer.
    """

    def __init__(self, method=None, path=None, path_params=None, query_params=None,  # noqa: N802
                 json_response=True, proxy=False, xml=False,
                 version: Optional[APIVersion] = APIVersion.DEFAULT):
        if method is None:
            method = 'GET'
        elif not isinstance(method, str) or \
                method.upper() not in ['GET', 'POST', 'DELETE', 'PUT']:
            raise TypeError("Possible values for method are: 'GET', 'POST', "
                            "'DELETE', or 'PUT'")

        method = method.upper()

        # For GET/DELETE all function params are classified automatically,
        # so an explicit path_params list is rejected.
        if method in ['GET', 'DELETE']:
            if path_params is not None:
                raise TypeError("path_params should not be used for {} "
                                "endpoints. All function params are considered"
                                " path parameters by default".format(method))

        if path_params is None:
            if method in ['POST', 'PUT']:
                path_params = []

        if query_params is None:
            query_params = []

        self.method = method
        self.path = path
        self.path_params = path_params
        self.query_params = query_params
        self.json_response = json_response
        self.proxy = proxy
        self.xml = xml
        self.version = version

    def __call__(self, func):
        # Path params must map to required function parameters: optional
        # ones could be absent from the URL and break routing.
        if self.method in ['POST', 'PUT']:
            func_params = _get_function_params(func)
            for param in func_params:
                if param['name'] in self.path_params and not param['required']:
                    raise TypeError("path_params can only reference "
                                    "non-optional function parameters")

        # ``__call__`` handlers map to the controller root by default.
        if func.__name__ == '__call__' and self.path is None:
            e_path = ""
        else:
            e_path = self.path

        # Normalize: strip whitespace, ensure a single leading slash, and
        # collapse a bare "/" to the empty string.
        if e_path is not None:
            e_path = e_path.strip()
            if e_path and e_path[0] != "/":
                e_path = "/" + e_path
            elif e_path == "/":
                e_path = ""

        func._endpoint = {
            'method': self.method,
            'path': e_path,
            'path_params': self.path_params,
            'query_params': self.query_params,
            'json_response': self.json_response,
            'proxy': self.proxy,
            'xml': self.xml,
            'version': self.version
        }
        return func
def Proxy(path=None):  # noqa: N802
    """Create a proxy Endpoint that forwards every sub-path (captured in the
    ``path`` route parameter) to the decorated handler."""
    prefix = "" if path is None or path == "/" else path
    return Endpoint(path=prefix + "/{path:.*}", proxy=True)
| 2,703 | 31.578313 | 97 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_helpers.py
|
import collections
import json
import logging
import re
from functools import wraps
import cherrypy
from ceph_argparse import ArgumentFormat # pylint: disable=import-error
from ..exceptions import DashboardException
from ..tools import getargspec
logger = logging.getLogger(__name__)
ENDPOINT_MAP = collections.defaultdict(list) # type: dict
def _get_function_params(func):
    """
    Retrieves the list of parameters declared in function.
    Each parameter is represented as dict with keys:
      * name (str): the name of the parameter
      * required (bool): whether the parameter is required or not
      * default (obj): the parameter's default value
    """
    fspec = getargspec(func)

    func_params = []
    # Index where defaulted parameters start: a negative bound when defaults
    # exist, otherwise the full argument count.
    nd = len(fspec.args) if not fspec.defaults else -len(fspec.defaults)
    # The slice starts at 1, skipping the first argument — presumably
    # ``self``/``cls`` of a controller method; TODO confirm for plain
    # functions.
    for param in fspec.args[1:nd]:
        func_params.append({'name': param, 'required': True})

    if fspec.defaults:
        for param, val in zip(fspec.args[nd:], fspec.defaults):
            func_params.append({
                'name': param,
                'required': False,
                'default': val
            })

    return func_params
def generate_controller_routes(endpoint, mapper, base_url):
    """Register ``endpoint`` (with and without trailing slash) on the Routes
    ``mapper`` under ``base_url`` and return the endpoint's parent URL, used
    by the caller to mount the controller tree."""
    inst = endpoint.inst
    ctrl_class = endpoint.ctrl

    # Proxy endpoints accept any HTTP method; others are restricted.
    if endpoint.proxy:
        conditions = None
    else:
        conditions = dict(method=[endpoint.method])

    # base_url can be empty or a URL path that starts with "/"
    # we will remove the trailing "/" if exists to help with the
    # concatenation with the endpoint url below
    if base_url.endswith("/"):
        base_url = base_url[:-1]

    endp_url = endpoint.url

    # Parent URL is everything up to the first path segment separator
    # after the leading slash.
    if endp_url.find("/", 1) == -1:
        parent_url = "{}{}".format(base_url, endp_url)
    else:
        parent_url = "{}{}".format(base_url, endp_url[:endp_url.find("/", 1)])

    # parent_url might be of the form "/.../{...}" where "{...}" is a path parameter
    # we need to remove the path parameter definition
    parent_url = re.sub(r'(?:/\{[^}]+\})$', '', parent_url)
    if not parent_url:  # root path case
        parent_url = "/"

    url = "{}{}".format(base_url, endp_url)

    logger.debug("Mapped [%s] to %s:%s restricted to %s",
                 url, ctrl_class.__name__, endpoint.action,
                 endpoint.method)

    ENDPOINT_MAP[endpoint.url].append(endpoint)

    name = ctrl_class.__name__ + ":" + endpoint.action
    mapper.connect(name, url, controller=inst, action=endpoint.action,
                   conditions=conditions)

    # adding route with trailing slash
    name += "/"
    url += "/"
    mapper.connect(name, url, controller=inst, action=endpoint.action,
                   conditions=conditions)

    return parent_url
def json_error_page(status, message, traceback, version):
    """CherryPy error-page handler rendering errors as a JSON body."""
    cherrypy.response.headers['Content-Type'] = 'application/json'
    payload = {
        'status': status,
        'detail': message,
        'traceback': traceback,
        'version': version,
    }
    return json.dumps(payload)
def allow_empty_body(func):  # noqa: N802
    """
    The POST/PUT request methods decorated with ``@allow_empty_body``
    are allowed to send empty request body.
    """
    # pylint: disable=protected-access
    if not hasattr(func, '_cp_config'):
        func._cp_config = {}
    func._cp_config['tools.json_in.force'] = False
    return func
def validate_ceph_type(validations, component=''):
    """
    Decorator factory that validates keyword arguments against Ceph argument
    types before invoking the handler.

    :param validations: iterable of ``(kwarg_name, ceph_type)`` pairs; each
        ``ceph_type.valid()`` is called with the matching keyword value.
        NOTE(review): values passed positionally are not validated, and a
        missing kwarg raises KeyError — confirm callers always pass these
        as keyword arguments.
    :param component: component name reported in the raised exception.
    :raises DashboardException: when a value fails validation.
    """
    def decorator(func):
        @wraps(func)
        def validate_args(*args, **kwargs):
            input_values = kwargs
            for key, ceph_type in validations:
                try:
                    ceph_type.valid(input_values[key])
                except ArgumentFormat as e:
                    raise DashboardException(msg=e,
                                             code='ceph_type_not_valid',
                                             component=component)
            return func(*args, **kwargs)
        return validate_args
    return decorator
| 3,990 | 30.179688 | 84 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_paginate.py
| 0 | 0 | 0 |
py
|
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_permissions.py
|
"""
Role-based access permissions decorators
"""
import logging
from ..exceptions import PermissionNotValid
from ..security import Permission
logger = logging.getLogger(__name__)
def _set_func_permissions(func, permissions):
    """
    Attach the required security permissions to ``func`` (merging with any
    permissions set by previously applied decorators).

    :param permissions: a single permission or a list of permissions.
        NOTE(review): when a list is passed it is mutated in place via
        ``extend`` below — callers should not rely on it afterwards.
    :raises PermissionNotValid: if any permission name is unknown.
    """
    if not isinstance(permissions, list):
        permissions = [permissions]

    for perm in permissions:
        if not Permission.valid_permission(perm):
            logger.debug("Invalid security permission: %s\n "
                         "Possible values: %s", perm,
                         Permission.all_permissions())
            raise PermissionNotValid(perm)

    # pylint: disable=protected-access
    if not hasattr(func, '_security_permissions'):
        func._security_permissions = permissions
    else:
        # Merge and deduplicate with permissions from earlier decorators.
        permissions.extend(func._security_permissions)
        func._security_permissions = list(set(permissions))
def ReadPermission(func):  # noqa: N802
    """
    Decorator requiring the READ permission on the handler.

    :raises PermissionNotValid: If the permission is missing.
    """
    _set_func_permissions(func, Permission.READ)
    return func


def CreatePermission(func):  # noqa: N802
    """
    Decorator requiring the CREATE permission on the handler.

    :raises PermissionNotValid: If the permission is missing.
    """
    _set_func_permissions(func, Permission.CREATE)
    return func


def DeletePermission(func):  # noqa: N802
    """
    Decorator requiring the DELETE permission on the handler.

    :raises PermissionNotValid: If the permission is missing.
    """
    _set_func_permissions(func, Permission.DELETE)
    return func


def UpdatePermission(func):  # noqa: N802
    """
    Decorator requiring the UPDATE permission on the handler.

    :raises PermissionNotValid: If the permission is missing.
    """
    _set_func_permissions(func, Permission.UPDATE)
    return func
| 1,618 | 25.540984 | 61 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_rest_controller.py
|
import collections
import inspect
from functools import wraps
from typing import Optional
import cherrypy
from ..security import Permission
from ._base_controller import BaseController
from ._endpoint import Endpoint
from ._helpers import _get_function_params
from ._permissions import _set_func_permissions
from ._version import APIVersion
class RESTController(BaseController, skip_registry=True):
    """
    Base class for providing a RESTful interface to a resource.

    To use this class, simply derive a class from it and implement the methods
    you want to support. The list of possible methods are:

    * list()
    * bulk_set(data)
    * create(data)
    * bulk_delete()
    * get(key)
    * set(data, key)
    * singleton_set(data)
    * delete(key)

    Test with curl:

    curl -H "Content-Type: application/json" -X POST \
         -d '{"username":"xyz","password":"xyz"}' https://127.0.0.1:8443/foo
    curl https://127.0.0.1:8443/foo
    curl https://127.0.0.1:8443/foo/0
    """

    # resource id parameter for using in get, set, and delete methods
    # should be overridden by subclasses.
    # to specify a composite id (two parameters) use '/'. e.g., "param1/param2".
    # If subclasses don't override this property we try to infer the structure
    # of the resource ID.
    RESOURCE_ID: Optional[str] = None

    # HTTP verb -> security permission checked for endpoints without explicit
    # permissions.
    _permission_map = {
        'GET': Permission.READ,
        'POST': Permission.CREATE,
        'PUT': Permission.UPDATE,
        'DELETE': Permission.DELETE
    }

    # Well-known REST method name -> HTTP verb, whether the method addresses a
    # single resource (needs the resource ID in its path), success status code
    # and default API version.
    _method_mapping = collections.OrderedDict([
        ('list', {'method': 'GET', 'resource': False, 'status': 200, 'version': APIVersion.DEFAULT}),  # noqa E501 #pylint: disable=line-too-long
        ('create', {'method': 'POST', 'resource': False, 'status': 201, 'version': APIVersion.DEFAULT}),  # noqa E501 #pylint: disable=line-too-long
        ('bulk_set', {'method': 'PUT', 'resource': False, 'status': 200, 'version': APIVersion.DEFAULT}),  # noqa E501 #pylint: disable=line-too-long
        ('bulk_delete', {'method': 'DELETE', 'resource': False, 'status': 204, 'version': APIVersion.DEFAULT}),  # noqa E501 #pylint: disable=line-too-long
        ('get', {'method': 'GET', 'resource': True, 'status': 200, 'version': APIVersion.DEFAULT}),
        ('delete', {'method': 'DELETE', 'resource': True, 'status': 204, 'version': APIVersion.DEFAULT}),  # noqa E501 #pylint: disable=line-too-long
        ('set', {'method': 'PUT', 'resource': True, 'status': 200, 'version': APIVersion.DEFAULT}),
        ('singleton_set', {'method': 'PUT', 'resource': False, 'status': 200, 'version': APIVersion.DEFAULT})  # noqa E501 #pylint: disable=line-too-long
    ])

    @classmethod
    def infer_resource_id(cls):
        """Return the list of parameter names that make up the resource ID.

        Uses ``RESOURCE_ID`` when set; otherwise inspects the first
        resource-scoped method from ``_method_mapping`` and treats its
        required non-path parameters as the ID. Returns ``None`` when
        nothing can be inferred.
        """
        if cls.RESOURCE_ID is not None:
            return cls.RESOURCE_ID.split('/')
        for k, v in cls._method_mapping.items():
            func = getattr(cls, k, None)
            # Unwrap decorators so the original signature is inspected.
            while hasattr(func, "__wrapped__"):
                assert func
                func = func.__wrapped__
            if v['resource'] and func:
                path_params = cls.get_path_param_names()
                params = _get_function_params(func)
                return [p['name'] for p in params
                        if p['required'] and p['name'] not in path_params]
        return None

    @classmethod
    def endpoints(cls):
        """Build the list of ``Endpoint`` objects exposed by this controller.

        Combines the statically routed endpoints of the base class with
        endpoints derived from the REST ``_method_mapping`` and from methods
        decorated with ``@Resource``/``@Collection``/``@MethodMap``.

        :raises TypeError: if a resource endpoint exists but the resource ID
            parameters cannot be inferred.
        """
        result = super().endpoints()
        res_id_params = cls.infer_resource_id()

        for name, func in inspect.getmembers(cls, predicate=callable):
            # Per-endpoint routing properties, filled in by the helpers below.
            endpoint_params = {
                'no_resource_id_params': False,
                'status': 200,
                'method': None,
                'query_params': None,
                'path': '',
                'version': APIVersion.DEFAULT,
                'sec_permissions': hasattr(func, '_security_permissions'),
                'permission': None,
            }
            if name in cls._method_mapping:
                cls._update_endpoint_params_method_map(
                    func, res_id_params, endpoint_params, name=name)
            elif hasattr(func, "__collection_method__"):
                cls._update_endpoint_params_collection_map(func, endpoint_params)
            elif hasattr(func, "__resource_method__"):
                cls._update_endpoint_params_resource_method(
                    res_id_params, endpoint_params, func)
            else:
                # Not an endpoint method.
                continue

            if endpoint_params['no_resource_id_params']:
                raise TypeError("Could not infer the resource ID parameters for"
                                " method {} of controller {}. "
                                "Please specify the resource ID parameters "
                                "using the RESOURCE_ID class property"
                                .format(func.__name__, cls.__name__))

            if endpoint_params['method'] in ['GET', 'DELETE']:
                params = _get_function_params(func)
                if res_id_params is None:
                    res_id_params = []
                if endpoint_params['query_params'] is None:
                    # Every non-ID parameter becomes a query parameter.
                    endpoint_params['query_params'] = [p['name'] for p in params  # type: ignore
                                                      if p['name'] not in res_id_params]

            func = cls._status_code_wrapper(func, endpoint_params['status'])
            endp_func = Endpoint(endpoint_params['method'], path=endpoint_params['path'],
                                 query_params=endpoint_params['query_params'],
                                 version=endpoint_params['version'])(func)  # type: ignore
            if endpoint_params['permission']:
                _set_func_permissions(endp_func, [endpoint_params['permission']])
            result.append(cls.Endpoint(cls, endp_func))
        return result

    @classmethod
    def _update_endpoint_params_resource_method(cls, res_id_params, endpoint_params, func):
        """Fill *endpoint_params* for a method decorated with ``@Resource``."""
        if not res_id_params:
            # Reported as a TypeError by the caller (endpoints()).
            endpoint_params['no_resource_id_params'] = True
        else:
            path_params = ["{{{}}}".format(p) for p in res_id_params]
            endpoint_params['path'] += "/{}".format("/".join(path_params))
            if func.__resource_method__['path']:
                endpoint_params['path'] += func.__resource_method__['path']
            else:
                # Default sub-path: the method name.
                endpoint_params['path'] += "/{}".format(func.__name__)
            endpoint_params['status'] = func.__resource_method__['status']
            endpoint_params['method'] = func.__resource_method__['method']
            endpoint_params['version'] = func.__resource_method__['version']
            endpoint_params['query_params'] = func.__resource_method__['query_params']
            if not endpoint_params['sec_permissions']:
                endpoint_params['permission'] = cls._permission_map[endpoint_params['method']]

    @classmethod
    def _update_endpoint_params_collection_map(cls, func, endpoint_params):
        """Fill *endpoint_params* for a method decorated with ``@Collection``."""
        if func.__collection_method__['path']:
            endpoint_params['path'] = func.__collection_method__['path']
        else:
            # Default sub-path: the method name.
            endpoint_params['path'] = "/{}".format(func.__name__)
        endpoint_params['status'] = func.__collection_method__['status']
        endpoint_params['method'] = func.__collection_method__['method']
        endpoint_params['query_params'] = func.__collection_method__['query_params']
        endpoint_params['version'] = func.__collection_method__['version']
        if not endpoint_params['sec_permissions']:
            endpoint_params['permission'] = cls._permission_map[endpoint_params['method']]

    @classmethod
    def _update_endpoint_params_method_map(cls, func, res_id_params, endpoint_params, name=None):
        """Fill *endpoint_params* for one of the standard REST methods."""
        meth = cls._method_mapping[func.__name__ if not name else name]  # type: dict

        if meth['resource']:
            if not res_id_params:
                # Reported as a TypeError by the caller (endpoints()).
                endpoint_params['no_resource_id_params'] = True
            else:
                path_params = ["{{{}}}".format(p) for p in res_id_params]
                endpoint_params['path'] += "/{}".format("/".join(path_params))
        endpoint_params['status'] = meth['status']
        endpoint_params['method'] = meth['method']
        if hasattr(func, "__method_map_method__"):
            # @MethodMap may override the default API version.
            endpoint_params['version'] = func.__method_map_method__['version']
        if not endpoint_params['sec_permissions']:
            endpoint_params['permission'] = cls._permission_map[endpoint_params['method']]

    @classmethod
    def _status_code_wrapper(cls, func, status_code):
        """Wrap *func* so the response status is set before it executes."""
        @wraps(func)
        def wrapper(*vpath, **params):
            cherrypy.response.status = status_code
            return func(*vpath, **params)

        return wrapper

    @staticmethod
    def Resource(method=None, path=None, status=None, query_params=None,  # noqa: N802
                 version: Optional[APIVersion] = APIVersion.DEFAULT):
        """Decorator declaring an extra endpoint on a single resource.

        The generated URL includes the resource ID followed by *path*
        (or the method name). Defaults: ``GET`` and status 200.
        """
        if not method:
            method = 'GET'

        if status is None:
            status = 200

        def _wrapper(func):
            func.__resource_method__ = {
                'method': method,
                'path': path,
                'status': status,
                'query_params': query_params,
                'version': version
            }
            return func
        return _wrapper

    @staticmethod
    def MethodMap(resource=False, status=None,
                  version: Optional[APIVersion] = APIVersion.DEFAULT):  # noqa: N802
        """Decorator overriding status/version of a standard REST method."""
        if status is None:
            status = 200

        def _wrapper(func):
            func.__method_map_method__ = {
                'resource': resource,
                'status': status,
                'version': version
            }
            return func
        return _wrapper

    @staticmethod
    def Collection(method=None, path=None, status=None, query_params=None,  # noqa: N802
                   version: Optional[APIVersion] = APIVersion.DEFAULT):
        """Decorator declaring an extra endpoint on the whole collection.

        Defaults: ``GET`` and status 200; sub-path defaults to the method
        name.
        """
        if not method:
            method = 'GET'

        if status is None:
            status = 200

        def _wrapper(func):
            func.__collection_method__ = {
                'method': method,
                'path': path,
                'status': status,
                'query_params': query_params,
                'version': version
            }
            return func
        return _wrapper
| 10,337 | 40.352 | 155 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_router.py
|
import logging
import cherrypy
from ..exceptions import ScopeNotValid
from ..security import Scope
from ._base_controller import BaseController
from ._helpers import generate_controller_routes
logger = logging.getLogger(__name__)
class Router(object):
    """Class decorator that mounts a controller under ``base_url + path``.

    :param path: URL path of the controller, normalized to start with "/".
    :param base_url: optional prefix in front of *path* (e.g. "/api").
    :param security_scope: security scope required by the controller.
    :param secure: whether authentication is enforced (default: True).
    :raises ScopeNotValid: if *security_scope* is given but unknown.
    """

    def __init__(self, path, base_url=None, security_scope=None, secure=True):
        if security_scope and not Scope.valid_scope(security_scope):
            raise ScopeNotValid(security_scope)
        self.path = path
        self.base_url = base_url
        self.security_scope = security_scope
        self.secure = secure

        # Normalize path/base_url so their concatenation is a clean URL.
        if self.path and self.path[0] != "/":
            self.path = "/" + self.path

        if self.base_url is None:
            self.base_url = ""
        elif self.base_url == "/":
            self.base_url = ""

        if self.base_url == "" and self.path == "":
            self.base_url = "/"

    def __call__(self, cls):
        """Mark *cls* as routed and attach its CherryPy configuration."""
        cls._routed = True
        cls._cp_path_ = "{}{}".format(self.base_url, self.path)
        cls._security_scope = self.security_scope

        config = {
            'tools.dashboard_exception_handler.on': True,
            'tools.authenticate.on': self.secure,
        }
        if not hasattr(cls, '_cp_config'):
            cls._cp_config = {}
        cls._cp_config.update(config)
        return cls

    @classmethod
    def generate_routes(cls, url_prefix):
        """Create the CherryPy route mapper for all loaded controllers.

        :return: tuple of ``(RoutesDispatcher, set of parent URL paths)``.
        """
        controllers = BaseController.load_controllers()
        logger.debug("controllers=%r", controllers)

        mapper = cherrypy.dispatch.RoutesDispatcher()
        parent_urls = set()

        endpoint_list = []
        for ctrl in controllers:
            inst = ctrl()
            for endpoint in ctrl.endpoints():
                endpoint.inst = inst
                endpoint_list.append(endpoint)

        # Register endpoints in a deterministic (URL-sorted) order.
        endpoint_list = sorted(endpoint_list, key=lambda e: e.url)
        for endpoint in endpoint_list:
            parent_urls.add(generate_controller_routes(endpoint, mapper,
                                                       "{}".format(url_prefix)))

        logger.debug("list of parent paths: %s", parent_urls)
        return mapper, parent_urls
| 2,158 | 29.842857 | 80 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_task.py
|
from functools import wraps
import cherrypy
from ..tools import TaskManager
from ._helpers import _get_function_params
class Task:
    """Decorator running the wrapped endpoint as a tracked background task.

    :param name: task name registered with the ``TaskManager``.
    :param metadata: dict of metadata, or a list of placeholders (e.g.
        ``['{key}']``) that is turned into such a dict. String values of the
        form ``'{param}'`` or ``'{pos}'`` are substituted with the actual call
        arguments; dotted names (``'{obj.attr}'``) index into the value.
    :param wait_for: seconds to wait for completion before answering 202.
    :param exception_handler: optional handler invoked when the task fails.
    """

    def __init__(self, name, metadata, wait_for=5.0, exception_handler=None):
        self.name = name
        if isinstance(metadata, list):
            # e.g. ['{key}'] becomes {'key': '{key}'}.
            self.metadata = {e[1:-1]: e for e in metadata}
        else:
            self.metadata = metadata
        self.wait_for = wait_for
        self.exception_handler = exception_handler

    def _gen_arg_map(self, func, args, kwargs):
        """Map each parameter name (and its 1-based position) to the value
        of this call, falling back to declared defaults."""
        arg_map = {}
        params = _get_function_params(func)

        args = args[1:]  # exclude self
        for idx, param in enumerate(params):
            if idx < len(args):
                arg_map[param['name']] = args[idx]
            else:
                if param['name'] in kwargs:
                    arg_map[param['name']] = kwargs[param['name']]
                else:
                    assert not param['required'], "{0} is required".format(param['name'])
                    arg_map[param['name']] = param['default']

            if param['name'] in arg_map:
                # This is not a type error. We are using the index here.
                arg_map[idx+1] = arg_map[param['name']]

        return arg_map

    def _get_metadata(self, arg_map):
        """Resolve '{placeholder}' metadata values against *arg_map*."""
        metadata = {}
        for k, v in self.metadata.items():
            if isinstance(v, str) and v and v[0] == '{' and v[-1] == '}':
                param = v[1:-1]
                try:
                    # Numeric placeholder: positional argument lookup.
                    pos = int(param)
                    metadata[k] = arg_map[pos]
                except ValueError:
                    if param.find('.') == -1:
                        metadata[k] = arg_map[param]
                    else:
                        # Dotted placeholder: traverse into the argument value.
                        path = param.split('.')
                        metadata[k] = arg_map[path[0]]
                        for i in range(1, len(path)):
                            metadata[k] = metadata[k][path[i]]
            else:
                # Literal metadata value, used as-is.
                metadata[k] = v
        return metadata

    def __call__(self, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            arg_map = self._gen_arg_map(func, args, kwargs)
            metadata = self._get_metadata(arg_map)

            task = TaskManager.run(self.name, metadata, func, args, kwargs,
                                   exception_handler=self.exception_handler)
            try:
                status, value = task.wait(self.wait_for)
            except Exception as ex:
                if task.ret_value:
                    # exception was handled by task.exception_handler
                    if 'status' in task.ret_value:
                        status = task.ret_value['status']
                    else:
                        status = getattr(ex, 'status', 500)
                    cherrypy.response.status = status
                    return task.ret_value
                raise ex
            if status == TaskManager.VALUE_EXECUTING:
                # Still running: answer 202 Accepted with the task identity.
                cherrypy.response.status = 202
                return {'name': self.name, 'metadata': metadata}
            return value
        return wrapper
| 3,121 | 35.729412 | 89 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_ui_router.py
|
from ._router import Router
class UIRouter(Router):
    """Router for internal UI endpoints, mounted under the ``/ui-api`` prefix."""

    def __init__(self, path, security_scope=None, secure=True):
        super().__init__(path,
                         base_url="/ui-api",
                         security_scope=security_scope,
                         secure=secure)

    def __call__(self, cls):
        routed_cls = super().__call__(cls)
        # UI endpoints are excluded from the public API documentation.
        routed_cls._api_endpoint = False
        return routed_cls
| 384 | 26.5 | 63 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/_version.py
|
import re
from typing import NamedTuple
class APIVersion(NamedTuple):
    """
    Immutable ``(major, minor)`` pair identifying a REST API version.

    >>> APIVersion(1,0)
    APIVersion(major=1, minor=0)

    >>> APIVersion._make([1,0])
    APIVersion(major=1, minor=0)

    >>> f'{APIVersion(1, 0)!r}'
    'APIVersion(major=1, minor=0)'
    """
    major: int
    minor: int

    # Sentinel placeholders; bound to real instances right after the class
    # body, since a class attribute cannot reference the class being defined.
    DEFAULT = ...  # type: ignore
    EXPERIMENTAL = ...  # type: ignore
    NONE = ...  # type: ignore

    __MIME_TYPE_REGEX = re.compile(  # type: ignore
        r'^application/vnd\.ceph\.api\.v(\d+\.\d+)\+json$')

    @classmethod
    def from_string(cls, version_string: str) -> 'APIVersion':
        """
        >>> APIVersion.from_string("1.0")
        APIVersion(major=1, minor=0)
        """
        return cls._make(map(int, version_string.split('.')))

    @classmethod
    def from_mime_type(cls, mime_type: str) -> 'APIVersion':
        """
        >>> APIVersion.from_mime_type('application/vnd.ceph.api.v1.0+json')
        APIVersion(major=1, minor=0)
        """
        match = cls.__MIME_TYPE_REGEX.match(mime_type)
        return cls.from_string(match.group(1))

    def __str__(self):
        """
        >>> f'{APIVersion(1, 0)}'
        '1.0'
        """
        return '{}.{}'.format(self.major, self.minor)

    def to_mime_type(self, subtype='json'):
        """
        >>> APIVersion(1, 0).to_mime_type(subtype='xml')
        'application/vnd.ceph.api.v1.0+xml'
        """
        return 'application/vnd.ceph.api.v{}+{}'.format(self, subtype)

    def supports(self, client_version: "APIVersion") -> bool:
        """
        >>> APIVersion(1, 1).supports(APIVersion(1, 0))
        True

        >>> APIVersion(1, 0).supports(APIVersion(1, 1))
        False

        >>> APIVersion(2, 0).supports(APIVersion(1, 1))
        False
        """
        if self.major != client_version.major:
            return False
        return client_version.minor <= self.minor


# Sentinel Values
APIVersion.DEFAULT = APIVersion(1, 0)  # type: ignore
APIVersion.EXPERIMENTAL = APIVersion(0, 1)  # type: ignore
APIVersion.NONE = APIVersion(0, 0)  # type: ignore
| 2,038 | 25.828947 | 79 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/auth.py
|
# -*- coding: utf-8 -*-
import http.cookies
import logging
import sys
from .. import mgr
from ..exceptions import InvalidCredentialsError, UserDoesNotExist
from ..services.auth import AuthManager, JwtManager
from ..services.cluster import ClusterModel
from ..settings import Settings
from . import APIDoc, APIRouter, ControllerAuthMixin, EndpointDoc, RESTController, allow_empty_body
# Python 3.8 introduced `samesite` attribute:
# https://docs.python.org/3/library/http.cookies.html#morsel-objects
if sys.version_info < (3, 8):
    # Register the ``samesite`` attribute on Morsel so older Pythons accept
    # it when the authentication cookie is built.
    http.cookies.Morsel._reserved["samesite"] = "SameSite"  # type: ignore  # pylint: disable=W0212
logger = logging.getLogger('controllers.auth')
# Response schema of the ``Auth.check`` endpoint, used for API documentation.
AUTH_CHECK_SCHEMA = {
    "username": (str, "Username"),
    "permissions": ({
        "cephfs": ([str], "")
    }, "List of permissions acquired"),
    "sso": (bool, "Uses single sign on?"),
    "pwdUpdateRequired": (bool, "Is password update required?")
}
@APIRouter('/auth', secure=False)
@APIDoc("Initiate a session with Ceph", "Auth")
class Auth(RESTController, ControllerAuthMixin):
    """
    Authenticates users and returns a JWT token.
    """

    def create(self, username, password):
        """Authenticate *username* and open a new session.

        On success a JWT token is generated, set as a cookie and returned
        together with the user's permissions. Failed attempts are counted;
        once ``ACCOUNT_LOCKOUT_ATTEMPTS`` is reached the account is disabled.

        :raises InvalidCredentialsError: on bad credentials or a locked
            account.
        """
        user_data = AuthManager.authenticate(username, password)
        user_perms, pwd_expiration_date, pwd_update_required = None, None, None
        max_attempt = Settings.ACCOUNT_LOCKOUT_ATTEMPTS
        if max_attempt == 0 or mgr.ACCESS_CTRL_DB.get_attempt(username) < max_attempt:
            if user_data:
                user_perms = user_data.get('permissions')
                pwd_expiration_date = user_data.get('pwdExpirationDate', None)
                pwd_update_required = user_data.get('pwdUpdateRequired', False)

            if user_perms is not None:
                url_prefix = 'https' if mgr.get_localized_module_option('ssl') else 'http'

                logger.info('Login successful: %s', username)
                mgr.ACCESS_CTRL_DB.reset_attempt(username)
                mgr.ACCESS_CTRL_DB.save()
                token = JwtManager.gen_token(username)

                # For backward-compatibility: PyJWT versions < 2.0.0 return bytes.
                token = token.decode('utf-8') if isinstance(token, bytes) else token

                self._set_token_cookie(url_prefix, token)
                return {
                    'token': token,
                    'username': username,
                    'permissions': user_perms,
                    'pwdExpirationDate': pwd_expiration_date,
                    'sso': mgr.SSO_DB.protocol == 'saml2',
                    'pwdUpdateRequired': pwd_update_required
                }
            mgr.ACCESS_CTRL_DB.increment_attempt(username)
            mgr.ACCESS_CTRL_DB.save()
        else:
            try:
                user = mgr.ACCESS_CTRL_DB.get_user(username)
                user.enabled = False
                mgr.ACCESS_CTRL_DB.save()
                # BUGFIX: use the module logger instead of the root logger
                # (was ``logging.warning``) so the record is attributed to
                # 'controllers.auth'.
                logger.warning('Maximum number of unsuccessful log-in attempts '
                               '(%d) reached for '
                               'username "%s" so the account was blocked. '
                               'An administrator will need to re-enable the account',
                               max_attempt, username)
                raise InvalidCredentialsError
            except UserDoesNotExist:
                raise InvalidCredentialsError
        logger.info('Login failed: %s', username)
        raise InvalidCredentialsError

    @RESTController.Collection('POST')
    @allow_empty_body
    def logout(self):
        """Terminate the session: blocklist the token and clear the cookie."""
        logger.debug('Logout successful')
        token = JwtManager.get_token_from_header()
        JwtManager.blocklist_token(token)
        self._delete_token_cookie(token)
        redirect_url = '#/login'
        if mgr.SSO_DB.protocol == 'saml2':
            redirect_url = 'auth/saml2/slo'
        return {
            'redirect_url': redirect_url
        }

    def _get_login_url(self):
        """Return the login URL, depending on whether SSO is enabled."""
        if mgr.SSO_DB.protocol == 'saml2':
            return 'auth/saml2/login'
        return '#/login'

    @RESTController.Collection('POST', query_params=['token'])
    @EndpointDoc("Check token Authentication",
                 parameters={'token': (str, 'Authentication Token')},
                 responses={201: AUTH_CHECK_SCHEMA})
    def check(self, token):
        """Validate *token*; return user info, or login/cluster state if invalid."""
        if token:
            user = JwtManager.get_user(token)
            if user:
                return {
                    'username': user.username,
                    'permissions': user.permissions_dict(),
                    'sso': mgr.SSO_DB.protocol == 'saml2',
                    'pwdUpdateRequired': user.pwd_update_required
                }
        return {
            'login_url': self._get_login_url(),
            'cluster_status': ClusterModel.from_db().dict()['status']
        }
| 4,829 | 38.268293 | 99 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/ceph_users.py
|
import logging
from errno import EINVAL
from typing import List, NamedTuple, Optional
from ..exceptions import DashboardException
from ..security import Scope
from ..services.ceph_service import CephService, SendCommandError
from . import APIDoc, APIRouter, CRUDCollectionMethod, CRUDEndpoint, \
EndpointDoc, RESTController, SecretStr
from ._crud import ArrayHorizontalContainer, CRUDMeta, Form, FormField, \
FormTaskInfo, Icon, MethodType, SelectionType, TableAction, Validator, \
VerticalContainer
logger = logging.getLogger("controllers.ceph_users")
class CephUserCaps(NamedTuple):
    """Capability strings of a Ceph user, one per daemon type."""
    mon: str
    osd: str
    mgr: str
    mds: str
class Cap(NamedTuple):
    """Single (entity, capability) pair, e.g. ('mon', 'allow r')."""
    entity: str
    cap: str
class CephUserEndpoints:
    """Endpoint implementations backing the Ceph-user CRUD controller."""

    @staticmethod
    def _run_auth_command(command: str, *args, **kwargs):
        """Run a mon ``auth`` command, mapping failures to DashboardException."""
        try:
            return CephService.send_command('mon', command, *args, **kwargs)
        except SendCommandError as ex:
            msg = f'{ex} in command {ex.prefix}'
            if ex.errno == -EINVAL:
                # Invalid arguments supplied by the caller -> client error.
                raise DashboardException(msg, code=400)
            raise DashboardException(msg, code=500)

    @staticmethod
    def _caps_vector(capabilities):
        """Flatten capability dicts into the ``[entity, cap, ...]`` vector
        expected by mon auth commands (see AuthMonitor.cc::valid_caps).
        ``None`` is treated as an empty list."""
        caps: List[str] = []
        for cap in capabilities or []:
            caps.append(cap['entity'])
            caps.append(cap['cap'])
        return caps

    @staticmethod
    def user_list(_):
        """
        Get list of ceph users and its respective data
        """
        return CephUserEndpoints._run_auth_command('auth ls')["auth_dump"]

    @staticmethod
    def user_create(_, user_entity: str = '', capabilities: Optional[List[Cap]] = None,
                    import_data: str = ''):
        """
        Add a ceph user with its defined capabilities.
        :param user_entity: Entity to change
        :param capabilities: List of capabilities to add to user_entity
        """
        if import_data:
            logger.debug("Sending import command 'auth import' \n%s", import_data)
            CephUserEndpoints._run_auth_command('auth import', inbuf=import_data)
            return "Successfully imported user"

        assert user_entity
        # Caps are represented as a vector in mon auth add commands.
        # BUGFIX: tolerate a missing capability list (previously a TypeError
        # was raised when iterating over None).
        caps = CephUserEndpoints._caps_vector(capabilities)
        logger.debug("Sending command 'auth add' of entity '%s' with caps '%s'",
                     user_entity, str(caps))
        CephUserEndpoints._run_auth_command('auth add', entity=user_entity, caps=caps)
        return f"Successfully created user '{user_entity}'"

    @staticmethod
    def user_delete(_, user_entity: str):
        """
        Delete a ceph user and it's defined capabilities.
        :param user_entity: Entity to delete
        """
        logger.debug("Sending command 'auth del' of entity '%s'", user_entity)
        CephUserEndpoints._run_auth_command('auth del', entity=user_entity)
        return f"Successfully deleted user '{user_entity}'"

    @staticmethod
    def export(_, entities: List[str]):
        """Export the keyrings of the given entities as one text blob."""
        export_string = ""
        for entity in entities:
            out = CephUserEndpoints._run_auth_command('auth export', entity=entity, to_json=False)
            export_string += f'{out}\n'
        return export_string

    @staticmethod
    def user_edit(_, user_entity: str = '', capabilities: Optional[List[Cap]] = None):
        """
        Change the ceph user capabilities.
        Setting new capabilities will overwrite current ones.
        :param user_entity: Entity to change
        :param capabilities: List of updated capabilities to user_entity
        """
        # BUGFIX: tolerate a missing capability list (previously a TypeError).
        caps = CephUserEndpoints._caps_vector(capabilities)
        logger.debug("Sending command 'auth caps' of entity '%s' with caps '%s'",
                     user_entity, str(caps))
        CephUserEndpoints._run_auth_command('auth caps', entity=user_entity, caps=caps)
        return f"Successfully edited user '{user_entity}'"

    @staticmethod
    def model(user_entity: str):
        """Return the edit-form model (entity plus capability list) for a user."""
        user_data = CephUserEndpoints._run_auth_command('auth get', entity=user_entity)[0]
        model = {'user_entity': '', 'capabilities': []}
        model['user_entity'] = user_data['entity']
        for entity, cap in user_data['caps'].items():
            model['capabilities'].append({'entity': entity, 'cap': cap})
        return model
# Shared widget for entering one or more (entity, capability) pairs.
cap_container = ArrayHorizontalContainer('Capabilities', 'capabilities', fields=[
    FormField('Entity', 'entity',
              field_type=str),
    FormField('Entity Capabilities',
              'cap', field_type=str)
], min_items=1)
create_container = VerticalContainer('Create User', 'create_user', fields=[
    FormField('User entity', 'user_entity',
              field_type=str),
    cap_container,
])
edit_container = VerticalContainer('Edit User', 'edit_user', fields=[
    FormField('User entity', 'user_entity',
              field_type=str, readonly=True),
    cap_container,
])
# Form declarations consumed by the CRUD frontend; the paths below are wired
# to the CephUserEndpoints implementations via the @CRUDEndpoint declaration.
create_form = Form(path='/cluster/user/create',
                   root_container=create_container,
                   method_type=MethodType.POST.value,
                   task_info=FormTaskInfo("Ceph user '{user_entity}' successfully",
                                          ['user_entity']))

# pylint: disable=C0301
import_user_help = (
    'The imported file should be a keyring file and it must follow the schema described <a '  # noqa: E501
    'href="https://docs.ceph.com/en/latest/rados/operations/user-management/#authorization-capabilities"'  # noqa: E501
    'target="_blank">here.</a>'
)
import_container = VerticalContainer('Import User', 'import_user', fields=[
    FormField('User file import', 'import_data',
              field_type="file", validators=[Validator.FILE],
              help=import_user_help),
])

import_user_form = Form(path='/cluster/user/import',
                        root_container=import_container,
                        task_info=FormTaskInfo("successfully", []),
                        method_type=MethodType.POST.value)

edit_form = Form(path='/cluster/user/edit',
                 root_container=edit_container,
                 method_type=MethodType.PUT.value,
                 task_info=FormTaskInfo("Ceph user '{user_entity}' successfully",
                                        ['user_entity']),
                 model_callback=CephUserEndpoints.model)
# Declarative wiring of the Ceph-user CRUD table: routing, columns, row
# actions, forms and the endpoint implementations from CephUserEndpoints.
@CRUDEndpoint(
    router=APIRouter('/cluster/user', Scope.CONFIG_OPT),
    doc=APIDoc("Get Ceph Users", "Cluster"),
    set_column={"caps": {"cellTemplate": "badgeDict"}},
    actions=[
        TableAction(name='Create', permission='create', icon=Icon.ADD.value,
                    routerLink='/cluster/user/create'),
        TableAction(name='Edit', permission='update', icon=Icon.EDIT.value,
                    click='edit'),
        TableAction(name='Delete', permission='delete', icon=Icon.DESTROY.value,
                    click='delete', disable=True),
        TableAction(name='Import', permission='create', icon=Icon.IMPORT.value,
                    routerLink='/cluster/user/import'),
        TableAction(name='Export', permission='read', icon=Icon.EXPORT.value,
                    click='authExport', disable=True)
    ],
    permissions=[Scope.CONFIG_OPT],
    forms=[create_form, edit_form, import_user_form],
    column_key='entity',
    get_all=CRUDCollectionMethod(
        func=CephUserEndpoints.user_list,
        doc=EndpointDoc("Get Ceph Users")
    ),
    create=CRUDCollectionMethod(
        func=CephUserEndpoints.user_create,
        doc=EndpointDoc("Create Ceph User")
    ),
    edit=CRUDCollectionMethod(
        func=CephUserEndpoints.user_edit,
        doc=EndpointDoc("Edit Ceph User")
    ),
    delete=CRUDCollectionMethod(
        func=CephUserEndpoints.user_delete,
        doc=EndpointDoc("Delete Ceph User")
    ),
    extra_endpoints=[
        ('export', CRUDCollectionMethod(
            func=RESTController.Collection('POST', 'export')(CephUserEndpoints.export),
            doc=EndpointDoc("Export Ceph Users")
        ))
    ],
    selection_type=SelectionType.MULTI,
    meta=CRUDMeta()
)
class CephUser(NamedTuple):
    """Row model of the Ceph-user table."""
    entity: str
    caps: List[CephUserCaps]
    key: SecretStr
| 8,139 | 36.511521 | 119 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/cephfs.py
|
# -*- coding: utf-8 -*-
import logging
import os
from collections import defaultdict
import cephfs
import cherrypy
from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from ..services.ceph_service import CephService
from ..services.cephfs import CephFS as CephFS_
from ..services.exception import handle_cephfs_error
from ..tools import ViewCache
from . import APIDoc, APIRouter, EndpointDoc, RESTController, UIRouter, allow_empty_body
# Response schema of the quota endpoints, used for API documentation.
GET_QUOTAS_SCHEMA = {
    'max_bytes': (int, ''),
    'max_files': (int, '')
}

# BUGFIX: was "controllers.rgw" — a copy/paste slip from the RGW controller;
# use this module's own name so log records are attributed correctly.
logger = logging.getLogger("controllers.cephfs")
@APIRouter('/cephfs', Scope.CEPHFS)
@APIDoc("Cephfs Management API", "Cephfs")
class CephFS(RESTController):
    def __init__(self):  # pragma: no cover
        """Initialize the controller with an empty per-filesystem client cache."""
        super().__init__()

        # Stateful instances of CephFSClients, hold cached results. Key to
        # dict is FSCID
        self.cephfs_clients = {}
def list(self):
fsmap = mgr.get("fs_map")
return fsmap['filesystems']
def get(self, fs_id):
fs_id = self.fs_id_to_int(fs_id)
return self.fs_status(fs_id)
@RESTController.Resource('GET')
def clients(self, fs_id):
fs_id = self.fs_id_to_int(fs_id)
return self._clients(fs_id)
@RESTController.Resource('DELETE', path='/client/{client_id}')
def evict(self, fs_id, client_id):
fs_id = self.fs_id_to_int(fs_id)
client_id = self.client_id_to_int(client_id)
return self._evict(fs_id, client_id)
@RESTController.Resource('GET')
def mds_counters(self, fs_id, counters=None):
fs_id = self.fs_id_to_int(fs_id)
return self._mds_counters(fs_id, counters)
def _mds_counters(self, fs_id, counters=None):
"""
Result format: map of daemon name to map of counter to list of datapoints
rtype: dict[str, dict[str, list]]
"""
if counters is None:
# Opinionated list of interesting performance counters for the GUI
counters = [
"mds_server.handle_client_request",
"mds_log.ev",
"mds_cache.num_strays",
"mds.exported",
"mds.exported_inodes",
"mds.imported",
"mds.imported_inodes",
"mds.inodes",
"mds.caps",
"mds.subtrees",
"mds_mem.ino"
]
result: dict = {}
mds_names = self._get_mds_names(fs_id)
for mds_name in mds_names:
result[mds_name] = {}
for counter in counters:
data = mgr.get_counter("mds", mds_name, counter)
if data is not None:
result[mds_name][counter] = data[counter]
else:
result[mds_name][counter] = []
return dict(result)
@staticmethod
def fs_id_to_int(fs_id):
try:
return int(fs_id)
except ValueError:
raise DashboardException(code='invalid_cephfs_id',
msg="Invalid cephfs ID {}".format(fs_id),
component='cephfs')
@staticmethod
def client_id_to_int(client_id):
try:
return int(client_id)
except ValueError:
raise DashboardException(code='invalid_cephfs_client_id',
msg="Invalid cephfs client ID {}".format(client_id),
component='cephfs')
def _get_mds_names(self, filesystem_id=None):
names = []
fsmap = mgr.get("fs_map")
for fs in fsmap['filesystems']:
if filesystem_id is not None and fs['id'] != filesystem_id:
continue
names.extend([info['name']
for _, info in fs['mdsmap']['info'].items()])
if filesystem_id is None:
names.extend(info['name'] for info in fsmap['standbys'])
return names
def _append_mds_metadata(self, mds_versions, metadata_key):
metadata = mgr.get_metadata('mds', metadata_key)
if metadata is None:
return
mds_versions[metadata.get('ceph_version', 'unknown')].append(metadata_key)
def _find_standby_replays(self, mdsmap_info, rank_table):
# pylint: disable=unused-variable
for gid_str, daemon_info in mdsmap_info.items():
if daemon_info['state'] != "up:standby-replay":
continue
inos = mgr.get_latest("mds", daemon_info['name'], "mds_mem.ino")
dns = mgr.get_latest("mds", daemon_info['name'], "mds_mem.dn")
dirs = mgr.get_latest("mds", daemon_info['name'], "mds_mem.dir")
caps = mgr.get_latest("mds", daemon_info['name'], "mds_mem.cap")
activity = CephService.get_rate(
"mds", daemon_info['name'], "mds_log.replay")
rank_table.append(
{
"rank": "{0}-s".format(daemon_info['rank']),
"state": "standby-replay",
"mds": daemon_info['name'],
"activity": activity,
"dns": dns,
"inos": inos,
"dirs": dirs,
"caps": caps
}
)
def get_standby_table(self, standbys, mds_versions):
standby_table = []
for standby in standbys:
self._append_mds_metadata(mds_versions, standby['name'])
standby_table.append({
'name': standby['name']
})
return standby_table
    # pylint: disable=too-many-statements,too-many-branches
    def fs_status(self, fs_id):
        """Assemble the full status report (ranks, pools, standbys, versions)
        for the filesystem with the numeric ID *fs_id*.

        :raises cherrypy.HTTPError: 404 if the filesystem does not exist.
        """
        mds_versions: dict = defaultdict(list)

        fsmap = mgr.get("fs_map")
        filesystem = None
        for fs in fsmap['filesystems']:
            if fs['id'] == fs_id:
                filesystem = fs
                break

        if filesystem is None:
            raise cherrypy.HTTPError(404,
                                     "CephFS id {0} not found".format(fs_id))

        rank_table = []
        mdsmap = filesystem['mdsmap']
        client_count = 0

        for rank in mdsmap["in"]:
            up = "mds_{0}".format(rank) in mdsmap["up"]
            if up:
                gid = mdsmap['up']["mds_{0}".format(rank)]
                info = mdsmap['info']['gid_{0}'.format(gid)]
                dns = mgr.get_latest("mds", info['name'], "mds_mem.dn")
                inos = mgr.get_latest("mds", info['name'], "mds_mem.ino")
                dirs = mgr.get_latest("mds", info['name'], "mds_mem.dir")
                caps = mgr.get_latest("mds", info['name'], "mds_mem.cap")

                # In case rank 0 was down, look at another rank's
                # sessionmap to get an indication of clients.
                if rank == 0 or client_count == 0:
                    client_count = mgr.get_latest("mds", info['name'],
                                                  "mds_sessions.session_count")

                laggy = "laggy_since" in info

                state = info['state'].split(":")[1]
                if laggy:
                    state += "(laggy)"

                # Populate based on context of state, e.g. client
                # ops for an active daemon, replay progress, reconnect
                # progress
                if state == "active":
                    activity = CephService.get_rate("mds",
                                                    info['name'],
                                                    "mds_server.handle_client_request")
                else:
                    activity = 0.0  # pragma: no cover

                self._append_mds_metadata(mds_versions, info['name'])
                rank_table.append(
                    {
                        "rank": rank,
                        "state": state,
                        "mds": info['name'],
                        "activity": activity,
                        "dns": dns,
                        "inos": inos,
                        "dirs": dirs,
                        "caps": caps
                    }
                )
            else:
                # Rank has no daemon assigned: report it as failed.
                rank_table.append(
                    {
                        "rank": rank,
                        "state": "failed",
                        "mds": "",
                        "activity": 0.0,
                        "dns": 0,
                        "inos": 0,
                        "dirs": 0,
                        "caps": 0
                    }
                )

        self._find_standby_replays(mdsmap['info'], rank_table)

        df = mgr.get("df")
        pool_stats = {p['id']: p['stats'] for p in df['pools']}
        osdmap = mgr.get("osd_map")
        pools = {p['pool']: p for p in osdmap['pools']}
        metadata_pool_id = mdsmap['metadata_pool']
        data_pool_ids = mdsmap['data_pools']

        pools_table = []
        for pool_id in [metadata_pool_id] + data_pool_ids:
            pool_type = "metadata" if pool_id == metadata_pool_id else "data"
            stats = pool_stats[pool_id]
            pools_table.append({
                "pool": pools[pool_id]['pool_name'],
                "type": pool_type,
                "used": stats['stored'],
                "avail": stats['max_avail']
            })

        standby_table = self.get_standby_table(fsmap['standbys'], mds_versions)

        return {
            "cephfs": {
                "id": fs_id,
                "name": mdsmap['fs_name'],
                "client_count": client_count,
                "ranks": rank_table,
                "pools": pools_table
            },
            "standbys": standby_table,
            "versions": mds_versions
        }
def _clients(self, fs_id):
cephfs_clients = self.cephfs_clients.get(fs_id, None)
if cephfs_clients is None:
cephfs_clients = CephFSClients(mgr, fs_id)
self.cephfs_clients[fs_id] = cephfs_clients
try:
status, clients = cephfs_clients.get()
except AttributeError:
raise cherrypy.HTTPError(404,
"No cephfs with id {0}".format(fs_id))
if clients is None:
raise cherrypy.HTTPError(404,
"No cephfs with id {0}".format(fs_id))
# Decorate the metadata with some fields that will be
# indepdendent of whether it's a kernel or userspace
# client, so that the javascript doesn't have to grok that.
for client in clients:
if "ceph_version" in client['client_metadata']: # pragma: no cover - no complexity
client['type'] = "userspace"
client['version'] = client['client_metadata']['ceph_version']
client['hostname'] = client['client_metadata']['hostname']
client['root'] = client['client_metadata']['root']
elif "kernel_version" in client['client_metadata']: # pragma: no cover - no complexity
client['type'] = "kernel"
client['version'] = client['client_metadata']['kernel_version']
client['hostname'] = client['client_metadata']['hostname']
client['root'] = client['client_metadata']['root']
else: # pragma: no cover - no complexity there
client['type'] = "unknown"
client['version'] = ""
client['hostname'] = ""
return {
'status': status,
'data': clients
}
def _evict(self, fs_id, client_id):
clients = self._clients(fs_id)
if not [c for c in clients['data'] if c['id'] == client_id]:
raise cherrypy.HTTPError(404,
"Client {0} does not exist in cephfs {1}".format(client_id,
fs_id))
filters = [f'id={client_id}']
CephService.send_command('mds', 'client evict',
srv_spec='{0}:0'.format(fs_id), filters=filters)
@staticmethod
def _cephfs_instance(fs_id):
"""
:param fs_id: The filesystem identifier.
:type fs_id: int | str
:return: A instance of the CephFS class.
"""
fs_name = CephFS_.fs_name_from_id(fs_id)
if fs_name is None:
raise cherrypy.HTTPError(404, "CephFS id {} not found".format(fs_id))
return CephFS_(fs_name)
@RESTController.Resource('GET')
def get_root_directory(self, fs_id):
"""
The root directory that can't be fetched using ls_dir (api).
:param fs_id: The filesystem identifier.
:return: The root directory
:rtype: dict
"""
try:
return self._get_root_directory(self._cephfs_instance(fs_id))
except (cephfs.PermissionError, cephfs.ObjectNotFound): # pragma: no cover
return None
def _get_root_directory(self, cfs):
"""
The root directory that can't be fetched using ls_dir (api).
It's used in ls_dir (ui-api) and in get_root_directory (api).
:param cfs: CephFS service instance
:type cfs: CephFS
:return: The root directory
:rtype: dict
"""
return cfs.get_directory(os.sep.encode())
@handle_cephfs_error()
@RESTController.Resource('GET')
def ls_dir(self, fs_id, path=None, depth=1):
"""
List directories of specified path.
:param fs_id: The filesystem identifier.
:param path: The path where to start listing the directory content.
Defaults to '/' if not set.
:type path: str | bytes
:param depth: The number of steps to go down the directory tree.
:type depth: int | str
:return: The names of the directories below the specified path.
:rtype: list
"""
path = self._set_ls_dir_path(path)
try:
cfs = self._cephfs_instance(fs_id)
paths = cfs.ls_dir(path, depth)
except (cephfs.PermissionError, cephfs.ObjectNotFound): # pragma: no cover
paths = []
return paths
def _set_ls_dir_path(self, path):
"""
Transforms input path parameter of ls_dir methods (api and ui-api).
:param path: The path where to start listing the directory content.
Defaults to '/' if not set.
:type path: str | bytes
:return: Normalized path or root path
:return: str
"""
if path is None:
path = os.sep
else:
path = os.path.normpath(path)
return path
@RESTController.Resource('POST', path='/tree')
@allow_empty_body
def mk_tree(self, fs_id, path):
"""
Create a directory.
:param fs_id: The filesystem identifier.
:param path: The path of the directory.
"""
cfs = self._cephfs_instance(fs_id)
cfs.mk_dirs(path)
@RESTController.Resource('DELETE', path='/tree')
def rm_tree(self, fs_id, path):
"""
Remove a directory.
:param fs_id: The filesystem identifier.
:param path: The path of the directory.
"""
cfs = self._cephfs_instance(fs_id)
cfs.rm_dir(path)
@RESTController.Resource('PUT', path='/quota')
@allow_empty_body
def quota(self, fs_id, path, max_bytes=None, max_files=None):
"""
Set the quotas of the specified path.
:param fs_id: The filesystem identifier.
:param path: The path of the directory/file.
:param max_bytes: The byte limit.
:param max_files: The file limit.
"""
cfs = self._cephfs_instance(fs_id)
return cfs.set_quotas(path, max_bytes, max_files)
@RESTController.Resource('GET', path='/quota')
@EndpointDoc("Get Cephfs Quotas of the specified path",
parameters={
'fs_id': (str, 'File System Identifier'),
'path': (str, 'File System Path'),
},
responses={200: GET_QUOTAS_SCHEMA})
def get_quota(self, fs_id, path):
"""
Get the quotas of the specified path.
:param fs_id: The filesystem identifier.
:param path: The path of the directory/file.
:return: Returns a dictionary containing 'max_bytes'
and 'max_files'.
:rtype: dict
"""
cfs = self._cephfs_instance(fs_id)
return cfs.get_quotas(path)
@RESTController.Resource('POST', path='/snapshot')
@allow_empty_body
def snapshot(self, fs_id, path, name=None):
"""
Create a snapshot.
:param fs_id: The filesystem identifier.
:param path: The path of the directory.
:param name: The name of the snapshot. If not specified, a name using the
current time in RFC3339 UTC format will be generated.
:return: The name of the snapshot.
:rtype: str
"""
cfs = self._cephfs_instance(fs_id)
list_snaps = cfs.ls_snapshots(path)
for snap in list_snaps:
if name == snap['name']:
raise DashboardException(code='Snapshot name already in use',
msg='Snapshot name {} is already in use.'
'Please use another name'.format(name),
component='cephfs')
return cfs.mk_snapshot(path, name)
@RESTController.Resource('DELETE', path='/snapshot')
def rm_snapshot(self, fs_id, path, name):
"""
Remove a snapshot.
:param fs_id: The filesystem identifier.
:param path: The path of the directory.
:param name: The name of the snapshot.
"""
cfs = self._cephfs_instance(fs_id)
cfs.rm_snapshot(path, name)
class CephFSClients(object):
    """Cached accessor for the MDS session list of one filesystem."""

    def __init__(self, module_inst, fscid):
        self._module = module_inst
        self.fscid = fscid

    @ViewCache()
    def get(self):
        """Return the `session ls` output of rank 0, or [] on failure."""
        try:
            return CephService.send_command('mds', 'session ls', srv_spec='{0}:0'.format(self.fscid))
        except RuntimeError:
            return []
@UIRouter('/cephfs', Scope.CEPHFS)
@APIDoc("Dashboard UI helper function; not part of the public API", "CephFSUi")
class CephFsUi(CephFS):
    RESOURCE_ID = 'fs_id'

    @RESTController.Resource('GET')
    def tabs(self, fs_id):
        """Collect everything the UI's detail and client tabs need in one call."""
        fs_id = self.fs_id_to_int(fs_id)

        # Needed for detail tab
        fs_status = self.fs_status(fs_id)
        for pool in fs_status['cephfs']['pools']:
            pool['size'] = pool['used'] + pool['avail']

        counters = self._mds_counters(fs_id)
        for counter_name, counter in counters.items():
            counter['name'] = counter_name

        data = {
            'pools': fs_status['cephfs']['pools'],
            'ranks': fs_status['cephfs']['ranks'],
            'name': fs_status['cephfs']['name'],
            'standbys': ', '.join(x['name'] for x in fs_status['standbys']),
            'mds_counters': counters,
            # Needed for client tab
            'clients': self._clients(fs_id),
        }
        return data

    @handle_cephfs_error()
    @RESTController.Resource('GET')
    def ls_dir(self, fs_id, path=None, depth=1):
        """
        The difference to the API version is that the root directory will be send when listing
        the root directory.
        To only do one request this endpoint was created.
        :param fs_id: The filesystem identifier.
        :type fs_id: int | str
        :param path: The path where to start listing the directory content.
            Defaults to '/' if not set.
        :type path: str | bytes
        :param depth: The number of steps to go down the directory tree.
        :type depth: int | str
        :return: The names of the directories below the specified path.
        :rtype: list
        """
        normalized = self._set_ls_dir_path(path)
        try:
            cfs = self._cephfs_instance(fs_id)
            entries = cfs.ls_dir(normalized, depth)
            if normalized == os.sep:
                # Prepend the root entry itself, which ls_dir does not return.
                entries = [self._get_root_directory(cfs)] + entries
        except (cephfs.PermissionError, cephfs.ObjectNotFound):  # pragma: no cover
            entries = []
        return entries
| 20,408 | 34.994709 | 100 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/cluster.py
|
# -*- coding: utf-8 -*-
from typing import Dict, List, Optional
from ..security import Scope
from ..services.cluster import ClusterModel
from ..services.exception import handle_orchestrator_error
from ..services.orchestrator import OrchClient, OrchFeature
from ..tools import str_to_bool
from . import APIDoc, APIRouter, CreatePermission, Endpoint, EndpointDoc, \
ReadPermission, RESTController, UpdatePermission, allow_empty_body
from ._version import APIVersion
from .orchestrator import raise_if_no_orchestrator
@APIRouter('/cluster', Scope.CONFIG_OPT)
@APIDoc("Get Cluster Details", "Cluster")
class Cluster(RESTController):
    """REST endpoints exposing the persisted installation status."""

    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    @EndpointDoc("Get the cluster status")
    def list(self):
        """Return the stored cluster status as a plain dict."""
        return ClusterModel.from_db().dict()

    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    @EndpointDoc("Update the cluster status",
                 parameters={'status': (str, 'Cluster Status')})
    def singleton_set(self, status: str):
        """Persist the given cluster status."""
        ClusterModel(status).to_db()
@APIRouter('/cluster/upgrade', Scope.CONFIG_OPT)
@APIDoc("Upgrade Management API", "Upgrade")
class ClusterUpgrade(RESTController):
    """Thin REST facade over the orchestrator's upgrade operations."""

    @RESTController.MethodMap()
    @raise_if_no_orchestrator([OrchFeature.UPGRADE_LIST])
    @handle_orchestrator_error('upgrade')
    @EndpointDoc("Get the available versions to upgrade",
                 parameters={
                     'image': (str, 'Ceph Image'),
                     'tags': (bool, 'Show all image tags'),
                     'show_all_versions': (bool, 'Show all available versions')
                 })
    @ReadPermission
    def list(self, tags: bool = False, image: Optional[str] = None,
             show_all_versions: Optional[bool] = False) -> Dict:
        """List the versions the cluster could be upgraded to."""
        return OrchClient.instance().upgrades.list(
            image, str_to_bool(tags), str_to_bool(show_all_versions))

    @Endpoint()
    @raise_if_no_orchestrator([OrchFeature.UPGRADE_STATUS])
    @handle_orchestrator_error('upgrade')
    @EndpointDoc("Get the cluster upgrade status")
    @ReadPermission
    def status(self) -> Dict:
        """Return the current upgrade status as JSON."""
        return OrchClient.instance().upgrades.status().to_json()

    @Endpoint('POST')
    @raise_if_no_orchestrator([OrchFeature.UPGRADE_START])
    @handle_orchestrator_error('upgrade')
    @EndpointDoc("Start the cluster upgrade")
    @CreatePermission
    def start(self, image: Optional[str] = None, version: Optional[str] = None,
              daemon_types: Optional[List[str]] = None, host_placement: Optional[str] = None,
              services: Optional[List[str]] = None, limit: Optional[int] = None) -> str:
        """Kick off an upgrade towards the given image/version."""
        return OrchClient.instance().upgrades.start(
            image, version, daemon_types, host_placement, services, limit)

    @Endpoint('PUT')
    @raise_if_no_orchestrator([OrchFeature.UPGRADE_PAUSE])
    @handle_orchestrator_error('upgrade')
    @EndpointDoc("Pause the cluster upgrade")
    @UpdatePermission
    @allow_empty_body
    def pause(self) -> str:
        """Pause a running upgrade."""
        return OrchClient.instance().upgrades.pause()

    @Endpoint('PUT')
    @raise_if_no_orchestrator([OrchFeature.UPGRADE_RESUME])
    @handle_orchestrator_error('upgrade')
    @EndpointDoc("Resume the cluster upgrade")
    @UpdatePermission
    @allow_empty_body
    def resume(self) -> str:
        """Resume a paused upgrade."""
        return OrchClient.instance().upgrades.resume()

    @Endpoint('PUT')
    @raise_if_no_orchestrator([OrchFeature.UPGRADE_STOP])
    @handle_orchestrator_error('upgrade')
    @EndpointDoc("Stop the cluster upgrade")
    @UpdatePermission
    @allow_empty_body
    def stop(self) -> str:
        """Stop the upgrade entirely."""
        return OrchClient.instance().upgrades.stop()
| 3,916 | 37.401961 | 98 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/cluster_configuration.py
|
# -*- coding: utf-8 -*-
import cherrypy
from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from ..services.ceph_service import CephService
from . import APIDoc, APIRouter, EndpointDoc, RESTController
# OpenAPI response schema for ``ClusterConfiguration.filter`` below: each
# entry describes one MON config option.
FILTER_SCHEMA = [{
    "name": (str, 'Name of the config option'),
    "type": (str, 'Config option type'),
    "level": (str, 'Config option level'),
    "desc": (str, 'Description of the configuration'),
    "long_desc": (str, 'Elaborated description'),
    "default": (str, 'Default value for the config option'),
    "daemon_default": (str, 'Daemon specific default value'),
    "tags": ([str], 'Tags associated with the cluster'),
    "services": ([str], 'Services associated with the config option'),
    "see_also": ([str], 'Related config options'),
    "enum_values": ([str], 'List of enums allowed'),
    "min": (str, 'Minimum value'),
    "max": (str, 'Maximum value'),
    "can_update_at_runtime": (bool, 'Check if can update at runtime'),
    "flags": ([str], 'List of flags associated')
}]
@APIRouter('/cluster_conf', Scope.CONFIG_OPT)
@APIDoc("Manage Cluster Configurations", "ClusterConfiguration")
class ClusterConfiguration(RESTController):
    """REST endpoints for reading and changing MON config options."""

    def _append_config_option_values(self, options):
        """
        Appends values from the config database (if available) to the given options
        :param options: list of config options
        :return: list of config options extended by their current values
        """
        config_dump = CephService.send_command('mon', 'config dump')
        mgr_config = mgr.get('config')
        config_dump.append({'name': 'fsid', 'section': 'mgr', 'value': mgr_config['fsid']})

        # Perf: index the options by name once instead of rescanning the whole
        # list for every dump entry (O(n+m) instead of O(n*m)). Assumes option
        # names are unique in the 'config_options' report — TODO confirm.
        options_by_name = {option['name']: option for option in options}
        for config_dump_entry in config_dump:
            option = options_by_name.get(config_dump_entry['name'])
            if option is None:
                continue
            if 'value' not in option:
                option['value'] = []
                option['source'] = 'mon'
            option['value'].append({'section': config_dump_entry['section'],
                                    'value': config_dump_entry['value']})
        return options

    def list(self):
        """Return all config options together with their current values."""
        options = mgr.get('config_options')['options']
        return self._append_config_option_values(options)

    def get(self, name):
        """Return a single config option (including its values) by name."""
        return self._get_config_option(name)

    @RESTController.Collection('GET', query_params=['name'])
    @EndpointDoc("Get Cluster Configuration by name",
                 parameters={
                     'names': (str, 'Config option names'),
                 },
                 responses={200: FILTER_SCHEMA})
    def filter(self, names=None):
        """Return the subset of options matching the comma-separated *names*."""
        config_options = []

        if names:
            for name in names.split(','):
                try:
                    config_options.append(self._get_config_option(name))
                except cherrypy.HTTPError:
                    # Unknown names are skipped; a 404 is only raised when
                    # none of the requested options exist.
                    pass

        if not config_options:
            raise cherrypy.HTTPError(404, 'Config options `{}` not found'.format(names))

        return config_options

    def create(self, name, value):
        """
        Set (or clear) a config option in one or more sections.

        :param name: name of the config option
        :param value: list of ``{'section': ..., 'value': ...}`` entries
        """
        # Check if config option is updateable at runtime
        self._updateable_at_runtime([name])

        # Update config option
        avail_sections = ['global', 'mon', 'mgr', 'osd', 'mds', 'client']

        for section in avail_sections:
            for entry in value:
                if entry['value'] is None:
                    break

                if entry['section'] == section:
                    CephService.send_command('mon', 'config set', who=section, name=name,
                                             value=str(entry['value']))
                    break
            else:
                # for/else: no entry addressed this section (and no None value
                # aborted the scan), so drop any previously stored value.
                CephService.send_command('mon', 'config rm', who=section, name=name)

    def delete(self, name, section):
        """Remove the option from the given section."""
        return CephService.send_command('mon', 'config rm', who=section, name=name)

    def bulk_set(self, options):
        """Set several options at once; each value carries its own section."""
        self._updateable_at_runtime(options.keys())

        for name, value in options.items():
            CephService.send_command('mon', 'config set', who=value['section'],
                                     name=name, value=str(value['value']))

    def _get_config_option(self, name):
        # Raises 404 when the option is unknown to the cluster.
        for option in mgr.get('config_options')['options']:
            if option['name'] == name:
                return self._append_config_option_values([option])[0]

        raise cherrypy.HTTPError(404)

    def _updateable_at_runtime(self, config_option_names):
        """Raise a DashboardException if any option cannot be changed at runtime."""
        not_updateable = []

        for name in config_option_names:
            config_option = self._get_config_option(name)
            if not config_option['can_update_at_runtime']:
                not_updateable.append(name)

        if not_updateable:
            raise DashboardException(
                msg='Config option {} is/are not updatable at runtime'.format(
                    ', '.join(not_updateable)),
                code='config_option_not_updatable_at_runtime',
                component='cluster_configuration')
| 5,085 | 37.240602 | 91 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/crush_rule.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from cherrypy import NotFound
from .. import mgr
from ..security import Scope
from ..services.ceph_service import CephService
from . import APIDoc, APIRouter, Endpoint, EndpointDoc, ReadPermission, RESTController, UIRouter
from ._version import APIVersion
# OpenAPI schema of a single CRUSH rule as returned by ``CrushRule.list``.
LIST_SCHEMA = {
    "rule_id": (int, 'Rule ID'),
    "rule_name": (str, 'Rule Name'),
    "ruleset": (int, 'RuleSet related to the rule'),
    "type": (int, 'Type of Rule'),
    "min_size": (int, 'Minimum size of Rule'),
    "max_size": (int, 'Maximum size of Rule'),
    'steps': ([{str}], 'Steps included in the rule')
}
@APIRouter('/crush_rule', Scope.POOL)
@APIDoc("Crush Rule Management API", "CrushRule")
class CrushRule(RESTController):
    """CRUD endpoints for CRUSH rules."""

    @EndpointDoc("List Crush Rule Configuration",
                 responses={200: LIST_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))
    def list(self):
        """Return all rules of the current CRUSH map."""
        return mgr.get('osd_map_crush')['rules']

    @RESTController.MethodMap(version=APIVersion(2, 0))
    def get(self, name):
        """Return the rule with the given name or raise 404."""
        rule = next((r for r in mgr.get('osd_map_crush')['rules']
                     if r['rule_name'] == name), None)
        if rule is None:
            raise NotFound('No such crush rule')
        return rule

    def create(self, name, root, failure_domain, device_class=None):
        """Create a replicated rule from the given placement parameters."""
        # 'class' is a reserved word, hence the dict + ** expansion.
        args = {
            'name': name,
            'root': root,
            'type': failure_domain,
            'class': device_class
        }
        CephService.send_command('mon', 'osd crush rule create-replicated', **args)

    def delete(self, name):
        """Delete the rule with the given name."""
        CephService.send_command('mon', 'osd crush rule rm', name=name)
@UIRouter('/crush_rule', Scope.POOL)
@APIDoc("Dashboard UI helper function; not part of the public API", "CrushRuleUi")
class CrushRuleUi(CrushRule):
    @Endpoint()
    @ReadPermission
    def info(self):
        '''Used for crush rule creation modal'''
        osd_map = mgr.get_osdmap()
        crush = osd_map.get_crush()
        # NOTE(review): the dump() result is discarded — presumably the call
        # has a required side effect (initializing the crush wrapper before
        # find_roots()); confirm before removing this line.
        crush.dump()
        return {
            'names': [r['rule_name'] for r in mgr.get('osd_map_crush')['rules']],
            'nodes': mgr.get('osd_map_tree')['nodes'],
            'roots': crush.find_roots()
        }
| 2,213 | 31.086957 | 96 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/daemon.py
|
# -*- coding: utf-8 -*-
from typing import Optional
from ..exceptions import DashboardException
from ..security import Scope
from ..services.exception import handle_orchestrator_error
from ..services.orchestrator import OrchClient, OrchFeature
from . import APIDoc, APIRouter, RESTController
from ._version import APIVersion
from .orchestrator import raise_if_no_orchestrator
@APIRouter('/daemon', Scope.HOSTS)
@APIDoc("Perform actions on daemons", "Daemon")
class Daemon(RESTController):
    """Forward lifecycle actions for single daemons to the orchestrator."""

    @raise_if_no_orchestrator([OrchFeature.DAEMON_ACTION])
    @handle_orchestrator_error('daemon')
    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def set(self, daemon_name: str, action: str = '',
            container_image: Optional[str] = None):
        """Run *action* (start/stop/restart/redeploy) on the named daemon."""
        supported_actions = ('start', 'stop', 'restart', 'redeploy')
        if action not in supported_actions:
            raise DashboardException(
                code='invalid_daemon_action',
                msg=f'Daemon action "{action}" is either not valid or not supported.')
        # non 'None' container_images change need a redeploy
        if container_image == '' and action != 'redeploy':
            container_image = None

        return OrchClient.instance().daemons.action(
            action=action, daemon_name=daemon_name, image=container_image)
| 1,310 | 37.558824 | 96 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/docs.py
|
# -*- coding: utf-8 -*-
import logging
from typing import Any, Dict, List, Optional, Union
import cherrypy
from .. import mgr
from ..api.doc import Schema, SchemaInput, SchemaType
from . import ENDPOINT_MAP, BaseController, Endpoint, Router
from ._version import APIVersion
NO_DESCRIPTION_AVAILABLE = "*No description available*"
logger = logging.getLogger('controllers.docs')
@Router('/docs', secure=False)
class Docs(BaseController):
    """Generates the OpenAPI 3.0 specification describing the REST API."""

    @classmethod
    def _gen_tags(cls, all_endpoints):
        """ Generates a list of all tags and corresponding descriptions. """
        # Scenarios to consider:
        #     * Intentionally make up a new tag name at controller => New tag name displayed.
        #     * Misspell or make up a new tag name at endpoint => Neither tag or endpoint displayed.
        #     * Misspell tag name at controller (when referring to another controller) =>
        #       Tag displayed but no endpoints assigned
        #     * Description for a tag added at multiple locations => Only one description displayed.
        list_of_ctrl = set()
        for endpoints in ENDPOINT_MAP.values():
            for endpoint in endpoints:
                if endpoint.is_api or all_endpoints:
                    list_of_ctrl.add(endpoint.ctrl)

        tag_map: Dict[str, str] = {}
        for ctrl in sorted(list_of_ctrl, key=lambda ctrl: ctrl.__name__):
            tag_name = ctrl.__name__
            tag_descr = ""
            if hasattr(ctrl, 'doc_info'):
                if ctrl.doc_info['tag']:
                    tag_name = ctrl.doc_info['tag']
                tag_descr = ctrl.doc_info['tag_descr']
            if tag_name not in tag_map or not tag_map[tag_name]:
                tag_map[tag_name] = tag_descr

        tags = [{'name': k, 'description': v if v else NO_DESCRIPTION_AVAILABLE}
                for k, v in tag_map.items()]
        tags.sort(key=lambda e: e['name'])
        return tags

    @classmethod
    def _get_tag(cls, endpoint):
        """ Returns the name of a tag to assign to a path. """
        ctrl = endpoint.ctrl
        func = endpoint.func
        tag = ctrl.__name__
        # Endpoint-level tag wins over the controller-level one.
        if hasattr(func, 'doc_info') and func.doc_info['tag']:
            tag = func.doc_info['tag']
        elif hasattr(ctrl, 'doc_info') and ctrl.doc_info['tag']:
            tag = ctrl.doc_info['tag']
        return tag

    @classmethod
    def _gen_type(cls, param):
        # pylint: disable=too-many-return-statements
        """
        Generates the type of parameter based on its name and default value,
        using very simple heuristics.
        Used if type is not explicitly defined.
        """
        param_name = param['name']
        def_value = param['default'] if 'default' in param else None
        if param_name.startswith("is_"):
            return str(SchemaType.BOOLEAN)
        if "size" in param_name:
            return str(SchemaType.INTEGER)
        if "count" in param_name:
            return str(SchemaType.INTEGER)
        if "num" in param_name:
            return str(SchemaType.INTEGER)
        if isinstance(def_value, bool):
            return str(SchemaType.BOOLEAN)
        if isinstance(def_value, int):
            return str(SchemaType.INTEGER)
        return str(SchemaType.STRING)

    @classmethod
    # isinstance doesn't work: input is always <type 'type'>.
    def _type_to_str(cls, type_as_type):
        """ Used if type is explicitly defined. """
        if type_as_type is str:
            type_as_str = str(SchemaType.STRING)
        elif type_as_type is int:
            type_as_str = str(SchemaType.INTEGER)
        elif type_as_type is bool:
            type_as_str = str(SchemaType.BOOLEAN)
        elif type_as_type is list or type_as_type is tuple:
            type_as_str = str(SchemaType.ARRAY)
        elif type_as_type is float:
            type_as_str = str(SchemaType.NUMBER)
        else:
            type_as_str = str(SchemaType.OBJECT)
        return type_as_str

    @classmethod
    def _add_param_info(cls, parameters, p_info):
        # Cases to consider:
        #     * Parameter name (if not nested) misspelt in decorator => parameter not displayed
        #     * Sometimes a parameter is used for several endpoints (e.g. fs_id in CephFS).
        #       Currently, there is no possibility of reuse. Should there be?
        #       But what if there are two parameters with same name but different functionality?
        """
        Adds explicitly described information for parameters of an endpoint.

        There are two cases:
        * Either the parameter in p_info corresponds to an endpoint parameter. Implicit information
        has higher priority, so only information that doesn't already exist is added.
        * Or the parameter in p_info describes a nested parameter inside an endpoint parameter.
        In that case there is no implicit information at all so all explicitly described info needs
        to be added.
        """
        for p in p_info:
            if not p['nested']:
                for parameter in parameters:
                    if p['name'] == parameter['name']:
                        parameter['type'] = p['type']
                        parameter['description'] = p['description']
                        if 'nested_params' in p:
                            parameter['nested_params'] = cls._add_param_info([], p['nested_params'])
            else:
                nested_p = {
                    'name': p['name'],
                    'type': p['type'],
                    'description': p['description'],
                    'required': p['required'],
                }
                if 'default' in p:
                    nested_p['default'] = p['default']
                if 'nested_params' in p:
                    nested_p['nested_params'] = cls._add_param_info([], p['nested_params'])
                parameters.append(nested_p)
        return parameters

    @classmethod
    def _gen_schema_for_content(cls, params: List[Any]) -> Dict[str, Any]:
        """
        Generates information to the content-object in OpenAPI Spec.
        Used to for request body and responses.
        """
        required_params = []
        properties = {}
        schema_type = SchemaType.OBJECT
        if isinstance(params, SchemaInput):
            schema_type = params.type
            params = params.params

        for param in params:
            if param['required']:
                required_params.append(param['name'])

            props = {}
            if 'type' in param:
                props['type'] = cls._type_to_str(param['type'])
                if 'nested_params' in param:
                    if props['type'] == str(SchemaType.ARRAY):  # dict in array
                        props['items'] = cls._gen_schema_for_content(param['nested_params'])
                    else:  # dict in dict
                        props = cls._gen_schema_for_content(param['nested_params'])
                elif props['type'] == str(SchemaType.OBJECT):  # e.g. [int]
                    props['type'] = str(SchemaType.ARRAY)
                    props['items'] = {'type': cls._type_to_str(param['type'][0])}
            else:
                props['type'] = cls._gen_type(param)
            if 'description' in param:
                props['description'] = param['description']
            if 'default' in param:
                props['default'] = param['default']
            properties[param['name']] = props

        schema = Schema(schema_type=schema_type, properties=properties,
                        required=required_params)

        return schema.as_dict()

    @classmethod
    def _gen_responses(cls, method, resp_object=None,
                       version: Optional[APIVersion] = None):
        """Build the OpenAPI 'responses' object for one HTTP method."""
        # Standard error responses shared by every endpoint.
        resp: Dict[str, Dict[str, Union[str, Any]]] = {
            '400': {
                "description": "Operation exception. Please check the "
                               "response body for details."
            },
            '401': {
                "description": "Unauthenticated access. Please login first."
            },
            '403': {
                "description": "Unauthorized access. Please check your "
                               "permissions."
            },
            '500': {
                "description": "Unexpected error. Please check the "
                               "response body for the stack trace."
            }
        }

        if not version:
            version = APIVersion.DEFAULT

        if method.lower() == 'get':
            resp['200'] = {'description': "OK",
                           'content': {version.to_mime_type():
                                       {'type': 'object'}}}
        if method.lower() == 'post':
            resp['201'] = {'description': "Resource created.",
                           'content': {version.to_mime_type():
                                       {'type': 'object'}}}
        if method.lower() == 'put':
            resp['200'] = {'description': "Resource updated.",
                           'content': {version.to_mime_type():
                                       {'type': 'object'}}}
        if method.lower() == 'delete':
            resp['204'] = {'description': "Resource deleted.",
                           'content': {version.to_mime_type():
                                       {'type': 'object'}}}
        if method.lower() in ['post', 'put', 'delete']:
            resp['202'] = {'description': "Operation is still executing."
                                          " Please check the task queue.",
                           'content': {version.to_mime_type():
                                       {'type': 'object'}}}

        if resp_object:
            for status_code, response_body in resp_object.items():
                if status_code in resp:
                    resp[status_code].update(
                        {'content':
                         {version.to_mime_type():
                          {'schema': cls._gen_schema_for_content(response_body)}
                          }})

        return resp

    @classmethod
    def _gen_params(cls, params, location):
        """Translate endpoint params into OpenAPI parameter objects for *location*."""
        parameters = []
        for param in params:
            if 'type' in param:
                _type = cls._type_to_str(param['type'])
            else:
                _type = cls._gen_type(param)
            res = {
                'name': param['name'],
                'in': location,
                'schema': {
                    'type': _type
                },
            }
            if param.get('description'):
                res['description'] = param['description']
            if param['required']:
                res['required'] = True
            elif param['default'] is None:
                res['allowEmptyValue'] = True
            else:
                res['default'] = param['default']
            parameters.append(res)

        return parameters

    @staticmethod
    def _process_func_attr(func):
        """Extract summary, API version, response schema and param docs from *func*."""
        summary = ''
        version = None
        response = {}
        p_info = []

        # The decorators store their version under different attributes
        # depending on how the endpoint was declared.
        if hasattr(func, '__method_map_method__'):
            version = func.__method_map_method__['version']
        elif hasattr(func, '__resource_method__'):
            version = func.__resource_method__['version']
        elif hasattr(func, '__collection_method__'):
            version = func.__collection_method__['version']

        if hasattr(func, 'doc_info'):
            if func.doc_info['summary']:
                summary = func.doc_info['summary']
            response = func.doc_info['response']
            p_info = func.doc_info['parameters']
        return summary, version, response, p_info

    @classmethod
    def _get_params(cls, endpoint, para_info):
        """Collect the path and query parameter objects of an endpoint."""
        params = []

        def extend_params(endpoint_params, param_name):
            # Merge explicit doc info into the implicit endpoint params.
            if endpoint_params:
                params.extend(
                    cls._gen_params(
                        cls._add_param_info(endpoint_params, para_info), param_name))

        extend_params(endpoint.path_params, 'path')
        extend_params(endpoint.query_params, 'query')
        return params

    @classmethod
    def set_request_body_param(cls, endpoint_param, method, methods, p_info):
        """Attach a JSON requestBody schema to *methods* for the given params."""
        if endpoint_param:
            params_info = cls._add_param_info(endpoint_param, p_info)
            methods[method.lower()]['requestBody'] = {
                'content': {
                    'application/json': {
                        'schema': cls._gen_schema_for_content(params_info)}}}

    @classmethod
    def gen_paths(cls, all_endpoints):
        """Build the OpenAPI 'paths' object for every registered endpoint."""
        # pylint: disable=R0912
        method_order = ['get', 'post', 'put', 'delete']
        paths = {}
        for path, endpoints in sorted(list(ENDPOINT_MAP.items()),
                                      key=lambda p: p[0]):
            methods = {}
            skip = False

            endpoint_list = sorted(endpoints, key=lambda e:
                                   method_order.index(e.method.lower()))
            for endpoint in endpoint_list:
                # Non-API (UI-only) endpoints are omitted unless requested.
                if not endpoint.is_api and not all_endpoints:
                    skip = True
                    break

                method = endpoint.method
                func = endpoint.func

                summary, version, resp, p_info = cls._process_func_attr(func)
                params = cls._get_params(endpoint, p_info)

                methods[method.lower()] = {
                    'tags': [cls._get_tag(endpoint)],
                    'description': func.__doc__,
                    'parameters': params,
                    'responses': cls._gen_responses(method, resp, version)
                }
                if summary:
                    methods[method.lower()]['summary'] = summary

                if method.lower() in ['post', 'put']:
                    cls.set_request_body_param(endpoint.body_params, method, methods, p_info)
                    cls.set_request_body_param(endpoint.query_params, method, methods, p_info)

                if endpoint.is_secure:
                    methods[method.lower()]['security'] = [{'jwt': []}]

            if not skip:
                paths[path] = methods

        return paths

    @classmethod
    def _gen_spec(cls, all_endpoints=False, base_url="", offline=False):
        """
        Assemble the complete OpenAPI document.

        :param all_endpoints: include UI-only endpoints as well.
        :param base_url: URL prefix the API is served under.
        :param offline: generate without a live cherrypy request (CLI mode).
        """
        if all_endpoints:
            base_url = ""

        host = cherrypy.request.base.split('://', 1)[1] if not offline else 'example.com'
        logger.debug("Host: %s", host)

        paths = cls.gen_paths(all_endpoints)

        if not base_url:
            base_url = "/"

        scheme = 'https' if offline or mgr.get_localized_module_option('ssl') else 'http'

        spec = {
            'openapi': "3.0.0",
            'info': {
                'description': "This is the official Ceph REST API",
                'version': "v1",
                'title': "Ceph REST API"
            },
            'host': host,
            'basePath': base_url,
            'servers': [{'url': "{}{}".format(
                cherrypy.request.base if not offline else '',
                base_url)}],
            'tags': cls._gen_tags(all_endpoints),
            'schemes': [scheme],
            'paths': paths,
            'components': {
                'securitySchemes': {
                    'jwt': {
                        'type': 'http',
                        'scheme': 'bearer',
                        'bearerFormat': 'JWT'
                    }
                }
            }
        }

        return spec

    @Endpoint(path="openapi.json", version=None)
    def open_api_json(self):
        """Spec covering only the public API endpoints."""
        return self._gen_spec(False, "/")

    @Endpoint(path="api-all.json", version=None)
    def api_all_json(self):
        """Spec covering all endpoints, including UI-only ones."""
        return self._gen_spec(True, "/")
if __name__ == "__main__":
    # CLI mode: dump the public API spec as YAML to the file given as argv[1].
    import sys

    import yaml

    def fix_null_descr(obj):
        """
        A hot fix for errors caused by null description values when generating
        static documentation: better fix would be default values in source
        to be 'None' strings: however, decorator changes didn't resolve
        """
        return {k: fix_null_descr(v) for k, v in obj.items() if v is not None} \
            if isinstance(obj, dict) else obj

    Router.generate_routes("/api")
    try:
        with open(sys.argv[1], 'w') as f:
            # pylint: disable=protected-access
            yaml.dump(
                fix_null_descr(Docs._gen_spec(all_endpoints=False, base_url="/", offline=True)),
                f)
    except IndexError:
        sys.exit("Output file name missing; correct syntax is: `cmd <file.yml>`")
    except IsADirectoryError:
        sys.exit("Specified output is a directory; correct syntax is: `cmd <file.yml>`")
| 16,752 | 37.424312 | 100 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/erasure_code_profile.py
|
# -*- coding: utf-8 -*-
from cherrypy import NotFound
from .. import mgr
from ..security import Scope
from ..services.ceph_service import CephService
from . import APIDoc, APIRouter, Endpoint, EndpointDoc, ReadPermission, RESTController, UIRouter
# OpenAPI schema of an erasure-code profile as returned by
# ``ErasureCodeProfile.list``.
LIST_CODE__SCHEMA = {
    "crush-failure-domain": (str, ''),
    "k": (int, 'Number of data chunks'),
    "m": (int, 'Number of coding chunks'),
    "plugin": (str, 'Plugin Info'),
    "technique": (str, ''),
    "name": (str, 'Name of the profile')
}
@APIRouter('/erasure_code_profile', Scope.POOL)
@APIDoc("Erasure Code Profile Management API", "ErasureCodeProfile")
class ErasureCodeProfile(RESTController):
    """
    create() supports additional key-value arguments that are passed to the
    ECP plugin.
    """

    @EndpointDoc("List Erasure Code Profile Information",
                 responses={'200': [LIST_CODE__SCHEMA]})
    def list(self):
        """Return all erasure-code profiles known to the cluster."""
        return CephService.get_erasure_code_profiles()

    def get(self, name):
        """Return the profile with the given name or raise 404."""
        match = next((p for p in CephService.get_erasure_code_profiles()
                      if p['name'] == name), None)
        if match is None:
            raise NotFound('No such erasure code profile')
        return match

    def create(self, name, **kwargs):
        """Create a profile; extra kwargs become `key=value` plugin settings."""
        profile = ['{}={}'.format(key, value) for key, value in kwargs.items()]
        CephService.send_command('mon', 'osd erasure-code-profile set',
                                 name=name, profile=profile)

    def delete(self, name):
        """Remove the profile with the given name."""
        CephService.send_command('mon', 'osd erasure-code-profile rm', name=name)
@UIRouter('/erasure_code_profile', Scope.POOL)
@APIDoc("Dashboard UI helper function; not part of the public API", "ErasureCodeProfileUi")
class ErasureCodeProfileUi(ErasureCodeProfile):
    @Endpoint()
    @ReadPermission
    def info(self):
        """
        Used for profile creation and editing
        """
        config = mgr.get('config')
        # 'shec' and 'clay' are experimental, so they are absent from the
        # configured plugin list; append them explicitly for the UI.
        plugins = config['osd_erasure_code_plugins'].split() + ['shec', 'clay']
        profiles = mgr.get('osd_map').get('erasure_code_profiles', {})
        return {
            'plugins': plugins,
            'directory': config['erasure_code_dir'],
            'nodes': mgr.get('osd_map_tree')['nodes'],
            'names': list(profiles.keys())
        }
| 2,324 | 34.227273 | 96 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/feedback.py
|
# # -*- coding: utf-8 -*-
from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from . import APIDoc, APIRouter, BaseController, Endpoint, ReadPermission, RESTController, UIRouter
from ._version import APIVersion
@APIRouter('/feedback', Scope.CONFIG_OPT)
@APIDoc("Feedback API", "Report")
class FeedbackController(RESTController):
    """Proxy issue listing/creation to the `feedback` mgr module."""

    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def list(self):
        """
        List all issues details.
        """
        try:
            response = mgr.remote('feedback', 'get_issues')
        except RuntimeError as error:
            # BUG FIX: `RuntimeError` carries no `status_code` attribute, so
            # the original `error.status_code` raised `AttributeError` while
            # reporting the failure. Fall back to 500 when it is absent.
            raise DashboardException(
                msg=f'Error in fetching issue list: {str(error)}',
                http_status_code=getattr(error, 'status_code', 500),
                component='feedback')
        return response

    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def create(self, project, tracker, subject, description, api_key=None):
        """
        Create an issue.
        :param project: The affected ceph component.
        :param tracker: The tracker type.
        :param subject: The title of the issue.
        :param description: The description of the issue.
        :param api_key: Ceph tracker api key.
        """
        try:
            response = mgr.remote('feedback', 'validate_and_create_issue',
                                  project, tracker, subject, description, api_key)
        except RuntimeError as error:
            # Map the remote module's error text onto user-facing messages.
            if "Invalid issue tracker API key" in str(error):
                raise DashboardException(
                    msg='Error in creating tracker issue: Invalid API key',
                    component='feedback')
            if "KeyError" in str(error):
                raise DashboardException(
                    msg=f'Error in creating tracker issue: {error}',
                    component='feedback')
            raise DashboardException(msg=f'{error}',
                                     http_status_code=500,
                                     component='feedback')
        return response
@APIRouter('/feedback/api_key', Scope.CONFIG_OPT)
@APIDoc(group="Report")
class FeedbackApiController(RESTController):
    """Manage the Ceph tracker API key through the `feedback` mgr module."""

    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def list(self):
        """
        Returns Ceph tracker API key.
        """
        try:
            stored_key = mgr.remote('feedback', 'get_api_key')
        except ImportError:
            raise DashboardException(msg='Feedback module not found.',
                                     http_status_code=404,
                                     component='feedback')
        except RuntimeError as error:
            raise DashboardException(msg=f'{error}',
                                     http_status_code=500,
                                     component='feedback')
        if stored_key is None:
            raise DashboardException(msg='Issue tracker API key is not set',
                                     component='feedback')
        return stored_key

    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def create(self, api_key):
        """
        Sets Ceph tracker API key.
        :param api_key: The Ceph tracker API key.
        """
        try:
            return mgr.remote('feedback', 'set_api_key', api_key)
        except RuntimeError as error:
            raise DashboardException(msg=f'{error}',
                                     component='feedback')

    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def bulk_delete(self):
        """
        Deletes Ceph tracker API key.
        """
        try:
            return mgr.remote('feedback', 'delete_api_key')
        except RuntimeError as error:
            raise DashboardException(msg=f'{error}',
                                     http_status_code=500,
                                     component='feedback')
@UIRouter('/feedback/api_key', Scope.CONFIG_OPT)
class FeedbackUiController(BaseController):
    """UI helper reporting whether a tracker API key is configured."""

    @Endpoint()
    @ReadPermission
    def exist(self):
        """
        Checks if Ceph tracker API key is stored.
        """
        try:
            return mgr.remote('feedback', 'is_api_key_set')
        except RuntimeError:
            raise DashboardException(msg='Feedback module is not enabled',
                                     http_status_code=404,
                                     component='feedback')
| 4,560 | 36.694215 | 99 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/frontend_logging.py
|
import logging
from . import BaseController, Endpoint, UIRouter
logger = logging.getLogger('frontend.error')
@UIRouter('/logging', secure=False)
class FrontendLogging(BaseController):
    """Receive JavaScript errors posted by the frontend and log them server-side."""

    @Endpoint('POST', path='js-error')
    def jsError(self, url, message, stack=None):  # noqa: N802
        """Log a frontend JS error.

        :param url: page URL where the error occurred.
        :param message: the error message.
        :param stack: optional JS stack trace.
        """
        logger.error('(%s): %s\n %s\n', url, message, stack)
| 352 | 24.214286 | 62 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/grafana.py
|
# -*- coding: utf-8 -*-
from .. import mgr
from ..grafana import GrafanaRestClient, push_local_dashboards
from ..security import Scope
from ..services.exception import handle_error
from ..settings import Settings
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
ReadPermission, UpdatePermission
# EndpointDoc response schema for Grafana.url().
URL_SCHEMA = {
    "instance": (str, "grafana instance")
}
@APIRouter('/grafana', Scope.GRAFANA)
@APIDoc("Grafana Management API", "Grafana")
class Grafana(BaseController):
    """Expose the configured Grafana instance to the dashboard frontend."""

    @Endpoint()
    @ReadPermission
    @EndpointDoc("List Grafana URL Instance", responses={200: URL_SCHEMA})
    def url(self):
        """Return the Grafana URL the frontend should use (trailing '/' stripped)."""
        grafana_url = mgr.get_module_option('GRAFANA_API_URL')
        grafana_frontend_url = mgr.get_module_option('GRAFANA_FRONTEND_API_URL')
        if grafana_frontend_url != '' and grafana_url == '':
            # A frontend URL without a backend API URL is treated as unset.
            url = ''
        else:
            # Reuse the options fetched above instead of querying the module
            # option store a second time (the original re-read both values).
            url = (grafana_frontend_url or grafana_url).rstrip('/')
        response = {'instance': url}
        return response

    @Endpoint()
    @ReadPermission
    @handle_error('grafana')
    def validation(self, params):
        """Validate a dashboard UID against the Grafana HTTP API.

        :param params: dashboard UID, appended to ``/api/dashboards/uid/``.
        """
        grafana = GrafanaRestClient()
        method = 'GET'
        url = str(Settings.GRAFANA_API_URL).rstrip('/') + \
            '/api/dashboards/uid/' + params
        response = grafana.url_validation(method, url)
        return response

    @Endpoint(method='POST')
    @UpdatePermission
    @handle_error('grafana', 500)
    def dashboards(self):
        """Push the locally shipped dashboards to Grafana."""
        response = dict()
        response['success'] = push_local_dashboards()
        return response
| 1,636 | 31.74 | 80 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/health.py
|
# -*- coding: utf-8 -*-
import json
from .. import mgr
from ..rest_client import RequestException
from ..security import Permission, Scope
from ..services.ceph_service import CephService
from ..services.cluster import ClusterModel
from ..services.iscsi_cli import IscsiGatewaysConfig
from ..services.iscsi_client import IscsiClient
from ..tools import partial_dict
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc
from .host import get_hosts
# EndpointDoc response schema for Health.minimal(): nested
# (type, description) tuples mirroring HealthData's minimal output.
HEALTH_MINIMAL_SCHEMA = ({
    'client_perf': ({
        'read_bytes_sec': (int, ''),
        'read_op_per_sec': (int, ''),
        'recovering_bytes_per_sec': (int, ''),
        'write_bytes_sec': (int, ''),
        'write_op_per_sec': (int, ''),
    }, ''),
    'df': ({
        'stats': ({
            'total_avail_bytes': (int, ''),
            'total_bytes': (int, ''),
            'total_used_raw_bytes': (int, ''),
        }, '')
    }, ''),
    'fs_map': ({
        'filesystems': ([{
            'mdsmap': ({
                'session_autoclose': (int, ''),
                'balancer': (str, ''),
                'up': (str, ''),
                'last_failure_osd_epoch': (int, ''),
                'in': ([int], ''),
                'last_failure': (int, ''),
                'max_file_size': (int, ''),
                'explicitly_allowed_features': (int, ''),
                'damaged': ([int], ''),
                'tableserver': (int, ''),
                'failed': ([int], ''),
                'metadata_pool': (int, ''),
                'epoch': (int, ''),
                'stopped': ([int], ''),
                'max_mds': (int, ''),
                'compat': ({
                    'compat': (str, ''),
                    'ro_compat': (str, ''),
                    'incompat': (str, ''),
                }, ''),
                'required_client_features': (str, ''),
                'data_pools': ([int], ''),
                'info': (str, ''),
                'fs_name': (str, ''),
                'created': (str, ''),
                'standby_count_wanted': (int, ''),
                'enabled': (bool, ''),
                'modified': (str, ''),
                'session_timeout': (int, ''),
                'flags': (int, ''),
                'ever_allowed_features': (int, ''),
                'root': (int, ''),
            }, ''),
            'standbys': (str, ''),
        }], ''),
    }, ''),
    'health': ({
        'checks': (str, ''),
        'mutes': (str, ''),
        'status': (str, ''),
    }, ''),
    'hosts': (int, ''),
    'iscsi_daemons': ({
        'up': (int, ''),
        'down': (int, '')
    }, ''),
    'mgr_map': ({
        'active_name': (str, ''),
        'standbys': (str, '')
    }, ''),
    'mon_status': ({
        'monmap': ({
            'mons': (str, ''),
        }, ''),
        'quorum': ([int], '')
    }, ''),
    'osd_map': ({
        'osds': ([{
            'in': (int, ''),
            'up': (int, ''),
        }], '')
    }, ''),
    'pg_info': ({
        'object_stats': ({
            'num_objects': (int, ''),
            'num_object_copies': (int, ''),
            'num_objects_degraded': (int, ''),
            'num_objects_misplaced': (int, ''),
            'num_objects_unfound': (int, ''),
        }, ''),
        'pgs_per_osd': (int, ''),
        'statuses': (str, '')
    }, ''),
    'pools': (str, ''),
    'rgw': (int, ''),
    'scrub_status': (str, '')
})
class HealthData(object):
    """
    A class to be used in combination with BaseController to allow either
    "full" or "minimal" sets of health data to be collected.

    To function properly, it needs BaseCollector._has_permissions to be passed
    in as ``auth_callback``.
    """

    def __init__(self, auth_callback, minimal=True):
        self._has_permissions = auth_callback
        self._minimal = minimal

    def all_health(self):
        """Collect every health section the caller has READ permission for."""
        result = {
            "health": self.basic_health(),
        }
        if self._has_permissions(Permission.READ, Scope.MONITOR):
            result['mon_status'] = self.mon_status()
        if self._has_permissions(Permission.READ, Scope.CEPHFS):
            result['fs_map'] = self.fs_map()
        if self._has_permissions(Permission.READ, Scope.OSD):
            result['osd_map'] = self.osd_map()
            result['scrub_status'] = self.scrub_status()
            result['pg_info'] = self.pg_info()
        if self._has_permissions(Permission.READ, Scope.MANAGER):
            result['mgr_map'] = self.mgr_map()
        if self._has_permissions(Permission.READ, Scope.POOL):
            result['pools'] = self.pools()
            result['df'] = self.df()
            result['client_perf'] = self.client_perf()
        if self._has_permissions(Permission.READ, Scope.HOSTS):
            result['hosts'] = self.host_count()
        if self._has_permissions(Permission.READ, Scope.RGW):
            result['rgw'] = self.rgw_count()
        if self._has_permissions(Permission.READ, Scope.ISCSI):
            result['iscsi_daemons'] = self.iscsi_daemons()
        return result

    def basic_health(self):
        health_data = mgr.get("health")
        health = json.loads(health_data['json'])
        # Transform the `checks` dict into a list for the convenience
        # of rendering from javascript.
        checks = []
        for k, v in health['checks'].items():
            v['type'] = k
            checks.append(v)
        checks = sorted(checks, key=lambda c: c['severity'])
        health['checks'] = checks
        return health

    def client_perf(self):
        result = CephService.get_client_perf()
        if self._minimal:
            result = partial_dict(
                result,
                ['read_bytes_sec', 'read_op_per_sec',
                 'recovering_bytes_per_sec', 'write_bytes_sec',
                 'write_op_per_sec']
            )
        return result

    def df(self):
        df = mgr.get('df')
        # Per-device-class stats are never exposed by this endpoint.
        del df['stats_by_class']
        if self._minimal:
            df = dict(stats=partial_dict(
                df['stats'],
                ['total_avail_bytes', 'total_bytes',
                 'total_used_raw_bytes']
            ))
        return df

    def fs_map(self):
        fs_map = mgr.get('fs_map')
        if self._minimal:
            fs_map = partial_dict(fs_map, ['filesystems', 'standbys'])
            fs_map['filesystems'] = [partial_dict(item, ['mdsmap']) for
                                     item in fs_map['filesystems']]
            for fs in fs_map['filesystems']:
                mdsmap_info = fs['mdsmap']['info']
                min_mdsmap_info = dict()
                for k, v in mdsmap_info.items():
                    min_mdsmap_info[k] = partial_dict(v, ['state'])
                # BUG FIX: the reduced info dict was computed but never
                # written back, so the full per-MDS info leaked into the
                # "minimal" report. Store the stripped-down version.
                fs['mdsmap']['info'] = min_mdsmap_info
        return fs_map

    def host_count(self):
        return len(get_hosts())

    def iscsi_daemons(self):
        # A gateway counts as "up" when it answers a ping, "down" otherwise.
        up_counter = 0
        down_counter = 0
        for gateway_name in IscsiGatewaysConfig.get_gateways_config()['gateways']:
            try:
                IscsiClient.instance(gateway_name=gateway_name).ping()
                up_counter += 1
            except RequestException:
                down_counter += 1
        return {'up': up_counter, 'down': down_counter}

    def mgr_map(self):
        mgr_map = mgr.get('mgr_map')
        if self._minimal:
            mgr_map = partial_dict(mgr_map, ['active_name', 'standbys'])
        return mgr_map

    def mon_status(self):
        mon_status = json.loads(mgr.get('mon_status')['json'])
        if self._minimal:
            mon_status = partial_dict(mon_status, ['monmap', 'quorum'])
            mon_status['monmap'] = partial_dict(
                mon_status['monmap'], ['mons']
            )
            # Only the monitor *count* matters for the minimal report.
            mon_status['monmap']['mons'] = [{}] * \
                len(mon_status['monmap']['mons'])
        return mon_status

    def osd_map(self):
        osd_map = mgr.get('osd_map')
        assert osd_map is not None
        # Not needed, skip the effort of transmitting this to UI
        del osd_map['pg_temp']
        if self._minimal:
            osd_map = partial_dict(osd_map, ['osds'])
            osd_map['osds'] = [
                partial_dict(item, ['in', 'up', 'state'])
                for item in osd_map['osds']
            ]
        else:
            osd_map['tree'] = mgr.get('osd_map_tree')
            osd_map['crush'] = mgr.get('osd_map_crush')
            osd_map['crush_map_text'] = mgr.get('osd_map_crush_map_text')
            osd_map['osd_metadata'] = mgr.get('osd_metadata')
        return osd_map

    def pg_info(self):
        return CephService.get_pg_info()

    def pools(self):
        pools = CephService.get_pool_list_with_stats()
        if self._minimal:
            # Only the pool *count* matters for the minimal report.
            pools = [{}] * len(pools)
        return pools

    def rgw_count(self):
        return len(CephService.get_service_list('rgw'))

    def scrub_status(self):
        return CephService.get_scrub_status()
@APIRouter('/health')
@APIDoc("Display Detailed Cluster health Status", "Health")
class Health(BaseController):
    """Expose cluster health reports in full and minimal variants."""

    def __init__(self):
        super().__init__()
        # Two collectors sharing this controller's permission check:
        # `full` keeps every field, `minimal` strips payloads down.
        self.health_full = HealthData(self._has_permissions, minimal=False)
        self.health_minimal = HealthData(self._has_permissions, minimal=True)

    @Endpoint()
    def full(self):
        """Return the complete health report."""
        return self.health_full.all_health()

    @Endpoint()
    @EndpointDoc("Get Cluster's minimal health report",
                 responses={200: HEALTH_MINIMAL_SCHEMA})
    def minimal(self):
        return self.health_minimal.all_health()

    @Endpoint()
    def get_cluster_capacity(self):
        """Return cluster capacity figures from ClusterModel."""
        return ClusterModel.get_capacity()

    @Endpoint()
    def get_cluster_fsid(self):
        """Return the cluster fsid from the mgr config map."""
        return mgr.get('config')['fsid']
| 9,694 | 30.9967 | 82 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/home.py
|
# -*- coding: utf-8 -*-
import json
import logging
import os
import re
try:
from functools import lru_cache
except ImportError:
from ..plugins.lru_cache import lru_cache
import cherrypy
from cherrypy.lib.static import serve_file
from .. import mgr
from . import BaseController, Endpoint, Proxy, Router, UIRouter
logger = logging.getLogger("controllers.home")
class LanguageMixin(object):
    """Discover which frontend language builds exist on disk.

    Populates:
      LANGUAGES          -- locale directory names under the frontend path.
      LANGUAGES_PATH_MAP -- lower-cased locale -> {'lang', 'path'} lookup.
      DEFAULT_LANGUAGE   -- fallback locale read from the frontend package.json.
    """

    def __init__(self):
        try:
            self.LANGUAGES = {
                f
                for f in os.listdir(mgr.get_frontend_path())
                if os.path.isdir(os.path.join(mgr.get_frontend_path(), f))
            }
        except FileNotFoundError:
            logger.exception("Build directory missing")
            # NOTE(review): `{}` is an empty *dict* here while the success
            # path builds a *set*; iteration still works either way — confirm
            # whether a set literal was intended.
            self.LANGUAGES = {}
        self.LANGUAGES_PATH_MAP = {
            f.lower(): {
                'lang': f,
                'path': os.path.join(mgr.get_frontend_path(), f)
            }
            for f in self.LANGUAGES
        }
        # pre-populating with the primary language subtag.
        for lang in list(self.LANGUAGES_PATH_MAP.keys()):
            if '-' in lang:
                self.LANGUAGES_PATH_MAP[lang.split('-')[0]] = {
                    'lang': self.LANGUAGES_PATH_MAP[lang]['lang'],
                    'path': self.LANGUAGES_PATH_MAP[lang]['path']
                }
        # The default locale is recorded in the frontend's package.json.
        with open(os.path.normpath("{}/../package.json".format(mgr.get_frontend_path())),
                  "r") as f:
            config = json.load(f)
        self.DEFAULT_LANGUAGE = config['config']['locale']
        self.DEFAULT_LANGUAGE_PATH = os.path.join(mgr.get_frontend_path(),
                                                  self.DEFAULT_LANGUAGE)
        super().__init__()
@Router("/", secure=False)
class HomeController(BaseController, LanguageMixin):
LANG_TAG_SEQ_RE = re.compile(r'\s*([^,]+)\s*,?\s*')
LANG_TAG_RE = re.compile(
r'^(?P<locale>[a-zA-Z]{1,8}(-[a-zA-Z0-9]{1,8})*|\*)(;q=(?P<weight>[01]\.\d{0,3}))?$')
MAX_ACCEPTED_LANGS = 10
@lru_cache()
def _parse_accept_language(self, accept_lang_header):
result = []
for i, m in enumerate(self.LANG_TAG_SEQ_RE.finditer(accept_lang_header)):
if i >= self.MAX_ACCEPTED_LANGS:
logger.debug("reached max accepted languages, skipping remaining")
break
tag_match = self.LANG_TAG_RE.match(m.group(1))
if tag_match is None:
raise cherrypy.HTTPError(400, "Malformed 'Accept-Language' header")
locale = tag_match.group('locale').lower()
weight = tag_match.group('weight')
if weight:
try:
ratio = float(weight)
except ValueError:
raise cherrypy.HTTPError(400, "Malformed 'Accept-Language' header")
else:
ratio = 1.0
result.append((locale, ratio))
result.sort(key=lambda l: l[0])
result.sort(key=lambda l: l[1], reverse=True)
logger.debug("language preference: %s", result)
return [r[0] for r in result]
def _language_dir(self, langs):
for lang in langs:
if lang in self.LANGUAGES_PATH_MAP:
logger.debug("found directory for language '%s'", lang)
cherrypy.response.headers[
'Content-Language'] = self.LANGUAGES_PATH_MAP[lang]['lang']
return self.LANGUAGES_PATH_MAP[lang]['path']
logger.debug("Languages '%s' not available, falling back to %s",
langs, self.DEFAULT_LANGUAGE)
cherrypy.response.headers['Content-Language'] = self.DEFAULT_LANGUAGE
return self.DEFAULT_LANGUAGE_PATH
@Proxy()
def __call__(self, path, **params):
if not path:
path = "index.html"
if 'cd-lang' in cherrypy.request.cookie:
langs = [cherrypy.request.cookie['cd-lang'].value.lower()]
logger.debug("frontend language from cookie: %s", langs)
else:
if 'Accept-Language' in cherrypy.request.headers:
accept_lang_header = cherrypy.request.headers['Accept-Language']
langs = self._parse_accept_language(accept_lang_header)
else:
langs = [self.DEFAULT_LANGUAGE.lower()]
logger.debug("frontend language from headers: %s", langs)
base_dir = self._language_dir(langs)
full_path = os.path.join(base_dir, path)
# Block uplevel attacks
if not os.path.normpath(full_path).startswith(os.path.normpath(base_dir)):
raise cherrypy.HTTPError(403) # Forbidden
logger.debug("serving static content: %s", full_path)
if 'Vary' in cherrypy.response.headers:
cherrypy.response.headers['Vary'] = "{}, Accept-Language"
else:
cherrypy.response.headers['Vary'] = "Accept-Language"
cherrypy.response.headers['Cache-control'] = "no-cache"
return serve_file(full_path)
@UIRouter("/langs", secure=False)
class LangsController(BaseController, LanguageMixin):
@Endpoint('GET')
def __call__(self):
return list(self.LANGUAGES)
@UIRouter("/login", secure=False)
class LoginController(BaseController):
@Endpoint('GET', 'custom_banner')
def __call__(self):
return mgr.get_store('custom_login_banner')
| 5,379 | 35.107383 | 93 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/host.py
|
# -*- coding: utf-8 -*-
import copy
import os
import time
from collections import Counter
from typing import Dict, List, Optional
import cherrypy
from mgr_util import merge_dicts
from orchestrator import HostSpec
from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from ..services.ceph_service import CephService
from ..services.exception import handle_orchestrator_error
from ..services.orchestrator import OrchClient, OrchFeature
from ..tools import TaskManager, merge_list_of_dicts_by_key, str_to_bool
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
ReadPermission, RESTController, Task, UIRouter, UpdatePermission, \
allow_empty_body
from ._version import APIVersion
from .orchestrator import raise_if_no_orchestrator
# EndpointDoc response schema for Host.list():
# maps each host field name to a (type, description) tuple.
LIST_HOST_SCHEMA = {
    "hostname": (str, "Hostname"),
    "services": ([{
        "type": (str, "type of service"),
        "id": (str, "Service Id"),
    }], "Services related to the host"),
    "service_instances": ([{
        "type": (str, "type of service"),
        "count": (int, "Number of instances of the service"),
    }], "Service instances related to the host"),
    "ceph_version": (str, "Ceph version"),
    "addr": (str, "Host address"),
    "labels": ([str], "Labels related to the host"),
    "service_type": (str, ""),
    "sources": ({
        "ceph": (bool, ""),
        "orchestrator": (bool, "")
    }, "Host Sources"),
    "status": (str, "")
}
# EndpointDoc response schema for Host.inventory():
# nested (type, description) tuples describing a host's device inventory.
INVENTORY_SCHEMA = {
    "name": (str, "Hostname"),
    "addr": (str, "Host address"),
    "devices": ([{
        "rejected_reasons": ([str], ""),
        "available": (bool, "If the device can be provisioned to an OSD"),
        "path": (str, "Device path"),
        "sys_api": ({
            "removable": (str, ""),
            "ro": (str, ""),
            "vendor": (str, ""),
            "model": (str, ""),
            "rev": (str, ""),
            "sas_address": (str, ""),
            "sas_device_handle": (str, ""),
            "support_discard": (str, ""),
            "rotational": (str, ""),
            "nr_requests": (str, ""),
            "scheduler_mode": (str, ""),
            "partitions": ({
                "partition_name": ({
                    "start": (str, ""),
                    "sectors": (str, ""),
                    "sectorsize": (int, ""),
                    "size": (int, ""),
                    "human_readable_size": (str, ""),
                    "holders": ([str], "")
                }, "")
            }, ""),
            "sectors": (int, ""),
            "sectorsize": (str, ""),
            "size": (int, ""),
            "human_readable_size": (str, ""),
            "path": (str, ""),
            "locked": (int, "")
        }, ""),
        "lvs": ([{
            "name": (str, ""),
            "osd_id": (str, ""),
            "cluster_name": (str, ""),
            "type": (str, ""),
            "osd_fsid": (str, ""),
            "cluster_fsid": (str, ""),
            "osdspec_affinity": (str, ""),
            "block_uuid": (str, ""),
        }], ""),
        "human_readable_type": (str, "Device type. ssd or hdd"),
        "device_id": (str, "Device's udev ID"),
        "lsm_data": ({
            "serialNum": (str, ""),
            "transport": (str, ""),
            "mediaType": (str, ""),
            "rpm": (str, ""),
            "linkSpeed": (str, ""),
            "health": (str, ""),
            "ledSupport": ({
                "IDENTsupport": (str, ""),
                "IDENTstatus": (str, ""),
                "FAILsupport": (str, ""),
                "FAILstatus": (str, ""),
            }, ""),
            "errors": ([str], "")
        }, ""),
        "osd_ids": ([int], "Device OSD IDs")
    }], "Host devices"),
    "labels": ([str], "Host labels")
}
def host_task(name, metadata, wait_for=10.0):
    """Decorator factory wrapping a host operation in a ``host/<name>`` Task.

    :param name: task name suffix, e.g. 'add' -> 'host/add'.
    :param metadata: task metadata; may contain '{placeholder}' strings.
    :param wait_for: passed through to Task (presumably a wait timeout in
        seconds — confirm against the Task implementation in ..tools).
    """
    return Task("host/{}".format(name), metadata, wait_for)
def merge_hosts_by_hostname(ceph_hosts, orch_hosts):
    # type: (List[dict], List[HostSpec]) -> List[dict]
    """
    Merge Ceph hosts with orchestrator hosts by hostnames.
    :param ceph_hosts: hosts returned from mgr
    :type ceph_hosts: list of dict
    :param orch_hosts: hosts returned from ochestrator
    :type orch_hosts: list of HostSpec
    :return list of dict
    """
    merged = copy.deepcopy(ceph_hosts)
    remaining = {spec.hostname: spec.to_json() for spec in orch_hosts}
    # Sort labels.
    for spec in remaining.values():
        spec['labels'].sort()
    # Hosts known to both Ceph and the orchestrator: overlay orch data.
    for entry in merged:
        spec = remaining.pop(entry['hostname'], None)
        if spec is not None:
            entry.update(spec)
            entry['sources']['orchestrator'] = True
    # Hosts the orchestrator knows but Ceph does not.
    for spec in remaining.values():
        merged.append(merge_dicts(
            {
                'ceph_version': '',
                'services': [],
                'sources': {
                    'ceph': False,
                    'orchestrator': True
                }
            }, spec))
    for entry in merged:
        entry['service_instances'] = populate_service_instances(
            entry['hostname'], entry['services'])
    return merged
def populate_service_instances(hostname, services):
    """Aggregate a host's services into [{'type': ..., 'count': ...}] entries.

    Prefers the orchestrator's daemon list when available; otherwise falls
    back to the `services` list supplied by the caller.
    """
    orch = OrchClient.instance()
    if orch.available():
        daemon_types = [d.to_dict()['daemon_type']
                        for d in orch.services.list_daemons(hostname=hostname)]
    else:
        daemon_types = [svc['type'] for svc in services]
    counts = Counter(daemon_types)
    return [{'type': daemon_type, 'count': count}
            for daemon_type, count in counts.items()]
def get_hosts(sources=None):
    """
    Get hosts from various sources.

    :param sources: optional comma-separated filter, e.g. 'ceph,orchestrator'.
    """
    want_ceph = True
    want_orch = True
    if sources:
        requested = sources.split(',')
        want_ceph = 'ceph' in requested
        want_orch = 'orchestrator' in requested
    ceph_hosts = []
    if want_ceph:
        ceph_hosts = [
            merge_dicts(
                server, {
                    'addr': '',
                    'labels': [],
                    'sources': {
                        'ceph': True,
                        'orchestrator': False
                    },
                    'status': ''
                }) for server in mgr.list_servers()
        ]
    if want_orch:
        orch = OrchClient.instance()
        if orch.available():
            # The orchestrator view supersedes the plain Ceph view.
            return merge_hosts_by_hostname(ceph_hosts, orch.hosts.list())
    for host in ceph_hosts:
        host['service_instances'] = populate_service_instances(
            host['hostname'], host['services'])
    return ceph_hosts
def get_host(hostname: str) -> Dict:
    """
    Get a specific host from Ceph or Orchestrator (if available).
    :param hostname: The name of the host to fetch.
    :raises: cherrypy.HTTPError: If host not found.
    """
    found = next((host for host in get_hosts()
                  if host['hostname'] == hostname), None)
    if found is None:
        raise cherrypy.HTTPError(404)
    return found
def get_device_osd_map():
    """Get mappings from inventory devices to OSD IDs.

    :return: Returns a dictionary containing mappings. Note one device might
    shared between multiple OSDs.
    e.g. {
             'node1': {
                 'nvme0n1': [0, 1],
                 'vdc': [0],
                 'vdb': [1]
             },
             'node2': {
                 'vdc': [2]
             }
         }
    :rtype: dict
    """
    result: dict = {}
    for osd_id, osd_metadata in mgr.get('osd_metadata').items():
        hostname = osd_metadata.get('hostname')
        devices = osd_metadata.get('devices')
        if not hostname or not devices:
            continue
        host_map = result.setdefault(hostname, {})
        # an OSD backed by several devices lists them comma-separated,
        # e.g. 'sda,sdb'
        for device in devices.split(','):
            host_map.setdefault(device, []).append(int(osd_id))
    return result
def get_inventories(hosts: Optional[List[str]] = None,
                    refresh: Optional[bool] = None) -> List[dict]:
    """Get inventories from the Orchestrator and link devices with OSD IDs.

    :param hosts: Hostnames to query.
    :param refresh: Ask the Orchestrator to refresh the inventories. Note the
                    this is an asynchronous operation, the updated version of
                    inventories need to be re-qeuried later.
    :return: Returns list of inventory.
    :rtype: list
    """
    do_refresh = str_to_bool(refresh) if refresh is not None else False
    orch = OrchClient.instance()
    inventory_hosts = [host.to_json()
                       for host in orch.inventory.list(hosts=hosts,
                                                       refresh=do_refresh)]
    device_osd_map = get_device_osd_map()
    for inventory_host in inventory_hosts:
        osds_on_host = device_osd_map.get(inventory_host['name'])
        for device in inventory_host['devices']:
            if not osds_on_host:
                device['osd_ids'] = []
            else:  # pragma: no cover
                dev_name = os.path.basename(device['path'])
                device['osd_ids'] = sorted(osds_on_host.get(dev_name, []))
    return inventory_hosts
@allow_empty_body
def add_host(hostname: str, addr: Optional[str] = None,
             labels: Optional[List[str]] = None,
             status: Optional[str] = None):
    """Register *hostname* with the orchestrator; optionally enter maintenance."""
    client = OrchClient.instance()
    # Reject the request if the orchestrator already knows the host.
    Host().check_orchestrator_host_op(client, hostname)
    client.hosts.add(hostname, addr, labels)
    if status == 'maintenance':
        client.hosts.enter_maintenance(hostname)
@APIRouter('/host', Scope.HOSTS)
@APIDoc("Get Host Details", "Host")
class Host(RESTController):
    """REST endpoints for cluster hosts (merged Ceph + orchestrator views)."""

    @EndpointDoc("List Host Specifications",
                 parameters={
                     'sources': (str, 'Host Sources'),
                     'facts': (bool, 'Host Facts')
                 },
                 responses={200: LIST_HOST_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(1, 2))
    def list(self, sources=None, facts=False):
        """List hosts; with facts=true, merge in orchestrator host facts.

        Facts require an available orchestrator backend that supports
        `get_facts`; otherwise a 400 DashboardException is raised.
        """
        hosts = get_hosts(sources)
        orch = OrchClient.instance()
        if str_to_bool(facts):
            if orch.available():
                if not orch.get_missing_features(['get_facts']):
                    hosts_facts = orch.hosts.get_facts()
                    return merge_list_of_dicts_by_key(hosts, hosts_facts, 'hostname')
                raise DashboardException(
                    code='invalid_orchestrator_backend',  # pragma: no cover
                    msg="Please enable the cephadm orchestrator backend "
                    "(try `ceph orch set backend cephadm`)",
                    component='orchestrator',
                    http_status_code=400)
            raise DashboardException(code='orchestrator_status_unavailable',  # pragma: no cover
                                     msg="Please configure and enable the orchestrator if you "
                                         "really want to gather facts from hosts",
                                     component='orchestrator',
                                     http_status_code=400)
        return hosts

    @raise_if_no_orchestrator([OrchFeature.HOST_LIST, OrchFeature.HOST_ADD])
    @handle_orchestrator_error('host')
    @host_task('add', {'hostname': '{hostname}'})
    @EndpointDoc('',
                 parameters={
                     'hostname': (str, 'Hostname'),
                     'addr': (str, 'Network Address'),
                     'labels': ([str], 'Host Labels'),
                     'status': (str, 'Host Status')
                 },
                 responses={200: None, 204: None})
    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def create(self, hostname: str,
               addr: Optional[str] = None,
               labels: Optional[List[str]] = None,
               status: Optional[str] = None):  # pragma: no cover - requires realtime env
        """Add a host via the orchestrator (delegates to module-level add_host)."""
        add_host(hostname, addr, labels, status)

    @raise_if_no_orchestrator([OrchFeature.HOST_LIST, OrchFeature.HOST_REMOVE])
    @handle_orchestrator_error('host')
    @host_task('remove', {'hostname': '{hostname}'})
    @allow_empty_body
    def delete(self, hostname):  # pragma: no cover - requires realtime env
        """Remove a host that is known to the orchestrator."""
        orch_client = OrchClient.instance()
        self.check_orchestrator_host_op(orch_client, hostname, False)
        orch_client.hosts.remove(hostname)

    def check_orchestrator_host_op(self, orch_client, hostname, add=True):  # pragma:no cover
        """Check whether the host can be added to / removed from the orchestrator.

        :param orch_client: Orchestrator client
        :param add: True for adding host operation, False for removing host
        :raise DashboardException: if adding an already-known host or removing
            an unknown one.
        """
        host = orch_client.hosts.get(hostname)
        if add and host:
            raise DashboardException(
                code='orchestrator_add_existed_host',
                msg='{} is already in orchestrator'.format(hostname),
                component='orchestrator')
        if not add and not host:
            raise DashboardException(
                code='orchestrator_remove_nonexistent_host',
                msg='Remove a non-existent host {} from orchestrator'.format(hostname),
                component='orchestrator')

    @RESTController.Resource('GET')
    def devices(self, hostname):
        # (str) -> List
        return CephService.get_devices_by_host(hostname)

    @RESTController.Resource('GET')
    def smart(self, hostname):
        # type: (str) -> dict
        return CephService.get_smart_data_by_host(hostname)

    @RESTController.Resource('GET')
    @raise_if_no_orchestrator([OrchFeature.DEVICE_LIST])
    @handle_orchestrator_error('host')
    @EndpointDoc('Get inventory of a host',
                 parameters={
                     'hostname': (str, 'Hostname'),
                     'refresh': (str, 'Trigger asynchronous refresh'),
                 },
                 responses={200: INVENTORY_SCHEMA})
    def inventory(self, hostname, refresh=None):
        """Return the device inventory of one host ({} when unavailable)."""
        inventory = get_inventories([hostname], refresh)
        if inventory:
            return inventory[0]
        return {}

    @RESTController.Resource('POST')
    @UpdatePermission
    @raise_if_no_orchestrator([OrchFeature.DEVICE_BLINK_LIGHT])
    @handle_orchestrator_error('host')
    @host_task('identify_device', ['{hostname}', '{device}'], wait_for=2.0)
    def identify_device(self, hostname, device, duration):
        # type: (str, str, int) -> None
        """
        Identify a device by switching on the device light for N seconds.
        :param hostname: The hostname of the device to process.
        :param device: The device identifier to process, e.g. ``/dev/dm-0`` or
        ``ABC1234DEF567-1R1234_ABC8DE0Q``.
        :param duration: The duration in seconds how long the LED should flash.
        """
        orch = OrchClient.instance()
        TaskManager.current_task().set_progress(0)
        orch.blink_device_light(hostname, device, 'ident', True)
        # Report progress once per second so the task UI can show a countdown.
        for i in range(int(duration)):
            percentage = int(round(i / float(duration) * 100))
            TaskManager.current_task().set_progress(percentage)
            time.sleep(1)
        orch.blink_device_light(hostname, device, 'ident', False)
        TaskManager.current_task().set_progress(100)

    @RESTController.Resource('GET')
    @raise_if_no_orchestrator([OrchFeature.DAEMON_LIST])
    def daemons(self, hostname: str) -> List[dict]:
        """List the daemons running on the given host (orchestrator view)."""
        orch = OrchClient.instance()
        daemons = orch.services.list_daemons(hostname=hostname)
        return [d.to_dict() for d in daemons]

    @handle_orchestrator_error('host')
    def get(self, hostname: str) -> Dict:
        """
        Get the specified host.
        :raises: cherrypy.HTTPError: If host not found.
        """
        return get_host(hostname)

    @raise_if_no_orchestrator([OrchFeature.HOST_LABEL_ADD,
                               OrchFeature.HOST_LABEL_REMOVE,
                               OrchFeature.HOST_MAINTENANCE_ENTER,
                               OrchFeature.HOST_MAINTENANCE_EXIT,
                               OrchFeature.HOST_DRAIN])
    @handle_orchestrator_error('host')
    @EndpointDoc('',
                 parameters={
                     'hostname': (str, 'Hostname'),
                     'update_labels': (bool, 'Update Labels'),
                     'labels': ([str], 'Host Labels'),
                     'maintenance': (bool, 'Enter/Exit Maintenance'),
                     'force': (bool, 'Force Enter Maintenance'),
                     'drain': (bool, 'Drain Host')
                 },
                 responses={200: None, 204: None})
    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def set(self, hostname: str, update_labels: bool = False,
            labels: List[str] = None, maintenance: bool = False,
            force: bool = False, drain: bool = False):
        """
        Update the specified host.
        Note, this is only supported when Ceph Orchestrator is enabled.
        :param hostname: The name of the host to be processed.
        :param update_labels: To update the labels.
        :param labels: List of labels.
        :param maintenance: Enter/Exit maintenance mode.
        :param force: Force enter maintenance mode.
        :param drain: Drain host
        """
        orch = OrchClient.instance()
        host = get_host(hostname)
        if maintenance:
            # Toggle: enter when not currently in maintenance, exit otherwise.
            status = host['status']
            if status != 'maintenance':
                orch.hosts.enter_maintenance(hostname, force)
            if status == 'maintenance':
                orch.hosts.exit_maintenance(hostname)
        if drain:
            orch.hosts.drain(hostname)
        if update_labels:
            # only allow List[str] type for labels
            if not isinstance(labels, list):
                raise DashboardException(
                    msg='Expected list of labels. Please check API documentation.',
                    http_status_code=400,
                    component='orchestrator')
            current_labels = set(host['labels'])
            # Remove labels.
            remove_labels = list(current_labels.difference(set(labels)))
            for label in remove_labels:
                orch.hosts.remove_label(hostname, label)
            # Add labels.
            add_labels = list(set(labels).difference(current_labels))
            for label in add_labels:
                orch.hosts.add_label(hostname, label)
@UIRouter('/host', Scope.HOSTS)
class HostUi(BaseController):
    """UI-only endpoints backing the dashboard's host pages."""

    @Endpoint('GET')
    @ReadPermission
    @handle_orchestrator_error('host')
    def labels(self) -> List[str]:
        """
        Get all host labels.
        Note, host labels are only supported when Ceph Orchestrator is enabled.
        If Ceph Orchestrator is not enabled, an empty list is returned.
        :return: A sorted list of all distinct host labels.
        """
        labels = []
        orch = OrchClient.instance()
        if orch.available():
            for host in orch.hosts.list():
                labels.extend(host.labels)
        # Deduplicate BEFORE sorting. The previous code called
        # `labels.sort()` and then returned `list(set(labels))`, which
        # discarded the ordering again because sets are unordered.
        return sorted(set(labels))

    @Endpoint('GET')
    @ReadPermission
    @raise_if_no_orchestrator([OrchFeature.DEVICE_LIST])
    @handle_orchestrator_error('host')
    def inventory(self, refresh=None):
        """Get the device inventory of all hosts (optionally refreshed)."""
        return get_inventories(None, refresh)
| 19,748 | 36.403409 | 98 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/iscsi.py
|
# -*- coding: utf-8 -*-
# pylint: disable=C0302
# pylint: disable=too-many-branches
# pylint: disable=too-many-lines
import json
import re
from copy import deepcopy
from typing import Any, Dict, List, no_type_check
import cherrypy
import rados
import rbd
from .. import mgr
from ..exceptions import DashboardException
from ..rest_client import RequestException
from ..security import Scope
from ..services.exception import handle_request_error
from ..services.iscsi_cli import IscsiGatewaysConfig
from ..services.iscsi_client import IscsiClient
from ..services.iscsi_config import IscsiGatewayDoesNotExist
from ..services.rbd import format_bitmask
from ..services.tcmu_service import TcmuService
from ..tools import TaskManager, str_to_bool
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
ReadPermission, RESTController, Task, UIRouter, UpdatePermission
# Response schema of the iSCSI discovery-auth endpoints:
# maps field name -> (type, description) for the API docs.
ISCSI_SCHEMA = {
    'user': (str, 'username'),
    'password': (str, 'password'),
    'mutual_user': (str, ''),
    'mutual_password': (str, '')
}
@UIRouter('/iscsi', Scope.ISCSI)
class IscsiUi(BaseController):
    """UI-only helper endpoints for the dashboard's iSCSI pages."""

    # Supported range of the `ceph-iscsi` config format version.
    REQUIRED_CEPH_ISCSI_CONFIG_MIN_VERSION = 10
    REQUIRED_CEPH_ISCSI_CONFIG_MAX_VERSION = 11

    @Endpoint()
    @ReadPermission
    @no_type_check
    def status(self):
        """
        Report whether the iSCSI management feature is usable.

        :return: dict with key ``available`` (bool) and, when unavailable,
            a human-readable ``message``.
        """
        status = {'available': False}
        try:
            gateway = get_available_gateway()
        except DashboardException as e:
            status['message'] = str(e)
            return status
        try:
            config = IscsiClient.instance(gateway_name=gateway).get_config()
            if config['version'] < IscsiUi.REQUIRED_CEPH_ISCSI_CONFIG_MIN_VERSION or \
                    config['version'] > IscsiUi.REQUIRED_CEPH_ISCSI_CONFIG_MAX_VERSION:
                status['message'] = 'Unsupported `ceph-iscsi` config version. ' \
                                    'Expected >= {} and <= {} but found' \
                                    ' {}.'.format(IscsiUi.REQUIRED_CEPH_ISCSI_CONFIG_MIN_VERSION,
                                                  IscsiUi.REQUIRED_CEPH_ISCSI_CONFIG_MAX_VERSION,
                                                  config['version'])
                return status
            status['available'] = True
        except RequestException as e:
            if e.content:
                try:
                    content = json.loads(e.content)
                    content_message = content.get('message')
                except ValueError:
                    # Body is not JSON - use it verbatim as the message.
                    content_message = e.content
                if content_message:
                    status['message'] = content_message
        return status

    @Endpoint()
    @ReadPermission
    def version(self):
        """Return the `ceph-iscsi` config format version of a gateway."""
        gateway = get_available_gateway()
        config = IscsiClient.instance(gateway_name=gateway).get_config()
        return {
            'ceph_iscsi_config_version': config['version']
        }

    @Endpoint()
    @ReadPermission
    def settings(self):
        """
        Return the gateway settings, normalizing control limits so the UI
        can render typed form fields.
        """
        gateway = get_available_gateway()
        settings = IscsiClient.instance(gateway_name=gateway).get_settings()
        if 'target_controls_limits' in settings:
            target_default_controls = settings['target_default_controls']
            for ctrl_k, ctrl_v in target_default_controls.items():
                limits = settings['target_controls_limits'].get(ctrl_k, {})
                if 'type' not in limits:
                    # default
                    limits['type'] = 'int'
                # backward compatibility
                if target_default_controls[ctrl_k] in ['Yes', 'No']:
                    limits['type'] = 'bool'
                    target_default_controls[ctrl_k] = str_to_bool(ctrl_v)
                settings['target_controls_limits'][ctrl_k] = limits
        if 'disk_controls_limits' in settings:
            for backstore, disk_controls_limits in settings['disk_controls_limits'].items():
                disk_default_controls = settings['disk_default_controls'][backstore]
                for ctrl_k, ctrl_v in disk_default_controls.items():
                    limits = disk_controls_limits.get(ctrl_k, {})
                    if 'type' not in limits:
                        # default
                        limits['type'] = 'int'
                    settings['disk_controls_limits'][backstore][ctrl_k] = limits
        return settings

    @Endpoint()
    @ReadPermission
    def portals(self):
        """List reachable portals (gateway name + IP addresses), sorted."""
        portals = []
        gateways_config = IscsiGatewaysConfig.get_gateways_config()
        for name in gateways_config['gateways']:
            try:
                ip_addresses = IscsiClient.instance(gateway_name=name).get_ip_addresses()
                portals.append({'name': name, 'ip_addresses': ip_addresses['data']})
            except RequestException:
                # Unreachable gateways are simply omitted.
                pass
        return sorted(portals, key=lambda p: '{}.{}'.format(p['name'], p['ip_addresses']))

    @Endpoint()
    @ReadPermission
    def overview(self):
        """Aggregate gateway and image information for the overview page."""
        gateways_names = IscsiGatewaysConfig.get_gateways_config()['gateways'].keys()
        config = None
        # Use the first gateway that answers to fetch the shared config.
        for gateway_name in gateways_names:
            try:
                config = IscsiClient.instance(gateway_name=gateway_name).get_config()
                break
            except RequestException:
                pass
        result_gateways = self._get_gateways_info(gateways_names, config)
        result_images = self._get_images_info(config)
        return {
            'gateways': sorted(result_gateways, key=lambda g: g['name']),
            'images': sorted(result_images, key=lambda i: '{}/{}'.format(i['pool'], i['image']))
        }

    def _get_images_info(self, config):
        """Build per-image info dicts, enriched with tcmu-runner stats."""
        # Images info
        result_images = []
        if config:
            tcmu_info = TcmuService.get_iscsi_info()
            for _, disk_config in config['disks'].items():
                image = {
                    'pool': disk_config['pool'],
                    'image': disk_config['image'],
                    'backstore': disk_config['backstore'],
                    'optimized_since': None,
                    'stats': None,
                    'stats_history': None
                }
                tcmu_image_info = TcmuService.get_image_info(image['pool'],
                                                             image['image'],
                                                             tcmu_info)
                if tcmu_image_info:
                    if 'optimized_since' in tcmu_image_info:
                        image['optimized_since'] = tcmu_image_info['optimized_since']
                    if 'stats' in tcmu_image_info:
                        image['stats'] = tcmu_image_info['stats']
                    if 'stats_history' in tcmu_image_info:
                        image['stats_history'] = tcmu_image_info['stats_history']
                result_images.append(image)
        return result_images

    def _get_gateways_info(self, gateways_names, config):
        """Build per-gateway info dicts (state, target/session counts)."""
        result_gateways = []
        # Gateways info
        for gateway_name in gateways_names:
            gateway = {
                'name': gateway_name,
                'state': '',
                'num_targets': 'n/a',
                'num_sessions': 'n/a'
            }
            try:
                IscsiClient.instance(gateway_name=gateway_name).ping()
                gateway['state'] = 'up'
                if config:
                    gateway['num_sessions'] = 0
                    if gateway_name in config['gateways']:
                        gatewayinfo = IscsiClient.instance(
                            gateway_name=gateway_name).get_gatewayinfo()
                        gateway['num_sessions'] = gatewayinfo['num_sessions']
            except RequestException:
                gateway['state'] = 'down'
            if config:
                gateway['num_targets'] = len([target for _, target in config['targets'].items()
                                              if gateway_name in target['portals']])
            result_gateways.append(gateway)
        return result_gateways
@APIRouter('/iscsi', Scope.ISCSI)
@APIDoc("Iscsi Management API", "Iscsi")
class Iscsi(BaseController):
    """REST endpoints for the iSCSI discovery authentication."""

    @Endpoint('GET', 'discoveryauth')
    @ReadPermission
    @EndpointDoc("Get Iscsi discoveryauth Details",
                 responses={'200': [ISCSI_SCHEMA]})
    def get_discoveryauth(self):
        """Return the current discovery (CHAP) credentials."""
        gateway = get_available_gateway()
        return self._get_discoveryauth(gateway)

    @Endpoint('PUT', 'discoveryauth',
              query_params=['user', 'password', 'mutual_user', 'mutual_password'])
    @UpdatePermission
    @EndpointDoc("Set Iscsi discoveryauth",
                 parameters={
                     'user': (str, 'Username'),
                     'password': (str, 'Password'),
                     'mutual_user': (str, 'Mutual UserName'),
                     'mutual_password': (str, 'Mutual Password'),
                 })
    def set_discoveryauth(self, user, password, mutual_user, mutual_password):
        """Update the discovery (CHAP) credentials and return the new state."""
        validate_auth({
            'user': user,
            'password': password,
            'mutual_user': mutual_user,
            'mutual_password': mutual_password
        })
        gateway = get_available_gateway()
        config = IscsiClient.instance(gateway_name=gateway).get_config()
        gateway_names = list(config['gateways'].keys())
        # All configured gateways must be reachable before updating.
        validate_rest_api(gateway_names)
        IscsiClient.instance(gateway_name=gateway).update_discoveryauth(user,
                                                                        password,
                                                                        mutual_user,
                                                                        mutual_password)
        return self._get_discoveryauth(gateway)

    def _get_discoveryauth(self, gateway):
        """Read the discovery auth section from the given gateway's config."""
        config = IscsiClient.instance(gateway_name=gateway).get_config()
        user = config['discovery_auth']['username']
        password = config['discovery_auth']['password']
        mutual_user = config['discovery_auth']['mutual_username']
        mutual_password = config['discovery_auth']['mutual_password']
        return {
            'user': user,
            'password': password,
            'mutual_user': mutual_user,
            'mutual_password': mutual_password
        }
def iscsi_target_task(name, metadata, wait_for=2.0):
    """Build a ``Task`` decorator for an iSCSI target operation."""
    return Task(f'iscsi/target/{name}', metadata, wait_for)
@APIRouter('/iscsi/target', Scope.ISCSI)
@APIDoc("Get Iscsi Target Details", "IscsiTarget")
class IscsiTarget(RESTController):
def list(self):
gateway = get_available_gateway()
config = IscsiClient.instance(gateway_name=gateway).get_config()
targets = []
for target_iqn in config['targets'].keys():
target = IscsiTarget._config_to_target(target_iqn, config)
IscsiTarget._set_info(target)
targets.append(target)
return targets
def get(self, target_iqn):
gateway = get_available_gateway()
config = IscsiClient.instance(gateway_name=gateway).get_config()
if target_iqn not in config['targets']:
raise cherrypy.HTTPError(404)
target = IscsiTarget._config_to_target(target_iqn, config)
IscsiTarget._set_info(target)
return target
    @iscsi_target_task('delete', {'target_iqn': '{target_iqn}'})
    def delete(self, target_iqn):
        """
        Delete an iSCSI target.

        Refuses to act on unknown targets and on targets that still have
        active sessions logged in.
        """
        gateway = get_available_gateway()
        config = IscsiClient.instance(gateway_name=gateway).get_config()
        if target_iqn not in config['targets']:
            raise DashboardException(msg='Target does not exist',
                                     code='target_does_not_exist',
                                     component='iscsi')
        portal_names = list(config['targets'][target_iqn]['portals'].keys())
        validate_rest_api(portal_names)
        if portal_names:
            # Ask the first portal of the target for its session count.
            portal_name = portal_names[0]
            target_info = IscsiClient.instance(gateway_name=portal_name).get_targetinfo(target_iqn)
            if target_info['num_sessions'] > 0:
                raise DashboardException(msg='Target has active sessions',
                                         code='target_has_active_sessions',
                                         component='iscsi')
        IscsiTarget._delete(target_iqn, config, 0, 100)
    @iscsi_target_task('create', {'target_iqn': '{target_iqn}'})
    def create(self, target_iqn=None, target_controls=None, acl_enabled=None,
               auth=None, portals=None, disks=None, clients=None, groups=None):
        """
        Create a new iSCSI target after validating auth, portals, disks
        and groups. Progress is reported over the full 0-100 range.
        """
        target_controls = target_controls or {}
        portals = portals or []
        disks = disks or []
        clients = clients or []
        groups = groups or []
        validate_auth(auth)
        for client in clients:
            validate_auth(client['auth'])
        gateway = get_available_gateway()
        config = IscsiClient.instance(gateway_name=gateway).get_config()
        if target_iqn in config['targets']:
            raise DashboardException(msg='Target already exists',
                                     code='target_already_exists',
                                     component='iscsi')
        settings = IscsiClient.instance(gateway_name=gateway).get_settings()
        IscsiTarget._validate(target_iqn, target_controls, portals, disks, groups, settings)
        IscsiTarget._create(target_iqn, target_controls, acl_enabled, auth, portals, disks,
                            clients, groups, 0, 100, config, settings)
    @iscsi_target_task('edit', {'target_iqn': '{target_iqn}'})
    def set(self, target_iqn, new_target_iqn=None, target_controls=None, acl_enabled=None,
            auth=None, portals=None, disks=None, clients=None, groups=None):
        """
        Edit an existing target: first delete whatever changed (progress
        0-50), then re-create the desired state (progress 50-100).
        """
        target_controls = target_controls or {}
        portals = IscsiTarget._sorted_portals(portals)
        disks = IscsiTarget._sorted_disks(disks)
        clients = IscsiTarget._sorted_clients(clients)
        groups = IscsiTarget._sorted_groups(groups)
        validate_auth(auth)
        for client in clients:
            validate_auth(client['auth'])
        gateway = get_available_gateway()
        config = IscsiClient.instance(gateway_name=gateway).get_config()
        if target_iqn not in config['targets']:
            raise DashboardException(msg='Target does not exist',
                                     code='target_does_not_exist',
                                     component='iscsi')
        if target_iqn != new_target_iqn and new_target_iqn in config['targets']:
            raise DashboardException(msg='Target IQN already in use',
                                     code='target_iqn_already_in_use',
                                     component='iscsi')
        settings = IscsiClient.instance(gateway_name=gateway).get_settings()
        # Portals that disappear from the target must still be reachable,
        # since the gateway removal is executed against them.
        new_portal_names = {p['host'] for p in portals}
        old_portal_names = set(config['targets'][target_iqn]['portals'].keys())
        deleted_portal_names = list(old_portal_names - new_portal_names)
        validate_rest_api(deleted_portal_names)
        IscsiTarget._validate(new_target_iqn, target_controls, portals, disks, groups, settings)
        IscsiTarget._validate_delete(gateway, target_iqn, config, new_target_iqn, target_controls,
                                     disks, clients, groups)
        config = IscsiTarget._delete(target_iqn, config, 0, 50, new_target_iqn, target_controls,
                                     portals, disks, clients, groups)
        IscsiTarget._create(new_target_iqn, target_controls, acl_enabled, auth, portals, disks,
                            clients, groups, 50, 100, config, settings)
    @staticmethod
    def _delete(target_iqn, config, task_progress_begin, task_progress_end, new_target_iqn=None,
                new_target_controls=None, new_portals=None, new_disks=None, new_clients=None,
                new_groups=None):
        """
        Remove a target or, during an edit, only the parts that changed.

        With the default ``new_*`` arguments everything is deleted; when a
        desired state is passed in, only groups/clients/disks/portals that
        are no longer wanted are removed. Progress is reported between
        ``task_progress_begin`` and ``task_progress_end``.

        :return: the refreshed config as re-read from the gateway.
        """
        new_target_controls = new_target_controls or {}
        new_portals = new_portals or []
        new_disks = new_disks or []
        new_clients = new_clients or []
        new_groups = new_groups or []
        TaskManager.current_task().set_progress(task_progress_begin)
        target_config = config['targets'][target_iqn]
        if not target_config['portals'].keys():
            raise DashboardException(msg="Cannot delete a target that doesn't contain any portal",
                                     code='cannot_delete_target_without_portals',
                                     component='iscsi')
        target = IscsiTarget._config_to_target(target_iqn, config)
        # Spread the progress range evenly over all objects to delete.
        n_groups = len(target_config['groups'])
        n_clients = len(target_config['clients'])
        n_target_disks = len(target_config['disks'])
        task_progress_steps = n_groups + n_clients + n_target_disks
        task_progress_inc = 0
        if task_progress_steps != 0:
            task_progress_inc = int((task_progress_end - task_progress_begin) / task_progress_steps)
        gateway_name = list(target_config['portals'].keys())[0]
        # Deletion order matters: groups, then clients, then disks, then
        # portals, and finally the target itself if required.
        IscsiTarget._delete_groups(target_config, target, new_target_iqn,
                                   new_target_controls, new_groups, gateway_name,
                                   target_iqn, task_progress_inc)
        deleted_clients, deleted_client_luns = IscsiTarget._delete_clients(
            target_config, target, new_target_iqn, new_target_controls, new_clients,
            gateway_name, target_iqn, new_groups, task_progress_inc)
        IscsiTarget._delete_disks(target_config, target, new_target_iqn, new_target_controls,
                                  new_disks, deleted_clients, new_groups, deleted_client_luns,
                                  gateway_name, target_iqn, task_progress_inc)
        IscsiTarget._delete_gateways(target, new_portals, gateway_name, target_iqn)
        if IscsiTarget._target_deletion_required(target, new_target_iqn, new_target_controls):
            IscsiClient.instance(gateway_name=gateway_name).delete_target(target_iqn)
        TaskManager.current_task().set_progress(task_progress_end)
        return IscsiClient.instance(gateway_name=gateway_name).get_config()
@staticmethod
def _delete_gateways(target, new_portals, gateway_name, target_iqn):
old_portals_by_host = IscsiTarget._get_portals_by_host(target['portals'])
new_portals_by_host = IscsiTarget._get_portals_by_host(new_portals)
for old_portal_host, old_portal_ip_list in old_portals_by_host.items():
if IscsiTarget._target_portal_deletion_required(old_portal_host,
old_portal_ip_list,
new_portals_by_host):
IscsiClient.instance(gateway_name=gateway_name).delete_gateway(target_iqn,
old_portal_host)
    @staticmethod
    def _delete_disks(target_config, target, new_target_iqn, new_target_controls,
                      new_disks, deleted_clients, new_groups, deleted_client_luns,
                      gateway_name, target_iqn, task_progress_inc):
        """Delete target LUNs (and their backing disks) absent from the new config."""
        for image_id in target_config['disks']:
            if IscsiTarget._target_lun_deletion_required(target, new_target_iqn,
                                                         new_target_controls, new_disks, image_id):
                all_clients = target_config['clients'].keys()
                # The LUN must first be unmapped from every surviving client
                # that references it directly; clients that are being deleted
                # or that obtain disks via a group are skipped.
                not_deleted_clients = [c for c in all_clients if c not in deleted_clients
                                       and not IscsiTarget._client_in_group(target['groups'], c)
                                       and not IscsiTarget._client_in_group(new_groups, c)]
                for client_iqn in not_deleted_clients:
                    client_image_ids = target_config['clients'][client_iqn]['luns'].keys()
                    for client_image_id in client_image_ids:
                        if image_id == client_image_id and \
                                (client_iqn, client_image_id) not in deleted_client_luns:
                            IscsiClient.instance(gateway_name=gateway_name).delete_client_lun(
                                target_iqn, client_iqn, client_image_id)
                IscsiClient.instance(gateway_name=gateway_name).delete_target_lun(target_iqn,
                                                                                  image_id)
                # image_id has the form "<pool>/<image>".
                pool, image = image_id.split('/', 1)
                IscsiClient.instance(gateway_name=gateway_name).delete_disk(pool, image)
            TaskManager.current_task().inc_progress(task_progress_inc)
    @staticmethod
    def _delete_clients(target_config, target, new_target_iqn, new_target_controls,
                        new_clients, gateway_name, target_iqn, new_groups, task_progress_inc):
        """
        Delete clients (initiators) and client LUN mappings that are gone
        from the new config.

        :return: tuple ``(deleted_clients, deleted_client_luns)`` so later
            steps do not try to touch them again.
        """
        deleted_clients = []
        deleted_client_luns = []
        for client_iqn, client_config in target_config['clients'].items():
            if IscsiTarget._client_deletion_required(target, new_target_iqn, new_target_controls,
                                                     new_clients, client_iqn):
                deleted_clients.append(client_iqn)
                IscsiClient.instance(gateway_name=gateway_name).delete_client(target_iqn,
                                                                              client_iqn)
            else:
                # Client survives - only drop LUN mappings it no longer has.
                for image_id in list(client_config.get('luns', {}).keys()):
                    if IscsiTarget._client_lun_deletion_required(target, client_iqn, image_id,
                                                                 new_clients, new_groups):
                        deleted_client_luns.append((client_iqn, image_id))
                        IscsiClient.instance(gateway_name=gateway_name).delete_client_lun(
                            target_iqn, client_iqn, image_id)
            TaskManager.current_task().inc_progress(task_progress_inc)
        return deleted_clients, deleted_client_luns
    @staticmethod
    def _delete_groups(target_config, target, new_target_iqn, new_target_controls,
                       new_groups, gateway_name, target_iqn, task_progress_inc):
        """Delete removed groups, or shrink surviving ones to their new members/disks."""
        for group_id in list(target_config['groups'].keys()):
            if IscsiTarget._group_deletion_required(target, new_target_iqn, new_target_controls,
                                                    new_groups, group_id):
                IscsiClient.instance(gateway_name=gateway_name).delete_group(target_iqn,
                                                                             group_id)
            else:
                # Group survives: remove only members/disks that left it.
                group = IscsiTarget._get_group(new_groups, group_id)
                old_group_disks = set(target_config['groups'][group_id]['disks'].keys())
                new_group_disks = {'{}/{}'.format(x['pool'], x['image']) for x in group['disks']}
                local_deleted_disks = list(old_group_disks - new_group_disks)
                old_group_members = set(target_config['groups'][group_id]['members'])
                new_group_members = set(group['members'])
                local_deleted_members = list(old_group_members - new_group_members)
                if local_deleted_disks or local_deleted_members:
                    IscsiClient.instance(gateway_name=gateway_name).update_group(
                        target_iqn, group_id, local_deleted_members, local_deleted_disks)
            TaskManager.current_task().inc_progress(task_progress_inc)
@staticmethod
def _get_group(groups, group_id):
for group in groups:
if group['group_id'] == group_id:
return group
return None
@staticmethod
def _group_deletion_required(target, new_target_iqn, new_target_controls,
new_groups, group_id):
if IscsiTarget._target_deletion_required(target, new_target_iqn, new_target_controls):
return True
new_group = IscsiTarget._get_group(new_groups, group_id)
if not new_group:
return True
return False
@staticmethod
def _get_client(clients, client_iqn):
for client in clients:
if client['client_iqn'] == client_iqn:
return client
return None
@staticmethod
def _client_deletion_required(target, new_target_iqn, new_target_controls,
new_clients, client_iqn):
if IscsiTarget._target_deletion_required(target, new_target_iqn, new_target_controls):
return True
new_client = IscsiTarget._get_client(new_clients, client_iqn)
if not new_client:
return True
return False
@staticmethod
def _client_in_group(groups, client_iqn):
for group in groups:
if client_iqn in group['members']:
return True
return False
    @staticmethod
    def _client_lun_deletion_required(target, client_iqn, image_id, new_clients, new_groups):
        """Decide whether a client's LUN mapping has to be removed."""
        new_client = IscsiTarget._get_client(new_clients, client_iqn)
        if not new_client:
            return True
        # Disks inherited from groups must be considered
        was_in_group = IscsiTarget._client_in_group(target['groups'], client_iqn)
        is_in_group = IscsiTarget._client_in_group(new_groups, client_iqn)
        if not was_in_group and is_in_group:
            # Client joins a group: direct LUNs are dropped in favour of
            # the group's disks.
            return True
        if is_in_group:
            # Group membership unchanged (or left+rejoined): LUNs are
            # managed through the group, nothing to delete here.
            return False
        new_lun = IscsiTarget._get_disk(new_client.get('luns', []), image_id)
        if not new_lun:
            return True
        old_client = IscsiTarget._get_client(target['clients'], client_iqn)
        if not old_client:
            return False
        # Mapping changed in any way -> delete (it will be re-created).
        old_lun = IscsiTarget._get_disk(old_client.get('luns', []), image_id)
        return new_lun != old_lun
@staticmethod
def _get_disk(disks, image_id):
for disk in disks:
if '{}/{}'.format(disk['pool'], disk['image']) == image_id:
return disk
return None
    @staticmethod
    def _target_lun_deletion_required(target, new_target_iqn, new_target_controls,
                                      new_disks, image_id):
        """Decide whether a target LUN has to be deleted (and later re-created)."""
        if IscsiTarget._target_deletion_required(target, new_target_iqn, new_target_controls):
            return True
        new_disk = IscsiTarget._get_disk(new_disks, image_id)
        if not new_disk:
            return True
        old_disk = IscsiTarget._get_disk(target['disks'], image_id)
        # Compare everything except 'controls': control changes can be
        # applied in place and do not require deleting the LUN.
        # NOTE(review): assumes both disk dicts carry a 'controls' key and
        # that `old_disk` is found (image_id comes from the current target
        # config) - confirm against _config_to_target.
        new_disk_without_controls = deepcopy(new_disk)
        new_disk_without_controls.pop('controls')
        old_disk_without_controls = deepcopy(old_disk)
        old_disk_without_controls.pop('controls')
        if new_disk_without_controls != old_disk_without_controls:
            return True
        return False
@staticmethod
def _target_portal_deletion_required(old_portal_host, old_portal_ip_list, new_portals_by_host):
if old_portal_host not in new_portals_by_host:
return True
if sorted(old_portal_ip_list) != sorted(new_portals_by_host[old_portal_host]):
return True
return False
@staticmethod
def _target_deletion_required(target, new_target_iqn, new_target_controls):
gateway = get_available_gateway()
settings = IscsiClient.instance(gateway_name=gateway).get_settings()
if target['target_iqn'] != new_target_iqn:
return True
if settings['api_version'] < 2 and target['target_controls'] != new_target_controls:
return True
return False
    @staticmethod
    def _validate(target_iqn, target_controls, portals, disks, groups, settings):
        """Validate a target definition before it is created or edited."""
        if not target_iqn:
            raise DashboardException(msg='Target IQN is required',
                                     code='target_iqn_required',
                                     component='iscsi')
        minimum_gateways = max(1, settings['config']['minimum_gateways'])
        portals_by_host = IscsiTarget._get_portals_by_host(portals)
        if len(portals_by_host.keys()) < minimum_gateways:
            if minimum_gateways == 1:
                msg = 'At least one portal is required'
            else:
                msg = 'At least {} portals are required'.format(minimum_gateways)
            raise DashboardException(msg=msg,
                                     code='portals_required',
                                     component='iscsi')
        # 'target_controls_limits' was introduced in ceph-iscsi > 3.2
        # When using an older `ceph-iscsi` version these validations will
        # NOT be executed beforehand
        IscsiTarget._validate_target_controls_limits(settings, target_controls)
        portal_names = [p['host'] for p in portals]
        validate_rest_api(portal_names)
        IscsiTarget._validate_disks(disks, settings)
        IscsiTarget._validate_initiators(groups)
@staticmethod
def _validate_initiators(groups):
initiators = [] # type: List[Any]
for group in groups:
initiators = initiators + group['members']
if len(initiators) != len(set(initiators)):
raise DashboardException(msg='Each initiator can only be part of 1 group at a time',
code='initiator_in_multiple_groups',
component='iscsi')
@staticmethod
def _validate_disks(disks, settings):
for disk in disks:
pool = disk['pool']
image = disk['image']
backstore = disk['backstore']
required_rbd_features = settings['required_rbd_features'][backstore]
unsupported_rbd_features = settings['unsupported_rbd_features'][backstore]
IscsiTarget._validate_image(pool, image, backstore, required_rbd_features,
unsupported_rbd_features)
IscsiTarget._validate_disk_controls_limits(settings, disk, backstore)
    @staticmethod
    def _validate_disk_controls_limits(settings, disk, backstore):
        """Check each disk control value against the gateway's min/max limits."""
        # 'disk_controls_limits' was introduced in ceph-iscsi > 3.2
        # When using an older `ceph-iscsi` version these validations will
        # NOT be executed beforehand
        if 'disk_controls_limits' in settings:
            for disk_control_name, disk_control_value in disk['controls'].items():
                limits = settings['disk_controls_limits'][backstore].get(disk_control_name)
                if limits is not None:
                    min_value = limits.get('min')
                    if min_value is not None and disk_control_value < min_value:
                        raise DashboardException(msg='Disk control {} must be >= '
                                                     '{}'.format(disk_control_name, min_value),
                                                 code='disk_control_invalid_min',
                                                 component='iscsi')
                    max_value = limits.get('max')
                    if max_value is not None and disk_control_value > max_value:
                        raise DashboardException(msg='Disk control {} must be <= '
                                                     '{}'.format(disk_control_name, max_value),
                                                 code='disk_control_invalid_max',
                                                 component='iscsi')
    @staticmethod
    def _validate_target_controls_limits(settings, target_controls):
        """Check each target control value against the gateway's min/max limits."""
        # Only available when the gateway reports limits (ceph-iscsi > 3.2).
        if 'target_controls_limits' in settings:
            for target_control_name, target_control_value in target_controls.items():
                limits = settings['target_controls_limits'].get(target_control_name)
                if limits is not None:
                    min_value = limits.get('min')
                    if min_value is not None and target_control_value < min_value:
                        raise DashboardException(msg='Target control {} must be >= '
                                                     '{}'.format(target_control_name, min_value),
                                                 code='target_control_invalid_min',
                                                 component='iscsi')
                    max_value = limits.get('max')
                    if max_value is not None and target_control_value > max_value:
                        raise DashboardException(msg='Target control {} must be <= '
                                                     '{}'.format(target_control_name, max_value),
                                                 code='target_control_invalid_max',
                                                 component='iscsi')
    @staticmethod
    def _validate_image(pool, image, backstore, required_rbd_features, unsupported_rbd_features):
        """
        Ensure the RBD image exists and its feature bitmask is compatible
        with the chosen backstore.
        """
        try:
            ioctx = mgr.rados.open_ioctx(pool)
            try:
                with rbd.Image(ioctx, image) as img:
                    # All required feature bits must be set ...
                    if img.features() & required_rbd_features != required_rbd_features:
                        raise DashboardException(msg='Image {} cannot be exported using {} '
                                                     'backstore because required features are '
                                                     'missing (required features are '
                                                     '{})'.format(image,
                                                                  backstore,
                                                                  format_bitmask(
                                                                      required_rbd_features)),
                                                 code='image_missing_required_features',
                                                 component='iscsi')
                    # ... and none of the unsupported bits may be set.
                    if img.features() & unsupported_rbd_features != 0:
                        raise DashboardException(msg='Image {} cannot be exported using {} '
                                                     'backstore because it contains unsupported '
                                                     'features ('
                                                     '{})'.format(image,
                                                                  backstore,
                                                                  format_bitmask(
                                                                      unsupported_rbd_features)),
                                                 code='image_contains_unsupported_features',
                                                 component='iscsi')
            except rbd.ImageNotFound:
                raise DashboardException(msg='Image {} does not exist'.format(image),
                                         code='image_does_not_exist',
                                         component='iscsi')
        except rados.ObjectNotFound:
            raise DashboardException(msg='Pool {} does not exist'.format(pool),
                                     code='pool_does_not_exist',
                                     component='iscsi')
    @staticmethod
    def _validate_delete(gateway, target_iqn, config, new_target_iqn=None, new_target_controls=None,
                         new_disks=None, new_clients=None, new_groups=None):
        """
        Reject a delete/edit that would remove a client which is still
        logged in.
        """
        new_target_controls = new_target_controls or {}
        new_disks = new_disks or []
        new_clients = new_clients or []
        new_groups = new_groups or []
        target_config = config['targets'][target_iqn]
        target = IscsiTarget._config_to_target(target_iqn, config)
        for client_iqn in list(target_config['clients'].keys()):
            if IscsiTarget._client_deletion_required(target, new_target_iqn, new_target_controls,
                                                     new_clients, client_iqn):
                client_info = IscsiClient.instance(gateway_name=gateway).get_clientinfo(target_iqn,
                                                                                        client_iqn)
                if client_info.get('state', {}).get('LOGGED_IN', []):
                    raise DashboardException(msg="Client '{}' cannot be deleted until it's logged "
                                                 "out".format(client_iqn),
                                             code='client_logged_in',
                                             component='iscsi')
@staticmethod
def _update_targetauth(config, target_iqn, auth, gateway_name):
# Target level authentication was introduced in ceph-iscsi config v11
if config['version'] > 10:
user = auth['user']
password = auth['password']
mutual_user = auth['mutual_user']
mutual_password = auth['mutual_password']
IscsiClient.instance(gateway_name=gateway_name).update_targetauth(target_iqn,
user,
password,
mutual_user,
mutual_password)
@staticmethod
def _update_targetacl(target_config, target_iqn, acl_enabled, gateway_name):
if not target_config or target_config['acl_enabled'] != acl_enabled:
targetauth_action = ('enable_acl' if acl_enabled else 'disable_acl')
IscsiClient.instance(gateway_name=gateway_name).update_targetacl(target_iqn,
targetauth_action)
@staticmethod
def _is_auth_equal(auth_config, auth):
return auth['user'] == auth_config['username'] and \
auth['password'] == auth_config['password'] and \
auth['mutual_user'] == auth_config['mutual_username'] and \
auth['mutual_password'] == auth_config['mutual_password']
    @staticmethod
    @handle_request_error('iscsi')
    def _create(target_iqn, target_controls, acl_enabled,
                auth, portals, disks, clients, groups,
                task_progress_begin, task_progress_end, config, settings):
        """
        Create the target and everything attached to it (gateways, ACL/auth,
        disks, clients, groups), skipping pieces that already exist in
        ``config``. Progress is reported between ``task_progress_begin``
        and ``task_progress_end``.
        """
        target_config = config['targets'].get(target_iqn, None)
        TaskManager.current_task().set_progress(task_progress_begin)
        portals_by_host = IscsiTarget._get_portals_by_host(portals)
        # Spread the progress range evenly over all objects to create.
        n_hosts = len(portals_by_host)
        n_disks = len(disks)
        n_clients = len(clients)
        n_groups = len(groups)
        task_progress_steps = n_hosts + n_disks + n_clients + n_groups
        task_progress_inc = 0
        if task_progress_steps != 0:
            task_progress_inc = int((task_progress_end - task_progress_begin) / task_progress_steps)
        gateway_name = portals[0]['host']
        if not target_config:
            IscsiClient.instance(gateway_name=gateway_name).create_target(target_iqn,
                                                                          target_controls)
        IscsiTarget._create_gateways(portals_by_host, target_config,
                                     gateway_name, target_iqn, task_progress_inc)
        # ACL/auth only needs touching when it is new or actually changed.
        update_acl = not target_config or \
            acl_enabled != target_config['acl_enabled'] or \
            not IscsiTarget._is_auth_equal(target_config['auth'], auth)
        if update_acl:
            IscsiTarget._update_acl(acl_enabled, config, target_iqn,
                                    auth, gateway_name, target_config)
        IscsiTarget._create_disks(disks, config, gateway_name, target_config,
                                  target_iqn, settings, task_progress_inc)
        IscsiTarget._create_clients(clients, target_config, gateway_name,
                                    target_iqn, groups, task_progress_inc)
        IscsiTarget._create_groups(groups, target_config, gateway_name,
                                   target_iqn, task_progress_inc, target_controls,
                                   task_progress_end)
    @staticmethod
    def _update_acl(acl_enabled, config, target_iqn, auth, gateway_name, target_config):
        """
        Apply ACL state and target auth. The order differs per direction:
        when enabling, credentials are pushed before the ACL is switched
        on; when disabling, the ACL is switched off first (presumably a
        gateway-side ordering requirement - confirm against ceph-iscsi).
        """
        if acl_enabled:
            IscsiTarget._update_targetauth(config, target_iqn, auth, gateway_name)
            IscsiTarget._update_targetacl(target_config, target_iqn, acl_enabled,
                                          gateway_name)
        else:
            IscsiTarget._update_targetacl(target_config, target_iqn, acl_enabled,
                                          gateway_name)
            IscsiTarget._update_targetauth(config, target_iqn, auth, gateway_name)
@staticmethod
def _create_gateways(portals_by_host, target_config, gateway_name, target_iqn,
task_progress_inc):
for host, ip_list in portals_by_host.items():
if not target_config or host not in target_config['portals']:
IscsiClient.instance(gateway_name=gateway_name).create_gateway(target_iqn,
host,
ip_list)
TaskManager.current_task().inc_progress(task_progress_inc)
    @staticmethod
    def _create_groups(groups, target_config, gateway_name, target_iqn, task_progress_inc,
                       target_controls, task_progress_end):
        """Create/extend host groups, then re-apply target controls if needed."""
        for group in groups:
            group_id = group['group_id']
            members = group['members']
            image_ids = []
            for disk in group['disks']:
                image_ids.append('{}/{}'.format(disk['pool'], disk['image']))
            if target_config and group_id in target_config['groups']:
                old_members = target_config['groups'][group_id]['members']
                old_disks = target_config['groups'][group_id]['disks'].keys()
            # `old_members`/`old_disks` are only read when the group already
            # exists, in which case the branch above has bound them.
            if not target_config or group_id not in target_config['groups'] or \
                    list(set(group['members']) - set(old_members)) or \
                    list(set(image_ids) - set(old_disks)):
                IscsiClient.instance(gateway_name=gateway_name).create_group(
                    target_iqn, group_id, members, image_ids)
            TaskManager.current_task().inc_progress(task_progress_inc)
        if target_controls:
            # Controls can be reconfigured in place (no target re-create).
            if not target_config or target_controls != target_config['controls']:
                IscsiClient.instance(gateway_name=gateway_name).reconfigure_target(
                    target_iqn, target_controls)
        TaskManager.current_task().set_progress(task_progress_end)
    @staticmethod
    def _create_clients(clients, target_config, gateway_name, target_iqn, groups,
                        task_progress_inc):
        """Create missing initiators, update their CHAP auth and map their LUNs.

        For each client: it is created when absent from ``target_config``; its
        auth is (re)applied when absent or changed; each LUN is mapped unless it
        is already mapped directly or inherited through a group membership.
        Task progress is bumped once per client.
        """
        for client in clients:
            client_iqn = client['client_iqn']
            if not target_config or client_iqn not in target_config['clients']:
                IscsiClient.instance(gateway_name=gateway_name).create_client(target_iqn,
                                                                              client_iqn)
            if not target_config or client_iqn not in target_config['clients'] or \
                    not IscsiTarget._is_auth_equal(target_config['clients'][client_iqn]['auth'],
                                                   client['auth']):
                user = client['auth']['user']
                password = client['auth']['password']
                m_user = client['auth']['mutual_user']
                m_password = client['auth']['mutual_password']
                IscsiClient.instance(gateway_name=gateway_name).create_client_auth(
                    target_iqn, client_iqn, user, password, m_user, m_password)
            for lun in client['luns']:
                pool = lun['pool']
                image = lun['image']
                image_id = '{}/{}'.format(pool, image)
                # Disks inherited from groups must be considered
                group_disks = []
                for group in groups:
                    if client_iqn in group['members']:
                        group_disks = ['{}/{}'.format(x['pool'], x['image'])
                                       for x in group['disks']]
                if not target_config or client_iqn not in target_config['clients'] or \
                        (image_id not in target_config['clients'][client_iqn]['luns']
                         and image_id not in group_disks):
                    IscsiClient.instance(gateway_name=gateway_name).create_client_lun(
                        target_iqn, client_iqn, image_id)
            TaskManager.current_task().inc_progress(task_progress_inc)
    @staticmethod
    def _create_disks(disks, config, gateway_name, target_config, target_iqn, settings,
                      task_progress_inc):
        """Create missing RBD-backed disks, attach them as LUNs and sync controls.

        A disk is created when absent from the global ``config``; it is attached
        to the target when absent from ``target_config``. Controls removed by
        the user are reset to the backstore's defaults before reconfiguring.
        Task progress is bumped once per disk.
        """
        for disk in disks:
            pool = disk['pool']
            image = disk['image']
            image_id = '{}/{}'.format(pool, image)
            backstore = disk['backstore']
            # wwn/lun may be absent for newly added disks (assigned by ceph-iscsi)
            wwn = disk.get('wwn')
            lun = disk.get('lun')
            if image_id not in config['disks']:
                IscsiClient.instance(gateway_name=gateway_name).create_disk(pool,
                                                                            image,
                                                                            backstore,
                                                                            wwn)
            if not target_config or image_id not in target_config['disks']:
                IscsiClient.instance(gateway_name=gateway_name).create_target_lun(target_iqn,
                                                                                  image_id,
                                                                                  lun)
            controls = disk['controls']
            d_conf_controls = {}
            if image_id in config['disks']:
                d_conf_controls = config['disks'][image_id]['controls']
                disk_default_controls = settings['disk_default_controls'][backstore]
                for old_control in d_conf_controls.keys():
                    # If control was removed, restore the default value
                    if old_control not in controls:
                        controls[old_control] = disk_default_controls[old_control]
            if (image_id not in config['disks'] or d_conf_controls != controls) and controls:
                IscsiClient.instance(gateway_name=gateway_name).reconfigure_disk(pool,
                                                                                 image,
                                                                                 controls)
            TaskManager.current_task().inc_progress(task_progress_inc)
    @staticmethod
    def _config_to_target(target_iqn, config):
        """Convert a raw ceph-iscsi config entry into the dashboard target dict.

        Builds sorted ``portals``, ``disks``, ``clients`` and ``groups`` lists
        from ``config['targets'][target_iqn]``. Fields introduced with config
        v11 (per-disk ``lun`` and target-level ``auth``) are only included when
        ``config['version'] > 10``.
        """
        target_config = config['targets'][target_iqn]
        portals = []
        for host, portal_config in target_config['portals'].items():
            for portal_ip in portal_config['portal_ip_addresses']:
                portal = {
                    'host': host,
                    'ip': portal_ip
                }
                portals.append(portal)
        portals = IscsiTarget._sorted_portals(portals)
        disks = []
        for target_disk in target_config['disks']:
            disk_config = config['disks'][target_disk]
            disk = {
                'pool': disk_config['pool'],
                'image': disk_config['image'],
                'controls': disk_config['controls'],
                'backstore': disk_config['backstore'],
                'wwn': disk_config['wwn']
            }
            # lun_id was introduced in ceph-iscsi config v11
            if config['version'] > 10:
                disk['lun'] = target_config['disks'][target_disk]['lun_id']
            disks.append(disk)
        disks = IscsiTarget._sorted_disks(disks)
        clients = []
        for client_iqn, client_config in target_config['clients'].items():
            luns = []
            for client_lun in client_config['luns'].keys():
                # Keys are '<pool>/<image>'; split only on the first slash.
                pool, image = client_lun.split('/', 1)
                lun = {
                    'pool': pool,
                    'image': image
                }
                luns.append(lun)
            user = client_config['auth']['username']
            password = client_config['auth']['password']
            mutual_user = client_config['auth']['mutual_username']
            mutual_password = client_config['auth']['mutual_password']
            client = {
                'client_iqn': client_iqn,
                'luns': luns,
                'auth': {
                    'user': user,
                    'password': password,
                    'mutual_user': mutual_user,
                    'mutual_password': mutual_password
                }
            }
            clients.append(client)
        clients = IscsiTarget._sorted_clients(clients)
        groups = []
        for group_id, group_config in target_config['groups'].items():
            group_disks = []
            for group_disk_key, _ in group_config['disks'].items():
                pool, image = group_disk_key.split('/', 1)
                group_disk = {
                    'pool': pool,
                    'image': image
                }
                group_disks.append(group_disk)
            group = {
                'group_id': group_id,
                'disks': group_disks,
                'members': group_config['members'],
            }
            groups.append(group)
        groups = IscsiTarget._sorted_groups(groups)
        target_controls = target_config['controls']
        acl_enabled = target_config['acl_enabled']
        target = {
            'target_iqn': target_iqn,
            'portals': portals,
            'disks': disks,
            'clients': clients,
            'groups': groups,
            'target_controls': target_controls,
            'acl_enabled': acl_enabled
        }
        # Target level authentication was introduced in ceph-iscsi config v11
        if config['version'] > 10:
            target_user = target_config['auth']['username']
            target_password = target_config['auth']['password']
            target_mutual_user = target_config['auth']['mutual_username']
            target_mutual_password = target_config['auth']['mutual_password']
            target['auth'] = {
                'user': target_user,
                'password': target_password,
                'mutual_user': target_mutual_user,
                'mutual_password': target_mutual_password
            }
        return target
@staticmethod
def _is_executing(target_iqn):
executing_tasks, _ = TaskManager.list()
for t in executing_tasks:
if t.name.startswith('iscsi/target') and t.metadata.get('target_iqn') == target_iqn:
return True
return False
    @staticmethod
    def _set_info(target):
        """Enrich ``target`` (and its clients) in place with live gateway info.

        Bails out silently when the target has no portals, an iscsi/target task
        is currently running, or any portal gateway cannot be pinged. A 404 from
        the gateway is swallowed (the target/client may have been removed
        concurrently, e.g. via gwcli); other request errors are re-raised.
        """
        if not target['portals']:
            return
        target_iqn = target['target_iqn']
        # During task execution, additional info is not available
        if IscsiTarget._is_executing(target_iqn):
            return
        # If any portal is down, additional info is not available
        for portal in target['portals']:
            try:
                IscsiClient.instance(gateway_name=portal['host']).ping()
            except (IscsiGatewayDoesNotExist, RequestException):
                return
        gateway_name = target['portals'][0]['host']
        try:
            target_info = IscsiClient.instance(gateway_name=gateway_name).get_targetinfo(
                target_iqn)
            target['info'] = target_info
            for client in target['clients']:
                client_iqn = client['client_iqn']
                client_info = IscsiClient.instance(gateway_name=gateway_name).get_clientinfo(
                    target_iqn, client_iqn)
                client['info'] = client_info
        except RequestException as e:
            # Target/Client has been removed in the meanwhile (e.g. using gwcli)
            if e.status_code != 404:
                raise e
@staticmethod
def _sorted_portals(portals):
portals = portals or []
return sorted(portals, key=lambda p: '{}.{}'.format(p['host'], p['ip']))
@staticmethod
def _sorted_disks(disks):
disks = disks or []
return sorted(disks, key=lambda d: '{}.{}'.format(d['pool'], d['image']))
@staticmethod
def _sorted_clients(clients):
clients = clients or []
for client in clients:
client['luns'] = sorted(client['luns'],
key=lambda d: '{}.{}'.format(d['pool'], d['image']))
return sorted(clients, key=lambda c: c['client_iqn'])
@staticmethod
def _sorted_groups(groups):
groups = groups or []
for group in groups:
group['disks'] = sorted(group['disks'],
key=lambda d: '{}.{}'.format(d['pool'], d['image']))
group['members'] = sorted(group['members'])
return sorted(groups, key=lambda g: g['group_id'])
@staticmethod
def _get_portals_by_host(portals):
# type: (List[dict]) -> Dict[str, List[str]]
portals_by_host = {} # type: Dict[str, List[str]]
for portal in portals:
host = portal['host']
ip = portal['ip']
if host not in portals_by_host:
portals_by_host[host] = []
portals_by_host[host].append(ip)
return portals_by_host
def get_available_gateway():
    """Return the name of the first configured gateway that answers a ping.

    :raises DashboardException: when no gateways are configured
        (``no_gateways_defined``) or none of them respond
        (``no_gateways_available``).
    """
    gateways = IscsiGatewaysConfig.get_gateways_config()['gateways']
    if not gateways:
        raise DashboardException(msg='There are no gateways defined',
                                 code='no_gateways_defined',
                                 component='iscsi')
    for candidate in gateways:
        try:
            IscsiClient.instance(gateway_name=candidate).ping()
        except RequestException:
            continue
        return candidate
    raise DashboardException(msg='There are no gateways available',
                             code='no_gateways_available',
                             component='iscsi')
def validate_rest_api(gateways):
    """Ensure the REST API of every given gateway responds to a ping.

    :raises DashboardException: for the first gateway that cannot be reached.
    """
    for gateway in gateways:
        try:
            IscsiClient.instance(gateway_name=gateway).ping()
        except RequestException:
            raise DashboardException(
                msg='iSCSI REST Api not available for gateway '
                    '{}'.format(gateway),
                code='ceph_iscsi_rest_api_not_available_for_gateway',
                component='iscsi')
def validate_auth(auth):
    """Validate CHAP credentials against ceph-iscsi's username/password rules.

    Usernames must match ``[\\w\\.:@_-]{8,64}``; passwords must match
    ``[\\w@\\-_\\/]{12,16}``. Mutual CHAP additionally requires a primary user.

    :raises DashboardException: code ``target_bad_auth`` on any violation.
    """
    username_regex = re.compile(r'^[\w\.:@_-]{8,64}$')
    password_regex = re.compile(r'^[\w@\-_\/]{12,16}$')

    def _user_ok(name):
        return bool(username_regex.match(name))

    def _password_ok(secret):
        return bool(password_regex.match(secret))

    valid = True
    if auth['user'] or auth['password']:
        valid = _user_ok(auth['user']) and _password_ok(auth['password'])
    if auth['mutual_user'] or auth['mutual_password']:
        valid = valid and _user_ok(auth['mutual_user']) and \
            _password_ok(auth['mutual_password']) and auth['user']
    if not valid:
        raise DashboardException(msg='Bad authentication',
                                 code='target_bad_auth',
                                 component='iscsi')
| 55,574 | 47.707274 | 100 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/logs.py
|
# -*- coding: utf-8 -*-
import collections
from ..security import Scope
from ..services.ceph_service import CephService
from ..tools import NotificationQueue
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, ReadPermission
LOG_BUFFER_SIZE = 30
LOGS_SCHEMA = {
"clog": ([str], ""),
"audit_log": ([{
"name": (str, ""),
"rank": (str, ""),
"addrs": ({
"addrvec": ([{
"type": (str, ""),
"addr": (str, "IP Address"),
"nonce": (int, ""),
}], ""),
}, ""),
"stamp": (str, ""),
"seq": (int, ""),
"channel": (str, ""),
"priority": (str, ""),
"message": (str, ""),
}], "Audit log")
}
@APIRouter('/logs', Scope.LOG)
@APIDoc("Logs Management API", "Logs")
class Logs(BaseController):
    """Serve the most recent cluster and audit log lines to the UI."""
    def __init__(self):
        super().__init__()
        # Buffers are filled lazily on first request (see initialize_buffers).
        self._log_initialized = False
        self.log_buffer = collections.deque(maxlen=LOG_BUFFER_SIZE)
        self.audit_buffer = collections.deque(maxlen=LOG_BUFFER_SIZE)
    def append_log(self, log_struct):
        """Route an incoming log notification into the matching buffer."""
        if log_struct['channel'] == 'audit':
            self.audit_buffer.appendleft(log_struct)
        else:
            self.log_buffer.appendleft(log_struct)
    def load_buffer(self, buf, channel_name):
        """Seed ``buf`` with the last LOG_BUFFER_SIZE lines of a mon log channel."""
        lines = CephService.send_command(
            'mon', 'log last', channel=channel_name, num=LOG_BUFFER_SIZE, level='debug')
        for line in lines:
            buf.appendleft(line)
    def initialize_buffers(self):
        """Load both buffers once and subscribe to 'clog' notifications."""
        if not self._log_initialized:
            self._log_initialized = True
            self.load_buffer(self.log_buffer, 'cluster')
            self.load_buffer(self.audit_buffer, 'audit')
            NotificationQueue.register(self.append_log, 'clog')
    @Endpoint()
    @ReadPermission
    @EndpointDoc("Display Logs Configuration",
                 responses={200: LOGS_SCHEMA})
    def all(self):
        """Return both buffered log channels, newest entry first."""
        self.initialize_buffers()
        return dict(
            clog=list(self.log_buffer),
            audit_log=list(self.audit_buffer),
        )
| 2,117 | 28.013699 | 88 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/mgr_modules.py
|
# -*- coding: utf-8 -*-
from .. import mgr
from ..security import Scope
from ..services.ceph_service import CephService
from ..services.exception import handle_send_command_error
from ..tools import find_object_in_list, str_to_bool
from . import APIDoc, APIRouter, EndpointDoc, RESTController, allow_empty_body
MGR_MODULE_SCHEMA = ([{
"name": (str, "Module Name"),
"enabled": (bool, "Is Module Enabled"),
"always_on": (bool, "Is it an always on module?"),
"options": ({
"Option_name": ({
"name": (str, "Name of the option"),
"type": (str, "Type of the option"),
"level": (str, "Option level"),
"flags": (int, "List of flags associated"),
"default_value": (int, "Default value for the option"),
"min": (str, "Minimum value"),
"max": (str, "Maximum value"),
"enum_allowed": ([str], ""),
"desc": (str, "Description of the option"),
"long_desc": (str, "Elaborated description"),
"tags": ([str], "Tags associated with the option"),
"see_also": ([str], "Related options")
}, "Options")
}, "Module Options")
}])
@APIRouter('/mgr/module', Scope.CONFIG_OPT)
@APIDoc("Get details of MGR Module", "MgrModule")
class MgrModules(RESTController):
    """Manage Ceph Mgr modules: list, enable/disable and edit their options."""
    # Modules that must never be exposed/managed through this API.
    ignore_modules = ['selftest']
    @EndpointDoc("List Mgr modules",
                 responses={200: MGR_MODULE_SCHEMA})
    def list(self):
        """
        Get the list of managed modules.
        :return: A list of objects with the fields 'enabled', 'name' and 'options'.
        :rtype: list
        """
        result = []
        mgr_map = mgr.get('mgr_map')
        always_on_modules = mgr_map['always_on_modules'].get(mgr.release_name, [])
        for module_config in mgr_map['available_modules']:
            module_name = module_config['name']
            if module_name not in self.ignore_modules:
                always_on = module_name in always_on_modules
                enabled = module_name in mgr_map['modules'] or always_on
                result.append({
                    'name': module_name,
                    'enabled': enabled,
                    'always_on': always_on,
                    'options': self._convert_module_options(
                        module_config['module_options'])
                })
        return result
    def get(self, module_name):
        """
        Retrieve the values of the persistent configuration settings.
        :param module_name: The name of the Ceph Mgr module.
        :type module_name: str
        :return: The values of the module options.
        :rtype: dict
        """
        assert self._is_module_managed(module_name)
        options = self._get_module_options(module_name)
        result = {}
        for name, option in options.items():
            result[name] = mgr.get_module_option_ex(module_name, name,
                                                    option['default_value'])
        return result
    @RESTController.Resource('PUT')
    def set(self, module_name, config):
        """
        Set the values of the persistent configuration settings.
        :param module_name: The name of the Ceph Mgr module.
        :type module_name: str
        :param config: The values of the module options to be stored.
        :type config: dict
        """
        assert self._is_module_managed(module_name)
        options = self._get_module_options(module_name)
        # Only persist keys that are real options of the module.
        for name in options.keys():
            if name in config:
                mgr.set_module_option_ex(module_name, name, config[name])
    @RESTController.Resource('POST')
    @handle_send_command_error('mgr_modules')
    @allow_empty_body
    def enable(self, module_name):
        """
        Enable the specified Ceph Mgr module.
        :param module_name: The name of the Ceph Mgr module.
        :type module_name: str
        """
        assert self._is_module_managed(module_name)
        CephService.send_command(
            'mon', 'mgr module enable', module=module_name)
    @RESTController.Resource('POST')
    @handle_send_command_error('mgr_modules')
    @allow_empty_body
    def disable(self, module_name):
        """
        Disable the specified Ceph Mgr module.
        :param module_name: The name of the Ceph Mgr module.
        :type module_name: str
        """
        assert self._is_module_managed(module_name)
        CephService.send_command(
            'mon', 'mgr module disable', module=module_name)
    @RESTController.Resource('GET')
    def options(self, module_name):
        """
        Get the module options of the specified Ceph Mgr module.
        :param module_name: The name of the Ceph Mgr module.
        :type module_name: str
        :return: The module options as list of dicts.
        :rtype: list
        """
        assert self._is_module_managed(module_name)
        return self._get_module_options(module_name)
    def _is_module_managed(self, module_name):
        """
        Check if the specified Ceph Mgr module is managed by this service.
        :param module_name: The name of the Ceph Mgr module.
        :type module_name: str
        :return: Returns ``true`` if the Ceph Mgr module is managed by
          this service, otherwise ``false``.
        :rtype: bool
        """
        if module_name in self.ignore_modules:
            return False
        mgr_map = mgr.get('mgr_map')
        for module_config in mgr_map['available_modules']:
            if module_name == module_config['name']:
                return True
        return False
    def _get_module_config(self, module_name):
        """
        Helper function to get detailed module configuration.
        :param module_name: The name of the Ceph Mgr module.
        :type module_name: str
        :return: The module information, e.g. module name, can run,
          error string and available module options.
        :rtype: dict or None
        """
        mgr_map = mgr.get('mgr_map')
        return find_object_in_list('name', module_name,
                                   mgr_map['available_modules'])
    def _get_module_options(self, module_name):
        """
        Helper function to get the module options.
        :param module_name: The name of the Ceph Mgr module.
        :type module_name: str
        :return: The module options.
        :rtype: dict
        """
        options = self._get_module_config(module_name)['module_options']
        return self._convert_module_options(options)
    def _convert_module_options(self, options):
        # Workaround a possible bug in the Ceph Mgr implementation.
        # Various fields (e.g. default_value, min, max) are always
        # returned as a string.
        for option in options.values():
            if option['type'] == 'str':
                if option['default_value'] == 'None':  # This is Python None
                    option['default_value'] = ''
            elif option['type'] == 'bool':
                if option['default_value'] == '':
                    option['default_value'] = False
                else:
                    option['default_value'] = str_to_bool(
                        option['default_value'])
            elif option['type'] in ['float', 'uint', 'int', 'size', 'secs']:
                # All non-float numeric option types are coerced with int().
                cls = {
                    'float': float
                }.get(option['type'], int)
                for name in ['default_value', 'min', 'max']:
                    if option[name] == 'None':  # This is Python None
                        option[name] = None
                    elif option[name]:  # Skip empty entries
                        option[name] = cls(option[name])
        return options
| 7,733 | 38.258883 | 83 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/monitor.py
|
# -*- coding: utf-8 -*-
import json
from .. import mgr
from ..security import Scope
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, ReadPermission
MONITOR_SCHEMA = {
"mon_status": ({
"name": (str, ""),
"rank": (int, ""),
"state": (str, ""),
"election_epoch": (int, ""),
"quorum": ([int], ""),
"quorum_age": (int, ""),
"features": ({
"required_con": (str, ""),
"required_mon": ([int], ""),
"quorum_con": (str, ""),
"quorum_mon": ([str], "")
}, ""),
"outside_quorum": ([str], ""),
"extra_probe_peers": ([str], ""),
"sync_provider": ([str], ""),
"monmap": ({
"epoch": (int, ""),
"fsid": (str, ""),
"modified": (str, ""),
"created": (str, ""),
"min_mon_release": (int, ""),
"min_mon_release_name": (str, ""),
"features": ({
"persistent": ([str], ""),
"optional": ([str], "")
}, ""),
"mons": ([{
"rank": (int, ""),
"name": (str, ""),
"public_addrs": ({
"addrvec": ([{
"type": (str, ""),
"addr": (str, ""),
"nonce": (int, "")
}], "")
}, ""),
"addr": (str, ""),
"public_addr": (str, ""),
"priority": (int, ""),
"weight": (int, ""),
"stats": ({
"num_sessions": ([int], ""),
}, "")
}], "")
}, ""),
"feature_map": ({
"mon": ([{
"features": (str, ""),
"release": (str, ""),
"num": (int, "")
}], ""),
"mds": ([{
"features": (str, ""),
"release": (str, ""),
"num": (int, "")
}], ""),
"client": ([{
"features": (str, ""),
"release": (str, ""),
"num": (int, "")
}], ""),
"mgr": ([{
"features": (str, ""),
"release": (str, ""),
"num": (int, "")
}], ""),
}, "")
}, ""),
"in_quorum": ([{
"rank": (int, ""),
"name": (str, ""),
"public_addrs": ({
"addrvec": ([{
"type": (str, ""),
"addr": (str, ""),
"nonce": (int, "")
}], "")
}, ""),
"addr": (str, ""),
"public_addr": (str, ""),
"priority": (int, ""),
"weight": (int, ""),
"stats": ({
"num_sessions": ([int], "")
}, "")
}], ""),
"out_quorum": ([int], "")
}
@APIRouter('/monitor', Scope.MONITOR)
@APIDoc("Get Monitor Details", "Monitor")
class Monitor(BaseController):
    @Endpoint()
    @ReadPermission
    @EndpointDoc("Get Monitor Details",
                 responses={200: MONITOR_SCHEMA})
    def __call__(self):
        """Return mon status enriched with per-mon session counters,
        split into in-quorum and out-of-quorum monitors."""
        counters = ['mon.num_sessions']
        mon_status = json.loads(mgr.get("mon_status")['json'])
        in_quorum = []
        out_quorum = []
        for mon in mon_status["monmap"]["mons"]:
            stats = {}
            for counter in counters:
                data = mgr.get_counter("mon", mon["name"], counter)
                short_name = counter.split(".")[1]
                # Fall back to an empty series when no counter data exists.
                stats[short_name] = data[counter] if data is not None else []
            mon["stats"] = stats
            bucket = in_quorum if mon["rank"] in mon_status["quorum"] else out_quorum
            bucket.append(mon)
        return {
            'mon_status': mon_status,
            'in_quorum': in_quorum,
            'out_quorum': out_quorum
        }
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/nfs.py
|
# -*- coding: utf-8 -*-
import json
import logging
import os
from functools import partial
from typing import Any, Dict, List, Optional
import cephfs
from mgr_module import NFS_GANESHA_SUPPORTED_FSALS
from .. import mgr
from ..security import Scope
from ..services.cephfs import CephFS
from ..services.exception import DashboardException, handle_cephfs_error, \
serialize_dashboard_exception
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
ReadPermission, RESTController, Task, UIRouter
from ._version import APIVersion
logger = logging.getLogger('controllers.nfs')
class NFSException(DashboardException):
    """DashboardException pre-scoped to the ``nfs`` component."""

    def __init__(self, msg):
        super().__init__(component="nfs", msg=msg)
# documentation helpers
EXPORT_SCHEMA = {
'export_id': (int, 'Export ID'),
'path': (str, 'Export path'),
'cluster_id': (str, 'Cluster identifier'),
'pseudo': (str, 'Pseudo FS path'),
'access_type': (str, 'Export access type'),
'squash': (str, 'Export squash policy'),
'security_label': (str, 'Security label'),
'protocols': ([int], 'List of protocol types'),
'transports': ([str], 'List of transport types'),
'fsal': ({
'name': (str, 'name of FSAL'),
'fs_name': (str, 'CephFS filesystem name', True),
'sec_label_xattr': (str, 'Name of xattr for security label', True),
'user_id': (str, 'User id', True)
}, 'FSAL configuration'),
'clients': ([{
'addresses': ([str], 'list of IP addresses'),
'access_type': (str, 'Client access type'),
'squash': (str, 'Client squash policy')
}], 'List of client configurations'),
}
CREATE_EXPORT_SCHEMA = {
'path': (str, 'Export path'),
'cluster_id': (str, 'Cluster identifier'),
'pseudo': (str, 'Pseudo FS path'),
'access_type': (str, 'Export access type'),
'squash': (str, 'Export squash policy'),
'security_label': (str, 'Security label'),
'protocols': ([int], 'List of protocol types'),
'transports': ([str], 'List of transport types'),
'fsal': ({
'name': (str, 'name of FSAL'),
'fs_name': (str, 'CephFS filesystem name', True),
'sec_label_xattr': (str, 'Name of xattr for security label', True)
}, 'FSAL configuration'),
'clients': ([{
'addresses': ([str], 'list of IP addresses'),
'access_type': (str, 'Client access type'),
'squash': (str, 'Client squash policy')
}], 'List of client configurations')
}
# pylint: disable=not-callable
def NfsTask(name, metadata, wait_for):  # noqa: N802
    """Decorator factory wrapping an endpoint in a Task namespaced as ``nfs/<name>``.

    Exceptions raised by the wrapped function are serialized with the HTTP
    status code included.
    """
    def composed_decorator(func):
        return Task("nfs/{}".format(name), metadata, wait_for,
                    partial(serialize_dashboard_exception,
                            include_http_status=True))(func)
    return composed_decorator
@APIRouter('/nfs-ganesha/cluster', Scope.NFS_GANESHA)
@APIDoc("NFS-Ganesha Cluster Management API", "NFS-Ganesha")
class NFSGaneshaCluster(RESTController):
    """Expose the list of NFS cluster ids managed by the mgr/nfs module."""
    @ReadPermission
    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
    def list(self):
        # Delegates directly to the nfs mgr module.
        return mgr.remote('nfs', 'cluster_ls')
@APIRouter('/nfs-ganesha/export', Scope.NFS_GANESHA)
@APIDoc(group="NFS-Ganesha")
class NFSGaneshaExports(RESTController):
    """CRUD API for NFS-Ganesha exports, backed by the mgr/nfs module."""
    RESOURCE_ID = "cluster_id/export_id"
    @staticmethod
    def _get_schema_export(export: Dict[str, Any]) -> Dict[str, Any]:
        """
        Method that avoids returning export info not exposed in the export schema
        e.g., rgw user access/secret keys.
        """
        schema_fsal_info = {}
        for key in export['fsal'].keys():
            if key in EXPORT_SCHEMA['fsal'][0].keys():  # type: ignore
                schema_fsal_info[key] = export['fsal'][key]
        export['fsal'] = schema_fsal_info
        return export
    @EndpointDoc("List all NFS-Ganesha exports",
                 responses={200: [EXPORT_SCHEMA]})
    def list(self) -> List[Dict[str, Any]]:
        """Return all exports, filtered down to the documented schema."""
        exports = []
        for export in mgr.remote('nfs', 'export_ls'):
            exports.append(self._get_schema_export(export))
        return exports
    @handle_cephfs_error()
    @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
                        'cluster_id': '{cluster_id}'}, 2.0)
    @EndpointDoc("Creates a new NFS-Ganesha export",
                 parameters=CREATE_EXPORT_SCHEMA,
                 responses={201: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def create(self, path, cluster_id, pseudo, access_type,
               squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
        """Create an export; rejects a pseudo path that is already in use."""
        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        if export_mgr.get_export_by_pseudo(cluster_id, pseudo):
            raise DashboardException(msg=f'Pseudo {pseudo} is already in use.',
                                     component='nfs')
        # BUGFIX: `fsal` is a dict, so the former `hasattr(fsal, 'user_id')`
        # was always False and the pop never executed.
        if 'user_id' in fsal:
            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }
        applied_exports = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if not applied_exports.has_error:
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        raise NFSException(f"Export creation failed {applied_exports.changes[0].msg}")
    @EndpointDoc("Get an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (str, "Export ID")
                 },
                 responses={200: EXPORT_SCHEMA})
    def get(self, cluster_id, export_id) -> Optional[Dict[str, Any]]:
        """Return a single export or None when it does not exist."""
        export_id = int(export_id)
        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if export:
            export = self._get_schema_export(export)
        return export
    @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
             2.0)
    @EndpointDoc("Updates an NFS-Ganesha export",
                 parameters=dict(export_id=(int, "Export ID"),
                                 **CREATE_EXPORT_SCHEMA),
                 responses={200: EXPORT_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def set(self, cluster_id, export_id, path, pseudo, access_type,
            squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
        """Update an existing export in place."""
        # BUGFIX: see create() -- key test instead of attribute test.
        if 'user_id' in fsal:
            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
        raw_ex = {
            'path': path,
            'pseudo': pseudo,
            'cluster_id': cluster_id,
            'export_id': export_id,
            'access_type': access_type,
            'squash': squash,
            'security_label': security_label,
            'protocols': protocols,
            'transports': transports,
            'fsal': fsal,
            'clients': clients
        }
        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
        applied_exports = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
        if not applied_exports.has_error:
            return self._get_schema_export(
                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
        # BUGFIX: message said "creation" for the update path (copy-paste).
        raise NFSException(f"Export update failed {applied_exports.changes[0].msg}")
    @NfsTask('delete', {'cluster_id': '{cluster_id}',
                        'export_id': '{export_id}'}, 2.0)
    @EndpointDoc("Deletes an NFS-Ganesha export",
                 parameters={
                     'cluster_id': (str, 'Cluster identifier'),
                     'export_id': (int, "Export ID")
                 })
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def delete(self, cluster_id, export_id):
        """Remove an export; 404 when the id is unknown."""
        export_id = int(export_id)
        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
        if not export:
            raise DashboardException(
                http_status_code=404,
                msg=f'Export with id {export_id} not found.',
                component='nfs')
        mgr.remote('nfs', 'export_rm', cluster_id, export['pseudo'])
@UIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
class NFSGaneshaUi(BaseController):
    """UI-only helper endpoints for the NFS-Ganesha pages."""
    @Endpoint('GET', '/fsals')
    @ReadPermission
    def fsals(self):
        return NFS_GANESHA_SUPPORTED_FSALS
    @Endpoint('GET', '/lsdir')
    @ReadPermission
    def lsdir(self, fs_name, root_dir=None, depth=1):  # pragma: no cover
        """List CephFS directories below ``root_dir`` up to ``depth`` levels."""
        if root_dir is None:
            root_dir = "/"
        if not root_dir.startswith('/'):
            root_dir = '/{}'.format(root_dir)
        root_dir = os.path.normpath(root_dir)
        # BUGFIX: error_msg was previously assigned inside the `try` after
        # int(depth); a non-ValueError (e.g. TypeError) left it unbound and the
        # subsequent check raised NameError. Initialize it up front and raise
        # after the try block instead of from within `finally`.
        error_msg = ''
        try:
            depth = int(depth)
            if depth < 0:
                error_msg = '`depth` must be greater or equal to 0.'
            if depth > 5:
                logger.warning("Limiting depth to maximum value of 5: "
                               "input depth=%s", depth)
                depth = 5
        except ValueError:
            error_msg = '`depth` must be an integer.'
        if error_msg:
            raise DashboardException(code=400,
                                     component='nfs',
                                     msg=error_msg)
        try:
            cfs = CephFS(fs_name)
            paths = [root_dir]
            paths.extend([p['path'].rstrip('/')
                          for p in cfs.ls_dir(root_dir, depth)])
        except (cephfs.ObjectNotFound, cephfs.PermissionError):
            # Missing or unreadable directory -> empty listing, not an error.
            paths = []
        return {'paths': paths}
    @Endpoint('GET', '/cephfs/filesystems')
    @ReadPermission
    def filesystems(self):
        return CephFS.list_filesystems()
    @Endpoint()
    @ReadPermission
    def status(self):
        """Report whether the mgr/nfs module is reachable."""
        status = {'available': True, 'message': None}
        try:
            mgr.remote('nfs', 'cluster_ls')
        except (ImportError, RuntimeError) as error:
            logger.exception(error)
            status['available'] = False
            status['message'] = str(error)  # type: ignore
        return status
| 10,503 | 36.514286 | 95 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/orchestrator.py
|
# -*- coding: utf-8 -*-
from functools import wraps
from .. import mgr
from ..exceptions import DashboardException
from ..services.orchestrator import OrchClient
from . import APIDoc, Endpoint, EndpointDoc, ReadPermission, RESTController, UIRouter
STATUS_SCHEMA = {
"available": (bool, "Orchestrator status"),
"message": (str, "Error message")
}
def raise_if_no_orchestrator(features=None):
    """Decorator factory guarding an endpoint behind orchestrator availability.

    :param features: optional list of orchestrator features the endpoint needs.
    :raises DashboardException: HTTP 503 when the orchestrator is unavailable
        or lacks one of the requested features.
    """
    def inner(method):
        @wraps(method)
        def _inner(self, *args, **kwargs):
            orch = OrchClient.instance()
            if not orch.available():
                raise DashboardException(code='orchestrator_status_unavailable',  # pragma: no cover
                                         msg='Orchestrator is unavailable',
                                         component='orchestrator',
                                         http_status_code=503)
            if features is not None:
                missing = orch.get_missing_features(features)
                if missing:
                    msg = 'Orchestrator feature(s) are unavailable: {}'.format(', '.join(missing))
                    raise DashboardException(code='orchestrator_features_unavailable',
                                             msg=msg,
                                             component='orchestrator',
                                             http_status_code=503)
            return method(self, *args, **kwargs)
        return _inner
    return inner
@UIRouter('/orchestrator')
@APIDoc("Orchestrator Management API", "Orchestrator")
class Orchestrator(RESTController):
    """Expose orchestrator availability and the configured backend name."""
    @Endpoint()
    @ReadPermission
    @EndpointDoc("Display Orchestrator Status",
                 responses={200: STATUS_SCHEMA})
    def status(self):
        return OrchClient.instance().status()
    @Endpoint()
    def get_name(self):
        # Name of the backend configured in the 'orchestrator' mgr module.
        return mgr.get_module_option_ex('orchestrator', 'orchestrator')
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/osd.py
|
# -*- coding: utf-8 -*-
import json
import logging
import time
from typing import Any, Dict, List, Optional, Union
from ceph.deployment.drive_group import DriveGroupSpec, DriveGroupValidationError # type: ignore
from mgr_util import get_most_recent_rate
from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from ..services.ceph_service import CephService, SendCommandError
from ..services.exception import handle_orchestrator_error, handle_send_command_error
from ..services.orchestrator import OrchClient, OrchFeature
from ..services.osd import HostStorageSummary, OsdDeploymentOptions
from ..tools import str_to_bool
from . import APIDoc, APIRouter, CreatePermission, DeletePermission, Endpoint, \
EndpointDoc, ReadPermission, RESTController, Task, UIRouter, \
UpdatePermission, allow_empty_body
from ._version import APIVersion
from .orchestrator import raise_if_no_orchestrator
logger = logging.getLogger('controllers.osd')
SAFE_TO_DESTROY_SCHEMA = {
"safe_to_destroy": ([str], "Is OSD safe to destroy?"),
"active": ([int], ""),
"missing_stats": ([str], ""),
"stored_pgs": ([str], "Stored Pool groups in Osd"),
"is_safe_to_destroy": (bool, "Is OSD safe to destroy?")
}
EXPORT_FLAGS_SCHEMA = {
"list_of_flags": ([str], "")
}
EXPORT_INDIV_FLAGS_SCHEMA = {
"added": ([str], "List of added flags"),
"removed": ([str], "List of removed flags"),
"ids": ([int], "List of updated OSDs")
}
EXPORT_INDIV_FLAGS_GET_SCHEMA = {
"osd": (int, "OSD ID"),
"flags": ([str], "List of active flags")
}
class DeploymentOptions:
    """Container for the OSD deployment choices shown in the UI wizard."""

    def __init__(self):
        # One summary entry per supported deployment profile.
        cost_capacity = HostStorageSummary(
            OsdDeploymentOptions.COST_CAPACITY,
            title='Cost/Capacity-optimized',
            desc='All the available HDDs are selected')
        throughput = HostStorageSummary(
            OsdDeploymentOptions.THROUGHPUT,
            title='Throughput-optimized',
            desc="HDDs/SSDs are selected for data"
                 "devices and SSDs/NVMes for DB/WAL devices")
        iops = HostStorageSummary(
            OsdDeploymentOptions.IOPS,
            title='IOPS-optimized',
            desc='All the available NVMes are selected')
        self.options = {
            OsdDeploymentOptions.COST_CAPACITY: cost_capacity,
            OsdDeploymentOptions.THROUGHPUT: throughput,
            OsdDeploymentOptions.IOPS: iops,
        }
        self.recommended_option = None

    def as_dict(self):
        """Serialize every option plus the recommended one."""
        serialized = {}
        for key, summary in self.options.items():
            serialized[key] = summary.as_dict()
        return {
            'options': serialized,
            'recommended_option': self.recommended_option
        }
# Drive-group spec templates backing the predefined deployment options.
# NOTE: shared module-level state — treat entries as read-only templates.
predefined_drive_groups = {
    OsdDeploymentOptions.COST_CAPACITY: {
        'service_type': 'osd',
        'service_id': 'cost_capacity',
        'placement': {
            'host_pattern': '*'
        },
        'data_devices': {
            'rotational': 1
        },
        'encrypted': False
    },
    OsdDeploymentOptions.THROUGHPUT: {
        'service_type': 'osd',
        'service_id': 'throughput_optimized',
        'placement': {
            'host_pattern': '*'
        },
        'data_devices': {
            'rotational': 1
        },
        'db_devices': {
            'rotational': 0
        },
        'encrypted': False
    },
    OsdDeploymentOptions.IOPS: {
        'service_type': 'osd',
        'service_id': 'iops_optimized',
        'placement': {
            'host_pattern': '*'
        },
        'data_devices': {
            'rotational': 0
        },
        'encrypted': False
    },
}
def osd_task(name, metadata, wait_for=2.0):
    """Decorator factory wrapping an endpoint in an ``osd/<name>`` task."""
    task_name = "osd/{}".format(name)
    return Task(task_name, metadata, wait_for)
@APIRouter('/osd', Scope.OSD)
@APIDoc('OSD management API', 'OSD')
class Osd(RESTController):
    def list(self):
        """Return all OSDs, enriched with stats, CRUSH tree and host info."""
        osds = self.get_osd_map()

        # Extending by osd stats information
        for stat in mgr.get('osd_stats')['osd_stats']:
            if stat['osd'] in osds:
                osds[stat['osd']]['osd_stats'] = stat

        # Extending by osd node information
        nodes = mgr.get('osd_map_tree')['nodes']
        for node in nodes:
            if node['type'] == 'osd' and node['id'] in osds:
                osds[node['id']]['tree'] = node

        # Extending by osd parent node information
        for host in [n for n in nodes if n['type'] == 'host']:
            for osd_id in host['children']:
                # Negative children ids are nested buckets, not OSDs.
                if osd_id >= 0 and osd_id in osds:
                    osds[osd_id]['host'] = host

        removing_osd_ids = self.get_removing_osds()

        # Extending by osd histogram and orchestrator data
        for osd_id, osd in osds.items():
            osd['stats'] = {}
            osd['stats_history'] = {}
            osd_spec = str(osd_id)
            if 'osd' not in osd:
                continue  # pragma: no cover - simple early continue
            self.gauge_stats(osd, osd_spec)
            osd['operational_status'] = self._get_operational_status(osd_id, removing_osd_ids)
        return list(osds.values())
@staticmethod
def gauge_stats(osd, osd_spec):
for stat in ['osd.op_w', 'osd.op_in_bytes', 'osd.op_r', 'osd.op_out_bytes']:
prop = stat.split('.')[1]
rates = CephService.get_rates('osd', osd_spec, stat)
osd['stats'][prop] = get_most_recent_rate(rates)
osd['stats_history'][prop] = rates
# Gauge stats
for stat in ['osd.numpg', 'osd.stat_bytes', 'osd.stat_bytes_used']:
osd['stats'][stat.split('.')[1]] = mgr.get_latest('osd', osd_spec, stat)
@RESTController.Collection('GET', version=APIVersion.EXPERIMENTAL)
@ReadPermission
def settings(self):
result = CephService.send_command('mon', 'osd dump')
return {
'nearfull_ratio': result['nearfull_ratio'],
'full_ratio': result['full_ratio']
}
def _get_operational_status(self, osd_id: int, removing_osd_ids: Optional[List[int]]):
if removing_osd_ids is None:
return 'unmanaged'
if osd_id in removing_osd_ids:
return 'deleting'
return 'working'
@staticmethod
def get_removing_osds() -> Optional[List[int]]:
orch = OrchClient.instance()
if orch.available(features=[OrchFeature.OSD_GET_REMOVE_STATUS]):
return [osd.osd_id for osd in orch.osds.removing_status()]
return None
@staticmethod
def get_osd_map(svc_id=None):
# type: (Union[int, None]) -> Dict[int, Union[dict, Any]]
def add_id(osd):
osd['id'] = osd['osd']
return osd
resp = {
osd['osd']: add_id(osd)
for osd in mgr.get('osd_map')['osds'] if svc_id is None or osd['osd'] == int(svc_id)
}
return resp if svc_id is None else resp[int(svc_id)]
@staticmethod
def _get_smart_data(osd_id):
# type: (str) -> dict
"""Returns S.M.A.R.T data for the given OSD ID."""
logger.debug('[SMART] retrieving data from OSD with ID %s', osd_id)
return CephService.get_smart_data_by_daemon('osd', osd_id)
@RESTController.Resource('GET')
def smart(self, svc_id):
# type: (str) -> dict
return self._get_smart_data(svc_id)
@handle_send_command_error('osd')
def get(self, svc_id):
"""
Returns collected data about an OSD.
:return: Returns the requested data.
"""
return {
'osd_map': self.get_osd_map(svc_id),
'osd_metadata': mgr.get_metadata('osd', svc_id),
'operational_status': self._get_operational_status(int(svc_id),
self.get_removing_osds())
}
@RESTController.Resource('GET')
@handle_send_command_error('osd')
def histogram(self, svc_id):
# type: (int) -> Dict[str, Any]
"""
:return: Returns the histogram data.
"""
try:
histogram = CephService.send_command(
'osd', srv_spec=svc_id, prefix='perf histogram dump')
except SendCommandError as e: # pragma: no cover - the handling is too obvious
raise DashboardException(
component='osd', http_status_code=400, msg=str(e))
return histogram
    def set(self, svc_id, device_class):  # pragma: no cover
        """Change the CRUSH device class of an OSD.

        The old class must be removed before a new one can be set; an empty
        ``device_class`` therefore only clears the current class.
        """
        old_device_class = CephService.send_command('mon', 'osd crush get-device-class',
                                                    ids=[svc_id])
        old_device_class = old_device_class[0]['device_class']
        if old_device_class != device_class:
            # Remove first: 'set-device-class' fails while a class is present.
            CephService.send_command('mon', 'osd crush rm-device-class',
                                     ids=[svc_id])
            if device_class:
                # 'class' is a Python keyword, hence the **kwargs spelling.
                CephService.send_command('mon', 'osd crush set-device-class', **{
                    'class': device_class,
                    'ids': [svc_id]
                })
def _check_delete(self, osd_ids):
# type: (List[str]) -> Dict[str, Any]
"""
Check if it's safe to remove OSD(s).
:param osd_ids: list of OSD IDs
:return: a dictionary contains the following attributes:
`safe`: bool, indicate if it's safe to remove OSDs.
`message`: str, help message if it's not safe to remove OSDs.
"""
_ = osd_ids
health_data = mgr.get('health') # type: ignore
health = json.loads(health_data['json'])
checks = health['checks'].keys()
unsafe_checks = set(['OSD_FULL', 'OSD_BACKFILLFULL', 'OSD_NEARFULL'])
failed_checks = checks & unsafe_checks
msg = 'Removing OSD(s) is not recommended because of these failed health check(s): {}.'.\
format(', '.join(failed_checks)) if failed_checks else ''
return {
'safe': not bool(failed_checks),
'message': msg
}
    @DeletePermission
    @raise_if_no_orchestrator([OrchFeature.OSD_DELETE, OrchFeature.OSD_GET_REMOVE_STATUS])
    @handle_orchestrator_error('osd')
    @osd_task('delete', {'svc_id': '{svc_id}'})
    def delete(self, svc_id, preserve_id=None, force=None):  # pragma: no cover
        """Remove an OSD via the orchestrator and block until it is gone.

        :param svc_id: id of the OSD to remove.
        :param preserve_id: truthy string -> keep the OSD id for reuse
            ('replace' mode).
        :param force: truthy string -> skip the cluster-health safety check.
        :raises DashboardException: on invalid bool strings or unsafe removal.
        """
        replace = False
        check: Union[Dict[str, Any], bool] = False
        try:
            if preserve_id is not None:
                replace = str_to_bool(preserve_id)
            if force is not None:
                check = not str_to_bool(force)
        except ValueError:
            raise DashboardException(
                component='osd', http_status_code=400, msg='Invalid parameter(s)')
        orch = OrchClient.instance()
        if check:
            logger.info('Check for removing osd.%s...', svc_id)
            # `check` becomes the result dict of the health-based safety check.
            check = self._check_delete([svc_id])
            if not check['safe']:
                logger.error('Unable to remove osd.%s: %s', svc_id, check['message'])
                raise DashboardException(component='osd', msg=check['message'])
        logger.info('Start removing osd.%s (replace: %s)...', svc_id, replace)
        orch.osds.remove([svc_id], replace)
        # Poll the orchestrator until this OSD no longer appears in the
        # removal queue. This runs inside a Task, so blocking here is fine.
        while True:
            removal_osds = orch.osds.removing_status()
            logger.info('Current removing OSDs %s', removal_osds)
            pending = [osd for osd in removal_osds if osd.osd_id == int(svc_id)]
            if not pending:
                break
            logger.info('Wait until osd.%s is removed...', svc_id)
            time.sleep(60)
@RESTController.Resource('POST', query_params=['deep'])
@UpdatePermission
@allow_empty_body
def scrub(self, svc_id, deep=False):
api_scrub = "osd deep-scrub" if str_to_bool(deep) else "osd scrub"
CephService.send_command("mon", api_scrub, who=svc_id)
@RESTController.Resource('PUT')
@EndpointDoc("Mark OSD flags (out, in, down, lost, ...)",
parameters={'svc_id': (str, 'SVC ID')})
def mark(self, svc_id, action):
"""
Note: osd must be marked `down` before marking lost.
"""
valid_actions = ['out', 'in', 'down', 'lost']
args = {'srv_type': 'mon', 'prefix': 'osd ' + action}
if action.lower() in valid_actions:
if action == 'lost':
args['id'] = int(svc_id)
args['yes_i_really_mean_it'] = True
else:
args['ids'] = [svc_id]
CephService.send_command(**args)
else:
logger.error("Invalid OSD mark action: %s attempted on SVC_ID: %s", action, svc_id)
@RESTController.Resource('POST')
@allow_empty_body
def reweight(self, svc_id, weight):
"""
Reweights the OSD temporarily.
Note that ‘ceph osd reweight’ is not a persistent setting. When an OSD
gets marked out, the osd weight will be set to 0. When it gets marked
in again, the weight will be changed to 1.
Because of this ‘ceph osd reweight’ is a temporary solution. You should
only use it to keep your cluster running while you’re ordering more
hardware.
- Craig Lewis (http://lists.ceph.com/pipermail/ceph-users-ceph.com/2014-June/040967.html)
"""
CephService.send_command(
'mon',
'osd reweight',
id=int(svc_id),
weight=float(weight))
def _create_predefined_drive_group(self, data):
orch = OrchClient.instance()
option = OsdDeploymentOptions(data[0]['option'])
if option in list(OsdDeploymentOptions):
try:
predefined_drive_groups[
option]['encrypted'] = data[0]['encrypted']
orch.osds.create([DriveGroupSpec.from_json(
predefined_drive_groups[option])])
except (ValueError, TypeError, KeyError, DriveGroupValidationError) as e:
raise DashboardException(e, component='osd')
def _create_bare(self, data):
"""Create a OSD container that has no associated device.
:param data: contain attributes to create a bare OSD.
: `uuid`: will be set automatically if the OSD starts up
: `svc_id`: the ID is only used if a valid uuid is given.
"""
try:
uuid = data['uuid']
svc_id = int(data['svc_id'])
except (KeyError, ValueError) as e:
raise DashboardException(e, component='osd', http_status_code=400)
result = CephService.send_command(
'mon', 'osd create', id=svc_id, uuid=uuid)
return {
'result': result,
'svc_id': svc_id,
'uuid': uuid,
}
@raise_if_no_orchestrator([OrchFeature.OSD_CREATE])
@handle_orchestrator_error('osd')
def _create_with_drive_groups(self, drive_groups):
"""Create OSDs with DriveGroups."""
orch = OrchClient.instance()
try:
dg_specs = [DriveGroupSpec.from_json(dg) for dg in drive_groups]
orch.osds.create(dg_specs)
except (ValueError, TypeError, DriveGroupValidationError) as e:
raise DashboardException(e, component='osd')
@CreatePermission
@osd_task('create', {'tracking_id': '{tracking_id}'})
def create(self, method, data, tracking_id): # pylint: disable=unused-argument
if method == 'bare':
return self._create_bare(data)
if method == 'drive_groups':
return self._create_with_drive_groups(data)
if method == 'predefined':
return self._create_predefined_drive_group(data)
raise DashboardException(
component='osd', http_status_code=400, msg='Unknown method: {}'.format(method))
@RESTController.Resource('POST')
@allow_empty_body
def purge(self, svc_id):
"""
Note: osd must be marked `down` before removal.
"""
CephService.send_command('mon', 'osd purge-actual', id=int(svc_id),
yes_i_really_mean_it=True)
@RESTController.Resource('POST')
@allow_empty_body
def destroy(self, svc_id):
"""
Mark osd as being destroyed. Keeps the ID intact (allowing reuse), but
removes cephx keys, config-key data and lockbox keys, rendering data
permanently unreadable.
The osd must be marked down before being destroyed.
"""
CephService.send_command(
'mon', 'osd destroy-actual', id=int(svc_id), yes_i_really_mean_it=True)
@Endpoint('GET', query_params=['ids'])
@ReadPermission
@EndpointDoc("Check If OSD is Safe to Destroy",
parameters={
'ids': (str, 'OSD Service Identifier'),
},
responses={200: SAFE_TO_DESTROY_SCHEMA})
def safe_to_destroy(self, ids):
"""
:type ids: int|[int]
"""
ids = json.loads(ids)
if isinstance(ids, list):
ids = list(map(str, ids))
else:
ids = [str(ids)]
try:
result = CephService.send_command(
'mon', 'osd safe-to-destroy', ids=ids, target=('mgr', ''))
result['is_safe_to_destroy'] = set(result['safe_to_destroy']) == set(map(int, ids))
return result
except SendCommandError as e:
return {
'message': str(e),
'is_safe_to_destroy': False,
}
@Endpoint('GET', query_params=['svc_ids'])
@ReadPermission
@raise_if_no_orchestrator()
@handle_orchestrator_error('osd')
def safe_to_delete(self, svc_ids):
"""
:type ids: int|[int]
"""
check = self._check_delete(svc_ids)
return {
'is_safe_to_delete': check.get('safe', False),
'message': check.get('message', '')
}
@RESTController.Resource('GET')
def devices(self, svc_id):
# type: (str) -> Union[list, str]
devices: Union[list, str] = CephService.send_command(
'mon', 'device ls-by-daemon', who='osd.{}'.format(svc_id))
mgr_map = mgr.get('mgr_map')
available_modules = [m['name'] for m in mgr_map['available_modules']]
life_expectancy_enabled = any(
item.startswith('diskprediction_') for item in available_modules)
for device in devices:
device['life_expectancy_enabled'] = life_expectancy_enabled
return devices
@UIRouter('/osd', Scope.OSD)
@APIDoc("Dashboard UI helper function; not part of the public API", "OsdUI")
class OsdUi(Osd):
    @Endpoint('GET')
    @ReadPermission
    @raise_if_no_orchestrator([OrchFeature.DAEMON_LIST])
    @handle_orchestrator_error('host')
    def deployment_options(self):
        """Summarize available devices and recommend a deployment profile."""
        orch = OrchClient.instance()
        counts = {'hdd': 0, 'ssd': 0, 'nvme': 0}
        res = DeploymentOptions()
        for inventory_host in orch.inventory.list(hosts=None, refresh=True):
            for device in inventory_host.devices.devices:
                if not device.available:
                    continue
                if device.human_readable_type == 'hdd':
                    counts['hdd'] += 1
                # SSDs and NVMe are both counted as 'ssd'
                # so differentiating nvme using its path
                elif '/dev/nvme' in device.path:
                    counts['nvme'] += 1
                else:
                    counts['ssd'] += 1
        if counts['hdd']:
            res.options[OsdDeploymentOptions.COST_CAPACITY].available = True
            res.recommended_option = OsdDeploymentOptions.COST_CAPACITY
        if counts['hdd'] and counts['ssd']:
            res.options[OsdDeploymentOptions.THROUGHPUT].available = True
            res.recommended_option = OsdDeploymentOptions.THROUGHPUT
        if counts['nvme']:
            res.options[OsdDeploymentOptions.IOPS].available = True
        return res.as_dict()
@APIRouter('/osd/flags', Scope.OSD)
@APIDoc(group='OSD')
class OsdFlagsController(RESTController):
@staticmethod
def _osd_flags():
enabled_flags = mgr.get('osd_map')['flags_set']
if 'pauserd' in enabled_flags and 'pausewr' in enabled_flags:
# 'pause' is set by calling `ceph osd set pause` and unset by
# calling `set osd unset pause`, but `ceph osd dump | jq '.flags'`
# will contain 'pauserd,pausewr' if pause is set.
# Let's pretend to the API that 'pause' is in fact a proper flag.
enabled_flags = list(
set(enabled_flags) - {'pauserd', 'pausewr'} | {'pause'})
return sorted(enabled_flags)
    @staticmethod
    def _update_flags(action, flags, ids=None):
        """Set or unset the given flags, per-OSD or cluster-wide.

        :param action: mon command suffix, e.g. 'set', 'unset', 'set-group'.
        :param flags: iterable of flag names to apply.
        :param ids: optional list of OSD ids; when given, all flags are
            applied to exactly these OSDs in a single group command.
        """
        if ids:
            if flags:
                ids = list(map(str, ids))
                CephService.send_command('mon', 'osd ' + action, who=ids,
                                         flags=','.join(flags))
        else:
            # Cluster-wide variant takes a single flag per command.
            for flag in flags:
                CephService.send_command('mon', 'osd ' + action, '', key=flag)
@EndpointDoc("Display OSD Flags",
responses={200: EXPORT_FLAGS_SCHEMA})
def list(self):
return self._osd_flags()
@EndpointDoc('Sets OSD flags for the entire cluster.',
parameters={
'flags': ([str], 'List of flags to set. The flags `recovery_deletes`, '
'`sortbitwise` and `pglog_hardlimit` cannot be unset. '
'Additionally `purged_snapshots` cannot even be set.')
},
responses={200: EXPORT_FLAGS_SCHEMA})
def bulk_set(self, flags):
"""
The `recovery_deletes`, `sortbitwise` and `pglog_hardlimit` flags cannot be unset.
`purged_snapshots` cannot even be set. It is therefore required to at
least include those four flags for a successful operation.
"""
assert isinstance(flags, list)
enabled_flags = set(self._osd_flags())
data = set(flags)
added = data - enabled_flags
removed = enabled_flags - data
self._update_flags('set', added)
self._update_flags('unset', removed)
logger.info('Changed OSD flags: added=%s removed=%s', added, removed)
return sorted(enabled_flags - removed | added)
@Endpoint('PUT', 'individual')
@UpdatePermission
@EndpointDoc('Sets OSD flags for a subset of individual OSDs.',
parameters={
'flags': ({'noout': (bool, 'Sets/unsets `noout`', True, None),
'noin': (bool, 'Sets/unsets `noin`', True, None),
'noup': (bool, 'Sets/unsets `noup`', True, None),
'nodown': (bool, 'Sets/unsets `nodown`', True, None)},
'Directory of flags to set or unset. The flags '
'`noin`, `noout`, `noup` and `nodown` are going to '
'be considered only.'),
'ids': ([int], 'List of OSD ids the flags should be applied '
'to.')
},
responses={200: EXPORT_INDIV_FLAGS_SCHEMA})
def set_individual(self, flags, ids):
"""
Updates flags (`noout`, `noin`, `nodown`, `noup`) for an individual
subset of OSDs.
"""
assert isinstance(flags, dict)
assert isinstance(ids, list)
assert all(isinstance(id, int) for id in ids)
# These are to only flags that can be applied to an OSD individually.
all_flags = {'noin', 'noout', 'nodown', 'noup'}
added = set()
removed = set()
for flag, activated in flags.items():
if flag in all_flags:
if activated is not None:
if activated:
added.add(flag)
else:
removed.add(flag)
self._update_flags('set-group', added, ids)
self._update_flags('unset-group', removed, ids)
logger.error('Changed individual OSD flags: added=%s removed=%s for ids=%s',
added, removed, ids)
return {'added': sorted(added),
'removed': sorted(removed),
'ids': ids}
@Endpoint('GET', 'individual')
@ReadPermission
@EndpointDoc('Displays individual OSD flags',
responses={200: EXPORT_INDIV_FLAGS_GET_SCHEMA})
def get_individual(self):
osd_map = mgr.get('osd_map')['osds']
resp = []
for osd in osd_map:
resp.append({
'osd': osd['osd'],
'flags': osd['state']
})
return resp
| 24,740 | 36.543247 | 97 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/perf_counters.py
|
# -*- coding: utf-8 -*-
import cherrypy
from .. import mgr
from ..security import Scope
from ..services.ceph_service import CephService
from . import APIDoc, APIRouter, EndpointDoc, RESTController
# Example response schema for the perf-counter endpoints: one entry per
# daemon ("mon.a", ...), each mapping counter paths to metadata and value.
PERF_SCHEMA = {
    "mon.a": ({
        ".cache_bytes": ({
            "description": (str, ""),
            "nick": (str, ""),
            "type": (int, ""),
            "priority": (int, ""),
            "units": (int, ""),
            "value": (int, "")
        }, ""),
    }, "Service ID"),
}
class PerfCounter(RESTController):
    """Base controller returning perf counters for one service type."""

    # Subclasses pin this to 'mon', 'osd', 'mds', ...
    service_type = None  # type: str

    def get(self, service_id):
        """Return perf counters of daemon ``<service_type>.<service_id>``."""
        try:
            return CephService.get_service_perf_counters(self.service_type, str(service_id))
        except KeyError as error:
            raise cherrypy.HTTPError(404, "{0} not found".format(error))
# One thin PerfCounter subclass per service type: each just pins
# `service_type` and inherits the generic `get` implementation.
@APIRouter('perf_counters/mds', Scope.CEPHFS)
@APIDoc("Mds Perf Counters Management API", "MdsPerfCounter")
class MdsPerfCounter(PerfCounter):
    service_type = 'mds'
@APIRouter('perf_counters/mon', Scope.MONITOR)
@APIDoc("Mon Perf Counters Management API", "MonPerfCounter")
class MonPerfCounter(PerfCounter):
    service_type = 'mon'
@APIRouter('perf_counters/osd', Scope.OSD)
@APIDoc("OSD Perf Counters Management API", "OsdPerfCounter")
class OsdPerfCounter(PerfCounter):
    service_type = 'osd'
@APIRouter('perf_counters/rgw', Scope.RGW)
@APIDoc("Rgw Perf Counters Management API", "RgwPerfCounter")
class RgwPerfCounter(PerfCounter):
    service_type = 'rgw'
@APIRouter('perf_counters/rbd-mirror', Scope.RBD_MIRRORING)
@APIDoc("Rgw Mirroring Perf Counters Management API", "RgwMirrorPerfCounter")
class RbdMirrorPerfCounter(PerfCounter):
    service_type = 'rbd-mirror'
@APIRouter('perf_counters/mgr', Scope.MANAGER)
@APIDoc("Mgr Perf Counters Management API", "MgrPerfCounter")
class MgrPerfCounter(PerfCounter):
    service_type = 'mgr'
@APIRouter('perf_counters/tcmu-runner', Scope.ISCSI)
@APIDoc("Tcmu Runner Perf Counters Management API", "TcmuRunnerPerfCounter")
class TcmuRunnerPerfCounter(PerfCounter):
    service_type = 'tcmu-runner'
@APIRouter('perf_counters')
@APIDoc("Perf Counters Management API", "PerfCounters")
class PerfCounters(RESTController):
    """Expose the mgr's full unlabeled perf-counter dump."""

    @EndpointDoc("Display Perf Counters",
                 responses={200: PERF_SCHEMA})
    def list(self):
        """Return all unlabeled perf counters known to the mgr."""
        counters = mgr.get_unlabeled_perf_counters()
        return counters
| 2,362 | 27.46988 | 92 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/pool.py
|
# -*- coding: utf-8 -*-
import time
from typing import Any, Dict, Iterable, List, Optional, Union, cast
import cherrypy
from .. import mgr
from ..security import Scope
from ..services.ceph_service import CephService
from ..services.exception import handle_send_command_error
from ..services.rbd import RbdConfiguration
from ..tools import TaskManager, str_to_bool
from . import APIDoc, APIRouter, Endpoint, EndpointDoc, ReadPermission, \
RESTController, Task, UIRouter
# Response schema for the pool endpoints: list of pool dicts mirroring the
# fields of the cluster's OSD-map pool entries.
POOL_SCHEMA = ([{
    "pool": (int, "pool id"),
    "pool_name": (str, "pool name"),
    "flags": (int, ""),
    "flags_names": (str, "flags name"),
    "type": (str, "type of pool"),
    "size": (int, "pool size"),
    "min_size": (int, ""),
    "crush_rule": (str, ""),
    "object_hash": (int, ""),
    "pg_autoscale_mode": (str, ""),
    "pg_num": (int, ""),
    "pg_placement_num": (int, ""),
    "pg_placement_num_target": (int, ""),
    "pg_num_target": (int, ""),
    "pg_num_pending": (int, ""),
    "last_pg_merge_meta": ({
        "ready_epoch": (int, ""),
        "last_epoch_started": (int, ""),
        "last_epoch_clean": (int, ""),
        "source_pgid": (str, ""),
        "source_version": (str, ""),
        "target_version": (str, ""),
    }, ""),
    "auid": (int, ""),
    "snap_mode": (str, ""),
    "snap_seq": (int, ""),
    "snap_epoch": (int, ""),
    "pool_snaps": ([str], ""),
    "quota_max_bytes": (int, ""),
    "quota_max_objects": (int, ""),
    "tiers": ([str], ""),
    "tier_of": (int, ""),
    "read_tier": (int, ""),
    "write_tier": (int, ""),
    "cache_mode": (str, ""),
    "target_max_bytes": (int, ""),
    "target_max_objects": (int, ""),
    "cache_target_dirty_ratio_micro": (int, ""),
    "cache_target_dirty_high_ratio_micro": (int, ""),
    "cache_target_full_ratio_micro": (int, ""),
    "cache_min_flush_age": (int, ""),
    "cache_min_evict_age": (int, ""),
    "erasure_code_profile": (str, ""),
    "hit_set_params": ({
        "type": (str, "")
    }, ""),
    "hit_set_period": (int, ""),
    "hit_set_count": (int, ""),
    "use_gmt_hitset": (bool, ""),
    "min_read_recency_for_promote": (int, ""),
    "min_write_recency_for_promote": (int, ""),
    "hit_set_grade_decay_rate": (int, ""),
    "hit_set_search_last_n": (int, ""),
    "grade_table": ([str], ""),
    "stripe_width": (int, ""),
    "expected_num_objects": (int, ""),
    "fast_read": (bool, ""),
    "options": ({
        "pg_num_min": (int, ""),
        "pg_num_max": (int, "")
    }, ""),
    "application_metadata": ([str], ""),
    "create_time": (str, ""),
    "last_change": (str, ""),
    "last_force_op_resend": (str, ""),
    "last_force_op_resend_prenautilus": (str, ""),
    "last_force_op_resend_preluminous": (str, ""),
    "removed_snaps": ([str], "")
}])
def pool_task(name, metadata, wait_for=2.0):
    """Decorator factory wrapping an endpoint in a ``pool/<name>`` task."""
    task_name = "pool/{}".format(name)
    return Task(task_name, metadata, wait_for)
@APIRouter('/pool', Scope.POOL)
@APIDoc("Get pool details by pool name", "Pool")
class Pool(RESTController):
@staticmethod
def _serialize_pool(pool, attrs):
if not attrs or not isinstance(attrs, list):
attrs = pool.keys()
crush_rules = {r['rule_id']: r["rule_name"] for r in mgr.get('osd_map_crush')['rules']}
res: Dict[Union[int, str], Union[str, List[Any]]] = {}
for attr in attrs:
if attr not in pool:
continue
if attr == 'type':
res[attr] = {1: 'replicated', 3: 'erasure'}[pool[attr]]
elif attr == 'crush_rule':
res[attr] = crush_rules[pool[attr]]
elif attr == 'application_metadata':
res[attr] = list(pool[attr].keys())
else:
res[attr] = pool[attr]
# pool_name is mandatory
res['pool_name'] = pool['pool_name']
return res
@classmethod
def _pool_list(cls, attrs=None, stats=False):
if attrs:
attrs = attrs.split(',')
if str_to_bool(stats):
pools = CephService.get_pool_list_with_stats()
else:
pools = CephService.get_pool_list()
return [cls._serialize_pool(pool, attrs) for pool in pools]
@EndpointDoc("Display Pool List",
parameters={
'attrs': (str, 'Pool Attributes'),
'stats': (bool, 'Pool Stats')
},
responses={200: POOL_SCHEMA})
def list(self, attrs=None, stats=False):
return self._pool_list(attrs, stats)
@classmethod
def _get(cls, pool_name: str, attrs: Optional[str] = None, stats: bool = False) -> dict:
pools = cls._pool_list(attrs, stats)
pool = [p for p in pools if p['pool_name'] == pool_name]
if not pool:
raise cherrypy.NotFound('No such pool')
return pool[0]
def get(self, pool_name: str, attrs: Optional[str] = None, stats: bool = False) -> dict:
pool = self._get(pool_name, attrs, stats)
pool['configuration'] = RbdConfiguration(pool_name).list()
return pool
@pool_task('delete', ['{pool_name}'])
@handle_send_command_error('pool')
def delete(self, pool_name):
return CephService.send_command('mon', 'osd pool delete', pool=pool_name, pool2=pool_name,
yes_i_really_really_mean_it=True)
    @pool_task('edit', ['{pool_name}'])
    def set(self, pool_name, flags=None, application_metadata=None, configuration=None, **kwargs):
        """Update pool settings; a ``pool`` key in kwargs renames the pool.

        The rename (if any) happens inside `_set_pool_values`, so the
        follow-up configuration and PG wait must use the new name.
        """
        self._set_pool_values(pool_name, application_metadata, flags, True, kwargs)
        if kwargs.get('pool'):
            pool_name = kwargs['pool']
        RbdConfiguration(pool_name).set_configuration(configuration)
        self._wait_for_pgs(pool_name)
@pool_task('create', {'pool_name': '{pool}'})
@handle_send_command_error('pool')
def create(self, pool, pg_num, pool_type, erasure_code_profile=None, flags=None,
application_metadata=None, rule_name=None, configuration=None, **kwargs):
ecp = erasure_code_profile if erasure_code_profile else None
CephService.send_command('mon', 'osd pool create', pool=pool, pg_num=int(pg_num),
pgp_num=int(pg_num), pool_type=pool_type, erasure_code_profile=ecp,
rule=rule_name)
self._set_pool_values(pool, application_metadata, flags, False, kwargs)
RbdConfiguration(pool).set_configuration(configuration)
self._wait_for_pgs(pool)
    def _set_pool_values(self, pool, application_metadata, flags, update_existing, kwargs):
        """Apply flags, application metadata, quotas and remaining settings.

        :param update_existing: True when editing an existing pool (enables
            compression-removal handling and application-metadata diffing).
        :param kwargs: remaining key/value settings; consumed quota keys are
            popped, the rest is forwarded to `_set_pool_keys`.
        """
        current_pool = self._get(pool)
        if update_existing and kwargs.get('compression_mode') == 'unset':
            # Clear compression-related options that were previously set.
            self._prepare_compression_removal(current_pool.get('options'), kwargs)
        if flags and 'ec_overwrites' in flags:
            CephService.send_command('mon', 'osd pool set', pool=pool, var='allow_ec_overwrites',
                                     val='true')
        if application_metadata is not None:
            def set_app(app_metadata, set_app_what):
                for app in app_metadata:
                    CephService.send_command('mon', 'osd pool application ' + set_app_what,
                                             pool=pool, app=app, yes_i_really_mean_it=True)
            # Diff against the currently enabled applications so only the
            # delta is disabled/enabled.
            if update_existing:
                original_app_metadata = set(
                    cast(Iterable[Any], current_pool.get('application_metadata')))
            else:
                original_app_metadata = set()
            set_app(original_app_metadata - set(application_metadata), 'disable')
            set_app(set(application_metadata) - original_app_metadata, 'enable')
        quotas = {}
        quotas['max_objects'] = kwargs.pop('quota_max_objects', None)
        quotas['max_bytes'] = kwargs.pop('quota_max_bytes', None)
        self._set_quotas(pool, quotas)
        self._set_pool_keys(pool, kwargs)
    def _set_pool_keys(self, pool, pool_items):
        """Apply remaining key/value settings; a 'pool' key triggers a rename.

        The rename is deferred to the end so all other settings are applied
        under the original pool name first.
        """
        def set_key(key, value):
            CephService.send_command('mon', 'osd pool set', pool=pool, var=key, val=str(value))
        update_name = False
        for key, value in pool_items.items():
            if key == 'pool':
                update_name = True
                destpool = value
            else:
                set_key(key, value)
                if key == 'pg_num':
                    # Keep the placement-group placement count in sync.
                    set_key('pgp_num', value)
        if update_name:
            CephService.send_command('mon', 'osd pool rename', srcpool=pool, destpool=destpool)
def _set_quotas(self, pool, quotas):
for field, value in quotas.items():
if value is not None:
CephService.send_command('mon', 'osd pool set-quota',
pool=pool, field=field, val=str(value))
def _prepare_compression_removal(self, options, kwargs):
"""
Presets payload with values to remove compression attributes in case they are not
needed anymore.
In case compression is not needed the dashboard will send 'compression_mode' with the
value 'unset'.
:param options: All set options for the current pool.
:param kwargs: Payload of the PUT / POST call
"""
if options is not None:
def reset_arg(arg, value):
if options.get(arg):
kwargs[arg] = value
for arg in ['compression_min_blob_size', 'compression_max_blob_size',
'compression_required_ratio']:
reset_arg(arg, '0')
reset_arg('compression_algorithm', 'unset')
@classmethod
def _wait_for_pgs(cls, pool_name):
"""
Keep the task waiting for until all pg changes are complete
:param pool_name: The name of the pool.
:type pool_name: string
"""
current_pool = cls._get(pool_name)
initial_pgs = int(current_pool['pg_placement_num']) + int(current_pool['pg_num'])
cls._pg_wait_loop(current_pool, initial_pgs)
@classmethod
def _pg_wait_loop(cls, pool, initial_pgs):
"""
Compares if all pg changes are completed, if not it will call itself
until all changes are completed.
:param pool: The dict that represents a pool.
:type pool: dict
:param initial_pgs: The pg and pg_num count before any change happened.
:type initial_pgs: int
"""
if 'pg_num_target' in pool:
target = int(pool['pg_num_target']) + int(pool['pg_placement_num_target'])
current = int(pool['pg_placement_num']) + int(pool['pg_num'])
if current != target:
max_diff = abs(target - initial_pgs)
diff = max_diff - abs(target - current)
percentage = int(round(diff / float(max_diff) * 100))
TaskManager.current_task().set_progress(percentage)
time.sleep(4)
cls._pg_wait_loop(cls._get(pool['pool_name']), initial_pgs)
@RESTController.Resource()
@ReadPermission
def configuration(self, pool_name):
return RbdConfiguration(pool_name).list()
@UIRouter('/pool', Scope.POOL)
@APIDoc("Dashboard UI helper function; not part of the public API", "PoolUi")
class PoolUi(Pool):
    @Endpoint()
    @ReadPermission
    def info(self):
        """Used by the create-pool dialog"""
        osd_map_crush = mgr.get('osd_map_crush')
        options = mgr.get('config_options')['options']
        def rules(pool_type):
            # CRUSH rules matching the pool type (1=replicated, 3=erasure).
            return [r
                    for r in osd_map_crush['rules']
                    if r['type'] == pool_type]
        def all_bluestore():
            return all(o['osd_objectstore'] == 'bluestore'
                       for o in mgr.get('osd_metadata').values())
        def get_config_option_enum(conf_name):
            # Enum values of a config option, with empty strings dropped.
            return [[v for v in o['enum_values'] if len(v) > 0]
                    for o in options
                    if o['name'] == conf_name][0]
        profiles = CephService.get_erasure_code_profiles()
        # Map rule/profile name -> names of pools using it.
        used_rules: Dict[str, List[str]] = {}
        used_profiles: Dict[str, List[str]] = {}
        pool_names = []
        for p in self._pool_list():
            name = p['pool_name']
            pool_names.append(name)
            rule = p['crush_rule']
            if rule in used_rules:
                used_rules[rule].append(name)
            else:
                used_rules[rule] = [name]
            profile = p['erasure_code_profile']
            if profile in used_profiles:
                used_profiles[profile].append(name)
            else:
                used_profiles[profile] = [name]
        mgr_config = mgr.get('config')
        return {
            "pool_names": pool_names,
            "crush_rules_replicated": rules(1),
            "crush_rules_erasure": rules(3),
            "is_all_bluestore": all_bluestore(),
            "osd_count": len(mgr.get('osd_map')['osds']),
            "bluestore_compression_algorithm": mgr_config['bluestore_compression_algorithm'],
            "compression_algorithms": get_config_option_enum('bluestore_compression_algorithm'),
            "compression_modes": get_config_option_enum('bluestore_compression_mode'),
            "pg_autoscale_default_mode": mgr_config['osd_pool_default_pg_autoscale_mode'],
            "pg_autoscale_modes": get_config_option_enum('osd_pool_default_pg_autoscale_mode'),
            "erasure_code_profiles": profiles,
            "used_rules": used_rules,
            "used_profiles": used_profiles,
            'nodes': mgr.get('osd_map_tree')['nodes']
        }
# Convenience subclass used to create a replicated pool pre-tagged with the
# 'rbd' application (e.g. for RBD mirroring).
class RBDPool(Pool):
    def create(self, pool='rbd-mirror'):  # pylint: disable=arguments-differ
        """Create a replicated pool with the 'rbd' application enabled."""
        super().create(pool, pg_num=1, pool_type='replicated',
                       rule_name='replicated_rule', application_metadata=['rbd'])
| 13,864 | 38.166667 | 100 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/prometheus.py
|
# -*- coding: utf-8 -*-
import json
import os
import tempfile
from datetime import datetime
import requests
from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from ..services import ceph_service
from ..services.settings import SettingsService
from ..settings import Options, Settings
from . import APIDoc, APIRouter, BaseController, Endpoint, RESTController, Router, UIRouter
@Router('/api/prometheus_receiver', secure=False)
class PrometheusReceiver(BaseController):
    """
    The receiver is needed in order to receive alert notifications (reports)
    """
    # Class-level store: all received notifications, in arrival order.
    notifications = []

    @Endpoint('POST', path='/', version=None)
    def fetch_alert(self, **notification):
        """Record an incoming notification, stamped with time and index id."""
        notification['notified'] = datetime.now().isoformat()
        notification['id'] = str(len(PrometheusReceiver.notifications))
        PrometheusReceiver.notifications.append(notification)
class PrometheusRESTController(RESTController):
    """Shared proxy logic for the Prometheus and Alertmanager HTTP APIs."""

    def prometheus_proxy(self, method, path, params=None, payload=None):
        # type (str, str, dict, dict)
        """Forward a request to the configured Prometheus API endpoint."""
        user, password, cert_file = self.get_access_info('prometheus')
        verify = cert_file.name if cert_file else Settings.PROMETHEUS_API_SSL_VERIFY
        response = self._proxy(self._get_api_url(Settings.PROMETHEUS_API_HOST),
                               method, path, 'Prometheus', params, payload,
                               user=user, password=password, verify=verify)
        if cert_file:
            cert_file.close()
            os.unlink(cert_file.name)
        return response

    def alert_proxy(self, method, path, params=None, payload=None):
        # type (str, str, dict, dict)
        """Forward a request to the configured Alertmanager API endpoint."""
        user, password, cert_file = self.get_access_info('alertmanager')
        verify = cert_file.name if cert_file else Settings.ALERTMANAGER_API_SSL_VERIFY
        response = self._proxy(self._get_api_url(Settings.ALERTMANAGER_API_HOST),
                               method, path, 'Alertmanager', params, payload,
                               user=user, password=password, verify=verify)
        if cert_file:
            cert_file.close()
            os.unlink(cert_file.name)
        return response

    def get_access_info(self, module_name):
        # type (str) -> (str, str, NamedTemporaryFile)
        """Return (user, password, cert_file) for a monitoring module.

        With cephadm's secure monitoring stack enabled, basic-auth credentials
        and a CA certificate are fetched via ``orch <module> access info``.
        The certificate is written to a NamedTemporaryFile(delete=False); the
        caller is responsible for closing and unlinking it.
        """
        if module_name not in ['prometheus', 'alertmanager']:
            raise DashboardException(f'Invalid module name {module_name}', component='prometheus')
        user = None
        password = None
        cert_file = None
        # BUGFIX: the option value may arrive as the *string* 'false', and
        # bool('false') is True, which made the secure path always taken.
        # Interpret the textual value explicitly; real booleans stringify to
        # 'True'/'False' and are handled by the same comparison.
        secure_opt = mgr.get_module_option_ex('cephadm',
                                              'secure_monitoring_stack',
                                              'false')
        secure_monitoring_stack = str(secure_opt).lower() == 'true'
        if secure_monitoring_stack:
            cmd = {'prefix': f'orch {module_name} access info'}
            ret, out, _ = mgr.mon_command(cmd)
            if ret == 0 and out is not None:
                access_info = json.loads(out)
                user = access_info['user']
                password = access_info['password']
                certificate = access_info['certificate']
                cert_file = tempfile.NamedTemporaryFile(delete=False)
                cert_file.write(certificate.encode('utf-8'))
                cert_file.flush()
        return user, password, cert_file

    def _get_api_url(self, host):
        """Normalize the configured host into a ``<host>/api/v1`` base URL."""
        return host.rstrip('/') + '/api/v1'

    def balancer_status(self):
        """Return the output of the ``balancer status`` mon command."""
        return ceph_service.CephService.send_command('mon', 'balancer status')

    def _proxy(self, base_url, method, path, api_name, params=None, payload=None, verify=True,
               user=None, password=None):
        # type (str, str, str, str, dict, dict, bool)
        """Issue the HTTP request and unwrap the Prometheus-style envelope.

        Raises DashboardException when the endpoint is unreachable (404), the
        body is not valid JSON, or the API reports a non-success status (400).
        """
        try:
            from requests.auth import HTTPBasicAuth
            auth = HTTPBasicAuth(user, password) if user and password else None
            response = requests.request(method, base_url + path, params=params,
                                        json=payload, verify=verify,
                                        auth=auth)
        except Exception:
            raise DashboardException(
                "Could not reach {}'s API on {}".format(api_name, base_url),
                http_status_code=404,
                component='prometheus')
        try:
            content = json.loads(response.content, strict=False)
        except json.JSONDecodeError as e:
            raise DashboardException(
                "Error parsing Prometheus Alertmanager response: {}".format(e.msg),
                component='prometheus')
        if content['status'] == 'success':  # pylint: disable=R1702
            if 'data' in content:
                if path == '/alerts':
                    # Only query the balancer when serving alerts: hide the
                    # CephPGImbalance alert while the balancer is active and
                    # reports nothing to optimize (imbalance is expected then).
                    # Previously 'balancer status' ran on *every* proxied call.
                    balancer_status = self.balancer_status()
                    if balancer_status['active'] and balancer_status['no_optimization_needed']:
                        return [alert for alert in content['data']
                                if alert['labels']['alertname'] != 'CephPGImbalance']
                return content['data']
            return content
        raise DashboardException(content, http_status_code=400, component='prometheus')
@APIRouter('/prometheus', Scope.PROMETHEUS)
@APIDoc("Prometheus Management API", "Prometheus")
class Prometheus(PrometheusRESTController):
    """REST endpoints that proxy alert, rule, silence and query requests."""

    def list(self, **params):
        """List active alerts from Alertmanager."""
        return self.alert_proxy('GET', '/alerts', params)

    @RESTController.Collection(method='GET')
    def rules(self, **params):
        """List alerting/recording rules from Prometheus."""
        return self.prometheus_proxy('GET', '/rules', params)

    @RESTController.Collection(method='GET', path='/data')
    def get_prometeus_data(self, **params):
        """Run a range query; the client sends the PromQL under 'params'."""
        # The frontend sends the query string as 'params'; Prometheus'
        # /query_range endpoint expects it under 'query'.
        params['query'] = params.pop('params')
        return self.prometheus_proxy('GET', '/query_range', params)

    @RESTController.Collection(method='GET', path='/silences')
    def get_silences(self, **params):
        """List silences from Alertmanager."""
        return self.alert_proxy('GET', '/silences', params)

    @RESTController.Collection(method='POST', path='/silence', status=201)
    def create_silence(self, **params):
        """Create (or update) a silence in Alertmanager."""
        return self.alert_proxy('POST', '/silences', payload=params)

    @RESTController.Collection(method='DELETE', path='/silence/{s_id}', status=204)
    def delete_silence(self, s_id):
        """Expire the silence with the given id; no-op when s_id is falsy."""
        return self.alert_proxy('DELETE', '/silence/' + s_id) if s_id else None
@APIRouter('/prometheus/notifications', Scope.PROMETHEUS)
@APIDoc("Prometheus Notifications Management API", "PrometheusNotifications")
class PrometheusNotifications(RESTController):
    """Serve the notifications collected by PrometheusReceiver."""

    def list(self, **params):
        """Return notifications.

        The optional 'from' parameter selects either 'last' (only the most
        recent notification) or an index, returning everything after it.
        """
        if 'from' not in params:
            return PrometheusReceiver.notifications
        start = params['from']
        if start == 'last':
            return PrometheusReceiver.notifications[-1:]
        return PrometheusReceiver.notifications[int(start) + 1:]
@UIRouter('/prometheus', Scope.PROMETHEUS)
class PrometheusSettings(RESTController):
    """UI endpoint exposing a single Prometheus-related dashboard setting."""

    def get(self, name):
        """Return name, default, type and current value of the given setting."""
        # attribute_handler maps the external setting name to the internal
        # attribute name used by Options/Settings.
        with SettingsService.attribute_handler(name) as settings_name:
            setting = getattr(Options, settings_name)
        return {
            'name': settings_name,
            'default': setting.default_value,
            'type': setting.types_as_str(),
            'value': getattr(Settings, settings_name)
        }
| 7,293 | 41.905882 | 168 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/rbd.py
|
# -*- coding: utf-8 -*-
# pylint: disable=unused-argument
# pylint: disable=too-many-statements,too-many-branches
import logging
import math
from datetime import datetime
from functools import partial
import cherrypy
import rbd
from .. import mgr
from ..controllers.pool import RBDPool
from ..exceptions import DashboardException
from ..security import Scope
from ..services.ceph_service import CephService
from ..services.exception import handle_rados_error, handle_rbd_error, serialize_dashboard_exception
from ..services.rbd import MIRROR_IMAGE_MODE, RbdConfiguration, \
RbdImageMetadataService, RbdMirroringService, RbdService, \
RbdSnapshotService, format_bitmask, format_features, get_image_spec, \
parse_image_spec, rbd_call, rbd_image_call
from ..tools import ViewCache, str_to_bool
from . import APIDoc, APIRouter, BaseController, CreatePermission, \
DeletePermission, Endpoint, EndpointDoc, ReadPermission, RESTController, \
Task, UIRouter, UpdatePermission, allow_empty_body
from ._version import APIVersion
logger = logging.getLogger(__name__)
# OpenAPI response schemas used by the @EndpointDoc decorators below.
RBD_SCHEMA = ([{
    "value": ([str], ''),
    "pool_name": (str, 'pool name')
}])

RBD_TRASH_SCHEMA = [{
    "status": (int, ''),
    "value": ([str], ''),
    "pool_name": (str, 'pool name')
}]
# pylint: disable=not-callable
def RbdTask(name, metadata, wait_for):  # noqa: N802
    """Decorator factory for RBD endpoints that run as background tasks.

    Wraps the endpoint with rados/rbd error translation and registers it as a
    dashboard Task named 'rbd/<name>' with the given metadata and the number
    of seconds to wait before returning a task-in-progress response.
    """
    def composed_decorator(func):
        func = handle_rados_error('pool')(func)
        func = handle_rbd_error()(func)
        return Task("rbd/{}".format(name), metadata, wait_for,
                    partial(serialize_dashboard_exception, include_http_status=True))(func)
    return composed_decorator
@APIRouter('/block/image', Scope.RBD_IMAGE)
@APIDoc("RBD Management API", "Rbd")
class Rbd(RESTController):
    """REST endpoints for listing, creating, editing and deleting RBD images."""

    # Default page size for the paginated image listing.
    DEFAULT_LIMIT = 5

    def _rbd_list(self, pool_name=None, offset=0, limit=DEFAULT_LIMIT, search='', sort=''):
        """Collect images grouped by pool, enriched with config and metadata.

        Also sets the X-Total-Count response header so the frontend can
        paginate. When pool_name is falsy, all 'rbd' pools are scanned.
        """
        if pool_name:
            pools = [pool_name]
        else:
            pools = [p['pool_name'] for p in CephService.get_pool_list('rbd')]
        images, num_total_images = RbdService.rbd_pool_list(
            pools, offset=offset, limit=limit, search=search, sort=sort)
        cherrypy.response.headers['X-Total-Count'] = num_total_images
        pool_result = {}
        for i, image in enumerate(images):
            pool = image['pool_name']
            if pool not in pool_result:
                pool_result[pool] = {'value': [], 'pool_name': image['pool_name']}
            pool_result[pool]['value'].append(image)
            # Per-image config overrides and key/value metadata.
            images[i]['configuration'] = RbdConfiguration(
                pool, image['namespace'], image['name']).list()
            images[i]['metadata'] = rbd_image_call(
                pool, image['namespace'], image['name'],
                lambda ioctx, image: RbdImageMetadataService(image).list())
        return list(pool_result.values())

    @handle_rbd_error()
    @handle_rados_error('pool')
    @EndpointDoc("Display Rbd Images",
                 parameters={
                     'pool_name': (str, 'Pool Name'),
                     'limit': (int, 'limit'),
                     'offset': (int, 'offset'),
                 },
                 responses={200: RBD_SCHEMA})
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def list(self, pool_name=None, offset: int = 0, limit: int = DEFAULT_LIMIT,
             search: str = '', sort: str = ''):
        """Paginated image listing; offset/limit arrive as strings over HTTP."""
        return self._rbd_list(pool_name, offset=int(offset), limit=int(limit),
                              search=search, sort=sort)

    @handle_rbd_error()
    @handle_rados_error('pool')
    @EndpointDoc("Get Rbd Image Info",
                 parameters={
                     'image_spec': (str, 'URL-encoded "pool/rbd_name". e.g. "rbd%2Ffoo"'),
                     'omit_usage': (bool, 'When true, usage information is not returned'),
                 },
                 responses={200: RBD_SCHEMA})
    def get(self, image_spec, omit_usage=False):
        """Return details of a single image; invalid omit_usage defaults to False."""
        try:
            omit_usage_bool = str_to_bool(omit_usage)
        except ValueError:
            omit_usage_bool = False
        return RbdService.get_image(image_spec, omit_usage_bool)

    @RbdTask('create',
             {'pool_name': '{pool_name}', 'namespace': '{namespace}', 'image_name': '{name}'}, 2.0)
    def create(self, name, pool_name, size, namespace=None, schedule_interval='',
               obj_size=None, features=None, stripe_unit=None, stripe_count=None,
               data_pool=None, configuration=None, metadata=None,
               mirror_mode=None):
        """Create an image; optionally enable mirroring and a snapshot schedule."""
        RbdService.create(name, pool_name, size, namespace,
                          obj_size, features, stripe_unit, stripe_count,
                          data_pool, configuration, metadata)

        if mirror_mode:
            RbdMirroringService.enable_image(name, pool_name, namespace,
                                             MIRROR_IMAGE_MODE[mirror_mode])

        if schedule_interval:
            image_spec = get_image_spec(pool_name, namespace, name)
            RbdMirroringService.snapshot_schedule_add(image_spec, schedule_interval)

    @RbdTask('delete', ['{image_spec}'], 2.0)
    def delete(self, image_spec):
        """Delete an image permanently (not via trash)."""
        return RbdService.delete(image_spec)

    @RbdTask('edit', ['{image_spec}', '{name}'], 4.0)
    def set(self, image_spec, name=None, size=None, features=None,
            configuration=None, metadata=None, enable_mirror=None, primary=None,
            force=False, resync=False, mirror_mode=None, schedule_interval='',
            remove_scheduling=False):
        """Edit image properties; all arguments besides image_spec are optional."""
        return RbdService.set(image_spec, name, size, features,
                              configuration, metadata, enable_mirror, primary,
                              force, resync, mirror_mode, schedule_interval,
                              remove_scheduling)

    @RbdTask('copy',
             {'src_image_spec': '{image_spec}',
              'dest_pool_name': '{dest_pool_name}',
              'dest_namespace': '{dest_namespace}',
              'dest_image_name': '{dest_image_name}'}, 2.0)
    @RESTController.Resource('POST')
    @allow_empty_body
    def copy(self, image_spec, dest_pool_name, dest_namespace, dest_image_name,
             snapshot_name=None, obj_size=None, features=None,
             stripe_unit=None, stripe_count=None, data_pool=None,
             configuration=None, metadata=None):
        """Deep-copy an image (or one of its snapshots) to a new destination."""
        return RbdService.copy(image_spec, dest_pool_name, dest_namespace, dest_image_name,
                               snapshot_name, obj_size, features,
                               stripe_unit, stripe_count, data_pool,
                               configuration, metadata)

    @RbdTask('flatten', ['{image_spec}'], 2.0)
    @RESTController.Resource('POST')
    @UpdatePermission
    @allow_empty_body
    def flatten(self, image_spec):
        """Detach a cloned image from its parent by copying all blocks."""
        return RbdService.flatten(image_spec)

    @RESTController.Collection('GET')
    def default_features(self):
        """Return the cluster's default RBD feature set as a name list."""
        rbd_default_features = mgr.get('config')['rbd_default_features']
        return format_bitmask(int(rbd_default_features))

    @RESTController.Collection('GET')
    def clone_format_version(self):
        """Return the RBD clone format version.
        """
        rbd_default_clone_format = mgr.get('config')['rbd_default_clone_format']
        if rbd_default_clone_format != 'auto':
            return int(rbd_default_clone_format)
        osd_map = mgr.get_osdmap().dump()
        min_compat_client = osd_map.get('min_compat_client', '')
        require_min_compat_client = osd_map.get('require_min_compat_client', '')
        # NOTE: relies on Ceph release names comparing lexicographically in
        # release order around 'mimic' (jewel/kraken/luminous < mimic <
        # nautilus/octopus/...). Clone v2 requires mimic or newer.
        if max(min_compat_client, require_min_compat_client) < 'mimic':
            return 1

        return 2

    @RbdTask('trash/move', ['{image_spec}'], 2.0)
    @RESTController.Resource('POST')
    @allow_empty_body
    def move_trash(self, image_spec, delay=0):
        """Move an image to the trash.
        Images, even ones actively in-use by clones,
        can be moved to the trash and deleted at a later time.
        """
        return RbdService.move_image_to_trash(image_spec, delay)
@UIRouter('/block/rbd')
class RbdStatus(BaseController):
    """UI helper endpoints: RBD availability check and default pool setup."""

    @EndpointDoc("Display RBD Image feature status")
    @Endpoint()
    @ReadPermission
    def status(self):
        """Report whether any 'rbd'-tagged pool exists in the cluster."""
        status = {'available': True, 'message': None}
        if not CephService.get_pool_list('rbd'):
            status['available'] = False
            status['message'] = 'No Block Pool is available in the cluster. Please click ' \
                                'on \"Configure Default Pool\" button to ' \
                                'get started.'  # type: ignore
        return status

    @Endpoint('POST')
    @EndpointDoc('Configure Default Block Pool')
    @CreatePermission
    def configure(self):
        """Create the default 'rbd' pool if no block pool exists yet."""
        rbd_pool = RBDPool()

        if not CephService.get_pool_list('rbd'):
            rbd_pool.create('rbd')
@APIRouter('/block/image/{image_spec}/snap', Scope.RBD_IMAGE)
@APIDoc("RBD Snapshot Management API", "RbdSnapshot")
class RbdSnapshot(RESTController):
    """REST endpoints for snapshots of a single RBD image."""

    RESOURCE_ID = "snapshot_name"

    @RbdTask('snap/create',
             ['{image_spec}', '{snapshot_name}', '{mirrorImageSnapshot}'], 2.0)
    def create(self, image_spec, snapshot_name, mirrorImageSnapshot):
        """Create a snapshot; for snapshot-mode mirrored images optionally
        create a mirror snapshot instead of a regular named one."""
        pool_name, namespace, image_name = parse_image_spec(image_spec)

        def _create_snapshot(ioctx, img, snapshot_name):
            mirror_info = img.mirror_image_get_info()
            mirror_mode = img.mirror_image_get_mode()
            if (mirror_info['state'] == rbd.RBD_MIRROR_IMAGE_ENABLED and mirror_mode == rbd.RBD_MIRROR_IMAGE_MODE_SNAPSHOT) and mirrorImageSnapshot:  # noqa E501 #pylint: disable=line-too-long
                img.mirror_image_create_snapshot()
            else:
                img.create_snap(snapshot_name)

        return rbd_image_call(pool_name, namespace, image_name, _create_snapshot,
                              snapshot_name)

    @RbdTask('snap/delete',
             ['{image_spec}', '{snapshot_name}'], 2.0)
    def delete(self, image_spec, snapshot_name):
        """Remove a snapshot from the image."""
        return RbdSnapshotService.remove_snapshot(image_spec, snapshot_name)

    @RbdTask('snap/edit',
             ['{image_spec}', '{snapshot_name}'], 4.0)
    def set(self, image_spec, snapshot_name, new_snap_name=None,
            is_protected=None):
        """Rename a snapshot and/or toggle its protection flag."""
        def _edit(ioctx, img, snapshot_name):
            if new_snap_name and new_snap_name != snapshot_name:
                img.rename_snap(snapshot_name, new_snap_name)
                snapshot_name = new_snap_name
            if is_protected is not None and \
                    is_protected != img.is_protected_snap(snapshot_name):
                if is_protected:
                    img.protect_snap(snapshot_name)
                else:
                    img.unprotect_snap(snapshot_name)

        pool_name, namespace, image_name = parse_image_spec(image_spec)
        return rbd_image_call(pool_name, namespace, image_name, _edit, snapshot_name)

    @RbdTask('snap/rollback',
             ['{image_spec}', '{snapshot_name}'], 5.0)
    @RESTController.Resource('POST')
    @UpdatePermission
    @allow_empty_body
    def rollback(self, image_spec, snapshot_name):
        """Roll the image content back to the given snapshot."""
        def _rollback(ioctx, img, snapshot_name):
            img.rollback_to_snap(snapshot_name)

        pool_name, namespace, image_name = parse_image_spec(image_spec)
        return rbd_image_call(pool_name, namespace, image_name, _rollback, snapshot_name)

    @RbdTask('clone',
             {'parent_image_spec': '{image_spec}',
              'child_pool_name': '{child_pool_name}',
              'child_namespace': '{child_namespace}',
              'child_image_name': '{child_image_name}'}, 2.0)
    @RESTController.Resource('POST')
    @allow_empty_body
    def clone(self, image_spec, snapshot_name, child_pool_name,
              child_image_name, child_namespace=None, obj_size=None, features=None,
              stripe_unit=None, stripe_count=None, data_pool=None,
              configuration=None, metadata=None):
        """
        Clones a snapshot to an image
        """
        pool_name, namespace, image_name = parse_image_spec(image_spec)

        def _parent_clone(p_ioctx):
            def _clone(ioctx):
                # Set order
                l_order = None
                if obj_size and obj_size > 0:
                    # rbd 'order' is log2 of the object size.
                    l_order = int(round(math.log(float(obj_size), 2)))

                # Set features
                feature_bitmask = format_features(features)

                rbd_inst = rbd.RBD()
                rbd_inst.clone(p_ioctx, image_name, snapshot_name, ioctx,
                               child_image_name, feature_bitmask, l_order,
                               stripe_unit, stripe_count, data_pool)

                RbdConfiguration(pool_ioctx=ioctx, image_name=child_image_name).set_configuration(
                    configuration)
                if metadata:
                    with rbd.Image(ioctx, child_image_name) as image:
                        RbdImageMetadataService(image).set_metadata(metadata)

            return rbd_call(child_pool_name, child_namespace, _clone)

        rbd_call(pool_name, namespace, _parent_clone)
@APIRouter('/block/image/trash', Scope.RBD_IMAGE)
@APIDoc("RBD Trash Management API", "RbdTrash")
class RbdTrash(RESTController):
    """REST endpoints for the RBD trash (list, purge, restore, remove)."""

    RESOURCE_ID = "image_id_spec"

    def __init__(self):
        super().__init__()
        self.rbd_inst = rbd.RBD()

    @ViewCache()
    def _trash_pool_list(self, pool_name):
        """List trash entries of one pool across all of its namespaces."""
        with mgr.rados.open_ioctx(pool_name) as ioctx:
            result = []
            namespaces = self.rbd_inst.namespace_list(ioctx)
            # images without namespace
            namespaces.append('')
            for namespace in namespaces:
                ioctx.set_namespace(namespace)
                images = self.rbd_inst.trash_list(ioctx)
                for trash in images:
                    trash['pool_name'] = pool_name
                    trash['namespace'] = namespace
                    # Render datetimes as ISO strings with an explicit UTC 'Z'.
                    trash['deletion_time'] = "{}Z".format(trash['deletion_time'].isoformat())
                    trash['deferment_end_time'] = "{}Z".format(
                        trash['deferment_end_time'].isoformat())
                    result.append(trash)
            return result

    def _trash_list(self, pool_name=None):
        """Return per-pool trash listings (all 'rbd' pools when unset)."""
        if pool_name:
            pools = [pool_name]
        else:
            pools = [p['pool_name'] for p in CephService.get_pool_list('rbd')]

        result = []
        for pool in pools:
            # pylint: disable=unbalanced-tuple-unpacking
            status, value = self._trash_pool_list(pool)
            result.append({'status': status, 'value': value, 'pool_name': pool})
        return result

    @handle_rbd_error()
    @handle_rados_error('pool')
    @EndpointDoc("Get RBD Trash Details by pool name",
                 parameters={
                     'pool_name': (str, 'Name of the pool'),
                 },
                 responses={200: RBD_TRASH_SCHEMA})
    def list(self, pool_name=None):
        """List all entries from trash."""
        return self._trash_list(pool_name)

    @handle_rbd_error()
    @handle_rados_error('pool')
    @RbdTask('trash/purge', ['{pool_name}'], 2.0)
    @RESTController.Collection('POST', query_params=['pool_name'])
    @DeletePermission
    @allow_empty_body
    def purge(self, pool_name=None):
        """Remove all expired images from trash."""
        # ISO-8601 strings compare chronologically, so a plain string
        # comparison against 'now' suffices here.
        now = "{}Z".format(datetime.utcnow().isoformat())
        pools = self._trash_list(pool_name)

        for pool in pools:
            for image in pool['value']:
                if image['deferment_end_time'] < now:
                    logger.info('Removing trash image %s (pool=%s, namespace=%s, name=%s)',
                                image['id'], pool['pool_name'], image['namespace'], image['name'])
                    rbd_call(pool['pool_name'], image['namespace'],
                             self.rbd_inst.trash_remove, image['id'], 0)

    @RbdTask('trash/restore', ['{image_id_spec}', '{new_image_name}'], 2.0)
    @RESTController.Resource('POST')
    @CreatePermission
    @allow_empty_body
    def restore(self, image_id_spec, new_image_name):
        """Restore an image from trash."""
        pool_name, namespace, image_id = parse_image_spec(image_id_spec)
        return rbd_call(pool_name, namespace, self.rbd_inst.trash_restore, image_id,
                        new_image_name)

    @RbdTask('trash/remove', ['{image_id_spec}'], 2.0)
    def delete(self, image_id_spec, force=False):
        """Delete an image from trash.
        If image deferment time has not expired you can not removed it unless use force.
        But an actively in-use by clones or has snapshots can not be removed.
        """
        pool_name, namespace, image_id = parse_image_spec(image_id_spec)
        return rbd_call(pool_name, namespace, self.rbd_inst.trash_remove, image_id,
                        int(str_to_bool(force)))
@APIRouter('/block/pool/{pool_name}/namespace', Scope.RBD_IMAGE)
@APIDoc("RBD Namespace Management API", "RbdNamespace")
class RbdNamespace(RESTController):
    """REST endpoints to create, delete and list RBD namespaces of a pool."""

    def __init__(self):
        super().__init__()
        self.rbd_inst = rbd.RBD()

    def create(self, pool_name, namespace):
        """Create a namespace; rejects duplicates with a DashboardException."""
        with mgr.rados.open_ioctx(pool_name) as ioctx:
            namespaces = self.rbd_inst.namespace_list(ioctx)
            if namespace in namespaces:
                raise DashboardException(
                    msg='Namespace already exists',
                    code='namespace_already_exists',
                    component='rbd')
            return self.rbd_inst.namespace_create(ioctx, namespace)

    def delete(self, pool_name, namespace):
        """Delete a namespace; refuses when it still contains images."""
        with mgr.rados.open_ioctx(pool_name) as ioctx:
            # pylint: disable=unbalanced-tuple-unpacking
            images, _ = RbdService.rbd_pool_list([pool_name], namespace=namespace)
            if images:
                raise DashboardException(
                    msg='Namespace contains images which must be deleted first',
                    code='namespace_contains_images',
                    component='rbd')
            return self.rbd_inst.namespace_remove(ioctx, namespace)

    def list(self, pool_name):
        """Return each namespace of the pool with its image count."""
        with mgr.rados.open_ioctx(pool_name) as ioctx:
            result = []
            namespaces = self.rbd_inst.namespace_list(ioctx)
            for namespace in namespaces:
                # pylint: disable=unbalanced-tuple-unpacking
                images, _ = RbdService.rbd_pool_list([pool_name], namespace=namespace)
                result.append({
                    'namespace': namespace,
                    'num_images': len(images) if images else 0
                })
            return result
| 18,767 | 40.067834 | 192 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/rbd_mirroring.py
|
# -*- coding: utf-8 -*-
import json
import logging
import re
from enum import IntEnum
from functools import partial
from typing import NamedTuple, Optional, no_type_check
import cherrypy
import rbd
from .. import mgr
from ..controllers.pool import RBDPool
from ..controllers.service import Service
from ..security import Scope
from ..services.ceph_service import CephService
from ..services.exception import handle_rados_error, handle_rbd_error, serialize_dashboard_exception
from ..services.orchestrator import OrchClient
from ..services.rbd import rbd_call
from ..tools import ViewCache
from . import APIDoc, APIRouter, BaseController, CreatePermission, Endpoint, \
EndpointDoc, ReadPermission, RESTController, Task, UIRouter, \
UpdatePermission, allow_empty_body
logger = logging.getLogger('controllers.rbd_mirror')
class MirrorHealth(IntEnum):
    """Mirroring health severities; numeric order is used for comparisons."""
    # RBD defined mirroring health states in src/tools/rbd/action/MirrorPool.cc where the order
    # is relevant.
    MIRROR_HEALTH_OK = 0
    MIRROR_HEALTH_UNKNOWN = 1
    MIRROR_HEALTH_WARNING = 2
    MIRROR_HEALTH_ERROR = 3

    # extra states for the dashboard
    MIRROR_HEALTH_DISABLED = 4
    MIRROR_HEALTH_INFO = 5
# pylint: disable=not-callable
def handle_rbd_mirror_error():
    """Compose the rados and rbd error handlers used by mirroring endpoints."""
    def composed_decorator(func):
        func = handle_rados_error('rbd-mirroring')(func)
        return handle_rbd_error()(func)
    return composed_decorator
# pylint: disable=not-callable
def RbdMirroringTask(name, metadata, wait_for):  # noqa: N802
    """Decorator factory: error handling + background Task 'rbd/mirroring/<name>'."""
    def composed_decorator(func):
        func = handle_rbd_mirror_error()(func)
        return Task("rbd/mirroring/{}".format(name), metadata, wait_for,
                    partial(serialize_dashboard_exception, include_http_status=True))(func)
    return composed_decorator
def get_daemons():
    """Collect rbd-mirror daemons from the service map, with parsed status
    and derived health, sorted by instance id."""
    daemons = []
    for hostname, server in CephService.get_service_map('rbd-mirror').items():
        for service in server['services']:
            id = service['id']  # pylint: disable=W0622
            metadata = service['metadata']
            status = service['status'] or {}
            try:
                status = json.loads(status['json'])
            except (ValueError, KeyError):
                # Missing or malformed status payload: treat as empty.
                status = {}

            instance_id = metadata['instance_id']
            if id == instance_id:
                # new version that supports per-cluster leader elections
                id = metadata['id']

            # extract per-daemon service data and health
            daemon = {
                'id': id,
                'instance_id': instance_id,
                'version': metadata['ceph_version'],
                'server_hostname': hostname,
                'service': service,
                'server': server,
                'metadata': metadata,
                'status': status
            }
            daemon = dict(daemon, **get_daemon_health(daemon))
            daemons.append(daemon)

    return sorted(daemons, key=lambda k: k['instance_id'])
def get_daemon_health(daemon):
    """Derive a daemon's overall mirror health from its per-pool callouts.

    Each pool contributes ERROR if it has an error-level callout, WARNING if
    it has a warning-level callout, otherwise OK. The worst pool level wins;
    a daemon with no pool status at all stays DISABLED.
    """
    worst = MirrorHealth.MIRROR_HEALTH_DISABLED
    for pool_data in daemon['status'].values():
        levels = [c['level'] for c in pool_data.get('callouts', {}).values()]
        if 'error' in levels:
            pool_level = MirrorHealth.MIRROR_HEALTH_ERROR
        elif 'warning' in levels:
            pool_level = MirrorHealth.MIRROR_HEALTH_WARNING
        else:
            pool_level = MirrorHealth.MIRROR_HEALTH_OK
        # DISABLED (4) sorts above ERROR numerically, so treat it as
        # "no data yet" rather than comparing it with max().
        if worst == MirrorHealth.MIRROR_HEALTH_DISABLED or pool_level > worst:
            worst = pool_level
    return {'health': worst}
def get_pools(daemons):  # pylint: disable=R0912, R0915
    """Return mirroring stats for all replicated (type 1) 'rbd' pools,
    merged with health information derived from the given daemons."""
    pool_names = [pool['pool_name'] for pool in CephService.get_pool_list('rbd')
                  if pool.get('type', 1) == 1]
    pool_stats = _get_pool_stats(pool_names)
    _update_pool_stats(daemons, pool_stats)
    return pool_stats
def transform_mirror_health(stat):
    """Replace the MirrorHealth enum in stat['health'] with its display name
    and set a matching stat['health_color'] (in place, returns None)."""
    display = {
        MirrorHealth.MIRROR_HEALTH_ERROR: ('Error', 'error'),
        MirrorHealth.MIRROR_HEALTH_WARNING: ('Warning', 'warning'),
        MirrorHealth.MIRROR_HEALTH_UNKNOWN: ('Unknown', 'warning'),
        MirrorHealth.MIRROR_HEALTH_OK: ('OK', 'success'),
        MirrorHealth.MIRROR_HEALTH_DISABLED: ('Disabled', 'info'),
    }
    # Any unmapped state (e.g. MIRROR_HEALTH_INFO) falls back to OK/success,
    # matching the original if/elif chain's default.
    health, health_color = display.get(stat['health'], ('OK', 'success'))
    stat['health'] = health
    stat['health_color'] = health_color
def _update_pool_stats(daemons, pool_stats):
    """Fold daemon status into pool stats, then convert each pool's health
    enum into its display string/color form."""
    _update_pool_stats_with_daemons(daemons, pool_stats)
    for stat in pool_stats.values():
        transform_mirror_health(stat)
def _update_pool_stats_with_daemons(daemons, pool_stats):
    """Merge per-daemon pool status (leader info, image counts, health)
    into the pool_stats dict, keyed by pool name (mutates pool_stats)."""
    for daemon in daemons:
        for _, pool_data in daemon['status'].items():
            pool_stat = pool_stats.get(pool_data['name'], None)  # type: ignore
            if pool_stat is None:
                continue
            if pool_data.get('leader', False):
                # leader instance stores image counts
                pool_stat['leader_id'] = daemon['metadata']['instance_id']
                pool_stat['image_local_count'] = pool_data.get('image_local_count', 0)
                pool_stat['image_remote_count'] = pool_data.get('image_remote_count', 0)
            # NOTE(review): max() over MirrorHealth values — DISABLED (4)
            # numerically outranks ERROR (3), so a DISABLED side wins here;
            # presumably intentional, verify against MirrorPool.cc ordering.
            pool_stat['health'] = max(pool_stat['health'], daemon['health'])
def _get_pool_stats(pool_names):
    """Query mirror mode, peers and inferred health for each pool.

    Pools whose ioctx cannot be opened are skipped entirely; pools whose
    mirror settings cannot be read get mode 'unknown' and no peers.
    """
    pool_stats = {}
    rbdctx = rbd.RBD()
    for pool_name in pool_names:
        logger.debug("Constructing IOCtx %s", pool_name)
        try:
            ioctx = mgr.rados.open_ioctx(pool_name)
        except TypeError:
            logger.exception("Failed to open pool %s", pool_name)
            continue

        try:
            mirror_mode = rbdctx.mirror_mode_get(ioctx)
            peer_uuids = [x['uuid'] for x in rbdctx.mirror_peer_list(ioctx)]
        except:  # noqa pylint: disable=W0702
            logger.exception("Failed to query mirror settings %s", pool_name)
            mirror_mode = None
            peer_uuids = []

        stats = {}
        if mirror_mode == rbd.RBD_MIRROR_MODE_DISABLED:
            mirror_mode = "disabled"
            stats['health'] = MirrorHealth.MIRROR_HEALTH_DISABLED
        elif mirror_mode == rbd.RBD_MIRROR_MODE_IMAGE:
            mirror_mode = "image"
        elif mirror_mode == rbd.RBD_MIRROR_MODE_POOL:
            mirror_mode = "pool"
        else:
            mirror_mode = "unknown"

        if mirror_mode != "disabled":
            # In case of a pool being enabled we will infer the health like the RBD cli tool does
            # in src/tools/rbd/action/MirrorPool.cc::execute_status
            mirror_image_health: MirrorHealth = MirrorHealth.MIRROR_HEALTH_OK
            for status, _ in rbdctx.mirror_image_status_summary(ioctx):
                # Any state other than replaying/stopped is at least a
                # warning; an explicit error state escalates to ERROR.
                if (mirror_image_health < MirrorHealth.MIRROR_HEALTH_WARNING
                        and status != rbd.MIRROR_IMAGE_STATUS_STATE_REPLAYING
                        and status != rbd.MIRROR_IMAGE_STATUS_STATE_STOPPED):
                    mirror_image_health = MirrorHealth.MIRROR_HEALTH_WARNING
                if (mirror_image_health < MirrorHealth.MIRROR_HEALTH_ERROR
                        and status == rbd.MIRROR_IMAGE_STATUS_STATE_ERROR):
                    mirror_image_health = MirrorHealth.MIRROR_HEALTH_ERROR
            stats['health'] = mirror_image_health

        pool_stats[pool_name] = dict(stats, **{
            'mirror_mode': mirror_mode,
            'peer_uuids': peer_uuids
        })

    return pool_stats
@ViewCache()
def get_daemons_and_pools():  # pylint: disable=R0915
    """Cached view: daemons plus pool stats, with daemon health already
    converted to display form."""
    daemons = get_daemons()
    daemons_and_pools = {
        'daemons': daemons,
        'pools': get_pools(daemons)
    }
    # get_pools() consumed the raw enum health; now render it for display.
    for daemon in daemons:
        transform_mirror_health(daemon)
    return daemons_and_pools
class ReplayingData(NamedTuple):
    """Progress fields parsed from a 'replaying' image status description."""
    bytes_per_second: Optional[int] = None
    seconds_until_synced: Optional[int] = None
    syncing_percent: Optional[float] = None
    entries_behind_primary: Optional[int] = None
@ViewCache()
@no_type_check
def _get_pool_datum(pool_name):
    """Cached view: per-pool mirror image statuses mapped to display states.

    Returns None when the pool's ioctx cannot be opened; otherwise a dict
    with 'mirror_images' sorted by image name.
    """
    data = {}
    logger.debug("Constructing IOCtx %s", pool_name)
    try:
        ioctx = mgr.rados.open_ioctx(pool_name)
    except TypeError:
        logger.exception("Failed to open pool %s", pool_name)
        return None

    # Maps librbd status states (plus the synthetic 'down' key used when an
    # image is not up) to the dashboard's health/state display tuple.
    mirror_state = {
        'down': {
            'health': 'issue',
            'state_color': 'warning',
            'state': 'Unknown',
            'description': None
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_UNKNOWN: {
            'health': 'issue',
            'state_color': 'warning',
            'state': 'Unknown'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_ERROR: {
            'health': 'issue',
            'state_color': 'error',
            'state': 'Error'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_SYNCING: {
            'health': 'syncing',
            'state_color': 'success',
            'state': 'Syncing'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY: {
            'health': 'syncing',
            'state_color': 'success',
            'state': 'Starting'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_REPLAYING: {
            'health': 'syncing',
            'state_color': 'success',
            'state': 'Replaying'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY: {
            'health': 'ok',
            'state_color': 'success',
            'state': 'Stopping'
        },
        rbd.MIRROR_IMAGE_STATUS_STATE_STOPPED: {
            'health': 'ok',
            'state_color': 'info',
            'state': 'Stopped'
        }

    }

    rbdctx = rbd.RBD()
    try:
        mirror_image_status = rbdctx.mirror_image_status_list(ioctx)
        data['mirror_images'] = sorted([
            dict({
                'name': image['name'],
                'description': image['description']
            }, **mirror_state['down' if not image['up'] else image['state']])
            for image in mirror_image_status
        ], key=lambda k: k['name'])
    except rbd.ImageNotFound:
        # Image vanished between listing and status query: ignore.
        pass
    except:  # noqa pylint: disable=W0702
        logger.exception("Failed to list mirror image status %s", pool_name)
        raise

    return data
def _update_syncing_image_data(mirror_image, image):
    """Enrich a syncing image dict with progress parsed from the daemon's
    free-text status description (mutates ``image`` in place).

    NOTE(review): tightly coupled to the description formats emitted by the
    rbd-mirror daemon ('replaying, {...json...}' and
    'bootstrapping, IMAGE_COPY/COPY_OBJECT NN%'); the assert below fires if
    the replaying format changes.
    """
    if mirror_image['state'] == 'Replaying':
        p = re.compile("replaying, ({.*})")
        replaying_data = p.findall(mirror_image['description'])
        assert len(replaying_data) == 1
        replaying_data = json.loads(replaying_data[0])
        if 'replay_state' in replaying_data and replaying_data['replay_state'] == 'idle':
            image.update({
                'state_color': 'info',
                'state': 'Idle'
            })
        # Copy over whichever ReplayingData fields the daemon reported.
        for field in ReplayingData._fields:
            try:
                image[field] = replaying_data[field]
            except KeyError:
                pass
    else:
        p = re.compile("bootstrapping, IMAGE_COPY/COPY_OBJECT (.*)%")
        image.update({
            'progress': (p.findall(mirror_image['description']) or [0])[0]
        })
@ViewCache()
def _get_content_data():  # pylint: disable=R0914
    """Cached view backing the mirroring summary page: daemons, pools and
    images bucketed into error / syncing / ready lists."""
    pool_names = [pool['pool_name'] for pool in CephService.get_pool_list('rbd')
                  if pool.get('type', 1) == 1]
    _, data = get_daemons_and_pools()
    daemons = data.get('daemons', [])
    pool_stats = data.get('pools', {})

    pools = []
    image_error = []
    image_syncing = []
    image_ready = []
    for pool_name in pool_names:
        _, pool = _get_pool_datum(pool_name)
        if not pool:
            pool = {}

        stats = pool_stats.get(pool_name, {})
        if stats.get('mirror_mode', None) is None:
            # Pool has no mirroring stats at all: skip it.
            continue

        mirror_images = pool.get('mirror_images', [])
        for mirror_image in mirror_images:
            image = {
                'pool_name': pool_name,
                'name': mirror_image['name'],
                'state_color': mirror_image['state_color'],
                'state': mirror_image['state']
            }

            if mirror_image['health'] == 'ok':
                image.update({
                    'description': mirror_image['description']
                })
                image_ready.append(image)
            elif mirror_image['health'] == 'syncing':
                _update_syncing_image_data(mirror_image, image)
                image_syncing.append(image)
            else:
                image.update({
                    'description': mirror_image['description']
                })
                image_error.append(image)

        pools.append(dict({
            'name': pool_name
        }, **stats))

    return {
        'daemons': daemons,
        'pools': pools,
        'image_error': image_error,
        'image_syncing': image_syncing,
        'image_ready': image_ready
    }
def _reset_view_cache():
    """Invalidate every mirroring view cache so the next request refetches."""
    for cached_view in (get_daemons_and_pools, _get_pool_datum, _get_content_data):
        cached_view.reset()
# OpenAPI response schemas for the mirroring endpoints below.
RBD_MIRROR_SCHEMA = {
    "site_name": (str, "Site Name")
}

RBDM_POOL_SCHEMA = {
    "mirror_mode": (str, "Mirror Mode")
}

RBDM_SUMMARY_SCHEMA = {
    "site_name": (str, "site name"),
    "status": (int, ""),
    "content_data": ({
        "daemons": ([str], ""),
        "pools": ([{
            "name": (str, "Pool name"),
            "health_color": (str, ""),
            "health": (str, "pool health"),
            "mirror_mode": (str, "status"),
            "peer_uuids": ([str], "")
        }], "Pools"),
        "image_error": ([str], ""),
        "image_syncing": ([str], ""),
        "image_ready": ([str], "")
    }, "")
}
@APIRouter('/block/mirroring', Scope.RBD_MIRRORING)
@APIDoc("RBD Mirroring Management API", "RbdMirroring")
class RbdMirroring(BaseController):
    """Get and set the cluster's mirroring site name."""

    @Endpoint(method='GET', path='site_name')
    @handle_rbd_mirror_error()
    @ReadPermission
    @EndpointDoc("Display Rbd Mirroring sitename",
                 responses={200: RBD_MIRROR_SCHEMA})
    def get(self):
        """Return the current site name."""
        return self._get_site_name()

    @Endpoint(method='PUT', path='site_name')
    @handle_rbd_mirror_error()
    @UpdatePermission
    def set(self, site_name):
        """Set the site name and return the resulting value."""
        rbd.RBD().mirror_site_name_set(mgr.rados, site_name)
        return self._get_site_name()

    def _get_site_name(self):
        """Fetch the site name from librbd."""
        return {'site_name': rbd.RBD().mirror_site_name_get(mgr.rados)}
@APIRouter('/block/mirroring/summary', Scope.RBD_MIRRORING)
@APIDoc("RBD Mirroring Summary Management API", "RbdMirroringSummary")
class RbdMirroringSummary(BaseController):
    """Aggregate mirroring summary (site name + cached content data)."""

    @Endpoint()
    @handle_rbd_mirror_error()
    @ReadPermission
    @EndpointDoc("Display Rbd Mirroring Summary",
                 responses={200: RBDM_SUMMARY_SCHEMA})
    def __call__(self):
        """Return site name, view-cache status and the content data payload."""
        site_name = rbd.RBD().mirror_site_name_get(mgr.rados)

        status, content_data = _get_content_data()
        return {'site_name': site_name,
                'status': status,
                'content_data': content_data}
@APIRouter('/block/mirroring/pool', Scope.RBD_MIRRORING)
@APIDoc("RBD Mirroring Pool Mode Management API", "RbdMirroringPoolMode")
class RbdMirroringPoolMode(RESTController):
    """Read and update the mirroring mode of a single pool."""

    RESOURCE_ID = "pool_name"
    # Maps librbd mirror-mode enum values to their API string names.
    MIRROR_MODES = {
        rbd.RBD_MIRROR_MODE_DISABLED: 'disabled',
        rbd.RBD_MIRROR_MODE_IMAGE: 'image',
        rbd.RBD_MIRROR_MODE_POOL: 'pool'
    }

    @handle_rbd_mirror_error()
    @EndpointDoc("Display Rbd Mirroring Summary",
                 parameters={
                     'pool_name': (str, 'Pool Name'),
                 },
                 responses={200: RBDM_POOL_SCHEMA})
    def get(self, pool_name):
        """Return the pool's current mirror mode as a string."""
        ioctx = mgr.rados.open_ioctx(pool_name)
        current_mode = rbd.RBD().mirror_mode_get(ioctx)
        return {'mirror_mode': self.MIRROR_MODES.get(current_mode, 'unknown')}

    @RbdMirroringTask('pool/edit', {'pool_name': '{pool_name}'}, 5.0)
    def set(self, pool_name, mirror_mode=None):
        """Switch the pool's mirror mode; a no-op when already in that mode."""
        def _edit(ioctx, mirror_mode=None):
            if mirror_mode:
                # Reverse-lookup the enum value for the requested mode name.
                name_to_enum = {name: enum for enum, name in self.MIRROR_MODES.items()}
                mode_enum = name_to_enum.get(mirror_mode)
                if mode_enum is None:
                    raise rbd.Error('invalid mirror mode "{}"'.format(mirror_mode))

                if mode_enum != rbd.RBD().mirror_mode_get(ioctx):
                    rbd.RBD().mirror_mode_set(ioctx, mode_enum)

                _reset_view_cache()

        return rbd_call(pool_name, None, _edit, mirror_mode)
@APIRouter('/block/mirroring/pool/{pool_name}/bootstrap', Scope.RBD_MIRRORING)
@APIDoc("RBD Mirroring Pool Bootstrap Management API", "RbdMirroringPoolBootstrap")
class RbdMirroringPoolBootstrap(BaseController):
    """Create and import mirroring bootstrap tokens for a pool."""

    @Endpoint(method='POST', path='token')
    @handle_rbd_mirror_error()
    @UpdatePermission
    @allow_empty_body
    def create_token(self, pool_name):
        """Generate a bootstrap token that a peer cluster can import."""
        ioctx = mgr.rados.open_ioctx(pool_name)
        return {'token': rbd.RBD().mirror_peer_bootstrap_create(ioctx)}

    @Endpoint(method='POST', path='peer')
    @handle_rbd_mirror_error()
    @UpdatePermission
    @allow_empty_body
    def import_token(self, pool_name, direction, token):
        """Import a peer's bootstrap token using the given replication direction."""
        ioctx = mgr.rados.open_ioctx(pool_name)

        direction_enum = {
            'rx': rbd.RBD_MIRROR_PEER_DIRECTION_RX,
            'rx-tx': rbd.RBD_MIRROR_PEER_DIRECTION_RX_TX
        }.get(direction)
        if direction_enum is None:
            raise rbd.Error('invalid direction "{}"'.format(direction))

        rbd.RBD().mirror_peer_bootstrap_import(ioctx, direction_enum, token)
        return {}
@APIRouter('/block/mirroring/pool/{pool_name}/peer', Scope.RBD_MIRRORING)
@APIDoc("RBD Mirroring Pool Peer Management API", "RbdMirroringPoolPeer")
class RbdMirroringPoolPeer(RESTController):
    """Manage the mirroring peers (remote clusters) of a single pool."""

    RESOURCE_ID = "peer_uuid"

    @handle_rbd_mirror_error()
    def list(self, pool_name):
        """Return the UUIDs of all mirroring peers configured on the pool."""
        ioctx = mgr.rados.open_ioctx(pool_name)
        peer_list = rbd.RBD().mirror_peer_list(ioctx)
        return [x['uuid'] for x in peer_list]

    @handle_rbd_mirror_error()
    def create(self, pool_name, cluster_name, client_id, mon_host=None,
               key=None):
        """Add a mirroring peer to the pool.

        Mirroring must already be enabled on the pool. The optional monitor
        address and key are stored as peer attributes.
        """
        ioctx = mgr.rados.open_ioctx(pool_name)
        mode = rbd.RBD().mirror_mode_get(ioctx)
        if mode == rbd.RBD_MIRROR_MODE_DISABLED:
            raise rbd.Error('mirroring must be enabled')
        uuid = rbd.RBD().mirror_peer_add(ioctx, cluster_name,
                                         'client.{}'.format(client_id))
        attributes = {}
        if mon_host is not None:
            attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST] = mon_host
        if key is not None:
            attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY] = key
        if attributes:
            rbd.RBD().mirror_peer_set_attributes(ioctx, uuid, attributes)
        _reset_view_cache()
        return {'uuid': uuid}

    @handle_rbd_mirror_error()
    def get(self, pool_name, peer_uuid):
        """Return a single peer with client id, direction and attributes
        normalized for the UI; 404 when the UUID is unknown."""
        ioctx = mgr.rados.open_ioctx(pool_name)
        peer_list = rbd.RBD().mirror_peer_list(ioctx)
        peer = next((x for x in peer_list if x['uuid'] == peer_uuid), None)
        if not peer:
            raise cherrypy.HTTPError(404)
        # convert full client name to just the client id
        peer['client_id'] = peer['client_name'].split('.', 1)[-1]
        del peer['client_name']
        # convert direction enum to string
        directions = {
            rbd.RBD_MIRROR_PEER_DIRECTION_RX: 'rx',
            rbd.RBD_MIRROR_PEER_DIRECTION_TX: 'tx',
            rbd.RBD_MIRROR_PEER_DIRECTION_RX_TX: 'rx-tx'
        }
        peer['direction'] = directions[peer.get('direction', rbd.RBD_MIRROR_PEER_DIRECTION_RX)]
        # Missing attributes are treated as "not configured".
        try:
            attributes = rbd.RBD().mirror_peer_get_attributes(ioctx, peer_uuid)
        except rbd.ImageNotFound:
            attributes = {}
        peer['mon_host'] = attributes.get(rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST, '')
        peer['key'] = attributes.get(rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY, '')
        return peer

    @handle_rbd_mirror_error()
    def delete(self, pool_name, peer_uuid):
        """Remove a peer from the pool and invalidate cached views."""
        ioctx = mgr.rados.open_ioctx(pool_name)
        rbd.RBD().mirror_peer_remove(ioctx, peer_uuid)
        _reset_view_cache()

    @handle_rbd_mirror_error()
    def set(self, pool_name, peer_uuid, cluster_name=None, client_id=None,
            mon_host=None, key=None):
        """Update one or more peer properties; unset parameters are untouched."""
        ioctx = mgr.rados.open_ioctx(pool_name)
        if cluster_name:
            rbd.RBD().mirror_peer_set_cluster(ioctx, peer_uuid, cluster_name)
        if client_id:
            rbd.RBD().mirror_peer_set_client(ioctx, peer_uuid,
                                             'client.{}'.format(client_id))
        if mon_host is not None or key is not None:
            # Merge into the existing attributes so the other one survives.
            try:
                attributes = rbd.RBD().mirror_peer_get_attributes(ioctx, peer_uuid)
            except rbd.ImageNotFound:
                attributes = {}
            if mon_host is not None:
                attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_MON_HOST] = mon_host
            if key is not None:
                attributes[rbd.RBD_MIRROR_PEER_ATTRIBUTE_NAME_KEY] = key
            rbd.RBD().mirror_peer_set_attributes(ioctx, peer_uuid, attributes)
        _reset_view_cache()
@UIRouter('/block/mirroring', Scope.RBD_MIRRORING)
class RbdMirroringStatus(BaseController):
    """UI helpers for checking and bootstrapping RBD mirroring."""

    @EndpointDoc('Display RBD Mirroring Status')
    @Endpoint()
    @ReadPermission
    def status(self):
        """Report whether mirroring prerequisites exist in the cluster."""
        status = {'available': True, 'message': None}
        orch_status = OrchClient.instance().status()

        # Without an orchestrator the dashboard cannot create the
        # rbd-mirror service, so report nothing actionable.
        if not orch_status['available']:
            return status

        have_daemon = bool(CephService.get_service_list('rbd-mirror'))
        have_pool = bool(CephService.get_pool_list('rbd'))
        if not have_daemon and not have_pool:
            status['available'] = False
            status['message'] = 'No default "rbd" pool or "rbd-mirror" service ' \
                                'in the cluster. Please click on ' \
                                '"Configure Block Mirroring" ' \
                                'button to get started.'  # type: ignore

        return status

    @Endpoint('POST')
    @EndpointDoc('Configure RBD Mirroring')
    @CreatePermission
    def configure(self):
        """Create the rbd-mirror service and the default 'rbd' pool when missing."""
        rbd_pool = RBDPool()
        service = Service()

        mirror_spec = {
            'service_type': 'rbd-mirror',
            'placement': {},
            'unmanaged': False
        }
        if not CephService.get_service_list('rbd-mirror'):
            service.create(mirror_spec, 'rbd-mirror')

        if not CephService.get_pool_list('rbd'):
            rbd_pool.create()
| 23,261 | 33.309735 | 100 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/rgw.py
|
# -*- coding: utf-8 -*-
import json
import logging
from typing import Any, Dict, List, NamedTuple, Optional, Union
import cherrypy
from .. import mgr
from ..exceptions import DashboardException
from ..rest_client import RequestException
from ..security import Permission, Scope
from ..services.auth import AuthManager, JwtManager
from ..services.ceph_service import CephService
from ..services.rgw_client import NoRgwDaemonsException, RgwClient, RgwMultisite
from ..tools import json_str_to_object, str_to_bool
from . import APIDoc, APIRouter, BaseController, CreatePermission, \
CRUDCollectionMethod, CRUDEndpoint, Endpoint, EndpointDoc, ReadPermission, \
RESTController, UIRouter, UpdatePermission, allow_empty_body
from ._crud import CRUDMeta, Form, FormField, FormTaskInfo, Icon, MethodType, \
TableAction, Validator, VerticalContainer
from ._version import APIVersion
logger = logging.getLogger("controllers.rgw")

# OpenAPI schema for the RGW availability status endpoint.
RGW_SCHEMA = {
    "available": (bool, "Is RGW available?"),
    "message": (str, "Descriptions")
}
# OpenAPI schema describing one RGW daemon entry in the listing.
RGW_DAEMON_SCHEMA = {
    "id": (str, "Daemon ID"),
    "version": (str, "Ceph Version"),
    "server_hostname": (str, ""),
    "zonegroup_name": (str, "Zone Group"),
    "zone_name": (str, "Zone"),
    "port": (int, "Port"),
}
# OpenAPI schema for the RGW user list endpoint.
RGW_USER_SCHEMA = {
    "list_of_users": ([str], "list of rgw users")
}
@UIRouter('/rgw', Scope.RGW)
@APIDoc("RGW Management API", "Rgw")
class Rgw(BaseController):
    """UI endpoint reporting whether the RGW backend is usable."""

    @Endpoint()
    @ReadPermission
    @EndpointDoc("Display RGW Status",
                 responses={200: RGW_SCHEMA})
    def status(self) -> dict:
        """Probe the Admin Ops API and return availability plus an error
        message when the gateway cannot be used."""
        status = {'available': False, 'message': None}
        try:
            instance = RgwClient.admin_instance()
            # Check if the service is online.
            try:
                is_online = instance.is_service_online()
            except RequestException as e:
                # Drop this instance because the RGW client seems not to
                # exist anymore (maybe removed via orchestrator). Removing
                # the instance from the cache will result in the correct
                # error message next time when the backend tries to
                # establish a new connection (-> 'No RGW found' instead
                # of 'RGW REST API failed request ...').
                # Note, this only applies to auto-detected RGW clients.
                RgwClient.drop_instance(instance)
                raise e
            if not is_online:
                msg = 'Failed to connect to the Object Gateway\'s Admin Ops API.'
                raise RequestException(msg)
            # Ensure the system flag is set for the API user ID.
            if not instance.is_system_user():  # pragma: no cover - no complexity there
                msg = 'The system flag is not set for user "{}".'.format(
                    instance.userid)
                raise RequestException(msg)
            status['available'] = True
        except (DashboardException, RequestException, NoRgwDaemonsException) as ex:
            # Report the failure reason instead of raising; the UI renders it.
            status['message'] = str(ex)  # type: ignore
        return status
@UIRouter('/rgw/multisite')
class RgwMultisiteStatus(RESTController):
    """UI endpoints around the RGW multi-site configuration state."""

    @Endpoint()
    @ReadPermission
    # pylint: disable=R0801
    def status(self):
        """Tell the UI whether multi-site is configured on this cluster."""
        status = {'available': True, 'message': None}
        multisite_instance = RgwMultisite()
        is_multisite_configured = multisite_instance.get_multisite_status()
        if not is_multisite_configured:
            status['available'] = False
            status['message'] = 'Multi-site provides disaster recovery and may also \
                serve as a foundation for content delivery networks'  # type: ignore
        return status

    @RESTController.Collection(method='PUT', path='/migrate')
    @allow_empty_body
    # pylint: disable=W0102,W0613
    def migrate(self, daemon_name=None, realm_name=None, zonegroup_name=None, zone_name=None,
                zonegroup_endpoints=None, zone_endpoints=None, access_key=None,
                secret_key=None):
        """Convert a standalone RGW setup into a multi-site configuration."""
        multisite_instance = RgwMultisite()
        result = multisite_instance.migrate_to_multisite(realm_name, zonegroup_name,
                                                         zone_name, zonegroup_endpoints,
                                                         zone_endpoints, access_key,
                                                         secret_key)
        return result
@APIRouter('/rgw/daemon', Scope.RGW)
@APIDoc("RGW Daemon Management API", "RgwDaemon")
class RgwDaemon(RESTController):
    """REST endpoints exposing the RGW daemons known to the cluster."""

    @EndpointDoc("Display RGW Daemons",
                 responses={200: [RGW_DAEMON_SCHEMA]})
    def list(self) -> List[dict]:
        """Return one entry per running RGW daemon, sorted by daemon id;
        an empty list when no daemons exist."""
        daemons: List[dict] = []
        try:
            instance = RgwClient.admin_instance()
        except NoRgwDaemonsException:
            return daemons

        for hostname, server in CephService.get_service_map('rgw').items():
            for service in server['services']:
                metadata = service['metadata']

                # extract per-daemon service data and health
                # NOTE(review): assumes 'frontend_config#0' always contains a
                # 'port=<n>' fragment; a daemon without it would raise here --
                # confirm for SSL-only frontend configurations.
                daemon = {
                    'id': metadata['id'],
                    'service_map_id': service['id'],
                    'version': metadata['ceph_version'],
                    'server_hostname': hostname,
                    'realm_name': metadata['realm_name'],
                    'zonegroup_name': metadata['zonegroup_name'],
                    'zone_name': metadata['zone_name'],
                    'default': instance.daemon.name == metadata['id'],
                    'port': int(metadata['frontend_config#0'].split('port=')[1])
                }

                daemons.append(daemon)

        return sorted(daemons, key=lambda k: k['id'])

    def get(self, svc_id):
        # type: (str) -> dict
        """Return metadata and parsed status for a single daemon;
        raises 404 when the service id is unknown."""
        daemon = {
            'rgw_metadata': [],
            'rgw_id': svc_id,
            'rgw_status': []
        }
        service = CephService.get_service('rgw', svc_id)
        if not service:
            raise cherrypy.NotFound('Service rgw {} is not available'.format(svc_id))

        metadata = service['metadata']
        status = service['status']
        # The status blob arrives as a JSON string; fall back to {} when
        # it is missing or unparsable.
        if 'json' in status:
            try:
                status = json.loads(status['json'])
            except ValueError:
                logger.warning('%s had invalid status json', service['id'])
                status = {}
        else:
            logger.warning('%s has no key "json" in status', service['id'])

        daemon['rgw_metadata'] = metadata
        daemon['rgw_status'] = status
        return daemon

    @RESTController.Collection(method='PUT', path='/set_multisite_config')
    @allow_empty_body
    def set_multisite_config(self, realm_name=None, zonegroup_name=None,
                             zone_name=None, daemon_name=None):
        """Persist realm/zonegroup/zone settings for the given daemon."""
        CephService.set_multisite_config(realm_name, zonegroup_name, zone_name, daemon_name)
class RgwRESTController(RESTController):
    """Base class providing a helper to forward requests to the RGW
    Admin Ops API."""

    def proxy(self, daemon_name, method, path, params=None, json_response=True):
        """Forward *method*/*path* to the given RGW daemon.

        Errors are re-raised as DashboardException so the REST layer maps
        them to proper HTTP responses.
        """
        try:
            admin_client = RgwClient.admin_instance(daemon_name=daemon_name)
            raw = admin_client.proxy(method, path, params, None)
            return json_str_to_object(raw) if json_response else raw
        except (DashboardException, RequestException) as e:
            status_code = e.status if isinstance(e, DashboardException) else 500
            raise DashboardException(e, http_status_code=status_code, component='rgw')
@APIRouter('/rgw/site', Scope.RGW)
@APIDoc("RGW Site Management API", "RgwSite")
class RgwSite(RgwRESTController):
    """Query site-level RGW information (placement targets, realms)."""

    def list(self, query=None, daemon_name=None):
        """Dispatch on *query*; unsupported queries yield 501."""
        handlers = {
            'placement-targets': 'get_placement_targets',
            'realms': 'get_realms',
            'default-realm': 'get_default_realm',
        }
        handler_name = handlers.get(query)
        if handler_name is not None:
            instance = RgwClient.admin_instance(daemon_name=daemon_name)
            return getattr(instance, handler_name)()

        # @TODO: for multisite: by default, retrieve cluster topology/map.
        raise DashboardException(http_status_code=501, component='rgw', msg='Not Implemented')
@APIRouter('/rgw/bucket', Scope.RGW)
@APIDoc("RGW Bucket Management API", "RgwBucket")
class RgwBucket(RgwRESTController):
    """REST endpoints for managing RGW buckets: CRUD plus versioning,
    object locking and server-side encryption settings."""

    def _append_bid(self, bucket):
        """
        Append the bucket identifier that looks like [<tenant>/]<bucket>.
        See http://docs.ceph.com/docs/nautilus/radosgw/multitenancy/ for
        more information.
        :param bucket: The bucket parameters.
        :type bucket: dict
        :return: The modified bucket parameters including the 'bid' parameter.
        :rtype: dict
        """
        if isinstance(bucket, dict):
            bucket['bid'] = '{}/{}'.format(bucket['tenant'], bucket['bucket']) \
                if bucket['tenant'] else bucket['bucket']
        return bucket

    def _get_versioning(self, owner, daemon_name, bucket_name):
        """Return the bucket's S3 versioning configuration."""
        rgw_client = RgwClient.instance(owner, daemon_name)
        return rgw_client.get_bucket_versioning(bucket_name)

    def _set_versioning(self, owner, daemon_name, bucket_name, versioning_state, mfa_delete,
                        mfa_token_serial, mfa_token_pin):
        """Apply a versioning change only when it differs from the current
        configuration, avoiding a redundant RGW round trip."""
        bucket_versioning = self._get_versioning(owner, daemon_name, bucket_name)
        if versioning_state != bucket_versioning['Status']\
                or (mfa_delete and mfa_delete != bucket_versioning['MfaDelete']):
            rgw_client = RgwClient.instance(owner, daemon_name)
            rgw_client.set_bucket_versioning(bucket_name, versioning_state, mfa_delete,
                                             mfa_token_serial, mfa_token_pin)

    def _set_encryption(self, bid, encryption_type, key_id, daemon_name, owner):
        """Enable server-side encryption on the bucket."""
        rgw_client = RgwClient.instance(owner, daemon_name)
        rgw_client.set_bucket_encryption(bid, key_id, encryption_type)

    # pylint: disable=W0613
    def _set_encryption_config(self, encryption_type, kms_provider, auth_method, secret_engine,
                               secret_path, namespace, address, token, daemon_name, owner,
                               ssl_cert, client_cert, client_key):
        """Persist cluster-wide SSE/KMS connection settings."""
        CephService.set_encryption_config(encryption_type, kms_provider, auth_method,
                                          secret_engine, secret_path, namespace, address,
                                          token, daemon_name, ssl_cert, client_cert, client_key)

    def _get_encryption(self, bucket_name, daemon_name, owner):
        """Return the bucket's encryption status."""
        rgw_client = RgwClient.instance(owner, daemon_name)
        return rgw_client.get_bucket_encryption(bucket_name)

    def _delete_encryption(self, bucket_name, daemon_name, owner):
        """Disable server-side encryption on the bucket."""
        rgw_client = RgwClient.instance(owner, daemon_name)
        return rgw_client.delete_bucket_encryption(bucket_name)

    def _get_locking(self, owner, daemon_name, bucket_name):
        """Return the bucket's object-locking configuration."""
        rgw_client = RgwClient.instance(owner, daemon_name)
        return rgw_client.get_bucket_locking(bucket_name)

    def _set_locking(self, owner, daemon_name, bucket_name, mode,
                     retention_period_days, retention_period_years):
        """Update the bucket's object-locking mode and retention period."""
        rgw_client = RgwClient.instance(owner, daemon_name)
        return rgw_client.set_bucket_locking(bucket_name, mode,
                                             retention_period_days,
                                             retention_period_years)

    @staticmethod
    def strip_tenant_from_bucket_name(bucket_name):
        # type (str) -> str
        """
        >>> RgwBucket.strip_tenant_from_bucket_name('tenant/bucket-name')
        'bucket-name'
        >>> RgwBucket.strip_tenant_from_bucket_name('bucket-name')
        'bucket-name'
        """
        return bucket_name[bucket_name.find('/') + 1:]

    @staticmethod
    def get_s3_bucket_name(bucket_name, tenant=None):
        # type (str, str) -> str
        """
        >>> RgwBucket.get_s3_bucket_name('bucket-name', 'tenant')
        'tenant:bucket-name'
        >>> RgwBucket.get_s3_bucket_name('tenant/bucket-name', 'tenant')
        'tenant:bucket-name'
        >>> RgwBucket.get_s3_bucket_name('bucket-name')
        'bucket-name'
        """
        bucket_name = RgwBucket.strip_tenant_from_bucket_name(bucket_name)
        if tenant:
            bucket_name = '{}:{}'.format(tenant, bucket_name)
        return bucket_name

    @RESTController.MethodMap(version=APIVersion(1, 1))  # type: ignore
    def list(self, stats: bool = False, daemon_name: Optional[str] = None,
             uid: Optional[str] = None) -> List[Union[str, Dict[str, Any]]]:
        """List buckets, optionally with usage stats and filtered by owner."""
        query_params = f'?stats={str_to_bool(stats)}'
        if uid and uid.strip():
            query_params = f'{query_params}&uid={uid.strip()}'
        result = self.proxy(daemon_name, 'GET', 'bucket{}'.format(query_params))
        if stats:
            result = [self._append_bid(bucket) for bucket in result]
        return result

    def get(self, bucket, daemon_name=None):
        # type: (str, Optional[str]) -> dict
        """Fetch one bucket and enrich it with versioning, encryption and
        locking details."""
        result = self.proxy(daemon_name, 'GET', 'bucket', {'bucket': bucket})
        bucket_name = RgwBucket.get_s3_bucket_name(result['bucket'],
                                                   result['tenant'])
        # Append the versioning configuration.
        versioning = self._get_versioning(result['owner'], daemon_name, bucket_name)
        encryption = self._get_encryption(bucket_name, daemon_name, result['owner'])
        result['encryption'] = encryption['Status']
        result['versioning'] = versioning['Status']
        result['mfa_delete'] = versioning['MfaDelete']
        # Append the locking configuration.
        locking = self._get_locking(result['owner'], daemon_name, bucket_name)
        result.update(locking)
        return self._append_bid(result)

    @allow_empty_body
    def create(self, bucket, uid, zonegroup=None, placement_target=None,
               lock_enabled='false', lock_mode=None,
               lock_retention_period_days=None,
               lock_retention_period_years=None, encryption_state='false',
               encryption_type=None, key_id=None, daemon_name=None):
        """Create a bucket on behalf of *uid*, optionally enabling object
        locking and encryption right away."""
        lock_enabled = str_to_bool(lock_enabled)
        encryption_state = str_to_bool(encryption_state)
        try:
            rgw_client = RgwClient.instance(uid, daemon_name)
            result = rgw_client.create_bucket(bucket, zonegroup,
                                              placement_target,
                                              lock_enabled)
            if lock_enabled:
                self._set_locking(uid, daemon_name, bucket, lock_mode,
                                  lock_retention_period_days,
                                  lock_retention_period_years)
            if encryption_state:
                self._set_encryption(bucket, encryption_type, key_id, daemon_name, uid)
            return result
        except RequestException as e:  # pragma: no cover - handling is too obvious
            raise DashboardException(e, http_status_code=500, component='rgw')

    @allow_empty_body
    def set(self, bucket, bucket_id, uid, versioning_state=None,
            encryption_state='false', encryption_type=None, key_id=None,
            mfa_delete=None, mfa_token_serial=None, mfa_token_pin=None,
            lock_mode=None, lock_retention_period_days=None,
            lock_retention_period_years=None, daemon_name=None):
        """Update a bucket's owner link, versioning, locking and encryption.

        Raises a 409 when versioning would be suspended on a lock-enabled
        bucket, which RGW does not permit.
        """
        encryption_state = str_to_bool(encryption_state)
        # When linking a non-tenant-user owned bucket to a tenanted user, we
        # need to prefix bucket name with '/'. e.g. photos -> /photos
        if '$' in uid and '/' not in bucket:
            bucket = '/{}'.format(bucket)
        # Link bucket to new user:
        result = self.proxy(daemon_name,
                            'PUT',
                            'bucket', {
                                'bucket': bucket,
                                'bucket-id': bucket_id,
                                'uid': uid
                            },
                            json_response=False)
        uid_tenant = uid[:uid.find('$')] if uid.find('$') >= 0 else None
        bucket_name = RgwBucket.get_s3_bucket_name(bucket, uid_tenant)
        locking = self._get_locking(uid, daemon_name, bucket_name)
        if versioning_state:
            if versioning_state == 'Suspended' and locking['lock_enabled']:
                raise DashboardException(msg='Bucket versioning cannot be disabled/suspended '
                                             'on buckets with object lock enabled ',
                                         http_status_code=409, component='rgw')
            self._set_versioning(uid, daemon_name, bucket_name, versioning_state,
                                 mfa_delete, mfa_token_serial, mfa_token_pin)
        # Update locking if it is enabled.
        if locking['lock_enabled']:
            self._set_locking(uid, daemon_name, bucket_name, lock_mode,
                              lock_retention_period_days,
                              lock_retention_period_years)
        # Reconcile encryption: enable or disable only on a state change.
        encryption_status = self._get_encryption(bucket_name, daemon_name, uid)
        if encryption_state and encryption_status['Status'] != 'Enabled':
            self._set_encryption(bucket_name, encryption_type, key_id, daemon_name, uid)
        if encryption_status['Status'] == 'Enabled' and (not encryption_state):
            self._delete_encryption(bucket_name, daemon_name, uid)
        return self._append_bid(result)

    def delete(self, bucket, purge_objects='true', daemon_name=None):
        """Delete a bucket; *purge_objects* also removes its contents."""
        return self.proxy(daemon_name, 'DELETE', 'bucket', {
            'bucket': bucket,
            'purge-objects': purge_objects
        }, json_response=False)

    @RESTController.Collection(method='PUT', path='/setEncryptionConfig')
    @allow_empty_body
    def set_encryption_config(self, encryption_type=None, kms_provider=None, auth_method=None,
                              secret_engine=None, secret_path='', namespace='', address=None,
                              token=None, daemon_name=None, owner=None, ssl_cert=None,
                              client_cert=None, client_key=None):
        """Store cluster-wide SSE/KMS configuration."""
        return self._set_encryption_config(encryption_type, kms_provider, auth_method,
                                           secret_engine, secret_path, namespace,
                                           address, token, daemon_name, owner, ssl_cert,
                                           client_cert, client_key)

    @RESTController.Collection(method='GET', path='/getEncryption')
    @allow_empty_body
    def get_encryption(self, bucket_name, daemon_name=None, owner=None):
        """Return a bucket's encryption status."""
        return self._get_encryption(bucket_name, daemon_name, owner)

    @RESTController.Collection(method='DELETE', path='/deleteEncryption')
    @allow_empty_body
    def delete_encryption(self, bucket_name, daemon_name=None, owner=None):
        """Disable a bucket's encryption."""
        return self._delete_encryption(bucket_name, daemon_name, owner)

    @RESTController.Collection(method='GET', path='/getEncryptionConfig')
    @allow_empty_body
    def get_encryption_config(self, daemon_name=None, owner=None):
        """Return the cluster-wide SSE/KMS configuration."""
        return CephService.get_encryption_config(daemon_name)
@APIRouter('/rgw/user', Scope.RGW)
@APIDoc("RGW User Management API", "RgwUser")
class RgwUser(RgwRESTController):
    """REST endpoints for managing RGW users, their keys, capabilities,
    quotas and subusers."""

    def _append_uid(self, user):
        """
        Append the user identifier that looks like [<tenant>$]<user>.
        See http://docs.ceph.com/docs/jewel/radosgw/multitenancy/ for
        more information.
        :param user: The user parameters.
        :type user: dict
        :return: The modified user parameters including the 'uid' parameter.
        :rtype: dict
        """
        if isinstance(user, dict):
            user['uid'] = '{}${}'.format(user['tenant'], user['user_id']) \
                if user['tenant'] else user['user_id']
        return user

    @staticmethod
    def _keys_allowed():
        """Return True when the logged-in dashboard user may see S3/Swift
        keys: requires READ plus at least one write permission on RGW."""
        permissions = AuthManager.get_user(JwtManager.get_username()).permissions_dict()
        edit_permissions = [Permission.CREATE, Permission.UPDATE, Permission.DELETE]
        return Scope.RGW in permissions and Permission.READ in permissions[Scope.RGW] \
            and len(set(edit_permissions).intersection(set(permissions[Scope.RGW]))) > 0

    @EndpointDoc("Display RGW Users",
                 responses={200: RGW_USER_SCHEMA})
    def list(self, daemon_name=None):
        # type: (Optional[str]) -> List[str]
        """Return all user IDs, following the Admin Ops API's pagination
        marker until the listing is complete."""
        users = []  # type: List[str]
        marker = None
        while True:
            params = {}  # type: dict
            if marker:
                params['marker'] = marker
            result = self.proxy(daemon_name, 'GET', 'user?list', params)
            users.extend(result['keys'])
            if not result['truncated']:
                break
            # Make sure there is a marker.
            assert result['marker']
            # Make sure the marker has changed.
            assert marker != result['marker']
            marker = result['marker']
        return users

    def get(self, uid, daemon_name=None, stats=True) -> dict:
        """Return a single user; key material is stripped when the
        dashboard user lacks permission to view it."""
        query_params = '?stats' if stats else ''
        result = self.proxy(daemon_name, 'GET', 'user{}'.format(query_params),
                            {'uid': uid, 'stats': stats})
        if not self._keys_allowed():
            del result['keys']
            del result['swift_keys']
        return self._append_uid(result)

    @Endpoint()
    @ReadPermission
    def get_emails(self, daemon_name=None):
        # type: (Optional[str]) -> List[str]
        """Return the e-mail addresses of all users that have one set."""
        emails = []
        # NOTE(review): self.list()/self.get() return Python objects here,
        # not JSON strings, so json.loads() looks suspect -- confirm the
        # intended behavior before relying on this endpoint.
        for uid in json.loads(self.list(daemon_name)):  # type: ignore
            user = json.loads(self.get(uid, daemon_name))  # type: ignore
            if user["email"]:
                emails.append(user["email"])
        return emails

    @allow_empty_body
    def create(self, uid, display_name, email=None, max_buckets=None,
               suspended=None, generate_key=None, access_key=None,
               secret_key=None, daemon_name=None):
        """Create a new RGW user; only the supplied parameters are sent."""
        params = {'uid': uid}
        if display_name is not None:
            params['display-name'] = display_name
        if email is not None:
            params['email'] = email
        if max_buckets is not None:
            params['max-buckets'] = max_buckets
        if suspended is not None:
            params['suspended'] = suspended
        if generate_key is not None:
            params['generate-key'] = generate_key
        if access_key is not None:
            params['access-key'] = access_key
        if secret_key is not None:
            params['secret-key'] = secret_key
        result = self.proxy(daemon_name, 'PUT', 'user', params)
        return self._append_uid(result)

    @allow_empty_body
    def set(self, uid, display_name=None, email=None, max_buckets=None,
            suspended=None, daemon_name=None):
        """Update mutable user properties; unset parameters are untouched."""
        params = {'uid': uid}
        if display_name is not None:
            params['display-name'] = display_name
        if email is not None:
            params['email'] = email
        if max_buckets is not None:
            params['max-buckets'] = max_buckets
        if suspended is not None:
            params['suspended'] = suspended
        result = self.proxy(daemon_name, 'POST', 'user', params)
        return self._append_uid(result)

    def delete(self, uid, daemon_name=None):
        """Delete a user, refusing to remove the account the dashboard
        itself uses to talk to RGW."""
        try:
            instance = RgwClient.admin_instance(daemon_name=daemon_name)
            # Ensure the user is not configured to access the RGW Object Gateway.
            if instance.userid == uid:
                raise DashboardException(msg='Unable to delete "{}" - this user '
                                             'account is required for managing the '
                                             'Object Gateway'.format(uid))
            # Finally redirect request to the RGW proxy.
            return self.proxy(daemon_name, 'DELETE', 'user', {'uid': uid}, json_response=False)
        except (DashboardException, RequestException) as e:  # pragma: no cover
            raise DashboardException(e, component='rgw')

    # pylint: disable=redefined-builtin
    @RESTController.Resource(method='POST', path='/capability', status=201)
    @allow_empty_body
    def create_cap(self, uid, type, perm, daemon_name=None):
        """Grant an admin capability (e.g. 'users=read') to the user."""
        return self.proxy(daemon_name, 'PUT', 'user?caps', {
            'uid': uid,
            'user-caps': '{}={}'.format(type, perm)
        })

    # pylint: disable=redefined-builtin
    @RESTController.Resource(method='DELETE', path='/capability', status=204)
    def delete_cap(self, uid, type, perm, daemon_name=None):
        """Revoke an admin capability from the user."""
        return self.proxy(daemon_name, 'DELETE', 'user?caps', {
            'uid': uid,
            'user-caps': '{}={}'.format(type, perm)
        })

    @RESTController.Resource(method='POST', path='/key', status=201)
    @allow_empty_body
    def create_key(self, uid, key_type='s3', subuser=None, generate_key='true',
                   access_key=None, secret_key=None, daemon_name=None):
        """Create an S3/Swift key pair for the user or one of its subusers."""
        params = {'uid': uid, 'key-type': key_type, 'generate-key': generate_key}
        if subuser is not None:
            params['subuser'] = subuser
        if access_key is not None:
            params['access-key'] = access_key
        if secret_key is not None:
            params['secret-key'] = secret_key
        return self.proxy(daemon_name, 'PUT', 'user?key', params)

    @RESTController.Resource(method='DELETE', path='/key', status=204)
    def delete_key(self, uid, key_type='s3', subuser=None, access_key=None, daemon_name=None):
        """Delete a key pair identified by its access key."""
        params = {'uid': uid, 'key-type': key_type}
        if subuser is not None:
            params['subuser'] = subuser
        if access_key is not None:
            params['access-key'] = access_key
        return self.proxy(daemon_name, 'DELETE', 'user?key', params, json_response=False)

    @RESTController.Resource(method='GET', path='/quota')
    def get_quota(self, uid, daemon_name=None):
        """Return the user's quota settings."""
        return self.proxy(daemon_name, 'GET', 'user?quota', {'uid': uid})

    @RESTController.Resource(method='PUT', path='/quota')
    @allow_empty_body
    def set_quota(self, uid, quota_type, enabled, max_size_kb, max_objects, daemon_name=None):
        """Update the given quota type's limits for the user."""
        return self.proxy(daemon_name, 'PUT', 'user?quota', {
            'uid': uid,
            'quota-type': quota_type,
            'enabled': enabled,
            'max-size-kb': max_size_kb,
            'max-objects': max_objects
        }, json_response=False)

    @RESTController.Resource(method='POST', path='/subuser', status=201)
    @allow_empty_body
    def create_subuser(self, uid, subuser, access, key_type='s3',
                       generate_secret='true', access_key=None,
                       secret_key=None, daemon_name=None):
        """Create a subuser, or modify it when it already exists
        (POST modifies an existing subuser, PUT creates a new one)."""
        # pylint: disable=R1705
        subusr_array = []
        # NOTE(review): self.get() returns a dict, not a JSON string, so
        # json.loads() looks suspect -- confirm before relying on it.
        user = json.loads(self.get(uid, daemon_name))  # type: ignore
        subusers = user["subusers"]
        for sub_usr in subusers:
            subusr_array.append(sub_usr["id"])
        if subuser in subusr_array:
            return self.proxy(daemon_name, 'POST', 'user', {
                'uid': uid,
                'subuser': subuser,
                'key-type': key_type,
                'access': access,
                'generate-secret': generate_secret,
                'access-key': access_key,
                'secret-key': secret_key
            })
        else:
            return self.proxy(daemon_name, 'PUT', 'user', {
                'uid': uid,
                'subuser': subuser,
                'key-type': key_type,
                'access': access,
                'generate-secret': generate_secret,
                'access-key': access_key,
                'secret-key': secret_key
            })

    @RESTController.Resource(method='DELETE', path='/subuser/{subuser}', status=204)
    def delete_subuser(self, uid, subuser, purge_keys='true', daemon_name=None):
        """
        :param purge_keys: Set to False to do not purge the keys.
                           Note, this only works for s3 subusers.
        """
        return self.proxy(daemon_name, 'DELETE', 'user', {
            'uid': uid,
            'subuser': subuser,
            'purge-keys': purge_keys
        }, json_response=False)
class RGWRoleEndpoints:
    """Callables wired into the CRUDEndpoint below for RGW IAM roles."""

    @staticmethod
    def role_list(_):
        """Return all IAM roles known to the RGW admin instance."""
        rgw_client = RgwClient.admin_instance()
        roles = rgw_client.list_roles()
        return roles

    @staticmethod
    def role_create(_, role_name: str = '', role_path: str = '', role_assume_policy_doc: str = ''):
        """Create an IAM role; *role_name* and *role_path* are mandatory.

        Raises DashboardException on missing input instead of using
        `assert`, which is stripped when Python runs with -O and would
        silently let empty values through to RGW.
        """
        if not role_name:
            raise DashboardException(msg='Role name is required', component='rgw')
        if not role_path:
            raise DashboardException(msg='Role path is required', component='rgw')
        rgw_client = RgwClient.admin_instance()
        rgw_client.create_role(role_name, role_path, role_assume_policy_doc)
        return f'Role {role_name} created successfully'
# pylint: disable=C0301
# Help text shown next to the policy-document field. NOTE: a space is
# required between the closing quote of href="..." and target="..." so
# the concatenated string forms valid HTML attributes.
assume_role_policy_help = (
    'Paste a json assume role policy document, to find more information on how to get this document, <a '  # noqa: E501
    'href="https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-role.html#cfn-iam-role-assumerolepolicydocument" '  # noqa: E501
    'target="_blank">click here.</a>'
)

# Form layout for the "Create Role" page rendered by the CRUD framework.
create_container = VerticalContainer('Create Role', 'create_role', fields=[
    FormField('Role name', 'role_name', validators=[Validator.RGW_ROLE_NAME]),
    FormField('Path', 'role_path', validators=[Validator.RGW_ROLE_PATH]),
    FormField('Assume Role Policy Document',
              'role_assume_policy_doc',
              help=assume_role_policy_help,
              field_type='textarea',
              validators=[Validator.JSON]),
])

# Registers the form at /rgw/roles/create; submission is handled by
# RGWRoleEndpoints.role_create via the CRUDEndpoint below.
create_role_form = Form(path='/rgw/roles/create',
                        root_container=create_container,
                        task_info=FormTaskInfo("IAM RGW Role '{role_name}' created successfully",
                                               ['role_name']),
                        method_type=MethodType.POST.value)
@CRUDEndpoint(
    router=APIRouter('/rgw/roles', Scope.RGW),
    doc=APIDoc("List of RGW roles", "RGW"),
    actions=[
        TableAction(name='Create', permission='create', icon=Icon.ADD.value,
                    routerLink='/rgw/roles/create')
    ],
    forms=[create_role_form],
    permissions=[Scope.CONFIG_OPT],
    get_all=CRUDCollectionMethod(
        func=RGWRoleEndpoints.role_list,
        doc=EndpointDoc("List RGW roles")
    ),
    create=CRUDCollectionMethod(
        func=RGWRoleEndpoints.role_create,
        doc=EndpointDoc("Create Ceph User")
    ),
    # Column presentation hints for the roles table in the UI.
    set_column={
        "CreateDate": {'cellTemplate': 'date'},
        "MaxSessionDuration": {'cellTemplate': 'duration'},
        "RoleId": {'isHidden': True},
        "AssumeRolePolicyDocument": {'isHidden': True}
    },
    detail_columns=['RoleId', 'AssumeRolePolicyDocument'],
    meta=CRUDMeta()
)
class RgwUserRole(NamedTuple):
    """Row model for the RGW roles table; field names mirror the role
    JSON keys returned by the RGW admin API."""
    RoleId: int
    RoleName: str
    Path: str
    Arn: str
    CreateDate: str
    MaxSessionDuration: int
    AssumeRolePolicyDocument: str
@APIRouter('/rgw/realm', Scope.RGW)
class RgwRealm(RESTController):
    """CRUD and token endpoints for RGW multi-site realms."""

    @allow_empty_body
    # pylint: disable=W0613
    def create(self, realm_name, default):
        """Create a new realm; *default* marks it as the cluster default."""
        return RgwMultisite().create_realm(realm_name, default)

    @allow_empty_body
    # pylint: disable=W0613
    def list(self):
        """List all configured realms."""
        return RgwMultisite().list_realms()

    @allow_empty_body
    # pylint: disable=W0613
    def get(self, realm_name):
        """Fetch the configuration of a single realm."""
        return RgwMultisite().get_realm(realm_name)

    @Endpoint()
    @ReadPermission
    def get_all_realms_info(self):
        """Return detailed information for every realm."""
        return RgwMultisite().get_all_realms_info()

    @allow_empty_body
    # pylint: disable=W0613
    def set(self, realm_name: str, new_realm_name: str, default: str = ''):
        """Rename a realm and/or toggle its default flag."""
        return RgwMultisite().edit_realm(realm_name, new_realm_name, default)

    @Endpoint()
    @ReadPermission
    def get_realm_tokens(self):
        """Return realm tokens; 404 when no RGW daemons are running."""
        try:
            return CephService.get_realm_tokens()
        except NoRgwDaemonsException as e:
            raise DashboardException(e, http_status_code=404, component='rgw')

    @Endpoint(method='POST')
    @UpdatePermission
    @allow_empty_body
    # pylint: disable=W0613
    def import_realm_token(self, realm_token, zone_name, daemon_name=None):
        """Import a realm token into *zone_name* and refresh the period."""
        try:
            multisite = RgwMultisite()
            outcome = CephService.import_realm_token(realm_token, zone_name)
            multisite.update_period()
            return outcome
        except NoRgwDaemonsException as e:
            raise DashboardException(e, http_status_code=404, component='rgw')

    def delete(self, realm_name):
        """Remove the given realm."""
        return RgwMultisite().delete_realm(realm_name)
@APIRouter('/rgw/zonegroup', Scope.RGW)
class RgwZonegroup(RESTController):
    """REST endpoints for managing RGW multisite zonegroups."""
    @allow_empty_body
    # pylint: disable=W0613
    def create(self, realm_name, zonegroup_name, default=None, master=None,
               zonegroup_endpoints=None):
        """Create a zonegroup inside the given realm."""
        return RgwMultisite().create_zonegroup(realm_name, zonegroup_name, default,
                                               master, zonegroup_endpoints)

    @allow_empty_body
    # pylint: disable=W0613
    def list(self):
        """List all configured zonegroups."""
        return RgwMultisite().list_zonegroups()

    @allow_empty_body
    # pylint: disable=W0613
    def get(self, zonegroup_name):
        """Fetch a single zonegroup by name."""
        return RgwMultisite().get_zonegroup(zonegroup_name)

    @Endpoint()
    @ReadPermission
    def get_all_zonegroups_info(self):
        """Return detailed information about every zonegroup."""
        return RgwMultisite().get_all_zonegroups_info()

    def delete(self, zonegroup_name, delete_pools, pools: Optional[List[str]] = None):
        """Delete a zonegroup; 404 when no RGW daemons are available."""
        pool_names = [] if pools is None else pools
        try:
            return RgwMultisite().delete_zonegroup(zonegroup_name, delete_pools, pool_names)
        except NoRgwDaemonsException as e:
            raise DashboardException(e, http_status_code=404, component='rgw')

    @allow_empty_body
    # pylint: disable=W0613,W0102
    def set(self, zonegroup_name: str, realm_name: str, new_zonegroup_name: str,
            default: str = '', master: str = '', zonegroup_endpoints: str = '',
            add_zones: List[str] = [], remove_zones: List[str] = [],
            placement_targets: List[Dict[str, str]] = []):
        """Edit zonegroup attributes, zone membership and placement targets."""
        return RgwMultisite().edit_zonegroup(realm_name, zonegroup_name, new_zonegroup_name,
                                             default, master, zonegroup_endpoints, add_zones,
                                             remove_zones, placement_targets)
@APIRouter('/rgw/zone', Scope.RGW)
class RgwZone(RESTController):
    """REST endpoints for managing RGW multisite zones."""
    @allow_empty_body
    # pylint: disable=W0613
    def create(self, zone_name, zonegroup_name=None, default=False, master=False,
               zone_endpoints=None, access_key=None, secret_key=None):
        """Create a zone in the given zonegroup."""
        return RgwMultisite().create_zone(zone_name, zonegroup_name, default,
                                          master, zone_endpoints, access_key,
                                          secret_key)

    @allow_empty_body
    # pylint: disable=W0613
    def list(self):
        """List all configured zones."""
        return RgwMultisite().list_zones()

    @allow_empty_body
    # pylint: disable=W0613
    def get(self, zone_name):
        """Fetch a single zone by name."""
        return RgwMultisite().get_zone(zone_name)

    @Endpoint()
    @ReadPermission
    def get_all_zones_info(self):
        """Return detailed information about every zone."""
        return RgwMultisite().get_all_zones_info()

    def delete(self, zone_name, delete_pools, pools: Optional[List[str]] = None,
               zonegroup_name=None):
        """Delete a zone; 404 when no RGW daemons are available."""
        pool_names = [] if pools is None else pools
        group = '' if zonegroup_name is None else zonegroup_name
        try:
            return RgwMultisite().delete_zone(zone_name, delete_pools, pool_names, group)
        except NoRgwDaemonsException as e:
            raise DashboardException(e, http_status_code=404, component='rgw')

    @allow_empty_body
    # pylint: disable=W0613,W0102
    def set(self, zone_name: str, new_zone_name: str, zonegroup_name: str, default: str = '',
            master: str = '', zone_endpoints: str = '', access_key: str = '', secret_key: str = '',
            placement_target: str = '', data_pool: str = '', index_pool: str = '',
            data_extra_pool: str = '', storage_class: str = '', data_pool_class: str = '',
            compression: str = ''):
        """Edit zone attributes and its placement/pool configuration."""
        return RgwMultisite().edit_zone(zone_name, new_zone_name, zonegroup_name, default,
                                        master, zone_endpoints, access_key, secret_key,
                                        placement_target, data_pool, index_pool,
                                        data_extra_pool, storage_class, data_pool_class,
                                        compression)

    @Endpoint()
    @ReadPermission
    def get_pool_names(self):
        """Return the names of all OSD pools via 'osd lspools'."""
        ret, out, _ = mgr.check_mon_command({
            'prefix': 'osd lspools',
            'format': 'json',
        })
        if ret == 0 and out is not None:
            return json.loads(out)
        return []

    @Endpoint('PUT')
    @CreatePermission
    def create_system_user(self, userName: str, zoneName: str):
        """Create a multisite system user in the given zone."""
        return RgwMultisite().create_system_user(userName, zoneName)

    @Endpoint()
    @ReadPermission
    def get_user_list(self, zoneName=None):
        """Return the RGW users of the given zone."""
        return RgwMultisite().get_user_list(zoneName)
| 38,899 | 40.827957 | 154 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/role.py
|
# -*- coding: utf-8 -*-
import cherrypy
from .. import mgr
from ..exceptions import DashboardException, RoleAlreadyExists, \
RoleDoesNotExist, RoleIsAssociatedWithUser
from ..security import Permission
from ..security import Scope as SecurityScope
from ..services.access_control import SYSTEM_ROLES
from . import APIDoc, APIRouter, CreatePermission, EndpointDoc, RESTController, UIRouter
# OpenAPI response schema for the role endpoints; each entry is a
# ``(type, description)`` tuple consumed by EndpointDoc.
ROLE_SCHEMA = [{
    "name": (str, "Role Name"),
    "description": (str, "Role Descriptions"),
    "scopes_permissions": ({
        "cephfs": ([str], "")
    }, ""),
    "system": (bool, "")
}]
@APIRouter('/role', SecurityScope.USER)
@APIDoc("Role Management API", "Role")
class Role(RESTController):
    """CRUD API for dashboard roles (user-defined and built-in system roles)."""

    @staticmethod
    def _role_to_dict(role):
        """Serialize a role, flagging whether it is a built-in system role."""
        role_dict = role.to_dict()
        role_dict['system'] = role_dict['name'] in SYSTEM_ROLES
        return role_dict

    @staticmethod
    def _validate_permissions(scopes_permissions):
        """Raise DashboardException if any scope or permission name is unknown."""
        if scopes_permissions:
            for scope, permissions in scopes_permissions.items():
                if scope not in SecurityScope.all_scopes():
                    raise DashboardException(msg='Invalid scope',
                                             code='invalid_scope',
                                             component='role')
                if any(permission not in Permission.all_permissions()
                       for permission in permissions):
                    raise DashboardException(msg='Invalid permission',
                                             code='invalid_permission',
                                             component='role')

    @staticmethod
    def _set_permissions(role, scopes_permissions):
        """Replace the role's permissions with the given scope->permissions map."""
        role.reset_scope_permissions()
        if scopes_permissions:
            for scope, permissions in scopes_permissions.items():
                if permissions:
                    role.set_scope_permissions(scope, permissions)

    @EndpointDoc("Display Role list",
                 responses={200: ROLE_SCHEMA})
    def list(self):
        # type: () -> list
        """Return all roles (custom and system), sorted by name."""
        roles = dict(mgr.ACCESS_CTRL_DB.roles)
        roles.update(SYSTEM_ROLES)
        roles = sorted(roles.values(), key=lambda role: role.name)
        return [Role._role_to_dict(r) for r in roles]

    @staticmethod
    def _get(name):
        # System roles take precedence over custom roles of the same name.
        role = SYSTEM_ROLES.get(name)
        if not role:
            try:
                role = mgr.ACCESS_CTRL_DB.get_role(name)
            except RoleDoesNotExist:
                raise cherrypy.HTTPError(404)
        return Role._role_to_dict(role)

    def get(self, name):
        # type: (str) -> dict
        """Return a single role by name; 404 if it does not exist."""
        return Role._get(name)

    @staticmethod
    def _create(name=None, description=None, scopes_permissions=None):
        # Shared by create() and clone(); validates before persisting.
        if not name:
            raise DashboardException(msg='Name is required',
                                     code='name_required',
                                     component='role')
        Role._validate_permissions(scopes_permissions)
        try:
            role = mgr.ACCESS_CTRL_DB.create_role(name, description)
        except RoleAlreadyExists:
            raise DashboardException(msg='Role already exists',
                                     code='role_already_exists',
                                     component='role')
        Role._set_permissions(role, scopes_permissions)
        mgr.ACCESS_CTRL_DB.save()
        return Role._role_to_dict(role)

    def create(self, name=None, description=None, scopes_permissions=None):
        # type: (str, str, dict) -> dict
        """Create a new role; fails if the name is missing or already taken."""
        return Role._create(name, description, scopes_permissions)

    def set(self, name, description=None, scopes_permissions=None):
        # type: (str, str, dict) -> dict
        """Update a custom role; system roles cannot be modified."""
        try:
            role = mgr.ACCESS_CTRL_DB.get_role(name)
        except RoleDoesNotExist:
            if name in SYSTEM_ROLES:
                raise DashboardException(msg='Cannot update system role',
                                         code='cannot_update_system_role',
                                         component='role')
            raise cherrypy.HTTPError(404)
        Role._validate_permissions(scopes_permissions)
        Role._set_permissions(role, scopes_permissions)
        role.description = description
        # Propagate the changed permissions to users holding this role.
        mgr.ACCESS_CTRL_DB.update_users_with_roles(role)
        mgr.ACCESS_CTRL_DB.save()
        return Role._role_to_dict(role)

    def delete(self, name):
        # type: (str) -> None
        """Delete a custom role; system roles and roles in use are rejected."""
        try:
            mgr.ACCESS_CTRL_DB.delete_role(name)
        except RoleDoesNotExist:
            if name in SYSTEM_ROLES:
                raise DashboardException(msg='Cannot delete system role',
                                         code='cannot_delete_system_role',
                                         component='role')
            raise cherrypy.HTTPError(404)
        except RoleIsAssociatedWithUser:
            raise DashboardException(msg='Role is associated with user',
                                     code='role_is_associated_with_user',
                                     component='role')
        mgr.ACCESS_CTRL_DB.save()

    @RESTController.Resource('POST', status=201)
    @CreatePermission
    def clone(self, name, new_name):
        # type: (str, str) -> dict
        """Create a copy of an existing role under a new name."""
        role = Role._get(name)
        return Role._create(new_name, role.get('description'),
                            role.get('scopes_permissions'))
@UIRouter('/scope', SecurityScope.USER)
class Scope(RESTController):
    """UI helper endpoint exposing all available security scopes."""
    def list(self):
        # Delegate to the security module's canonical scope listing.
        all_scopes = SecurityScope.all_scopes()
        return all_scopes
| 5,530 | 37.409722 | 88 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/saml2.py
|
# -*- coding: utf-8 -*-
import cherrypy
try:
from onelogin.saml2.auth import OneLogin_Saml2_Auth
from onelogin.saml2.errors import OneLogin_Saml2_Error
from onelogin.saml2.settings import OneLogin_Saml2_Settings
python_saml_imported = True
except ImportError:
python_saml_imported = False
from .. import mgr
from ..exceptions import UserDoesNotExist
from ..services.auth import JwtManager
from ..tools import prepare_url_prefix
from . import BaseController, ControllerAuthMixin, Endpoint, Router, allow_empty_body
@Router('/auth/saml2', secure=False)
class Saml2(BaseController, ControllerAuthMixin):
    """SAML 2.0 single sign-on endpoints (login, logout, ACS, metadata)."""

    @staticmethod
    def _build_req(request, post_data):
        # Translate the cherrypy request into the dict format expected by
        # python3-saml (OneLogin_Saml2_Auth).
        return {
            'https': 'on' if request.scheme == 'https' else 'off',
            'http_host': request.host,
            'script_name': request.path_info,
            'server_port': str(request.port),
            'get_data': {},
            'post_data': post_data
        }

    @staticmethod
    def _check_python_saml():
        # Fail fast when the optional python3-saml dependency is missing or
        # when SSO has not been configured yet.
        if not python_saml_imported:
            raise cherrypy.HTTPError(400, 'Required library not found: `python3-saml`')
        try:
            OneLogin_Saml2_Settings(mgr.SSO_DB.saml2.onelogin_settings)
        except OneLogin_Saml2_Error:
            raise cherrypy.HTTPError(400, 'Single Sign-On is not configured.')

    @Endpoint('POST', path="", version=None)
    @allow_empty_body
    def auth_response(self, **kwargs):
        """Assertion Consumer Service: process the IdP's SAML response and
        log the mapped dashboard user in via a JWT cookie/redirect."""
        Saml2._check_python_saml()
        req = Saml2._build_req(self._request, kwargs)
        auth = OneLogin_Saml2_Auth(req, mgr.SSO_DB.saml2.onelogin_settings)
        auth.process_response()
        errors = auth.get_errors()
        if auth.is_authenticated():
            JwtManager.reset_user()
            username_attribute = auth.get_attribute(mgr.SSO_DB.saml2.get_username_attribute())
            if username_attribute is None:
                raise cherrypy.HTTPError(400,
                                         'SSO error - `{}` not found in auth attributes. '
                                         'Received attributes: {}'
                                         .format(
                                             mgr.SSO_DB.saml2.get_username_attribute(),
                                             auth.get_attributes()))
            username = username_attribute[0]
            url_prefix = prepare_url_prefix(mgr.get_module_option('url_prefix', default=''))
            try:
                mgr.ACCESS_CTRL_DB.get_user(username)
            except UserDoesNotExist:
                # Authenticated at the IdP but unknown to the dashboard.
                raise cherrypy.HTTPRedirect("{}/#/sso/404".format(url_prefix))
            token = JwtManager.gen_token(username)
            JwtManager.set_user(JwtManager.decode_token(token))
            # For backward-compatibility: PyJWT versions < 2.0.0 return bytes.
            token = token.decode('utf-8') if isinstance(token, bytes) else token
            self._set_token_cookie(url_prefix, token)
            raise cherrypy.HTTPRedirect("{}/#/login?access_token={}".format(url_prefix, token))
        return {
            'is_authenticated': auth.is_authenticated(),
            'errors': errors,
            'reason': auth.get_last_error_reason()
        }

    @Endpoint(xml=True, version=None)
    def metadata(self):
        """Serve this service provider's SAML metadata XML."""
        Saml2._check_python_saml()
        saml_settings = OneLogin_Saml2_Settings(mgr.SSO_DB.saml2.onelogin_settings)
        return saml_settings.get_sp_metadata()

    @Endpoint(json_response=False, version=None)
    def login(self):
        """Redirect the browser to the IdP's SSO login URL."""
        Saml2._check_python_saml()
        req = Saml2._build_req(self._request, {})
        auth = OneLogin_Saml2_Auth(req, mgr.SSO_DB.saml2.onelogin_settings)
        raise cherrypy.HTTPRedirect(auth.login())

    @Endpoint(json_response=False, version=None)
    def slo(self):
        """Redirect the browser to the IdP's single-logout URL."""
        Saml2._check_python_saml()
        req = Saml2._build_req(self._request, {})
        auth = OneLogin_Saml2_Auth(req, mgr.SSO_DB.saml2.onelogin_settings)
        raise cherrypy.HTTPRedirect(auth.logout())

    @Endpoint(json_response=False, version=None)
    def logout(self, **kwargs):
        # pylint: disable=unused-argument
        """Invalidate the local session token and return to the login page."""
        Saml2._check_python_saml()
        JwtManager.reset_user()
        token = JwtManager.get_token_from_header()
        self._delete_token_cookie(token)
        url_prefix = prepare_url_prefix(mgr.get_module_option('url_prefix', default=''))
        raise cherrypy.HTTPRedirect("{}/#/login".format(url_prefix))
| 4,462 | 38.149123 | 95 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/service.py
|
from typing import Dict, List, Optional
import cherrypy
from ceph.deployment.service_spec import ServiceSpec
from ..security import Scope
from ..services.exception import handle_custom_error, handle_orchestrator_error
from ..services.orchestrator import OrchClient, OrchFeature
from . import APIDoc, APIRouter, CreatePermission, DeletePermission, Endpoint, \
ReadPermission, RESTController, Task, UpdatePermission
from ._version import APIVersion
from .orchestrator import raise_if_no_orchestrator
def service_task(name, metadata, wait_for=2.0):
    """Factory for a ``service/<name>`` Task decorator with the given metadata."""
    task_name = f"service/{name}"
    return Task(task_name, metadata, wait_for)
@APIRouter('/service', Scope.HOSTS)
@APIDoc("Service Management API", "Service")
class Service(RESTController):
    """Orchestrator-backed management of cluster services."""

    @Endpoint()
    @ReadPermission
    def known_types(self) -> List[str]:
        """
        Get a list of known service types, e.g. 'alertmanager',
        'node-exporter', 'osd' or 'rgw'.
        """
        return ServiceSpec.KNOWN_SERVICE_TYPES

    @raise_if_no_orchestrator([OrchFeature.SERVICE_LIST])
    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
    def list(self, service_name: Optional[str] = None, offset: int = 0, limit: int = 5,
             search: str = '', sort: str = '+service_name') -> List[dict]:
        """Return a paginated, searchable, sortable list of services.

        The total number of matching services is exposed via the
        ``X-Total-Count`` response header.
        """
        orch = OrchClient.instance()
        services, count = orch.services.list(service_name=service_name, offset=int(offset),
                                             limit=int(limit), search=search, sort=sort)
        cherrypy.response.headers['X-Total-Count'] = count
        return services

    @raise_if_no_orchestrator([OrchFeature.SERVICE_LIST])
    def get(self, service_name: str) -> List[dict]:
        """Return a single service by name; 404 if unknown."""
        orch = OrchClient.instance()
        services = orch.services.get(service_name)
        if not services:
            raise cherrypy.HTTPError(404, 'Service {} not found'.format(service_name))
        return services[0].to_json()

    @RESTController.Resource('GET')
    @raise_if_no_orchestrator([OrchFeature.DAEMON_LIST])
    def daemons(self, service_name: str) -> List[dict]:
        """Return the daemons belonging to the given service."""
        orch = OrchClient.instance()
        daemons = orch.services.list_daemons(service_name=service_name)
        return [d.to_dict() for d in daemons]

    @CreatePermission
    @handle_custom_error('service', exceptions=(ValueError, TypeError))
    @raise_if_no_orchestrator([OrchFeature.SERVICE_CREATE])
    @handle_orchestrator_error('service')
    @service_task('create', {'service_name': '{service_name}'})
    def create(self, service_spec: Dict, service_name: str):  # pylint: disable=W0613
        """
        :param service_spec: The service specification as JSON.
        :param service_name: The service name, e.g. 'alertmanager'.
        :return: None
        """
        # no_overwrite=True: creating must not clobber an existing spec.
        OrchClient.instance().services.apply(service_spec, no_overwrite=True)

    @UpdatePermission
    @handle_custom_error('service', exceptions=(ValueError, TypeError))
    @raise_if_no_orchestrator([OrchFeature.SERVICE_CREATE])
    @handle_orchestrator_error('service')
    @service_task('edit', {'service_name': '{service_name}'})
    def set(self, service_spec: Dict, service_name: str):  # pylint: disable=W0613
        """
        :param service_spec: The service specification as JSON.
        :param service_name: The service name, e.g. 'alertmanager'.
        :return: None
        """
        # no_overwrite=False: editing replaces the existing spec.
        OrchClient.instance().services.apply(service_spec, no_overwrite=False)

    @DeletePermission
    @raise_if_no_orchestrator([OrchFeature.SERVICE_DELETE])
    @handle_orchestrator_error('service')
    @service_task('delete', {'service_name': '{service_name}'})
    def delete(self, service_name: str):
        """
        :param service_name: The service name, e.g. 'mds' or 'crash.foo'.
        :return: None
        """
        orch = OrchClient.instance()
        orch.services.remove(service_name)
| 3,866 | 39.28125 | 91 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/settings.py
|
# -*- coding: utf-8 -*-
from ..security import Scope
from ..services.settings import SettingsService, _to_native
from ..settings import Options
from ..settings import Settings as SettingsModule
from . import APIDoc, APIRouter, EndpointDoc, RESTController, UIRouter
# OpenAPI response schema for the settings endpoints; ``(type,
# description)`` tuples consumed by EndpointDoc.
SETTINGS_SCHEMA = [{
    "name": (str, 'Settings Name'),
    "default": (bool, 'Default Settings'),
    "type": (str, 'Type of Settings'),
    "value": (bool, 'Settings Value')
}]
@APIRouter('/settings', Scope.CONFIG_OPT)
@APIDoc("Settings Management API", "Settings")
class Settings(RESTController):
    """
    Manage the settings of the dashboard itself (not the Ceph cluster).
    """
    @EndpointDoc("Display Settings Information",
                 parameters={
                     'names': (str, 'Name of Settings'),
                 },
                 responses={200: SETTINGS_SCHEMA})
    def list(self, names=None):
        """
        Return all available options, optionally restricted to the
        comma-separated option names given in ``names``.

        :type names: None|str
        :rtype: list[dict]
        """
        available = [
            attr for attr in Options.__dict__
            if attr.isupper() and not attr.startswith('_')
        ]
        if names:
            wanted = set(names.split(','))
            available = list(set(available) & wanted)
        return [self._get(attr) for attr in available]

    def _get(self, name):
        # attribute_handler normalizes the incoming option name to the
        # canonical attribute name used by the Options class.
        with SettingsService.attribute_handler(name) as sname:
            option = getattr(Options, sname)
        return {
            'name': sname,
            'default': option.default_value,
            'type': option.types_as_str(),
            'value': getattr(SettingsModule, sname)
        }

    def get(self, name):
        """
        Return name, type, default value and current value of one option.

        :rtype: dict
        """
        return self._get(name)

    def set(self, name, value):
        """Set a single option to the given value."""
        with SettingsService.attribute_handler(name) as sname:
            setattr(SettingsModule, _to_native(sname), value)

    def delete(self, name):
        """Reset a single option to its default by deleting the override."""
        with SettingsService.attribute_handler(name) as sname:
            delattr(SettingsModule, _to_native(sname))

    def bulk_set(self, **kwargs):
        """Set several options at once from keyword arguments."""
        with SettingsService.attribute_handler(kwargs) as data:
            for key, value in data.items():
                setattr(SettingsModule, _to_native(key), value)
@UIRouter('/standard_settings')
class StandardSettings(RESTController):
    """Read-only UI endpoint exposing password-policy related settings."""
    def list(self):
        """
        Get various Dashboard related settings.
        :return: Returns a dictionary containing various Dashboard
        settings.
        :rtype: dict
        """
        return {  # pragma: no cover - no complexity there
            'user_pwd_expiration_span':
                SettingsModule.USER_PWD_EXPIRATION_SPAN,
            'user_pwd_expiration_warning_1':
                SettingsModule.USER_PWD_EXPIRATION_WARNING_1,
            'user_pwd_expiration_warning_2':
                SettingsModule.USER_PWD_EXPIRATION_WARNING_2,
            'pwd_policy_enabled':
                SettingsModule.PWD_POLICY_ENABLED,
            'pwd_policy_min_length':
                SettingsModule.PWD_POLICY_MIN_LENGTH,
            'pwd_policy_check_length_enabled':
                SettingsModule.PWD_POLICY_CHECK_LENGTH_ENABLED,
            'pwd_policy_check_oldpwd_enabled':
                SettingsModule.PWD_POLICY_CHECK_OLDPWD_ENABLED,
            'pwd_policy_check_username_enabled':
                SettingsModule.PWD_POLICY_CHECK_USERNAME_ENABLED,
            'pwd_policy_check_exclusion_list_enabled':
                SettingsModule.PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED,
            'pwd_policy_check_repetitive_chars_enabled':
                SettingsModule.PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED,
            'pwd_policy_check_sequential_chars_enabled':
                SettingsModule.PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED,
            'pwd_policy_check_complexity_enabled':
                SettingsModule.PWD_POLICY_CHECK_COMPLEXITY_ENABLED
        }
| 4,271 | 36.473684 | 75 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/summary.py
|
# -*- coding: utf-8 -*-
import json
from .. import mgr
from ..controllers.rbd_mirroring import get_daemons_and_pools
from ..exceptions import ViewCacheNoDataException
from ..security import Permission, Scope
from ..services import progress
from ..tools import TaskManager
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc
# OpenAPI response schema for GET /api/summary; ``(type, description)``
# tuples consumed by EndpointDoc.
SUMMARY_SCHEMA = {
    "health_status": (str, ""),
    "mgr_id": (str, ""),
    "mgr_host": (str, ""),
    "have_mon_connection": (str, ""),
    "executing_tasks": ([str], ""),
    "finished_tasks": ([{
        "name": (str, ""),
        "metadata": ({
            "pool": (int, ""),
        }, ""),
        "begin_time": (str, ""),
        "end_time": (str, ""),
        "duration": (int, ""),
        "progress": (int, ""),
        "success": (bool, ""),
        "ret_value": (str, ""),
        "exception": (str, ""),
    }], ""),
    "version": (str, ""),
    "rbd_mirroring": ({
        "warnings": (int, ""),
        "errors": (int, "")
    }, "")
}
@APIRouter('/summary')
@APIDoc("Get Ceph Summary Details", "Summary")
class Summary(BaseController):
    """Aggregated dashboard landing-page data (health, tasks, version)."""

    def _health_status(self):
        # The 'health' blob carries a JSON document; extract its status field.
        health_data = mgr.get("health")
        return json.loads(health_data["json"])['status']

    def _rbd_mirroring(self):
        """Count RBD mirroring warnings/errors across daemons and pools."""
        try:
            _, data = get_daemons_and_pools()
        except ViewCacheNoDataException:  # pragma: no cover
            return {}  # pragma: no cover
        daemons = data.get('daemons', [])
        pools = data.get('pools', {})
        warnings = 0
        errors = 0
        for daemon in daemons:
            if daemon['health_color'] == 'error':  # pragma: no cover
                errors += 1
            elif daemon['health_color'] == 'warning':  # pragma: no cover
                warnings += 1
        for _, pool in pools.items():
            if pool['health_color'] == 'error':  # pragma: no cover
                errors += 1
            elif pool['health_color'] == 'warning':  # pragma: no cover
                warnings += 1
        return {'warnings': warnings, 'errors': errors}

    def _task_permissions(self, name):  # pragma: no cover
        """Return True if the current user may see the given task type."""
        result = True
        if name == 'pool/create':
            result = self._has_permissions(Permission.CREATE, Scope.POOL)
        elif name == 'pool/edit':
            result = self._has_permissions(Permission.UPDATE, Scope.POOL)
        elif name == 'pool/delete':
            result = self._has_permissions(Permission.DELETE, Scope.POOL)
        elif name in [
                'rbd/create', 'rbd/copy', 'rbd/snap/create',
                'rbd/clone', 'rbd/trash/restore']:
            result = self._has_permissions(Permission.CREATE, Scope.RBD_IMAGE)
        elif name in [
                'rbd/edit', 'rbd/snap/edit', 'rbd/flatten',
                'rbd/snap/rollback']:
            result = self._has_permissions(Permission.UPDATE, Scope.RBD_IMAGE)
        elif name in [
                'rbd/delete', 'rbd/snap/delete', 'rbd/trash/move',
                'rbd/trash/remove', 'rbd/trash/purge']:
            result = self._has_permissions(Permission.DELETE, Scope.RBD_IMAGE)
        return result

    def _get_host(self):
        # type: () -> str
        # URI of the host currently serving the dashboard, or '' if none.
        services = mgr.get('mgr_map')['services']
        return services['dashboard'] if 'dashboard' in services else ''

    @Endpoint()
    @EndpointDoc("Display Summary",
                 responses={200: SUMMARY_SCHEMA})
    def __call__(self):
        """Assemble the summary payload, filtered by the user's permissions."""
        exe_t, fin_t = TaskManager.list_serializable()
        executing_tasks = [task for task in exe_t if self._task_permissions(task['name'])]
        finished_tasks = [task for task in fin_t if self._task_permissions(task['name'])]
        e, f = progress.get_progress_tasks()
        executing_tasks.extend(e)
        finished_tasks.extend(f)
        # Most recently started / most recently completed first.
        executing_tasks.sort(key=lambda t: t['begin_time'], reverse=True)
        finished_tasks.sort(key=lambda t: t['end_time'], reverse=True)
        result = {
            'health_status': self._health_status(),
            'mgr_id': mgr.get_mgr_id(),
            'mgr_host': self._get_host(),
            'have_mon_connection': mgr.have_mon_connection(),
            'executing_tasks': executing_tasks,
            'finished_tasks': finished_tasks,
            'version': mgr.version
        }
        # Only expose mirroring counters to users allowed to read them.
        if self._has_permissions(Permission.READ, Scope.RBD_MIRRORING):
            result['rbd_mirroring'] = self._rbd_mirroring()
        return result
| 4,446 | 34.862903 | 90 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/task.py
|
# -*- coding: utf-8 -*-
from ..services import progress
from ..tools import TaskManager
from . import APIDoc, APIRouter, EndpointDoc, RESTController
# OpenAPI response schema for GET /api/task; ``(type, description)``
# tuples consumed by EndpointDoc.
TASK_SCHEMA = {
    "executing_tasks": (str, "ongoing executing tasks"),
    "finished_tasks": ([{
        "name": (str, "finished tasks name"),
        "metadata": ({
            "pool": (int, "")
        }, ""),
        "begin_time": (str, "Task begin time"),
        "end_time": (str, "Task end time"),
        "duration": (int, ""),
        "progress": (int, "Progress of tasks"),
        "success": (bool, ""),
        "ret_value": (bool, ""),
        "exception": (bool, "")
    }], "")
}
@APIRouter('/task')
@APIDoc("Task Management API", "Task")
class Task(RESTController):
    """Expose the dashboard task queue (running and finished tasks)."""
    @EndpointDoc("Display Tasks",
                 parameters={
                     'name': (str, 'Task Name'),
                 },
                 responses={200: TASK_SCHEMA})
    def list(self, name=None):
        """Return running and finished tasks, newest first."""
        running, done = TaskManager.list_serializable(name)
        prog_running, prog_done = progress.get_progress_tasks()
        running += prog_running
        done += prog_done
        # Most recently started / most recently completed first.
        running.sort(key=lambda task: task['begin_time'], reverse=True)
        done.sort(key=lambda task: task['end_time'], reverse=True)
        return {
            'executing_tasks': running,
            'finished_tasks': done
        }
| 1,372 | 28.212766 | 69 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/telemetry.py
|
# -*- coding: utf-8 -*-
from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
from . import APIDoc, APIRouter, EndpointDoc, RESTController
# OpenAPI response schema for GET /api/telemetry/report.  Each entry is a
# ``(type, description)`` tuple; the nested example keys (version strings,
# CPU models, bucket sizes, ...) only document the shape of a typical
# generated report.
REPORT_SCHEMA = {
    "report": ({
        "leaderboard": (bool, ""),
        "report_version": (int, ""),
        "report_timestamp": (str, ""),
        "report_id": (str, ""),
        "channels": ([str], ""),
        "channels_available": ([str], ""),
        "license": (str, ""),
        "created": (str, ""),
        "mon": ({
            "count": (int, ""),
            "features": ({
                "persistent": ([str], ""),
                "optional": ([int], "")
            }, ""),
            "min_mon_release": (int, ""),
            "v1_addr_mons": (int, ""),
            "v2_addr_mons": (int, ""),
            "ipv4_addr_mons": (int, ""),
            "ipv6_addr_mons": (int, ""),
        }, ""),
        "config": ({
            "cluster_changed": ([str], ""),
            "active_changed": ([str], "")
        }, ""),
        "rbd": ({
            "num_pools": (int, ""),
            "num_images_by_pool": ([int], ""),
            "mirroring_by_pool": ([bool], ""),
        }, ""),
        "pools": ([{
            "pool": (int, ""),
            "type": (str, ""),
            "pg_num": (int, ""),
            "pgp_num": (int, ""),
            "size": (int, ""),
            "min_size": (int, ""),
            "pg_autoscale_mode": (str, ""),
            "target_max_bytes": (int, ""),
            "target_max_objects": (int, ""),
            "erasure_code_profile": (str, ""),
            "cache_mode": (str, ""),
        }], ""),
        "osd": ({
            "count": (int, ""),
            "require_osd_release": (str, ""),
            "require_min_compat_client": (str, ""),
            "cluster_network": (bool, ""),
        }, ""),
        "crush": ({
            "num_devices": (int, ""),
            "num_types": (int, ""),
            "num_buckets": (int, ""),
            "num_rules": (int, ""),
            "device_classes": ([int], ""),
            "tunables": ({
                "choose_local_tries": (int, ""),
                "choose_local_fallback_tries": (int, ""),
                "choose_total_tries": (int, ""),
                "chooseleaf_descend_once": (int, ""),
                "chooseleaf_vary_r": (int, ""),
                "chooseleaf_stable": (int, ""),
                "straw_calc_version": (int, ""),
                "allowed_bucket_algs": (int, ""),
                "profile": (str, ""),
                "optimal_tunables": (int, ""),
                "legacy_tunables": (int, ""),
                "minimum_required_version": (str, ""),
                "require_feature_tunables": (int, ""),
                "require_feature_tunables2": (int, ""),
                "has_v2_rules": (int, ""),
                "require_feature_tunables3": (int, ""),
                "has_v3_rules": (int, ""),
                "has_v4_buckets": (int, ""),
                "require_feature_tunables5": (int, ""),
                "has_v5_rules": (int, ""),
            }, ""),
            "compat_weight_set": (bool, ""),
            "num_weight_sets": (int, ""),
            "bucket_algs": ({
                "straw2": (int, ""),
            }, ""),
            "bucket_sizes": ({
                "1": (int, ""),
                "3": (int, ""),
            }, ""),
            "bucket_types": ({
                "1": (int, ""),
                "11": (int, ""),
            }, ""),
        }, ""),
        "fs": ({
            "count": (int, ""),
            "feature_flags": ({
                "enable_multiple": (bool, ""),
                "ever_enabled_multiple": (bool, ""),
            }, ""),
            "num_standby_mds": (int, ""),
            "filesystems": ([int], ""),
            "total_num_mds": (int, ""),
        }, ""),
        "metadata": ({
            "osd": ({
                "osd_objectstore": ({
                    "bluestore": (int, ""),
                }, ""),
                "rotational": ({
                    "1": (int, ""),
                }, ""),
                "arch": ({
                    "x86_64": (int, ""),
                }, ""),
                "ceph_version": ({
                    "ceph version 16.0.0-3151-gf202994fcf": (int, ""),
                }, ""),
                "os": ({
                    "Linux": (int, ""),
                }, ""),
                "cpu": ({
                    "Intel(R) Core(TM) i7-8665U CPU @ 1.90GHz": (int, ""),
                }, ""),
                "kernel_description": ({
                    "#1 SMP Wed Jul 1 19:53:01 UTC 2020": (int, ""),
                }, ""),
                "kernel_version": ({
                    "5.7.7-200.fc32.x86_64": (int, ""),
                }, ""),
                "distro_description": ({
                    "CentOS Linux 8 (Core)": (int, ""),
                }, ""),
                "distro": ({
                    "centos": (int, ""),
                }, ""),
            }, ""),
            "mon": ({
                "arch": ({
                    "x86_64": (int, ""),
                }, ""),
                "ceph_version": ({
                    "ceph version 16.0.0-3151-gf202994fcf": (int, ""),
                }, ""),
                "os": ({
                    "Linux": (int, ""),
                }, ""),
                "cpu": ({
                    "Intel(R) Core(TM) i7-8665U CPU @ 1.90GHz": (int, ""),
                }, ""),
                "kernel_description": ({
                    "#1 SMP Wed Jul 1 19:53:01 UTC 2020": (int, ""),
                }, ""),
                "kernel_version": ({
                    "5.7.7-200.fc32.x86_64": (int, ""),
                }, ""),
                "distro_description": ({
                    "CentOS Linux 8 (Core)": (int, ""),
                }, ""),
                "distro": ({
                    "centos": (int, ""),
                }, ""),
            }, ""),
        }, ""),
        "hosts": ({
            "num": (int, ""),
            "num_with_mon": (int, ""),
            "num_with_mds": (int, ""),
            "num_with_osd": (int, ""),
            "num_with_mgr": (int, ""),
        }, ""),
        "usage": ({
            "pools": (int, ""),
            "pg_num": (int, ""),
            "total_used_bytes": (int, ""),
            "total_bytes": (int, ""),
            "total_avail_bytes": (int, ""),
        }, ""),
        "services": ({
            "rgw": (int, ""),
        }, ""),
        "rgw": ({
            "count": (int, ""),
            "zones": (int, ""),
            "zonegroups": (int, ""),
            "frontends": ([str], "")
        }, ""),
        "balancer": ({
            "active": (bool, ""),
            "mode": (str, ""),
        }, ""),
        "crashes": ([int], "")
    }, ""),
    "device_report": (str, "")
}
@APIRouter('/telemetry', Scope.CONFIG_OPT)
@APIDoc("Display Telemetry Report", "Telemetry")
class Telemetry(RESTController):
    """Proxy endpoints for the telemetry mgr module."""
    @RESTController.Collection('GET')
    @EndpointDoc("Get Detailed Telemetry report",
                 responses={200: REPORT_SCHEMA})
    def report(self):
        """
        Fetch the full Ceph and device telemetry report from the
        telemetry mgr module.
        :rtype: dict
        """
        return mgr.remote('telemetry', 'get_report_locked', 'all')

    def singleton_set(self, enable=True, license_name=None):
        """
        Enable or disable sending of data collected by the Telemetry module.
        :param enable: Enable or disable sending data
        :type enable: bool
        :param license_name: License string, must be 'sharing-1-0' to
            acknowledge the data sharing license when enabling.
        :type license_name: string
        """
        if not enable:
            mgr.remote('telemetry', 'off')
            return
        # Enabling requires explicit acceptance of the sharing license.
        if license_name != 'sharing-1-0':
            raise DashboardException(
                code='telemetry_enable_license_missing',
                msg='Telemetry data is licensed under the Community Data License Agreement - '
                    'Sharing - Version 1.0 (https://cdla.io/sharing-1-0/). To enable, add '
                    '{"license": "sharing-1-0"} to the request payload.'
            )
        mgr.remote('telemetry', 'on', license_name)
| 8,363 | 33.85 | 98 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/controllers/user.py
|
# -*- coding: utf-8 -*-
import time
from datetime import datetime
import cherrypy
from ceph_argparse import CephString
from .. import mgr
from ..exceptions import DashboardException, PasswordPolicyException, \
PwdExpirationDateNotValid, UserAlreadyExists, UserDoesNotExist
from ..security import Scope
from ..services.access_control import SYSTEM_ROLES, PasswordPolicy
from ..services.auth import JwtManager
from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
RESTController, allow_empty_body, validate_ceph_type
# OpenAPI response schema for the user endpoints; ``(type, description)``
# tuples consumed by EndpointDoc.
USER_SCHEMA = ([{
    "username": (str, 'Username of the user'),
    "roles": ([str], 'User Roles'),
    "name": (str, 'User Name'),
    "email": (str, 'User email address'),
    "lastUpdate": (int, 'Details last updated'),
    "enabled": (bool, 'Is the user enabled?'),
    "pwdExpirationDate": (str, 'Password Expiration date'),
    "pwdUpdateRequired": (bool, 'Is Password Update Required?')
}], '')
def validate_password_policy(password, username=None, old_password=None):
    """
    Check *password* against the configured password policy.

    :param password: The password to validate.
    :param username: The name of the user (optional).
    :param old_password: The old password (optional).
    :return: Returns the password complexity credits.
    :rtype: int
    :raises DashboardException: If a password policy fails.
    """
    policy = PasswordPolicy(password, username, old_password)
    try:
        policy.check_all()
    except PasswordPolicyException as ex:
        # Surface the failing policy rule to the API consumer.
        raise DashboardException(msg=str(ex),
                                 code='password_policy_validation_failed',
                                 component='user')
    return policy.complexity_credits
@APIRouter('/user', Scope.USER)
@APIDoc("Display User Details", "User")
class User(RESTController):
    """REST controller for managing dashboard user accounts (CRUD)."""

    @staticmethod
    def _user_to_dict(user):
        """Serialize a user for API responses, stripping the stored
        password so it never leaves the backend."""
        result = user.to_dict()
        del result['password']
        return result

    @staticmethod
    def _get_user_roles(roles):
        """Resolve a list of role names to role objects.

        Combines the roles stored in the access-control DB with the
        built-in system roles (system roles win on name collisions, since
        they are applied last via ``update``).

        :raises DashboardException: If any role name is unknown.
        """
        all_roles = dict(mgr.ACCESS_CTRL_DB.roles)
        all_roles.update(SYSTEM_ROLES)
        try:
            return [all_roles[rolename] for rolename in roles]
        except KeyError:
            raise DashboardException(msg='Role does not exist',
                                     code='role_does_not_exist',
                                     component='user')

    @EndpointDoc("Get List Of Users",
                 responses={200: USER_SCHEMA})
    def list(self):
        """Return all users as a list of dicts (without passwords)."""
        users = mgr.ACCESS_CTRL_DB.users
        result = [User._user_to_dict(u) for _, u in users.items()]
        return result

    def get(self, username):
        """Return a single user, or HTTP 404 if it does not exist."""
        try:
            user = mgr.ACCESS_CTRL_DB.get_user(username)
        except UserDoesNotExist:
            raise cherrypy.HTTPError(404)
        return User._user_to_dict(user)

    @validate_ceph_type([('username', CephString())], 'user')
    def create(self, username=None, password=None, name=None, email=None,
               roles=None, enabled=True, pwdExpirationDate=None, pwdUpdateRequired=True):
        """Create a new user.

        The password (if given) is validated against the password policy
        before the user is created; role names are resolved up front so an
        unknown role aborts the request before any record is written.
        """
        if not username:
            raise DashboardException(msg='Username is required',
                                     code='username_required',
                                     component='user')
        user_roles = None
        if roles:
            # Resolve role names first: fails fast on unknown roles.
            user_roles = User._get_user_roles(roles)
        if password:
            validate_password_policy(password, username)
        try:
            user = mgr.ACCESS_CTRL_DB.create_user(username, password, name,
                                                  email, enabled, pwdExpirationDate,
                                                  pwdUpdateRequired)
        except UserAlreadyExists:
            raise DashboardException(msg='Username already exists',
                                     code='username_already_exists',
                                     component='user')
        except PwdExpirationDateNotValid:
            raise DashboardException(msg='Password expiration date must not be in '
                                         'the past',
                                     code='pwd_past_expiration_date',
                                     component='user')
        if user_roles:
            user.set_roles(user_roles)
        # Persist the new user (and its roles) to the access-control DB.
        mgr.ACCESS_CTRL_DB.save()
        return User._user_to_dict(user)

    def delete(self, username):
        """Delete a user; the currently authenticated user cannot delete
        its own account."""
        session_username = JwtManager.get_username()
        if session_username == username:
            raise DashboardException(msg='Cannot delete current user',
                                     code='cannot_delete_current_user',
                                     component='user')
        try:
            mgr.ACCESS_CTRL_DB.delete_user(username)
        except UserDoesNotExist:
            raise cherrypy.HTTPError(404)
        mgr.ACCESS_CTRL_DB.save()

    def set(self, username, password=None, name=None, email=None, roles=None,
            enabled=None, pwdExpirationDate=None, pwdUpdateRequired=False):
        """Update an existing user.

        Users may not disable their own account. A new password is checked
        against the password policy, and the expiration date must not lie
        in the past.
        """
        if JwtManager.get_username() == username and enabled is False:
            raise DashboardException(msg='You are not allowed to disable your user',
                                     code='cannot_disable_current_user',
                                     component='user')
        try:
            user = mgr.ACCESS_CTRL_DB.get_user(username)
        except UserDoesNotExist:
            raise cherrypy.HTTPError(404)
        user_roles = []
        if roles:
            user_roles = User._get_user_roles(roles)
        if password:
            validate_password_policy(password, username)
            user.set_password(password)
        # Reject expiration dates that already lie in the past (compared
        # against the current UTC time as a unix timestamp).
        if pwdExpirationDate and \
                (pwdExpirationDate < int(time.mktime(datetime.utcnow().timetuple()))):
            raise DashboardException(
                msg='Password expiration date must not be in the past',
                code='pwd_past_expiration_date', component='user')
        # NOTE(review): name/email/roles are overwritten unconditionally,
        # so omitting them clears the stored values — confirm callers
        # always send the full record.
        user.name = name
        user.email = email
        if enabled is not None:
            user.enabled = enabled
        user.pwd_expiration_date = pwdExpirationDate
        user.set_roles(user_roles)
        user.pwd_update_required = pwdUpdateRequired
        mgr.ACCESS_CTRL_DB.save()
        return User._user_to_dict(user)
@APIRouter('/user')
@APIDoc("Get User Password Policy Details", "UserPasswordPolicy")
class UserPasswordPolicy(RESTController):
    """Endpoint for checking candidate passwords against the policy."""

    # Upper bounds (exclusive) for each textual valuation; anything at or
    # above the last bound is rated 'Very strong'.
    _VALUATIONS = ((15, 'Weak'), (20, 'OK'), (25, 'Strong'))

    @Endpoint('POST')
    @allow_empty_body
    def validate_password(self, password, username=None, old_password=None):
        """
        Check if the password meets the password policy.
        :param password: The password to validate.
        :param username: The name of the user (optional).
        :param old_password: The old password (optional).
        :return: An object with properties valid, credits and valuation.
        'credits' contains the password complexity credits and
        'valuation' the textual summary of the validation.
        """
        result = {'valid': False, 'credits': 0, 'valuation': None}
        try:
            earned = validate_password_policy(password, username, old_password)
        except DashboardException as ex:
            # Policy violation: report the failure text, keep valid=False.
            result['valuation'] = str(ex)
            return result
        result['credits'] = earned
        for bound, label in self._VALUATIONS:
            if earned < bound:
                result['valuation'] = label
                break
        else:
            result['valuation'] = 'Very strong'
        result['valid'] = True
        return result
@APIRouter('/user/{username}')
@APIDoc("Change User Password", "UserChangePassword")
class UserChangePassword(BaseController):
    """Endpoint allowing the authenticated user to change its password."""

    @Endpoint('POST')
    def change_password(self, username, old_password, new_password):
        """Change the password of the currently authenticated user.

        The URL username must match the session user, the old password
        must be correct, and the new password must satisfy the policy.
        """
        authenticated = JwtManager.get_username()
        # Only the session owner may change this account's password.
        if username != authenticated:
            raise DashboardException(msg='Invalid user context',
                                     code='invalid_user_context',
                                     component='user')
        try:
            user = mgr.ACCESS_CTRL_DB.get_user(authenticated)
        except UserDoesNotExist:
            raise cherrypy.HTTPError(404)
        # Verify the caller knows the current password before accepting
        # a replacement.
        if not user.compare_password(old_password):
            raise DashboardException(msg='Invalid old password',
                                     code='invalid_old_password',
                                     component='user')
        validate_password_policy(new_password, username, old_password)
        user.set_password(new_password)
        mgr.ACCESS_CTRL_DB.save()
| 8,616 | 39.07907 | 90 |
py
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress.config.ts
|
import { defineConfig } from 'cypress'
// Cypress end-to-end test configuration for the Ceph Dashboard frontend.
export default defineConfig({
  // Record a video per spec run, but do not keep it when all tests pass.
  video: true,
  videoUploadOnPasses: false,
  // Generous timeouts: the dashboard backend can respond slowly in CI.
  defaultCommandTimeout: 120000,
  responseTimeout: 45000,
  viewportHeight: 1080,
  viewportWidth: 1920,
  projectId: 'k7ab29',
  // Emit both console (spec) and JUnit XML reports for CI consumption.
  reporter: 'cypress-multi-reporters',
  reporterOptions: {
    reporterEnabled: 'spec, mocha-junit-reporter',
    mochaJunitReporterReporterOptions: {
      mochaFile: 'cypress/reports/results-[hash].xml',
    },
  },
  retries: 1,
  env: {
    // Default dashboard credentials and the URL of the second test
    // cluster used by the RBD-mirroring specs.
    LOGIN_USER: 'admin',
    LOGIN_PWD: 'admin',
    CEPH2_URL: 'https://localhost:4202/',
  },
  chromeWebSecurity: false,
  // Applitools Eyes settings (visual regression testing).
  eyesIsDisabled: false,
  eyesFailCypressOnDiff: true,
  eyesDisableBrowserFetching: false,
  eyesLegacyHooks: true,
  eyesTestConcurrency: 5,
  eyesPort: 35321,
  e2e: {
    // We've imported your old cypress plugins here.
    // You may want to clean this up later by importing these.
    setupNodeEvents(on, config) {
      return require('./cypress/plugins/index.js')(on, config)
    },
    baseUrl: 'https://localhost:4200/',
    excludeSpecPattern: ['*.po.ts', '**/orchestrator/**'],
    experimentalSessionAndOrigin: true,
    specPattern: 'cypress/e2e/**/*-spec.{js,jsx,ts,tsx}',
  },
})
| 1,207 | 27.093023 | 62 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/page-helper.po.ts
|
/**
 * A navigable dashboard page: its URL hash and a selector for the page
 * component that must be present for the page to count as loaded.
 */
interface Page {
  url: string;
  id: string;
}
export abstract class PageHelper {
  // Map of logical page names (e.g. 'index', 'create') to their URL/selector.
  pages: Record<string, Page>;
  /**
   * Decorator to be used on Helper methods to restrict access to one particular URL. This shall
   * help developers to prevent and highlight mistakes. It also reduces boilerplate code and by
   * thus, increases readability.
   */
  static restrictTo(page: string): Function {
    return (target: any, propertyKey: string, descriptor: PropertyDescriptor) => {
      const fn: Function = descriptor.value;
      descriptor.value = function (...args: any) {
        cy.location('hash').should((url) => {
          expect(url).to.eq(
            page,
            `Method ${target.constructor.name}::${propertyKey} is supposed to be ` +
              `run on path "${page}", but was run on URL "${url}"`
          );
        });
        fn.apply(this, args);
      };
    };
  }
  /**
   * Navigates to the given page or to index.
   * Waits until the page component is loaded
   */
  navigateTo(name: string = null) {
    name = name || 'index';
    const page = this.pages[name];
    cy.visit(page.url);
    cy.get(page.id);
  }
  /**
   * Navigates back and waits for the hash to change
   */
  navigateBack() {
    cy.location('hash').then((hash) => {
      cy.go('back');
      cy.location('hash').should('not.be', hash);
    });
  }
  /**
   * Navigates to the edit page
   */
  navigateEdit(name: string, select = true, breadcrumb = true) {
    if (select) {
      this.navigateTo();
      this.getFirstTableCell(name).click();
    }
    // Wait until the row is no longer in a transient 'Creating...' state.
    cy.contains('Creating...').should('not.exist');
    cy.contains('button', 'Edit').click();
    if (breadcrumb) {
      this.expectBreadcrumbText('Edit');
    }
  }
  /**
   * Checks the active breadcrumb value.
   */
  expectBreadcrumbText(text: string) {
    cy.get('.breadcrumb-item.active').should('have.text', text);
  }
  // Returns all tab links of the current page.
  getTabs() {
    return cy.get('.nav.nav-tabs a');
  }
  // Returns the tab link with the given caption.
  getTab(tabName: string) {
    return cy.contains('.nav.nav-tabs a', tabName);
  }
  // Returns the caption of the tab at the given index.
  getTabText(index: number) {
    return this.getTabs().its(index).text();
  }
  // Returns the number of tabs on the current page.
  getTabsCount(): any {
    return this.getTabs().its('length');
  }
  /**
   * Helper method to navigate/click a tab inside the expanded table row.
   * @param selector The selector of the expanded table row.
   * @param name The name of the row which should expand.
   * @param tabName Name of the tab to be navigated/clicked.
   */
  clickTab(selector: string, name: string, tabName: string) {
    this.getExpandCollapseElement(name).click();
    cy.get(selector).within(() => {
      this.getTab(tabName).click();
    });
  }
  /**
   * Helper method to select an option inside a select element.
   * This method will also expect that the option was set.
   * @param option The option text (not value) to be selected.
   */
  selectOption(selectionName: string, option: string) {
    cy.get(`select[name=${selectionName}]`).select(option);
    return this.expectSelectOption(selectionName, option);
  }
  /**
   * Helper method to expect a set option inside a select element.
   * @param option The selected option text (not value) that is to
   *   be expected.
   */
  expectSelectOption(selectionName: string, option: string) {
    return cy.get(`select[name=${selectionName}] option:checked`).contains(option);
  }
  // Returns all <legend> elements, e.g. the chart/table section captions.
  getLegends() {
    return cy.get('legend');
  }
  // Returns the toast notification container.
  getToast() {
    return cy.get('.ngx-toastr');
  }
  /**
   * Waits for the table to load its data
   * Should be used in all methods that access the datatable
   */
  private waitDataTableToLoad() {
    cy.get('cd-table').should('exist');
    cy.get('datatable-scroller, .empty-row');
  }
  // Returns all datatable wrappers on the page (after data has loaded).
  getDataTables() {
    this.waitDataTableToLoad();
    return cy.get('cd-table .dataTables_wrapper');
  }
  // Returns the footer span holding the requested row-count type.
  private getTableCountSpan(spanType: 'selected' | 'found' | 'total') {
    return cy.contains('.datatable-footer-inner .page-count span', spanType);
  }
  // Get 'selected', 'found', or 'total' row count of a table.
  getTableCount(spanType: 'selected' | 'found' | 'total') {
    this.waitDataTableToLoad();
    return this.getTableCountSpan(spanType).then(($elem) => {
      const text = $elem
        .filter((_i, e) => e.innerText.includes(spanType))
        .first()
        .text();
      // The span text looks like '<number> <label>'; extract the number.
      return Number(text.match(/(\d+)\s+\w*/)[1]);
    });
  }
  // Wait until selected', 'found', or 'total' row count of a table equal to a number.
  expectTableCount(spanType: 'selected' | 'found' | 'total', count: number) {
    this.waitDataTableToLoad();
    this.getTableCountSpan(spanType).should(($elem) => {
      const text = $elem.first().text();
      expect(Number(text.match(/(\d+)\s+\w*/)[1])).to.equal(count);
    });
  }
  // Searches for the given content and returns the matching table row.
  getTableRow(content: string) {
    this.waitDataTableToLoad();
    this.searchTable(content);
    return cy.contains('.datatable-body-row', content);
  }
  // Returns all table rows (without filtering).
  getTableRows() {
    this.waitDataTableToLoad();
    return cy.get('datatable-row-wrapper');
  }
  /**
   * Returns the first table cell.
   * Optionally, you can specify the content of the cell.
   */
  getFirstTableCell(content?: string) {
    this.waitDataTableToLoad();
    if (content) {
      this.searchTable(content);
      return cy.contains('.datatable-body-cell-label', content);
    } else {
      return cy.get('.datatable-body-cell-label').first();
    }
  }
  /**
   * Returns the cell in the given column whose text matches exactly
   * (or partially, if partialMatch is set) after filtering the table.
   */
  getTableCell(columnIndex: number, exactContent: string, partialMatch = false) {
    this.waitDataTableToLoad();
    this.clearTableSearchInput();
    this.searchTable(exactContent);
    if (partialMatch) {
      return cy.contains(
        `datatable-body-row datatable-body-cell:nth-child(${columnIndex})`,
        exactContent
      );
    }
    return cy.contains(
      `datatable-body-row datatable-body-cell:nth-child(${columnIndex})`,
      new RegExp(`^${exactContent}$`)
    );
  }
  // Asserts that a cell with the given content is (or is not) present.
  existTableCell(name: string, oughtToBePresent = true) {
    const waitRule = oughtToBePresent ? 'be.visible' : 'not.exist';
    this.getFirstTableCell(name).should(waitRule);
  }
  // Returns the expand/collapse toggle of a row (first row if no content given).
  getExpandCollapseElement(content?: string) {
    this.waitDataTableToLoad();
    if (content) {
      return cy.contains('.datatable-body-row', content).find('.tc_expand-collapse');
    } else {
      return cy.get('.tc_expand-collapse').first();
    }
  }
  /**
   * Gets column headers of table
   */
  getDataTableHeaders(index = 0) {
    this.waitDataTableToLoad();
    return cy.get('.datatable-header').its(index).find('.datatable-header-cell');
  }
  /**
   * Grabs striped tables
   */
  getStatusTables() {
    return cy.get('.table.table-striped');
  }
  // Applies a named filter with the given option via the filter dropdowns.
  filterTable(name: string, option: string) {
    this.waitDataTableToLoad();
    cy.get('.tc_filter_name > button').click();
    cy.contains(`.tc_filter_name .dropdown-item`, name).click();
    cy.get('.tc_filter_option > button').click();
    cy.contains(`.tc_filter_option .dropdown-item`, option).click();
  }
  // Sets the number of rows shown per page.
  setPageSize(size: string) {
    cy.get('cd-table .dataTables_paginate input').first().clear({ force: true }).type(size);
  }
  // Types the given text into the table search box.
  searchTable(text: string) {
    this.waitDataTableToLoad();
    this.setPageSize('10');
    cy.get('[aria-label=search]').first().clear({ force: true }).type(text);
  }
  // Clears the table search box via its clear button.
  clearTableSearchInput() {
    this.waitDataTableToLoad();
    return cy.get('cd-table .search button').first().click();
  }
  // Click the action button
  clickActionButton(action: string) {
    cy.get('.table-actions button.dropdown-toggle').first().click(); // open submenu
    cy.get(`button.${action}`).click(); // click on "action" menu item
  }
  /**
   * This is a generic method to delete table rows.
   * It will select the first row that contains the provided name and delete it.
   * After that it will wait until the row is no longer displayed.
   * @param name The string to search in table cells.
   * @param columnIndex If provided, search string in columnIndex column.
   */
  delete(name: string, columnIndex?: number, section?: string) {
    // Selects row
    const getRow = columnIndex
      ? this.getTableCell.bind(this, columnIndex, name, true)
      : this.getFirstTableCell.bind(this);
    getRow(name).click();
    let action: string;
    // The hosts page labels the action 'Remove' instead of 'Delete'.
    section === 'hosts' ? (action = 'remove') : (action = 'delete');
    // Clicks on table Delete/Remove button
    this.clickActionButton(action);
    // Convert action to SentenceCase and Confirms deletion
    const actionUpperCase = action.charAt(0).toUpperCase() + action.slice(1);
    cy.get('cd-modal .custom-control-label').click();
    cy.contains('cd-modal button', actionUpperCase).click();
    // Wait for modal to close
    cy.get('cd-modal').should('not.exist');
    // Waits for item to be removed from table
    getRow(name).should('not.exist');
  }
}
| 8,772 | 27.3 | 97 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/a11y/dashboard.e2e-spec.ts
|
import { DashboardPageHelper } from '../ui/dashboard.po';
// Accessibility (axe-core) checks for the dashboard landing page.
describe('Dashboard Main Page', { retries: 0 }, () => {
  const dashboard = new DashboardPageHelper();
  beforeEach(() => {
    cy.login();
    dashboard.navigateTo();
  });
  describe('Dashboard accessibility', () => {
    it('should have no accessibility violations', () => {
      cy.injectAxe();
      cy.checkAccessibility(
        {
          // The navbar is covered by the navigation a11y spec.
          exclude: [['.cd-navbar-main']]
        },
        {
          rules: {
            'page-has-heading-one': { enabled: false }
          }
        }
      );
    });
  });
});
| 585 | 20.703704 | 57 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/a11y/navigation.e2e-spec.ts
|
import { NavigationPageHelper } from '../ui/navigation.po';
// Accessibility (axe-core) checks for the top navbar and the sidebar.
describe('Navigation accessibility', { retries: 0 }, () => {
  const shared = new NavigationPageHelper();
  beforeEach(() => {
    cy.login();
    shared.navigateTo();
  });
  it('top-nav should have no accessibility violations', () => {
    cy.injectAxe();
    cy.checkAccessibility('.cd-navbar-top');
  });
  it('sidebar should have no accessibility violations', () => {
    cy.injectAxe();
    cy.checkAccessibility('nav[id=sidebar]');
  });
});
| 513 | 23.47619 | 63 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/block/images.e2e-spec.ts
|
import { PoolPageHelper } from '../pools/pools.po';
import { ImagesPageHelper } from './images.po';
// E2E tests for the RBD Images page: tab layout, image CRUD, and the
// trash (move/restore/purge) workflow. A dedicated rbd pool is created
// once for the whole suite and removed afterwards.
describe('Images page', () => {
  const pools = new PoolPageHelper();
  const images = new ImagesPageHelper();
  const poolName = 'e2e_images_pool';
  before(() => {
    cy.login();
    // Need pool for image testing
    pools.navigateTo('create');
    pools.create(poolName, 8, 'rbd');
    pools.existTableCell(poolName);
  });
  after(() => {
    // Deletes images test pool
    pools.navigateTo();
    pools.delete(poolName);
    pools.navigateTo();
    pools.existTableCell(poolName, false);
  });
  beforeEach(() => {
    cy.login();
    images.navigateTo();
  });
  it('should open and show breadcrumb', () => {
    images.expectBreadcrumbText('Images');
  });
  it('should show four tabs', () => {
    images.getTabsCount().should('eq', 4);
  });
  it('should show text for all tabs', () => {
    images.getTabText(0).should('eq', 'Images');
    images.getTabText(1).should('eq', 'Namespaces');
    images.getTabText(2).should('eq', 'Trash');
    images.getTabText(3).should('eq', 'Overall Performance');
  });
  describe('create, edit & delete image test', () => {
    const imageName = 'e2e_images#image';
    const newImageName = 'e2e_images#image_new';
    it('should create image', () => {
      images.createImage(imageName, poolName, '1');
      images.getFirstTableCell(imageName).should('exist');
    });
    it('should edit image', () => {
      images.editImage(imageName, poolName, newImageName, '2');
      images.getFirstTableCell(newImageName).should('exist');
    });
    it('should delete image', () => {
      images.delete(newImageName);
    });
  });
  describe('move to trash, restore and purge image tests', () => {
    const imageName = 'e2e_trash#image';
    const newImageName = 'e2e_newtrash#image';
    before(() => {
      cy.login();
      // Need image for trash testing
      images.createImage(imageName, poolName, '1');
      images.getFirstTableCell(imageName).should('exist');
    });
    it('should move the image to the trash', () => {
      images.moveToTrash(imageName);
      images.getFirstTableCell(imageName).should('exist');
    });
    it('should restore image to images table', () => {
      images.restoreImage(imageName, newImageName);
      images.getFirstTableCell(newImageName).should('exist');
    });
    it('should purge trash in images trash tab', () => {
      images.getFirstTableCell(newImageName).should('exist');
      images.moveToTrash(newImageName);
      images.purgeTrash(newImageName, poolName);
    });
  });
});
| 2,604 | 27.010753 | 66 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/block/images.po.ts
|
import { PageHelper } from '../page-helper.po';
/** Page object for the RBD Images page, including its Trash tab. */
export class ImagesPageHelper extends PageHelper {
  pages = {
    index: { url: '#/block/rbd', id: 'cd-rbd-list' },
    create: { url: '#/block/rbd/create', id: 'cd-rbd-form' }
  };
  // Creates a block image and fills in the name, pool, and size fields.
  // Then checks if the image is present in the Images table.
  createImage(name: string, pool: string, size: string) {
    this.navigateTo('create');
    cy.get('#name').type(name); // Enter in image name
    // Select image pool
    cy.contains('Loading...').should('not.exist');
    this.selectOption('pool', pool);
    cy.get('#pool').should('have.class', 'ng-valid'); // check if selected
    // Enter in the size of the image
    cy.get('#size').type(size);
    // Click the create button and wait for image to be made
    cy.get('[data-cy=submitBtn]').click();
    this.getFirstTableCell(name).should('exist');
  }
  // Renames and resizes an existing image, then verifies the new size
  // in the expanded row's details table.
  editImage(name: string, pool: string, newName: string, newSize: string) {
    this.navigateEdit(name);
    // Wait until data is loaded
    cy.get('#pool').should('contain.value', pool);
    cy.get('#name').clear().type(newName);
    cy.get('#size').clear().type(newSize); // click the size box and send new size
    cy.get('[data-cy=submitBtn]').click();
    this.getExpandCollapseElement(newName).click();
    cy.get('.table.table-striped.table-bordered').contains('td', newSize);
  }
  // Selects RBD image and moves it to the trash,
  // checks that it is present in the trash table
  moveToTrash(name: string) {
    // wait for image to be created
    cy.get('.datatable-body').first().should('not.contain.text', '(Creating...)');
    this.getFirstTableCell(name).click();
    // click on the drop down and selects the move to trash option
    cy.get('.table-actions button.dropdown-toggle').first().click();
    cy.get('button.move-to-trash').click();
    cy.get('[data-cy=submitBtn]').should('be.visible').click();
    // Clicks trash tab
    cy.contains('.nav-link', 'Trash').click();
    this.getFirstTableCell(name).should('exist');
  }
  // Checks trash tab table for image and then restores it to the RBD Images table
  // (could change name if new name is given)
  restoreImage(name: string, newName?: string) {
    // clicks on trash tab
    cy.contains('.nav-link', 'Trash').click();
    // wait for table to load
    this.getFirstTableCell(name).click();
    cy.contains('button', 'Restore').click();
    // wait for pop-up to be visible (checks for title of pop-up)
    cy.get('cd-modal #name').should('be.visible');
    // If a new name for the image is passed, it changes the name of the image
    if (newName !== undefined) {
      // click name box and send new name
      cy.get('cd-modal #name').clear().type(newName);
    }
    cy.get('[data-cy=submitBtn]').click();
    // clicks images tab
    cy.contains('.nav-link', 'Images').click();
    this.getFirstTableCell(newName).should('exist');
  }
  // Enters trash tab and purges trash, thus emptying the trash table.
  // Checks if Image is still in the table.
  purgeTrash(name: string, pool?: string) {
    // clicks trash tab
    cy.contains('.nav-link', 'Trash').click();
    cy.contains('button', 'Purge Trash').click();
    // Check for visibility of modal container
    cy.get('.modal-header').should('be.visible');
    // If purging a specific pool, selects that pool if given
    if (pool !== undefined) {
      this.selectOption('poolName', pool);
      cy.get('#poolName').should('have.class', 'ng-valid'); // check if pool is selected
    }
    cy.get('[data-cy=submitBtn]').click();
    // Wait for image to delete and check it is not present
    this.getFirstTableCell(name).should('not.exist');
  }
}
| 3,747 | 32.765766 | 88 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/block/iscsi.e2e-spec.ts
|
import { IscsiPageHelper } from './iscsi.po';
// Smoke tests for the iSCSI overview page: breadcrumb, tables, legends.
describe('Iscsi Page', () => {
  const iscsi = new IscsiPageHelper();
  beforeEach(() => {
    cy.login();
    iscsi.navigateTo();
  });
  it('should open and show breadcrumb', () => {
    iscsi.expectBreadcrumbText('Overview');
  });
  it('should check that tables are displayed and legends are correct', () => {
    // Check tables are displayed
    iscsi.getDataTables().its(0).should('be.visible');
    iscsi.getDataTables().its(1).should('be.visible');
    // Check that legends are correct
    iscsi.getLegends().its(0).should('contain.text', 'Gateways');
    iscsi.getLegends().its(1).should('contain.text', 'Images');
  });
});
| 687 | 26.52 | 78 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/block/iscsi.po.ts
|
import { PageHelper } from '../page-helper.po';
/** Page object for the iSCSI overview page. */
export class IscsiPageHelper extends PageHelper {
  pages = {
    index: { url: '#/block/iscsi/overview', id: 'cd-iscsi' }
  };
}
| 179 | 21.5 | 60 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/block/mirroring.e2e-spec.ts
|
import { PoolPageHelper } from '../pools/pools.po';
import { MirroringPageHelper } from './mirroring.po';
// E2E tests for the RBD Mirroring page: tab layout, bootstrap-token
// exchange between two clusters, and per-pool mirror-mode editing.
describe('Mirroring page', () => {
  const pools = new PoolPageHelper();
  const mirroring = new MirroringPageHelper();
  beforeEach(() => {
    cy.login();
    mirroring.navigateTo();
  });
  it('should open and show breadcrumb', () => {
    mirroring.expectBreadcrumbText('Mirroring');
  });
  it('should show three tabs', () => {
    mirroring.getTabsCount().should('eq', 3);
  });
  it('should show text for all tabs', () => {
    mirroring.getTabText(0).should('eq', 'Issues (0)');
    mirroring.getTabText(1).should('eq', 'Syncing (0)');
    mirroring.getTabText(2).should('eq', 'Ready (0)');
  });
  describe('rbd mirroring bootstrap', () => {
    const poolName = 'rbd-mirror';
    beforeEach(() => {
      // login to the second ceph cluster
      cy.ceph2Login();
      cy.login();
      pools.navigateTo('create');
      pools.create(poolName, 8, 'rbd');
      pools.navigateTo();
      pools.existTableCell(poolName, true);
      mirroring.navigateTo();
    });
    it('should generate and import the bootstrap token between clusters', () => {
      const url: string = Cypress.env('CEPH2_URL');
      mirroring.navigateTo();
      mirroring.generateToken(poolName);
      cy.get('@token').then((bootstrapToken) => {
        // pass the token to the origin as an arg
        const args = { name: poolName, bootstrapToken: String(bootstrapToken) };
        // can't use any imports or functions inside the origin
        // so writing the code to copy the token inside the origin manually
        // rather than using a function call
        // @ts-ignore
        cy.origin(url, { args }, ({ name, bootstrapToken }) => {
          // Create an rbd pool in the second cluster
          // Login to the second cluster
          // Somehow its not working with the cypress login function
          cy.visit('#/pool/create').wait(100);
          cy.get('[name=username]').type('admin');
          cy.get('#password').type('admin');
          cy.get('[type=submit]').click();
          cy.get('input[name=name]').clear().type(name);
          cy.get(`select[name=poolType]`).select('replicated');
          cy.get(`select[name=poolType] option:checked`).contains('replicated');
          cy.get('.float-start.me-2.select-menu-edit').click();
          cy.get('.popover-body').should('be.visible');
          // Choose rbd as the application label
          cy.get('.select-menu-item-content').contains('rbd').click();
          cy.get('cd-submit-button').click();
          cy.get('cd-pool-list').should('exist');
          cy.visit('#/block/mirroring').wait(1000);
          cy.get('.table-actions button.dropdown-toggle').first().click();
          cy.get('[aria-label="Import Bootstrap Token"]').click();
          cy.get('cd-bootstrap-import-modal').within(() => {
            cy.get(`label[for=${name}]`).click();
            cy.get('textarea[id=token]').wait(100).type(bootstrapToken);
            cy.get('button[type=submit]').click();
          });
        });
      });
      // login again since origin removes all the cookies
      // sessions, localStorage items etc..
      cy.login();
      mirroring.navigateTo();
      mirroring.checkPoolHealthStatus(poolName, 'OK');
    });
  });
  describe('checks that edit mode functionality shows in the pools table', () => {
    const poolName = 'mirroring_test';
    beforeEach(() => {
      pools.navigateTo('create'); // Need pool for mirroring testing
      pools.create(poolName, 8, 'rbd');
      pools.navigateTo();
      pools.existTableCell(poolName, true);
    });
    it('tests editing mode for pools', () => {
      mirroring.navigateTo();
      mirroring.editMirror(poolName, 'Pool');
      mirroring.getFirstTableCell('pool').should('be.visible');
      mirroring.editMirror(poolName, 'Image');
      mirroring.getFirstTableCell('image').should('be.visible');
      mirroring.editMirror(poolName, 'Disabled');
      mirroring.getFirstTableCell('disabled').should('be.visible');
    });
    afterEach(() => {
      pools.navigateTo();
      pools.delete(poolName);
    });
  });
});
| 4,180 | 34.432203 | 82 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/block/mirroring.po.ts
|
import { PageHelper } from '../page-helper.po';
const pages = {
  index: { url: '#/block/mirroring', id: 'cd-mirroring' }
};
/** Page object for the RBD Mirroring page. */
export class MirroringPageHelper extends PageHelper {
  pages = pages;
  // 1-based column positions in the pools table used by the helpers below.
  poolsColumnIndex = {
    name: 1,
    health: 6
  };
  /**
   * Goes to the mirroring page and edits a pool in the Pool table. Clicks on the
   * pool and chooses an option (either pool, image, or disabled)
   */
  @PageHelper.restrictTo(pages.index.url)
  editMirror(name: string, option: string) {
    // Clicks the pool in the table
    this.getFirstTableCell(name).click();
    // Clicks the Edit Mode button
    cy.contains('button', 'Edit Mode').click();
    // Clicks the drop down in the edit pop-up, then clicks the Update button
    cy.get('.modal-content').should('be.visible');
    this.selectOption('mirrorMode', option);
    // Clicks update button and checks if the mode has been changed
    cy.contains('button', 'Update').click();
    cy.contains('.modal-dialog', 'Edit pool mirror mode').should('not.exist');
    const val = option.toLowerCase(); // used since entries in table are lower case
    this.getFirstTableCell(val).should('be.visible');
  }
  // Generates a bootstrap token for the given pool and stores it in the
  // Cypress alias '@token' for later use.
  @PageHelper.restrictTo(pages.index.url)
  generateToken(poolName: string) {
    cy.get('[aria-label="Create Bootstrap Token"]').first().click();
    cy.get('cd-bootstrap-create-modal').within(() => {
      cy.get(`label[for=${poolName}]`).click();
      cy.get('button[type=submit]').click();
      cy.get('textarea[id=token]').wait(200).invoke('val').as('token');
      cy.get('[aria-label="Back"]').click();
    });
  }
  // Asserts that the health badge of the given pool shows the expected status.
  @PageHelper.restrictTo(pages.index.url)
  checkPoolHealthStatus(poolName: string, status: string) {
    cy.get('cd-mirroring-pools').within(() => {
      this.getTableCell(this.poolsColumnIndex.name, poolName)
        .parent()
        .find(`datatable-body-cell:nth-child(${this.poolsColumnIndex.health}) .badge`)
        .should(($ele) => {
          const newLabels = $ele.toArray().map((v) => v.innerText);
          expect(newLabels).to.include(status);
        });
    });
  }
}
| 2,087 | 32.677419 | 86 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.e2e-spec.ts
|
import { ConfigurationPageHelper } from './configuration.po';
// E2E tests for the cluster Configuration page: breadcrumb, row details,
// editing a config option, and the 'Modified' filter.
describe('Configuration page', () => {
  const configuration = new ConfigurationPageHelper();
  beforeEach(() => {
    cy.login();
    configuration.navigateTo();
  });
  describe('breadcrumb test', () => {
    it('should open and show breadcrumb', () => {
      configuration.expectBreadcrumbText('Configuration');
    });
  });
  describe('fields check', () => {
    beforeEach(() => {
      configuration.getExpandCollapseElement().click();
    });
    it('should check that details table opens (w/o tab header)', () => {
      configuration.getStatusTables().should('be.visible');
      configuration.getTabs().should('not.exist');
    });
  });
  describe('edit configuration test', () => {
    const configName = 'client_cache_size';
    beforeEach(() => {
      configuration.clearTableSearchInput();
      configuration.getTableCount('found').as('configFound');
    });
    after(() => {
      // Reset the edited option so later runs start from a clean state.
      configuration.configClear(configName);
    });
    it('should click and edit a configuration and results should appear in the table', () => {
      configuration.edit(
        configName,
        ['global', '1'],
        ['mon', '2'],
        ['mgr', '3'],
        ['osd', '4'],
        ['mds', '5'],
        ['client', '6']
      );
    });
    it('should verify modified filter is applied properly', () => {
      configuration.filterTable('Modified', 'no');
      configuration.getTableCount('found').as('unmodifiedConfigs');
      // Modified filter value to yes
      configuration.filterTable('Modified', 'yes');
      configuration.getTableCount('found').as('modifiedConfigs');
      // modified + unmodified must add up to the unfiltered total.
      cy.get('@configFound').then((configFound) => {
        cy.get('@unmodifiedConfigs').then((unmodifiedConfigs) => {
          const modifiedConfigs = Number(configFound) - Number(unmodifiedConfigs);
          configuration.getTableCount('found').should('eq', modifiedConfigs);
        });
      });
      // Modified filter value to no
      configuration.filterTable('Modified', 'no');
      cy.get('@configFound').then((configFound) => {
        cy.get('@modifiedConfigs').then((modifiedConfigs) => {
          const unmodifiedConfigs = Number(configFound) - Number(modifiedConfigs);
          configuration.getTableCount('found').should('eq', unmodifiedConfigs);
        });
      });
    });
  });
});
| 2,372 | 29.423077 | 94 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/configuration.po.ts
|
import { PageHelper } from '../page-helper.po';
/** Page object for the cluster Configuration page. */
export class ConfigurationPageHelper extends PageHelper {
  pages = {
    index: { url: '#/configuration', id: 'cd-configuration' }
  };
  /**
   * Clears out all the values in a config to reset before and after testing
   * Does not work for configs with checkbox only, possible future PR
   */
  configClear(name: string) {
    const valList = ['global', 'mon', 'mgr', 'osd', 'mds', 'client']; // Editable values
    this.navigateEdit(name);
    // Waits for the data to load
    cy.contains('.card-header', `Edit ${name}`);
    for (const i of valList) {
      cy.get(`#${i}`).clear();
    }
    // Clicks save button and checks that values are not present for the selected config
    cy.get('[data-cy=submitBtn]').click();
    // Enter config setting name into filter box
    this.searchTable(name);
    // Expand row
    this.getExpandCollapseElement(name).click();
    // Checks for visibility of details tab
    this.getStatusTables().should('be.visible');
    for (const i of valList) {
      // Waits until values are not present in the details table
      this.getStatusTables().should('not.contain.text', i + ':');
    }
  }
  /**
   * Clicks the designated config, then inputs the values passed into the edit function.
   * Then checks if the edit is reflected in the config table.
   * Takes in name of config and a list of tuples of values the user wants edited,
   * each tuple having the desired value along with the number they want for that value.
   * Ex: [global, '2'] is the global value with an input of 2
   */
  edit(name: string, ...values: [string, string][]) {
    this.navigateEdit(name);
    // Waits for data to load
    cy.contains('.card-header', `Edit ${name}`);
    values.forEach((valtuple) => {
      // Finds desired value based off given list
      cy.get(`#${valtuple[0]}`).type(valtuple[1]); // of values and inserts the given number for the value
    });
    // Clicks save button then waits until the desired config is visible, clicks it,
    // then checks that each desired value appears with the desired number
    cy.get('[data-cy=submitBtn]').click();
    // Enter config setting name into filter box
    this.searchTable(name);
    // Checks for visibility of config in table
    this.getExpandCollapseElement(name).should('be.visible').click();
    // Clicks config
    values.forEach((value) => {
      // iterates through list of values and
      // checks if the value appears in details with the correct number attached
      cy.contains('.table.table-striped.table-bordered', `${value[0]}\: ${value[1]}`);
    });
  }
}
| 2,639 | 33.736842 | 106 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/create-cluster.po.ts
|
import { PageHelper } from '../page-helper.po';
import { NotificationSidebarPageHelper } from '../ui/notification.po';
import { HostsPageHelper } from './hosts.po';
import { ServicesPageHelper } from './services.po';
// Route and root component selector used by PageHelper navigation.
const pages = {
  index: { url: '#/expand-cluster', id: 'cd-create-cluster' }
};
export class CreateClusterWizardHelper extends PageHelper {
  pages = pages;

  /** Starts the "expand cluster" wizard from the welcome screen. */
  createCluster() {
    cy.get('cd-create-cluster').should('contain.text', 'Please expand your cluster first');
    cy.get('[name=expand-cluster]').click();
    // The wizard component must be rendered now.
    cy.get('cd-wizard').should('exist');
  }

  /** Skips cluster expansion and verifies the corresponding notification. */
  doSkip() {
    cy.get('[name=skip-cluster-creation]').click();
    cy.contains('cd-modal button', 'Continue').click();
    cy.get('cd-dashboard').should('exist');

    const notificationSidebar = new NotificationSidebarPageHelper();
    notificationSidebar.open();
    notificationSidebar
      .getNotifications()
      .should('contain', 'Cluster expansion skipped by user');
  }
}
// Reuses the hosts page helper inside the expand-cluster wizard, where the
// table layout (and thus the column indexes) differs from the hosts page.
export class CreateClusterHostPageHelper extends HostsPageHelper {
  pages = {
    index: { url: '#/expand-cluster', id: 'cd-wizard' },
    add: { url: '', id: 'cd-host-form' }
  };

  // 1-based column positions; 0 presumably marks a column that is absent in
  // the wizard table — confirm against the wizard template.
  columnIndex = {
    hostname: 1,
    labels: 2,
    status: 3,
    services: 0
  };
}
// Reuses the services page helper inside the expand-cluster wizard, where the
// table layout (and thus the column indexes) differs from the services page.
export class CreateClusterServicePageHelper extends ServicesPageHelper {
  pages = {
    index: { url: '#/expand-cluster', id: 'cd-wizard' },
    create: { url: '', id: 'cd-service-form' }
  };

  // 1-based column positions; 0 presumably marks a column that is absent in
  // the wizard table — confirm against the wizard template.
  columnIndex = {
    service_name: 1,
    placement: 2,
    running: 0,
    size: 0,
    last_refresh: 0
  };
}
| 1,512 | 25.54386 | 91 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/crush-map.e2e-spec.ts
|
import { CrushMapPageHelper } from './crush-map.po';
// E2E tests for the cluster "CRUSH map" page.
describe('CRUSH map page', () => {
  const crushmap = new CrushMapPageHelper();

  beforeEach(() => {
    // Each test needs an authenticated session and starts on the page.
    cy.login();
    crushmap.navigateTo();
  });

  describe('breadcrumb test', () => {
    it('should open and show breadcrumb', () => {
      crushmap.expectBreadcrumbText('CRUSH map');
    });
  });

  describe('fields check', () => {
    it('should check that title & table appears', () => {
      // Check that title (CRUSH map viewer) appears
      crushmap.getPageTitle().should('equal', 'CRUSH map viewer');
      // Check that title appears once OSD is clicked
      crushmap.getCrushNode(0).click();
      crushmap
        .getLegends()
        .invoke('text')
        .then((legend) => {
          // The legend must match the selected node's own label.
          crushmap.getCrushNode(0).should('have.text', legend);
        });
      // Check that table appears once OSD is clicked
      crushmap.getDataTables().should('be.visible');
    });
  });
});
| 959 | 24.945946 | 66 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/crush-map.po.ts
|
import { PageHelper } from '../page-helper.po';
export class CrushMapPageHelper extends PageHelper {
  pages = { index: { url: '#/crush-map', id: 'cd-crushmap' } };

  // Yields the page title text of the CRUSH map card header.
  // NOTE(review): `.text()` is not a built-in Cypress chainable — presumably a
  // custom command registered in the Cypress support files; otherwise this
  // would need `.invoke('text')`. Confirm against cypress/support.
  getPageTitle() {
    return cy.get('cd-crushmap .card-header').text();
  }

  // Yields the idx-th OSD node shown in the CRUSH tree.
  getCrushNode(idx: number) {
    return cy.get('.node-name.type-osd').eq(idx);
  }
}
| 331 | 22.714286 | 63 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/hosts.e2e-spec.ts
|
import { HostsPageHelper } from './hosts.po';
// E2E tests for the cluster "Hosts" page.
describe('Hosts page', () => {
  const hosts = new HostsPageHelper();

  beforeEach(() => {
    // Each test needs an authenticated session and starts on the page.
    cy.login();
    hosts.navigateTo();
  });

  describe('breadcrumb and tab tests', () => {
    it('should open and show breadcrumb', () => {
      hosts.expectBreadcrumbText('Hosts');
    });
    it('should show two tabs', () => {
      hosts.getTabsCount().should('eq', 2);
    });
    it('should show hosts list tab at first', () => {
      hosts.getTabText(0).should('eq', 'Hosts List');
    });
    it('should show overall performance as a second tab', () => {
      hosts.getTabText(1).should('eq', 'Overall Performance');
    });
  });

  describe('services link test', () => {
    it('should check at least one host is present', () => {
      hosts.check_for_host();
    });
  });
});
| 837 | 22.942857 | 65 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/hosts.po.ts
|
import { PageHelper } from '../page-helper.po';
// Routes and root component selectors used by PageHelper navigation.
const pages = {
  index: { url: '#/hosts', id: 'cd-hosts' },
  add: { url: '#/hosts/(modal:add)', id: 'cd-host-form' }
};
export class HostsPageHelper extends PageHelper {
  pages = pages;

  // 1-based datatable column positions used by the cell lookups below.
  columnIndex = {
    hostname: 2,
    services: 3,
    labels: 4,
    status: 5
  };

  // Asserts that at least one host is listed in the table.
  check_for_host() {
    this.getTableCount('total').should('not.be.eq', 0);
  }

  /**
   * Fills in the add-host form. With `exist` the hostname is expected to be
   * rejected as invalid/duplicate; `maintenance` ticks the maintenance
   * checkbox; `labels` are applied through the predefined-labels menu.
   */
  add(hostname: string, exist?: boolean, maintenance?: boolean, labels: string[] = []) {
    cy.get(`${this.pages.add.id}`).within(() => {
      cy.get('#hostname').type(hostname);
      if (maintenance) {
        cy.get('label[for=maintenance]').click();
      }
      if (exist) {
        cy.get('#hostname').should('have.class', 'ng-invalid');
      }
    });
    if (labels.length) {
      this.selectPredefinedLabels(labels);
    }
    cy.get('cd-submit-button').click();
    // back to host list
    cy.get(`${this.pages.index.id}`);
  }

  // Picks the given labels from the predefined-labels select menu.
  selectPredefinedLabels(labels: string[]) {
    cy.get('a[data-testid=select-menu-edit]').click();
    for (const label of labels) {
      cy.get('.popover-body div.select-menu-item-content').contains(label).click();
    }
  }

  // Asserts that the given hostname is (or is not) present in the table.
  checkExist(hostname: string, exist: boolean) {
    this.getTableCell(this.columnIndex.hostname, hostname, true)
      .parent()
      .find(`datatable-body-cell:nth-child(${this.columnIndex.hostname}) span`)
      .should(($elements) => {
        const hosts = $elements.toArray().map((v) => v.innerText);
        if (exist) {
          expect(hosts).to.include(hostname);
        } else {
          expect(hosts).to.not.include(hostname);
        }
      });
  }

  // Deletes the host through the generic table delete action.
  remove(hostname: string) {
    super.delete(hostname, this.columnIndex.hostname, 'hosts');
  }

  // Add or remove labels on a host, then verify labels in the table
  editLabels(hostname: string, labels: string[], add: boolean) {
    this.getTableCell(this.columnIndex.hostname, hostname, true).click();
    this.clickActionButton('edit');
    // add or remove label badges
    if (add) {
      cy.get('cd-modal').find('.select-menu-edit').click();
      for (const label of labels) {
        // The label must not be present yet, then is typed into the menu.
        cy.contains('cd-modal .badge', new RegExp(`^${label}$`)).should('not.exist');
        cy.get('.popover-body input').type(`${label}{enter}`);
      }
    } else {
      for (const label of labels) {
        cy.contains('cd-modal .badge', new RegExp(`^${label}$`))
          .find('.badge-remove')
          .click();
      }
    }
    cy.get('cd-modal cd-submit-button').click();
    this.checkLabelExists(hostname, labels, add);
  }

  // Asserts that the given labels are (add=true) or are not (add=false)
  // shown in the host's Labels column.
  checkLabelExists(hostname: string, labels: string[], add: boolean) {
    // Verify labels are added or removed from Labels column
    // First find row with hostname, then find labels in the row
    this.getTableCell(this.columnIndex.hostname, hostname, true)
      .click()
      .parent()
      .find(`datatable-body-cell:nth-child(${this.columnIndex.labels}) .badge`)
      .should(($ele) => {
        const newLabels = $ele.toArray().map((v) => v.innerText);
        for (const label of labels) {
          if (add) {
            expect(newLabels).to.include(label);
          } else {
            expect(newLabels).to.not.include(label);
          }
        }
      });
  }

  /**
   * Puts a host into / takes it out of maintenance and verifies the status
   * badge. `force` additionally confirms the warning modal; with `exit` the
   * host only leaves maintenance when it is currently in it.
   */
  @PageHelper.restrictTo(pages.index.url)
  maintenance(hostname: string, exit = false, force = false) {
    this.clearTableSearchInput();
    if (force) {
      this.getTableCell(this.columnIndex.hostname, hostname, true).click();
      this.clickActionButton('enter-maintenance');
      cy.get('cd-modal').within(() => {
        cy.contains('button', 'Continue').click();
      });
      this.getTableCell(this.columnIndex.hostname, hostname, true)
        .parent()
        .find(`datatable-body-cell:nth-child(${this.columnIndex.status}) .badge`)
        .should(($ele) => {
          const status = $ele.toArray().map((v) => v.innerText);
          expect(status).to.include('maintenance');
        });
    }
    if (exit) {
      // Only click exit-maintenance if the host is currently in maintenance.
      this.getTableCell(this.columnIndex.hostname, hostname, true)
        .click()
        .parent()
        .find(`datatable-body-cell:nth-child(${this.columnIndex.status})`)
        .then(($ele) => {
          const status = $ele.toArray().map((v) => v.innerText);
          if (status[0].includes('maintenance')) {
            this.clickActionButton('exit-maintenance');
          }
        });
      this.getTableCell(this.columnIndex.hostname, hostname, true)
        .parent()
        .find(`datatable-body-cell:nth-child(${this.columnIndex.status})`)
        .should(($ele) => {
          const status = $ele.toArray().map((v) => v.innerText);
          expect(status).to.not.include('maintenance');
        });
    } else {
      this.getTableCell(this.columnIndex.hostname, hostname, true).click();
      this.clickActionButton('enter-maintenance');
      this.getTableCell(this.columnIndex.hostname, hostname, true)
        .parent()
        .find(`datatable-body-cell:nth-child(${this.columnIndex.status}) .badge`)
        .should(($ele) => {
          const status = $ele.toArray().map((v) => v.innerText);
          expect(status).to.include('maintenance');
        });
    }
  }

  /**
   * Starts draining a host, verifies the `_no_schedule` label is applied and
   * waits until no daemons are listed on the host anymore.
   */
  @PageHelper.restrictTo(pages.index.url)
  drain(hostname: string) {
    this.getTableCell(this.columnIndex.hostname, hostname, true).click();
    this.clickActionButton('start-drain');
    this.checkLabelExists(hostname, ['_no_schedule'], true);
    this.clickTab('cd-host-details', hostname, 'Daemons');
    cy.get('cd-host-details').within(() => {
      // Draining is asynchronous; give the daemons time to be removed.
      cy.wait(20000);
      this.expectTableCount('total', 0);
    });
  }

  // Asserts that the host's services column lists all given instances.
  checkServiceInstancesExist(hostname: string, instances: string[]) {
    this.getTableCell(this.columnIndex.hostname, hostname, true)
      .parent()
      .find(`datatable-body-cell:nth-child(${this.columnIndex.services}) .badge`)
      .should(($ele) => {
        const serviceInstances = $ele.toArray().map((v) => v.innerText);
        for (const instance of instances) {
          expect(serviceInstances).to.include(instance);
        }
      });
  }
}
| 6,081 | 31.698925 | 88 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/inventory.po.ts
|
import { PageHelper } from '../page-helper.po';
// Route and root component selector used by PageHelper navigation.
const pages = {
  index: { url: '#/inventory', id: 'cd-inventory' }
};
export class InventoryPageHelper extends PageHelper {
  pages = pages;

  /**
   * Opens the "Identify" modal for the first device and exercises the
   * duration selector. The action itself cannot be verified, so this only
   * checks that the form is there and behaves.
   */
  identify() {
    this.getFirstTableCell().click();
    cy.contains('cd-table-actions button', 'Identify').click();

    cy.get('cd-modal').within(() => {
      const duration = () => cy.get('#duration');
      duration().select('15 minutes');
      duration().select('10 minutes');
      // Leave the modal without triggering the identify action.
      cy.get('cd-back-button').click();
    });

    // The modal must be gone and the inventory page visible again.
    cy.get('cd-modal').should('not.exist');
    cy.get(this.pages.index.id);
  }
}
| 636 | 26.695652 | 63 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/logs.e2e-spec.ts
|
import { PoolPageHelper } from '../pools/pools.po';
import { LogsPageHelper } from './logs.po';
// E2E tests for the cluster "Logs" page.
describe('Logs page', () => {
  const logs = new LogsPageHelper();
  const pools = new PoolPageHelper();
  const poolname = 'e2e_logs_test_pool';

  // Capture the current time (hours converted to 12-hour format to match the
  // timepicker) so the audit log can be limited to this test run's entries.
  const today = new Date();
  let hour = today.getHours();
  if (hour > 12) {
    hour = hour - 12;
  }
  const minute = today.getMinutes();

  beforeEach(() => {
    cy.login();
  });

  describe('breadcrumb and tab tests', () => {
    beforeEach(() => {
      logs.navigateTo();
    });
    it('should open and show breadcrumb', () => {
      logs.expectBreadcrumbText('Logs');
    });
    it('should show three tabs', () => {
      logs.getTabsCount().should('eq', 3);
    });
    it('should show cluster logs tab at first', () => {
      logs.getTabText(0).should('eq', 'Cluster Logs');
    });
    it('should show audit logs as a second tab', () => {
      logs.getTabText(1).should('eq', 'Audit Logs');
    });
    it('should show daemon logs as a third tab', () => {
      logs.getTabText(2).should('eq', 'Daemon Logs');
    });
  });

  describe('audit logs respond to pool creation and deletion test', () => {
    it('should create pool and check audit logs reacted', () => {
      pools.navigateTo('create');
      pools.create(poolname, 8);
      pools.navigateTo();
      pools.existTableCell(poolname, true);
      logs.checkAuditForPoolFunction(poolname, 'create', hour, minute);
    });
    it('should delete pool and check audit logs reacted', () => {
      pools.navigateTo();
      pools.delete(poolname);
      logs.checkAuditForPoolFunction(poolname, 'delete', hour, minute);
    });
  });
});
| 1,664 | 25.854839 | 75 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/logs.po.ts
|
import { PageHelper } from '../page-helper.po';
export class LogsPageHelper extends PageHelper {
  pages = {
    index: { url: '#/logs', id: 'cd-logs' }
  };

  /**
   * Switches to the "Audit Logs" tab and narrows the timepicker so only
   * messages logged at or after `hour:minute` are shown; this prevents stale
   * messages from earlier runs from matching.
   */
  private openAuditLogsSince(hour: number, minute: number) {
    // go to audit logs tab
    cy.contains('.nav-link', 'Audit Logs').click();
    this.setTimePickerInput(0, hour);
    this.setTimePickerInput(1, minute);
  }

  /**
   * Types a zero-padded two-digit value into one of the ngb timepicker
   * inputs (index 0 = hours, index 1 = minutes).
   */
  private setTimePickerInput(index: number, value: number) {
    cy.get('.ngb-tp-input').its(index).clear();
    if (value < 10) {
      cy.get('.ngb-tp-input').its(index).type('0');
    }
    cy.get('.ngb-tp-input').its(index).type(`${value}`);
  }

  /**
   * Filters the active log tab by `filterText` and asserts that the shown
   * message contains both expected substrings.
   */
  private filterAndExpectMessage(filterText: string, first: string, second: string) {
    cy.get('input.form-control.ng-valid').first().clear().type(filterText);
    cy.get('.tab-pane.active')
      .get('.card-body')
      .get('.message')
      .should('contain.text', first)
      .and('contain.text', second);
  }

  /**
   * Verifies that an audit log entry exists for a pool operation
   * (e.g. 'create' or 'delete') on the given pool.
   */
  checkAuditForPoolFunction(poolname: string, poolfunction: string, hour: number, minute: number) {
    this.navigateTo();
    // sometimes the modal from deleting pool is still present at this point.
    // This wait makes sure it isn't
    cy.contains('.modal-dialog', 'Delete Pool').should('not.exist');
    this.openAuditLogsSince(hour, minute);
    // Enter the pool name into the filter box and check the message.
    this.filterAndExpectMessage(poolname, poolname, `pool ${poolfunction}`);
  }

  /**
   * Verifies that an audit log entry exists for a config option change.
   */
  checkAuditForConfigChange(configname: string, setting: string, hour: number, minute: number) {
    this.navigateTo();
    this.openAuditLogsSince(hour, minute);
    // Enter the config name into the filter box and check the message.
    this.filterAndExpectMessage(configname, configname, setting);
  }
}
| 2,211 | 30.15493 | 99 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/mgr-modules.e2e-spec.ts
|
import { Input, ManagerModulesPageHelper } from './mgr-modules.po';
// E2E tests for the "Manager Modules" page.
describe('Manager modules page', () => {
  const mgrmodules = new ManagerModulesPageHelper();

  beforeEach(() => {
    // Each test needs an authenticated session and starts on the page.
    cy.login();
    mgrmodules.navigateTo();
  });

  describe('breadcrumb test', () => {
    it('should open and show breadcrumb', () => {
      mgrmodules.expectBreadcrumbText('Manager Modules');
    });
  });

  describe('verifies editing functionality for manager modules', () => {
    it('should test editing on balancer module', () => {
      const balancerArr: Input[] = [
        {
          id: 'crush_compat_max_iterations',
          newValue: '123',
          oldValue: '25'
        }
      ];
      mgrmodules.editMgrModule('balancer', balancerArr);
    });

    it('should test editing on dashboard module', () => {
      const dashboardArr: Input[] = [
        {
          id: 'GRAFANA_API_PASSWORD',
          newValue: 'rafa',
          oldValue: ''
        }
      ];
      mgrmodules.editMgrModule('dashboard', dashboardArr);
    });

    it('should test editing on devicehealth module', () => {
      const devHealthArray: Input[] = [
        {
          id: 'mark_out_threshold',
          newValue: '1987',
          oldValue: '2419200'
        },
        {
          id: 'pool_name',
          newValue: 'sox',
          oldValue: '.mgr'
        },
        {
          id: 'retention_period',
          newValue: '1999',
          oldValue: '15552000'
        },
        {
          id: 'scrape_frequency',
          newValue: '2020',
          oldValue: '86400'
        },
        {
          id: 'sleep_interval',
          newValue: '456',
          oldValue: '600'
        },
        {
          id: 'warn_threshold',
          newValue: '567',
          oldValue: '7257600'
        }
      ];
      mgrmodules.editMgrModule('devicehealth', devHealthArray);
    });
  });
});
| 1,889 | 23.230769 | 72 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/mgr-modules.po.ts
|
import { PageHelper } from '../page-helper.po';
// Describes one editable mgr module option: the form field id, the value to
// type in and the previous value used to restore the option afterwards.
export class Input {
  id: string;
  oldValue: string;
  newValue: string;
}
export class ManagerModulesPageHelper extends PageHelper {
  pages = { index: { url: '#/mgr-modules', id: 'cd-mgr-module-list' } };

  /**
   * Edits the given manager module's options, verifies the new values in the
   * details row, then restores the original values and verifies the restore.
   *
   * NOTE(review): inputs whose `oldValue` is empty (e.g. 'GRAFANA_API_PASSWORD'
   * defaulting to '') are skipped by the restore pass, so their `newValue`
   * persists after this helper runs — confirm this is intended.
   */
  editMgrModule(name: string, inputs: Input[]) {
    this.navigateEdit(name);
    // Clear each field and type the new value.
    for (const input of inputs) {
      cy.get(`#${input.id}`).clear().type(input.newValue);
    }
    cy.contains('button', 'Update').click();
    // The details row must now show the edited values.
    this.getExpandCollapseElement(name).should('be.visible').click();
    for (const input of inputs) {
      cy.get('.datatable-body').last().contains(input.newValue);
    }

    // Restore the original values (fields without one are left untouched).
    // Fix: the original code re-checked `input.oldValue` a second time inside
    // this branch; the redundant nested condition has been removed.
    this.navigateEdit(name);
    for (const input of inputs) {
      if (input.oldValue) {
        const field = `#${input.id}`;
        cy.get(field).clear();
        cy.get(field).type(input.oldValue);
      }
    }

    // Checks that the restore is reflected in the details tab of the module.
    cy.contains('button', 'Update').click();
    this.getExpandCollapseElement(name).should('be.visible').click();
    for (const input of inputs) {
      if (input.oldValue) {
        cy.get('.datatable-body')
          .eq(1)
          .should('contain', input.id)
          .and('not.contain', input.newValue);
      }
    }
  }
}
| 1,544 | 25.637931 | 72 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/monitors.e2e-spec.ts
|
import { MonitorsPageHelper } from './monitors.po';
// E2E tests for the cluster "Monitors" page.
describe('Monitors page', () => {
  const monitors = new MonitorsPageHelper();

  beforeEach(() => {
    // Each test needs an authenticated session and starts on the page.
    cy.login();
    monitors.navigateTo();
  });

  describe('breadcrumb test', () => {
    it('should open and show breadcrumb', () => {
      monitors.expectBreadcrumbText('Monitors');
    });
  });

  describe('fields check', () => {
    it('should check status table is present', () => {
      // check for table header 'Status'
      monitors.getLegends().its(0).should('have.text', 'Status');
      // check for fields in table
      monitors
        .getStatusTables()
        .should('contain.text', 'Cluster ID')
        .and('contain.text', 'monmap modified')
        .and('contain.text', 'monmap epoch')
        .and('contain.text', 'quorum con')
        .and('contain.text', 'quorum mon')
        .and('contain.text', 'required con')
        .and('contain.text', 'required mon');
    });

    it('should check In Quorum and Not In Quorum tables are present', () => {
      // check for there to be two tables
      monitors.getDataTables().should('have.length', 2);
      // check for table header 'In Quorum'
      monitors.getLegends().its(1).should('have.text', 'In Quorum');
      // check for table header 'Not In Quorum'
      monitors.getLegends().its(2).should('have.text', 'Not In Quorum');
      // verify correct columns on In Quorum table
      monitors.getDataTableHeaders(0).contains('Name');
      monitors.getDataTableHeaders(0).contains('Rank');
      monitors.getDataTableHeaders(0).contains('Public Address');
      monitors.getDataTableHeaders(0).contains('Open Sessions');
      // verify correct columns on Not In Quorum table
      monitors.getDataTableHeaders(1).contains('Name');
      monitors.getDataTableHeaders(1).contains('Rank');
      monitors.getDataTableHeaders(1).contains('Public Address');
    });
  });
});
| 1,921 | 30 | 77 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/monitors.po.ts
|
import { PageHelper } from '../page-helper.po';
// Page object for the cluster "Monitors" page; all behavior is inherited
// from PageHelper.
export class MonitorsPageHelper extends PageHelper {
  pages = {
    index: { url: '#/monitor', id: 'cd-monitor' }
  };
}
| 171 | 20.5 | 52 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/osds.e2e-spec.ts
|
import { OSDsPageHelper } from './osds.po';
// E2E tests for the cluster "OSDs" page.
describe('OSDs page', () => {
  const osds = new OSDsPageHelper();

  beforeEach(() => {
    // Each test needs an authenticated session and starts on the page.
    cy.login();
    osds.navigateTo();
  });

  describe('breadcrumb and tab tests', () => {
    it('should open and show breadcrumb', () => {
      osds.expectBreadcrumbText('OSDs');
    });
    it('should show two tabs', () => {
      osds.getTabsCount().should('eq', 2);
      osds.getTabText(0).should('eq', 'OSDs List');
      osds.getTabText(1).should('eq', 'Overall Performance');
    });
  });

  describe('check existence of fields on OSD page', () => {
    it('should check that number of rows and count in footer match', () => {
      osds.getTableCount('total').then((text) => {
        osds.getTableRows().its('length').should('equal', text);
      });
    });

    it('should verify that buttons exist', () => {
      cy.contains('button', 'Create');
      cy.contains('button', 'Cluster-wide configuration');
    });

    describe('by selecting one row in OSDs List', () => {
      beforeEach(() => {
        // Expand the first OSD's details row before each check.
        osds.getExpandCollapseElement().click();
      });

      it('should show the correct text for the tab labels', () => {
        cy.get('#tabset-osd-details > a').then(($tabs) => {
          const tabHeadings = $tabs.map((_i, e) => e.textContent).get();
          expect(tabHeadings).to.eql([
            'Devices',
            'Attributes (OSD map)',
            'Metadata',
            'Device health',
            'Performance counter',
            'Performance Details'
          ]);
        });
      });
    });
  });
});
| 1,587 | 26.859649 | 76 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/osds.po.ts
|
import { PageHelper } from '../page-helper.po';
// Routes and root component selectors used by PageHelper navigation.
const pages = {
  index: { url: '#/osd', id: 'cd-osd-list' },
  create: { url: '#/osd/create', id: 'cd-osd-form' }
};
export class OSDsPageHelper extends PageHelper {
  pages = pages;

  // 1-based datatable column positions used by the cell lookups below.
  columnIndex = {
    id: 3,
    status: 5
  };

  /**
   * Creates OSDs on all devices of the given type, optionally restricted to
   * one host. With `expandCluster` the devices are only staged (the wizard
   * performs the submission itself).
   */
  create(deviceType: 'hdd' | 'ssd', hostname?: string, expandCluster = false) {
    cy.get('[aria-label="toggle advanced mode"]').click();
    // Click Primary devices Add button
    cy.get('cd-osd-devices-selection-groups[name="Primary"]').as('primaryGroups');
    cy.get('@primaryGroups').find('button').click();
    // Select all devices with `deviceType`
    cy.get('cd-osd-devices-selection-modal').within(() => {
      cy.get('.modal-footer .tc_submitButton').as('addButton').should('be.disabled');
      this.filterTable('Type', deviceType);
      if (hostname) {
        this.filterTable('Hostname', hostname);
      }
      if (expandCluster) {
        this.getTableCount('total').should('be.gte', 1);
      }
      cy.get('@addButton').click();
    });
    if (!expandCluster) {
      // Submit the form and confirm the creation preview modal.
      cy.get('@primaryGroups').within(() => {
        this.getTableCount('total').as('newOSDCount');
      });
      cy.get(`${pages.create.id} .card-footer .tc_submitButton`).click();
      cy.get(`cd-osd-creation-preview-modal .modal-footer .tc_submitButton`).click();
    }
  }

  // Asserts that the OSD with the given id carries all expected status badges.
  @PageHelper.restrictTo(pages.index.url)
  checkStatus(id: number, status: string[]) {
    this.searchTable(`id:${id}`);
    this.expectTableCount('found', 1);
    cy.get(`datatable-body-cell:nth-child(${this.columnIndex.status}) .badge`).should(($ele) => {
      const allStatus = $ele.toArray().map((v) => v.innerText);
      for (const s of status) {
        expect(allStatus).to.include(s);
      }
    });
  }

  // Asserts that no OSD with the given id exists.
  @PageHelper.restrictTo(pages.index.url)
  ensureNoOsd(id: number) {
    this.searchTable(`id:${id}`);
    this.expectTableCount('found', 0);
    this.clearTableSearchInput();
  }

  /**
   * Deletes the OSDs with the given ids via the table delete action; with
   * `replace` the modal's "preserve" option is ticked as well.
   */
  @PageHelper.restrictTo(pages.index.url)
  deleteByIDs(osdIds: number[], replace?: boolean) {
    // Select every row whose id cell matches one of the requested ids.
    this.getTableRows().each(($el) => {
      const rowOSD = Number(
        $el.find('datatable-body-cell .datatable-body-cell-label').get(this.columnIndex.id - 1)
          .textContent
      );
      if (osdIds.includes(rowOSD)) {
        cy.wrap($el).click();
      }
    });
    this.clickActionButton('delete');
    if (replace) {
      cy.get('cd-modal label[for="preserve"]').click();
    }
    // Deletion must be confirmed explicitly before submitting.
    cy.get('cd-modal label[for="confirmation"]').click();
    cy.contains('cd-modal button', 'Delete').click();
    cy.get('cd-modal').should('not.exist');
  }
}
| 2,614 | 29.764706 | 97 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/services.po.ts
|
import { PageHelper } from '../page-helper.po';
// Routes and root component selectors used by PageHelper navigation.
const pages = {
  index: { url: '#/services', id: 'cd-services' },
  create: { url: '#/services/(modal:create)', id: 'cd-service-form' }
};
export class ServicesPageHelper extends PageHelper {
  pages = pages;

  // 1-based column positions of the services table.
  columnIndex = {
    service_name: 2,
    placement: 3,
    running: 4,
    size: 5,
    last_refresh: 6
  };

  // Column positions inside the service-details daemon table.
  serviceDetailColumnIndex = {
    daemonName: 2,
    status: 4
  };

  // Asserts that at least one service is listed.
  check_for_service() {
    this.getTableCount('total').should('not.be.eq', 0);
  }

  // Picks the service type in the create-service form.
  private selectServiceType(serviceType: string) {
    return this.selectOption('service_type', serviceType);
  }

  // Expands the service row and switches to the given details tab.
  clickServiceTab(serviceName: string, tabName: string) {
    this.getExpandCollapseElement(serviceName).click();
    cy.get('cd-service-details').within(() => {
      this.getTab(tabName).click();
    });
  }

  /**
   * Fills in and submits the create-service form for the given service type.
   * With `exist` the service id is expected to be rejected as a duplicate;
   * `unmanaged` ticks the unmanaged checkbox instead of entering a count;
   * `snmpVersion`/`snmpPrivProtocol` only apply to snmp-gateway services.
   */
  addService(
    serviceType: string,
    exist?: boolean,
    count = 1,
    snmpVersion?: string,
    snmpPrivProtocol?: boolean,
    unmanaged = false
  ) {
    cy.get(`${this.pages.create.id}`).within(() => {
      this.selectServiceType(serviceType);
      // Each service type requires a different set of form fields.
      switch (serviceType) {
        case 'rgw':
          cy.get('#service_id').type('foo');
          unmanaged ? cy.get('label[for=unmanaged]').click() : cy.get('#count').type(String(count));
          break;
        case 'ingress':
          if (unmanaged) {
            cy.get('label[for=unmanaged]').click();
          }
          // The service id is derived from the selected backend service.
          this.selectOption('backend_service', 'rgw.foo');
          cy.get('#service_id').should('have.value', 'rgw.foo');
          cy.get('#virtual_ip').type('192.168.100.1/24');
          cy.get('#frontend_port').type('8081');
          cy.get('#monitor_port').type('8082');
          break;
        case 'nfs':
          cy.get('#service_id').type('testnfs');
          unmanaged ? cy.get('label[for=unmanaged]').click() : cy.get('#count').type(String(count));
          break;
        case 'snmp-gateway':
          this.selectOption('snmp_version', snmpVersion);
          cy.get('#snmp_destination').type('192.168.0.1:8443');
          if (snmpVersion === 'V2c') {
            cy.get('#snmp_community').type('public');
          } else {
            // V3 requires engine id, auth protocol and credentials.
            cy.get('#engine_id').type('800C53F00000');
            this.selectOption('auth_protocol', 'SHA');
            if (snmpPrivProtocol) {
              this.selectOption('privacy_protocol', 'DES');
              cy.get('#snmp_v3_priv_password').type('testencrypt');
            }
            // Credentials
            cy.get('#snmp_v3_auth_username').type('test');
            cy.get('#snmp_v3_auth_password').type('testpass');
          }
          break;
        default:
          cy.get('#service_id').type('test');
          unmanaged ? cy.get('label[for=unmanaged]').click() : cy.get('#count').type(String(count));
          break;
      }
      if (serviceType === 'snmp-gateway') {
        cy.get('cd-submit-button').dblclick();
      } else {
        cy.get('cd-submit-button').click();
      }
    });
    if (exist) {
      cy.get('#service_id').should('have.class', 'ng-invalid');
    } else {
      // back to service list
      cy.get(`${this.pages.index.id}`);
    }
  }

  // Changes the daemon count of an existing service through the edit form.
  editService(name: string, daemonCount: string) {
    this.navigateEdit(name, true, false);
    cy.get(`${this.pages.create.id}`).within(() => {
      // Type and id of an existing service cannot be changed.
      cy.get('#service_type').should('be.disabled');
      cy.get('#service_id').should('be.disabled');
      cy.get('#count').clear().type(daemonCount);
      cy.get('cd-submit-button').click();
    });
  }

  // Asserts that the given daemon shows the expected status badge.
  checkServiceStatus(daemon: string, expectedStatus = 'running') {
    let daemonNameIndex = this.serviceDetailColumnIndex.daemonName;
    let statusIndex = this.serviceDetailColumnIndex.status;
    // since hostname row is hidden from the hosts details table,
    // we'll need to manually override the indexes when this check is being
    // done for the daemons in host details page. So we'll get the url and
    // verify if the current page is not the services index page
    cy.url().then((url) => {
      if (!url.includes(pages.index.url)) {
        daemonNameIndex = 1;
        statusIndex = 3;
      }
      cy.get('cd-service-daemon-list').within(() => {
        this.getTableCell(daemonNameIndex, daemon, true)
          .parent()
          .find(`datatable-body-cell:nth-child(${statusIndex}) .badge`)
          .should(($ele) => {
            const status = $ele.toArray().map((v) => v.innerText);
            expect(status).to.include(expectedStatus);
          });
      });
    });
  }

  // Asserts the placement column contains the expected count.
  expectPlacementCount(serviceName: string, expectedCount: string) {
    this.getTableCell(this.columnIndex.service_name, serviceName)
      .parent()
      .find(`datatable-body-cell:nth-child(${this.columnIndex.placement})`)
      .should(($ele) => {
        const running = $ele.text().split(';');
        expect(running).to.include(`count:${expectedCount}`);
      });
  }

  // Asserts that the given service name is (or is not) listed.
  checkExist(serviceName: string, exist: boolean) {
    this.getTableCell(this.columnIndex.service_name, serviceName).should(($elements) => {
      const services = $elements.map((_, el) => el.textContent).get();
      if (exist) {
        expect(services).to.include(serviceName);
      } else {
        expect(services).to.not.include(serviceName);
      }
    });
  }

  // Asserts whether the service's placement column marks it as unmanaged.
  isUnmanaged(serviceName: string, unmanaged: boolean) {
    this.getTableCell(this.columnIndex.service_name, serviceName)
      .parent()
      .find(`datatable-body-cell:nth-child(${this.columnIndex.placement})`)
      .should(($ele) => {
        const placement = $ele.text().split(';');
        unmanaged
          ? expect(placement).to.include('unmanaged')
          : expect(placement).to.not.include('unmanaged');
      });
  }

  // Deletes a service via the table action and verifies its disappearance.
  deleteService(serviceName: string) {
    const getRow = this.getTableCell.bind(this, this.columnIndex.service_name);
    getRow(serviceName).click();
    // Clicks on table Delete button
    this.clickActionButton('delete');
    // Confirms deletion
    cy.get('cd-modal .custom-control-label').click();
    cy.contains('cd-modal button', 'Delete').click();
    // Wait for modal to close
    cy.get('cd-modal').should('not.exist');
    this.checkExist(serviceName, false);
  }

  // Runs a table action (e.g. start/stop/restart) on the given daemon.
  daemonAction(daemon: string, action: string) {
    cy.get('cd-service-daemon-list').within(() => {
      this.getTableRow(daemon).click();
      this.clickActionButton(action);
    });
  }
}
| 6,410 | 30.895522 | 100 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/users.e2e-spec.ts
|
import { UsersPageHelper } from './users.po';
// E2E tests for the cluster "Ceph Users" page.
describe('Cluster Ceph Users', () => {
  const users = new UsersPageHelper();

  beforeEach(() => {
    // Each test needs an authenticated session and starts on the page.
    cy.login();
    users.navigateTo();
  });

  describe('breadcrumb and tab tests', () => {
    it('should open and show breadcrumb', () => {
      users.expectBreadcrumbText('Ceph Users');
    });
  });

  describe('Cluster users table', () => {
    const entityName = 'client.test';
    const entity = 'mgr';
    const caps = 'allow r';

    it('should verify the table is not empty', () => {
      users.checkForUsers();
    });
    it('should verify the keys are hidden', () => {
      users.verifyKeysAreHidden();
    });
    it('should create a new user', () => {
      users.navigateTo('create');
      users.create(entityName, entity, caps);
      users.existTableCell(entityName, true);
    });
    it('should edit a user', () => {
      const newCaps = 'allow *';
      users.edit(entityName, 'allow *');
      users.existTableCell(entityName, true);
      users.checkCaps(entityName, [`${entity}: ${newCaps}`]);
    });
    it('should delete a user', () => {
      users.delete(entityName);
    });
  });
});
| 1,172 | 23.957447 | 61 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/cluster/users.po.ts
|
import { PageHelper } from '../page-helper.po';
// Routes and root component selectors used by PageHelper navigation.
const pages = {
  index: { url: '#/ceph-users', id: 'cd-crud-table' },
  create: { url: '#/cluster/user/create', id: 'cd-crud-form' }
};
export class UsersPageHelper extends PageHelper {
  pages = pages;

  // 1-based column positions of the users table.
  columnIndex = {
    entity: 2,
    capabilities: 3,
    key: 4
  };

  // Asserts that at least one user is listed.
  checkForUsers() {
    this.getTableCount('total').should('not.be.eq', 0);
  }

  // Asserts that the key column of osd.0 does not show a plain key value.
  verifyKeysAreHidden() {
    this.getTableCell(this.columnIndex.entity, 'osd.0')
      .parent()
      .find(`datatable-body-cell:nth-child(${this.columnIndex.key}) span`)
      .should(($ele) => {
        const serviceInstances = $ele.toArray().map((v) => v.innerText);
        expect(serviceInstances).not.contains(/^[a-z0-9]+$/i);
      });
  }

  /**
   * Creates a user through the CRUD form.
   * NOTE(review): the `#formly_…` ids are auto-generated and brittle — they
   * will break if the form layout changes; confirm whether stable selectors
   * are available.
   */
  @PageHelper.restrictTo(pages.create.url)
  create(entityName: string, entityType: string, caps: string) {
    cy.get('#formly_2_string_user_entity_0').type(entityName);
    cy.get('#formly_5_string_entity_0').type(entityType);
    cy.get('#formly_5_string_cap_1').type(caps);
    cy.get("[aria-label='Create User']").should('exist').click();
    cy.get('cd-crud-table').should('exist');
  }

  // Replaces the first capability of the user with `newCaps`.
  edit(name: string, newCaps: string) {
    this.navigateEdit(name);
    cy.get('#formly_5_string_cap_1').clear().type(newCaps);
    cy.get("[aria-label='Edit User']").should('exist').click();
    cy.get('cd-crud-table').should('exist');
  }

  // Asserts that the user's capabilities column lists all given caps.
  checkCaps(entityName: string, capabilities: string[]) {
    this.getTableCell(this.columnIndex.entity, entityName)
      .click()
      .parent()
      .find(`datatable-body-cell:nth-child(${this.columnIndex.capabilities}) .badge`)
      .should(($ele) => {
        const newCaps = $ele.toArray().map((v) => v.innerText);
        for (const cap of capabilities) {
          expect(newCaps).to.include(cap);
        }
      });
  }
}
| 1,829 | 29.5 | 85 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/01-global.feature.po.ts
|
import { And, Given, Then, When } from 'cypress-cucumber-preprocessor/steps';
import { UrlsCollection } from './urls.po';
// Lookup table mapping page names used in feature files to URLs/root selectors.
const urlsCollection = new UrlsCollection();
// Log in through the custom cy.login() command before a scenario runs.
Given('I am logged in', () => {
  cy.login();
});
// Navigate to a named page and wait for its root component to exist.
Given('I am on the {string} page', (page: string) => {
  cy.visit(urlsCollection.pages[page].url);
  cy.get(urlsCollection.pages[page].id).should('exist');
});
// Assert the named page's root component is rendered.
Then('I should be on the {string} page', (page: string) => {
  cy.get(urlsCollection.pages[page].id).should('exist');
});
// Buttons are located by their aria-label throughout these steps.
And('I should see a button to {string}', (button: string) => {
  cy.get(`[aria-label="${button}"]`).should('be.visible');
});
When('I click on {string} button', (button: string) => {
  cy.get(`[aria-label="${button}"]`).first().click();
});
// When you are clicking on an action in the table actions dropdown button
When('I click on {string} button from the table actions', (button: string) => {
  cy.get('.table-actions button.dropdown-toggle').first().click();
  cy.get(`[aria-label="${button}"]`).first().click();
});
// Select one or more options (comma+space separated) from the select menu.
// A falsy/empty string makes the step a no-op.
And('select options {string}', (labels: string) => {
  if (labels) {
    cy.get('a[data-testid=select-menu-edit]').click();
    for (const label of labels.split(', ')) {
      cy.get('.popover-body div.select-menu-item-content').contains(label).click();
    }
  }
});
// Add or remove labels inside a modal; `action` is 'add' or anything
// else, which is treated as removal of the matching badge(s).
And('{string} option {string}', (action: string, labels: string) => {
  if (labels) {
    if (action === 'add') {
      cy.get('cd-modal').find('.select-menu-edit').click();
      for (const label of labels.split(', ')) {
        cy.get('.popover-body input').type(`${label}{enter}`);
      }
    } else {
      for (const label of labels.split(', ')) {
        // Anchored regex so 'foo' does not match the 'foobar' badge.
        cy.contains('cd-modal .badge', new RegExp(`^${label}$`))
          .find('.badge-remove')
          .click();
      }
    }
  }
});
/**
 * Fills in the given field using the value provided
 * @param field ID of the field that needs to be filled out.
 * @param value Value that should be filled in the field.
 */
And('enter {string} {string}', (field: string, value: string) => {
  cy.get('cd-modal').within(() => {
    cy.get(`input[id=${field}]`).type(value);
  });
});
And('I click on submit button', () => {
  cy.get('[data-cy=submitBtn]').click();
});
/**
 * Selects any row on the datatable if it matches the given name
 */
When('I select a row {string}', (row: string) => {
  // Narrow the table with the search box first, then click the row.
  cy.get('cd-table .search input').first().clear().type(row);
  cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).click();
});
Then('I should see the modal', () => {
  cy.get('cd-modal').should('exist');
});
Then('I should not see the modal', () => {
  cy.get('cd-modal').should('not.exist');
});
/**
 * Some modals have an additional confirmation to be provided
 * by ticking the 'Are you sure?' box.
 */
Then('I check the tick box in modal', () => {
  cy.get('cd-modal .custom-control-label').click();
});
// Click the modal button labelled `action` and wait for the modal to close.
And('I confirm to {string}', (action: string) => {
  cy.contains('cd-modal button', action).click();
  cy.get('cd-modal').should('not.exist');
});
// Angular marks invalid form controls with the 'ng-invalid' class.
Then('I should see an error in {string} field', (field: string) => {
  cy.get('cd-modal').within(() => {
    cy.get(`input[id=${field}]`).should('have.class', 'ng-invalid');
  });
});
// Search the datatable and assert a matching row is present.
Then('I should see a row with {string}', (row: string) => {
  cy.get('cd-table .search input').first().clear().type(row);
  cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).should(
    'exist'
  );
});
// Search the datatable and assert no matching row is present.
Then('I should not see a row with {string}', (row: string) => {
  cy.get('cd-table .search input').first().clear().type(row);
  cy.contains(`datatable-body-row datatable-body-cell .datatable-body-cell-label`, row).should(
    'not.exist'
  );
});
// `entries` is a cucumber data table; each row is matched by its
// 'hostname' column against the datatable contents.
Then('I should see rows with following entries', (entries) => {
  entries.hashes().forEach((entry: any) => {
    cy.get('cd-table .search input').first().clear().type(entry.hostname);
    cy.contains(
      `datatable-body-row datatable-body-cell .datatable-body-cell-label`,
      entry.hostname
    ).should('exist');
  });
});
// Assert the row shows every badge in the comma-separated `options` list.
// An empty string makes the step a no-op.
And('I should see row {string} have {string}', (row: string, options: string) => {
  if (options) {
    cy.get('cd-table .search input').first().clear().type(row);
    for (const option of options.split(',')) {
      cy.contains(
        `datatable-body-row datatable-body-cell .datatable-body-cell-label .badge`,
        option
      ).should('exist');
    }
  }
});
// Negative counterpart of the step above.
And('I should see row {string} does not have {string}', (row: string, options: string) => {
  if (options) {
    cy.get('cd-table .search input').first().clear().type(row);
    for (const option of options.split(',')) {
      cy.contains(
        `datatable-body-row datatable-body-cell .datatable-body-cell-label .badge`,
        option
      ).should('not.exist');
    }
  }
});
// Click through one or more (comma+space separated) tab names in order.
And('I go to the {string} tab', (names: string) => {
  for (const name of names.split(', ')) {
    cy.contains('.nav.nav-tabs a', name).click();
  }
});
// Pick `option` in the <select> named `selectionName` and verify it stuck.
And('select {string} {string}', (selectionName: string, option: string) => {
  cy.get(`select[name=${selectionName}]`).select(option);
  cy.get(`select[name=${selectionName}] option:checked`).contains(option);
});
// Expand a datatable row to reveal its detail pane.
When('I expand the row {string}', (row: string) => {
  cy.contains('.datatable-body-row', row).first().find('.tc_expand-collapse').click();
});
// Like 'I should see row ... have ...' but scoped to the expanded
// detail pane of the currently open row.
And('I should see row {string} have {string} on this tab', (row: string, options: string) => {
  if (options) {
    cy.get('cd-table').should('exist');
    cy.get('datatable-scroller, .empty-row');
    cy.get('.datatable-row-detail').within(() => {
      cy.get('cd-table .search input').first().clear().type(row);
      for (const option of options.split(',')) {
        cy.contains(
          `datatable-body-row datatable-body-cell .datatable-body-cell-label span`,
          option
        ).should('exist');
      }
    });
  }
});
| 5,845 | 30.095745 | 96 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/grafana.feature.po.ts
|
import { Then, When } from 'cypress-cucumber-preprocessor/steps';
import 'cypress-iframe';
// Wait for the Grafana iframe ('#iframe') to finish loading and return a
// chainable scoped to its document (both commands come from cypress-iframe).
function getIframe() {
  cy.frameLoaded('#iframe');
  return cy.iframe();
}
// Assert each named panel (comma+space separated list) is visible inside
// the embedded Grafana app.
// NOTE(review): the .wait(100) calls below are fixed sleeps to let Grafana
// settle — presumably a flake workaround; confirm before removing.
Then('I should see the grafana panel {string}', (panels: string) => {
  getIframe().within(() => {
    for (const panel of panels.split(', ')) {
      cy.get('.grafana-app')
        .wait(100)
        .within(() => {
          cy.get(`[aria-label="${panel} panel"]`).should('be.visible');
        });
    }
  });
});
// Open each named panel in Grafana's single-panel "View" mode via the
// panel header menu.
When('I view the grafana panel {string}', (panels: string) => {
  getIframe().within(() => {
    for (const panel of panels.split(', ')) {
      cy.get('.grafana-app')
        .wait(100)
        .within(() => {
          cy.get(`[aria-label="${panel} panel"]`).within(() => {
            cy.get('h2').click();
          });
          cy.get('[aria-label="Panel header item View"]').click();
        });
    }
  });
});
// Assert the first span of each named panel does not show `value`.
Then('I should not see {string} in the panel {string}', (value: string, panels: string) => {
  getIframe().within(() => {
    for (const panel of panels.split(', ')) {
      cy.get('.grafana-app')
        .wait(100)
        .within(() => {
          cy.get(`[aria-label="${panel} panel"]`)
            .should('be.visible')
            .within(() => {
              cy.get('span').first().should('not.have.text', value);
            });
        });
    }
  });
});
// Assert each legend entry is rendered as a button inside each named graph.
Then(
  'I should see the legends {string} in the graph {string}',
  (legends: string, panels: string) => {
    getIframe().within(() => {
      for (const panel of panels.split(', ')) {
        cy.get('.grafana-app')
          .wait(100)
          .within(() => {
            cy.get(`[aria-label="${panel} panel"]`)
              .should('be.visible')
              .within(() => {
                for (const legend of legends.split(', ')) {
                  cy.get(`button`).contains(legend);
                }
              });
          });
      }
    });
  }
);
// Assert the named graphs actually received data (Grafana shows a
// 'datapoints-warning' element when a panel has no data).
Then('I should not see No Data in the graph {string}', (panels: string) => {
  getIframe().within(() => {
    for (const panel of panels.split(', ')) {
      cy.get('.grafana-app')
        .wait(100)
        .within(() => {
          cy.get(`[aria-label="${panel} panel"]`)
            .should('be.visible')
            .within(() => {
              cy.get('div.datapoints-warning').should('not.exist');
            });
        });
    }
  });
});
| 2,391 | 26.181818 | 92 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/urls.po.ts
|
import { PageHelper } from '../page-helper.po';
/**
 * Central registry of dashboard pages used by the cucumber step
 * definitions: maps a human-readable page name (as written in feature
 * files) to its route URL and the selector of its root component.
 */
export class UrlsCollection extends PageHelper {
  pages = {
    // Cluster expansion
    welcome: { url: '#/expand-cluster', id: 'cd-create-cluster' },

    // Landing page
    dashboard: { url: '#/dashboard', id: 'cd-dashboard' },

    // Hosts
    hosts: { url: '#/hosts', id: 'cd-hosts' },
    'add hosts': { url: '#/hosts/(modal:add)', id: 'cd-host-form' },

    // Services
    services: { url: '#/services', id: 'cd-services' },
    'create services': { url: '#/services/(modal:create)', id: 'cd-service-form' },

    // Physical Disks
    'physical disks': { url: '#/inventory', id: 'cd-inventory' },

    // Monitors
    monitors: { url: '#/monitor', id: 'cd-monitor' },

    // OSDs
    osds: { url: '#/osd', id: 'cd-osd-list' },
    'create osds': { url: '#/osd/create', id: 'cd-osd-form' },

    // Configuration
    configuration: { url: '#/configuration', id: 'cd-configuration' },

    // Crush Map
    'crush map': { url: '#/crush-map', id: 'cd-crushmap' },

    // Mgr modules
    'mgr-modules': { url: '#/mgr-modules', id: 'cd-mgr-module-list' },

    // Logs
    logs: { url: '#/logs', id: 'cd-logs' },

    // RGW Daemons
    'rgw daemons': { url: '#/rgw/daemon', id: 'cd-rgw-daemon-list' }
  };
}
| 1,267 | 27.177778 | 83 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/common/create-cluster/create-cluster.feature.po.ts
|
import { Given, Then } from 'cypress-cucumber-preprocessor/steps';
// Navigate to the named section of the cluster-expansion wizard via its
// nav link, then verify that section became active.
Given('I am on the {string} section', (page: string) => {
  cy.get('cd-wizard').within(() => {
    cy.get('.nav-link').should('contain.text', page).first().click();
    cy.get('.nav-link.active').should('contain.text', page);
  });
});
// Assert the message captured from the feature file is shown on the
// create-cluster component.
// Fix: the step pattern captures a {string}, but the previous callback
// ignored it and hardcoded 'Please expand your cluster first' — any other
// message passed by a feature file was silently not checked.
Then('I should see a message {string}', (message: string) => {
  cy.get('cd-create-cluster').should('contain.text', message);
});
| 447 | 33.461538 | 89 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.e2e-spec.ts
|
import { FilesystemsPageHelper } from './filesystems.po';
describe('File Systems page', () => {
const filesystems = new FilesystemsPageHelper();
beforeEach(() => {
cy.login();
filesystems.navigateTo();
});
describe('breadcrumb test', () => {
it('should open and show breadcrumb', () => {
filesystems.expectBreadcrumbText('File Systems');
});
});
});
| 385 | 21.705882 | 57 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/filesystems/filesystems.po.ts
|
import { PageHelper } from '../page-helper.po';
/**
 * Page helper for the CephFS "File Systems" list page.
 */
export class FilesystemsPageHelper extends PageHelper {
  // Route and root component selector of the file systems list view.
  pages = { index: { url: '#/cephfs', id: 'cd-cephfs-list' } };
}
| 171 | 27.666667 | 63 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/01-hosts.e2e-spec.ts
|
import { HostsPageHelper } from '../cluster/hosts.po';
// E2E coverage for the Hosts page against a live orchestrator.
describe('Hosts page', () => {
  const hosts = new HostsPageHelper();

  beforeEach(() => {
    cy.login();
    hosts.navigateTo();
  });

  describe('when Orchestrator is available', () => {
    beforeEach(function () {
      // Expose the orchestrator fixtures as test-context aliases;
      // the `it` callbacks below use `function () {}` (not arrows) so
      // they can reach them via `this`.
      cy.fixture('orchestrator/inventory.json').as('hosts');
      cy.fixture('orchestrator/services.json').as('services');
    });

    it('should not add an existing host', function () {
      const hostname = Cypress._.sample(this.hosts).name;
      hosts.navigateTo('add');
      // `true` -> the form is expected to reject the duplicate hostname.
      hosts.add(hostname, true);
    });

    it('should drain and remove a host and then add it back', function () {
      const hostname = Cypress._.last(this.hosts)['name'];

      // should drain the host first before deleting
      hosts.drain(hostname);
      hosts.remove(hostname);

      // add it back
      hosts.navigateTo('add');
      hosts.add(hostname);
      hosts.checkExist(hostname, true);
    });

    it('should display inventory', function () {
      for (const host of this.hosts) {
        hosts.clickTab('cd-host-details', host.name, 'Physical Disks');
        cy.get('cd-host-details').within(() => {
          hosts.expectTableCount('total', host.devices.length);
        });
      }
    });

    it('should display daemons', function () {
      for (const host of this.hosts) {
        hosts.clickTab('cd-host-details', host.name, 'Daemons');
        cy.get('cd-host-details').within(() => {
          hosts.getTableCount('total').should('be.gte', 0);
        });
      }
    });

    it('should edit host labels', function () {
      const hostname = Cypress._.sample(this.hosts).name;
      const labels = ['foo', 'bar'];
      hosts.editLabels(hostname, labels, true);
      hosts.editLabels(hostname, labels, false);
    });

    it('should enter host into maintenance', function () {
      const hostname = Cypress._.sample(this.hosts).name;
      // Daemon types running on the sampled host (was an untyped
      // `new Array()` filled by a manual forEach loop).
      const serviceList: string[] = this.services
        .filter((service: any) => service.hostname === hostname)
        .map((service: any) => service.daemon_type);
      // Hosts running a mgr or alertmanager daemon are skipped: they
      // cannot enter maintenance without force.
      const enterMaintenance = !serviceList.some(
        (service) => service === 'mgr' || service === 'alertmanager'
      );
      if (enterMaintenance) {
        hosts.maintenance(hostname);
      }
    });

    it('should exit host from maintenance', function () {
      const hostname = Cypress._.sample(this.hosts).name;
      hosts.maintenance(hostname, true);
    });
  });
});
| 2,594 | 29.174419 | 75 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/03-inventory.e2e-spec.ts
|
import { InventoryPageHelper } from '../cluster/inventory.po';
describe('Physical Disks page', () => {
const inventory = new InventoryPageHelper();
beforeEach(() => {
cy.login();
inventory.navigateTo();
});
it('should have correct devices', () => {
cy.fixture('orchestrator/inventory.json').then((hosts) => {
const totalDiskCount = Cypress._.sumBy(hosts, 'devices.length');
inventory.expectTableCount('total', totalDiskCount);
for (const host of hosts) {
inventory.filterTable('Hostname', host['name']);
inventory.getTableCount('found').should('be.eq', host.devices.length);
}
});
});
it('should identify device', () => {
inventory.identify();
});
});
| 730 | 27.115385 | 78 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/04-osds.e2e-spec.ts
|
import { OSDsPageHelper } from '../cluster/osds.po';
import { DashboardPageHelper } from '../ui/dashboard.po';
// E2E coverage for OSD creation/removal against a live orchestrator.
describe('OSDs page', () => {
  const osds = new OSDsPageHelper();
  const dashboard = new DashboardPageHelper();
  beforeEach(() => {
    cy.login();
    osds.navigateTo();
  });
  describe('when Orchestrator is available', () => {
    it('should create and delete OSDs', () => {
      // Remember how many OSDs exist before we create new ones.
      osds.getTableCount('total').as('initOSDCount');
      osds.navigateTo('create');
      osds.create('hdd');
      // NOTE(review): the '@newOSDCount' alias is not set in this file —
      // presumably created inside osds.create(); confirm in OSDsPageHelper.
      cy.get('@newOSDCount').then((newCount) => {
        cy.get('@initOSDCount').then((oldCount) => {
          const expectedCount = Number(oldCount) + Number(newCount);

          // check total rows
          osds.expectTableCount('total', expectedCount);

          // landing page is easier to check OSD status
          dashboard.navigateTo();
          dashboard.infoCardBody('OSDs').should('contain.text', `${expectedCount} total`);
          dashboard.infoCardBody('OSDs').should('contain.text', `${expectedCount} up`);
          dashboard.infoCardBody('OSDs').should('contain.text', `${expectedCount} in`);
          // Fixed sleep to let the cluster settle before mutating OSDs
          // again — presumably a flake workaround; confirm before removing.
          cy.wait(30000);
          expect(Number(newCount)).to.be.gte(2);

          // Delete the first OSD we created
          osds.navigateTo();
          const deleteOsdId = Number(oldCount);
          osds.deleteByIDs([deleteOsdId], false);
          osds.ensureNoOsd(deleteOsdId);
          cy.wait(30000);

          // Replace the second OSD we created
          const replaceID = Number(oldCount) + 1;
          osds.deleteByIDs([replaceID], true);
          osds.checkStatus(replaceID, ['destroyed']);
        });
      });
    });
  });
});
| 1,681 | 32.64 | 90 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/05-services.e2e-spec.ts
|
import { ServicesPageHelper } from '../cluster/services.po';
describe('Services page', () => {
const services = new ServicesPageHelper();
const serviceName = 'rgw.foo';
beforeEach(() => {
cy.login();
services.navigateTo();
});
describe('when Orchestrator is available', () => {
it('should create an rgw service', () => {
services.navigateTo('create');
services.addService('rgw');
services.checkExist(serviceName, true);
});
it('should edit a service', () => {
const count = '2';
services.editService(serviceName, count);
services.expectPlacementCount(serviceName, count);
});
it('should create and delete an ingress service', () => {
services.navigateTo('create');
services.addService('ingress');
services.checkExist('ingress.rgw.foo', true);
services.deleteService('ingress.rgw.foo');
});
});
});
| 910 | 24.305556 | 61 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/03-create-cluster-create-services.e2e-spec.ts
|
/* tslint:disable*/
import {
CreateClusterServicePageHelper,
CreateClusterWizardHelper
} from '../../cluster/create-cluster.po';
/* tslint:enable*/
// E2E coverage for the "Create Services" step of the cluster-expansion wizard.
describe('Create cluster create services page', () => {
  const createCluster = new CreateClusterWizardHelper();
  const createClusterServicePage = new CreateClusterServicePageHelper();
  // Create a service through the wizard's embedded form and verify it shows
  // up in the services table.
  const createService = (serviceType: string, serviceName: string, count = 1) => {
    cy.get('[aria-label=Create]').first().click();
    createClusterServicePage.addService(serviceType, false, count);
    createClusterServicePage.checkExist(serviceName, true);
  };
  beforeEach(() => {
    cy.login();
    createCluster.navigateTo();
    createCluster.createCluster();
    cy.get('.nav-link').contains('Create Services').click();
  });
  it('should check if title contains Create Services', () => {
    cy.get('.title').should('contain.text', 'Create Services');
  });
  describe('when Orchestrator is available', () => {
    const serviceName = 'mds.test';
    it('should create an mds service', () => {
      createService('mds', serviceName);
    });
    it('should edit a service', () => {
      const daemonCount = '2';
      createClusterServicePage.editService(serviceName, daemonCount);
      createClusterServicePage.expectPlacementCount(serviceName, daemonCount);
    });
    it('should delete mds service', () => {
      createClusterServicePage.deleteService('mds.test');
    });
  });
});
| 1,450 | 29.87234 | 82 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/04-create-cluster-create-osds.e2e-spec.ts
|
/* tslint:disable*/
import { CreateClusterWizardHelper } from '../../cluster/create-cluster.po';
import { OSDsPageHelper } from '../../cluster/osds.po';
/* tslint:enable*/
const osds = new OSDsPageHelper();
// E2E coverage for the "Create OSDs" step of the cluster-expansion wizard.
describe('Create cluster create osds page', () => {
  const createCluster = new CreateClusterWizardHelper();
  beforeEach(() => {
    cy.login();
    createCluster.navigateTo();
    createCluster.createCluster();
    cy.get('.nav-link').contains('Create OSDs').click();
  });
  it('should check if title contains Create OSDs', () => {
    cy.get('.title').should('contain.text', 'Create OSDs');
  });
  describe('when Orchestrator is available', () => {
    it('should create OSDs', () => {
      const hostnames = ['ceph-node-00', 'ceph-node-01'];
      for (const hostname of hostnames) {
        osds.create('hdd', hostname, true);

        // Go to the Review section and Expand the cluster
        // because the drive group spec is only stored
        // in frontend and will be lost when refreshed
        cy.get('.nav-link').contains('Review').click();
        cy.get('button[aria-label="Next"]').click();
        cy.get('cd-dashboard').should('exist');
        // Re-enter the wizard for the next host's drive group.
        createCluster.navigateTo();
        createCluster.createCluster();
        cy.get('.nav-link').contains('Create OSDs').click();
      }
    });
  });
});
| 1,336 | 31.609756 | 76 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/05-create-cluster-review.e2e-spec.ts
|
/* tslint:disable*/
import {
CreateClusterHostPageHelper,
CreateClusterWizardHelper
} from '../../cluster/create-cluster.po';
/* tslint:enable*/
// E2E coverage for the "Review" step of the cluster-expansion wizard.
describe('Create Cluster Review page', () => {
  const createCluster = new CreateClusterWizardHelper();
  const createClusterHostPage = new CreateClusterHostPageHelper();
  beforeEach(() => {
    cy.login();
    createCluster.navigateTo();
    createCluster.createCluster();
    cy.get('.nav-link').contains('Review').click();
  });
  describe('navigation link test', () => {
    it('should check if active nav-link is of Review section', () => {
      cy.get('.nav-link.active').should('contain.text', 'Review');
    });
  });
  describe('fields check', () => {
    it('should check cluster resources table is present', () => {
      // check for table header 'Cluster Resources'
      createCluster.getLegends().its(0).should('have.text', 'Cluster Resources');

      // check for fields in table
      createCluster.getStatusTables().should('contain.text', 'Hosts');
      createCluster.getStatusTables().should('contain.text', 'Storage Capacity');
      createCluster.getStatusTables().should('contain.text', 'CPUs');
      createCluster.getStatusTables().should('contain.text', 'Memory');
    });
    it('should check Host Details table is present', () => {
      // expect exactly one datatable on the Review step: Host Details
      // (stale comment previously claimed "two tables")
      createCluster.getDataTables().should('have.length', 1);

      // verify correct columns on Host Details table
      createCluster.getDataTableHeaders(0).contains('Hostname');
      createCluster.getDataTableHeaders(0).contains('Labels');
      createCluster.getDataTableHeaders(0).contains('CPUs');
      createCluster.getDataTableHeaders(0).contains('Cores');
      createCluster.getDataTableHeaders(0).contains('Total Memory');
      createCluster.getDataTableHeaders(0).contains('Raw Capacity');
      createCluster.getDataTableHeaders(0).contains('HDDs');
      createCluster.getDataTableHeaders(0).contains('Flash');
      createCluster.getDataTableHeaders(0).contains('NICs');
    });
    it('should check default host name is present', () => {
      createClusterHostPage.check_for_host();
    });
  });
});
| 2,191 | 31.716418 | 81 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/06-cluster-check.e2e-spec.ts
|
/* tslint:disable*/
import { CreateClusterWizardHelper } from '../../cluster/create-cluster.po';
import { HostsPageHelper } from '../../cluster/hosts.po';
import { ServicesPageHelper } from '../../cluster/services.po';
/* tslint:enable*/
// Post-wizard sanity checks: finish cluster creation, then verify hosts,
// monitoring daemons and mons across the Hosts page.
describe('when cluster creation is completed', () => {
  const createCluster = new CreateClusterWizardHelper();
  const services = new ServicesPageHelper();
  const hosts = new HostsPageHelper();

  const hostnames = ['ceph-node-00', 'ceph-node-01', 'ceph-node-02', 'ceph-node-03'];

  beforeEach(() => {
    cy.login();
  });

  it('should redirect to dashboard landing page after cluster creation', () => {
    createCluster.navigateTo();
    createCluster.createCluster();

    // Explicitly skip OSD Creation Step so that it prevents from
    // deploying OSDs to the hosts automatically.
    cy.get('.nav-link').contains('Create OSDs').click();
    cy.get('button[aria-label="Skip this step"]').click();

    cy.get('.nav-link').contains('Review').click();
    cy.get('button[aria-label="Next"]').click();
    cy.get('cd-dashboard').should('exist');
  });

  describe('Hosts page', () => {
    beforeEach(() => {
      hosts.navigateTo();
    });

    it('should add one more host', () => {
      hosts.navigateTo('add');
      hosts.add(hostnames[3]);
      hosts.checkExist(hostnames[3], true);
    });

    it('should check if monitoring stacks are running on the root host', { retries: 2 }, () => {
      const monitoringStack = ['alertmanager', 'grafana', 'node-exporter', 'prometheus'];
      hosts.clickTab('cd-host-details', 'ceph-node-00', 'Daemons');
      for (const daemon of monitoringStack) {
        cy.get('cd-host-details').within(() => {
          services.checkServiceStatus(daemon);
        });
      }
    });

    // The wizard applies '_no_schedule' while expanding; it must be gone
    // once the cluster is created.
    it('should have removed "_no_schedule" label', () => {
      for (const hostname of hostnames) {
        hosts.checkLabelExists(hostname, ['_no_schedule'], false);
      }
    });

    it('should display inventory', () => {
      hosts.clickTab('cd-host-details', hostnames[1], 'Physical Disks');
      cy.get('cd-host-details').within(() => {
        hosts.getTableCount('total').should('be.gte', 0);
      });
    });

    it('should display daemons', () => {
      hosts.clickTab('cd-host-details', hostnames[1], 'Daemons');
      cy.get('cd-host-details').within(() => {
        hosts.getTableCount('total').should('be.gte', 0);
      });
    });

    it('should check if mon daemon is running on all hosts', () => {
      for (const hostname of hostnames) {
        hosts.clickTab('cd-host-details', hostname, 'Daemons');
        cy.get('cd-host-details').within(() => {
          services.checkServiceStatus('mon');
        });
      }
    });
  });
});
| 2,740 | 32.024096 | 96 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/07-osds.e2e-spec.ts
|
/* tslint:disable*/
import { OSDsPageHelper } from '../../cluster/osds.po';
/* tslint:enable*/
// Final OSD sanity check after the whole workflow suite has run.
describe('OSDs page', () => {
  const osdsPage = new OSDsPageHelper();

  beforeEach(() => {
    cy.login();
    osdsPage.navigateTo();
  });

  it('should check if atleast 3 osds are created', { retries: 3 }, () => {
    // We created a total of more than 3 OSDs throughout the suite, so at
    // least OSDs 0-2 must be listed as 'in' and 'up'. OSD creation can be
    // slow, hence the retries.
    [0, 1, 2].forEach((osdId) => {
      osdsPage.checkStatus(osdId, ['in', 'up']);
    });
  });
});
| 628 | 25.208333 | 74 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/08-hosts.e2e-spec.ts
|
/* tslint:disable*/
import { HostsPageHelper } from '../../cluster/hosts.po';
import { ServicesPageHelper } from '../../cluster/services.po';
/* tslint:enable*/
// Host workflow checks: rgw daemon placement, forced maintenance,
// drain/remove/re-add, and daemon instance counts.
describe('Host Page', () => {
  const hosts = new HostsPageHelper();
  const services = new ServicesPageHelper();

  const hostnames = ['ceph-node-00', 'ceph-node-01', 'ceph-node-02', 'ceph-node-03'];

  beforeEach(() => {
    cy.login();
    hosts.navigateTo();
  });

  // rgw is needed for testing the force maintenance
  it('should create rgw services', () => {
    services.navigateTo('create');
    services.addService('rgw', false, 4);
    services.checkExist('rgw.foo', true);
  });

  it('should check if rgw daemon is running on all hosts', () => {
    for (const hostname of hostnames) {
      hosts.clickTab('cd-host-details', hostname, 'Daemons');
      cy.get('cd-host-details').within(() => {
        services.checkServiceStatus('rgw');
      });
    }
  });

  // Second arg = force, third arg = exit maintenance afterwards.
  it('should force maintenance and exit', () => {
    hosts.maintenance(hostnames[3], true, true);
  });

  it('should drain, remove and add the host back', () => {
    hosts.drain(hostnames[3]);
    hosts.remove(hostnames[3]);
    hosts.navigateTo('add');
    hosts.add(hostnames[3]);
    hosts.checkExist(hostnames[3], true);
  });

  it('should show the exact count of daemons', () => {
    hosts.checkServiceInstancesExist(hostnames[0], ['mgr: 1', 'prometheus: 1']);
  });
});
| 1,418 | 27.959184 | 85 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/09-services.e2e-spec.ts
|
/* tslint:disable*/
import { ServicesPageHelper } from '../../cluster/services.po';
/* tslint:enable*/
// Service lifecycle workflow: mds daemon actions (stop/start/restart/
// redeploy), snmp-gateway variants, unmanaged ingress, and ceph-exporter.
describe('Services page', () => {
  const services = new ServicesPageHelper();
  const mdsDaemonName = 'mds.test';
  beforeEach(() => {
    cy.login();
    services.navigateTo();
  });

  it('should check if rgw service is created', () => {
    services.checkExist('rgw.foo', true);
  });

  it('should create an mds service', () => {
    services.navigateTo('create');
    services.addService('mds', false);
    services.checkExist(mdsDaemonName, true);
    services.clickServiceTab(mdsDaemonName, 'Daemons');
    cy.get('cd-service-details').within(() => {
      services.checkServiceStatus(mdsDaemonName);
    });
  });

  it('should stop a daemon', () => {
    services.clickServiceTab(mdsDaemonName, 'Daemons');
    services.checkServiceStatus(mdsDaemonName);

    services.daemonAction('mds', 'stop');
    cy.get('cd-service-details').within(() => {
      services.checkServiceStatus(mdsDaemonName, 'stopped');
    });
  });

  it('should restart a daemon', () => {
    services.checkExist(mdsDaemonName, true);
    services.clickServiceTab(mdsDaemonName, 'Daemons');
    services.daemonAction('mds', 'restart');
    cy.get('cd-service-details').within(() => {
      services.checkServiceStatus(mdsDaemonName, 'running');
    });
  });

  // Redeploy must bring a stopped daemon back to 'running'.
  it('should redeploy a daemon', () => {
    services.checkExist(mdsDaemonName, true);
    services.clickServiceTab(mdsDaemonName, 'Daemons');
    services.daemonAction('mds', 'stop');
    cy.get('cd-service-details').within(() => {
      services.checkServiceStatus(mdsDaemonName, 'stopped');
    });
    services.daemonAction('mds', 'redeploy');
    cy.get('cd-service-details').within(() => {
      services.checkServiceStatus(mdsDaemonName, 'running');
    });
  });

  it('should start a daemon', () => {
    services.checkExist(mdsDaemonName, true);
    services.clickServiceTab(mdsDaemonName, 'Daemons');
    services.daemonAction('mds', 'stop');
    cy.get('cd-service-details').within(() => {
      services.checkServiceStatus(mdsDaemonName, 'stopped');
    });
    services.daemonAction('mds', 'start');
    cy.get('cd-service-details').within(() => {
      services.checkServiceStatus(mdsDaemonName, 'running');
    });
  });

  it('should delete an mds service', () => {
    services.deleteService(mdsDaemonName);
  });

  // addService args here: (type, exist, count, snmpVersion, snmpPrivacy).
  it('should create and delete snmp-gateway service with version V2c', () => {
    services.navigateTo('create');
    services.addService('snmp-gateway', false, 1, 'V2c');
    services.checkExist('snmp-gateway', true);

    services.clickServiceTab('snmp-gateway', 'Daemons');
    cy.get('cd-service-details').within(() => {
      services.checkServiceStatus('snmp-gateway');
    });

    services.deleteService('snmp-gateway');
  });

  it('should create and delete snmp-gateway service with version V3', () => {
    services.navigateTo('create');
    services.addService('snmp-gateway', false, 1, 'V3', true);
    services.checkExist('snmp-gateway', true);

    services.clickServiceTab('snmp-gateway', 'Daemons');
    cy.get('cd-service-details').within(() => {
      services.checkServiceStatus('snmp-gateway');
    });

    services.deleteService('snmp-gateway');
  });

  it('should create and delete snmp-gateway service with version V3 and w/o privacy protocol', () => {
    services.navigateTo('create');
    services.addService('snmp-gateway', false, 1, 'V3', false);
    services.checkExist('snmp-gateway', true);

    services.clickServiceTab('snmp-gateway', 'Daemons');
    cy.get('cd-service-details').within(() => {
      services.checkServiceStatus('snmp-gateway');
    });

    services.deleteService('snmp-gateway');
  });

  it('should create ingress as unmanaged', () => {
    services.navigateTo('create');
    services.addService('ingress', false, undefined, undefined, undefined, true);
    services.checkExist('ingress.rgw.foo', true);
    services.isUnmanaged('ingress.rgw.foo', true);
    services.deleteService('ingress.rgw.foo');
  });

  it('should check if exporter daemons are running', () => {
    services.clickServiceTab('ceph-exporter', 'Daemons');
    cy.get('cd-service-details').within(() => {
      services.checkServiceStatus('ceph-exporter', 'running');
    });
  });
});
| 4,296 | 31.308271 | 102 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/10-nfs-exports.e2e-spec.ts
|
/* tslint:disable*/
import { ServicesPageHelper } from '../../cluster/services.po';
import { NFSPageHelper } from '../../orchestrator/workflow/nfs/nfs-export.po';
import { BucketsPageHelper } from '../../rgw/buckets.po';
/* tslint:enable*/
// NFS export lifecycle: create an NFS cluster service, export an RGW
// bucket through it, inspect clients, edit the pseudo path, clean up.
describe('nfsExport page', () => {
  const nfsExport = new NFSPageHelper();
  const services = new ServicesPageHelper();
  const buckets = new BucketsPageHelper();
  const bucketName = 'e2e.nfs.bucket';
  // @TODO: uncomment this when a CephFS volume can be created through Dashboard.
  // const fsPseudo = '/fsPseudo';
  const rgwPseudo = '/rgwPseudo';
  const editPseudo = '/editPseudo';
  const backends = ['CephFS', 'Object Gateway'];
  const squash = 'no_root_squash';
  const client: object = { addresses: '192.168.0.10' };

  beforeEach(() => {
    cy.login();
    nfsExport.navigateTo();
  });

  describe('breadcrumb test', () => {
    it('should open and show breadcrumb', () => {
      nfsExport.expectBreadcrumbText('NFS');
    });
  });

  describe('Create, edit and delete', () => {
    it('should create an NFS cluster', () => {
      services.navigateTo('create');

      services.addService('nfs');

      services.checkExist('nfs.testnfs', true);
      services.clickServiceTab('nfs.testnfs', 'Daemons');
      services.checkServiceStatus('nfs');
    });

    it('should create a nfs-export with RGW backend', () => {
      // The export needs an existing bucket to point at.
      buckets.navigateTo('create');
      buckets.create(bucketName, 'dashboard', 'default-placement');

      nfsExport.navigateTo();
      nfsExport.existTableCell(rgwPseudo, false);
      nfsExport.navigateTo('create');
      // backends[1] === 'Object Gateway'
      nfsExport.create(backends[1], squash, client, rgwPseudo, bucketName);
      nfsExport.existTableCell(rgwPseudo);
    });

    // @TODO: uncomment this when a CephFS volume can be created through Dashboard.
    // it('should create a nfs-export with CephFS backend', () => {
    //   nfsExport.navigateTo();
    //   nfsExport.existTableCell(fsPseudo, false);
    //   nfsExport.navigateTo('create');
    //   nfsExport.create(backends[0], squash, client, fsPseudo);
    //   nfsExport.existTableCell(fsPseudo);
    // });

    it('should show Clients', () => {
      nfsExport.clickTab('cd-nfs-details', rgwPseudo, 'Clients (1)');
      cy.get('cd-nfs-details').within(() => {
        nfsExport.getTableCount('total').should('be.gte', 0);
      });
    });

    it('should edit an export', () => {
      nfsExport.editExport(rgwPseudo, editPseudo);

      nfsExport.existTableCell(editPseudo);
    });

    it('should delete exports and bucket', () => {
      nfsExport.delete(editPseudo);
      buckets.navigateTo();
      buckets.delete(bucketName);
    });
  });
});
| 2,677 | 31.26506 | 83 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/orchestrator/workflow/nfs/nfs-export.po.ts
|
/* tslint:disable*/
import { PageHelper } from '../../../page-helper.po';
/* tslint:enable*/
// Routes and root component selectors for the NFS list and create pages.
const pages = {
  index: { url: '#/nfs', id: 'cd-nfs-list' },
  create: { url: '#/nfs/create', id: 'cd-nfs-form' }
};
/**
 * Page helper for the NFS export pages: fills the export creation form
 * and edits an existing export's pseudo path.
 */
export class NFSPageHelper extends PageHelper {
  pages = pages;

  @PageHelper.restrictTo(pages.create.url)
  create(backend: string, squash: string, client: object, pseudo: string, rgwPath?: string) {
    // Cluster 'testnfs' must already exist (created by the nfs service test).
    this.selectOption('cluster_id', 'testnfs');
    // select a storage backend
    this.selectOption('name', backend);
    if (backend === 'CephFS') {
      this.selectOption('fs_name', 'myfs');

      cy.get('#security_label').click({ force: true });
    } else {
      // Object Gateway backend: rgwPath is the bucket to export.
      cy.get('input[data-testid=rgw_path]').type(rgwPath);
    }
    cy.get('input[name=pseudo]').type(pseudo);
    this.selectOption('squash', squash);

    // Add clients
    cy.get('button[name=add_client]').click({ force: true });
    cy.get('input[name=addresses]').type(client['addresses']);

    // Check if we can remove clients and add it again
    cy.get('span[name=remove_client]').click({ force: true });
    cy.get('button[name=add_client]').click({ force: true });
    cy.get('input[name=addresses]').type(client['addresses']);

    cy.get('cd-submit-button').click();
  }

  // Rename an export's pseudo path and verify the change in its details tab.
  editExport(pseudo: string, editPseudo: string) {
    this.navigateEdit(pseudo);

    cy.get('input[name=pseudo]').clear().type(editPseudo);

    cy.get('cd-submit-button').click();

    // Click the export and check its details table for updated content
    this.getExpandCollapseElement(editPseudo).click();
    cy.get('.active.tab-pane').should('contain.text', editPseudo);
  }
}
| 1,655 | 30.245283 | 93 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/pools/pools.e2e-spec.ts
|
import { PoolPageHelper } from './pools.po';
// End-to-end tests for the Pools page: breadcrumb/tab layout plus a
// create -> edit -> delete lifecycle for one replicated pool.
describe('Pools page', () => {
  const pools = new PoolPageHelper();
  // Single pool shared by the whole CRUD suite below.
  const poolName = 'pool_e2e_pool-test';

  beforeEach(() => {
    // Every test needs an authenticated session on the pool list page.
    cy.login();
    pools.navigateTo();
  });

  describe('breadcrumb and tab tests', () => {
    it('should open and show breadcrumb', () => {
      pools.expectBreadcrumbText('Pools');
    });

    it('should show two tabs', () => {
      pools.getTabsCount().should('equal', 2);
    });

    it('should show pools list tab at first', () => {
      pools.getTabText(0).should('eq', 'Pools List');
    });

    it('should show overall performance as a second tab', () => {
      pools.getTabText(1).should('eq', 'Overall Performance');
    });
  });

  // NOTE: these tests are order-dependent — the pool created first is
  // edited and finally deleted.
  describe('Create, update and destroy', () => {
    it('should create a pool', () => {
      pools.existTableCell(poolName, false);
      pools.navigateTo('create');
      pools.create(poolName, 8, 'rbd');
      pools.existTableCell(poolName);
    });

    it('should edit a pools placement group', () => {
      pools.existTableCell(poolName);
      pools.edit_pool_pg(poolName, 32);
    });

    it('should show updated configuration field values', () => {
      pools.existTableCell(poolName);
      const bpsLimit = '4 B/s';
      pools.edit_pool_configuration(poolName, bpsLimit);
    });

    it('should delete a pool', () => {
      pools.delete(poolName);
    });
  });
});
| 1,413 | 25.185185 | 65 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/pools/pools.po.ts
|
import { PageHelper } from '../page-helper.po';
const pages = {
index: { url: '#/pool', id: 'cd-pool-list' },
create: { url: '#/pool/create', id: 'cd-pool-form' }
};
/**
 * Page-object helper for the Pools pages (list and create/edit forms).
 * Wraps the Cypress interactions used by the pool e2e specs.
 */
export class PoolPageHelper extends PageHelper {
  pages = pages;

  /**
   * Assert that `n` is a valid placement-group count, i.e. a positive
   * power of two. The bare `(n & (n - 1)) === 0` test alone wrongly
   * accepts 0 (0 & -1 === 0) and, via 32-bit bitwise wrap-around,
   * -2147483648 — so it is guarded with `n > 0` first.
   */
  private isPowerOf2(n: number) {
    // tslint:disable-next-line: no-bitwise
    return expect(n > 0 && (n & (n - 1)) === 0, `Placement groups ${n} are not a power of 2`).to.be
      .true;
  }

  /**
   * Create a replicated pool with an explicit PG count and the given
   * application tags, then submit the form.
   */
  @PageHelper.restrictTo(pages.create.url)
  create(name: string, placement_groups: number, ...apps: string[]) {
    cy.get('input[name=name]').clear().type(name);
    this.isPowerOf2(placement_groups);
    this.selectOption('poolType', 'replicated');
    // Autoscaler defaults to 'on'; turning it off reveals the pgNum field.
    this.expectSelectOption('pgAutoscaleMode', 'on');
    this.selectOption('pgAutoscaleMode', 'off'); // To show pgNum field
    cy.get('input[name=pgNum]').clear().type(`${placement_groups}`);
    this.setApplications(apps);
    cy.get('cd-submit-button').click();
  }

  /**
   * Change a pool's PG count via the edit form. When `wait` is true,
   * also wait until the pool row reports all PGs active+clean.
   */
  edit_pool_pg(name: string, new_pg: number, wait = true) {
    this.isPowerOf2(new_pg);
    this.navigateEdit(name);
    cy.get('input[name=pgNum]').clear().type(`${new_pg}`);
    cy.get('cd-submit-button').click();
    const str = `${new_pg} active+clean`;
    this.getTableRow(name);
    if (wait) {
      this.getTableRow(name).contains(str);
    }
  }

  /**
   * Set the RBD QoS bps limit in the pool's configuration section and
   * verify the value round-trips by re-opening the edit form.
   */
  edit_pool_configuration(name: string, bpsLimit: string) {
    this.navigateEdit(name);
    cy.get('.collapsible').click();
    cy.get('cd-rbd-configuration-form')
      .get('input[name=rbd_qos_bps_limit]')
      .clear()
      .type(`${bpsLimit}`);
    cy.get('cd-submit-button').click();
    // Re-open the form to confirm the persisted value.
    this.navigateEdit(name);
    cy.get('.collapsible').click();
    cy.get('cd-rbd-configuration-form')
      .get('input[name=rbd_qos_bps_limit]')
      .should('have.value', bpsLimit);
  }

  /** Tag the pool with the given applications via the select menu popover. */
  private setApplications(apps: string[]) {
    if (!apps || apps.length === 0) {
      return;
    }
    cy.get('.float-start.me-2.select-menu-edit').click();
    cy.get('.popover-body').should('be.visible');
    apps.forEach((app) => cy.get('.select-menu-item-content').contains(app).click());
  }
}
| 2,095 | 28.521127 | 96 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/buckets.e2e-spec.ts
|
import { BucketsPageHelper } from './buckets.po';
// End-to-end tests for the RGW Buckets page: breadcrumb, bucket CRUD
// (including object locking), default encryption, and form validation.
describe('RGW buckets page', () => {
  const buckets = new BucketsPageHelper();
  // Single bucket name reused by the create/edit/delete tests.
  const bucket_name = 'e2ebucket';

  beforeEach(() => {
    cy.login();
    buckets.navigateTo();
  });

  describe('breadcrumb tests', () => {
    it('should open and show breadcrumb', () => {
      buckets.expectBreadcrumbText('Buckets');
    });
  });

  // NOTE: order-dependent — each test creates and/or deletes the shared
  // bucket so the next one starts from a clean state.
  describe('create, edit & delete bucket tests', () => {
    it('should create bucket', () => {
      buckets.navigateTo('create');
      buckets.create(bucket_name, BucketsPageHelper.USERS[0], 'default-placement');
      buckets.getFirstTableCell(bucket_name).should('exist');
    });

    it('should edit bucket', () => {
      buckets.edit(bucket_name, BucketsPageHelper.USERS[1]);
      buckets.getDataTables().should('contain.text', BucketsPageHelper.USERS[1]);
    });

    it('should delete bucket', () => {
      buckets.delete(bucket_name);
    });

    it('should check default encryption is SSE-S3', () => {
      buckets.navigateTo('create');
      buckets.checkForDefaultEncryption();
    });

    it('should create bucket with object locking enabled', () => {
      buckets.navigateTo('create');
      buckets.create(bucket_name, BucketsPageHelper.USERS[0], 'default-placement', true);
      buckets.getFirstTableCell(bucket_name).should('exist');
    });

    it('should not allow to edit versioning if object locking is enabled', () => {
      buckets.edit(bucket_name, BucketsPageHelper.USERS[1], true);
      buckets.getDataTables().should('contain.text', BucketsPageHelper.USERS[1]);
      buckets.delete(bucket_name);
    });
  });

  describe('Invalid Input in Create and Edit tests', () => {
    it('should test invalid inputs in create fields', () => {
      buckets.testInvalidCreate();
    });

    it('should test invalid input in edit owner field', () => {
      // A valid bucket must exist before exercising invalid edit input.
      buckets.navigateTo('create');
      buckets.create(bucket_name, BucketsPageHelper.USERS[0], 'default-placement');
      buckets.testInvalidEdit(bucket_name);
      buckets.navigateTo();
      buckets.delete(bucket_name);
    });
  });
});
| 2,118 | 30.626866 | 89 |
ts
|
null |
ceph-main/src/pybind/mgr/dashboard/frontend/cypress/e2e/rgw/buckets.po.ts
|
import { PageHelper } from '../page-helper.po';
const pages = {
index: { url: '#/rgw/bucket', id: 'cd-rgw-bucket-list' },
create: { url: '#/rgw/bucket/create', id: 'cd-rgw-bucket-form' }
};
/**
 * Page-object helper for the RGW bucket pages (list, create, edit).
 * Encapsulates the Cypress interactions used by the bucket e2e specs.
 */
export class BucketsPageHelper extends PageHelper {
  // RGW users available as bucket owners in the e2e environment.
  static readonly USERS = ['dashboard', 'testid'];

  pages = pages;
  // Strings the details table displays for the bucket versioning state.
  versioningStateEnabled = 'Enabled';
  versioningStateSuspended = 'Suspended';

  /** Select the bucket owner in the form's owner dropdown. */
  private selectOwner(owner: string) {
    return this.selectOption('owner', owner);
  }

  /** Select the placement target in the form's dropdown. */
  private selectPlacementTarget(placementTarget: string) {
    return this.selectOption('placement-target', placementTarget);
  }

  /** Select the object-lock mode (e.g. 'Compliance') in the form. */
  private selectLockMode(lockMode: string) {
    return this.selectOption('lock_mode', lockMode);
  }

  /**
   * Fill out and submit the bucket creation form, optionally enabling
   * object locking, then wait for the bucket to appear in the table.
   */
  @PageHelper.restrictTo(pages.create.url)
  create(name: string, owner: string, placementTarget: string, isLocking = false) {
    // Enter in bucket name
    cy.get('#bid').type(name);

    // Select bucket owner
    this.selectOwner(owner);
    cy.get('#owner').should('have.class', 'ng-valid');

    // Select bucket placement target:
    this.selectPlacementTarget(placementTarget);
    cy.get('#placement-target').should('have.class', 'ng-valid');

    if (isLocking) {
      cy.get('#lock_enabled').click({ force: true });
      // Select lock mode:
      this.selectLockMode('Compliance');
      cy.get('#lock_mode').should('have.class', 'ng-valid');
      cy.get('#lock_retention_period_days').type('3');
    }

    // Click the create button and wait for bucket to be made
    cy.contains('button', 'Create Bucket').click();
    this.getFirstTableCell(name).should('exist');
  }

  /**
   * Open the encryption help modal on the create form and verify the
   * SSE-S3 option is checked by default.
   */
  @PageHelper.restrictTo(pages.create.url)
  checkForDefaultEncryption() {
    cy.get("cd-helper[aria-label='toggle encryption helper']").click();
    cy.get("a[aria-label='click here']").click();
    cy.get('cd-modal').within(() => {
      cy.get('input[id=s3Enabled]').should('be.checked');
    });
  }

  /**
   * Edit a bucket: change its owner and exercise versioning.
   * With `isLocking`, the versioning checkbox must be disabled (row
   * state checked via the details table); otherwise versioning is
   * enabled and then suspended again.
   * NOTE(review): details rows are addressed by index (row 2 = owner,
   * row 11 = versioning) — presumably matching the current table
   * layout; brittle if rows are added. Confirm against the template.
   */
  @PageHelper.restrictTo(pages.index.url)
  edit(name: string, new_owner: string, isLocking = false) {
    this.navigateEdit(name);
    cy.get('input[name=placement-target]').should('have.value', 'default-placement');
    this.selectOwner(new_owner);

    // If object locking is enabled versioning shouldn't be visible
    if (isLocking) {
      cy.get('input[id=versioning]').should('be.disabled');
      cy.contains('button', 'Edit Bucket').click();

      // wait to be back on buckets page with table visible and click
      this.getExpandCollapseElement(name).click();

      // check its details table for edited owner field
      cy.get('.table.table-striped.table-bordered')
        .first()
        .should('contains.text', new_owner)
        .as('bucketDataTable');

      // Check versioning enabled:
      cy.get('@bucketDataTable').find('tr').its(2).find('td').last().should('have.text', new_owner);
      cy.get('@bucketDataTable').find('tr').its(11).find('td').last().as('versioningValueCell');
      return cy.get('@versioningValueCell').should('have.text', this.versioningStateEnabled);
    }

    // Enable versioning
    cy.get('input[id=versioning]').should('not.be.checked');
    cy.get('label[for=versioning]').click();
    cy.get('input[id=versioning]').should('be.checked');
    cy.contains('button', 'Edit Bucket').click();

    // wait to be back on buckets page with table visible and click
    this.getExpandCollapseElement(name).click();

    // check its details table for edited owner field
    cy.get('.table.table-striped.table-bordered')
      .first()
      .should('contains.text', new_owner)
      .as('bucketDataTable');

    // Check versioning enabled:
    cy.get('@bucketDataTable').find('tr').its(2).find('td').last().should('have.text', new_owner);
    cy.get('@bucketDataTable').find('tr').its(11).find('td').last().as('versioningValueCell');
    cy.get('@versioningValueCell').should('have.text', this.versioningStateEnabled);

    // Disable versioning:
    this.navigateEdit(name);
    cy.get('label[for=versioning]').click();
    cy.get('input[id=versioning]').should('not.be.checked');
    cy.contains('button', 'Edit Bucket').click();

    // Check versioning suspended:
    this.getExpandCollapseElement(name).click();
    // NOTE(review): reuses the '@versioningValueCell' alias created before
    // navigating away — Cypress re-runs the aliased DOM query on access.
    return cy.get('@versioningValueCell').should('have.text', this.versioningStateSuspended);
  }

  /**
   * Exercise the create form's validation: too-short bucket name,
   * placeholder owner, and placeholder placement target must each be
   * flagged invalid and block submission. Leaves the form via Cancel.
   */
  testInvalidCreate() {
    this.navigateTo('create');
    cy.get('#bid').as('nameInputField'); // Grabs name box field

    // Gives an invalid name (too short), then waits for dashboard to determine validity
    cy.get('@nameInputField').type('rq');
    cy.contains('button', 'Create Bucket').click(); // To trigger a validation

    // Waiting for website to decide if name is valid or not
    // Check that name input field was marked invalid in the css
    cy.get('@nameInputField')
      .should('not.have.class', 'ng-pending')
      .and('have.class', 'ng-invalid');

    // Check that error message was printed under name input field
    cy.get('#bid + .invalid-feedback').should(
      'have.text',
      'Bucket names must be 3 to 63 characters long.'
    );

    // Test invalid owner input
    // select some valid option. The owner drop down error message will not appear unless a valid user was selected at
    // one point before the invalid placeholder user is selected.
    this.selectOwner(BucketsPageHelper.USERS[1]);

    // select the first option, which is invalid because it is a placeholder
    this.selectOwner('-- Select a user --');
    cy.get('@nameInputField').click();

    // Check that owner drop down field was marked invalid in the css
    cy.get('#owner').should('have.class', 'ng-invalid');

    // Check that error message was printed under owner drop down field
    cy.get('#owner + .invalid-feedback').should('have.text', 'This field is required.');

    // Check invalid placement target input
    this.selectOwner(BucketsPageHelper.USERS[1]);
    // The drop down error message will not appear unless a valid option is previsously selected.
    this.selectPlacementTarget('default-placement');
    this.selectPlacementTarget('-- Select a placement target --');
    cy.get('@nameInputField').click(); // Trigger validation
    cy.get('#placement-target').should('have.class', 'ng-invalid');
    cy.get('#placement-target + .invalid-feedback').should('have.text', 'This field is required.');

    // Clicks the Create Bucket button but the page doesn't move.
    // Done by testing for the breadcrumb
    cy.contains('button', 'Create Bucket').click(); // Clicks Create Bucket button
    this.expectBreadcrumbText('Create');
    // content in fields seems to subsist through tests if not cleared, so it is cleared
    cy.get('@nameInputField').clear();
    return cy.contains('button', 'Cancel').click();
  }

  /**
   * Exercise the edit form's validation: selecting the placeholder owner
   * must mark the field invalid and keep the user on the Edit page.
   */
  testInvalidEdit(name: string) {
    this.navigateEdit(name);

    cy.get('input[id=versioning]').should('exist').and('not.be.checked');

    // Chooses 'Select a user' rather than a valid owner on Edit Bucket page
    // and checks if it's an invalid input

    // select the first option, which is invalid because it is a placeholder
    this.selectOwner('-- Select a user --');

    cy.contains('button', 'Edit Bucket').click();

    // Check that owner drop down field was marked invalid in the css
    cy.get('#owner').should('have.class', 'ng-invalid');

    // Check that error message was printed under owner drop down field
    cy.get('#owner + .invalid-feedback').should('have.text', 'This field is required.');

    this.expectBreadcrumbText('Edit');
  }
}
| 7,514 | 36.019704 | 118 |
ts
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.